Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/Kconfig | 2
-rw-r--r-- drivers/acpi/Makefile | 3
-rw-r--r-- drivers/acpi/acpi_lpss.c | 33
-rw-r--r-- drivers/acpi/acpi_platform.c | 2
-rw-r--r-- drivers/acpi/acpica/nswalk.c | 3
-rw-r--r-- drivers/acpi/apei/bert.c | 10
-rw-r--r-- drivers/acpi/apei/erst.c | 2
-rw-r--r-- drivers/acpi/apei/ghes.c | 19
-rw-r--r-- drivers/acpi/apei/hest.c | 2
-rw-r--r-- drivers/acpi/arm64/Kconfig | 10
-rw-r--r-- drivers/acpi/arm64/Makefile | 1
-rw-r--r-- drivers/acpi/arm64/agdi.c | 116
-rw-r--r-- drivers/acpi/battery.c | 12
-rw-r--r-- drivers/acpi/bus.c | 46
-rw-r--r-- drivers/acpi/cppc_acpi.c | 9
-rw-r--r-- drivers/acpi/ec.c | 90
-rw-r--r-- drivers/acpi/fan.h | 44
-rw-r--r-- drivers/acpi/fan_attr.c | 137
-rw-r--r-- drivers/acpi/fan_core.c (renamed from drivers/acpi/fan.c) | 204
-rw-r--r-- drivers/acpi/internal.h | 2
-rw-r--r-- drivers/acpi/osl.c | 19
-rw-r--r-- drivers/acpi/pci_link.c | 2
-rw-r--r-- drivers/acpi/pci_root.c | 3
-rw-r--r-- drivers/acpi/processor_idle.c | 15
-rw-r--r-- drivers/acpi/property.c | 29
-rw-r--r-- drivers/acpi/scan.c | 31
-rw-r--r-- drivers/acpi/sleep.c | 13
-rw-r--r-- drivers/acpi/tables.c | 2
-rw-r--r-- drivers/acpi/video_detect.c | 75
-rw-r--r-- drivers/acpi/x86/utils.c | 21
-rw-r--r-- drivers/amba/bus.c | 73
-rw-r--r-- drivers/ata/Kconfig | 8
-rw-r--r-- drivers/ata/acard-ahci.c | 2
-rw-r--r-- drivers/ata/ahci.c | 113
-rw-r--r-- drivers/ata/ahci.h | 4
-rw-r--r-- drivers/ata/ahci_brcm.c | 2
-rw-r--r-- drivers/ata/ahci_ceva.c | 2
-rw-r--r-- drivers/ata/ahci_da850.c | 2
-rw-r--r-- drivers/ata/ahci_dm816.c | 2
-rw-r--r-- drivers/ata/ahci_imx.c | 2
-rw-r--r-- drivers/ata/ahci_mtk.c | 2
-rw-r--r-- drivers/ata/ahci_mvebu.c | 2
-rw-r--r-- drivers/ata/ahci_octeon.c | 2
-rw-r--r-- drivers/ata/ahci_platform.c | 2
-rw-r--r-- drivers/ata/ahci_qoriq.c | 4
-rw-r--r-- drivers/ata/ahci_st.c | 2
-rw-r--r-- drivers/ata/ahci_sunxi.c | 2
-rw-r--r-- drivers/ata/ahci_xgene.c | 4
-rw-r--r-- drivers/ata/ata_piix.c | 5
-rw-r--r-- drivers/ata/libahci.c | 4
-rw-r--r-- drivers/ata/libahci_platform.c | 3
-rw-r--r-- drivers/ata/libata-acpi.c | 29
-rw-r--r-- drivers/ata/libata-core.c | 22
-rw-r--r-- drivers/ata/libata-eh.c | 49
-rw-r--r-- drivers/ata/libata-sata.c | 10
-rw-r--r-- drivers/ata/libata-scsi.c | 95
-rw-r--r-- drivers/ata/libata-sff.c | 136
-rw-r--r-- drivers/ata/libata.h | 2
-rw-r--r-- drivers/ata/pata_arasan_cf.c | 3
-rw-r--r-- drivers/ata/pata_artop.c | 31
-rw-r--r-- drivers/ata/pata_atiixp.c | 4
-rw-r--r-- drivers/ata/pata_cs5520.c | 5
-rw-r--r-- drivers/ata/pata_ep93xx.c | 4
-rw-r--r-- drivers/ata/pata_ftide010.c | 6
-rw-r--r-- drivers/ata/pata_hpt366.c | 49
-rw-r--r-- drivers/ata/pata_hpt37x.c | 115
-rw-r--r-- drivers/ata/pata_hpt3x2n.c | 38
-rw-r--r-- drivers/ata/pata_imx.c | 15
-rw-r--r-- drivers/ata/pata_ixp4xx_cf.c | 2
-rw-r--r-- drivers/ata/pata_macio.c | 24
-rw-r--r-- drivers/ata/pata_mpc52xx.c | 7
-rw-r--r-- drivers/ata/pata_ns87415.c | 4
-rw-r--r-- drivers/ata/pata_octeon_cf.c | 10
-rw-r--r-- drivers/ata/pata_of_platform.c | 2
-rw-r--r-- drivers/ata/pata_pdc202xx_old.c | 2
-rw-r--r-- drivers/ata/pata_platform.c | 18
-rw-r--r-- drivers/ata/pata_pxa.c | 10
-rw-r--r-- drivers/ata/pata_samsung_cf.c | 12
-rw-r--r-- drivers/ata/pata_triflex.c | 5
-rw-r--r-- drivers/ata/sata_fsl.c | 14
-rw-r--r-- drivers/ata/sata_gemini.c | 6
-rw-r--r-- drivers/ata/sata_highbank.c | 7
-rw-r--r-- drivers/ata/sata_inic162x.c | 10
-rw-r--r-- drivers/ata/sata_mv.c | 8
-rw-r--r-- drivers/ata/sata_rcar.c | 35
-rw-r--r-- drivers/ata/sata_svw.c | 10
-rw-r--r-- drivers/ata/sata_vsc.c | 10
-rw-r--r-- drivers/atm/eni.c | 2
-rw-r--r-- drivers/atm/firestream.c | 2
-rw-r--r-- drivers/atm/nicstar.c | 10
-rw-r--r-- drivers/auxdisplay/lcd2s.c | 24
-rw-r--r-- drivers/base/arch_topology.c | 45
-rw-r--r-- drivers/base/class.c | 2
-rw-r--r-- drivers/base/core.c | 2
-rw-r--r-- drivers/base/cpu.c | 2
-rw-r--r-- drivers/base/devtmpfs.c | 2
-rw-r--r-- drivers/base/init.c | 1
-rw-r--r-- drivers/base/memory.c | 147
-rw-r--r-- drivers/base/node.c | 48
-rw-r--r-- drivers/base/power/domain.c | 42
-rw-r--r-- drivers/base/power/main.c | 16
-rw-r--r-- drivers/base/power/runtime.c | 5
-rw-r--r-- drivers/base/power/wakeirq.c | 2
-rw-r--r-- drivers/base/power/wakeup.c | 4
-rw-r--r-- drivers/base/regmap/internal.h | 2
-rw-r--r-- drivers/base/regmap/regmap-irq.c | 6
-rw-r--r-- drivers/base/regmap/regmap.c | 11
-rw-r--r-- drivers/base/topology.c | 20
-rw-r--r-- drivers/bcma/driver_chipcommon.c | 2
-rw-r--r-- drivers/bcma/driver_chipcommon_pmu.c | 6
-rw-r--r-- drivers/bcma/driver_gpio.c | 1
-rw-r--r-- drivers/bcma/driver_pci_host.c | 6
-rw-r--r-- drivers/bcma/main.c | 4
-rw-r--r-- drivers/bcma/sprom.c | 4
-rw-r--r-- drivers/block/aoe/aoeblk.c | 1
-rw-r--r-- drivers/block/aoe/aoecmd.c | 5
-rw-r--r-- drivers/block/drbd/drbd_actlog.c | 5
-rw-r--r-- drivers/block/drbd/drbd_bitmap.c | 7
-rw-r--r-- drivers/block/drbd/drbd_int.h | 4
-rw-r--r-- drivers/block/drbd/drbd_receiver.c | 36
-rw-r--r-- drivers/block/drbd/drbd_req.c | 8
-rw-r--r-- drivers/block/drbd/drbd_worker.c | 10
-rw-r--r-- drivers/block/floppy.c | 10
-rw-r--r-- drivers/block/loop.c | 42
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.c | 5
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.h | 1
-rw-r--r-- drivers/block/null_blk/main.c | 54
-rw-r--r-- drivers/block/pktcdvd.c | 21
-rw-r--r-- drivers/block/rnbd/rnbd-clt.c | 28
-rw-r--r-- drivers/block/rnbd/rnbd-clt.h | 1
-rw-r--r-- drivers/block/rnbd/rnbd-proto.h | 4
-rw-r--r-- drivers/block/rnbd/rnbd-srv-dev.c | 61
-rw-r--r-- drivers/block/rnbd/rnbd-srv-dev.h | 18
-rw-r--r-- drivers/block/rnbd/rnbd-srv-sysfs.c | 1
-rw-r--r-- drivers/block/rnbd/rnbd-srv.c | 46
-rw-r--r-- drivers/block/rnbd/rnbd-srv.h | 1
-rw-r--r-- drivers/block/sunvdc.c | 1
-rw-r--r-- drivers/block/virtio_blk.c | 94
-rw-r--r-- drivers/block/xen-blkback/blkback.c | 25
-rw-r--r-- drivers/block/xen-blkback/xenbus.c | 1
-rw-r--r-- drivers/block/xen-blkfront.c | 68
-rw-r--r-- drivers/block/zram/zram_drv.c | 26
-rw-r--r-- drivers/bluetooth/Kconfig | 1
-rw-r--r-- drivers/bluetooth/ath3k.c | 1
-rw-r--r-- drivers/bluetooth/bcm203x.c | 1
-rw-r--r-- drivers/bluetooth/btintel.c | 11
-rw-r--r-- drivers/bluetooth/btintel.h | 1
-rw-r--r-- drivers/bluetooth/btmrvl_debugfs.c | 2
-rw-r--r-- drivers/bluetooth/btmrvl_sdio.c | 2
-rw-r--r-- drivers/bluetooth/btmtk.c | 1
-rw-r--r-- drivers/bluetooth/btmtk.h | 43
-rw-r--r-- drivers/bluetooth/btmtksdio.c | 471
-rw-r--r-- drivers/bluetooth/btmtkuart.c | 198
-rw-r--r-- drivers/bluetooth/btrtl.c | 21
-rw-r--r-- drivers/bluetooth/btusb.c | 100
-rw-r--r-- drivers/bluetooth/hci_bcm.c | 46
-rw-r--r-- drivers/bluetooth/hci_h5.c | 13
-rw-r--r-- drivers/bluetooth/hci_ll.c | 2
-rw-r--r-- drivers/bluetooth/hci_serdev.c | 3
-rw-r--r-- drivers/bus/imx-weim.c | 135
-rw-r--r-- drivers/bus/moxtet.c | 4
-rw-r--r-- drivers/cdrom/gdrom.c | 1
-rw-r--r-- drivers/char/hw_random/Kconfig | 2
-rw-r--r-- drivers/char/hw_random/atmel-rng.c | 148
-rw-r--r-- drivers/char/hw_random/cavium-rng-vf.c | 2
-rw-r--r-- drivers/char/hw_random/core.c | 162
-rw-r--r-- drivers/char/hw_random/nomadik-rng.c | 4
-rw-r--r-- drivers/char/hw_random/optee-rng.c | 6
-rw-r--r-- drivers/char/random.c | 2857
-rw-r--r-- drivers/char/tpm/st33zp24/i2c.c | 5
-rw-r--r-- drivers/char/tpm/st33zp24/spi.c | 9
-rw-r--r-- drivers/char/tpm/st33zp24/st33zp24.c | 3
-rw-r--r-- drivers/char/tpm/st33zp24/st33zp24.h | 2
-rw-r--r-- drivers/char/tpm/tpm-chip.c | 46
-rw-r--r-- drivers/char/tpm/tpm-dev-common.c | 8
-rw-r--r-- drivers/char/tpm/tpm.h | 2
-rw-r--r-- drivers/char/tpm/tpm2-space.c | 73
-rw-r--r-- drivers/char/tpm/tpm_tis_spi_main.c | 3
-rw-r--r-- drivers/char/tpm/tpm_vtpm_proxy.c | 2
-rw-r--r-- drivers/char/tpm/xen-tpmfront.c | 8
-rw-r--r-- drivers/char/virtio_console.c | 7
-rw-r--r-- drivers/clk/Kconfig | 2
-rw-r--r-- drivers/clk/clk-lmk04832.c | 4
-rw-r--r-- drivers/clk/clk-scmi.c | 71
-rw-r--r-- drivers/clk/qcom/dispcc-sc7180.c | 5
-rw-r--r-- drivers/clk/qcom/dispcc-sc7280.c | 5
-rw-r--r-- drivers/clk/qcom/dispcc-sm8250.c | 5
-rw-r--r-- drivers/clk/qcom/gdsc.c | 26
-rw-r--r-- drivers/clk/qcom/gdsc.h | 8
-rw-r--r-- drivers/clk/samsung/Kconfig | 9
-rw-r--r-- drivers/clk/samsung/Makefile | 1
-rw-r--r-- drivers/clk/samsung/clk-fsd.c | 1803
-rw-r--r-- drivers/clk/samsung/clk-pll.c | 1
-rw-r--r-- drivers/clk/samsung/clk-pll.h | 1
-rw-r--r-- drivers/clocksource/Kconfig | 10
-rw-r--r-- drivers/clocksource/Makefile | 1
-rw-r--r-- drivers/clocksource/acpi_pm.c | 6
-rw-r--r-- drivers/clocksource/arm_arch_timer.c | 13
-rw-r--r-- drivers/clocksource/exynos_mct.c | 39
-rw-r--r-- drivers/clocksource/timer-atcpit100.c | 266
-rw-r--r-- drivers/clocksource/timer-imx-sysctr.c | 2
-rw-r--r-- drivers/clocksource/timer-imx-tpm.c | 14
-rw-r--r-- drivers/clocksource/timer-microchip-pit64b.c | 8
-rw-r--r-- drivers/clocksource/timer-of.c | 6
-rw-r--r-- drivers/clocksource/timer-ti-dm-systimer.c | 7
-rw-r--r-- drivers/counter/counter-sysfs.c | 17
-rw-r--r-- drivers/cpufreq/amd-pstate-trace.h | 22
-rw-r--r-- drivers/cpufreq/amd-pstate.c | 59
-rw-r--r-- drivers/cpufreq/cpufreq_conservative.c | 10
-rw-r--r-- drivers/cpufreq/cpufreq_governor.c | 6
-rw-r--r-- drivers/cpufreq/cpufreq_governor.h | 12
-rw-r--r-- drivers/cpufreq/cpufreq_governor_attr_set.c | 5
-rw-r--r-- drivers/cpufreq/cpufreq_ondemand.c | 10
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 38
-rw-r--r-- drivers/cpufreq/longhaul.c | 4
-rw-r--r-- drivers/cpufreq/powernow-k8.c | 6
-rw-r--r-- drivers/cpuidle/cpuidle-haltpoll.c | 4
-rw-r--r-- drivers/cpuidle/cpuidle-qcom-spm.c | 28
-rw-r--r-- drivers/crypto/Kconfig | 10
-rw-r--r-- drivers/crypto/Makefile | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 3
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c | 3
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 3
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c | 3
-rw-r--r-- drivers/crypto/amlogic/amlogic-gxl-cipher.c | 2
-rw-r--r-- drivers/crypto/atmel-aes.c | 1
-rw-r--r-- drivers/crypto/atmel-sha.c | 1
-rw-r--r-- drivers/crypto/atmel-tdes.c | 1
-rw-r--r-- drivers/crypto/caam/pdb.h | 2
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_mbx.c | 8
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_req.h | 2
-rw-r--r-- drivers/crypto/cavium/zip/zip_main.c | 83
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-aes.c | 5
-rw-r--r-- drivers/crypto/ccp/ccp-dmaengine.c | 16
-rw-r--r-- drivers/crypto/ccp/sev-dev.c | 2
-rw-r--r-- drivers/crypto/ccree/cc_buffer_mgr.c | 7
-rw-r--r-- drivers/crypto/ccree/cc_cipher.c | 2
-rw-r--r-- drivers/crypto/gemini/sl3516-ce-cipher.c | 6
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre.h | 2
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_main.c | 19
-rw-r--r-- drivers/crypto/hisilicon/qm.c | 72
-rw-r--r-- drivers/crypto/hisilicon/qm.h | 441
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec.h | 2
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_crypto.c | 43
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_crypto.h | 6
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_main.c | 80
-rw-r--r-- drivers/crypto/hisilicon/sgl.c | 2
-rw-r--r-- drivers/crypto/hisilicon/zip/zip.h | 2
-rw-r--r-- drivers/crypto/hisilicon/zip/zip_main.c | 17
-rw-r--r-- drivers/crypto/ixp4xx_crypto.c | 1
-rw-r--r-- drivers/crypto/marvell/Kconfig | 1
-rw-r--r-- drivers/crypto/marvell/octeontx/otx_cptvf_algs.c | 5
-rw-r--r-- drivers/crypto/marvell/octeontx/otx_cptvf_main.c | 1
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cpt_common.h | 1
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c | 14
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptlf.h | 19
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf.h | 1
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c | 25
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c | 27
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 56
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c | 17
-rw-r--r-- drivers/crypto/mxs-dcp.c | 2
-rw-r--r-- drivers/crypto/nx/nx-common-pseries.c | 4
-rw-r--r-- drivers/crypto/omap-aes.c | 2
-rw-r--r-- drivers/crypto/omap-sham.c | 2
-rw-r--r-- drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c | 23
-rw-r--r-- drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h | 24
-rw-r--r-- drivers/crypto/qat/qat_4xxx/adf_drv.c | 7
-rw-r--r-- drivers/crypto/qat/qat_common/Makefile | 1
-rw-r--r-- drivers/crypto/qat/qat_common/adf_accel_devices.h | 2
-rw-r--r-- drivers/crypto/qat/qat_common/adf_admin.c | 37
-rw-r--r-- drivers/crypto/qat/qat_common/adf_common_drv.h | 4
-rw-r--r-- drivers/crypto/qat/qat_common/adf_ctl_drv.c | 6
-rw-r--r-- drivers/crypto/qat/qat_common/adf_gen4_hw_data.h | 14
-rw-r--r-- drivers/crypto/qat/qat_common/adf_gen4_pfvf.c | 42
-rw-r--r-- drivers/crypto/qat/qat_common/adf_gen4_pm.c | 137
-rw-r--r-- drivers/crypto/qat/qat_common/adf_gen4_pm.h | 44
-rw-r--r-- drivers/crypto/qat/qat_common/adf_init.c | 6
-rw-r--r-- drivers/crypto/qat/qat_common/adf_isr.c | 42
-rw-r--r-- drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c | 4
-rw-r--r-- drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h | 1
-rw-r--r-- drivers/crypto/qat/qat_common/qat_crypto.c | 7
-rw-r--r-- drivers/crypto/qat/qat_common/qat_uclo.c | 9
-rw-r--r-- drivers/crypto/qcom-rng.c | 17
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto_skcipher.c | 1
-rw-r--r-- drivers/crypto/ux500/cryp/cryp_core.c | 2
-rw-r--r-- drivers/crypto/ux500/hash/hash_core.c | 2
-rw-r--r-- drivers/crypto/vmx/Kconfig | 4
-rw-r--r-- drivers/crypto/xilinx/Makefile | 1
-rw-r--r-- drivers/crypto/xilinx/zynqmp-sha.c | 264
-rw-r--r-- drivers/dax/device.c | 3
-rw-r--r-- drivers/dax/super.c | 2
-rw-r--r-- drivers/dma/pl330.c | 4
-rw-r--r-- drivers/edac/altera_edac.c | 40
-rw-r--r-- drivers/edac/amd64_edac.c | 109
-rw-r--r-- drivers/edac/amd64_edac.h | 24
-rw-r--r-- drivers/edac/edac_device_sysfs.c | 31
-rw-r--r-- drivers/edac/edac_mc.c | 4
-rw-r--r-- drivers/edac/edac_pci_sysfs.c | 26
-rw-r--r-- drivers/firmware/Kconfig | 1
-rw-r--r-- drivers/firmware/arm_scmi/Kconfig | 56
-rw-r--r-- drivers/firmware/arm_scmi/Makefile | 8
-rw-r--r-- drivers/firmware/arm_scmi/clock.c | 34
-rw-r--r-- drivers/firmware/arm_scmi/common.h | 26
-rw-r--r-- drivers/firmware/arm_scmi/driver.c | 236
-rw-r--r-- drivers/firmware/arm_scmi/mailbox.c | 3
-rw-r--r-- drivers/firmware/arm_scmi/optee.c | 567
-rw-r--r-- drivers/firmware/arm_scmi/smc.c | 98
-rw-r--r-- drivers/firmware/arm_scmi/virtio.c | 613
-rw-r--r-- drivers/firmware/arm_sdei.c | 13
-rw-r--r-- drivers/firmware/efi/apple-properties.c | 2
-rw-r--r-- drivers/firmware/efi/efi-pstore.c | 2
-rw-r--r-- drivers/firmware/efi/efi.c | 2
-rw-r--r-- drivers/firmware/efi/libstub/riscv-stub.c | 17
-rw-r--r-- drivers/firmware/efi/mokvar-table.c | 2
-rw-r--r-- drivers/firmware/efi/vars.c | 5
-rw-r--r-- drivers/firmware/imx/rm.c | 45
-rw-r--r-- drivers/firmware/imx/scu-pd.c | 4
-rw-r--r-- drivers/firmware/qcom_scm.c | 215
-rw-r--r-- drivers/firmware/qcom_scm.h | 7
-rw-r--r-- drivers/firmware/ti_sci.c | 2
-rw-r--r-- drivers/firmware/xilinx/zynqmp.c | 26
-rw-r--r-- drivers/gpio/gpio-74x164.c | 4
-rw-r--r-- drivers/gpio/gpio-max3191x.c | 4
-rw-r--r-- drivers/gpio/gpio-max7301.c | 4
-rw-r--r-- drivers/gpio/gpio-mc33880.c | 4
-rw-r--r-- drivers/gpio/gpio-mt7621.c | 1
-rw-r--r-- drivers/gpio/gpio-omap.c | 7
-rw-r--r-- drivers/gpio/gpio-pisosr.c | 4
-rw-r--r-- drivers/gpio/gpio-rcar.c | 2
-rw-r--r-- drivers/gpio/gpio-sim.c | 4
-rw-r--r-- drivers/gpio/gpio-tegra186.c | 2
-rw-r--r-- drivers/gpio/gpio-tqmx86.c | 3
-rw-r--r-- drivers/gpio/gpio-ts4900.c | 24
-rw-r--r-- drivers/gpio/gpiolib-acpi.c | 6
-rw-r--r-- drivers/gpio/gpiolib.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1
-rw-r--r-- drivers/gpu/drm/arm/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/bridge/Kconfig | 3
-rw-r--r-- drivers/gpu/drm/bridge/ti-sn65dsi86.c | 5
-rw-r--r-- drivers/gpu/drm/drm_cache.c | 2
-rw-r--r-- drivers/gpu/drm/drm_connector.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.c | 16
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_pch.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_pch.h | 2
-rw-r--r-- drivers/gpu/drm/imx/parallel-display.c | 8
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 8
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 9
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 15
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 36
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | 1
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 5
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_pll.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvfw/hs.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_dmem.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_svm.c | 1
-rw-r--r-- drivers/gpu/drm/panel/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/panel/panel-abt-y030xx067a.c | 4
-rw-r--r-- drivers/gpu/drm/panel/panel-ilitek-ili9322.c | 4
-rw-r--r-- drivers/gpu/drm/panel/panel-ilitek-ili9341.c | 3
-rw-r--r-- drivers/gpu/drm/panel/panel-innolux-ej030na.c | 4
-rw-r--r-- drivers/gpu/drm/panel/panel-lg-lb035q02.c | 4
-rw-r--r-- drivers/gpu/drm/panel/panel-lg-lg4573.c | 4
-rw-r--r-- drivers/gpu/drm/panel/panel-nec-nl8048hl11.c | 4
-rw-r--r-- drivers/gpu/drm/panel/panel-novatek-nt39016.c | 4
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-db7430.c | 3
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-ld9040.c | 4
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-s6d27a1.c | 3
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c | 3
-rw-r--r-- drivers/gpu/drm/panel/panel-simple.c | 2
-rw-r--r-- drivers/gpu/drm/panel/panel-sitronix-st7789v.c | 4
-rw-r--r-- drivers/gpu/drm/panel/panel-sony-acx565akm.c | 4
-rw-r--r-- drivers/gpu/drm/panel/panel-tpo-td028ttec1.c | 4
-rw-r--r-- drivers/gpu/drm/panel/panel-tpo-td043mtea1.c | 4
-rw-r--r-- drivers/gpu/drm/panel/panel-tpo-tpg110.c | 3
-rw-r--r-- drivers/gpu/drm/panel/panel-widechips-ws2401.c | 3
-rw-r--r-- drivers/gpu/drm/rockchip/cdn-dp-core.c | 28
-rw-r--r-- drivers/gpu/drm/rockchip/cdn-dp-core.h | 4
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_mixer.h | 8
-rw-r--r-- drivers/gpu/drm/tiny/hx8357d.c | 4
-rw-r--r-- drivers/gpu/drm/tiny/ili9163.c | 4
-rw-r--r-- drivers/gpu/drm/tiny/ili9225.c | 4
-rw-r--r-- drivers/gpu/drm/tiny/ili9341.c | 4
-rw-r--r-- drivers/gpu/drm/tiny/ili9486.c | 4
-rw-r--r-- drivers/gpu/drm/tiny/mi0283qt.c | 4
-rw-r--r-- drivers/gpu/drm/tiny/panel-mipi-dbi.c | 4
-rw-r--r-- drivers/gpu/drm/tiny/repaper.c | 4
-rw-r--r-- drivers/gpu/drm/tiny/st7586.c | 4
-rw-r--r-- drivers/gpu/drm/tiny/st7735r.c | 4
-rw-r--r-- drivers/gpu/ipu-v3/ipu-csi.c | 6
-rw-r--r-- drivers/hid/hid-debug.c | 5
-rw-r--r-- drivers/hid/hid-elo.c | 7
-rw-r--r-- drivers/hid/hid-input.c | 3
-rw-r--r-- drivers/hid/hid-logitech-dj.c | 1
-rw-r--r-- drivers/hid/hid-nintendo.c | 4
-rw-r--r-- drivers/hid/hid-thrustmaster.c | 8
-rw-r--r-- drivers/hid/hid-vivaldi.c | 2
-rw-r--r-- drivers/hid/uhid.c | 2
-rw-r--r-- drivers/hv/channel_mgmt.c | 19
-rw-r--r-- drivers/hv/hv_balloon.c | 2
-rw-r--r-- drivers/hv/hv_common.c | 4
-rw-r--r-- drivers/hv/hv_snapshot.c | 7
-rw-r--r-- drivers/hv/hyperv_vmbus.h | 14
-rw-r--r-- drivers/hv/vmbus_drv.c | 4
-rw-r--r-- drivers/hwmon/Kconfig | 49
-rw-r--r-- drivers/hwmon/Makefile | 2
-rw-r--r-- drivers/hwmon/adcxx.c | 4
-rw-r--r-- drivers/hwmon/adt7310.c | 95
-rw-r--r-- drivers/hwmon/adt7410.c | 82
-rw-r--r-- drivers/hwmon/adt7x10.c | 479
-rw-r--r-- drivers/hwmon/adt7x10.h | 10
-rw-r--r-- drivers/hwmon/aquacomputer_d5next.c | 379
-rw-r--r-- drivers/hwmon/asus-ec-sensors.c | 716
-rw-r--r-- drivers/hwmon/asus_wmi_ec_sensors.c | 3
-rw-r--r-- drivers/hwmon/asus_wmi_sensors.c | 1
-rw-r--r-- drivers/hwmon/axi-fan-control.c | 3
-rw-r--r-- drivers/hwmon/dell-smm-hwmon.c | 84
-rw-r--r-- drivers/hwmon/hwmon.c | 40
-rw-r--r-- drivers/hwmon/lm70.c | 16
-rw-r--r-- drivers/hwmon/lm83.c | 476
-rw-r--r-- drivers/hwmon/max1111.c | 3
-rw-r--r-- drivers/hwmon/max31722.c | 4
-rw-r--r-- drivers/hwmon/max6639.c | 62
-rw-r--r-- drivers/hwmon/mlxreg-fan.c | 84
-rw-r--r-- drivers/hwmon/nct6775.c | 146
-rw-r--r-- drivers/hwmon/occ/common.c | 19
-rw-r--r-- drivers/hwmon/occ/common.h | 2
-rw-r--r-- drivers/hwmon/occ/sysfs.c | 46
-rw-r--r-- drivers/hwmon/pmbus/Kconfig | 33
-rw-r--r-- drivers/hwmon/pmbus/Makefile | 1
-rw-r--r-- drivers/hwmon/pmbus/adm1275.c | 40
-rw-r--r-- drivers/hwmon/pmbus/lm25066.c | 14
-rw-r--r-- drivers/hwmon/pmbus/pli1209bc.c | 146
-rw-r--r-- drivers/hwmon/pmbus/pmbus.h | 2
-rw-r--r-- drivers/hwmon/pmbus/pmbus_core.c | 132
-rw-r--r-- drivers/hwmon/pmbus/xdpe12284.c | 32
-rw-r--r-- drivers/hwmon/powr1220.c | 235
-rw-r--r-- drivers/hwmon/sch5627.c | 71
-rw-r--r-- drivers/hwmon/sch5636.c | 10
-rw-r--r-- drivers/hwmon/sch56xx-common.c | 44
-rw-r--r-- drivers/hwmon/scpi-hwmon.c | 6
-rw-r--r-- drivers/hwmon/tc654.c | 104
-rw-r--r-- drivers/hwmon/tmp464.c | 712
-rw-r--r-- drivers/hwmon/vexpress-hwmon.c | 6
-rw-r--r-- drivers/hwtracing/coresight/coresight-config.h | 2
-rw-r--r-- drivers/hwtracing/intel_th/msu.c | 4
-rw-r--r-- drivers/idle/intel_idle.c | 111
-rw-r--r-- drivers/iio/accel/bma400_spi.c | 4
-rw-r--r-- drivers/iio/accel/bmc150-accel-spi.c | 4
-rw-r--r-- drivers/iio/accel/bmi088-accel-spi.c | 4
-rw-r--r-- drivers/iio/accel/kxsd9-spi.c | 4
-rw-r--r-- drivers/iio/accel/mma7455_spi.c | 4
-rw-r--r-- drivers/iio/accel/sca3000.c | 4
-rw-r--r-- drivers/iio/adc/ad7266.c | 4
-rw-r--r-- drivers/iio/adc/ltc2496.c | 4
-rw-r--r-- drivers/iio/adc/mcp320x.c | 4
-rw-r--r-- drivers/iio/adc/mcp3911.c | 4
-rw-r--r-- drivers/iio/adc/ti-adc12138.c | 4
-rw-r--r-- drivers/iio/adc/ti-ads7950.c | 4
-rw-r--r-- drivers/iio/adc/ti-ads8688.c | 4
-rw-r--r-- drivers/iio/adc/ti-tlc4541.c | 4
-rw-r--r-- drivers/iio/amplifiers/ad8366.c | 4
-rw-r--r-- drivers/iio/common/ssp_sensors/ssp_dev.c | 4
-rw-r--r-- drivers/iio/dac/ad5360.c | 4
-rw-r--r-- drivers/iio/dac/ad5380.c | 4
-rw-r--r-- drivers/iio/dac/ad5446.c | 4
-rw-r--r-- drivers/iio/dac/ad5449.c | 4
-rw-r--r-- drivers/iio/dac/ad5504.c | 4
-rw-r--r-- drivers/iio/dac/ad5592r.c | 4
-rw-r--r-- drivers/iio/dac/ad5624r_spi.c | 4
-rw-r--r-- drivers/iio/dac/ad5686-spi.c | 4
-rw-r--r-- drivers/iio/dac/ad5761.c | 4
-rw-r--r-- drivers/iio/dac/ad5764.c | 4
-rw-r--r-- drivers/iio/dac/ad5791.c | 4
-rw-r--r-- drivers/iio/dac/ad8801.c | 4
-rw-r--r-- drivers/iio/dac/ltc1660.c | 4
-rw-r--r-- drivers/iio/dac/ltc2632.c | 4
-rw-r--r-- drivers/iio/dac/mcp4922.c | 4
-rw-r--r-- drivers/iio/dac/ti-dac082s085.c | 4
-rw-r--r-- drivers/iio/dac/ti-dac7311.c | 3
-rw-r--r-- drivers/iio/frequency/adf4350.c | 4
-rw-r--r-- drivers/iio/gyro/bmg160_spi.c | 4
-rw-r--r-- drivers/iio/gyro/fxas21002c_spi.c | 4
-rw-r--r-- drivers/iio/health/afe4403.c | 4
-rw-r--r-- drivers/iio/magnetometer/bmc150_magn_spi.c | 4
-rw-r--r-- drivers/iio/magnetometer/hmc5843_spi.c | 4
-rw-r--r-- drivers/iio/potentiometer/max5487.c | 4
-rw-r--r-- drivers/iio/pressure/ms5611_spi.c | 4
-rw-r--r-- drivers/iio/pressure/zpa2326_spi.c | 4
-rw-r--r-- drivers/infiniband/core/rw.c | 1
-rw-r--r-- drivers/infiniband/hw/mlx5/cong.c | 3
-rw-r--r-- drivers/infiniband/hw/mlx5/devx.c | 61
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 2
-rw-r--r-- drivers/infiniband/hw/mlx5/mr.c | 17
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 1
-rw-r--r-- drivers/infiniband/hw/mlx5/qpc.c | 2
-rw-r--r-- drivers/input/keyboard/Kconfig | 2
-rw-r--r-- drivers/input/keyboard/applespi.c | 4
-rw-r--r-- drivers/input/misc/adxl34x-spi.c | 4
-rw-r--r-- drivers/input/mouse/elan_i2c_core.c | 64
-rw-r--r-- drivers/input/rmi4/Kconfig | 2
-rw-r--r-- drivers/input/tablet/aiptek.c | 10
-rw-r--r-- drivers/input/touchscreen/Kconfig | 4
-rw-r--r-- drivers/input/touchscreen/ads7846.c | 4
-rw-r--r-- drivers/input/touchscreen/cyttsp4_spi.c | 4
-rw-r--r-- drivers/input/touchscreen/goodix.c | 34
-rw-r--r-- drivers/input/touchscreen/tsc2005.c | 4
-rw-r--r-- drivers/input/touchscreen/zinitix.c | 44
-rw-r--r-- drivers/iommu/Kconfig | 6
-rw-r--r-- drivers/iommu/Makefile | 2
-rw-r--r-- drivers/iommu/amd/amd_iommu.h | 1
-rw-r--r-- drivers/iommu/amd/amd_iommu_types.h | 1
-rw-r--r-- drivers/iommu/amd/init.c | 10
-rw-r--r-- drivers/iommu/amd/io_pgtable.c | 12
-rw-r--r-- drivers/iommu/amd/iommu.c | 10
-rw-r--r-- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 5
-rw-r--r-- drivers/iommu/intel/Kconfig | 2
-rw-r--r-- drivers/iommu/intel/iommu.c | 6
-rw-r--r-- drivers/iommu/intel/svm.c | 9
-rw-r--r-- drivers/iommu/ioasid.c | 39
-rw-r--r-- drivers/iommu/iommu-sva-lib.c | 39
-rw-r--r-- drivers/iommu/iommu-sva-lib.h | 7
-rw-r--r-- drivers/iommu/mtk_iommu.c | 34
-rw-r--r-- drivers/iommu/mtk_iommu_v1.c | 42
-rw-r--r-- drivers/iommu/tegra-smmu.c | 4
-rw-r--r-- drivers/irqchip/Kconfig | 8
-rw-r--r-- drivers/irqchip/Makefile | 2
-rw-r--r-- drivers/irqchip/irq-apple-aic.c | 551
-rw-r--r-- drivers/irqchip/irq-ativic32.c | 156
-rw-r--r-- drivers/irqchip/irq-ftintc010.c | 1
-rw-r--r-- drivers/irqchip/irq-gic-v3.c | 2
-rw-r--r-- drivers/irqchip/irq-gic.c | 104
-rw-r--r-- drivers/irqchip/irq-imx-intmux.c | 8
-rw-r--r-- drivers/irqchip/irq-lpc32xx.c | 34
-rw-r--r-- drivers/irqchip/irq-meson-gpio.c | 106
-rw-r--r-- drivers/irqchip/irq-mvebu-pic.c | 28
-rw-r--r-- drivers/irqchip/irq-nvic.c | 24
-rw-r--r-- drivers/irqchip/irq-qcom-mpm.c | 461
-rw-r--r-- drivers/irqchip/irq-renesas-intc-irqpin.c | 3
-rw-r--r-- drivers/irqchip/irq-renesas-irqc.c | 3
-rw-r--r-- drivers/irqchip/irq-sifive-plic.c | 38
-rw-r--r-- drivers/irqchip/irq-stm32-exti.c | 50
-rw-r--r-- drivers/irqchip/irq-ts4800.c | 25
-rw-r--r-- drivers/irqchip/irq-versatile-fpga.c | 46
-rw-r--r-- drivers/irqchip/irq-xilinx-intc.c | 30
-rw-r--r-- drivers/irqchip/qcom-pdc.c | 137
-rw-r--r-- drivers/isdn/hardware/mISDN/hfcpci.c | 6
-rw-r--r-- drivers/isdn/hardware/mISDN/mISDNipac.c | 2
-rw-r--r-- drivers/isdn/hardware/mISDN/mISDNisar.c | 4
-rw-r--r-- drivers/isdn/mISDN/dsp_pipeline.c | 6
-rw-r--r-- drivers/leds/leds-cr0014114.c | 4
-rw-r--r-- drivers/leds/leds-dac124s085.c | 4
-rw-r--r-- drivers/leds/leds-el15203000.c | 4
-rw-r--r-- drivers/leds/leds-spi-byte.c | 4
-rw-r--r-- drivers/md/Kconfig | 1
-rw-r--r-- drivers/md/bcache/btree.c | 6
-rw-r--r-- drivers/md/bcache/io.c | 3
-rw-r--r-- drivers/md/bcache/journal.c | 16
-rw-r--r-- drivers/md/bcache/movinggc.c | 4
-rw-r--r-- drivers/md/bcache/request.c | 26
-rw-r--r-- drivers/md/bcache/super.c | 9
-rw-r--r-- drivers/md/bcache/writeback.c | 21
-rw-r--r-- drivers/md/dm-cache-target.c | 26
-rw-r--r-- drivers/md/dm-core.h | 1
-rw-r--r-- drivers/md/dm-crypt.c | 46
-rw-r--r-- drivers/md/dm-integrity.c | 5
-rw-r--r-- drivers/md/dm-io.c | 5
-rw-r--r-- drivers/md/dm-log-writes.c | 39
-rw-r--r-- drivers/md/dm-rq.c | 26
-rw-r--r-- drivers/md/dm-snap.c | 21
-rw-r--r-- drivers/md/dm-thin.c | 41
-rw-r--r-- drivers/md/dm-writecache.c | 7
-rw-r--r-- drivers/md/dm-zoned-metadata.c | 26
-rw-r--r-- drivers/md/dm-zoned-target.c | 3
-rw-r--r-- drivers/md/dm.c | 172
-rw-r--r-- drivers/md/md-faulty.c | 4
-rw-r--r-- drivers/md/md-multipath.c | 13
-rw-r--r-- drivers/md/md.c | 31
-rw-r--r-- drivers/md/raid1-10.c | 5
-rw-r--r-- drivers/md/raid1.c | 58
-rw-r--r-- drivers/md/raid1.h | 1
-rw-r--r-- drivers/md/raid10.c | 47
-rw-r--r-- drivers/md/raid10.h | 1
-rw-r--r-- drivers/md/raid5-cache.c | 42
-rw-r--r-- drivers/md/raid5-ppl.c | 29
-rw-r--r-- drivers/md/raid5.c | 29
-rw-r--r-- drivers/media/Kconfig | 11
-rw-r--r-- drivers/media/Makefile | 4
-rw-r--r-- drivers/media/cec/platform/Makefile | 16
-rw-r--r-- drivers/media/cec/platform/cros-ec/cros-ec-cec.c | 2
-rw-r--r-- drivers/media/cec/platform/seco/seco-cec.c | 57
-rw-r--r-- drivers/media/common/Kconfig | 16
-rw-r--r-- drivers/media/common/Makefile | 7
-rw-r--r-- drivers/media/common/saa7146/Kconfig | 2
-rw-r--r-- drivers/media/common/videobuf2/Makefile | 8
-rw-r--r-- drivers/media/common/videobuf2/videobuf2-dma-contig.c | 12
-rw-r--r-- drivers/media/dvb-core/Kconfig | 2
-rw-r--r-- drivers/media/dvb-frontends/Kconfig | 647
-rw-r--r-- drivers/media/dvb-frontends/Makefile | 193
-rw-r--r-- drivers/media/dvb-frontends/dib3000mc.c | 2
-rw-r--r-- drivers/media/dvb-frontends/dib7000p.c | 4
-rw-r--r-- drivers/media/dvb-frontends/si21xx.c | 2
-rw-r--r-- drivers/media/dvb-frontends/stv0299.c | 9
-rw-r--r-- drivers/media/dvb-frontends/tda8083.c | 2
-rw-r--r-- drivers/media/firewire/Makefile | 2
-rw-r--r-- drivers/media/i2c/Kconfig | 1909
-rw-r--r-- drivers/media/i2c/Makefile | 192
-rw-r--r-- drivers/media/i2c/adv7180.c | 10
-rw-r--r-- drivers/media/i2c/adv7183.c | 51
-rw-r--r-- drivers/media/i2c/adv748x/adv748x-csi2.c | 18
-rw-r--r-- drivers/media/i2c/adv7511-v4l2.c | 3
-rw-r--r-- drivers/media/i2c/adv7604.c | 2
-rw-r--r-- drivers/media/i2c/adv7842.c | 2
-rw-r--r-- drivers/media/i2c/ccs/Kconfig | 2
-rw-r--r-- drivers/media/i2c/ccs/ccs-core.c | 1
-rw-r--r-- drivers/media/i2c/cx25840/Kconfig | 2
-rw-r--r-- drivers/media/i2c/dw9714.c | 42
-rw-r--r-- drivers/media/i2c/et8ek8/Kconfig | 2
-rw-r--r-- drivers/media/i2c/hi847.c | 3012
-rw-r--r-- drivers/media/i2c/imx274.c | 2
-rw-r--r-- drivers/media/i2c/isl7998x.c | 1628
-rw-r--r-- drivers/media/i2c/m5mols/Kconfig | 2
-rw-r--r-- drivers/media/i2c/m5mols/m5mols.h | 3
-rw-r--r-- drivers/media/i2c/m5mols/m5mols_capture.c | 1
-rw-r--r-- drivers/media/i2c/m5mols/m5mols_core.c | 29
-rw-r--r-- drivers/media/i2c/max2175.c | 2
-rw-r--r-- drivers/media/i2c/max9286.c | 125
-rw-r--r-- drivers/media/i2c/ml86v7667.c | 5
-rw-r--r-- drivers/media/i2c/mt9m001.c | 8
-rw-r--r-- drivers/media/i2c/mt9m111.c | 15
-rw-r--r-- drivers/media/i2c/noon010pc30.c | 75
-rw-r--r-- drivers/media/i2c/og01a1b.c | 1128
-rw-r--r-- drivers/media/i2c/ov08d10.c | 1528
-rw-r--r-- drivers/media/i2c/ov2740.c | 8
-rw-r--r-- drivers/media/i2c/ov5640.c | 14
-rw-r--r-- drivers/media/i2c/ov5648.c | 16
-rw-r--r-- drivers/media/i2c/ov5675.c | 32
-rw-r--r-- drivers/media/i2c/ov5693.c | 9
-rw-r--r-- drivers/media/i2c/ov6650.c | 206
-rw-r--r-- drivers/media/i2c/ov8865.c | 12
-rw-r--r-- drivers/media/i2c/ov9640.c | 8
-rw-r--r-- drivers/media/i2c/saa7115.c | 2
-rw-r--r-- drivers/media/i2c/tc358743.c | 26
-rw-r--r-- drivers/media/i2c/tvp5150.c | 6
-rw-r--r-- drivers/media/mc/mc-entity.c | 55
-rw-r--r-- drivers/media/mmc/Kconfig | 1
-rw-r--r-- drivers/media/pci/Kconfig | 31
-rw-r--r-- drivers/media/pci/Makefile | 22
-rw-r--r-- drivers/media/pci/bt8xx/Kconfig | 2
-rw-r--r-- drivers/media/pci/bt8xx/bttv-driver.c | 4
-rw-r--r-- drivers/media/pci/cobalt/Kconfig | 2
-rw-r--r-- drivers/media/pci/cx18/Kconfig | 2
-rw-r--r-- drivers/media/pci/cx18/cx18-driver.c | 4
-rw-r--r-- drivers/media/pci/cx18/cx18-dvb.c | 2
-rw-r--r-- drivers/media/pci/cx18/cx18-gpio.c | 2
-rw-r--r-- drivers/media/pci/cx18/cx18-queue.h | 6
-rw-r--r-- drivers/media/pci/cx23885/cx23885-cards.c | 2
-rw-r--r-- drivers/media/pci/cx23885/cx23885-dvb.c | 2
-rw-r--r-- drivers/media/pci/cx23885/cx23885-input.c | 2
-rw-r--r-- drivers/media/pci/cx23885/cx23885-video.c | 2
-rw-r--r-- drivers/media/pci/cx88/cx88-mpeg.c | 3
-rw-r--r-- drivers/media/pci/cx88/cx88.h | 2
-rw-r--r-- drivers/media/pci/dt3155/Kconfig | 2
-rw-r--r-- drivers/media/pci/intel/ipu3/Kconfig | 2
-rw-r--r-- drivers/media/pci/ivtv/Kconfig | 2
-rw-r--r-- drivers/media/pci/ivtv/ivtv-driver.c | 2
-rw-r--r-- drivers/media/pci/ivtv/ivtv-driver.h | 1
-rw-r--r-- drivers/media/pci/ivtv/ivtv-gpio.c | 2
-rw-r--r-- drivers/media/pci/ivtv/ivtv-ioctl.c | 10
-rw-r--r-- drivers/media/pci/ivtv/ivtv-queue.h | 25
-rw-r--r-- drivers/media/pci/ivtv/ivtv-streams.c | 11
-rw-r--r-- drivers/media/pci/ivtv/ivtv-udma.h | 8
-rw-r--r-- drivers/media/pci/meye/Kconfig | 2
-rw-r--r-- drivers/media/pci/saa7134/saa7134-alsa.c | 4
-rw-r--r-- drivers/media/pci/saa7134/saa7134-cards.c | 2
-rw-r--r-- drivers/media/pci/saa7134/saa7134-dvb.c | 2
-rw-r--r-- drivers/media/pci/saa7134/saa7134-video.c | 11
-rw-r--r-- drivers/media/pci/saa7146/Kconfig | 6
-rw-r--r-- drivers/media/pci/saa7164/saa7164-cmd.c | 1
-rw-r--r-- drivers/media/pci/sta2x11/Kconfig | 2
-rw-r--r-- drivers/media/pci/tw5864/Kconfig | 2
-rw-r--r-- drivers/media/pci/tw68/Kconfig | 2
-rw-r--r-- drivers/media/pci/tw686x/Kconfig | 2
-rw-r--r-- drivers/media/platform/Kconfig | 696
-rw-r--r-- drivers/media/platform/Makefile | 110
-rw-r--r-- drivers/media/platform/allegro-dvt/Kconfig | 19
-rw-r--r-- drivers/media/platform/amlogic/Kconfig | 5
-rw-r--r-- drivers/media/platform/amlogic/Makefile | 2
-rw-r--r-- drivers/media/platform/amlogic/meson-ge2d/Kconfig | 14
-rw-r--r-- drivers/media/platform/amlogic/meson-ge2d/Makefile (renamed from drivers/media/platform/meson/ge2d/Makefile) | 0
-rw-r--r-- drivers/media/platform/amlogic/meson-ge2d/ge2d-regs.h (renamed from drivers/media/platform/meson/ge2d/ge2d-regs.h) | 0
-rw-r--r-- drivers/media/platform/amlogic/meson-ge2d/ge2d.c (renamed from drivers/media/platform/meson/ge2d/ge2d.c) | 24
-rw-r--r-- drivers/media/platform/amphion/Kconfig | 22
-rw-r--r-- drivers/media/platform/amphion/Makefile | 20
-rw-r--r-- drivers/media/platform/amphion/vdec.c | 1656
-rw-r--r-- drivers/media/platform/amphion/venc.c | 1358
-rw-r--r-- drivers/media/platform/amphion/vpu.h | 362
-rw-r--r-- drivers/media/platform/amphion/vpu_cmds.c | 433
-rw-r--r-- drivers/media/platform/amphion/vpu_cmds.h | 25
-rw-r--r-- drivers/media/platform/amphion/vpu_codec.h | 68
-rw-r--r-- drivers/media/platform/amphion/vpu_color.c | 183
-rw-r--r-- drivers/media/platform/amphion/vpu_core.c | 879
-rw-r--r-- drivers/media/platform/amphion/vpu_core.h | 15
-rw-r--r-- drivers/media/platform/amphion/vpu_dbg.c | 494
-rw-r--r-- drivers/media/platform/amphion/vpu_defs.h | 187
-rw-r--r-- drivers/media/platform/amphion/vpu_drv.c | 261
-rw-r--r-- drivers/media/platform/amphion/vpu_helpers.c | 414
-rw-r--r-- drivers/media/platform/amphion/vpu_helpers.h | 71
-rw-r--r-- drivers/media/platform/amphion/vpu_imx8q.c | 271
-rw-r--r-- drivers/media/platform/amphion/vpu_imx8q.h | 115
-rw-r--r-- drivers/media/platform/amphion/vpu_malone.c | 1644
-rw-r--r-- drivers/media/platform/amphion/vpu_malone.h | 44
-rw-r--r-- drivers/media/platform/amphion/vpu_mbox.c | 118
-rw-r--r-- drivers/media/platform/amphion/vpu_mbox.h | 16
-rw-r--r-- drivers/media/platform/amphion/vpu_msgs.c | 385
-rw-r--r-- drivers/media/platform/amphion/vpu_msgs.h | 14
-rw-r--r-- drivers/media/platform/amphion/vpu_rpc.c | 259
-rw-r--r-- drivers/media/platform/amphion/vpu_rpc.h | 461
-rw-r--r-- drivers/media/platform/amphion/vpu_v4l2.c | 713
-rw-r--r-- drivers/media/platform/amphion/vpu_v4l2.h | 55
-rw-r--r-- drivers/media/platform/amphion/vpu_windsor.c | 1173
-rw-r--r-- drivers/media/platform/amphion/vpu_windsor.h | 37
-rw-r--r-- drivers/media/platform/aspeed/Kconfig | 13
-rw-r--r-- drivers/media/platform/aspeed/Makefile | 2
-rw-r--r-- drivers/media/platform/aspeed/aspeed-video.c (renamed from drivers/media/platform/aspeed-video.c) | 301
-rw-r--r-- drivers/media/platform/atmel/Kconfig | 28
-rw-r--r-- drivers/media/platform/atmel/Makefile | 4
-rw-r--r-- drivers/media/platform/atmel/atmel-isc-base.c | 397
-rw-r--r-- drivers/media/platform/atmel/atmel-isc-clk.c | 311
-rw-r--r-- drivers/media/platform/atmel/atmel-isc.h | 11
-rw-r--r-- drivers/media/platform/atmel/atmel-sama5d2-isc.c | 24
-rw-r--r-- drivers/media/platform/atmel/atmel-sama7g5-isc.c | 32
-rw-r--r-- drivers/media/platform/atmel/microchip-csi2dc.c | 792
-rw-r--r-- drivers/media/platform/cadence/Kconfig | 16
-rw-r--r-- drivers/media/platform/chips-media/Kconfig | 20
-rw-r--r-- drivers/media/platform/chips-media/Makefile (renamed from drivers/media/platform/coda/Makefile) | 0
-rw-r--r-- drivers/media/platform/chips-media/coda-bit.c (renamed from drivers/media/platform/coda/coda-bit.c) | 0
-rw-r--r-- drivers/media/platform/chips-media/coda-common.c (renamed from drivers/media/platform/coda/coda-common.c) | 1
-rw-r--r-- drivers/media/platform/chips-media/coda-gdi.c (renamed from drivers/media/platform/coda/coda-gdi.c) | 0
-rw-r--r-- drivers/media/platform/chips-media/coda-h264.c (renamed from drivers/media/platform/coda/coda-h264.c) | 0
-rw-r--r-- drivers/media/platform/chips-media/coda-jpeg.c (renamed from drivers/media/platform/coda/coda-jpeg.c) | 0
-rw-r--r-- drivers/media/platform/chips-media/coda-mpeg2.c (renamed from drivers/media/platform/coda/coda-mpeg2.c) | 0
-rw-r--r-- drivers/media/platform/chips-media/coda-mpeg4.c (renamed from drivers/media/platform/coda/coda-mpeg4.c) | 0
-rw-r--r-- drivers/media/platform/chips-media/coda.h (renamed from drivers/media/platform/coda/coda.h) | 0
-rw-r--r-- drivers/media/platform/chips-media/coda_regs.h (renamed from drivers/media/platform/coda/coda_regs.h) | 2
-rw-r--r-- drivers/media/platform/chips-media/imx-vdoa.c (renamed from drivers/media/platform/coda/imx-vdoa.c) | 9
-rw-r--r-- drivers/media/platform/chips-media/imx-vdoa.h (renamed from drivers/media/platform/coda/imx-vdoa.h) | 0
-rw-r--r-- drivers/media/platform/chips-media/trace.h (renamed from drivers/media/platform/coda/trace.h) | 2
-rw-r--r-- drivers/media/platform/intel/Kconfig | 14
-rw-r--r-- drivers/media/platform/intel/Makefile | 2
-rw-r--r-- drivers/media/platform/intel/pxa_camera.c (renamed from drivers/media/platform/pxa_camera.c) | 21
-rw-r--r-- drivers/media/platform/marvell/Kconfig (renamed from drivers/media/platform/marvell-ccic/Kconfig) | 9
-rw-r--r-- drivers/media/platform/marvell/Makefile (renamed from drivers/media/platform/marvell-ccic/Makefile) | 0
-rw-r--r-- drivers/media/platform/marvell/cafe-driver.c (renamed from drivers/media/platform/marvell-ccic/cafe-driver.c) | 0
-rw-r--r-- drivers/media/platform/marvell/mcam-core.c (renamed from drivers/media/platform/marvell-ccic/mcam-core.c) | 0
-rw-r--r-- drivers/media/platform/marvell/mcam-core.h (renamed from drivers/media/platform/marvell-ccic/mcam-core.h) | 0
-rw-r--r-- drivers/media/platform/marvell/mmp-driver.c (renamed from drivers/media/platform/marvell-ccic/mmp-driver.c) | 8
-rw-r--r-- drivers/media/platform/mediatek/Kconfig | 8
-rw-r--r-- drivers/media/platform/mediatek/Makefile | 5
-rw-r--r-- drivers/media/platform/mediatek/jpeg/Kconfig | 16
-rw-r--r-- drivers/media/platform/mediatek/jpeg/Makefile (renamed from drivers/media/platform/mtk-jpeg/Makefile) | 0
-rw-r--r-- drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c (renamed from drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c) | 47
-rw-r--r-- drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h (renamed from drivers/media/platform/mtk-jpeg/mtk_jpeg_core.h) | 2
-rw-r--r-- drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c (renamed from drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_hw.c) | 0
-rw-r--r-- drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.h (renamed from drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_hw.h) | 0
-rw-r--r-- drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_parse.c (renamed from drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_parse.c) | 0
-rw-r--r-- drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_parse.h (renamed from drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_parse.h) | 0
-rw-r--r-- drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_reg.h (renamed from drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_reg.h) | 0
-rw-r--r-- drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c (renamed from drivers/media/platform/mtk-jpeg/mtk_jpeg_enc_hw.c) | 0
-rw-r--r-- drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.h (renamed from drivers/media/platform/mtk-jpeg/mtk_jpeg_enc_hw.h) | 0
-rw-r--r-- drivers/media/platform/mediatek/mdp/Kconfig | 17
-rw-r--r-- drivers/media/platform/mediatek/mdp/Makefile (renamed from drivers/media/platform/mtk-mdp/Makefile) | 2
-rw-r--r-- drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c (renamed from drivers/media/platform/mtk-mdp/mtk_mdp_comp.c) | 40
-rw-r--r-- drivers/media/platform/mediatek/mdp/mtk_mdp_comp.h (renamed from drivers/media/platform/mtk-mdp/mtk_mdp_comp.h) | 2
-rw-r--r-- drivers/media/platform/mediatek/mdp/mtk_mdp_core.c (renamed from drivers/media/platform/mtk-mdp/mtk_mdp_core.c) | 3
-rw-r--r-- drivers/media/platform/mediatek/mdp/mtk_mdp_core.h (renamed from drivers/media/platform/mtk-mdp/mtk_mdp_core.h) | 0
-rw-r--r-- drivers/media/platform/mediatek/mdp/mtk_mdp_ipi.h (renamed from drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h) | 0
-rw-r--r-- drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c (renamed from drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c) | 0
-rw-r--r-- drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.h (renamed from drivers/media/platform/mtk-mdp/mtk_mdp_m2m.h) | 0
-rw-r--r-- drivers/media/platform/mediatek/mdp/mtk_mdp_regs.c (renamed from drivers/media/platform/mtk-mdp/mtk_mdp_regs.c) | 0
-rw-r--r-- drivers/media/platform/mediatek/mdp/mtk_mdp_regs.h (renamed from drivers/media/platform/mtk-mdp/mtk_mdp_regs.h) | 0
-rw-r--r-- drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c (renamed from drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c) | 0
-rw-r--r-- drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.h (renamed from drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h) | 0
-rw-r--r-- drivers/media/platform/mediatek/vcodec/Kconfig | 36
-rw-r--r-- drivers/media/platform/mediatek/vcodec/Makefile (renamed from drivers/media/platform/mtk-vcodec/Makefile) | 6
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c (renamed from drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c) | 4
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.h (renamed from drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h) | 1
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c (renamed from drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c) | 199
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c | 200
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.h | 56
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_pm.c | 169
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_pm.h | 19
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c (renamed from drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateful.c) | 2
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c (renamed from drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateless.c) | 21
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h (renamed from drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h) | 77
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c (renamed from drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c) | 1
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.h (renamed from drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h) | 0
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c (renamed from drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c) | 22
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_pm.c (renamed from drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c) | 55
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_pm.h (renamed from drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.h) | 3
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw.c (renamed from drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.c) | 0
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw.h (renamed from drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.h) | 2
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_priv.h (renamed from drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_priv.h) | 0
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_scp.c (renamed from drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_scp.c) | 0
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_vpu.c (renamed from drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c) | 2
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_intr.c | 43
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_intr.h (renamed from drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h) | 5
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.c (renamed from drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c) | 59
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.h (renamed from drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h) | 8
-rw-r--r-- drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_if.c (renamed from drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c) | 2
-rw-r--r-- drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_if.c (renamed from drivers/media/platform/mtk-vcodec/vdec/vdec_h264_req_if.c) | 2
-rw-r--r-- drivers/media/platform/mediatek/vcodec/vdec/vdec_vp8_if.c (renamed from drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c) | 2
-rw-r--r-- drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_if.c (renamed from drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c) | 2
-rw-r--r-- drivers/media/platform/mediatek/vcodec/vdec_drv_base.h (renamed from drivers/media/platform/mtk-vcodec/vdec_drv_base.h) | 0
-rw-r--r-- drivers/media/platform/mediatek/vcodec/vdec_drv_if.c (renamed from drivers/media/platform/mtk-vcodec/vdec_drv_if.c) | 21
-rw-r--r-- drivers/media/platform/mediatek/vcodec/vdec_drv_if.h (renamed from drivers/media/platform/mtk-vcodec/vdec_drv_if.h) | 0
-rw-r--r-- drivers/media/platform/mediatek/vcodec/vdec_ipi_msg.h (renamed from drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h) | 16
-rw-r--r-- drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c | 290
-rw-r--r-- drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h | 153
-rw-r--r-- drivers/media/platform/mediatek/vcodec/vdec_vpu_if.c (renamed from drivers/media/platform/mtk-vcodec/vdec_vpu_if.c) | 46
-rw-r--r-- drivers/media/platform/mediatek/vcodec/vdec_vpu_if.h (renamed from drivers/media/platform/mtk-vcodec/vdec_vpu_if.h) | 22
-rw-r--r-- drivers/media/platform/mediatek/vcodec/venc/venc_h264_if.c (renamed from drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c) | 2
-rw-r--r-- drivers/media/platform/mediatek/vcodec/venc/venc_vp8_if.c (renamed from drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c) | 2
-rw-r--r-- drivers/media/platform/mediatek/vcodec/venc_drv_base.h (renamed from drivers/media/platform/mtk-vcodec/venc_drv_base.h) | 0
-rw-r--r-- drivers/media/platform/mediatek/vcodec/venc_drv_if.c (renamed from drivers/media/platform/mtk-vcodec/venc_drv_if.c) | 0
-rw-r--r-- drivers/media/platform/mediatek/vcodec/venc_drv_if.h (renamed from drivers/media/platform/mtk-vcodec/venc_drv_if.h) | 0
-rw-r--r-- drivers/media/platform/mediatek/vcodec/venc_ipi_msg.h (renamed from drivers/media/platform/mtk-vcodec/venc_ipi_msg.h) | 0
-rw-r--r-- drivers/media/platform/mediatek/vcodec/venc_vpu_if.c (renamed from drivers/media/platform/mtk-vcodec/venc_vpu_if.c) | 0
-rw-r--r-- drivers/media/platform/mediatek/vcodec/venc_vpu_if.h (renamed from drivers/media/platform/mtk-vcodec/venc_vpu_if.h) | 0
-rw-r--r-- drivers/media/platform/mediatek/vpu/Kconfig | 15
-rw-r--r-- drivers/media/platform/mediatek/vpu/Makefile (renamed from drivers/media/platform/mtk-vpu/Makefile) | 0
-rw-r--r-- drivers/media/platform/mediatek/vpu/mtk_vpu.c (renamed from drivers/media/platform/mtk-vpu/mtk_vpu.c) | 10
-rw-r--r-- drivers/media/platform/mediatek/vpu/mtk_vpu.h (renamed from drivers/media/platform/mtk-vpu/mtk_vpu.h) | 0
-rw-r--r-- drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c | 145
-rw-r--r-- drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h | 20
-rw-r--r-- drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c | 45
-rw-r--r-- drivers/media/platform/nvidia/Kconfig | 5
-rw-r--r-- drivers/media/platform/nvidia/Makefile | 3
-rw-r--r-- drivers/media/platform/nvidia/tegra-vde/Kconfig | 17
-rw-r--r-- drivers/media/platform/nvidia/tegra-vde/Makefile | 3
-rw-r--r-- drivers/media/platform/nvidia/tegra-vde/dmabuf-cache.c (renamed from drivers/staging/media/tegra-vde/dmabuf-cache.c) | 2
-rw-r--r-- drivers/media/platform/nvidia/tegra-vde/h264.c | 946
-rw-r--r-- drivers/media/platform/nvidia/tegra-vde/iommu.c (renamed from drivers/staging/media/tegra-vde/iommu.c) | 2
-rw-r--r-- drivers/media/platform/nvidia/tegra-vde/trace.h (renamed from drivers/staging/media/tegra-vde/trace.h) | 2
-rw-r--r-- drivers/media/platform/nvidia/tegra-vde/v4l2.c | 1018
-rw-r--r-- drivers/media/platform/nvidia/tegra-vde/vde.c | 551
-rw-r--r-- drivers/media/platform/nvidia/tegra-vde/vde.h | 242
-rw-r--r-- drivers/media/platform/nxp/Kconfig | 55
-rw-r--r-- drivers/media/platform/nxp/Makefile | 8
-rw-r--r-- drivers/media/platform/nxp/fsl-viu.c (renamed from drivers/media/platform/fsl-viu.c) | 2
-rw-r--r-- drivers/media/platform/nxp/imx-jpeg/Kconfig (renamed from drivers/media/platform/imx-jpeg/Kconfig) | 3
-rw-r--r-- drivers/media/platform/nxp/imx-jpeg/Makefile (renamed from drivers/media/platform/imx-jpeg/Makefile) | 0
-rw-r--r-- drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.c (renamed from drivers/media/platform/imx-jpeg/mxc-jpeg-hw.c) | 0
-rw-r--r-- drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h (renamed from drivers/media/platform/imx-jpeg/mxc-jpeg-hw.h) | 0
-rw-r--r-- drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c (renamed from drivers/media/platform/imx-jpeg/mxc-jpeg.c) | 63
-rw-r--r-- drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h (renamed from drivers/media/platform/imx-jpeg/mxc-jpeg.h) | 1
-rw-r--r-- drivers/media/platform/nxp/imx-mipi-csis.c (renamed from drivers/staging/media/imx/imx7-mipi-csis.c) | 123
-rw-r--r-- drivers/media/platform/nxp/imx-pxp.c (renamed from drivers/media/platform/imx-pxp.c) | 0
-rw-r--r-- drivers/media/platform/nxp/imx-pxp.h (renamed from drivers/media/platform/imx-pxp.h) | 0
-rw-r--r-- drivers/media/platform/nxp/mx2_emmaprp.c (renamed from drivers/media/platform/mx2_emmaprp.c) | 0
-rw-r--r-- drivers/media/platform/qcom/Kconfig | 6
-rw-r--r-- drivers/media/platform/qcom/Makefile | 3
-rw-r--r-- drivers/media/platform/qcom/camss/Kconfig | 9
-rw-r--r-- drivers/media/platform/qcom/camss/Makefile | 3
-rw-r--r-- drivers/media/platform/qcom/camss/camss-csid-gen2.c (renamed from drivers/media/platform/qcom/camss/camss-csid-170.c) | 34
-rw-r--r-- drivers/media/platform/qcom/camss/camss-csid.c | 91
-rw-r--r-- drivers/media/platform/qcom/camss/camss-csid.h | 5
-rw-r--r-- drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c | 19
-rw-r--r-- drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c | 199
-rw-r--r-- drivers/media/platform/qcom/camss/camss-csiphy.c | 42
-rw-r--r-- drivers/media/platform/qcom/camss/camss-csiphy.h | 7
-rw-r--r-- drivers/media/platform/qcom/camss/camss-ispif.c | 12
-rw-r--r-- drivers/media/platform/qcom/camss/camss-vfe-170.c | 12
-rw-r--r-- drivers/media/platform/qcom/camss/camss-vfe-480.c | 564
-rw-r--r-- drivers/media/platform/qcom/camss/camss-vfe.c | 29
-rw-r--r-- drivers/media/platform/qcom/camss/camss-vfe.h | 4
-rw-r--r-- drivers/media/platform/qcom/camss/camss-video.c | 5
-rw-r--r-- drivers/media/platform/qcom/camss/camss.c | 368
-rw-r--r-- drivers/media/platform/qcom/camss/camss.h | 20
-rw-r--r-- drivers/media/platform/qcom/venus/Kconfig | 14
-rw-r--r-- drivers/media/platform/qcom/venus/core.h | 4
-rw-r--r-- drivers/media/platform/qcom/venus/helpers.c | 2
-rw-r--r-- drivers/media/platform/qcom/venus/hfi_cmds.c | 2
-rw-r--r-- drivers/media/platform/qcom/venus/venc.c | 4
-rw-r--r-- drivers/media/platform/qcom/venus/venc_ctrls.c | 6
-rw-r--r-- drivers/media/platform/renesas/Kconfig | 121
-rw-r--r-- drivers/media/platform/renesas/Makefile | 15
-rw-r--r-- drivers/media/platform/renesas/rcar-fcp.c (renamed from drivers/media/platform/rcar-fcp.c) | 0
-rw-r--r-- drivers/media/platform/renesas/rcar-isp.c (renamed from drivers/media/platform/rcar-isp.c) | 32
-rw-r--r-- drivers/media/platform/renesas/rcar-vin/Kconfig (renamed from drivers/media/platform/rcar-vin/Kconfig) | 6
-rw-r--r-- drivers/media/platform/renesas/rcar-vin/Makefile (renamed from drivers/media/platform/rcar-vin/Makefile) | 0
-rw-r--r-- drivers/media/platform/renesas/rcar-vin/rcar-core.c (renamed from drivers/media/platform/rcar-vin/rcar-core.c) | 389
-rw-r--r-- drivers/media/platform/renesas/rcar-vin/rcar-csi2.c (renamed from drivers/media/platform/rcar-vin/rcar-csi2.c) | 132
-rw-r--r-- drivers/media/platform/renesas/rcar-vin/rcar-dma.c (renamed from drivers/media/platform/rcar-vin/rcar-dma.c) | 2
-rw-r--r-- drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c (renamed from drivers/media/platform/rcar-vin/rcar-v4l2.c) | 0
-rw-r--r-- drivers/media/platform/renesas/rcar-vin/rcar-vin.h (renamed from drivers/media/platform/rcar-vin/rcar-vin.h) | 20
-rw-r--r-- drivers/media/platform/renesas/rcar_drif.c (renamed from drivers/media/platform/rcar_drif.c) | 0
-rw-r--r-- drivers/media/platform/renesas/rcar_fdp1.c (renamed from drivers/media/platform/rcar_fdp1.c) | 0
-rw-r--r-- drivers/media/platform/renesas/rcar_jpu.c (renamed from drivers/media/platform/rcar_jpu.c) | 2
-rw-r--r-- drivers/media/platform/renesas/renesas-ceu.c (renamed from drivers/media/platform/renesas-ceu.c) | 0
-rw-r--r-- drivers/media/platform/renesas/sh_vou.c (renamed from drivers/media/platform/sh_vou.c) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/Makefile (renamed from drivers/media/platform/vsp1/Makefile) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1.h (renamed from drivers/media/platform/vsp1/vsp1.h) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_brx.c (renamed from drivers/media/platform/vsp1/vsp1_brx.c) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_brx.h (renamed from drivers/media/platform/vsp1/vsp1_brx.h) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_clu.c (renamed from drivers/media/platform/vsp1/vsp1_clu.c) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_clu.h (renamed from drivers/media/platform/vsp1/vsp1_clu.h) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_dl.c (renamed from drivers/media/platform/vsp1/vsp1_dl.c) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_dl.h (renamed from drivers/media/platform/vsp1/vsp1_dl.h) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_drm.c (renamed from drivers/media/platform/vsp1/vsp1_drm.c) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_drm.h (renamed from drivers/media/platform/vsp1/vsp1_drm.h) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_drv.c (renamed from drivers/media/platform/vsp1/vsp1_drv.c) | 47
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_entity.c (renamed from drivers/media/platform/vsp1/vsp1_entity.c) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_entity.h (renamed from drivers/media/platform/vsp1/vsp1_entity.h) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_hgo.c (renamed from drivers/media/platform/vsp1/vsp1_hgo.c) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_hgo.h (renamed from drivers/media/platform/vsp1/vsp1_hgo.h) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_hgt.c (renamed from drivers/media/platform/vsp1/vsp1_hgt.c) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_hgt.h (renamed from drivers/media/platform/vsp1/vsp1_hgt.h) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_histo.c (renamed from drivers/media/platform/vsp1/vsp1_histo.c) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_histo.h (renamed from drivers/media/platform/vsp1/vsp1_histo.h) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_hsit.c (renamed from drivers/media/platform/vsp1/vsp1_hsit.c) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_hsit.h (renamed from drivers/media/platform/vsp1/vsp1_hsit.h) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_lif.c (renamed from drivers/media/platform/vsp1/vsp1_lif.c) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_lif.h (renamed from drivers/media/platform/vsp1/vsp1_lif.h) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_lut.c (renamed from drivers/media/platform/vsp1/vsp1_lut.c) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_lut.h (renamed from drivers/media/platform/vsp1/vsp1_lut.h) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_pipe.c (renamed from drivers/media/platform/vsp1/vsp1_pipe.c) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_pipe.h (renamed from drivers/media/platform/vsp1/vsp1_pipe.h) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_regs.h (renamed from drivers/media/platform/vsp1/vsp1_regs.h) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_rpf.c (renamed from drivers/media/platform/vsp1/vsp1_rpf.c) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_rwpf.c (renamed from drivers/media/platform/vsp1/vsp1_rwpf.c) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_rwpf.h (renamed from drivers/media/platform/vsp1/vsp1_rwpf.h) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_sru.c (renamed from drivers/media/platform/vsp1/vsp1_sru.c) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_sru.h (renamed from drivers/media/platform/vsp1/vsp1_sru.h) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_uds.c (renamed from drivers/media/platform/vsp1/vsp1_uds.c) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_uds.h (renamed from drivers/media/platform/vsp1/vsp1_uds.h) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_uif.c (renamed from drivers/media/platform/vsp1/vsp1_uif.c) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_uif.h (renamed from drivers/media/platform/vsp1/vsp1_uif.h) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_video.c (renamed from drivers/media/platform/vsp1/vsp1_video.c) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_video.h (renamed from drivers/media/platform/vsp1/vsp1_video.h) | 0
-rw-r--r-- drivers/media/platform/renesas/vsp1/vsp1_wpf.c (renamed from drivers/media/platform/vsp1/vsp1_wpf.c) | 0
-rw-r--r-- drivers/media/platform/rockchip/Kconfig | 6
-rw-r--r-- drivers/media/platform/rockchip/Makefile | 3
-rw-r--r-- drivers/media/platform/rockchip/rga/Kconfig | 14
-rw-r--r-- drivers/media/platform/rockchip/rkisp1/Kconfig | 19
-rw-r--r-- drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c | 28
-rw-r--r-- drivers/media/platform/samsung/Kconfig | 10
-rw-r--r-- drivers/media/platform/samsung/Makefile | 7
-rw-r--r-- drivers/media/platform/samsung/exynos-gsc/Kconfig | 10
-rw-r--r-- drivers/media/platform/samsung/exynos-gsc/Makefile (renamed from drivers/media/platform/exynos-gsc/Makefile) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos-gsc/gsc-core.c (renamed from drivers/media/platform/exynos-gsc/gsc-core.c) | 14
-rw-r--r-- drivers/media/platform/samsung/exynos-gsc/gsc-core.h (renamed from drivers/media/platform/exynos-gsc/gsc-core.h) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c (renamed from drivers/media/platform/exynos-gsc/gsc-m2m.c) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos-gsc/gsc-regs.c (renamed from drivers/media/platform/exynos-gsc/gsc-regs.c) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos-gsc/gsc-regs.h (renamed from drivers/media/platform/exynos-gsc/gsc-regs.h) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/Kconfig (renamed from drivers/media/platform/exynos4-is/Kconfig) | 3
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/Makefile (renamed from drivers/media/platform/exynos4-is/Makefile) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/common.c (renamed from drivers/media/platform/exynos4-is/common.c) | 5
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/common.h (renamed from drivers/media/platform/exynos4-is/common.h) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-capture.c (renamed from drivers/media/platform/exynos4-is/fimc-capture.c) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-core.c (renamed from drivers/media/platform/exynos4-is/fimc-core.c) | 11
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-core.h (renamed from drivers/media/platform/exynos4-is/fimc-core.h) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-is-command.h (renamed from drivers/media/platform/exynos4-is/fimc-is-command.h) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-is-errno.c (renamed from drivers/media/platform/exynos4-is/fimc-is-errno.c) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-is-errno.h (renamed from drivers/media/platform/exynos4-is/fimc-is-errno.h) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.c (renamed from drivers/media/platform/exynos4-is/fimc-is-i2c.c) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.h (renamed from drivers/media/platform/exynos4-is/fimc-is-i2c.h) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-is-param.c (renamed from drivers/media/platform/exynos4-is/fimc-is-param.c) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-is-param.h (renamed from drivers/media/platform/exynos4-is/fimc-is-param.h) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-is-regs.c (renamed from drivers/media/platform/exynos4-is/fimc-is-regs.c) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-is-regs.h (renamed from drivers/media/platform/exynos4-is/fimc-is-regs.h) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-is-sensor.c (renamed from drivers/media/platform/exynos4-is/fimc-is-sensor.c) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-is-sensor.h (renamed from drivers/media/platform/exynos4-is/fimc-is-sensor.h) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-is.c (renamed from drivers/media/platform/exynos4-is/fimc-is.c) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-is.h (renamed from drivers/media/platform/exynos4-is/fimc-is.h) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c (renamed from drivers/media/platform/exynos4-is/fimc-isp-video.c) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-isp-video.h (renamed from drivers/media/platform/exynos4-is/fimc-isp-video.h) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-isp.c (renamed from drivers/media/platform/exynos4-is/fimc-isp.c) | 2
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-isp.h (renamed from drivers/media/platform/exynos4-is/fimc-isp.h) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-lite-reg.c (renamed from drivers/media/platform/exynos4-is/fimc-lite-reg.c) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-lite-reg.h (renamed from drivers/media/platform/exynos4-is/fimc-lite-reg.h) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-lite.c (renamed from drivers/media/platform/exynos4-is/fimc-lite.c) | 17
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-lite.h (renamed from drivers/media/platform/exynos4-is/fimc-lite.h) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-m2m.c (renamed from drivers/media/platform/exynos4-is/fimc-m2m.c) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-reg.c (renamed from drivers/media/platform/exynos4-is/fimc-reg.c) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/fimc-reg.h (renamed from drivers/media/platform/exynos4-is/fimc-reg.h) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/media-dev.c (renamed from drivers/media/platform/exynos4-is/media-dev.c) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/media-dev.h (renamed from drivers/media/platform/exynos4-is/media-dev.h) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/mipi-csis.c (renamed from drivers/media/platform/exynos4-is/mipi-csis.c) | 0
-rw-r--r-- drivers/media/platform/samsung/exynos4-is/mipi-csis.h (renamed from drivers/media/platform/exynos4-is/mipi-csis.h) | 0
-rw-r--r-- drivers/media/platform/samsung/s3c-camif/Kconfig | 15
-rw-r--r-- drivers/media/platform/samsung/s3c-camif/Makefile (renamed from drivers/media/platform/s3c-camif/Makefile) | 0
-rw-r--r-- drivers/media/platform/samsung/s3c-camif/camif-capture.c (renamed from drivers/media/platform/s3c-camif/camif-capture.c) | 0
-rw-r--r-- drivers/media/platform/samsung/s3c-camif/camif-core.c (renamed from drivers/media/platform/s3c-camif/camif-core.c) | 0
-rw-r--r-- drivers/media/platform/samsung/s3c-camif/camif-core.h (renamed from drivers/media/platform/s3c-camif/camif-core.h) | 0
-rw-r--r-- drivers/media/platform/samsung/s3c-camif/camif-regs.c (renamed from drivers/media/platform/s3c-camif/camif-regs.c) | 0
-rw-r--r-- drivers/media/platform/samsung/s3c-camif/camif-regs.h (renamed from drivers/media/platform/s3c-camif/camif-regs.h) | 0
-rw-r--r-- drivers/media/platform/samsung/s5p-g2d/Kconfig | 11
-rw-r--r-- drivers/media/platform/samsung/s5p-g2d/Makefile (renamed from drivers/media/platform/s5p-g2d/Makefile) | 0
-rw-r--r-- drivers/media/platform/samsung/s5p-g2d/g2d-hw.c (renamed from drivers/media/platform/s5p-g2d/g2d-hw.c) | 0
-rw-r--r--drivers/media/platform/samsung/s5p-g2d/g2d-regs.h (renamed from drivers/media/platform/s5p-g2d/g2d-regs.h)0
-rw-r--r--drivers/media/platform/samsung/s5p-g2d/g2d.c (renamed from drivers/media/platform/s5p-g2d/g2d.c)10
-rw-r--r--drivers/media/platform/samsung/s5p-g2d/g2d.h (renamed from drivers/media/platform/s5p-g2d/g2d.h)0
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/Kconfig12
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/Makefile (renamed from drivers/media/platform/s5p-jpeg/Makefile)0
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c (renamed from drivers/media/platform/s5p-jpeg/jpeg-core.c)2
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-core.h (renamed from drivers/media/platform/s5p-jpeg/jpeg-core.h)2
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos3250.c (renamed from drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c)0
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos3250.h (renamed from drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.h)2
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos4.c (renamed from drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c)0
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos4.h (renamed from drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h)0
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-s5p.c (renamed from drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c)2
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-s5p.h (renamed from drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h)2
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-regs.h (renamed from drivers/media/platform/s5p-jpeg/jpeg-regs.h)2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/Kconfig9
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/Makefile (renamed from drivers/media/platform/s5p-mfc/Makefile)0
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/regs-mfc-v10.h (renamed from drivers/media/platform/s5p-mfc/regs-mfc-v10.h)0
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/regs-mfc-v6.h (renamed from drivers/media/platform/s5p-mfc/regs-mfc-v6.h)0
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/regs-mfc-v7.h (renamed from drivers/media/platform/s5p-mfc/regs-mfc-v7.h)0
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/regs-mfc-v8.h (renamed from drivers/media/platform/s5p-mfc/regs-mfc-v8.h)0
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/regs-mfc.h (renamed from drivers/media/platform/s5p-mfc/regs-mfc.h)0
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c (renamed from drivers/media/platform/s5p-mfc/s5p_mfc.c)11
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd.c (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c)2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd.h (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_cmd.h)2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v5.c (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c)2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v5.h (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.h)2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c)2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.h (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.h)2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_common.h)1
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c)2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.h (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h)2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_debug.h (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_debug.h)2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_dec.c)20
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.h (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_dec.h)2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_enc.c)2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.h (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_enc.h)2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_intr.c (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_intr.c)0
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_intr.h (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_intr.h)0
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_iommu.h (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h)0
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr.c (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_opr.c)2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr.h (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_opr.h)2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v5.c (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c)0
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v5.h (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.h)0
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c)2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.h (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.h)2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.c (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_pm.c)2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.h (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_pm.h)2
-rw-r--r--  drivers/media/platform/st/Kconfig | 6
-rw-r--r--  drivers/media/platform/st/Makefile | 7
-rw-r--r--  drivers/media/platform/st/sti/Kconfig | 5
-rw-r--r--  drivers/media/platform/st/sti/Makefile | 6
-rw-r--r--  drivers/media/platform/st/sti/bdisp/Kconfig | 10
-rw-r--r--  drivers/media/platform/st/sti/bdisp/Makefile (renamed from drivers/media/platform/sti/bdisp/Makefile) | 0
-rw-r--r--  drivers/media/platform/st/sti/bdisp/bdisp-debug.c (renamed from drivers/media/platform/sti/bdisp/bdisp-debug.c) | 0
-rw-r--r--  drivers/media/platform/st/sti/bdisp/bdisp-filter.h (renamed from drivers/media/platform/sti/bdisp/bdisp-filter.h) | 0
-rw-r--r--  drivers/media/platform/st/sti/bdisp/bdisp-hw.c (renamed from drivers/media/platform/sti/bdisp/bdisp-hw.c) | 0
-rw-r--r--  drivers/media/platform/st/sti/bdisp/bdisp-reg.h (renamed from drivers/media/platform/sti/bdisp/bdisp-reg.h) | 0
-rw-r--r--  drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c (renamed from drivers/media/platform/sti/bdisp/bdisp-v4l2.c) | 10
-rw-r--r--  drivers/media/platform/st/sti/bdisp/bdisp.h (renamed from drivers/media/platform/sti/bdisp/bdisp.h) | 0
-rw-r--r--  drivers/media/platform/st/sti/c8sectpfe/Kconfig (renamed from drivers/media/platform/sti/c8sectpfe/Kconfig) | 1
-rw-r--r--  drivers/media/platform/st/sti/c8sectpfe/Makefile (renamed from drivers/media/platform/sti/c8sectpfe/Makefile) | 0
-rw-r--r--  drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-common.c (renamed from drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c) | 0
-rw-r--r--  drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-common.h (renamed from drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.h) | 0
-rw-r--r--  drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c (renamed from drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c) | 0
-rw-r--r--  drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.h (renamed from drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h) | 0
-rw-r--r--  drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.c (renamed from drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c) | 0
-rw-r--r--  drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.h (renamed from drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.h) | 0
-rw-r--r--  drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-dvb.c (renamed from drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c) | 0
-rw-r--r--  drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-dvb.h (renamed from drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.h) | 0
-rw-r--r--  drivers/media/platform/st/sti/delta/Kconfig | 36
-rw-r--r--  drivers/media/platform/st/sti/delta/Makefile (renamed from drivers/media/platform/sti/delta/Makefile) | 0
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-cfg.h (renamed from drivers/media/platform/sti/delta/delta-cfg.h) | 0
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-debug.c (renamed from drivers/media/platform/sti/delta/delta-debug.c) | 0
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-debug.h (renamed from drivers/media/platform/sti/delta/delta-debug.h) | 0
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-ipc.c (renamed from drivers/media/platform/sti/delta/delta-ipc.c) | 0
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-ipc.h (renamed from drivers/media/platform/sti/delta/delta-ipc.h) | 0
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-mem.c (renamed from drivers/media/platform/sti/delta/delta-mem.c) | 0
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-mem.h (renamed from drivers/media/platform/sti/delta/delta-mem.h) | 0
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c (renamed from drivers/media/platform/sti/delta/delta-mjpeg-dec.c) | 0
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-mjpeg-fw.h (renamed from drivers/media/platform/sti/delta/delta-mjpeg-fw.h) | 0
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-mjpeg-hdr.c (renamed from drivers/media/platform/sti/delta/delta-mjpeg-hdr.c) | 0
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-mjpeg.h (renamed from drivers/media/platform/sti/delta/delta-mjpeg.h) | 0
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-v4l2.c (renamed from drivers/media/platform/sti/delta/delta-v4l2.c) | 0
-rw-r--r--  drivers/media/platform/st/sti/delta/delta.h (renamed from drivers/media/platform/sti/delta/delta.h) | 0
-rw-r--r--  drivers/media/platform/st/sti/hva/Kconfig | 26
-rw-r--r--  drivers/media/platform/st/sti/hva/Makefile (renamed from drivers/media/platform/sti/hva/Makefile) | 0
-rw-r--r--  drivers/media/platform/st/sti/hva/hva-debugfs.c (renamed from drivers/media/platform/sti/hva/hva-debugfs.c) | 0
-rw-r--r--  drivers/media/platform/st/sti/hva/hva-h264.c (renamed from drivers/media/platform/sti/hva/hva-h264.c) | 0
-rw-r--r--  drivers/media/platform/st/sti/hva/hva-hw.c (renamed from drivers/media/platform/sti/hva/hva-hw.c) | 0
-rw-r--r--  drivers/media/platform/st/sti/hva/hva-hw.h (renamed from drivers/media/platform/sti/hva/hva-hw.h) | 0
-rw-r--r--  drivers/media/platform/st/sti/hva/hva-mem.c (renamed from drivers/media/platform/sti/hva/hva-mem.c) | 0
-rw-r--r--  drivers/media/platform/st/sti/hva/hva-mem.h (renamed from drivers/media/platform/sti/hva/hva-mem.h) | 0
-rw-r--r--  drivers/media/platform/st/sti/hva/hva-v4l2.c (renamed from drivers/media/platform/sti/hva/hva-v4l2.c) | 0
-rw-r--r--  drivers/media/platform/st/sti/hva/hva.h (renamed from drivers/media/platform/sti/hva/hva.h) | 0
-rw-r--r--  drivers/media/platform/st/stm32/Kconfig | 31
-rw-r--r--  drivers/media/platform/st/stm32/Makefile (renamed from drivers/media/platform/stm32/Makefile) | 0
-rw-r--r--  drivers/media/platform/st/stm32/dma2d/dma2d-hw.c (renamed from drivers/media/platform/stm32/dma2d/dma2d-hw.c) | 0
-rw-r--r--  drivers/media/platform/st/stm32/dma2d/dma2d-regs.h (renamed from drivers/media/platform/stm32/dma2d/dma2d-regs.h) | 0
-rw-r--r--  drivers/media/platform/st/stm32/dma2d/dma2d.c (renamed from drivers/media/platform/stm32/dma2d/dma2d.c) | 9
-rw-r--r--  drivers/media/platform/st/stm32/dma2d/dma2d.h (renamed from drivers/media/platform/stm32/dma2d/dma2d.h) | 0
-rw-r--r--  drivers/media/platform/st/stm32/stm32-dcmi.c (renamed from drivers/media/platform/stm32/stm32-dcmi.c) | 53
-rw-r--r--  drivers/media/platform/sunxi/Kconfig | 4
-rw-r--r--  drivers/media/platform/sunxi/sun4i-csi/Kconfig | 3
-rw-r--r--  drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h | 2
-rw-r--r--  drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c | 2
-rw-r--r--  drivers/media/platform/sunxi/sun6i-csi/Kconfig | 3
-rw-r--r--  drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c | 6
-rw-r--r--  drivers/media/platform/sunxi/sun8i-di/Kconfig | 14
-rw-r--r--  drivers/media/platform/sunxi/sun8i-rotate/Kconfig | 14
-rw-r--r--  drivers/media/platform/ti/Kconfig | 69
-rw-r--r--  drivers/media/platform/ti/Makefile | 7
-rw-r--r--  drivers/media/platform/ti/am437x/Kconfig (renamed from drivers/media/platform/am437x/Kconfig) | 3
-rw-r--r--  drivers/media/platform/ti/am437x/Makefile (renamed from drivers/media/platform/am437x/Makefile) | 0
-rw-r--r--  drivers/media/platform/ti/am437x/am437x-vpfe.c (renamed from drivers/media/platform/am437x/am437x-vpfe.c) | 0
-rw-r--r--  drivers/media/platform/ti/am437x/am437x-vpfe.h (renamed from drivers/media/platform/am437x/am437x-vpfe.h) | 0
-rw-r--r--  drivers/media/platform/ti/am437x/am437x-vpfe_regs.h (renamed from drivers/media/platform/am437x/am437x-vpfe_regs.h) | 0
-rw-r--r--  drivers/media/platform/ti/cal/Makefile | 3
-rw-r--r--  drivers/media/platform/ti/cal/cal-camerarx.c (renamed from drivers/media/platform/ti-vpe/cal-camerarx.c) | 6
-rw-r--r--  drivers/media/platform/ti/cal/cal-video.c (renamed from drivers/media/platform/ti-vpe/cal-video.c) | 3
-rw-r--r--  drivers/media/platform/ti/cal/cal.c (renamed from drivers/media/platform/ti-vpe/cal.c) | 0
-rw-r--r--  drivers/media/platform/ti/cal/cal.h (renamed from drivers/media/platform/ti-vpe/cal.h) | 0
-rw-r--r--  drivers/media/platform/ti/cal/cal_regs.h (renamed from drivers/media/platform/ti-vpe/cal_regs.h) | 0
-rw-r--r--  drivers/media/platform/ti/davinci/Kconfig (renamed from drivers/media/platform/davinci/Kconfig) | 18
-rw-r--r--  drivers/media/platform/ti/davinci/Makefile (renamed from drivers/media/platform/davinci/Makefile) | 0
-rw-r--r--  drivers/media/platform/ti/davinci/ccdc_hw_device.h (renamed from drivers/media/platform/davinci/ccdc_hw_device.h) | 0
-rw-r--r--  drivers/media/platform/ti/davinci/dm355_ccdc.c (renamed from drivers/media/platform/davinci/dm355_ccdc.c) | 0
-rw-r--r--  drivers/media/platform/ti/davinci/dm355_ccdc_regs.h (renamed from drivers/media/platform/davinci/dm355_ccdc_regs.h) | 0
-rw-r--r--  drivers/media/platform/ti/davinci/dm644x_ccdc.c (renamed from drivers/media/platform/davinci/dm644x_ccdc.c) | 0
-rw-r--r--  drivers/media/platform/ti/davinci/dm644x_ccdc_regs.h (renamed from drivers/media/platform/davinci/dm644x_ccdc_regs.h) | 0
-rw-r--r--  drivers/media/platform/ti/davinci/isif.c (renamed from drivers/media/platform/davinci/isif.c) | 0
-rw-r--r--  drivers/media/platform/ti/davinci/isif_regs.h (renamed from drivers/media/platform/davinci/isif_regs.h) | 0
-rw-r--r--  drivers/media/platform/ti/davinci/vpbe.c (renamed from drivers/media/platform/davinci/vpbe.c) | 0
-rw-r--r--  drivers/media/platform/ti/davinci/vpbe_display.c (renamed from drivers/media/platform/davinci/vpbe_display.c) | 0
-rw-r--r--  drivers/media/platform/ti/davinci/vpbe_osd.c (renamed from drivers/media/platform/davinci/vpbe_osd.c) | 0
-rw-r--r--  drivers/media/platform/ti/davinci/vpbe_osd_regs.h (renamed from drivers/media/platform/davinci/vpbe_osd_regs.h) | 0
-rw-r--r--  drivers/media/platform/ti/davinci/vpbe_venc.c (renamed from drivers/media/platform/davinci/vpbe_venc.c) | 0
-rw-r--r--  drivers/media/platform/ti/davinci/vpbe_venc_regs.h (renamed from drivers/media/platform/davinci/vpbe_venc_regs.h) | 0
-rw-r--r--  drivers/media/platform/ti/davinci/vpfe_capture.c (renamed from drivers/media/platform/davinci/vpfe_capture.c) | 0
-rw-r--r--  drivers/media/platform/ti/davinci/vpif.c (renamed from drivers/media/platform/davinci/vpif.c) | 123
-rw-r--r--  drivers/media/platform/ti/davinci/vpif.h (renamed from drivers/media/platform/davinci/vpif.h) | 0
-rw-r--r--  drivers/media/platform/ti/davinci/vpif_capture.c (renamed from drivers/media/platform/davinci/vpif_capture.c) | 27
-rw-r--r--  drivers/media/platform/ti/davinci/vpif_capture.h (renamed from drivers/media/platform/davinci/vpif_capture.h) | 0
-rw-r--r--  drivers/media/platform/ti/davinci/vpif_display.c (renamed from drivers/media/platform/davinci/vpif_display.c) | 24
-rw-r--r--  drivers/media/platform/ti/davinci/vpif_display.h (renamed from drivers/media/platform/davinci/vpif_display.h) | 0
-rw-r--r--  drivers/media/platform/ti/davinci/vpss.c (renamed from drivers/media/platform/davinci/vpss.c) | 0
-rw-r--r--  drivers/media/platform/ti/omap/Kconfig (renamed from drivers/media/platform/omap/Kconfig) | 3
-rw-r--r--  drivers/media/platform/ti/omap/Makefile (renamed from drivers/media/platform/omap/Makefile) | 0
-rw-r--r--  drivers/media/platform/ti/omap/omap_vout.c (renamed from drivers/media/platform/omap/omap_vout.c) | 0
-rw-r--r--  drivers/media/platform/ti/omap/omap_vout_vrfb.c (renamed from drivers/media/platform/omap/omap_vout_vrfb.c) | 0
-rw-r--r--  drivers/media/platform/ti/omap/omap_vout_vrfb.h (renamed from drivers/media/platform/omap/omap_vout_vrfb.h) | 0
-rw-r--r--  drivers/media/platform/ti/omap/omap_voutdef.h (renamed from drivers/media/platform/omap/omap_voutdef.h) | 0
-rw-r--r--  drivers/media/platform/ti/omap/omap_voutlib.c (renamed from drivers/media/platform/omap/omap_voutlib.c) | 0
-rw-r--r--  drivers/media/platform/ti/omap/omap_voutlib.h (renamed from drivers/media/platform/omap/omap_voutlib.h) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/Kconfig | 21
-rw-r--r--  drivers/media/platform/ti/omap3isp/Makefile (renamed from drivers/media/platform/omap3isp/Makefile) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/cfa_coef_table.h (renamed from drivers/media/platform/omap3isp/cfa_coef_table.h) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/gamma_table.h (renamed from drivers/media/platform/omap3isp/gamma_table.h) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/isp.c (renamed from drivers/media/platform/omap3isp/isp.c) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/isp.h (renamed from drivers/media/platform/omap3isp/isp.h) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/ispccdc.c (renamed from drivers/media/platform/omap3isp/ispccdc.c) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/ispccdc.h (renamed from drivers/media/platform/omap3isp/ispccdc.h) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/ispccp2.c (renamed from drivers/media/platform/omap3isp/ispccp2.c) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/ispccp2.h (renamed from drivers/media/platform/omap3isp/ispccp2.h) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/ispcsi2.c (renamed from drivers/media/platform/omap3isp/ispcsi2.c) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/ispcsi2.h (renamed from drivers/media/platform/omap3isp/ispcsi2.h) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/ispcsiphy.c (renamed from drivers/media/platform/omap3isp/ispcsiphy.c) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/ispcsiphy.h (renamed from drivers/media/platform/omap3isp/ispcsiphy.h) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/isph3a.h (renamed from drivers/media/platform/omap3isp/isph3a.h) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/isph3a_aewb.c (renamed from drivers/media/platform/omap3isp/isph3a_aewb.c) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/isph3a_af.c (renamed from drivers/media/platform/omap3isp/isph3a_af.c) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/isphist.c (renamed from drivers/media/platform/omap3isp/isphist.c) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/isphist.h (renamed from drivers/media/platform/omap3isp/isphist.h) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/isppreview.c (renamed from drivers/media/platform/omap3isp/isppreview.c) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/isppreview.h (renamed from drivers/media/platform/omap3isp/isppreview.h) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/ispreg.h (renamed from drivers/media/platform/omap3isp/ispreg.h) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/ispresizer.c (renamed from drivers/media/platform/omap3isp/ispresizer.c) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/ispresizer.h (renamed from drivers/media/platform/omap3isp/ispresizer.h) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/ispstat.c (renamed from drivers/media/platform/omap3isp/ispstat.c) | 5
-rw-r--r--  drivers/media/platform/ti/omap3isp/ispstat.h (renamed from drivers/media/platform/omap3isp/ispstat.h) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/ispvideo.c (renamed from drivers/media/platform/omap3isp/ispvideo.c) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/ispvideo.h (renamed from drivers/media/platform/omap3isp/ispvideo.h) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/luma_enhance_table.h (renamed from drivers/media/platform/omap3isp/luma_enhance_table.h) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/noise_filter_table.h (renamed from drivers/media/platform/omap3isp/noise_filter_table.h) | 0
-rw-r--r--  drivers/media/platform/ti/omap3isp/omap3isp.h (renamed from drivers/media/platform/omap3isp/omap3isp.h) | 0
-rw-r--r--  drivers/media/platform/ti/vpe/Makefile (renamed from drivers/media/platform/ti-vpe/Makefile) | 4
-rw-r--r--  drivers/media/platform/ti/vpe/csc.c (renamed from drivers/media/platform/ti-vpe/csc.c) | 0
-rw-r--r--  drivers/media/platform/ti/vpe/csc.h (renamed from drivers/media/platform/ti-vpe/csc.h) | 0
-rw-r--r--  drivers/media/platform/ti/vpe/sc.c (renamed from drivers/media/platform/ti-vpe/sc.c) | 0
-rw-r--r--  drivers/media/platform/ti/vpe/sc.h (renamed from drivers/media/platform/ti-vpe/sc.h) | 0
-rw-r--r--  drivers/media/platform/ti/vpe/sc_coeff.h (renamed from drivers/media/platform/ti-vpe/sc_coeff.h) | 0
-rw-r--r--  drivers/media/platform/ti/vpe/vpdma.c (renamed from drivers/media/platform/ti-vpe/vpdma.c) | 0
-rw-r--r--  drivers/media/platform/ti/vpe/vpdma.h (renamed from drivers/media/platform/ti-vpe/vpdma.h) | 0
-rw-r--r--  drivers/media/platform/ti/vpe/vpdma_priv.h (renamed from drivers/media/platform/ti-vpe/vpdma_priv.h) | 0
-rw-r--r--  drivers/media/platform/ti/vpe/vpe.c (renamed from drivers/media/platform/ti-vpe/vpe.c) | 0
-rw-r--r--  drivers/media/platform/ti/vpe/vpe_regs.h (renamed from drivers/media/platform/ti-vpe/vpe_regs.h) | 0
-rw-r--r--  drivers/media/platform/via/Kconfig | 14
-rw-r--r--  drivers/media/platform/via/Makefile | 2
-rw-r--r--  drivers/media/platform/via/via-camera.c (renamed from drivers/media/platform/via-camera.c) | 0
-rw-r--r--  drivers/media/platform/via/via-camera.h (renamed from drivers/media/platform/via-camera.h) | 0
-rw-r--r--  drivers/media/platform/xilinx/Kconfig | 10
-rw-r--r--  drivers/media/platform/xilinx/xilinx-csi2rxss.c | 106
-rw-r--r--  drivers/media/radio/Kconfig | 348
-rw-r--r--  drivers/media/radio/Makefile | 43
-rw-r--r--  drivers/media/radio/radio-sf16fmi.c | 2
-rw-r--r--  drivers/media/radio/si470x/Kconfig | 2
-rw-r--r--  drivers/media/radio/wl128x/Kconfig | 2
-rw-r--r--  drivers/media/radio/wl128x/fmdrv_common.c | 3
-rw-r--r--  drivers/media/rc/Kconfig | 376
-rw-r--r--  drivers/media/rc/Makefile | 47
-rw-r--r--  drivers/media/rc/fintek-cir.c | 2
-rw-r--r--  drivers/media/rc/gpio-ir-tx.c | 28
-rw-r--r--  drivers/media/rc/igorplugusb.c | 2
-rw-r--r--  drivers/media/rc/iguanair.c | 2
-rw-r--r--  drivers/media/rc/ir-hix5hd2.c | 2
-rw-r--r--  drivers/media/rc/ir-imon-decoder.c | 2
-rw-r--r--  drivers/media/rc/ir-jvc-decoder.c | 2
-rw-r--r--  drivers/media/rc/ir-mce_kbd-decoder.c | 2
-rw-r--r--  drivers/media/rc/ir-nec-decoder.c | 2
-rw-r--r--  drivers/media/rc/ir-rc5-decoder.c | 2
-rw-r--r--  drivers/media/rc/ir-rc6-decoder.c | 2
-rw-r--r--  drivers/media/rc/ir-rcmm-decoder.c | 2
-rw-r--r--  drivers/media/rc/ir-sanyo-decoder.c | 4
-rw-r--r--  drivers/media/rc/ir-sharp-decoder.c | 2
-rw-r--r--  drivers/media/rc/ir-sony-decoder.c | 2
-rw-r--r--  drivers/media/rc/ir-xmp-decoder.c | 2
-rw-r--r--  drivers/media/rc/ir_toy.c | 2
-rw-r--r--  drivers/media/rc/ite-cir.c | 2
-rw-r--r--  drivers/media/rc/keymaps/Makefile | 34
-rw-r--r--  drivers/media/rc/lirc_dev.c | 36
-rw-r--r--  drivers/media/rc/meson-ir-tx.c | 2
-rw-r--r--  drivers/media/rc/mtk-cir.c | 39
-rw-r--r--  drivers/media/rc/nuvoton-cir.c | 2
-rw-r--r--  drivers/media/rc/rc-core-priv.h | 2
-rw-r--r--  drivers/media/rc/rc-ir-raw.c | 2
-rw-r--r--  drivers/media/rc/rc-loopback.c | 6
-rw-r--r--  drivers/media/rc/st_rc.c | 2
-rw-r--r--  drivers/media/rc/sunxi-cir.c | 2
-rw-r--r--  drivers/media/rc/winbond-cir.c | 2
-rw-r--r--  drivers/media/spi/Kconfig | 26
-rw-r--r--  drivers/media/spi/Makefile | 7
-rw-r--r--  drivers/media/spi/cxd2880-spi.c | 4
-rw-r--r--  drivers/media/spi/gs1662.c | 4
-rw-r--r--  drivers/media/test-drivers/Kconfig | 8
-rw-r--r--  drivers/media/test-drivers/Makefile | 14
-rw-r--r--  drivers/media/test-drivers/vicodec/Kconfig | 2
-rw-r--r--  drivers/media/test-drivers/vidtv/Kconfig | 1
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_s302m.c | 17
-rw-r--r--  drivers/media/test-drivers/vimc/Kconfig | 3
-rw-r--r--  drivers/media/test-drivers/vimc/vimc-capture.c | 9
-rw-r--r--  drivers/media/test-drivers/vimc/vimc-common.h | 7
-rw-r--r--  drivers/media/test-drivers/vimc/vimc-core.c | 10
-rw-r--r--  drivers/media/test-drivers/vivid/Kconfig | 2
-rw-r--r--  drivers/media/test-drivers/vivid/vivid-core.h | 5
-rw-r--r--  drivers/media/test-drivers/vivid/vivid-ctrls.c | 32
-rw-r--r--  drivers/media/test-drivers/vivid/vivid-kthread-cap.c | 10
-rw-r--r--  drivers/media/test-drivers/vivid/vivid-kthread-out.c | 12
-rw-r--r--  drivers/media/test-drivers/vivid/vivid-kthread-touch.c | 10
-rw-r--r--  drivers/media/test-drivers/vivid/vivid-sdr-cap.c | 15
-rw-r--r--  drivers/media/test-drivers/vivid/vivid-touch-cap.c | 2
-rw-r--r--  drivers/media/tuners/Kconfig | 241
-rw-r--r--  drivers/media/tuners/Makefile | 66
-rw-r--r--  drivers/media/tuners/e4000.c | 6
-rw-r--r--  drivers/media/tuners/fc2580.c | 6
-rw-r--r--  drivers/media/tuners/msi001.c | 3
-rw-r--r--  drivers/media/tuners/tuner-types.c | 2
-rw-r--r--  drivers/media/tuners/xc2028-types.h (renamed from drivers/media/tuners/tuner-xc2028-types.h) | 6
-rw-r--r--  drivers/media/tuners/xc2028.c (renamed from drivers/media/tuners/tuner-xc2028.c) | 6
-rw-r--r--  drivers/media/tuners/xc2028.h (renamed from drivers/media/tuners/tuner-xc2028.h) | 2
-rw-r--r--  drivers/media/tuners/xc4000.c | 2
-rw-r--r--  drivers/media/usb/Kconfig | 33
-rw-r--r--  drivers/media/usb/Makefile | 40
-rw-r--r--  drivers/media/usb/airspy/Kconfig | 2
-rw-r--r--  drivers/media/usb/au0828/Kconfig | 6
-rw-r--r--  drivers/media/usb/cpia2/Kconfig | 2
-rw-r--r--  drivers/media/usb/dvb-usb-v2/Kconfig | 34
-rw-r--r--  drivers/media/usb/dvb-usb/Kconfig | 368
-rw-r--r--  drivers/media/usb/dvb-usb/cxusb.c | 2
-rw-r--r--  drivers/media/usb/dvb-usb/dib0700_devices.c | 2
-rw-r--r--  drivers/media/usb/em28xx/em28xx-cards.c | 13
-rw-r--r--  drivers/media/usb/em28xx/em28xx-i2c.c | 2
-rw-r--r--  drivers/media/usb/em28xx/em28xx.h | 2
-rw-r--r--  drivers/media/usb/go7007/s2250-board.c | 10
-rw-r--r--  drivers/media/usb/go7007/snd-go7007.c | 2
-rw-r--r--  drivers/media/usb/gspca/Kconfig | 121
-rw-r--r--  drivers/media/usb/gspca/Makefile | 88
-rw-r--r--  drivers/media/usb/gspca/gl860/Kconfig | 2
-rw-r--r--  drivers/media/usb/gspca/jl2005bcd.c | 4
-rw-r--r--  drivers/media/usb/gspca/m5602/Kconfig | 2
-rw-r--r--  drivers/media/usb/gspca/pac7302.c | 1
-rw-r--r--  drivers/media/usb/hackrf/Kconfig | 2
-rw-r--r--  drivers/media/usb/hdpvr/Kconfig | 2
-rw-r--r--  drivers/media/usb/hdpvr/hdpvr-video.c | 4
-rw-r--r--  drivers/media/usb/msi2500/Kconfig | 2
-rw-r--r--  drivers/media/usb/pvrusb2/Kconfig | 2
-rw-r--r--  drivers/media/usb/pwc/Kconfig | 2
-rw-r--r--  drivers/media/usb/pwc/pwc-uncompress.c | 2
-rw-r--r--  drivers/media/usb/s2255/Kconfig | 2
-rw-r--r--  drivers/media/usb/stk1160/stk1160-core.c | 2
-rw-r--r--  drivers/media/usb/stk1160/stk1160-v4l.c | 16
-rw-r--r--  drivers/media/usb/stk1160/stk1160-video.c | 142
-rw-r--r--  drivers/media/usb/stk1160/stk1160.h | 23
-rw-r--r--  drivers/media/usb/stkwebcam/Kconfig | 2
-rw-r--r--  drivers/media/usb/stkwebcam/stk-webcam.c | 7
-rw-r--r--  drivers/media/usb/tm6000/tm6000-cards.c | 2
-rw-r--r--  drivers/media/usb/tm6000/tm6000-dvb.c | 2
-rw-r--r--  drivers/media/usb/tm6000/tm6000-i2c.c | 2
-rw-r--r--  drivers/media/usb/usbtv/Kconfig | 2
-rw-r--r--  drivers/media/usb/uvc/Kconfig | 2
-rw-r--r--  drivers/media/usb/uvc/uvc_driver.c | 9
-rw-r--r--  drivers/media/usb/zr364xx/Kconfig | 2
-rw-r--r--  drivers/media/v4l2-core/Kconfig | 12
-rw-r--r--  drivers/media/v4l2-core/Makefile | 34
-rw-r--r--  drivers/media/v4l2-core/tuner-core.c | 2
-rw-r--r--  drivers/media/v4l2-core/v4l2-ctrls-core.c | 20
-rw-r--r--  drivers/media/v4l2-core/v4l2-dev.c | 68
-rw-r--r--  drivers/media/v4l2-core/v4l2-fwnode.c | 46
-rw-r--r--  drivers/media/v4l2-core/v4l2-ioctl.c | 14
-rw-r--r--  drivers/media/v4l2-core/v4l2-mem2mem.c | 53
-rw-r--r--  drivers/media/v4l2-core/v4l2-subdev.c | 8
-rw-r--r--  drivers/memory/brcmstb_dpfe.c | 2
-rw-r--r--  drivers/memory/emif.c | 8
-rw-r--r--  drivers/memory/fsl_ifc.c | 9
-rw-r--r--  drivers/memory/mtk-smi.c | 71
-rw-r--r--  drivers/memory/of_memory.c | 29
-rw-r--r--  drivers/memory/tegra/Kconfig | 1
-rw-r--r--  drivers/memory/tegra/tegra20-emc.c | 2
-rw-r--r--  drivers/memory/tegra/tegra210-emc-core.c | 2
-rw-r--r--  drivers/memory/tegra/tegra30-emc.c | 131
-rw-r--r--  drivers/memstick/core/ms_block.c | 64
-rw-r--r--  drivers/memstick/core/ms_block.h | 1
-rw-r--r--  drivers/memstick/core/mspro_block.c | 57
-rw-r--r--  drivers/mfd/arizona-spi.c | 4
-rw-r--r--  drivers/mfd/da9052-spi.c | 3
-rw-r--r--  drivers/mfd/ezx-pcap.c | 8
-rw-r--r--  drivers/mfd/lpc_ich.c | 59
-rw-r--r--  drivers/mfd/madera-spi.c | 4
-rw-r--r--  drivers/mfd/mc13xxx-spi.c | 3
-rw-r--r--  drivers/mfd/rsmu_spi.c | 4
-rw-r--r--  drivers/mfd/stmpe-spi.c | 4
-rw-r--r--  drivers/mfd/tps65912-spi.c | 4
-rw-r--r--  drivers/misc/ad525x_dpot-spi.c | 3
-rw-r--r--  drivers/misc/bcm-vk/bcm_vk.h | 2
-rw-r--r--  drivers/misc/eeprom/eeprom_93xx46.c | 4
-rw-r--r--  drivers/misc/habanalabs/include/common/cpucp_if.h | 6
-rw-r--r--  drivers/misc/habanalabs/include/gaudi/gaudi_packets.h | 4
-rw-r--r--  drivers/misc/habanalabs/include/goya/goya_packets.h | 4
-rw-r--r--  drivers/misc/hi6421v600-irq.c | 6
-rw-r--r--  drivers/misc/lattice-ecp3-config.c | 4
-rw-r--r--  drivers/misc/lis3lv02d/lis3lv02d_spi.c | 4
-rw-r--r--  drivers/misc/sgi-xp/xpnet.c | 2
-rw-r--r--  drivers/mmc/core/block.c | 2
-rw-r--r--  drivers/mmc/core/bus.c | 9
-rw-r--r--  drivers/mmc/core/bus.h | 3
-rw-r--r--  drivers/mmc/core/host.c | 24
-rw-r--r--  drivers/mmc/core/mmc.c | 39
-rw-r--r--  drivers/mmc/core/mmc_ops.c | 13
-rw-r--r--  drivers/mmc/core/mmc_ops.h | 3
-rw-r--r--  drivers/mmc/core/sd.c | 27
-rw-r--r--  drivers/mmc/core/sdio.c | 5
-rw-r--r--  drivers/mmc/core/sdio_bus.c | 7
-rw-r--r--  drivers/mmc/host/Kconfig | 13
-rw-r--r--  drivers/mmc/host/Makefile | 1
-rw-r--r--  drivers/mmc/host/davinci_mmc.c | 12
-rw-r--r--  drivers/mmc/host/dw_mmc-rockchip.c | 27
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 12
-rw-r--r--  drivers/mmc/host/dw_mmc.h | 2
-rw-r--r--  drivers/mmc/host/litex_mmc.c | 661
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c | 15
-rw-r--r--  drivers/mmc/host/mmc_spi.c | 3
-rw-r--r--  drivers/mmc/host/mtk-sd.c | 4
-rw-r--r--  drivers/mmc/host/renesas_sdhi_internal_dmac.c | 2
-rw-r--r--  drivers/mmc/host/rtsx_pci_sdmmc.c | 29
-rw-r--r--  drivers/mmc/host/sdhci-of-at91.c | 6
-rw-r--r--  drivers/mmc/host/sdhci-of-esdhc.c | 10
-rw-r--r--  drivers/mmc/host/sdhci-pci-gli.c | 133
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c | 15
-rw-r--r--  drivers/mmc/host/sdhci_am654.c | 28
-rw-r--r--  drivers/mmc/host/sh_mmcif.c | 7
-rw-r--r--  drivers/mmc/host/sunxi-mmc.c | 9
-rw-r--r--  drivers/mmc/host/tmio_mmc.h | 4
-rw-r--r--  drivers/mmc/host/wmt-sdmmc.c | 7
-rw-r--r--  drivers/mtd/devices/mchp23k256.c | 4
-rw-r--r--  drivers/mtd/devices/mchp48l640.c | 4
-rw-r--r--  drivers/mtd/devices/mtd_dataflash.c | 4
-rw-r--r--  drivers/mtd/devices/sst25l.c | 4
-rw-r--r--  drivers/mtd/mtdswap.c | 2
-rw-r--r--  drivers/mtd/nand/raw/Kconfig | 3
-rw-r--r--  drivers/mtd/nand/raw/sharpsl.c | 1
-rw-r--r--  drivers/mtd/spi-nor/controllers/Kconfig | 36
-rw-r--r--  drivers/mtd/spi-nor/controllers/Makefile | 3
-rw-r--r--  drivers/mtd/spi-nor/controllers/intel-spi.h | 21
-rw-r--r--  drivers/net/Makefile | 2
-rw-r--r--  drivers/net/amt.c | 4
-rw-r--r--  drivers/net/arcnet/com20020-pci.c | 3
-rw-r--r--  drivers/net/bareudp.c | 19
-rw-r--r--  drivers/net/bonding/bond_alb.c | 31
-rw-r--r--  drivers/net/bonding/bond_main.c | 324
-rw-r--r--  drivers/net/bonding/bond_netlink.c | 59
-rw-r--r--  drivers/net/bonding/bond_options.c | 74
-rw-r--r--  drivers/net/bonding/bond_procfs.c | 1
-rw-r--r--  drivers/net/bonding/bond_sysfs_slave.c | 8
-rw-r--r--  drivers/net/caif/caif_serial.c | 2
-rw-r--r--  drivers/net/can/c_can/c_can_ethtool.c | 9
-rw-r--r--  drivers/net/can/dev/bittiming.c | 20
-rw-r--r--  drivers/net/can/dev/dev.c | 2
-rw-r--r--  drivers/net/can/m_can/tcan4x5x-core.c | 4
-rw-r--r--  drivers/net/can/rcar/rcar_canfd.c | 359
-rw-r--r--  drivers/net/can/slcan.c | 2
-rw-r--r--  drivers/net/can/softing/softing_main.c | 5
-rw-r--r--  drivers/net/can/spi/hi311x.c | 10
-rw-r--r--  drivers/net/can/spi/mcp251x.c | 8
-rw-r--r--  drivers/net/can/spi/mcp251xfd/Makefile | 2
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c | 4
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 353
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c | 4
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c | 143
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c | 153
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h | 62
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c | 24
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c | 417
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c | 22
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c | 6
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd.h | 96
-rw-r--r--  drivers/net/can/usb/etas_es58x/es58x_core.c | 9
-rw-r--r--  drivers/net/can/usb/etas_es58x/es58x_core.h | 8
-rw-r--r--  drivers/net/can/usb/etas_es58x/es58x_fd.c | 6
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 456
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 4
-rw-r--r--  drivers/net/can/usb/ucan.c | 4
-rw-r--r--  drivers/net/can/vcan.c | 2
-rw-r--r--  drivers/net/can/vxcan.c | 21
-rw-r--r--  drivers/net/can/xilinx_can.c | 9
-rw-r--r--  drivers/net/dsa/Kconfig | 12
-rw-r--r--  drivers/net/dsa/Makefile | 3
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 87
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h | 25
-rw-r--r--  drivers/net/dsa/b53/b53_serdes.c | 19
-rw-r--r--  drivers/net/dsa/b53/b53_serdes.h | 5
-rw-r--r--  drivers/net/dsa/b53/b53_spi.c | 4
-rw-r--r--  drivers/net/dsa/b53/b53_srab.c | 35
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 54
-rw-r--r--  drivers/net/dsa/dsa_loop.c | 3
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek.c | 9
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c | 2
-rw-r--r--  drivers/net/dsa/lan9303-core.c | 16
-rw-r--r--  drivers/net/dsa/lantiq_gswip.c | 62
-rw-r--r--  drivers/net/dsa/microchip/ksz8.h | 1
-rw-r--r--  drivers/net/dsa/microchip/ksz8795.c | 92
-rw-r--r--  drivers/net/dsa/microchip/ksz8795_reg.h | 4
-rw-r--r--  drivers/net/dsa/microchip/ksz8795_spi.c | 15
-rw-r--r--  drivers/net/dsa/microchip/ksz9477.c | 156
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_i2c.c | 1
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_reg.h | 3
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_spi.c | 16
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 21
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.h | 15
-rw-r--r--  drivers/net/dsa/mt7530.c | 21
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 925
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.h | 51
-rw-r--r--  drivers/net/dsa/mv88e6xxx/devlink.c | 94
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.h | 11
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_vtu.c | 316
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2.h | 3
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2_scratch.c | 28
-rw-r--r--  drivers/net/dsa/mv88e6xxx/hwtstamp.c | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 41
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.h | 16
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.c | 81
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.h | 5
-rw-r--r--  drivers/net/dsa/mv88e6xxx/smi.c | 35
-rw-r--r--  drivers/net/dsa/ocelot/felix.c | 843
-rw-r--r--  drivers/net/dsa/ocelot/felix.h | 9
-rw-r--r--  drivers/net/dsa/ocelot/felix_vsc9959.c | 48
-rw-r--r--  drivers/net/dsa/ocelot/seville_vsc9953.c | 48
-rw-r--r--  drivers/net/dsa/qca/ar9331.c | 45
-rw-r--r--  drivers/net/dsa/qca8k.c | 1582
-rw-r--r--  drivers/net/dsa/qca8k.h | 54
-rw-r--r--  drivers/net/dsa/realtek-smi-core.c | 523
-rw-r--r--  drivers/net/dsa/realtek/Kconfig | 40
-rw-r--r--  drivers/net/dsa/realtek/Makefile | 6
-rw-r--r--  drivers/net/dsa/realtek/realtek-mdio.c | 290
-rw-r--r--  drivers/net/dsa/realtek/realtek-smi.c | 581
-rw-r--r--  drivers/net/dsa/realtek/realtek.h (renamed from drivers/net/dsa/realtek-smi-core.h) | 91
-rw-r--r--  drivers/net/dsa/realtek/rtl8365mb.c (renamed from drivers/net/dsa/rtl8365mb.c) | 734
-rw-r--r--  drivers/net/dsa/realtek/rtl8366-core.c (renamed from drivers/net/dsa/rtl8366.c) | 164
-rw-r--r--  drivers/net/dsa/realtek/rtl8366rb.c (renamed from drivers/net/dsa/rtl8366rb.c) | 460
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_flower.c | 47
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c | 202
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.c | 2
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_vl.c | 16
-rw-r--r--  drivers/net/dsa/vitesse-vsc73xx-spi.c | 6
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x.c | 32
-rw-r--r--  drivers/net/ethernet/3com/typhoon.c | 24
-rw-r--r--  drivers/net/ethernet/8390/mcf8390.c | 12
-rw-r--r--  drivers/net/ethernet/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/agere/et131x.c | 14
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdma.c | 2
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 8
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 5
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 12
-rw-r--r--  drivers/net/ethernet/arc/emac_mdio.c | 5
-rw-r--r--  drivers/net/ethernet/asix/ax88796c_main.c | 6
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 5
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 28
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 15
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 83
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 19
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 44
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 56
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 499
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 152
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 22
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 7
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 4
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 88
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/t3_hw.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c | 59
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 8
-rw-r--r--  drivers/net/ethernet/davicom/Kconfig | 31
-rw-r--r--  drivers/net/ethernet/davicom/Makefile | 1
-rw-r--r--  drivers/net/ethernet/davicom/dm9051.c | 1260
-rw-r--r--  drivers/net/ethernet/davicom/dm9051.h | 162
-rw-r--r--  drivers/net/ethernet/dec/tulip/pnic.c | 2
-rw-r--r--  drivers/net/ethernet/dlink/sundance.c | 60
-rw-r--r--  drivers/net/ethernet/ezchip/nps_enet.c | 1
-rw-r--r--  drivers/net/ethernet/faraday/Kconfig | 12
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 437
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 32
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c | 171
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h | 8
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h | 12
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpmac.c | 54
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpmac.h | 5
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h | 6
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.h | 6
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.h | 38
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_cbdr.c | 41
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_mdio.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c | 14
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_qos.c | 150
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/xgmac_mdio.c | 93
-rw-r--r--  drivers/net/ethernet/fungible/Kconfig | 28
-rw-r--r--  drivers/net/ethernet/fungible/Makefile | 7
-rw-r--r--  drivers/net/ethernet/fungible/funcore/Makefile | 5
-rw-r--r--  drivers/net/ethernet/fungible/funcore/fun_dev.c | 843
-rw-r--r--  drivers/net/ethernet/fungible/funcore/fun_dev.h | 150
-rw-r--r--  drivers/net/ethernet/fungible/funcore/fun_hci.h | 1202
-rw-r--r--  drivers/net/ethernet/fungible/funcore/fun_queue.c | 601
-rw-r--r--  drivers/net/ethernet/fungible/funcore/fun_queue.h | 175
-rw-r--r--  drivers/net/ethernet/fungible/funeth/Kconfig | 17
-rw-r--r--  drivers/net/ethernet/fungible/funeth/Makefile | 10
-rw-r--r--  drivers/net/ethernet/fungible/funeth/fun_port.h | 97
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth.h | 171
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_devlink.c | 40
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_devlink.h | 13
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_ethtool.c | 1162
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_ktls.c | 155
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_ktls.h | 30
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_main.c | 2091
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_rx.c | 826
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_trace.h | 117
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_tx.c | 763
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_txrx.h | 264
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c | 6
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 79
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 11
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 11
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 8
-rw-r--r--  drivers/net/ethernet/i825xx/sun3_82586.h | 4
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 281
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 8
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 8
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 48
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 92
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 155
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 9
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 52
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 25
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 36
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 57
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 23
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 29
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_common.c | 4
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 498
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_status.h | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.c | 62
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 258
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile | 15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 49
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 276
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_arfs.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 21
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 102
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.c | 170
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 31
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 345
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_type.h | 46
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fltr.c | 37
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fltr.h | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_gnss.c | 376
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_gnss.h | 50
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_idc.c | 14
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_idc_int.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 596
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 22
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 514
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_osdep.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.c | 38
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.h | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_protocol_type.h | 21
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 31
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_repr.c | 111
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_repr.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c | 2185
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.h | 163
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 994
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h | 33
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.c | 152
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_trace.h | 24
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 58
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 24
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.h | 30
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.c | 1029
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.h | 290
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib_private.h | 40
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_mbx.c | 532
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_mbx.h | 52
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c | 211
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h | 19
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.c (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c) | 3994
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.h | 82
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 14
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 346
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vlan.h | 18
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vlan_mode.c | 439
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vlan_mode.h | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c | 707
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h | 32
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c | 103
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h | 29
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 396
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.h | 28
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 38
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 6
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 22
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 35
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_phy.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 19
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 36
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 21
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 63
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 207
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 33
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 33
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/mbx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 42
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.h | 1
-rw-r--r--  drivers/net/ethernet/jme.c | 3
-rw-r--r--  drivers/net/ethernet/lantiq_xrx200.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 14
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 331
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 247
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 13
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 19
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h | 6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/ptp.c | 131
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/ptp.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rpm.c | 224
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rpm.h | 30
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 117
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 79
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 21
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c | 170
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 30
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 50
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 75
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h | 15
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 48
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 55
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera.h | 5
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_acl.c | 124
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_acl.h | 30
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_flow.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_flow.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_flower.c | 87
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_flower.h | 1
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_hw.c | 55
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_hw.h | 6
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_main.c | 14
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_router.c | 412
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_router_hw.c | 132
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_router_hw.h | 44
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_star_emac.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 64
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 380
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 59
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 49
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 106
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/qos.c | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/qos.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rss.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/selq.c | 231
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/selq.h | 51
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c | 80
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c | 64
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c | 40
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h | 49
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c | 79
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c | 372
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c | 68
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c | 89
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 209
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c | 75
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tir.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 53
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 212
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 170
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 223
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 78
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 844
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 39
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 128
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c | 87
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 180
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 57
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c142
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mcg.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c129
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c253
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c231
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c143
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c159
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h76
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c178
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h32
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c91
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c305
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c6
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c6
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c9
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c6
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c4
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c380
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c276
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h221
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c566
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h10
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Kconfig1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Makefile3
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c34
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c148
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h56
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c45
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c9
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c618
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_regs.h121
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c85
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile3
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c34
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c44
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c26
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h94
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h335
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c42
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c37
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c60
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c10
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c685
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c251
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c20
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c4
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana.h15
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c70
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c35
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c4
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c662
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h18
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c54
-rw-r--r--drivers/net/ethernet/mscc/ocelot_io.c13
-rw-r--r--drivers/net/ethernet/mscc/ocelot_mrp.c64
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c251
-rw-r--r--drivers/net/ethernet/mscc/ocelot_police.c41
-rw-r--r--drivers/net/ethernet/mscc/ocelot_police.h5
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.c66
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile8
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c58
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h7
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h49
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/qos_conf.c470
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c1350
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h106
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/rings.c275
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/xsk.c408
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c1524
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h129
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/rings.c195
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h12
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c58
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c43
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h204
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2132
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h87
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c66
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_dp.c442
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_dp.h215
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c18
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c51
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c170
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_xsk.h41
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c32
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.c17
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c29
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c49
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c2
-rw-r--r--drivers/net/ethernet/ni/nixge.c5
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c5
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c17
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c164
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c206
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c125
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c37
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c67
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c90
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h38
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c47
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c7
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c3
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c6
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c94
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c71
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c15
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c18
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c8
-rw-r--r--drivers/net/ethernet/sfc/ef10.c26
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c9
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c63
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c2
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/nic_common.h5
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c18
-rw-r--r--drivers/net/ethernet/sfc/rx_common.h6
-rw-r--r--drivers/net/ethernet/sfc/siena.c8
-rw-r--r--drivers/net/ethernet/socionext/netsec.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c388
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c37
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c188
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c8
-rw-r--r--drivers/net/ethernet/sun/cassini.c23
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/sun/sunhme.c6
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c56
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c228
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h5
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-switchdev.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw_switchdev.c4
-rw-r--r--drivers/net/ethernet/ti/cpts.c4
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c25
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c2
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c6
-rw-r--r--drivers/net/ethernet/wiznet/w5100-spi.c4
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac.h4
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c7
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h20
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c608
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c6
-rw-r--r--drivers/net/ethernet/xscale/Kconfig4
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c85
-rw-r--r--drivers/net/ethernet/xscale/ptp_ixp46x.c1
-rw-r--r--drivers/net/fjes/fjes_main.c5
-rw-r--r--drivers/net/geneve.c89
-rw-r--r--drivers/net/gtp.c567
-rw-r--r--drivers/net/hamradio/baycom_epp.c4
-rw-r--r--drivers/net/hamradio/dmascc.c7
-rw-r--r--drivers/net/hyperv/netvsc.c25
-rw-r--r--drivers/net/hyperv/netvsc_drv.c3
-rw-r--r--drivers/net/ieee802154/adf7242.c4
-rw-r--r--drivers/net/ieee802154/at86rf230.c4
-rw-r--r--drivers/net/ieee802154/atusb.c186
-rw-r--r--drivers/net/ieee802154/ca8210.c6
-rw-r--r--drivers/net/ieee802154/cc2520.c4
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c2
-rw-r--r--drivers/net/ieee802154/mcr20a.c4
-rw-r--r--drivers/net/ieee802154/mrf24j40.c4
-rw-r--r--drivers/net/ipa/Kconfig2
-rw-r--r--drivers/net/ipa/gsi_trans.c11
-rw-r--r--drivers/net/ipa/gsi_trans.h10
-rw-r--r--drivers/net/ipa/ipa_data-v3.1.c2
-rw-r--r--drivers/net/ipa/ipa_data-v3.5.1.c2
-rw-r--r--drivers/net/ipa/ipa_data-v4.11.c2
-rw-r--r--drivers/net/ipa/ipa_data-v4.2.c2
-rw-r--r--drivers/net/ipa/ipa_data-v4.5.c2
-rw-r--r--drivers/net/ipa/ipa_data-v4.9.c2
-rw-r--r--drivers/net/ipa/ipa_data.h2
-rw-r--r--drivers/net/ipa/ipa_endpoint.c217
-rw-r--r--drivers/net/ipa/ipa_endpoint.h8
-rw-r--r--drivers/net/ipa/ipa_power.c178
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c2
-rw-r--r--drivers/net/loopback.c6
-rw-r--r--drivers/net/macsec.c6
-rw-r--r--drivers/net/macvlan.c22
-rw-r--r--drivers/net/macvtap.c6
-rw-r--r--drivers/net/mctp/Kconfig12
-rw-r--r--drivers/net/mctp/Makefile1
-rw-r--r--drivers/net/mctp/mctp-i2c.c1082
-rw-r--r--drivers/net/mctp/mctp-serial.c2
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c76
-rw-r--r--drivers/net/mdio/mdio-mux.c4
-rw-r--r--drivers/net/mdio/mdio-xgene.c3
-rw-r--r--drivers/net/mhi_net.c2
-rw-r--r--drivers/net/net_failover.c2
-rw-r--r--drivers/net/netdevsim/Makefile2
-rw-r--r--drivers/net/netdevsim/dev.c102
-rw-r--r--drivers/net/netdevsim/hwstats.c486
-rw-r--r--drivers/net/netdevsim/netdevsim.h25
-rw-r--r--drivers/net/ntb_netdev.c2
-rw-r--r--drivers/net/pcs/pcs-xpcs.c41
-rw-r--r--drivers/net/phy/Kconfig1
-rw-r--r--drivers/net/phy/aquantia_main.c4
-rw-r--r--drivers/net/phy/at803x.c146
-rw-r--r--drivers/net/phy/dp83640.c19
-rw-r--r--drivers/net/phy/dp83822.c2
-rw-r--r--drivers/net/phy/marvell.c8
-rw-r--r--drivers/net/phy/meson-gxl.c31
-rw-r--r--drivers/net/phy/micrel.c1103
-rw-r--r--drivers/net/phy/microchip_t1.c359
-rw-r--r--drivers/net/phy/mscc/mscc_main.c3
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c2
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c2
-rw-r--r--drivers/net/phy/phy-core.c22
-rw-r--r--drivers/net/phy/phy_device.c19
-rw-r--r--drivers/net/phy/phylink.c90
-rw-r--r--drivers/net/phy/sfp-bus.c6
-rw-r--r--drivers/net/phy/sfp.c48
-rw-r--r--drivers/net/phy/spi_ks8995.c4
-rw-r--r--drivers/net/plip/plip.c2
-rw-r--r--drivers/net/rionet.c2
-rw-r--r--drivers/net/sb1000.c2
-rw-r--r--drivers/net/slip/slip.c2
-rw-r--r--drivers/net/tap.c38
-rw-r--r--drivers/net/team/team.c5
-rw-r--r--drivers/net/tun.c102
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/asix.h10
-rw-r--r--drivers/net/usb/asix_common.c81
-rw-r--r--drivers/net/usb/asix_devices.c104
-rw-r--r--drivers/net/usb/cdc_mbim.c1
-rw-r--r--drivers/net/usb/gl620a.c2
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/lan78xx.c7
-rw-r--r--drivers/net/usb/smsc95xx.c53
-rw-r--r--drivers/net/veth.c194
-rw-r--r--drivers/net/virtio_net.c3
-rw-r--r--drivers/net/vrf.c9
-rw-r--r--drivers/net/vxlan/Makefile7
-rw-r--r--drivers/net/vxlan/vxlan_core.c (renamed from drivers/net/vxlan.c)495
-rw-r--r--drivers/net/vxlan/vxlan_multicast.c272
-rw-r--r--drivers/net/vxlan/vxlan_private.h162
-rw-r--r--drivers/net/vxlan/vxlan_vnifilter.c999
-rw-r--r--drivers/net/wan/Kconfig3
-rw-r--r--drivers/net/wan/ixp4xx_hss.c39
-rw-r--r--drivers/net/wan/lmc/lmc_main.c3
-rw-r--r--drivers/net/wan/slic_ds26522.c3
-rw-r--r--drivers/net/wireguard/device.c38
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c153
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h296
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c331
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c36
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c15
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h27
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h40
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c17
-rw-r--r--drivers/net/wireless/ath/ath10k/swap.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c35
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c7
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c6
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c15
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h12
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.c19
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c515
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h180
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h13
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c357
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c35
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_desc.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c471
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.h143
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c23
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c116
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c5
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c10
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c40
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c15
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c25
-rw-r--r--drivers/net/wireless/ath/ath11k/rx_desc.h6
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c300
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h132
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c23
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c1
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c22
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h38
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/rng.c72
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h1
-rw-r--r--drivers/net/wireless/ath/carl9170/fwdesc.h2
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c61
-rw-r--r--drivers/net/wireless/ath/carl9170/wlan.h2
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c6
-rw-r--r--drivers/net/wireless/ath/regd.c10
-rw-r--r--drivers/net/wireless/ath/spectral_common.h4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c107
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c36
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h14
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c35
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c34
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c78
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h2
-rw-r--r--drivers/net/wireless/cisco/airo.c2
-rw-r--r--drivers/net/wireless/intel/Makefile1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c229
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/config.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h148
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h127
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h52
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c331
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c72
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c181
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/main.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/net.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c405
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c50
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c362
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/offloading.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rfi.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c294
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c313
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c112
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c101
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.h21
-rw-r--r--drivers/net/wireless/intersil/p54/p54spi.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c410
-rw-r--r--drivers/net/wireless/marvell/libertas/if_spi.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/rx.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_txrx.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c63
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h36
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c194
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c36
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c236
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c91
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c36
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h76
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c422
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h118
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Kconfig13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Makefile1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c225
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c466
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c188
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h54
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c310
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c777
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.h15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c152
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c1533
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h63
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c691
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h130
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c259
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h893
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/soc.c1212
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c106
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Kconfig11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c65
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/dma.c121
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c70
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c209
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c70
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c313
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h63
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c126
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c48
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h64
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c89
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c40
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c306
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c252
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio_txrx.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c125
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c9
-rw-r--r--drivers/net/wireless/ray_cs.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/cam.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c298
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.h5
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c59
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h9
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c127
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h52
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c5
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c47
-rw-r--r--drivers/net/wireless/realtek/rtw88/sar.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig4
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile13
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c40
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h5
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c41
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c679
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h291
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c93
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.c160
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c686
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h491
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c646
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h84
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c147
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c361
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h81
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c521
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h75
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h217
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c79
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c86
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c2744
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h49
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c46
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c529
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.h76
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c43
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h3
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_spi.c4
-rw-r--r--drivers/net/wireless/st/cw1200/queue.c3
-rw-r--r--drivers/net/wireless/st/cw1200/wsm.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c4
-rw-r--r--drivers/net/wireless/zydas/zd1201.c3
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_debugfs.c5
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c54
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.h7
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mmio.c6
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mmio.h6
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux.c21
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux.h133
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux_codec.c742
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux_codec.h142
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.c1
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.h1
-rw-r--r--drivers/net/wwan/qcom_bam_dmux.c2
-rw-r--r--drivers/net/wwan/wwan_core.c36
-rw-r--r--drivers/net/xen-netfront.c93
-rw-r--r--drivers/nfc/nfcmrvl/spi.c3
-rw-r--r--drivers/nfc/port100.c2
-rw-r--r--drivers/nfc/st-nci/spi.c4
-rw-r--r--drivers/nfc/st-nci/vendor_cmds.c2
-rw-r--r--drivers/nfc/st21nfca/i2c.c3
-rw-r--r--drivers/nfc/st21nfca/vendor_cmds.c4
-rw-r--r--drivers/nfc/st95hf/core.c4
-rw-r--r--drivers/nfc/trf7970a.c4
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen4.c17
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen4.h16
-rw-r--r--drivers/ntb/msi.c6
-rw-r--r--drivers/nvdimm/blk.c8
-rw-r--r--drivers/nvdimm/btt.c11
-rw-r--r--drivers/nvdimm/btt_devs.c1
-rw-r--r--drivers/nvdimm/bus.c1
-rw-r--r--drivers/nvdimm/nd_virtio.c6
-rw-r--r--drivers/nvdimm/pfn_devs.c1
-rw-r--r--drivers/nvdimm/pmem.h1
-rw-r--r--drivers/nvme/host/Kconfig8
-rw-r--r--drivers/nvme/host/Makefile2
-rw-r--r--drivers/nvme/host/constants.c185
-rw-r--r--drivers/nvme/host/core.c268
-rw-r--r--drivers/nvme/host/fabrics.c9
-rw-r--r--drivers/nvme/host/fc.c22
-rw-r--r--drivers/nvme/host/ioctl.c38
-rw-r--r--drivers/nvme/host/multipath.c32
-rw-r--r--drivers/nvme/host/nvme.h47
-rw-r--r--drivers/nvme/host/pci.c18
-rw-r--r--drivers/nvme/host/rdma.c117
-rw-r--r--drivers/nvme/host/tcp.c51
-rw-r--r--drivers/nvme/target/admin-cmd.c6
-rw-r--r--drivers/nvme/target/configfs.c66
-rw-r--r--drivers/nvme/target/core.c12
-rw-r--r--drivers/nvme/target/fc.c16
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c27
-rw-r--r--drivers/nvme/target/io-cmd-file.c17
-rw-r--r--drivers/nvme/target/loop.c6
-rw-r--r--drivers/nvme/target/nvmet.h4
-rw-r--r--drivers/nvme/target/passthru.c10
-rw-r--r--drivers/nvme/target/rdma.c8
-rw-r--r--drivers/nvme/target/tcp.c6
-rw-r--r--drivers/nvme/target/zns.c20
-rw-r--r--drivers/of/of_reserved_mem.c9
-rw-r--r--drivers/pci/controller/pcie-apple.c2
-rw-r--r--drivers/pci/iov.c43
-rw-r--r--drivers/pci/pci-driver.c35
-rw-r--r--drivers/pcmcia/Kconfig2
-rw-r--r--drivers/pcmcia/Makefile1
-rw-r--r--drivers/pcmcia/pxa2xx_stargate2.c137
-rw-r--r--drivers/perf/Kconfig16
-rw-r--r--drivers/perf/Makefile2
-rw-r--r--drivers/perf/apple_m1_cpu_pmu.c584
-rw-r--r--drivers/perf/arm-cci.c2
-rw-r--r--drivers/perf/arm-ccn.c10
-rw-r--r--drivers/perf/arm-cmn.c23
-rw-r--r--drivers/perf/arm_pmu.c6
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c2
-rw-r--r--drivers/perf/marvell_cn10k_ddr_pmu.c758
-rw-r--r--drivers/perf/marvell_cn10k_tad_pmu.c2
-rw-r--r--drivers/perf/thunderx2_pmu.c6
-rw-r--r--drivers/perf/xgene_pmu.c8
-rw-r--r--drivers/phy/freescale/Kconfig10
-rw-r--r--drivers/phy/freescale/Makefile1
-rw-r--r--drivers/phy/freescale/phy-fsl-lynx-28g.c623
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c25
-rw-r--r--drivers/pinctrl/pinctrl-starfive.c4
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm64.c2
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c4
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c87
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c15
-rw-r--r--drivers/platform/chrome/cros_ec.c4
-rw-r--r--drivers/platform/chrome/cros_ec.h2
-rw-r--r--drivers/platform/chrome/cros_ec_i2c.c4
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c4
-rw-r--r--drivers/platform/chrome/cros_ec_spi.c4
-rw-r--r--drivers/platform/olpc/olpc-xo175-ec.c4
-rw-r--r--drivers/platform/x86/Kconfig12
-rw-r--r--drivers/platform/x86/Makefile2
-rw-r--r--drivers/platform/x86/i2c-multi-instantiate.c174
-rw-r--r--drivers/platform/x86/serial-multi-instantiate.c348
-rw-r--r--drivers/pnp/driver.c2
-rw-r--r--drivers/pnp/pnpacpi/core.c4
-rw-r--r--drivers/powercap/Kconfig8
-rw-r--r--drivers/powercap/Makefile1
-rw-r--r--drivers/powercap/dtpm.c333
-rw-r--r--drivers/powercap/dtpm_cpu.c55
-rw-r--r--drivers/powercap/dtpm_devfreq.c203
-rw-r--r--drivers/powercap/dtpm_subsys.h22
-rw-r--r--drivers/ptp/ptp_clock.c11
-rw-r--r--drivers/ptp/ptp_idt82p33.c344
-rw-r--r--drivers/ptp/ptp_idt82p33.h151
-rw-r--r--drivers/ptp/ptp_ocp.c1713
-rw-r--r--drivers/ptp/ptp_pch.c195
-rw-r--r--drivers/ptp/ptp_sysfs.c4
-rw-r--r--drivers/ptp/ptp_vclock.c56
-rw-r--r--drivers/regulator/Kconfig20
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/max8973-regulator.c2
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c37
-rw-r--r--drivers/regulator/qcom_smd-regulator.c4
-rw-r--r--drivers/regulator/rpi-panel-attiny-regulator.c291
-rw-r--r--drivers/regulator/rt5190a-regulator.c513
-rw-r--r--drivers/regulator/sc2731-regulator.c2
-rw-r--r--drivers/regulator/ti-abb-regulator.c6
-rw-r--r--drivers/regulator/tps6286x-regulator.c159
-rw-r--r--drivers/regulator/vctrl-regulator.c5
-rw-r--r--drivers/regulator/virtual.c41
-rw-r--r--drivers/regulator/wm8350-regulator.c2
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c7
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c36
-rw-r--r--drivers/rtc/rtc-ds1302.c3
-rw-r--r--drivers/rtc/rtc-ds1305.c4
-rw-r--r--drivers/rtc/rtc-ds1343.c4
-rw-r--r--drivers/s390/block/dasd_int.h1
-rw-r--r--drivers/s390/block/scm_blk.c1
-rw-r--r--drivers/s390/block/scm_blk.h1
-rw-r--r--drivers/s390/net/ctcm_fsms.c2
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/lcs.c8
-rw-r--r--drivers/s390/net/netiucv.c6
-rw-r--r--drivers/s390/net/qeth_core_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c1
-rw-r--r--drivers/scsi/dpt/dpti_i2o.h2
-rw-r--r--drivers/scsi/elx/libefc_sli/sli4.h20
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c13
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_edif_bsg.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h2
-rw-r--r--drivers/scsi/scsi_debug.c1
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsicam.c1
-rw-r--r--drivers/scsi/sd.c115
-rw-r--r--drivers/scsi/sd.h12
-rw-r--r--drivers/scsi/sg.c5
-rw-r--r--drivers/scsi/sr.c131
-rw-r--r--drivers/scsi/sr.h6
-rw-r--r--drivers/scsi/st.c1
-rw-r--r--drivers/scsi/st.h1
-rw-r--r--drivers/scsi/ufs/ufshpb.c4
-rw-r--r--drivers/scsi/xen-scsifront.c3
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/Makefile1
-rw-r--r--drivers/soc/amlogic/meson-secure-pwrc.c22
-rw-r--r--drivers/soc/atmel/soc.c3
-rw-r--r--drivers/soc/atmel/soc.h1
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.c8
-rw-r--r--drivers/soc/fsl/guts.c14
-rw-r--r--drivers/soc/fsl/qe/qe.c4
-rw-r--r--drivers/soc/fsl/qe/qe_io.c2
-rw-r--r--drivers/soc/imx/gpcv2.c3
-rw-r--r--drivers/soc/imx/imx8m-blk-ctrl.c66
-rw-r--r--drivers/soc/imx/soc-imx.c3
-rw-r--r--drivers/soc/ixp4xx/Kconfig1
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-npe.c33
-rw-r--r--drivers/soc/mediatek/mt8167-pm-domains.h16
-rw-r--r--drivers/soc/mediatek/mt8173-pm-domains.h22
-rw-r--r--drivers/soc/mediatek/mt8183-mmsys.h2
-rw-r--r--drivers/soc/mediatek/mt8183-pm-domains.h32
-rw-r--r--drivers/soc/mediatek/mt8186-mmsys.h115
-rw-r--r--drivers/soc/mediatek/mt8186-pm-domains.h344
-rw-r--r--drivers/soc/mediatek/mt8192-mmsys.h3
-rw-r--r--drivers/soc/mediatek/mt8192-pm-domains.h44
-rw-r--r--drivers/soc/mediatek/mt8195-pm-domains.h613
-rw-r--r--drivers/soc/mediatek/mtk-infracfg.c19
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.c18
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.h3
-rw-r--r--drivers/soc/mediatek/mtk-mutex.c45
-rw-r--r--drivers/soc/mediatek/mtk-pm-domains.c17
-rw-r--r--drivers/soc/mediatek/mtk-pm-domains.h8
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c71
-rw-r--r--drivers/soc/microchip/Kconfig10
-rw-r--r--drivers/soc/microchip/Makefile1
-rw-r--r--drivers/soc/microchip/mpfs-sys-controller.c196
-rw-r--r--drivers/soc/qcom/apr.c1
-rw-r--r--drivers/soc/qcom/llcc-qcom.c107
-rw-r--r--drivers/soc/qcom/mdt_loader.c232
-rw-r--r--drivers/soc/qcom/ocmem.c1
-rw-r--r--drivers/soc/qcom/qcom_aoss.c8
-rw-r--r--drivers/soc/qcom/rpmpd.c20
-rw-r--r--drivers/soc/qcom/socinfo.c12
-rw-r--r--drivers/soc/renesas/Kconfig12
-rw-r--r--drivers/soc/renesas/renesas-soc.c68
-rw-r--r--drivers/soc/rockchip/Kconfig8
-rw-r--r--drivers/soc/rockchip/Makefile1
-rw-r--r--drivers/soc/rockchip/dtpm.c65
-rw-r--r--drivers/soc/samsung/exynos-chipid.c2
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c24
-rw-r--r--drivers/soc/tegra/pmc.c16
-rw-r--r--drivers/soc/ti/k3-ringacc.c15
-rw-r--r--drivers/soc/ti/k3-socinfo.c1
-rw-r--r--drivers/soc/ti/smartreflex.c13
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c4
-rw-r--r--drivers/soundwire/dmi-quirks.c2
-rw-r--r--drivers/spi/Kconfig50
-rw-r--r--drivers/spi/Makefile4
-rw-r--r--drivers/spi/spi-amd.c87
-rw-r--r--drivers/spi/spi-ath79.c34
-rw-r--r--drivers/spi/spi-bcm2835aux.c21
-rw-r--r--drivers/spi/spi-bitbang-txrx.h66
-rw-r--r--drivers/spi/spi-cadence-xspi.c4
-rw-r--r--drivers/spi/spi-fsi.c10
-rw-r--r--drivers/spi/spi-geni-qcom.c7
-rw-r--r--drivers/spi/spi-gpio.c42
-rw-r--r--drivers/spi/spi-intel-pci.c (renamed from drivers/mtd/spi-nor/controllers/intel-spi-pci.c)50
-rw-r--r--drivers/spi/spi-intel-platform.c (renamed from drivers/mtd/spi-nor/controllers/intel-spi-platform.c)21
-rw-r--r--drivers/spi/spi-intel.c (renamed from drivers/mtd/spi-nor/controllers/intel-spi.c)850
-rw-r--r--drivers/spi/spi-intel.h19
-rw-r--r--drivers/spi/spi-lantiq-ssc.c8
-rw-r--r--drivers/spi/spi-mem.c6
-rw-r--r--drivers/spi/spi-mpc512x-psc.c47
-rw-r--r--drivers/spi/spi-mt65xx.c134
-rw-r--r--drivers/spi/spi-mtk-nor.c71
-rw-r--r--drivers/spi/spi-npcm-fiu.c14
-rw-r--r--drivers/spi/spi-pic32.c9
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c320
-rw-r--r--drivers/spi/spi-pxa2xx.c68
-rw-r--r--drivers/spi/spi-qup.c5
-rw-r--r--drivers/spi/spi-rockchip-sfc.c4
-rw-r--r--drivers/spi/spi-rockchip.c122
-rw-r--r--drivers/spi/spi-s3c24xx.c47
-rw-r--r--drivers/spi/spi-s3c64xx.c80
-rw-r--r--drivers/spi/spi-slave-system-control.c3
-rw-r--r--drivers/spi/spi-slave-time.c3
-rw-r--r--drivers/spi/spi-st-ssc4.c31
-rw-r--r--drivers/spi/spi-stm32.c2
-rw-r--r--drivers/spi/spi-sun4i.c2
-rw-r--r--drivers/spi/spi-sunplus-sp7021.c584
-rw-r--r--drivers/spi/spi-tegra114.c4
-rw-r--r--drivers/spi/spi-tegra20-slink.c8
-rw-r--r--drivers/spi/spi-tegra210-quad.c341
-rw-r--r--drivers/spi/spi-tle62x0.c3
-rw-r--r--drivers/spi/spi-topcliff-pch.c15
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c5
-rw-r--r--drivers/spi/spi.c291
-rw-r--r--drivers/spi/spidev.c35
-rw-r--r--drivers/staging/fbtft/fbtft.h92
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c7
-rw-r--r--drivers/staging/greybus/gpio.c5
-rw-r--r--drivers/staging/media/Kconfig2
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/atomisp/Kconfig2
-rw-r--r--drivers/staging/media/atomisp/i2c/Kconfig14
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_acc.c28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c148
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c21
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm.c7
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_acc_types.h5
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_env.h9
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_event_public.h33
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_irq.h77
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h31
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_firmware.c4
-rw-r--r--drivers/staging/media/hantro/Kconfig2
-rw-r--r--drivers/staging/media/hantro/TODO7
-rw-r--r--drivers/staging/media/hantro/hantro.h1
-rw-r--r--drivers/staging/media/hantro/hantro_drv.c52
-rw-r--r--drivers/staging/media/hantro/hantro_g2_hevc_dec.c27
-rw-r--r--drivers/staging/media/hantro/hantro_h1_jpeg_enc.c41
-rw-r--r--drivers/staging/media/hantro/hantro_h1_regs.h2
-rw-r--r--drivers/staging/media/hantro/hantro_hw.h13
-rw-r--r--drivers/staging/media/hantro/hantro_jpeg.c86
-rw-r--r--drivers/staging/media/hantro/hantro_jpeg.h2
-rw-r--r--drivers/staging/media/hantro/hantro_v4l2.c77
-rw-r--r--drivers/staging/media/hantro/imx8m_vpu_hw.c62
-rw-r--r--drivers/staging/media/hantro/rockchip_vpu2_hw_jpeg_enc.c44
-rw-r--r--drivers/staging/media/hantro/rockchip_vpu_hw.c6
-rw-r--r--drivers/staging/media/hantro/sunxi_vpu_hw.c4
-rw-r--r--drivers/staging/media/imx/Kconfig2
-rw-r--r--drivers/staging/media/imx/Makefile1
-rw-r--r--drivers/staging/media/imx/TODO25
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c7
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c25
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c64
-rw-r--r--drivers/staging/media/imx/imx8mq-mipi-csi2.c81
-rw-r--r--drivers/staging/media/ipu3/Kconfig2
-rw-r--r--drivers/staging/media/max96712/Kconfig2
-rw-r--r--drivers/staging/media/max96712/max96712.c2
-rw-r--r--drivers/staging/media/meson/vdec/Kconfig2
-rw-r--r--drivers/staging/media/meson/vdec/esparser.c7
-rw-r--r--drivers/staging/media/meson/vdec/vdec_helpers.c8
-rw-r--r--drivers/staging/media/meson/vdec/vdec_helpers.h4
-rw-r--r--drivers/staging/media/meson/vdec/vdec_platform.c12
-rw-r--r--drivers/staging/media/omap4iss/Kconfig2
-rw-r--r--drivers/staging/media/rkvdec/Kconfig2
-rw-r--r--drivers/staging/media/sunxi/cedrus/Kconfig2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h3
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.c4
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h264.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h265.c4
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c25
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.h2
-rw-r--r--drivers/staging/media/tegra-vde/Kconfig10
-rw-r--r--drivers/staging/media/tegra-vde/Makefile3
-rw-r--r--drivers/staging/media/tegra-vde/TODO4
-rw-r--r--drivers/staging/media/tegra-vde/uapi.h73
-rw-r--r--drivers/staging/media/tegra-vde/vde.c1358
-rw-r--r--drivers/staging/media/tegra-vde/vde.h125
-rw-r--r--drivers/staging/media/tegra-video/Kconfig2
-rw-r--r--drivers/staging/media/zoran/Kconfig40
-rw-r--r--drivers/staging/media/zoran/Makefile8
-rw-r--r--drivers/staging/media/zoran/videocodec.c68
-rw-r--r--drivers/staging/media/zoran/videocodec.h4
-rw-r--r--drivers/staging/media/zoran/zoran.h18
-rw-r--r--drivers/staging/media/zoran/zoran_card.c398
-rw-r--r--drivers/staging/media/zoran/zoran_device.c15
-rw-r--r--drivers/staging/media/zoran/zoran_device.h2
-rw-r--r--drivers/staging/media/zoran/zoran_driver.c56
-rw-r--r--drivers/staging/media/zoran/zr36016.c25
-rw-r--r--drivers/staging/media/zoran/zr36016.h2
-rw-r--r--drivers/staging/media/zoran/zr36050.c24
-rw-r--r--drivers/staging/media/zoran/zr36050.h2
-rw-r--r--drivers/staging/media/zoran/zr36060.c23
-rw-r--r--drivers/staging/media/zoran/zr36060.h2
-rw-r--r--drivers/staging/most/video/Kconfig2
-rw-r--r--drivers/staging/pi433/pi433_if.c4
-rw-r--r--drivers/staging/r8188eu/include/rtw_cmd.h10
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.h8
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c7
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c22
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c16
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c2
-rw-r--r--drivers/staging/rtl8723bs/include/ieee80211.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_cmd.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme.h8
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/Kconfig2
-rw-r--r--drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h2
-rw-r--r--drivers/staging/wfx/bus_spi.c3
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c4
-rw-r--r--drivers/target/target_core_iblock.c12
-rw-r--r--drivers/target/target_core_pscsi.c1
-rw-r--r--drivers/tee/amdtee/call.c2
-rw-r--r--drivers/tee/amdtee/shm_pool.c55
-rw-r--r--drivers/tee/optee/Kconfig8
-rw-r--r--drivers/tee/optee/call.c2
-rw-r--r--drivers/tee/optee/core.c21
-rw-r--r--drivers/tee/optee/device.c5
-rw-r--r--drivers/tee/optee/ffa_abi.c67
-rw-r--r--drivers/tee/optee/optee_private.h7
-rw-r--r--drivers/tee/optee/smc_abi.c129
-rw-r--r--drivers/tee/tee_core.c5
-rw-r--r--drivers/tee/tee_private.h15
-rw-r--r--drivers/tee/tee_shm.c320
-rw-r--r--drivers/tee/tee_shm_pool.c162
-rw-r--r--drivers/thermal/broadcom/brcmstb_thermal.c2
-rw-r--r--drivers/thermal/intel/Kconfig14
-rw-r--r--drivers/thermal/intel/Makefile1
-rw-r--r--drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c23
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c153
-rw-r--r--drivers/thermal/intel/intel_hfi.c569
-rw-r--r--drivers/thermal/intel/intel_hfi.h17
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c9
-rw-r--r--drivers/thermal/intel/therm_throt.c22
-rw-r--r--drivers/thermal/qcom/lmh.c62
-rw-r--r--drivers/thermal/qcom/tsens.c5
-rw-r--r--drivers/thermal/tegra/tegra-bpmp-thermal.c13
-rw-r--r--drivers/thermal/thermal_netlink.c58
-rw-r--r--drivers/thermal/thermal_netlink.h14
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c12
-rw-r--r--drivers/tty/serial/max3100.c5
-rw-r--r--drivers/tty/serial/max310x.c3
-rw-r--r--drivers/tty/serial/sc16is7xx.c4
-rw-r--r--drivers/tty/tty_io.c2
-rw-r--r--drivers/usb/class/usbtmc.c13
-rw-r--r--drivers/usb/core/hcd-pci.c4
-rw-r--r--drivers/usb/gadget/Kconfig2
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c1
-rw-r--r--drivers/usb/gadget/function/rndis.c1
-rw-r--r--drivers/usb/gadget/legacy/Kconfig2
-rw-r--r--drivers/usb/gadget/udc/core.c3
-rw-r--r--drivers/usb/gadget/udc/max3420_udc.c4
-rw-r--r--drivers/usb/host/max3421-hcd.c3
-rw-r--r--drivers/usb/host/xen-hcd.c26
-rw-r--r--drivers/usb/musb/omap2430.c1
-rw-r--r--drivers/usb/typec/port-mapper.c2
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c34
-rw-r--r--drivers/vdpa/vdpa.c2
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.c2
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c2
-rw-r--r--drivers/vfio/pci/Kconfig5
-rw-r--r--drivers/vfio/pci/Makefile4
-rw-r--r--drivers/vfio/pci/hisilicon/Kconfig15
-rw-r--r--drivers/vfio/pci/hisilicon/Makefile4
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c1326
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h116
-rw-r--r--drivers/vfio/pci/mlx5/Kconfig10
-rw-r--r--drivers/vfio/pci/mlx5/Makefile4
-rw-r--r--drivers/vfio/pci/mlx5/cmd.c259
-rw-r--r--drivers/vfio/pci/mlx5/cmd.h36
-rw-r--r--drivers/vfio/pci/mlx5/main.c676
-rw-r--r--drivers/vfio/pci/vfio_pci.c1
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c162
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c2
-rw-r--r--drivers/vfio/vfio.c296
-rw-r--r--drivers/vhost/iotlb.c11
-rw-r--r--drivers/vhost/net.c1
-rw-r--r--drivers/vhost/vdpa.c2
-rw-r--r--drivers/vhost/vhost.c11
-rw-r--r--drivers/vhost/vsock.c3
-rw-r--r--drivers/video/backlight/ams369fg06.c3
-rw-r--r--drivers/video/backlight/corgi_lcd.c3
-rw-r--r--drivers/video/backlight/ili922x.c3
-rw-r--r--drivers/video/backlight/l4f00242t03.c3
-rw-r--r--drivers/video/backlight/lm3630a_bl.c1
-rw-r--r--drivers/video/backlight/lms501kf03.c3
-rw-r--r--drivers/video/backlight/ltv350qv.c3
-rw-r--r--drivers/video/backlight/qcom-wled.c1
-rw-r--r--drivers/video/backlight/tdo24m.c3
-rw-r--r--drivers/video/backlight/tosa_lcd.c4
-rw-r--r--drivers/video/backlight/vgg2432a4.c4
-rw-r--r--drivers/video/console/Kconfig2
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/atafb.c35
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c11
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c10
-rw-r--r--drivers/video/fbdev/aty/mach64_ct.c4
-rw-r--r--drivers/video/fbdev/aty/mach64_gx.c2
-rw-r--r--drivers/video/fbdev/au1100fb.c2
-rw-r--r--drivers/video/fbdev/au1100fb.h2
-rw-r--r--drivers/video/fbdev/au1200fb.c4
-rw-r--r--drivers/video/fbdev/cirrusfb.c16
-rw-r--r--drivers/video/fbdev/controlfb.c2
-rw-r--r--drivers/video/fbdev/core/fb_defio.c9
-rw-r--r--drivers/video/fbdev/core/fbcvt.c53
-rw-r--r--drivers/video/fbdev/core/fbmem.c8
-rw-r--r--drivers/video/fbdev/da8xx-fb.c7
-rw-r--r--drivers/video/fbdev/imxfb.c2
-rw-r--r--drivers/video/fbdev/kyro/STG4000InitDevice.c2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c2
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfb_accel.c2
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.c3
-rw-r--r--drivers/video/fbdev/nvidia/nv_i2c.c2
-rw-r--r--drivers/video/fbdev/ocfb.c2
-rw-r--r--drivers/video/fbdev/offb.c2
-rw-r--r--drivers/video/fbdev/omap/lcd_ams_delta.c16
-rw-r--r--drivers/video/fbdev/omap/lcd_mipid.c4
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c13
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c8
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c11
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c11
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c1
-rw-r--r--drivers/video/fbdev/pxa168fb.c15
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c10
-rw-r--r--drivers/video/fbdev/s3c-fb.c20
-rw-r--r--drivers/video/fbdev/savage/savagefb.h1
-rw-r--r--drivers/video/fbdev/savage/savagefb_driver.c7
-rw-r--r--drivers/video/fbdev/sis/sis_main.c2
-rw-r--r--drivers/video/fbdev/sm712fb.c46
-rw-r--r--drivers/video/fbdev/smscufx.c3
-rw-r--r--drivers/video/fbdev/ssd1307fb.c7
-rw-r--r--drivers/video/fbdev/stifb.c45
-rw-r--r--drivers/video/fbdev/udlfb.c8
-rw-r--r--drivers/video/fbdev/via/lcd.c2
-rw-r--r--drivers/video/fbdev/via/viafbdev.c10
-rw-r--r--drivers/video/fbdev/w100fb.c15
-rw-r--r--drivers/virt/Kconfig11
-rw-r--r--drivers/virt/Makefile1
-rw-r--r--drivers/virt/vmgenid.c100
-rw-r--r--drivers/virtio/Kconfig1
-rw-r--r--drivers/virtio/virtio.c56
-rw-r--r--drivers/virtio/virtio_mem.c9
-rw-r--r--drivers/virtio/virtio_vdpa.c2
-rw-r--r--drivers/visorbus/vbuschannel.h2
-rw-r--r--drivers/xen/gntalloc.c25
-rw-r--r--drivers/xen/grant-table.c71
-rw-r--r--drivers/xen/pvcalls-front.c8
-rw-r--r--drivers/xen/xenbus/xenbus_client.c24
2971 files changed, 144412 insertions, 45550 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 273741dedfd2..1e34f846508f 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -302,7 +302,7 @@ config ACPI_IPMI
help
This driver enables the ACPI to access the BMC controller. And it
uses the IPMI request/response message to communicate with BMC
- controller, which can be found on on the server.
+ controller, which can be found on the server.
To compile this driver as a module, choose M here:
the module will be called as acpi_ipmi.
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index bb757148e7ba..b5a8d3e00a52 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -81,6 +81,9 @@ obj-$(CONFIG_ACPI_AC) += ac.o
obj-$(CONFIG_ACPI_BUTTON) += button.o
obj-$(CONFIG_ACPI_TINY_POWER_BUTTON) += tiny-power-button.o
obj-$(CONFIG_ACPI_FAN) += fan.o
+fan-objs := fan_core.o
+fan-objs += fan_attr.o
+
obj-$(CONFIG_ACPI_VIDEO) += video.o
obj-$(CONFIG_ACPI_TAD) += acpi_tad.o
obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index bcae0f03572b..fbe0756259c5 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -21,6 +21,7 @@
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
+#include <linux/pxa2xx_ssp.h>
#include <linux/suspend.h>
#include <linux/delay.h>
@@ -82,7 +83,7 @@ struct lpss_device_desc {
const char *clk_con_id;
unsigned int prv_offset;
size_t prv_size_override;
- struct property_entry *properties;
+ const struct property_entry *properties;
void (*setup)(struct lpss_private_data *pdata);
bool resume_from_noirq;
};
@@ -219,10 +220,16 @@ static void bsw_pwm_setup(struct lpss_private_data *pdata)
pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
}
-static const struct lpss_device_desc lpt_dev_desc = {
+static const struct property_entry lpt_spi_properties[] = {
+ PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_LPT_SSP),
+ { }
+};
+
+static const struct lpss_device_desc lpt_spi_dev_desc = {
.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
| LPSS_SAVE_CTX,
.prv_offset = 0x800,
+ .properties = lpt_spi_properties,
};
static const struct lpss_device_desc lpt_i2c_dev_desc = {
@@ -282,9 +289,15 @@ static const struct lpss_device_desc bsw_uart_dev_desc = {
.properties = uart_properties,
};
+static const struct property_entry byt_spi_properties[] = {
+ PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BYT_SSP),
+ { }
+};
+
static const struct lpss_device_desc byt_spi_dev_desc = {
.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
.prv_offset = 0x400,
+ .properties = byt_spi_properties,
};
static const struct lpss_device_desc byt_sdio_dev_desc = {
@@ -305,11 +318,17 @@ static const struct lpss_device_desc bsw_i2c_dev_desc = {
.resume_from_noirq = true,
};
+static const struct property_entry bsw_spi_properties[] = {
+ PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BSW_SSP),
+ { }
+};
+
static const struct lpss_device_desc bsw_spi_dev_desc = {
.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
| LPSS_NO_D3_DELAY,
.prv_offset = 0x400,
.setup = lpss_deassert_reset,
+ .properties = bsw_spi_properties,
};
static const struct x86_cpu_id lpss_cpu_ids[] = {
@@ -329,8 +348,8 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "INTL9C60", LPSS_ADDR(lpss_dma_desc) },
/* Lynxpoint LPSS devices */
- { "INT33C0", LPSS_ADDR(lpt_dev_desc) },
- { "INT33C1", LPSS_ADDR(lpt_dev_desc) },
+ { "INT33C0", LPSS_ADDR(lpt_spi_dev_desc) },
+ { "INT33C1", LPSS_ADDR(lpt_spi_dev_desc) },
{ "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) },
{ "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) },
{ "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) },
@@ -356,8 +375,8 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },
/* Broadwell LPSS devices */
- { "INT3430", LPSS_ADDR(lpt_dev_desc) },
- { "INT3431", LPSS_ADDR(lpt_dev_desc) },
+ { "INT3430", LPSS_ADDR(lpt_spi_dev_desc) },
+ { "INT3431", LPSS_ADDR(lpt_spi_dev_desc) },
{ "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
{ "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) },
{ "INT3434", LPSS_ADDR(lpt_uart_dev_desc) },
@@ -366,7 +385,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "INT3437", },
/* Wildcat Point LPSS devices */
- { "INT3438", LPSS_ADDR(lpt_dev_desc) },
+ { "INT3438", LPSS_ADDR(lpt_spi_dev_desc) },
{ }
};
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 78d621290a35..de3cbf152dee 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -95,7 +95,7 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
* Name of the platform device will be the same as @adev's.
*/
struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
- struct property_entry *properties)
+ const struct property_entry *properties)
{
struct platform_device *pdev = NULL;
struct platform_device_info pdevinfo;
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 915c2433463d..e7c30ce06e18 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -169,6 +169,9 @@ acpi_ns_walk_namespace(acpi_object_type type,
if (start_node == ACPI_ROOT_OBJECT) {
start_node = acpi_gbl_root_node;
+ if (!start_node) {
+ return_ACPI_STATUS(AE_NO_NAMESPACE);
+ }
}
/* Null child means "get first node" */
diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c
index 19e50fcbf4d6..598fd19b65fa 100644
--- a/drivers/acpi/apei/bert.c
+++ b/drivers/acpi/apei/bert.c
@@ -29,6 +29,7 @@
#undef pr_fmt
#define pr_fmt(fmt) "BERT: " fmt
+#define ACPI_BERT_PRINT_MAX_LEN 1024
static int bert_disable;
@@ -58,8 +59,11 @@ static void __init bert_print_all(struct acpi_bert_region *region,
}
pr_info_once("Error records from previous boot:\n");
-
- cper_estatus_print(KERN_INFO HW_ERR, estatus);
+ if (region_len < ACPI_BERT_PRINT_MAX_LEN)
+ cper_estatus_print(KERN_INFO HW_ERR, estatus);
+ else
+ pr_info_once("Max print length exceeded, table data is available at:\n"
+ "/sys/firmware/acpi/tables/data/BERT");
/*
* Because the boot error source is "one-time polled" type,
@@ -77,7 +81,7 @@ static int __init setup_bert_disable(char *str)
{
bert_disable = 1;
- return 0;
+ return 1;
}
__setup("bert_disable", setup_bert_disable);
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 242f3c2d5533..698d67cee052 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -891,7 +891,7 @@ EXPORT_SYMBOL_GPL(erst_clear);
static int __init setup_erst_disable(char *str)
{
erst_disable = 1;
- return 0;
+ return 1;
}
__setup("erst_disable", setup_erst_disable);
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 0c5c9acc6254..d91ad378c00d 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1457,33 +1457,35 @@ static struct platform_driver ghes_platform_driver = {
.remove = ghes_remove,
};
-static int __init ghes_init(void)
+void __init acpi_ghes_init(void)
{
int rc;
+ sdei_init();
+
if (acpi_disabled)
- return -ENODEV;
+ return;
switch (hest_disable) {
case HEST_NOT_FOUND:
- return -ENODEV;
+ return;
case HEST_DISABLED:
pr_info(GHES_PFX "HEST is not enabled!\n");
- return -EINVAL;
+ return;
default:
break;
}
if (ghes_disable) {
pr_info(GHES_PFX "GHES is not enabled!\n");
- return -EINVAL;
+ return;
}
ghes_nmi_init_cxt();
rc = platform_driver_register(&ghes_platform_driver);
if (rc)
- goto err;
+ return;
rc = apei_osc_setup();
if (rc == 0 && osc_sb_apei_support_acked)
@@ -1494,9 +1496,4 @@ static int __init ghes_init(void)
pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
else
pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
-
- return 0;
-err:
- return rc;
}
-device_initcall(ghes_init);
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 0edc1ed47673..6aef1ee5e1bd 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -224,7 +224,7 @@ err:
static int __init setup_hest_disable(char *str)
{
hest_disable = HEST_DISABLED;
- return 0;
+ return 1;
}
__setup("hest_disable", setup_hest_disable);
diff --git a/drivers/acpi/arm64/Kconfig b/drivers/acpi/arm64/Kconfig
index 6dba187f4f2e..d4a72835f328 100644
--- a/drivers/acpi/arm64/Kconfig
+++ b/drivers/acpi/arm64/Kconfig
@@ -8,3 +8,13 @@ config ACPI_IORT
config ACPI_GTDT
bool
+
+config ACPI_AGDI
+ bool "Arm Generic Diagnostic Dump and Reset Device Interface"
+ depends on ARM_SDE_INTERFACE
+ help
+ Arm Generic Diagnostic Dump and Reset Device Interface (AGDI) is
+ a standard that enables issuing a non-maskable diagnostic dump and
+ reset command.
+
+ If set, the kernel parses the AGDI table and listens for the command.
diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile
index 66acbe77f46e..7b9e4045659d 100644
--- a/drivers/acpi/arm64/Makefile
+++ b/drivers/acpi/arm64/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_ACPI_AGDI) += agdi.o
obj-$(CONFIG_ACPI_IORT) += iort.o
obj-$(CONFIG_ACPI_GTDT) += gtdt.o
obj-y += dma.o
diff --git a/drivers/acpi/arm64/agdi.c b/drivers/acpi/arm64/agdi.c
new file mode 100644
index 000000000000..4df337d545b7
--- /dev/null
+++ b/drivers/acpi/arm64/agdi.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This file implements handling of
+ * Arm Generic Diagnostic Dump and Reset Interface table (AGDI)
+ *
+ * Copyright (c) 2022, Ampere Computing LLC
+ */
+
+#define pr_fmt(fmt) "ACPI: AGDI: " fmt
+
+#include <linux/acpi.h>
+#include <linux/arm_sdei.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+struct agdi_data {
+ int sdei_event;
+};
+
+static int agdi_sdei_handler(u32 sdei_event, struct pt_regs *regs, void *arg)
+{
+ nmi_panic(regs, "Arm Generic Diagnostic Dump and Reset SDEI event issued");
+ return 0;
+}
+
+static int agdi_sdei_probe(struct platform_device *pdev,
+ struct agdi_data *adata)
+{
+ int err;
+
+ err = sdei_event_register(adata->sdei_event, agdi_sdei_handler, pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register for SDEI event %d",
+ adata->sdei_event);
+ return err;
+ }
+
+ err = sdei_event_enable(adata->sdei_event);
+ if (err) {
+ sdei_event_unregister(adata->sdei_event);
+ dev_err(&pdev->dev, "Failed to enable event %d\n",
+ adata->sdei_event);
+ return err;
+ }
+
+ return 0;
+}
+
+static int agdi_probe(struct platform_device *pdev)
+{
+ struct agdi_data *adata = dev_get_platdata(&pdev->dev);
+
+ if (!adata)
+ return -EINVAL;
+
+ return agdi_sdei_probe(pdev, adata);
+}
+
+static int agdi_remove(struct platform_device *pdev)
+{
+ struct agdi_data *adata = dev_get_platdata(&pdev->dev);
+ int err, i;
+
+ err = sdei_event_disable(adata->sdei_event);
+ if (err)
+ return err;
+
+ for (i = 0; i < 3; i++) {
+ err = sdei_event_unregister(adata->sdei_event);
+ if (err != -EINPROGRESS)
+ break;
+
+ schedule();
+ }
+
+ return err;
+}
+
+static struct platform_driver agdi_driver = {
+ .driver = {
+ .name = "agdi",
+ },
+ .probe = agdi_probe,
+ .remove = agdi_remove,
+};
+
+void __init acpi_agdi_init(void)
+{
+ struct acpi_table_agdi *agdi_table;
+ struct agdi_data pdata;
+ struct platform_device *pdev;
+ acpi_status status;
+
+ status = acpi_get_table(ACPI_SIG_AGDI, 0,
+ (struct acpi_table_header **) &agdi_table);
+ if (ACPI_FAILURE(status))
+ return;
+
+ if (agdi_table->flags & ACPI_AGDI_SIGNALING_MODE) {
+ pr_warn("Interrupt signaling is not supported");
+ goto err_put_table;
+ }
+
+ pdata.sdei_event = agdi_table->sdei_event;
+
+ pdev = platform_device_register_data(NULL, "agdi", 0, &pdata, sizeof(pdata));
+ if (IS_ERR(pdev))
+ goto err_put_table;
+
+ if (platform_driver_register(&agdi_driver))
+ platform_device_unregister(pdev);
+
+err_put_table:
+ acpi_put_table((struct acpi_table_header *)agdi_table);
+}
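Of note in agdi_remove() above: an SDEI event cannot be torn down while its handler is still running, so the function retries sdei_event_unregister() a bounded number of times while it keeps returning -EINPROGRESS, yielding with schedule() between attempts. A standalone model of that bounded-retry shape, with try_unregister() standing in for the real API:

	#include <stdio.h>
	#include <errno.h>

	static int busy_calls = 2;	/* simulated: busy for the first two calls */

	static int try_unregister(void)
	{
		return busy_calls-- > 0 ? -EINPROGRESS : 0;
	}

	int main(void)
	{
		int err, i;

		for (i = 0; i < 3; i++) {
			err = try_unregister();
			if (err != -EINPROGRESS)
				break;
			/* in-kernel: schedule() here to let the handler finish */
		}
		printf("unregister result: %d\n", err);	/* 0 on success */
		return 0;
	}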
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index ea31ae01458b..dc208f5f5a1f 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -59,6 +59,10 @@ MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
static const struct acpi_device_id battery_device_ids[] = {
{"PNP0C0A", 0},
+
+ /* Microsoft Surface Go 3 */
+ {"MSHW0146", 0},
+
{"", 0},
};
@@ -1148,6 +1152,14 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad"),
},
},
+ {
+ /* Microsoft Surface Go 3 */
+ .callback = battery_notification_delay_quirk,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
+ },
+ },
{},
};
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 07f604832fd6..3e58b613a2c4 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -26,6 +26,7 @@
#include <asm/mpspec.h>
#include <linux/dmi.h>
#endif
+#include <linux/acpi_agdi.h>
#include <linux/acpi_iort.h>
#include <linux/acpi_viot.h>
#include <linux/pci.h>
@@ -283,6 +284,8 @@ EXPORT_SYMBOL_GPL(osc_pc_lpi_support_confirmed);
bool osc_sb_native_usb4_support_confirmed;
EXPORT_SYMBOL_GPL(osc_sb_native_usb4_support_confirmed);
+bool osc_sb_cppc_not_supported;
+
static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
static void acpi_bus_osc_negotiate_platform_control(void)
{
@@ -332,21 +335,38 @@ static void acpi_bus_osc_negotiate_platform_control(void)
if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
return;
- kfree(context.ret.pointer);
+ capbuf_ret = context.ret.pointer;
+ if (context.ret.length <= OSC_SUPPORT_DWORD) {
+ kfree(context.ret.pointer);
+ return;
+ }
+
+#ifdef CONFIG_X86
+ if (boot_cpu_has(X86_FEATURE_HWP))
+ osc_sb_cppc_not_supported = !(capbuf_ret[OSC_SUPPORT_DWORD] &
+ (OSC_SB_CPC_SUPPORT | OSC_SB_CPCV2_SUPPORT));
+#endif
- /* Now run _OSC again with query flag clear */
+ /*
+ * Now run _OSC again with query flag clear and with the caps
+ * supported by both the OS and the platform.
+ */
capbuf[OSC_QUERY_DWORD] = 0;
+ capbuf[OSC_SUPPORT_DWORD] = capbuf_ret[OSC_SUPPORT_DWORD];
+ kfree(context.ret.pointer);
if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
return;
capbuf_ret = context.ret.pointer;
- osc_sb_apei_support_acked =
- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
- osc_pc_lpi_support_confirmed =
- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
- osc_sb_native_usb4_support_confirmed =
- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
+ if (context.ret.length > OSC_SUPPORT_DWORD) {
+ osc_sb_apei_support_acked =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
+ osc_pc_lpi_support_confirmed =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
+ osc_sb_native_usb4_support_confirmed =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
+ }
kfree(context.ret.pointer);
}
@@ -1043,7 +1063,12 @@ struct bus_type acpi_bus_type = {
.remove = acpi_device_remove,
.uevent = acpi_device_uevent,
};
-EXPORT_SYMBOL_GPL(acpi_bus_type);
+
+int acpi_bus_for_each_dev(int (*fn)(struct device *, void *), void *data)
+{
+ return bus_for_each_dev(&acpi_bus_type, NULL, data, fn);
+}
+EXPORT_SYMBOL_GPL(acpi_bus_for_each_dev);
/* --------------------------------------------------------------------------
Initialization/Cleanup
@@ -1331,6 +1356,8 @@ static int __init acpi_init(void)
pci_mmcfg_late_init();
acpi_iort_init();
+ acpi_hest_init();
+ acpi_ghes_init();
acpi_scan_init();
acpi_ec_init();
acpi_debugfs_init();
@@ -1339,6 +1366,7 @@ static int __init acpi_init(void)
acpi_debugger_init();
acpi_setup_sb_notify_handler();
acpi_viot_init();
+ acpi_agdi_init();
return 0;
}
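The rework above turns _OSC negotiation into an explicit two-pass exchange: the first call sets the query flag and merely asks which capabilities the platform supports; the second clears the flag and commits only the intersection of what the OS offered and what the platform granted, bailing out early whenever the return buffer is too short to contain the support DWORD. A self-contained userspace model of the two passes; run_osc() and the platform mask are stand-ins for acpi_run_osc() and firmware behaviour:

	#include <stdint.h>
	#include <stdio.h>

	#define OSC_QUERY_DWORD    0
	#define OSC_SUPPORT_DWORD  1
	#define OSC_QUERY_ENABLE   0x1

	/* Pretend platform: grants only a subset of what the OS offers. */
	static void run_osc(const uint32_t capbuf[2], uint32_t ret[2])
	{
		const uint32_t platform_caps = 0x0000001d;	/* assumed */

		ret[OSC_QUERY_DWORD] = 0;
		ret[OSC_SUPPORT_DWORD] = capbuf[OSC_SUPPORT_DWORD] & platform_caps;
	}

	int main(void)
	{
		uint32_t capbuf[2] = { OSC_QUERY_ENABLE, 0x0000003f };
		uint32_t ret[2];

		run_osc(capbuf, ret);			/* pass 1: query only */

		capbuf[OSC_QUERY_DWORD] = 0;		/* pass 2: commit the... */
		capbuf[OSC_SUPPORT_DWORD] = ret[OSC_SUPPORT_DWORD]; /* ...granted set */
		run_osc(capbuf, ret);

		printf("granted capabilities: %#x\n",
		       (unsigned int)ret[OSC_SUPPORT_DWORD]);	/* 0x1d */
		return 0;
	}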
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 866560cbb082..d418449194ee 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -633,8 +633,8 @@ static bool is_cppc_supported(int revision, int num_ent)
* )
*/
-#ifndef init_freq_invariance_cppc
-static inline void init_freq_invariance_cppc(void) { }
+#ifndef arch_init_invariance_cppc
+static inline void arch_init_invariance_cppc(void) { }
#endif
/**
@@ -656,6 +656,9 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
acpi_status status;
int ret = -EFAULT;
+ if (osc_sb_cppc_not_supported)
+ return -ENODEV;
+
/* Parse the ACPI _CPC table for this CPU. */
status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
ACPI_TYPE_PACKAGE);
@@ -816,7 +819,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
- init_freq_invariance_cppc();
+ arch_init_invariance_cppc();
kfree(output.pointer);
return 0;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 46710380a402..a1b871a418f8 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -168,7 +168,7 @@ struct acpi_ec_query {
};
static int acpi_ec_submit_query(struct acpi_ec *ec);
-static bool advance_transaction(struct acpi_ec *ec, bool interrupt);
+static void advance_transaction(struct acpi_ec *ec, bool interrupt);
static void acpi_ec_event_handler(struct work_struct *work);
struct acpi_ec *first_ec;
@@ -441,36 +441,35 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
return true;
}
-static bool acpi_ec_submit_event(struct acpi_ec *ec)
+static void acpi_ec_submit_event(struct acpi_ec *ec)
{
+ /*
+ * It is safe to mask the events here, because acpi_ec_close_event()
+ * will run at least once after this.
+ */
acpi_ec_mask_events(ec);
if (!acpi_ec_event_enabled(ec))
- return false;
-
- if (ec->event_state == EC_EVENT_READY) {
- ec_dbg_evt("Command(%s) submitted/blocked",
- acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
+ return;
- ec->event_state = EC_EVENT_IN_PROGRESS;
- /*
- * If events_to_process is greqter than 0 at this point, the
- * while () loop in acpi_ec_event_handler() is still running
- * and incrementing events_to_process will cause it to invoke
- * acpi_ec_submit_query() once more, so it is not necessary to
- * queue up the event work to start the same loop again.
- */
- if (ec->events_to_process++ > 0)
- return true;
+ if (ec->event_state != EC_EVENT_READY)
+ return;
- ec->events_in_progress++;
- return queue_work(ec_wq, &ec->work);
- }
+ ec_dbg_evt("Command(%s) submitted/blocked",
+ acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
+ ec->event_state = EC_EVENT_IN_PROGRESS;
/*
- * The event handling work has not been completed yet, so it needs to be
- * flushed.
+ * If events_to_process is greater than 0 at this point, the while ()
+ * loop in acpi_ec_event_handler() is still running and incrementing
+ * events_to_process will cause it to invoke acpi_ec_submit_query() once
+ * more, so it is not necessary to queue up the event work to start the
+ * same loop again.
*/
- return true;
+ if (ec->events_to_process++ > 0)
+ return;
+
+ ec->events_in_progress++;
+ queue_work(ec_wq, &ec->work);
}
static void acpi_ec_complete_event(struct acpi_ec *ec)
@@ -655,11 +654,10 @@ static void acpi_ec_spurious_interrupt(struct acpi_ec *ec, struct transaction *t
acpi_ec_mask_events(ec);
}
-static bool advance_transaction(struct acpi_ec *ec, bool interrupt)
+static void advance_transaction(struct acpi_ec *ec, bool interrupt)
{
struct transaction *t = ec->curr;
bool wakeup = false;
- bool ret = false;
u8 status;
ec_dbg_stm("%s (%d)", interrupt ? "IRQ" : "TASK", smp_processor_id());
@@ -724,12 +722,10 @@ static bool advance_transaction(struct acpi_ec *ec, bool interrupt)
out:
if (status & ACPI_EC_FLAG_SCI)
- ret = acpi_ec_submit_event(ec);
+ acpi_ec_submit_event(ec);
if (wakeup && interrupt)
wake_up(&ec->wait);
-
- return ret;
}
static void start_transaction(struct acpi_ec *ec)
@@ -1242,6 +1238,7 @@ static void acpi_ec_event_handler(struct work_struct *work)
acpi_ec_submit_query(ec);
spin_lock_irq(&ec->lock);
+
ec->events_to_process--;
}
@@ -1250,27 +1247,30 @@ static void acpi_ec_event_handler(struct work_struct *work)
* event handling work again regardless of whether or not the query
* queued up above is processed successfully.
*/
- if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT)
+ if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
+ bool guard_timeout;
+
acpi_ec_complete_event(ec);
- else
- acpi_ec_close_event(ec);
- spin_unlock_irq(&ec->lock);
+ ec_dbg_evt("Event stopped");
- ec_dbg_evt("Event stopped");
+ spin_unlock_irq(&ec->lock);
+
+ guard_timeout = !!ec_guard(ec);
- if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT && ec_guard(ec)) {
spin_lock_irq(&ec->lock);
/* Take care of SCI_EVT unless someone else is doing that. */
- if (!ec->curr)
+ if (guard_timeout && !ec->curr)
advance_transaction(ec, false);
+ } else {
+ acpi_ec_close_event(ec);
- spin_unlock_irq(&ec->lock);
+ ec_dbg_evt("Event stopped");
}
- spin_lock_irq(&ec->lock);
ec->events_in_progress--;
+
spin_unlock_irq(&ec->lock);
}
@@ -2051,6 +2051,11 @@ void acpi_ec_set_gpe_wake_mask(u8 action)
acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
}
+static bool acpi_ec_work_in_progress(struct acpi_ec *ec)
+{
+ return ec->events_in_progress + ec->queries_in_progress > 0;
+}
+
bool acpi_ec_dispatch_gpe(void)
{
bool work_in_progress = false;
@@ -2081,8 +2086,12 @@ bool acpi_ec_dispatch_gpe(void)
*/
spin_lock_irq(&first_ec->lock);
- if (acpi_ec_gpe_status_set(first_ec))
- work_in_progress = advance_transaction(first_ec, false);
+ if (acpi_ec_gpe_status_set(first_ec)) {
+ pm_pr_dbg("ACPI EC GPE status set\n");
+
+ advance_transaction(first_ec, false);
+ work_in_progress = acpi_ec_work_in_progress(first_ec);
+ }
spin_unlock_irq(&first_ec->lock);
@@ -2099,8 +2108,7 @@ bool acpi_ec_dispatch_gpe(void)
spin_lock_irq(&first_ec->lock);
- work_in_progress = first_ec->events_in_progress +
- first_ec->queries_in_progress > 0;
+ work_in_progress = acpi_ec_work_in_progress(first_ec);
spin_unlock_irq(&first_ec->lock);
} while (work_in_progress && !pm_wakeup_pending());
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
index dd9bb8ca2244..44728529a5b6 100644
--- a/drivers/acpi/fan.h
+++ b/drivers/acpi/fan.h
@@ -6,9 +6,53 @@
*
* Add new device IDs before the generic ACPI fan one.
*/
+
+#ifndef _ACPI_FAN_H_
+#define _ACPI_FAN_H_
+
#define ACPI_FAN_DEVICE_IDS \
{"INT3404", }, /* Fan */ \
{"INTC1044", }, /* Fan for Tiger Lake generation */ \
{"INTC1048", }, /* Fan for Alder Lake generation */ \
{"INTC10A2", }, /* Fan for Raptor Lake generation */ \
{"PNP0C0B", } /* Generic ACPI fan */
+
+#define ACPI_FPS_NAME_LEN 20
+
+struct acpi_fan_fps {
+ u64 control;
+ u64 trip_point;
+ u64 speed;
+ u64 noise_level;
+ u64 power;
+ char name[ACPI_FPS_NAME_LEN];
+ struct device_attribute dev_attr;
+};
+
+struct acpi_fan_fif {
+ u8 revision;
+ u8 fine_grain_ctrl;
+ u8 step_size;
+ u8 low_speed_notification;
+};
+
+struct acpi_fan_fst {
+ u64 revision;
+ u64 control;
+ u64 speed;
+};
+
+struct acpi_fan {
+ bool acpi4;
+ struct acpi_fan_fif fif;
+ struct acpi_fan_fps *fps;
+ int fps_count;
+ struct thermal_cooling_device *cdev;
+ struct device_attribute fst_speed;
+ struct device_attribute fine_grain_control;
+};
+
+int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst);
+int acpi_fan_create_attributes(struct acpi_device *device);
+void acpi_fan_delete_attributes(struct acpi_device *device);
+#endif
diff --git a/drivers/acpi/fan_attr.c b/drivers/acpi/fan_attr.c
new file mode 100644
index 000000000000..f15157d40713
--- /dev/null
+++ b/drivers/acpi/fan_attr.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * fan_attr.c - Create extra attributes for ACPI Fan driver
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2022 Intel Corporation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+
+#include "fan.h"
+
+MODULE_LICENSE("GPL");
+
+static ssize_t show_state(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct acpi_fan_fps *fps = container_of(attr, struct acpi_fan_fps, dev_attr);
+ int count;
+
+ if (fps->control == 0xFFFFFFFF || fps->control > 100)
+ count = scnprintf(buf, PAGE_SIZE, "not-defined:");
+ else
+ count = scnprintf(buf, PAGE_SIZE, "%lld:", fps->control);
+
+ if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9)
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
+ else
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->trip_point);
+
+ if (fps->speed == 0xFFFFFFFF)
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
+ else
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->speed);
+
+ if (fps->noise_level == 0xFFFFFFFF)
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
+ else
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->noise_level * 100);
+
+ if (fps->power == 0xFFFFFFFF)
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined\n");
+ else
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld\n", fps->power);
+
+ return count;
+}
+
+static ssize_t show_fan_speed(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *acpi_dev = container_of(dev, struct acpi_device, dev);
+ struct acpi_fan_fst fst;
+ int status;
+
+ status = acpi_fan_get_fst(acpi_dev, &fst);
+ if (status)
+ return status;
+
+ return sprintf(buf, "%lld\n", fst.speed);
+}
+
+static ssize_t show_fine_grain_control(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *acpi_dev = container_of(dev, struct acpi_device, dev);
+ struct acpi_fan *fan = acpi_driver_data(acpi_dev);
+
+ return sprintf(buf, "%d\n", fan->fif.fine_grain_ctrl);
+}
+
+int acpi_fan_create_attributes(struct acpi_device *device)
+{
+ struct acpi_fan *fan = acpi_driver_data(device);
+ int i, status;
+
+ sysfs_attr_init(&fan->fine_grain_control.attr);
+ fan->fine_grain_control.show = show_fine_grain_control;
+ fan->fine_grain_control.store = NULL;
+ fan->fine_grain_control.attr.name = "fine_grain_control";
+ fan->fine_grain_control.attr.mode = 0444;
+ status = sysfs_create_file(&device->dev.kobj, &fan->fine_grain_control.attr);
+ if (status)
+ return status;
+
+ /* _FST is present if we are here */
+ sysfs_attr_init(&fan->fst_speed.attr);
+ fan->fst_speed.show = show_fan_speed;
+ fan->fst_speed.store = NULL;
+ fan->fst_speed.attr.name = "fan_speed_rpm";
+ fan->fst_speed.attr.mode = 0444;
+ status = sysfs_create_file(&device->dev.kobj, &fan->fst_speed.attr);
+ if (status)
+ goto rem_fine_grain_attr;
+
+ for (i = 0; i < fan->fps_count; ++i) {
+ struct acpi_fan_fps *fps = &fan->fps[i];
+
+ snprintf(fps->name, ACPI_FPS_NAME_LEN, "state%d", i);
+ sysfs_attr_init(&fps->dev_attr.attr);
+ fps->dev_attr.show = show_state;
+ fps->dev_attr.store = NULL;
+ fps->dev_attr.attr.name = fps->name;
+ fps->dev_attr.attr.mode = 0444;
+ status = sysfs_create_file(&device->dev.kobj, &fps->dev_attr.attr);
+ if (status) {
+ int j;
+
+ for (j = 0; j < i; ++j)
+ sysfs_remove_file(&device->dev.kobj, &fan->fps[j].dev_attr.attr);
+ goto rem_fst_attr;
+ }
+ }
+
+ return 0;
+
+rem_fst_attr:
+ sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
+
+rem_fine_grain_attr:
+ sysfs_remove_file(&device->dev.kobj, &fan->fine_grain_control.attr);
+
+ return status;
+}
+
+void acpi_fan_delete_attributes(struct acpi_device *device)
+{
+ struct acpi_fan *fan = acpi_driver_data(device);
+ int i;
+
+ for (i = 0; i < fan->fps_count; ++i)
+ sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr);
+
+ sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
+ sysfs_remove_file(&device->dev.kobj, &fan->fine_grain_control.attr);
+}
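Since the attributes above are created directly on the ACPI device's kobject, they surface under the device's sysfs directory. A minimal userspace reader for the new fan_speed_rpm attribute; the path is an assumption and varies with the fan's HID and instance number:

	#include <stdio.h>

	int main(void)
	{
		/* Assumed path; adjust the HID:instance to the actual device. */
		const char *path = "/sys/bus/acpi/devices/PNP0C0B:00/fan_speed_rpm";
		FILE *f = fopen(path, "r");
		long rpm;

		if (!f) {
			perror(path);
			return 1;
		}
		if (fscanf(f, "%ld", &rpm) == 1)
			printf("fan speed: %ld rpm\n", rpm);
		fclose(f);
		return 0;
	}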
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan_core.c
index 5cd0ceb50bc8..b9a9a59ddcc1 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan_core.c
@@ -1,9 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * acpi_fan.c - ACPI Fan Driver ($Revision: 29 $)
+ * fan_core.c - ACPI Fan core Driver
*
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2022 Intel Corporation. All rights reserved.
*/
#include <linux/kernel.h>
@@ -45,33 +46,6 @@ static const struct dev_pm_ops acpi_fan_pm = {
#define FAN_PM_OPS_PTR NULL
#endif
-#define ACPI_FPS_NAME_LEN 20
-
-struct acpi_fan_fps {
- u64 control;
- u64 trip_point;
- u64 speed;
- u64 noise_level;
- u64 power;
- char name[ACPI_FPS_NAME_LEN];
- struct device_attribute dev_attr;
-};
-
-struct acpi_fan_fif {
- u64 revision;
- u64 fine_grain_ctrl;
- u64 step_size;
- u64 low_speed_notification;
-};
-
-struct acpi_fan {
- bool acpi4;
- struct acpi_fan_fif fif;
- struct acpi_fan_fps *fps;
- int fps_count;
- struct thermal_cooling_device *cdev;
-};
-
static struct platform_driver acpi_fan_driver = {
.probe = acpi_fan_probe,
.remove = acpi_fan_remove,
@@ -89,25 +63,29 @@ static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long
struct acpi_device *device = cdev->devdata;
struct acpi_fan *fan = acpi_driver_data(device);
- if (fan->acpi4)
- *state = fan->fps_count - 1;
- else
+ if (fan->acpi4) {
+ if (fan->fif.fine_grain_ctrl)
+ *state = 100 / fan->fif.step_size;
+ else
+ *state = fan->fps_count - 1;
+ } else {
*state = 1;
+ }
+
return 0;
}
-static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
+int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_fan *fan = acpi_driver_data(device);
union acpi_object *obj;
acpi_status status;
- int control, i;
+ int ret = 0;
status = acpi_evaluate_object(device->handle, "_FST", NULL, &buffer);
if (ACPI_FAILURE(status)) {
dev_err(&device->dev, "Get fan state failed\n");
- return status;
+ return -ENODEV;
}
obj = buffer.pointer;
@@ -115,35 +93,52 @@ static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
obj->package.count != 3 ||
obj->package.elements[1].type != ACPI_TYPE_INTEGER) {
dev_err(&device->dev, "Invalid _FST data\n");
- status = -EINVAL;
+ ret = -EINVAL;
goto err;
}
- control = obj->package.elements[1].integer.value;
+ fst->revision = obj->package.elements[0].integer.value;
+ fst->control = obj->package.elements[1].integer.value;
+ fst->speed = obj->package.elements[2].integer.value;
+
+err:
+ kfree(obj);
+ return ret;
+}
+
+static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
+{
+ struct acpi_fan *fan = acpi_driver_data(device);
+ struct acpi_fan_fst fst;
+ int status, i;
+
+ status = acpi_fan_get_fst(device, &fst);
+ if (status)
+ return status;
+
+ if (fan->fif.fine_grain_ctrl) {
+ /* Per the spec, this control value should match what we set via _FSL */
+ if (fst.control > 100) {
+ dev_dbg(&device->dev, "Invalid control value returned\n");
+ goto match_fps;
+ }
+
+ *state = (int) fst.control / fan->fif.step_size;
+ return 0;
+ }
+
+match_fps:
for (i = 0; i < fan->fps_count; i++) {
- /*
- * When Fine Grain Control is set, return the state
- * corresponding to maximum fan->fps[i].control
- * value compared to the current speed. Here the
- * fan->fps[] is sorted array with increasing speed.
- */
- if (fan->fif.fine_grain_ctrl && control < fan->fps[i].control) {
- i = (i > 0) ? i - 1 : 0;
+ if (fst.control == fan->fps[i].control)
break;
- } else if (control == fan->fps[i].control) {
- break;
- }
}
if (i == fan->fps_count) {
dev_dbg(&device->dev, "Invalid control value returned\n");
- status = -EINVAL;
- goto err;
+ return -EINVAL;
}
*state = i;
-err:
- kfree(obj);
return status;
}
@@ -187,15 +182,30 @@ static int fan_set_state_acpi4(struct acpi_device *device, unsigned long state)
{
struct acpi_fan *fan = acpi_driver_data(device);
acpi_status status;
+ u64 value = state;
+ int max_state;
- if (state >= fan->fps_count)
+ if (fan->fif.fine_grain_ctrl)
+ max_state = 100 / fan->fif.step_size;
+ else
+ max_state = fan->fps_count - 1;
+
+ if (state > max_state)
return -EINVAL;
- status = acpi_execute_simple_method(device->handle, "_FSL",
- fan->fps[state].control);
+ if (fan->fif.fine_grain_ctrl) {
+ value *= fan->fif.step_size;
+ /* The spec allows compensating only the last step */
+ if (value + fan->fif.step_size > 100)
+ value = 100;
+ } else {
+ value = fan->fps[state].control;
+ }
+
+ status = acpi_execute_simple_method(device->handle, "_FSL", value);
if (ACPI_FAILURE(status)) {
dev_dbg(&device->dev, "Failed to set state by _FSL\n");
- return status;
+ return -ENODEV;
}
return 0;
@@ -237,7 +247,8 @@ static int acpi_fan_get_fif(struct acpi_device *device)
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_fan *fan = acpi_driver_data(device);
struct acpi_buffer format = { sizeof("NNNN"), "NNNN" };
- struct acpi_buffer fif = { sizeof(fan->fif), &fan->fif };
+ u64 fields[4];
+ struct acpi_buffer fif = { sizeof(fields), fields };
union acpi_object *obj;
acpi_status status;
@@ -258,6 +269,17 @@ static int acpi_fan_get_fif(struct acpi_device *device)
status = -EINVAL;
}
+ fan->fif.revision = fields[0];
+ fan->fif.fine_grain_ctrl = fields[1];
+ fan->fif.step_size = fields[2];
+ fan->fif.low_speed_notification = fields[3];
+
+ /* If a firmware bug reports a step size of 0, change it to 1 */
+ if (!fan->fif.step_size)
+ fan->fif.step_size = 1;
+ /* If the step size is > 9, clamp it to 9 (the spec allows values 1-9) */
+ else if (fan->fif.step_size > 9)
+ fan->fif.step_size = 9;
err:
kfree(obj);
return status;
@@ -270,39 +292,6 @@ static int acpi_fan_speed_cmp(const void *a, const void *b)
return fps1->speed - fps2->speed;
}
-static ssize_t show_state(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct acpi_fan_fps *fps = container_of(attr, struct acpi_fan_fps, dev_attr);
- int count;
-
- if (fps->control == 0xFFFFFFFF || fps->control > 100)
- count = scnprintf(buf, PAGE_SIZE, "not-defined:");
- else
- count = scnprintf(buf, PAGE_SIZE, "%lld:", fps->control);
-
- if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9)
- count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
- else
- count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->trip_point);
-
- if (fps->speed == 0xFFFFFFFF)
- count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
- else
- count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->speed);
-
- if (fps->noise_level == 0xFFFFFFFF)
- count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
- else
- count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->noise_level * 100);
-
- if (fps->power == 0xFFFFFFFF)
- count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined\n");
- else
- count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld\n", fps->power);
-
- return count;
-}
-
static int acpi_fan_get_fps(struct acpi_device *device)
{
struct acpi_fan *fan = acpi_driver_data(device);
@@ -347,25 +336,6 @@ static int acpi_fan_get_fps(struct acpi_device *device)
sort(fan->fps, fan->fps_count, sizeof(*fan->fps),
acpi_fan_speed_cmp, NULL);
- for (i = 0; i < fan->fps_count; ++i) {
- struct acpi_fan_fps *fps = &fan->fps[i];
-
- snprintf(fps->name, ACPI_FPS_NAME_LEN, "state%d", i);
- sysfs_attr_init(&fps->dev_attr.attr);
- fps->dev_attr.show = show_state;
- fps->dev_attr.store = NULL;
- fps->dev_attr.attr.name = fps->name;
- fps->dev_attr.attr.mode = 0444;
- status = sysfs_create_file(&device->dev.kobj, &fps->dev_attr.attr);
- if (status) {
- int j;
-
- for (j = 0; j < i; ++j)
- sysfs_remove_file(&device->dev.kobj, &fan->fps[j].dev_attr.attr);
- break;
- }
- }
-
err:
kfree(obj);
return status;
@@ -396,6 +366,10 @@ static int acpi_fan_probe(struct platform_device *pdev)
if (result)
return result;
+ result = acpi_fan_create_attributes(device);
+ if (result)
+ return result;
+
fan->acpi4 = true;
} else {
result = acpi_device_update_power(device, NULL);
@@ -437,12 +411,8 @@ static int acpi_fan_probe(struct platform_device *pdev)
return 0;
err_end:
- if (fan->acpi4) {
- int i;
-
- for (i = 0; i < fan->fps_count; ++i)
- sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr);
- }
+ if (fan->acpi4)
+ acpi_fan_delete_attributes(device);
return result;
}
@@ -453,10 +423,8 @@ static int acpi_fan_remove(struct platform_device *pdev)
if (fan->acpi4) {
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
- int i;
- for (i = 0; i < fan->fps_count; ++i)
- sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr);
+ acpi_fan_delete_attributes(device);
}
sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling");
sysfs_remove_link(&fan->cdev->device.kobj, "device");
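The fine-grain paths added above reduce to simple arithmetic: max_state is 100 / step_size, the _FSL argument is state * step_size, and only the topmost step may be rounded up to 100, since the spec permits compensating the last step only. A self-contained sketch of that mapping, using an arbitrary step size of 7:

	#include <stdio.h>

	static unsigned int state_to_percent(unsigned int state, unsigned int step)
	{
		unsigned int value = state * step;

		if (value + step > 100)	/* only the last step may be short... */
			value = 100;	/* ...so round it up, as _FSL expects */
		return value;
	}

	int main(void)
	{
		unsigned int step = 7;			/* arbitrary _FIF step size */
		unsigned int max_state = 100 / step;	/* = 14 */
		unsigned int s;

		for (s = 0; s <= max_state; s++)
			printf("state %u -> %u%%\n", s, state_to_percent(s, step));
		return 0;	/* state 14 prints 100%, not 98% */
	}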
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 457e11d851b8..628bf8f18130 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -96,8 +96,6 @@ void acpi_scan_table_notify(void);
extern struct list_head acpi_bus_id_list;
-#define ACPI_MAX_DEVICE_INSTANCES 4096
-
struct acpi_device_bus_id {
const char *bus_id;
struct ida instance_ida;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 45c5c0e45e33..7a70c4bfc23c 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -642,22 +642,24 @@ u64 acpi_os_get_timer(void)
(ACPI_100NSEC_PER_SEC / HZ);
}
-acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
+acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width)
{
u32 dummy;
- if (!value)
+ if (value)
+ *value = 0;
+ else
value = &dummy;
- *value = 0;
if (width <= 8) {
- *(u8 *) value = inb(port);
+ *value = inb(port);
} else if (width <= 16) {
- *(u16 *) value = inw(port);
+ *value = inw(port);
} else if (width <= 32) {
- *(u32 *) value = inl(port);
+ *value = inl(port);
} else {
- BUG();
+ pr_debug("%s: Access width %d not supported\n", __func__, width);
+ return AE_BAD_PARAMETER;
}
return AE_OK;
@@ -674,7 +676,8 @@ acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
} else if (width <= 32) {
outl(value, port);
} else {
- BUG();
+ pr_debug("%s: Access width %d not supported\n", __func__, width);
+ return AE_BAD_PARAMETER;
}
return AE_OK;
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index d54fb8e54671..58647051c948 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -185,7 +185,7 @@ static acpi_status acpi_pci_link_check_current(struct acpi_resource *resource,
if (!p || !p->interrupt_count) {
/*
* IRQ descriptors may have no IRQ# bits set,
- * particularly those those w/ _STA disabled
+ * particularly those w/ _STA disabled
*/
pr_debug("Blank _CRS IRQ resource\n");
return AE_OK;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index b76db99cced3..6f9e75d14808 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -22,8 +22,6 @@
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/platform_data/x86/apple.h>
-#include <acpi/apei.h> /* for acpi_hest_init() */
-
#include "internal.h"
#define ACPI_PCI_ROOT_CLASS "pci_bridge"
@@ -943,7 +941,6 @@ out_release_info:
void __init acpi_pci_root_init(void)
{
- acpi_hest_init();
if (acpi_pci_disabled)
return;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f8e9fa82cb9b..32b20efff5f8 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -1080,6 +1080,11 @@ static int flatten_lpi_states(struct acpi_processor *pr,
return 0;
}
+int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
+{
+ return -EOPNOTSUPP;
+}
+
static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
int ret, i;
@@ -1088,6 +1093,11 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
struct acpi_device *d = NULL;
struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
+ /* make sure our architecture has support */
+ ret = acpi_processor_ffh_lpi_probe(pr->id);
+ if (ret == -EOPNOTSUPP)
+ return ret;
+
if (!osc_pc_lpi_support_confirmed)
return -EOPNOTSUPP;
@@ -1139,11 +1149,6 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
return 0;
}
-int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
-{
- return -ENODEV;
-}
-
int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
return -ENODEV;
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index d0986bda2964..12bbfe833609 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -541,7 +541,8 @@ acpi_device_data_of_node(const struct fwnode_handle *fwnode)
if (is_acpi_device_node(fwnode)) {
const struct acpi_device *adev = to_acpi_device_node(fwnode);
return &adev->data;
- } else if (is_acpi_data_node(fwnode)) {
+ }
+ if (is_acpi_data_node(fwnode)) {
const struct acpi_data_node *dn = to_acpi_data_node(fwnode);
return &dn->data;
}
@@ -685,7 +686,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
*/
if (obj->type == ACPI_TYPE_LOCAL_REFERENCE) {
if (index)
- return -EINVAL;
+ return -ENOENT;
device = acpi_fetch_acpi_dev(obj->reference.handle);
if (!device)
@@ -739,14 +740,19 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -EINVAL;
}
- /* assume following integer elements are all args */
+ /*
+ * Assume the following integer elements are all args.
+ * Stop counting on the first reference or end of the
+ * package arguments. In case of neither reference,
+ * nor integer, return an error, we can't parse it.
+ */
for (i = 0; element + i < end && i < num_args; i++) {
int type = element[i].type;
+ if (type == ACPI_TYPE_LOCAL_REFERENCE)
+ break;
if (type == ACPI_TYPE_INTEGER)
nargs++;
- else if (type == ACPI_TYPE_LOCAL_REFERENCE)
- break;
else
return -EINVAL;
}
@@ -950,7 +956,7 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
if (proptype != DEV_PROP_STRING && nval > obj->package.count)
return -EOVERFLOW;
- else if (nval <= 0)
+ if (nval == 0)
return -EINVAL;
items = obj->package.elements;
@@ -1012,14 +1018,10 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
const struct list_head *head;
struct list_head *next;
- if (!child || is_acpi_device_node(child)) {
+ if ((!child || is_acpi_device_node(child)) && adev) {
struct acpi_device *child_adev;
- if (adev)
- head = &adev->children;
- else
- goto nondev;
-
+ head = &adev->children;
if (list_empty(head))
goto nondev;
@@ -1089,7 +1091,8 @@ acpi_node_get_parent(const struct fwnode_handle *fwnode)
if (is_acpi_data_node(fwnode)) {
/* All data nodes have parent pointer so just return that */
return to_acpi_data_node(fwnode)->parent;
- } else if (is_acpi_device_node(fwnode)) {
+ }
+ if (is_acpi_device_node(fwnode)) {
struct device *dev = to_acpi_device_node(fwnode)->dev.parent;
if (dev)
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 1331756d4cfc..c116e3a65ab4 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -477,7 +477,8 @@ static void acpi_device_del(struct acpi_device *device)
list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node)
if (!strcmp(acpi_device_bus_id->bus_id,
acpi_device_hid(device))) {
- ida_simple_remove(&acpi_device_bus_id->instance_ida, device->pnp.instance_no);
+ ida_free(&acpi_device_bus_id->instance_ida,
+ device->pnp.instance_no);
if (ida_is_empty(&acpi_device_bus_id->instance_ida)) {
list_del(&acpi_device_bus_id->node);
kfree_const(acpi_device_bus_id->bus_id);
@@ -642,7 +643,7 @@ static int acpi_device_set_name(struct acpi_device *device,
struct ida *instance_ida = &acpi_device_bus_id->instance_ida;
int result;
- result = ida_simple_get(instance_ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL);
+ result = ida_alloc(instance_ida, GFP_KERNEL);
if (result < 0)
return result;
@@ -1377,11 +1378,11 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
if (info->valid & ACPI_VALID_HID) {
acpi_add_id(pnp, info->hardware_id.string);
pnp->type.platform_id = 1;
- if (info->valid & ACPI_VALID_CID) {
- cid_list = &info->compatible_id_list;
- for (i = 0; i < cid_list->count; i++)
- acpi_add_id(pnp, cid_list->ids[i].string);
- }
+ }
+ if (info->valid & ACPI_VALID_CID) {
+ cid_list = &info->compatible_id_list;
+ for (i = 0; i < cid_list->count; i++)
+ acpi_add_id(pnp, cid_list->ids[i].string);
}
if (info->valid & ACPI_VALID_ADR) {
pnp->bus_address = info->address;
@@ -1734,17 +1735,21 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
bool is_serial_bus_slave = false;
static const struct acpi_device_id ignore_serial_bus_ids[] = {
/*
- * These devices have multiple I2cSerialBus resources and an i2c-client
- * must be instantiated for each, each with its own i2c_device_id.
- * Normally we only instantiate an i2c-client for the first resource,
- * using the ACPI HID as id. These special cases are handled by the
- * drivers/platform/x86/i2c-multi-instantiate.c driver, which knows
- * which i2c_device_id to use for each resource.
+ * These devices have multiple SerialBus resources and a client
+ * device must be instantiated for each of them, each with
+ * its own device id.
+ * Normally we only instantiate one client device for the first
+ * resource, using the ACPI HID as id. These special cases are handled
+ * by the drivers/platform/x86/serial-multi-instantiate.c driver, which
+ * knows which client device id to use for each resource.
*/
{"BSG1160", },
{"BSG2150", },
+ {"CSC3551", },
{"INT33FE", },
{"INT3515", },
+ /* Cirrus Logic already shipped devices with this non-conforming _HID */
+ {"CLSA0100", },
/*
* HIDs of device with an UartSerialBusV2 resource for which userspace
* expects a regular tty cdev to be created (instead of the in kernel
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index d4fbea91ab6b..c992e57b2c79 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -758,6 +758,8 @@ bool acpi_s2idle_wake(void)
return true;
}
+ pm_pr_dbg("Rearming ACPI SCI for wakeup\n");
+
pm_wakeup_clear(acpi_sci_irq);
rearm_wake_irq(acpi_sci_irq);
}
@@ -869,12 +871,7 @@ static inline void acpi_sleep_syscore_init(void) {}
#ifdef CONFIG_HIBERNATION
static unsigned long s4_hardware_signature;
static struct acpi_table_facs *facs;
-static int sigcheck = -1; /* Default behaviour is just to warn */
-
-void __init acpi_check_s4_hw_signature(int check)
-{
- sigcheck = check;
-}
+int acpi_check_s4_hw_signature = -1; /* Default behaviour is just to warn */
static int acpi_hibernation_begin(pm_message_t stage)
{
@@ -999,7 +996,7 @@ static void acpi_sleep_hibernate_setup(void)
hibernation_set_ops(old_suspend_ordering ?
&acpi_hibernation_ops_old : &acpi_hibernation_ops);
sleep_states[ACPI_STATE_S4] = 1;
- if (!sigcheck)
+ if (!acpi_check_s4_hw_signature)
return;
acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
@@ -1011,7 +1008,7 @@ static void acpi_sleep_hibernate_setup(void)
*/
s4_hardware_signature = facs->hardware_signature;
- if (sigcheck > 0) {
+ if (acpi_check_s4_hw_signature > 0) {
/*
* If we're actually obeying the ACPI specification
* then the signature is written out as part of the
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 34600b5b9d8e..ceee808f7f2a 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -545,7 +545,7 @@ static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst = {
ACPI_SIG_WDDT, ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT,
ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT,
ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT,
- ACPI_SIG_NHLT, ACPI_SIG_AEST };
+ ACPI_SIG_NHLT, ACPI_SIG_AEST, ACPI_SIG_CEDT, ACPI_SIG_AGDI };
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 4f64713e9917..becc198e4c22 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -415,6 +415,81 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "GA503"),
},
},
+ /*
+ * Clevo NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2 both have a
+ * working native and a working video interface. However, the default
+ * detection mechanism first registers the video interface before
+ * unregistering it again and switching to the native interface during
+ * boot. This results in a dangling SBIOS request for a backlight
+ * change for some reason, causing the backlight to switch to ~2% once
+ * per boot on the first power cord connect or disconnect event.
+ * Setting the native interface explicitly circumvents this buggy
+ * behaviour by avoiding the unregistering process.
+ */
+ {
+ .callback = video_detect_force_native,
+ .ident = "Clevo NL5xRU",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "Clevo NL5xRU",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "Clevo NL5xRU",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "Clevo NL5xRU",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "Clevo NL5xRU",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "Clevo NL5xNU",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "Clevo NL5xNU",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "Clevo NL5xNU",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ },
+ },
/*
* Desktops which falsely report a backlight and which our heuristics
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index ffdeed5334d6..664070fc8349 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -285,6 +285,27 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
},
{
+ /* Lenovo Yoga Tablet 1050F/L */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
+ DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
+ /* Partial match on beginning of BIOS version */
+ DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ },
+ {
+ /* Nextbook Ares 8 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M890BAP"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ },
+ {
/* Whitelabel (sold as various brands) TM800A550L */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index e1a5eca3ae3c..d3bd14aaabf6 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -370,6 +370,7 @@ int amba_driver_register(struct amba_driver *drv)
return driver_register(&drv->drv);
}
+EXPORT_SYMBOL(amba_driver_register);
/**
* amba_driver_unregister - remove an AMBA device driver
@@ -383,7 +384,7 @@ void amba_driver_unregister(struct amba_driver *drv)
{
driver_unregister(&drv->drv);
}
-
+EXPORT_SYMBOL(amba_driver_unregister);
static void amba_device_release(struct device *dev)
{
@@ -642,6 +643,7 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
return amba_device_add(dev, parent);
}
+EXPORT_SYMBOL(amba_device_register);
/**
* amba_device_put - put an AMBA device
@@ -668,66 +670,7 @@ void amba_device_unregister(struct amba_device *dev)
{
device_unregister(&dev->dev);
}
-
-
-struct find_data {
- struct amba_device *dev;
- struct device *parent;
- const char *busid;
- unsigned int id;
- unsigned int mask;
-};
-
-static int amba_find_match(struct device *dev, void *data)
-{
- struct find_data *d = data;
- struct amba_device *pcdev = to_amba_device(dev);
- int r;
-
- r = (pcdev->periphid & d->mask) == d->id;
- if (d->parent)
- r &= d->parent == dev->parent;
- if (d->busid)
- r &= strcmp(dev_name(dev), d->busid) == 0;
-
- if (r) {
- get_device(dev);
- d->dev = pcdev;
- }
-
- return r;
-}
-
-/**
- * amba_find_device - locate an AMBA device given a bus id
- * @busid: bus id for device (or NULL)
- * @parent: parent device (or NULL)
- * @id: peripheral ID (or 0)
- * @mask: peripheral ID mask (or 0)
- *
- * Return the AMBA device corresponding to the supplied parameters.
- * If no device matches, returns NULL.
- *
- * NOTE: When a valid device is found, its refcount is
- * incremented, and must be decremented before the returned
- * reference.
- */
-struct amba_device *
-amba_find_device(const char *busid, struct device *parent, unsigned int id,
- unsigned int mask)
-{
- struct find_data data;
-
- data.dev = NULL;
- data.parent = parent;
- data.busid = busid;
- data.id = id;
- data.mask = mask;
-
- bus_for_each_dev(&amba_bustype, NULL, &data, amba_find_match);
-
- return data.dev;
-}
+EXPORT_SYMBOL(amba_device_unregister);
/**
* amba_request_regions - request all mem regions associated with device
@@ -749,6 +692,7 @@ int amba_request_regions(struct amba_device *dev, const char *name)
return ret;
}
+EXPORT_SYMBOL(amba_request_regions);
/**
* amba_release_regions - release mem regions associated with device
@@ -763,11 +707,4 @@ void amba_release_regions(struct amba_device *dev)
size = resource_size(&dev->res);
release_mem_region(dev->res.start, size);
}
-
-EXPORT_SYMBOL(amba_driver_register);
-EXPORT_SYMBOL(amba_driver_unregister);
-EXPORT_SYMBOL(amba_device_register);
-EXPORT_SYMBOL(amba_device_unregister);
-EXPORT_SYMBOL(amba_find_device);
-EXPORT_SYMBOL(amba_request_regions);
EXPORT_SYMBOL(amba_release_regions);
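Besides dropping the unused amba_find_device(), this hunk moves each EXPORT_SYMBOL() directly under the function it exports, the preferred kernel style, instead of batching them at the end of the file. The resulting shape, on a hypothetical function:

int example_register(struct example_driver *drv)
{
	return driver_register(&drv->drv);
}
EXPORT_SYMBOL(example_register);	/* kept adjacent to the definition */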
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index cb54631fd950..e5641e6c52ee 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -115,14 +115,14 @@ config SATA_AHCI
If unsure, say N.
-config SATA_MOBILE_LPM_POLICY
- int "Default SATA Link Power Management policy for mobile chipsets"
+config SATA_LPM_POLICY
+ int "Default SATA Link Power Management policy for low power chipsets"
range 0 4
default 0
depends on SATA_AHCI
help
Select the Default SATA Link Power Management (LPM) policy to use
- for mobile / laptop variants of chipsets / "South Bridges".
+ for chipsets / "South Bridges" designated as supporting low power.
The value set has the following meanings:
0 => Keep firmware settings
@@ -283,7 +283,7 @@ config SATA_FSL
config SATA_GEMINI
tristate "Gemini SATA bridge support"
- depends on ARCH_GEMINI || COMPILE_TEST
+ depends on ARCH_GEMINI || (OF && COMPILE_TEST)
select SATA_HOST
default ARCH_GEMINI
help
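A Kconfig int option surfaces in C as a plain CONFIG_ constant, which is why this rename also has to touch ahci.c below. A sketch of the consumer side; the meaning of values 1-4 is taken from the option's help text and hedged here:

/*
 * 0 = keep firmware settings, 1 = max_performance, 2 = medium_power,
 * 3 = med_power_with_dipm, 4 = min_power
 */
static int example_default_policy = CONFIG_SATA_LPM_POLICY;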
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index 536d4cb8f08b..7654a40c12b4 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -265,7 +265,7 @@ static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
!(qc->flags & ATA_QCFLAG_FAILED)) {
ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
- qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15];
+ qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15];
} else
ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
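This is part of a tree-wide rename: when a taskfile holds a device-to-host result, tf.command is really the status register and tf.feature the error register, so the fields gain the honest names status/error. The rename can happen piecemeal because both names alias the same storage; a sketch of the idea (the real struct ata_taskfile in <linux/ata.h> has more fields):

struct example_taskfile {
	union {
		u8 error;	/* read: error register */
		u8 feature;	/* write: feature register */
	};
	union {
		u8 status;	/* read: status register */
		u8 command;	/* write: command register */
	};
	/* LBA, device and count fields elided */
};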
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index ab5811ef5a53..84456c05e845 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -50,7 +50,7 @@ enum board_ids {
/* board IDs by feature in alphabetical order */
board_ahci,
board_ahci_ign_iferr,
- board_ahci_mobile,
+ board_ahci_low_power,
board_ahci_no_debounce_delay,
board_ahci_nomsi,
board_ahci_noncq,
@@ -135,8 +135,8 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_mobile] = {
- AHCI_HFLAGS (AHCI_HFLAG_IS_MOBILE),
+ [board_ahci_low_power] = {
+ AHCI_HFLAGS (AHCI_HFLAG_USE_LPM_POLICY),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
@@ -275,13 +275,13 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x2929), board_ahci_mobile }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292a), board_ahci_mobile }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292b), board_ahci_mobile }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292c), board_ahci_mobile }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292f), board_ahci_mobile }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x2929), board_ahci_low_power }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292a), board_ahci_low_power }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292b), board_ahci_low_power }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292c), board_ahci_low_power }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292f), board_ahci_low_power }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x294e), board_ahci_mobile }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x294e), board_ahci_low_power }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
@@ -291,9 +291,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b29), board_ahci_mobile }, /* PCH M AHCI */
+ { PCI_VDEVICE(INTEL, 0x3b29), board_ahci_low_power }, /* PCH M AHCI */
{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_mobile }, /* PCH M RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_low_power }, /* PCH M RAID */
{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x19b0), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b1), board_ahci_pcs7 }, /* DNV AHCI */
@@ -316,9 +316,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x19cE), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19cF), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
- { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_mobile }, /* CPT M AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_low_power }, /* CPT M AHCI */
{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
- { PCI_VDEVICE(INTEL, 0x1c05), board_ahci_mobile }, /* CPT M RAID */
+ { PCI_VDEVICE(INTEL, 0x1c05), board_ahci_low_power }, /* CPT M RAID */
{ PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
@@ -327,29 +327,29 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG/Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
{ PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
- { PCI_VDEVICE(INTEL, 0x1e03), board_ahci_mobile }, /* Panther M AHCI */
+ { PCI_VDEVICE(INTEL, 0x1e03), board_ahci_low_power }, /* Panther M AHCI */
{ PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
- { PCI_VDEVICE(INTEL, 0x1e07), board_ahci_mobile }, /* Panther M RAID */
+ { PCI_VDEVICE(INTEL, 0x1e07), board_ahci_low_power }, /* Panther M RAID */
{ PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */
- { PCI_VDEVICE(INTEL, 0x8c03), board_ahci_mobile }, /* Lynx M AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c03), board_ahci_low_power }, /* Lynx M AHCI */
{ PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c05), board_ahci_mobile }, /* Lynx M RAID */
+ { PCI_VDEVICE(INTEL, 0x8c05), board_ahci_low_power }, /* Lynx M RAID */
{ PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c07), board_ahci_mobile }, /* Lynx M RAID */
+ { PCI_VDEVICE(INTEL, 0x8c07), board_ahci_low_power }, /* Lynx M RAID */
{ PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci_mobile }, /* Lynx M RAID */
- { PCI_VDEVICE(INTEL, 0x9c02), board_ahci_mobile }, /* Lynx LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c03), board_ahci_mobile }, /* Lynx LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c04), board_ahci_mobile }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c05), board_ahci_mobile }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c06), board_ahci_mobile }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_mobile }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_mobile }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_mobile }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_mobile }, /* Cannon Lake PCH-LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci_low_power }, /* Lynx M RAID */
+ { PCI_VDEVICE(INTEL, 0x9c02), board_ahci_low_power }, /* Lynx LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c03), board_ahci_low_power }, /* Lynx LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c04), board_ahci_low_power }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c05), board_ahci_low_power }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c06), board_ahci_low_power }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_low_power }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_low_power }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_low_power }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_low_power }, /* Cannon Lake PCH-LP AHCI */
{ PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
@@ -381,26 +381,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
- { PCI_VDEVICE(INTEL, 0x9c83), board_ahci_mobile }, /* Wildcat LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c85), board_ahci_mobile }, /* Wildcat LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c87), board_ahci_mobile }, /* Wildcat LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci_mobile }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c83), board_ahci_low_power }, /* Wildcat LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c85), board_ahci_low_power }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c87), board_ahci_low_power }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci_low_power }, /* Wildcat LP RAID */
{ PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
- { PCI_VDEVICE(INTEL, 0x8c83), board_ahci_mobile }, /* 9 Series M AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c83), board_ahci_low_power }, /* 9 Series M AHCI */
{ PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c85), board_ahci_mobile }, /* 9 Series M RAID */
+ { PCI_VDEVICE(INTEL, 0x8c85), board_ahci_low_power }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c87), board_ahci_mobile }, /* 9 Series M RAID */
+ { PCI_VDEVICE(INTEL, 0x8c87), board_ahci_low_power }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci_mobile }, /* 9 Series M RAID */
- { PCI_VDEVICE(INTEL, 0x9d03), board_ahci_mobile }, /* Sunrise LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9d05), board_ahci_mobile }, /* Sunrise LP RAID */
- { PCI_VDEVICE(INTEL, 0x9d07), board_ahci_mobile }, /* Sunrise LP RAID */
+ { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci_low_power }, /* 9 Series M RAID */
+ { PCI_VDEVICE(INTEL, 0x9d03), board_ahci_low_power }, /* Sunrise LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9d05), board_ahci_low_power }, /* Sunrise LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9d07), board_ahci_low_power }, /* Sunrise LP RAID */
{ PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
- { PCI_VDEVICE(INTEL, 0xa103), board_ahci_mobile }, /* Sunrise M AHCI */
+ { PCI_VDEVICE(INTEL, 0xa103), board_ahci_low_power }, /* Sunrise M AHCI */
{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
- { PCI_VDEVICE(INTEL, 0xa107), board_ahci_mobile }, /* Sunrise M RAID */
+ { PCI_VDEVICE(INTEL, 0xa107), board_ahci_low_power }, /* Sunrise M RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
@@ -413,13 +413,13 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
{ PCI_VDEVICE(INTEL, 0x06d7), board_ahci }, /* Comet Lake-H RAID */
{ PCI_VDEVICE(INTEL, 0xa386), board_ahci }, /* Comet Lake PCH-V RAID */
- { PCI_VDEVICE(INTEL, 0x0f22), board_ahci_mobile }, /* Bay Trail AHCI */
- { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
- { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
- { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */
- { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_mobile }, /* Ice Lake LP AHCI */
- { PCI_VDEVICE(INTEL, 0x02d3), board_ahci_mobile }, /* Comet Lake PCH-U AHCI */
- { PCI_VDEVICE(INTEL, 0x02d7), board_ahci_mobile }, /* Comet Lake PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x0f22), board_ahci_low_power }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_low_power }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_low_power }, /* Cherry Tr. AHCI */
+ { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_low_power }, /* ApolloLake AHCI */
+ { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_low_power }, /* Ice Lake LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x02d3), board_ahci_low_power }, /* Comet Lake PCH-U AHCI */
+ { PCI_VDEVICE(INTEL, 0x02d7), board_ahci_low_power }, /* Comet Lake PCH RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -447,7 +447,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
{ PCI_VDEVICE(AMD, 0x7801), board_ahci_no_debounce_delay }, /* AMD Hudson-2 (AHCI mode) */
{ PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
- { PCI_VDEVICE(AMD, 0x7901), board_ahci_mobile }, /* AMD Green Sardine */
+ { PCI_VDEVICE(AMD, 0x7901), board_ahci_low_power }, /* AMD Green Sardine */
/* AMD is using RAID class only for ahci controllers */
{ PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
@@ -582,6 +582,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
.driver_data = board_ahci_yes_fbs },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9235),
+ .driver_data = board_ahci_no_debounce_delay },
{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), /* highpoint rocketraid 642L */
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0645), /* highpoint rocketraid 644L */
@@ -737,7 +739,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
- tf.command = ATA_BUSY;
+ tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
@@ -806,7 +808,7 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
- tf.command = ATA_BUSY;
+ tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
rc = sata_link_hardreset(link, timing, deadline, &online,
@@ -889,7 +891,8 @@ static int ahci_pci_device_suspend(struct device *dev)
}
ahci_pci_disable_interrupts(host);
- return ata_host_suspend(host, PMSG_SUSPEND);
+ ata_host_suspend(host, PMSG_SUSPEND);
+ return 0;
}
static int ahci_pci_device_resume(struct device *dev)
@@ -1592,11 +1595,11 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
static void ahci_update_initial_lpm_policy(struct ata_port *ap,
struct ahci_host_priv *hpriv)
{
- int policy = CONFIG_SATA_MOBILE_LPM_POLICY;
+ int policy = CONFIG_SATA_LPM_POLICY;
- /* Ignore processing for non mobile platforms */
- if (!(hpriv->flags & AHCI_HFLAG_IS_MOBILE))
+ /* Ignore processing for chipsets that don't use policy */
+ if (!(hpriv->flags & AHCI_HFLAG_USE_LPM_POLICY))
return;
/* user modified policy via module param */
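Every ahci_pci_tbl entry stores a board id in driver_data, and probe uses it to pick the matching ata_port_info, which is how the renamed board_ahci_low_power id turns into the AHCI_HFLAG_USE_LPM_POLICY host flag. A sketch of that lookup (not the full ahci_init_one()):

static unsigned long example_board_hflags(const struct pci_device_id *ent)
{
	const struct ata_port_info *pi = &ahci_port_info[ent->driver_data];

	/* AHCI_HFLAGS() stashed the hflags in pi->private_data */
	return (unsigned long)pi->private_data;
}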
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index eeac5482f1d1..5badbaca05a0 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -235,8 +235,8 @@ enum {
AHCI_HFLAG_YES_ALPM = (1 << 23), /* force ALPM cap on */
AHCI_HFLAG_NO_WRITE_TO_RO = (1 << 24), /* don't write to read
only registers */
- AHCI_HFLAG_IS_MOBILE = (1 << 25), /* mobile chipset, use
- SATA_MOBILE_LPM_POLICY
+ AHCI_HFLAG_USE_LPM_POLICY = (1 << 25), /* chipset that should use
+ SATA_LPM_POLICY
as default lpm_policy */
AHCI_HFLAG_SUSPEND_PHYS = (1 << 26), /* handle PHYs during
suspend/resume */
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 64dd8aa397d5..ab8552b1ff2a 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -427,7 +427,7 @@ static const struct of_device_id ahci_of_match[] = {
{.compatible = "brcm,bcm63138-ahci", .data = (void *)BRCM_SATA_BCM7445},
{.compatible = "brcm,bcm-nsp-ahci", .data = (void *)BRCM_SATA_NSP},
{.compatible = "brcm,bcm7216-ahci", .data = (void *)BRCM_SATA_BCM7216},
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
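The { /* sentinel */ } churn repeated across the ahci_* platform drivers below is purely cosmetic: OF match tables are terminated by an all-zero entry either way, and the comment merely makes the terminator explicit. The convention in full:

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-ahci" },
	{ /* sentinel */ }	/* zeroed entry ends the of_match_node() walk */
};
MODULE_DEVICE_TABLE(of, example_of_match);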
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index acf59f51b356..cb24ecf36faf 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -363,7 +363,7 @@ static SIMPLE_DEV_PM_OPS(ahci_ceva_pm_ops, ceva_ahci_suspend, ceva_ahci_resume);
static const struct of_device_id ceva_ahci_of_match[] = {
{ .compatible = "ceva,ahci-1v84" },
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ceva_ahci_of_match);
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index 0e8276600712..052c28e250aa 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -241,7 +241,7 @@ static SIMPLE_DEV_PM_OPS(ahci_da850_pm_ops, ahci_platform_suspend,
static const struct of_device_id ahci_da850_of_match[] = {
{ .compatible = "ti,da850-ahci", },
- { },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_da850_of_match);
diff --git a/drivers/ata/ahci_dm816.c b/drivers/ata/ahci_dm816.c
index 8bec41041671..8a92112dcd59 100644
--- a/drivers/ata/ahci_dm816.c
+++ b/drivers/ata/ahci_dm816.c
@@ -176,7 +176,7 @@ static SIMPLE_DEV_PM_OPS(ahci_dm816_pm_ops,
static const struct of_device_id ahci_dm816_of_match[] = {
{ .compatible = "ti,dm816-ahci", },
- { },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_dm816_of_match);
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 388baf528fa8..79aa9f285312 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -811,7 +811,7 @@ static const struct of_device_id imx_ahci_of_match[] = {
{ .compatible = "fsl,imx6q-ahci", .data = (void *)AHCI_IMX6Q },
{ .compatible = "fsl,imx6qp-ahci", .data = (void *)AHCI_IMX6QP },
{ .compatible = "fsl,imx8qm-ahci", .data = (void *)AHCI_IMX8QM },
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c
index d9b08ae7c3b2..1f6c85fde983 100644
--- a/drivers/ata/ahci_mtk.c
+++ b/drivers/ata/ahci_mtk.c
@@ -169,7 +169,7 @@ static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
static const struct of_device_id ahci_of_match[] = {
{ .compatible = "mediatek,mtk-ahci", },
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index 3ad46d26d9d5..991413a272e6 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -239,7 +239,7 @@ static const struct of_device_id ahci_mvebu_of_match[] = {
.compatible = "marvell,armada-3700-ahci",
.data = &ahci_mvebu_armada_3700_plat_data,
},
- { },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match);
diff --git a/drivers/ata/ahci_octeon.c b/drivers/ata/ahci_octeon.c
index 5a44e089c6bb..b9460b91288f 100644
--- a/drivers/ata/ahci_octeon.c
+++ b/drivers/ata/ahci_octeon.c
@@ -80,7 +80,7 @@ static int ahci_octeon_remove(struct platform_device *pdev)
static const struct of_device_id octeon_ahci_match[] = {
{ .compatible = "cavium,octeon-7130-sata-uctl", },
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, octeon_ahci_match);
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 3aab2e3d57f3..28a8de5b48b9 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -88,7 +88,7 @@ static const struct of_device_id ahci_of_match[] = {
{ .compatible = "snps,dwc-ahci", },
{ .compatible = "hisilicon,hisi-ahci", },
{ .compatible = "cavium,octeon-7130-ahci", },
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index bf5b388bd4e0..6cd61842ad48 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -77,7 +77,7 @@ static const struct of_device_id ahci_qoriq_of_match[] = {
{ .compatible = "fsl,ls1088a-ahci", .data = (void *)AHCI_LS1088A},
{ .compatible = "fsl,ls2088a-ahci", .data = (void *)AHCI_LS2088A},
{ .compatible = "fsl,lx2160a-ahci", .data = (void *)AHCI_LX2160A},
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_qoriq_of_match);
@@ -123,7 +123,7 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
- tf.command = ATA_BUSY;
+ tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
rc = sata_link_hardreset(link, timing, deadline, &online,
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index c268264c2129..7526653c843b 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -232,7 +232,7 @@ static SIMPLE_DEV_PM_OPS(st_ahci_pm_ops, st_ahci_suspend, st_ahci_resume);
static const struct of_device_id st_ahci_match[] = {
{ .compatible = "st,ahci", },
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, st_ahci_match);
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index 56b695136977..c7273c1cb0c7 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -286,7 +286,7 @@ static SIMPLE_DEV_PM_OPS(ahci_sunxi_pm_ops, ahci_platform_suspend,
static const struct of_device_id ahci_sunxi_of_match[] = {
{ .compatible = "allwinner,sun4i-a10-ahci", },
{ .compatible = "allwinner,sun8i-r40-ahci", },
- { },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_sunxi_of_match);
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 8e206379d699..7bb5db17f864 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -365,7 +365,7 @@ static int xgene_ahci_do_hardreset(struct ata_link *link,
do {
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
- tf.command = ATA_BUSY;
+ tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
rc = sata_link_hardreset(link, timing, deadline, online,
ahci_check_ready);
@@ -726,7 +726,7 @@ MODULE_DEVICE_TABLE(acpi, xgene_ahci_acpi_match);
static const struct of_device_id xgene_ahci_of_match[] = {
{.compatible = "apm,xgene-ahci", .data = (void *) XGENE_AHCI_V1},
{.compatible = "apm,xgene-ahci-v2", .data = (void *) XGENE_AHCI_V2},
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, xgene_ahci_of_match);
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 27b0d903f91f..ade5e894563b 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -993,11 +993,8 @@ static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = pci_get_drvdata(pdev);
unsigned long flags;
- int rc = 0;
- rc = ata_host_suspend(host, mesg);
- if (rc)
- return rc;
+ ata_host_suspend(host, mesg);
/* Some braindamaged ACPI suspend implementations expect the
* controller to be awake on entry; otherwise, it burns cpu
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 0ed484e04fd6..cf8c7fd59ada 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1561,7 +1561,7 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
- tf.command = ATA_BUSY;
+ tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
rc = sata_link_hardreset(link, timing, deadline, online,
@@ -2033,7 +2033,7 @@ static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
!(qc->flags & ATA_QCFLAG_FAILED)) {
ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
- qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15];
+ qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15];
} else
ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 18296443ccba..65227ef6b846 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -733,7 +733,8 @@ int ahci_platform_suspend_host(struct device *dev)
if (hpriv->flags & AHCI_HFLAG_SUSPEND_PHYS)
ahci_platform_disable_phys(hpriv);
- return ata_host_suspend(host, PMSG_SUSPEND);
+ ata_host_suspend(host, PMSG_SUSPEND);
+ return 0;
}
EXPORT_SYMBOL_GPL(ahci_platform_suspend_host);
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 8cfa8c96bb13..3d345d173556 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -546,13 +546,13 @@ static void ata_acpi_gtf_to_tf(struct ata_device *dev,
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf->protocol = ATA_PROT_NODATA;
- tf->feature = gtf->tf[0]; /* 0x1f1 */
+ tf->error = gtf->tf[0]; /* 0x1f1 */
tf->nsect = gtf->tf[1]; /* 0x1f2 */
tf->lbal = gtf->tf[2]; /* 0x1f3 */
tf->lbam = gtf->tf[3]; /* 0x1f4 */
tf->lbah = gtf->tf[4]; /* 0x1f5 */
tf->device = gtf->tf[5]; /* 0x1f6 */
- tf->command = gtf->tf[6]; /* 0x1f7 */
+ tf->status = gtf->tf[6]; /* 0x1f7 */
}
static int ata_acpi_filter_tf(struct ata_device *dev,
@@ -679,7 +679,7 @@ static int ata_acpi_run_tf(struct ata_device *dev,
"(%s) rejected by device (Stat=0x%02x Err=0x%02x)",
tf.command, tf.feature, tf.nsect, tf.lbal,
tf.lbam, tf.lbah, tf.device, descr,
- rtf.command, rtf.feature);
+ rtf.status, rtf.error);
rc = 0;
break;
@@ -689,7 +689,7 @@ static int ata_acpi_run_tf(struct ata_device *dev,
"(%s) failed (Emask=0x%x Stat=0x%02x Err=0x%02x)",
tf.command, tf.feature, tf.nsect, tf.lbal,
tf.lbam, tf.lbah, tf.device, descr,
- err_mask, rtf.command, rtf.feature);
+ err_mask, rtf.status, rtf.error);
rc = -EIO;
break;
}
@@ -800,27 +800,6 @@ static int ata_acpi_push_id(struct ata_device *dev)
}
/**
- * ata_acpi_on_suspend - ATA ACPI hook called on suspend
- * @ap: target ATA port
- *
- * This function is called when @ap is about to be suspended. All
- * devices are already put to sleep but the port_suspend() callback
- * hasn't been executed yet. Error return from this function aborts
- * suspend.
- *
- * LOCKING:
- * EH context.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int ata_acpi_on_suspend(struct ata_port *ap)
-{
- /* nada */
- return 0;
-}
-
-/**
* ata_acpi_on_resume - ATA ACPI hook called on resume
* @ap: target ATA port
*
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 0c854aebfe0b..cceedde51126 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1171,7 +1171,7 @@ static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
ata_dev_warn(dev,
"failed to read native max address (err_mask=0x%x)\n",
err_mask);
- if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
+ if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
return -EACCES;
return -EIO;
}
@@ -1235,7 +1235,7 @@ static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
"failed to set max address (err_mask=0x%x)\n",
err_mask);
if (err_mask == AC_ERR_DEV &&
- (tf.feature & (ATA_ABORTED | ATA_IDNF)))
+ (tf.error & (ATA_ABORTED | ATA_IDNF)))
return -EACCES;
return -EIO;
}
@@ -1584,7 +1584,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
/* perform minimal error analysis */
if (qc->flags & ATA_QCFLAG_FAILED) {
- if (qc->result_tf.command & (ATA_ERR | ATA_DF))
+ if (qc->result_tf.status & (ATA_ERR | ATA_DF))
qc->err_mask |= AC_ERR_DEV;
if (!qc->err_mask)
@@ -1593,7 +1593,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
if (qc->err_mask & ~AC_ERR_OTHER)
qc->err_mask &= ~AC_ERR_OTHER;
} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
- qc->result_tf.command |= ATA_SENSE;
+ qc->result_tf.status |= ATA_SENSE;
}
/* finish up */
@@ -1813,7 +1813,7 @@ retry:
return 0;
}
- if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
+ if ((err_mask == AC_ERR_DEV) && (tf.error & ATA_ABORTED)) {
/* Device or controller might have reported
* the wrong device class. Give a shot at the
* other IDENTIFY if the current one is
@@ -3569,7 +3569,7 @@ EXPORT_SYMBOL_GPL(ata_wait_after_reset);
* Kernel thread context (may sleep)
*
* RETURNS:
- * 0 on success, -errno otherwise.
+ * Always 0.
*/
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
@@ -4384,7 +4384,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
/* A clean abort indicates an original or just out of spec drive
and we should continue as we issue the setup based on the
drive reported working geometry */
- if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
+ if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
err_mask = 0;
return err_mask;
@@ -5179,10 +5179,9 @@ EXPORT_SYMBOL_GPL(ata_sas_port_resume);
*
* Suspend @host. Actual operation is performed by port suspend.
*/
-int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
+void ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
host->dev->power.power_state = mesg;
- return 0;
}
EXPORT_SYMBOL_GPL(ata_host_suspend);
@@ -6099,11 +6098,8 @@ EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = pci_get_drvdata(pdev);
- int rc = 0;
- rc = ata_host_suspend(host, mesg);
- if (rc)
- return rc;
+ ata_host_suspend(host, mesg);
ata_pci_device_do_suspend(pdev, mesg);
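Since ata_host_suspend() only records the PM message and can no longer fail, it now returns void, and every caller that used to propagate its result switches to the two-line pattern seen in these hunks. An updated driver hook, sketched:

static int example_suspend(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_suspend(host, PMSG_SUSPEND);	/* cannot fail */
	return 0;
}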
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 7951fd946bf9..3307ed45fe4d 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1386,7 +1386,7 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
if (err_mask == AC_ERR_DEV)
- *r_sense_key = tf.feature >> 4;
+ *r_sense_key = tf.error >> 4;
return err_mask;
}
@@ -1429,12 +1429,12 @@ static void ata_eh_request_sense(struct ata_queued_cmd *qc,
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
/* Ignore err_mask; ATA_ERR might be set */
- if (tf.command & ATA_SENSE) {
+ if (tf.status & ATA_SENSE) {
ata_scsi_set_sense(dev, cmd, tf.lbah, tf.lbam, tf.lbal);
qc->flags |= ATA_QCFLAG_SENSE_VALID;
} else {
ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
- tf.command, err_mask);
+ tf.status, err_mask);
}
}
@@ -1557,7 +1557,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
const struct ata_taskfile *tf)
{
unsigned int tmp, action = 0;
- u8 stat = tf->command, err = tf->feature;
+ u8 stat = tf->status, err = tf->error;
if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
qc->err_mask |= AC_ERR_HSM;
@@ -1594,7 +1594,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
tmp = atapi_eh_request_sense(qc->dev,
qc->scsicmd->sense_buffer,
- qc->result_tf.feature >> 4);
+ qc->result_tf.error >> 4);
if (!tmp)
qc->flags |= ATA_QCFLAG_SENSE_VALID;
else
@@ -2360,7 +2360,7 @@ static void ata_eh_link_report(struct ata_link *link)
cmd->hob_feature, cmd->hob_nsect,
cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
cmd->device, qc->tag, data_buf, cdb_buf,
- res->command, res->feature, res->nsect,
+ res->status, res->error, res->nsect,
res->lbal, res->lbam, res->lbah,
res->hob_feature, res->hob_nsect,
res->hob_lbal, res->hob_lbam, res->hob_lbah,
@@ -2368,28 +2368,28 @@ static void ata_eh_link_report(struct ata_link *link)
qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
#ifdef CONFIG_ATA_VERBOSE_ERROR
- if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
- ATA_SENSE | ATA_ERR)) {
- if (res->command & ATA_BUSY)
+ if (res->status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
+ ATA_SENSE | ATA_ERR)) {
+ if (res->status & ATA_BUSY)
ata_dev_err(qc->dev, "status: { Busy }\n");
else
ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
- res->command & ATA_DRDY ? "DRDY " : "",
- res->command & ATA_DF ? "DF " : "",
- res->command & ATA_DRQ ? "DRQ " : "",
- res->command & ATA_SENSE ? "SENSE " : "",
- res->command & ATA_ERR ? "ERR " : "");
+ res->status & ATA_DRDY ? "DRDY " : "",
+ res->status & ATA_DF ? "DF " : "",
+ res->status & ATA_DRQ ? "DRQ " : "",
+ res->status & ATA_SENSE ? "SENSE " : "",
+ res->status & ATA_ERR ? "ERR " : "");
}
if (cmd->command != ATA_CMD_PACKET &&
- (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
- ATA_IDNF | ATA_ABORTED)))
+ (res->error & (ATA_ICRC | ATA_UNC | ATA_AMNF | ATA_IDNF |
+ ATA_ABORTED)))
ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
- res->feature & ATA_ICRC ? "ICRC " : "",
- res->feature & ATA_UNC ? "UNC " : "",
- res->feature & ATA_AMNF ? "AMNF " : "",
- res->feature & ATA_IDNF ? "IDNF " : "",
- res->feature & ATA_ABORTED ? "ABRT " : "");
+ res->error & ATA_ICRC ? "ICRC " : "",
+ res->error & ATA_UNC ? "UNC " : "",
+ res->error & ATA_AMNF ? "AMNF " : "",
+ res->error & ATA_IDNF ? "IDNF " : "",
+ res->error & ATA_ABORTED ? "ABRT " : "");
#endif
}
}
@@ -3902,11 +3902,6 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
}
}
- /* tell ACPI we're suspending */
- rc = ata_acpi_on_suspend(ap);
- if (rc)
- goto out;
-
/* suspend */
ata_eh_freeze_port(ap);
@@ -3914,7 +3909,7 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
rc = ap->ops->port_suspend(ap, ap->pm_mesg);
ata_acpi_set_state(ap, ap->pm_mesg);
- out:
+
/* update the flags */
spin_lock_irqsave(ap->lock, flags);
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 071158c0c44c..044a16daa2d4 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -191,8 +191,8 @@ EXPORT_SYMBOL_GPL(ata_tf_to_fis);
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
- tf->command = fis[2]; /* status */
- tf->feature = fis[3]; /* error */
+ tf->status = fis[2];
+ tf->error = fis[3];
tf->lbal = fis[4];
tf->lbam = fis[5];
@@ -1406,8 +1406,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
*tag = buf[0] & 0x1f;
- tf->command = buf[2];
- tf->feature = buf[3];
+ tf->status = buf[2];
+ tf->error = buf[3];
tf->lbal = buf[4];
tf->lbam = buf[5];
tf->lbah = buf[6];
@@ -1482,7 +1482,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
if (dev->class == ATA_DEV_ZAC &&
- ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) {
+ ((qc->result_tf.status & ATA_SENSE) || qc->result_tf.auxiliary)) {
char sense_key, asc, ascq;
sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
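The libata-sata.c hunks read more naturally after the rename because they mirror the D2H register-FIS layout directly. For orientation, the relevant prefix of that FIS (per the SATA spec, summarized here from memory):

/*
 * D2H Register FIS:
 *   fis[0] = 0x34 (FIS type)     fis[1] = flags / PM port
 *   fis[2] = status register     fis[3] = error register
 *   fis[4..6] = LBA 7:0/15:8/23:16, fis[7] = device
 */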
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index ed8be585a98f..9df1a20b77dd 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -680,7 +680,7 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
*/
static void ata_dump_status(struct ata_port *ap, struct ata_taskfile *tf)
{
- u8 stat = tf->command, err = tf->feature;
+ u8 stat = tf->status, err = tf->error;
if (stat & ATA_BUSY) {
ata_port_warn(ap, "status=0x%02x {Busy} ", stat);
@@ -871,8 +871,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
* onto sense key, asc & ascq.
*/
if (qc->err_mask ||
- tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
- ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
+ tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
+ ata_to_sense_error(qc->ap->print_id, tf->status, tf->error,
&sense_key, &asc, &ascq, verbose);
ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq);
} else {
@@ -901,13 +901,13 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
* Copy registers into sense buffer.
*/
desc[2] = 0x00;
- desc[3] = tf->feature; /* == error reg */
+ desc[3] = tf->error;
desc[5] = tf->nsect;
desc[7] = tf->lbal;
desc[9] = tf->lbam;
desc[11] = tf->lbah;
desc[12] = tf->device;
- desc[13] = tf->command; /* == status reg */
+ desc[13] = tf->status;
/*
* Fill in Extend bit, and the high order bytes
@@ -922,8 +922,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
}
} else {
/* Fixed sense format */
- desc[0] = tf->feature;
- desc[1] = tf->command; /* status */
+ desc[0] = tf->error;
+ desc[1] = tf->status;
desc[2] = tf->device;
desc[3] = tf->nsect;
desc[7] = 0;
@@ -972,14 +972,14 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
* onto sense key, asc & ascq.
*/
if (qc->err_mask ||
- tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
- ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
+ tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
+ ata_to_sense_error(qc->ap->print_id, tf->status, tf->error,
&sense_key, &asc, &ascq, verbose);
ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq);
} else {
/* Could not decode error */
ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n",
- tf->command, qc->err_mask);
+ tf->status, qc->err_mask);
ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
return;
}
@@ -1314,21 +1314,10 @@ static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
* @plba: the LBA
* @plen: the transfer length
*/
-static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
+static inline void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
- u64 lba = 0;
- u32 len = 0;
-
- lba |= ((u64)cdb[2]) << 24;
- lba |= ((u64)cdb[3]) << 16;
- lba |= ((u64)cdb[4]) << 8;
- lba |= ((u64)cdb[5]);
-
- len |= ((u32)cdb[7]) << 8;
- len |= ((u32)cdb[8]);
-
- *plba = lba;
- *plen = len;
+ *plba = get_unaligned_be32(&cdb[2]);
+ *plen = get_unaligned_be16(&cdb[7]);
}
/**
@@ -1341,27 +1330,10 @@ static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
* @plba: the LBA
* @plen: the transfer length
*/
-static void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
+static inline void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
- u64 lba = 0;
- u32 len = 0;
-
- lba |= ((u64)cdb[2]) << 56;
- lba |= ((u64)cdb[3]) << 48;
- lba |= ((u64)cdb[4]) << 40;
- lba |= ((u64)cdb[5]) << 32;
- lba |= ((u64)cdb[6]) << 24;
- lba |= ((u64)cdb[7]) << 16;
- lba |= ((u64)cdb[8]) << 8;
- lba |= ((u64)cdb[9]);
-
- len |= ((u32)cdb[10]) << 24;
- len |= ((u32)cdb[11]) << 16;
- len |= ((u32)cdb[12]) << 8;
- len |= ((u32)cdb[13]);
-
- *plba = lba;
- *plen = len;
+ *plba = get_unaligned_be64(&cdb[2]);
+ *plen = get_unaligned_be32(&cdb[10]);
}
/**
@@ -1390,19 +1362,22 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf->protocol = ATA_PROT_NODATA;
- if (cdb[0] == VERIFY) {
+ switch (cdb[0]) {
+ case VERIFY:
if (scmd->cmd_len < 10) {
fp = 9;
goto invalid_fld;
}
scsi_10_lba_len(cdb, &block, &n_block);
- } else if (cdb[0] == VERIFY_16) {
+ break;
+ case VERIFY_16:
if (scmd->cmd_len < 16) {
fp = 15;
goto invalid_fld;
}
scsi_16_lba_len(cdb, &block, &n_block);
- } else {
+ break;
+ default:
fp = 0;
goto invalid_fld;
}
@@ -1534,8 +1509,13 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
int rc;
u16 fp = 0;
- if (cdb[0] == WRITE_10 || cdb[0] == WRITE_6 || cdb[0] == WRITE_16)
+ switch (cdb[0]) {
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_16:
tf_flags |= ATA_TFLAG_WRITE;
+ break;
+ }
/* Calculate the SCSI LBA, transfer length and FUA. */
switch (cdb[0]) {
@@ -2493,7 +2473,7 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
/* fill these in, for the case where they are -not- overwritten */
cmd->sense_buffer[0] = 0x70;
- cmd->sense_buffer[2] = qc->tf.feature >> 4;
+ cmd->sense_buffer[2] = qc->tf.error >> 4;
ata_qc_reinit(qc);
@@ -2845,7 +2825,8 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
* 12 and 16 byte CDBs use different offsets to
* provide the various register values.
*/
- if (cdb[0] == ATA_16) {
+ switch (cdb[0]) {
+ case ATA_16:
/*
* 16-byte CDB - may contain extended commands.
*
@@ -2871,7 +2852,8 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
tf->lbah = cdb[12];
tf->device = cdb[13];
tf->command = cdb[14];
- } else if (cdb[0] == ATA_12) {
+ break;
+ case ATA_12:
/*
* 12-byte CDB - incapable of extended commands.
*/
@@ -2884,7 +2866,8 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
tf->lbah = cdb[7];
tf->device = cdb[8];
tf->command = cdb[9];
- } else {
+ break;
+ default:
/*
* 32-byte CDB - may contain extended command fields.
*
@@ -2908,6 +2891,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
tf->device = cdb[24];
tf->command = cdb[25];
tf->auxiliary = get_unaligned_be32(&cdb[28]);
+ break;
}
/* For NCQ commands copy the tag value */
@@ -3672,7 +3656,7 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
goto invalid_fld;
}
- len = (cdb[7] << 8) + cdb[8];
+ len = get_unaligned_be16(&cdb[7]);
hdr_len = 8;
}
@@ -3698,7 +3682,7 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
if (six_byte)
bd_len = p[3];
else
- bd_len = (p[6] << 8) + p[7];
+ bd_len = get_unaligned_be16(&p[6]);
len -= hdr_len;
p += hdr_len;
@@ -3722,7 +3706,7 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
goto invalid_param_len;
spg = p[1];
- pg_len = (p[2] << 8) | p[3];
+ pg_len = get_unaligned_be16(&p[2]);
p += 4;
len -= 4;
} else {
@@ -3933,7 +3917,6 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
case MODE_SELECT:
case MODE_SELECT_10:
return ata_scsi_mode_select_xlat;
- break;
case ZBC_IN:
return ata_scsi_zbc_in_xlat;
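The open-coded shift-and-or CDB parsing above collapses into the generic unaligned big-endian accessors, which also make the field widths explicit. The 10-byte case, sketched as an equivalent:

#include <asm/unaligned.h>

static inline void example_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
	*plba = get_unaligned_be32(&cdb[2]);	/* bytes 2-5: logical block address */
	*plen = get_unaligned_be16(&cdb[7]);	/* bytes 7-8: transfer length */
}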
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 75217828dfe3..b3be7a8f5bea 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -70,22 +70,35 @@ EXPORT_SYMBOL_GPL(ata_sff_check_status);
/**
* ata_sff_altstatus - Read device alternate status reg
* @ap: port where the device is
+ * @status: pointer to the returned status value (may be NULL)
*
- * Reads ATA taskfile alternate status register for
- * currently-selected device and return its value.
+ * Reads the ATA alternate status register for the currently-selected
+ * device and returns its value through @status.
*
- * Note: may NOT be used as the check_altstatus() entry in
- * ata_port_operations.
+ * RETURNS:
+ * true if the register exists, false if not.
*
* LOCKING:
* Inherited from caller.
*/
-static u8 ata_sff_altstatus(struct ata_port *ap)
+static bool ata_sff_altstatus(struct ata_port *ap, u8 *status)
{
- if (ap->ops->sff_check_altstatus)
- return ap->ops->sff_check_altstatus(ap);
+ u8 tmp;
+
+ if (ap->ops->sff_check_altstatus) {
+ tmp = ap->ops->sff_check_altstatus(ap);
+ goto read;
+ }
+ if (ap->ioaddr.altstatus_addr) {
+ tmp = ioread8(ap->ioaddr.altstatus_addr);
+ goto read;
+ }
+ return false;
- return ioread8(ap->ioaddr.altstatus_addr);
+read:
+ if (status)
+ *status = tmp;
+ return true;
}
/**
@@ -104,12 +117,9 @@ static u8 ata_sff_irq_status(struct ata_port *ap)
{
u8 status;
- if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
- status = ata_sff_altstatus(ap);
- /* Not us: We are busy */
- if (status & ATA_BUSY)
- return status;
- }
+ /* Not us: We are busy */
+ if (ata_sff_altstatus(ap, &status) && (status & ATA_BUSY))
+ return status;
/* Clear INTRQ latch */
status = ap->ops->sff_check_status(ap);
return status;
@@ -129,10 +139,7 @@ static u8 ata_sff_irq_status(struct ata_port *ap)
static void ata_sff_sync(struct ata_port *ap)
{
- if (ap->ops->sff_check_altstatus)
- ap->ops->sff_check_altstatus(ap);
- else if (ap->ioaddr.altstatus_addr)
- ioread8(ap->ioaddr.altstatus_addr);
+ ata_sff_altstatus(ap, NULL);
}
/**
@@ -164,12 +171,12 @@ EXPORT_SYMBOL_GPL(ata_sff_pause);
void ata_sff_dma_pause(struct ata_port *ap)
{
- if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
- /* An altstatus read will cause the needed delay without
- messing up the IRQ status */
- ata_sff_altstatus(ap);
+ /*
+ * An altstatus read will cause the needed delay without
+ * messing up the IRQ status
+ */
+ if (ata_sff_altstatus(ap, NULL))
return;
- }
/* There are no DMA controllers without ctl. BUG here to ensure
we never violate the HDMA1:0 transition timing and risk
corruption. */
@@ -265,20 +272,26 @@ EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
* @ap: port where the device is
* @ctl: value to write
*
- * Writes ATA taskfile device control register.
+ * Writes the ATA device control register.
*
- * Note: may NOT be used as the sff_set_devctl() entry in
- * ata_port_operations.
+ * RETURNS:
+ * true if the register exists, false if not.
*
* LOCKING:
* Inherited from caller.
*/
-static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
+static bool ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
{
- if (ap->ops->sff_set_devctl)
+ if (ap->ops->sff_set_devctl) {
ap->ops->sff_set_devctl(ap, ctl);
- else
+ return true;
+ }
+ if (ap->ioaddr.ctl_addr) {
iowrite8(ctl, ap->ioaddr.ctl_addr);
+ return true;
+ }
+
+ return false;
}
/**
@@ -357,8 +370,6 @@ static void ata_dev_select(struct ata_port *ap, unsigned int device,
*/
void ata_sff_irq_on(struct ata_port *ap)
{
- struct ata_ioports *ioaddr = &ap->ioaddr;
-
if (ap->ops->sff_irq_on) {
ap->ops->sff_irq_on(ap);
return;
@@ -367,8 +378,7 @@ void ata_sff_irq_on(struct ata_port *ap)
ap->ctl &= ~ATA_NIEN;
ap->last_ctl = ap->ctl;
- if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
- ata_sff_set_devctl(ap, ap->ctl);
+ ata_sff_set_devctl(ap, ap->ctl);
ata_wait_idle(ap);
if (ap->ops->sff_irq_clear)
@@ -439,8 +449,8 @@ void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
- tf->command = ata_sff_check_status(ap);
- tf->feature = ioread8(ioaddr->error_addr);
+ tf->status = ata_sff_check_status(ap);
+ tf->error = ioread8(ioaddr->error_addr);
tf->nsect = ioread8(ioaddr->nsect_addr);
tf->lbal = ioread8(ioaddr->lbal_addr);
tf->lbam = ioread8(ioaddr->lbam_addr);
@@ -1634,14 +1644,14 @@ void ata_sff_lost_interrupt(struct ata_port *ap)
return;
/* See if the controller thinks it is still busy - if so the command
isn't a lost IRQ but is still in progress */
- status = ata_sff_altstatus(ap);
+ if (WARN_ON_ONCE(!ata_sff_altstatus(ap, &status)))
+ return;
if (status & ATA_BUSY)
return;
/* There was a command running, we are no longer busy and we have
no interrupt. */
- ata_port_warn(ap, "lost interrupt (Status 0x%x)\n",
- status);
+ ata_port_warn(ap, "lost interrupt (Status 0x%x)\n", status);
/* Run the host interrupt logic as if the interrupt had not been
lost */
ata_sff_port_intr(ap, qc);
@@ -1662,8 +1672,7 @@ void ata_sff_freeze(struct ata_port *ap)
ap->ctl |= ATA_NIEN;
ap->last_ctl = ap->ctl;
- if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)
- ata_sff_set_devctl(ap, ap->ctl);
+ ata_sff_set_devctl(ap, ap->ctl);
/* Under certain circumstances, some controllers raise IRQ on
* ATA_NIEN manipulation. Also, many controllers fail to mask
@@ -1708,16 +1717,15 @@ EXPORT_SYMBOL_GPL(ata_sff_thaw);
* Kernel thread context (may sleep)
*
* RETURNS:
- * 0 on success, -errno otherwise.
+ * Always 0.
*/
int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
{
struct ata_eh_context *ehc = &link->eh_context;
int rc;
- rc = ata_std_prereset(link, deadline);
- if (rc)
- return rc;
+ /* The standard prereset is best-effort and always returns 0 */
+ ata_std_prereset(link, deadline);
/* if we're about to do hardreset, nothing more to do */
if (ehc->i.action & ATA_EH_HARDRESET)
@@ -1752,10 +1760,13 @@ EXPORT_SYMBOL_GPL(ata_sff_prereset);
* correctly storing and echoing back the
* ATA shadow register contents.
*
+ * RETURNS:
+ * true if device is present, false if not.
+ *
* LOCKING:
* caller.
*/
-static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
+static bool ata_devchk(struct ata_port *ap, unsigned int device)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
u8 nsect, lbal;
@@ -1775,9 +1786,9 @@ static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
lbal = ioread8(ioaddr->lbal_addr);
if ((nsect == 0x55) && (lbal == 0xaa))
- return 1; /* we found a device */
+ return true; /* we found a device */
- return 0; /* nothing found */
+ return false; /* nothing found */
}
/**
@@ -1814,7 +1825,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
memset(&tf, 0, sizeof(tf));
ap->ops->sff_tf_read(ap, &tf);
- err = tf.feature;
+ err = tf.error;
if (r_err)
*r_err = err;
@@ -1831,9 +1842,10 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
/* determine if device is ATA or ATAPI */
class = ata_port_classify(ap, &tf);
-
- if (class == ATA_DEV_UNKNOWN) {
- /* If the device failed diagnostic, it's likely to
+ switch (class) {
+ case ATA_DEV_UNKNOWN:
+ /*
+ * If the device failed diagnostic, it's likely to
* have reported incorrect device signature too.
* Assume ATA device if the device seems present but
* device signature is invalid with diagnostic
@@ -1843,10 +1855,12 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
class = ATA_DEV_ATA;
else
class = ATA_DEV_NONE;
- } else if ((class == ATA_DEV_ATA) &&
- (ap->ops->sff_check_status(ap) == 0))
- class = ATA_DEV_NONE;
-
+ break;
+ case ATA_DEV_ATA:
+ if (ap->ops->sff_check_status(ap) == 0)
+ class = ATA_DEV_NONE;
+ break;
+ }
return class;
}
EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
@@ -2059,10 +2073,8 @@ void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
return;
/* set up device control */
- if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
- ata_sff_set_devctl(ap, ap->ctl);
+ if (ata_sff_set_devctl(ap, ap->ctl))
ap->last_ctl = ap->ctl;
- }
}
EXPORT_SYMBOL_GPL(ata_sff_postreset);
@@ -2172,18 +2184,18 @@ EXPORT_SYMBOL_GPL(ata_sff_std_ports);
#ifdef CONFIG_PCI
-static int ata_resources_present(struct pci_dev *pdev, int port)
+static bool ata_resources_present(struct pci_dev *pdev, int port)
{
int i;
/* Check the PCI resources for this channel are enabled */
- port = port * 2;
+ port *= 2;
for (i = 0; i < 2; i++) {
if (pci_resource_start(pdev, port + i) == 0 ||
pci_resource_len(pdev, port + i) == 0)
- return 0;
+ return false;
}
- return 1;
+ return true;
}
/**
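The reworked ata_sff_altstatus() and ata_sff_set_devctl() share one shape: return bool for "does this port have the register at all", with the value moved to an optional out-parameter, so callers stop duplicating the sff_check_altstatus/ctl_addr existence checks. The pattern in generic form, sketched:

static bool example_read_optional(struct ata_port *ap, u8 *val)
{
	if (!ap->ioaddr.altstatus_addr)
		return false;	/* register absent on this port */

	if (val)
		*val = ioread8(ap->ioaddr.altstatus_addr);
	return true;	/* pass val == NULL for a dummy, flushing read */
}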
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 51e01acdd241..c9c2496d91ea 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -107,7 +107,6 @@ static inline void ata_sas_free_tag(unsigned int tag, struct ata_port *ap) { }
#ifdef CONFIG_ATA_ACPI
extern unsigned int ata_acpi_gtf_filter;
extern void ata_acpi_dissociate(struct ata_host *host);
-extern int ata_acpi_on_suspend(struct ata_port *ap);
extern void ata_acpi_on_resume(struct ata_port *ap);
extern int ata_acpi_on_devcfg(struct ata_device *dev);
extern void ata_acpi_on_disable(struct ata_device *dev);
@@ -117,7 +116,6 @@ extern void ata_acpi_bind_dev(struct ata_device *dev);
extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev);
#else
static inline void ata_acpi_dissociate(struct ata_host *host) { }
-static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
static inline void ata_acpi_on_resume(struct ata_port *ap) { }
static inline int ata_acpi_on_devcfg(struct ata_device *dev) { return 0; }
static inline void ata_acpi_on_disable(struct ata_device *dev) { }
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 24c3d5e1fca3..e89617ed9175 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -937,7 +937,8 @@ static int arasan_cf_suspend(struct device *dev)
dmaengine_terminate_all(acdev->dma_chan);
cf_exit(acdev);
- return ata_host_suspend(host, PMSG_SUSPEND);
+ ata_host_suspend(host, PMSG_SUSPEND);
+ return 0;
}
static int arasan_cf_resume(struct device *dev)
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index ad3c5808aaad..20a8f31a3f57 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -28,7 +28,7 @@
#include <linux/ata.h>
#define DRV_NAME "pata_artop"
-#define DRV_VERSION "0.4.6"
+#define DRV_VERSION "0.4.8"
/*
* The ARTOP has 33 Mhz and "over clocked" timing tables. Until we
@@ -315,12 +315,15 @@ static struct ata_port_operations artop6260_ops = {
static void atp8xx_fixup(struct pci_dev *pdev)
{
- if (pdev->device == 0x0005)
+ u8 reg;
+
+ switch (pdev->device) {
+ case 0x0005:
/* BIOS may have left us in UDMA, clear it before libata probe */
pci_write_config_byte(pdev, 0x54, 0);
- else if (pdev->device == 0x0008 || pdev->device == 0x0009) {
- u8 reg;
-
+ break;
+ case 0x0008:
+ case 0x0009:
/* Mac systems come up with some registers not set as we
will need them */
@@ -338,6 +341,7 @@ static void atp8xx_fixup(struct pci_dev *pdev)
/* Enable IRQ output and burst mode */
pci_read_config_byte(pdev, 0x4a, &reg);
pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80);
+ break;
}
}
@@ -394,16 +398,19 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- if (id->driver_data == 0) /* 6210 variant */
+ switch (id->driver_data) {
+ case 0: /* 6210 variant */
ppi[0] = &info_6210;
- else if (id->driver_data == 1) /* 6260 */
+ break;
+ case 1: /* 6260 */
ppi[0] = &info_626x;
- else if (id->driver_data == 2) { /* 6280 or 6280 + fast */
- unsigned long io = pci_resource_start(pdev, 4);
-
- ppi[0] = &info_628x;
- if (inb(io) & 0x10)
+ break;
+ case 2: /* 6280 or 6280 + fast */
+ if (inb(pci_resource_start(pdev, 4)) & 0x10)
ppi[0] = &info_628x_fast;
+ else
+ ppi[0] = &info_628x;
+ break;
}
BUG_ON(ppi[0] == NULL);
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index c3a65ccd4b79..efdb94cff68b 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -102,7 +102,7 @@ static int atiixp_prereset(struct ata_link *link, unsigned long deadline)
static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, int pio)
{
- static u8 pio_timings[5] = { 0x5D, 0x47, 0x34, 0x22, 0x20 };
+ static const u8 pio_timings[5] = { 0x5D, 0x47, 0x34, 0x22, 0x20 };
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = 2 * ap->port_no + adev->devno;
@@ -149,7 +149,7 @@ static void atiixp_set_piomode(struct ata_port *ap, struct ata_device *adev)
static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
- static u8 mwdma_timings[5] = { 0x77, 0x21, 0x20 };
+ static const u8 mwdma_timings[5] = { 0x77, 0x21, 0x20 };
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dma = adev->dma_mode;
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 24ce8665b1f9..f4289a532f87 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -259,11 +259,8 @@ static int cs5520_reinit_one(struct pci_dev *pdev)
static int cs5520_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = pci_get_drvdata(pdev);
- int rc = 0;
- rc = ata_host_suspend(host, mesg);
- if (rc)
- return rc;
+ ata_host_suspend(host, mesg);
pci_save_state(pdev);
return 0;
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index b78f71c70f27..6c75a22db12b 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -416,8 +416,8 @@ static void ep93xx_pata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ep93xx_pata_data *drv_data = ap->host->private_data;
- tf->command = ep93xx_pata_check_status(ap);
- tf->feature = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_FEATURE);
+ tf->status = ep93xx_pata_check_status(ap);
+ tf->error = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_FEATURE);
tf->nsect = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_NSECT);
tf->lbal = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAL);
tf->lbam = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAM);
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
index 34cb104f6b43..2e35505b683c 100644
--- a/drivers/ata/pata_ftide010.c
+++ b/drivers/ata/pata_ftide010.c
@@ -554,10 +554,8 @@ static int pata_ftide010_remove(struct platform_device *pdev)
}
static const struct of_device_id pata_ftide010_of_match[] = {
- {
- .compatible = "faraday,ftide010",
- },
- {},
+ { .compatible = "faraday,ftide010", },
+ { /* sentinel */ }
};
static struct platform_driver pata_ftide010_driver = {
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 778c893f276b..c99e8f0708b3 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -23,7 +23,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt366"
-#define DRV_VERSION "0.6.11"
+#define DRV_VERSION "0.6.13"
struct hpt_clock {
u8 xfer_mode;
@@ -278,6 +278,40 @@ static void hpt366_set_dmamode(struct ata_port *ap, struct ata_device *adev)
hpt366_set_mode(ap, adev, adev->dma_mode);
}
+/**
+ * hpt366_prereset - reset the hpt36x bus
+ * @link: ATA link to reset
+ * @deadline: deadline jiffies for the operation
+ *
+ * Perform the initial reset handling for the 36x series controllers.
+ * Reset the hardware and state machine,
+ */
+
+static int hpt366_prereset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ /*
+ * HPT36x chips have one channel per function and have
+ * both channel enable bits located differently and visible
+ * to both functions -- really stupid design decision... :-(
+ * Bit 4 is for the primary channel, bit 5 for the secondary.
+ */
+ static const struct pci_bits hpt366_enable_bits = {
+ 0x50, 1, 0x30, 0x30
+ };
+ u8 mcr2;
+
+ if (!pci_test_config_bits(pdev, &hpt366_enable_bits))
+ return -ENOENT;
+
+ pci_read_config_byte(pdev, 0x51, &mcr2);
+ if (mcr2 & 0x80)
+ pci_write_config_byte(pdev, 0x51, mcr2 & ~0x80);
+
+ return ata_sff_prereset(link, deadline);
+}
+
static struct scsi_host_template hpt36x_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
@@ -288,6 +322,7 @@ static struct scsi_host_template hpt36x_sht = {
static struct ata_port_operations hpt366_port_ops = {
.inherits = &ata_bmdma_port_ops,
+ .prereset = hpt366_prereset,
.cable_detect = hpt36x_cable_detect,
.mode_filter = hpt366_filter,
.set_piomode = hpt366_set_piomode,
@@ -304,16 +339,20 @@ static struct ata_port_operations hpt366_port_ops = {
static void hpt36x_init_chipset(struct pci_dev *dev)
{
- u8 drive_fast;
+ u8 mcr1;
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
- pci_read_config_byte(dev, 0x51, &drive_fast);
- if (drive_fast & 0x80)
- pci_write_config_byte(dev, 0x51, drive_fast & ~0x80);
+ /*
+ * Now we'll have to force both channels enabled if at least one
+ * of them has been enabled by BIOS...
+ */
+ pci_read_config_byte(dev, 0x50, &mcr1);
+ if (mcr1 & 0x30)
+ pci_write_config_byte(dev, 0x50, mcr1 | 0x30);
}
/**
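The new prereset hook gates on pci_test_config_bits(); an illustrative expansion of the check for the { 0x50, 1, 0x30, 0x30 } entry above, assuming the struct pci_bits fields mean { reg, width in bytes, mask, expected value }:

static int example_channels_enabled(struct pci_dev *pdev)
{
	u8 mcr1;

	pci_read_config_byte(pdev, 0x50, &mcr1);	/* 1-byte read at 0x50 */
	return (mcr1 & 0x30) == 0x30;			/* bits 4 and 5 both set */
}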
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 6fa4a2faf49c..156f304ef051 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -23,7 +23,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt37x"
-#define DRV_VERSION "0.6.23"
+#define DRV_VERSION "0.6.25"
struct hpt_clock {
u8 xfer_speed;
@@ -394,6 +394,7 @@ static int hpt37x_pre_reset(struct ata_link *link, unsigned long deadline)
{ 0x50, 1, 0x04, 0x04 },
{ 0x54, 1, 0x04, 0x04 }
};
+ u8 mcr2;
if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no]))
return -ENOENT;
@@ -402,25 +403,29 @@ static int hpt37x_pre_reset(struct ata_link *link, unsigned long deadline)
pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
udelay(100);
+ /*
+ * Disable the "fast interrupt" prediction. Don't hold off
+ * on interrupts. (== 0x01 despite what the docs say)
+ */
+ pci_read_config_byte(pdev, 0x51 + 4 * ap->port_no, &mcr2);
+ /* Is it HPT370/A? */
+ if (pdev->device == PCI_DEVICE_ID_TTI_HPT366 && pdev->revision < 5) {
+ mcr2 &= ~0x02;
+ mcr2 |= 0x01;
+ } else {
+ mcr2 &= ~0x07;
+ }
+ pci_write_config_byte(pdev, 0x51 + 4 * ap->port_no, mcr2);
+
return ata_sff_prereset(link, deadline);
}
-static void hpt370_set_mode(struct ata_port *ap, struct ata_device *adev,
+static void hpt37x_set_mode(struct ata_port *ap, struct ata_device *adev,
u8 mode)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- u32 addr1, addr2;
+ int addr = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
u32 reg, timing, mask;
- u8 fast;
-
- addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
- addr2 = 0x51 + 4 * ap->port_no;
-
- /* Fast interrupt prediction disable, hold off interrupt disable */
- pci_read_config_byte(pdev, addr2, &fast);
- fast &= ~0x02;
- fast |= 0x01;
- pci_write_config_byte(pdev, addr2, fast);
/* Determine timing mask and find matching mode entry */
if (mode < XFER_MW_DMA_0)
@@ -432,34 +437,34 @@ static void hpt370_set_mode(struct ata_port *ap, struct ata_device *adev,
timing = hpt37x_find_mode(ap, mode);
- pci_read_config_dword(pdev, addr1, &reg);
+ pci_read_config_dword(pdev, addr, &reg);
reg = (reg & ~mask) | (timing & mask);
- pci_write_config_dword(pdev, addr1, reg);
+ pci_write_config_dword(pdev, addr, reg);
}
/**
- * hpt370_set_piomode - PIO setup
+ * hpt37x_set_piomode - PIO setup
* @ap: ATA interface
* @adev: device on the interface
*
* Perform PIO mode setup.
*/
-static void hpt370_set_piomode(struct ata_port *ap, struct ata_device *adev)
+static void hpt37x_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
- hpt370_set_mode(ap, adev, adev->pio_mode);
+ hpt37x_set_mode(ap, adev, adev->pio_mode);
}
/**
- * hpt370_set_dmamode - DMA timing setup
+ * hpt37x_set_dmamode - DMA timing setup
* @ap: ATA interface
* @adev: Device being configured
*
* Set up the channel for MWDMA or UDMA modes.
*/
-static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+static void hpt37x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
- hpt370_set_mode(ap, adev, adev->dma_mode);
+ hpt37x_set_mode(ap, adev, adev->dma_mode);
}
/**
@@ -499,63 +504,6 @@ static void hpt370_bmdma_stop(struct ata_queued_cmd *qc)
ata_bmdma_stop(qc);
}
-static void hpt372_set_mode(struct ata_port *ap, struct ata_device *adev,
- u8 mode)
-{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- u32 addr1, addr2;
- u32 reg, timing, mask;
- u8 fast;
-
- addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
- addr2 = 0x51 + 4 * ap->port_no;
-
- /* Fast interrupt prediction disable, hold off interrupt disable */
- pci_read_config_byte(pdev, addr2, &fast);
- fast &= ~0x07;
- pci_write_config_byte(pdev, addr2, fast);
-
- /* Determine timing mask and find matching mode entry */
- if (mode < XFER_MW_DMA_0)
- mask = 0xcfc3ffff;
- else if (mode < XFER_UDMA_0)
- mask = 0x31c001ff;
- else
- mask = 0x303c0000;
-
- timing = hpt37x_find_mode(ap, mode);
-
- pci_read_config_dword(pdev, addr1, &reg);
- reg = (reg & ~mask) | (timing & mask);
- pci_write_config_dword(pdev, addr1, reg);
-}
-
-/**
- * hpt372_set_piomode - PIO setup
- * @ap: ATA interface
- * @adev: device on the interface
- *
- * Perform PIO mode setup.
- */
-
-static void hpt372_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
- hpt372_set_mode(ap, adev, adev->pio_mode);
-}
-
-/**
- * hpt372_set_dmamode - DMA timing setup
- * @ap: ATA interface
- * @adev: Device being configured
- *
- * Set up the channel for MWDMA or UDMA modes.
- */
-
-static void hpt372_set_dmamode(struct ata_port *ap, struct ata_device *adev)
-{
- hpt372_set_mode(ap, adev, adev->dma_mode);
-}
-
/**
* hpt37x_bmdma_stop - DMA engine stop
* @qc: ATA command
@@ -593,8 +541,8 @@ static struct ata_port_operations hpt370_port_ops = {
.mode_filter = hpt370_filter,
.cable_detect = hpt37x_cable_detect,
- .set_piomode = hpt370_set_piomode,
- .set_dmamode = hpt370_set_dmamode,
+ .set_piomode = hpt37x_set_piomode,
+ .set_dmamode = hpt37x_set_dmamode,
.prereset = hpt37x_pre_reset,
};
@@ -608,8 +556,7 @@ static struct ata_port_operations hpt370a_port_ops = {
};
/*
- * Configuration for HPT371 and HPT302. Slightly different PIO and DMA
- * mode setting functionality.
+ * Configuration for HPT371 and HPT302.
*/
static struct ata_port_operations hpt302_port_ops = {
@@ -618,8 +565,8 @@ static struct ata_port_operations hpt302_port_ops = {
.bmdma_stop = hpt37x_bmdma_stop,
.cable_detect = hpt37x_cable_detect,
- .set_piomode = hpt372_set_piomode,
- .set_dmamode = hpt372_set_dmamode,
+ .set_piomode = hpt37x_set_piomode,
+ .set_dmamode = hpt37x_set_dmamode,
.prereset = hpt37x_pre_reset,
};
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 1d9d4eec5b8a..1f6afd8ee29b 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -24,10 +24,9 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt3x2n"
-#define DRV_VERSION "0.3.15"
+#define DRV_VERSION "0.3.18"
enum {
- HPT_PCI_FAST = (1 << 31),
PCI66 = (1 << 1),
USE_DPLL = (1 << 0)
};
@@ -37,11 +36,6 @@ struct hpt_clock {
u32 timing;
};
-struct hpt_chip {
- const char *name;
- struct hpt_clock *clocks[3];
-};
-
/* key for bus clock timings
* bit
* 0:3 data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA.
@@ -168,11 +162,24 @@ static int hpt3x2n_pre_reset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ static const struct pci_bits hpt3x2n_enable_bits[] = {
+ { 0x50, 1, 0x04, 0x04 },
+ { 0x54, 1, 0x04, 0x04 }
+ };
+ u8 mcr2;
+
+ if (!pci_test_config_bits(pdev, &hpt3x2n_enable_bits[ap->port_no]))
+ return -ENOENT;
/* Reset the state machine */
pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
udelay(100);
+ /* Fast interrupt prediction disable, hold off interrupt disable */
+ pci_read_config_byte(pdev, 0x51 + 4 * ap->port_no, &mcr2);
+ mcr2 &= ~0x07;
+ pci_write_config_byte(pdev, 0x51 + 4 * ap->port_no, mcr2);
+
return ata_sff_prereset(link, deadline);
}
@@ -180,17 +187,8 @@ static void hpt3x2n_set_mode(struct ata_port *ap, struct ata_device *adev,
u8 mode)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- u32 addr1, addr2;
+ int addr = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
u32 reg, timing, mask;
- u8 fast;
-
- addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
- addr2 = 0x51 + 4 * ap->port_no;
-
- /* Fast interrupt prediction disable, hold off interrupt disable */
- pci_read_config_byte(pdev, addr2, &fast);
- fast &= ~0x07;
- pci_write_config_byte(pdev, addr2, fast);
/* Determine timing mask and find matching mode entry */
if (mode < XFER_MW_DMA_0)
@@ -202,9 +200,9 @@ static void hpt3x2n_set_mode(struct ata_port *ap, struct ata_device *adev,
timing = hpt3x2n_find_mode(ap, mode);
- pci_read_config_dword(pdev, addr1, &reg);
+ pci_read_config_dword(pdev, addr, &reg);
reg = (reg & ~mask) | (timing & mask);
- pci_write_config_dword(pdev, addr1, reg);
+ pci_write_config_dword(pdev, addr, reg);
}
/**
@@ -244,7 +242,7 @@ static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- int mscreg = 0x50 + 2 * ap->port_no;
+ int mscreg = 0x50 + 4 * ap->port_no;
u8 bwsr_stat, msc_stat;
pci_read_config_byte(pdev, 0x6A, &bwsr_stat);
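The mscreg change above is an off-by-stride fix: as the hpt3x2n_enable_bits[] table earlier in this file shows, the per-channel MCRs sit at 0x50 and 0x54, i.e. 4 bytes apart, hence:

	int mscreg = 0x50 + 4 * ap->port_no;	/* 0x50 for port 0, 0x54 for port 1 */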
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 2e538726802b..150939275b1b 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -223,17 +223,14 @@ static int pata_imx_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct pata_imx_priv *priv = host->private_data;
- int ret;
- ret = ata_host_suspend(host, PMSG_SUSPEND);
- if (!ret) {
- __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
- priv->ata_ctl =
- __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
- clk_disable_unprepare(priv->clk);
- }
+ ata_host_suspend(host, PMSG_SUSPEND);
- return ret;
+ __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
+ priv->ata_ctl = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
}
static int pata_imx_resume(struct device *dev)
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 17b557c91e1c..e225913a619d 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -293,7 +293,7 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
static const struct of_device_id ixp4xx_pata_of_match[] = {
{ .compatible = "intel,ixp4xx-compact-flash", },
- { },
+ { /* sentinel */ }
};
static struct platform_driver ixp4xx_pata_platform_driver = {
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 16e8aa184a75..42798402cf63 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -853,12 +853,8 @@ static int pata_macio_slave_config(struct scsi_device *sdev)
#ifdef CONFIG_PM_SLEEP
static int pata_macio_do_suspend(struct pata_macio_priv *priv, pm_message_t mesg)
{
- int rc;
-
/* First, core libata suspend to do most of the work */
- rc = ata_host_suspend(priv->host, mesg);
- if (rc)
- return rc;
+ ata_host_suspend(priv->host, mesg);
/* Restore to default timings */
pata_macio_default_timings(priv);
@@ -1333,19 +1329,11 @@ static int pata_macio_pci_resume(struct pci_dev *pdev)
static const struct of_device_id pata_macio_match[] =
{
- {
- .name = "IDE",
- },
- {
- .name = "ATA",
- },
- {
- .type = "ide",
- },
- {
- .type = "ata",
- },
- {},
+ { .name = "IDE", },
+ { .name = "ATA", },
+ { .type = "ide", },
+ { .type = "ata", },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pata_macio_match);
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index f1d352d5f128..3250ef317df6 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -736,7 +736,7 @@ static int mpc52xx_ata_probe(struct platform_device *op)
}
/* Prepare our private structure */
- priv = devm_kzalloc(&op->dev, sizeof(*priv), GFP_ATOMIC);
+ priv = devm_kzalloc(&op->dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
rv = -ENOMEM;
goto err1;
@@ -824,7 +824,8 @@ mpc52xx_ata_suspend(struct platform_device *op, pm_message_t state)
{
struct ata_host *host = platform_get_drvdata(op);
- return ata_host_suspend(host, state);
+ ata_host_suspend(host, state);
+ return 0;
}
static int
@@ -849,7 +850,7 @@ mpc52xx_ata_resume(struct platform_device *op)
static const struct of_device_id mpc52xx_ata_of_match[] = {
{ .compatible = "fsl,mpc5200-ata", },
{ .compatible = "mpc5200-ata", },
- {},
+ { /* sentinel */ }
};
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index f4949e704356..9dd6bffefb48 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -264,8 +264,8 @@ void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
- tf->command = ns87560_check_status(ap);
- tf->feature = ioread8(ioaddr->error_addr);
+ tf->status = ns87560_check_status(ap);
+ tf->error = ioread8(ioaddr->error_addr);
tf->nsect = ioread8(ioaddr->nsect_addr);
tf->lbal = ioread8(ioaddr->lbal_addr);
tf->lbam = ioread8(ioaddr->lbam_addr);
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 05c2ab375756..6b5ed3046b44 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -382,7 +382,7 @@ static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf)
void __iomem *base = ap->ioaddr.data_addr;
blob = __raw_readw(base + 0xc);
- tf->feature = blob >> 8;
+ tf->error = blob >> 8;
blob = __raw_readw(base + 2);
tf->nsect = blob & 0xff;
@@ -394,7 +394,7 @@ static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf)
blob = __raw_readw(base + 6);
tf->device = blob & 0xff;
- tf->command = blob >> 8;
+ tf->status = blob >> 8;
if (tf->flags & ATA_TFLAG_LBA48) {
if (likely(ap->ioaddr.ctl_addr)) {
@@ -1006,10 +1006,8 @@ static void octeon_cf_shutdown(struct device *dev)
}
static const struct of_device_id octeon_cf_match[] = {
- {
- .compatible = "cavium,ebt3000-compact-flash",
- },
- {},
+ { .compatible = "cavium,ebt3000-compact-flash", },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, octeon_cf_match);
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index c3a40b717dcd..ac5a633c00a5 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -79,7 +79,7 @@ static int pata_of_platform_probe(struct platform_device *ofdev)
static const struct of_device_id pata_of_platform_match[] = {
{ .compatible = "ata-generic", },
- { },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pata_of_platform_match);
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index b99849095853..f894ff2de0a9 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -78,7 +78,7 @@ static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *a
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
- static u16 pio_timing[5] = {
+ static const u16 pio_timing[5] = {
0x0913, 0x050C , 0x0308, 0x0206, 0x0104
};
u8 r_ap, r_bp;
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 87c7c90676ca..21fb059859bd 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -200,22 +200,16 @@ static int pata_platform_probe(struct platform_device *pdev)
/*
* Get the I/O base first
*/
- io_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (io_res == NULL) {
- io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (unlikely(io_res == NULL))
- return -EINVAL;
- }
+ io_res = platform_get_mem_or_io(pdev, 0);
+ if (!io_res)
+ return -EINVAL;
/*
* Then the CTL base
*/
- ctl_res = platform_get_resource(pdev, IORESOURCE_IO, 1);
- if (ctl_res == NULL) {
- ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (unlikely(ctl_res == NULL))
- return -EINVAL;
- }
+ ctl_res = platform_get_mem_or_io(pdev, 1);
+ if (!ctl_res)
+ return -EINVAL;
/*
* And the IRQ
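platform_get_mem_or_io() replaces the open-coded MEM-then-IO fallback above. A rough sketch of the helper's assumed behaviour (the real implementation may differ in detail, e.g. by indexing MEM and IO resources jointly):

static struct resource *example_get_mem_or_io(struct platform_device *pdev,
					      unsigned int num)
{
	unsigned int i, seen = 0;

	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *r = &pdev->resource[i];

		if (resource_type(r) != IORESOURCE_MEM &&
		    resource_type(r) != IORESOURCE_IO)
			continue;
		if (seen++ == num)	/* num'th MEM-or-IO resource */
			return r;
	}
	return NULL;
}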
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 41430f79663c..985f42c4fd70 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -164,10 +164,10 @@ static int pxa_ata_probe(struct platform_device *pdev)
struct resource *cmd_res;
struct resource *ctl_res;
struct resource *dma_res;
- struct resource *irq_res;
struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
struct dma_slave_config config;
int ret = 0;
+ int irq;
/*
* Resource validation, three resources are needed:
@@ -205,9 +205,9 @@ static int pxa_ata_probe(struct platform_device *pdev)
/*
* IRQ pin
*/
- irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (unlikely(irq_res == NULL))
- return -EINVAL;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
/*
* Allocate the host
@@ -287,7 +287,7 @@ static int pxa_ata_probe(struct platform_device *pdev)
/*
* Activate the ATA host
*/
- ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt,
+ ret = ata_host_activate(host, irq, ata_sff_interrupt,
pdata->irq_flags, &pxa_ata_sht);
if (ret)
dma_release_channel(data->dma_chan);
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index 3da0e8e30286..aba1536ddd44 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -213,7 +213,7 @@ static void pata_s3c_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
- tf->feature = ata_inb(ap->host, ioaddr->error_addr);
+ tf->error = ata_inb(ap->host, ioaddr->error_addr);
tf->nsect = ata_inb(ap->host, ioaddr->nsect_addr);
tf->lbal = ata_inb(ap->host, ioaddr->lbal_addr);
tf->lbam = ata_inb(ap->host, ioaddr->lbam_addr);
@@ -308,8 +308,7 @@ static void pata_s3c_dev_select(struct ata_port *ap, unsigned int device)
/*
* pata_s3c_devchk - PATA device presence detection
*/
-static unsigned int pata_s3c_devchk(struct ata_port *ap,
- unsigned int device)
+static bool pata_s3c_devchk(struct ata_port *ap, unsigned int device)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
u8 nsect, lbal;
@@ -329,9 +328,9 @@ static unsigned int pata_s3c_devchk(struct ata_port *ap,
lbal = ata_inb(ap->host, ioaddr->lbal_addr);
if ((nsect == 0x55) && (lbal == 0xaa))
- return 1; /* we found a device */
+ return true; /* we found a device */
- return 0; /* nothing found */
+ return false; /* nothing found */
}
/*
@@ -608,7 +607,8 @@ static int pata_s3c_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
- return ata_host_suspend(host, PMSG_SUSPEND);
+ ata_host_suspend(host, PMSG_SUSPEND);
+ return 0;
}
static int pata_s3c_resume(struct device *dev)
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index 8a033598e7e1..782162d2f3f8 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -198,11 +198,8 @@ static const struct pci_device_id triflex[] = {
static int triflex_ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = pci_get_drvdata(pdev);
- int rc = 0;
- rc = ata_host_suspend(host, mesg);
- if (rc)
- return rc;
+ ata_host_suspend(host, mesg);
/*
* We must not disable or powerdown the device.
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 556034a15430..b9a4f68b371d 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1544,7 +1544,9 @@ static int sata_fsl_remove(struct platform_device *ofdev)
static int sata_fsl_suspend(struct platform_device *op, pm_message_t state)
{
struct ata_host *host = platform_get_drvdata(op);
- return ata_host_suspend(host, state);
+
+ ata_host_suspend(host, state);
+ return 0;
}
static int sata_fsl_resume(struct platform_device *op)
@@ -1577,13 +1579,9 @@ static int sata_fsl_resume(struct platform_device *op)
#endif
static const struct of_device_id fsl_sata_match[] = {
- {
- .compatible = "fsl,pq-sata",
- },
- {
- .compatible = "fsl,pq-sata-v2",
- },
- {},
+ { .compatible = "fsl,pq-sata", },
+ { .compatible = "fsl,pq-sata-v2", },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_sata_match);
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index 440a63de20d0..00e1c7941d0e 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -419,10 +419,8 @@ static int gemini_sata_remove(struct platform_device *pdev)
}
static const struct of_device_id gemini_sata_of_match[] = {
- {
- .compatible = "cortina,gemini-sata-bridge",
- },
- {},
+ { .compatible = "cortina,gemini-sata-bridge", },
+ { /* sentinel */ }
};
static struct platform_driver gemini_sata_driver = {
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index b29d3f1d64b0..dfbf9493e451 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -400,7 +400,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
- tf.command = ATA_BUSY;
+ tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
do {
@@ -444,7 +444,7 @@ static struct scsi_host_template ahci_highbank_platform_sht = {
static const struct of_device_id ahci_of_match[] = {
{ .compatible = "calxeda,hb-ahci" },
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
@@ -587,7 +587,8 @@ static int ahci_highbank_suspend(struct device *dev)
writel(ctl, mmio + HOST_CTL);
readl(mmio + HOST_CTL); /* flush */
- return ata_host_suspend(host, PMSG_SUSPEND);
+ ata_host_suspend(host, PMSG_SUSPEND);
+ return 0;
}
static int ahci_highbank_resume(struct device *dev)
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 781901151d82..11e518f0111c 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -557,13 +557,13 @@ static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
void __iomem *port_base = inic_port_base(ap);
- tf->feature = readb(port_base + PORT_TF_FEATURE);
+ tf->error = readb(port_base + PORT_TF_FEATURE);
tf->nsect = readb(port_base + PORT_TF_NSECT);
tf->lbal = readb(port_base + PORT_TF_LBAL);
tf->lbam = readb(port_base + PORT_TF_LBAM);
tf->lbah = readb(port_base + PORT_TF_LBAH);
tf->device = readb(port_base + PORT_TF_DEVICE);
- tf->command = readb(port_base + PORT_TF_COMMAND);
+ tf->status = readb(port_base + PORT_TF_COMMAND);
}
static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc)
@@ -580,11 +580,11 @@ static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc)
*/
inic_tf_read(qc->ap, &tf);
- if (!(tf.command & ATA_ERR))
+ if (!(tf.status & ATA_ERR))
return false;
- rtf->command = tf.command;
- rtf->feature = tf.feature;
+ rtf->status = tf.status;
+ rtf->error = tf.error;
return true;
}
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 53446b997740..de5bd02cad44 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4235,10 +4235,10 @@ static int mv_platform_remove(struct platform_device *pdev)
static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
{
struct ata_host *host = platform_get_drvdata(pdev);
+
if (host)
- return ata_host_suspend(host, state);
- else
- return 0;
+ ata_host_suspend(host, state);
+ return 0;
}
static int mv_platform_resume(struct platform_device *pdev)
@@ -4277,7 +4277,7 @@ static int mv_platform_resume(struct platform_device *pdev)
static const struct of_device_id mv_sata_dt_ids[] = {
{ .compatible = "marvell,armada-370-sata", },
{ .compatible = "marvell,orion-sata", },
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
#endif
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 3d96b6faa3f0..590ebea99601 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -18,10 +18,6 @@
#define DRV_NAME "sata_rcar"
-/* SH-Navi2G/ATAPI-ATA compatible task registers */
-#define DATA_REG 0x100
-#define SDEVCON_REG 0x138
-
/* SH-Navi2G/ATAPI module compatible control registers */
#define ATAPI_CONTROL1_REG 0x180
#define ATAPI_STATUS_REG 0x184
@@ -283,8 +279,7 @@ static void sata_rcar_dev_select(struct ata_port *ap, unsigned int device)
ata_sff_pause(ap); /* needed; also flushes, for mmio */
}
-static unsigned int sata_rcar_ata_devchk(struct ata_port *ap,
- unsigned int device)
+static bool sata_rcar_ata_devchk(struct ata_port *ap, unsigned int device)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
u8 nsect, lbal;
@@ -304,9 +299,9 @@ static unsigned int sata_rcar_ata_devchk(struct ata_port *ap,
lbal = ioread32(ioaddr->lbal_addr);
if (nsect == 0x55 && lbal == 0xaa)
- return 1; /* found a device */
+ return true; /* found a device */
- return 0; /* nothing found */
+ return false; /* nothing found */
}
static int sata_rcar_wait_after_reset(struct ata_link *link,
@@ -399,8 +394,8 @@ static void sata_rcar_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
- tf->command = sata_rcar_check_status(ap);
- tf->feature = ioread32(ioaddr->error_addr);
+ tf->status = sata_rcar_check_status(ap);
+ tf->error = ioread32(ioaddr->error_addr);
tf->nsect = ioread32(ioaddr->nsect_addr);
tf->lbal = ioread32(ioaddr->lbal_addr);
tf->lbam = ioread32(ioaddr->lbam_addr);
@@ -857,7 +852,7 @@ static const struct of_device_id sata_rcar_match[] = {
.compatible = "renesas,rcar-gen3-sata",
.data = (void *)RCAR_GEN3_SATA
},
- { },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sata_rcar_match);
@@ -945,19 +940,17 @@ static int sata_rcar_suspend(struct device *dev)
struct ata_host *host = dev_get_drvdata(dev);
struct sata_rcar_priv *priv = host->private_data;
void __iomem *base = priv->base;
- int ret;
- ret = ata_host_suspend(host, PMSG_SUSPEND);
- if (!ret) {
- /* disable interrupts */
- iowrite32(0, base + ATAPI_INT_ENABLE_REG);
- /* mask */
- iowrite32(priv->sataint_mask, base + SATAINTMASK_REG);
+ ata_host_suspend(host, PMSG_SUSPEND);
- pm_runtime_put(dev);
- }
+ /* disable interrupts */
+ iowrite32(0, base + ATAPI_INT_ENABLE_REG);
+ /* mask */
+ iowrite32(priv->sataint_mask, base + SATAINTMASK_REG);
- return ret;
+ pm_runtime_put(dev);
+
+ return 0;
}
static int sata_rcar_resume(struct device *dev)
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index f8552559db7f..2e3418a82b44 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -194,24 +194,24 @@ static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
- u16 nsect, lbal, lbam, lbah, feature;
+ u16 nsect, lbal, lbam, lbah, error;
- tf->command = k2_stat_check_status(ap);
+ tf->status = k2_stat_check_status(ap);
tf->device = readw(ioaddr->device_addr);
- feature = readw(ioaddr->error_addr);
+ error = readw(ioaddr->error_addr);
nsect = readw(ioaddr->nsect_addr);
lbal = readw(ioaddr->lbal_addr);
lbam = readw(ioaddr->lbam_addr);
lbah = readw(ioaddr->lbah_addr);
- tf->feature = feature;
+ tf->error = error;
tf->nsect = nsect;
tf->lbal = lbal;
tf->lbam = lbam;
tf->lbah = lbah;
if (tf->flags & ATA_TFLAG_LBA48) {
- tf->hob_feature = feature >> 8;
+ tf->hob_feature = error >> 8;
tf->hob_nsect = nsect >> 8;
tf->hob_lbal = lbal >> 8;
tf->hob_lbam = lbam >> 8;
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 8fa952cb9f7f..87e4ed66b306 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -183,24 +183,24 @@ static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
- u16 nsect, lbal, lbam, lbah, feature;
+ u16 nsect, lbal, lbam, lbah, error;
- tf->command = ata_sff_check_status(ap);
+ tf->status = ata_sff_check_status(ap);
tf->device = readw(ioaddr->device_addr);
- feature = readw(ioaddr->error_addr);
+ error = readw(ioaddr->error_addr);
nsect = readw(ioaddr->nsect_addr);
lbal = readw(ioaddr->lbal_addr);
lbam = readw(ioaddr->lbam_addr);
lbah = readw(ioaddr->lbah_addr);
- tf->feature = feature;
+ tf->error = error;
tf->nsect = nsect;
tf->lbal = lbal;
tf->lbam = lbam;
tf->lbah = lbah;
if (tf->flags & ATA_TFLAG_LBA48) {
- tf->hob_feature = feature >> 8;
+ tf->hob_feature = error >> 8;
tf->hob_nsect = nsect >> 8;
tf->hob_lbal = lbal >> 8;
tf->hob_lbam = lbam >> 8;
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 422753d52244..a31ffe16e626 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1112,6 +1112,8 @@ DPRINTK("iovcnt = %d\n",skb_shinfo(skb)->nr_frags);
skb_data3 = skb->data[3];
paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&eni_dev->pci_dev->dev, paddr))
+ return enq_next;
ENI_PRV_PADDR(skb) = paddr;
/* prepare DMA queue entries */
j = 0;
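dma_map_single() can fail (for instance when IOMMU space is exhausted), and a failed mapping must never reach the hardware; the hunk above is an instance of the canonical check. Generic sketch (the driver's enq_next return value is its own backpressure convention):

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* nothing was mapped; no unmap needed */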
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 3bc3c314a467..4f67404fe64c 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1676,6 +1676,8 @@ static int fs_init(struct fs_dev *dev)
dev->hw_base = pci_resource_start(pci_dev, 0);
dev->base = ioremap(dev->hw_base, 0x1000);
+ if (!dev->base)
+ return 1;
reset_chip (dev);
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index bc5a6ab6fa4b..1a50de39f5b5 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -861,7 +861,6 @@ static void ns_init_card_error(ns_dev *card, int error)
static scq_info *get_scq(ns_dev *card, int size, u32 scd)
{
scq_info *scq;
- int i;
if (size != VBR_SCQSIZE && size != CBR_SCQSIZE)
return NULL;
@@ -875,9 +874,8 @@ static scq_info *get_scq(ns_dev *card, int size, u32 scd)
kfree(scq);
return NULL;
}
- scq->skb = kmalloc_array(size / NS_SCQE_SIZE,
- sizeof(*scq->skb),
- GFP_KERNEL);
+ scq->skb = kcalloc(size / NS_SCQE_SIZE, sizeof(*scq->skb),
+ GFP_KERNEL);
if (!scq->skb) {
dma_free_coherent(&card->pcidev->dev,
2 * size, scq->org, scq->dma);
@@ -890,15 +888,11 @@ static scq_info *get_scq(ns_dev *card, int size, u32 scd)
scq->last = scq->base + (scq->num_entries - 1);
scq->tail = scq->last;
scq->scd = scd;
- scq->num_entries = size / NS_SCQE_SIZE;
scq->tbd_count = 0;
init_waitqueue_head(&scq->scqfull_waitq);
scq->full = 0;
spin_lock_init(&scq->lock);
- for (i = 0; i < scq->num_entries; i++)
- scq->skb[i] = NULL;
-
return scq;
}
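kcalloc() is kmalloc_array() plus __GFP_ZERO: the array comes back zero-initialized, which is what allows the explicit scq->skb[i] = NULL loop (and the duplicate num_entries assignment) to be dropped. Equivalent spelling:

	scq->skb = kmalloc_array(size / NS_SCQE_SIZE, sizeof(*scq->skb),
				 GFP_KERNEL | __GFP_ZERO);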
diff --git a/drivers/auxdisplay/lcd2s.c b/drivers/auxdisplay/lcd2s.c
index 38ba08628ccb..2578b2d45439 100644
--- a/drivers/auxdisplay/lcd2s.c
+++ b/drivers/auxdisplay/lcd2s.c
@@ -238,7 +238,7 @@ static int lcd2s_redefine_char(struct charlcd *lcd, char *esc)
if (buf[1] > 7)
return 1;
- i = 0;
+ i = 2;
shift = 0;
value = 0;
while (*esc && i < LCD2S_CHARACTER_SIZE + 2) {
@@ -298,6 +298,10 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c,
I2C_FUNC_SMBUS_WRITE_BLOCK_DATA))
return -EIO;
+ lcd2s = devm_kzalloc(&i2c->dev, sizeof(*lcd2s), GFP_KERNEL);
+ if (!lcd2s)
+ return -ENOMEM;
+
/* Test, if the display is responding */
err = lcd2s_i2c_smbus_write_byte(i2c, LCD2S_CMD_DISPLAY_OFF);
if (err < 0)
@@ -307,12 +311,6 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c,
if (!lcd)
return -ENOMEM;
- lcd2s = kzalloc(sizeof(struct lcd2s_data), GFP_KERNEL);
- if (!lcd2s) {
- err = -ENOMEM;
- goto fail1;
- }
-
lcd->drvdata = lcd2s;
lcd2s->i2c = i2c;
lcd2s->charlcd = lcd;
@@ -321,26 +319,24 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c,
err = device_property_read_u32(&i2c->dev, "display-height-chars",
&lcd->height);
if (err)
- goto fail2;
+ goto fail1;
err = device_property_read_u32(&i2c->dev, "display-width-chars",
&lcd->width);
if (err)
- goto fail2;
+ goto fail1;
lcd->ops = &lcd2s_ops;
err = charlcd_register(lcd2s->charlcd);
if (err)
- goto fail2;
+ goto fail1;
i2c_set_clientdata(i2c, lcd2s);
return 0;
-fail2:
- kfree(lcd2s);
fail1:
- kfree(lcd);
+ charlcd_free(lcd2s->charlcd);
return err;
}
@@ -349,7 +345,7 @@ static int lcd2s_i2c_remove(struct i2c_client *i2c)
struct lcd2s_data *lcd2s = i2c_get_clientdata(i2c);
charlcd_unregister(lcd2s->charlcd);
- kfree(lcd2s->charlcd);
+ charlcd_free(lcd2s->charlcd);
return 0;
}
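Moving the private data to devm_kzalloc() ties its lifetime to the I2C client, which is what lets the kfree()-based fail2 unwinding go away; only the charlcd object still needs explicit cleanup via charlcd_free() on the error path and in remove(). Sketch of the resulting ownership:

	lcd2s = devm_kzalloc(&i2c->dev, sizeof(*lcd2s), GFP_KERNEL);
	if (!lcd2s)
		return -ENOMEM;	/* devres: nothing to unwind manually */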
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 976154140f0b..1d6636ebaac5 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -339,6 +339,46 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
return !ret;
}
+#ifdef CONFIG_ACPI_CPPC_LIB
+#include <acpi/cppc_acpi.h>
+
+void topology_init_cpu_capacity_cppc(void)
+{
+ struct cppc_perf_caps perf_caps;
+ int cpu;
+
+ if (likely(acpi_disabled || !acpi_cpc_valid()))
+ return;
+
+ raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
+ GFP_KERNEL);
+ if (!raw_capacity)
+ return;
+
+ for_each_possible_cpu(cpu) {
+ if (!cppc_get_perf_caps(cpu, &perf_caps) &&
+ (perf_caps.highest_perf >= perf_caps.nominal_perf) &&
+ (perf_caps.highest_perf >= perf_caps.lowest_perf)) {
+ raw_capacity[cpu] = perf_caps.highest_perf;
+ pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
+ cpu, raw_capacity[cpu]);
+ continue;
+ }
+
+ pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
+ pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
+ goto exit;
+ }
+
+ topology_normalize_cpu_scale();
+ schedule_work(&update_topology_flags_work);
+ pr_debug("cpu_capacity: cpu_capacity initialization done\n");
+
+exit:
+ free_raw_capacity();
+}
+#endif
+
#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
@@ -387,9 +427,8 @@ static int __init register_cpufreq_notifier(void)
int ret;
/*
- * on ACPI-based systems we need to use the default cpu capacity
- * until we have the necessary code to parse the cpu capacity, so
- * skip registering cpufreq notifier.
+ * On ACPI-based systems skip registering cpufreq notifier as cpufreq
+ * information is not needed for cpu capacity initialization.
*/
if (!acpi_disabled || !raw_capacity)
return -EINVAL;
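For reference, a hedged sketch of the normalization step invoked above; topology_normalize_cpu_scale() is assumed to scale each raw value against the system-wide maximum so the largest CPU reports SCHED_CAPACITY_SCALE (1024):

	/* 'capacity_scale' is a hypothetical name for the largest raw value seen. */
	for_each_possible_cpu(cpu)
		topology_set_cpu_scale(cpu, (u64)raw_capacity[cpu] *
					    SCHED_CAPACITY_SCALE / capacity_scale);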
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 7476f393df97..8feb85e186e3 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -16,7 +16,7 @@
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/slab.h>
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
#include <linux/mutex.h>
#include "base.h"
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 7bb957b11861..3d6430eb0c6a 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -21,7 +21,7 @@
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/netdevice.h>
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 5fc258073bc7..2ef23fce0860 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -275,7 +275,7 @@ static ssize_t print_cpus_isolated(struct device *dev,
return -ENOMEM;
cpumask_andnot(isolated, cpu_possible_mask,
- housekeeping_cpumask(HK_FLAG_DOMAIN));
+ housekeeping_cpumask(HK_TYPE_DOMAIN));
len = sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(isolated));
free_cpumask_var(isolated);
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index f41063ac1aee..db5a03a0618e 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -17,7 +17,7 @@
#include <linux/syscalls.h>
#include <linux/mount.h>
#include <linux/device.h>
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
diff --git a/drivers/base/init.c b/drivers/base/init.c
index a9f57c22fb9e..d8d0fe687111 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -35,5 +35,6 @@ void __init driver_init(void)
auxiliary_bus_init();
cpu_dev_init();
memory_dev_init();
+ node_dev_init();
container_dev_init();
}
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 365cd4a7f239..7222ff9b5e05 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -215,6 +215,7 @@ static int memory_block_online(struct memory_block *mem)
adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
nr_vmemmap_pages);
+ mem->zone = zone;
return ret;
}
@@ -225,6 +226,9 @@ static int memory_block_offline(struct memory_block *mem)
unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
int ret;
+ if (!mem->zone)
+ return -EINVAL;
+
/*
* Unaccount before offlining, such that unpopulated zone and kthreads
* can properly be torn down in offline_pages().
@@ -234,7 +238,7 @@ static int memory_block_offline(struct memory_block *mem)
-nr_vmemmap_pages);
ret = offline_pages(start_pfn + nr_vmemmap_pages,
- nr_pages - nr_vmemmap_pages, mem->group);
+ nr_pages - nr_vmemmap_pages, mem->zone, mem->group);
if (ret) {
/* offline_pages() failed. Account back. */
if (nr_vmemmap_pages)
@@ -246,6 +250,7 @@ static int memory_block_offline(struct memory_block *mem)
if (nr_vmemmap_pages)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
+ mem->zone = NULL;
return ret;
}
@@ -411,11 +416,10 @@ static ssize_t valid_zones_show(struct device *dev,
*/
if (mem->state == MEM_ONLINE) {
/*
- * The block contains more than one zone can not be offlined.
- * This can happen e.g. for ZONE_DMA and ZONE_DMA32
+ * If !mem->zone, the memory block spans multiple zones and
+ * cannot get offlined.
*/
- default_zone = test_pages_in_a_zone(start_pfn,
- start_pfn + nr_pages);
+ default_zone = mem->zone;
if (!default_zone)
return sysfs_emit(buf, "%s\n", "none");
len += sysfs_emit_at(buf, len, "%s", default_zone->name);
@@ -555,6 +559,8 @@ static ssize_t hard_offline_page_store(struct device *dev,
return -EINVAL;
pfn >>= PAGE_SHIFT;
ret = memory_failure(pfn, 0);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
return ret ? ret : count;
}
@@ -613,11 +619,7 @@ static const struct attribute_group *memory_memblk_attr_groups[] = {
NULL,
};
-/*
- * register_memory - Setup a sysfs device for a memory block
- */
-static
-int register_memory(struct memory_block *memory)
+static int __add_memory_block(struct memory_block *memory)
{
int ret;
@@ -641,9 +643,85 @@ int register_memory(struct memory_block *memory)
return ret;
}
-static int init_memory_block(unsigned long block_id, unsigned long state,
- unsigned long nr_vmemmap_pages,
- struct memory_group *group)
+static struct zone *early_node_zone_for_memory_block(struct memory_block *mem,
+ int nid)
+{
+ const unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
+ const unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+ struct zone *zone, *matching_zone = NULL;
+ pg_data_t *pgdat = NODE_DATA(nid);
+ int i;
+
+ /*
+ * This logic only works for early memory, when the applicable zones
+ * already span the memory block. We don't expect overlapping zones on
+ * a single node for early memory. So if we're told that some PFNs
+ * of a node fall into this memory block, we can assume that all node
+ * zones that intersect with the memory block are actually applicable.
+ * No need to look at the memmap.
+ */
+ for (i = 0; i < MAX_NR_ZONES; i++) {
+ zone = pgdat->node_zones + i;
+ if (!populated_zone(zone))
+ continue;
+ if (!zone_intersects(zone, start_pfn, nr_pages))
+ continue;
+ if (!matching_zone) {
+ matching_zone = zone;
+ continue;
+ }
+ /* Spans multiple zones ... */
+ matching_zone = NULL;
+ break;
+ }
+ return matching_zone;
+}
+
+#ifdef CONFIG_NUMA
+/**
+ * memory_block_add_nid() - Indicate that system RAM falling into this memory
+ * block device (partially) belongs to the given node.
+ * @mem: The memory block device.
+ * @nid: The node id.
+ * @context: The memory initialization context.
+ *
+ * Indicate that system RAM falling into this memory block (partially) belongs
+ * to the given node. If the context indicates ("early") that we are adding the
+ * node during node device subsystem initialization, this will also properly
+ * set/adjust mem->zone based on the zone ranges of the given node.
+ */
+void memory_block_add_nid(struct memory_block *mem, int nid,
+ enum meminit_context context)
+{
+ if (context == MEMINIT_EARLY && mem->nid != nid) {
+ /*
+ * For early memory we have to determine the zone when setting
+ * the node id and handle multiple nodes spanning a single
+ * memory block by indicating via zone == NULL that we're not
+ * dealing with a single zone. So if we're setting the node id
+ * the first time, determine if there is a single zone. If we're
+ * setting the node id a second time to a different node,
+ * invalidate the single detected zone.
+ */
+ if (mem->nid == NUMA_NO_NODE)
+ mem->zone = early_node_zone_for_memory_block(mem, nid);
+ else
+ mem->zone = NULL;
+ }
+
+ /*
+ * If this memory block spans multiple nodes, we only indicate
+ * the last processed node. If we span multiple nodes (not applicable
+ * to hotplugged memory), zone == NULL will prohibit memory offlining
+ * and consequently unplug.
+ */
+ mem->nid = nid;
+}
+#endif
+
+static int add_memory_block(unsigned long block_id, unsigned long state,
+ unsigned long nr_vmemmap_pages,
+ struct memory_group *group)
{
struct memory_block *mem;
int ret = 0;
@@ -663,17 +741,30 @@ static int init_memory_block(unsigned long block_id, unsigned long state,
mem->nr_vmemmap_pages = nr_vmemmap_pages;
INIT_LIST_HEAD(&mem->group_next);
+#ifndef CONFIG_NUMA
+ if (state == MEM_ONLINE)
+ /*
+ * MEM_ONLINE at this point implies early memory. With NUMA,
+ * we'll determine the zone when setting the node id via
+ * memory_block_add_nid(). Memory hotplug updates the zone
+ * manually when memory onlining/offlining succeeds.
+ */
+ mem->zone = early_node_zone_for_memory_block(mem, NUMA_NO_NODE);
+#endif /* CONFIG_NUMA */
+
+ ret = __add_memory_block(mem);
+ if (ret)
+ return ret;
+
if (group) {
mem->group = group;
list_add(&mem->group_next, &group->memory_blocks);
}
- ret = register_memory(mem);
-
- return ret;
+ return 0;
}
-static int add_memory_block(unsigned long base_section_nr)
+static int __init add_boot_memory_block(unsigned long base_section_nr)
{
int section_count = 0;
unsigned long nr;
@@ -685,11 +776,18 @@ static int add_memory_block(unsigned long base_section_nr)
if (section_count == 0)
return 0;
- return init_memory_block(memory_block_id(base_section_nr),
- MEM_ONLINE, 0, NULL);
+ return add_memory_block(memory_block_id(base_section_nr),
+ MEM_ONLINE, 0, NULL);
+}
+
+static int add_hotplug_memory_block(unsigned long block_id,
+ unsigned long nr_vmemmap_pages,
+ struct memory_group *group)
+{
+ return add_memory_block(block_id, MEM_OFFLINE, nr_vmemmap_pages, group);
}
-static void unregister_memory(struct memory_block *memory)
+static void remove_memory_block(struct memory_block *memory)
{
if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
return;
@@ -728,8 +826,7 @@ int create_memory_block_devices(unsigned long start, unsigned long size,
return -EINVAL;
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
- ret = init_memory_block(block_id, MEM_OFFLINE, vmemmap_pages,
- group);
+ ret = add_hotplug_memory_block(block_id, vmemmap_pages, group);
if (ret)
break;
}
@@ -740,7 +837,7 @@ int create_memory_block_devices(unsigned long start, unsigned long size,
mem = find_memory_block_by_id(block_id);
if (WARN_ON_ONCE(!mem))
continue;
- unregister_memory(mem);
+ remove_memory_block(mem);
}
}
return ret;
@@ -769,7 +866,7 @@ void remove_memory_block_devices(unsigned long start, unsigned long size)
if (WARN_ON_ONCE(!mem))
continue;
unregister_memory_block_under_nodes(mem);
- unregister_memory(mem);
+ remove_memory_block(mem);
}
}
@@ -829,7 +926,7 @@ void __init memory_dev_init(void)
*/
for (nr = 0; nr <= __highest_present_section_nr;
nr += sections_per_block) {
- ret = add_memory_block(nr);
+ ret = add_boot_memory_block(nr);
if (ret)
panic("%s() failed to add memory block: %d\n", __func__,
ret);
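early_node_zone_for_memory_block() leans on zone_intersects(); a sketch of that test under the assumption that a populated zone spans [zone_start_pfn, zone_end_pfn):

static bool example_zone_intersects(struct zone *zone,
				    unsigned long start_pfn,
				    unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	return start_pfn < zone_end_pfn(zone) &&
	       start_pfn + nr_pages > zone->zone_start_pfn;
}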
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 87acc47e8951..ec8bb24a5a22 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -796,15 +796,12 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
}
static void do_register_memory_block_under_node(int nid,
- struct memory_block *mem_blk)
+ struct memory_block *mem_blk,
+ enum meminit_context context)
{
int ret;
- /*
- * If this memory block spans multiple nodes, we only indicate
- * the last processed node.
- */
- mem_blk->nid = nid;
+ memory_block_add_nid(mem_blk, nid, context);
ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
&mem_blk->dev.kobj,
@@ -857,7 +854,7 @@ static int register_mem_block_under_node_early(struct memory_block *mem_blk,
if (page_nid != nid)
continue;
- do_register_memory_block_under_node(nid, mem_blk);
+ do_register_memory_block_under_node(nid, mem_blk, MEMINIT_EARLY);
return 0;
}
/* mem section does not span the specified node */
@@ -873,7 +870,7 @@ static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
{
int nid = *(int *)arg;
- do_register_memory_block_under_node(nid, mem_blk);
+ do_register_memory_block_under_node(nid, mem_blk, MEMINIT_HOTPLUG);
return 0;
}
@@ -892,8 +889,9 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}
-void link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
- enum meminit_context context)
+void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
+ unsigned long end_pfn,
+ enum meminit_context context)
{
walk_memory_blocks_func_t func;
@@ -1065,26 +1063,30 @@ static const struct attribute_group *cpu_root_attr_groups[] = {
};
#define NODE_CALLBACK_PRI 2 /* lower than SLAB */
-static int __init register_node_type(void)
+void __init node_dev_init(void)
{
- int ret;
+ static struct notifier_block node_memory_callback_nb = {
+ .notifier_call = node_memory_callback,
+ .priority = NODE_CALLBACK_PRI,
+ };
+ int ret, i;
BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);
ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
- if (!ret) {
- static struct notifier_block node_memory_callback_nb = {
- .notifier_call = node_memory_callback,
- .priority = NODE_CALLBACK_PRI,
- };
- register_hotmemory_notifier(&node_memory_callback_nb);
- }
+ if (ret)
+ panic("%s() failed to register subsystem: %d\n", __func__, ret);
+
+ register_hotmemory_notifier(&node_memory_callback_nb);
/*
- * Note: we're not going to unregister the node class if we fail
- * to register the node state class attribute files.
+ * Create all node devices, which will properly link the node
+ * to applicable memory block devices and already created cpu devices.
*/
- return ret;
+ for_each_online_node(i) {
+ ret = register_one_node(i);
+ if (ret)
+ panic("%s() failed to add node: %d\n", __func__, ret);
+ }
}
-postcore_initcall(register_node_type);
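With the postcore_initcall gone, node_dev_init() is instead called explicitly from driver_init() (see the init.c hunk earlier in this series), giving a deterministic bring-up order in which memory block and cpu devices exist before the node devices link to them:

	cpu_dev_init();
	memory_dev_init();
	node_dev_init();	/* nodes link to the devices created above */
	container_dev_init();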
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 5db704f02e71..1ee878d126fd 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -636,6 +636,18 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
atomic_read(&genpd->sd_count) > 0)
return -EBUSY;
+ /*
+ * The children must be in their deepest (powered-off) states to allow
+ * the parent to be powered off. Note that there's no need for
+ * additional locking, as powering on a child requires the parent's
+ * lock to be acquired first.
+ */
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ struct generic_pm_domain *child = link->child;
+ if (child->state_idx < child->state_count - 1)
+ return -EBUSY;
+ }
+
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
enum pm_qos_flags_status stat;
@@ -1073,6 +1085,13 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
|| atomic_read(&genpd->sd_count) > 0)
return;
+ /* Check that the children are in their deepest (powered-off) state. */
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ struct generic_pm_domain *child = link->child;
+ if (child->state_idx < child->state_count - 1)
+ return;
+ }
+
/* Choose the deepest state when suspending */
genpd->state_idx = genpd->state_count - 1;
if (_genpd_power_off(genpd, false))
@@ -2058,9 +2077,9 @@ static int genpd_remove(struct generic_pm_domain *genpd)
kfree(link);
}
- genpd_debug_remove(genpd);
list_del(&genpd->gpd_list_node);
genpd_unlock(genpd);
+ genpd_debug_remove(genpd);
cancel_work_sync(&genpd->power_off_work);
if (genpd_is_cpu_domain(genpd))
free_cpumask_var(genpd->cpus);
@@ -2248,12 +2267,8 @@ int of_genpd_add_provider_simple(struct device_node *np,
/* Parse genpd OPP table */
if (genpd->set_performance_state) {
ret = dev_pm_opp_of_add_table(&genpd->dev);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
/*
* Save table for faster processing while setting performance
@@ -2312,9 +2327,8 @@ int of_genpd_add_provider_onecell(struct device_node *np,
if (genpd->set_performance_state) {
ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
- i, ret);
+ dev_err_probe(&genpd->dev, ret,
+ "Failed to add OPP table for index %d\n", i);
goto error;
}
@@ -2672,12 +2686,8 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
ret = genpd_add_device(pd, dev, base_dev);
mutex_unlock(&gpd_list_lock);
- if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to add to PM domain %s: %d",
- pd->name, ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
dev->pm_domain->detach = genpd_dev_pm_detach;
dev->pm_domain->sync = genpd_dev_pm_sync;
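dev_err_probe() collapses the open-coded pattern removed in the three hunks above: it logs via dev_err() unless the error is -EPROBE_DEFER (in which case it records the deferral reason instead) and then returns the error, so the call can sit directly in a return statement:

	return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);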
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 04ea92cbd9cf..c50139207794 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -485,7 +485,7 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
trace_device_pm_callback_start(dev, info, state.event);
error = cb(dev);
trace_device_pm_callback_end(dev, error);
- suspend_report_result(cb, error);
+ suspend_report_result(dev, cb, error);
initcall_debug_report(dev, calltime, cb, error);
@@ -1568,7 +1568,7 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
trace_device_pm_callback_start(dev, info, state.event);
error = cb(dev, state);
trace_device_pm_callback_end(dev, error);
- suspend_report_result(cb, error);
+ suspend_report_result(dev, cb, error);
initcall_debug_report(dev, calltime, cb, error);
@@ -1855,7 +1855,7 @@ unlock:
device_unlock(dev);
if (ret < 0) {
- suspend_report_result(callback, ret);
+ suspend_report_result(dev, callback, ret);
pm_runtime_put(dev);
return ret;
}
@@ -1960,10 +1960,10 @@ int dpm_suspend_start(pm_message_t state)
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
-void __suspend_report_result(const char *function, void *fn, int ret)
+void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
{
if (ret)
- pr_err("%s(): %pS returns %d\n", function, fn, ret);
+ dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
@@ -2018,7 +2018,9 @@ static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
void device_pm_check_callbacks(struct device *dev)
{
- spin_lock_irq(&dev->power.lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
dev->power.no_pm_callbacks =
(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
!dev->bus->suspend && !dev->bus->resume)) &&
@@ -2027,7 +2029,7 @@ void device_pm_check_callbacks(struct device *dev)
(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
!dev->driver->suspend && !dev->driver->resume));
- spin_unlock_irq(&dev->power.lock);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
}
bool dev_pm_skip_suspend(struct device *dev)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 2f3cce17219b..d4059e6ffeae 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1476,11 +1476,16 @@ EXPORT_SYMBOL_GPL(pm_runtime_enable);
static void pm_runtime_disable_action(void *data)
{
+ pm_runtime_dont_use_autosuspend(data);
pm_runtime_disable(data);
}
/**
* devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
+ *
+ * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
+ * you at driver exit time if needed.
+ *
* @dev: Device to handle.
*/
int devm_pm_runtime_enable(struct device *dev)
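Probe-time usage implied by the new note, as a sketch (the 100 ms delay is illustrative only):

	pm_runtime_set_autosuspend_delay(dev, 100);
	pm_runtime_use_autosuspend(dev);
	ret = devm_pm_runtime_enable(dev);	/* both calls undone automatically at unbind */
	if (ret)
		return ret;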
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 0004db4a9d3b..d487a6bac630 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -289,7 +289,7 @@ EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
*
* Enables wakeirq conditionally. We need to enable wake-up interrupt
* lazily on the first rpm_suspend(). This is needed as the consumer device
- * starts in RPM_SUSPENDED state, and the the first pm_runtime_get() would
+ * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
* otherwise try to disable already disabled wakeirq. The wake-up interrupt
* starts disabled with IRQ_NOAUTOEN set.
*
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 8666590201c9..a57d469676ca 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -587,7 +587,7 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws)
* @ws: Wakeup source to handle.
*
* Update the @ws' statistics and, if @ws has just been activated, notify the PM
- * core of the event by incrementing the counter of of wakeup events being
+ * core of the event by incrementing the counter of the wakeup events being
* processed.
*/
static void wakeup_source_activate(struct wakeup_source *ws)
@@ -733,7 +733,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
/*
* Increment the counter of registered wakeup events and decrement the
- * couter of wakeup events in progress simultaneously.
+ * counter of wakeup events in progress simultaneously.
*/
cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
trace_wakeup_source_deactivate(ws->name, cec);
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index b1905916f7af..b4df36c7b17d 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -31,6 +31,7 @@ struct regmap_format {
size_t buf_size;
size_t reg_bytes;
size_t pad_bytes;
+ size_t reg_downshift;
size_t val_bytes;
void (*format_write)(struct regmap *map,
unsigned int reg, unsigned int val);
@@ -62,6 +63,7 @@ struct regmap {
regmap_unlock unlock;
void *lock_arg; /* This is passed to lock/unlock functions */
gfp_t alloc_flags;
+ unsigned int reg_base;
struct device *dev; /* Device we do I/O on */
void *work_buf; /* Scratch buffer used to format I/O */
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 4a446259a184..400c7412a7dc 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -535,7 +535,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
/*
* Ignore masked IRQs and ack if we need to; we ack early so
- * there is no race between handling and acknowleding the
+ * there is no race between handling and acknowledging the
* interrupt. We assume that typically few of the interrupts
* will fire simultaneously so don't worry about overhead from
* doing a write per register.
@@ -1045,7 +1045,7 @@ int devm_regmap_add_irq_chip_fwnode(struct device *dev,
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);
/**
- * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip()
+ * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
*
* @dev: The device pointer on which irq_chip belongs to.
* @map: The regmap for the device.
@@ -1074,7 +1074,7 @@ EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
/**
* devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
*
- * @dev: Device for which which resource was allocated.
+ * @dev: Device for which the resource was allocated.
* @irq: Primary IRQ for the device.
* @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
*
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 8f9fe5fd4707..5e12f7cb5147 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -821,8 +821,11 @@ struct regmap *__regmap_init(struct device *dev,
else
map->alloc_flags = GFP_KERNEL;
+ map->reg_base = config->reg_base;
+
map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
+ map->format.reg_downshift = config->reg_downshift;
map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
config->val_bits + config->pad_bits, 8);
@@ -1735,6 +1738,8 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
return ret;
}
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
map->format.format_reg(map->work_buf, reg, map->reg_shift);
regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
map->write_flag_mask);
@@ -1905,6 +1910,8 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
return ret;
}
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
map->format.format_write(map, reg, val);
trace_regmap_hw_write_start(map, reg, 1);
@@ -2346,6 +2353,8 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
unsigned int reg = regs[i].reg;
unsigned int val = regs[i].def;
trace_regmap_hw_write_start(map, reg, 1);
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
map->format.format_reg(u8, reg, map->reg_shift);
u8 += reg_bytes + pad_bytes;
map->format.format_val(u8, val, 0);
@@ -2673,6 +2682,8 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
return ret;
}
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
map->format.format_reg(map->work_buf, reg, map->reg_shift);
regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
map->read_flag_mask);
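Taken together, the regmap hunks above apply reg_base first and reg_downshift second on every access (reg += reg_base; reg >>= reg_downshift). A hedged sketch of a regmap_config using the new fields (the concrete values are invented for illustration):

    static const struct regmap_config foo_regmap_config = {
        .reg_bits      = 16,
        .val_bits      = 32,
        .reg_base      = 0x100,  /* added to every register number */
        .reg_downshift = 2,      /* then shifted: byte -> word addressing */
    };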
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index fc24e89f9592..e9d1efcda89b 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -14,11 +14,11 @@
#include <linux/hardirq.h>
#include <linux/topology.h>
-#define define_id_show_func(name) \
+#define define_id_show_func(name, fmt) \
static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
- return sysfs_emit(buf, "%d\n", topology_##name(dev->id)); \
+ return sysfs_emit(buf, fmt "\n", topology_##name(dev->id)); \
}
#define define_siblings_read_func(name, mask) \
@@ -42,22 +42,25 @@ static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \
off, count); \
}
-define_id_show_func(physical_package_id);
+define_id_show_func(physical_package_id, "%d");
static DEVICE_ATTR_RO(physical_package_id);
#ifdef TOPOLOGY_DIE_SYSFS
-define_id_show_func(die_id);
+define_id_show_func(die_id, "%d");
static DEVICE_ATTR_RO(die_id);
#endif
#ifdef TOPOLOGY_CLUSTER_SYSFS
-define_id_show_func(cluster_id);
+define_id_show_func(cluster_id, "%d");
static DEVICE_ATTR_RO(cluster_id);
#endif
-define_id_show_func(core_id);
+define_id_show_func(core_id, "%d");
static DEVICE_ATTR_RO(core_id);
+define_id_show_func(ppin, "0x%llx");
+static DEVICE_ATTR_ADMIN_RO(ppin);
+
define_siblings_read_func(thread_siblings, sibling_cpumask);
static BIN_ATTR_RO(thread_siblings, 0);
static BIN_ATTR_RO(thread_siblings_list, 0);
@@ -87,7 +90,7 @@ static BIN_ATTR_RO(package_cpus, 0);
static BIN_ATTR_RO(package_cpus_list, 0);
#ifdef TOPOLOGY_BOOK_SYSFS
-define_id_show_func(book_id);
+define_id_show_func(book_id, "%d");
static DEVICE_ATTR_RO(book_id);
define_siblings_read_func(book_siblings, book_cpumask);
static BIN_ATTR_RO(book_siblings, 0);
@@ -95,7 +98,7 @@ static BIN_ATTR_RO(book_siblings_list, 0);
#endif
#ifdef TOPOLOGY_DRAWER_SYSFS
-define_id_show_func(drawer_id);
+define_id_show_func(drawer_id, "%d");
static DEVICE_ATTR_RO(drawer_id);
define_siblings_read_func(drawer_siblings, drawer_cpumask);
static BIN_ATTR_RO(drawer_siblings, 0);
@@ -145,6 +148,7 @@ static struct attribute *default_attrs[] = {
#ifdef TOPOLOGY_DRAWER_SYSFS
&dev_attr_drawer_id.attr,
#endif
+ &dev_attr_ppin.attr,
NULL
};
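With the extra format argument, define_id_show_func(ppin, "0x%llx") above expands to roughly the following (macro-expansion sketch):

    static ssize_t ppin_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
    {
        return sysfs_emit(buf, "0x%llx\n", topology_ppin(dev->id));
    }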
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index 62f5bfa5065d..fd91a39f02c7 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -303,7 +303,7 @@ u32 bcma_chipco_gpio_outen(struct bcma_drv_cc *cc, u32 mask, u32 value)
EXPORT_SYMBOL_GPL(bcma_chipco_gpio_outen);
/*
- * If the bit is set to 0, chipcommon controlls this GPIO,
+ * If the bit is set to 0, chipcommon controls this GPIO,
* if the bit is set to 1, it is used by some part of the chip and not our code.
*/
u32 bcma_chipco_gpio_control(struct bcma_drv_cc *cc, u32 mask, u32 value)
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index 3056f81efca4..263ef6fa1d0f 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -206,7 +206,7 @@ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
usleep_range(2000, 2500);
}
-/* Disable to allow reading SPROM. Don't know the adventages of enabling it. */
+/* Disable to allow reading SPROM. Don't know the advantages of enabling it. */
void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable)
{
struct bcma_bus *bus = cc->core->bus;
@@ -234,7 +234,7 @@ static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
switch (bus->chipinfo.id) {
case BCMA_CHIP_ID_BCM4313:
/*
- * enable 12 mA drive strenth for 4313 and set chipControl
+ * enable 12 mA drive strength for 4313 and set chipControl
* register bit 1
*/
bcma_chipco_chipctl_maskset(cc, 0,
@@ -249,7 +249,7 @@ static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
case BCMA_CHIP_ID_BCM43224:
case BCMA_CHIP_ID_BCM43421:
/*
- * enable 12 mA drive strenth for 43224 and set chipControl
+ * enable 12 mA drive strength for 43224 and set chipControl
* register bit 15
*/
if (bus->chipinfo.rev == 0) {
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 8a1e4705bc87..1e74ec1c7f23 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -181,7 +181,6 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->set = bcma_gpio_set_value;
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
- chip->owner = THIS_MODULE;
chip->parent = bus->dev;
#if IS_BUILTIN(CONFIG_OF)
chip->of_node = cc->core->dev.of_node;
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index 6f8fc5f587fe..aa0581cda718 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -61,7 +61,7 @@ static u32 bcma_get_cfgspace_addr(struct bcma_drv_pci *pc, unsigned int dev,
{
u32 addr = 0;
- /* Issue config commands only when the data link is up (atleast
+ /* Issue config commands only when the data link is up (at least
* one external pcie device is present).
*/
if (dev >= 2 || !(bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_LSREG)
@@ -295,7 +295,7 @@ static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev,
if (cap_ptr == 0x00)
return cap_ptr;
- /* loop thr'u the capability list and see if the requested capabilty
+ /* loop through the capability list and see if the requested capability
* exists */
bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id, sizeof(u8));
while (cap_id != req_cap_id) {
@@ -317,7 +317,7 @@ static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev,
*buflen = 0;
- /* copy the cpability data excluding cap ID and next ptr */
+ /* copy the capability data excluding cap ID and next ptr */
cap_data = cap_ptr + 2;
if ((bufsize + cap_data) > PCI_CONFIG_SPACE_SIZE)
bufsize = PCI_CONFIG_SPACE_SIZE - cap_data;
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 8e7ca3e4c8c4..44392b624b20 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -293,7 +293,7 @@ static int bcma_register_devices(struct bcma_bus *bus)
int err;
list_for_each_entry(core, &bus->cores, list) {
- /* We support that cores ourself */
+ /* We support that core ourselves */
switch (core->id.id) {
case BCMA_CORE_4706_CHIPCOMMON:
case BCMA_CORE_CHIPCOMMON:
@@ -369,7 +369,7 @@ void bcma_unregister_cores(struct bcma_bus *bus)
if (bus->hosttype == BCMA_HOSTTYPE_SOC)
platform_device_unregister(bus->drv_cc.watchdog);
- /* Now noone uses internally-handled cores, we can free them */
+ /* Now no one uses internally-handled cores, we can free them */
list_for_each_entry_safe(core, tmp, &bus->cores, list) {
list_del(&core->list);
put_device(&core->dev);
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index bd2c923a6586..3da01f173c63 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -28,7 +28,7 @@ static int(*get_fallback_sprom)(struct bcma_bus *dev, struct ssb_sprom *out);
* callback handler which fills the SPROM data structure. The fallback is
* used for PCI based BCMA devices, where no valid SPROM can be found
* in the shadow registers and to provide the SPROM for SoCs where BCMA is
- * to controll the system bus.
+ * to control the system bus.
*
* This function is useful for weird architectures that have a half-assed
* BCMA device hardwired to their PCI bus.
@@ -281,7 +281,7 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
SPEX(alpha2[0], SSB_SPROM8_CCODE, 0xff00, 8);
SPEX(alpha2[1], SSB_SPROM8_CCODE, 0x00ff, 0);
- /* Extract cores power info info */
+ /* Extract core's power info */
for (i = 0; i < ARRAY_SIZE(pwr_info_offset); i++) {
o = pwr_info_offset[i];
SPEX(core_pwr_info[i].itssi_2g, o + SSB_SROM8_2G_MAXP_ITSSI,
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 52484bcdedb9..8a91fcac6f82 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -12,7 +12,6 @@
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
-#include <linux/genhd.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/export.h>
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 6af111f568e4..384073ef2323 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -10,7 +10,6 @@
#include <linux/blk-mq.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/genhd.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
@@ -1019,9 +1018,9 @@ bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
iter.bi_size = cnt;
__bio_for_each_segment(bv, bio, iter, iter) {
- char *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
+ char *p = bvec_kmap_local(&bv);
skb_copy_bits(skb, soff, p, bv.bv_len);
- kunmap_atomic(p);
+ kunmap_local(p);
soff += bv.bv_len;
}
}
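The aoe conversion above is an instance of the generic bvec mapping pattern; as a sketch:

    char *p = bvec_kmap_local(&bv);  /* maps bv.bv_page with bv.bv_offset applied */
    /* ... access up to bv.bv_len bytes at p ... */
    kunmap_local(p);                 /* pairs with bvec_kmap_local() */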
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 72cf7603d51f..f5bcded3640d 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -138,15 +138,14 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
op_flags |= REQ_FUA | REQ_PREFLUSH;
op_flags |= REQ_SYNC;
- bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set);
- bio_set_dev(bio, bdev->md_bdev);
+ bio = bio_alloc_bioset(bdev->md_bdev, 1, op | op_flags, GFP_NOIO,
+ &drbd_md_io_bio_set);
bio->bi_iter.bi_sector = sector;
err = -EIO;
if (bio_add_page(bio, device->md_io.page, size, 0) != size)
goto out;
bio->bi_private = device;
bio->bi_end_io = drbd_md_endio;
- bio_set_op_attrs(bio, op, op_flags);
if (op != REQ_OP_WRITE && device->state.disk == D_DISKLESS && device->ldev == NULL)
/* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
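The drbd hunks in this and the following files are the same mechanical conversion to the new bio allocation interface; a before/after sketch (names abbreviated):

    /* before: three steps */
    bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, bs);
    bio_set_dev(bio, bdev);
    bio_set_op_attrs(bio, op, op_flags);

    /* after: bdev and the operation are fixed at allocation time */
    bio = bio_alloc_bioset(bdev, nr_vecs, op | op_flags, GFP_NOIO, bs);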
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index c1f816f896a8..df25eecf80af 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -976,12 +976,13 @@ static void drbd_bm_endio(struct bio *bio)
static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
{
- struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set);
struct drbd_device *device = ctx->device;
+ unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;
+ struct bio *bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op,
+ GFP_NOIO, &drbd_md_io_bio_set);
struct drbd_bitmap *b = device->bitmap;
struct page *page;
unsigned int len;
- unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;
sector_t on_disk_sector =
device->ldev->md.md_offset + device->ldev->md.bm_offset;
@@ -1006,14 +1007,12 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
bm_store_page_idx(page, page_nr);
} else
page = b->bm_pages[page_nr];
- bio_set_dev(bio, device->ldev->md_bdev);
bio->bi_iter.bi_sector = on_disk_sector;
/* bio_add_page of a single page to an empty bio will always succeed,
* according to api. Do we want to assert that? */
bio_add_page(bio, page, len, 0);
bio->bi_private = ctx;
bio->bi_end_io = drbd_bm_endio;
- bio_set_op_attrs(bio, op, 0);
if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
bio_io_error(bio);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index f27d5b0f9a0b..4b55e864a0a3 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -27,7 +27,6 @@
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
-#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/dynamic_debug.h>
#include <net/tcp.h>
@@ -638,9 +637,6 @@ enum {
STATE_SENT, /* Do not change state/UUIDs while this is set */
CALLBACK_PENDING, /* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
* pending, from drbd worker context.
- * If set, bdi_write_congested() returns true,
- * so shrink_page_list() would not recurse into,
- * and potentially deadlock on, this drbd worker.
*/
DISCONNECT_SENT,
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 6df2539e215b..fa00cf2ea952 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1279,16 +1279,16 @@ static void one_flush_endio(struct bio *bio)
static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
{
- struct bio *bio = bio_alloc(GFP_NOIO, 0);
+ struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0,
+ REQ_OP_FLUSH | REQ_PREFLUSH, GFP_NOIO);
struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);
- if (!bio || !octx) {
- drbd_warn(device, "Could not allocate a bio, CANNOT ISSUE FLUSH\n");
+
+ if (!octx) {
+ drbd_warn(device, "Could not allocate a octx, CANNOT ISSUE FLUSH\n");
/* FIXME: what else can I do now? disconnecting or detaching
* really does not help to improve the state of the world, either.
*/
- kfree(octx);
- if (bio)
- bio_put(bio);
+ bio_put(bio);
ctx->error = -ENOMEM;
put_ldev(device);
@@ -1298,10 +1298,8 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont
octx->device = device;
octx->ctx = ctx;
- bio_set_dev(bio, device->ldev->backing_bdev);
bio->bi_private = octx;
bio->bi_end_io = one_flush_endio;
- bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
device->flush_jif = jiffies;
set_bit(FLUSH_PENDING, &device->flags);
@@ -1646,7 +1644,6 @@ int drbd_submit_peer_request(struct drbd_device *device,
unsigned data_size = peer_req->i.size;
unsigned n_bios = 0;
unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
- int err = -ENOMEM;
/* TRIM/DISCARD: for now, always use the helper function
* blkdev_issue_zeroout(..., discard=true).
@@ -1687,15 +1684,10 @@ int drbd_submit_peer_request(struct drbd_device *device,
* generated bio, but a bio allocated on behalf of the peer.
*/
next_bio:
- bio = bio_alloc(GFP_NOIO, nr_pages);
- if (!bio) {
- drbd_err(device, "submit_ee: Allocation of a bio failed (nr_pages=%u)\n", nr_pages);
- goto fail;
- }
+ bio = bio_alloc(device->ldev->backing_bdev, nr_pages, op | op_flags,
+ GFP_NOIO);
/* > peer_req->i.sector, unless this is the first bio */
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, device->ldev->backing_bdev);
- bio_set_op_attrs(bio, op, op_flags);
bio->bi_private = peer_req;
bio->bi_end_io = drbd_peer_request_endio;
@@ -1726,14 +1718,6 @@ next_bio:
drbd_submit_bio_noacct(device, fault_type, bio);
} while (bios);
return 0;
-
-fail:
- while (bios) {
- bio = bios;
- bios = bios->bi_next;
- bio_put(bio);
- }
- return err;
}
static void drbd_remove_epoch_entry_interval(struct drbd_device *device,
@@ -2033,10 +2017,10 @@ static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_req
D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);
bio_for_each_segment(bvec, bio, iter) {
- void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
+ void *mapped = bvec_kmap_local(&bvec);
expect = min_t(int, data_size, bvec.bv_len);
err = drbd_recv_all_warn(peer_device->connection, mapped, expect);
- kunmap(bvec.bv_page);
+ kunmap_local(mapped);
if (err)
return err;
data_size -= expect;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 3235532ae077..2f608525a879 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -30,7 +30,8 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio
return NULL;
memset(req, 0, sizeof(*req));
- req->private_bio = bio_clone_fast(bio_src, GFP_NOIO, &drbd_io_bio_set);
+ req->private_bio = bio_alloc_clone(device->ldev->backing_bdev, bio_src,
+ GFP_NOIO, &drbd_io_bio_set);
req->private_bio->bi_private = req;
req->private_bio->bi_end_io = drbd_request_endio;
@@ -909,8 +910,7 @@ static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t se
switch (rbm) {
case RB_CONGESTED_REMOTE:
- return bdi_read_congested(
- device->ldev->backing_bdev->bd_disk->bdi);
+ return 0;
case RB_LEAST_PENDING:
return atomic_read(&device->local_cnt) >
atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
@@ -1151,8 +1151,6 @@ drbd_submit_req_private_bio(struct drbd_request *req)
else
type = DRBD_FAULT_DT_RD;
- bio_set_dev(bio, device->ldev->backing_bdev);
-
/* State may have changed since we grabbed our reference on the
* ->ldev member. Double check, and short-circuit to endio.
* In case the last activity log transaction failed to get on
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 64563bfdf0da..1b48c8172a07 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -326,9 +326,9 @@ void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest)
bio_for_each_segment(bvec, bio, iter) {
u8 *src;
- src = kmap_atomic(bvec.bv_page);
- crypto_shash_update(desc, src + bvec.bv_offset, bvec.bv_len);
- kunmap_atomic(src);
+ src = bvec_kmap_local(&bvec);
+ crypto_shash_update(desc, src, bvec.bv_len);
+ kunmap_local(src);
/* REQ_OP_WRITE_SAME has only one segment,
* checksum the payload only once. */
@@ -1523,9 +1523,9 @@ int w_restart_disk_io(struct drbd_work *w, int cancel)
if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
drbd_al_begin_io(device, &req->i);
- req->private_bio = bio_clone_fast(req->master_bio, GFP_NOIO,
+ req->private_bio = bio_alloc_clone(device->ldev->backing_bdev,
+ req->master_bio, GFP_NOIO,
&drbd_io_bio_set);
- bio_set_dev(req->private_bio, device->ldev->backing_bdev);
req->private_bio->bi_private = req;
req->private_bio->bi_end_io = drbd_request_endio;
submit_bio_noacct(req->private_bio);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index e611411a934c..8c647532e3ce 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2485,11 +2485,9 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
}
if (CT(raw_cmd->cmd[COMMAND]) == FD_READ)
- memcpy_to_page(bv.bv_page, bv.bv_offset, dma_buffer,
- size);
+ memcpy_to_bvec(&bv, dma_buffer);
else
- memcpy_from_page(dma_buffer, bv.bv_page, bv.bv_offset,
- size);
+ memcpy_from_bvec(dma_buffer, &bv);
remaining -= size;
dma_buffer += size;
@@ -4129,15 +4127,13 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
cbdata.drive = drive;
- bio_init(&bio, &bio_vec, 1);
- bio_set_dev(&bio, bdev);
+ bio_init(&bio, bdev, &bio_vec, 1, REQ_OP_READ);
bio_add_page(&bio, page, block_size(bdev), 0);
bio.bi_iter.bi_sector = 0;
bio.bi_flags |= (1 << BIO_QUIET);
bio.bi_private = &cbdata;
bio.bi_end_io = floppy_rb0_cb;
- bio_set_op_attrs(&bio, REQ_OP_READ, 0);
init_completion(&cbdata.complete);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 19fe19eaa50e..3e636a75c83a 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -86,6 +86,7 @@
#include <linux/uaccess.h>
#define LOOP_IDLE_WORKER_TIMEOUT (60 * HZ)
+#define LOOP_DEFAULT_HW_Q_DEPTH (128)
static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_ctl_mutex);
@@ -309,12 +310,11 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
* a.k.a. discard/zerorange.
*/
struct file *file = lo->lo_backing_file;
- struct request_queue *q = lo->lo_queue;
int ret;
mode |= FALLOC_FL_KEEP_SIZE;
- if (!blk_queue_discard(q)) {
+ if (!blk_queue_discard(lo->lo_queue)) {
ret = -EOPNOTSUPP;
goto out;
}
@@ -328,8 +328,7 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
static int lo_req_flush(struct loop_device *lo, struct request *rq)
{
- struct file *file = lo->lo_backing_file;
- int ret = vfs_fsync(file, 0);
+ int ret = vfs_fsync(lo->lo_backing_file, 0);
if (unlikely(ret && ret != -EINVAL))
ret = -EIO;
@@ -681,33 +680,33 @@ static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
{
- return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset);
+ return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_offset);
}
static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
{
- return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
+ return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
}
static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
{
int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);
- return sprintf(buf, "%s\n", autoclear ? "1" : "0");
+ return sysfs_emit(buf, "%s\n", autoclear ? "1" : "0");
}
static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
{
int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);
- return sprintf(buf, "%s\n", partscan ? "1" : "0");
+ return sysfs_emit(buf, "%s\n", partscan ? "1" : "0");
}
static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
{
int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);
- return sprintf(buf, "%s\n", dio ? "1" : "0");
+ return sysfs_emit(buf, "%s\n", dio ? "1" : "0");
}
LOOP_ATTR_RO(backing_file);
@@ -1261,7 +1260,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) {
/* If any pages were dirtied after invalidate_bdev(), try again */
err = -EAGAIN;
- pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
+ pr_warn("%s: loop%d (%s) still has dirty pages (nrpages=%lu)\n",
__func__, lo->lo_number, lo->lo_file_name,
lo->lo_device->bd_inode->i_mapping->nrpages);
goto out_unfreeze;
@@ -1481,7 +1480,7 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
/* invalidate_bdev should have truncated all the pages */
if (lo->lo_device->bd_inode->i_mapping->nrpages) {
err = -EAGAIN;
- pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
+ pr_warn("%s: loop%d (%s) still has dirty pages (nrpages=%lu)\n",
__func__, lo->lo_number, lo->lo_file_name,
lo->lo_device->bd_inode->i_mapping->nrpages);
goto out_unfreeze;
@@ -1786,6 +1785,24 @@ module_param(max_loop, int, 0444);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
+
+static int hw_queue_depth = LOOP_DEFAULT_HW_Q_DEPTH;
+
+static int loop_set_hw_queue_depth(const char *s, const struct kernel_param *p)
+{
+ int ret = kstrtoint(s, 10, &hw_queue_depth);
+
+ return (ret || (hw_queue_depth < 1)) ? -EINVAL : 0;
+}
+
+static const struct kernel_param_ops loop_hw_qdepth_param_ops = {
+ .set = loop_set_hw_queue_depth,
+ .get = param_get_int,
+};
+
+device_param_cb(hw_queue_depth, &loop_hw_qdepth_param_ops, &hw_queue_depth, 0444);
+MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 128");
+
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
@@ -1980,7 +1997,7 @@ static int loop_add(int i)
lo->tag_set.ops = &loop_mq_ops;
lo->tag_set.nr_hw_queues = 1;
- lo->tag_set.queue_depth = 128;
+ lo->tag_set.queue_depth = hw_queue_depth;
lo->tag_set.numa_node = NUMA_NO_NODE;
lo->tag_set.cmd_size = sizeof(struct loop_cmd);
lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING |
@@ -2074,6 +2091,7 @@ static void loop_remove(struct loop_device *lo)
del_gendisk(lo->lo_disk);
blk_cleanup_disk(lo->lo_disk);
blk_mq_free_tag_set(&lo->tag_set);
+
mutex_lock(&loop_ctl_mutex);
idr_remove(&loop_index_idr, lo->lo_number);
mutex_unlock(&loop_ctl_mutex);
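Note that the hw_queue_depth parameter added above is 0444, i.e. settable only at module load time; a hypothetical invocation would be modprobe loop hw_queue_depth=256, after which newly created loop devices get tag sets of that depth while existing ones keep theirs.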
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 2b588b62cbbb..4fbaf0b4958b 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -19,7 +19,6 @@
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/module.h>
-#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
@@ -161,9 +160,7 @@ static bool mtip_check_surprise_removal(struct driver_data *dd)
static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
unsigned int tag)
{
- struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];
-
- return blk_mq_rq_to_pdu(blk_mq_tag_to_rq(hctx->tags, tag));
+ return blk_mq_rq_to_pdu(blk_mq_tag_to_rq(dd->tags.tags[0], tag));
}
/*
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 88f4206310e4..6816beb45352 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -15,7 +15,6 @@
#include <linux/rwsem.h>
#include <linux/ata.h>
#include <linux/interrupt.h>
-#include <linux/genhd.h>
/* Offset of Subsystem Device ID in pci confoguration space */
#define PCI_SUBSYSTEM_DEVICEID 0x2E
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 13004beb48ca..05b1120e6623 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -431,9 +431,10 @@ static ssize_t nullb_device_power_store(struct config_item *item,
if (!dev->power && newp) {
if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
return count;
- if (null_add_dev(dev)) {
+ ret = null_add_dev(dev);
+ if (ret) {
clear_bit(NULLB_DEV_FL_UP, &dev->flags);
- return -ENOMEM;
+ return ret;
}
set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
@@ -719,26 +720,25 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
return NULL;
}
-static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
+static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, struct bio *bio)
{
struct nullb_cmd *cmd;
DEFINE_WAIT(wait);
- cmd = __alloc_cmd(nq);
- if (cmd || !can_wait)
- return cmd;
-
do {
- prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
+ /*
+ * This avoids multiple return statements, multiple calls to
+ * __alloc_cmd() and a fast path call to prepare_to_wait().
+ */
cmd = __alloc_cmd(nq);
- if (cmd)
- break;
-
+ if (cmd) {
+ cmd->bio = bio;
+ return cmd;
+ }
+ prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
io_schedule();
+ finish_wait(&nq->wait, &wait);
} while (1);
-
- finish_wait(&nq->wait, &wait);
- return cmd;
}
static void end_cmd(struct nullb_cmd *cmd)
@@ -777,24 +777,22 @@ static void null_complete_rq(struct request *rq)
end_cmd(blk_mq_rq_to_pdu(rq));
}
-static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
+static struct nullb_page *null_alloc_page(void)
{
struct nullb_page *t_page;
- t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
+ t_page = kmalloc(sizeof(struct nullb_page), GFP_NOIO);
if (!t_page)
- goto out;
+ return NULL;
- t_page->page = alloc_pages(gfp_flags, 0);
- if (!t_page->page)
- goto out_freepage;
+ t_page->page = alloc_pages(GFP_NOIO, 0);
+ if (!t_page->page) {
+ kfree(t_page);
+ return NULL;
+ }
memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
return t_page;
-out_freepage:
- kfree(t_page);
-out:
- return NULL;
}
static void null_free_page(struct nullb_page *t_page)
@@ -932,7 +930,7 @@ static struct nullb_page *null_insert_page(struct nullb *nullb,
spin_unlock_irq(&nullb->lock);
- t_page = null_alloc_page(GFP_NOIO);
+ t_page = null_alloc_page();
if (!t_page)
goto out_lock;
@@ -1476,12 +1474,8 @@ static void null_submit_bio(struct bio *bio)
sector_t nr_sectors = bio_sectors(bio);
struct nullb *nullb = bio->bi_bdev->bd_disk->private_data;
struct nullb_queue *nq = nullb_to_queue(nullb);
- struct nullb_cmd *cmd;
-
- cmd = alloc_cmd(nq, 1);
- cmd->bio = bio;
- null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio));
+ null_handle_cmd(alloc_cmd(nq, bio), sector, nr_sectors, bio_op(bio));
}
static bool should_timeout_request(struct request *rq)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 2b6b70a39e76..e745fc29e55d 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1020,9 +1020,8 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
continue;
bio = pkt->r_bios[f];
- bio_reset(bio);
+ bio_reset(bio, pd->bdev, REQ_OP_READ);
bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
- bio_set_dev(bio, pd->bdev);
bio->bi_end_io = pkt_end_io_read;
bio->bi_private = pkt;
@@ -1034,7 +1033,6 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
BUG();
atomic_inc(&pkt->io_wait);
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
pkt_queue_bio(pd, bio);
frames_read++;
}
@@ -1235,9 +1233,8 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
int f;
- bio_reset(pkt->w_bio);
+ bio_reset(pkt->w_bio, pd->bdev, REQ_OP_WRITE);
pkt->w_bio->bi_iter.bi_sector = pkt->sector;
- bio_set_dev(pkt->w_bio, pd->bdev);
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt->w_bio->bi_private = pkt;
@@ -1270,7 +1267,6 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
/* Start the write request */
atomic_set(&pkt->io_wait, 1);
- bio_set_op_attrs(pkt->w_bio, REQ_OP_WRITE, 0);
pkt_queue_bio(pd, pkt->w_bio);
}
@@ -2298,12 +2294,12 @@ static void pkt_end_io_read_cloned(struct bio *bio)
static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
{
- struct bio *cloned_bio = bio_clone_fast(bio, GFP_NOIO, &pkt_bio_set);
+ struct bio *cloned_bio =
+ bio_alloc_clone(pd->bdev, bio, GFP_NOIO, &pkt_bio_set);
struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
psd->pd = pd;
psd->bio = bio;
- bio_set_dev(cloned_bio, pd->bdev);
cloned_bio->bi_private = psd;
cloned_bio->bi_end_io = pkt_end_io_read_cloned;
pd->stats.secs_r += bio_sectors(bio);
@@ -2404,18 +2400,11 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
static void pkt_submit_bio(struct bio *bio)
{
- struct pktcdvd_device *pd;
- char b[BDEVNAME_SIZE];
+ struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata;
struct bio *split;
blk_queue_split(&bio);
- pd = bio->bi_bdev->bd_disk->queue->queuedata;
- if (!pd) {
- pr_err("%s incorrect request queue\n", bio_devname(bio, b));
- goto end_io;
- }
-
pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
(unsigned long long)bio->bi_iter.bi_sector,
(unsigned long long)bio_end_sector(bio));
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index c08971de369f..2f378684b735 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -23,7 +23,6 @@ MODULE_LICENSE("GPL");
static int rnbd_client_major;
static DEFINE_IDA(index_ida);
-static DEFINE_MUTEX(ida_lock);
static DEFINE_MUTEX(sess_lock);
static LIST_HEAD(sess_list);
@@ -55,9 +54,7 @@ static void rnbd_clt_put_dev(struct rnbd_clt_dev *dev)
if (!refcount_dec_and_test(&dev->refcount))
return;
- mutex_lock(&ida_lock);
- ida_simple_remove(&index_ida, dev->clt_device_id);
- mutex_unlock(&ida_lock);
+ ida_free(&index_ida, dev->clt_device_id);
kfree(dev->hw_queues);
kfree(dev->pathname);
rnbd_clt_put_sess(dev->sess);
@@ -87,7 +84,6 @@ static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
dev->discard_granularity = le32_to_cpu(rsp->discard_granularity);
dev->discard_alignment = le32_to_cpu(rsp->discard_alignment);
dev->secure_discard = le16_to_cpu(rsp->secure_discard);
- dev->rotational = rsp->rotational;
dev->wc = !!(rsp->cache_policy & RNBD_WRITEBACK);
dev->fua = !!(rsp->cache_policy & RNBD_FUA);
@@ -1262,9 +1258,9 @@ find_and_get_or_create_sess(const char *sessname,
struct rtrs_clt_ops rtrs_ops;
sess = find_or_create_sess(sessname, &first);
- if (sess == ERR_PTR(-ENOMEM))
+ if (sess == ERR_PTR(-ENOMEM)) {
return ERR_PTR(-ENOMEM);
- else if ((nr_poll_queues && !first) || (!nr_poll_queues && sess->nr_poll_queues)) {
+ } else if ((nr_poll_queues && !first) || (!nr_poll_queues && sess->nr_poll_queues)) {
/*
* A device MUST have its own session to use the polling-mode.
* It must fail to map new device with the same session.
@@ -1343,7 +1339,7 @@ static inline void rnbd_init_hw_queue(struct rnbd_clt_dev *dev,
static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev)
{
- int i;
+ unsigned long i;
struct blk_mq_hw_ctx *hctx;
struct rnbd_queue *q;
@@ -1410,8 +1406,10 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
dev->read_only = false;
}
- if (!dev->rotational)
- blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
+ /*
+ * A network block device is never rotational
+ */
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
err = add_disk(dev->gd);
if (err)
blk_cleanup_disk(dev->gd);
@@ -1459,10 +1457,8 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
goto out_alloc;
}
- mutex_lock(&ida_lock);
- ret = ida_simple_get(&index_ida, 0, 1 << (MINORBITS - RNBD_PART_BITS),
- GFP_KERNEL);
- mutex_unlock(&ida_lock);
+ ret = ida_alloc_max(&index_ida, 1 << (MINORBITS - RNBD_PART_BITS),
+ GFP_KERNEL);
if (ret < 0) {
pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
pathname, sess->sessname, ret);
@@ -1610,13 +1606,13 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
}
rnbd_clt_info(dev,
- "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d, wc: %d, fua: %d)\n",
+ "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, wc: %d, fua: %d)\n",
dev->gd->disk_name, dev->nsectors,
dev->logical_block_size, dev->physical_block_size,
dev->max_write_same_sectors, dev->max_discard_sectors,
dev->discard_granularity, dev->discard_alignment,
dev->secure_discard, dev->max_segments,
- dev->max_hw_sectors, dev->rotational, dev->wc, dev->fua);
+ dev->max_hw_sectors, dev->wc, dev->fua);
mutex_unlock(&dev->lock);
rnbd_clt_put_sess(sess);
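The ida conversion above works because the ida_* API takes its own internal lock, so the external ida_lock mutex can be dropped. One subtlety worth flagging: ida_simple_get()'s end argument was exclusive, while ida_alloc_max()'s max is inclusive. A sketch of the new idiom (MAX_ID is a placeholder):

    int id = ida_alloc_max(&index_ida, MAX_ID /* inclusive */, GFP_KERNEL);
    if (id < 0)
        return id;          /* -ENOMEM or -ENOSPC */
    /* ... use id ... */
    ida_free(&index_ida, id);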
diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h
index 0c2cae7f39b9..62bf7c3fa63c 100644
--- a/drivers/block/rnbd/rnbd-clt.h
+++ b/drivers/block/rnbd/rnbd-clt.h
@@ -118,7 +118,6 @@ struct rnbd_clt_dev {
enum rnbd_access_mode access_mode;
u32 nr_poll_queues;
bool read_only;
- bool rotational;
bool wc;
bool fua;
u32 max_hw_sectors;
diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
index de5d5a8df81d..c4a68b3a1cbe 100644
--- a/drivers/block/rnbd/rnbd-proto.h
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -128,7 +128,7 @@ enum rnbd_cache_policy {
* @logical_block_size: logical block size device supports in bytes
* @max_segments: max segments hardware support in one transfer
* @secure_discard: supports secure discard
- * @rotation: is a rotational disc?
+ * @obsolete_rotational: obsolete, no longer used.
* @cache_policy: support write-back caching or FUA?
*/
struct rnbd_msg_open_rsp {
@@ -144,7 +144,7 @@ struct rnbd_msg_open_rsp {
__le16 logical_block_size;
__le16 max_segments;
__le16 secure_discard;
- u8 rotational;
+ u8 obsolete_rotational;
u8 cache_policy;
u8 reserved[10];
};
diff --git a/drivers/block/rnbd/rnbd-srv-dev.c b/drivers/block/rnbd/rnbd-srv-dev.c
index b241a099aeae..c5d0a0391165 100644
--- a/drivers/block/rnbd/rnbd-srv-dev.c
+++ b/drivers/block/rnbd/rnbd-srv-dev.c
@@ -12,8 +12,7 @@
#include "rnbd-srv-dev.h"
#include "rnbd-log.h"
-struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags,
- struct bio_set *bs)
+struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags)
{
struct rnbd_dev *dev;
int ret;
@@ -30,7 +29,6 @@ struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags,
dev->blk_open_flags = flags;
bdevname(dev->bdev, dev->name);
- dev->ibd_bio_set = bs;
return dev;
@@ -44,60 +42,3 @@ void rnbd_dev_close(struct rnbd_dev *dev)
blkdev_put(dev->bdev, dev->blk_open_flags);
kfree(dev);
}
-
-void rnbd_dev_bi_end_io(struct bio *bio)
-{
- struct rnbd_dev_blk_io *io = bio->bi_private;
-
- rnbd_endio(io->priv, blk_status_to_errno(bio->bi_status));
- bio_put(bio);
-}
-
-/**
- * rnbd_bio_map_kern - map kernel address into bio
- * @data: pointer to buffer to map
- * @bs: bio_set to use.
- * @len: length in bytes
- * @gfp_mask: allocation flags for bio allocation
- *
- * Map the kernel address into a bio suitable for io to a block
- * device. Returns an error pointer in case of error.
- */
-struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
- unsigned int len, gfp_t gfp_mask)
-{
- unsigned long kaddr = (unsigned long)data;
- unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- unsigned long start = kaddr >> PAGE_SHIFT;
- const int nr_pages = end - start;
- int offset, i;
- struct bio *bio;
-
- bio = bio_alloc_bioset(gfp_mask, nr_pages, bs);
- if (!bio)
- return ERR_PTR(-ENOMEM);
-
- offset = offset_in_page(kaddr);
- for (i = 0; i < nr_pages; i++) {
- unsigned int bytes = PAGE_SIZE - offset;
-
- if (len <= 0)
- break;
-
- if (bytes > len)
- bytes = len;
-
- if (bio_add_page(bio, virt_to_page(data), bytes,
- offset) < bytes) {
- /* we don't support partial mappings */
- bio_put(bio);
- return ERR_PTR(-EINVAL);
- }
-
- data += bytes;
- len -= bytes;
- offset = 0;
- }
-
- return bio;
-}
diff --git a/drivers/block/rnbd/rnbd-srv-dev.h b/drivers/block/rnbd/rnbd-srv-dev.h
index 0eb23850afb9..2c3df02b5e8e 100644
--- a/drivers/block/rnbd/rnbd-srv-dev.h
+++ b/drivers/block/rnbd/rnbd-srv-dev.h
@@ -14,25 +14,16 @@
struct rnbd_dev {
struct block_device *bdev;
- struct bio_set *ibd_bio_set;
fmode_t blk_open_flags;
char name[BDEVNAME_SIZE];
};
-struct rnbd_dev_blk_io {
- struct rnbd_dev *dev;
- void *priv;
- /* have to be last member for front_pad usage of bioset_init */
- struct bio bio;
-};
-
/**
* rnbd_dev_open() - Open a device
+ * @path: path to open
* @flags: open flags
- * @bs: bio_set to use during block io,
*/
-struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags,
- struct bio_set *bs);
+struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags);
/**
* rnbd_dev_close() - Close a device
@@ -41,11 +32,6 @@ void rnbd_dev_close(struct rnbd_dev *dev);
void rnbd_endio(void *priv, int error);
-void rnbd_dev_bi_end_io(struct bio *bio);
-
-struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
- unsigned int len, gfp_t gfp_mask);
-
static inline int rnbd_dev_get_max_segs(const struct rnbd_dev *dev)
{
return queue_max_segments(bdev_get_queue(dev->bdev));
diff --git a/drivers/block/rnbd/rnbd-srv-sysfs.c b/drivers/block/rnbd/rnbd-srv-sysfs.c
index 4db98e0e76f0..feaa76c5a342 100644
--- a/drivers/block/rnbd/rnbd-srv-sysfs.c
+++ b/drivers/block/rnbd/rnbd-srv-sysfs.c
@@ -13,7 +13,6 @@
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
-#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 1ee808fc600c..6499efae5c43 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -114,6 +114,12 @@ rnbd_get_sess_dev(int dev_id, struct rnbd_srv_session *srv_sess)
return sess_dev;
}
+static void rnbd_dev_bi_end_io(struct bio *bio)
+{
+ rnbd_endio(bio->bi_private, blk_status_to_errno(bio->bi_status));
+ bio_put(bio);
+}
+
static int process_rdma(struct rnbd_srv_session *srv_sess,
struct rtrs_srv_op *id, void *data, u32 datalen,
const void *usr, size_t usrlen)
@@ -123,7 +129,6 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
struct rnbd_srv_sess_dev *sess_dev;
u32 dev_id;
int err;
- struct rnbd_dev_blk_io *io;
struct bio *bio;
short prio;
@@ -144,33 +149,29 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
priv->sess_dev = sess_dev;
priv->id = id;
- /* Generate bio with pages pointing to the rdma buffer */
- bio = rnbd_bio_map_kern(data, sess_dev->rnbd_dev->ibd_bio_set, datalen, GFP_KERNEL);
- if (IS_ERR(bio)) {
- err = PTR_ERR(bio);
- rnbd_srv_err(sess_dev, "Failed to generate bio, err: %d\n", err);
- goto sess_dev_put;
+ bio = bio_alloc(sess_dev->rnbd_dev->bdev, 1,
+ rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL);
+ if (bio_add_page(bio, virt_to_page(data), datalen,
+ offset_in_page(data)) != datalen) {
+ rnbd_srv_err(sess_dev, "Failed to map data to bio\n");
+ err = -EINVAL;
+ goto bio_put;
}
- io = container_of(bio, struct rnbd_dev_blk_io, bio);
- io->dev = sess_dev->rnbd_dev;
- io->priv = priv;
-
bio->bi_end_io = rnbd_dev_bi_end_io;
- bio->bi_private = io;
- bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw));
+ bio->bi_private = priv;
bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
bio->bi_iter.bi_size = le32_to_cpu(msg->bi_size);
prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
usrlen < sizeof(*msg) ? 0 : le16_to_cpu(msg->prio);
bio_set_prio(bio, prio);
- bio_set_dev(bio, sess_dev->rnbd_dev->bdev);
submit_bio(bio);
return 0;
-sess_dev_put:
+bio_put:
+ bio_put(bio);
rnbd_put_sess_dev(sess_dev);
err:
kfree(priv);
@@ -251,7 +252,6 @@ static void destroy_sess(struct rnbd_srv_session *srv_sess)
out:
xa_destroy(&srv_sess->index_idr);
- bioset_exit(&srv_sess->sess_bio_set);
pr_info("RTRS Session %s disconnected\n", srv_sess->sessname);
@@ -280,16 +280,6 @@ static int create_sess(struct rtrs_srv_sess *rtrs)
return -ENOMEM;
srv_sess->queue_depth = rtrs_srv_get_queue_depth(rtrs);
- err = bioset_init(&srv_sess->sess_bio_set, srv_sess->queue_depth,
- offsetof(struct rnbd_dev_blk_io, bio),
- BIOSET_NEED_BVECS);
- if (err) {
- pr_err("Allocating srv_session for path %s failed\n",
- pathname);
- kfree(srv_sess);
- return err;
- }
-
xa_init_flags(&srv_sess->index_idr, XA_FLAGS_ALLOC);
INIT_LIST_HEAD(&srv_sess->sess_dev_list);
mutex_init(&srv_sess->lock);
@@ -568,7 +558,6 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
cpu_to_le32(rnbd_dev_get_discard_alignment(rnbd_dev));
rsp->secure_discard =
cpu_to_le16(rnbd_dev_get_secure_discard(rnbd_dev));
- rsp->rotational = !blk_queue_nonrot(q);
rsp->cache_policy = 0;
if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
rsp->cache_policy |= RNBD_WRITEBACK;
@@ -738,8 +727,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
goto reject;
}
- rnbd_dev = rnbd_dev_open(full_path, open_flags,
- &srv_sess->sess_bio_set);
+ rnbd_dev = rnbd_dev_open(full_path, open_flags);
if (IS_ERR(rnbd_dev)) {
pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %ld\n",
full_path, srv_sess->sessname, PTR_ERR(rnbd_dev));
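process_rdma() now builds the bio directly instead of going through the removed rnbd_bio_map_kern() helper; a sketch of that mapping step (assumes the RDMA buffer is physically contiguous and fits in one segment, as the new code requires):

    bio = bio_alloc(bdev, 1, opf, GFP_KERNEL);
    if (bio_add_page(bio, virt_to_page(data), datalen,
                     offset_in_page(data)) != datalen) {
        bio_put(bio);
        return -EINVAL;     /* could not map the buffer as one segment */
    }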
diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
index e5604bce123a..be2ae486d407 100644
--- a/drivers/block/rnbd/rnbd-srv.h
+++ b/drivers/block/rnbd/rnbd-srv.h
@@ -23,7 +23,6 @@ struct rnbd_srv_session {
struct rtrs_srv_sess *rtrs;
char sessname[NAME_MAX];
int queue_depth;
- struct bio_set sess_bio_set;
struct xarray index_idr;
/* List of struct rnbd_srv_sess_dev */
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 146d85d80e0e..dd0a1a6fed29 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -9,7 +9,6 @@
#include <linux/types.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
-#include <linux/genhd.h>
#include <linux/cdrom.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index c443cd64fc9b..a8bcf3f664af 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -69,16 +69,6 @@ struct virtio_blk {
/* Process context for config space updates */
struct work_struct config_work;
- /*
- * Tracks references from block_device_operations open/release and
- * virtio_driver probe/remove so this object can be freed once no
- * longer in use.
- */
- refcount_t refs;
-
- /* What host tells us, plus 2 for header & tailer. */
- unsigned int sg_elems;
-
/* Ida index - used to track minor number allocations. */
int index;
@@ -322,8 +312,6 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_status_t status;
int err;
- BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
-
status = virtblk_setup_cmd(vblk->vdev, req, vbr);
if (unlikely(status))
return status;
@@ -391,43 +379,6 @@ out:
return err;
}
-static void virtblk_get(struct virtio_blk *vblk)
-{
- refcount_inc(&vblk->refs);
-}
-
-static void virtblk_put(struct virtio_blk *vblk)
-{
- if (refcount_dec_and_test(&vblk->refs)) {
- ida_simple_remove(&vd_index_ida, vblk->index);
- mutex_destroy(&vblk->vdev_mutex);
- kfree(vblk);
- }
-}
-
-static int virtblk_open(struct block_device *bd, fmode_t mode)
-{
- struct virtio_blk *vblk = bd->bd_disk->private_data;
- int ret = 0;
-
- mutex_lock(&vblk->vdev_mutex);
-
- if (vblk->vdev)
- virtblk_get(vblk);
- else
- ret = -ENXIO;
-
- mutex_unlock(&vblk->vdev_mutex);
- return ret;
-}
-
-static void virtblk_release(struct gendisk *disk, fmode_t mode)
-{
- struct virtio_blk *vblk = disk->private_data;
-
- virtblk_put(vblk);
-}
-
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
@@ -460,11 +411,19 @@ out:
return ret;
}
+static void virtblk_free_disk(struct gendisk *disk)
+{
+ struct virtio_blk *vblk = disk->private_data;
+
+ ida_simple_remove(&vd_index_ida, vblk->index);
+ mutex_destroy(&vblk->vdev_mutex);
+ kfree(vblk);
+}
+
static const struct block_device_operations virtblk_fops = {
- .owner = THIS_MODULE,
- .open = virtblk_open,
- .release = virtblk_release,
- .getgeo = virtblk_getgeo,
+ .owner = THIS_MODULE,
+ .getgeo = virtblk_getgeo,
+ .free_disk = virtblk_free_disk,
};
static int index_to_minor(int index)
@@ -783,20 +742,15 @@ static int virtblk_probe(struct virtio_device *vdev)
/* Prevent integer overflows and honor max vq size */
sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);
- /* We need extra sg elements at head and tail. */
- sg_elems += 2;
vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
if (!vblk) {
err = -ENOMEM;
goto out_free_index;
}
- /* This reference is dropped in virtblk_remove(). */
- refcount_set(&vblk->refs, 1);
mutex_init(&vblk->vdev_mutex);
vblk->vdev = vdev;
- vblk->sg_elems = sg_elems;
INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
@@ -853,7 +807,7 @@ static int virtblk_probe(struct virtio_device *vdev)
set_disk_ro(vblk->disk, 1);
/* We can handle whatever the host told us to handle. */
- blk_queue_max_segments(q, vblk->sg_elems-2);
+ blk_queue_max_segments(q, sg_elems);
/* No real sector limit. */
blk_queue_max_hw_sectors(q, -1U);
@@ -925,9 +879,15 @@ static int virtblk_probe(struct virtio_device *vdev)
virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
&v);
+
+ /*
+ * max_discard_seg == 0 is out of spec but we always
+ * handled it.
+ */
+ if (!v)
+ v = sg_elems;
blk_queue_max_discard_segments(q,
- min_not_zero(v,
- MAX_DISCARD_SEGMENTS));
+ min(v, MAX_DISCARD_SEGMENTS));
blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
}
@@ -970,7 +930,7 @@ static void virtblk_remove(struct virtio_device *vdev)
flush_work(&vblk->config_work);
del_gendisk(vblk->disk);
- blk_cleanup_disk(vblk->disk);
+ blk_cleanup_queue(vblk->disk->queue);
blk_mq_free_tag_set(&vblk->tag_set);
mutex_lock(&vblk->vdev_mutex);
@@ -986,7 +946,7 @@ static void virtblk_remove(struct virtio_device *vdev)
mutex_unlock(&vblk->vdev_mutex);
- virtblk_put(vblk);
+ put_disk(vblk->disk);
}
#ifdef CONFIG_PM_SLEEP
@@ -1060,7 +1020,7 @@ static struct virtio_driver virtio_blk = {
#endif
};
-static int __init init(void)
+static int __init virtio_blk_init(void)
{
int error;
@@ -1086,14 +1046,14 @@ out_destroy_workqueue:
return error;
}
-static void __exit fini(void)
+static void __exit virtio_blk_fini(void)
{
unregister_virtio_driver(&virtio_blk);
unregister_blkdev(major, "virtblk");
destroy_workqueue(virtblk_wq);
}
-module_init(init);
-module_exit(fini);
+module_init(virtio_blk_init);
+module_exit(virtio_blk_fini);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 14e452896d04..d1e26461a64e 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1326,16 +1326,13 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
pages[i]->page,
seg[i].nsec << 9,
seg[i].offset) == 0)) {
- bio = bio_alloc(GFP_KERNEL, bio_max_segs(nseg - i));
- if (unlikely(bio == NULL))
- goto fail_put_bio;
-
+ bio = bio_alloc(preq.bdev, bio_max_segs(nseg - i),
+ operation | operation_flags,
+ GFP_KERNEL);
biolist[nbio++] = bio;
- bio_set_dev(bio, preq.bdev);
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
bio->bi_iter.bi_sector = preq.sector_number;
- bio_set_op_attrs(bio, operation, operation_flags);
}
preq.sector_number += seg[i].nsec;
@@ -1345,15 +1342,11 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
if (!bio) {
BUG_ON(operation_flags != REQ_PREFLUSH);
- bio = bio_alloc(GFP_KERNEL, 0);
- if (unlikely(bio == NULL))
- goto fail_put_bio;
-
+ bio = bio_alloc(preq.bdev, 0, operation | operation_flags,
+ GFP_KERNEL);
biolist[nbio++] = bio;
- bio_set_dev(bio, preq.bdev);
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
- bio_set_op_attrs(bio, operation, operation_flags);
}
atomic_set(&pending_req->pendcnt, nbio);
@@ -1381,14 +1374,6 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
free_req(ring, pending_req);
msleep(1); /* back off a bit */
return -EIO;
-
- fail_put_bio:
- for (i = 0; i < nbio; i++)
- bio_put(biolist[i]);
- atomic_set(&pending_req->pendcnt, 1);
- __end_block_io_op(pending_req, BLK_STS_RESOURCE);
- msleep(1); /* back off a bit */
- return -EIO;
}
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 62125fd4af4a..f09040435e2e 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/kthread.h>
+#include <linux/pagemap.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include "common.h"
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index ca71a0585333..85fc550508cc 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1288,7 +1288,8 @@ free_shadow:
rinfo->ring_ref[i] = GRANT_INVALID_REF;
}
}
- free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
+ free_pages_exact(rinfo->ring.sring,
+ info->nr_ring_pages * XEN_PAGE_SIZE);
rinfo->ring.sring = NULL;
if (rinfo->irq)
@@ -1372,9 +1373,15 @@ static int blkif_get_final_status(enum blk_req_status s1,
return BLKIF_RSP_OKAY;
}
-static bool blkif_completion(unsigned long *id,
- struct blkfront_ring_info *rinfo,
- struct blkif_response *bret)
+/*
+ * Return values:
+ * 1 response processed.
+ * 0 waiting for further responses.
+ * -1 error while processing.
+ */
+static int blkif_completion(unsigned long *id,
+ struct blkfront_ring_info *rinfo,
+ struct blkif_response *bret)
{
int i = 0;
struct scatterlist *sg;
@@ -1397,7 +1404,7 @@ static bool blkif_completion(unsigned long *id,
/* Wait the second response if not yet here. */
if (s2->status < REQ_DONE)
- return false;
+ return 0;
bret->status = blkif_get_final_status(s->status,
s2->status);
@@ -1448,42 +1455,43 @@ static bool blkif_completion(unsigned long *id,
}
/* Add the persistent grant into the list of free grants */
for (i = 0; i < num_grant; i++) {
- if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
+ if (!gnttab_try_end_foreign_access(s->grants_used[i]->gref)) {
/*
* If the grant is still mapped by the backend (the
* backend has chosen to make this grant persistent)
* we add it at the head of the list, so it will be
* reused first.
*/
- if (!info->feature_persistent)
- pr_alert_ratelimited("backed has not unmapped grant: %u\n",
- s->grants_used[i]->gref);
+ if (!info->feature_persistent) {
+ pr_alert("backed has not unmapped grant: %u\n",
+ s->grants_used[i]->gref);
+ return -1;
+ }
list_add(&s->grants_used[i]->node, &rinfo->grants);
rinfo->persistent_gnts_c++;
} else {
/*
- * If the grant is not mapped by the backend we end the
- * foreign access and add it to the tail of the list,
- * so it will not be picked again unless we run out of
- * persistent grants.
+ * If the grant is not mapped by the backend we add it
+ * to the tail of the list, so it will not be picked
+ * again unless we run out of persistent grants.
*/
- gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
s->grants_used[i]->gref = GRANT_INVALID_REF;
list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
}
}
if (s->req.operation == BLKIF_OP_INDIRECT) {
for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
- if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
- if (!info->feature_persistent)
- pr_alert_ratelimited("backed has not unmapped grant: %u\n",
- s->indirect_grants[i]->gref);
+ if (!gnttab_try_end_foreign_access(s->indirect_grants[i]->gref)) {
+ if (!info->feature_persistent) {
+ pr_alert("backed has not unmapped grant: %u\n",
+ s->indirect_grants[i]->gref);
+ return -1;
+ }
list_add(&s->indirect_grants[i]->node, &rinfo->grants);
rinfo->persistent_gnts_c++;
} else {
struct page *indirect_page;
- gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
/*
* Add the used indirect page back to the list of
* available pages for indirect grefs.
@@ -1498,7 +1506,7 @@ static bool blkif_completion(unsigned long *id,
}
}
- return true;
+ return 1;
}
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
@@ -1564,12 +1572,17 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
}
if (bret.operation != BLKIF_OP_DISCARD) {
+ int ret;
+
/*
* We may need to wait for an extra response if the
* I/O request is split in 2
*/
- if (!blkif_completion(&id, rinfo, &bret))
+ ret = blkif_completion(&id, rinfo, &bret);
+ if (!ret)
continue;
+ if (unlikely(ret < 0))
+ goto err;
}
if (add_id_to_freelist(rinfo, id)) {
@@ -1676,8 +1689,7 @@ static int setup_blkring(struct xenbus_device *dev,
for (i = 0; i < info->nr_ring_pages; i++)
rinfo->ring_ref[i] = GRANT_INVALID_REF;
- sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
- get_order(ring_size));
+ sring = alloc_pages_exact(ring_size, GFP_NOIO);
if (!sring) {
xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
return -ENOMEM;
@@ -1687,7 +1699,7 @@ static int setup_blkring(struct xenbus_device *dev,
err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
if (err < 0) {
- free_pages((unsigned long)sring, get_order(ring_size));
+ free_pages_exact(sring, ring_size);
rinfo->ring.sring = NULL;
goto fail;
}
@@ -2521,6 +2533,7 @@ static void purge_persistent_grants(struct blkfront_info *info)
for_each_rinfo(info, rinfo, i) {
struct grant *gnt_list_entry, *tmp;
+ LIST_HEAD(grants);
spin_lock_irqsave(&rinfo->ring_lock, flags);
@@ -2532,16 +2545,17 @@ static void purge_persistent_grants(struct blkfront_info *info)
list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
node) {
if (gnt_list_entry->gref == GRANT_INVALID_REF ||
- gnttab_query_foreign_access(gnt_list_entry->gref))
+ !gnttab_try_end_foreign_access(gnt_list_entry->gref))
continue;
list_del(&gnt_list_entry->node);
- gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
rinfo->persistent_gnts_c--;
gnt_list_entry->gref = GRANT_INVALID_REF;
- list_add_tail(&gnt_list_entry->node, &rinfo->grants);
+ list_add_tail(&gnt_list_entry->node, &grants);
}
+ list_splice_tail(&grants, &rinfo->grants);
+
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
}
}
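
A minimal sketch, not part of this patch, of the grant teardown pattern the blkfront hunks above converge on; gref and page are assumed to come from an earlier gnttab_grant_foreign_access() call. The key property is that gnttab_try_end_foreign_access() checks and revokes in one step, closing the race window the old gnttab_query_foreign_access()/gnttab_end_foreign_access() pair left open:

	if (gnttab_try_end_foreign_access(gref)) {
		/* Access revoked: the backend can no longer map the page,
		 * so it is safe to reuse or free it.
		 */
		__free_page(page);
	} else {
		/* The backend still maps the grant: the page must stay
		 * quarantined and never go back to the page allocator.
		 */
	}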
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index cb253d80d72b..e9474b02012d 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -22,7 +22,6 @@
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
-#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
@@ -617,24 +616,21 @@ static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
{
struct bio *bio;
- bio = bio_alloc(GFP_NOIO, 1);
+ bio = bio_alloc(zram->bdev, 1, parent ? parent->bi_opf : REQ_OP_READ,
+ GFP_NOIO);
if (!bio)
return -ENOMEM;
bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
- bio_set_dev(bio, zram->bdev);
if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
bio_put(bio);
return -EIO;
}
- if (!parent) {
- bio->bi_opf = REQ_OP_READ;
+ if (!parent)
bio->bi_end_io = zram_page_end_io;
- } else {
- bio->bi_opf = parent->bi_opf;
+ else
bio_chain(bio, parent);
- }
submit_bio(bio);
return 1;
@@ -747,10 +743,9 @@ static ssize_t writeback_store(struct device *dev,
continue;
}
- bio_init(&bio, &bio_vec, 1);
- bio_set_dev(&bio, zram->bdev);
+ bio_init(&bio, zram->bdev, &bio_vec, 1,
+ REQ_OP_WRITE | REQ_SYNC);
bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
- bio.bi_opf = REQ_OP_WRITE | REQ_SYNC;
bio_add_page(&bio, bvec.bv_page, bvec.bv_len,
bvec.bv_offset);
@@ -1336,12 +1331,10 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
goto out;
if (is_partial_io(bvec)) {
- void *dst = kmap_atomic(bvec->bv_page);
void *src = kmap_atomic(page);
- memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
+ memcpy_to_bvec(bvec, src + offset);
kunmap_atomic(src);
- kunmap_atomic(dst);
}
out:
if (is_partial_io(bvec))
@@ -1472,7 +1465,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
{
int ret;
struct page *page = NULL;
- void *src;
struct bio_vec vec;
vec = *bvec;
@@ -1490,11 +1482,9 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
if (ret)
goto out;
- src = kmap_atomic(bvec->bv_page);
dst = kmap_atomic(page);
- memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
+ memcpy_from_bvec(dst + offset, bvec);
kunmap_atomic(dst);
- kunmap_atomic(src);
vec.bv_page = page;
vec.bv_len = PAGE_SIZE;
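
For reference, a minimal sketch of the consolidated bio allocation API the zram hunks above move to, with bdev and page as assumed placeholders; the target block device and the operation flags are now supplied at allocation time instead of being patched into the bio afterwards:

	struct bio *bio;

	/* bio_alloc(bdev, nr_vecs, opf, gfp) sets device and op up front */
	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOIO);
	if (!bio)
		return -ENOMEM;

	if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
		bio_put(bio);
		return -EIO;
	}

	submit_bio(bio);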
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 36380e618ba4..e30707405455 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -400,6 +400,7 @@ config BT_MTKSDIO
config BT_MTKUART
tristate "MediaTek HCI UART driver"
depends on SERIAL_DEV_BUS
+ select BT_MTK
help
MediaTek Bluetooth HCI UART driver.
This driver is required if you want to use MediaTek Bluetooth
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 759d7828931d..88262d3a9392 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -10,7 +10,6 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/usb.h>
#include <asm/unaligned.h>
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index e667933c3d70..c738ad0408cb 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
-#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 1a4f8b227eac..06514ed66022 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -2428,10 +2428,15 @@ static int btintel_setup_combined(struct hci_dev *hdev)
/* Apply the device specific HCI quirks
*
- * WBS for SdP - SdP and Stp have a same hw_varaint but
- * different fw_variant
+ * WBS for SdP - For the Legacy ROM products, only SdP
+ * supports the WBS. But the version information is not
+ * enough to use here because the StP2 and SdP have same
+ * hw_variant and fw_variant. So, this flag is set by
+ * the transport driver (btusb) based on the HW info
+ * (idProduct)
*/
- if (ver.hw_variant == 0x08 && ver.fw_variant == 0x22)
+ if (!btintel_test_flag(hdev,
+ INTEL_ROM_LEGACY_NO_WBS_SUPPORT))
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
&hdev->quirks);
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index c9b24e9299e2..e0060e58573c 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -152,6 +152,7 @@ enum {
INTEL_BROKEN_INITIAL_NCMD,
INTEL_BROKEN_SHUTDOWN_LED,
INTEL_ROM_LEGACY,
+ INTEL_ROM_LEGACY_NO_WBS_SUPPORT,
__INTEL_NUM_FLAGS,
};
diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c
index c4867576be00..db35b917aecf 100644
--- a/drivers/bluetooth/btmrvl_debugfs.c
+++ b/drivers/bluetooth/btmrvl_debugfs.c
@@ -1,4 +1,4 @@
-/**
+/*
* Marvell Bluetooth driver: debugfs related functions
*
* Copyright (C) 2009, Marvell International Ltd.
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 68378b42ea7f..b8ef66f89fc1 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -1,4 +1,4 @@
-/**
+/*
* Marvell BT-over-SDIO driver: SDIO interface related functions.
*
* Copyright (C) 2009, Marvell International Ltd.
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index 526dfdf1fe01..809762d64fc6 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -285,6 +285,7 @@ MODULE_AUTHOR("Mark Chen <mark-yw.chen@mediatek.com>");
MODULE_DESCRIPTION("Bluetooth support for MediaTek devices ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FIRMWARE_MT7622);
MODULE_FIRMWARE(FIRMWARE_MT7663);
MODULE_FIRMWARE(FIRMWARE_MT7668);
MODULE_FIRMWARE(FIRMWARE_MT7961);
diff --git a/drivers/bluetooth/btmtk.h b/drivers/bluetooth/btmtk.h
index 6e7b0c7567c0..2a88ea8e475e 100644
--- a/drivers/bluetooth/btmtk.h
+++ b/drivers/bluetooth/btmtk.h
@@ -1,14 +1,26 @@
/* SPDX-License-Identifier: ISC */
/* Copyright (C) 2021 MediaTek Inc. */
+#define FIRMWARE_MT7622 "mediatek/mt7622pr2h.bin"
#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
#define FIRMWARE_MT7961 "mediatek/BT_RAM_CODE_MT7961_1_2_hdr.bin"
+#define HCI_EV_WMT 0xe4
#define HCI_WMT_MAX_EVENT_SIZE 64
+#define BTMTK_WMT_REG_WRITE 0x1
#define BTMTK_WMT_REG_READ 0x2
+#define MT7921_BTSYS_RST 0x70002610
+#define MT7921_BTSYS_RST_WITH_GPIO BIT(7)
+
+#define MT7921_PINMUX_0 0x70005050
+#define MT7921_PINMUX_1 0x70005054
+
+#define MT7921_DLSTATUS 0x7c053c10
+#define BT_DL_STATE BIT(1)
+
enum {
BTMTK_WMT_PATCH_DWNLD = 0x1,
BTMTK_WMT_TEST = 0x2,
@@ -68,6 +80,37 @@ struct btmtk_tci_sleep {
u8 time_compensation;
} __packed;
+struct btmtk_wakeon {
+ u8 mode;
+ u8 gpo;
+ u8 active_high;
+ __le16 enable_delay;
+ __le16 wakeup_delay;
+} __packed;
+
+struct btmtk_sco {
+ u8 clock_config;
+ u8 transmit_format_config;
+ u8 channel_format_config;
+ u8 channel_select_config;
+} __packed;
+
+struct reg_read_cmd {
+ u8 type;
+ u8 rsv;
+ u8 num;
+ __le32 addr;
+} __packed;
+
+struct reg_write_cmd {
+ u8 type;
+ u8 rsv;
+ u8 num;
+ __le32 addr;
+ __le32 data;
+ __le32 mask;
+} __packed;
+
struct btmtk_hci_wmt_params {
u8 op;
u8 flag;
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index b5ea8d3bffaa..f3dc5881fff7 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -12,10 +12,12 @@
#include <asm/unaligned.h>
#include <linux/atomic.h>
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
@@ -31,28 +33,32 @@
#define VERSION "0.1"
-#define MTKBTSDIO_AUTOSUSPEND_DELAY 8000
+#define MTKBTSDIO_AUTOSUSPEND_DELAY 1000
-static bool enable_autosuspend;
+static bool enable_autosuspend = true;
struct btmtksdio_data {
const char *fwname;
u16 chipid;
+ bool lp_mbox_supported;
};
static const struct btmtksdio_data mt7663_data = {
.fwname = FIRMWARE_MT7663,
.chipid = 0x7663,
+ .lp_mbox_supported = false,
};
static const struct btmtksdio_data mt7668_data = {
.fwname = FIRMWARE_MT7668,
.chipid = 0x7668,
+ .lp_mbox_supported = false,
};
static const struct btmtksdio_data mt7921_data = {
.fwname = FIRMWARE_MT7961,
.chipid = 0x7921,
+ .lp_mbox_supported = true,
};
static const struct sdio_device_id btmtksdio_table[] = {
@@ -79,6 +85,7 @@ MODULE_DEVICE_TABLE(sdio, btmtksdio_table);
#define MTK_REG_CHCR 0xc
#define C_INT_CLR_CTRL BIT(1)
+#define BT_RST_DONE BIT(8)
/* CHISR have the same bits field definition with CHIER */
#define MTK_REG_CHISR 0x10
@@ -87,8 +94,17 @@ MODULE_DEVICE_TABLE(sdio, btmtksdio_table);
#define RX_DONE_INT BIT(1)
#define TX_EMPTY BIT(2)
#define TX_FIFO_OVERFLOW BIT(8)
+#define FW_MAILBOX_INT BIT(15)
+#define INT_MASK GENMASK(15, 0)
#define RX_PKT_LEN GENMASK(31, 16)
+#define MTK_REG_CSICR 0xc0
+#define CSICR_CLR_MBOX_ACK BIT(0)
+#define MTK_REG_PH2DSM0R 0xc4
+#define PH2DSM0R_DRIVER_OWN BIT(0)
+#define MTK_REG_PD2HRM0R 0xdc
+#define PD2HRM0R_DRV_OWN BIT(0)
+
#define MTK_REG_CTDR 0x18
#define MTK_REG_CRDR 0x1c
@@ -100,6 +116,8 @@ MODULE_DEVICE_TABLE(sdio, btmtksdio_table);
#define BTMTKSDIO_TX_WAIT_VND_EVT 1
#define BTMTKSDIO_HW_TX_READY 2
#define BTMTKSDIO_FUNC_ENABLED 3
+#define BTMTKSDIO_PATCH_ENABLED 4
+#define BTMTKSDIO_HW_RESET_ACTIVE 5
struct mtkbtsdio_hdr {
__le16 len;
@@ -119,6 +137,8 @@ struct btmtksdio_dev {
struct sk_buff *evt_skb;
const struct btmtksdio_data *data;
+
+ struct gpio_desc *reset;
};
static int mtk_hci_wmt_sync(struct hci_dev *hdev,
@@ -278,19 +298,89 @@ static u32 btmtksdio_drv_own_query(struct btmtksdio_dev *bdev)
return sdio_readl(bdev->func, MTK_REG_CHLPCR, NULL);
}
+static u32 btmtksdio_drv_own_query_79xx(struct btmtksdio_dev *bdev)
+{
+ return sdio_readl(bdev->func, MTK_REG_PD2HRM0R, NULL);
+}
+
+static u32 btmtksdio_chcr_query(struct btmtksdio_dev *bdev)
+{
+ return sdio_readl(bdev->func, MTK_REG_CHCR, NULL);
+}
+
+static int btmtksdio_fw_pmctrl(struct btmtksdio_dev *bdev)
+{
+ u32 status;
+ int err;
+
+ sdio_claim_host(bdev->func);
+
+ if (bdev->data->lp_mbox_supported &&
+ test_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state)) {
+ sdio_writel(bdev->func, CSICR_CLR_MBOX_ACK, MTK_REG_CSICR,
+ &err);
+ err = readx_poll_timeout(btmtksdio_drv_own_query_79xx, bdev,
+ status, !(status & PD2HRM0R_DRV_OWN),
+ 2000, 1000000);
+ if (err < 0) {
+ bt_dev_err(bdev->hdev, "mailbox ACK not cleared");
+ goto out;
+ }
+ }
+
+ /* Return ownership to the device */
+ sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err);
+ if (err < 0)
+ goto out;
+
+ err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
+ !(status & C_COM_DRV_OWN), 2000, 1000000);
+
+out:
+ sdio_release_host(bdev->func);
+
+ if (err < 0)
+ bt_dev_err(bdev->hdev, "Cannot return ownership to device");
+
+ return err;
+}
+
+static int btmtksdio_drv_pmctrl(struct btmtksdio_dev *bdev)
+{
+ u32 status;
+ int err;
+
+ sdio_claim_host(bdev->func);
+
+ /* Get ownership from the device */
+ sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
+ if (err < 0)
+ goto out;
+
+ err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
+ status & C_COM_DRV_OWN, 2000, 1000000);
+
+ if (!err && bdev->data->lp_mbox_supported &&
+ test_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state))
+ err = readx_poll_timeout(btmtksdio_drv_own_query_79xx, bdev,
+ status, status & PD2HRM0R_DRV_OWN,
+ 2000, 1000000);
+
+out:
+ sdio_release_host(bdev->func);
+
+ if (err < 0)
+ bt_dev_err(bdev->hdev, "Cannot get ownership from device");
+
+ return err;
+}
+
static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
struct hci_event_hdr *hdr = (void *)skb->data;
int err;
- /* Fix up the vendor event id with 0xff for vendor specific instead
- * of 0xe4 so that event send via monitoring socket can be parsed
- * properly.
- */
- if (hdr->evt == 0xe4)
- hdr->evt = HCI_EV_VENDOR;
-
/* When someone waits for the WMT event, the skb is being cloned
* and being processed the events from there then.
*/
@@ -306,7 +396,7 @@ static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
if (err < 0)
goto err_free_skb;
- if (hdr->evt == HCI_EV_VENDOR) {
+ if (hdr->evt == HCI_EV_WMT) {
if (test_and_clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT,
&bdev->tx_state)) {
/* Barrier to sync with other CPUs */
@@ -480,6 +570,13 @@ static void btmtksdio_txrx_work(struct work_struct *work)
* FIFO.
*/
sdio_writel(bdev->func, int_status, MTK_REG_CHISR, NULL);
+ int_status &= INT_MASK;
+
+ if ((int_status & FW_MAILBOX_INT) &&
+ bdev->data->chipid == 0x7921) {
+ sdio_writel(bdev->func, PH2DSM0R_DRIVER_OWN,
+ MTK_REG_PH2DSM0R, NULL);
+ }
if (int_status & FW_OWN_BACK_INT)
bt_dev_dbg(bdev->hdev, "Get fw own back");
@@ -531,7 +628,7 @@ static void btmtksdio_interrupt(struct sdio_func *func)
static int btmtksdio_open(struct hci_dev *hdev)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
- u32 status, val;
+ u32 val;
int err;
sdio_claim_host(bdev->func);
@@ -542,18 +639,10 @@ static int btmtksdio_open(struct hci_dev *hdev)
set_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state);
- /* Get ownership from the device */
- sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
+ err = btmtksdio_drv_pmctrl(bdev);
if (err < 0)
goto err_disable_func;
- err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
- status & C_COM_DRV_OWN, 2000, 1000000);
- if (err < 0) {
- bt_dev_err(bdev->hdev, "Cannot get ownership from device");
- goto err_disable_func;
- }
-
/* Disable interrupt & mask out all interrupt sources */
sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, &err);
if (err < 0)
@@ -623,8 +712,6 @@ err_release_host:
static int btmtksdio_close(struct hci_dev *hdev)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
- u32 status;
- int err;
sdio_claim_host(bdev->func);
@@ -635,13 +722,7 @@ static int btmtksdio_close(struct hci_dev *hdev)
cancel_work_sync(&bdev->txrx_work);
- /* Return ownership to the device */
- sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, NULL);
-
- err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
- !(status & C_COM_DRV_OWN), 2000, 1000000);
- if (err < 0)
- bt_dev_err(bdev->hdev, "Cannot return ownership to device");
+ btmtksdio_fw_pmctrl(bdev);
clear_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state);
sdio_disable_func(bdev->func);
@@ -686,6 +767,7 @@ static int btmtksdio_func_query(struct hci_dev *hdev)
static int mt76xx_setup(struct hci_dev *hdev, const char *fwname)
{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
struct btmtk_hci_wmt_params wmt_params;
struct btmtk_tci_sleep tci_sleep;
struct sk_buff *skb;
@@ -746,6 +828,8 @@ ignore_setup_fw:
return err;
}
+ set_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state);
+
ignore_func_on:
/* Apply the low power environment setup */
tci_sleep.mode = 0x5;
@@ -768,6 +852,7 @@ ignore_func_on:
static int mt79xx_setup(struct hci_dev *hdev, const char *fwname)
{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
struct btmtk_hci_wmt_params wmt_params;
u8 param = 0x1;
int err;
@@ -793,19 +878,15 @@ static int mt79xx_setup(struct hci_dev *hdev, const char *fwname)
hci_set_msft_opcode(hdev, 0xFD30);
hci_set_aosp_capable(hdev);
+ set_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state);
return err;
}
-static int btsdio_mtk_reg_read(struct hci_dev *hdev, u32 reg, u32 *val)
+static int btmtksdio_mtk_reg_read(struct hci_dev *hdev, u32 reg, u32 *val)
{
struct btmtk_hci_wmt_params wmt_params;
- struct reg_read_cmd {
- u8 type;
- u8 rsv;
- u8 num;
- __le32 addr;
- } __packed reg_read = {
+ struct reg_read_cmd reg_read = {
.type = 1,
.num = 1,
};
@@ -821,7 +902,7 @@ static int btsdio_mtk_reg_read(struct hci_dev *hdev, u32 reg, u32 *val)
err = mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
- bt_dev_err(hdev, "Failed to read reg(%d)", err);
+ bt_dev_err(hdev, "Failed to read reg (%d)", err);
return err;
}
@@ -830,6 +911,151 @@ static int btsdio_mtk_reg_read(struct hci_dev *hdev, u32 reg, u32 *val)
return err;
}
+static int btmtksdio_mtk_reg_write(struct hci_dev *hdev, u32 reg, u32 val, u32 mask)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ const struct reg_write_cmd reg_write = {
+ .type = 1,
+ .num = 1,
+ .addr = cpu_to_le32(reg),
+ .data = cpu_to_le32(val),
+ .mask = cpu_to_le32(mask),
+ };
+ int err, status;
+
+ wmt_params.op = BTMTK_WMT_REGISTER;
+ wmt_params.flag = BTMTK_WMT_REG_WRITE;
+ wmt_params.dlen = sizeof(reg_write);
+ wmt_params.data = &reg_write;
+ wmt_params.status = &status;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0)
+ bt_dev_err(hdev, "Failed to write reg (%d)", err);
+
+ return err;
+}
+
+static int btmtksdio_get_data_path_id(struct hci_dev *hdev, __u8 *data_path_id)
+{
+ /* Use 1 as the data path id for all use cases */
+ *data_path_id = 1;
+ return 0;
+}
+
+static int btmtksdio_get_codec_config_data(struct hci_dev *hdev,
+ __u8 link, struct bt_codec *codec,
+ __u8 *ven_len, __u8 **ven_data)
+{
+ int err = 0;
+
+ if (!ven_data || !ven_len)
+ return -EINVAL;
+
+ *ven_len = 0;
+ *ven_data = NULL;
+
+ if (link != ESCO_LINK) {
+ bt_dev_err(hdev, "Invalid link type(%u)", link);
+ return -EINVAL;
+ }
+
+ *ven_data = kmalloc(sizeof(__u8), GFP_KERNEL);
+ if (!*ven_data) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ /* supports only CVSD and mSBC offload codecs */
+ switch (codec->id) {
+ case 0x02:
+ **ven_data = 0x00;
+ break;
+ case 0x05:
+ **ven_data = 0x01;
+ break;
+ default:
+ err = -EINVAL;
+ bt_dev_err(hdev, "Invalid codec id(%u)", codec->id);
+ goto error;
+ }
+ /* The codec and its capabilities are pre-defined as preset ids:
+ * preset id = 0x00 represents the CVSD codec with an 8 kHz sampling rate
+ * preset id = 0x01 represents the mSBC codec with a 16 kHz sampling rate
+ */
+ *ven_len = sizeof(__u8);
+ return err;
+
+error:
+ kfree(*ven_data);
+ *ven_data = NULL;
+ return err;
+}
+
+static int btmtksdio_sco_setting(struct hci_dev *hdev)
+{
+ const struct btmtk_sco sco_setting = {
+ .clock_config = 0x49,
+ .channel_format_config = 0x80,
+ };
+ struct sk_buff *skb;
+ u32 val;
+ int err;
+
+ /* Enable SCO over I2S/PCM for MediaTek chipset */
+ skb = __hci_cmd_sync(hdev, 0xfc72, sizeof(sco_setting),
+ &sco_setting, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ kfree_skb(skb);
+
+ err = btmtksdio_mtk_reg_read(hdev, MT7921_PINMUX_0, &val);
+ if (err < 0)
+ return err;
+
+ val |= 0x11000000;
+ err = btmtksdio_mtk_reg_write(hdev, MT7921_PINMUX_0, val, ~0);
+ if (err < 0)
+ return err;
+
+ err = btmtksdio_mtk_reg_read(hdev, MT7921_PINMUX_1, &val);
+ if (err < 0)
+ return err;
+
+ val |= 0x00000101;
+ err = btmtksdio_mtk_reg_write(hdev, MT7921_PINMUX_1, val, ~0);
+ if (err < 0)
+ return err;
+
+ hdev->get_data_path_id = btmtksdio_get_data_path_id;
+ hdev->get_codec_config_data = btmtksdio_get_codec_config_data;
+
+ return err;
+}
+
+static int btmtksdio_reset_setting(struct hci_dev *hdev)
+{
+ int err;
+ u32 val;
+
+ err = btmtksdio_mtk_reg_read(hdev, MT7921_PINMUX_1, &val);
+ if (err < 0)
+ return err;
+
+ val |= 0x20; /* set the pin (bit field 11:8) to work in GPIO mode */
+ err = btmtksdio_mtk_reg_write(hdev, MT7921_PINMUX_1, val, ~0);
+ if (err < 0)
+ return err;
+
+ err = btmtksdio_mtk_reg_read(hdev, MT7921_BTSYS_RST, &val);
+ if (err < 0)
+ return err;
+
+ val |= MT7921_BTSYS_RST_WITH_GPIO;
+ return btmtksdio_mtk_reg_write(hdev, MT7921_BTSYS_RST, val, ~0);
+}
+
static int btmtksdio_setup(struct hci_dev *hdev)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
@@ -837,20 +1063,39 @@ static int btmtksdio_setup(struct hci_dev *hdev)
unsigned long long duration;
char fwname[64];
int err, dev_id;
- u32 fw_version = 0;
+ u32 fw_version = 0, val;
calltime = ktime_get();
set_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state);
switch (bdev->data->chipid) {
case 0x7921:
- err = btsdio_mtk_reg_read(hdev, 0x70010200, &dev_id);
+ if (test_bit(BTMTKSDIO_HW_RESET_ACTIVE, &bdev->tx_state)) {
+ err = btmtksdio_mtk_reg_read(hdev, MT7921_DLSTATUS,
+ &val);
+ if (err < 0)
+ return err;
+
+ val &= ~BT_DL_STATE;
+ err = btmtksdio_mtk_reg_write(hdev, MT7921_DLSTATUS,
+ val, ~0);
+ if (err < 0)
+ return err;
+
+ btmtksdio_fw_pmctrl(bdev);
+ msleep(20);
+ btmtksdio_drv_pmctrl(bdev);
+
+ clear_bit(BTMTKSDIO_HW_RESET_ACTIVE, &bdev->tx_state);
+ }
+
+ err = btmtksdio_mtk_reg_read(hdev, 0x70010200, &dev_id);
if (err < 0) {
bt_dev_err(hdev, "Failed to get device id (%d)", err);
return err;
}
- err = btsdio_mtk_reg_read(hdev, 0x80021004, &fw_version);
+ err = btmtksdio_mtk_reg_read(hdev, 0x80021004, &fw_version);
if (err < 0) {
bt_dev_err(hdev, "Failed to get fw version (%d)", err);
return err;
@@ -862,6 +1107,38 @@ static int btmtksdio_setup(struct hci_dev *hdev)
err = mt79xx_setup(hdev, fwname);
if (err < 0)
return err;
+
+ err = btmtksdio_fw_pmctrl(bdev);
+ if (err < 0)
+ return err;
+
+ err = btmtksdio_drv_pmctrl(bdev);
+ if (err < 0)
+ return err;
+
+ /* Enable SCO over I2S/PCM */
+ err = btmtksdio_sco_setting(hdev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to enable SCO setting (%d)", err);
+ return err;
+ }
+
+ /* Enable WBS with mSBC codec */
+ set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+
+ /* Enable GPIO reset mechanism */
+ if (bdev->reset) {
+ err = btmtksdio_reset_setting(hdev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to enable Reset setting (%d)", err);
+ devm_gpiod_put(bdev->dev, bdev->reset);
+ bdev->reset = NULL;
+ }
+ }
+
+ /* Valid LE States quirk for MediaTek 7921 */
+ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+
break;
case 0x7663:
case 0x7668:
@@ -958,6 +1235,73 @@ static int btmtksdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
+static void btmtksdio_cmd_timeout(struct hci_dev *hdev)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ u32 status;
+ int err;
+
+ if (!bdev->reset || bdev->data->chipid != 0x7921)
+ return;
+
+ pm_runtime_get_sync(bdev->dev);
+
+ if (test_and_set_bit(BTMTKSDIO_HW_RESET_ACTIVE, &bdev->tx_state))
+ return;
+
+ sdio_claim_host(bdev->func);
+
+ sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);
+ skb_queue_purge(&bdev->txq);
+ cancel_work_sync(&bdev->txrx_work);
+
+ gpiod_set_value_cansleep(bdev->reset, 1);
+ msleep(100);
+ gpiod_set_value_cansleep(bdev->reset, 0);
+
+ err = readx_poll_timeout(btmtksdio_chcr_query, bdev, status,
+ status & BT_RST_DONE, 100000, 2000000);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to reset (%d)", err);
+ goto err;
+ }
+
+ clear_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state);
+err:
+ sdio_release_host(bdev->func);
+
+ pm_runtime_put_noidle(bdev->dev);
+ pm_runtime_disable(bdev->dev);
+
+ hci_reset_dev(hdev);
+}
+
+static bool btmtksdio_sdio_wakeup(struct hci_dev *hdev)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ bool may_wakeup = device_may_wakeup(bdev->dev);
+ const struct btmtk_wakeon bt_awake = {
+ .mode = 0x1,
+ .gpo = 0,
+ .active_high = 0x1,
+ .enable_delay = cpu_to_le16(0xc80),
+ .wakeup_delay = cpu_to_le16(0x20),
+ };
+
+ if (may_wakeup && bdev->data->chipid == 0x7921) {
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hdev, 0xfc27, sizeof(bt_awake),
+ &bt_awake, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb))
+ may_wakeup = false;
+ else
+ kfree_skb(skb);
+ }
+
+ return may_wakeup;
+}
+
static int btmtksdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
@@ -993,10 +1337,12 @@ static int btmtksdio_probe(struct sdio_func *func,
hdev->open = btmtksdio_open;
hdev->close = btmtksdio_close;
+ hdev->cmd_timeout = btmtksdio_cmd_timeout;
hdev->flush = btmtksdio_flush;
hdev->setup = btmtksdio_setup;
hdev->shutdown = btmtksdio_shutdown;
hdev->send = btmtksdio_send_frame;
+ hdev->wakeup = btmtksdio_sdio_wakeup;
hdev->set_bdaddr = btmtk_set_bdaddr;
SET_HCIDEV_DEV(hdev, &func->dev);
@@ -1004,6 +1350,8 @@ static int btmtksdio_probe(struct sdio_func *func,
hdev->manufacturer = 70;
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ sdio_set_drvdata(func, bdev);
+
err = hci_register_dev(hdev);
if (err < 0) {
dev_err(&func->dev, "Can't register HCI device\n");
@@ -1011,8 +1359,6 @@ static int btmtksdio_probe(struct sdio_func *func,
return err;
}
- sdio_set_drvdata(func, bdev);
-
/* pm_runtime_enable would be done after the firmware is being
* downloaded because the core layer probably already enables
* runtime PM for this func such as the case host->caps &
@@ -1032,7 +1378,18 @@ static int btmtksdio_probe(struct sdio_func *func,
*/
pm_runtime_put_noidle(bdev->dev);
- return 0;
+ err = device_init_wakeup(bdev->dev, true);
+ if (err)
+ bt_dev_err(hdev, "failed to initialize device wakeup");
+
+ bdev->dev->of_node = of_find_compatible_node(NULL, NULL,
+ "mediatek,mt7921s-bluetooth");
+ bdev->reset = devm_gpiod_get_optional(bdev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(bdev->reset))
+ err = PTR_ERR(bdev->reset);
+
+ return err;
}
static void btmtksdio_remove(struct sdio_func *func)
@@ -1058,7 +1415,6 @@ static int btmtksdio_runtime_suspend(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
struct btmtksdio_dev *bdev;
- u32 status;
int err;
bdev = sdio_get_drvdata(func);
@@ -1070,18 +1426,9 @@ static int btmtksdio_runtime_suspend(struct device *dev)
sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
- sdio_claim_host(bdev->func);
-
- sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err);
- if (err < 0)
- goto out;
-
- err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
- !(status & C_COM_DRV_OWN), 2000, 1000000);
-out:
- bt_dev_info(bdev->hdev, "status (%d) return ownership to device", err);
+ err = btmtksdio_fw_pmctrl(bdev);
- sdio_release_host(bdev->func);
+ bt_dev_dbg(bdev->hdev, "status (%d) return ownership to device", err);
return err;
}
@@ -1090,7 +1437,6 @@ static int btmtksdio_runtime_resume(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
struct btmtksdio_dev *bdev;
- u32 status;
int err;
bdev = sdio_get_drvdata(func);
@@ -1100,18 +1446,9 @@ static int btmtksdio_runtime_resume(struct device *dev)
if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
return 0;
- sdio_claim_host(bdev->func);
+ err = btmtksdio_drv_pmctrl(bdev);
- sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
- if (err < 0)
- goto out;
-
- err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
- status & C_COM_DRV_OWN, 2000, 1000000);
-out:
- bt_dev_info(bdev->hdev, "status (%d) get ownership from device", err);
-
- sdio_release_host(bdev->func);
+ bt_dev_dbg(bdev->hdev, "status (%d) get ownership from device", err);
return err;
}
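
The ownership helpers introduced above are built around readx_poll_timeout() from linux/iopoll.h. A short sketch of its contract, using the same 2 ms poll interval and 1 s timeout as the driver:

	u32 status;
	int err;

	/* Re-evaluates btmtksdio_drv_own_query(bdev) into status every
	 * 2000 us until the condition becomes true or 1000000 us elapse;
	 * returns 0 on success, -ETIMEDOUT once the deadline passes.
	 */
	err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
				 status & C_COM_DRV_OWN, 2000, 1000000);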
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index 9ba22b13b4fa..c98691cdbbd5 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -28,13 +28,10 @@
#include <net/bluetooth/hci_core.h>
#include "h4_recv.h"
+#include "btmtk.h"
#define VERSION "0.2"
-#define FIRMWARE_MT7622 "mediatek/mt7622pr2h.bin"
-#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
-#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
-
#define MTK_STP_TLR_SIZE 2
#define BTMTKUART_TX_STATE_ACTIVE 1
@@ -44,25 +41,6 @@
#define BTMTKUART_FLAG_STANDALONE_HW BIT(0)
-enum {
- MTK_WMT_PATCH_DWNLD = 0x1,
- MTK_WMT_TEST = 0x2,
- MTK_WMT_WAKEUP = 0x3,
- MTK_WMT_HIF = 0x4,
- MTK_WMT_FUNC_CTRL = 0x6,
- MTK_WMT_RST = 0x7,
- MTK_WMT_SEMAPHORE = 0x17,
-};
-
-enum {
- BTMTK_WMT_INVALID,
- BTMTK_WMT_PATCH_UNDONE,
- BTMTK_WMT_PATCH_DONE,
- BTMTK_WMT_ON_UNDONE,
- BTMTK_WMT_ON_DONE,
- BTMTK_WMT_ON_PROGRESS,
-};
-
struct mtk_stp_hdr {
u8 prefix;
__be16 dlen;
@@ -74,44 +52,6 @@ struct btmtkuart_data {
const char *fwname;
};
-struct mtk_wmt_hdr {
- u8 dir;
- u8 op;
- __le16 dlen;
- u8 flag;
-} __packed;
-
-struct mtk_hci_wmt_cmd {
- struct mtk_wmt_hdr hdr;
- u8 data[256];
-} __packed;
-
-struct btmtk_hci_wmt_evt {
- struct hci_event_hdr hhdr;
- struct mtk_wmt_hdr whdr;
-} __packed;
-
-struct btmtk_hci_wmt_evt_funcc {
- struct btmtk_hci_wmt_evt hwhdr;
- __be16 status;
-} __packed;
-
-struct btmtk_tci_sleep {
- u8 mode;
- __le16 duration;
- __le16 host_duration;
- u8 host_wakeup_pin;
- u8 time_compensation;
-} __packed;
-
-struct btmtk_hci_wmt_params {
- u8 op;
- u8 flag;
- u16 dlen;
- const void *data;
- u32 *status;
-};
-
struct btmtkuart_dev {
struct hci_dev *hdev;
struct serdev_device *serdev;
@@ -153,29 +93,36 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
u32 hlen, status = BTMTK_WMT_INVALID;
struct btmtk_hci_wmt_evt *wmt_evt;
- struct mtk_hci_wmt_cmd wc;
- struct mtk_wmt_hdr *hdr;
+ struct btmtk_hci_wmt_cmd *wc;
+ struct btmtk_wmt_hdr *hdr;
int err;
+ /* Send the WMT command and wait for the corresponding WMT event */
hlen = sizeof(*hdr) + wmt_params->dlen;
if (hlen > 255) {
err = -EINVAL;
goto err_free_skb;
}
- hdr = (struct mtk_wmt_hdr *)&wc;
+ wc = kzalloc(hlen, GFP_KERNEL);
+ if (!wc) {
+ err = -ENOMEM;
+ goto err_free_skb;
+ }
+
+ hdr = &wc->hdr;
hdr->dir = 1;
hdr->op = wmt_params->op;
hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
hdr->flag = wmt_params->flag;
- memcpy(wc.data, wmt_params->data, wmt_params->dlen);
+ memcpy(wc->data, wmt_params->data, wmt_params->dlen);
set_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
- err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
+ err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
if (err < 0) {
clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
- goto err_free_skb;
+ goto err_free_wc;
}
/* The vendor specific WMT commands are all answered by a vendor
@@ -192,14 +139,14 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
if (err == -EINTR) {
bt_dev_err(hdev, "Execution of wmt command interrupted");
clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
- goto err_free_skb;
+ goto err_free_wc;
}
if (err) {
bt_dev_err(hdev, "Execution of wmt command timed out");
clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
err = -ETIMEDOUT;
- goto err_free_skb;
+ goto err_free_wc;
}
/* Parse and handle the return WMT event */
@@ -208,17 +155,17 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
bt_dev_err(hdev, "Wrong op received %d expected %d",
wmt_evt->whdr.op, hdr->op);
err = -EIO;
- goto err_free_skb;
+ goto err_free_wc;
}
switch (wmt_evt->whdr.op) {
- case MTK_WMT_SEMAPHORE:
+ case BTMTK_WMT_SEMAPHORE:
if (wmt_evt->whdr.flag == 2)
status = BTMTK_WMT_PATCH_UNDONE;
else
status = BTMTK_WMT_PATCH_DONE;
break;
- case MTK_WMT_FUNC_CTRL:
+ case BTMTK_WMT_FUNC_CTRL:
wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
status = BTMTK_WMT_ON_DONE;
@@ -232,6 +179,8 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
if (wmt_params->status)
*wmt_params->status = status;
+err_free_wc:
+ kfree(wc);
err_free_skb:
kfree_skb(bdev->evt_skb);
bdev->evt_skb = NULL;
@@ -239,95 +188,12 @@ err_free_skb:
return err;
}
-static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
-{
- struct btmtk_hci_wmt_params wmt_params;
- const struct firmware *fw;
- const u8 *fw_ptr;
- size_t fw_size;
- int err, dlen;
- u8 flag;
-
- err = request_firmware(&fw, fwname, &hdev->dev);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
- return err;
- }
-
- fw_ptr = fw->data;
- fw_size = fw->size;
-
- /* The size of patch header is 30 bytes, should be skip */
- if (fw_size < 30) {
- err = -EINVAL;
- goto free_fw;
- }
-
- fw_size -= 30;
- fw_ptr += 30;
- flag = 1;
-
- wmt_params.op = MTK_WMT_PATCH_DWNLD;
- wmt_params.status = NULL;
-
- while (fw_size > 0) {
- dlen = min_t(int, 250, fw_size);
-
- /* Tell device the position in sequence */
- if (fw_size - dlen <= 0)
- flag = 3;
- else if (fw_size < fw->size - 30)
- flag = 2;
-
- wmt_params.flag = flag;
- wmt_params.dlen = dlen;
- wmt_params.data = fw_ptr;
-
- err = mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
- err);
- goto free_fw;
- }
-
- fw_size -= dlen;
- fw_ptr += dlen;
- }
-
- wmt_params.op = MTK_WMT_RST;
- wmt_params.flag = 4;
- wmt_params.dlen = 0;
- wmt_params.data = NULL;
- wmt_params.status = NULL;
-
- /* Activate funciton the firmware providing to */
- err = mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
- goto free_fw;
- }
-
- /* Wait a few moments for firmware activation done */
- usleep_range(10000, 12000);
-
-free_fw:
- release_firmware(fw);
- return err;
-}
-
static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
struct hci_event_hdr *hdr = (void *)skb->data;
int err;
- /* Fix up the vendor event id with 0xff for vendor specific instead
- * of 0xe4 so that event send via monitoring socket can be parsed
- * properly.
- */
- if (hdr->evt == 0xe4)
- hdr->evt = HCI_EV_VENDOR;
-
/* When someone waits for the WMT event, the skb is being cloned
* and being processed the events from there then.
*/
@@ -343,7 +209,7 @@ static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
if (err < 0)
goto err_free_skb;
- if (hdr->evt == HCI_EV_VENDOR) {
+ if (hdr->evt == HCI_EV_WMT) {
if (test_and_clear_bit(BTMTKUART_TX_WAIT_VND_EVT,
&bdev->tx_state)) {
/* Barrier to sync with other CPUs */
@@ -645,7 +511,7 @@ static int btmtkuart_func_query(struct hci_dev *hdev)
u8 param = 0;
/* Query whether the function is enabled */
- wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
wmt_params.flag = 4;
wmt_params.dlen = sizeof(param);
wmt_params.data = &param;
@@ -672,7 +538,7 @@ static int btmtkuart_change_baudrate(struct hci_dev *hdev)
* ready to change a new baudrate.
*/
baudrate = cpu_to_le32(bdev->desired_speed);
- wmt_params.op = MTK_WMT_HIF;
+ wmt_params.op = BTMTK_WMT_HIF;
wmt_params.flag = 1;
wmt_params.dlen = 4;
wmt_params.data = &baudrate;
@@ -706,7 +572,7 @@ static int btmtkuart_change_baudrate(struct hci_dev *hdev)
usleep_range(20000, 22000);
/* Test the new baudrate */
- wmt_params.op = MTK_WMT_TEST;
+ wmt_params.op = BTMTK_WMT_TEST;
wmt_params.flag = 7;
wmt_params.dlen = 0;
wmt_params.data = NULL;
@@ -741,7 +607,7 @@ static int btmtkuart_setup(struct hci_dev *hdev)
* do any setups.
*/
if (test_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state)) {
- wmt_params.op = MTK_WMT_WAKEUP;
+ wmt_params.op = BTMTK_WMT_WAKEUP;
wmt_params.flag = 3;
wmt_params.dlen = 0;
wmt_params.data = NULL;
@@ -760,7 +626,7 @@ static int btmtkuart_setup(struct hci_dev *hdev)
btmtkuart_change_baudrate(hdev);
/* Query whether the firmware is already download */
- wmt_params.op = MTK_WMT_SEMAPHORE;
+ wmt_params.op = BTMTK_WMT_SEMAPHORE;
wmt_params.flag = 1;
wmt_params.dlen = 0;
wmt_params.data = NULL;
@@ -778,7 +644,7 @@ static int btmtkuart_setup(struct hci_dev *hdev)
}
/* Setup a firmware which the device definitely requires */
- err = mtk_setup_firmware(hdev, bdev->data->fwname);
+ err = btmtk_setup_firmware(hdev, bdev->data->fwname, mtk_hci_wmt_sync);
if (err < 0)
return err;
@@ -801,7 +667,7 @@ ignore_setup_fw:
}
/* Enable Bluetooth protocol */
- wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
wmt_params.flag = 0;
wmt_params.dlen = sizeof(param);
wmt_params.data = &param;
@@ -846,7 +712,7 @@ static int btmtkuart_shutdown(struct hci_dev *hdev)
int err;
/* Disable the device */
- wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
wmt_params.flag = 0;
wmt_params.dlen = sizeof(param);
wmt_params.data = &param;
@@ -1007,6 +873,7 @@ static int btmtkuart_probe(struct serdev_device *serdev)
hdev->setup = btmtkuart_setup;
hdev->shutdown = btmtkuart_shutdown;
hdev->send = btmtkuart_send_frame;
+ hdev->set_bdaddr = btmtk_set_bdaddr;
SET_HCIDEV_DEV(hdev, &serdev->dev);
hdev->manufacturer = 70;
@@ -1131,6 +998,3 @@ MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek Bluetooth Serial driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(FIRMWARE_MT7622);
-MODULE_FIRMWARE(FIRMWARE_MT7663);
-MODULE_FIRMWARE(FIRMWARE_MT7668);
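
Besides the symbol renames, the mtk_hci_wmt_sync() rework above replaces a fixed 256-byte on-stack command buffer with an exact-size heap allocation. A trimmed sketch of the pattern, identifiers as in the patch and error paths omitted:

	hlen = sizeof(*hdr) + wmt_params->dlen;	/* header plus payload, capped at 255 */
	wc = kzalloc(hlen, GFP_KERNEL);
	if (!wc)
		return -ENOMEM;

	wc->hdr.op = wmt_params->op;
	wc->hdr.dlen = cpu_to_le16(wmt_params->dlen + 1);	/* dlen counts flag + payload */
	memcpy(wc->data, wmt_params->data, wmt_params->dlen);

	err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);	/* the data is copied into an skb */
	kfree(wc);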
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index c2bdd1e6060e..481d488bca0f 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -49,6 +49,7 @@ enum btrtl_chip_id {
CHIP_ID_8822C = 13,
CHIP_ID_8761B,
CHIP_ID_8852A = 18,
+ CHIP_ID_8852B = 20,
};
struct id_table {
@@ -149,6 +150,14 @@ static const struct id_table ic_id_table[] = {
.cfg_name = "rtl_bt/rtl8761bu_config" },
/* 8822C with UART interface */
+ { IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0x8, HCI_UART),
+ .config_needed = true,
+ .has_rom_version = true,
+ .has_msft_ext = true,
+ .fw_name = "rtl_bt/rtl8822cs_fw.bin",
+ .cfg_name = "rtl_bt/rtl8822cs_config" },
+
+ /* 8822C with UART interface */
{ IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_UART),
.config_needed = true,
.has_rom_version = true,
@@ -179,6 +188,14 @@ static const struct id_table ic_id_table[] = {
.has_msft_ext = true,
.fw_name = "rtl_bt/rtl8852au_fw.bin",
.cfg_name = "rtl_bt/rtl8852au_config" },
+
+ /* 8852B */
+ { IC_INFO(RTL_ROM_LMP_8852A, 0xb, 0xb, HCI_USB),
+ .config_needed = false,
+ .has_rom_version = true,
+ .has_msft_ext = true,
+ .fw_name = "rtl_bt/rtl8852bu_fw.bin",
+ .cfg_name = "rtl_bt/rtl8852bu_config" },
};
static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev,
@@ -287,6 +304,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
{ RTL_ROM_LMP_8822B, 13 }, /* 8822C */
{ RTL_ROM_LMP_8761A, 14 }, /* 8761B */
{ RTL_ROM_LMP_8852A, 18 }, /* 8852A */
+ { RTL_ROM_LMP_8852A, 20 }, /* 8852B */
};
min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
@@ -749,6 +767,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
switch (btrtl_dev->project_id) {
case CHIP_ID_8822C:
case CHIP_ID_8852A:
+ case CHIP_ID_8852B:
set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
hci_set_aosp_capable(hdev);
@@ -926,3 +945,5 @@ MODULE_FIRMWARE("rtl_bt/rtl8822b_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8822b_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852au_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852au_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8852bu_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8852bu_config.bin");
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index c30d131da784..50df417207af 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -36,32 +36,33 @@ static bool reset = true;
static struct usb_driver btusb_driver;
-#define BTUSB_IGNORE 0x01
-#define BTUSB_DIGIANSWER 0x02
-#define BTUSB_CSR 0x04
-#define BTUSB_SNIFFER 0x08
-#define BTUSB_BCM92035 0x10
-#define BTUSB_BROKEN_ISOC 0x20
-#define BTUSB_WRONG_SCO_MTU 0x40
-#define BTUSB_ATH3012 0x80
-#define BTUSB_INTEL_COMBINED 0x100
-#define BTUSB_INTEL_BOOT 0x200
-#define BTUSB_BCM_PATCHRAM 0x400
-#define BTUSB_MARVELL 0x800
-#define BTUSB_SWAVE 0x1000
-#define BTUSB_AMP 0x4000
-#define BTUSB_QCA_ROME 0x8000
-#define BTUSB_BCM_APPLE 0x10000
-#define BTUSB_REALTEK 0x20000
-#define BTUSB_BCM2045 0x40000
-#define BTUSB_IFNUM_2 0x80000
-#define BTUSB_CW6622 0x100000
-#define BTUSB_MEDIATEK 0x200000
-#define BTUSB_WIDEBAND_SPEECH 0x400000
-#define BTUSB_VALID_LE_STATES 0x800000
-#define BTUSB_QCA_WCN6855 0x1000000
-#define BTUSB_INTEL_BROKEN_SHUTDOWN_LED 0x2000000
-#define BTUSB_INTEL_BROKEN_INITIAL_NCMD 0x4000000
+#define BTUSB_IGNORE BIT(0)
+#define BTUSB_DIGIANSWER BIT(1)
+#define BTUSB_CSR BIT(2)
+#define BTUSB_SNIFFER BIT(3)
+#define BTUSB_BCM92035 BIT(4)
+#define BTUSB_BROKEN_ISOC BIT(5)
+#define BTUSB_WRONG_SCO_MTU BIT(6)
+#define BTUSB_ATH3012 BIT(7)
+#define BTUSB_INTEL_COMBINED BIT(8)
+#define BTUSB_INTEL_BOOT BIT(9)
+#define BTUSB_BCM_PATCHRAM BIT(10)
+#define BTUSB_MARVELL BIT(11)
+#define BTUSB_SWAVE BIT(12)
+#define BTUSB_AMP BIT(13)
+#define BTUSB_QCA_ROME BIT(14)
+#define BTUSB_BCM_APPLE BIT(15)
+#define BTUSB_REALTEK BIT(16)
+#define BTUSB_BCM2045 BIT(17)
+#define BTUSB_IFNUM_2 BIT(18)
+#define BTUSB_CW6622 BIT(19)
+#define BTUSB_MEDIATEK BIT(20)
+#define BTUSB_WIDEBAND_SPEECH BIT(21)
+#define BTUSB_VALID_LE_STATES BIT(22)
+#define BTUSB_QCA_WCN6855 BIT(23)
+#define BTUSB_INTEL_BROKEN_SHUTDOWN_LED BIT(24)
+#define BTUSB_INTEL_BROKEN_INITIAL_NCMD BIT(25)
+#define BTUSB_INTEL_NO_WBS_SUPPORT BIT(26)
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -383,11 +384,14 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x0032), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x0033), .driver_info = BTUSB_INTEL_COMBINED },
+ { USB_DEVICE(0x8087, 0x0035), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL_COMBINED |
+ BTUSB_INTEL_NO_WBS_SUPPORT |
BTUSB_INTEL_BROKEN_INITIAL_NCMD |
BTUSB_INTEL_BROKEN_SHUTDOWN_LED },
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL_COMBINED |
+ BTUSB_INTEL_NO_WBS_SUPPORT |
BTUSB_INTEL_BROKEN_SHUTDOWN_LED },
{ USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL_COMBINED |
@@ -405,6 +409,8 @@ static const struct usb_device_id blacklist_table[] = {
BTUSB_WIDEBAND_SPEECH },
/* Realtek 8852AE Bluetooth devices */
+ { USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x385a), .driver_info = BTUSB_REALTEK |
@@ -429,6 +435,11 @@ static const struct usb_device_id blacklist_table[] = {
/* Additional MediaTek MT7615E Bluetooth devices */
{ USB_DEVICE(0x13d3, 0x3560), .driver_info = BTUSB_MEDIATEK},
+ /* Additional MediaTek MT7663 Bluetooth devices */
+ { USB_DEVICE(0x043e, 0x310c), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+
/* Additional MediaTek MT7668 Bluetooth devices */
{ USB_DEVICE(0x043e, 0x3109), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
@@ -444,6 +455,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3564), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x13d3, 0x3567), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
@@ -463,6 +477,7 @@ static const struct usb_device_id blacklist_table[] = {
/* Additional Realtek 8723BE Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe085), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x0489, 0xe08b), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x04f2, 0xb49f), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
@@ -482,6 +497,8 @@ static const struct usb_device_id blacklist_table[] = {
/* Additional Realtek 8761BU Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x190e), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2550, 0x8761), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Additional Realtek 8821AE Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
@@ -2041,6 +2058,8 @@ static int btusb_setup_csr(struct hci_dev *hdev)
*/
set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
+ set_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks);
+ set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks);
/* Clear the reset quirk since this is not an actual
* early Bluetooth 1.1 device from CSR.
@@ -2051,16 +2070,16 @@ static int btusb_setup_csr(struct hci_dev *hdev)
/*
* Special workaround for these BT 4.0 chip clones, and potentially more:
*
- * - 0x0134: a Barrot 8041a02 (HCI rev: 0x1012 sub: 0x0810)
+ * - 0x0134: a Barrot 8041a02 (HCI rev: 0x0810 sub: 0x1012)
* - 0x7558: IC markings FR3191AHAL 749H15143 (HCI rev/sub-version: 0x0709)
*
* These controllers are really messed-up.
*
* 1. Their bulk RX endpoint will never report any data unless
- * the device was suspended at least once (yes, really).
+ * the device was suspended at least once (yes, really).
* 2. They will not wakeup when autosuspended and receiving data
- * on their bulk RX endpoint from e.g. a keyboard or mouse
- * (IOW remote-wakeup support is broken for the bulk endpoint).
+ * on their bulk RX endpoint from e.g. a keyboard or mouse
+ * (IOW remote-wakeup support is broken for the bulk endpoint).
*
* To fix 1. enable runtime-suspend, force-suspend the
* HCI and then wake-it up by disabling runtime-suspend.
@@ -2080,7 +2099,7 @@ static int btusb_setup_csr(struct hci_dev *hdev)
if (ret >= 0)
msleep(200);
else
- bt_dev_err(hdev, "CSR: Failed to suspend the device for our Barrot 8041a02 receive-issue workaround");
+ bt_dev_warn(hdev, "CSR: Couldn't suspend the device for our Barrot 8041a02 receive-issue workaround");
pm_runtime_forbid(&data->udev->dev);
@@ -2245,7 +2264,6 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
{
struct hci_dev *hdev = urb->context;
struct btusb_data *data = hci_get_drvdata(hdev);
- struct hci_event_hdr *hdr;
struct sk_buff *skb;
int err;
@@ -2265,13 +2283,6 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
skb_put_data(skb, urb->transfer_buffer, urb->actual_length);
- hdr = (void *)skb->data;
- /* Fix up the vendor event id with 0xff for vendor specific
- * instead of 0xe4 so that event send via monitoring socket can
- * be parsed properly.
- */
- hdr->evt = 0xff;
-
/* When someone waits for the WMT event, the skb is being cloned
* and being processed the events from there then.
*/
@@ -2988,6 +2999,7 @@ static int btusb_set_bdaddr_wcn6855(struct hci_dev *hdev,
#define QCA_PATCH_UPDATED 0x80
#define QCA_DFU_TIMEOUT 3000
#define QCA_FLAG_MULTI_NVM 0x80
+#define QCA_BT_RESET_WAIT_MS 100
#define WCN6855_2_0_RAM_VERSION_GF 0x400c1200
#define WCN6855_2_1_RAM_VERSION_GF 0x400c1211
@@ -3314,6 +3326,13 @@ static int btusb_setup_qca(struct hci_dev *hdev)
err = btusb_setup_qca_load_nvm(hdev, &ver, info);
if (err < 0)
return err;
+
+ /* WCN6855 2.1 resets itself to apply the firmware downloaded
+ * here, so wait ~100ms for the reset to complete before going
+ * ahead; otherwise the subsequent enable may fail.
+ */
+ if (info->rom_version == 0x00130201)
+ msleep(QCA_BT_RESET_WAIT_MS);
}
return 0;
@@ -3737,6 +3756,9 @@ static int btusb_probe(struct usb_interface *intf,
hdev->send = btusb_send_frame_intel;
hdev->cmd_timeout = btusb_intel_cmd_timeout;
+ if (id->driver_info & BTUSB_INTEL_NO_WBS_SUPPORT)
+ btintel_set_flag(hdev, INTEL_ROM_LEGACY_NO_WBS_SUPPORT);
+
if (id->driver_info & BTUSB_INTEL_BROKEN_INITIAL_NCMD)
btintel_set_flag(hdev, INTEL_BROKEN_INITIAL_NCMD);
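
One caveat on the BTUSB_* conversion above: BIT(n) from linux/bits.h is just (1UL << (n)), but the rewritten table is also compacted. The old list had no 0x2000 entry, so every flag from BTUSB_AMP upward shifts down one bit position. That is harmless because these bits only ever travel through the driver_info field inside btusb itself, e.g.:

	/* BTUSB_SWAVE: old 0x1000 == BIT(12), unchanged.
	 * BTUSB_AMP: old 0x4000 == BIT(14), now BIT(13); the value changed,
	 * but every use compares against the same symbol, so behavior is
	 * identical.
	 */
	if (id->driver_info & BTUSB_AMP)
		hdev->dev_type = HCI_AMP;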
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index d634a27bc850..785f445dd60d 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -20,6 +20,7 @@
#include <linux/regulator/consumer.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
#include <linux/tty.h>
#include <linux/interrupt.h>
#include <linux/dmi.h>
@@ -870,8 +871,24 @@ unlock:
#endif
/* Some firmware reports an IRQ which does not work (wrong pin in fw table?) */
+static struct gpiod_lookup_table asus_tf103c_irq_gpios = {
+ .dev_id = "serial0-0",
+ .table = {
+ GPIO_LOOKUP("INT33FC:02", 17, "host-wakeup-alt", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
static const struct dmi_system_id bcm_broken_irq_dmi_table[] = {
{
+ .ident = "Asus TF103C",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
+ },
+ .driver_data = &asus_tf103c_irq_gpios,
+ },
+ {
.ident = "Meegopad T08",
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR,
@@ -1027,7 +1044,8 @@ static struct clk *bcm_get_txco(struct device *dev)
static int bcm_get_resources(struct bcm_device *dev)
{
- const struct dmi_system_id *dmi_id;
+ const struct dmi_system_id *broken_irq_dmi_id;
+ const char *irq_con_id = "host-wakeup";
int err;
dev->name = dev_name(dev->dev);
@@ -1083,23 +1101,33 @@ static int bcm_get_resources(struct bcm_device *dev)
if (err)
return err;
+ broken_irq_dmi_id = dmi_first_match(bcm_broken_irq_dmi_table);
+ if (broken_irq_dmi_id && broken_irq_dmi_id->driver_data) {
+ gpiod_add_lookup_table(broken_irq_dmi_id->driver_data);
+ irq_con_id = "host-wakeup-alt";
+ dev->irq_active_low = false;
+ dev->irq = 0;
+ }
+
/* IRQ can be declared in ACPI table as Interrupt or GpioInt */
if (dev->irq <= 0) {
struct gpio_desc *gpio;
- gpio = devm_gpiod_get_optional(dev->dev, "host-wakeup",
- GPIOD_IN);
+ gpio = devm_gpiod_get_optional(dev->dev, irq_con_id, GPIOD_IN);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
dev->irq = gpiod_to_irq(gpio);
}
- dmi_id = dmi_first_match(bcm_broken_irq_dmi_table);
- if (dmi_id) {
- dev_info(dev->dev, "%s: Has a broken IRQ config, disabling IRQ support / runtime-pm\n",
- dmi_id->ident);
- dev->irq = 0;
+ if (broken_irq_dmi_id) {
+ if (broken_irq_dmi_id->driver_data) {
+ gpiod_remove_lookup_table(broken_irq_dmi_id->driver_data);
+ } else {
+ dev_info(dev->dev, "%s: Has a broken IRQ config, disabling IRQ support / runtime-pm\n",
+ broken_irq_dmi_id->ident);
+ dev->irq = 0;
+ }
}
dev_dbg(dev->dev, "BCM irq: %d\n", dev->irq);
@@ -1513,6 +1541,8 @@ static const struct of_device_id bcm_bluetooth_of_match[] = {
{ .compatible = "brcm,bcm4330-bt" },
{ .compatible = "brcm,bcm4334-bt" },
{ .compatible = "brcm,bcm4345c5" },
+ { .compatible = "brcm,bcm43430a0-bt" },
+ { .compatible = "brcm,bcm43430a1-bt" },
{ .compatible = "brcm,bcm43438-bt", .data = &bcm43438_device_data },
{ .compatible = "brcm,bcm43540-bt", .data = &bcm4354_device_data },
{ .compatible = "brcm,bcm4335a0" },
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 34286ffe0568..c5a0409ef84f 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -629,9 +629,11 @@ static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
break;
}
- pm_runtime_get_sync(&hu->serdev->dev);
- pm_runtime_mark_last_busy(&hu->serdev->dev);
- pm_runtime_put_autosuspend(&hu->serdev->dev);
+ if (hu->serdev) {
+ pm_runtime_get_sync(&hu->serdev->dev);
+ pm_runtime_mark_last_busy(&hu->serdev->dev);
+ pm_runtime_put_autosuspend(&hu->serdev->dev);
+ }
return 0;
}
@@ -966,6 +968,11 @@ static void h5_btrtl_open(struct h5 *h5)
pm_runtime_enable(&h5->hu->serdev->dev);
}
+ /* The controller needs a reset to start up */
+ gpiod_set_value_cansleep(h5->enable_gpio, 0);
+ gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
+ msleep(100);
+
/* The controller needs up to 500ms to wakeup */
gpiod_set_value_cansleep(h5->enable_gpio, 1);
gpiod_set_value_cansleep(h5->device_wake_gpio, 1);
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index eb1e736efeeb..4eb420a9ed04 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -509,7 +509,7 @@ static int send_command_from_firmware(struct ll_device *lldev,
return 0;
}
-/**
+/*
* download_firmware -
* internal function which parses through the .bts firmware
* script file intreprets SEND, DELAY actions only as of now
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 3b00d82d36cf..4cda890ce647 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -305,6 +305,8 @@ int hci_uart_register_device(struct hci_uart *hu,
if (err)
return err;
+ percpu_init_rwsem(&hu->proto_lock);
+
err = p->open(hu);
if (err)
goto err_open;
@@ -327,7 +329,6 @@ int hci_uart_register_device(struct hci_uart *hu,
INIT_WORK(&hu->init_ready, hci_uart_init_work);
INIT_WORK(&hu->write_work, hci_uart_write_work);
- percpu_init_rwsem(&hu->proto_lock);
/* Only when vendor specific setup callback is provided, consider
* the manufacturer information valid. This avoids filling in the
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index bccb275b65ba..60fbd42041dd 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -64,6 +64,11 @@ struct cs_timing_state {
struct cs_timing cs[MAX_CS_COUNT];
};
+struct weim_priv {
+ void __iomem *base;
+ struct cs_timing_state timing_state;
+};
+
static const struct of_device_id weim_id_table[] = {
/* i.MX1/21 */
{ .compatible = "fsl,imx1-weim", .data = &imx1_weim_devtype, },
@@ -128,21 +133,26 @@ err:
}
/* Parse and set the timing for this device. */
-static int weim_timing_setup(struct device *dev,
- struct device_node *np, void __iomem *base,
- const struct imx_weim_devtype *devtype,
- struct cs_timing_state *ts)
+static int weim_timing_setup(struct device *dev, struct device_node *np,
+ const struct imx_weim_devtype *devtype)
{
u32 cs_idx, value[MAX_CS_REGS_COUNT];
int i, ret;
int reg_idx, num_regs;
struct cs_timing *cst;
+ struct weim_priv *priv;
+ struct cs_timing_state *ts;
+ void __iomem *base;
if (WARN_ON(devtype->cs_regs_count > MAX_CS_REGS_COUNT))
return -EINVAL;
if (WARN_ON(devtype->cs_count > MAX_CS_COUNT))
return -EINVAL;
+ priv = dev_get_drvdata(dev);
+ base = priv->base;
+ ts = &priv->timing_state;
+
ret = of_property_read_u32_array(np, "fsl,weim-cs-timing",
value, devtype->cs_regs_count);
if (ret)
@@ -189,14 +199,15 @@ static int weim_timing_setup(struct device *dev,
return 0;
}
-static int weim_parse_dt(struct platform_device *pdev, void __iomem *base)
+static int weim_parse_dt(struct platform_device *pdev)
{
const struct of_device_id *of_id = of_match_device(weim_id_table,
&pdev->dev);
const struct imx_weim_devtype *devtype = of_id->data;
struct device_node *child;
int ret, have_child = 0;
- struct cs_timing_state ts = {};
+ struct weim_priv *priv;
+ void __iomem *base;
u32 reg;
if (devtype == &imx50_weim_devtype) {
@@ -205,6 +216,9 @@ static int weim_parse_dt(struct platform_device *pdev, void __iomem *base)
return ret;
}
+ priv = dev_get_drvdata(&pdev->dev);
+ base = priv->base;
+
if (of_property_read_bool(pdev->dev.of_node, "fsl,burst-clk-enable")) {
if (devtype->wcr_bcm) {
reg = readl(base + devtype->wcr_offset);
@@ -229,7 +243,7 @@ static int weim_parse_dt(struct platform_device *pdev, void __iomem *base)
}
for_each_available_child_of_node(pdev->dev.of_node, child) {
- ret = weim_timing_setup(&pdev->dev, child, base, devtype, &ts);
+ ret = weim_timing_setup(&pdev->dev, child, devtype);
if (ret)
dev_warn(&pdev->dev, "%pOF set timing failed.\n",
child);
@@ -248,17 +262,25 @@ static int weim_parse_dt(struct platform_device *pdev, void __iomem *base)
static int weim_probe(struct platform_device *pdev)
{
+ struct weim_priv *priv;
struct resource *res;
struct clk *clk;
void __iomem *base;
int ret;
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
/* get the resource */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
+ priv->base = base;
+ dev_set_drvdata(&pdev->dev, priv);
+
/* get the clock */
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk))
@@ -269,7 +291,7 @@ static int weim_probe(struct platform_device *pdev)
return ret;
/* parse the device node */
- ret = weim_parse_dt(pdev, base);
+ ret = weim_parse_dt(pdev);
if (ret)
clk_disable_unprepare(clk);
else
@@ -278,6 +300,81 @@ static int weim_probe(struct platform_device *pdev)
return ret;
}
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+static int of_weim_notify(struct notifier_block *nb, unsigned long action,
+ void *arg)
+{
+ const struct imx_weim_devtype *devtype;
+ struct of_reconfig_data *rd = arg;
+ const struct of_device_id *of_id;
+ struct platform_device *pdev;
+ int ret = NOTIFY_OK;
+
+ switch (of_reconfig_get_state_change(action, rd)) {
+ case OF_RECONFIG_CHANGE_ADD:
+ of_id = of_match_node(weim_id_table, rd->dn->parent);
+ if (!of_id)
+ return NOTIFY_OK; /* not for us */
+
+ devtype = of_id->data;
+
+ pdev = of_find_device_by_node(rd->dn->parent);
+ if (!pdev) {
+ pr_err("%s: could not find platform device for '%pOF'\n",
+ __func__, rd->dn->parent);
+
+ return notifier_from_errno(-EINVAL);
+ }
+
+ if (weim_timing_setup(&pdev->dev, rd->dn, devtype))
+ dev_warn(&pdev->dev,
+ "Failed to setup timing for '%pOF'\n", rd->dn);
+
+ if (!of_node_check_flag(rd->dn, OF_POPULATED)) {
+ if (!of_platform_device_create(rd->dn, NULL, &pdev->dev)) {
+ dev_err(&pdev->dev,
+ "Failed to create child device '%pOF'\n",
+ rd->dn);
+ ret = notifier_from_errno(-EINVAL);
+ }
+ }
+
+ platform_device_put(pdev);
+
+ break;
+ case OF_RECONFIG_CHANGE_REMOVE:
+ if (!of_node_check_flag(rd->dn, OF_POPULATED))
+ return NOTIFY_OK; /* device already destroyed */
+
+ of_id = of_match_node(weim_id_table, rd->dn->parent);
+ if (!of_id)
+ return NOTIFY_OK; /* not for us */
+
+ pdev = of_find_device_by_node(rd->dn);
+ if (!pdev) {
+ pr_err("%s: could not find platform device for '%pOF'\n",
+ __func__, rd->dn);
+
+ ret = notifier_from_errno(-EINVAL);
+ } else {
+ of_platform_device_destroy(&pdev->dev, NULL);
+ platform_device_put(pdev);
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static struct notifier_block weim_of_notifier = {
+ .notifier_call = of_weim_notify,
+};
+#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
+
static struct platform_driver weim_driver = {
.driver = {
.name = "imx-weim",
@@ -285,7 +382,27 @@ static struct platform_driver weim_driver = {
},
.probe = weim_probe,
};
-module_platform_driver(weim_driver);
+
+static int __init weim_init(void)
+{
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+ WARN_ON(of_reconfig_notifier_register(&weim_of_notifier));
+#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
+
+ return platform_driver_register(&weim_driver);
+}
+module_init(weim_init);
+
+static void __exit weim_exit(void)
+{
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+ of_reconfig_notifier_unregister(&weim_of_notifier);
+#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
+
+ platform_driver_unregister(&weim_driver);
+}
+module_exit(weim_exit);
MODULE_AUTHOR("Freescale Semiconductor Inc.");
MODULE_DESCRIPTION("i.MX EIM Controller Driver");
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index fd87a59837fa..5eb0fe73ddc4 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -815,7 +815,7 @@ static int moxtet_probe(struct spi_device *spi)
return 0;
}
-static int moxtet_remove(struct spi_device *spi)
+static void moxtet_remove(struct spi_device *spi)
{
struct moxtet *moxtet = spi_get_drvdata(spi);
@@ -828,8 +828,6 @@ static int moxtet_remove(struct spi_device *spi)
device_for_each_child(moxtet->dev, NULL, __unregister);
mutex_destroy(&moxtet->lock);
-
- return 0;
}
static const struct of_device_id moxtet_dt_ids[] = {
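
The moxtet change is part of the tree-wide conversion of spi_driver ->remove() callbacks to return void: the SPI core ignored the returned value anyway, so propagating an error only produced a misleading log message. A sketch of the new callback shape (struct example_state is hypothetical):

#include <linux/mutex.h>
#include <linux/spi/spi.h>

struct example_state {
	struct mutex lock;
};

static void example_remove(struct spi_device *spi)
{
	struct example_state *st = spi_get_drvdata(spi);

	/* Failures here could only be logged anyway: the device is being
	 * unbound regardless, which is why there is no return value. */
	mutex_destroy(&st->lock);
}
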
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index faead41709bc..8e78b37d0f6a 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/cdrom.h>
-#include <linux/genhd.h>
#include <linux/bio.h>
#include <linux/blk-mq.h>
#include <linux/interrupt.h>
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 9704963f9d50..a087156a5818 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -401,7 +401,7 @@ config HW_RANDOM_MESON
config HW_RANDOM_CAVIUM
tristate "Cavium ThunderX Random Number Generator support"
- depends on HW_RANDOM && PCI && ARM64
+ depends on HW_RANDOM && PCI && ARCH_THUNDER
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index ecb71c4317a5..b8effe77d80f 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -13,13 +13,16 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/hw_random.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#define TRNG_CR 0x00
#define TRNG_MR 0x04
#define TRNG_ISR 0x1c
+#define TRNG_ISR_DATRDY BIT(0)
#define TRNG_ODATA 0x50
#define TRNG_KEY 0x524e4700 /* RNG */
@@ -34,37 +37,79 @@ struct atmel_trng {
struct clk *clk;
void __iomem *base;
struct hwrng rng;
+ bool has_half_rate;
};
+static bool atmel_trng_wait_ready(struct atmel_trng *trng, bool wait)
+{
+ int ready;
+
+ ready = readl(trng->base + TRNG_ISR) & TRNG_ISR_DATRDY;
+ if (!ready && wait)
+ readl_poll_timeout(trng->base + TRNG_ISR, ready,
+ ready & TRNG_ISR_DATRDY, 1000, 20000);
+
+ return !!(ready & TRNG_ISR_DATRDY);
+}
+
static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
bool wait)
{
struct atmel_trng *trng = container_of(rng, struct atmel_trng, rng);
u32 *data = buf;
+ int ret;
- /* data ready? */
- if (readl(trng->base + TRNG_ISR) & 1) {
- *data = readl(trng->base + TRNG_ODATA);
- /*
- ensure data ready is only set again AFTER the next data
- word is ready in case it got set between checking ISR
- and reading ODATA, so we don't risk re-reading the
- same word
- */
- readl(trng->base + TRNG_ISR);
- return 4;
- } else
- return 0;
+ ret = pm_runtime_get_sync((struct device *)trng->rng.priv);
+ if (ret < 0) {
+ pm_runtime_put_sync((struct device *)trng->rng.priv);
+ return ret;
+ }
+
+ ret = atmel_trng_wait_ready(trng, wait);
+ if (!ret)
+ goto out;
+
+ *data = readl(trng->base + TRNG_ODATA);
+ /*
+ * ensure data ready is only set again AFTER the next data word is ready
+ * in case it got set between checking ISR and reading ODATA, so we
+ * don't risk re-reading the same word
+ */
+ readl(trng->base + TRNG_ISR);
+ ret = 4;
+
+out:
+ pm_runtime_mark_last_busy((struct device *)trng->rng.priv);
+ pm_runtime_put_sync_autosuspend((struct device *)trng->rng.priv);
+ return ret;
}
-static void atmel_trng_enable(struct atmel_trng *trng)
+static int atmel_trng_init(struct atmel_trng *trng)
{
+ unsigned long rate;
+ int ret;
+
+ ret = clk_prepare_enable(trng->clk);
+ if (ret)
+ return ret;
+
+ if (trng->has_half_rate) {
+ rate = clk_get_rate(trng->clk);
+
+ /* if peripheral clk is above 100MHz, set HALFR */
+ if (rate > 100000000)
+ writel(TRNG_HALFR, trng->base + TRNG_MR);
+ }
+
writel(TRNG_KEY | 1, trng->base + TRNG_CR);
+
+ return 0;
}
-static void atmel_trng_disable(struct atmel_trng *trng)
+static void atmel_trng_cleanup(struct atmel_trng *trng)
{
writel(TRNG_KEY, trng->base + TRNG_CR);
+ clk_disable_unprepare(trng->clk);
}
static int atmel_trng_probe(struct platform_device *pdev)
@@ -88,32 +133,31 @@ static int atmel_trng_probe(struct platform_device *pdev)
if (!data)
return -ENODEV;
- if (data->has_half_rate) {
- unsigned long rate = clk_get_rate(trng->clk);
-
- /* if peripheral clk is above 100MHz, set HALFR */
- if (rate > 100000000)
- writel(TRNG_HALFR, trng->base + TRNG_MR);
- }
-
- ret = clk_prepare_enable(trng->clk);
- if (ret)
- return ret;
-
- atmel_trng_enable(trng);
+ trng->has_half_rate = data->has_half_rate;
trng->rng.name = pdev->name;
trng->rng.read = atmel_trng_read;
+ trng->rng.priv = (unsigned long)&pdev->dev;
+ platform_set_drvdata(pdev, trng);
- ret = devm_hwrng_register(&pdev->dev, &trng->rng);
+#ifndef CONFIG_PM
+ ret = atmel_trng_init(trng);
if (ret)
- goto err_register;
+ return ret;
+#endif
- platform_set_drvdata(pdev, trng);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
- return 0;
+ ret = devm_hwrng_register(&pdev->dev, &trng->rng);
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+#ifndef CONFIG_PM
+ atmel_trng_cleanup(trng);
+#endif
+ }
-err_register:
- clk_disable_unprepare(trng->clk);
return ret;
}
@@ -121,43 +165,35 @@ static int atmel_trng_remove(struct platform_device *pdev)
{
struct atmel_trng *trng = platform_get_drvdata(pdev);
-
- atmel_trng_disable(trng);
- clk_disable_unprepare(trng->clk);
+ atmel_trng_cleanup(trng);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
return 0;
}
-#ifdef CONFIG_PM
-static int atmel_trng_suspend(struct device *dev)
+static int __maybe_unused atmel_trng_runtime_suspend(struct device *dev)
{
struct atmel_trng *trng = dev_get_drvdata(dev);
- atmel_trng_disable(trng);
- clk_disable_unprepare(trng->clk);
+ atmel_trng_cleanup(trng);
return 0;
}
-static int atmel_trng_resume(struct device *dev)
+static int __maybe_unused atmel_trng_runtime_resume(struct device *dev)
{
struct atmel_trng *trng = dev_get_drvdata(dev);
- int ret;
- ret = clk_prepare_enable(trng->clk);
- if (ret)
- return ret;
-
- atmel_trng_enable(trng);
-
- return 0;
+ return atmel_trng_init(trng);
}
-static const struct dev_pm_ops atmel_trng_pm_ops = {
- .suspend = atmel_trng_suspend,
- .resume = atmel_trng_resume,
+static const struct dev_pm_ops __maybe_unused atmel_trng_pm_ops = {
+ SET_RUNTIME_PM_OPS(atmel_trng_runtime_suspend,
+ atmel_trng_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
-#endif /* CONFIG_PM */
static const struct atmel_trng_data at91sam9g45_config = {
.has_half_rate = false,
@@ -185,9 +221,7 @@ static struct platform_driver atmel_trng_driver = {
.remove = atmel_trng_remove,
.driver = {
.name = "atmel-trng",
-#ifdef CONFIG_PM
- .pm = &atmel_trng_pm_ops,
-#endif /* CONFIG_PM */
+ .pm = pm_ptr(&atmel_trng_pm_ops),
.of_match_table = atmel_trng_dt_ids,
},
};
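
The atmel-rng rework brackets each read with runtime-PM get/put so the TRNG is clocked only while data is drawn, with clock setup moved into atmel_trng_init() as the runtime-resume hook. A condensed sketch of that bracket, assuming autosuspend was configured in probe as above (names illustrative):

#include <linux/io.h>
#include <linux/pm_runtime.h>

static int example_read_word(struct device *dev, void __iomem *odata, u32 *out)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* resume; bumps usage count even on failure */
	if (ret < 0) {
		pm_runtime_put_sync(dev);	/* so the count must still be dropped */
		return ret;
	}

	*out = readl(odata);

	pm_runtime_mark_last_busy(dev);		/* rearm the autosuspend timer */
	pm_runtime_put_sync_autosuspend(dev);	/* drop the ref; suspend after the delay */
	return 0;
}
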
diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c
index 6f66919652bf..7c55f4cf4a8b 100644
--- a/drivers/char/hw_random/cavium-rng-vf.c
+++ b/drivers/char/hw_random/cavium-rng-vf.c
@@ -179,7 +179,7 @@ static int cavium_map_pf_regs(struct cavium_rng *rng)
pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_CAVIUM_RNG_PF, NULL);
if (!pdev) {
- dev_err(&pdev->dev, "Cannot find RNG PF device\n");
+ pr_err("Cannot find RNG PF device\n");
return -EIO;
}
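
The cavium-rng-vf fix removes a NULL dereference: the old message derived its logging device from the very pointer whose lookup had just failed. A sketch of the corrected shape (the PF device ID below is hypothetical):

#include <linux/pci.h>

static int example_find_pf(void)
{
	struct pci_dev *pf = pci_get_device(PCI_VENDOR_ID_CAVIUM,
					    0xa018 /* hypothetical PF id */, NULL);

	if (!pf) {
		/* pf is NULL here, so dev_err(&pf->dev, ...) would oops;
		 * pr_err() needs no device. */
		pr_err("Cannot find RNG PF device\n");
		return -EIO;
	}
	pci_dev_put(pf);
	return 0;
}
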
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index a3db27916256..16f227b995e8 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
+#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
@@ -31,7 +32,7 @@ static struct hwrng *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
static struct task_struct *hwrng_fill;
-/* list of registered rngs, sorted decending by quality */
+/* list of registered rngs */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
@@ -44,14 +45,14 @@ static unsigned short default_quality; /* = 0; default to "off" */
module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
- "current hwrng entropy estimation per 1024 bits of input");
+ "current hwrng entropy estimation per 1024 bits of input -- obsolete, use rng_quality instead");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
"default entropy content of hwrng per 1024 bits of input");
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
-static void start_khwrngd(void);
+static void hwrng_manage_rngd(struct hwrng *rng);
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
int wait);
@@ -64,13 +65,12 @@ static size_t rng_buffer_size(void)
static void add_early_randomness(struct hwrng *rng)
{
int bytes_read;
- size_t size = min_t(size_t, 16, rng_buffer_size());
mutex_lock(&reading_mutex);
- bytes_read = rng_get_data(rng, rng_buffer, size, 0);
+ bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0);
mutex_unlock(&reading_mutex);
if (bytes_read > 0)
- add_device_randomness(rng_buffer, bytes_read);
+ add_device_randomness(rng_fillbuf, bytes_read);
}
static inline void cleanup_rng(struct kref *kref)
@@ -161,14 +161,13 @@ static int hwrng_init(struct hwrng *rng)
reinit_completion(&rng->cleanup_done);
skip_init:
- current_quality = rng->quality ? : default_quality;
- if (current_quality > 1024)
- current_quality = 1024;
+ if (!rng->quality)
+ rng->quality = default_quality;
+ if (rng->quality > 1024)
+ rng->quality = 1024;
+ current_quality = rng->quality; /* obsolete */
- if (current_quality == 0 && hwrng_fill)
- kthread_stop(hwrng_fill);
- if (current_quality > 0 && !hwrng_fill)
- start_khwrngd();
+ hwrng_manage_rngd(rng);
return 0;
}
@@ -298,24 +297,28 @@ static struct miscdevice rng_miscdev = {
static int enable_best_rng(void)
{
+ struct hwrng *rng, *new_rng = NULL;
int ret = -ENODEV;
BUG_ON(!mutex_is_locked(&rng_mutex));
- /* rng_list is sorted by quality, use the best (=first) one */
- if (!list_empty(&rng_list)) {
- struct hwrng *new_rng;
-
- new_rng = list_entry(rng_list.next, struct hwrng, list);
- ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
- if (!ret)
- cur_rng_set_by_user = 0;
- } else {
+ /* no rng to use? */
+ if (list_empty(&rng_list)) {
drop_current_rng();
cur_rng_set_by_user = 0;
- ret = 0;
+ return 0;
+ }
+
+ /* use the rng which offers the best quality */
+ list_for_each_entry(rng, &rng_list, list) {
+ if (!new_rng || rng->quality > new_rng->quality)
+ new_rng = rng;
}
+ ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
+ if (!ret)
+ cur_rng_set_by_user = 0;
+
return ret;
}
@@ -336,8 +339,9 @@ static ssize_t rng_current_store(struct device *dev,
} else {
list_for_each_entry(rng, &rng_list, list) {
if (sysfs_streq(rng->name, buf)) {
- cur_rng_set_by_user = 1;
err = set_current_rng(rng);
+ if (!err)
+ cur_rng_set_by_user = 1;
break;
}
}
@@ -399,14 +403,76 @@ static ssize_t rng_selected_show(struct device *dev,
return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
}
+static ssize_t rng_quality_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t ret;
+ struct hwrng *rng;
+
+ rng = get_current_rng();
+ if (IS_ERR(rng))
+ return PTR_ERR(rng);
+
+ if (!rng) /* no need to put_rng */
+ return -ENODEV;
+
+ ret = sysfs_emit(buf, "%hu\n", rng->quality);
+ put_rng(rng);
+
+ return ret;
+}
+
+static ssize_t rng_quality_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ u16 quality;
+ int ret = -EINVAL;
+
+ if (len < 2)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&rng_mutex);
+ if (ret)
+ return -ERESTARTSYS;
+
+ ret = kstrtou16(buf, 0, &quality);
+ if (ret || quality > 1024) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!current_rng) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ current_rng->quality = quality;
+ current_quality = quality; /* obsolete */
+
+ /* the best available RNG may have changed */
+ ret = enable_best_rng();
+
+ /* start/stop rngd if necessary */
+ if (current_rng)
+ hwrng_manage_rngd(current_rng);
+
+out:
+ mutex_unlock(&rng_mutex);
+ return ret ? ret : len;
+}
+
static DEVICE_ATTR_RW(rng_current);
static DEVICE_ATTR_RO(rng_available);
static DEVICE_ATTR_RO(rng_selected);
+static DEVICE_ATTR_RW(rng_quality);
static struct attribute *rng_dev_attrs[] = {
&dev_attr_rng_current.attr,
&dev_attr_rng_available.attr,
&dev_attr_rng_selected.attr,
+ &dev_attr_rng_quality.attr,
NULL
};
@@ -424,9 +490,11 @@ static int __init register_miscdev(void)
static int hwrng_fillfn(void *unused)
{
+ size_t entropy, entropy_credit = 0; /* in 1/1024 of a bit */
long rc;
while (!kthread_should_stop()) {
+ unsigned short quality;
struct hwrng *rng;
rng = get_current_rng();
@@ -435,27 +503,49 @@ static int hwrng_fillfn(void *unused)
mutex_lock(&reading_mutex);
rc = rng_get_data(rng, rng_fillbuf,
rng_buffer_size(), 1);
+ if (current_quality != rng->quality)
+ rng->quality = current_quality; /* obsolete */
+ quality = rng->quality;
mutex_unlock(&reading_mutex);
put_rng(rng);
+
+ if (!quality)
+ break;
+
if (rc <= 0) {
pr_warn("hwrng: no data available\n");
msleep_interruptible(10000);
continue;
}
+
+ /* If we cannot credit at least one bit of entropy,
+ * keep track of the remainder for the next iteration
+ */
+ entropy = rc * quality * 8 + entropy_credit;
+ if ((entropy >> 10) == 0)
+ entropy_credit = entropy;
+
/* Outside lock, sure, but y'know: randomness. */
add_hwgenerator_randomness((void *)rng_fillbuf, rc,
- rc * current_quality * 8 >> 10);
+ entropy >> 10);
}
hwrng_fill = NULL;
return 0;
}
-static void start_khwrngd(void)
+static void hwrng_manage_rngd(struct hwrng *rng)
{
- hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
- if (IS_ERR(hwrng_fill)) {
- pr_err("hwrng_fill thread creation failed\n");
- hwrng_fill = NULL;
+ if (WARN_ON(!mutex_is_locked(&rng_mutex)))
+ return;
+
+ if (rng->quality == 0 && hwrng_fill)
+ kthread_stop(hwrng_fill);
+ if (rng->quality > 0 && !hwrng_fill) {
+ hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
+ if (IS_ERR(hwrng_fill)) {
+ pr_err("hwrng_fill thread creation failed\n");
+ hwrng_fill = NULL;
+ }
}
}
@@ -463,7 +553,6 @@ int hwrng_register(struct hwrng *rng)
{
int err = -EINVAL;
struct hwrng *tmp;
- struct list_head *rng_list_ptr;
bool is_new_current = false;
if (!rng->name || (!rng->data_read && !rng->read))
@@ -477,18 +566,11 @@ int hwrng_register(struct hwrng *rng)
if (strcmp(tmp->name, rng->name) == 0)
goto out_unlock;
}
+ list_add_tail(&rng->list, &rng_list);
init_completion(&rng->cleanup_done);
complete(&rng->cleanup_done);
- /* rng_list is sorted by decreasing quality */
- list_for_each(rng_list_ptr, &rng_list) {
- tmp = list_entry(rng_list_ptr, struct hwrng, list);
- if (tmp->quality < rng->quality)
- break;
- }
- list_add_tail(&rng->list, rng_list_ptr);
-
if (!current_rng ||
(!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
/*
@@ -638,7 +720,7 @@ static void __exit hwrng_modexit(void)
unregister_miscdev();
}
-module_init(hwrng_modinit);
+fs_initcall(hwrng_modinit); /* depends on misc_register() */
module_exit(hwrng_modexit);
MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index 67947a19aa22..e8f9621e7954 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -65,14 +65,14 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
out_release:
amba_release_regions(dev);
out_clk:
- clk_disable(rng_clk);
+ clk_disable_unprepare(rng_clk);
return ret;
}
static void nmk_rng_remove(struct amba_device *dev)
{
amba_release_regions(dev);
- clk_disable(rng_clk);
+ clk_disable_unprepare(rng_clk);
}
static const struct amba_id nmk_rng_ids[] = {
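
The nomadik-rng fix restores clock API balance: clk_prepare_enable() must be undone by clk_disable_unprepare(), since clk_disable() alone leaves the prepare count elevated. A sketch of the balanced pair (names illustrative):

#include <linux/clk.h>
#include <linux/err.h>

static struct clk *example_clk_start(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, NULL);
	int ret;

	if (IS_ERR(clk))
		return clk;
	ret = clk_prepare_enable(clk);	/* prepare + enable in one call... */
	if (ret)
		return ERR_PTR(ret);
	return clk;
}

static void example_clk_stop(struct clk *clk)
{
	clk_disable_unprepare(clk);	/* ...so undo both on the way out */
}
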
diff --git a/drivers/char/hw_random/optee-rng.c b/drivers/char/hw_random/optee-rng.c
index 135a82590923..a948c0727b2b 100644
--- a/drivers/char/hw_random/optee-rng.c
+++ b/drivers/char/hw_random/optee-rng.c
@@ -145,10 +145,10 @@ static int optee_rng_init(struct hwrng *rng)
struct optee_rng_private *pvt_data = to_optee_rng_private(rng);
struct tee_shm *entropy_shm_pool = NULL;
- entropy_shm_pool = tee_shm_alloc(pvt_data->ctx, MAX_ENTROPY_REQ_SZ,
- TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+ entropy_shm_pool = tee_shm_alloc_kernel_buf(pvt_data->ctx,
+ MAX_ENTROPY_REQ_SZ);
if (IS_ERR(entropy_shm_pool)) {
- dev_err(pvt_data->dev, "tee_shm_alloc failed\n");
+ dev_err(pvt_data->dev, "tee_shm_alloc_kernel_buf failed\n");
return PTR_ERR(entropy_shm_pool);
}
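
optee-rng switches to tee_shm_alloc_kernel_buf(), which allocates kernel-private shared memory without the TEE_SHM_* flag plumbing of the old allocator. A sketch of the call (the 4096-byte size is illustrative):

#include <linux/err.h>
#include <linux/tee_drv.h>

static struct tee_shm *example_alloc_shm(struct tee_context *ctx)
{
	struct tee_shm *shm = tee_shm_alloc_kernel_buf(ctx, 4096);

	if (IS_ERR(shm))
		pr_err("tee_shm_alloc_kernel_buf failed\n");
	return shm;
}
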
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 3404a91edf29..66ce7c03a142 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,320 +1,28 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
- * random.c -- A strong random number generator
- *
* Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- *
* Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
- *
- * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
- * rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, and the entire permission notice in its entirety,
- * including the disclaimer of warranties.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote
- * products derived from this software without specific prior
- * written permission.
- *
- * ALTERNATIVELY, this product may be distributed under the terms of
- * the GNU General Public License, in which case the provisions of the GPL are
- * required INSTEAD OF the above restrictions. (This clause is
- * necessary due to a potential bad interaction between the GPL and
- * the restrictions contained in a BSD-style copyright.)
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
- * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
- * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- */
-
-/*
- * (now, with legal B.S. out of the way.....)
- *
- * This routine gathers environmental noise from device drivers, etc.,
- * and returns good random numbers, suitable for cryptographic use.
- * Besides the obvious cryptographic uses, these numbers are also good
- * for seeding TCP sequence numbers, and other places where it is
- * desirable to have numbers which are not only random, but hard to
- * predict by an attacker.
- *
- * Theory of operation
- * ===================
- *
- * Computers are very predictable devices. Hence it is extremely hard
- * to produce truly random numbers on a computer --- as opposed to
- * pseudo-random numbers, which can easily generated by using a
- * algorithm. Unfortunately, it is very easy for attackers to guess
- * the sequence of pseudo-random number generators, and for some
- * applications this is not acceptable. So instead, we must try to
- * gather "environmental noise" from the computer's environment, which
- * must be hard for outside attackers to observe, and use that to
- * generate random numbers. In a Unix environment, this is best done
- * from inside the kernel.
- *
- * Sources of randomness from the environment include inter-keyboard
- * timings, inter-interrupt timings from some interrupts, and other
- * events which are both (a) non-deterministic and (b) hard for an
- * outside observer to measure. Randomness from these sources are
- * added to an "entropy pool", which is mixed using a CRC-like function.
- * This is not cryptographically strong, but it is adequate assuming
- * the randomness is not chosen maliciously, and it is fast enough that
- * the overhead of doing it on every interrupt is very reasonable.
- * As random bytes are mixed into the entropy pool, the routines keep
- * an *estimate* of how many bits of randomness have been stored into
- * the random number generator's internal state.
- *
- * When random bytes are desired, they are obtained by taking the BLAKE2s
- * hash of the contents of the "entropy pool". The BLAKE2s hash avoids
- * exposing the internal state of the entropy pool. It is believed to
- * be computationally infeasible to derive any useful information
- * about the input of BLAKE2s from its output. Even if it is possible to
- * analyze BLAKE2s in some clever way, as long as the amount of data
- * returned from the generator is less than the inherent entropy in
- * the pool, the output data is totally unpredictable. For this
- * reason, the routine decreases its internal estimate of how many
- * bits of "true randomness" are contained in the entropy pool as it
- * outputs random numbers.
- *
- * If this estimate goes to zero, the routine can still generate
- * random numbers; however, an attacker may (at least in theory) be
- * able to infer the future output of the generator from prior
- * outputs. This requires successful cryptanalysis of BLAKE2s, which is
- * not believed to be feasible, but there is a remote possibility.
- * Nonetheless, these numbers should be useful for the vast majority
- * of purposes.
- *
- * Exported interfaces ---- output
- * ===============================
- *
- * There are four exported interfaces; two for use within the kernel,
- * and two for use from userspace.
- *
- * Exported interfaces ---- userspace output
- * -----------------------------------------
- *
- * The userspace interfaces are two character devices /dev/random and
- * /dev/urandom. /dev/random is suitable for use when very high
- * quality randomness is desired (for example, for key generation or
- * one-time pads), as it will only return a maximum of the number of
- * bits of randomness (as estimated by the random number generator)
- * contained in the entropy pool.
- *
- * The /dev/urandom device does not have this limit, and will return
- * as many bytes as are requested. As more and more random bytes are
- * requested without giving time for the entropy pool to recharge,
- * this will result in random numbers that are merely cryptographically
- * strong. For many applications, however, this is acceptable.
- *
- * Exported interfaces ---- kernel output
- * --------------------------------------
- *
- * The primary kernel interface is
- *
- * void get_random_bytes(void *buf, int nbytes);
- *
- * This interface will return the requested number of random bytes,
- * and place it in the requested buffer. This is equivalent to a
- * read from /dev/urandom.
- *
- * For less critical applications, there are the functions:
- *
- * u32 get_random_u32()
- * u64 get_random_u64()
- * unsigned int get_random_int()
- * unsigned long get_random_long()
- *
- * These are produced by a cryptographic RNG seeded from get_random_bytes,
- * and so do not deplete the entropy pool as much. These are recommended
- * for most in-kernel operations *if the result is going to be stored in
- * the kernel*.
- *
- * Specifically, the get_random_int() family do not attempt to do
- * "anti-backtracking". If you capture the state of the kernel (e.g.
- * by snapshotting the VM), you can figure out previous get_random_int()
- * return values. But if the value is stored in the kernel anyway,
- * this is not a problem.
- *
- * It *is* safe to expose get_random_int() output to attackers (e.g. as
- * network cookies); given outputs 1..n, it's not feasible to predict
- * outputs 0 or n+1. The only concern is an attacker who breaks into
- * the kernel later; the get_random_int() engine is not reseeded as
- * often as the get_random_bytes() one.
- *
- * get_random_bytes() is needed for keys that need to stay secret after
- * they are erased from the kernel. For example, any key that will
- * be wrapped and stored encrypted. And session encryption keys: we'd
- * like to know that after the session is closed and the keys erased,
- * the plaintext is unrecoverable to someone who recorded the ciphertext.
- *
- * But for network ports/cookies, stack canaries, PRNG seeds, address
- * space layout randomization, session *authentication* keys, or other
- * applications where the sensitive data is stored in the kernel in
- * plaintext for as long as it's sensitive, the get_random_int() family
- * is just fine.
- *
- * Consider ASLR. We want to keep the address space secret from an
- * outside attacker while the process is running, but once the address
- * space is torn down, it's of no use to an attacker any more. And it's
- * stored in kernel data structures as long as it's alive, so worrying
- * about an attacker's ability to extrapolate it from the get_random_int()
- * CRNG is silly.
- *
- * Even some cryptographic keys are safe to generate with get_random_int().
- * In particular, keys for SipHash are generally fine. Here, knowledge
- * of the key authorizes you to do something to a kernel object (inject
- * packets to a network connection, or flood a hash table), and the
- * key is stored with the object being protected. Once it goes away,
- * we no longer care if anyone knows the key.
- *
- * prandom_u32()
- * -------------
- *
- * For even weaker applications, see the pseudorandom generator
- * prandom_u32(), prandom_max(), and prandom_bytes(). If the random
- * numbers aren't security-critical at all, these are *far* cheaper.
- * Useful for self-tests, random error simulation, randomized backoffs,
- * and any other application where you trust that nobody is trying to
- * maliciously mess with you by guessing the "random" numbers.
- *
- * Exported interfaces ---- input
- * ==============================
- *
- * The current exported interfaces for gathering environmental noise
- * from the devices are:
- *
- * void add_device_randomness(const void *buf, unsigned int size);
- * void add_input_randomness(unsigned int type, unsigned int code,
- * unsigned int value);
- * void add_interrupt_randomness(int irq);
- * void add_disk_randomness(struct gendisk *disk);
- * void add_hwgenerator_randomness(const char *buffer, size_t count,
- * size_t entropy);
- * void add_bootloader_randomness(const void *buf, unsigned int size);
- *
- * add_device_randomness() is for adding data to the random pool that
- * is likely to differ between two devices (or possibly even per boot).
- * This would be things like MAC addresses or serial numbers, or the
- * read-out of the RTC. This does *not* add any actual entropy to the
- * pool, but it initializes the pool to different values for devices
- * that might otherwise be identical and have very little entropy
- * available to them (particularly common in the embedded world).
- *
- * add_input_randomness() uses the input layer interrupt timing, as well as
- * the event type information from the hardware.
- *
- * add_interrupt_randomness() uses the interrupt timing as random
- * inputs to the entropy pool. Using the cycle counters and the irq source
- * as inputs, it feeds the randomness roughly once a second.
- *
- * add_disk_randomness() uses what amounts to the seek time of block
- * layer request events, on a per-disk_devt basis, as input to the
- * entropy pool. Note that high-speed solid state drives with very low
- * seek times do not make for good sources of entropy, as their seek
- * times are usually fairly consistent.
- *
- * All of these routines try to estimate how many bits of randomness a
- * particular randomness source. They do this by keeping track of the
- * first and second order deltas of the event timings.
- *
- * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
- * entropy as specified by the caller. If the entropy pool is full it will
- * block until more entropy is needed.
- *
- * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or
- * add_device_randomness(), depending on whether or not the configuration
- * option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
- *
- * Ensuring unpredictability at system startup
- * ============================================
- *
- * When any operating system starts up, it will go through a sequence
- * of actions that are fairly predictable by an adversary, especially
- * if the start-up does not involve interaction with a human operator.
- * This reduces the actual number of bits of unpredictability in the
- * entropy pool below the value in entropy_count. In order to
- * counteract this effect, it helps to carry information in the
- * entropy pool across shut-downs and start-ups. To do this, put the
- * following lines an appropriate script which is run during the boot
- * sequence:
- *
- * echo "Initializing random number generator..."
- * random_seed=/var/run/random-seed
- * # Carry a random seed from start-up to start-up
- * # Load and then save the whole entropy pool
- * if [ -f $random_seed ]; then
- * cat $random_seed >/dev/urandom
- * else
- * touch $random_seed
- * fi
- * chmod 600 $random_seed
- * dd if=/dev/urandom of=$random_seed count=1 bs=512
- *
- * and the following lines in an appropriate script which is run as
- * the system is shutdown:
- *
- * # Carry a random seed from shut-down to start-up
- * # Save the whole entropy pool
- * echo "Saving random seed..."
- * random_seed=/var/run/random-seed
- * touch $random_seed
- * chmod 600 $random_seed
- * dd if=/dev/urandom of=$random_seed count=1 bs=512
- *
- * For example, on most modern systems using the System V init
- * scripts, such code fragments would be found in
- * /etc/rc.d/init.d/random. On older Linux systems, the correct script
- * location might be in /etc/rcb.d/rc.local or /etc/rc.d/rc.0.
- *
- * Effectively, these commands cause the contents of the entropy pool
- * to be saved at shut-down time and reloaded into the entropy pool at
- * start-up. (The 'dd' in the addition to the bootup script is to
- * make sure that /etc/random-seed is different for every start-up,
- * even if the system crashes without executing rc.0.) Even with
- * complete knowledge of the start-up activities, predicting the state
- * of the entropy pool requires knowledge of the previous history of
- * the system.
- *
- * Configuring the /dev/random driver under Linux
- * ==============================================
- *
- * The /dev/random driver under Linux uses minor numbers 8 and 9 of
- * the /dev/mem major number (#1). So if your system does not have
- * /dev/random and /dev/urandom created already, they can be created
- * by using the commands:
- *
- * mknod /dev/random c 1 8
- * mknod /dev/urandom c 1 9
- *
- * Acknowledgements:
- * =================
- *
- * Ideas for constructing this random number generator were derived
- * from Pretty Good Privacy's random number generator, and from private
- * discussions with Phil Karn. Colin Plumb provided a faster random
- * number generator, which speed up the mixing function of the entropy
- * pool, taken from PGPfone. Dale Worley has also contributed many
- * useful ideas and suggestions to improve this driver.
- *
- * Any flaws in the design are solely my responsibility, and should
- * not be attributed to the Phil, Colin, or any of authors of PGP.
- *
- * Further background information on this topic may be obtained from
- * RFC 1750, "Randomness Recommendations for Security", by Donald
- * Eastlake, Steve Crocker, and Jeff Schiller.
+ * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
+ *
+ * This driver produces cryptographically secure pseudorandom data. It is divided
+ * into roughly six sections, each with a section header:
+ *
+ * - Initialization and readiness waiting.
+ * - Fast key erasure RNG, the "crng".
+ * - Entropy accumulation and extraction routines.
+ * - Entropy collection routines.
+ * - Userspace reader/writer interfaces.
+ * - Sysctl interface.
+ *
+ * The high level overview is that there is one input pool, into which
+ * various pieces of data are hashed. Some of that data is then "credited" as
+ * having a certain number of bits of entropy. When enough bits of entropy are
+ * available, the hash is finalized and handed as a key to a stream cipher that
+ * expands it indefinitely for various consumers. This key is periodically
+ * refreshed as the various entropy collectors, described below, add data to the
+ * input pool and credit it. There is currently no Fortuna-like scheduler
+ * involved, which can lead to malicious entropy sources causing a premature
+ * reseed, and the entropy estimates are, at best, conservative guesses.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -330,7 +38,7 @@
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
@@ -344,744 +52,947 @@
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
+#include <linux/uaccess.h>
#include <crypto/chacha.h>
#include <crypto/blake2s.h>
-
#include <asm/processor.h>
-#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>
-#define CREATE_TRACE_POINTS
-#include <trace/events/random.h>
-
-/* #define ADD_INTERRUPT_BENCH */
-
-/*
- * If the entropy count falls under this number of bits, then we
- * should wake up processes which are selecting or polling on write
- * access to /dev/random.
- */
-static int random_write_wakeup_bits = 28 * (1 << 5);
-
-/*
- * Originally, we used a primitive polynomial of degree .poolwords
- * over GF(2). The taps for various sizes are defined below. They
- * were chosen to be evenly spaced except for the last tap, which is 1
- * to get the twisting happening as fast as possible.
- *
- * For the purposes of better mixing, we use the CRC-32 polynomial as
- * well to make a (modified) twisted Generalized Feedback Shift
- * Register. (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR
- * generators. ACM Transactions on Modeling and Computer Simulation
- * 2(3):179-194. Also see M. Matsumoto & Y. Kurita, 1994. Twisted
- * GFSR generators II. ACM Transactions on Modeling and Computer
- * Simulation 4:254-266)
+/*********************************************************************
*
- * Thanks to Colin Plumb for suggesting this.
+ * Initialization and readiness waiting.
*
- * The mixing operation is much less sensitive than the output hash,
- * where we use BLAKE2s. All that we want of mixing operation is that
- * it be a good non-cryptographic hash; i.e. it not produce collisions
- * when fed "random" data of the sort we expect to see. As long as
- * the pool state differs for different inputs, we have preserved the
- * input entropy and done a good job. The fact that an intelligent
- * attacker can construct inputs that will produce controlled
- * alterations to the pool's state is not important because we don't
- * consider such inputs to contribute any randomness. The only
- * property we need with respect to them is that the attacker can't
- * increase his/her knowledge of the pool's state. Since all
- * additions are reversible (knowing the final state and the input,
- * you can reconstruct the initial state), if an attacker has any
- * uncertainty about the initial state, he/she can only shuffle that
- * uncertainty about, but never cause any collisions (which would
- * decrease the uncertainty).
+ * Much of the RNG infrastructure is devoted to various dependencies
+ * being able to wait until the RNG has collected enough entropy and
+ * is ready for safe consumption.
*
- * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and
- * Videau in their paper, "The Linux Pseudorandom Number Generator
- * Revisited" (see: http://eprint.iacr.org/2012/251.pdf). In their
- * paper, they point out that we are not using a true Twisted GFSR,
- * since Matsumoto & Kurita used a trinomial feedback polynomial (that
- * is, with only three taps, instead of the six that we are using).
- * As a result, the resulting polynomial is neither primitive nor
- * irreducible, and hence does not have a maximal period over
- * GF(2**32). They suggest a slight change to the generator
- * polynomial which improves the resulting TGFSR polynomial to be
- * irreducible, which we have made here.
- */
-enum poolinfo {
- POOL_WORDS = 128,
- POOL_WORDMASK = POOL_WORDS - 1,
- POOL_BYTES = POOL_WORDS * sizeof(u32),
- POOL_BITS = POOL_BYTES * 8,
- POOL_BITSHIFT = ilog2(POOL_BITS),
-
- /* To allow fractional bits to be tracked, the entropy_count field is
- * denominated in units of 1/8th bits. */
- POOL_ENTROPY_SHIFT = 3,
-#define POOL_ENTROPY_BITS() (input_pool.entropy_count >> POOL_ENTROPY_SHIFT)
- POOL_FRACBITS = POOL_BITS << POOL_ENTROPY_SHIFT,
-
- /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
- POOL_TAP1 = 104,
- POOL_TAP2 = 76,
- POOL_TAP3 = 51,
- POOL_TAP4 = 25,
- POOL_TAP5 = 1,
-
- EXTRACT_SIZE = BLAKE2S_HASH_SIZE / 2
-};
-
-/*
- * Static global variables
- */
-static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
-static struct fasync_struct *fasync;
-
-static DEFINE_SPINLOCK(random_ready_list_lock);
-static LIST_HEAD(random_ready_list);
-
-struct crng_state {
- u32 state[16];
- unsigned long init_time;
- spinlock_t lock;
-};
-
-static struct crng_state primary_crng = {
- .lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
- .state[0] = CHACHA_CONSTANT_EXPA,
- .state[1] = CHACHA_CONSTANT_ND_3,
- .state[2] = CHACHA_CONSTANT_2_BY,
- .state[3] = CHACHA_CONSTANT_TE_K,
-};
+ *********************************************************************/
/*
* crng_init = 0 --> Uninitialized
* 1 --> Initialized
* 2 --> Initialized from input_pool
*
- * crng_init is protected by primary_crng->lock, and only increases
+ * crng_init is protected by base_crng->lock, and only increases
* its value (from 0->1->2).
*/
static int crng_init = 0;
-static bool crng_need_final_init = false;
#define crng_ready() (likely(crng_init > 1))
-static int crng_init_cnt = 0;
-static unsigned long crng_global_init_time = 0;
-#define CRNG_INIT_CNT_THRESH (2 * CHACHA_KEY_SIZE)
-static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE]);
-static void _crng_backtrack_protect(struct crng_state *crng,
- u8 tmp[CHACHA_BLOCK_SIZE], int used);
-static void process_random_ready_list(void);
-static void _get_random_bytes(void *buf, int nbytes);
+/* Various types of waiters for crng_init->2 transition. */
+static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
+static struct fasync_struct *fasync;
+static DEFINE_SPINLOCK(random_ready_chain_lock);
+static RAW_NOTIFIER_HEAD(random_ready_chain);
+/* Control how we warn userspace. */
static struct ratelimit_state unseeded_warning =
RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
static struct ratelimit_state urandom_warning =
RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
-
static int ratelimit_disable __read_mostly;
-
module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
-/**********************************************************************
- *
- * OS independent entropy store. Here are the functions which handle
- * storing entropy in an entropy pool.
+/*
+ * Returns whether or not the input pool has been seeded and thus guaranteed
+ * to supply cryptographically secure random numbers. This applies to: the
+ * /dev/urandom device, the get_random_bytes function, and the
+ * get_random_{u32,u64,int,long} family of functions.
*
- **********************************************************************/
-
-static u32 input_pool_data[POOL_WORDS] __latent_entropy;
-
-static struct {
- spinlock_t lock;
- u16 add_ptr;
- u16 input_rotate;
- int entropy_count;
-} input_pool = {
- .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
-};
+ * Returns: true if the input pool has been seeded.
+ * false if the input pool has not been seeded.
+ */
+bool rng_is_initialized(void)
+{
+ return crng_ready();
+}
+EXPORT_SYMBOL(rng_is_initialized);
-static ssize_t extract_entropy(void *buf, size_t nbytes, int min);
-static ssize_t _extract_entropy(void *buf, size_t nbytes);
+/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
+static void try_to_generate_entropy(void);
-static void crng_reseed(struct crng_state *crng, bool use_input_pool);
+/*
+ * Wait for the input pool to be seeded and thus guaranteed to supply
+ * cryptographically secure random numbers. This applies to: the /dev/urandom
+ * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
+ * family of functions. Using any of these functions without first calling
+ * this function forfeits the guarantee of security.
+ *
+ * Returns: 0 if the input pool has been seeded.
+ * -ERESTARTSYS if the function was interrupted by a signal.
+ */
+int wait_for_random_bytes(void)
+{
+ while (!crng_ready()) {
+ int ret;
-static const u32 twist_table[8] = {
- 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
- 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
+ try_to_generate_entropy();
+ ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
+ if (ret)
+ return ret > 0 ? 0 : ret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(wait_for_random_bytes);
/*
- * This function adds bytes into the entropy "pool". It does not
- * update the entropy estimate. The caller should call
- * credit_entropy_bits if this is appropriate.
+ * Add a callback function that will be invoked when the input
+ * pool is initialised.
*
- * The pool is stirred with a primitive polynomial of the appropriate
- * degree, and then twisted. We twist by three bits at a time because
- * it's cheap to do so and helps slightly in the expected case where
- * the entropy is concentrated in the low-order bits.
+ * returns: 0 if callback is successfully added
+ * -EALREADY if pool is already initialised (callback not called)
*/
-static void _mix_pool_bytes(const void *in, int nbytes)
+int register_random_ready_notifier(struct notifier_block *nb)
{
- unsigned long i;
- int input_rotate;
- const u8 *bytes = in;
- u32 w;
-
- input_rotate = input_pool.input_rotate;
- i = input_pool.add_ptr;
-
- /* mix one byte at a time to simplify size handling and churn faster */
- while (nbytes--) {
- w = rol32(*bytes++, input_rotate);
- i = (i - 1) & POOL_WORDMASK;
-
- /* XOR in the various taps */
- w ^= input_pool_data[i];
- w ^= input_pool_data[(i + POOL_TAP1) & POOL_WORDMASK];
- w ^= input_pool_data[(i + POOL_TAP2) & POOL_WORDMASK];
- w ^= input_pool_data[(i + POOL_TAP3) & POOL_WORDMASK];
- w ^= input_pool_data[(i + POOL_TAP4) & POOL_WORDMASK];
- w ^= input_pool_data[(i + POOL_TAP5) & POOL_WORDMASK];
-
- /* Mix the result back in with a twist */
- input_pool_data[i] = (w >> 3) ^ twist_table[w & 7];
+ unsigned long flags;
+ int ret = -EALREADY;
- /*
- * Normally, we add 7 bits of rotation to the pool.
- * At the beginning of the pool, add an extra 7 bits
- * rotation, so that successive passes spread the
- * input bits across the pool evenly.
- */
- input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
- }
+ if (crng_ready())
+ return ret;
- input_pool.input_rotate = input_rotate;
- input_pool.add_ptr = i;
+ spin_lock_irqsave(&random_ready_chain_lock, flags);
+ if (!crng_ready())
+ ret = raw_notifier_chain_register(&random_ready_chain, nb);
+ spin_unlock_irqrestore(&random_ready_chain_lock, flags);
+ return ret;
}
-static void __mix_pool_bytes(const void *in, int nbytes)
+/*
+ * Delete a previously registered readiness callback function.
+ */
+int unregister_random_ready_notifier(struct notifier_block *nb)
{
- trace_mix_pool_bytes_nolock(nbytes, _RET_IP_);
- _mix_pool_bytes(in, nbytes);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&random_ready_chain_lock, flags);
+ ret = raw_notifier_chain_unregister(&random_ready_chain, nb);
+ spin_unlock_irqrestore(&random_ready_chain_lock, flags);
+ return ret;
}
-static void mix_pool_bytes(const void *in, int nbytes)
+static void process_random_ready_list(void)
{
unsigned long flags;
- trace_mix_pool_bytes(nbytes, _RET_IP_);
- spin_lock_irqsave(&input_pool.lock, flags);
- _mix_pool_bytes(in, nbytes);
- spin_unlock_irqrestore(&input_pool.lock, flags);
+ spin_lock_irqsave(&random_ready_chain_lock, flags);
+ raw_notifier_call_chain(&random_ready_chain, 0, NULL);
+ spin_unlock_irqrestore(&random_ready_chain_lock, flags);
}
-struct fast_pool {
- u32 pool[4];
- unsigned long last;
- u16 reg_idx;
- u8 count;
-};
+#define warn_unseeded_randomness(previous) \
+ _warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))
-/*
- * This is a fast mixing routine used by the interrupt randomness
- * collector. It's hardcoded for an 128 bit pool and assumes that any
- * locks that might be needed are taken by the caller.
- */
-static void fast_mix(struct fast_pool *f)
+static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
{
- u32 a = f->pool[0], b = f->pool[1];
- u32 c = f->pool[2], d = f->pool[3];
+#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+ const bool print_once = false;
+#else
+ static bool print_once __read_mostly;
+#endif
- a += b; c += d;
- b = rol32(b, 6); d = rol32(d, 27);
- d ^= a; b ^= c;
+ if (print_once || crng_ready() ||
+ (previous && (caller == READ_ONCE(*previous))))
+ return;
+ WRITE_ONCE(*previous, caller);
+#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+ print_once = true;
+#endif
+ if (__ratelimit(&unseeded_warning))
+ printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
+ func_name, caller, crng_init);
+}
- a += b; c += d;
- b = rol32(b, 16); d = rol32(d, 14);
- d ^= a; b ^= c;
- a += b; c += d;
- b = rol32(b, 6); d = rol32(d, 27);
- d ^= a; b ^= c;
+/*********************************************************************
+ *
+ * Fast key erasure RNG, the "crng".
+ *
+ * These functions expand entropy from the entropy extractor into
+ * long streams for external consumption using the "fast key erasure"
+ * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
+ *
+ * There are a few exported interfaces for use by other drivers:
+ *
+ * void get_random_bytes(void *buf, size_t nbytes)
+ * u32 get_random_u32()
+ * u64 get_random_u64()
+ * unsigned int get_random_int()
+ * unsigned long get_random_long()
+ *
+ * These interfaces will return the requested number of random bytes
+ * into the given buffer or as a return value. This is equivalent to
+ * a read from /dev/urandom. The integer family of functions may be
+ * higher performance for one-off random integers, because they do a
+ * bit of buffering.
+ *
+ *********************************************************************/
- a += b; c += d;
- b = rol32(b, 16); d = rol32(d, 14);
- d ^= a; b ^= c;
+enum {
+ CRNG_RESEED_INTERVAL = 300 * HZ,
+ CRNG_INIT_CNT_THRESH = 2 * CHACHA_KEY_SIZE
+};
- f->pool[0] = a; f->pool[1] = b;
- f->pool[2] = c; f->pool[3] = d;
- f->count++;
-}
+static struct {
+ u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
+ unsigned long birth;
+ unsigned long generation;
+ spinlock_t lock;
+} base_crng = {
+ .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
+};
-static void process_random_ready_list(void)
+struct crng {
+ u8 key[CHACHA_KEY_SIZE];
+ unsigned long generation;
+ local_lock_t lock;
+};
+
+static DEFINE_PER_CPU(struct crng, crngs) = {
+ .generation = ULONG_MAX,
+ .lock = INIT_LOCAL_LOCK(crngs.lock),
+};
+
+/* Used by crng_reseed() to extract a new seed from the input pool. */
+static bool drain_entropy(void *buf, size_t nbytes, bool force);
+
+/*
+ * This extracts a new crng key from the input pool, but only if there is a
+ * sufficient amount of entropy available or force is true, in order to
+ * mitigate bruteforcing of newly added bits.
+ */
+static void crng_reseed(bool force)
{
unsigned long flags;
- struct random_ready_callback *rdy, *tmp;
+ unsigned long next_gen;
+ u8 key[CHACHA_KEY_SIZE];
+ bool finalize_init = false;
- spin_lock_irqsave(&random_ready_list_lock, flags);
- list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
- struct module *owner = rdy->owner;
+ /* Only reseed if we can, to prevent brute forcing a small amount of new bits. */
+ if (!drain_entropy(key, sizeof(key), force))
+ return;
- list_del_init(&rdy->list);
- rdy->func(rdy);
- module_put(owner);
+ /*
+ * We copy the new key into the base_crng, overwriting the old one,
+ * and update the generation counter. We avoid hitting ULONG_MAX,
+ * because the per-cpu crngs are initialized to ULONG_MAX, so this
+ * forces new CPUs that come online to always initialize.
+ */
+ spin_lock_irqsave(&base_crng.lock, flags);
+ memcpy(base_crng.key, key, sizeof(base_crng.key));
+ next_gen = base_crng.generation + 1;
+ if (next_gen == ULONG_MAX)
+ ++next_gen;
+ WRITE_ONCE(base_crng.generation, next_gen);
+ WRITE_ONCE(base_crng.birth, jiffies);
+ if (!crng_ready()) {
+ crng_init = 2;
+ finalize_init = true;
+ }
+ spin_unlock_irqrestore(&base_crng.lock, flags);
+ memzero_explicit(key, sizeof(key));
+ if (finalize_init) {
+ process_random_ready_list();
+ wake_up_interruptible(&crng_init_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
+ pr_notice("crng init done\n");
+ if (unseeded_warning.missed) {
+ pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
+ unseeded_warning.missed);
+ unseeded_warning.missed = 0;
+ }
+ if (urandom_warning.missed) {
+ pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
+ urandom_warning.missed);
+ urandom_warning.missed = 0;
+ }
}
- spin_unlock_irqrestore(&random_ready_list_lock, flags);
}
/*
- * Credit (or debit) the entropy store with n bits of entropy.
- * Use credit_entropy_bits_safe() if the value comes from userspace
- * or otherwise should be checked for extreme values.
+ * This generates a ChaCha block using the provided key, and then
+ * immediately overwrites that key with half the block. It returns
+ * the resultant ChaCha state to the user, along with the second
+ * half of the block containing 32 bytes of random data that may
+ * be used; random_data_len may not be greater than 32.
*/
-static void credit_entropy_bits(int nbits)
+static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
+ u32 chacha_state[CHACHA_STATE_WORDS],
+ u8 *random_data, size_t random_data_len)
{
- int entropy_count, entropy_bits, orig;
- int nfrac = nbits << POOL_ENTROPY_SHIFT;
-
- /* Ensure that the multiplication can avoid being 64 bits wide. */
- BUILD_BUG_ON(2 * (POOL_ENTROPY_SHIFT + POOL_BITSHIFT) > 31);
-
- if (!nbits)
- return;
+ u8 first_block[CHACHA_BLOCK_SIZE];
-retry:
- entropy_count = orig = READ_ONCE(input_pool.entropy_count);
- if (nfrac < 0) {
- /* Debit */
- entropy_count += nfrac;
- } else {
- /*
- * Credit: we have to account for the possibility of
- * overwriting already present entropy. Even in the
- * ideal case of pure Shannon entropy, new contributions
- * approach the full value asymptotically:
- *
- * entropy <- entropy + (pool_size - entropy) *
- * (1 - exp(-add_entropy/pool_size))
- *
- * For add_entropy <= pool_size/2 then
- * (1 - exp(-add_entropy/pool_size)) >=
- * (add_entropy/pool_size)*0.7869...
- * so we can approximate the exponential with
- * 3/4*add_entropy/pool_size and still be on the
- * safe side by adding at most pool_size/2 at a time.
- *
- * The use of pool_size-2 in the while statement is to
- * prevent rounding artifacts from making the loop
- * arbitrarily long; this limits the loop to log2(pool_size)*2
- * turns no matter how large nbits is.
- */
- int pnfrac = nfrac;
- const int s = POOL_BITSHIFT + POOL_ENTROPY_SHIFT + 2;
- /* The +2 corresponds to the /4 in the denominator */
-
- do {
- unsigned int anfrac = min(pnfrac, POOL_FRACBITS / 2);
- unsigned int add =
- ((POOL_FRACBITS - entropy_count) * anfrac * 3) >> s;
-
- entropy_count += add;
- pnfrac -= anfrac;
- } while (unlikely(entropy_count < POOL_FRACBITS - 2 && pnfrac));
- }
+ BUG_ON(random_data_len > 32);
- if (WARN_ON(entropy_count < 0)) {
- pr_warn("negative entropy/overflow: count %d\n", entropy_count);
- entropy_count = 0;
- } else if (entropy_count > POOL_FRACBITS)
- entropy_count = POOL_FRACBITS;
- if (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig)
- goto retry;
+ chacha_init_consts(chacha_state);
+ memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
+ memset(&chacha_state[12], 0, sizeof(u32) * 4);
+ chacha20_block(chacha_state, first_block);
- trace_credit_entropy_bits(nbits, entropy_count >> POOL_ENTROPY_SHIFT, _RET_IP_);
+ memcpy(key, first_block, CHACHA_KEY_SIZE);
+ memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
+ memzero_explicit(first_block, sizeof(first_block));
+}
- entropy_bits = entropy_count >> POOL_ENTROPY_SHIFT;
- if (crng_init < 2 && entropy_bits >= 128)
- crng_reseed(&primary_crng, true);
+/*
+ * Return whether the crng seed is considered to be sufficiently
+ * old that a reseeding might be attempted. This happens if the last
+ * reseeding was CRNG_RESEED_INTERVAL ago, or during early boot, at
+ * an interval proportional to the uptime.
+ */
+static bool crng_has_old_seed(void)
+{
+ static bool early_boot = true;
+ unsigned long interval = CRNG_RESEED_INTERVAL;
+
+ if (unlikely(READ_ONCE(early_boot))) {
+ time64_t uptime = ktime_get_seconds();
+ if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
+ WRITE_ONCE(early_boot, false);
+ else
+ interval = max_t(unsigned int, 5 * HZ,
+ (unsigned int)uptime / 2 * HZ);
+ }
+ return time_after(jiffies, READ_ONCE(base_crng.birth) + interval);
}
-static int credit_entropy_bits_safe(int nbits)
+/*
+ * This function returns a ChaCha state that you may use for generating
+ * random data. It also returns up to 32 bytes on its own of random data
+ * that may be used; random_data_len may not be greater than 32.
+ */
+static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
+ u8 *random_data, size_t random_data_len)
{
- if (nbits < 0)
- return -EINVAL;
+ unsigned long flags;
+ struct crng *crng;
- /* Cap the value to avoid overflows */
- nbits = min(nbits, POOL_BITS);
+ BUG_ON(random_data_len > 32);
- credit_entropy_bits(nbits);
- return 0;
-}
+ /*
+ * For the fast path, we first check whether we're ready while unlocked,
+ * and then re-check once locked. In the case where we're really not
+ * ready, we do fast key erasure with the base_crng directly, because
+ * this is what crng_pre_init_inject() mutates during early init.
+ */
+ if (!crng_ready()) {
+ bool ready;
+
+ spin_lock_irqsave(&base_crng.lock, flags);
+ ready = crng_ready();
+ if (!ready)
+ crng_fast_key_erasure(base_crng.key, chacha_state,
+ random_data, random_data_len);
+ spin_unlock_irqrestore(&base_crng.lock, flags);
+ if (!ready)
+ return;
+ }
-/*********************************************************************
- *
- * CRNG using CHACHA20
- *
- *********************************************************************/
+ /*
+ * If the base_crng is old enough, we try to reseed, which in turn
+ * bumps the generation counter that we check below.
+ */
+ if (unlikely(crng_has_old_seed()))
+ crng_reseed(false);
-#define CRNG_RESEED_INTERVAL (300 * HZ)
+ local_lock_irqsave(&crngs.lock, flags);
+ crng = raw_cpu_ptr(&crngs);
-static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
+ /*
+ * If our per-cpu crng is older than the base_crng, then it means
+ * somebody reseeded the base_crng. In that case, we do fast key
+ * erasure on the base_crng, and use its output as the new key
+ * for our per-cpu crng. This brings us up to date with base_crng.
+ */
+ if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
+ spin_lock(&base_crng.lock);
+ crng_fast_key_erasure(base_crng.key, chacha_state,
+ crng->key, sizeof(crng->key));
+ crng->generation = base_crng.generation;
+ spin_unlock(&base_crng.lock);
+ }
+
+ /*
+ * Finally, when we've made it this far, our per-cpu crng has an up
+ * to date key, and we can do fast key erasure with it to produce
+ * some random data and a ChaCha state for the caller. All other
+ * branches of this function are "unlikely", so most of the time we
+ * should wind up here immediately.
+ */
+ crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
+ local_unlock_irqrestore(&crngs.lock, flags);
+}
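
The function above leans on a single global generation counter: crng_reseed() bumps base_crng.generation, and every per-cpu crng lazily re-keys itself when it notices its cached generation is stale. A compact sketch of that invalidation scheme, using C11 atomics in place of the kernel's primitives:

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong base_generation = 1; /* bumped on every "reseed" */

struct local_crng {
        unsigned long generation; /* generation our key derives from */
        unsigned long key;        /* stand-in for the 256-bit key */
};

/* Lazily re-key when the cached generation no longer matches. */
static void refresh_if_stale(struct local_crng *c)
{
        unsigned long gen = atomic_load(&base_generation);

        if (c->generation != gen) {
                c->key = gen * 0x9e3779b97f4a7c15UL; /* re-derive */
                c->generation = gen;
        }
}

int main(void)
{
        struct local_crng c = { 0, 0 };

        refresh_if_stale(&c);                  /* adopts generation 1 */
        atomic_fetch_add(&base_generation, 1); /* a reseed happens */
        refresh_if_stale(&c);                  /* notices, re-keys */
        printf("generation=%lu\n", c.generation); /* prints 2 */
        return 0;
}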
/*
- * Hack to deal with crazy userspace programs when they are all trying
- * to access /dev/urandom in parallel. The programs are almost
- * certainly doing something terribly wrong, but we'll work around
- * their brain damage.
+ * This function is for crng_init == 0 only. It loads entropy directly
+ * into the crng's key, without going through the input pool. It is,
+ * generally speaking, not very safe, but we use this only at early
+ * boot time when it's better to have something there rather than
+ * nothing.
+ *
+ * If account is set, then the crng_init_cnt counter is incremented.
+ * This shouldn't be set by functions like add_device_randomness(),
+ * where we can't trust that the buffer passed to it is guaranteed
+ * to be unpredictable (so it might not have any entropy at all).
+ *
+ * Returns the number of bytes processed from input, which is bounded
+ * by CRNG_INIT_CNT_THRESH if account is true.
*/
-static struct crng_state **crng_node_pool __read_mostly;
+static size_t crng_pre_init_inject(const void *input, size_t len, bool account)
+{
+ static int crng_init_cnt = 0;
+ struct blake2s_state hash;
+ unsigned long flags;
-static void invalidate_batched_entropy(void);
-static void numa_crng_init(void);
+ blake2s_init(&hash, sizeof(base_crng.key));
-static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
-static int __init parse_trust_cpu(char *arg)
-{
- return kstrtobool(arg, &trust_cpu);
-}
-early_param("random.trust_cpu", parse_trust_cpu);
+ spin_lock_irqsave(&base_crng.lock, flags);
+ if (crng_init != 0) {
+ spin_unlock_irqrestore(&base_crng.lock, flags);
+ return 0;
+ }
-static bool crng_init_try_arch(struct crng_state *crng)
-{
- int i;
- bool arch_init = true;
- unsigned long rv;
+ if (account)
+ len = min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt);
- for (i = 4; i < 16; i++) {
- if (!arch_get_random_seed_long(&rv) &&
- !arch_get_random_long(&rv)) {
- rv = random_get_entropy();
- arch_init = false;
+ blake2s_update(&hash, base_crng.key, sizeof(base_crng.key));
+ blake2s_update(&hash, input, len);
+ blake2s_final(&hash, base_crng.key);
+
+ if (account) {
+ crng_init_cnt += len;
+ if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+ ++base_crng.generation;
+ crng_init = 1;
}
- crng->state[i] ^= rv;
}
- return arch_init;
+ spin_unlock_irqrestore(&base_crng.lock, flags);
+
+ if (crng_init == 1)
+ pr_notice("fast init done\n");
+
+ return len;
}
-static bool __init crng_init_try_arch_early(void)
+static void _get_random_bytes(void *buf, size_t nbytes)
{
- int i;
- bool arch_init = true;
- unsigned long rv;
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ u8 tmp[CHACHA_BLOCK_SIZE];
+ size_t len;
- for (i = 4; i < 16; i++) {
- if (!arch_get_random_seed_long_early(&rv) &&
- !arch_get_random_long_early(&rv)) {
- rv = random_get_entropy();
- arch_init = false;
+ if (!nbytes)
+ return;
+
+ len = min_t(size_t, 32, nbytes);
+ crng_make_state(chacha_state, buf, len);
+ nbytes -= len;
+ buf += len;
+
+ while (nbytes) {
+ if (nbytes < CHACHA_BLOCK_SIZE) {
+ chacha20_block(chacha_state, tmp);
+ memcpy(buf, tmp, nbytes);
+ memzero_explicit(tmp, sizeof(tmp));
+ break;
}
- primary_crng.state[i] ^= rv;
+
+ chacha20_block(chacha_state, buf);
+ if (unlikely(chacha_state[12] == 0))
+ ++chacha_state[13];
+ nbytes -= CHACHA_BLOCK_SIZE;
+ buf += CHACHA_BLOCK_SIZE;
}
- return arch_init;
+ memzero_explicit(chacha_state, sizeof(chacha_state));
}
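
The loop above writes full blocks straight into the caller's buffer and handles the 64-bit block counter by hand: chacha20_block() increments state word 12, and the code carries into word 13 on wraparound. A tiny sketch of just that carry, for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Words 12 and 13 of the ChaCha state form a 64-bit block
         * counter. chacha20_block() increments word 12; the caller
         * carries into word 13 when it wraps, exactly as above. */
        uint32_t word12 = 0xffffffffu, word13 = 0;

        word12++;           /* the increment chacha20_block() performs */
        if (word12 == 0)
                word13++;   /* the manual carry in _get_random_bytes() */
        printf("%u %u\n", word12, word13); /* prints: 0 1 */
        return 0;
}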
-static void crng_initialize_secondary(struct crng_state *crng)
+/*
+ * This function is the exported kernel interface. It returns some
+ * number of good random numbers, suitable for key generation, seeding
+ * TCP sequence numbers, etc. It does not rely on the hardware random
+ * number generator. For random bytes direct from the hardware RNG
+ * (when available), use get_random_bytes_arch(). In order to ensure
+ * that the randomness provided by this function is okay, the function
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
+ */
+void get_random_bytes(void *buf, size_t nbytes)
{
- chacha_init_consts(crng->state);
- _get_random_bytes(&crng->state[4], sizeof(u32) * 12);
- crng_init_try_arch(crng);
- crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
-}
+ static void *previous;
-static void __init crng_initialize_primary(void)
-{
- _extract_entropy(&primary_crng.state[4], sizeof(u32) * 12);
- if (crng_init_try_arch_early() && trust_cpu && crng_init < 2) {
- invalidate_batched_entropy();
- numa_crng_init();
- crng_init = 2;
- pr_notice("crng init done (trusting CPU's manufacturer)\n");
- }
- primary_crng.init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+ warn_unseeded_randomness(&previous);
+ _get_random_bytes(buf, nbytes);
}
+EXPORT_SYMBOL(get_random_bytes);
-static void crng_finalize_init(void)
+static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
{
- if (!system_wq) {
- /* We can't call numa_crng_init until we have workqueues,
- * so mark this for processing later. */
- crng_need_final_init = true;
- return;
- }
+ bool large_request = nbytes > 256;
+ ssize_t ret = 0;
+ size_t len;
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ u8 output[CHACHA_BLOCK_SIZE];
- invalidate_batched_entropy();
- numa_crng_init();
- crng_init = 2;
- crng_need_final_init = false;
- process_random_ready_list();
- wake_up_interruptible(&crng_init_wait);
- kill_fasync(&fasync, SIGIO, POLL_IN);
- pr_notice("crng init done\n");
- if (unseeded_warning.missed) {
- pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
- unseeded_warning.missed);
- unseeded_warning.missed = 0;
- }
- if (urandom_warning.missed) {
- pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
- urandom_warning.missed);
- urandom_warning.missed = 0;
- }
-}
+ if (!nbytes)
+ return 0;
-static void do_numa_crng_init(struct work_struct *work)
-{
- int i;
- struct crng_state *crng;
- struct crng_state **pool;
-
- pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL | __GFP_NOFAIL);
- for_each_online_node(i) {
- crng = kmalloc_node(sizeof(struct crng_state),
- GFP_KERNEL | __GFP_NOFAIL, i);
- spin_lock_init(&crng->lock);
- crng_initialize_secondary(crng);
- pool[i] = crng;
- }
- /* pairs with READ_ONCE() in select_crng() */
- if (cmpxchg_release(&crng_node_pool, NULL, pool) != NULL) {
- for_each_node(i)
- kfree(pool[i]);
- kfree(pool);
- }
-}
+ len = min_t(size_t, 32, nbytes);
+ crng_make_state(chacha_state, output, len);
-static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
+ if (copy_to_user(buf, output, len))
+ return -EFAULT;
+ nbytes -= len;
+ buf += len;
+ ret += len;
-static void numa_crng_init(void)
-{
- if (IS_ENABLED(CONFIG_NUMA))
- schedule_work(&numa_crng_init_work);
-}
+ while (nbytes) {
+ if (large_request && need_resched()) {
+ if (signal_pending(current))
+ break;
+ schedule();
+ }
-static struct crng_state *select_crng(void)
-{
- if (IS_ENABLED(CONFIG_NUMA)) {
- struct crng_state **pool;
- int nid = numa_node_id();
-
- /* pairs with cmpxchg_release() in do_numa_crng_init() */
- pool = READ_ONCE(crng_node_pool);
- if (pool && pool[nid])
- return pool[nid];
+ chacha20_block(chacha_state, output);
+ if (unlikely(chacha_state[12] == 0))
+ ++chacha_state[13];
+
+ len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE);
+ if (copy_to_user(buf, output, len)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ nbytes -= len;
+ buf += len;
+ ret += len;
}
- return &primary_crng;
+ memzero_explicit(chacha_state, sizeof(chacha_state));
+ memzero_explicit(output, sizeof(output));
+ return ret;
}
/*
- * crng_fast_load() can be called by code in the interrupt service
- * path. So we can't afford to dilly-dally. Returns the number of
- * bytes processed from cp.
+ * Batched entropy returns random integers. The quality of the random
+ * number is as good as /dev/urandom's. In order to ensure that the randomness
+ * provided by this function is okay, the function wait_for_random_bytes()
+ * should be called and return 0 at least once at any point prior.
*/
-static size_t crng_fast_load(const u8 *cp, size_t len)
+struct batched_entropy {
+ union {
+ /*
+ * We make this 1.5x a ChaCha block, so that we get the
+ * remaining 32 bytes from fast key erasure, plus one full
+ * block from the detached ChaCha state. We can increase
+ * the size of this later if needed so long as we keep the
+ * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.
+ */
+ u64 entropy_u64[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u64))];
+ u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
+ };
+ local_lock_t lock;
+ unsigned long generation;
+ unsigned int position;
+};
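
The sizing comment works out to concrete numbers: with the usual 64-byte ChaCha block, 1.5 blocks is 96 bytes, i.e. 12 u64 slots or 24 u32 slots per batch. A quick check under that assumption:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CHACHA_BLOCK_SIZE 64 /* assumed, as in <crypto/chacha.h> */

int main(void)
{
        /* (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE with one block. */
        size_t bytes = CHACHA_BLOCK_SIZE * 3 / 2;

        assert(bytes == 96);
        printf("%zu u64 slots, %zu u32 slots\n",
               bytes / sizeof(uint64_t), bytes / sizeof(uint32_t));
        /* prints: 12 u64 slots, 24 u32 slots */
        return 0;
}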
+
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
+ .lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock),
+ .position = UINT_MAX
+};
+
+u64 get_random_u64(void)
{
+ u64 ret;
unsigned long flags;
- u8 *p;
- size_t ret = 0;
+ struct batched_entropy *batch;
+ static void *previous;
+ unsigned long next_gen;
- if (!spin_trylock_irqsave(&primary_crng.lock, flags))
- return 0;
- if (crng_init != 0) {
- spin_unlock_irqrestore(&primary_crng.lock, flags);
- return 0;
- }
- p = (u8 *)&primary_crng.state[4];
- while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
- p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
- cp++; crng_init_cnt++; len--; ret++;
- }
- spin_unlock_irqrestore(&primary_crng.lock, flags);
- if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
- invalidate_batched_entropy();
- crng_init = 1;
- pr_notice("fast init done\n");
+ warn_unseeded_randomness(&previous);
+
+ local_lock_irqsave(&batched_entropy_u64.lock, flags);
+ batch = raw_cpu_ptr(&batched_entropy_u64);
+
+ next_gen = READ_ONCE(base_crng.generation);
+ if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
+ next_gen != batch->generation) {
+ _get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
+ batch->position = 0;
+ batch->generation = next_gen;
}
+
+ ret = batch->entropy_u64[batch->position];
+ batch->entropy_u64[batch->position] = 0;
+ ++batch->position;
+ local_unlock_irqrestore(&batched_entropy_u64.lock, flags);
return ret;
}
+EXPORT_SYMBOL(get_random_u64);
-/*
- * crng_slow_load() is called by add_device_randomness, which has two
- * attributes. (1) We can't trust the buffer passed to it is
- * guaranteed to be unpredictable (so it might not have any entropy at
- * all), and (2) it doesn't have the performance constraints of
- * crng_fast_load().
- *
- * So we do something more comprehensive which is guaranteed to touch
- * all of the primary_crng's state, and which uses a LFSR with a
- * period of 255 as part of the mixing algorithm. Finally, we do
- * *not* advance crng_init_cnt since the buffer we get may be something
- * like a fixed DMI table (for example), which might very well be
- * unique to the machine, but is otherwise unvarying.
- */
-static int crng_slow_load(const u8 *cp, size_t len)
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
+ .lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock),
+ .position = UINT_MAX
+};
+
+u32 get_random_u32(void)
{
+ u32 ret;
unsigned long flags;
- static u8 lfsr = 1;
- u8 tmp;
- unsigned int i, max = CHACHA_KEY_SIZE;
- const u8 *src_buf = cp;
- u8 *dest_buf = (u8 *)&primary_crng.state[4];
+ struct batched_entropy *batch;
+ static void *previous;
+ unsigned long next_gen;
- if (!spin_trylock_irqsave(&primary_crng.lock, flags))
- return 0;
- if (crng_init != 0) {
- spin_unlock_irqrestore(&primary_crng.lock, flags);
- return 0;
- }
- if (len > max)
- max = len;
-
- for (i = 0; i < max; i++) {
- tmp = lfsr;
- lfsr >>= 1;
- if (tmp & 1)
- lfsr ^= 0xE1;
- tmp = dest_buf[i % CHACHA_KEY_SIZE];
- dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
- lfsr += (tmp << 3) | (tmp >> 5);
+ warn_unseeded_randomness(&previous);
+
+ local_lock_irqsave(&batched_entropy_u32.lock, flags);
+ batch = raw_cpu_ptr(&batched_entropy_u32);
+
+ next_gen = READ_ONCE(base_crng.generation);
+ if (batch->position >= ARRAY_SIZE(batch->entropy_u32) ||
+ next_gen != batch->generation) {
+ _get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
+ batch->position = 0;
+ batch->generation = next_gen;
}
- spin_unlock_irqrestore(&primary_crng.lock, flags);
- return 1;
+
+ ret = batch->entropy_u32[batch->position];
+ batch->entropy_u32[batch->position] = 0;
+ ++batch->position;
+ local_unlock_irqrestore(&batched_entropy_u32.lock, flags);
+ return ret;
}
+EXPORT_SYMBOL(get_random_u32);
-static void crng_reseed(struct crng_state *crng, bool use_input_pool)
+#ifdef CONFIG_SMP
+/*
+ * This function is called when the CPU is coming up, with entry
+ * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
+ */
+int random_prepare_cpu(unsigned int cpu)
{
- unsigned long flags;
- int i, num;
- union {
- u8 block[CHACHA_BLOCK_SIZE];
- u32 key[8];
- } buf;
+ /*
+ * When the cpu comes back online, immediately invalidate both
+ * the per-cpu crng and all batches, so that we serve fresh
+ * randomness.
+ */
+ per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
+ per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
+ per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
+ return 0;
+}
+#endif
- if (use_input_pool) {
- num = extract_entropy(&buf, 32, 16);
- if (num == 0)
- return;
- } else {
- _extract_crng(&primary_crng, buf.block);
- _crng_backtrack_protect(&primary_crng, buf.block,
- CHACHA_KEY_SIZE);
- }
- spin_lock_irqsave(&crng->lock, flags);
- for (i = 0; i < 8; i++) {
- unsigned long rv;
- if (!arch_get_random_seed_long(&rv) &&
- !arch_get_random_long(&rv))
- rv = random_get_entropy();
- crng->state[i + 4] ^= buf.key[i] ^ rv;
+/**
+ * randomize_page - Generate a random, page aligned address
+ * @start: The smallest acceptable address the caller will take.
+ * @range: The size of the area, starting at @start, within which the
+ * random address must fall.
+ *
+ * If @start + @range would overflow, @range is capped.
+ *
+ * NOTE: Historical use of randomize_range, which this replaces, presumed that
+ * @start was already page aligned. We now align it regardless.
+ *
+ * Return: A page aligned address within [start, start + range). On error,
+ * @start is returned.
+ */
+unsigned long randomize_page(unsigned long start, unsigned long range)
+{
+ if (!PAGE_ALIGNED(start)) {
+ range -= PAGE_ALIGN(start) - start;
+ start = PAGE_ALIGN(start);
}
- memzero_explicit(&buf, sizeof(buf));
- WRITE_ONCE(crng->init_time, jiffies);
- spin_unlock_irqrestore(&crng->lock, flags);
- if (crng == &primary_crng && crng_init < 2)
- crng_finalize_init();
+
+ if (start > ULONG_MAX - range)
+ range = ULONG_MAX - start;
+
+ range >>= PAGE_SHIFT;
+
+ if (range == 0)
+ return start;
+
+ return start + (get_random_long() % range << PAGE_SHIFT);
}
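
To make randomize_page()'s arithmetic concrete, here is a userspace sketch with PAGE_SIZE fixed at 4096 and the random draw passed in as a parameter (randomize_page_demo() is hypothetical, for illustration only):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Hypothetical mirror of randomize_page(), with the random draw made
 * a parameter so the arithmetic is reproducible. */
static unsigned long randomize_page_demo(unsigned long start,
                                         unsigned long range,
                                         unsigned long rnd)
{
        if (start & (PAGE_SIZE - 1)) {
                range -= PAGE_ALIGN(start) - start;
                start = PAGE_ALIGN(start);
        }
        if (start > ~0UL - range)
                range = ~0UL - start;
        range >>= PAGE_SHIFT;
        if (range == 0)
                return start;
        return start + (rnd % range << PAGE_SHIFT);
}

int main(void)
{
        /* Unaligned 0x10800 is pushed up to 0x11000, the range shrinks
         * by the same 0x800, and the draw picks one of 255 pages. */
        printf("%#lx\n", randomize_page_demo(0x10800, 0x100000, 7));
        /* prints: 0x18000 */
        return 0;
}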
-static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE])
+/*
+ * This function will use the architecture-specific hardware random
+ * number generator if it is available. It is not recommended for
+ * use. Use get_random_bytes() instead. It returns the number of
+ * bytes filled in.
+ */
+size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes)
{
- unsigned long flags, init_time;
+ size_t left = nbytes;
+ u8 *p = buf;
- if (crng_ready()) {
- init_time = READ_ONCE(crng->init_time);
- if (time_after(READ_ONCE(crng_global_init_time), init_time) ||
- time_after(jiffies, init_time + CRNG_RESEED_INTERVAL))
- crng_reseed(crng, crng == &primary_crng);
+ while (left) {
+ unsigned long v;
+ size_t chunk = min_t(size_t, left, sizeof(unsigned long));
+
+ if (!arch_get_random_long(&v))
+ break;
+
+ memcpy(p, &v, chunk);
+ p += chunk;
+ left -= chunk;
}
- spin_lock_irqsave(&crng->lock, flags);
- chacha20_block(&crng->state[0], out);
- if (crng->state[12] == 0)
- crng->state[13]++;
- spin_unlock_irqrestore(&crng->lock, flags);
+
+ return nbytes - left;
}
+EXPORT_SYMBOL(get_random_bytes_arch);
+
-static void extract_crng(u8 out[CHACHA_BLOCK_SIZE])
+/**********************************************************************
+ *
+ * Entropy accumulation and extraction routines.
+ *
+ * Callers may add entropy via:
+ *
+ * static void mix_pool_bytes(const void *in, size_t nbytes)
+ *
+ * After which, if added entropy should be credited:
+ *
+ * static void credit_entropy_bits(size_t nbits)
+ *
+ * Finally, extract entropy via these two, with the latter one
+ * setting the entropy count to zero and extracting only if there
+ * is POOL_MIN_BITS entropy credited prior or force is true:
+ *
+ * static void extract_entropy(void *buf, size_t nbytes)
+ * static bool drain_entropy(void *buf, size_t nbytes, bool force)
+ *
+ **********************************************************************/
+
+enum {
+ POOL_BITS = BLAKE2S_HASH_SIZE * 8,
+ POOL_MIN_BITS = POOL_BITS /* No point in settling for less. */
+};
+
+/* For notifying userspace that it should write into /dev/random. */
+static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
+
+static struct {
+ struct blake2s_state hash;
+ spinlock_t lock;
+ unsigned int entropy_count;
+} input_pool = {
+ .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
+ BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
+ BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
+ .hash.outlen = BLAKE2S_HASH_SIZE,
+ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
+};
+
+static void _mix_pool_bytes(const void *in, size_t nbytes)
{
- _extract_crng(select_crng(), out);
+ blake2s_update(&input_pool.hash, in, nbytes);
}
/*
- * Use the leftover bytes from the CRNG block output (if there is
- * enough) to mutate the CRNG key to provide backtracking protection.
+ * This function adds bytes into the entropy "pool". It does not
+ * update the entropy estimate. The caller should call
+ * credit_entropy_bits if this is appropriate.
*/
-static void _crng_backtrack_protect(struct crng_state *crng,
- u8 tmp[CHACHA_BLOCK_SIZE], int used)
+static void mix_pool_bytes(const void *in, size_t nbytes)
{
unsigned long flags;
- u32 *s, *d;
- int i;
- used = round_up(used, sizeof(u32));
- if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
- extract_crng(tmp);
- used = 0;
- }
- spin_lock_irqsave(&crng->lock, flags);
- s = (u32 *)&tmp[used];
- d = &crng->state[4];
- for (i = 0; i < 8; i++)
- *d++ ^= *s++;
- spin_unlock_irqrestore(&crng->lock, flags);
+ spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(in, nbytes);
+ spin_unlock_irqrestore(&input_pool.lock, flags);
}
-static void crng_backtrack_protect(u8 tmp[CHACHA_BLOCK_SIZE], int used)
+static void credit_entropy_bits(size_t nbits)
{
- _crng_backtrack_protect(select_crng(), tmp, used);
+ unsigned int entropy_count, orig, add;
+
+ if (!nbits)
+ return;
+
+ add = min_t(size_t, nbits, POOL_BITS);
+
+ do {
+ orig = READ_ONCE(input_pool.entropy_count);
+ entropy_count = min_t(unsigned int, POOL_BITS, orig + add);
+ } while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig);
+
+ if (!crng_ready() && entropy_count >= POOL_MIN_BITS)
+ crng_reseed(false);
}
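
The crediting loop above is the classic lock-free read/modify/compare-exchange pattern, saturating at POOL_BITS rather than wrapping. The same pattern in portable C11, as a sketch:

#include <stdatomic.h>
#include <stdio.h>

#define POOL_BITS 256

static atomic_uint entropy_count;

/* Lock-free saturating credit, mirroring the cmpxchg() loop above. */
static void credit(unsigned int add)
{
        unsigned int orig = atomic_load(&entropy_count), next;

        do {
                next = orig + add;
                if (next > POOL_BITS)
                        next = POOL_BITS; /* saturate, never wrap */
        } while (!atomic_compare_exchange_weak(&entropy_count, &orig, next));
}

int main(void)
{
        credit(200);
        credit(200); /* only 56 of these bits fit */
        printf("%u\n", atomic_load(&entropy_count)); /* prints: 256 */
        return 0;
}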
-static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
+/*
+ * This is an HKDF-like construction for using the hashed collected entropy
+ * as a PRF key that is then expanded block-by-block.
+ */
+static void extract_entropy(void *buf, size_t nbytes)
{
- ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
- u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
- int large_request = (nbytes > 256);
+ unsigned long flags;
+ u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
+ struct {
+ unsigned long rdseed[32 / sizeof(long)];
+ size_t counter;
+ } block;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
+ if (!arch_get_random_seed_long(&block.rdseed[i]) &&
+ !arch_get_random_long(&block.rdseed[i]))
+ block.rdseed[i] = random_get_entropy();
+ }
- while (nbytes) {
- if (large_request && need_resched()) {
- if (signal_pending(current)) {
- if (ret == 0)
- ret = -ERESTARTSYS;
- break;
- }
- schedule();
- }
+ spin_lock_irqsave(&input_pool.lock, flags);
- extract_crng(tmp);
- i = min_t(int, nbytes, CHACHA_BLOCK_SIZE);
- if (copy_to_user(buf, tmp, i)) {
- ret = -EFAULT;
- break;
- }
+ /* seed = HASHPRF(last_key, entropy_input) */
+ blake2s_final(&input_pool.hash, seed);
+ /* next_key = HASHPRF(seed, RDSEED || 0) */
+ block.counter = 0;
+ blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
+ blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));
+
+ spin_unlock_irqrestore(&input_pool.lock, flags);
+ memzero_explicit(next_key, sizeof(next_key));
+
+ while (nbytes) {
+ i = min_t(size_t, nbytes, BLAKE2S_HASH_SIZE);
+ /* output = HASHPRF(seed, RDSEED || ++counter) */
+ ++block.counter;
+ blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
nbytes -= i;
buf += i;
- ret += i;
}
- crng_backtrack_protect(tmp, i);
- /* Wipe data just written to memory */
- memzero_explicit(tmp, sizeof(tmp));
+ memzero_explicit(seed, sizeof(seed));
+ memzero_explicit(&block, sizeof(block));
+}
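
For reference (editor's notation, matching the inline comments above), the extract/expand steps can be written out with $H_k$ denoting BLAKE2s keyed or chained with $k$ and $\parallel$ denoting concatenation:

\begin{aligned}
\text{seed} &= H_{\text{last\_key}}(\text{entropy\_input}) \\
\text{next\_key} &= H_{\text{seed}}(\text{RDSEED} \parallel 0) \\
\text{output}_i &= H_{\text{seed}}(\text{RDSEED} \parallel i), \qquad i = 1, 2, \ldots
\end{aligned}

Each output block depends on the seed and a fresh counter, and the pool is re-keyed with next_key before the lock is dropped, so compromising the pool later does not reveal earlier outputs.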
- return ret;
+/*
+ * First we make sure we have POOL_MIN_BITS of entropy in the pool unless force
+ * is true, and then we set the entropy count to zero (but don't actually touch
+ * any data). Only then can we extract a new key with extract_entropy().
+ */
+static bool drain_entropy(void *buf, size_t nbytes, bool force)
+{
+ unsigned int entropy_count;
+ do {
+ entropy_count = READ_ONCE(input_pool.entropy_count);
+ if (!force && entropy_count < POOL_MIN_BITS)
+ return false;
+ } while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count);
+ extract_entropy(buf, nbytes);
+ wake_up_interruptible(&random_write_wait);
+ kill_fasync(&fasync, SIGIO, POLL_OUT);
+ return true;
}
-/*********************************************************************
+
+/**********************************************************************
*
- * Entropy input management
+ * Entropy collection routines.
*
- *********************************************************************/
+ * The following exported functions are used for pushing entropy into
+ * the above entropy accumulation routines:
+ *
+ * void add_device_randomness(const void *buf, size_t size);
+ * void add_input_randomness(unsigned int type, unsigned int code,
+ * unsigned int value);
+ * void add_disk_randomness(struct gendisk *disk);
+ * void add_hwgenerator_randomness(const void *buffer, size_t count,
+ * size_t entropy);
+ * void add_bootloader_randomness(const void *buf, size_t size);
+ * void add_vmfork_randomness(const void *unique_vm_id, size_t size);
+ * void add_interrupt_randomness(int irq);
+ *
+ * add_device_randomness() adds data to the input pool that
+ * is likely to differ between two devices (or possibly even per boot).
+ * This would be things like MAC addresses or serial numbers, or the
+ * read-out of the RTC. This does *not* credit any actual entropy to
+ * the pool, but it initializes the pool to different values for devices
+ * that might otherwise be identical and have very little entropy
+ * available to them (particularly common in the embedded world).
+ *
+ * add_input_randomness() uses the input layer interrupt timing, as well
+ * as the event type information from the hardware.
+ *
+ * add_disk_randomness() uses what amounts to the seek time of block
+ * layer request events, on a per-disk_devt basis, as input to the
+ * entropy pool. Note that high-speed solid state drives with very low
+ * seek times do not make for good sources of entropy, as their seek
+ * times are usually fairly consistent.
+ *
+ * The above two routines try to estimate how many bits of entropy
+ * to credit. They do this by keeping track of the first and second
+ * order deltas of the event timings.
+ *
+ * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
+ * entropy as specified by the caller. If the entropy pool is full it will
+ * block until more entropy is needed.
+ *
+ * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or
+ * add_device_randomness(), depending on whether or not the configuration
+ * option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
+ *
+ * add_vmfork_randomness() adds a unique (but not necessarily secret) ID
+ * representing the current instance of a VM to the pool, without crediting,
+ * and then force-reseeds the crng so that it takes effect immediately.
+ *
+ * add_interrupt_randomness() uses the interrupt timing as random
+ * inputs to the entropy pool. Using the cycle counters and the irq source
+ * as inputs, it feeds the input pool roughly once a second or after 64
+ * interrupts, crediting 1 bit of entropy for whichever comes first.
+ *
+ **********************************************************************/
-/* There is one of these per entropy source */
-struct timer_rand_state {
- cycles_t last_time;
- long last_delta, last_delta2;
-};
+static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+static int __init parse_trust_cpu(char *arg)
+{
+ return kstrtobool(arg, &trust_cpu);
+}
+early_param("random.trust_cpu", parse_trust_cpu);
-#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };
+/*
+ * The first collection of entropy occurs at system boot while interrupts
+ * are still turned off. Here we push in RDSEED, a timestamp, and utsname().
+ * Depending on the above configuration knob, RDSEED may be considered
+ * sufficient for initialization. Note that much earlier setup may already
+ * have pushed entropy into the input pool by the time we get here.
+ */
+int __init rand_initialize(void)
+{
+ size_t i;
+ ktime_t now = ktime_get_real();
+ bool arch_init = true;
+ unsigned long rv;
+
+ for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) {
+ if (!arch_get_random_seed_long_early(&rv) &&
+ !arch_get_random_long_early(&rv)) {
+ rv = random_get_entropy();
+ arch_init = false;
+ }
+ _mix_pool_bytes(&rv, sizeof(rv));
+ }
+ _mix_pool_bytes(&now, sizeof(now));
+ _mix_pool_bytes(utsname(), sizeof(*(utsname())));
+
+ extract_entropy(base_crng.key, sizeof(base_crng.key));
+ ++base_crng.generation;
+
+ if (arch_init && trust_cpu && !crng_ready()) {
+ crng_init = 2;
+ pr_notice("crng init done (trusting CPU's manufacturer)\n");
+ }
+
+ if (ratelimit_disable) {
+ urandom_warning.interval = 0;
+ unseeded_warning.interval = 0;
+ }
+ return 0;
+}
/*
* Add device- or boot-specific data to the input pool to help
@@ -1091,23 +1002,27 @@ struct timer_rand_state {
* the entropy pool having similar initial state across largely
* identical devices.
*/
-void add_device_randomness(const void *buf, unsigned int size)
+void add_device_randomness(const void *buf, size_t size)
{
- unsigned long time = random_get_entropy() ^ jiffies;
- unsigned long flags;
+ cycles_t cycles = random_get_entropy();
+ unsigned long flags, now = jiffies;
- if (!crng_ready() && size)
- crng_slow_load(buf, size);
+ if (crng_init == 0 && size)
+ crng_pre_init_inject(buf, size, false);
- trace_add_device_randomness(size, _RET_IP_);
spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(&cycles, sizeof(cycles));
+ _mix_pool_bytes(&now, sizeof(now));
_mix_pool_bytes(buf, size);
- _mix_pool_bytes(&time, sizeof(time));
spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
-static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
+/* There is one of these per entropy source */
+struct timer_rand_state {
+ unsigned long last_time;
+ long last_delta, last_delta2;
+};
/*
* This function adds entropy to the entropy "pool" by using timing
@@ -1117,29 +1032,26 @@ static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
* The number "num" is also added to the pool - it should somehow describe
* the type of event which just happened. This is currently 0-255 for
* keyboard scan codes, and 256 upwards for interrupts.
- *
*/
-static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
- struct {
- long jiffies;
- unsigned int cycles;
- unsigned int num;
- } sample;
+ cycles_t cycles = random_get_entropy();
+ unsigned long flags, now = jiffies;
long delta, delta2, delta3;
- sample.jiffies = jiffies;
- sample.cycles = random_get_entropy();
- sample.num = num;
- mix_pool_bytes(&sample, sizeof(sample));
+ spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(&cycles, sizeof(cycles));
+ _mix_pool_bytes(&now, sizeof(now));
+ _mix_pool_bytes(&num, sizeof(num));
+ spin_unlock_irqrestore(&input_pool.lock, flags);
/*
* Calculate number of bits of randomness we probably added.
* We take into account the first, second and third-order deltas
* in order to make our estimate.
*/
- delta = sample.jiffies - READ_ONCE(state->last_time);
- WRITE_ONCE(state->last_time, sample.jiffies);
+ delta = now - READ_ONCE(state->last_time);
+ WRITE_ONCE(state->last_time, now);
delta2 = delta - READ_ONCE(state->last_delta);
WRITE_ONCE(state->last_delta, delta);
@@ -1163,318 +1075,303 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
* Round down by 1 bit on general principles,
* and limit entropy estimate to 12 bits.
*/
- credit_entropy_bits(min_t(int, fls(delta >> 1), 11));
+ credit_entropy_bits(min_t(unsigned int, fls(delta >> 1), 11));
}
void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value)
{
static unsigned char last_value;
+ static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
- /* ignore autorepeat and the like */
+ /* Ignore autorepeat and the like. */
if (value == last_value)
return;
last_value = value;
add_timer_randomness(&input_timer_state,
(type << 4) ^ code ^ (code >> 4) ^ value);
- trace_add_input_randomness(POOL_ENTROPY_BITS());
}
EXPORT_SYMBOL_GPL(add_input_randomness);
-static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
-
-#ifdef ADD_INTERRUPT_BENCH
-static unsigned long avg_cycles, avg_deviation;
-
-#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */
-#define FIXED_1_2 (1 << (AVG_SHIFT - 1))
-
-static void add_interrupt_bench(cycles_t start)
+#ifdef CONFIG_BLOCK
+void add_disk_randomness(struct gendisk *disk)
{
- long delta = random_get_entropy() - start;
-
- /* Use a weighted moving average */
- delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
- avg_cycles += delta;
- /* And average deviation */
- delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
- avg_deviation += delta;
+ if (!disk || !disk->random)
+ return;
+ /* First major is 1, so we get >= 0x200 here. */
+ add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
}
-#else
-#define add_interrupt_bench(x)
-#endif
+EXPORT_SYMBOL_GPL(add_disk_randomness);
-static u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
+void rand_initialize_disk(struct gendisk *disk)
{
- u32 *ptr = (u32 *)regs;
- unsigned int idx;
+ struct timer_rand_state *state;
- if (regs == NULL)
- return 0;
- idx = READ_ONCE(f->reg_idx);
- if (idx >= sizeof(struct pt_regs) / sizeof(u32))
- idx = 0;
- ptr += idx++;
- WRITE_ONCE(f->reg_idx, idx);
- return *ptr;
+ /*
+ * If kzalloc returns null, we just won't use that entropy
+ * source.
+ */
+ state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+ if (state) {
+ state->last_time = INITIAL_JIFFIES;
+ disk->random = state;
+ }
}
+#endif
-void add_interrupt_randomness(int irq)
+/*
+ * Interface for in-kernel drivers of true hardware RNGs.
+ * Those devices may produce endless random bits and will be throttled
+ * when our pool is full.
+ */
+void add_hwgenerator_randomness(const void *buffer, size_t count,
+ size_t entropy)
{
- struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
- struct pt_regs *regs = get_irq_regs();
- unsigned long now = jiffies;
- cycles_t cycles = random_get_entropy();
- u32 c_high, j_high;
- u64 ip;
-
- if (cycles == 0)
- cycles = get_reg(fast_pool, regs);
- c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
- j_high = (sizeof(now) > 4) ? now >> 32 : 0;
- fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
- fast_pool->pool[1] ^= now ^ c_high;
- ip = regs ? instruction_pointer(regs) : _RET_IP_;
- fast_pool->pool[2] ^= ip;
- fast_pool->pool[3] ^=
- (sizeof(ip) > 4) ? ip >> 32 : get_reg(fast_pool, regs);
-
- fast_mix(fast_pool);
- add_interrupt_bench(cycles);
-
if (unlikely(crng_init == 0)) {
- if ((fast_pool->count >= 64) &&
- crng_fast_load((u8 *)fast_pool->pool, sizeof(fast_pool->pool)) > 0) {
- fast_pool->count = 0;
- fast_pool->last = now;
- }
- return;
+ size_t ret = crng_pre_init_inject(buffer, count, true);
+ mix_pool_bytes(buffer, ret);
+ count -= ret;
+ buffer += ret;
+ if (!count || crng_init == 0)
+ return;
}
- if ((fast_pool->count < 64) && !time_after(now, fast_pool->last + HZ))
- return;
+ /*
+ * Throttle writing if we're above the trickle threshold.
+ * We'll be woken up again once below POOL_MIN_BITS, when
+ * the calling thread is about to terminate, or once
+ * CRNG_RESEED_INTERVAL has elapsed.
+ */
+ wait_event_interruptible_timeout(random_write_wait,
+ !system_wq || kthread_should_stop() ||
+ input_pool.entropy_count < POOL_MIN_BITS,
+ CRNG_RESEED_INTERVAL);
+ mix_pool_bytes(buffer, count);
+ credit_entropy_bits(entropy);
+}
+EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
- if (!spin_trylock(&input_pool.lock))
- return;
+/*
+ * Handle random seed passed by bootloader.
+ * If the seed is trustworthy, it is treated like input from a hardware
+ * RNG; otherwise, it is treated as device data.
+ * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
+ */
+void add_bootloader_randomness(const void *buf, size_t size)
+{
+ if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
+ add_hwgenerator_randomness(buf, size, size * 8);
+ else
+ add_device_randomness(buf, size);
+}
+EXPORT_SYMBOL_GPL(add_bootloader_randomness);
- fast_pool->last = now;
- __mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool));
- spin_unlock(&input_pool.lock);
+#if IS_ENABLED(CONFIG_VMGENID)
+static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
- fast_pool->count = 0;
+/*
+ * Handle a new VM ID, which is unique but not secret, so we
+ * don't credit it, but we do immediately force a reseed after so
+ * that it's used by the crng posthaste.
+ */
+void add_vmfork_randomness(const void *unique_vm_id, size_t size)
+{
+ add_device_randomness(unique_vm_id, size);
+ if (crng_ready()) {
+ crng_reseed(true);
+ pr_notice("crng reseeded due to virtual machine fork\n");
+ }
+ blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
+}
+#if IS_MODULE(CONFIG_VMGENID)
+EXPORT_SYMBOL_GPL(add_vmfork_randomness);
+#endif
- /* award one bit for the contents of the fast pool */
- credit_entropy_bits(1);
+int register_random_vmfork_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&vmfork_chain, nb);
}
-EXPORT_SYMBOL_GPL(add_interrupt_randomness);
+EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);
-#ifdef CONFIG_BLOCK
-void add_disk_randomness(struct gendisk *disk)
+int unregister_random_vmfork_notifier(struct notifier_block *nb)
{
- if (!disk || !disk->random)
- return;
- /* first major is 1, so we get >= 0x200 here */
- add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
- trace_add_disk_randomness(disk_devt(disk), POOL_ENTROPY_BITS());
+ return blocking_notifier_chain_unregister(&vmfork_chain, nb);
}
-EXPORT_SYMBOL_GPL(add_disk_randomness);
+EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
#endif
-/*********************************************************************
- *
- * Entropy extraction routines
- *
- *********************************************************************/
+struct fast_pool {
+ struct work_struct mix;
+ unsigned long pool[4];
+ unsigned long last;
+ unsigned int count;
+ u16 reg_idx;
+};
+
+static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
+#ifdef CONFIG_64BIT
+ /* SipHash constants */
+ .pool = { 0x736f6d6570736575UL, 0x646f72616e646f6dUL,
+ 0x6c7967656e657261UL, 0x7465646279746573UL }
+#else
+ /* HalfSipHash constants */
+ .pool = { 0, 0, 0x6c796765U, 0x74656462U }
+#endif
+};
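
The initializers above are not arbitrary: they are the standard SipHash IV, which is just ASCII. Reading each u64 most-significant byte first spells out the phrase from the SipHash paper. A quick check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint64_t c[4] = { 0x736f6d6570736575ULL, 0x646f72616e646f6dULL,
                                0x6c7967656e657261ULL, 0x7465646279746573ULL };
        int i, b;

        for (i = 0; i < 4; ++i)
                for (b = 56; b >= 0; b -= 8)
                        putchar((int)((c[i] >> b) & 0xff));
        putchar('\n'); /* prints: somepseudorandomlygeneratedbytes */
        return 0;
}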
/*
- * This function decides how many bytes to actually take from the
- * given pool, and also debits the entropy count accordingly.
+ * This is [Half]SipHash-1-x, starting from an empty key. Because
+ * the key is fixed, it assumes that its inputs are non-malicious,
+ * and therefore this has no security on its own. s represents the
+ * 128 or 256-bit SipHash state, while v represents a 128-bit input.
*/
-static size_t account(size_t nbytes, int min)
+static void fast_mix(unsigned long s[4], const unsigned long *v)
{
- int entropy_count, orig;
- size_t ibytes, nfrac;
-
- BUG_ON(input_pool.entropy_count > POOL_FRACBITS);
-
- /* Can we pull enough? */
-retry:
- entropy_count = orig = READ_ONCE(input_pool.entropy_count);
- if (WARN_ON(entropy_count < 0)) {
- pr_warn("negative entropy count: count %d\n", entropy_count);
- entropy_count = 0;
- }
-
- /* never pull more than available */
- ibytes = min_t(size_t, nbytes, entropy_count >> (POOL_ENTROPY_SHIFT + 3));
- if (ibytes < min)
- ibytes = 0;
- nfrac = ibytes << (POOL_ENTROPY_SHIFT + 3);
- if ((size_t)entropy_count > nfrac)
- entropy_count -= nfrac;
- else
- entropy_count = 0;
+ size_t i;
- if (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig)
- goto retry;
-
- trace_debit_entropy(8 * ibytes);
- if (ibytes && POOL_ENTROPY_BITS() < random_write_wakeup_bits) {
- wake_up_interruptible(&random_write_wait);
- kill_fasync(&fasync, SIGIO, POLL_OUT);
+ for (i = 0; i < 16 / sizeof(long); ++i) {
+ s[3] ^= v[i];
+#ifdef CONFIG_64BIT
+ s[0] += s[1]; s[1] = rol64(s[1], 13); s[1] ^= s[0]; s[0] = rol64(s[0], 32);
+ s[2] += s[3]; s[3] = rol64(s[3], 16); s[3] ^= s[2];
+ s[0] += s[3]; s[3] = rol64(s[3], 21); s[3] ^= s[0];
+ s[2] += s[1]; s[1] = rol64(s[1], 17); s[1] ^= s[2]; s[2] = rol64(s[2], 32);
+#else
+ s[0] += s[1]; s[1] = rol32(s[1], 5); s[1] ^= s[0]; s[0] = rol32(s[0], 16);
+ s[2] += s[3]; s[3] = rol32(s[3], 8); s[3] ^= s[2];
+ s[0] += s[3]; s[3] = rol32(s[3], 7); s[3] ^= s[0];
+ s[2] += s[1]; s[1] = rol32(s[1], 13); s[1] ^= s[2]; s[2] = rol32(s[2], 16);
+#endif
+ s[0] ^= v[i];
}
-
- return ibytes;
}
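
Since fast_mix() has no kernel dependencies, its 64-bit branch can be exercised standalone. This sketch copies the round verbatim and shows two nearby inputs diverging the state after a single call:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define rol64(x, n) (((x) << (n)) | ((x) >> (64 - (n))))

/* The 64-bit branch of fast_mix() above, copied so it runs in
 * userspace: one SipHash round per injected word. */
static void fast_mix(uint64_t s[4], const uint64_t v[2])
{
        int i;

        for (i = 0; i < 2; ++i) {
                s[3] ^= v[i];
                s[0] += s[1]; s[1] = rol64(s[1], 13); s[1] ^= s[0]; s[0] = rol64(s[0], 32);
                s[2] += s[3]; s[3] = rol64(s[3], 16); s[3] ^= s[2];
                s[0] += s[3]; s[3] = rol64(s[3], 21); s[3] ^= s[0];
                s[2] += s[1]; s[1] = rol64(s[1], 17); s[1] ^= s[2]; s[2] = rol64(s[2], 32);
                s[0] ^= v[i];
        }
}

int main(void)
{
        uint64_t s[4] = { 0x736f6d6570736575ULL, 0x646f72616e646f6dULL,
                          0x6c7967656e657261ULL, 0x7465646279746573ULL };
        uint64_t t[4] = { s[0], s[1], s[2], s[3] };
        uint64_t a[2] = { 1, 0 }, b[2] = { 2, 0 };

        fast_mix(s, a);
        fast_mix(t, b);
        /* A small input difference already diverges the whole state. */
        printf("%016" PRIx64 " vs %016" PRIx64 "\n", s[0], t[0]);
        return 0;
}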
+#ifdef CONFIG_SMP
/*
- * This function does the actual extraction for extract_entropy.
- *
- * Note: we assume that .poolwords is a multiple of 16 words.
+ * This function is called when the CPU has just come online, with
+ * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
*/
-static void extract_buf(u8 *out)
+int random_online_cpu(unsigned int cpu)
{
- struct blake2s_state state __aligned(__alignof__(unsigned long));
- u8 hash[BLAKE2S_HASH_SIZE];
- unsigned long *salt;
- unsigned long flags;
-
- blake2s_init(&state, sizeof(hash));
-
/*
- * If we have an architectural hardware random number
- * generator, use it for BLAKE2's salt & personal fields.
+ * During CPU shutdown and before CPU onlining, add_interrupt_
+ * randomness() may schedule mix_interrupt_randomness(), and
+ * set the MIX_INFLIGHT flag. However, because the worker can
+ * be scheduled on a different CPU during this period, that
+ * flag will never be cleared. For that reason, we zero out
+ * the flag here, which runs just after workqueues are onlined
+ * for the CPU again. This also has the effect of setting the
+ * irq randomness count to zero so that new accumulated irqs
+ * are fresh.
*/
- for (salt = (unsigned long *)&state.h[4];
- salt < (unsigned long *)&state.h[8]; ++salt) {
- unsigned long v;
- if (!arch_get_random_long(&v))
- break;
- *salt ^= v;
- }
-
- /* Generate a hash across the pool */
- spin_lock_irqsave(&input_pool.lock, flags);
- blake2s_update(&state, (const u8 *)input_pool_data, POOL_BYTES);
- blake2s_final(&state, hash); /* final zeros out state */
+ per_cpu_ptr(&irq_randomness, cpu)->count = 0;
+ return 0;
+}
+#endif
- /*
- * We mix the hash back into the pool to prevent backtracking
- * attacks (where the attacker knows the state of the pool
- * plus the current outputs, and attempts to find previous
- * outputs), unless the hash function can be inverted. By
- * mixing at least a hash worth of hash data back, we make
- * brute-forcing the feedback as hard as brute-forcing the
- * hash.
- */
- __mix_pool_bytes(hash, sizeof(hash));
- spin_unlock_irqrestore(&input_pool.lock, flags);
+static unsigned long get_reg(struct fast_pool *f, struct pt_regs *regs)
+{
+ unsigned long *ptr = (unsigned long *)regs;
+ unsigned int idx;
- /* Note that EXTRACT_SIZE is half of hash size here, because above
- * we've dumped the full length back into mixer. By reducing the
- * amount that we emit, we retain a level of forward secrecy.
- */
- memcpy(out, hash, EXTRACT_SIZE);
- memzero_explicit(hash, sizeof(hash));
+ if (regs == NULL)
+ return 0;
+ idx = READ_ONCE(f->reg_idx);
+ if (idx >= sizeof(struct pt_regs) / sizeof(unsigned long))
+ idx = 0;
+ ptr += idx++;
+ WRITE_ONCE(f->reg_idx, idx);
+ return *ptr;
}
-static ssize_t _extract_entropy(void *buf, size_t nbytes)
+static void mix_interrupt_randomness(struct work_struct *work)
{
- ssize_t ret = 0, i;
- u8 tmp[EXTRACT_SIZE];
+ struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
+ /*
+ * The size of the copied stack pool is explicitly 16 bytes so that we
+ * tax mix_pool_bytes()'s compression function the same amount on all
+ * platforms. This means on 64-bit we copy half the pool into this,
+ * while on 32-bit we copy all of it. The entropy is supposed to be
+ * sufficiently dispersed between bits that in the sponge-like
+ * half case, on average we don't wind up "losing" some.
+ */
+ u8 pool[16];
- while (nbytes) {
- extract_buf(tmp);
- i = min_t(int, nbytes, EXTRACT_SIZE);
- memcpy(buf, tmp, i);
- nbytes -= i;
- buf += i;
- ret += i;
+ /* Check to see if we're running on the wrong CPU due to hotplug. */
+ local_irq_disable();
+ if (fast_pool != this_cpu_ptr(&irq_randomness)) {
+ local_irq_enable();
+ return;
}
- /* Wipe data just returned from memory */
- memzero_explicit(tmp, sizeof(tmp));
+ /*
+ * Copy the pool to the stack so that the mixer always has a
+ * consistent view, before we reenable irqs again.
+ */
+ memcpy(pool, fast_pool->pool, sizeof(pool));
+ fast_pool->count = 0;
+ fast_pool->last = jiffies;
+ local_irq_enable();
- return ret;
-}
+ if (unlikely(crng_init == 0)) {
+ crng_pre_init_inject(pool, sizeof(pool), true);
+ mix_pool_bytes(pool, sizeof(pool));
+ } else {
+ mix_pool_bytes(pool, sizeof(pool));
+ credit_entropy_bits(1);
+ }
-/*
- * This function extracts randomness from the "entropy pool", and
- * returns it in a buffer.
- *
- * The min parameter specifies the minimum amount we can pull before
- * failing to avoid races that defeat catastrophic reseeding.
- */
-static ssize_t extract_entropy(void *buf, size_t nbytes, int min)
-{
- trace_extract_entropy(nbytes, POOL_ENTROPY_BITS(), _RET_IP_);
- nbytes = account(nbytes, min);
- return _extract_entropy(buf, nbytes);
+ memzero_explicit(pool, sizeof(pool));
}
-#define warn_unseeded_randomness(previous) \
- _warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))
-
-static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
+void add_interrupt_randomness(int irq)
{
-#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
- const bool print_once = false;
-#else
- static bool print_once __read_mostly;
-#endif
-
- if (print_once || crng_ready() ||
- (previous && (caller == READ_ONCE(*previous))))
- return;
- WRITE_ONCE(*previous, caller);
-#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
- print_once = true;
-#endif
- if (__ratelimit(&unseeded_warning))
- printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
- func_name, caller, crng_init);
-}
+ enum { MIX_INFLIGHT = 1U << 31 };
+ cycles_t cycles = random_get_entropy();
+ unsigned long now = jiffies;
+ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
+ struct pt_regs *regs = get_irq_regs();
+ unsigned int new_count;
+ union {
+ u32 u32[4];
+ u64 u64[2];
+ unsigned long longs[16 / sizeof(long)];
+ } irq_data;
-/*
- * This function is the exported kernel interface. It returns some
- * number of good random numbers, suitable for key generation, seeding
- * TCP sequence numbers, etc. It does not rely on the hardware random
- * number generator. For random bytes direct from the hardware RNG
- * (when available), use get_random_bytes_arch(). In order to ensure
- * that the randomness provided by this function is okay, the function
- * wait_for_random_bytes() should be called and return 0 at least once
- * at any point prior.
- */
-static void _get_random_bytes(void *buf, int nbytes)
-{
- u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
+ if (cycles == 0)
+ cycles = get_reg(fast_pool, regs);
- trace_get_random_bytes(nbytes, _RET_IP_);
+ if (sizeof(cycles) == 8)
+ irq_data.u64[0] = cycles ^ rol64(now, 32) ^ irq;
+ else {
+ irq_data.u32[0] = cycles ^ irq;
+ irq_data.u32[1] = now;
+ }
- while (nbytes >= CHACHA_BLOCK_SIZE) {
- extract_crng(buf);
- buf += CHACHA_BLOCK_SIZE;
- nbytes -= CHACHA_BLOCK_SIZE;
+ if (sizeof(unsigned long) == 8)
+ irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_;
+ else {
+ irq_data.u32[2] = regs ? instruction_pointer(regs) : _RET_IP_;
+ irq_data.u32[3] = get_reg(fast_pool, regs);
}
- if (nbytes > 0) {
- extract_crng(tmp);
- memcpy(buf, tmp, nbytes);
- crng_backtrack_protect(tmp, nbytes);
- } else
- crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE);
- memzero_explicit(tmp, sizeof(tmp));
-}
+ fast_mix(fast_pool->pool, irq_data.longs);
+ new_count = ++fast_pool->count;
-void get_random_bytes(void *buf, int nbytes)
-{
- static void *previous;
+ if (new_count & MIX_INFLIGHT)
+ return;
- warn_unseeded_randomness(&previous);
- _get_random_bytes(buf, nbytes);
+ if (new_count < 64 && (!time_after(now, fast_pool->last + HZ) ||
+ unlikely(crng_init == 0)))
+ return;
+
+ if (unlikely(!fast_pool->mix.func))
+ INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
+ fast_pool->count |= MIX_INFLIGHT;
+ queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
}
-EXPORT_SYMBOL(get_random_bytes);
+EXPORT_SYMBOL_GPL(add_interrupt_randomness);
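
Worth noting in add_interrupt_randomness(): the top bit of fast_pool->count doubles as the MIX_INFLIGHT flag, so the hot path needs no extra field or locking to know a worker is already queued. A simplified sketch of the idea (single-threaded, with the once-per-second timing condition omitted):

#include <stdbool.h>
#include <stdio.h>

#define MIX_INFLIGHT (1U << 31)

static unsigned int count;

/* The top bit of the sample counter marks "worker already queued";
 * the worker zeroes count, clearing the flag along with it. */
static bool should_schedule(void)
{
        unsigned int new_count = ++count;

        if (new_count & MIX_INFLIGHT)
                return false;  /* a mix is already pending */
        if (new_count < 64)
                return false;  /* not enough samples yet */
        count |= MIX_INFLIGHT;
        return true;
}

int main(void)
{
        unsigned int scheduled = 0;
        int i;

        for (i = 0; i < 200; ++i)
                scheduled += should_schedule();
        printf("scheduled %u time(s)\n", scheduled); /* prints: 1 */
        return 0;
}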
/*
* Each time the timer fires, we expect that we got an unpredictable
@@ -1501,238 +1398,134 @@ static void entropy_timer(struct timer_list *t)
static void try_to_generate_entropy(void)
{
struct {
- unsigned long now;
+ cycles_t cycles;
struct timer_list timer;
} stack;
- stack.now = random_get_entropy();
+ stack.cycles = random_get_entropy();
/* Slow counter - or none. Don't even bother */
- if (stack.now == random_get_entropy())
+ if (stack.cycles == random_get_entropy())
return;
timer_setup_on_stack(&stack.timer, entropy_timer, 0);
- while (!crng_ready()) {
+ while (!crng_ready() && !signal_pending(current)) {
if (!timer_pending(&stack.timer))
mod_timer(&stack.timer, jiffies + 1);
- mix_pool_bytes(&stack.now, sizeof(stack.now));
+ mix_pool_bytes(&stack.cycles, sizeof(stack.cycles));
schedule();
- stack.now = random_get_entropy();
+ stack.cycles = random_get_entropy();
}
del_timer_sync(&stack.timer);
destroy_timer_on_stack(&stack.timer);
- mix_pool_bytes(&stack.now, sizeof(stack.now));
+ mix_pool_bytes(&stack.cycles, sizeof(stack.cycles));
}
-/*
- * Wait for the urandom pool to be seeded and thus guaranteed to supply
- * cryptographically secure random numbers. This applies to: the /dev/urandom
- * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
- * family of functions. Using any of these functions without first calling
- * this function forfeits the guarantee of security.
- *
- * Returns: 0 if the urandom pool has been seeded.
- * -ERESTARTSYS if the function was interrupted by a signal.
- */
-int wait_for_random_bytes(void)
-{
- if (likely(crng_ready()))
- return 0;
-
- do {
- int ret;
- ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
- if (ret)
- return ret > 0 ? 0 : ret;
-
- try_to_generate_entropy();
- } while (!crng_ready());
- return 0;
-}
-EXPORT_SYMBOL(wait_for_random_bytes);
-
-/*
- * Returns whether or not the urandom pool has been seeded and thus guaranteed
- * to supply cryptographically secure random numbers. This applies to: the
- * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
- * ,u64,int,long} family of functions.
+/**********************************************************************
*
- * Returns: true if the urandom pool has been seeded.
- * false if the urandom pool has not been seeded.
- */
-bool rng_is_initialized(void)
-{
- return crng_ready();
-}
-EXPORT_SYMBOL(rng_is_initialized);
-
-/*
- * Add a callback function that will be invoked when the nonblocking
- * pool is initialised.
+ * Userspace reader/writer interfaces.
*
- * returns: 0 if callback is successfully added
- * -EALREADY if pool is already initialised (callback not called)
- * -ENOENT if module for callback is not alive
- */
-int add_random_ready_callback(struct random_ready_callback *rdy)
-{
- struct module *owner;
- unsigned long flags;
- int err = -EALREADY;
-
- if (crng_ready())
- return err;
-
- owner = rdy->owner;
- if (!try_module_get(owner))
- return -ENOENT;
-
- spin_lock_irqsave(&random_ready_list_lock, flags);
- if (crng_ready())
- goto out;
-
- owner = NULL;
-
- list_add(&rdy->list, &random_ready_list);
- err = 0;
-
-out:
- spin_unlock_irqrestore(&random_ready_list_lock, flags);
-
- module_put(owner);
-
- return err;
-}
-EXPORT_SYMBOL(add_random_ready_callback);
+ * getrandom(2) is the primary modern interface into the RNG and should
+ * be used in preference to anything else.
+ *
+ * Reading from /dev/random has the same functionality as calling
+ * getrandom(2) with flags=0. In earlier versions, however, it had
+ * vastly different semantics and should therefore be avoided, to
+ * prevent backwards compatibility issues.
+ *
+ * Reading from /dev/urandom has the same functionality as calling
+ * getrandom(2) with flags=GRND_INSECURE. Because it does not block
+ * waiting for the RNG to be ready, it should not be used.
+ *
+ * Writing to either /dev/random or /dev/urandom adds entropy to
+ * the input pool but does not credit it.
+ *
+ * Polling on /dev/random indicates when the RNG is initialized, on
+ * the read side, and when it wants new entropy, on the write side.
+ *
+ * Both /dev/random and /dev/urandom have the same set of ioctls for
+ * adding entropy, getting the entropy count, zeroing the count, and
+ * reseeding the crng.
+ *
+ **********************************************************************/
-/*
- * Delete a previously registered readiness callback function.
- */
-void del_random_ready_callback(struct random_ready_callback *rdy)
+SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
+ flags)
{
- unsigned long flags;
- struct module *owner = NULL;
-
- spin_lock_irqsave(&random_ready_list_lock, flags);
- if (!list_empty(&rdy->list)) {
- list_del_init(&rdy->list);
- owner = rdy->owner;
- }
- spin_unlock_irqrestore(&random_ready_list_lock, flags);
-
- module_put(owner);
-}
-EXPORT_SYMBOL(del_random_ready_callback);
+ if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
+ return -EINVAL;
-/*
- * This function will use the architecture-specific hardware random
- * number generator if it is available. The arch-specific hw RNG will
- * almost certainly be faster than what we can do in software, but it
- * is impossible to verify that it is implemented securely (as
- * opposed, to, say, the AES encryption of a sequence number using a
- * key known by the NSA). So it's useful if we need the speed, but
- * only if we're willing to trust the hardware manufacturer not to
- * have put in a back door.
- *
- * Return number of bytes filled in.
- */
-int __must_check get_random_bytes_arch(void *buf, int nbytes)
-{
- int left = nbytes;
- u8 *p = buf;
+ /*
+ * Requesting insecure and blocking randomness at the same time makes
+ * no sense.
+ */
+ if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
+ return -EINVAL;
- trace_get_random_bytes_arch(left, _RET_IP_);
- while (left) {
- unsigned long v;
- int chunk = min_t(int, left, sizeof(unsigned long));
+ if (count > INT_MAX)
+ count = INT_MAX;
- if (!arch_get_random_long(&v))
- break;
+ if (!(flags & GRND_INSECURE) && !crng_ready()) {
+ int ret;
- memcpy(p, &v, chunk);
- p += chunk;
- left -= chunk;
+ if (flags & GRND_NONBLOCK)
+ return -EAGAIN;
+ ret = wait_for_random_bytes();
+ if (unlikely(ret))
+ return ret;
}
-
- return nbytes - left;
+ return get_random_bytes_user(buf, count);
}
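
For completeness, a minimal userspace caller of the syscall defined above, via the glibc wrapper (assumes glibc 2.25 or later for <sys/random.h>):

#include <stdio.h>
#include <sys/random.h>
#include <sys/types.h>

int main(void)
{
        unsigned char buf[16];
        size_t i;

        /* flags=0 blocks until the RNG is initialized; requests this
         * small then cannot fail. GRND_NONBLOCK would instead return
         * -1 with EAGAIN before initialization. */
        if (getrandom(buf, sizeof(buf), 0) != (ssize_t)sizeof(buf)) {
                perror("getrandom");
                return 1;
        }
        for (i = 0; i < sizeof(buf); ++i)
                printf("%02x", buf[i]);
        printf("\n");
        return 0;
}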
-EXPORT_SYMBOL(get_random_bytes_arch);
-/*
- * init_std_data - initialize pool with system data
- *
- * This function clears the pool's entropy count and mixes some system
- * data into the pool to prepare it for use. The pool is not cleared
- * as that can only decrease the entropy in the pool.
- */
-static void __init init_std_data(void)
+static __poll_t random_poll(struct file *file, poll_table *wait)
{
- int i;
- ktime_t now = ktime_get_real();
- unsigned long rv;
-
- mix_pool_bytes(&now, sizeof(now));
- for (i = POOL_BYTES; i > 0; i -= sizeof(rv)) {
- if (!arch_get_random_seed_long(&rv) &&
- !arch_get_random_long(&rv))
- rv = random_get_entropy();
- mix_pool_bytes(&rv, sizeof(rv));
- }
- mix_pool_bytes(utsname(), sizeof(*(utsname())));
-}
+ __poll_t mask;
-/*
- * Note that setup_arch() may call add_device_randomness()
- * long before we get here. This allows seeding of the pools
- * with some platform dependent data very early in the boot
- * process. But it limits our options here. We must use
- * statically allocated structures that already have all
- * initializations complete at compile time. We should also
- * take care not to overwrite the precious per platform data
- * we were given.
- */
-int __init rand_initialize(void)
-{
- init_std_data();
- if (crng_need_final_init)
- crng_finalize_init();
- crng_initialize_primary();
- crng_global_init_time = jiffies;
- if (ratelimit_disable) {
- urandom_warning.interval = 0;
- unseeded_warning.interval = 0;
- }
- return 0;
+ poll_wait(file, &crng_init_wait, wait);
+ poll_wait(file, &random_write_wait, wait);
+ mask = 0;
+ if (crng_ready())
+ mask |= EPOLLIN | EPOLLRDNORM;
+ if (input_pool.entropy_count < POOL_MIN_BITS)
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ return mask;
}
-#ifdef CONFIG_BLOCK
-void rand_initialize_disk(struct gendisk *disk)
+static int write_pool(const char __user *ubuf, size_t count)
{
- struct timer_rand_state *state;
+ size_t len;
+ int ret = 0;
+ u8 block[BLAKE2S_BLOCK_SIZE];
- /*
- * If kzalloc returns null, we just won't use that entropy
- * source.
- */
- state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
- if (state) {
- state->last_time = INITIAL_JIFFIES;
- disk->random = state;
+ while (count) {
+ len = min(count, sizeof(block));
+ if (copy_from_user(block, ubuf, len)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ count -= len;
+ ubuf += len;
+ mix_pool_bytes(block, len);
+ cond_resched();
}
+
+out:
+ memzero_explicit(block, sizeof(block));
+ return ret;
}
-#endif
-static ssize_t urandom_read_nowarn(struct file *file, char __user *buf,
- size_t nbytes, loff_t *ppos)
+static ssize_t random_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
{
int ret;
- nbytes = min_t(size_t, nbytes, INT_MAX >> (POOL_ENTROPY_SHIFT + 3));
- ret = extract_crng_user(buf, nbytes);
- trace_urandom_read(8 * nbytes, 0, POOL_ENTROPY_BITS());
- return ret;
+ ret = write_pool(buffer, count);
+ if (ret)
+ return ret;
+
+ return (ssize_t)count;
}
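
Unprivileged writes to /dev/random land in write_pool() above: the bytes are mixed into the input pool but no entropy is credited. A minimal sketch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	static const char seed[] = "locally gathered seed material";
	int fd = open("/dev/random", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/random");
		return 1;
	}
	/* Mixed into the input pool; entropy_count is left unchanged. */
	if (write(fd, seed, sizeof(seed) - 1) < 0)
		perror("write");
	close(fd);
	return 0;
}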
static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
@@ -1747,7 +1540,7 @@ static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
current->comm, nbytes);
}
- return urandom_read_nowarn(file, buf, nbytes, ppos);
+ return get_random_bytes_user(buf, nbytes);
}
static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
@@ -1758,62 +1551,7 @@ static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
ret = wait_for_random_bytes();
if (ret != 0)
return ret;
- return urandom_read_nowarn(file, buf, nbytes, ppos);
-}
-
-static __poll_t random_poll(struct file *file, poll_table *wait)
-{
- __poll_t mask;
-
- poll_wait(file, &crng_init_wait, wait);
- poll_wait(file, &random_write_wait, wait);
- mask = 0;
- if (crng_ready())
- mask |= EPOLLIN | EPOLLRDNORM;
- if (POOL_ENTROPY_BITS() < random_write_wakeup_bits)
- mask |= EPOLLOUT | EPOLLWRNORM;
- return mask;
-}
-
-static int write_pool(const char __user *buffer, size_t count)
-{
- size_t bytes;
- u32 t, buf[16];
- const char __user *p = buffer;
-
- while (count > 0) {
- int b, i = 0;
-
- bytes = min(count, sizeof(buf));
- if (copy_from_user(&buf, p, bytes))
- return -EFAULT;
-
- for (b = bytes; b > 0; b -= sizeof(u32), i++) {
- if (!arch_get_random_int(&t))
- break;
- buf[i] ^= t;
- }
-
- count -= bytes;
- p += bytes;
-
- mix_pool_bytes(buf, bytes);
- cond_resched();
- }
-
- return 0;
-}
-
-static ssize_t random_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- size_t ret;
-
- ret = write_pool(buffer, count);
- if (ret)
- return ret;
-
- return (ssize_t)count;
+ return get_random_bytes_user(buf, nbytes);
}
static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
@@ -1824,9 +1562,8 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
switch (cmd) {
case RNDGETENTCNT:
- /* inherently racy, no point locking */
- ent_count = POOL_ENTROPY_BITS();
- if (put_user(ent_count, p))
+ /* Inherently racy, no point locking. */
+ if (put_user(input_pool.entropy_count, p))
return -EFAULT;
return 0;
case RNDADDTOENTCNT:
@@ -1834,7 +1571,10 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM;
if (get_user(ent_count, p))
return -EFAULT;
- return credit_entropy_bits_safe(ent_count);
+ if (ent_count < 0)
+ return -EINVAL;
+ credit_entropy_bits(ent_count);
+ return 0;
case RNDADDENTROPY:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1847,7 +1587,8 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
retval = write_pool((const char __user *)p, size);
if (retval < 0)
return retval;
- return credit_entropy_bits_safe(ent_count);
+ credit_entropy_bits(ent_count);
+ return 0;
case RNDZAPENTCNT:
case RNDCLEARPOOL:
/*
@@ -1856,7 +1597,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
*/
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (xchg(&input_pool.entropy_count, 0) && random_write_wakeup_bits) {
+ if (xchg(&input_pool.entropy_count, 0) >= POOL_MIN_BITS) {
wake_up_interruptible(&random_write_wait);
kill_fasync(&fasync, SIGIO, POLL_OUT);
}
@@ -1864,10 +1605,9 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
case RNDRESEEDCRNG:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (crng_init < 2)
+ if (!crng_ready())
return -ENODATA;
- crng_reseed(&primary_crng, true);
- WRITE_ONCE(crng_global_init_time, jiffies - 1);
+ crng_reseed(false);
return 0;
default:
return -EINVAL;
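
Crediting entropy, by contrast, requires CAP_SYS_ADMIN and the RNDADDENTROPY ioctl handled above, which mixes the buffer via write_pool() and then calls credit_entropy_bits(). A sketch using struct rand_pool_info from <linux/random.h> (the seed bytes here are a placeholder, not real entropy):

#include <fcntl.h>
#include <linux/random.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	unsigned char seed[32] = { 0 };	/* placeholder; fill from a real source */
	struct rand_pool_info *info;
	int fd = open("/dev/random", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/random");
		return 1;
	}
	info = malloc(sizeof(*info) + sizeof(seed));
	if (!info)
		return 1;
	info->entropy_count = 8 * sizeof(seed);	/* bits being credited */
	info->buf_size = sizeof(seed);
	memcpy(info->buf, seed, sizeof(seed));

	if (ioctl(fd, RNDADDENTROPY, info) < 0)
		perror("RNDADDENTROPY");	/* EPERM without CAP_SYS_ADMIN */

	free(info);
	close(fd);
	return 0;
}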
@@ -1898,37 +1638,34 @@ const struct file_operations urandom_fops = {
.llseek = noop_llseek,
};
-SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
- flags)
-{
- int ret;
-
- if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
- return -EINVAL;
-
- /*
- * Requesting insecure and blocking randomness at the same time makes
- * no sense.
- */
- if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
- return -EINVAL;
-
- if (count > INT_MAX)
- count = INT_MAX;
-
- if (!(flags & GRND_INSECURE) && !crng_ready()) {
- if (flags & GRND_NONBLOCK)
- return -EAGAIN;
- ret = wait_for_random_bytes();
- if (unlikely(ret))
- return ret;
- }
- return urandom_read_nowarn(NULL, buf, count, NULL);
-}
/********************************************************************
*
- * Sysctl interface
+ * Sysctl interface.
+ *
+ * These are partly unused legacy knobs with dummy values to not break
+ * userspace and partly still useful things. They are usually accessible
+ * in /proc/sys/kernel/random/ and are as follows:
+ *
+ * - boot_id - a UUID representing the current boot.
+ *
+ * - uuid - a random UUID, different each time the file is read.
+ *
+ * - poolsize - the number of bits of entropy that the input pool can
+ * hold, tied to the POOL_BITS constant.
+ *
+ * - entropy_avail - the number of bits of entropy currently in the
+ * input pool. Always <= poolsize.
+ *
+ * - write_wakeup_threshold - the amount of entropy in the input pool
+ * below which write polls to /dev/random will unblock, requesting
+ * more entropy, tied to the POOL_MIN_BITS constant. It is writable
+ * to avoid breaking old userspaces, but writing to it does not
+ * change any behavior of the RNG.
+ *
+ * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
+ * It is writable to avoid breaking old userspaces, but writing
+ * to it does not change any behavior of the RNG.
*
********************************************************************/
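
The knobs documented above are ordinary procfs files; a minimal reader sketch (note that writes to write_wakeup_threshold and urandom_min_reseed_secs are accepted but ignored, per proc_do_rointvec() below):

#include <stdio.h>

static void show(const char *path)
{
	char line[128];
	FILE *f = fopen(path, "r");

	if (f && fgets(line, sizeof(line), f))
		printf("%-48s %s", path, line);
	if (f)
		fclose(f);
}

int main(void)
{
	show("/proc/sys/kernel/random/poolsize");
	show("/proc/sys/kernel/random/entropy_avail");
	show("/proc/sys/kernel/random/write_wakeup_threshold");
	show("/proc/sys/kernel/random/boot_id");
	show("/proc/sys/kernel/random/uuid");
	return 0;
}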
@@ -1936,25 +1673,28 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
#include <linux/sysctl.h>
-static int min_write_thresh;
-static int max_write_thresh = POOL_BITS;
-static int random_min_urandom_seed = 60;
-static char sysctl_bootid[16];
+static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
+static int sysctl_random_write_wakeup_bits = POOL_MIN_BITS;
+static int sysctl_poolsize = POOL_BITS;
+static u8 sysctl_bootid[UUID_SIZE];
/*
* This function is used to return both the bootid UUID, and random
- * UUID. The difference is in whether table->data is NULL; if it is,
+ * UUID. The difference is in whether table->data is NULL; if it is,
* then a new UUID is generated and returned to the user.
- *
- * If the user accesses this via the proc interface, the UUID will be
- * returned as an ASCII string in the standard UUID format; if via the
- * sysctl system call, as 16 bytes of binary data.
*/
static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
- struct ctl_table fake_table;
- unsigned char buf[64], tmp_uuid[16], *uuid;
+ u8 tmp_uuid[UUID_SIZE], *uuid;
+ char uuid_string[UUID_STRING_LEN + 1];
+ struct ctl_table fake_table = {
+ .data = uuid_string,
+ .maxlen = UUID_STRING_LEN
+ };
+
+ if (write)
+ return -EPERM;
uuid = table->data;
if (!uuid) {
@@ -1969,32 +1709,17 @@ static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
spin_unlock(&bootid_spinlock);
}
- sprintf(buf, "%pU", uuid);
-
- fake_table.data = buf;
- fake_table.maxlen = sizeof(buf);
-
- return proc_dostring(&fake_table, write, buffer, lenp, ppos);
+ snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
+ return proc_dostring(&fake_table, 0, buffer, lenp, ppos);
}
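
For reference, the kernel's %pU used above prints the 16 UUID bytes in the standard 8-4-4-4-12 layout (big-endian byte order by default); an equivalent userspace formatter, as a sketch:

#include <stdio.h>

#define UUID_SIZE	16
#define UUID_STRING_LEN	36

/* Format raw UUID bytes in the 8-4-4-4-12 layout, like the kernel's %pU. */
static void uuid_to_string(const unsigned char u[UUID_SIZE],
			   char out[UUID_STRING_LEN + 1])
{
	snprintf(out, UUID_STRING_LEN + 1,
		 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
		 u[0], u[1], u[2], u[3], u[4], u[5], u[6], u[7],
		 u[8], u[9], u[10], u[11], u[12], u[13], u[14], u[15]);
}

int main(void)
{
	const unsigned char u[UUID_SIZE] = {
		0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
		0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
	};
	char s[UUID_STRING_LEN + 1];

	uuid_to_string(u, s);
	printf("%s\n", s);
	return 0;
}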
-/*
- * Return entropy available scaled to integral bits
- */
-static int proc_do_entropy(struct ctl_table *table, int write, void *buffer,
- size_t *lenp, loff_t *ppos)
+/* The same as proc_dointvec, but writes don't change anything. */
+static int proc_do_rointvec(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
{
- struct ctl_table fake_table;
- int entropy_count;
-
- entropy_count = *(int *)table->data >> POOL_ENTROPY_SHIFT;
-
- fake_table.data = &entropy_count;
- fake_table.maxlen = sizeof(entropy_count);
-
- return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
+ return write ? 0 : proc_dointvec(table, 0, buffer, lenp, ppos);
}
-static int sysctl_poolsize = POOL_BITS;
static struct ctl_table random_table[] = {
{
.procname = "poolsize",
@@ -2005,56 +1730,36 @@ static struct ctl_table random_table[] = {
},
{
.procname = "entropy_avail",
+ .data = &input_pool.entropy_count,
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_do_entropy,
- .data = &input_pool.entropy_count,
+ .proc_handler = proc_dointvec,
},
{
.procname = "write_wakeup_threshold",
- .data = &random_write_wakeup_bits,
+ .data = &sysctl_random_write_wakeup_bits,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_write_thresh,
- .extra2 = &max_write_thresh,
+ .proc_handler = proc_do_rointvec,
},
{
.procname = "urandom_min_reseed_secs",
- .data = &random_min_urandom_seed,
+ .data = &sysctl_random_min_urandom_seed,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_do_rointvec,
},
{
.procname = "boot_id",
.data = &sysctl_bootid,
- .maxlen = 16,
.mode = 0444,
.proc_handler = proc_do_uuid,
},
{
.procname = "uuid",
- .maxlen = 16,
.mode = 0444,
.proc_handler = proc_do_uuid,
},
-#ifdef ADD_INTERRUPT_BENCH
- {
- .procname = "add_interrupt_avg_cycles",
- .data = &avg_cycles,
- .maxlen = sizeof(avg_cycles),
- .mode = 0444,
- .proc_handler = proc_doulongvec_minmax,
- },
- {
- .procname = "add_interrupt_avg_deviation",
- .data = &avg_deviation,
- .maxlen = sizeof(avg_deviation),
- .mode = 0444,
- .proc_handler = proc_doulongvec_minmax,
- },
-#endif
{ }
};
@@ -2068,170 +1773,4 @@ static int __init random_sysctls_init(void)
return 0;
}
device_initcall(random_sysctls_init);
-#endif /* CONFIG_SYSCTL */
-
-struct batched_entropy {
- union {
- u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)];
- u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
- };
- unsigned int position;
- spinlock_t batch_lock;
-};
-
-/*
- * Get a random word for internal kernel use only. The quality of the random
- * number is good as /dev/urandom, but there is no backtrack protection, with
- * the goal of being quite fast and not depleting entropy. In order to ensure
- * that the randomness provided by this function is okay, the function
- * wait_for_random_bytes() should be called and return 0 at least once at any
- * point prior.
- */
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
-};
-
-u64 get_random_u64(void)
-{
- u64 ret;
- unsigned long flags;
- struct batched_entropy *batch;
- static void *previous;
-
- warn_unseeded_randomness(&previous);
-
- batch = raw_cpu_ptr(&batched_entropy_u64);
- spin_lock_irqsave(&batch->batch_lock, flags);
- if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
- extract_crng((u8 *)batch->entropy_u64);
- batch->position = 0;
- }
- ret = batch->entropy_u64[batch->position++];
- spin_unlock_irqrestore(&batch->batch_lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(get_random_u64);
-
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
-};
-u32 get_random_u32(void)
-{
- u32 ret;
- unsigned long flags;
- struct batched_entropy *batch;
- static void *previous;
-
- warn_unseeded_randomness(&previous);
-
- batch = raw_cpu_ptr(&batched_entropy_u32);
- spin_lock_irqsave(&batch->batch_lock, flags);
- if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
- extract_crng((u8 *)batch->entropy_u32);
- batch->position = 0;
- }
- ret = batch->entropy_u32[batch->position++];
- spin_unlock_irqrestore(&batch->batch_lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(get_random_u32);
-
-/* It's important to invalidate all potential batched entropy that might
- * be stored before the crng is initialized, which we can do lazily by
- * simply resetting the counter to zero so that it's re-extracted on the
- * next usage. */
-static void invalidate_batched_entropy(void)
-{
- int cpu;
- unsigned long flags;
-
- for_each_possible_cpu(cpu) {
- struct batched_entropy *batched_entropy;
-
- batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
- spin_lock_irqsave(&batched_entropy->batch_lock, flags);
- batched_entropy->position = 0;
- spin_unlock(&batched_entropy->batch_lock);
-
- batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
- spin_lock(&batched_entropy->batch_lock);
- batched_entropy->position = 0;
- spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
- }
-}
-
-/**
- * randomize_page - Generate a random, page aligned address
- * @start: The smallest acceptable address the caller will take.
- * @range: The size of the area, starting at @start, within which the
- * random address must fall.
- *
- * If @start + @range would overflow, @range is capped.
- *
- * NOTE: Historical use of randomize_range, which this replaces, presumed that
- * @start was already page aligned. We now align it regardless.
- *
- * Return: A page aligned address within [start, start + range). On error,
- * @start is returned.
- */
-unsigned long randomize_page(unsigned long start, unsigned long range)
-{
- if (!PAGE_ALIGNED(start)) {
- range -= PAGE_ALIGN(start) - start;
- start = PAGE_ALIGN(start);
- }
-
- if (start > ULONG_MAX - range)
- range = ULONG_MAX - start;
-
- range >>= PAGE_SHIFT;
-
- if (range == 0)
- return start;
-
- return start + (get_random_long() % range << PAGE_SHIFT);
-}
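
The arithmetic of the removed randomize_page() can be modeled in userspace; a sketch in which getrandom() stands in for get_random_long() and sysconf() for PAGE_SIZE:

#include <stdio.h>
#include <sys/random.h>
#include <unistd.h>

/* Model of the removed helper: page-align start, cap range, pick a page. */
static unsigned long model_randomize_page(unsigned long start,
					  unsigned long range)
{
	unsigned long page = (unsigned long)sysconf(_SC_PAGESIZE);
	unsigned long aligned = (start + page - 1) & ~(page - 1);
	unsigned long r;

	range -= aligned - start;
	start = aligned;

	if (start > ~0UL - range)
		range = ~0UL - start;

	range /= page;
	if (range == 0)
		return start;

	if (getrandom(&r, sizeof(r), 0) != sizeof(r))
		return start;
	return start + (r % range) * page;
}

int main(void)
{
	printf("%#lx\n", model_randomize_page(0x10000001UL, 1UL << 20));
	return 0;
}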
-
-/* Interface for in-kernel drivers of true hardware RNGs.
- * Those devices may produce endless random bits and will be throttled
- * when our pool is full.
- */
-void add_hwgenerator_randomness(const char *buffer, size_t count,
- size_t entropy)
-{
- if (unlikely(crng_init == 0)) {
- size_t ret = crng_fast_load(buffer, count);
- mix_pool_bytes(buffer, ret);
- count -= ret;
- buffer += ret;
- if (!count || crng_init == 0)
- return;
- }
-
- /* Throttle writing if we're above the trickle threshold.
- * We'll be woken up again once below random_write_wakeup_thresh,
- * when the calling thread is about to terminate, or once
- * CRNG_RESEED_INTERVAL has lapsed.
- */
- wait_event_interruptible_timeout(random_write_wait,
- !system_wq || kthread_should_stop() ||
- POOL_ENTROPY_BITS() <= random_write_wakeup_bits,
- CRNG_RESEED_INTERVAL);
- mix_pool_bytes(buffer, count);
- credit_entropy_bits(entropy);
-}
-EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
-
-/* Handle a random seed passed in by the bootloader.
- * If the seed is trustworthy, it is credited as if it came from a hardware RNG;
- * otherwise it is mixed in as ordinary device data.
- * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
- */
-void add_bootloader_randomness(const void *buf, unsigned int size)
-{
- if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
- add_hwgenerator_randomness(buf, size, size * 8);
- else
- add_device_randomness(buf, size);
-}
-EXPORT_SYMBOL_GPL(add_bootloader_randomness);
+#endif
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
index 7c617edff4ca..3170d59d660c 100644
--- a/drivers/char/tpm/st33zp24/i2c.c
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -267,11 +267,8 @@ static int st33zp24_i2c_probe(struct i2c_client *client,
static int st33zp24_i2c_remove(struct i2c_client *client)
{
struct tpm_chip *chip = i2c_get_clientdata(client);
- int ret;
- ret = st33zp24_remove(chip);
- if (ret)
- return ret;
+ st33zp24_remove(chip);
return 0;
}
diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c
index a75dafd39445..22d184884694 100644
--- a/drivers/char/tpm/st33zp24/spi.c
+++ b/drivers/char/tpm/st33zp24/spi.c
@@ -381,16 +381,11 @@ static int st33zp24_spi_probe(struct spi_device *dev)
* @dev: the spi_device description (TPM SPI description).
*/
-static int st33zp24_spi_remove(struct spi_device *dev)
+static void st33zp24_spi_remove(struct spi_device *dev)
{
struct tpm_chip *chip = spi_get_drvdata(dev);
- int ret;
- ret = st33zp24_remove(chip);
- if (ret)
- return ret;
-
- return 0;
+ st33zp24_remove(chip);
}
static const struct spi_device_id st33zp24_spi_id[] = {
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index ce9efb73c144..15b393e92c8e 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -511,10 +511,9 @@ _tpm_clean_answer:
}
EXPORT_SYMBOL(st33zp24_probe);
-int st33zp24_remove(struct tpm_chip *chip)
+void st33zp24_remove(struct tpm_chip *chip)
{
tpm_chip_unregister(chip);
- return 0;
}
EXPORT_SYMBOL(st33zp24_remove);
diff --git a/drivers/char/tpm/st33zp24/st33zp24.h b/drivers/char/tpm/st33zp24/st33zp24.h
index 6747be1e2502..b387a476c555 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.h
+++ b/drivers/char/tpm/st33zp24/st33zp24.h
@@ -34,5 +34,5 @@ int st33zp24_pm_resume(struct device *dev);
int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops,
struct device *dev, int irq, int io_lpcpd);
-int st33zp24_remove(struct tpm_chip *chip);
+void st33zp24_remove(struct tpm_chip *chip);
#endif /* __LOCAL_ST33ZP24_H__ */
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index b009e7479b70..783d65fc71f0 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -274,14 +274,6 @@ static void tpm_dev_release(struct device *dev)
kfree(chip);
}
-static void tpm_devs_release(struct device *dev)
-{
- struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs);
-
- /* release the master device reference */
- put_device(&chip->dev);
-}
-
/**
* tpm_class_shutdown() - prepare the TPM device for loss of power.
* @dev: device to which the chip is associated.
@@ -344,7 +336,6 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
chip->dev_num = rc;
device_initialize(&chip->dev);
- device_initialize(&chip->devs);
chip->dev.class = tpm_class;
chip->dev.class->shutdown_pre = tpm_class_shutdown;
@@ -352,39 +343,20 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
chip->dev.parent = pdev;
chip->dev.groups = chip->groups;
- chip->devs.parent = pdev;
- chip->devs.class = tpmrm_class;
- chip->devs.release = tpm_devs_release;
- /* get extra reference on main device to hold on
- * behalf of devs. This holds the chip structure
- * while cdevs is in use. The corresponding put
- * is in the tpm_devs_release (TPM2 only)
- */
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- get_device(&chip->dev);
-
if (chip->dev_num == 0)
chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR);
else
chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num);
- chip->devs.devt =
- MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES);
-
rc = dev_set_name(&chip->dev, "tpm%d", chip->dev_num);
if (rc)
goto out;
- rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num);
- if (rc)
- goto out;
if (!pdev)
chip->flags |= TPM_CHIP_FLAG_VIRTUAL;
cdev_init(&chip->cdev, &tpm_fops);
- cdev_init(&chip->cdevs, &tpmrm_fops);
chip->cdev.owner = THIS_MODULE;
- chip->cdevs.owner = THIS_MODULE;
rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE);
if (rc) {
@@ -396,7 +368,6 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
return chip;
out:
- put_device(&chip->devs);
put_device(&chip->dev);
return ERR_PTR(rc);
}
@@ -445,14 +416,9 @@ static int tpm_add_char_device(struct tpm_chip *chip)
}
if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip)) {
- rc = cdev_device_add(&chip->cdevs, &chip->devs);
- if (rc) {
- dev_err(&chip->devs,
- "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
- dev_name(&chip->devs), MAJOR(chip->devs.devt),
- MINOR(chip->devs.devt), rc);
- return rc;
- }
+ rc = tpm_devs_add(chip);
+ if (rc)
+ goto err_del_cdev;
}
/* Make the chip available. */
@@ -460,6 +426,10 @@ static int tpm_add_char_device(struct tpm_chip *chip)
idr_replace(&dev_nums_idr, chip, chip->dev_num);
mutex_unlock(&idr_lock);
+ return 0;
+
+err_del_cdev:
+ cdev_device_del(&chip->cdev, &chip->dev);
return rc;
}
@@ -654,7 +624,7 @@ void tpm_chip_unregister(struct tpm_chip *chip)
hwrng_unregister(&chip->hwrng);
tpm_bios_log_teardown(chip);
if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip))
- cdev_device_del(&chip->cdevs, &chip->devs);
+ tpm_devs_remove(chip);
tpm_del_char_device(chip);
}
EXPORT_SYMBOL_GPL(tpm_chip_unregister);
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index c08cbb306636..dc4c0a0a5129 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -69,7 +69,13 @@ static void tpm_dev_async_work(struct work_struct *work)
ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
sizeof(priv->data_buffer));
tpm_put_ops(priv->chip);
- if (ret > 0) {
+
+ /*
+ * If ret is > 0 then tpm_dev_transmit returned the size of the
+ * response. If ret is < 0 then tpm_dev_transmit failed and
+ * returned an error code.
+ */
+ if (ret != 0) {
priv->response_length = ret;
mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
}
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 283f78211c3a..2163c6ee0d36 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -234,6 +234,8 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
size_t cmdsiz);
int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf,
size_t *bufsiz);
+int tpm_devs_add(struct tpm_chip *chip);
+void tpm_devs_remove(struct tpm_chip *chip);
void tpm_bios_log_setup(struct tpm_chip *chip);
void tpm_bios_log_teardown(struct tpm_chip *chip);
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index 97e916856cf3..ffb35f0154c1 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -58,12 +58,12 @@ int tpm2_init_space(struct tpm_space *space, unsigned int buf_size)
void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space)
{
- mutex_lock(&chip->tpm_mutex);
- if (!tpm_chip_start(chip)) {
+
+ if (tpm_try_get_ops(chip) == 0) {
tpm2_flush_sessions(chip, space);
- tpm_chip_stop(chip);
+ tpm_put_ops(chip);
}
- mutex_unlock(&chip->tpm_mutex);
+
kfree(space->context_buf);
kfree(space->session_buf);
}
@@ -574,3 +574,68 @@ out:
dev_err(&chip->dev, "%s: error %d\n", __func__, rc);
return rc;
}
+
+/*
+ * Put the reference to the main device.
+ */
+static void tpm_devs_release(struct device *dev)
+{
+ struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs);
+
+ /* release the master device reference */
+ put_device(&chip->dev);
+}
+
+/*
+ * Remove the device file for exposed TPM spaces and release the device
+ * reference. This may also release the reference to the master device.
+ */
+void tpm_devs_remove(struct tpm_chip *chip)
+{
+ cdev_device_del(&chip->cdevs, &chip->devs);
+ put_device(&chip->devs);
+}
+
+/*
+ * Add a device file to expose TPM spaces. Also take a reference to the
+ * main device.
+ */
+int tpm_devs_add(struct tpm_chip *chip)
+{
+ int rc;
+
+ device_initialize(&chip->devs);
+ chip->devs.parent = chip->dev.parent;
+ chip->devs.class = tpmrm_class;
+
+ /*
+ * Get an extra reference on the main device to hold on behalf of devs.
+ * This keeps the chip structure alive while cdevs is in use. The
+ * corresponding put is in tpm_devs_release().
+ */
+ get_device(&chip->dev);
+ chip->devs.release = tpm_devs_release;
+ chip->devs.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES);
+ cdev_init(&chip->cdevs, &tpmrm_fops);
+ chip->cdevs.owner = THIS_MODULE;
+
+ rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num);
+ if (rc)
+ goto err_put_devs;
+
+ rc = cdev_device_add(&chip->cdevs, &chip->devs);
+ if (rc) {
+ dev_err(&chip->devs,
+ "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
+ dev_name(&chip->devs), MAJOR(chip->devs.devt),
+ MINOR(chip->devs.devt), rc);
+ goto err_put_devs;
+ }
+
+ return 0;
+
+err_put_devs:
+ put_device(&chip->devs);
+
+ return rc;
+}
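
The ownership pattern above (chip->devs holds one reference on chip->dev, dropped only from the devs release callback) can be sketched with plain reference counts; a userspace model, not the driver-core API:

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refs;
	const char *name;
	struct obj *pins;	/* reference held on behalf of this object */
};

static void obj_put(struct obj *o)
{
	if (--o->refs)
		return;
	printf("releasing %s\n", o->name);
	if (o->pins)
		obj_put(o->pins);	/* the "corresponding put" in release */
	free(o);
}

int main(void)
{
	struct obj *dev = malloc(sizeof(*dev));
	struct obj *devs = malloc(sizeof(*devs));

	if (!dev || !devs)
		return 1;
	*dev = (struct obj){ .refs = 1, .name = "chip->dev" };
	*devs = (struct obj){ .refs = 1, .name = "chip->devs", .pins = dev };

	dev->refs++;		/* get_device(&chip->dev) in tpm_devs_add() */

	obj_put(dev);		/* last direct user of the chip goes away... */
	obj_put(devs);		/* ...but it lives until devs is released */
	return 0;
}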
diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
index aaa59a00eeae..184396b3af50 100644
--- a/drivers/char/tpm/tpm_tis_spi_main.c
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
@@ -254,13 +254,12 @@ static int tpm_tis_spi_driver_probe(struct spi_device *spi)
static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_spi_resume);
-static int tpm_tis_spi_remove(struct spi_device *dev)
+static void tpm_tis_spi_remove(struct spi_device *dev)
{
struct tpm_chip *chip = spi_get_drvdata(dev);
tpm_chip_unregister(chip);
tpm_tis_remove(chip);
- return 0;
}
static const struct spi_device_id tpm_tis_spi_id[] = {
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 91c772e38bb5..5c865987ba5c 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -91,7 +91,7 @@ static ssize_t vtpm_proxy_fops_read(struct file *filp, char __user *buf,
len = proxy_dev->req_len;
- if (count < len) {
+ if (count < len || len > sizeof(proxy_dev->buffer)) {
mutex_unlock(&proxy_dev->buf_lock);
pr_debug("Invalid size in recv: count=%zd, req_len=%zd\n",
count, len);
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index da5b30771418..f53e0cf1ec7e 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -126,16 +126,16 @@ static void vtpm_cancel(struct tpm_chip *chip)
notify_remote_via_evtchn(priv->evtchn);
}
-static unsigned int shr_data_offset(struct vtpm_shared_page *shr)
+static size_t shr_data_offset(struct vtpm_shared_page *shr)
{
- return sizeof(*shr) + sizeof(u32) * shr->nr_extra_pages;
+ return struct_size(shr, extra_pages, shr->nr_extra_pages);
}
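
struct_size(shr, extra_pages, n) evaluates to sizeof(*shr) plus n trailing array elements, saturating on overflow; a userspace model with a hypothetical layout (the real vtpm_shared_page fields differ, and this sketch omits the overflow check):

#include <stddef.h>
#include <stdio.h>

struct shared_page_model {	/* hypothetical layout */
	unsigned int length;
	unsigned char state;
	unsigned char nr_extra_pages;
	unsigned int extra_pages[];	/* flexible array member */
};

/* sizeof the fixed part plus n trailing elements, like struct_size(). */
#define MODEL_STRUCT_SIZE(p, n) \
	(sizeof(*(p)) + sizeof((p)->extra_pages[0]) * (size_t)(n))

int main(void)
{
	struct shared_page_model shr = { .nr_extra_pages = 3 };

	printf("data offset = %zu bytes\n",
	       MODEL_STRUCT_SIZE(&shr, shr.nr_extra_pages));
	return 0;
}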
static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct tpm_private *priv = dev_get_drvdata(&chip->dev);
struct vtpm_shared_page *shr = priv->shr;
- unsigned int offset = shr_data_offset(shr);
+ size_t offset = shr_data_offset(shr);
u32 ordinal;
unsigned long duration;
@@ -177,7 +177,7 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct tpm_private *priv = dev_get_drvdata(&chip->dev);
struct vtpm_shared_page *shr = priv->shr;
- unsigned int offset = shr_data_offset(shr);
+ size_t offset = shr_data_offset(shr);
size_t length = shr->length;
if (shr->state == VTPM_STATE_IDLE)
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 2359889a35a0..e3c430539a17 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1957,6 +1957,13 @@ static void virtcons_remove(struct virtio_device *vdev)
list_del(&portdev->list);
spin_unlock_irq(&pdrvdata_lock);
+ /* Device is going away, exit any polling for buffers */
+ virtio_break_device(vdev);
+ if (use_multiport(portdev))
+ flush_work(&portdev->control_work);
+ else
+ flush_work(&portdev->config_work);
+
/* Disable interrupts for vqs */
virtio_reset_device(vdev);
/* Finish up work that's lined up */
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index ad4256d54361..d4d67fbae869 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -231,6 +231,8 @@ config COMMON_CLK_GEMINI
config COMMON_CLK_LAN966X
bool "Generic Clock Controller driver for LAN966X SoC"
+ depends on HAS_IOMEM
+ depends on OF
help
This driver provides support for the Generic Clock Controller (GCK) on
the LAN966X SoC. GCK generates and supplies clocks to various peripherals
diff --git a/drivers/clk/clk-lmk04832.c b/drivers/clk/clk-lmk04832.c
index 8f02c0b88000..f416f8bc2898 100644
--- a/drivers/clk/clk-lmk04832.c
+++ b/drivers/clk/clk-lmk04832.c
@@ -1544,14 +1544,12 @@ err_disable_oscin:
return ret;
}
-static int lmk04832_remove(struct spi_device *spi)
+static void lmk04832_remove(struct spi_device *spi)
{
struct lmk04832 *lmk = spi_get_drvdata(spi);
clk_disable_unprepare(lmk->oscin);
of_clk_del_provider(spi->dev.of_node);
-
- return 0;
}
static const struct spi_device_id lmk04832_id[] = {
{ "lmk04832", LMK04832 },
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index 1e357d364ca2..2c7a830ce308 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -2,7 +2,7 @@
/*
* System Control and Power Interface (SCMI) Protocol based clock driver
*
- * Copyright (C) 2018-2021 ARM Ltd.
+ * Copyright (C) 2018-2022 ARM Ltd.
*/
#include <linux/clk-provider.h>
@@ -88,21 +88,51 @@ static void scmi_clk_disable(struct clk_hw *hw)
scmi_proto_clk_ops->disable(clk->ph, clk->id);
}
+static int scmi_clk_atomic_enable(struct clk_hw *hw)
+{
+ struct scmi_clk *clk = to_scmi_clk(hw);
+
+ return scmi_proto_clk_ops->enable_atomic(clk->ph, clk->id);
+}
+
+static void scmi_clk_atomic_disable(struct clk_hw *hw)
+{
+ struct scmi_clk *clk = to_scmi_clk(hw);
+
+ scmi_proto_clk_ops->disable_atomic(clk->ph, clk->id);
+}
+
+/*
+ * We can provide enable/disable atomic callbacks only if the underlying SCMI
+ * transport for an SCMI instance is configured to handle SCMI commands in an
+ * atomic manner.
+ *
+ * When no SCMI atomic transport support is available we instead provide only
+ * the prepare/unprepare API, as allowed by the clock framework when atomic
+ * calls are not available.
+ *
+ * Two distinct sets of clk_ops are provided since we could have multiple SCMI
+ * instances with different underlying transport quality, so they cannot be
+ * shared.
+ */
static const struct clk_ops scmi_clk_ops = {
.recalc_rate = scmi_clk_recalc_rate,
.round_rate = scmi_clk_round_rate,
.set_rate = scmi_clk_set_rate,
- /*
- * We can't provide enable/disable callback as we can't perform the same
- * in atomic context. Since the clock framework provides standard API
- * clk_prepare_enable that helps cases using clk_enable in non-atomic
- * context, it should be fine providing prepare/unprepare.
- */
.prepare = scmi_clk_enable,
.unprepare = scmi_clk_disable,
};
-static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
+static const struct clk_ops scmi_atomic_clk_ops = {
+ .recalc_rate = scmi_clk_recalc_rate,
+ .round_rate = scmi_clk_round_rate,
+ .set_rate = scmi_clk_set_rate,
+ .enable = scmi_clk_atomic_enable,
+ .disable = scmi_clk_atomic_disable,
+};
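
Selecting one of two ops tables per clock at registration time, as the probe code below does, is a common vtable pattern; a generic userspace sketch with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct ops {
	const char *name;
	int (*enable)(int id);
};

static int enable_sleeping(int id)
{
	printf("clk %d: prepare+enable (may sleep)\n", id);
	return 0;
}

static int enable_atomic(int id)
{
	printf("clk %d: enable (atomic-safe)\n", id);
	return 0;
}

static const struct ops sleeping_ops = { "sleeping", enable_sleeping };
static const struct ops atomic_ops = { "atomic", enable_atomic };

/* Atomic transport and low enough enable latency => atomic ops. */
static const struct ops *pick_ops(bool transport_atomic,
				  unsigned int enable_latency,
				  unsigned int atomic_threshold)
{
	if (transport_atomic && enable_latency <= atomic_threshold)
		return &atomic_ops;
	return &sleeping_ops;
}

int main(void)
{
	const struct ops *ops = pick_ops(true, 50, 100);

	printf("selected %s ops\n", ops->name);
	return ops->enable(0);
}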
+
+static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
+ const struct clk_ops *scmi_ops)
{
int ret;
unsigned long min_rate, max_rate;
@@ -110,7 +140,7 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
struct clk_init_data init = {
.flags = CLK_GET_RATE_NOCACHE,
.num_parents = 0,
- .ops = &scmi_clk_ops,
+ .ops = scmi_ops,
.name = sclk->info->name,
};
@@ -139,6 +169,8 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
static int scmi_clocks_probe(struct scmi_device *sdev)
{
int idx, count, err;
+ unsigned int atomic_threshold;
+ bool is_atomic;
struct clk_hw **hws;
struct clk_hw_onecell_data *clk_data;
struct device *dev = &sdev->dev;
@@ -168,8 +200,11 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
clk_data->num = count;
hws = clk_data->hws;
+ is_atomic = handle->is_transport_atomic(handle, &atomic_threshold);
+
for (idx = 0; idx < count; idx++) {
struct scmi_clk *sclk;
+ const struct clk_ops *scmi_ops;
sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL);
if (!sclk)
@@ -184,13 +219,27 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
sclk->id = idx;
sclk->ph = ph;
- err = scmi_clk_ops_init(dev, sclk);
+ /*
+ * Note that when the transport is atomic but the SCMI protocol did not
+ * specify (or does not support) an enable_latency associated with a
+ * clock, we default to using atomic operations.
+ */
+ if (is_atomic &&
+ sclk->info->enable_latency <= atomic_threshold)
+ scmi_ops = &scmi_atomic_clk_ops;
+ else
+ scmi_ops = &scmi_clk_ops;
+
+ err = scmi_clk_ops_init(dev, sclk, scmi_ops);
if (err) {
dev_err(dev, "failed to register clock %d\n", idx);
devm_kfree(dev, sclk);
hws[idx] = NULL;
} else {
- dev_dbg(dev, "Registered clock:%s\n", sclk->info->name);
+ dev_dbg(dev, "Registered clock:%s%s\n",
+ sclk->info->name,
+ scmi_ops == &scmi_atomic_clk_ops ?
+ " (atomic ops)" : "");
hws[idx] = &sclk->hw;
}
}
diff --git a/drivers/clk/qcom/dispcc-sc7180.c b/drivers/clk/qcom/dispcc-sc7180.c
index 538e4963c915..5d2ae297e741 100644
--- a/drivers/clk/qcom/dispcc-sc7180.c
+++ b/drivers/clk/qcom/dispcc-sc7180.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019, 2022, The Linux Foundation. All rights reserved.
*/
#include <linux/clk-provider.h>
@@ -625,6 +625,9 @@ static struct clk_branch disp_cc_mdss_vsync_clk = {
static struct gdsc mdss_gdsc = {
.gdscr = 0x3000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "mdss_gdsc",
},
diff --git a/drivers/clk/qcom/dispcc-sc7280.c b/drivers/clk/qcom/dispcc-sc7280.c
index 4ef4ae231794..ad596d567f6a 100644
--- a/drivers/clk/qcom/dispcc-sc7280.c
+++ b/drivers/clk/qcom/dispcc-sc7280.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, The Linux Foundation. All rights reserved.
*/
#include <linux/clk-provider.h>
@@ -787,6 +787,9 @@ static struct clk_branch disp_cc_sleep_clk = {
static struct gdsc disp_cc_mdss_core_gdsc = {
.gdscr = 0x1004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "disp_cc_mdss_core_gdsc",
},
diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c
index 566fdfa0a15b..db9379634fb2 100644
--- a/drivers/clk/qcom/dispcc-sm8250.c
+++ b/drivers/clk/qcom/dispcc-sm8250.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2020, 2022, The Linux Foundation. All rights reserved.
*/
#include <linux/clk-provider.h>
@@ -1126,6 +1126,9 @@ static struct clk_branch disp_cc_mdss_vsync_clk = {
static struct gdsc mdss_gdsc = {
.gdscr = 0x3000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "mdss_gdsc",
},
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index 7e1dd8ccfa38..44520efc6c72 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved.
*/
#include <linux/bitops.h>
@@ -35,9 +35,14 @@
#define CFG_GDSCR_OFFSET 0x4
/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
-#define EN_REST_WAIT_VAL (0x2 << 20)
-#define EN_FEW_WAIT_VAL (0x8 << 16)
-#define CLK_DIS_WAIT_VAL (0x2 << 12)
+#define EN_REST_WAIT_VAL 0x2
+#define EN_FEW_WAIT_VAL 0x8
+#define CLK_DIS_WAIT_VAL 0x2
+
+/* Transition delay shifts */
+#define EN_REST_WAIT_SHIFT 20
+#define EN_FEW_WAIT_SHIFT 16
+#define CLK_DIS_WAIT_SHIFT 12
#define RETAIN_MEM BIT(14)
#define RETAIN_PERIPH BIT(13)
@@ -380,7 +385,18 @@ static int gdsc_init(struct gdsc *sc)
*/
mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK |
EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK;
- val = EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
+
+ if (!sc->en_rest_wait_val)
+ sc->en_rest_wait_val = EN_REST_WAIT_VAL;
+ if (!sc->en_few_wait_val)
+ sc->en_few_wait_val = EN_FEW_WAIT_VAL;
+ if (!sc->clk_dis_wait_val)
+ sc->clk_dis_wait_val = CLK_DIS_WAIT_VAL;
+
+ val = sc->en_rest_wait_val << EN_REST_WAIT_SHIFT |
+ sc->en_few_wait_val << EN_FEW_WAIT_SHIFT |
+ sc->clk_dis_wait_val << CLK_DIS_WAIT_SHIFT;
+
ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val);
if (ret)
return ret;
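
A quick check of the register math above, using the driver's default wait values and the dispcc overrides (field widths assumed to be 4 bits at the given shifts):

#include <stdio.h>

#define EN_REST_WAIT_SHIFT	20
#define EN_FEW_WAIT_SHIFT	16
#define CLK_DIS_WAIT_SHIFT	12

static unsigned int gdscr_val(unsigned int en_rest, unsigned int en_few,
			      unsigned int clk_dis)
{
	return en_rest << EN_REST_WAIT_SHIFT |
	       en_few << EN_FEW_WAIT_SHIFT |
	       clk_dis << CLK_DIS_WAIT_SHIFT;
}

int main(void)
{
	/* Defaults reproduce the old hard-coded constant: 0x282000. */
	printf("default val = %#x\n", gdscr_val(0x2, 0x8, 0x2));
	/* The dispcc gdscs override only these fields. */
	printf("dispcc val  = %#x\n", gdscr_val(0x2, 0x2, 0xf));
	return 0;
}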
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index d7cc4c21a9d4..ad313d7210bd 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved.
*/
#ifndef __QCOM_GDSC_H__
@@ -22,6 +22,9 @@ struct reset_controller_dev;
* @cxcs: offsets of branch registers to toggle mem/periph bits in
* @cxc_count: number of @cxcs
* @pwrsts: Possible powerdomain power states
+ * @en_rest_wait_val: transition delay value for receiving enr ack signal
+ * @en_few_wait_val: transition delay value for receiving enf ack signal
+ * @clk_dis_wait_val: transition delay value for halting clock
* @resets: ids of resets associated with this gdsc
* @reset_count: number of @resets
* @rcdev: reset controller
@@ -36,6 +39,9 @@ struct gdsc {
unsigned int clamp_io_ctrl;
unsigned int *cxcs;
unsigned int cxc_count;
+ unsigned int en_rest_wait_val;
+ unsigned int en_few_wait_val;
+ unsigned int clk_dis_wait_val;
const u8 pwrsts;
/* Powerdomain allowable state bitfields */
#define PWRSTS_OFF BIT(0)
diff --git a/drivers/clk/samsung/Kconfig b/drivers/clk/samsung/Kconfig
index 0e18d6ff2916..8e8245ab3fd1 100644
--- a/drivers/clk/samsung/Kconfig
+++ b/drivers/clk/samsung/Kconfig
@@ -11,6 +11,7 @@ config COMMON_CLK_SAMSUNG
select EXYNOS_5410_COMMON_CLK if ARM && SOC_EXYNOS5410
select EXYNOS_5420_COMMON_CLK if ARM && SOC_EXYNOS5420
select EXYNOS_ARM64_COMMON_CLK if ARM64 && ARCH_EXYNOS
+ select TESLA_FSD_COMMON_CLK if ARM64 && ARCH_TESLA_FSD
config S3C64XX_COMMON_CLK
bool "Samsung S3C64xx clock controller support" if COMPILE_TEST
@@ -124,3 +125,11 @@ config S3C2443_COMMON_CLK
help
Support for the clock controller present on the Samsung
S3C2416/S3C2443 SoCs. Choose Y here only if you build for this SoC.
+
+config TESLA_FSD_COMMON_CLK
+ bool "Tesla FSD clock controller support" if COMPILE_TEST
+ depends on COMMON_CLK_SAMSUNG
+ depends on EXYNOS_ARM64_COMMON_CLK
+ help
+ Support for the clock controller present on the Tesla FSD SoC.
+ Choose Y here only if you build for this SoC.
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 0df74916a895..17e5d1cb9da2 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -26,3 +26,4 @@ obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o
obj-$(CONFIG_S3C2443_COMMON_CLK)+= clk-s3c2443.o
obj-$(CONFIG_S3C64XX_COMMON_CLK) += clk-s3c64xx.o
obj-$(CONFIG_S5PV210_COMMON_CLK) += clk-s5pv210.o clk-s5pv210-audss.o
+obj-$(CONFIG_TESLA_FSD_COMMON_CLK) += clk-fsd.o
diff --git a/drivers/clk/samsung/clk-fsd.c b/drivers/clk/samsung/clk-fsd.c
new file mode 100644
index 000000000000..5d009c70e97d
--- /dev/null
+++ b/drivers/clk/samsung/clk-fsd.c
@@ -0,0 +1,1803 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2022 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
+ * Copyright (c) 2017-2022 Tesla, Inc.
+ * https://www.tesla.com
+ *
+ * Common Clock Framework support for FSD SoC.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/fsd-clk.h>
+
+#include "clk.h"
+#include "clk-exynos-arm64.h"
+
+/* Register Offset definitions for CMU_CMU (0x11c10000) */
+#define PLL_LOCKTIME_PLL_SHARED0 0x0
+#define PLL_LOCKTIME_PLL_SHARED1 0x4
+#define PLL_LOCKTIME_PLL_SHARED2 0x8
+#define PLL_LOCKTIME_PLL_SHARED3 0xc
+#define PLL_CON0_PLL_SHARED0 0x100
+#define PLL_CON0_PLL_SHARED1 0x120
+#define PLL_CON0_PLL_SHARED2 0x140
+#define PLL_CON0_PLL_SHARED3 0x160
+#define MUX_CMU_CIS0_CLKMUX 0x1000
+#define MUX_CMU_CIS1_CLKMUX 0x1004
+#define MUX_CMU_CIS2_CLKMUX 0x1008
+#define MUX_CMU_CPUCL_SWITCHMUX 0x100c
+#define MUX_CMU_FSYS1_ACLK_MUX 0x1014
+#define MUX_PLL_SHARED0_MUX 0x1020
+#define MUX_PLL_SHARED1_MUX 0x1024
+#define DIV_CMU_CIS0_CLK 0x1800
+#define DIV_CMU_CIS1_CLK 0x1804
+#define DIV_CMU_CIS2_CLK 0x1808
+#define DIV_CMU_CMU_ACLK 0x180c
+#define DIV_CMU_CPUCL_SWITCH 0x1810
+#define DIV_CMU_FSYS0_SHARED0DIV4 0x181c
+#define DIV_CMU_FSYS0_SHARED1DIV3 0x1820
+#define DIV_CMU_FSYS0_SHARED1DIV4 0x1824
+#define DIV_CMU_FSYS1_SHARED0DIV4 0x1828
+#define DIV_CMU_FSYS1_SHARED0DIV8 0x182c
+#define DIV_CMU_IMEM_ACLK 0x1834
+#define DIV_CMU_IMEM_DMACLK 0x1838
+#define DIV_CMU_IMEM_TCUCLK 0x183c
+#define DIV_CMU_PERIC_SHARED0DIV20 0x1844
+#define DIV_CMU_PERIC_SHARED0DIV3_TBUCLK 0x1848
+#define DIV_CMU_PERIC_SHARED1DIV36 0x184c
+#define DIV_CMU_PERIC_SHARED1DIV4_DMACLK 0x1850
+#define DIV_PLL_SHARED0_DIV2 0x1858
+#define DIV_PLL_SHARED0_DIV3 0x185c
+#define DIV_PLL_SHARED0_DIV4 0x1860
+#define DIV_PLL_SHARED0_DIV6 0x1864
+#define DIV_PLL_SHARED1_DIV3 0x1868
+#define DIV_PLL_SHARED1_DIV36 0x186c
+#define DIV_PLL_SHARED1_DIV4 0x1870
+#define DIV_PLL_SHARED1_DIV9 0x1874
+#define GAT_CMU_CIS0_CLKGATE 0x2000
+#define GAT_CMU_CIS1_CLKGATE 0x2004
+#define GAT_CMU_CIS2_CLKGATE 0x2008
+#define GAT_CMU_CPUCL_SWITCH_GATE 0x200c
+#define GAT_CMU_FSYS0_SHARED0DIV4_GATE 0x2018
+#define GAT_CMU_FSYS0_SHARED1DIV4_CLK 0x201c
+#define GAT_CMU_FSYS0_SHARED1DIV4_GATE 0x2020
+#define GAT_CMU_FSYS1_SHARED0DIV4_GATE 0x2024
+#define GAT_CMU_FSYS1_SHARED1DIV4_GATE 0x2028
+#define GAT_CMU_IMEM_ACLK_GATE 0x2030
+#define GAT_CMU_IMEM_DMACLK_GATE 0x2034
+#define GAT_CMU_IMEM_TCUCLK_GATE 0x2038
+#define GAT_CMU_PERIC_SHARED0DIVE3_TBUCLK_GATE 0x2040
+#define GAT_CMU_PERIC_SHARED0DIVE4_GATE 0x2044
+#define GAT_CMU_PERIC_SHARED1DIV4_DMACLK_GATE 0x2048
+#define GAT_CMU_PERIC_SHARED1DIVE4_GATE 0x204c
+#define GAT_CMU_CMU_CMU_IPCLKPORT_PCLK 0x2054
+#define GAT_CMU_AXI2APB_CMU_IPCLKPORT_ACLK 0x2058
+#define GAT_CMU_NS_BRDG_CMU_IPCLKPORT_CLK__PSOC_CMU__CLK_CMU 0x205c
+#define GAT_CMU_SYSREG_CMU_IPCLKPORT_PCLK 0x2060
+
+static const unsigned long cmu_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_SHARED0,
+ PLL_LOCKTIME_PLL_SHARED1,
+ PLL_LOCKTIME_PLL_SHARED2,
+ PLL_LOCKTIME_PLL_SHARED3,
+ PLL_CON0_PLL_SHARED0,
+ PLL_CON0_PLL_SHARED1,
+ PLL_CON0_PLL_SHARED2,
+ PLL_CON0_PLL_SHARED3,
+ MUX_CMU_CIS0_CLKMUX,
+ MUX_CMU_CIS1_CLKMUX,
+ MUX_CMU_CIS2_CLKMUX,
+ MUX_CMU_CPUCL_SWITCHMUX,
+ MUX_CMU_FSYS1_ACLK_MUX,
+ MUX_PLL_SHARED0_MUX,
+ MUX_PLL_SHARED1_MUX,
+ DIV_CMU_CIS0_CLK,
+ DIV_CMU_CIS1_CLK,
+ DIV_CMU_CIS2_CLK,
+ DIV_CMU_CMU_ACLK,
+ DIV_CMU_CPUCL_SWITCH,
+ DIV_CMU_FSYS0_SHARED0DIV4,
+ DIV_CMU_FSYS0_SHARED1DIV3,
+ DIV_CMU_FSYS0_SHARED1DIV4,
+ DIV_CMU_FSYS1_SHARED0DIV4,
+ DIV_CMU_FSYS1_SHARED0DIV8,
+ DIV_CMU_IMEM_ACLK,
+ DIV_CMU_IMEM_DMACLK,
+ DIV_CMU_IMEM_TCUCLK,
+ DIV_CMU_PERIC_SHARED0DIV20,
+ DIV_CMU_PERIC_SHARED0DIV3_TBUCLK,
+ DIV_CMU_PERIC_SHARED1DIV36,
+ DIV_CMU_PERIC_SHARED1DIV4_DMACLK,
+ DIV_PLL_SHARED0_DIV2,
+ DIV_PLL_SHARED0_DIV3,
+ DIV_PLL_SHARED0_DIV4,
+ DIV_PLL_SHARED0_DIV6,
+ DIV_PLL_SHARED1_DIV3,
+ DIV_PLL_SHARED1_DIV36,
+ DIV_PLL_SHARED1_DIV4,
+ DIV_PLL_SHARED1_DIV9,
+ GAT_CMU_CIS0_CLKGATE,
+ GAT_CMU_CIS1_CLKGATE,
+ GAT_CMU_CIS2_CLKGATE,
+ GAT_CMU_CPUCL_SWITCH_GATE,
+ GAT_CMU_FSYS0_SHARED0DIV4_GATE,
+ GAT_CMU_FSYS0_SHARED1DIV4_CLK,
+ GAT_CMU_FSYS0_SHARED1DIV4_GATE,
+ GAT_CMU_FSYS1_SHARED0DIV4_GATE,
+ GAT_CMU_FSYS1_SHARED1DIV4_GATE,
+ GAT_CMU_IMEM_ACLK_GATE,
+ GAT_CMU_IMEM_DMACLK_GATE,
+ GAT_CMU_IMEM_TCUCLK_GATE,
+ GAT_CMU_PERIC_SHARED0DIVE3_TBUCLK_GATE,
+ GAT_CMU_PERIC_SHARED0DIVE4_GATE,
+ GAT_CMU_PERIC_SHARED1DIV4_DMACLK_GATE,
+ GAT_CMU_PERIC_SHARED1DIVE4_GATE,
+ GAT_CMU_CMU_CMU_IPCLKPORT_PCLK,
+ GAT_CMU_AXI2APB_CMU_IPCLKPORT_ACLK,
+ GAT_CMU_NS_BRDG_CMU_IPCLKPORT_CLK__PSOC_CMU__CLK_CMU,
+ GAT_CMU_SYSREG_CMU_IPCLKPORT_PCLK,
+};
+
+static const struct samsung_pll_rate_table pll_shared0_rate_table[] __initconst = {
+ PLL_35XX_RATE(24 * MHZ, 2000000000U, 250, 3, 0),
+};
+
+static const struct samsung_pll_rate_table pll_shared1_rate_table[] __initconst = {
+ PLL_35XX_RATE(24 * MHZ, 2400000000U, 200, 2, 0),
+};
+
+static const struct samsung_pll_rate_table pll_shared2_rate_table[] __initconst = {
+ PLL_35XX_RATE(24 * MHZ, 2400000000U, 200, 2, 0),
+};
+
+static const struct samsung_pll_rate_table pll_shared3_rate_table[] __initconst = {
+ PLL_35XX_RATE(24 * MHZ, 1800000000U, 150, 2, 0),
+};
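
Assuming the pll142xx follows the usual Samsung PLL35xx relation Fout = Fin * M / (P * 2^S), the rate table entries above are self-consistent; a quick check:

#include <stdio.h>

/* Assumed PLL35xx-style relation: Fout = Fin * M / (P * 2^S). */
static unsigned long long pll35xx_rate(unsigned long long fin,
				       unsigned int m, unsigned int p,
				       unsigned int s)
{
	return fin * m / ((unsigned long long)p << s);
}

int main(void)
{
	unsigned long long fin = 24000000ULL;	/* 24 MHz fin_pll */

	printf("shared0:   %llu Hz\n", pll35xx_rate(fin, 250, 3, 0)); /* 2.0 GHz */
	printf("shared1/2: %llu Hz\n", pll35xx_rate(fin, 200, 2, 0)); /* 2.4 GHz */
	printf("shared3:   %llu Hz\n", pll35xx_rate(fin, 150, 2, 0)); /* 1.8 GHz */
	return 0;
}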
+
+static const struct samsung_pll_clock cmu_pll_clks[] __initconst = {
+ PLL(pll_142xx, 0, "fout_pll_shared0", "fin_pll", PLL_LOCKTIME_PLL_SHARED0,
+ PLL_CON0_PLL_SHARED0, pll_shared0_rate_table),
+ PLL(pll_142xx, 0, "fout_pll_shared1", "fin_pll", PLL_LOCKTIME_PLL_SHARED1,
+ PLL_CON0_PLL_SHARED1, pll_shared1_rate_table),
+ PLL(pll_142xx, 0, "fout_pll_shared2", "fin_pll", PLL_LOCKTIME_PLL_SHARED2,
+ PLL_CON0_PLL_SHARED2, pll_shared2_rate_table),
+ PLL(pll_142xx, 0, "fout_pll_shared3", "fin_pll", PLL_LOCKTIME_PLL_SHARED3,
+ PLL_CON0_PLL_SHARED3, pll_shared3_rate_table),
+};
+
+/* List of parent clocks for Muxes in CMU_CMU */
+PNAME(mout_cmu_shared0_pll_p) = { "fin_pll", "fout_pll_shared0" };
+PNAME(mout_cmu_shared1_pll_p) = { "fin_pll", "fout_pll_shared1" };
+PNAME(mout_cmu_shared2_pll_p) = { "fin_pll", "fout_pll_shared2" };
+PNAME(mout_cmu_shared3_pll_p) = { "fin_pll", "fout_pll_shared3" };
+PNAME(mout_cmu_cis0_clkmux_p) = { "fin_pll", "dout_cmu_pll_shared0_div4" };
+PNAME(mout_cmu_cis1_clkmux_p) = { "fin_pll", "dout_cmu_pll_shared0_div4" };
+PNAME(mout_cmu_cis2_clkmux_p) = { "fin_pll", "dout_cmu_pll_shared0_div4" };
+PNAME(mout_cmu_cpucl_switchmux_p) = { "mout_cmu_pll_shared2", "mout_cmu_pll_shared0_mux" };
+PNAME(mout_cmu_fsys1_aclk_mux_p) = { "dout_cmu_pll_shared0_div4", "fin_pll" };
+PNAME(mout_cmu_pll_shared0_mux_p) = { "fin_pll", "mout_cmu_pll_shared0" };
+PNAME(mout_cmu_pll_shared1_mux_p) = { "fin_pll", "mout_cmu_pll_shared1" };
+
+static const struct samsung_mux_clock cmu_mux_clks[] __initconst = {
+ MUX(0, "mout_cmu_pll_shared0", mout_cmu_shared0_pll_p, PLL_CON0_PLL_SHARED0, 4, 1),
+ MUX(0, "mout_cmu_pll_shared1", mout_cmu_shared1_pll_p, PLL_CON0_PLL_SHARED1, 4, 1),
+ MUX(0, "mout_cmu_pll_shared2", mout_cmu_shared2_pll_p, PLL_CON0_PLL_SHARED2, 4, 1),
+ MUX(0, "mout_cmu_pll_shared3", mout_cmu_shared3_pll_p, PLL_CON0_PLL_SHARED3, 4, 1),
+ MUX(0, "mout_cmu_cis0_clkmux", mout_cmu_cis0_clkmux_p, MUX_CMU_CIS0_CLKMUX, 0, 1),
+ MUX(0, "mout_cmu_cis1_clkmux", mout_cmu_cis1_clkmux_p, MUX_CMU_CIS1_CLKMUX, 0, 1),
+ MUX(0, "mout_cmu_cis2_clkmux", mout_cmu_cis2_clkmux_p, MUX_CMU_CIS2_CLKMUX, 0, 1),
+ MUX(0, "mout_cmu_cpucl_switchmux", mout_cmu_cpucl_switchmux_p,
+ MUX_CMU_CPUCL_SWITCHMUX, 0, 1),
+ MUX(0, "mout_cmu_fsys1_aclk_mux", mout_cmu_fsys1_aclk_mux_p, MUX_CMU_FSYS1_ACLK_MUX, 0, 1),
+ MUX(0, "mout_cmu_pll_shared0_mux", mout_cmu_pll_shared0_mux_p, MUX_PLL_SHARED0_MUX, 0, 1),
+ MUX(0, "mout_cmu_pll_shared1_mux", mout_cmu_pll_shared1_mux_p, MUX_PLL_SHARED1_MUX, 0, 1),
+};
+
+static const struct samsung_div_clock cmu_div_clks[] __initconst = {
+ DIV(0, "dout_cmu_cis0_clk", "cmu_cis0_clkgate", DIV_CMU_CIS0_CLK, 0, 4),
+ DIV(0, "dout_cmu_cis1_clk", "cmu_cis1_clkgate", DIV_CMU_CIS1_CLK, 0, 4),
+ DIV(0, "dout_cmu_cis2_clk", "cmu_cis2_clkgate", DIV_CMU_CIS2_CLK, 0, 4),
+ DIV(0, "dout_cmu_cmu_aclk", "dout_cmu_pll_shared1_div9", DIV_CMU_CMU_ACLK, 0, 4),
+ DIV(0, "dout_cmu_cpucl_switch", "cmu_cpucl_switch_gate", DIV_CMU_CPUCL_SWITCH, 0, 4),
+ DIV(DOUT_CMU_FSYS0_SHARED0DIV4, "dout_cmu_fsys0_shared0div4", "cmu_fsys0_shared0div4_gate",
+ DIV_CMU_FSYS0_SHARED0DIV4, 0, 4),
+ DIV(0, "dout_cmu_fsys0_shared1div3", "cmu_fsys0_shared1div4_clk",
+ DIV_CMU_FSYS0_SHARED1DIV3, 0, 4),
+ DIV(DOUT_CMU_FSYS0_SHARED1DIV4, "dout_cmu_fsys0_shared1div4", "cmu_fsys0_shared1div4_gate",
+ DIV_CMU_FSYS0_SHARED1DIV4, 0, 4),
+ DIV(DOUT_CMU_FSYS1_SHARED0DIV4, "dout_cmu_fsys1_shared0div4", "cmu_fsys1_shared0div4_gate",
+ DIV_CMU_FSYS1_SHARED0DIV4, 0, 4),
+ DIV(DOUT_CMU_FSYS1_SHARED0DIV8, "dout_cmu_fsys1_shared0div8", "cmu_fsys1_shared1div4_gate",
+ DIV_CMU_FSYS1_SHARED0DIV8, 0, 4),
+ DIV(DOUT_CMU_IMEM_ACLK, "dout_cmu_imem_aclk", "cmu_imem_aclk_gate",
+ DIV_CMU_IMEM_ACLK, 0, 4),
+ DIV(DOUT_CMU_IMEM_DMACLK, "dout_cmu_imem_dmaclk", "cmu_imem_dmaclk_gate",
+ DIV_CMU_IMEM_DMACLK, 0, 4),
+ DIV(DOUT_CMU_IMEM_TCUCLK, "dout_cmu_imem_tcuclk", "cmu_imem_tcuclk_gate",
+ DIV_CMU_IMEM_TCUCLK, 0, 4),
+ DIV(DOUT_CMU_PERIC_SHARED0DIV20, "dout_cmu_peric_shared0div20",
+ "cmu_peric_shared0dive4_gate", DIV_CMU_PERIC_SHARED0DIV20, 0, 4),
+ DIV(DOUT_CMU_PERIC_SHARED0DIV3_TBUCLK, "dout_cmu_peric_shared0div3_tbuclk",
+ "cmu_peric_shared0dive3_tbuclk_gate", DIV_CMU_PERIC_SHARED0DIV3_TBUCLK, 0, 4),
+ DIV(DOUT_CMU_PERIC_SHARED1DIV36, "dout_cmu_peric_shared1div36",
+ "cmu_peric_shared1dive4_gate", DIV_CMU_PERIC_SHARED1DIV36, 0, 4),
+ DIV(DOUT_CMU_PERIC_SHARED1DIV4_DMACLK, "dout_cmu_peric_shared1div4_dmaclk",
+ "cmu_peric_shared1div4_dmaclk_gate", DIV_CMU_PERIC_SHARED1DIV4_DMACLK, 0, 4),
+ DIV(0, "dout_cmu_pll_shared0_div2", "mout_cmu_pll_shared0_mux",
+ DIV_PLL_SHARED0_DIV2, 0, 4),
+ DIV(0, "dout_cmu_pll_shared0_div3", "mout_cmu_pll_shared0_mux",
+ DIV_PLL_SHARED0_DIV3, 0, 4),
+ DIV(DOUT_CMU_PLL_SHARED0_DIV4, "dout_cmu_pll_shared0_div4", "dout_cmu_pll_shared0_div2",
+ DIV_PLL_SHARED0_DIV4, 0, 4),
+ DIV(DOUT_CMU_PLL_SHARED0_DIV6, "dout_cmu_pll_shared0_div6", "dout_cmu_pll_shared0_div3",
+ DIV_PLL_SHARED0_DIV6, 0, 4),
+ DIV(0, "dout_cmu_pll_shared1_div3", "mout_cmu_pll_shared1_mux",
+ DIV_PLL_SHARED1_DIV3, 0, 4),
+ DIV(0, "dout_cmu_pll_shared1_div36", "dout_cmu_pll_shared1_div9",
+ DIV_PLL_SHARED1_DIV36, 0, 4),
+ DIV(0, "dout_cmu_pll_shared1_div4", "mout_cmu_pll_shared1_mux",
+ DIV_PLL_SHARED1_DIV4, 0, 4),
+ DIV(0, "dout_cmu_pll_shared1_div9", "dout_cmu_pll_shared1_div3",
+ DIV_PLL_SHARED1_DIV9, 0, 4),
+};
+
+static const struct samsung_gate_clock cmu_gate_clks[] __initconst = {
+ GATE(0, "cmu_cis0_clkgate", "mout_cmu_cis0_clkmux", GAT_CMU_CIS0_CLKGATE, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cmu_cis1_clkgate", "mout_cmu_cis1_clkmux", GAT_CMU_CIS1_CLKGATE, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cmu_cis2_clkgate", "mout_cmu_cis2_clkmux", GAT_CMU_CIS2_CLKGATE, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CMU_CPUCL_SWITCH_GATE, "cmu_cpucl_switch_gate", "mout_cmu_cpucl_switchmux",
+ GAT_CMU_CPUCL_SWITCH_GATE, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(GAT_CMU_FSYS0_SHARED0DIV4, "cmu_fsys0_shared0div4_gate", "dout_cmu_pll_shared0_div4",
+ GAT_CMU_FSYS0_SHARED0DIV4_GATE, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cmu_fsys0_shared1div4_clk", "dout_cmu_pll_shared1_div3",
+ GAT_CMU_FSYS0_SHARED1DIV4_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cmu_fsys0_shared1div4_gate", "dout_cmu_pll_shared1_div4",
+ GAT_CMU_FSYS0_SHARED1DIV4_GATE, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cmu_fsys1_shared0div4_gate", "mout_cmu_fsys1_aclk_mux",
+ GAT_CMU_FSYS1_SHARED0DIV4_GATE, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cmu_fsys1_shared1div4_gate", "dout_cmu_fsys1_shared0div4",
+ GAT_CMU_FSYS1_SHARED1DIV4_GATE, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cmu_imem_aclk_gate", "dout_cmu_pll_shared1_div9", GAT_CMU_IMEM_ACLK_GATE, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cmu_imem_dmaclk_gate", "mout_cmu_pll_shared1_mux", GAT_CMU_IMEM_DMACLK_GATE, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cmu_imem_tcuclk_gate", "dout_cmu_pll_shared0_div3", GAT_CMU_IMEM_TCUCLK_GATE, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cmu_peric_shared0dive3_tbuclk_gate", "dout_cmu_pll_shared0_div3",
+ GAT_CMU_PERIC_SHARED0DIVE3_TBUCLK_GATE, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cmu_peric_shared0dive4_gate", "dout_cmu_pll_shared0_div4",
+ GAT_CMU_PERIC_SHARED0DIVE4_GATE, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cmu_peric_shared1div4_dmaclk_gate", "dout_cmu_pll_shared1_div4",
+ GAT_CMU_PERIC_SHARED1DIV4_DMACLK_GATE, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cmu_peric_shared1dive4_gate", "dout_cmu_pll_shared1_div36",
+ GAT_CMU_PERIC_SHARED1DIVE4_GATE, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cmu_uid_cmu_cmu_cmu_ipclkport_pclk", "dout_cmu_cmu_aclk",
+ GAT_CMU_CMU_CMU_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cmu_uid_axi2apb_cmu_ipclkport_aclk", "dout_cmu_cmu_aclk",
+ GAT_CMU_AXI2APB_CMU_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cmu_uid_ns_brdg_cmu_ipclkport_clk__psoc_cmu__clk_cmu", "dout_cmu_cmu_aclk",
+ GAT_CMU_NS_BRDG_CMU_IPCLKPORT_CLK__PSOC_CMU__CLK_CMU, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cmu_uid_sysreg_cmu_ipclkport_pclk", "dout_cmu_cmu_aclk",
+ GAT_CMU_SYSREG_CMU_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_cmu_info cmu_cmu_info __initconst = {
+ .pll_clks = cmu_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cmu_pll_clks),
+ .mux_clks = cmu_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_mux_clks),
+ .div_clks = cmu_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_div_clks),
+ .gate_clks = cmu_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cmu_gate_clks),
+ .nr_clk_ids = CMU_NR_CLK,
+ .clk_regs = cmu_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_clk_regs),
+};
+
+static void __init fsd_clk_cmu_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &cmu_cmu_info);
+}
+
+CLK_OF_DECLARE(fsd_clk_cmu, "tesla,fsd-clock-cmu", fsd_clk_cmu_init);
+
+/* Register Offset definitions for CMU_PERIC (0x14010000) */
+#define PLL_CON0_PERIC_DMACLK_MUX 0x100
+#define PLL_CON0_PERIC_EQOS_BUSCLK_MUX 0x120
+#define PLL_CON0_PERIC_PCLK_MUX 0x140
+#define PLL_CON0_PERIC_TBUCLK_MUX 0x160
+#define PLL_CON0_SPI_CLK 0x180
+#define PLL_CON0_SPI_PCLK 0x1a0
+#define PLL_CON0_UART_CLK 0x1c0
+#define PLL_CON0_UART_PCLK 0x1e0
+#define MUX_PERIC_EQOS_PHYRXCLK 0x1000
+#define DIV_EQOS_BUSCLK 0x1800
+#define DIV_PERIC_MCAN_CLK 0x1804
+#define DIV_RGMII_CLK 0x1808
+#define DIV_RII_CLK 0x180c
+#define DIV_RMII_CLK 0x1810
+#define DIV_SPI_CLK 0x1814
+#define DIV_UART_CLK 0x1818
+#define GAT_EQOS_TOP_IPCLKPORT_CLK_PTP_REF_I 0x2000
+#define GAT_GPIO_PERIC_IPCLKPORT_OSCCLK 0x2004
+#define GAT_PERIC_ADC0_IPCLKPORT_I_OSCCLK 0x2008
+#define GAT_PERIC_CMU_PERIC_IPCLKPORT_PCLK 0x200c
+#define GAT_PERIC_PWM0_IPCLKPORT_I_OSCCLK 0x2010
+#define GAT_PERIC_PWM1_IPCLKPORT_I_OSCCLK 0x2014
+#define GAT_ASYNC_APB_DMA0_IPCLKPORT_PCLKM 0x2018
+#define GAT_ASYNC_APB_DMA0_IPCLKPORT_PCLKS 0x201c
+#define GAT_ASYNC_APB_DMA1_IPCLKPORT_PCLKM 0x2020
+#define GAT_ASYNC_APB_DMA1_IPCLKPORT_PCLKS 0x2024
+#define GAT_AXI2APB_PERIC0_IPCLKPORT_ACLK 0x2028
+#define GAT_AXI2APB_PERIC1_IPCLKPORT_ACLK 0x202c
+#define GAT_AXI2APB_PERIC2_IPCLKPORT_ACLK 0x2030
+#define GAT_BUS_D_PERIC_IPCLKPORT_DMACLK 0x2034
+#define GAT_BUS_D_PERIC_IPCLKPORT_EQOSCLK 0x2038
+#define GAT_BUS_D_PERIC_IPCLKPORT_MAINCLK 0x203c
+#define GAT_BUS_P_PERIC_IPCLKPORT_EQOSCLK 0x2040
+#define GAT_BUS_P_PERIC_IPCLKPORT_MAINCLK 0x2044
+#define GAT_BUS_P_PERIC_IPCLKPORT_SMMUCLK 0x2048
+#define GAT_EQOS_TOP_IPCLKPORT_ACLK_I 0x204c
+#define GAT_EQOS_TOP_IPCLKPORT_CLK_RX_I 0x2050
+#define GAT_EQOS_TOP_IPCLKPORT_HCLK_I 0x2054
+#define GAT_EQOS_TOP_IPCLKPORT_RGMII_CLK_I 0x2058
+#define GAT_EQOS_TOP_IPCLKPORT_RII_CLK_I 0x205c
+#define GAT_EQOS_TOP_IPCLKPORT_RMII_CLK_I 0x2060
+#define GAT_GPIO_PERIC_IPCLKPORT_PCLK 0x2064
+#define GAT_NS_BRDG_PERIC_IPCLKPORT_CLK__PSOC_PERIC__CLK_PERIC_D 0x2068
+#define GAT_NS_BRDG_PERIC_IPCLKPORT_CLK__PSOC_PERIC__CLK_PERIC_P 0x206c
+#define GAT_PERIC_ADC0_IPCLKPORT_PCLK_S0 0x2070
+#define GAT_PERIC_DMA0_IPCLKPORT_ACLK 0x2074
+#define GAT_PERIC_DMA1_IPCLKPORT_ACLK 0x2078
+#define GAT_PERIC_I2C0_IPCLKPORT_I_PCLK 0x207c
+#define GAT_PERIC_I2C1_IPCLKPORT_I_PCLK 0x2080
+#define GAT_PERIC_I2C2_IPCLKPORT_I_PCLK 0x2084
+#define GAT_PERIC_I2C3_IPCLKPORT_I_PCLK 0x2088
+#define GAT_PERIC_I2C4_IPCLKPORT_I_PCLK 0x208c
+#define GAT_PERIC_I2C5_IPCLKPORT_I_PCLK 0x2090
+#define GAT_PERIC_I2C6_IPCLKPORT_I_PCLK 0x2094
+#define GAT_PERIC_I2C7_IPCLKPORT_I_PCLK 0x2098
+#define GAT_PERIC_MCAN0_IPCLKPORT_CCLK 0x209c
+#define GAT_PERIC_MCAN0_IPCLKPORT_PCLK 0x20a0
+#define GAT_PERIC_MCAN1_IPCLKPORT_CCLK 0x20a4
+#define GAT_PERIC_MCAN1_IPCLKPORT_PCLK 0x20a8
+#define GAT_PERIC_MCAN2_IPCLKPORT_CCLK 0x20ac
+#define GAT_PERIC_MCAN2_IPCLKPORT_PCLK 0x20b0
+#define GAT_PERIC_MCAN3_IPCLKPORT_CCLK 0x20b4
+#define GAT_PERIC_MCAN3_IPCLKPORT_PCLK 0x20b8
+#define GAT_PERIC_PWM0_IPCLKPORT_I_PCLK_S0 0x20bc
+#define GAT_PERIC_PWM1_IPCLKPORT_I_PCLK_S0 0x20c0
+#define GAT_PERIC_SMMU_IPCLKPORT_CCLK 0x20c4
+#define GAT_PERIC_SMMU_IPCLKPORT_PERIC_BCLK 0x20c8
+#define GAT_PERIC_SPI0_IPCLKPORT_I_PCLK 0x20cc
+#define GAT_PERIC_SPI0_IPCLKPORT_I_SCLK_SPI 0x20d0
+#define GAT_PERIC_SPI1_IPCLKPORT_I_PCLK 0x20d4
+#define GAT_PERIC_SPI1_IPCLKPORT_I_SCLK_SPI 0x20d8
+#define GAT_PERIC_SPI2_IPCLKPORT_I_PCLK 0x20dc
+#define GAT_PERIC_SPI2_IPCLKPORT_I_SCLK_SPI 0x20e0
+#define GAT_PERIC_TDM0_IPCLKPORT_HCLK_M 0x20e4
+#define GAT_PERIC_TDM0_IPCLKPORT_PCLK 0x20e8
+#define GAT_PERIC_TDM1_IPCLKPORT_HCLK_M 0x20ec
+#define GAT_PERIC_TDM1_IPCLKPORT_PCLK 0x20f0
+#define GAT_PERIC_UART0_IPCLKPORT_I_SCLK_UART 0x20f4
+#define GAT_PERIC_UART0_IPCLKPORT_PCLK 0x20f8
+#define GAT_PERIC_UART1_IPCLKPORT_I_SCLK_UART 0x20fc
+#define GAT_PERIC_UART1_IPCLKPORT_PCLK 0x2100
+#define GAT_SYSREG_PERI_IPCLKPORT_PCLK 0x2104
+
+static const unsigned long peric_clk_regs[] __initconst = {
+ PLL_CON0_PERIC_DMACLK_MUX,
+ PLL_CON0_PERIC_EQOS_BUSCLK_MUX,
+ PLL_CON0_PERIC_PCLK_MUX,
+ PLL_CON0_PERIC_TBUCLK_MUX,
+ PLL_CON0_SPI_CLK,
+ PLL_CON0_SPI_PCLK,
+ PLL_CON0_UART_CLK,
+ PLL_CON0_UART_PCLK,
+ MUX_PERIC_EQOS_PHYRXCLK,
+ DIV_EQOS_BUSCLK,
+ DIV_PERIC_MCAN_CLK,
+ DIV_RGMII_CLK,
+ DIV_RII_CLK,
+ DIV_RMII_CLK,
+ DIV_SPI_CLK,
+ DIV_UART_CLK,
+ GAT_EQOS_TOP_IPCLKPORT_CLK_PTP_REF_I,
+ GAT_GPIO_PERIC_IPCLKPORT_OSCCLK,
+ GAT_PERIC_ADC0_IPCLKPORT_I_OSCCLK,
+ GAT_PERIC_CMU_PERIC_IPCLKPORT_PCLK,
+ GAT_PERIC_PWM0_IPCLKPORT_I_OSCCLK,
+ GAT_PERIC_PWM1_IPCLKPORT_I_OSCCLK,
+ GAT_ASYNC_APB_DMA0_IPCLKPORT_PCLKM,
+ GAT_ASYNC_APB_DMA0_IPCLKPORT_PCLKS,
+ GAT_ASYNC_APB_DMA1_IPCLKPORT_PCLKM,
+ GAT_ASYNC_APB_DMA1_IPCLKPORT_PCLKS,
+ GAT_AXI2APB_PERIC0_IPCLKPORT_ACLK,
+ GAT_AXI2APB_PERIC1_IPCLKPORT_ACLK,
+ GAT_AXI2APB_PERIC2_IPCLKPORT_ACLK,
+ GAT_BUS_D_PERIC_IPCLKPORT_DMACLK,
+ GAT_BUS_D_PERIC_IPCLKPORT_EQOSCLK,
+ GAT_BUS_D_PERIC_IPCLKPORT_MAINCLK,
+ GAT_BUS_P_PERIC_IPCLKPORT_EQOSCLK,
+ GAT_BUS_P_PERIC_IPCLKPORT_MAINCLK,
+ GAT_BUS_P_PERIC_IPCLKPORT_SMMUCLK,
+ GAT_EQOS_TOP_IPCLKPORT_ACLK_I,
+ GAT_EQOS_TOP_IPCLKPORT_CLK_RX_I,
+ GAT_EQOS_TOP_IPCLKPORT_HCLK_I,
+ GAT_EQOS_TOP_IPCLKPORT_RGMII_CLK_I,
+ GAT_EQOS_TOP_IPCLKPORT_RII_CLK_I,
+ GAT_EQOS_TOP_IPCLKPORT_RMII_CLK_I,
+ GAT_GPIO_PERIC_IPCLKPORT_PCLK,
+ GAT_NS_BRDG_PERIC_IPCLKPORT_CLK__PSOC_PERIC__CLK_PERIC_D,
+ GAT_NS_BRDG_PERIC_IPCLKPORT_CLK__PSOC_PERIC__CLK_PERIC_P,
+ GAT_PERIC_ADC0_IPCLKPORT_PCLK_S0,
+ GAT_PERIC_DMA0_IPCLKPORT_ACLK,
+ GAT_PERIC_DMA1_IPCLKPORT_ACLK,
+ GAT_PERIC_I2C0_IPCLKPORT_I_PCLK,
+ GAT_PERIC_I2C1_IPCLKPORT_I_PCLK,
+ GAT_PERIC_I2C2_IPCLKPORT_I_PCLK,
+ GAT_PERIC_I2C3_IPCLKPORT_I_PCLK,
+ GAT_PERIC_I2C4_IPCLKPORT_I_PCLK,
+ GAT_PERIC_I2C5_IPCLKPORT_I_PCLK,
+ GAT_PERIC_I2C6_IPCLKPORT_I_PCLK,
+ GAT_PERIC_I2C7_IPCLKPORT_I_PCLK,
+ GAT_PERIC_MCAN0_IPCLKPORT_CCLK,
+ GAT_PERIC_MCAN0_IPCLKPORT_PCLK,
+ GAT_PERIC_MCAN1_IPCLKPORT_CCLK,
+ GAT_PERIC_MCAN1_IPCLKPORT_PCLK,
+ GAT_PERIC_MCAN2_IPCLKPORT_CCLK,
+ GAT_PERIC_MCAN2_IPCLKPORT_PCLK,
+ GAT_PERIC_MCAN3_IPCLKPORT_CCLK,
+ GAT_PERIC_MCAN3_IPCLKPORT_PCLK,
+ GAT_PERIC_PWM0_IPCLKPORT_I_PCLK_S0,
+ GAT_PERIC_PWM1_IPCLKPORT_I_PCLK_S0,
+ GAT_PERIC_SMMU_IPCLKPORT_CCLK,
+ GAT_PERIC_SMMU_IPCLKPORT_PERIC_BCLK,
+ GAT_PERIC_SPI0_IPCLKPORT_I_PCLK,
+ GAT_PERIC_SPI0_IPCLKPORT_I_SCLK_SPI,
+ GAT_PERIC_SPI1_IPCLKPORT_I_PCLK,
+ GAT_PERIC_SPI1_IPCLKPORT_I_SCLK_SPI,
+ GAT_PERIC_SPI2_IPCLKPORT_I_PCLK,
+ GAT_PERIC_SPI2_IPCLKPORT_I_SCLK_SPI,
+ GAT_PERIC_TDM0_IPCLKPORT_HCLK_M,
+ GAT_PERIC_TDM0_IPCLKPORT_PCLK,
+ GAT_PERIC_TDM1_IPCLKPORT_HCLK_M,
+ GAT_PERIC_TDM1_IPCLKPORT_PCLK,
+ GAT_PERIC_UART0_IPCLKPORT_I_SCLK_UART,
+ GAT_PERIC_UART0_IPCLKPORT_PCLK,
+ GAT_PERIC_UART1_IPCLKPORT_I_SCLK_UART,
+ GAT_PERIC_UART1_IPCLKPORT_PCLK,
+ GAT_SYSREG_PERI_IPCLKPORT_PCLK,
+};
+
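+/* External 125 MHz RX clock supplied by the Ethernet (EQoS) PHY */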
+static const struct samsung_fixed_rate_clock peric_fixed_clks[] __initconst = {
+ FRATE(PERIC_EQOS_PHYRXCLK, "eqos_phyrxclk", NULL, 0, 125000000),
+};
+
+/* List of parent clocks for Muxes in CMU_PERIC */
+PNAME(mout_peric_dmaclk_p) = { "fin_pll", "cmu_peric_shared1div4_dmaclk_gate" };
+PNAME(mout_peric_eqos_busclk_p) = { "fin_pll", "dout_cmu_pll_shared0_div4" };
+PNAME(mout_peric_pclk_p) = { "fin_pll", "dout_cmu_peric_shared1div36" };
+PNAME(mout_peric_tbuclk_p) = { "fin_pll", "dout_cmu_peric_shared0div3_tbuclk" };
+PNAME(mout_peric_spi_clk_p) = { "fin_pll", "dout_cmu_peric_shared0div20" };
+PNAME(mout_peric_spi_pclk_p) = { "fin_pll", "dout_cmu_peric_shared1div36" };
+PNAME(mout_peric_uart_clk_p) = { "fin_pll", "dout_cmu_peric_shared1div4_dmaclk" };
+PNAME(mout_peric_uart_pclk_p) = { "fin_pll", "dout_cmu_peric_shared1div36" };
+PNAME(mout_peric_eqos_phyrxclk_p) = { "dout_peric_rgmii_clk", "eqos_phyrxclk" };
+
+static const struct samsung_mux_clock peric_mux_clks[] __initconst = {
+ MUX(0, "mout_peric_dmaclk", mout_peric_dmaclk_p, PLL_CON0_PERIC_DMACLK_MUX, 4, 1),
+ MUX(0, "mout_peric_eqos_busclk", mout_peric_eqos_busclk_p,
+ PLL_CON0_PERIC_EQOS_BUSCLK_MUX, 4, 1),
+ MUX(0, "mout_peric_pclk", mout_peric_pclk_p, PLL_CON0_PERIC_PCLK_MUX, 4, 1),
+ MUX(0, "mout_peric_tbuclk", mout_peric_tbuclk_p, PLL_CON0_PERIC_TBUCLK_MUX, 4, 1),
+ MUX(0, "mout_peric_spi_clk", mout_peric_spi_clk_p, PLL_CON0_SPI_CLK, 4, 1),
+ MUX(0, "mout_peric_spi_pclk", mout_peric_spi_pclk_p, PLL_CON0_SPI_PCLK, 4, 1),
+ MUX(0, "mout_peric_uart_clk", mout_peric_uart_clk_p, PLL_CON0_UART_CLK, 4, 1),
+ MUX(0, "mout_peric_uart_pclk", mout_peric_uart_pclk_p, PLL_CON0_UART_PCLK, 4, 1),
+ MUX(PERIC_EQOS_PHYRXCLK_MUX, "mout_peric_eqos_phyrxclk", mout_peric_eqos_phyrxclk_p,
+ MUX_PERIC_EQOS_PHYRXCLK, 0, 1),
+};
+
+static const struct samsung_div_clock peric_div_clks[] __initconst = {
+ DIV(0, "dout_peric_eqos_busclk", "mout_peric_eqos_busclk", DIV_EQOS_BUSCLK, 0, 4),
+ DIV(0, "dout_peric_mcan_clk", "mout_peric_dmaclk", DIV_PERIC_MCAN_CLK, 0, 4),
+ DIV(PERIC_DOUT_RGMII_CLK, "dout_peric_rgmii_clk", "mout_peric_eqos_busclk",
+ DIV_RGMII_CLK, 0, 4),
+ DIV(0, "dout_peric_rii_clk", "dout_peric_rmii_clk", DIV_RII_CLK, 0, 4),
+ DIV(0, "dout_peric_rmii_clk", "dout_peric_rgmii_clk", DIV_RMII_CLK, 0, 4),
+ DIV(0, "dout_peric_spi_clk", "mout_peric_spi_clk", DIV_SPI_CLK, 0, 6),
+ DIV(0, "dout_peric_uart_clk", "mout_peric_uart_clk", DIV_UART_CLK, 0, 6),
+};
+
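+/* Gate clocks in CMU_PERIC; bit 21 of each CON register is the enable bit */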
+static const struct samsung_gate_clock peric_gate_clks[] __initconst = {
+ GATE(PERIC_EQOS_TOP_IPCLKPORT_CLK_PTP_REF_I, "peric_eqos_top_ipclkport_clk_ptp_ref_i",
+ "fin_pll", GAT_EQOS_TOP_IPCLKPORT_CLK_PTP_REF_I, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_gpio_peric_ipclkport_oscclk", "fin_pll", GAT_GPIO_PERIC_IPCLKPORT_OSCCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_PCLK_ADCIF, "peric_adc0_ipclkport_i_oscclk", "fin_pll",
+ GAT_PERIC_ADC0_IPCLKPORT_I_OSCCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_cmu_peric_ipclkport_pclk", "mout_peric_pclk",
+ GAT_PERIC_CMU_PERIC_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_pwm0_ipclkport_i_oscclk", "fin_pll", GAT_PERIC_PWM0_IPCLKPORT_I_OSCCLK, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_pwm1_ipclkport_i_oscclk", "fin_pll", GAT_PERIC_PWM1_IPCLKPORT_I_OSCCLK, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_async_apb_dma0_ipclkport_pclkm", "mout_peric_dmaclk",
+ GAT_ASYNC_APB_DMA0_IPCLKPORT_PCLKM, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_async_apb_dma0_ipclkport_pclks", "mout_peric_pclk",
+ GAT_ASYNC_APB_DMA0_IPCLKPORT_PCLKS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_async_apb_dma1_ipclkport_pclkm", "mout_peric_dmaclk",
+ GAT_ASYNC_APB_DMA1_IPCLKPORT_PCLKM, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_async_apb_dma1_ipclkport_pclks", "mout_peric_pclk",
+ GAT_ASYNC_APB_DMA1_IPCLKPORT_PCLKS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_axi2apb_peric0_ipclkport_aclk", "mout_peric_pclk",
+ GAT_AXI2APB_PERIC0_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_axi2apb_peric1_ipclkport_aclk", "mout_peric_pclk",
+ GAT_AXI2APB_PERIC1_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_axi2apb_peric2_ipclkport_aclk", "mout_peric_pclk",
+ GAT_AXI2APB_PERIC2_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_bus_d_peric_ipclkport_dmaclk", "mout_peric_dmaclk",
+ GAT_BUS_D_PERIC_IPCLKPORT_DMACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_BUS_D_PERIC_IPCLKPORT_EQOSCLK, "peric_bus_d_peric_ipclkport_eqosclk",
+ "dout_peric_eqos_busclk", GAT_BUS_D_PERIC_IPCLKPORT_EQOSCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_bus_d_peric_ipclkport_mainclk", "mout_peric_tbuclk",
+ GAT_BUS_D_PERIC_IPCLKPORT_MAINCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_BUS_P_PERIC_IPCLKPORT_EQOSCLK, "peric_bus_p_peric_ipclkport_eqosclk",
+ "dout_peric_eqos_busclk", GAT_BUS_P_PERIC_IPCLKPORT_EQOSCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_bus_p_peric_ipclkport_mainclk", "mout_peric_pclk",
+ GAT_BUS_P_PERIC_IPCLKPORT_MAINCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_bus_p_peric_ipclkport_smmuclk", "mout_peric_tbuclk",
+ GAT_BUS_P_PERIC_IPCLKPORT_SMMUCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_EQOS_TOP_IPCLKPORT_ACLK_I, "peric_eqos_top_ipclkport_aclk_i",
+ "dout_peric_eqos_busclk", GAT_EQOS_TOP_IPCLKPORT_ACLK_I, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_EQOS_TOP_IPCLKPORT_CLK_RX_I, "peric_eqos_top_ipclkport_clk_rx_i",
+ "mout_peric_eqos_phyrxclk", GAT_EQOS_TOP_IPCLKPORT_CLK_RX_I, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_EQOS_TOP_IPCLKPORT_HCLK_I, "peric_eqos_top_ipclkport_hclk_i",
+ "dout_peric_eqos_busclk", GAT_EQOS_TOP_IPCLKPORT_HCLK_I, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_EQOS_TOP_IPCLKPORT_RGMII_CLK_I, "peric_eqos_top_ipclkport_rgmii_clk_i",
+ "dout_peric_rgmii_clk", GAT_EQOS_TOP_IPCLKPORT_RGMII_CLK_I, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_eqos_top_ipclkport_rii_clk_i", "dout_peric_rii_clk",
+ GAT_EQOS_TOP_IPCLKPORT_RII_CLK_I, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_eqos_top_ipclkport_rmii_clk_i", "dout_peric_rmii_clk",
+ GAT_EQOS_TOP_IPCLKPORT_RMII_CLK_I, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_gpio_peric_ipclkport_pclk", "mout_peric_pclk",
+ GAT_GPIO_PERIC_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_ns_brdg_peric_ipclkport_clk__psoc_peric__clk_peric_d", "mout_peric_tbuclk",
+ GAT_NS_BRDG_PERIC_IPCLKPORT_CLK__PSOC_PERIC__CLK_PERIC_D, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_ns_brdg_peric_ipclkport_clk__psoc_peric__clk_peric_p", "mout_peric_pclk",
+ GAT_NS_BRDG_PERIC_IPCLKPORT_CLK__PSOC_PERIC__CLK_PERIC_P, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_adc0_ipclkport_pclk_s0", "mout_peric_pclk",
+ GAT_PERIC_ADC0_IPCLKPORT_PCLK_S0, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_DMA0_IPCLKPORT_ACLK, "peric_dma0_ipclkport_aclk", "mout_peric_dmaclk",
+ GAT_PERIC_DMA0_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_DMA1_IPCLKPORT_ACLK, "peric_dma1_ipclkport_aclk", "mout_peric_dmaclk",
+ GAT_PERIC_DMA1_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_PCLK_HSI2C0, "peric_i2c0_ipclkport_i_pclk", "mout_peric_pclk",
+ GAT_PERIC_I2C0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_PCLK_HSI2C1, "peric_i2c1_ipclkport_i_pclk", "mout_peric_pclk",
+ GAT_PERIC_I2C1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_PCLK_HSI2C2, "peric_i2c2_ipclkport_i_pclk", "mout_peric_pclk",
+ GAT_PERIC_I2C2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_PCLK_HSI2C3, "peric_i2c3_ipclkport_i_pclk", "mout_peric_pclk",
+ GAT_PERIC_I2C3_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_PCLK_HSI2C4, "peric_i2c4_ipclkport_i_pclk", "mout_peric_pclk",
+ GAT_PERIC_I2C4_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_PCLK_HSI2C5, "peric_i2c5_ipclkport_i_pclk", "mout_peric_pclk",
+ GAT_PERIC_I2C5_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_PCLK_HSI2C6, "peric_i2c6_ipclkport_i_pclk", "mout_peric_pclk",
+ GAT_PERIC_I2C6_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_PCLK_HSI2C7, "peric_i2c7_ipclkport_i_pclk", "mout_peric_pclk",
+ GAT_PERIC_I2C7_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_MCAN0_IPCLKPORT_CCLK, "peric_mcan0_ipclkport_cclk", "dout_peric_mcan_clk",
+ GAT_PERIC_MCAN0_IPCLKPORT_CCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_MCAN0_IPCLKPORT_PCLK, "peric_mcan0_ipclkport_pclk", "mout_peric_pclk",
+ GAT_PERIC_MCAN0_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_MCAN1_IPCLKPORT_CCLK, "peric_mcan1_ipclkport_cclk", "dout_peric_mcan_clk",
+ GAT_PERIC_MCAN1_IPCLKPORT_CCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_MCAN1_IPCLKPORT_PCLK, "peric_mcan1_ipclkport_pclk", "mout_peric_pclk",
+ GAT_PERIC_MCAN1_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_MCAN2_IPCLKPORT_CCLK, "peric_mcan2_ipclkport_cclk", "dout_peric_mcan_clk",
+ GAT_PERIC_MCAN2_IPCLKPORT_CCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_MCAN2_IPCLKPORT_PCLK, "peric_mcan2_ipclkport_pclk", "mout_peric_pclk",
+ GAT_PERIC_MCAN2_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_MCAN3_IPCLKPORT_CCLK, "peric_mcan3_ipclkport_cclk", "dout_peric_mcan_clk",
+ GAT_PERIC_MCAN3_IPCLKPORT_CCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_MCAN3_IPCLKPORT_PCLK, "peric_mcan3_ipclkport_pclk", "mout_peric_pclk",
+ GAT_PERIC_MCAN3_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_PWM0_IPCLKPORT_I_PCLK_S0, "peric_pwm0_ipclkport_i_pclk_s0", "mout_peric_pclk",
+ GAT_PERIC_PWM0_IPCLKPORT_I_PCLK_S0, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_PWM1_IPCLKPORT_I_PCLK_S0, "peric_pwm1_ipclkport_i_pclk_s0", "mout_peric_pclk",
+ GAT_PERIC_PWM1_IPCLKPORT_I_PCLK_S0, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_smmu_ipclkport_cclk", "mout_peric_tbuclk",
+ GAT_PERIC_SMMU_IPCLKPORT_CCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_smmu_ipclkport_peric_bclk", "mout_peric_tbuclk",
+ GAT_PERIC_SMMU_IPCLKPORT_PERIC_BCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_PCLK_SPI0, "peric_spi0_ipclkport_i_pclk", "mout_peric_spi_pclk",
+ GAT_PERIC_SPI0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_SCLK_SPI0, "peric_spi0_ipclkport_i_sclk_spi", "dout_peric_spi_clk",
+ GAT_PERIC_SPI0_IPCLKPORT_I_SCLK_SPI, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_PCLK_SPI1, "peric_spi1_ipclkport_i_pclk", "mout_peric_spi_pclk",
+ GAT_PERIC_SPI1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_SCLK_SPI1, "peric_spi1_ipclkport_i_sclk_spi", "dout_peric_spi_clk",
+ GAT_PERIC_SPI1_IPCLKPORT_I_SCLK_SPI, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_PCLK_SPI2, "peric_spi2_ipclkport_i_pclk", "mout_peric_spi_pclk",
+ GAT_PERIC_SPI2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_SCLK_SPI2, "peric_spi2_ipclkport_i_sclk_spi", "dout_peric_spi_clk",
+ GAT_PERIC_SPI2_IPCLKPORT_I_SCLK_SPI, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_HCLK_TDM0, "peric_tdm0_ipclkport_hclk_m", "mout_peric_pclk",
+ GAT_PERIC_TDM0_IPCLKPORT_HCLK_M, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_PCLK_TDM0, "peric_tdm0_ipclkport_pclk", "mout_peric_pclk",
+ GAT_PERIC_TDM0_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_HCLK_TDM1, "peric_tdm1_ipclkport_hclk_m", "mout_peric_pclk",
+ GAT_PERIC_TDM1_IPCLKPORT_HCLK_M, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_PCLK_TDM1, "peric_tdm1_ipclkport_pclk", "mout_peric_pclk",
+ GAT_PERIC_TDM1_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_SCLK_UART0, "peric_uart0_ipclkport_i_sclk_uart", "dout_peric_uart_clk",
+ GAT_PERIC_UART0_IPCLKPORT_I_SCLK_UART, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_PCLK_UART0, "peric_uart0_ipclkport_pclk", "mout_peric_uart_pclk",
+ GAT_PERIC_UART0_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_SCLK_UART1, "peric_uart1_ipclkport_i_sclk_uart", "dout_peric_uart_clk",
+ GAT_PERIC_UART1_IPCLKPORT_I_SCLK_UART, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PERIC_PCLK_UART1, "peric_uart1_ipclkport_pclk", "mout_peric_uart_pclk",
+ GAT_PERIC_UART1_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "peric_sysreg_peri_ipclkport_pclk", "mout_peric_pclk",
+ GAT_SYSREG_PERI_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+};
+
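+/* .clk_name is the parent clock required for CMU register access */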
+static const struct samsung_cmu_info peric_cmu_info __initconst = {
+ .mux_clks = peric_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric_mux_clks),
+ .div_clks = peric_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric_div_clks),
+ .gate_clks = peric_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peric_gate_clks),
+ .fixed_clks = peric_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(peric_fixed_clks),
+ .nr_clk_ids = PERIC_NR_CLK,
+ .clk_regs = peric_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric_clk_regs),
+ .clk_name = "dout_cmu_pll_shared0_div4",
+};
+
+/* Register Offset definitions for CMU_FSYS0 (0x15010000) */
+#define PLL_CON0_CLKCMU_FSYS0_UNIPRO 0x100
+#define PLL_CON0_CLK_FSYS0_SLAVEBUSCLK 0x140
+#define PLL_CON0_EQOS_RGMII_125_MUX1 0x160
+#define DIV_CLK_UNIPRO 0x1800
+#define DIV_EQOS_RGMII_CLK_125 0x1804
+#define DIV_PERIBUS_GRP 0x1808
+#define DIV_EQOS_RII_CLK2O5 0x180c
+#define DIV_EQOS_RMIICLK_25 0x1810
+#define DIV_PCIE_PHY_OSCCLK 0x1814
+#define GAT_FSYS0_EQOS_TOP0_IPCLKPORT_CLK_PTP_REF_I 0x2004
+#define GAT_FSYS0_EQOS_TOP0_IPCLKPORT_CLK_RX_I 0x2008
+#define GAT_FSYS0_FSYS0_CMU_FSYS0_IPCLKPORT_PCLK 0x200c
+#define GAT_FSYS0_GPIO_FSYS0_IPCLKPORT_OSCCLK 0x2010
+#define GAT_FSYS0_PCIE_TOP_IPCLKPORT_PCIEG3_PHY_X4_INST_0_PLL_REFCLK_FROM_XO 0x2014
+#define GAT_FSYS0_PCIE_TOP_IPCLKPORT_PIPE_PAL_INST_0_I_IMMORTAL_CLK 0x2018
+#define GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_AUX_CLK_SOC 0x201c
+#define GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_MPHY_REFCLK_IXTAL24 0x2020
+#define GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_MPHY_REFCLK_IXTAL26 0x2024
+#define GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_MPHY_REFCLK_IXTAL24 0x2028
+#define GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_MPHY_REFCLK_IXTAL26 0x202c
+#define GAT_FSYS0_AHBBR_FSYS0_IPCLKPORT_HCLK 0x2038
+#define GAT_FSYS0_AXI2APB_FSYS0_IPCLKPORT_ACLK 0x203c
+#define GAT_FSYS0_BUS_D_FSYS0_IPCLKPORT_MAINCLK 0x2040
+#define GAT_FSYS0_BUS_D_FSYS0_IPCLKPORT_PERICLK 0x2044
+#define GAT_FSYS0_BUS_P_FSYS0_IPCLKPORT_MAINCLK 0x2048
+#define GAT_FSYS0_BUS_P_FSYS0_IPCLKPORT_TCUCLK 0x204c
+#define GAT_FSYS0_CPE425_IPCLKPORT_ACLK 0x2050
+#define GAT_FSYS0_EQOS_TOP0_IPCLKPORT_ACLK_I 0x2054
+#define GAT_FSYS0_EQOS_TOP0_IPCLKPORT_HCLK_I 0x2058
+#define GAT_FSYS0_EQOS_TOP0_IPCLKPORT_RGMII_CLK_I 0x205c
+#define GAT_FSYS0_EQOS_TOP0_IPCLKPORT_RII_CLK_I 0x2060
+#define GAT_FSYS0_EQOS_TOP0_IPCLKPORT_RMII_CLK_I 0x2064
+#define GAT_FSYS0_GPIO_FSYS0_IPCLKPORT_PCLK 0x2068
+#define GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_D 0x206c
+#define GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_D1 0x2070
+#define GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_P 0x2074
+#define GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_S 0x2078
+#define GAT_FSYS0_PCIE_TOP_IPCLKPORT_PCIEG3_PHY_X4_INST_0_I_APB_PCLK 0x207c
+#define GAT_FSYS0_PCIE_TOP_IPCLKPORT_PCIEG3_PHY_X4_INST_0_PLL_REFCLK_FROM_SYSPLL 0x2080
+#define GAT_FSYS0_PCIE_TOP_IPCLKPORT_PIPE_PAL_INST_0_I_APB_PCLK_0 0x2084
+#define GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_DBI_ACLK_SOC 0x2088
+#define GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK 0x208c
+#define GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_MSTR_ACLK_SOC 0x2090
+#define GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_SLV_ACLK_SOC 0x2094
+#define GAT_FSYS0_SMMU_FSYS0_IPCLKPORT_CCLK 0x2098
+#define GAT_FSYS0_SMMU_FSYS0_IPCLKPORT_FSYS0_BCLK 0x209c
+#define GAT_FSYS0_SYSREG_FSYS0_IPCLKPORT_PCLK 0x20a0
+#define GAT_FSYS0_UFS_TOP0_IPCLKPORT_HCLK_BUS 0x20a4
+#define GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_ACLK 0x20a8
+#define GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_CLK_UNIPRO 0x20ac
+#define GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_FMP_CLK 0x20b0
+#define GAT_FSYS0_UFS_TOP1_IPCLKPORT_HCLK_BUS 0x20b4
+#define GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_ACLK 0x20b8
+#define GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_CLK_UNIPRO 0x20bc
+#define GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_FMP_CLK 0x20c0
+#define GAT_FSYS0_RII_CLK_DIVGATE 0x20d4
+
+static const unsigned long fsys0_clk_regs[] __initconst = {
+ PLL_CON0_CLKCMU_FSYS0_UNIPRO,
+ PLL_CON0_CLK_FSYS0_SLAVEBUSCLK,
+ PLL_CON0_EQOS_RGMII_125_MUX1,
+ DIV_CLK_UNIPRO,
+ DIV_EQOS_RGMII_CLK_125,
+ DIV_PERIBUS_GRP,
+ DIV_EQOS_RII_CLK2O5,
+ DIV_EQOS_RMIICLK_25,
+ DIV_PCIE_PHY_OSCCLK,
+ GAT_FSYS0_EQOS_TOP0_IPCLKPORT_CLK_PTP_REF_I,
+ GAT_FSYS0_EQOS_TOP0_IPCLKPORT_CLK_RX_I,
+ GAT_FSYS0_FSYS0_CMU_FSYS0_IPCLKPORT_PCLK,
+ GAT_FSYS0_GPIO_FSYS0_IPCLKPORT_OSCCLK,
+ GAT_FSYS0_PCIE_TOP_IPCLKPORT_PCIEG3_PHY_X4_INST_0_PLL_REFCLK_FROM_XO,
+ GAT_FSYS0_PCIE_TOP_IPCLKPORT_PIPE_PAL_INST_0_I_IMMORTAL_CLK,
+ GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_AUX_CLK_SOC,
+ GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_MPHY_REFCLK_IXTAL24,
+ GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_MPHY_REFCLK_IXTAL26,
+ GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_MPHY_REFCLK_IXTAL24,
+ GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_MPHY_REFCLK_IXTAL26,
+ GAT_FSYS0_AHBBR_FSYS0_IPCLKPORT_HCLK,
+ GAT_FSYS0_AXI2APB_FSYS0_IPCLKPORT_ACLK,
+ GAT_FSYS0_BUS_D_FSYS0_IPCLKPORT_MAINCLK,
+ GAT_FSYS0_BUS_D_FSYS0_IPCLKPORT_PERICLK,
+ GAT_FSYS0_BUS_P_FSYS0_IPCLKPORT_MAINCLK,
+ GAT_FSYS0_BUS_P_FSYS0_IPCLKPORT_TCUCLK,
+ GAT_FSYS0_CPE425_IPCLKPORT_ACLK,
+ GAT_FSYS0_EQOS_TOP0_IPCLKPORT_ACLK_I,
+ GAT_FSYS0_EQOS_TOP0_IPCLKPORT_HCLK_I,
+ GAT_FSYS0_EQOS_TOP0_IPCLKPORT_RGMII_CLK_I,
+ GAT_FSYS0_EQOS_TOP0_IPCLKPORT_RII_CLK_I,
+ GAT_FSYS0_EQOS_TOP0_IPCLKPORT_RMII_CLK_I,
+ GAT_FSYS0_GPIO_FSYS0_IPCLKPORT_PCLK,
+ GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_D,
+ GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_D1,
+ GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_P,
+ GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_S,
+ GAT_FSYS0_PCIE_TOP_IPCLKPORT_PCIEG3_PHY_X4_INST_0_I_APB_PCLK,
+ GAT_FSYS0_PCIE_TOP_IPCLKPORT_PCIEG3_PHY_X4_INST_0_PLL_REFCLK_FROM_SYSPLL,
+ GAT_FSYS0_PCIE_TOP_IPCLKPORT_PIPE_PAL_INST_0_I_APB_PCLK_0,
+ GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_DBI_ACLK_SOC,
+ GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK,
+ GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_MSTR_ACLK_SOC,
+ GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_SLV_ACLK_SOC,
+ GAT_FSYS0_SMMU_FSYS0_IPCLKPORT_CCLK,
+ GAT_FSYS0_SMMU_FSYS0_IPCLKPORT_FSYS0_BCLK,
+ GAT_FSYS0_SYSREG_FSYS0_IPCLKPORT_PCLK,
+ GAT_FSYS0_UFS_TOP0_IPCLKPORT_HCLK_BUS,
+ GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_ACLK,
+ GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_CLK_UNIPRO,
+ GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_FMP_CLK,
+ GAT_FSYS0_UFS_TOP1_IPCLKPORT_HCLK_BUS,
+ GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_ACLK,
+ GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_CLK_UNIPRO,
+ GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_FMP_CLK,
+ GAT_FSYS0_RII_CLK_DIVGATE,
+};
+
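+/* Fixed-rate root clocks provided by external pads and crystals */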
+static const struct samsung_fixed_rate_clock fsys0_fixed_clks[] __initconst = {
+ FRATE(0, "pad_eqos0_phyrxclk", NULL, 0, 125000000),
+ FRATE(0, "i_mphy_refclk_ixtal26", NULL, 0, 26000000),
+ FRATE(0, "xtal_clk_pcie_phy", NULL, 0, 100000000),
+};
+
+/* List of parent clocks for Muxes in CMU_FSYS0 */
+PNAME(mout_fsys0_clkcmu_fsys0_unipro_p) = { "fin_pll", "dout_cmu_pll_shared0_div6" };
+PNAME(mout_fsys0_clk_fsys0_slavebusclk_p) = { "fin_pll", "dout_cmu_fsys0_shared1div4" };
+PNAME(mout_fsys0_eqos_rgmii_125_mux1_p) = { "fin_pll", "dout_cmu_fsys0_shared0div4" };
+
+static const struct samsung_mux_clock fsys0_mux_clks[] __initconst = {
+ MUX(0, "mout_fsys0_clkcmu_fsys0_unipro", mout_fsys0_clkcmu_fsys0_unipro_p,
+ PLL_CON0_CLKCMU_FSYS0_UNIPRO, 4, 1),
+ MUX(0, "mout_fsys0_clk_fsys0_slavebusclk", mout_fsys0_clk_fsys0_slavebusclk_p,
+ PLL_CON0_CLK_FSYS0_SLAVEBUSCLK, 4, 1),
+ MUX(0, "mout_fsys0_eqos_rgmii_125_mux1", mout_fsys0_eqos_rgmii_125_mux1_p,
+ PLL_CON0_EQOS_RGMII_125_MUX1, 4, 1),
+};
+
+static const struct samsung_div_clock fsys0_div_clks[] __initconst = {
+ DIV(0, "dout_fsys0_clk_unipro", "mout_fsys0_clkcmu_fsys0_unipro", DIV_CLK_UNIPRO, 0, 4),
+ DIV(0, "dout_fsys0_eqs_rgmii_clk_125", "mout_fsys0_eqos_rgmii_125_mux1",
+ DIV_EQS_RGMII_CLK_125, 0, 4),
+ DIV(FSYS0_DOUT_FSYS0_PERIBUS_GRP, "dout_fsys0_peribus_grp",
+ "mout_fsys0_clk_fsys0_slavebusclk", DIV_PERIBUS_GRP, 0, 4),
+ DIV(0, "dout_fsys0_eqos_rii_clk2o5", "fsys0_rii_clk_divgate", DIV_EQOS_RII_CLK2O5, 0, 4),
+ DIV(0, "dout_fsys0_eqos_rmiiclk_25", "mout_fsys0_eqos_rgmii_125_mux1",
+ DIV_EQOS_RMIICLK_25, 0, 5),
+ DIV(0, "dout_fsys0_pcie_phy_oscclk", "mout_fsys0_eqos_rgmii_125_mux1",
+ DIV_PCIE_PHY_OSCCLK, 0, 4),
+};
+
+static const struct samsung_gate_clock fsys0_gate_clks[] __initconst = {
+ GATE(FSYS0_EQOS_TOP0_IPCLKPORT_CLK_RX_I, "fsys0_eqos_top0_ipclkport_clk_rx_i",
+ "pad_eqos0_phyrxclk", GAT_FSYS0_EQOS_TOP0_IPCLKPORT_CLK_RX_I, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(PCIE_SUBCTRL_INST0_AUX_CLK_SOC,
+ "fsys0_pcie_top_ipclkport_fsd_pcie_sub_ctrl_inst_0_aux_clk_soc", "fin_pll",
+ GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_AUX_CLK_SOC, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_fsys0_cmu_fsys0_ipclkport_pclk", "dout_fsys0_peribus_grp",
+ GAT_FSYS0_FSYS0_CMU_FSYS0_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0,
+ "fsys0_pcie_top_ipclkport_pcieg3_phy_x4_inst_0_pll_refclk_from_xo",
+ "xtal_clk_pcie_phy",
+ GAT_FSYS0_PCIE_TOP_IPCLKPORT_PCIEG3_PHY_X4_INST_0_PLL_REFCLK_FROM_XO, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(UFS0_MPHY_REFCLK_IXTAL24, "fsys0_ufs_top0_ipclkport_i_mphy_refclk_ixtal24",
+ "i_mphy_refclk_ixtal26", GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_MPHY_REFCLK_IXTAL24, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(UFS0_MPHY_REFCLK_IXTAL26, "fsys0_ufs_top0_ipclkport_i_mphy_refclk_ixtal26",
+ "i_mphy_refclk_ixtal26", GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_MPHY_REFCLK_IXTAL26, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(UFS1_MPHY_REFCLK_IXTAL24, "fsys0_ufs_top1_ipclkport_i_mphy_refclk_ixtal24",
+ "i_mphy_refclk_ixtal26", GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_MPHY_REFCLK_IXTAL24, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(UFS1_MPHY_REFCLK_IXTAL26, "fsys0_ufs_top1_ipclkport_i_mphy_refclk_ixtal26",
+ "i_mphy_refclk_ixtal26", GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_MPHY_REFCLK_IXTAL26, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_ahbbr_fsys0_ipclkport_hclk", "dout_fsys0_peribus_grp",
+ GAT_FSYS0_AHBBR_FSYS0_IPCLKPORT_HCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_axi2apb_fsys0_ipclkport_aclk", "dout_fsys0_peribus_grp",
+ GAT_FSYS0_AXI2APB_FSYS0_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_bus_d_fsys0_ipclkport_mainclk", "mout_fsys0_clk_fsys0_slavebusclk",
+ GAT_FSYS0_BUS_D_FSYS0_IPCLKPORT_MAINCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_bus_d_fsys0_ipclkport_periclk", "dout_fsys0_peribus_grp",
+ GAT_FSYS0_BUS_D_FSYS0_IPCLKPORT_PERICLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_bus_p_fsys0_ipclkport_mainclk", "dout_fsys0_peribus_grp",
+ GAT_FSYS0_BUS_P_FSYS0_IPCLKPORT_MAINCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_bus_p_fsys0_ipclkport_tcuclk", "mout_fsys0_eqos_rgmii_125_mux1",
+ GAT_FSYS0_BUS_P_FSYS0_IPCLKPORT_TCUCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_cpe425_ipclkport_aclk", "mout_fsys0_clk_fsys0_slavebusclk",
+ GAT_FSYS0_CPE425_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(FSYS0_EQOS_TOP0_IPCLKPORT_ACLK_I, "fsys0_eqos_top0_ipclkport_aclk_i",
+ "dout_fsys0_peribus_grp", GAT_FSYS0_EQOS_TOP0_IPCLKPORT_ACLK_I, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(FSYS0_EQOS_TOP0_IPCLKPORT_HCLK_I, "fsys0_eqos_top0_ipclkport_hclk_i",
+ "dout_fsys0_peribus_grp", GAT_FSYS0_EQOS_TOP0_IPCLKPORT_HCLK_I, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(FSYS0_EQOS_TOP0_IPCLKPORT_RGMII_CLK_I, "fsys0_eqos_top0_ipclkport_rgmii_clk_i",
+ "dout_fsys0_eqs_rgmii_clk_125", GAT_FSYS0_EQOS_TOP0_IPCLKPORT_RGMII_CLK_I, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_eqos_top0_ipclkport_rii_clk_i", "dout_fsys0_eqos_rii_clk2o5",
+ GAT_FSYS0_EQOS_TOP0_IPCLKPORT_RII_CLK_I, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_eqos_top0_ipclkport_rmii_clk_i", "dout_fsys0_eqos_rmiiclk_25",
+ GAT_FSYS0_EQOS_TOP0_IPCLKPORT_RMII_CLK_I, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_gpio_fsys0_ipclkport_pclk", "dout_fsys0_peribus_grp",
+ GAT_FSYS0_GPIO_FSYS0_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_gpio_fsys0_ipclkport_oscclk", "fin_pll",
+ GAT_FSYS0_GPIO_FSYS0_IPCLKPORT_OSCCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_ns_brdg_fsys0_ipclkport_clk__psoc_fsys0__clk_fsys0_d",
+ "mout_fsys0_clk_fsys0_slavebusclk",
+ GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_D, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_ns_brdg_fsys0_ipclkport_clk__psoc_fsys0__clk_fsys0_d1",
+ "mout_fsys0_eqos_rgmii_125_mux1",
+ GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_D1, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_ns_brdg_fsys0_ipclkport_clk__psoc_fsys0__clk_fsys0_p",
+ "dout_fsys0_peribus_grp",
+ GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_P, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_ns_brdg_fsys0_ipclkport_clk__psoc_fsys0__clk_fsys0_s",
+ "mout_fsys0_clk_fsys0_slavebusclk",
+ GAT_FSYS0_NS_BRDG_FSYS0_IPCLKPORT_CLK__PSOC_FSYS0__CLK_FSYS0_S, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_pcie_top_ipclkport_pcieg3_phy_x4_inst_0_i_apb_pclk",
+ "dout_fsys0_peribus_grp",
+ GAT_FSYS0_PCIE_TOP_IPCLKPORT_PCIEG3_PHY_X4_INST_0_I_APB_PCLK, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0,
+ "fsys0_pcie_top_ipclkport_pcieg3_phy_x4_inst_0_pll_refclk_from_syspll",
+ "dout_fsys0_pcie_phy_oscclk",
+ GAT_FSYS0_PCIE_TOP_IPCLKPORT_PCIEG3_PHY_X4_INST_0_PLL_REFCLK_FROM_SYSPLL,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_pcie_top_ipclkport_pipe_pal_inst_0_i_apb_pclk_0", "dout_fsys0_peribus_grp",
+ GAT_FSYS0_PCIE_TOP_IPCLKPORT_PIPE_PAL_INST_0_I_APB_PCLK_0, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_pcie_top_ipclkport_pipe_pal_inst_0_i_immortal_clk", "fin_pll",
+ GAT_FSYS0_PCIE_TOP_IPCLKPORT_PIPE_PAL_INST_0_I_IMMORTAL_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PCIE_SUBCTRL_INST0_DBI_ACLK_SOC,
+ "fsys0_pcie_top_ipclkport_fsd_pcie_sub_ctrl_inst_0_dbi_aclk_soc",
+ "dout_fsys0_peribus_grp",
+ GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_DBI_ACLK_SOC, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_pcie_top_ipclkport_fsd_pcie_sub_ctrl_inst_0_i_driver_apb_clk",
+ "dout_fsys0_peribus_grp",
+ GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(PCIE_SUBCTRL_INST0_MSTR_ACLK_SOC,
+ "fsys0_pcie_top_ipclkport_fsd_pcie_sub_ctrl_inst_0_mstr_aclk_soc",
+ "mout_fsys0_clk_fsys0_slavebusclk",
+ GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_MSTR_ACLK_SOC, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(PCIE_SUBCTRL_INST0_SLV_ACLK_SOC,
+ "fsys0_pcie_top_ipclkport_fsd_pcie_sub_ctrl_inst_0_slv_aclk_soc",
+ "mout_fsys0_clk_fsys0_slavebusclk",
+ GAT_FSYS0_PCIE_TOP_IPCLKPORT_FSD_PCIE_SUB_CTRL_INST_0_SLV_ACLK_SOC, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_smmu_fsys0_ipclkport_cclk", "mout_fsys0_eqos_rgmii_125_mux1",
+ GAT_FSYS0_SMMU_FSYS0_IPCLKPORT_CCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_smmu_fsys0_ipclkport_fsys0_bclk", "mout_fsys0_clk_fsys0_slavebusclk",
+ GAT_FSYS0_SMMU_FSYS0_IPCLKPORT_FSYS0_BCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_sysreg_fsys0_ipclkport_pclk", "dout_fsys0_peribus_grp",
+ GAT_FSYS0_SYSREG_FSYS0_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(UFS0_TOP0_HCLK_BUS, "fsys0_ufs_top0_ipclkport_hclk_bus", "dout_fsys0_peribus_grp",
+ GAT_FSYS0_UFS_TOP0_IPCLKPORT_HCLK_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(UFS0_TOP0_ACLK, "fsys0_ufs_top0_ipclkport_i_aclk", "dout_fsys0_peribus_grp",
+ GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(UFS0_TOP0_CLK_UNIPRO, "fsys0_ufs_top0_ipclkport_i_clk_unipro", "dout_fsys0_clk_unipro",
+ GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_CLK_UNIPRO, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(UFS0_TOP0_FMP_CLK, "fsys0_ufs_top0_ipclkport_i_fmp_clk", "dout_fsys0_peribus_grp",
+ GAT_FSYS0_UFS_TOP0_IPCLKPORT_I_FMP_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(UFS1_TOP1_HCLK_BUS, "fsys0_ufs_top1_ipclkport_hclk_bus", "dout_fsys0_peribus_grp",
+ GAT_FSYS0_UFS_TOP1_IPCLKPORT_HCLK_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(UFS1_TOP1_ACLK, "fsys0_ufs_top1_ipclkport_i_aclk", "dout_fsys0_peribus_grp",
+ GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(UFS1_TOP1_CLK_UNIPRO, "fsys0_ufs_top1_ipclkport_i_clk_unipro", "dout_fsys0_clk_unipro",
+ GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_CLK_UNIPRO, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(UFS1_TOP1_FMP_CLK, "fsys0_ufs_top1_ipclkport_i_fmp_clk", "dout_fsys0_peribus_grp",
+ GAT_FSYS0_UFS_TOP1_IPCLKPORT_I_FMP_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys0_rii_clk_divgate", "dout_fsys0_eqos_rmiiclk_25", GAT_FSYS0_RII_CLK_DIVGATE,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(FSYS0_EQOS_TOP0_IPCLKPORT_CLK_PTP_REF_I, "fsys0_eqos_top0_ipclkport_clk_ptp_ref_i",
+ "fin_pll", GAT_FSYS0_EQOS_TOP0_IPCLKPORT_CLK_PTP_REF_I, 21, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_cmu_info fsys0_cmu_info __initconst = {
+ .mux_clks = fsys0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(fsys0_mux_clks),
+ .div_clks = fsys0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(fsys0_div_clks),
+ .gate_clks = fsys0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(fsys0_gate_clks),
+ .fixed_clks = fsys0_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(fsys0_fixed_clks),
+ .nr_clk_ids = FSYS0_NR_CLK,
+ .clk_regs = fsys0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(fsys0_clk_regs),
+ .clk_name = "dout_cmu_fsys0_shared1div4",
+};
+
+/* Register Offset definitions for CMU_FSYS1 (0x16810000) */
+#define PLL_CON0_ACLK_FSYS1_BUSP_MUX 0x100
+#define PLL_CON0_PCLKL_FSYS1_BUSP_MUX 0x180
+#define DIV_CLK_FSYS1_PHY0_OSCCLK 0x1800
+#define DIV_CLK_FSYS1_PHY1_OSCCLK 0x1804
+#define GAT_FSYS1_CMU_FSYS1_IPCLKPORT_PCLK 0x2000
+#define GAT_FSYS1_PCIE_LINK0_IPCLKPORT_AUXCLK 0x2004
+#define GAT_FSYS1_PCIE_LINK0_IPCLKPORT_I_SOC_REF_CLK 0x2008
+#define GAT_FSYS1_PCIE_LINK1_IPCLKPORT_AUXCLK 0x200c
+#define GAT_FSYS1_PCIE_PHY0_IPCLKPORT_I_REF_XTAL 0x202c
+#define GAT_FSYS1_PHY0_OSCCLK 0x2034
+#define GAT_FSYS1_PHY1_OSCCLK 0x2038
+#define GAT_FSYS1_AXI2APB_FSYS1_IPCLKPORT_ACLK 0x203c
+#define GAT_FSYS1_BUS_D0_FSYS1_IPCLKPORT_MAINCLK 0x2040
+#define GAT_FSYS1_BUS_S0_FSYS1_IPCLKPORT_M250CLK 0x2048
+#define GAT_FSYS1_BUS_S0_FSYS1_IPCLKPORT_MAINCLK 0x204c
+#define GAT_FSYS1_CPE425_0_FSYS1_IPCLKPORT_ACLK 0x2054
+#define GAT_FSYS1_NS_BRDG_FSYS1_IPCLKPORT_CLK__PSOC_FSYS1__CLK_FSYS1_D0 0x205c
+#define GAT_FSYS1_NS_BRDG_FSYS1_IPCLKPORT_CLK__PSOC_FSYS1__CLK_FSYS1_S0 0x2064
+#define GAT_FSYS1_PCIE_LINK0_IPCLKPORT_DBI_ACLK 0x206c
+#define GAT_FSYS1_PCIE_LINK0_IPCLKPORT_I_APB_CLK 0x2070
+#define GAT_FSYS1_PCIE_LINK0_IPCLKPORT_I_DRIVER_APB_CLK 0x2074
+#define GAT_FSYS1_PCIE_LINK0_IPCLKPORT_MSTR_ACLK 0x2078
+#define GAT_FSYS1_PCIE_LINK0_IPCLKPORT_SLV_ACLK 0x207c
+#define GAT_FSYS1_PCIE_LINK1_IPCLKPORT_DBI_ACLK 0x2080
+#define GAT_FSYS1_PCIE_LINK1_IPCLKPORT_I_DRIVER_APB_CLK 0x2084
+#define GAT_FSYS1_PCIE_LINK1_IPCLKPORT_MSTR_ACLK 0x2088
+#define GAT_FSYS1_PCIE_LINK1_IPCLKPORT_SLV_ACLK 0x208c
+#define GAT_FSYS1_PCIE_PHY0_IPCLKPORT_I_APB_CLK 0x20a4
+#define GAT_FSYS1_PCIE_PHY0_IPCLKPORT_I_REF_SOC_PLL 0x20a8
+#define GAT_FSYS1_SYSREG_FSYS1_IPCLKPORT_PCLK 0x20b4
+#define GAT_FSYS1_TBU0_FSYS1_IPCLKPORT_ACLK 0x20b8
+
+static const unsigned long fsys1_clk_regs[] __initconst = {
+ PLL_CON0_ACLK_FSYS1_BUSP_MUX,
+ PLL_CON0_PCLKL_FSYS1_BUSP_MUX,
+ DIV_CLK_FSYS1_PHY0_OSCCLK,
+ DIV_CLK_FSYS1_PHY1_OSCCLK,
+ GAT_FSYS1_CMU_FSYS1_IPCLKPORT_PCLK,
+ GAT_FSYS1_PCIE_LINK0_IPCLKPORT_AUXCLK,
+ GAT_FSYS1_PCIE_LINK0_IPCLKPORT_I_SOC_REF_CLK,
+ GAT_FSYS1_PCIE_LINK1_IPCLKPORT_AUXCLK,
+ GAT_FSYS1_PCIE_PHY0_IPCLKPORT_I_REF_XTAL,
+ GAT_FSYS1_PHY0_OSCCLK,
+ GAT_FSYS1_PHY1_OSCCLK,
+ GAT_FSYS1_AXI2APB_FSYS1_IPCLKPORT_ACLK,
+ GAT_FSYS1_BUS_D0_FSYS1_IPCLKPORT_MAINCLK,
+ GAT_FSYS1_BUS_S0_FSYS1_IPCLKPORT_M250CLK,
+ GAT_FSYS1_BUS_S0_FSYS1_IPCLKPORT_MAINCLK,
+ GAT_FSYS1_CPE425_0_FSYS1_IPCLKPORT_ACLK,
+ GAT_FSYS1_NS_BRDG_FSYS1_IPCLKPORT_CLK__PSOC_FSYS1__CLK_FSYS1_D0,
+ GAT_FSYS1_NS_BRDG_FSYS1_IPCLKPORT_CLK__PSOC_FSYS1__CLK_FSYS1_S0,
+ GAT_FSYS1_PCIE_LINK0_IPCLKPORT_DBI_ACLK,
+ GAT_FSYS1_PCIE_LINK0_IPCLKPORT_I_APB_CLK,
+ GAT_FSYS1_PCIE_LINK0_IPCLKPORT_I_DRIVER_APB_CLK,
+ GAT_FSYS1_PCIE_LINK0_IPCLKPORT_MSTR_ACLK,
+ GAT_FSYS1_PCIE_LINK0_IPCLKPORT_SLV_ACLK,
+ GAT_FSYS1_PCIE_LINK1_IPCLKPORT_DBI_ACLK,
+ GAT_FSYS1_PCIE_LINK1_IPCLKPORT_I_DRIVER_APB_CLK,
+ GAT_FSYS1_PCIE_LINK1_IPCLKPORT_MSTR_ACLK,
+ GAT_FSYS1_PCIE_LINK1_IPCLKPORT_SLV_ACLK,
+ GAT_FSYS1_PCIE_PHY0_IPCLKPORT_I_APB_CLK,
+ GAT_FSYS1_PCIE_PHY0_IPCLKPORT_I_REF_SOC_PLL,
+ GAT_FSYS1_SYSREG_FSYS1_IPCLKPORT_PCLK,
+ GAT_FSYS1_TBU0_FSYS1_IPCLKPORT_ACLK,
+};
+
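+/* 100 MHz reference clocks for the two PCIe PHYs in FSYS1 */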
+static const struct samsung_fixed_rate_clock fsys1_fixed_clks[] __initconst = {
+ FRATE(0, "clk_fsys1_phy0_ref", NULL, 0, 100000000),
+ FRATE(0, "clk_fsys1_phy1_ref", NULL, 0, 100000000),
+};
+
+/* List of parent clocks for Muxes in CMU_FSYS1 */
+PNAME(mout_fsys1_pclkl_fsys1_busp_mux_p) = { "fin_pll", "dout_cmu_fsys1_shared0div8" };
+PNAME(mout_fsys1_aclk_fsys1_busp_mux_p) = { "fin_pll", "dout_cmu_fsys1_shared0div4" };
+
+static const struct samsung_mux_clock fsys1_mux_clks[] __initconst = {
+ MUX(0, "mout_fsys1_pclkl_fsys1_busp_mux", mout_fsys1_pclkl_fsys1_busp_mux_p,
+ PLL_CON0_PCLKL_FSYS1_BUSP_MUX, 4, 1),
+ MUX(0, "mout_fsys1_aclk_fsys1_busp_mux", mout_fsys1_aclk_fsys1_busp_mux_p,
+ PLL_CON0_ACLK_FSYS1_BUSP_MUX, 4, 1),
+};
+
+static const struct samsung_div_clock fsys1_div_clks[] __initconst = {
+ DIV(0, "dout_fsys1_clk_fsys1_phy0_oscclk", "fsys1_phy0_osccllk",
+ DIV_CLK_FSYS1_PHY0_OSCCLK, 0, 4),
+ DIV(0, "dout_fsys1_clk_fsys1_phy1_oscclk", "fsys1_phy1_oscclk",
+ DIV_CLK_FSYS1_PHY1_OSCCLK, 0, 4),
+};
+
+static const struct samsung_gate_clock fsys1_gate_clks[] __initconst = {
+ GATE(0, "fsys1_cmu_fsys1_ipclkport_pclk", "mout_fsys1_pclkl_fsys1_busp_mux",
+ GAT_FSYS1_CMU_FSYS1_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys1_pcie_phy0_ipclkport_i_ref_xtal", "clk_fsys1_phy0_ref",
+ GAT_FSYS1_PCIE_PHY0_IPCLKPORT_I_REF_XTAL, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys1_phy0_osccllk", "mout_fsys1_aclk_fsys1_busp_mux",
+ GAT_FSYS1_PHY0_OSCCLLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys1_phy1_oscclk", "mout_fsys1_aclk_fsys1_busp_mux",
+ GAT_FSYS1_PHY1_OSCCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys1_axi2apb_fsys1_ipclkport_aclk", "mout_fsys1_pclkl_fsys1_busp_mux",
+ GAT_FSYS1_AXI2APB_FSYS1_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys1_bus_d0_fsys1_ipclkport_mainclk", "mout_fsys1_aclk_fsys1_busp_mux",
+ GAT_FSYS1_BUS_D0_FSYS1_IPCLKPORT_MAINCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys1_bus_s0_fsys1_ipclkport_m250clk", "mout_fsys1_pclkl_fsys1_busp_mux",
+ GAT_FSYS1_BUS_S0_FSYS1_IPCLKPORT_M250CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys1_bus_s0_fsys1_ipclkport_mainclk", "mout_fsys1_aclk_fsys1_busp_mux",
+ GAT_FSYS1_BUS_S0_FSYS1_IPCLKPORT_MAINCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys1_cpe425_0_fsys1_ipclkport_aclk", "mout_fsys1_aclk_fsys1_busp_mux",
+ GAT_FSYS1_CPE425_0_FSYS1_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys1_ns_brdg_fsys1_ipclkport_clk__psoc_fsys1__clk_fsys1_d0",
+ "mout_fsys1_aclk_fsys1_busp_mux",
+ GAT_FSYS1_NS_BRDG_FSYS1_IPCLKPORT_CLK__PSOC_FSYS1__CLK_FSYS1_D0, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys1_ns_brdg_fsys1_ipclkport_clk__psoc_fsys1__clk_fsys1_s0",
+ "mout_fsys1_aclk_fsys1_busp_mux",
+ GAT_FSYS1_NS_BRDG_FSYS1_IPCLKPORT_CLK__PSOC_FSYS1__CLK_FSYS1_S0, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(PCIE_LINK0_IPCLKPORT_DBI_ACLK, "fsys1_pcie_link0_ipclkport_dbi_aclk",
+ "mout_fsys1_aclk_fsys1_busp_mux", GAT_FSYS1_PCIE_LINK0_IPCLKPORT_DBI_ACLK, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys1_pcie_link0_ipclkport_i_apb_clk", "mout_fsys1_pclkl_fsys1_busp_mux",
+ GAT_FSYS1_PCIE_LINK0_IPCLKPORT_I_APB_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys1_pcie_link0_ipclkport_i_soc_ref_clk", "fin_pll",
+ GAT_FSYS1_PCIE_LINK0_IPCLKPORT_I_SOC_REF_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys1_pcie_link0_ipclkport_i_driver_apb_clk", "mout_fsys1_pclkl_fsys1_busp_mux",
+ GAT_FSYS1_PCIE_LINK0_IPCLKPORT_I_DRIVER_APB_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PCIE_LINK0_IPCLKPORT_MSTR_ACLK, "fsys1_pcie_link0_ipclkport_mstr_aclk",
+ "mout_fsys1_aclk_fsys1_busp_mux", GAT_FSYS1_PCIE_LINK0_IPCLKPORT_MSTR_ACLK, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(PCIE_LINK0_IPCLKPORT_SLV_ACLK, "fsys1_pcie_link0_ipclkport_slv_aclk",
+ "mout_fsys1_aclk_fsys1_busp_mux", GAT_FSYS1_PCIE_LINK0_IPCLKPORT_SLV_ACLK, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(PCIE_LINK1_IPCLKPORT_DBI_ACLK, "fsys1_pcie_link1_ipclkport_dbi_aclk",
+ "mout_fsys1_aclk_fsys1_busp_mux", GAT_FSYS1_PCIE_LINK1_IPCLKPORT_DBI_ACLK, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys1_pcie_link1_ipclkport_i_driver_apb_clk", "mout_fsys1_pclkl_fsys1_busp_mux",
+ GAT_FSYS1_PCIE_LINK1_IPCLKPORT_I_DRIVER_APB_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PCIE_LINK1_IPCLKPORT_MSTR_ACLK, "fsys1_pcie_link1_ipclkport_mstr_aclk",
+ "mout_fsys1_aclk_fsys1_busp_mux", GAT_FSYS1_PCIE_LINK1_IPCLKPORT_MSTR_ACLK, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(PCIE_LINK1_IPCLKPORT_SLV_ACLK, "fsys1_pcie_link1_ipclkport_slv_aclk",
+ "mout_fsys1_aclk_fsys1_busp_mux", GAT_FSYS1_PCIE_LINK1_IPCLKPORT_SLV_ACLK, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys1_pcie_phy0_ipclkport_i_apb_clk", "mout_fsys1_pclkl_fsys1_busp_mux",
+ GAT_FSYS1_PCIE_PHY0_IPCLKPORT_I_APB_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PCIE_LINK0_IPCLKPORT_AUX_ACLK, "fsys1_pcie_link0_ipclkport_auxclk", "fin_pll",
+ GAT_FSYS1_PCIE_LINK0_IPCLKPORT_AUXCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(PCIE_LINK1_IPCLKPORT_AUX_ACLK, "fsys1_pcie_link1_ipclkport_auxclk", "fin_pll",
+ GAT_FSYS1_PCIE_LINK1_IPCLKPORT_AUXCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys1_pcie_phy0_ipclkport_i_ref_soc_pll", "dout_fsys1_clk_fsys1_phy0_oscclk",
+ GAT_FSYS1_PCIE_PHY0_IPCLKPORT_I_REF_SOC_PLL, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys1_sysreg_fsys1_ipclkport_pclk", "mout_fsys1_pclkl_fsys1_busp_mux",
+ GAT_FSYS1_SYSREG_FSYS1_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "fsys1_tbu0_fsys1_ipclkport_aclk", "mout_fsys1_aclk_fsys1_busp_mux",
+ GAT_FSYS1_TBU0_FSYS1_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_cmu_info fsys1_cmu_info __initconst = {
+ .mux_clks = fsys1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(fsys1_mux_clks),
+ .div_clks = fsys1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(fsys1_div_clks),
+ .gate_clks = fsys1_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(fsys1_gate_clks),
+ .fixed_clks = fsys1_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(fsys1_fixed_clks),
+ .nr_clk_ids = FSYS1_NR_CLK,
+ .clk_regs = fsys1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(fsys1_clk_regs),
+ .clk_name = "dout_cmu_fsys1_shared0div4",
+};
+
+/* Register Offset definitions for CMU_IMEM (0x10010000) */
+#define PLL_CON0_CLK_IMEM_ACLK 0x100
+#define PLL_CON0_CLK_IMEM_INTMEMCLK 0x120
+#define PLL_CON0_CLK_IMEM_TCUCLK 0x140
+#define DIV_OSCCLK_IMEM_TMUTSCLK 0x1800
+#define GAT_IMEM_IMEM_CMU_IMEM_IPCLKPORT_PCLK 0x2000
+#define GAT_IMEM_MCT_IPCLKPORT_OSCCLK__ALO 0x2004
+#define GAT_IMEM_OTP_CON_TOP_IPCLKPORT_I_OSCCLK 0x2008
+#define GAT_IMEM_RSTNSYNC_OSCCLK_IPCLKPORT_CLK 0x200c
+#define GAT_IMEM_TMU_CPU0_IPCLKPORT_I_CLK 0x2010
+#define GAT_IMEM_TMU_CPU0_IPCLKPORT_I_CLK_TS 0x2014
+#define GAT_IMEM_TMU_CPU2_IPCLKPORT_I_CLK 0x2018
+#define GAT_IMEM_TMU_CPU2_IPCLKPORT_I_CLK_TS 0x201c
+#define GAT_IMEM_TMU_GPU_IPCLKPORT_I_CLK 0x2020
+#define GAT_IMEM_TMU_GPU_IPCLKPORT_I_CLK_TS 0x2024
+#define GAT_IMEM_TMU_GT_IPCLKPORT_I_CLK 0x2028
+#define GAT_IMEM_TMU_GT_IPCLKPORT_I_CLK_TS 0x202c
+#define GAT_IMEM_TMU_TOP_IPCLKPORT_I_CLK 0x2030
+#define GAT_IMEM_TMU_TOP_IPCLKPORT_I_CLK_TS 0x2034
+#define GAT_IMEM_WDT0_IPCLKPORT_CLK 0x2038
+#define GAT_IMEM_WDT1_IPCLKPORT_CLK 0x203c
+#define GAT_IMEM_WDT2_IPCLKPORT_CLK 0x2040
+#define GAT_IMEM_ADM_AXI4ST_I0_IMEM_IPCLKPORT_ACLKM 0x2044
+#define GAT_IMEM_ADM_AXI4ST_I1_IMEM_IPCLKPORT_ACLKM 0x2048
+#define GAT_IMEM_ADM_AXI4ST_I2_IMEM_IPCLKPORT_ACLKM 0x204c
+#define GAT_IMEM_ADS_AXI4ST_I0_IMEM_IPCLKPORT_ACLKS 0x2050
+#define GAT_IMEM_ADS_AXI4ST_I1_IMEM_IPCLKPORT_ACLKS 0x2054
+#define GAT_IMEM_ADS_AXI4ST_I2_IMEM_IPCLKPORT_ACLKS 0x2058
+#define GAT_IMEM_ASYNC_DMA0_IPCLKPORT_PCLKM 0x205c
+#define GAT_IMEM_ASYNC_DMA0_IPCLKPORT_PCLKS 0x2060
+#define GAT_IMEM_ASYNC_DMA1_IPCLKPORT_PCLKM 0x2064
+#define GAT_IMEM_ASYNC_DMA1_IPCLKPORT_PCLKS 0x2068
+#define GAT_IMEM_AXI2APB_IMEMP0_IPCLKPORT_ACLK 0x206c
+#define GAT_IMEM_AXI2APB_IMEMP1_IPCLKPORT_ACLK 0x2070
+#define GAT_IMEM_BUS_D_IMEM_IPCLKPORT_MAINCLK 0x2074
+#define GAT_IMEM_BUS_P_IMEM_IPCLKPORT_MAINCLK 0x2078
+#define GAT_IMEM_BUS_P_IMEM_IPCLKPORT_PERICLK 0x207c
+#define GAT_IMEM_BUS_P_IMEM_IPCLKPORT_TCUCLK 0x2080
+#define GAT_IMEM_DMA0_IPCLKPORT_ACLK 0x2084
+#define GAT_IMEM_DMA1_IPCLKPORT_ACLK 0x2088
+#define GAT_IMEM_GIC500_INPUT_SYNC_IPCLKPORT_CLK 0x208c
+#define GAT_IMEM_GIC_IPCLKPORT_CLK 0x2090
+#define GAT_IMEM_INTMEM_IPCLKPORT_ACLK 0x2094
+#define GAT_IMEM_MAILBOX_SCS_CA72_IPCLKPORT_PCLK 0x2098
+#define GAT_IMEM_MAILBOX_SMS_CA72_IPCLKPORT_PCLK 0x209c
+#define GAT_IMEM_MCT_IPCLKPORT_PCLK 0x20a0
+#define GAT_IMEM_NS_BRDG_IMEM_IPCLKPORT_CLK__PSCO_IMEM__CLK_IMEM_D 0x20a4
+#define GAT_IMEM_NS_BRDG_IMEM_IPCLKPORT_CLK__PSCO_IMEM__CLK_IMEM_TCU 0x20a8
+#define GAT_IMEM_NS_BRDG_IMEM_IPCLKPORT_CLK__PSOC_IMEM__CLK_IMEM_P 0x20ac
+#define GAT_IMEM_OTP_CON_TOP_IPCLKPORT_PCLK 0x20b0
+#define GAT_IMEM_RSTNSYNC_ACLK_IPCLKPORT_CLK 0x20b4
+#define GAT_IMEM_RSTNSYNC_INTMEMCLK_IPCLKPORT_CLK 0x20b8
+#define GAT_IMEM_RSTNSYNC_TCUCLK_IPCLKPORT_CLK 0x20bc
+#define GAT_IMEM_SFRIF_TMU0_IMEM_IPCLKPORT_PCLK 0x20c0
+#define GAT_IMEM_SFRIF_TMU1_IMEM_IPCLKPORT_PCLK 0x20c4
+#define GAT_IMEM_SYSREG_IMEM_IPCLKPORT_PCLK 0x20c8
+#define GAT_IMEM_TBU_IMEM_IPCLKPORT_ACLK 0x20cc
+#define GAT_IMEM_TCU_IPCLKPORT_ACLK 0x20d0
+#define GAT_IMEM_WDT0_IPCLKPORT_PCLK 0x20d4
+#define GAT_IMEM_WDT1_IPCLKPORT_PCLK 0x20d8
+#define GAT_IMEM_WDT2_IPCLKPORT_PCLK 0x20dc
+
+static const unsigned long imem_clk_regs[] __initconst = {
+ PLL_CON0_CLK_IMEM_ACLK,
+ PLL_CON0_CLK_IMEM_INTMEMCLK,
+ PLL_CON0_CLK_IMEM_TCUCLK,
+ DIV_OSCCLK_IMEM_TMUTSCLK,
+ GAT_IMEM_IMEM_CMU_IMEM_IPCLKPORT_PCLK,
+ GAT_IMEM_MCT_IPCLKPORT_OSCCLK__ALO,
+ GAT_IMEM_OTP_CON_TOP_IPCLKPORT_I_OSCCLK,
+ GAT_IMEM_RSTNSYNC_OSCCLK_IPCLKPORT_CLK,
+ GAT_IMEM_TMU_CPU0_IPCLKPORT_I_CLK,
+ GAT_IMEM_TMU_CPU0_IPCLKPORT_I_CLK_TS,
+ GAT_IMEM_TMU_CPU2_IPCLKPORT_I_CLK,
+ GAT_IMEM_TMU_CPU2_IPCLKPORT_I_CLK_TS,
+ GAT_IMEM_TMU_GPU_IPCLKPORT_I_CLK,
+ GAT_IMEM_TMU_GPU_IPCLKPORT_I_CLK_TS,
+ GAT_IMEM_TMU_GT_IPCLKPORT_I_CLK,
+ GAT_IMEM_TMU_GT_IPCLKPORT_I_CLK_TS,
+ GAT_IMEM_TMU_TOP_IPCLKPORT_I_CLK,
+ GAT_IMEM_TMU_TOP_IPCLKPORT_I_CLK_TS,
+ GAT_IMEM_WDT0_IPCLKPORT_CLK,
+ GAT_IMEM_WDT1_IPCLKPORT_CLK,
+ GAT_IMEM_WDT2_IPCLKPORT_CLK,
+ GAT_IMEM_ADM_AXI4ST_I0_IMEM_IPCLKPORT_ACLKM,
+ GAT_IMEM_ADM_AXI4ST_I1_IMEM_IPCLKPORT_ACLKM,
+ GAT_IMEM_ADM_AXI4ST_I2_IMEM_IPCLKPORT_ACLKM,
+ GAT_IMEM_ADS_AXI4ST_I0_IMEM_IPCLKPORT_ACLKS,
+ GAT_IMEM_ADS_AXI4ST_I1_IMEM_IPCLKPORT_ACLKS,
+ GAT_IMEM_ADS_AXI4ST_I2_IMEM_IPCLKPORT_ACLKS,
+ GAT_IMEM_ASYNC_DMA0_IPCLKPORT_PCLKM,
+ GAT_IMEM_ASYNC_DMA0_IPCLKPORT_PCLKS,
+ GAT_IMEM_ASYNC_DMA1_IPCLKPORT_PCLKM,
+ GAT_IMEM_ASYNC_DMA1_IPCLKPORT_PCLKS,
+ GAT_IMEM_AXI2APB_IMEMP0_IPCLKPORT_ACLK,
+ GAT_IMEM_AXI2APB_IMEMP1_IPCLKPORT_ACLK,
+ GAT_IMEM_BUS_D_IMEM_IPCLKPORT_MAINCLK,
+ GAT_IMEM_BUS_P_IMEM_IPCLKPORT_MAINCLK,
+ GAT_IMEM_BUS_P_IMEM_IPCLKPORT_PERICLK,
+ GAT_IMEM_BUS_P_IMEM_IPCLKPORT_TCUCLK,
+ GAT_IMEM_DMA0_IPCLKPORT_ACLK,
+ GAT_IMEM_DMA1_IPCLKPORT_ACLK,
+ GAT_IMEM_GIC500_INPUT_SYNC_IPCLKPORT_CLK,
+ GAT_IMEM_GIC_IPCLKPORT_CLK,
+ GAT_IMEM_INTMEM_IPCLKPORT_ACLK,
+ GAT_IMEM_MAILBOX_SCS_CA72_IPCLKPORT_PCLK,
+ GAT_IMEM_MAILBOX_SMS_CA72_IPCLKPORT_PCLK,
+ GAT_IMEM_MCT_IPCLKPORT_PCLK,
+ GAT_IMEM_NS_BRDG_IMEM_IPCLKPORT_CLK__PSCO_IMEM__CLK_IMEM_D,
+ GAT_IMEM_NS_BRDG_IMEM_IPCLKPORT_CLK__PSCO_IMEM__CLK_IMEM_TCU,
+ GAT_IMEM_NS_BRDG_IMEM_IPCLKPORT_CLK__PSOC_IMEM__CLK_IMEM_P,
+ GAT_IMEM_OTP_CON_TOP_IPCLKPORT_PCLK,
+ GAT_IMEM_RSTNSYNC_ACLK_IPCLKPORT_CLK,
+ GAT_IMEM_RSTNSYNC_INTMEMCLK_IPCLKPORT_CLK,
+ GAT_IMEM_RSTNSYNC_TCUCLK_IPCLKPORT_CLK,
+ GAT_IMEM_SFRIF_TMU0_IMEM_IPCLKPORT_PCLK,
+ GAT_IMEM_SFRIF_TMU1_IMEM_IPCLKPORT_PCLK,
+ GAT_IMEM_SYSREG_IMEM_IPCLKPORT_PCLK,
+ GAT_IMEM_TBU_IMEM_IPCLKPORT_ACLK,
+ GAT_IMEM_TCU_IPCLKPORT_ACLK,
+ GAT_IMEM_WDT0_IPCLKPORT_PCLK,
+ GAT_IMEM_WDT1_IPCLKPORT_PCLK,
+ GAT_IMEM_WDT2_IPCLKPORT_PCLK,
+};
+
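+/* List of parent clocks for Muxes in CMU_IMEM */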
+PNAME(mout_imem_clk_imem_tcuclk_p) = { "fin_pll", "dout_cmu_imem_tcuclk" };
+PNAME(mout_imem_clk_imem_aclk_p) = { "fin_pll", "dout_cmu_imem_aclk" };
+PNAME(mout_imem_clk_imem_intmemclk_p) = { "fin_pll", "dout_cmu_imem_dmaclk" };
+
+static const struct samsung_mux_clock imem_mux_clks[] __initconst = {
+ MUX(0, "mout_imem_clk_imem_tcuclk", mout_imem_clk_imem_tcuclk_p,
+ PLL_CON0_CLK_IMEM_TCUCLK, 4, 1),
+ MUX(0, "mout_imem_clk_imem_aclk", mout_imem_clk_imem_aclk_p, PLL_CON0_CLK_IMEM_ACLK, 4, 1),
+ MUX(0, "mout_imem_clk_imem_intmemclk", mout_imem_clk_imem_intmemclk_p,
+ PLL_CON0_CLK_IMEM_INTMEMCLK, 4, 1),
+};
+
+static const struct samsung_div_clock imem_div_clks[] __initconst = {
+ DIV(0, "dout_imem_oscclk_imem_tmutsclk", "fin_pll", DIV_OSCCLK_IMEM_TMUTSCLK, 0, 4),
+};
+
+static const struct samsung_gate_clock imem_gate_clks[] __initconst = {
+ GATE(0, "imem_imem_cmu_imem_ipclkport_pclk", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_IMEM_CMU_IMEM_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_otp_con_top_ipclkport_i_oscclk", "fin_pll",
+ GAT_IMEM_OTP_CON_TOP_IPCLKPORT_I_OSCCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_tmu_top_ipclkport_i_clk", "fin_pll",
+ GAT_IMEM_TMU_TOP_IPCLKPORT_I_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_tmu_gt_ipclkport_i_clk", "fin_pll",
+ GAT_IMEM_TMU_GT_IPCLKPORT_I_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_tmu_cpu0_ipclkport_i_clk", "fin_pll",
+ GAT_IMEM_TMU_CPU0_IPCLKPORT_I_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_tmu_gpu_ipclkport_i_clk", "fin_pll",
+ GAT_IMEM_TMU_GPU_IPCLKPORT_I_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_mct_ipclkport_oscclk__alo", "fin_pll",
+ GAT_IMEM_MCT_IPCLKPORT_OSCCLK__ALO, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_wdt0_ipclkport_clk", "fin_pll",
+ GAT_IMEM_WDT0_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_wdt1_ipclkport_clk", "fin_pll",
+ GAT_IMEM_WDT1_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_wdt2_ipclkport_clk", "fin_pll",
+ GAT_IMEM_WDT2_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(IMEM_TMU_CPU0_IPCLKPORT_I_CLK_TS, "imem_tmu_cpu0_ipclkport_i_clk_ts",
+ "dout_imem_oscclk_imem_tmutsclk",
+ GAT_IMEM_TMU_CPU0_IPCLKPORT_I_CLK_TS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(IMEM_TMU_CPU2_IPCLKPORT_I_CLK_TS, "imem_tmu_cpu2_ipclkport_i_clk_ts",
+ "dout_imem_oscclk_imem_tmutsclk",
+ GAT_IMEM_TMU_CPU2_IPCLKPORT_I_CLK_TS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(IMEM_TMU_GPU_IPCLKPORT_I_CLK_TS, "imem_tmu_gpu_ipclkport_i_clk_ts",
+ "dout_imem_oscclk_imem_tmutsclk",
+ GAT_IMEM_TMU_GPU_IPCLKPORT_I_CLK_TS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(IMEM_TMU_GT_IPCLKPORT_I_CLK_TS, "imem_tmu_gt_ipclkport_i_clk_ts",
+ "dout_imem_oscclk_imem_tmutsclk",
+ GAT_IMEM_TMU_GT_IPCLKPORT_I_CLK_TS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(IMEM_TMU_TOP_IPCLKPORT_I_CLK_TS, "imem_tmu_top_ipclkport_i_clk_ts",
+ "dout_imem_oscclk_imem_tmutsclk",
+ GAT_IMEM_TMU_TOP_IPCLKPORT_I_CLK_TS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_adm_axi4st_i0_imem_ipclkport_aclkm", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_ADM_AXI4ST_I0_IMEM_IPCLKPORT_ACLKM, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_adm_axi4st_i1_imem_ipclkport_aclkm", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_ADM_AXI4ST_I1_IMEM_IPCLKPORT_ACLKM, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_adm_axi4st_i2_imem_ipclkport_aclkm", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_ADM_AXI4ST_I2_IMEM_IPCLKPORT_ACLKM, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_ads_axi4st_i0_imem_ipclkport_aclks", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_ADS_AXI4ST_I0_IMEM_IPCLKPORT_ACLKS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_ads_axi4st_i1_imem_ipclkport_aclks", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_ADS_AXI4ST_I1_IMEM_IPCLKPORT_ACLKS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_ads_axi4st_i2_imem_ipclkport_aclks", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_ADS_AXI4ST_I2_IMEM_IPCLKPORT_ACLKS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_async_dma0_ipclkport_pclkm", "mout_imem_clk_imem_tcuclk",
+ GAT_IMEM_ASYNC_DMA0_IPCLKPORT_PCLKM, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_async_dma0_ipclkport_pclks", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_ASYNC_DMA0_IPCLKPORT_PCLKS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_async_dma1_ipclkport_pclkm", "mout_imem_clk_imem_tcuclk",
+ GAT_IMEM_ASYNC_DMA1_IPCLKPORT_PCLKM, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_async_dma1_ipclkport_pclks", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_ASYNC_DMA1_IPCLKPORT_PCLKS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_axi2apb_imemp0_ipclkport_aclk", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_AXI2APB_IMEMP0_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_axi2apb_imemp1_ipclkport_aclk", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_AXI2APB_IMEMP1_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_bus_d_imem_ipclkport_mainclk", "mout_imem_clk_imem_tcuclk",
+ GAT_IMEM_BUS_D_IMEM_IPCLKPORT_MAINCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_bus_p_imem_ipclkport_mainclk", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_BUS_P_IMEM_IPCLKPORT_MAINCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_bus_p_imem_ipclkport_pericclk", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_BUS_P_IMEM_IPCLKPORT_PERICLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_bus_p_imem_ipclkport_tcuclk", "mout_imem_clk_imem_tcuclk",
+ GAT_IMEM_BUS_P_IMEM_IPCLKPORT_TCUCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(IMEM_DMA0_IPCLKPORT_ACLK, "imem_dma0_ipclkport_aclk", "mout_imem_clk_imem_tcuclk",
+ GAT_IMEM_DMA0_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0),
+ GATE(IMEM_DMA1_IPCLKPORT_ACLK, "imem_dma1_ipclkport_aclk", "mout_imem_clk_imem_tcuclk",
+ GAT_IMEM_DMA1_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0),
+ GATE(0, "imem_gic500_input_sync_ipclkport_clk", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_GIC500_INPUT_SYNC_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_gic_ipclkport_clk", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_GIC_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_intmem_ipclkport_aclk", "mout_imem_clk_imem_intmemclk",
+ GAT_IMEM_INTMEM_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_mailbox_scs_ca72_ipclkport_pclk", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_MAILBOX_SCS_CA72_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_mailbox_sms_ca72_ipclkport_pclk", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_MAILBOX_SMS_CA72_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(IMEM_MCT_PCLK, "imem_mct_ipclkport_pclk", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_MCT_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_ns_brdg_imem_ipclkport_clk__psco_imem__clk_imem_d",
+ "mout_imem_clk_imem_tcuclk",
+ GAT_IMEM_NS_BRDG_IMEM_IPCLKPORT_CLK__PSCO_IMEM__CLK_IMEM_D, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_ns_brdg_imem_ipclkport_clk__psco_imem__clk_imem_tcu",
+ "mout_imem_clk_imem_tcuclk",
+ GAT_IMEM_NS_BRDG_IMEM_IPCLKPORT_CLK__PSCO_IMEM__CLK_IMEM_TCU, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_ns_brdg_imem_ipclkport_clk__psoc_imem__clk_imem_p", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_NS_BRDG_IMEM_IPCLKPORT_CLK__PSOC_IMEM__CLK_IMEM_P, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_otp_con_top_ipclkport_pclk", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_OTP_CON_TOP_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_rstnsync_aclk_ipclkport_clk", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_RSTNSYNC_ACLK_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_rstnsync_oscclk_ipclkport_clk", "fin_pll",
+ GAT_IMEM_RSTNSYNC_OSCCLK_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_rstnsync_intmemclk_ipclkport_clk", "mout_imem_clk_imem_intmemclk",
+ GAT_IMEM_RSTNSYNC_INTMEMCLK_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_rstnsync_tcuclk_ipclkport_clk", "mout_imem_clk_imem_tcuclk",
+ GAT_IMEM_RSTNSYNC_TCUCLK_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_sfrif_tmu0_imem_ipclkport_pclk", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_SFRIF_TMU0_IMEM_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_sfrif_tmu1_imem_ipclkport_pclk", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_SFRIF_TMU1_IMEM_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_tmu_cpu2_ipclkport_i_clk", "fin_pll",
+ GAT_IMEM_TMU_CPU2_IPCLKPORT_I_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_sysreg_imem_ipclkport_pclk", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_SYSREG_IMEM_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_tbu_imem_ipclkport_aclk", "mout_imem_clk_imem_tcuclk",
+ GAT_IMEM_TBU_IMEM_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "imem_tcu_ipclkport_aclk", "mout_imem_clk_imem_tcuclk",
+ GAT_IMEM_TCU_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(IMEM_WDT0_IPCLKPORT_PCLK, "imem_wdt0_ipclkport_pclk", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_WDT0_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(IMEM_WDT1_IPCLKPORT_PCLK, "imem_wdt1_ipclkport_pclk", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_WDT1_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(IMEM_WDT2_IPCLKPORT_PCLK, "imem_wdt2_ipclkport_pclk", "mout_imem_clk_imem_aclk",
+ GAT_IMEM_WDT2_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_cmu_info imem_cmu_info __initconst = {
+ .mux_clks = imem_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(imem_mux_clks),
+ .div_clks = imem_div_clks,
+ .nr_div_clks = ARRAY_SIZE(imem_div_clks),
+ .gate_clks = imem_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(imem_gate_clks),
+ .nr_clk_ids = IMEM_NR_CLK,
+ .clk_regs = imem_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(imem_clk_regs),
+};
+
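+/*
+ * CMU_IMEM clocks the MCT timer and the watchdogs; CLK_OF_DECLARE
+ * registers it at of_clk_init() time, before platform devices probe.
+ */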
+static void __init fsd_clk_imem_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &imem_cmu_info);
+}
+
+CLK_OF_DECLARE(fsd_clk_imem, "tesla,fsd-clock-imem", fsd_clk_imem_init);
+
+/* Register Offset definitions for CMU_MFC (0x12810000) */
+#define PLL_LOCKTIME_PLL_MFC 0x0
+#define PLL_CON0_PLL_MFC 0x100
+#define MUX_MFC_BUSD 0x1000
+#define MUX_MFC_BUSP 0x1008
+#define DIV_MFC_BUSD_DIV4 0x1800
+#define GAT_MFC_CMU_MFC_IPCLKPORT_PCLK 0x2000
+#define GAT_MFC_AS_P_MFC_IPCLKPORT_PCLKM 0x2004
+#define GAT_MFC_AS_P_MFC_IPCLKPORT_PCLKS 0x2008
+#define GAT_MFC_AXI2APB_MFC_IPCLKPORT_ACLK 0x200c
+#define GAT_MFC_MFC_IPCLKPORT_ACLK 0x2010
+#define GAT_MFC_NS_BRDG_MFC_IPCLKPORT_CLK__PMFC__CLK_MFC_D 0x2018
+#define GAT_MFC_NS_BRDG_MFC_IPCLKPORT_CLK__PMFC__CLK_MFC_P 0x201c
+#define GAT_MFC_PPMU_MFCD0_IPCLKPORT_ACLK 0x2028
+#define GAT_MFC_PPMU_MFCD0_IPCLKPORT_PCLK 0x202c
+#define GAT_MFC_PPMU_MFCD1_IPCLKPORT_ACLK 0x2030
+#define GAT_MFC_PPMU_MFCD1_IPCLKPORT_PCLK 0x2034
+#define GAT_MFC_SYSREG_MFC_IPCLKPORT_PCLK 0x2038
+#define GAT_MFC_TBU_MFCD0_IPCLKPORT_CLK 0x203c
+#define GAT_MFC_TBU_MFCD1_IPCLKPORT_CLK 0x2040
+#define GAT_MFC_BUSD_DIV4_GATE 0x2044
+#define GAT_MFC_BUSD_GATE 0x2048
+
+static const unsigned long mfc_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_MFC,
+ PLL_CON0_PLL_MFC,
+ MUX_MFC_BUSD,
+ MUX_MFC_BUSP,
+ DIV_MFC_BUSD_DIV4,
+ GAT_MFC_CMU_MFC_IPCLKPORT_PCLK,
+ GAT_MFC_AS_P_MFC_IPCLKPORT_PCLKM,
+ GAT_MFC_AS_P_MFC_IPCLKPORT_PCLKS,
+ GAT_MFC_AXI2APB_MFC_IPCLKPORT_ACLK,
+ GAT_MFC_MFC_IPCLKPORT_ACLK,
+ GAT_MFC_NS_BRDG_MFC_IPCLKPORT_CLK__PMFC__CLK_MFC_D,
+ GAT_MFC_NS_BRDG_MFC_IPCLKPORT_CLK__PMFC__CLK_MFC_P,
+ GAT_MFC_PPMU_MFCD0_IPCLKPORT_ACLK,
+ GAT_MFC_PPMU_MFCD0_IPCLKPORT_PCLK,
+ GAT_MFC_PPMU_MFCD1_IPCLKPORT_ACLK,
+ GAT_MFC_PPMU_MFCD1_IPCLKPORT_PCLK,
+ GAT_MFC_SYSREG_MFC_IPCLKPORT_PCLK,
+ GAT_MFC_TBU_MFCD0_IPCLKPORT_CLK,
+ GAT_MFC_TBU_MFCD1_IPCLKPORT_CLK,
+ GAT_MFC_BUSD_DIV4_GATE,
+ GAT_MFC_BUSD_GATE,
+};
+
+static const struct samsung_pll_rate_table pll_mfc_rate_table[] __initconst = {
+ PLL_35XX_RATE(24 * MHZ, 666000000U, 111, 4, 0),
+};
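pll_142xx is wired up through the PLL35xx handling (see the clk-pll.c hunk further below), whose output rate is conventionally Fout = Fin * MDIV / (PDIV * 2^SDIV). A minimal userspace sketch checking the rate-table entries against that assumed formula — the helper and formula are illustrative, not part of the patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed PLL35xx-style derivation: Fout = Fin * m / (p * 2^s) */
    static uint64_t pll35xx_rate(uint64_t fin, unsigned int m, unsigned int p,
                                 unsigned int s)
    {
            return fin * m / ((uint64_t)p << s);
    }

    int main(void)
    {
            /* pll_mfc_rate_table: 24 MHz, m=111, p=4, s=0 -> 666000000 */
            printf("%llu\n", (unsigned long long)pll35xx_rate(24000000, 111, 4, 0));
            /* pll_cam_csi_rate_table below: m=533, p=12, s=0 -> 1066000000 */
            printf("%llu\n", (unsigned long long)pll35xx_rate(24000000, 533, 12, 0));
            return 0;
    }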
+
+static const struct samsung_pll_clock mfc_pll_clks[] __initconst = {
+ PLL(pll_142xx, 0, "fout_pll_mfc", "fin_pll",
+ PLL_LOCKTIME_PLL_MFC, PLL_CON0_PLL_MFC, pll_mfc_rate_table),
+};
+
+PNAME(mout_mfc_pll_p) = { "fin_pll", "fout_pll_mfc" };
+PNAME(mout_mfc_busp_p) = { "fin_pll", "dout_mfc_busd_div4" };
+PNAME(mout_mfc_busd_p) = { "fin_pll", "mfc_busd_gate" };
+
+static const struct samsung_mux_clock mfc_mux_clks[] __initconst = {
+ MUX(0, "mout_mfc_pll", mout_mfc_pll_p, PLL_CON0_PLL_MFC, 4, 1),
+ MUX(0, "mout_mfc_busp", mout_mfc_busp_p, MUX_MFC_BUSP, 0, 1),
+ MUX(0, "mout_mfc_busd", mout_mfc_busd_p, MUX_MFC_BUSD, 0, 1),
+};
+
+static const struct samsung_div_clock mfc_div_clks[] __initconst = {
+ DIV(0, "dout_mfc_busd_div4", "mfc_busd_div4_gate", DIV_MFC_BUSD_DIV4, 0, 4),
+};
+
+static const struct samsung_gate_clock mfc_gate_clks[] __initconst = {
+ GATE(0, "mfc_cmu_mfc_ipclkport_pclk", "mout_mfc_busp",
+ GAT_MFC_CMU_MFC_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "mfc_as_p_mfc_ipclkport_pclkm", "mout_mfc_busd",
+ GAT_MFC_AS_P_MFC_IPCLKPORT_PCLKM, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "mfc_as_p_mfc_ipclkport_pclks", "mout_mfc_busp",
+ GAT_MFC_AS_P_MFC_IPCLKPORT_PCLKS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "mfc_axi2apb_mfc_ipclkport_aclk", "mout_mfc_busp",
+ GAT_MFC_AXI2APB_MFC_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(MFC_MFC_IPCLKPORT_ACLK, "mfc_mfc_ipclkport_aclk", "mout_mfc_busd",
+ GAT_MFC_MFC_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "mfc_ns_brdg_mfc_ipclkport_clk__pmfc__clk_mfc_d", "mout_mfc_busd",
+ GAT_MFC_NS_BRDG_MFC_IPCLKPORT_CLK__PMFC__CLK_MFC_D, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "mfc_ns_brdg_mfc_ipclkport_clk__pmfc__clk_mfc_p", "mout_mfc_busp",
+ GAT_MFC_NS_BRDG_MFC_IPCLKPORT_CLK__PMFC__CLK_MFC_P, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "mfc_ppmu_mfcd0_ipclkport_aclk", "mout_mfc_busd",
+ GAT_MFC_PPMU_MFCD0_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "mfc_ppmu_mfcd0_ipclkport_pclk", "mout_mfc_busp",
+ GAT_MFC_PPMU_MFCD0_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "mfc_ppmu_mfcd1_ipclkport_aclk", "mout_mfc_busd",
+ GAT_MFC_PPMU_MFCD1_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "mfc_ppmu_mfcd1_ipclkport_pclk", "mout_mfc_busp",
+ GAT_MFC_PPMU_MFCD1_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "mfc_sysreg_mfc_ipclkport_pclk", "mout_mfc_busp",
+ GAT_MFC_SYSREG_MFC_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "mfc_tbu_mfcd0_ipclkport_clk", "mout_mfc_busd",
+ GAT_MFC_TBU_MFCD0_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "mfc_tbu_mfcd1_ipclkport_clk", "mout_mfc_busd",
+ GAT_MFC_TBU_MFCD1_IPCLKPORT_CLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "mfc_busd_div4_gate", "mout_mfc_pll",
+ GAT_MFC_BUSD_DIV4_GATE, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "mfc_busd_gate", "mout_mfc_pll", GAT_MFC_BUSD_GATE, 21, CLK_IS_CRITICAL, 0),
+};
+
+static const struct samsung_cmu_info mfc_cmu_info __initconst = {
+ .pll_clks = mfc_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(mfc_pll_clks),
+ .mux_clks = mfc_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(mfc_mux_clks),
+ .div_clks = mfc_div_clks,
+ .nr_div_clks = ARRAY_SIZE(mfc_div_clks),
+ .gate_clks = mfc_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(mfc_gate_clks),
+ .nr_clk_ids = MFC_NR_CLK,
+ .clk_regs = mfc_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(mfc_clk_regs),
+};
+
+/* Register Offset definitions for CMU_CAM_CSI (0x12610000) */
+#define PLL_LOCKTIME_PLL_CAM_CSI 0x0
+#define PLL_CON0_PLL_CAM_CSI 0x100
+#define DIV_CAM_CSI0_ACLK 0x1800
+#define DIV_CAM_CSI1_ACLK 0x1804
+#define DIV_CAM_CSI2_ACLK 0x1808
+#define DIV_CAM_CSI_BUSD 0x180c
+#define DIV_CAM_CSI_BUSP 0x1810
+#define GAT_CAM_CSI_CMU_CAM_CSI_IPCLKPORT_PCLK 0x2000
+#define GAT_CAM_AXI2APB_CAM_CSI_IPCLKPORT_ACLK 0x2004
+#define GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_CSI0 0x2008
+#define GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_CSI1 0x200c
+#define GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_CSI2 0x2010
+#define GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_SOC_NOC 0x2014
+#define GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__NOC 0x2018
+#define GAT_CAM_CSI0_0_IPCLKPORT_I_ACLK 0x201c
+#define GAT_CAM_CSI0_0_IPCLKPORT_I_PCLK 0x2020
+#define GAT_CAM_CSI0_1_IPCLKPORT_I_ACLK 0x2024
+#define GAT_CAM_CSI0_1_IPCLKPORT_I_PCLK 0x2028
+#define GAT_CAM_CSI0_2_IPCLKPORT_I_ACLK 0x202c
+#define GAT_CAM_CSI0_2_IPCLKPORT_I_PCLK 0x2030
+#define GAT_CAM_CSI0_3_IPCLKPORT_I_ACLK 0x2034
+#define GAT_CAM_CSI0_3_IPCLKPORT_I_PCLK 0x2038
+#define GAT_CAM_CSI1_0_IPCLKPORT_I_ACLK 0x203c
+#define GAT_CAM_CSI1_0_IPCLKPORT_I_PCLK 0x2040
+#define GAT_CAM_CSI1_1_IPCLKPORT_I_ACLK 0x2044
+#define GAT_CAM_CSI1_1_IPCLKPORT_I_PCLK 0x2048
+#define GAT_CAM_CSI1_2_IPCLKPORT_I_ACLK 0x204c
+#define GAT_CAM_CSI1_2_IPCLKPORT_I_PCLK 0x2050
+#define GAT_CAM_CSI1_3_IPCLKPORT_I_ACLK 0x2054
+#define GAT_CAM_CSI1_3_IPCLKPORT_I_PCLK 0x2058
+#define GAT_CAM_CSI2_0_IPCLKPORT_I_ACLK 0x205c
+#define GAT_CAM_CSI2_0_IPCLKPORT_I_PCLK 0x2060
+#define GAT_CAM_CSI2_1_IPCLKPORT_I_ACLK 0x2064
+#define GAT_CAM_CSI2_1_IPCLKPORT_I_PCLK 0x2068
+#define GAT_CAM_CSI2_2_IPCLKPORT_I_ACLK 0x206c
+#define GAT_CAM_CSI2_2_IPCLKPORT_I_PCLK 0x2070
+#define GAT_CAM_CSI2_3_IPCLKPORT_I_ACLK 0x2074
+#define GAT_CAM_CSI2_3_IPCLKPORT_I_PCLK 0x2078
+#define GAT_CAM_NS_BRDG_CAM_CSI_IPCLKPORT_CLK__PSOC_CAM_CSI__CLK_CAM_CSI_D 0x207c
+#define GAT_CAM_NS_BRDG_CAM_CSI_IPCLKPORT_CLK__PSOC_CAM_CSI__CLK_CAM_CSI_P 0x2080
+#define GAT_CAM_SYSREG_CAM_CSI_IPCLKPORT_PCLK 0x2084
+#define GAT_CAM_TBU_CAM_CSI_IPCLKPORT_ACLK 0x2088
+
+static const unsigned long cam_csi_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CAM_CSI,
+ PLL_CON0_PLL_CAM_CSI,
+ DIV_CAM_CSI0_ACLK,
+ DIV_CAM_CSI1_ACLK,
+ DIV_CAM_CSI2_ACLK,
+ DIV_CAM_CSI_BUSD,
+ DIV_CAM_CSI_BUSP,
+ GAT_CAM_CSI_CMU_CAM_CSI_IPCLKPORT_PCLK,
+ GAT_CAM_AXI2APB_CAM_CSI_IPCLKPORT_ACLK,
+ GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_CSI0,
+ GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_CSI1,
+ GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_CSI2,
+ GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_SOC_NOC,
+ GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__NOC,
+ GAT_CAM_CSI0_0_IPCLKPORT_I_ACLK,
+ GAT_CAM_CSI0_0_IPCLKPORT_I_PCLK,
+ GAT_CAM_CSI0_1_IPCLKPORT_I_ACLK,
+ GAT_CAM_CSI0_1_IPCLKPORT_I_PCLK,
+ GAT_CAM_CSI0_2_IPCLKPORT_I_ACLK,
+ GAT_CAM_CSI0_2_IPCLKPORT_I_PCLK,
+ GAT_CAM_CSI0_3_IPCLKPORT_I_ACLK,
+ GAT_CAM_CSI0_3_IPCLKPORT_I_PCLK,
+ GAT_CAM_CSI1_0_IPCLKPORT_I_ACLK,
+ GAT_CAM_CSI1_0_IPCLKPORT_I_PCLK,
+ GAT_CAM_CSI1_1_IPCLKPORT_I_ACLK,
+ GAT_CAM_CSI1_1_IPCLKPORT_I_PCLK,
+ GAT_CAM_CSI1_2_IPCLKPORT_I_ACLK,
+ GAT_CAM_CSI1_2_IPCLKPORT_I_PCLK,
+ GAT_CAM_CSI1_3_IPCLKPORT_I_ACLK,
+ GAT_CAM_CSI1_3_IPCLKPORT_I_PCLK,
+ GAT_CAM_CSI2_0_IPCLKPORT_I_ACLK,
+ GAT_CAM_CSI2_0_IPCLKPORT_I_PCLK,
+ GAT_CAM_CSI2_1_IPCLKPORT_I_ACLK,
+ GAT_CAM_CSI2_1_IPCLKPORT_I_PCLK,
+ GAT_CAM_CSI2_2_IPCLKPORT_I_ACLK,
+ GAT_CAM_CSI2_2_IPCLKPORT_I_PCLK,
+ GAT_CAM_CSI2_3_IPCLKPORT_I_ACLK,
+ GAT_CAM_CSI2_3_IPCLKPORT_I_PCLK,
+ GAT_CAM_NS_BRDG_CAM_CSI_IPCLKPORT_CLK__PSOC_CAM_CSI__CLK_CAM_CSI_D,
+ GAT_CAM_NS_BRDG_CAM_CSI_IPCLKPORT_CLK__PSOC_CAM_CSI__CLK_CAM_CSI_P,
+ GAT_CAM_SYSREG_CAM_CSI_IPCLKPORT_PCLK,
+ GAT_CAM_TBU_CAM_CSI_IPCLKPORT_ACLK,
+};
+
+static const struct samsung_pll_rate_table pll_cam_csi_rate_table[] __initconst = {
+ PLL_35XX_RATE(24 * MHZ, 1066000000U, 533, 12, 0),
+};
+
+static const struct samsung_pll_clock cam_csi_pll_clks[] __initconst = {
+ PLL(pll_142xx, 0, "fout_pll_cam_csi", "fin_pll",
+ PLL_LOCKTIME_PLL_CAM_CSI, PLL_CON0_PLL_CAM_CSI, pll_cam_csi_rate_table),
+};
+
+PNAME(mout_cam_csi_pll_p) = { "fin_pll", "fout_pll_cam_csi" };
+
+static const struct samsung_mux_clock cam_csi_mux_clks[] __initconst = {
+ MUX(0, "mout_cam_csi_pll", mout_cam_csi_pll_p, PLL_CON0_PLL_CAM_CSI, 4, 1),
+};
+
+static const struct samsung_div_clock cam_csi_div_clks[] __initconst = {
+ DIV(0, "dout_cam_csi0_aclk", "mout_cam_csi_pll", DIV_CAM_CSI0_ACLK, 0, 4),
+ DIV(0, "dout_cam_csi1_aclk", "mout_cam_csi_pll", DIV_CAM_CSI1_ACLK, 0, 4),
+ DIV(0, "dout_cam_csi2_aclk", "mout_cam_csi_pll", DIV_CAM_CSI2_ACLK, 0, 4),
+ DIV(0, "dout_cam_csi_busd", "mout_cam_csi_pll", DIV_CAM_CSI_BUSD, 0, 4),
+ DIV(0, "dout_cam_csi_busp", "mout_cam_csi_pll", DIV_CAM_CSI_BUSP, 0, 4),
+};
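The (0, 4) arguments in each DIV entry above are the field's shift and width: a 4-bit divider at bit 0. Assuming the common value-plus-one divider convention, the effective ratio ranges from parent/1 to parent/16; a small illustrative check:

    #include <stdio.h>

    /* Assumed value-plus-one convention for a 4-bit divider field */
    static unsigned long div_output_rate(unsigned long parent_rate,
                                         unsigned int field)
    {
            return parent_rate / (field + 1);
    }

    int main(void)
    {
            /* e.g. a 1066 MHz parent with the field set to 3 -> 266500000 */
            printf("%lu\n", div_output_rate(1066000000UL, 3));
            return 0;
    }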
+
+static const struct samsung_gate_clock cam_csi_gate_clks[] __initconst = {
+ GATE(0, "cam_csi_cmu_cam_csi_ipclkport_pclk", "dout_cam_csi_busp",
+ GAT_CAM_CSI_CMU_CAM_CSI_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_axi2apb_cam_csi_ipclkport_aclk", "dout_cam_csi_busp",
+ GAT_CAM_AXI2APB_CAM_CSI_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_csi_bus_d_cam_csi_ipclkport_clk__system__clk_csi0", "dout_cam_csi0_aclk",
+ GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_CSI0, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_csi_bus_d_cam_csi_ipclkport_clk__system__clk_csi1", "dout_cam_csi1_aclk",
+ GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_CSI1, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_csi_bus_d_cam_csi_ipclkport_clk__system__clk_csi2", "dout_cam_csi2_aclk",
+ GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_CSI2, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_csi_bus_d_cam_csi_ipclkport_clk__system__clk_soc_noc", "dout_cam_csi_busd",
+ GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__CLK_SOC_NOC, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_csi_bus_d_cam_csi_ipclkport_clk__system__noc", "dout_cam_csi_busd",
+ GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__NOC, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CAM_CSI0_0_IPCLKPORT_I_ACLK, "cam_csi0_0_ipclkport_i_aclk", "dout_cam_csi0_aclk",
+ GAT_CAM_CSI0_0_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_csi0_0_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GAT_CAM_CSI0_0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CAM_CSI0_1_IPCLKPORT_I_ACLK, "cam_csi0_1_ipclkport_i_aclk", "dout_cam_csi0_aclk",
+ GAT_CAM_CSI0_1_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_csi0_1_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GAT_CAM_CSI0_1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CAM_CSI0_2_IPCLKPORT_I_ACLK, "cam_csi0_2_ipclkport_i_aclk", "dout_cam_csi0_aclk",
+ GAT_CAM_CSI0_2_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_csi0_2_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GAT_CAM_CSI0_2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CAM_CSI0_3_IPCLKPORT_I_ACLK, "cam_csi0_3_ipclkport_i_aclk", "dout_cam_csi0_aclk",
+ GAT_CAM_CSI0_3_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_csi0_3_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GAT_CAM_CSI0_3_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CAM_CSI1_0_IPCLKPORT_I_ACLK, "cam_csi1_0_ipclkport_i_aclk", "dout_cam_csi1_aclk",
+ GAT_CAM_CSI1_0_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_csi1_0_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GAT_CAM_CSI1_0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CAM_CSI1_1_IPCLKPORT_I_ACLK, "cam_csi1_1_ipclkport_i_aclk", "dout_cam_csi1_aclk",
+ GAT_CAM_CSI1_1_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_csi1_1_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GAT_CAM_CSI1_1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CAM_CSI1_2_IPCLKPORT_I_ACLK, "cam_csi1_2_ipclkport_i_aclk", "dout_cam_csi1_aclk",
+ GAT_CAM_CSI1_2_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_csi1_2_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GAT_CAM_CSI1_2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CAM_CSI1_3_IPCLKPORT_I_ACLK, "cam_csi1_3_ipclkport_i_aclk", "dout_cam_csi1_aclk",
+ GAT_CAM_CSI1_3_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_csi1_3_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GAT_CAM_CSI1_3_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CAM_CSI2_0_IPCLKPORT_I_ACLK, "cam_csi2_0_ipclkport_i_aclk", "dout_cam_csi2_aclk",
+ GAT_CAM_CSI2_0_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_csi2_0_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GAT_CAM_CSI2_0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CAM_CSI2_1_IPCLKPORT_I_ACLK, "cam_csi2_1_ipclkport_i_aclk", "dout_cam_csi2_aclk",
+ GAT_CAM_CSI2_1_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_csi2_1_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GAT_CAM_CSI2_1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CAM_CSI2_2_IPCLKPORT_I_ACLK, "cam_csi2_2_ipclkport_i_aclk", "dout_cam_csi2_aclk",
+ GAT_CAM_CSI2_2_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_csi2_2_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GAT_CAM_CSI2_2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CAM_CSI2_3_IPCLKPORT_I_ACLK, "cam_csi2_3_ipclkport_i_aclk", "dout_cam_csi2_aclk",
+ GAT_CAM_CSI2_3_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_csi2_3_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GAT_CAM_CSI2_3_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_ns_brdg_cam_csi_ipclkport_clk__psoc_cam_csi__clk_cam_csi_d",
+ "dout_cam_csi_busd",
+ GAT_CAM_NS_BRDG_CAM_CSI_IPCLKPORT_CLK__PSOC_CAM_CSI__CLK_CAM_CSI_D, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_ns_brdg_cam_csi_ipclkport_clk__psoc_cam_csi__clk_cam_csi_p",
+ "dout_cam_csi_busp",
+ GAT_CAM_NS_BRDG_CAM_CSI_IPCLKPORT_CLK__PSOC_CAM_CSI__CLK_CAM_CSI_P, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_sysreg_cam_csi_ipclkport_pclk", "dout_cam_csi_busp",
+ GAT_CAM_SYSREG_CAM_CSI_IPCLKPORT_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(0, "cam_tbu_cam_csi_ipclkport_aclk", "dout_cam_csi_busd",
+ GAT_CAM_TBU_CAM_CSI_IPCLKPORT_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_cmu_info cam_csi_cmu_info __initconst = {
+ .pll_clks = cam_csi_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cam_csi_pll_clks),
+ .mux_clks = cam_csi_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cam_csi_mux_clks),
+ .div_clks = cam_csi_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cam_csi_div_clks),
+ .gate_clks = cam_csi_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cam_csi_gate_clks),
+ .nr_clk_ids = CAM_CSI_NR_CLK,
+ .clk_regs = cam_csi_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cam_csi_clk_regs),
+};
+
+/**
+ * fsd_cmu_probe - Probe function for FSD platform clocks
+ * @pdev: Pointer to platform device
+ *
+ * Configures the clock hierarchy for the clock domains of the FSD platform.
+ *
+ * Return: 0 on success.
+ */
+static int __init fsd_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+
+ info = of_device_get_match_data(dev);
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
+
+ return 0;
+}
+
+/* CMUs which belong to power domains and for which runtime PM is yet to be implemented */
+static const struct of_device_id fsd_cmu_of_match[] = {
+ {
+ .compatible = "tesla,fsd-clock-peric",
+ .data = &peric_cmu_info,
+ }, {
+ .compatible = "tesla,fsd-clock-fsys0",
+ .data = &fsys0_cmu_info,
+ }, {
+ .compatible = "tesla,fsd-clock-fsys1",
+ .data = &fsys1_cmu_info,
+ }, {
+ .compatible = "tesla,fsd-clock-mfc",
+ .data = &mfc_cmu_info,
+ }, {
+ .compatible = "tesla,fsd-clock-cam_csi",
+ .data = &cam_csi_cmu_info,
+ }, {
+ },
+};
+
+static struct platform_driver fsd_cmu_driver __refdata = {
+ .driver = {
+ .name = "fsd-cmu",
+ .of_match_table = fsd_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fsd_cmu_probe,
+};
+
+static int __init fsd_cmu_init(void)
+{
+ return platform_driver_register(&fsd_cmu_driver);
+}
+core_initcall(fsd_cmu_init);
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 70cdc87f714e..fe383471c5f0 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -1469,6 +1469,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
case pll_1450x:
case pll_1451x:
case pll_1452x:
+ case pll_142xx:
pll->enable_offs = PLL35XX_ENABLE_SHIFT;
pll->lock_offs = PLL35XX_LOCK_STAT_SHIFT;
if (!pll->rate_table)
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index c83a20195f6d..a9892c2d1f57 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -39,6 +39,7 @@ enum samsung_pll_type {
pll_1460x,
pll_0822x,
pll_0831x,
+ pll_142xx,
};
#define PLL_RATE(_fin, _m, _p, _s, _k, _ks) \
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index cfb8ea0df3b1..1589ae7d5abb 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -617,15 +617,6 @@ config CLKSRC_ST_LPC
Enable this option to use the Low Power controller timer
as clocksource.
-config ATCPIT100_TIMER
- bool "ATCPIT100 timer driver"
- depends on NDS32 || COMPILE_TEST
- depends on HAS_IOMEM
- select TIMER_OF
- default NDS32
- help
- This option enables support for the Andestech ATCPIT100 timers.
-
config RISCV_TIMER
bool "Timer for the RISC-V platform" if COMPILE_TEST
depends on GENERIC_SCHED_CLOCK && RISCV && RISCV_SBI
@@ -713,7 +704,6 @@ config INGENIC_OST
config MICROCHIP_PIT64B
bool "Microchip PIT64B support"
depends on OF || COMPILE_TEST
- select CLKSRC_MMIO
select TIMER_OF
help
This option enables Microchip PIT64B timer for Atmel
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index fa5f624eadb6..9c85ee2bb373 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -81,7 +81,6 @@ obj-$(CONFIG_INGENIC_SYSOST) += ingenic-sysost.o
obj-$(CONFIG_INGENIC_TIMER) += ingenic-timer.o
obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o
obj-$(CONFIG_X86_NUMACHIP) += numachip.o
-obj-$(CONFIG_ATCPIT100_TIMER) += timer-atcpit100.o
obj-$(CONFIG_RISCV_TIMER) += timer-riscv.o
obj-$(CONFIG_CLINT_TIMER) += timer-clint.o
obj-$(CONFIG_CSKY_MP_TIMER) += timer-mp-csky.o
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index eb596ff9e7bb..279ddff81ab4 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -229,8 +229,10 @@ static int __init parse_pmtmr(char *arg)
int ret;
ret = kstrtouint(arg, 16, &base);
- if (ret)
- return ret;
+ if (ret) {
+ pr_warn("PMTMR: invalid 'pmtmr=' value: '%s'\n", arg);
+ return 1;
+ }
pr_info("PMTMR IOPort override: 0x%04x -> 0x%04x\n", pmtmr_ioport,
base);
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 1ecd52f903b8..9ab8221ee3c6 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -880,10 +880,19 @@ static void __arch_timer_setup(unsigned type,
clockevents_config_and_register(clk, arch_timer_rate, 0xf, max_delta);
}
-static void arch_timer_evtstrm_enable(int divider)
+static void arch_timer_evtstrm_enable(unsigned int divider)
{
u32 cntkctl = arch_timer_get_cntkctl();
+#ifdef CONFIG_ARM64
+ /* ECV is likely to require a large divider. Use the EVNTIS flag. */
+ if (cpus_have_const_cap(ARM64_HAS_ECV) && divider > 15) {
+ cntkctl |= ARCH_TIMER_EVT_INTERVAL_SCALE;
+ divider -= 8;
+ }
+#endif
+
+ divider = min(divider, 15U);
cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
/* Set the divider and enable virtual event stream */
cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
@@ -912,7 +921,7 @@ static void arch_timer_configure_evtstream(void)
lsb++;
/* enable event stream */
- arch_timer_evtstrm_enable(max(0, min(lsb, 15)));
+ arch_timer_evtstrm_enable(max(0, lsb));
}
static void arch_counter_set_user_access(void)
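A sketch of the divider encoding this hunk implements, assuming the ARMv8 behaviour that setting the EVNTIS scale bit makes the 4-bit EVNTI field select counter bit (n + 8) rather than bit n — hence the subtract-by-8 followed by the clamp to 15:

    /* Illustrative only: mirrors the field/scale selection done above */
    static unsigned int evtstrm_evnti(unsigned int divider, int have_ecv,
                                      int *use_scale)
    {
            *use_scale = 0;
            if (have_ecv && divider > 15) {
                    *use_scale = 1;         /* EVNTIS: field selects bit n + 8 */
                    divider -= 8;
            }
            return divider > 15 ? 15 : divider;     /* EVNTI is 4 bits wide */
    }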
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 6db3d5511b0f..f29c812b70c9 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -60,27 +60,18 @@
#define MCT_CLKEVENTS_RATING 350
#endif
+/* There are four global timers, starting at offset 0 */
+#define MCT_G0_IRQ 0
+/* Local timer indices start after the global timer indices */
+#define MCT_L0_IRQ 4
+/* Maximum number of IRQs as per the DT binding document */
+#define MCT_NR_IRQS 20
+
enum {
MCT_INT_SPI,
MCT_INT_PPI
};
-enum {
- MCT_G0_IRQ,
- MCT_G1_IRQ,
- MCT_G2_IRQ,
- MCT_G3_IRQ,
- MCT_L0_IRQ,
- MCT_L1_IRQ,
- MCT_L2_IRQ,
- MCT_L3_IRQ,
- MCT_L4_IRQ,
- MCT_L5_IRQ,
- MCT_L6_IRQ,
- MCT_L7_IRQ,
- MCT_NR_IRQS,
-};
-
static void __iomem *reg_base;
static unsigned long clk_rate;
static unsigned int mct_int_type;
@@ -89,7 +80,11 @@ static int mct_irqs[MCT_NR_IRQS];
struct mct_clock_event_device {
struct clock_event_device evt;
unsigned long base;
- char name[10];
+ /*
+ * The length of the name must be adjusted if the number of
+ * local timer interrupts grows beyond two digits.
+ */
+ char name[11];
};
static void exynos4_mct_write(unsigned int value, unsigned long offset)
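The 11-byte sizing follows from the local timer name format used elsewhere in this file (assumed here to be "mct_tick%d"): an 8-character prefix, up to two digits once local timer indices can reach 10 and above, plus the terminating NUL. A quick userspace check:

    #include <stdio.h>

    int main(void)
    {
            char name[11];
            /* "mct_tick" (8) + 2 digits + NUL = 11 bytes for indices up to 99 */
            int n = snprintf(name, sizeof(name), "mct_tick%d", 15);

            printf("wrote %d chars, truncated: %s\n",
                   n, n >= (int)sizeof(name) ? "yes" : "no");
            return 0;
    }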
@@ -541,6 +536,11 @@ static int __init exynos4_timer_interrupts(struct device_node *np,
* irqs are specified.
*/
nr_irqs = of_irq_count(np);
+ if (nr_irqs > ARRAY_SIZE(mct_irqs)) {
+ pr_err("exynos-mct: too many (%d) interrupts configured in DT\n",
+ nr_irqs);
+ nr_irqs = ARRAY_SIZE(mct_irqs);
+ }
for (i = MCT_L0_IRQ; i < nr_irqs; i++)
mct_irqs[i] = irq_of_parse_and_map(np, i);
@@ -553,11 +553,14 @@ static int __init exynos4_timer_interrupts(struct device_node *np,
mct_irqs[MCT_L0_IRQ], err);
} else {
for_each_possible_cpu(cpu) {
- int mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
+ int mct_irq;
struct mct_clock_event_device *pcpu_mevt =
per_cpu_ptr(&percpu_mct_tick, cpu);
pcpu_mevt->evt.irq = -1;
+ if (MCT_L0_IRQ + cpu >= ARRAY_SIZE(mct_irqs))
+ break;
+ mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
if (request_irq(mct_irq,
diff --git a/drivers/clocksource/timer-atcpit100.c b/drivers/clocksource/timer-atcpit100.c
deleted file mode 100644
index b4bd2f5b801d..000000000000
--- a/drivers/clocksource/timer-atcpit100.c
+++ /dev/null
@@ -1,266 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2005-2017 Andes Technology Corporation
-/*
- * Andestech ATCPIT100 Timer Device Driver Implementation
- * Rick Chen, Andes Technology Corporation <rick@andestech.com>
- *
- */
-
-#include <linux/irq.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/cpufreq.h>
-#include <linux/sched.h>
-#include <linux/sched_clock.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
-#include "timer-of.h"
-#ifdef CONFIG_NDS32
-#include <asm/vdso_timer_info.h>
-#endif
-
-/*
- * Definition of register offsets
- */
-
-/* ID and Revision Register */
-#define ID_REV 0x0
-
-/* Configuration Register */
-#define CFG 0x10
-
-/* Interrupt Enable Register */
-#define INT_EN 0x14
-#define CH_INT_EN(c, i) ((1<<i)<<(4*c))
-#define CH0INT0EN 0x01
-
-/* Interrupt Status Register */
-#define INT_STA 0x18
-#define CH0INT0 0x01
-
-/* Channel Enable Register */
-#define CH_EN 0x1C
-#define CH0TMR0EN 0x1
-#define CH1TMR0EN 0x10
-
-/* Channel 0 , 1 Control Register */
-#define CH0_CTL (0x20)
-#define CH1_CTL (0x20 + 0x10)
-
-/* Channel clock source , bit 3 , 0:External clock , 1:APB clock */
-#define APB_CLK BIT(3)
-
-/* Channel mode , bit 0~2 */
-#define TMR_32 0x1
-#define TMR_16 0x2
-#define TMR_8 0x3
-
-/* Channel 0 , 1 Reload Register */
-#define CH0_REL (0x24)
-#define CH1_REL (0x24 + 0x10)
-
-/* Channel 0 , 1 Counter Register */
-#define CH0_CNT (0x28)
-#define CH1_CNT (0x28 + 0x10)
-
-#define TIMER_SYNC_TICKS 3
-
-static void atcpit100_ch1_tmr0_en(void __iomem *base)
-{
- writel(~0, base + CH1_REL);
- writel(APB_CLK|TMR_32, base + CH1_CTL);
-}
-
-static void atcpit100_ch0_tmr0_en(void __iomem *base)
-{
- writel(APB_CLK|TMR_32, base + CH0_CTL);
-}
-
-static void atcpit100_clkevt_time_setup(void __iomem *base, unsigned long delay)
-{
- writel(delay, base + CH0_CNT);
- writel(delay, base + CH0_REL);
-}
-
-static void atcpit100_timer_clear_interrupt(void __iomem *base)
-{
- u32 val;
-
- val = readl(base + INT_STA);
- writel(val | CH0INT0, base + INT_STA);
-}
-
-static void atcpit100_clocksource_start(void __iomem *base)
-{
- u32 val;
-
- val = readl(base + CH_EN);
- writel(val | CH1TMR0EN, base + CH_EN);
-}
-
-static void atcpit100_clkevt_time_start(void __iomem *base)
-{
- u32 val;
-
- val = readl(base + CH_EN);
- writel(val | CH0TMR0EN, base + CH_EN);
-}
-
-static void atcpit100_clkevt_time_stop(void __iomem *base)
-{
- u32 val;
-
- atcpit100_timer_clear_interrupt(base);
- val = readl(base + CH_EN);
- writel(val & ~CH0TMR0EN, base + CH_EN);
-}
-
-static int atcpit100_clkevt_next_event(unsigned long evt,
- struct clock_event_device *clkevt)
-{
- u32 val;
- struct timer_of *to = to_timer_of(clkevt);
-
- val = readl(timer_of_base(to) + CH_EN);
- writel(val & ~CH0TMR0EN, timer_of_base(to) + CH_EN);
- writel(evt, timer_of_base(to) + CH0_REL);
- writel(val | CH0TMR0EN, timer_of_base(to) + CH_EN);
-
- return 0;
-}
-
-static int atcpit100_clkevt_set_periodic(struct clock_event_device *evt)
-{
- struct timer_of *to = to_timer_of(evt);
-
- atcpit100_clkevt_time_setup(timer_of_base(to), timer_of_period(to));
- atcpit100_clkevt_time_start(timer_of_base(to));
-
- return 0;
-}
-static int atcpit100_clkevt_shutdown(struct clock_event_device *evt)
-{
- struct timer_of *to = to_timer_of(evt);
-
- atcpit100_clkevt_time_stop(timer_of_base(to));
-
- return 0;
-}
-static int atcpit100_clkevt_set_oneshot(struct clock_event_device *evt)
-{
- struct timer_of *to = to_timer_of(evt);
- u32 val;
-
- writel(~0x0, timer_of_base(to) + CH0_REL);
- val = readl(timer_of_base(to) + CH_EN);
- writel(val | CH0TMR0EN, timer_of_base(to) + CH_EN);
-
- return 0;
-}
-
-static irqreturn_t atcpit100_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = (struct clock_event_device *)dev_id;
- struct timer_of *to = to_timer_of(evt);
-
- atcpit100_timer_clear_interrupt(timer_of_base(to));
-
- evt->event_handler(evt);
-
- return IRQ_HANDLED;
-}
-
-static struct timer_of to = {
- .flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE,
-
- .clkevt = {
- .name = "atcpit100_tick",
- .rating = 300,
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_state_shutdown = atcpit100_clkevt_shutdown,
- .set_state_periodic = atcpit100_clkevt_set_periodic,
- .set_state_oneshot = atcpit100_clkevt_set_oneshot,
- .tick_resume = atcpit100_clkevt_shutdown,
- .set_next_event = atcpit100_clkevt_next_event,
- .cpumask = cpu_possible_mask,
- },
-
- .of_irq = {
- .handler = atcpit100_timer_interrupt,
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- },
-
- /*
- * FIXME: we currently only support clocking using PCLK
- * and using EXTCLK is not supported in the driver.
- */
- .of_clk = {
- .name = "PCLK",
- }
-};
-
-static u64 notrace atcpit100_timer_sched_read(void)
-{
- return ~readl(timer_of_base(&to) + CH1_CNT);
-}
-
-#ifdef CONFIG_NDS32
-static void fill_vdso_need_info(struct device_node *node)
-{
- struct resource timer_res;
- of_address_to_resource(node, 0, &timer_res);
- timer_info.mapping_base = (unsigned long)timer_res.start;
- timer_info.cycle_count_down = true;
- timer_info.cycle_count_reg_offset = CH1_CNT;
-}
-#endif
-
-static int __init atcpit100_timer_init(struct device_node *node)
-{
- int ret;
- u32 val;
- void __iomem *base;
-
- ret = timer_of_init(node, &to);
- if (ret)
- return ret;
-
- base = timer_of_base(&to);
-
- sched_clock_register(atcpit100_timer_sched_read, 32,
- timer_of_rate(&to));
-
- ret = clocksource_mmio_init(base + CH1_CNT,
- node->name, timer_of_rate(&to), 300, 32,
- clocksource_mmio_readl_down);
-
- if (ret) {
- pr_err("Failed to register clocksource\n");
- return ret;
- }
-
- /* clear channel 0 timer0 interrupt */
- atcpit100_timer_clear_interrupt(base);
-
- clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
- TIMER_SYNC_TICKS, 0xffffffff);
- atcpit100_ch0_tmr0_en(base);
- atcpit100_ch1_tmr0_en(base);
- atcpit100_clocksource_start(base);
- atcpit100_clkevt_time_start(base);
-
- /* Enable channel 0 timer0 interrupt */
- val = readl(base + INT_EN);
- writel(val | CH0INT0EN, base + INT_EN);
-
-#ifdef CONFIG_NDS32
- fill_vdso_need_info(node);
-#endif
-
- return ret;
-}
-
-TIMER_OF_DECLARE(atcpit100, "andestech,atcpit100", atcpit100_timer_init);
diff --git a/drivers/clocksource/timer-imx-sysctr.c b/drivers/clocksource/timer-imx-sysctr.c
index 55a8e198d2a1..523e37662a6e 100644
--- a/drivers/clocksource/timer-imx-sysctr.c
+++ b/drivers/clocksource/timer-imx-sysctr.c
@@ -110,7 +110,7 @@ static struct timer_of to_sysctr = {
},
.of_irq = {
.handler = sysctr_timer_interrupt,
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER,
},
.of_clk = {
.name = "per",
diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
index 2cdc077a39f5..bd64a8a8427f 100644
--- a/drivers/clocksource/timer-imx-tpm.c
+++ b/drivers/clocksource/timer-imx-tpm.c
@@ -32,8 +32,8 @@
#define TPM_C0SC_CHF_MASK (0x1 << 7)
#define TPM_C0V 0x24
-static int counter_width;
-static void __iomem *timer_base;
+static int counter_width __ro_after_init;
+static void __iomem *timer_base __ro_after_init;
static inline void tpm_timer_disable(void)
{
@@ -73,12 +73,12 @@ static unsigned long tpm_read_current_timer(void)
{
return tpm_read_counter();
}
-#endif
static u64 notrace tpm_read_sched_clock(void)
{
return tpm_read_counter();
}
+#endif
static int tpm_set_next_event(unsigned long delta,
struct clock_event_device *evt)
@@ -127,9 +127,9 @@ static irqreturn_t tpm_timer_interrupt(int irq, void *dev_id)
static struct timer_of to_tpm = {
.flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
.clkevt = {
- .name = "i.MX7ULP TPM Timer",
+ .name = "i.MX TPM Timer",
.rating = 200,
- .features = CLOCK_EVT_FEAT_ONESHOT,
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ,
.set_state_shutdown = tpm_set_state_shutdown,
.set_state_oneshot = tpm_set_state_oneshot,
.set_next_event = tpm_set_next_event,
@@ -137,7 +137,7 @@ static struct timer_of to_tpm = {
},
.of_irq = {
.handler = tpm_timer_interrupt,
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER,
},
.of_clk = {
.name = "per",
@@ -150,10 +150,10 @@ static int __init tpm_clocksource_init(void)
tpm_delay_timer.read_current_timer = &tpm_read_current_timer;
tpm_delay_timer.freq = timer_of_rate(&to_tpm) >> 3;
register_current_timer_delay(&tpm_delay_timer);
-#endif
sched_clock_register(tpm_read_sched_clock, counter_width,
timer_of_rate(&to_tpm) >> 3);
+#endif
return clocksource_mmio_init(timer_base + TPM_CNT,
"imx-tpm",
diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c
index cfa4ec7ef396..abce83d2f00b 100644
--- a/drivers/clocksource/timer-microchip-pit64b.c
+++ b/drivers/clocksource/timer-microchip-pit64b.c
@@ -42,8 +42,7 @@
#define MCHP_PIT64B_LSBMASK GENMASK_ULL(31, 0)
#define MCHP_PIT64B_PRES_TO_MODE(p) (MCHP_PIT64B_MR_PRES & ((p) << 8))
#define MCHP_PIT64B_MODE_TO_PRES(m) ((MCHP_PIT64B_MR_PRES & (m)) >> 8)
-#define MCHP_PIT64B_DEF_CS_FREQ 5000000UL /* 5 MHz */
-#define MCHP_PIT64B_DEF_CE_FREQ 32768 /* 32 KHz */
+#define MCHP_PIT64B_DEF_FREQ 5000000UL /* 5 MHz */
#define MCHP_PIT64B_NAME "pit64b"
@@ -165,7 +164,7 @@ static u64 mchp_pit64b_clksrc_read(struct clocksource *cs)
return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
}
-static u64 mchp_pit64b_sched_read_clk(void)
+static u64 notrace mchp_pit64b_sched_read_clk(void)
{
return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
}
@@ -418,7 +417,6 @@ static int __init mchp_pit64b_init_clkevt(struct mchp_pit64b_timer *timer,
static int __init mchp_pit64b_dt_init_timer(struct device_node *node,
bool clkevt)
{
- u32 freq = clkevt ? MCHP_PIT64B_DEF_CE_FREQ : MCHP_PIT64B_DEF_CS_FREQ;
struct mchp_pit64b_timer timer;
unsigned long clk_rate;
u32 irq = 0;
@@ -446,7 +444,7 @@ static int __init mchp_pit64b_dt_init_timer(struct device_node *node,
}
/* Initialize mode (prescaler + SGCK bit). To be used at runtime. */
- ret = mchp_pit64b_init_mode(&timer, freq);
+ ret = mchp_pit64b_init_mode(&timer, MCHP_PIT64B_DEF_FREQ);
if (ret)
goto irq_unmap;
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index 529cc6a51cdb..c3f54d9912be 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -157,9 +157,9 @@ static __init int timer_of_base_init(struct device_node *np,
of_base->base = of_base->name ?
of_io_request_and_map(np, of_base->index, of_base->name) :
of_iomap(np, of_base->index);
- if (IS_ERR(of_base->base)) {
- pr_err("Failed to iomap (%s)\n", of_base->name);
- return PTR_ERR(of_base->base);
+ if (IS_ERR_OR_NULL(of_base->base)) {
+ pr_err("Failed to iomap (%s:%s)\n", np->name, of_base->name);
+ return of_base->base ? PTR_ERR(of_base->base) : -ENOMEM;
}
return 0;
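The widened check matters because the two mapping helpers used above fail differently: of_iomap() returns NULL, while of_io_request_and_map() returns an ERR_PTR()-encoded error. Schematically, the combined handling looks like this (illustrative, not part of the patch):

    /* Accept both failure conventions: NULL and ERR_PTR() */
    static int base_to_errno(void __iomem *base)
    {
            if (IS_ERR_OR_NULL(base))
                    return base ? PTR_ERR(base) : -ENOMEM;
            return 0;
    }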
diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
index 5c40ca1d4740..2737407ff069 100644
--- a/drivers/clocksource/timer-ti-dm-systimer.c
+++ b/drivers/clocksource/timer-ti-dm-systimer.c
@@ -241,8 +241,7 @@ static void __init dmtimer_systimer_assign_alwon(void)
bool quirk_unreliable_oscillator = false;
/* Quirk unreliable 32 KiHz oscillator with incomplete dts */
- if (of_machine_is_compatible("ti,omap3-beagle-ab4") ||
- of_machine_is_compatible("timll,omap3-devkit8000")) {
+ if (of_machine_is_compatible("ti,omap3-beagle-ab4")) {
quirk_unreliable_oscillator = true;
counter_32k = -ENODEV;
}
@@ -695,9 +694,9 @@ static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa)
return 0;
}
- if (pa == 0x48034000) /* dra7 dmtimer3 */
+ if (pa == 0x4882c000) /* dra7 dmtimer15 */
return dmtimer_percpu_timer_init(np, 0);
- else if (pa == 0x48036000) /* dra7 dmtimer4 */
+ else if (pa == 0x4882e000) /* dra7 dmtimer16 */
return dmtimer_percpu_timer_init(np, 1);
return 0;
diff --git a/drivers/counter/counter-sysfs.c b/drivers/counter/counter-sysfs.c
index 7cc4d1d523ea..04eac41dad33 100644
--- a/drivers/counter/counter-sysfs.c
+++ b/drivers/counter/counter-sysfs.c
@@ -19,6 +19,11 @@
#include "counter-sysfs.h"
+static inline struct counter_device *counter_from_dev(struct device *dev)
+{
+ return container_of(dev, struct counter_device, dev);
+}
+
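The new helper recovers the counter_device from its embedded struct device with container_of() rather than relying on dev_get_drvdata(), which need not be populated for this device. A minimal userspace illustration of the same pattern (simplified types, not the kernel definitions):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct device { int id; };
    struct counter_device { int value; struct device dev; };

    int main(void)
    {
            struct counter_device counter = { .value = 42 };
            struct device *dev = &counter.dev;
            struct counter_device *c =
                    container_of(dev, struct counter_device, dev);

            printf("%d\n", c->value);       /* prints 42 */
            return 0;
    }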
/**
* struct counter_attribute - Counter sysfs attribute
* @dev_attr: device attribute for sysfs
@@ -90,7 +95,7 @@ static ssize_t counter_comp_u8_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
- struct counter_device *const counter = dev_get_drvdata(dev);
+ struct counter_device *const counter = counter_from_dev(dev);
int err;
u8 data = 0;
@@ -122,7 +127,7 @@ static ssize_t counter_comp_u8_store(struct device *dev,
const char *buf, size_t len)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
- struct counter_device *const counter = dev_get_drvdata(dev);
+ struct counter_device *const counter = counter_from_dev(dev);
int err;
bool bool_data = 0;
u8 data = 0;
@@ -158,7 +163,7 @@ static ssize_t counter_comp_u32_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
- struct counter_device *const counter = dev_get_drvdata(dev);
+ struct counter_device *const counter = counter_from_dev(dev);
const struct counter_available *const avail = a->comp.priv;
int err;
u32 data = 0;
@@ -221,7 +226,7 @@ static ssize_t counter_comp_u32_store(struct device *dev,
const char *buf, size_t len)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
- struct counter_device *const counter = dev_get_drvdata(dev);
+ struct counter_device *const counter = counter_from_dev(dev);
struct counter_count *const count = a->parent;
struct counter_synapse *const synapse = a->comp.priv;
const struct counter_available *const avail = a->comp.priv;
@@ -281,7 +286,7 @@ static ssize_t counter_comp_u64_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
- struct counter_device *const counter = dev_get_drvdata(dev);
+ struct counter_device *const counter = counter_from_dev(dev);
int err;
u64 data = 0;
@@ -309,7 +314,7 @@ static ssize_t counter_comp_u64_store(struct device *dev,
const char *buf, size_t len)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
- struct counter_device *const counter = dev_get_drvdata(dev);
+ struct counter_device *const counter = counter_from_dev(dev);
int err;
u64 data = 0;
diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
index 647505957d4f..35f38ae67fb1 100644
--- a/drivers/cpufreq/amd-pstate-trace.h
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -27,6 +27,10 @@ TRACE_EVENT(amd_pstate_perf,
TP_PROTO(unsigned long min_perf,
unsigned long target_perf,
unsigned long capacity,
+ u64 freq,
+ u64 mperf,
+ u64 aperf,
+ u64 tsc,
unsigned int cpu_id,
bool changed,
bool fast_switch
@@ -35,6 +39,10 @@ TRACE_EVENT(amd_pstate_perf,
TP_ARGS(min_perf,
target_perf,
capacity,
+ freq,
+ mperf,
+ aperf,
+ tsc,
cpu_id,
changed,
fast_switch
@@ -44,6 +52,10 @@ TRACE_EVENT(amd_pstate_perf,
__field(unsigned long, min_perf)
__field(unsigned long, target_perf)
__field(unsigned long, capacity)
+ __field(unsigned long long, freq)
+ __field(unsigned long long, mperf)
+ __field(unsigned long long, aperf)
+ __field(unsigned long long, tsc)
__field(unsigned int, cpu_id)
__field(bool, changed)
__field(bool, fast_switch)
@@ -53,15 +65,23 @@ TRACE_EVENT(amd_pstate_perf,
__entry->min_perf = min_perf;
__entry->target_perf = target_perf;
__entry->capacity = capacity;
+ __entry->freq = freq;
+ __entry->mperf = mperf;
+ __entry->aperf = aperf;
+ __entry->tsc = tsc;
__entry->cpu_id = cpu_id;
__entry->changed = changed;
__entry->fast_switch = fast_switch;
),
- TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu cpu_id=%u changed=%s fast_switch=%s",
+ TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u changed=%s fast_switch=%s",
(unsigned long)__entry->min_perf,
(unsigned long)__entry->target_perf,
(unsigned long)__entry->capacity,
+ (unsigned long long)__entry->freq,
+ (unsigned long long)__entry->mperf,
+ (unsigned long long)__entry->aperf,
+ (unsigned long long)__entry->tsc,
(unsigned int)__entry->cpu_id,
(__entry->changed) ? "true" : "false",
(__entry->fast_switch) ? "true" : "false"
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 9ce75ed11f8e..7be38bc6a673 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -66,6 +66,18 @@ MODULE_PARM_DESC(shared_mem,
static struct cpufreq_driver amd_pstate_driver;
/**
+ * struct amd_aperf_mperf - APERF/MPERF/TSC counter sample
+ * @aperf: actual performance frequency clock count
+ * @mperf: maximum performance frequency clock count
+ * @tsc: time stamp counter
+ */
+struct amd_aperf_mperf {
+ u64 aperf;
+ u64 mperf;
+ u64 tsc;
+};
+
+/**
* struct amd_cpudata - private CPU data for AMD P-State
* @cpu: CPU number
* @req: constraint request to apply
@@ -81,6 +93,9 @@ static struct cpufreq_driver amd_pstate_driver;
* @min_freq: the frequency that mapped to lowest_perf
* @nominal_freq: the frequency that mapped to nominal_perf
* @lowest_nonlinear_freq: the frequency that mapped to lowest_nonlinear_perf
+ * @cur: difference between the last and current APERF/MPERF/TSC samples
+ * @prev: last APERF/MPERF/TSC counts read from the registers
+ * @freq: current CPU frequency value
* @boost_supported: check whether the Processor or SBIOS supports boost mode
*
* The amd_cpudata is key private data for each CPU thread in AMD P-State, and
@@ -102,6 +117,10 @@ struct amd_cpudata {
u32 nominal_freq;
u32 lowest_nonlinear_freq;
+ struct amd_aperf_mperf cur;
+ struct amd_aperf_mperf prev;
+
+ u64 freq;
bool boost_supported;
};
@@ -211,6 +230,39 @@ static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
max_perf, fast_switch);
}
+static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
+{
+ u64 aperf, mperf, tsc;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ rdmsrl(MSR_IA32_APERF, aperf);
+ rdmsrl(MSR_IA32_MPERF, mperf);
+ tsc = rdtsc();
+
+ if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
+ local_irq_restore(flags);
+ return false;
+ }
+
+ local_irq_restore(flags);
+
+ cpudata->cur.aperf = aperf;
+ cpudata->cur.mperf = mperf;
+ cpudata->cur.tsc = tsc;
+ cpudata->cur.aperf -= cpudata->prev.aperf;
+ cpudata->cur.mperf -= cpudata->prev.mperf;
+ cpudata->cur.tsc -= cpudata->prev.tsc;
+
+ cpudata->prev.aperf = aperf;
+ cpudata->prev.mperf = mperf;
+ cpudata->prev.tsc = tsc;
+
+ cpudata->freq = div64_u64((cpudata->cur.aperf * cpu_khz), cpudata->cur.mperf);
+
+ return true;
+}
+
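The derived frequency works because MPERF ticks at a fixed reference rate (cpu_khz) while APERF ticks at the actual running rate, so the ratio of the deltas rescales the reference frequency. A worked userspace sketch with made-up deltas:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t effective_khz(uint64_t d_aperf, uint64_t d_mperf,
                                  uint64_t ref_khz)
    {
            return d_aperf * ref_khz / d_mperf;
    }

    int main(void)
    {
            /* APERF advancing 1.2x as fast as MPERF on a 3 GHz reference */
            printf("%llu kHz\n",
                   (unsigned long long)effective_khz(1200000, 1000000, 3000000));
            return 0;
    }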
static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
u32 des_perf, u32 max_perf, bool fast_switch)
{
@@ -226,8 +278,11 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
value &= ~AMD_CPPC_MAX_PERF(~0L);
value |= AMD_CPPC_MAX_PERF(max_perf);
- trace_amd_pstate_perf(min_perf, des_perf, max_perf,
- cpudata->cpu, (value != prev), fast_switch);
+ if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
+ trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
+ cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
+ cpudata->cpu, (value != prev), fast_switch);
+ }
if (value == prev)
return;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 08515f7e515f..b6bd0ff35323 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -146,7 +146,7 @@ static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
/************************** sysfs interface ************************/
-static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
+static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -161,7 +161,7 @@ static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
return count;
}
-static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
+static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -177,7 +177,7 @@ static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
return count;
}
-static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
+static ssize_t down_threshold_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -195,7 +195,7 @@ static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
return count;
}
-static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
+static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -220,7 +220,7 @@ static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
return count;
}
-static ssize_t store_freq_step(struct gov_attr_set *attr_set, const char *buf,
+static ssize_t freq_step_store(struct gov_attr_set *attr_set, const char *buf,
size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 63f7c219062b..0d42cf8b88d8 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -27,7 +27,7 @@ static DEFINE_MUTEX(gov_dbs_data_mutex);
/* Common sysfs tunables */
/*
- * store_sampling_rate - update sampling rate effective immediately if needed.
+ * sampling_rate_store - update sampling rate effective immediately if needed.
*
* If new rate is smaller than the old, simply updating
* dbs.sampling_rate might not be appropriate. For example, if the
@@ -41,7 +41,7 @@ static DEFINE_MUTEX(gov_dbs_data_mutex);
* This must be called with dbs_data->mutex held, otherwise traversing
* policy_dbs_list isn't safe.
*/
-ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
+ssize_t sampling_rate_store(struct gov_attr_set *attr_set, const char *buf,
size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -80,7 +80,7 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
return count;
}
-EXPORT_SYMBOL_GPL(store_sampling_rate);
+EXPORT_SYMBOL_GPL(sampling_rate_store);
/**
* gov_update_cpu_data - Update CPU load data.
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index bab8e6140377..a5a0bc3cc23e 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -51,7 +51,7 @@ static inline struct dbs_data *to_dbs_data(struct gov_attr_set *attr_set)
}
#define gov_show_one(_gov, file_name) \
-static ssize_t show_##file_name \
+static ssize_t file_name##_show \
(struct gov_attr_set *attr_set, char *buf) \
{ \
struct dbs_data *dbs_data = to_dbs_data(attr_set); \
@@ -60,7 +60,7 @@ static ssize_t show_##file_name \
}
#define gov_show_one_common(file_name) \
-static ssize_t show_##file_name \
+static ssize_t file_name##_show \
(struct gov_attr_set *attr_set, char *buf) \
{ \
struct dbs_data *dbs_data = to_dbs_data(attr_set); \
@@ -68,12 +68,10 @@ static ssize_t show_##file_name \
}
#define gov_attr_ro(_name) \
-static struct governor_attr _name = \
-__ATTR(_name, 0444, show_##_name, NULL)
+static struct governor_attr _name = __ATTR_RO(_name)
#define gov_attr_rw(_name) \
-static struct governor_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
+static struct governor_attr _name = __ATTR_RW(_name)
/* Common to all CPUs of a policy */
struct policy_dbs_info {
@@ -176,7 +174,7 @@ void od_register_powersave_bias_handler(unsigned int (*f)
(struct cpufreq_policy *, unsigned int, unsigned int),
unsigned int powersave_bias);
void od_unregister_powersave_bias_handler(void);
-ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
+ssize_t sampling_rate_store(struct gov_attr_set *attr_set, const char *buf,
size_t count);
void gov_update_cpu_data(struct dbs_data *dbs_data);
#endif /* _CPUFREQ_GOVERNOR_H */
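The show_/store_ renaming across these governor files lines the handlers up with the generic sysfs helpers, which paste the attribute name together with _show/_store; __ATTR_RW(_name), for instance, expands along the lines of the sketch below (illustrative — see include/linux/sysfs.h for the real definitions):

    /* Why handlers must be named <attr>_show / <attr>_store */
    #define MY_ATTR_RW(_name) \
            __ATTR(_name, 0644, _name##_show, _name##_store)
    #define MY_ATTR_RO(_name) \
            __ATTR(_name, 0444, _name##_show, NULL)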
diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c
index a6f365b9cc1a..771770ea0ed0 100644
--- a/drivers/cpufreq/cpufreq_governor_attr_set.c
+++ b/drivers/cpufreq/cpufreq_governor_attr_set.c
@@ -8,11 +8,6 @@
#include "cpufreq_governor.h"
-static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
-{
- return container_of(kobj, struct gov_attr_set, kobj);
-}
-
static inline struct governor_attr *to_gov_attr(struct attribute *attr)
{
return container_of(attr, struct governor_attr, attr);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 6a41ea4729b8..e8fbf970ff07 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -202,7 +202,7 @@ static unsigned int od_dbs_update(struct cpufreq_policy *policy)
/************************** sysfs interface ************************/
static struct dbs_governor od_dbs_gov;
-static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
+static ssize_t io_is_busy_store(struct gov_attr_set *attr_set, const char *buf,
size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -220,7 +220,7 @@ static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
return count;
}
-static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
+static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -237,7 +237,7 @@ static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
return count;
}
-static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
+static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -265,7 +265,7 @@ static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
return count;
}
-static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
+static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -290,7 +290,7 @@ static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
return count;
}
-static ssize_t store_powersave_bias(struct gov_attr_set *attr_set,
+static ssize_t powersave_bias_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index bc7f7e6759bd..846bb3a78788 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1692,6 +1692,37 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
}
}
+static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
+{
+ cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
+
+ /*
+ * If this CPU generation doesn't call for a change in the
+ * balance_perf EPP, return.
+ */
+ if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
+ return;
+
+ /*
+ * If the power-up EPP is something other than the chipset default 0x80 and
+ * - is more performance oriented than 0x80 (the default balance_perf EPP)
+ * - but less performance oriented than the performance EPP
+ * then use it as the new balance_perf EPP.
+ */
+ if (cpudata->epp_default < HWP_EPP_BALANCE_PERFORMANCE &&
+ cpudata->epp_default > HWP_EPP_PERFORMANCE) {
+ epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = cpudata->epp_default;
+ return;
+ }
+
+ /*
+ * Use the hard-coded per-generation value to update the
+ * balance_perf and default EPP.
+ */
+ cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE];
+ intel_pstate_set_epp(cpudata, cpudata->epp_default);
+}
+
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
/* First disable HWP notification interrupt till we activate again */
@@ -1705,12 +1736,7 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
if (cpudata->epp_default >= 0)
return;
- if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) {
- cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
- } else {
- cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE];
- intel_pstate_set_epp(cpudata, cpudata->epp_default);
- }
+ intel_pstate_update_epp_defaults(cpudata);
}
static int atom_get_min_pstate(void)
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index c538a153ee82..3e000e1a75c6 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -668,9 +668,9 @@ static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
u32 nesting_level,
void *context, void **return_value)
{
- struct acpi_device *d;
+ struct acpi_device *d = acpi_fetch_acpi_dev(obj_handle);
- if (acpi_bus_get_device(obj_handle, &d))
+ if (!d)
return 0;
*return_value = acpi_driver_data(d);
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 12ab4014af71..d289036beff2 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1172,14 +1172,14 @@ static int powernowk8_init(void)
unsigned int i, supported_cpus = 0;
int ret;
+ if (!x86_match_cpu(powernow_k8_ids))
+ return -ENODEV;
+
if (boot_cpu_has(X86_FEATURE_HW_PSTATE)) {
__request_acpi_cpufreq();
return -ENODEV;
}
- if (!x86_match_cpu(powernow_k8_ids))
- return -ENODEV;
-
cpus_read_lock();
for_each_online_cpu(i) {
smp_call_function_single(i, check_supported_cpu, &ret, 1);
diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c
index fcc53215bac8..3a39a7f48b77 100644
--- a/drivers/cpuidle/cpuidle-haltpoll.c
+++ b/drivers/cpuidle/cpuidle-haltpoll.c
@@ -108,11 +108,11 @@ static int __init haltpoll_init(void)
if (boot_option_idle_override != IDLE_NO_OVERRIDE)
return -ENODEV;
- cpuidle_poll_state_init(drv);
-
if (!kvm_para_available() || !haltpoll_want())
return -ENODEV;
+ cpuidle_poll_state_init(drv);
+
ret = cpuidle_register_driver(drv);
if (ret < 0)
return ret;
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index 01e77913a414..beedf22cbe78 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -122,10 +122,6 @@ static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
if (ret <= 0)
return ret ? : -ENODEV;
- ret = qcom_scm_set_warm_boot_addr(cpu_resume_arm, cpumask_of(cpu));
- if (ret)
- return ret;
-
return cpuidle_register(&data->cpuidle_driver, NULL);
}
@@ -136,6 +132,10 @@ static int spm_cpuidle_drv_probe(struct platform_device *pdev)
if (!qcom_scm_is_available())
return -EPROBE_DEFER;
+ ret = qcom_scm_set_warm_boot_addr(cpu_resume_arm);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "set warm boot addr failed");
+
for_each_possible_cpu(cpu) {
ret = spm_cpuidle_register(&pdev->dev, cpu);
if (ret && ret != -ENODEV) {
@@ -155,6 +155,22 @@ static struct platform_driver spm_cpuidle_driver = {
},
};
+static bool __init qcom_spm_find_any_cpu(void)
+{
+ struct device_node *cpu_node, *saw_node;
+
+ for_each_of_cpu_node(cpu_node) {
+ saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
+ if (of_device_is_available(saw_node)) {
+ of_node_put(saw_node);
+ of_node_put(cpu_node);
+ return true;
+ }
+ of_node_put(saw_node);
+ }
+ return false;
+}
+
static int __init qcom_spm_cpuidle_init(void)
{
struct platform_device *pdev;
@@ -164,6 +180,10 @@ static int __init qcom_spm_cpuidle_init(void)
if (ret)
return ret;
+ /* Make sure at least one CPU is actually managed by the SPM */
+ if (!qcom_spm_find_any_cpu())
+ return 0;
+
pdev = platform_device_register_simple("qcom-spm-cpuidle",
-1, NULL, 0);
if (IS_ERR(pdev)) {
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4f705674f94f..7b2d138bc83e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -808,6 +808,16 @@ config CRYPTO_DEV_ZYNQMP_AES
accelerator. Select this if you want to use the ZynqMP module
for AES algorithms.
+config CRYPTO_DEV_ZYNQMP_SHA3
+ tristate "Support for Xilinx ZynqMP SHA3 hardware accelerator"
+ depends on ZYNQMP_FIRMWARE || COMPILE_TEST
+ select CRYPTO_SHA3
+ help
+ Xilinx ZynqMP has a SHA3 engine used for secure hash calculation.
+ This driver interfaces with the SHA3 hardware engine.
+ Select this if you want to use the ZynqMP module
+ for SHA3 hash computation.
+
source "drivers/crypto/chelsio/Kconfig"
source "drivers/crypto/virtio/Kconfig"
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 1fe5120eb966..0a4fff23d272 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -47,7 +47,7 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
-obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += xilinx/
+obj-y += xilinx/
obj-y += hisilicon/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
obj-y += keembay/
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 54ae8d16e493..35e3cadccac2 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -11,6 +11,7 @@
* You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/
+#include <linux/bottom_half.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -283,7 +284,9 @@ static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
flow = rctx->flow;
err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
+ local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
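The recurring local_bh_disable()/local_bh_enable() pairs in these crypto drivers exist because crypto_finalize_*_request() invokes completion callbacks that elsewhere run in softirq context and may take locks that are only safe with bottom halves disabled; when the engine worker calls it from process context, the explicit pair preserves that expectation. The pattern, as a schematic helper:

    /* Schematic: finalizing a request from the engine's process context */
    static void finalize_from_process_ctx(struct crypto_engine *engine,
                                          struct skcipher_request *req, int err)
    {
            local_bh_disable();
            crypto_finalize_skcipher_request(engine, req, err);
            local_bh_enable();
    }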
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 88194718a806..859b7522faaa 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -9,6 +9,7 @@
*
* You could find the datasheet in Documentation/arm/sunxi.rst
*/
+#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
@@ -414,6 +415,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
theend:
kfree(buf);
kfree(result);
+ local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 9ef1c85c4aaa..554e400d41ca 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -11,6 +11,7 @@
* You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/
+#include <linux/bottom_half.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -274,7 +275,9 @@ static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *ar
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
err = sun8i_ss_cipher(breq);
+ local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 80e89066dbd1..319fe3279a71 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -30,6 +30,8 @@
static const struct ss_variant ss_a80_variant = {
.alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES,
},
+ .alg_hash = { SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP,
+ },
.op_mode = { SS_OP_ECB, SS_OP_CBC,
},
.ss_clks = {
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index 3c073eb3db03..1a71ed49d233 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -9,6 +9,7 @@
*
* You could find the datasheet in Documentation/arm/sunxi.rst
*/
+#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
@@ -442,6 +443,8 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
theend:
kfree(pad);
kfree(result);
+ local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index c6865cbd334b..e79514fce731 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -265,7 +265,9 @@ static int meson_handle_cipher_request(struct crypto_engine *engine,
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
err = meson_cipher(breq);
+ local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index fe0558403191..f72c6b3e4ad8 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -2509,6 +2509,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
+ case 0x700:
case 0x500:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 1b13f601fd95..d1628112dacc 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2508,6 +2508,7 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
+ case 0x700:
case 0x510:
dd->caps.has_dma = 1;
dd->caps.has_dualbuff = 1;
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index e30786ec9f2d..9fd7b8e439d2 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1130,6 +1130,7 @@ static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xf00) {
+ case 0x800:
case 0x700:
dd->caps.has_dma = 1;
dd->caps.has_cfb_3keys = 1;
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
index 8ccc22075043..4b1bcf53f7ac 100644
--- a/drivers/crypto/caam/pdb.h
+++ b/drivers/crypto/caam/pdb.h
@@ -144,7 +144,7 @@ struct ipsec_encap_pdb {
};
u32 spi;
u32 ip_hdr_len;
- u32 ip_hdr[0];
+ u32 ip_hdr[];
};
/**
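The ip_hdr[0] → ip_hdr[] change in pdb.h replaces the GNU zero-length-array idiom with a C99 flexible array member, part of a tree-wide conversion: flexible arrays let the compiler and FORTIFY_SOURCE perform bounds checking, which [0] arrays defeat, and sizeof the struct is identical either way. A hedged sketch of how such a struct is usually allocated, using struct_size() from <linux/overflow.h> (the struct name here is illustrative):

#include <linux/overflow.h>
#include <linux/slab.h>

struct pdb_example {
	u32 ip_hdr_len;
	u32 ip_hdr[];			/* C99 flexible array member */
};

static struct pdb_example *pdb_alloc(u32 n_words)
{
	struct pdb_example *p;

	/* struct_size() = sizeof(*p) + n_words * sizeof(u32),
	 * saturating on overflow instead of wrapping */
	p = kzalloc(struct_size(p, ip_hdr, n_words), GFP_KERNEL);
	if (p)
		p->ip_hdr_len = n_words;

	return p;
}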
diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
index 2e9c0d214363..9e7308e39b30 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/bitmap.h>
#include <linux/workqueue.h>
#include "nitrox_csr.h"
@@ -120,6 +121,7 @@ static void pf2vf_resp_handler(struct work_struct *work)
void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev)
{
+ DECLARE_BITMAP(csr, BITS_PER_TYPE(u64));
struct nitrox_vfdev *vfdev;
struct pf2vf_work *pfwork;
u64 value, reg_addr;
@@ -129,7 +131,8 @@ void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev)
/* loop for VF(0..63) */
reg_addr = NPS_PKT_MBOX_INT_LO;
value = nitrox_read_csr(ndev, reg_addr);
- for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) {
+ bitmap_from_u64(csr, value);
+ for_each_set_bit(i, csr, BITS_PER_TYPE(csr)) {
/* get the vfno from ring */
vfno = RING_TO_VFNO(i, ndev->iov.max_vf_queues);
vfdev = ndev->iov.vfdev + vfno;
@@ -151,7 +154,8 @@ void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev)
/* loop for VF(64..127) */
reg_addr = NPS_PKT_MBOX_INT_HI;
value = nitrox_read_csr(ndev, reg_addr);
- for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) {
+ bitmap_from_u64(csr, value);
+ for_each_set_bit(i, csr, BITS_PER_TYPE(csr)) {
/* get the vfno from ring */
vfno = RING_TO_VFNO(i + 64, ndev->iov.max_vf_queues);
vfdev = ndev->iov.vfdev + vfno;
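The nitrox mailbox change fixes a 32-bit portability bug: casting a u64's address to unsigned long * and walking BITS_PER_LONG bits scans only the low 32 interrupt bits when unsigned long is 32 bits wide. bitmap_from_u64() copies the value into a correctly sized unsigned long array so for_each_set_bit() sees all 64 bits on any architecture. A minimal sketch; handle_vf() is a hypothetical per-bit handler:

#include <linux/bitmap.h>
#include <linux/bits.h>

static void dispatch_vf_bits(u64 csr)
{
	DECLARE_BITMAP(bits, BITS_PER_TYPE(u64));	/* one or two longs */
	int i;

	bitmap_from_u64(bits, csr);
	for_each_set_bit(i, bits, BITS_PER_TYPE(u64))
		handle_vf(i);			/* hypothetical handler */
}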
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
index ed174883c8e3..6bf088bcdd11 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
@@ -440,7 +440,7 @@ struct aqmq_command_s {
/**
* struct ctx_hdr - Book keeping data about the crypto context
* @pool: Pool used to allocate crypto context
- * @dma: Base DMA address of the cypto context
+ * @dma: Base DMA address of the crypto context
* @ctx_dma: Actual usable crypto context for NITROX
*/
struct ctx_hdr {
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
index 812b4ac9afd6..dc5b7bf7e1fd 100644
--- a/drivers/crypto/cavium/zip/zip_main.c
+++ b/drivers/crypto/cavium/zip/zip_main.c
@@ -55,6 +55,11 @@ static const struct pci_device_id zip_id_table[] = {
{ 0, }
};
+static void zip_debugfs_init(void);
+static void zip_debugfs_exit(void);
+static int zip_register_compression_device(void);
+static void zip_unregister_compression_device(void);
+
void zip_reg_write(u64 val, u64 __iomem *addr)
{
writeq(val, addr);
@@ -235,6 +240,15 @@ static int zip_init_hw(struct zip_device *zip)
return 0;
}
+static void zip_reset(struct zip_device *zip)
+{
+ union zip_cmd_ctl cmd_ctl;
+
+ cmd_ctl.u_reg64 = 0x0ull;
+ cmd_ctl.s.reset = 1; /* Forces ZIP cores to do reset */
+ zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL));
+}
+
static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
@@ -282,8 +296,21 @@ static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_release_regions;
+ /* Register with the Kernel Crypto Interface */
+ err = zip_register_compression_device();
+ if (err < 0) {
+ zip_err("ZIP: Kernel Crypto Registration failed\n");
+ goto err_register;
+ }
+
+ /* comp-decomp statistics are handled with debugfs interface */
+ zip_debugfs_init();
+
return 0;
+err_register:
+ zip_reset(zip);
+
err_release_regions:
if (zip->reg_base)
iounmap(zip->reg_base);
@@ -305,16 +332,17 @@ err_free_device:
static void zip_remove(struct pci_dev *pdev)
{
struct zip_device *zip = pci_get_drvdata(pdev);
- union zip_cmd_ctl cmd_ctl;
int q = 0;
if (!zip)
return;
+ zip_debugfs_exit();
+
+ zip_unregister_compression_device();
+
if (zip->reg_base) {
- cmd_ctl.u_reg64 = 0x0ull;
- cmd_ctl.s.reset = 1; /* Forces ZIP cores to do reset */
- zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL));
+ zip_reset(zip);
iounmap(zip->reg_base);
}
@@ -585,7 +613,7 @@ DEFINE_SHOW_ATTRIBUTE(zip_regs);
/* Root directory for thunderx_zip debugfs entry */
static struct dentry *zip_debugfs_root;
-static void __init zip_debugfs_init(void)
+static void zip_debugfs_init(void)
{
if (!debugfs_initialized())
return;
@@ -604,7 +632,7 @@ static void __init zip_debugfs_init(void)
}
-static void __exit zip_debugfs_exit(void)
+static void zip_debugfs_exit(void)
{
debugfs_remove_recursive(zip_debugfs_root);
}
@@ -615,48 +643,7 @@ static void __exit zip_debugfs_exit(void) { }
#endif
/* debugfs - end */
-static int __init zip_init_module(void)
-{
- int ret;
-
- zip_msg("%s\n", DRV_NAME);
-
- ret = pci_register_driver(&zip_driver);
- if (ret < 0) {
- zip_err("ZIP: pci_register_driver() failed\n");
- return ret;
- }
-
- /* Register with the Kernel Crypto Interface */
- ret = zip_register_compression_device();
- if (ret < 0) {
- zip_err("ZIP: Kernel Crypto Registration failed\n");
- goto err_pci_unregister;
- }
-
- /* comp-decomp statistics are handled with debugfs interface */
- zip_debugfs_init();
-
- return ret;
-
-err_pci_unregister:
- pci_unregister_driver(&zip_driver);
- return ret;
-}
-
-static void __exit zip_cleanup_module(void)
-{
- zip_debugfs_exit();
-
- /* Unregister from the kernel crypto interface */
- zip_unregister_compression_device();
-
- /* Unregister this driver for pci zip devices */
- pci_unregister_driver(&zip_driver);
-}
-
-module_init(zip_init_module);
-module_exit(zip_cleanup_module);
+module_pci_driver(zip_driver);
MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP Driver");
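module_pci_driver() removes the hand-rolled init/exit boilerplate deleted above; it expands to roughly the following, which is also why the crypto registration and debugfs setup had to migrate into probe/remove — after this change there is no other module-lifetime hook left for them:

/* Approximate expansion of module_pci_driver(zip_driver) */
static int __init zip_driver_init(void)
{
	return pci_register_driver(&zip_driver);
}
module_init(zip_driver_init);

static void __exit zip_driver_exit(void)
{
	pci_unregister_driver(&zip_driver);
}
module_exit(zip_driver_exit);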
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index e6dcd8cedd53..bed331953ff9 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -69,7 +69,6 @@ static int ccp_aes_crypt(struct skcipher_request *req, bool encrypt)
struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
struct scatterlist *iv_sg = NULL;
unsigned int iv_len = 0;
- int ret;
if (!ctx->u.aes.key_len)
return -EINVAL;
@@ -104,9 +103,7 @@ static int ccp_aes_crypt(struct skcipher_request *req, bool encrypt)
rctx->cmd.u.aes.src_len = req->cryptlen;
rctx->cmd.u.aes.dst = req->dst;
- ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
-
- return ret;
+ return ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
}
static int ccp_aes_encrypt(struct skcipher_request *req)
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index d718db224be4..7d4b4ad1db1f 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -632,6 +632,20 @@ static int ccp_terminate_all(struct dma_chan *dma_chan)
return 0;
}
+static void ccp_dma_release(struct ccp_device *ccp)
+{
+ struct ccp_dma_chan *chan;
+ struct dma_chan *dma_chan;
+ unsigned int i;
+
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ chan = ccp->ccp_dma_chan + i;
+ dma_chan = &chan->dma_chan;
+ tasklet_kill(&chan->cleanup_tasklet);
+ list_del_rcu(&dma_chan->device_node);
+ }
+}
+
int ccp_dmaengine_register(struct ccp_device *ccp)
{
struct ccp_dma_chan *chan;
@@ -736,6 +750,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
return 0;
err_reg:
+ ccp_dma_release(ccp);
kmem_cache_destroy(ccp->dma_desc_cache);
err_cache:
@@ -752,6 +767,7 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp)
return;
dma_async_device_unregister(dma_dev);
+ ccp_dma_release(ccp);
kmem_cache_destroy(ccp->dma_desc_cache);
kmem_cache_destroy(ccp->dma_cmd_cache);
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 8fd774a10edc..6ab93dfd478a 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -413,7 +413,7 @@ static int __sev_platform_init_locked(int *error)
{
struct psp_device *psp = psp_master;
struct sev_device *sev;
- int rc, psp_ret;
+ int rc, psp_ret = -1;
int (*init_function)(int *error);
if (!psp || !psp->sev_data)
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index a5e041d9d2cf..11e0278c8631 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -258,6 +258,13 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
{
int ret = 0;
+ if (!nbytes) {
+ *mapped_nents = 0;
+ *lbytes = 0;
+ *nents = 0;
+ return 0;
+ }
+
*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
if (*nents > max_sg_nents) {
*nents = 0;
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 78833491f534..309da6334a0a 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -257,8 +257,8 @@ static void cc_cipher_exit(struct crypto_tfm *tfm)
&ctx_p->user.key_dma_addr);
/* Free key buffer in context */
- kfree_sensitive(ctx_p->user.key);
dev_dbg(dev, "Free key buffer in context. key=@%p\n", ctx_p->user.key);
+ kfree_sensitive(ctx_p->user.key);
}
struct tdes_keys {
diff --git a/drivers/crypto/gemini/sl3516-ce-cipher.c b/drivers/crypto/gemini/sl3516-ce-cipher.c
index c1c2b1d86663..14d0d83d388d 100644
--- a/drivers/crypto/gemini/sl3516-ce-cipher.c
+++ b/drivers/crypto/gemini/sl3516-ce-cipher.c
@@ -23,8 +23,8 @@ static bool sl3516_ce_need_fallback(struct skcipher_request *areq)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sl3516_ce_dev *ce = op->ce;
- struct scatterlist *in_sg = areq->src;
- struct scatterlist *out_sg = areq->dst;
+ struct scatterlist *in_sg;
+ struct scatterlist *out_sg;
struct scatterlist *sg;
if (areq->cryptlen == 0 || areq->cryptlen % 16) {
@@ -264,7 +264,9 @@ static int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *a
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
err = sl3516_ce_cipher(breq);
+ local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index e0b4a1982ee9..9a0558ed82f9 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -4,7 +4,7 @@
#define __HISI_HPRE_H
#include <linux/list.h>
-#include "../qm.h"
+#include <linux/hisi_acc_qm.h>
#define HPRE_SQE_SIZE sizeof(struct hpre_sqe)
#define HPRE_PF_DEF_Q_NUM 64
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index ebfab3e14499..36ab30e9e654 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -68,8 +68,7 @@
#define HPRE_REG_RD_INTVRL_US 10
#define HPRE_REG_RD_TMOUT_US 1000
#define HPRE_DBGFS_VAL_MAX_LEN 20
-#define HPRE_PCI_DEVICE_ID 0xa258
-#define HPRE_PCI_VF_DEVICE_ID 0xa259
+#define PCI_DEVICE_ID_HUAWEI_HPRE_PF 0xa258
#define HPRE_QM_USR_CFG_MASK GENMASK(31, 1)
#define HPRE_QM_AXI_CFG_MASK GENMASK(15, 0)
#define HPRE_QM_VFG_AX_MASK GENMASK(7, 0)
@@ -111,8 +110,8 @@
static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID) },
- { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_VF_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_PF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },
{ 0, }
};
@@ -242,7 +241,7 @@ MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
- return q_num_set(val, kp, HPRE_PCI_DEVICE_ID);
+ return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
}
static const struct kernel_param_ops hpre_pf_q_num_ops = {
@@ -921,7 +920,7 @@ static int hpre_debugfs_init(struct hisi_qm *qm)
qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
hisi_qm_debug_init(qm);
- if (qm->pdev->device == HPRE_PCI_DEVICE_ID) {
+ if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) {
ret = hpre_ctrl_debug_init(qm);
if (ret)
goto failed_to_create;
@@ -958,7 +957,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->sqe_size = HPRE_SQE_SIZE;
qm->dev_name = hpre_name;
- qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ?
+ qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) ?
QM_HW_PF : QM_HW_VF;
if (qm->fun_type == QM_HW_PF) {
qm->qp_base = HPRE_PF_DEF_Q_BASE;
@@ -1191,6 +1190,12 @@ static struct pci_driver hpre_pci_driver = {
.driver.pm = &hpre_pm_ops,
};
+struct pci_driver *hisi_hpre_get_pf_driver(void)
+{
+ return &hpre_pci_driver;
+}
+EXPORT_SYMBOL_GPL(hisi_hpre_get_pf_driver);
+
static void hpre_register_debugfs(void)
{
if (!debugfs_initialized())
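The hisi_hpre_get_pf_driver() export (mirrored for SEC and ZIP further down) lets a consumer outside drivers/crypto — the HiSilicon ACC VFIO live-migration driver this series enables — resolve the PF's driver data starting from a VF. A hedged sketch of the consumer side, assuming the pci_iov_get_pf_drvdata() helper introduced alongside these exports:

/* VF-side consumer (sketch, not the exact in-tree code) */
static struct hisi_qm *get_pf_qm(struct pci_dev *vf_pdev)
{
	struct hisi_qm *pf_qm;

	pf_qm = pci_iov_get_pf_drvdata(vf_pdev, hisi_hpre_get_pf_driver());

	return IS_ERR(pf_qm) ? NULL : pf_qm;
}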
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index c5b84a5ea350..009132333d2b 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -15,7 +15,7 @@
#include <linux/uacce.h>
#include <linux/uaccess.h>
#include <uapi/misc/uacce/hisi_qm.h>
-#include "qm.h"
+#include <linux/hisi_acc_qm.h>
/* eq/aeq irq enable */
#define QM_VF_AEQ_INT_SOURCE 0x0
@@ -33,23 +33,6 @@
#define QM_ABNORMAL_EVENT_IRQ_VECTOR 3
/* mailbox */
-#define QM_MB_CMD_SQC 0x0
-#define QM_MB_CMD_CQC 0x1
-#define QM_MB_CMD_EQC 0x2
-#define QM_MB_CMD_AEQC 0x3
-#define QM_MB_CMD_SQC_BT 0x4
-#define QM_MB_CMD_CQC_BT 0x5
-#define QM_MB_CMD_SQC_VFT_V2 0x6
-#define QM_MB_CMD_STOP_QP 0x8
-#define QM_MB_CMD_SRC 0xc
-#define QM_MB_CMD_DST 0xd
-
-#define QM_MB_CMD_SEND_BASE 0x300
-#define QM_MB_EVENT_SHIFT 8
-#define QM_MB_BUSY_SHIFT 13
-#define QM_MB_OP_SHIFT 14
-#define QM_MB_CMD_DATA_ADDR_L 0x304
-#define QM_MB_CMD_DATA_ADDR_H 0x308
#define QM_MB_PING_ALL_VFS 0xffff
#define QM_MB_CMD_DATA_SHIFT 32
#define QM_MB_CMD_DATA_MASK GENMASK(31, 0)
@@ -103,19 +86,12 @@
#define QM_DB_CMD_SHIFT_V1 16
#define QM_DB_INDEX_SHIFT_V1 32
#define QM_DB_PRIORITY_SHIFT_V1 48
-#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000
-#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000
#define QM_QUE_ISO_CFG_V 0x0030
#define QM_PAGE_SIZE 0x0034
#define QM_QUE_ISO_EN 0x100154
#define QM_CAPBILITY 0x100158
#define QM_QP_NUN_MASK GENMASK(10, 0)
#define QM_QP_DB_INTERVAL 0x10000
-#define QM_QP_MAX_NUM_SHIFT 11
-#define QM_DB_CMD_SHIFT_V2 12
-#define QM_DB_RAND_SHIFT_V2 16
-#define QM_DB_INDEX_SHIFT_V2 32
-#define QM_DB_PRIORITY_SHIFT_V2 48
#define QM_MEM_START_INIT 0x100040
#define QM_MEM_INIT_DONE 0x100044
@@ -693,7 +669,7 @@ static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
}
/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
-static int qm_wait_mb_ready(struct hisi_qm *qm)
+int hisi_qm_wait_mb_ready(struct hisi_qm *qm)
{
u32 val;
@@ -701,6 +677,7 @@ static int qm_wait_mb_ready(struct hisi_qm *qm)
val, !((val >> QM_MB_BUSY_SHIFT) &
0x1), POLL_PERIOD, POLL_TIMEOUT);
}
+EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready);
/* 128 bit should be written to hardware at one time to trigger a mailbox */
static void qm_mb_write(struct hisi_qm *qm, const void *src)
@@ -726,14 +703,14 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src)
static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
{
- if (unlikely(qm_wait_mb_ready(qm))) {
+ if (unlikely(hisi_qm_wait_mb_ready(qm))) {
dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
goto mb_busy;
}
qm_mb_write(qm, mailbox);
- if (unlikely(qm_wait_mb_ready(qm))) {
+ if (unlikely(hisi_qm_wait_mb_ready(qm))) {
dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
goto mb_busy;
}
@@ -745,8 +722,8 @@ mb_busy:
return -EBUSY;
}
-static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
- bool op)
+int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
+ bool op)
{
struct qm_mailbox mailbox;
int ret;
@@ -762,6 +739,7 @@ static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
return ret;
}
+EXPORT_SYMBOL_GPL(hisi_qm_mb);
static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
@@ -1351,7 +1329,7 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
u64 sqc_vft;
int ret;
- ret = qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
+ ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
if (ret)
return ret;
@@ -1725,12 +1703,12 @@ static int dump_show(struct hisi_qm *qm, void *info,
static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
{
- return qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
+ return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
}
static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
{
- return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
+ return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
}
static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
@@ -1842,7 +1820,7 @@ static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
if (IS_ERR(xeqc))
return PTR_ERR(xeqc);
- ret = qm_mb(qm, cmd, xeqc_dma, 0, 1);
+ ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1);
if (ret)
goto err_free_ctx;
@@ -2495,7 +2473,7 @@ unlock:
static int qm_stop_qp(struct hisi_qp *qp)
{
- return qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
+ return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
}
static int qm_set_msi(struct hisi_qm *qm, bool set)
@@ -2763,7 +2741,7 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
return -ENOMEM;
}
- ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
+ ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
kfree(sqc);
@@ -2804,7 +2782,7 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
return -ENOMEM;
}
- ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
+ ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
kfree(cqc);
@@ -3514,6 +3492,12 @@ static void hisi_qm_pci_uninit(struct hisi_qm *qm)
pci_disable_device(pdev);
}
+static void hisi_qm_set_state(struct hisi_qm *qm, u8 state)
+{
+ if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF)
+ writel(state, qm->io_base + QM_VF_STATE);
+}
+
/**
* hisi_qm_uninit() - Uninitialize qm.
* @qm: The qm needed uninit.
@@ -3542,6 +3526,7 @@ void hisi_qm_uninit(struct hisi_qm *qm)
dma_free_coherent(dev, qm->qdma.size,
qm->qdma.va, qm->qdma.dma);
}
+ hisi_qm_set_state(qm, QM_NOT_READY);
up_write(&qm->qps_lock);
qm_irq_unregister(qm);
@@ -3655,7 +3640,7 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
return -ENOMEM;
}
- ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
+ ret = hisi_qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
kfree(eqc);
@@ -3684,7 +3669,7 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
return -ENOMEM;
}
- ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
+ ret = hisi_qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
kfree(aeqc);
@@ -3723,11 +3708,11 @@ static int __hisi_qm_start(struct hisi_qm *qm)
if (ret)
return ret;
- ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
+ ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
if (ret)
return ret;
- ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
+ ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
if (ret)
return ret;
@@ -3767,6 +3752,7 @@ int hisi_qm_start(struct hisi_qm *qm)
if (!ret)
atomic_set(&qm->status.flags, QM_START);
+ hisi_qm_set_state(qm, QM_READY);
err_unlock:
up_write(&qm->qps_lock);
return ret;
@@ -3840,7 +3826,7 @@ static void qm_clear_queues(struct hisi_qm *qm)
for (i = 0; i < qm->qp_num; i++) {
qp = &qm->qp_array[i];
- if (qp->is_resetting)
+ if (qp->is_in_kernel && qp->is_resetting)
memset(qp->qdma.va, 0, qp->qdma.size);
}
@@ -4295,7 +4281,7 @@ static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
static int qm_vf_read_qos(struct hisi_qm *qm)
{
int cnt = 0;
- int ret;
+ int ret = -EINVAL;
/* reset mailbox qos val */
qm->mb_qos = 0;
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
deleted file mode 100644
index 3068093229a5..000000000000
--- a/drivers/crypto/hisilicon/qm.h
+++ /dev/null
@@ -1,441 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2019 HiSilicon Limited. */
-#ifndef HISI_ACC_QM_H
-#define HISI_ACC_QM_H
-
-#include <linux/bitfield.h>
-#include <linux/debugfs.h>
-#include <linux/iopoll.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#define QM_QNUM_V1 4096
-#define QM_QNUM_V2 1024
-#define QM_MAX_VFS_NUM_V2 63
-
-/* qm user domain */
-#define QM_ARUSER_M_CFG_1 0x100088
-#define AXUSER_SNOOP_ENABLE BIT(30)
-#define AXUSER_CMD_TYPE GENMASK(14, 12)
-#define AXUSER_CMD_SMMU_NORMAL 1
-#define AXUSER_NS BIT(6)
-#define AXUSER_NO BIT(5)
-#define AXUSER_FP BIT(4)
-#define AXUSER_SSV BIT(0)
-#define AXUSER_BASE (AXUSER_SNOOP_ENABLE | \
- FIELD_PREP(AXUSER_CMD_TYPE, \
- AXUSER_CMD_SMMU_NORMAL) | \
- AXUSER_NS | AXUSER_NO | AXUSER_FP)
-#define QM_ARUSER_M_CFG_ENABLE 0x100090
-#define ARUSER_M_CFG_ENABLE 0xfffffffe
-#define QM_AWUSER_M_CFG_1 0x100098
-#define QM_AWUSER_M_CFG_ENABLE 0x1000a0
-#define AWUSER_M_CFG_ENABLE 0xfffffffe
-#define QM_WUSER_M_CFG_ENABLE 0x1000a8
-#define WUSER_M_CFG_ENABLE 0xffffffff
-
-/* qm cache */
-#define QM_CACHE_CTL 0x100050
-#define SQC_CACHE_ENABLE BIT(0)
-#define CQC_CACHE_ENABLE BIT(1)
-#define SQC_CACHE_WB_ENABLE BIT(4)
-#define SQC_CACHE_WB_THRD GENMASK(10, 5)
-#define CQC_CACHE_WB_ENABLE BIT(11)
-#define CQC_CACHE_WB_THRD GENMASK(17, 12)
-#define QM_AXI_M_CFG 0x1000ac
-#define AXI_M_CFG 0xffff
-#define QM_AXI_M_CFG_ENABLE 0x1000b0
-#define AM_CFG_SINGLE_PORT_MAX_TRANS 0x300014
-#define AXI_M_CFG_ENABLE 0xffffffff
-#define QM_PEH_AXUSER_CFG 0x1000cc
-#define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0
-#define PEH_AXUSER_CFG 0x401001
-#define PEH_AXUSER_CFG_ENABLE 0xffffffff
-
-#define QM_AXI_RRESP BIT(0)
-#define QM_AXI_BRESP BIT(1)
-#define QM_ECC_MBIT BIT(2)
-#define QM_ECC_1BIT BIT(3)
-#define QM_ACC_GET_TASK_TIMEOUT BIT(4)
-#define QM_ACC_DO_TASK_TIMEOUT BIT(5)
-#define QM_ACC_WB_NOT_READY_TIMEOUT BIT(6)
-#define QM_SQ_CQ_VF_INVALID BIT(7)
-#define QM_CQ_VF_INVALID BIT(8)
-#define QM_SQ_VF_INVALID BIT(9)
-#define QM_DB_TIMEOUT BIT(10)
-#define QM_OF_FIFO_OF BIT(11)
-#define QM_DB_RANDOM_INVALID BIT(12)
-#define QM_MAILBOX_TIMEOUT BIT(13)
-#define QM_FLR_TIMEOUT BIT(14)
-
-#define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \
- QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \
- QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID | \
- QM_MAILBOX_TIMEOUT | QM_FLR_TIMEOUT)
-#define QM_BASE_CE QM_ECC_1BIT
-
-#define QM_Q_DEPTH 1024
-#define QM_MIN_QNUM 2
-#define HISI_ACC_SGL_SGE_NR_MAX 255
-#define QM_SHAPER_CFG 0x100164
-#define QM_SHAPER_ENABLE BIT(30)
-#define QM_SHAPER_TYPE1_OFFSET 10
-
-/* page number for queue file region */
-#define QM_DOORBELL_PAGE_NR 1
-
-/* uacce mode of the driver */
-#define UACCE_MODE_NOUACCE 0 /* don't use uacce */
-#define UACCE_MODE_SVA 1 /* use uacce sva mode */
-#define UACCE_MODE_DESC "0(default) means only register to crypto, 1 means both register to crypto and uacce"
-
-enum qm_stop_reason {
- QM_NORMAL,
- QM_SOFT_RESET,
- QM_FLR,
-};
-
-enum qm_state {
- QM_INIT = 0,
- QM_START,
- QM_CLOSE,
- QM_STOP,
-};
-
-enum qp_state {
- QP_INIT = 1,
- QP_START,
- QP_STOP,
- QP_CLOSE,
-};
-
-enum qm_hw_ver {
- QM_HW_UNKNOWN = -1,
- QM_HW_V1 = 0x20,
- QM_HW_V2 = 0x21,
- QM_HW_V3 = 0x30,
-};
-
-enum qm_fun_type {
- QM_HW_PF,
- QM_HW_VF,
-};
-
-enum qm_debug_file {
- CURRENT_QM,
- CURRENT_Q,
- CLEAR_ENABLE,
- DEBUG_FILE_NUM,
-};
-
-struct qm_dfx {
- atomic64_t err_irq_cnt;
- atomic64_t aeq_irq_cnt;
- atomic64_t abnormal_irq_cnt;
- atomic64_t create_qp_err_cnt;
- atomic64_t mb_err_cnt;
-};
-
-struct debugfs_file {
- enum qm_debug_file index;
- struct mutex lock;
- struct qm_debug *debug;
-};
-
-struct qm_debug {
- u32 curr_qm_qp_num;
- u32 sqe_mask_offset;
- u32 sqe_mask_len;
- struct qm_dfx dfx;
- struct dentry *debug_root;
- struct dentry *qm_d;
- struct debugfs_file files[DEBUG_FILE_NUM];
-};
-
-struct qm_shaper_factor {
- u32 func_qos;
- u64 cir_b;
- u64 cir_u;
- u64 cir_s;
- u64 cbs_s;
-};
-
-struct qm_dma {
- void *va;
- dma_addr_t dma;
- size_t size;
-};
-
-struct hisi_qm_status {
- u32 eq_head;
- bool eqc_phase;
- u32 aeq_head;
- bool aeqc_phase;
- atomic_t flags;
- int stop_reason;
-};
-
-struct hisi_qm;
-
-struct hisi_qm_err_info {
- char *acpi_rst;
- u32 msi_wr_port;
- u32 ecc_2bits_mask;
- u32 dev_ce_mask;
- u32 ce;
- u32 nfe;
- u32 fe;
-};
-
-struct hisi_qm_err_status {
- u32 is_qm_ecc_mbit;
- u32 is_dev_ecc_mbit;
-};
-
-struct hisi_qm_err_ini {
- int (*hw_init)(struct hisi_qm *qm);
- void (*hw_err_enable)(struct hisi_qm *qm);
- void (*hw_err_disable)(struct hisi_qm *qm);
- u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
- void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
- void (*open_axi_master_ooo)(struct hisi_qm *qm);
- void (*close_axi_master_ooo)(struct hisi_qm *qm);
- void (*open_sva_prefetch)(struct hisi_qm *qm);
- void (*close_sva_prefetch)(struct hisi_qm *qm);
- void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
- void (*err_info_init)(struct hisi_qm *qm);
-};
-
-struct hisi_qm_list {
- struct mutex lock;
- struct list_head list;
- int (*register_to_crypto)(struct hisi_qm *qm);
- void (*unregister_from_crypto)(struct hisi_qm *qm);
-};
-
-struct hisi_qm {
- enum qm_hw_ver ver;
- enum qm_fun_type fun_type;
- const char *dev_name;
- struct pci_dev *pdev;
- void __iomem *io_base;
- void __iomem *db_io_base;
- u32 sqe_size;
- u32 qp_base;
- u32 qp_num;
- u32 qp_in_used;
- u32 ctrl_qp_num;
- u32 max_qp_num;
- u32 vfs_num;
- u32 db_interval;
- struct list_head list;
- struct hisi_qm_list *qm_list;
-
- struct qm_dma qdma;
- struct qm_sqc *sqc;
- struct qm_cqc *cqc;
- struct qm_eqe *eqe;
- struct qm_aeqe *aeqe;
- dma_addr_t sqc_dma;
- dma_addr_t cqc_dma;
- dma_addr_t eqe_dma;
- dma_addr_t aeqe_dma;
-
- struct hisi_qm_status status;
- const struct hisi_qm_err_ini *err_ini;
- struct hisi_qm_err_info err_info;
- struct hisi_qm_err_status err_status;
- unsigned long misc_ctl; /* driver removing and reset sched */
-
- struct rw_semaphore qps_lock;
- struct idr qp_idr;
- struct hisi_qp *qp_array;
-
- struct mutex mailbox_lock;
-
- const struct hisi_qm_hw_ops *ops;
-
- struct qm_debug debug;
-
- u32 error_mask;
-
- struct workqueue_struct *wq;
- struct work_struct work;
- struct work_struct rst_work;
- struct work_struct cmd_process;
-
- const char *algs;
- bool use_sva;
- bool is_frozen;
-
- /* doorbell isolation enable */
- bool use_db_isolation;
- resource_size_t phys_base;
- resource_size_t db_phys_base;
- struct uacce_device *uacce;
- int mode;
- struct qm_shaper_factor *factor;
- u32 mb_qos;
- u32 type_rate;
-};
-
-struct hisi_qp_status {
- atomic_t used;
- u16 sq_tail;
- u16 cq_head;
- bool cqc_phase;
- atomic_t flags;
-};
-
-struct hisi_qp_ops {
- int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
-};
-
-struct hisi_qp {
- u32 qp_id;
- u8 alg_type;
- u8 req_type;
-
- struct qm_dma qdma;
- void *sqe;
- struct qm_cqe *cqe;
- dma_addr_t sqe_dma;
- dma_addr_t cqe_dma;
-
- struct hisi_qp_status qp_status;
- struct hisi_qp_ops *hw_ops;
- void *qp_ctx;
- void (*req_cb)(struct hisi_qp *qp, void *data);
- void (*event_cb)(struct hisi_qp *qp);
-
- struct hisi_qm *qm;
- bool is_resetting;
- bool is_in_kernel;
- u16 pasid;
- struct uacce_queue *uacce_q;
-};
-
-static inline int q_num_set(const char *val, const struct kernel_param *kp,
- unsigned int device)
-{
- struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
- device, NULL);
- u32 n, q_num;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- if (!pdev) {
- q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
- pr_info("No device found currently, suppose queue number is %u\n",
- q_num);
- } else {
- if (pdev->revision == QM_HW_V1)
- q_num = QM_QNUM_V1;
- else
- q_num = QM_QNUM_V2;
- }
-
- ret = kstrtou32(val, 10, &n);
- if (ret || n < QM_MIN_QNUM || n > q_num)
- return -EINVAL;
-
- return param_set_int(val, kp);
-}
-
-static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
-{
- u32 n;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- ret = kstrtou32(val, 10, &n);
- if (ret < 0)
- return ret;
-
- if (n > QM_MAX_VFS_NUM_V2)
- return -EINVAL;
-
- return param_set_int(val, kp);
-}
-
-static inline int mode_set(const char *val, const struct kernel_param *kp)
-{
- u32 n;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || (n != UACCE_MODE_SVA &&
- n != UACCE_MODE_NOUACCE))
- return -EINVAL;
-
- return param_set_int(val, kp);
-}
-
-static inline int uacce_mode_set(const char *val, const struct kernel_param *kp)
-{
- return mode_set(val, kp);
-}
-
-static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
-{
- INIT_LIST_HEAD(&qm_list->list);
- mutex_init(&qm_list->lock);
-}
-
-int hisi_qm_init(struct hisi_qm *qm);
-void hisi_qm_uninit(struct hisi_qm *qm);
-int hisi_qm_start(struct hisi_qm *qm);
-int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
-struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type);
-int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
-int hisi_qm_stop_qp(struct hisi_qp *qp);
-void hisi_qm_release_qp(struct hisi_qp *qp);
-int hisi_qp_send(struct hisi_qp *qp, const void *msg);
-int hisi_qm_get_free_qp_num(struct hisi_qm *qm);
-int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number);
-void hisi_qm_debug_init(struct hisi_qm *qm);
-enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
-void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
-int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
-int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen);
-int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
-void hisi_qm_dev_err_init(struct hisi_qm *qm);
-void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
-pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
- pci_channel_state_t state);
-pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
-void hisi_qm_reset_prepare(struct pci_dev *pdev);
-void hisi_qm_reset_done(struct pci_dev *pdev);
-
-struct hisi_acc_sgl_pool;
-struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
- struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
- u32 index, dma_addr_t *hw_sgl_dma);
-void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
- struct hisi_acc_hw_sgl *hw_sgl);
-struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
- u32 count, u32 sge_nr);
-void hisi_acc_free_sgl_pool(struct device *dev,
- struct hisi_acc_sgl_pool *pool);
-int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
- u8 alg_type, int node, struct hisi_qp **qps);
-void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
-void hisi_qm_dev_shutdown(struct pci_dev *pdev);
-void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
-int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
-void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
-int hisi_qm_resume(struct device *dev);
-int hisi_qm_suspend(struct device *dev);
-void hisi_qm_pm_uninit(struct hisi_qm *qm);
-void hisi_qm_pm_init(struct hisi_qm *qm);
-int hisi_qm_get_dfx_access(struct hisi_qm *qm);
-void hisi_qm_put_dfx_access(struct hisi_qm *qm);
-void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
-#endif
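The deleted header is not lost: it moves essentially verbatim to include/linux/hisi_acc_qm.h (its include guard, HISI_ACC_QM_H, was already named for that location), so code outside drivers/crypto can reach the QM interfaces. In-tree users only need their include switched, as the sec/zip/hpre/sgl hunks in this diff do:

-#include "../qm.h"
+#include <linux/hisi_acc_qm.h>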
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index d97cf02b1df7..c2e9b01187a7 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -4,7 +4,7 @@
#ifndef __HISI_SEC_V2_H
#define __HISI_SEC_V2_H
-#include "../qm.h"
+#include <linux/hisi_acc_qm.h>
#include "sec_crypto.h"
/* Algorithm resource per hardware SEC queue */
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 6a45bd23b363..a91635c348b5 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -42,6 +42,8 @@
#define SEC_DE_OFFSET_V3 9
#define SEC_SCENE_OFFSET_V3 5
#define SEC_CKEY_OFFSET_V3 13
+#define SEC_CTR_CNT_OFFSET 25
+#define SEC_CTR_CNT_ROLLOVER 2
#define SEC_SRC_SGL_OFFSET_V3 11
#define SEC_DST_SGL_OFFSET_V3 14
#define SEC_CALG_OFFSET_V3 4
@@ -63,6 +65,7 @@
#define SEC_AUTH_CIPHER 0x1
#define SEC_MAX_MAC_LEN 64
#define SEC_MAX_AAD_LEN 65535
+#define SEC_MAX_CCM_AAD_LEN 65279
#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
#define SEC_PBUF_SZ 512
@@ -237,7 +240,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
if (unlikely(type != type_supported)) {
atomic64_inc(&dfx->err_bd_cnt);
- pr_err("err bd type [%d]\n", type);
+ pr_err("err bd type [%u]\n", type);
return;
}
@@ -641,13 +644,15 @@ static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
c_ctx->fallback = false;
+
+ /* Currently, only XTS mode needs a fallback tfm when using a 192-bit key */
if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
return 0;
c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(c_ctx->fbtfm)) {
- pr_err("failed to alloc fallback tfm!\n");
+ pr_err("failed to alloc xts mode fallback tfm!\n");
return PTR_ERR(c_ctx->fbtfm);
}
@@ -808,7 +813,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
memcpy(c_ctx->c_key, key, keylen);
- if (c_ctx->fallback) {
+ if (c_ctx->fallback && c_ctx->fbtfm) {
ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
if (ret) {
dev_err(dev, "failed to set fallback skcipher key!\n");
@@ -1300,6 +1305,10 @@ static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
cipher = SEC_CIPHER_DEC;
sec_sqe3->c_icv_key |= cpu_to_le16(cipher);
+ /* Set the CTR counter mode to 128-bit rollover */
+ sec_sqe3->auth_mac_key = cpu_to_le32((u32)SEC_CTR_CNT_ROLLOVER <<
+ SEC_CTR_CNT_OFFSET);
+
if (req->use_pbuf) {
bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3;
bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3;
@@ -1614,7 +1623,7 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
} else {
- sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
+ sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2);
sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
}
sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen);
@@ -2032,13 +2041,12 @@ static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
struct skcipher_request *sreq, bool encrypt)
{
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
struct device *dev = ctx->dev;
int ret;
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
-
if (!c_ctx->fbtfm) {
- dev_err(dev, "failed to check fallback tfm\n");
+ dev_err_ratelimited(dev, "the soft tfm isn't supported in the current system.\n");
return -EINVAL;
}
@@ -2219,6 +2227,10 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
}
if (c_mode == SEC_CMODE_CCM) {
+ if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) {
+ dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n");
+ return -EINVAL;
+ }
ret = aead_iv_demension_check(req);
if (ret) {
dev_err(dev, "aead input iv param error!\n");
@@ -2256,7 +2268,6 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
if (ctx->sec->qm.ver == QM_HW_V2) {
if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
req->cryptlen <= authsize))) {
- dev_err(dev, "Kunpeng920 not support 0 length!\n");
ctx->a_ctx.fallback = true;
return -EINVAL;
}
@@ -2284,9 +2295,10 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
struct aead_request *aead_req,
bool encrypt)
{
- struct aead_request *subreq = aead_request_ctx(aead_req);
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
struct device *dev = ctx->dev;
+ struct aead_request *subreq;
+ int ret;
/* Kunpeng920 aead mode not support input 0 size */
if (!a_ctx->fallback_aead_tfm) {
@@ -2294,6 +2306,10 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
return -EINVAL;
}
+ subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
+ if (!subreq)
+ return -ENOMEM;
+
aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
aead_request_set_callback(subreq, aead_req->base.flags,
aead_req->base.complete, aead_req->base.data);
@@ -2301,8 +2317,13 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
aead_req->cryptlen, aead_req->iv);
aead_request_set_ad(subreq, aead_req->assoclen);
- return encrypt ? crypto_aead_encrypt(subreq) :
- crypto_aead_decrypt(subreq);
+ if (encrypt)
+ ret = crypto_aead_encrypt(subreq);
+ else
+ ret = crypto_aead_decrypt(subreq);
+ aead_request_free(subreq);
+
+ return ret;
}
static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
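The reworked AEAD fallback above stops reusing aead_request_ctx(aead_req) as the sub-request: that scratch area is sized for this driver's reqsize, not for whatever the fallback tfm needs, so borrowing it can overflow. aead_request_alloc() instead sizes the request for the tfm it is given — roughly:

/* Roughly what aead_request_alloc(tfm, gfp) does internally */
static struct aead_request *aead_req_alloc_sketch(struct crypto_aead *tfm,
						  gfp_t gfp)
{
	struct aead_request *req;

	req = kzalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp);
	if (req)
		aead_request_set_tfm(req, tfm);

	return req;
}

The matching aead_request_free() after the encrypt/decrypt call is what keeps the per-request GFP_KERNEL allocation leak-free.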
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index 9f71c358a6d3..5e039b50e9d4 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -354,8 +354,10 @@ struct sec_sqe3 {
* akey_len: 9~14 bits
* a_alg: 15~20 bits
* key_sel: 21~24 bits
- * updata_key: 25 bits
- * reserved: 26~31 bits
+ * ctr_count_mode/sm4_xts: 25~26 bits
+ * sva_prefetch: 27 bits
+ * key_wrap_num: 28~30 bits
+ * update_key: 31 bits
*/
__le32 auth_mac_key;
__le32 salt;
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 26d3ab1d308b..92fae706bdb2 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -20,8 +20,7 @@
#define SEC_VF_NUM 63
#define SEC_QUEUE_NUM_V1 4096
-#define SEC_PF_PCI_DEVICE_ID 0xa255
-#define SEC_VF_PCI_DEVICE_ID 0xa256
+#define PCI_DEVICE_ID_HUAWEI_SEC_PF 0xa255
#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF
#define SEC_BD_ERR_CHK_EN1 0x7ffff7fd
@@ -90,6 +89,10 @@
SEC_USER1_WB_DATA_SSV)
#define SEC_USER1_SMMU_SVA (SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET)
#define SEC_USER1_SMMU_MASK (~SEC_USER1_SVA_SET)
+#define SEC_INTERFACE_USER_CTRL0_REG_V3 0x302220
+#define SEC_INTERFACE_USER_CTRL1_REG_V3 0x302224
+#define SEC_USER1_SMMU_NORMAL_V3 (BIT(23) | BIT(17) | BIT(11) | BIT(5))
+#define SEC_USER1_SMMU_MASK_V3 0xFF79E79E
#define SEC_CORE_INT_STATUS_M_ECC BIT(2)
#define SEC_PREFETCH_CFG 0x301130
@@ -225,7 +228,7 @@ static const struct debugfs_reg32 sec_dfx_regs[] = {
static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
- return q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID);
+ return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
}
static const struct kernel_param_ops sec_pf_q_num_ops = {
@@ -313,8 +316,8 @@ module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
static const struct pci_device_id sec_dev_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
- { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_PF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);
@@ -335,6 +338,41 @@ static void sec_set_endian(struct hisi_qm *qm)
writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
}
+static void sec_engine_sva_config(struct hisi_qm *qm)
+{
+ u32 reg;
+
+ if (qm->ver > QM_HW_V2) {
+ reg = readl_relaxed(qm->io_base +
+ SEC_INTERFACE_USER_CTRL0_REG_V3);
+ reg |= SEC_USER0_SMMU_NORMAL;
+ writel_relaxed(reg, qm->io_base +
+ SEC_INTERFACE_USER_CTRL0_REG_V3);
+
+ reg = readl_relaxed(qm->io_base +
+ SEC_INTERFACE_USER_CTRL1_REG_V3);
+ reg &= SEC_USER1_SMMU_MASK_V3;
+ reg |= SEC_USER1_SMMU_NORMAL_V3;
+ writel_relaxed(reg, qm->io_base +
+ SEC_INTERFACE_USER_CTRL1_REG_V3);
+ } else {
+ reg = readl_relaxed(qm->io_base +
+ SEC_INTERFACE_USER_CTRL0_REG);
+ reg |= SEC_USER0_SMMU_NORMAL;
+ writel_relaxed(reg, qm->io_base +
+ SEC_INTERFACE_USER_CTRL0_REG);
+ reg = readl_relaxed(qm->io_base +
+ SEC_INTERFACE_USER_CTRL1_REG);
+ reg &= SEC_USER1_SMMU_MASK;
+ if (qm->use_sva)
+ reg |= SEC_USER1_SMMU_SVA;
+ else
+ reg |= SEC_USER1_SMMU_NORMAL;
+ writel_relaxed(reg, qm->io_base +
+ SEC_INTERFACE_USER_CTRL1_REG);
+ }
+}
+
static void sec_open_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
@@ -426,26 +464,18 @@ static int sec_engine_init(struct hisi_qm *qm)
reg |= (0x1 << SEC_TRNG_EN_SHIFT);
writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
- reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL0_REG);
- reg |= SEC_USER0_SMMU_NORMAL;
- writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL0_REG);
-
- reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);
- reg &= SEC_USER1_SMMU_MASK;
- if (qm->use_sva && qm->ver == QM_HW_V2)
- reg |= SEC_USER1_SMMU_SVA;
- else
- reg |= SEC_USER1_SMMU_NORMAL;
- writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);
+ sec_engine_sva_config(qm);
writel(SEC_SINGLE_PORT_MAX_TRANS,
qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);
writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG);
- /* Enable sm4 extra mode, as ctr/ecb */
- writel_relaxed(SEC_BD_ERR_CHK_EN0,
- qm->io_base + SEC_BD_ERR_CHK_EN_REG0);
+ /* For HW V2, enable sm4 extra mode (ctr/ecb) */
+ if (qm->ver < QM_HW_V3)
+ writel_relaxed(SEC_BD_ERR_CHK_EN0,
+ qm->io_base + SEC_BD_ERR_CHK_EN_REG0);
+
/* Enable sm4 xts mode multiple iv */
writel_relaxed(SEC_BD_ERR_CHK_EN1,
qm->io_base + SEC_BD_ERR_CHK_EN_REG1);
@@ -717,7 +747,7 @@ static int sec_core_debug_init(struct hisi_qm *qm)
regset->base = qm->io_base;
regset->dev = dev;
- if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID)
+ if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF)
debugfs_create_file("regs", 0444, tmp_d, regset, &sec_regs_fops);
for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
@@ -735,7 +765,7 @@ static int sec_debug_init(struct hisi_qm *qm)
struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
int i;
- if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
+ if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) {
for (i = SEC_CLEAR_ENABLE; i < SEC_DEBUG_FILE_NUM; i++) {
spin_lock_init(&sec->debug.files[i].lock);
sec->debug.files[i].index = i;
@@ -877,7 +907,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
- qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ?
+ qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) ?
QM_HW_PF : QM_HW_VF;
if (qm->fun_type == QM_HW_PF) {
qm->qp_base = SEC_PF_DEF_Q_BASE;
@@ -1089,6 +1119,12 @@ static struct pci_driver sec_pci_driver = {
.driver.pm = &sec_pm_ops,
};
+struct pci_driver *hisi_sec_get_pf_driver(void)
+{
+ return &sec_pci_driver;
+}
+EXPORT_SYMBOL_GPL(hisi_sec_get_pf_driver);
+
static void sec_register_debugfs(void)
{
if (!debugfs_initialized())
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 057273769f26..f7efc02b065f 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <linux/dma-mapping.h>
+#include <linux/hisi_acc_qm.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include "qm.h"
#define HISI_ACC_SGL_SGE_NR_MIN 1
#define HISI_ACC_SGL_NR_MAX 256
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 517fdbdff3ea..3dfd3bac5a33 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -7,7 +7,7 @@
#define pr_fmt(fmt) "hisi_zip: " fmt
#include <linux/list.h>
-#include "../qm.h"
+#include <linux/hisi_acc_qm.h>
enum hisi_zip_error_type {
/* negative compression */
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 678f8b58ec42..4534e1e107d1 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -15,8 +15,7 @@
#include <linux/uacce.h>
#include "zip.h"
-#define PCI_DEVICE_ID_ZIP_PF 0xa250
-#define PCI_DEVICE_ID_ZIP_VF 0xa251
+#define PCI_DEVICE_ID_HUAWEI_ZIP_PF 0xa250
#define HZIP_QUEUE_NUM_V1 4096
@@ -246,7 +245,7 @@ MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
- return q_num_set(val, kp, PCI_DEVICE_ID_ZIP_PF);
+ return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF);
}
static const struct kernel_param_ops pf_q_num_ops = {
@@ -268,8 +267,8 @@ module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
static const struct pci_device_id hisi_zip_dev_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) },
- { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_VF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_PF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);
@@ -838,7 +837,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->sqe_size = HZIP_SQE_SIZE;
qm->dev_name = hisi_zip_name;
- qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ?
+ qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_ZIP_PF) ?
QM_HW_PF : QM_HW_VF;
if (qm->fun_type == QM_HW_PF) {
qm->qp_base = HZIP_PF_DEF_Q_BASE;
@@ -1013,6 +1012,12 @@ static struct pci_driver hisi_zip_pci_driver = {
.driver.pm = &hisi_zip_pm_ops,
};
+struct pci_driver *hisi_zip_get_pf_driver(void)
+{
+ return &hisi_zip_pci_driver;
+}
+EXPORT_SYMBOL_GPL(hisi_zip_get_pf_driver);
+
static void hisi_zip_register_debugfs(void)
{
if (!debugfs_initialized())
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 98730aab287c..d39a386b31ac 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -33,7 +33,6 @@
/* Intermittent includes, delete this after v5.14-rc1 */
#include <linux/soc/ixp4xx/cpu.h>
-#include <mach/ixp4xx-regs.h>
#define MAX_KEYLEN 32
diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig
index 9125199f1702..a48591af12d0 100644
--- a/drivers/crypto/marvell/Kconfig
+++ b/drivers/crypto/marvell/Kconfig
@@ -47,6 +47,7 @@ config CRYPTO_DEV_OCTEONTX2_CPT
select CRYPTO_SKCIPHER
select CRYPTO_HASH
select CRYPTO_AEAD
+ select NET_DEVLINK
help
This driver allows you to utilize the Marvell Cryptographic
Accelerator Unit(CPT) found in OcteonTX2 series of processors.
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
index ccbef01888d4..01c48ddc4eeb 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
@@ -1639,11 +1639,8 @@ static void swap_func(void *lptr, void *rptr, int size)
{
struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
- struct cpt_device_desc desc;
- desc = *ldesc;
- *ldesc = *rdesc;
- *rdesc = desc;
+ swap(*ldesc, *rdesc);
}
int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
index b681bd2dc6ad..36d72e35ebeb 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
@@ -204,7 +204,6 @@ static int alloc_command_queues(struct otx_cptvf *cptvf,
/* per queue initialization */
for (i = 0; i < cptvf->num_queues; i++) {
- c_size = 0;
rem_q_size = q_size;
first = NULL;
last = NULL;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index fb56824cb0a6..5012b7e669f0 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -157,5 +157,6 @@ struct otx2_cptlfs_info;
int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs);
+int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox);
#endif /* __OTX2_CPT_COMMON_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
index 9074876d38e5..a317319696ef 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -202,3 +202,17 @@ int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
}
return ret;
}
+
+int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox)
+{
+ int err;
+
+ if (!otx2_mbox_nonempty(mbox, 0))
+ return 0;
+ otx2_mbox_msg_send(mbox, 0);
+ err = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (err)
+ return err;
+
+ return otx2_mbox_check_rsp_msgs(mbox, 0);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
index b691b6c1d5c4..4fcaf61a70e3 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -26,12 +26,22 @@
*/
#define OTX2_CPT_INST_QLEN_MSGS ((OTX2_CPT_SIZE_DIV40 - 1) * 40)
+/*
+ * LDWB is used incorrectly when IQB_LDWB = 1 and the CPT instruction
+ * queue has fewer than 320 free entries. As a workaround, increase the
+ * HW instruction queue size by 320 and expose 320 fewer entries to
+ * SW/NIX RX.
+ */
+#define OTX2_CPT_INST_QLEN_EXTRA_BYTES (320 * OTX2_CPT_INST_SIZE)
+#define OTX2_CPT_EXTRA_SIZE_DIV40 (320/40)
+
/* CPT instruction queue length in bytes */
-#define OTX2_CPT_INST_QLEN_BYTES (OTX2_CPT_SIZE_DIV40 * 40 * \
- OTX2_CPT_INST_SIZE)
+#define OTX2_CPT_INST_QLEN_BYTES \
+ ((OTX2_CPT_SIZE_DIV40 * 40 * OTX2_CPT_INST_SIZE) + \
+ OTX2_CPT_INST_QLEN_EXTRA_BYTES)
/* CPT instruction group queue length in bytes */
-#define OTX2_CPT_INST_GRP_QLEN_BYTES (OTX2_CPT_SIZE_DIV40 * 16)
+#define OTX2_CPT_INST_GRP_QLEN_BYTES \
+ ((OTX2_CPT_SIZE_DIV40 + OTX2_CPT_EXTRA_SIZE_DIV40) * 16)
/* CPT FC length in bytes */
#define OTX2_CPT_Q_FC_LEN 128
@@ -179,7 +189,8 @@ static inline void otx2_cptlf_do_set_iqueue_size(struct otx2_cptlf_info *lf)
{
union otx2_cptx_lf_q_size lf_q_size = { .u = 0x0 };
- lf_q_size.s.size_div40 = OTX2_CPT_SIZE_DIV40;
+ lf_q_size.s.size_div40 = OTX2_CPT_SIZE_DIV40 +
+ OTX2_CPT_EXTRA_SIZE_DIV40;
otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
OTX2_CPT_LF_Q_SIZE, lf_q_size.u);
}
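To make the workaround arithmetic concrete: the queue-size register counts blocks of 40 instructions, so reserving 320 slots adds 320 / 40 = 8 blocks to the programmed size while OTX2_CPT_INST_QLEN_MSGS, the limit advertised to submitters, is left unchanged — the hardware therefore always sees at least 320 free entries and the IQB_LDWB condition can no longer trigger. Restated with illustrative names:

/* Headroom the hardware must always see as free */
#define EXTRA_SLOTS	320
#define EXTRA_DIV40	(EXTRA_SLOTS / 40)	/* = 8 blocks of 40 */

/*
 * Programmed HW size: OTX2_CPT_SIZE_DIV40 + 8 blocks.
 * Software still queues at most (OTX2_CPT_SIZE_DIV40 - 1) * 40
 * messages, so at least 320 entries remain free at all times.
 */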
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
index 05b2d9c650e1..936174b012e8 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
@@ -46,6 +46,7 @@ struct otx2_cptpf_dev {
struct workqueue_struct *flr_wq;
struct cptpf_flr_work *flr_work;
+ struct mutex lock; /* serialize mailbox access */
unsigned long cap_flag;
u8 pf_id; /* RVU PF number */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index 1720a5bb7016..a402ccfac557 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -140,10 +140,13 @@ static void cptpf_flr_wq_handler(struct work_struct *work)
vf = flr_work - pf->flr_work;
+ mutex_lock(&pf->lock);
req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
sizeof(struct msg_rsp));
- if (!req)
+ if (!req) {
+ mutex_unlock(&pf->lock);
return;
+ }
req->sig = OTX2_MBOX_REQ_SIG;
req->id = MBOX_MSG_VF_FLR;
@@ -151,16 +154,19 @@ static void cptpf_flr_wq_handler(struct work_struct *work)
req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
otx2_cpt_send_mbox_msg(mbox, pf->pdev);
+ if (!otx2_cpt_sync_mbox_msg(&pf->afpf_mbox)) {
- if (vf >= 64) {
- reg = 1;
- vf = vf - 64;
+ if (vf >= 64) {
+ reg = 1;
+ vf = vf - 64;
+ }
+ /* Clear transaction pending register */
+ otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
}
- /* Clear transaction pending register */
- otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
- RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
- otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
- RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
+ mutex_unlock(&pf->lock);
}
static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
@@ -468,6 +474,7 @@ static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
goto error;
INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
+ mutex_init(&cptpf->lock);
return 0;
error:
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
index 186f1c1190c1..dee0aa60b698 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
@@ -18,9 +18,12 @@ static int forward_to_af(struct otx2_cptpf_dev *cptpf,
struct mbox_msghdr *msg;
int ret;
+ mutex_lock(&cptpf->lock);
msg = otx2_mbox_alloc_msg(&cptpf->afpf_mbox, 0, size);
- if (msg == NULL)
+ if (msg == NULL) {
+ mutex_unlock(&cptpf->lock);
return -ENOMEM;
+ }
memcpy((uint8_t *)msg + sizeof(struct mbox_msghdr),
(uint8_t *)req + sizeof(struct mbox_msghdr), size);
@@ -29,15 +32,19 @@ static int forward_to_af(struct otx2_cptpf_dev *cptpf,
msg->sig = req->sig;
msg->ver = req->ver;
- otx2_mbox_msg_send(&cptpf->afpf_mbox, 0);
- ret = otx2_mbox_wait_for_rsp(&cptpf->afpf_mbox, 0);
+ ret = otx2_cpt_sync_mbox_msg(&cptpf->afpf_mbox);
+	/* Error code -EIO indicates a communication failure with the
+	 * AF. The remaining error codes indicate that the AF processed
+	 * the VF messages and set the error codes in the response
+	 * messages (if any), so simply forward the responses to the VF.
+	 */
if (ret == -EIO) {
- dev_err(&cptpf->pdev->dev, "RVU MBOX timeout.\n");
+ dev_warn(&cptpf->pdev->dev,
+ "AF not responding to VF%d messages\n", vf->vf_id);
+ mutex_unlock(&cptpf->lock);
return ret;
- } else if (ret) {
- dev_err(&cptpf->pdev->dev, "RVU MBOX error: %d.\n", ret);
- return -EFAULT;
}
+ mutex_unlock(&cptpf->lock);
return 0;
}
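The new pf->lock turns each allocate/send/wait sequence on the shared AF mailbox into a critical section, and every early-exit path above drops the lock before returning. A minimal sketch of the pattern, with illustrative demo_* names rather than the driver's real helpers:

#include <linux/errno.h>
#include <linux/mutex.h>

struct demo_pf {
	struct mutex lock;	/* serializes mailbox access */
};

/* Every path out of the critical section, including the allocation
 * failure, must release the mutex exactly once.
 */
static int demo_forward(struct demo_pf *pf, void *(*alloc_msg)(void),
			int (*send_and_wait)(void))
{
	void *msg;
	int ret;

	mutex_lock(&pf->lock);
	msg = alloc_msg();
	if (!msg) {
		mutex_unlock(&pf->lock);
		return -ENOMEM;
	}
	ret = send_and_wait();
	mutex_unlock(&pf->lock);
	return ret;
}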
@@ -204,6 +211,10 @@ void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
if (err == -ENOMEM || err == -EIO)
break;
offset = msg->next_msgoff;
+		/* A write barrier is required for VF responses that are
+		 * handled by the PF driver and not forwarded to the AF.
+		 */
+ smp_wmb();
}
/* Send mbox responses to VF */
if (mdev->num_msgs)
@@ -350,6 +361,8 @@ void otx2_cptpf_afpf_mbox_handler(struct work_struct *work)
process_afpf_mbox_msg(cptpf, msg);
offset = msg->next_msgoff;
+ /* Sync VF response ready to be sent */
+ smp_wmb();
mdev->msgs_acked++;
}
otx2_mbox_reset(afpf_mbox, 0);
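The smp_wmb() calls above order the writes that fill in a VF response ahead of the store that publishes it; a reader on another CPU needs the matching read barrier. A hedged, generic sketch of that pairing (demo variables, not the mailbox structures):

#include <asm/barrier.h>
#include <linux/compiler.h>

static int demo_payload;
static int demo_ready;

static void demo_publish(int v)
{
	demo_payload = v;	/* fill in the response first */
	smp_wmb();		/* order the payload before the flag */
	WRITE_ONCE(demo_ready, 1);
}

static int demo_consume(void)
{
	if (!READ_ONCE(demo_ready))
		return -1;
	smp_rmb();		/* pairs with smp_wmb() in demo_publish() */
	return demo_payload;
}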
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 1b4d425bbf0e..9cba2f714c7e 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -1076,6 +1076,39 @@ static void delete_engine_grps(struct pci_dev *pdev,
delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
}
+#define PCI_DEVID_CN10K_RNM 0xA098
+#define RNM_ENTROPY_STATUS 0x8
+
+static void rnm_to_cpt_errata_fixup(struct device *dev)
+{
+ struct pci_dev *pdev;
+ void __iomem *base;
+ int timeout = 5000;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RNM, NULL);
+ if (!pdev)
+ return;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto put_pdev;
+
+ while ((readq(base + RNM_ENTROPY_STATUS) & 0x7F) != 0x40) {
+ cpu_relax();
+ udelay(1);
+ timeout--;
+ if (!timeout) {
+ dev_warn(dev, "RNM is not producing entropy\n");
+ break;
+ }
+ }
+
+ iounmap(base);
+
+put_pdev:
+ pci_dev_put(pdev);
+}
+
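The open-coded loop above polls RNM_ENTROPY_STATUS[6:0] once per microsecond for up to 5 ms. For comparison, a sketch of the same wait written with the iopoll helper (same register and budget, assuming a 64-bit build where readq() is available):

#include <linux/iopoll.h>

static int demo_wait_for_entropy(void __iomem *base)
{
	u64 status;

	/* succeed once NORMAL_CNT reaches 0x40, else -ETIMEDOUT */
	return readq_poll_timeout(base + RNM_ENTROPY_STATUS, status,
				  (status & 0x7F) == 0x40, 1, 5000);
}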
int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
{
@@ -1111,6 +1144,7 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
struct pci_dev *pdev = cptpf->pdev;
struct fw_info_t fw_info;
+ u64 reg_val;
int ret = 0;
mutex_lock(&eng_grps->lock);
@@ -1189,9 +1223,17 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
if (is_dev_otx2(pdev))
goto unlock;
+
+ /*
+ * Ensure RNM_ENTROPY_STATUS[NORMAL_CNT] = 0x40 before writing
+ * CPT_AF_CTL[RNM_REQ_EN] = 1 as a workaround for HW errata.
+ */
+ rnm_to_cpt_errata_fixup(&pdev->dev);
+
/*
* Configure engine group mask to allow context prefetching
- * for the groups.
+ * for the groups and enable random number request, to enable
+ * CPT to request random numbers from RNM.
*/
otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16),
@@ -1203,6 +1245,18 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
*/
otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER,
CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0);
+
+ /*
+	 * Set CPT_AF_DIAG[FLT_DIS] as a workaround for a HW errata: when
+ * CPT_AF_DIAG[FLT_DIS] = 0 and a CPT engine access to LLC/DRAM
+ * encounters a fault/poison, a rare case may result in
+ * unpredictable data being delivered to a CPT engine.
+ */
+ otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG, &reg_val,
+ BLKADDR_CPT0);
+ otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
+ reg_val | BIT_ULL(24), BLKADDR_CPT0);
+
mutex_unlock(&eng_grps->lock);
return 0;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
index 2748a3327e39..f8f8542ce3e4 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -1634,16 +1634,13 @@ static inline int cpt_register_algs(void)
{
int i, err = 0;
- if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
- for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
- otx2_cpt_skciphers[i].base.cra_flags &=
- ~CRYPTO_ALG_DEAD;
-
- err = crypto_register_skciphers(otx2_cpt_skciphers,
- ARRAY_SIZE(otx2_cpt_skciphers));
- if (err)
- return err;
- }
+ for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
+ otx2_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
+
+ err = crypto_register_skciphers(otx2_cpt_skciphers,
+ ARRAY_SIZE(otx2_cpt_skciphers));
+ if (err)
+ return err;
for (i = 0; i < ARRAY_SIZE(otx2_cpt_aeads); i++)
otx2_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index d19e5ffb5104..d6f9e2fe863d 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -331,7 +331,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
}
- for_each_sg(req->src, src, sg_nents(src), i) {
+ for_each_sg(req->src, src, sg_nents(req->src), i) {
src_buf = sg_virt(src);
len = sg_dma_len(src);
tlen += len;
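The one-line fix matters because sg_nents() must be given the head of the scatterlist (req->src); passing the not-yet-initialized iterator src read garbage. A small sketch of the correct walk, with a hypothetical demo_total_len() helper:

#include <linux/scatterlist.h>

static unsigned int demo_total_len(struct scatterlist *sgl)
{
	struct scatterlist *sg;
	unsigned int total = 0;
	int i;

	/* count entries from the list head, then iterate over them */
	for_each_sg(sgl, sg, sg_nents(sgl), i)
		total += sg->length;

	return total;
}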
diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c
index 4e304f6081e4..7584a34ba88c 100644
--- a/drivers/crypto/nx/nx-common-pseries.c
+++ b/drivers/crypto/nx/nx-common-pseries.c
@@ -962,7 +962,7 @@ static struct attribute *nx842_sysfs_entries[] = {
NULL,
};
-static struct attribute_group nx842_attribute_group = {
+static const struct attribute_group nx842_attribute_group = {
.name = NULL, /* put in device directory */
.attrs = nx842_sysfs_entries,
};
@@ -992,7 +992,7 @@ static struct attribute *nxcop_caps_sysfs_entries[] = {
NULL,
};
-static struct attribute_group nxcop_caps_attr_group = {
+static const struct attribute_group nxcop_caps_attr_group = {
.name = "nx_gzip_caps",
.attrs = nxcop_caps_sysfs_entries,
};
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index a196bb8b1701..581211a92628 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1093,7 +1093,7 @@ static struct attribute *omap_aes_attrs[] = {
NULL,
};
-static struct attribute_group omap_aes_attr_group = {
+static const struct attribute_group omap_aes_attr_group = {
.attrs = omap_aes_attrs,
};
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index f6bf53c00b61..4b37dc69a50c 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2045,7 +2045,7 @@ static struct attribute *omap_sham_attrs[] = {
NULL,
};
-static struct attribute_group omap_sham_attr_group = {
+static const struct attribute_group omap_sham_attr_group = {
.attrs = omap_sham_attrs,
};
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
index 6d10edc40aca..fb5970a68484 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -6,6 +6,7 @@
#include <adf_common_drv.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
+#include <adf_gen4_pm.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"
@@ -52,7 +53,7 @@ static const char *const dev_cfg_services[] = {
static int get_service_enabled(struct adf_accel_dev *accel_dev)
{
char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
- u32 ret;
+ int ret;
ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
ADF_SERVICES_ENABLED, services);
@@ -229,7 +230,7 @@ static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
void __iomem *csr = misc_bar->virt_addr;
/* Enable all in errsou3 except VFLR notification on host */
- ADF_CSR_WR(csr, ADF_4XXX_ERRMSK3, ADF_4XXX_VFLNOTIFY);
+ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
}
static void adf_enable_ints(struct adf_accel_dev *accel_dev)
@@ -256,19 +257,19 @@ static int adf_init_device(struct adf_accel_dev *accel_dev)
addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
/* Temporarily mask PM interrupt */
- csr = ADF_CSR_RD(addr, ADF_4XXX_ERRMSK2);
- csr |= ADF_4XXX_PM_SOU;
- ADF_CSR_WR(addr, ADF_4XXX_ERRMSK2, csr);
+ csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
+ csr |= ADF_GEN4_PM_SOU;
+ ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);
/* Set DRV_ACTIVE bit to power up the device */
- ADF_CSR_WR(addr, ADF_4XXX_PM_INTERRUPT, ADF_4XXX_PM_DRV_ACTIVE);
+ ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);
/* Poll status register to make sure the device is powered up */
ret = read_poll_timeout(ADF_CSR_RD, status,
- status & ADF_4XXX_PM_INIT_STATE,
- ADF_4XXX_PM_POLL_DELAY_US,
- ADF_4XXX_PM_POLL_TIMEOUT_US, true, addr,
- ADF_4XXX_PM_STATUS);
+ status & ADF_GEN4_PM_INIT_STATE,
+ ADF_GEN4_PM_POLL_DELAY_US,
+ ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
+ ADF_GEN4_PM_STATUS);
if (ret)
dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
@@ -354,6 +355,8 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
+ hw_data->enable_pm = adf_gen4_enable_pm;
+ hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
index 12e4fb9b40ce..1034752845ca 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -39,20 +39,6 @@
#define ADF_4XXX_NUM_RINGS_PER_BANK 2
#define ADF_4XXX_NUM_BANKS_PER_VF 4
-/* Error source registers */
-#define ADF_4XXX_ERRSOU0 (0x41A200)
-#define ADF_4XXX_ERRSOU1 (0x41A204)
-#define ADF_4XXX_ERRSOU2 (0x41A208)
-#define ADF_4XXX_ERRSOU3 (0x41A20C)
-
-/* Error source mask registers */
-#define ADF_4XXX_ERRMSK0 (0x41A210)
-#define ADF_4XXX_ERRMSK1 (0x41A214)
-#define ADF_4XXX_ERRMSK2 (0x41A218)
-#define ADF_4XXX_ERRMSK3 (0x41A21C)
-
-#define ADF_4XXX_VFLNOTIFY BIT(7)
-
/* Arbiter configuration */
#define ADF_4XXX_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
#define ADF_4XXX_ARB_OFFSET (0x0)
@@ -63,16 +49,6 @@
#define ADF_4XXX_ADMINMSGLR_OFFSET (0x500578)
#define ADF_4XXX_MAILBOX_BASE_OFFSET (0x600970)
-/* Power management */
-#define ADF_4XXX_PM_POLL_DELAY_US 20
-#define ADF_4XXX_PM_POLL_TIMEOUT_US USEC_PER_SEC
-#define ADF_4XXX_PM_STATUS (0x50A00C)
-#define ADF_4XXX_PM_INTERRUPT (0x50A028)
-#define ADF_4XXX_PM_DRV_ACTIVE BIT(20)
-#define ADF_4XXX_PM_INIT_STATE BIT(21)
-/* Power management source in ERRSOU2 and ERRMSK2 */
-#define ADF_4XXX_PM_SOU BIT(18)
-
/* Firmware Binaries */
#define ADF_4XXX_FW "qat_4xxx.bin"
#define ADF_4XXX_MMP "qat_4xxx_mmp.bin"
diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c
index a6c78b9c730b..fa4c350c1bf9 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c
@@ -75,6 +75,13 @@ static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
if (ret)
goto err;
+ /* Temporarily set the number of crypto instances to zero to avoid
+ * registering the crypto algorithms.
+	 * This will be removed once the algorithms support the
+	 * CRYPTO_TFM_REQ_MAY_BACKLOG flag.
+	 */
+ instances = 0;
+
for (i = 0; i < instances; i++) {
val = i;
bank = i * 2;
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index 7e191a42a5c7..f25a6c8edfc7 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -12,6 +12,7 @@ intel_qat-objs := adf_cfg.o \
adf_hw_arbiter.o \
adf_gen2_hw_data.o \
adf_gen4_hw_data.o \
+ adf_gen4_pm.o \
qat_crypto.o \
qat_algs.o \
qat_asym_algs.o \
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 2d4cd7c7cf33..a03c6cf72331 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -184,6 +184,8 @@ struct adf_hw_device_data {
void (*exit_arb)(struct adf_accel_dev *accel_dev);
const u32 *(*get_arb_mapping)(void);
int (*init_device)(struct adf_accel_dev *accel_dev);
+ int (*enable_pm)(struct adf_accel_dev *accel_dev);
+ bool (*handle_pm_interrupt)(struct adf_accel_dev *accel_dev);
void (*disable_iov)(struct adf_accel_dev *accel_dev);
void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
bool enable);
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
index 498eb6f690e3..3b6184c35081 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -251,6 +251,43 @@ int adf_send_admin_init(struct adf_accel_dev *accel_dev)
}
EXPORT_SYMBOL_GPL(adf_send_admin_init);
+/**
+ * adf_init_admin_pm() - Function sends PM init message to FW
+ * @accel_dev: Pointer to acceleration device.
+ * @idle_delay: QAT HW idle time before power gating is initiated.
+ * 000 - 64us
+ * 001 - 128us
+ * 010 - 256us
+ * 011 - 512us
+ * 100 - 1ms
+ * 101 - 2ms
+ * 110 - 4ms
+ * 111 - 8ms
+ *
+ * Function sends the admin init message for the PM state configuration
+ * to the FW.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct icp_qat_fw_init_admin_resp resp = {0};
+ struct icp_qat_fw_init_admin_req req = {0};
+ u32 ae_mask = hw_data->admin_ae_mask;
+
+ if (!accel_dev->admin) {
+ dev_err(&GET_DEV(accel_dev), "adf_admin is not available\n");
+ return -EFAULT;
+ }
+
+ req.cmd_id = ICP_QAT_FW_PM_STATE_CONFIG;
+ req.idle_filter = idle_delay;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+EXPORT_SYMBOL_GPL(adf_init_admin_pm);
+
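The idle_delay encoding in the kernel-doc doubles at each step, from 64 us at 0b000 to 8 ms at 0b111. A hypothetical helper (not part of the patch) that maps a microsecond budget onto that 3-bit field:

#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/minmax.h>

static u32 demo_idle_filter_from_us(u32 us)
{
	u32 enc;

	if (us <= 64)
		return 0;	/* 0b000 -> 64us */

	/* round up to the next power-of-two multiple of 64us */
	enc = ilog2(roundup_pow_of_two(DIV_ROUND_UP(us, 64)));
	return min(enc, 7U);	/* cap at 0b111 -> 8ms */
}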
int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
struct adf_admin_comms *admin;
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 76f4f96ec5eb..e8c9b77c0d66 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -102,6 +102,7 @@ void adf_exit_aer(void);
int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
int adf_send_admin_init(struct adf_accel_dev *accel_dev);
+int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay);
int adf_init_arb(struct adf_accel_dev *accel_dev);
void adf_exit_arb(struct adf_accel_dev *accel_dev);
void adf_update_ring_arb(struct adf_etr_ring_data *ring);
@@ -188,6 +189,9 @@ int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, u32 mem_size, char *obj_name);
int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
unsigned int cfg_ae_mask);
+int adf_init_misc_wq(void);
+void adf_exit_misc_wq(void);
+bool adf_misc_wq_queue_work(struct work_struct *work);
#if defined(CONFIG_PCI_IOV)
int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 6f64aa693146..e8ac932bbaab 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -419,6 +419,9 @@ static int __init adf_register_ctl_device_driver(void)
if (adf_chr_drv_create())
goto err_chr_dev;
+ if (adf_init_misc_wq())
+ goto err_misc_wq;
+
if (adf_init_aer())
goto err_aer;
@@ -440,6 +443,8 @@ err_vf_wq:
err_pf_wq:
adf_exit_aer();
err_aer:
+ adf_exit_misc_wq();
+err_misc_wq:
adf_chr_drv_destroy();
err_chr_dev:
mutex_destroy(&adf_ctl_lock);
@@ -449,6 +454,7 @@ err_chr_dev:
static void __exit adf_unregister_ctl_device_driver(void)
{
adf_chr_drv_destroy();
+ adf_exit_misc_wq();
adf_exit_aer();
adf_exit_vf_wq();
adf_exit_pf_wq();
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
index f0f71ca44ca3..43b8f864806b 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
@@ -122,6 +122,20 @@ do { \
#define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0)
#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
+/* Error source registers */
+#define ADF_GEN4_ERRSOU0 (0x41A200)
+#define ADF_GEN4_ERRSOU1 (0x41A204)
+#define ADF_GEN4_ERRSOU2 (0x41A208)
+#define ADF_GEN4_ERRSOU3 (0x41A20C)
+
+/* Error source mask registers */
+#define ADF_GEN4_ERRMSK0 (0x41A210)
+#define ADF_GEN4_ERRMSK1 (0x41A214)
+#define ADF_GEN4_ERRMSK2 (0x41A218)
+#define ADF_GEN4_ERRMSK3 (0x41A21C)
+
+#define ADF_GEN4_VFLNOTIFY BIT(7)
+
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
index 8efbedf63bc8..d80d493a7756 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
+++ b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
@@ -9,15 +9,12 @@
#include "adf_pfvf_pf_proto.h"
#include "adf_pfvf_utils.h"
-#define ADF_4XXX_MAX_NUM_VFS 16
-
#define ADF_4XXX_PF2VM_OFFSET(i) (0x40B010 + ((i) * 0x20))
#define ADF_4XXX_VM2PF_OFFSET(i) (0x40B014 + ((i) * 0x20))
/* VF2PF interrupt source registers */
-#define ADF_4XXX_VM2PF_SOU(i) (0x41A180 + ((i) * 4))
-#define ADF_4XXX_VM2PF_MSK(i) (0x41A1C0 + ((i) * 4))
-#define ADF_4XXX_VM2PF_INT_EN_MSK BIT(0)
+#define ADF_4XXX_VM2PF_SOU 0x41A180
+#define ADF_4XXX_VM2PF_MSK 0x41A1C0
#define ADF_PFVF_GEN4_MSGTYPE_SHIFT 2
#define ADF_PFVF_GEN4_MSGTYPE_MASK 0x3F
@@ -41,51 +38,30 @@ static u32 adf_gen4_pf_get_vf2pf_offset(u32 i)
static u32 adf_gen4_get_vf2pf_sources(void __iomem *pmisc_addr)
{
- int i;
u32 sou, mask;
- int num_csrs = ADF_4XXX_MAX_NUM_VFS;
- u32 vf_mask = 0;
- for (i = 0; i < num_csrs; i++) {
- sou = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU(i));
- mask = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK(i));
- sou &= ~mask;
- vf_mask |= sou << i;
- }
+ sou = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU);
+ mask = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK);
- return vf_mask;
+ return sou & ~mask;
}
static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr,
u32 vf_mask)
{
- int num_csrs = ADF_4XXX_MAX_NUM_VFS;
- unsigned long mask = vf_mask;
unsigned int val;
- int i;
-
- for_each_set_bit(i, &mask, num_csrs) {
- unsigned int offset = ADF_4XXX_VM2PF_MSK(i);
- val = ADF_CSR_RD(pmisc_addr, offset) & ~ADF_4XXX_VM2PF_INT_EN_MSK;
- ADF_CSR_WR(pmisc_addr, offset, val);
- }
+ val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) & ~vf_mask;
+ ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val);
}
static void adf_gen4_disable_vf2pf_interrupts(void __iomem *pmisc_addr,
u32 vf_mask)
{
- int num_csrs = ADF_4XXX_MAX_NUM_VFS;
- unsigned long mask = vf_mask;
unsigned int val;
- int i;
-
- for_each_set_bit(i, &mask, num_csrs) {
- unsigned int offset = ADF_4XXX_VM2PF_MSK(i);
- val = ADF_CSR_RD(pmisc_addr, offset) | ADF_4XXX_VM2PF_INT_EN_MSK;
- ADF_CSR_WR(pmisc_addr, offset, val);
- }
+ val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) | vf_mask;
+ ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val);
}
static int adf_gen4_pfvf_send(struct adf_accel_dev *accel_dev,
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/qat/qat_common/adf_gen4_pm.c
new file mode 100644
index 000000000000..7037c0892a8a
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen4_pm.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2022 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_pm.h"
+#include "adf_cfg_strings.h"
+#include "icp_qat_fw_init_admin.h"
+#include "adf_gen4_hw_data.h"
+#include "adf_cfg.h"
+
+enum qat_pm_host_msg {
+ PM_NO_CHANGE = 0,
+ PM_SET_MIN,
+};
+
+struct adf_gen4_pm_data {
+ struct work_struct pm_irq_work;
+ struct adf_accel_dev *accel_dev;
+ u32 pm_int_sts;
+};
+
+static int send_host_msg(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ u32 msg;
+
+ msg = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG);
+ if (msg & ADF_GEN4_PM_MSG_PENDING)
+ return -EBUSY;
+
+ /* Send HOST_MSG */
+ msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK, PM_SET_MIN);
+ msg |= ADF_GEN4_PM_MSG_PENDING;
+ ADF_CSR_WR(pmisc, ADF_GEN4_PM_HOST_MSG, msg);
+
+ /* Poll status register to make sure the HOST_MSG has been processed */
+ return read_poll_timeout(ADF_CSR_RD, msg,
+ !(msg & ADF_GEN4_PM_MSG_PENDING),
+ ADF_GEN4_PM_MSG_POLL_DELAY_US,
+ ADF_GEN4_PM_POLL_TIMEOUT_US, true, pmisc,
+ ADF_GEN4_PM_HOST_MSG);
+}
+
+static void pm_bh_handler(struct work_struct *work)
+{
+ struct adf_gen4_pm_data *pm_data =
+ container_of(work, struct adf_gen4_pm_data, pm_irq_work);
+ struct adf_accel_dev *accel_dev = pm_data->accel_dev;
+ void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ u32 pm_int_sts = pm_data->pm_int_sts;
+ u32 val;
+
+ /* PM Idle interrupt */
+ if (pm_int_sts & ADF_GEN4_PM_IDLE_STS) {
+ /* Issue host message to FW */
+ if (send_host_msg(accel_dev))
+ dev_warn_ratelimited(&GET_DEV(accel_dev),
+ "Failed to send host msg to FW\n");
+ }
+
+ /* Clear interrupt status */
+ ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, pm_int_sts);
+
+ /* Reenable PM interrupt */
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+ val &= ~ADF_GEN4_PM_SOU;
+ ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
+
+ kfree(pm_data);
+}
+
+bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ struct adf_gen4_pm_data *pm_data = NULL;
+ u32 errsou2;
+ u32 errmsk2;
+ u32 val;
+
+ /* Only handle the interrupt triggered by PM */
+ errmsk2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+ if (errmsk2 & ADF_GEN4_PM_SOU)
+ return false;
+
+ errsou2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRSOU2);
+ if (!(errsou2 & ADF_GEN4_PM_SOU))
+ return false;
+
+ /* Disable interrupt */
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+ val |= ADF_GEN4_PM_SOU;
+ ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
+
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
+
+ pm_data = kzalloc(sizeof(*pm_data), GFP_ATOMIC);
+ if (!pm_data)
+ return false;
+
+ pm_data->pm_int_sts = val;
+ pm_data->accel_dev = accel_dev;
+
+ INIT_WORK(&pm_data->pm_irq_work, pm_bh_handler);
+ adf_misc_wq_queue_work(&pm_data->pm_irq_work);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_handle_pm_interrupt);
+
+int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ int ret;
+ u32 val;
+
+ ret = adf_init_admin_pm(accel_dev, ADF_GEN4_PM_DEFAULT_IDLE_FILTER);
+ if (ret)
+ return ret;
+
+ /* Enable default PM interrupts: IDLE, THROTTLE */
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
+ val |= ADF_GEN4_PM_INT_EN_DEFAULT;
+
+ /* Clear interrupt status */
+ val |= ADF_GEN4_PM_INT_STS_MASK;
+ ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, val);
+
+ /* Unmask PM Interrupt */
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+ val &= ~ADF_GEN4_PM_SOU;
+ ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_enable_pm);
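adf_gen4_pm.c follows a common hard-IRQ/bottom-half split: the interrupt path masks the PM source, snapshots the status with a GFP_ATOMIC allocation, and defers the rest; the work handler acks, unmasks, and frees. A condensed sketch of that shape with illustrative demo_* names (schedule_work() stands in for the driver's private queue):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_event {
	struct work_struct work;
	u32 status;
};

static void demo_bh(struct work_struct *work)
{
	struct demo_event *evt = container_of(work, struct demo_event, work);

	/* ...handle evt->status, then ack and unmask the source... */
	kfree(evt);		/* the bottom half owns the event */
}

static bool demo_top_half(u32 status)
{
	struct demo_event *evt;

	/* hard IRQ context: atomic allocation, no sleeping */
	evt = kzalloc(sizeof(*evt), GFP_ATOMIC);
	if (!evt)
		return false;

	evt->status = status;
	INIT_WORK(&evt->work, demo_bh);
	return schedule_work(&evt->work);
}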
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/qat/qat_common/adf_gen4_pm.h
new file mode 100644
index 000000000000..f8f8a9ee29e5
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen4_pm.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef ADF_GEN4_PM_H
+#define ADF_GEN4_PM_H
+
+#include "adf_accel_devices.h"
+
+/* Power management registers */
+#define ADF_GEN4_PM_HOST_MSG (0x50A01C)
+
+/* Power management */
+#define ADF_GEN4_PM_POLL_DELAY_US 20
+#define ADF_GEN4_PM_POLL_TIMEOUT_US USEC_PER_SEC
+#define ADF_GEN4_PM_MSG_POLL_DELAY_US (10 * USEC_PER_MSEC)
+#define ADF_GEN4_PM_STATUS (0x50A00C)
+#define ADF_GEN4_PM_INTERRUPT (0x50A028)
+
+/* Power management source in ERRSOU2 and ERRMSK2 */
+#define ADF_GEN4_PM_SOU BIT(18)
+
+#define ADF_GEN4_PM_IDLE_INT_EN BIT(18)
+#define ADF_GEN4_PM_THROTTLE_INT_EN BIT(19)
+#define ADF_GEN4_PM_DRV_ACTIVE BIT(20)
+#define ADF_GEN4_PM_INIT_STATE BIT(21)
+#define ADF_GEN4_PM_INT_EN_DEFAULT (ADF_GEN4_PM_IDLE_INT_EN | \
+ ADF_GEN4_PM_THROTTLE_INT_EN)
+
+#define ADF_GEN4_PM_THR_STS BIT(0)
+#define ADF_GEN4_PM_IDLE_STS BIT(1)
+#define ADF_GEN4_PM_FW_INT_STS BIT(2)
+#define ADF_GEN4_PM_INT_STS_MASK (ADF_GEN4_PM_THR_STS | \
+ ADF_GEN4_PM_IDLE_STS | \
+ ADF_GEN4_PM_FW_INT_STS)
+
+#define ADF_GEN4_PM_MSG_PENDING BIT(0)
+#define ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK GENMASK(28, 1)
+
+#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER (0x0)
+#define ADF_GEN4_PM_MAX_IDLE_FILTER (0x7)
+
+int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev);
+bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev);
+
+#endif
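How the HOST_MSG bits above compose: the payload lands in GENMASK(28, 1) via FIELD_PREP() and bit 0 marks the message pending, which is what send_host_msg() writes. A self-contained sketch with demo_* copies of the masks:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_PM_MSG_PENDING	BIT(0)
#define DEMO_PM_MSG_PAYLOAD	GENMASK(28, 1)

static u32 demo_host_msg(u32 payload)
{
	/* e.g. demo_host_msg(1) == 0x3: PM_SET_MIN plus the pending bit */
	return FIELD_PREP(DEMO_PM_MSG_PAYLOAD, payload) | DEMO_PM_MSG_PENDING;
}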
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index 2edc63c6b6ca..c2c718f1b489 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -181,6 +181,12 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
if (hw_data->set_ssm_wdtimer)
hw_data->set_ssm_wdtimer(accel_dev);
+ /* Enable Power Management */
+ if (hw_data->enable_pm && hw_data->enable_pm(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to configure Power Management\n");
+ return -EFAULT;
+ }
+
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
index 4ca482aa69f7..a35149f8bf1e 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -16,6 +16,7 @@
#include "adf_transport_internal.h"
#define ADF_MAX_NUM_VFS 32
+static struct workqueue_struct *adf_misc_wq;
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
@@ -123,6 +124,17 @@ static bool adf_handle_vf2pf_int(struct adf_accel_dev *accel_dev)
}
#endif /* CONFIG_PCI_IOV */
+static bool adf_handle_pm_int(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+
+ if (hw_data->handle_pm_interrupt &&
+ hw_data->handle_pm_interrupt(accel_dev))
+ return true;
+
+ return false;
+}
+
static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
{
struct adf_accel_dev *accel_dev = dev_ptr;
@@ -133,6 +145,9 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
return IRQ_HANDLED;
#endif /* CONFIG_PCI_IOV */
+ if (adf_handle_pm_int(accel_dev))
+ return IRQ_HANDLED;
+
dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
accel_dev->accel_id);
@@ -341,3 +356,30 @@ err_out:
return ret;
}
EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
+
+/**
+ * adf_init_misc_wq() - Init misc workqueue
+ *
+ * Function allocates the workqueue 'qat_misc_wq' for general purpose work.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int __init adf_init_misc_wq(void)
+{
+ adf_misc_wq = alloc_workqueue("qat_misc_wq", WQ_MEM_RECLAIM, 0);
+
+ return !adf_misc_wq ? -ENOMEM : 0;
+}
+
+void adf_exit_misc_wq(void)
+{
+ if (adf_misc_wq)
+ destroy_workqueue(adf_misc_wq);
+
+ adf_misc_wq = NULL;
+}
+
+bool adf_misc_wq_queue_work(struct work_struct *work)
+{
+ return queue_work(adf_misc_wq, work);
+}
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c
index 14b222691c9c..1141258db4b6 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c
@@ -96,7 +96,7 @@ int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct capabilities_v3 cap_msg = { { 0 }, };
+ struct capabilities_v3 cap_msg = { 0 };
unsigned int len = sizeof(cap_msg);
if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_CAPABILITIES)
@@ -141,7 +141,7 @@ int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev)
int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev)
{
- struct ring_to_svc_map_v1 rts_map_msg = { { 0 }, };
+ struct ring_to_svc_map_v1 rts_map_msg = { 0 };
unsigned int len = sizeof(rts_map_msg);
if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_RING_TO_SVC_MAP)
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
index afe59a7684ac..56cb827f93ea 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
@@ -16,6 +16,7 @@ enum icp_qat_fw_init_admin_cmd_id {
ICP_QAT_FW_HEARTBEAT_SYNC = 7,
ICP_QAT_FW_HEARTBEAT_GET = 8,
ICP_QAT_FW_COMP_CAPABILITY_GET = 9,
+ ICP_QAT_FW_PM_STATE_CONFIG = 128,
};
enum icp_qat_fw_init_admin_resp_status {
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index 7234c4940fae..67c9588e89df 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -161,6 +161,13 @@ int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
if (ret)
goto err;
+ /* Temporarily set the number of crypto instances to zero to avoid
+ * registering the crypto algorithms.
+	 * This will be removed once the algorithms support the
+	 * CRYPTO_TFM_REQ_MAY_BACKLOG flag.
+	 */
+ instances = 0;
+
for (i = 0; i < instances; i++) {
val = i;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 2026cc6be8f0..6356402a2c9e 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -387,7 +387,9 @@ static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
page = image->page;
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
- if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
+ unsigned long ae_assigned = uof_image->ae_assigned;
+
+ if (!test_bit(ae, &ae_assigned))
continue;
if (!test_bit(ae, &cfg_ae_mask))
@@ -664,8 +666,9 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
continue;
for (i = 0; i < obj_handle->uimage_num; i++) {
- if (!test_bit(ae, (unsigned long *)
- &obj_handle->ae_uimage[i].img_ptr->ae_assigned))
+ unsigned long ae_assigned = obj_handle->ae_uimage[i].img_ptr->ae_assigned;
+
+ if (!test_bit(ae, &ae_assigned))
continue;
mflag = 1;
if (qat_uclo_init_ae_data(obj_handle, ae, i))
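Both hunks above remove casts of a 32-bit field to (unsigned long *): test_bit() dereferences whole unsigned long words, so the cast reads past the field on 64-bit and picks the wrong word on big-endian. The safe pattern, as a sketch:

#include <linux/bitops.h>
#include <linux/types.h>

static bool demo_bit_set(u32 field, unsigned int nr)
{
	unsigned long word = field;	/* widen into a full word first */

	return test_bit(nr, &word);
}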
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
index 99ba8d51d102..11f30fd48c14 100644
--- a/drivers/crypto/qcom-rng.c
+++ b/drivers/crypto/qcom-rng.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -43,16 +44,19 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
{
unsigned int currsize = 0;
u32 val;
+ int ret;
/* read random data from hardware */
do {
- val = readl_relaxed(rng->base + PRNG_STATUS);
- if (!(val & PRNG_STATUS_DATA_AVAIL))
- break;
+ ret = readl_poll_timeout(rng->base + PRNG_STATUS, val,
+ val & PRNG_STATUS_DATA_AVAIL,
+ 200, 10000);
+ if (ret)
+ return ret;
val = readl_relaxed(rng->base + PRNG_DATA_OUT);
if (!val)
- break;
+ return -EINVAL;
if ((max - currsize) >= WORD_SZ) {
memcpy(data, &val, WORD_SZ);
@@ -61,11 +65,10 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
} else {
/* copy only remaining bytes */
memcpy(data, &val, max - currsize);
- break;
}
} while (currsize < max);
- return currsize;
+ return 0;
}
static int qcom_rng_generate(struct crypto_rng *tfm,
@@ -87,7 +90,7 @@ static int qcom_rng_generate(struct crypto_rng *tfm,
mutex_unlock(&rng->lock);
clk_disable_unprepare(rng->clk);
- return 0;
+ return ret;
}
static int qcom_rng_seed(struct crypto_rng *tfm, const u8 *seed,
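The qcom-rng change replaces a bail-out on a not-ready status with a bounded wait, and makes the read path return an error instead of a silent short buffer. A sketch of just the polling half (register names as in the driver, 200 us period, 10 ms timeout):

#include <linux/iopoll.h>

static int demo_wait_data_avail(void __iomem *base)
{
	u32 val;

	/* 0 once DATA_AVAIL is set, -ETIMEDOUT after 10 ms */
	return readl_poll_timeout(base + PRNG_STATUS, val,
				  val & PRNG_STATUS_DATA_AVAIL, 200, 10000);
}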
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index 1cece1a7d3f0..5bbf0d2722e1 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -506,7 +506,6 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
.exit = rk_ablk_exit_tfm,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
.setkey = rk_tdes_setkey,
.encrypt = rk_des3_ede_ecb_encrypt,
.decrypt = rk_des3_ede_ecb_decrypt,
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 97277b7150cb..5a57c9afd8c8 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1264,7 +1264,7 @@ static int ux500_cryp_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
dev_dbg(dev, "[%s]", __func__);
- device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
+ device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_KERNEL);
if (!device_data) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 51a6e1a42434..5157c118d642 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1658,7 +1658,7 @@ static int ux500_hash_probe(struct platform_device *pdev)
struct hash_device_data *device_data;
struct device *dev = &pdev->dev;
- device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
+ device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_KERNEL);
if (!device_data) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig
index c85fab7ef0bd..b2c28b87f14b 100644
--- a/drivers/crypto/vmx/Kconfig
+++ b/drivers/crypto/vmx/Kconfig
@@ -2,7 +2,11 @@
config CRYPTO_DEV_VMX_ENCRYPT
tristate "Encryption acceleration support on P8 CPU"
depends on CRYPTO_DEV_VMX
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_CTR
select CRYPTO_GHASH
+ select CRYPTO_XTS
default m
help
Support for VMX cryptographic acceleration instructions on Power8 CPU.
diff --git a/drivers/crypto/xilinx/Makefile b/drivers/crypto/xilinx/Makefile
index 534e32daf76a..730feff5b5f2 100644
--- a/drivers/crypto/xilinx/Makefile
+++ b/drivers/crypto/xilinx/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += zynqmp-aes-gcm.o
+obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_SHA3) += zynqmp-sha.o
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
new file mode 100644
index 000000000000..43ff170ff1c2
--- /dev/null
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ZynqMP SHA Driver.
+ * Copyright (c) 2022 Xilinx Inc.
+ */
+#include <linux/cacheflush.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha3.h>
+#include <linux/crypto.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define ZYNQMP_DMA_BIT_MASK 32U
+#define ZYNQMP_DMA_ALLOC_FIXED_SIZE 0x1000U
+
+enum zynqmp_sha_op {
+ ZYNQMP_SHA3_INIT = 1,
+ ZYNQMP_SHA3_UPDATE = 2,
+ ZYNQMP_SHA3_FINAL = 4,
+};
+
+struct zynqmp_sha_drv_ctx {
+ struct shash_alg sha3_384;
+ struct device *dev;
+};
+
+struct zynqmp_sha_tfm_ctx {
+ struct device *dev;
+ struct crypto_shash *fbk_tfm;
+};
+
+struct zynqmp_sha_desc_ctx {
+ struct shash_desc fbk_req;
+};
+
+static dma_addr_t update_dma_addr, final_dma_addr;
+static char *ubuf, *fbuf;
+
+static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
+{
+ const char *fallback_driver_name = crypto_shash_alg_name(hash);
+ struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
+ struct shash_alg *alg = crypto_shash_alg(hash);
+ struct crypto_shash *fallback_tfm;
+ struct zynqmp_sha_drv_ctx *drv_ctx;
+
+ drv_ctx = container_of(alg, struct zynqmp_sha_drv_ctx, sha3_384);
+ tfm_ctx->dev = drv_ctx->dev;
+
+ /* Allocate a fallback and abort if it failed. */
+ fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback_tfm))
+ return PTR_ERR(fallback_tfm);
+
+ tfm_ctx->fbk_tfm = fallback_tfm;
+ hash->descsize += crypto_shash_descsize(tfm_ctx->fbk_tfm);
+
+ return 0;
+}
+
+static void zynqmp_sha_exit_tfm(struct crypto_shash *hash)
+{
+ struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
+
+ if (tfm_ctx->fbk_tfm) {
+ crypto_free_shash(tfm_ctx->fbk_tfm);
+ tfm_ctx->fbk_tfm = NULL;
+ }
+
+ memzero_explicit(tfm_ctx, sizeof(struct zynqmp_sha_tfm_ctx));
+}
+
+static int zynqmp_sha_init(struct shash_desc *desc)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+
+ dctx->fbk_req.tfm = tctx->fbk_tfm;
+ return crypto_shash_init(&dctx->fbk_req);
+}
+
+static int zynqmp_sha_update(struct shash_desc *desc, const u8 *data, unsigned int length)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ return crypto_shash_update(&dctx->fbk_req, data, length);
+}
+
+static int zynqmp_sha_final(struct shash_desc *desc, u8 *out)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ return crypto_shash_final(&dctx->fbk_req, out);
+}
+
+static int zynqmp_sha_finup(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ return crypto_shash_finup(&dctx->fbk_req, data, length, out);
+}
+
+static int zynqmp_sha_import(struct shash_desc *desc, const void *in)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+
+ dctx->fbk_req.tfm = tctx->fbk_tfm;
+ return crypto_shash_import(&dctx->fbk_req, in);
+}
+
+static int zynqmp_sha_export(struct shash_desc *desc, void *out)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ return crypto_shash_export(&dctx->fbk_req, out);
+}
+
+static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
+{
+ unsigned int remaining_len = len;
+ int update_size;
+ int ret;
+
+ ret = zynqmp_pm_sha_hash(0, 0, ZYNQMP_SHA3_INIT);
+ if (ret)
+ return ret;
+
+ while (remaining_len != 0) {
+ memzero_explicit(ubuf, ZYNQMP_DMA_ALLOC_FIXED_SIZE);
+ if (remaining_len >= ZYNQMP_DMA_ALLOC_FIXED_SIZE) {
+ update_size = ZYNQMP_DMA_ALLOC_FIXED_SIZE;
+ remaining_len -= ZYNQMP_DMA_ALLOC_FIXED_SIZE;
+ } else {
+ update_size = remaining_len;
+ remaining_len = 0;
+ }
+ memcpy(ubuf, data, update_size);
+ flush_icache_range((unsigned long)ubuf, (unsigned long)ubuf + update_size);
+ ret = zynqmp_pm_sha_hash(update_dma_addr, update_size, ZYNQMP_SHA3_UPDATE);
+ if (ret)
+ return ret;
+
+ data += update_size;
+ }
+
+ ret = zynqmp_pm_sha_hash(final_dma_addr, SHA3_384_DIGEST_SIZE, ZYNQMP_SHA3_FINAL);
+ memcpy(out, fbuf, SHA3_384_DIGEST_SIZE);
+ memzero_explicit(fbuf, SHA3_384_DIGEST_SIZE);
+
+ return ret;
+}
+
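zynqmp_sha_digest() streams arbitrary input through a fixed 4 KiB DMA bounce buffer: INIT once, one UPDATE per chunk, FINAL to collect the digest. The chunking arithmetic in isolation, as a sketch where demo_update() stands in for zynqmp_pm_sha_hash():

#include <linux/minmax.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEMO_CHUNK	0x1000U		/* ZYNQMP_DMA_ALLOC_FIXED_SIZE */

static int demo_stream_chunks(u8 *bounce, const u8 *data, unsigned int len,
			      int (*demo_update)(unsigned int size))
{
	while (len) {
		unsigned int n = min(len, DEMO_CHUNK);
		int ret;

		memcpy(bounce, data, n);	/* stage one chunk for DMA */
		ret = demo_update(n);
		if (ret)
			return ret;
		data += n;
		len -= n;
	}
	return 0;
}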
+static struct zynqmp_sha_drv_ctx sha3_drv_ctx = {
+ .sha3_384 = {
+ .init = zynqmp_sha_init,
+ .update = zynqmp_sha_update,
+ .final = zynqmp_sha_final,
+ .finup = zynqmp_sha_finup,
+ .digest = zynqmp_sha_digest,
+ .export = zynqmp_sha_export,
+ .import = zynqmp_sha_import,
+ .init_tfm = zynqmp_sha_init_tfm,
+ .exit_tfm = zynqmp_sha_exit_tfm,
+ .descsize = sizeof(struct zynqmp_sha_desc_ctx),
+ .statesize = sizeof(struct sha3_state),
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha3-384",
+ .cra_driver_name = "zynqmp-sha3-384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct zynqmp_sha_tfm_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+};
+
+static int zynqmp_sha_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int err;
+ u32 v;
+
+ /* Verify the hardware is present */
+ err = zynqmp_pm_get_api_version(&v);
+ if (err)
+ return err;
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(ZYNQMP_DMA_BIT_MASK));
+ if (err < 0) {
+ dev_err(dev, "No usable DMA configuration\n");
+ return err;
+ }
+
+ err = crypto_register_shash(&sha3_drv_ctx.sha3_384);
+ if (err < 0) {
+ dev_err(dev, "Failed to register shash alg.\n");
+ return err;
+ }
+
+ sha3_drv_ctx.dev = dev;
+ platform_set_drvdata(pdev, &sha3_drv_ctx);
+
+ ubuf = dma_alloc_coherent(dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, &update_dma_addr, GFP_KERNEL);
+ if (!ubuf) {
+ err = -ENOMEM;
+ goto err_shash;
+ }
+
+ fbuf = dma_alloc_coherent(dev, SHA3_384_DIGEST_SIZE, &final_dma_addr, GFP_KERNEL);
+ if (!fbuf) {
+ err = -ENOMEM;
+ goto err_mem;
+ }
+
+ return 0;
+
+err_mem:
+ dma_free_coherent(sha3_drv_ctx.dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, ubuf, update_dma_addr);
+
+err_shash:
+ crypto_unregister_shash(&sha3_drv_ctx.sha3_384);
+
+ return err;
+}
+
+static int zynqmp_sha_remove(struct platform_device *pdev)
+{
+ sha3_drv_ctx.dev = platform_get_drvdata(pdev);
+
+ dma_free_coherent(sha3_drv_ctx.dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, ubuf, update_dma_addr);
+ dma_free_coherent(sha3_drv_ctx.dev, SHA3_384_DIGEST_SIZE, fbuf, final_dma_addr);
+ crypto_unregister_shash(&sha3_drv_ctx.sha3_384);
+
+ return 0;
+}
+
+static struct platform_driver zynqmp_sha_driver = {
+ .probe = zynqmp_sha_probe,
+ .remove = zynqmp_sha_remove,
+ .driver = {
+ .name = "zynqmp-sha3-384",
+ },
+};
+
+module_platform_driver(zynqmp_sha_driver);
+MODULE_DESCRIPTION("ZynqMP SHA3 hardware acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Harsha <harsha.harsha@xilinx.com>");
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index d33a0613ed0c..5494d745ced5 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -346,8 +346,7 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
}
static const struct address_space_operations dev_dax_aops = {
- .set_page_dirty = __set_page_dirty_no_writeback,
- .invalidatepage = noop_invalidatepage,
+ .dirty_folio = noop_dirty_folio,
};
static int dax_open(struct inode *inode, struct file *filp)
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index e3029389d809..aedc15eabeac 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -282,7 +282,7 @@ static struct inode *dax_alloc_inode(struct super_block *sb)
struct dax_device *dax_dev;
struct inode *inode;
- dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
+ dax_dev = alloc_inode_sb(sb, dax_cache, GFP_KERNEL);
if (!dax_dev)
return NULL;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 110de8a60058..858400e42ec0 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2968,7 +2968,7 @@ static int __maybe_unused pl330_suspend(struct device *dev)
struct amba_device *pcdev = to_amba_device(dev);
pm_runtime_force_suspend(dev);
- amba_pclk_unprepare(pcdev);
+ clk_unprepare(pcdev->pclk);
return 0;
}
@@ -2978,7 +2978,7 @@ static int __maybe_unused pl330_resume(struct device *dev)
struct amba_device *pcdev = to_amba_device(dev);
int ret;
- ret = amba_pclk_prepare(pcdev);
+ ret = clk_prepare(pcdev->pclk);
if (ret)
return ret;
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 5dd29789f97d..e7e8e624a436 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -1083,8 +1083,46 @@ static int __init __maybe_unused altr_init_a10_ecc_device_type(char *compat)
#ifdef CONFIG_EDAC_ALTERA_SDRAM
+/*
+ * A legacy U-Boot bug only enabled memory mapped access to the ECC Enable
+ * register if ECC is enabled. Linux checks the ECC Enable register to
+ * determine ECC status.
+ * Use an SMC call (which always works) to determine ECC enablement.
+ */
+static int altr_s10_sdram_check_ecc_deps(struct altr_edac_device_dev *device)
+{
+ const struct edac_device_prv_data *prv = device->data;
+ unsigned long sdram_ecc_addr;
+ struct arm_smccc_res result;
+ struct device_node *np;
+ phys_addr_t sdram_addr;
+ u32 read_reg;
+ int ret;
+
+ np = of_find_compatible_node(NULL, NULL, "altr,sdr-ctl");
+ if (!np)
+ goto sdram_err;
+
+ sdram_addr = of_translate_address(np, of_get_address(np, 0,
+ NULL, NULL));
+ of_node_put(np);
+ sdram_ecc_addr = (unsigned long)sdram_addr + prv->ecc_en_ofst;
+ arm_smccc_smc(INTEL_SIP_SMC_REG_READ, sdram_ecc_addr,
+ 0, 0, 0, 0, 0, 0, &result);
+ read_reg = (unsigned int)result.a1;
+ ret = (int)result.a0;
+ if (!ret && (read_reg & prv->ecc_enable_mask))
+ return 0;
+
+sdram_err:
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "%s: No ECC present or ECC disabled.\n",
+ device->edac_dev_name);
+ return -ENODEV;
+}
+
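The SMC call above follows the usual SiP convention: result.a0 carries the status and result.a1 the value read. A generic sketch of that read (INTEL_SIP_SMC_REG_READ is the driver's function ID; demo_* names are illustrative):

#include <linux/arm-smccc.h>
#include <linux/errno.h>

static int demo_smc_reg_read(unsigned long fid, unsigned long addr, u32 *val)
{
	struct arm_smccc_res res;

	arm_smccc_smc(fid, addr, 0, 0, 0, 0, 0, 0, &res);
	if ((int)res.a0)
		return -EIO;	/* non-zero a0: the secure call failed */

	*val = (u32)res.a1;
	return 0;
}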
static const struct edac_device_prv_data s10_sdramecc_data = {
- .setup = altr_check_ecc_deps,
+ .setup = altr_s10_sdram_check_ecc_deps,
.ce_clear_mask = ALTR_S10_ECC_SERRPENA,
.ue_clear_mask = ALTR_S10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_S10_ECC_EN,
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index fba609ada0e6..812baa48b290 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -15,6 +15,21 @@ static struct msr __percpu *msrs;
static struct amd64_family_type *fam_type;
+static inline u32 get_umc_reg(u32 reg)
+{
+ if (!fam_type->flags.zn_regs_v2)
+ return reg;
+
+ switch (reg) {
+ case UMCCH_ADDR_CFG: return UMCCH_ADDR_CFG_DDR5;
+ case UMCCH_ADDR_MASK_SEC: return UMCCH_ADDR_MASK_SEC_DDR5;
+ case UMCCH_DIMM_CFG: return UMCCH_DIMM_CFG_DDR5;
+ }
+
+ WARN_ONCE(1, "%s: unknown register 0x%x", __func__, reg);
+ return 0;
+}
+
/* Per-node stuff */
static struct ecc_settings **ecc_stngs;
@@ -1429,8 +1444,10 @@ static void __dump_misc_regs_df(struct amd64_pvt *pvt)
edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
- if (pvt->dram_type == MEM_LRDDR4) {
- amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
+ if (umc->dram_type == MEM_LRDDR4 || umc->dram_type == MEM_LRDDR5) {
+ amd_smn_read(pvt->mc_node_id,
+ umc_base + get_umc_reg(UMCCH_ADDR_CFG),
+ &tmp);
edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
i, 1 << ((tmp >> 4) & 0x3));
}
@@ -1505,7 +1522,7 @@ static void prep_chip_selects(struct amd64_pvt *pvt)
for_each_umc(umc) {
pvt->csels[umc].b_cnt = 4;
- pvt->csels[umc].m_cnt = 2;
+ pvt->csels[umc].m_cnt = fam_type->flags.zn_regs_v2 ? 4 : 2;
}
} else {
@@ -1545,7 +1562,7 @@ static void read_umc_base_mask(struct amd64_pvt *pvt)
}
umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
- umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC;
+ umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(UMCCH_ADDR_MASK_SEC);
for_each_chip_select_mask(cs, umc, pvt) {
mask = &pvt->csels[umc].csmasks[cs];
@@ -1616,19 +1633,49 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
}
}
+static void determine_memory_type_df(struct amd64_pvt *pvt)
+{
+ struct amd64_umc *umc;
+ u32 i;
+
+ for_each_umc(i) {
+ umc = &pvt->umc[i];
+
+ if (!(umc->sdp_ctrl & UMC_SDP_INIT)) {
+ umc->dram_type = MEM_EMPTY;
+ continue;
+ }
+
+ /*
+ * Check if the system supports the "DDR Type" field in UMC Config
+ * and has DDR5 DIMMs in use.
+ */
+ if (fam_type->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) {
+ if (umc->dimm_cfg & BIT(5))
+ umc->dram_type = MEM_LRDDR5;
+ else if (umc->dimm_cfg & BIT(4))
+ umc->dram_type = MEM_RDDR5;
+ else
+ umc->dram_type = MEM_DDR5;
+ } else {
+ if (umc->dimm_cfg & BIT(5))
+ umc->dram_type = MEM_LRDDR4;
+ else if (umc->dimm_cfg & BIT(4))
+ umc->dram_type = MEM_RDDR4;
+ else
+ umc->dram_type = MEM_DDR4;
+ }
+
+ edac_dbg(1, " UMC%d DIMM type: %s\n", i, edac_mem_types[umc->dram_type]);
+ }
+}
+
static void determine_memory_type(struct amd64_pvt *pvt)
{
u32 dram_ctrl, dcsm;
- if (pvt->umc) {
- if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
- pvt->dram_type = MEM_LRDDR4;
- else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
- pvt->dram_type = MEM_RDDR4;
- else
- pvt->dram_type = MEM_DDR4;
- return;
- }
+ if (pvt->umc)
+ return determine_memory_type_df(pvt);
switch (pvt->fam) {
case 0xf:
@@ -2149,6 +2196,7 @@ static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
{
u32 addr_mask_orig, addr_mask_deinterleaved;
u32 msb, weight, num_zero_bits;
+ int cs_mask_nr = csrow_nr;
int dimm, size = 0;
/* No Chip Selects are enabled. */
@@ -2164,17 +2212,33 @@ static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
return size;
/*
- * There is one mask per DIMM, and two Chip Selects per DIMM.
- * CS0 and CS1 -> DIMM0
- * CS2 and CS3 -> DIMM1
+ * Family 17h introduced systems with one mask per DIMM,
+ * and two Chip Selects per DIMM.
+ *
+ * CS0 and CS1 -> MASK0 / DIMM0
+ * CS2 and CS3 -> MASK1 / DIMM1
+ *
+ * Family 19h Model 10h introduced systems with one mask per Chip Select,
+ * and two Chip Selects per DIMM.
+ *
+ * CS0 -> MASK0 -> DIMM0
+ * CS1 -> MASK1 -> DIMM0
+ * CS2 -> MASK2 -> DIMM1
+ * CS3 -> MASK3 -> DIMM1
+ *
+ * Keep the mask number equal to the Chip Select number for newer systems,
+ * and shift the mask number for older systems.
*/
dimm = csrow_nr >> 1;
+ if (!fam_type->flags.zn_regs_v2)
+ cs_mask_nr >>= 1;
+
/* Asymmetric dual-rank DIMM support. */
if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
- addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
+ addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
else
- addr_mask_orig = pvt->csels[umc].csmasks[dimm];
+ addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
/*
* The number of zero bits in the mask is equal to the number of bits
@@ -2930,6 +2994,7 @@ static struct amd64_family_type family_types[] = {
.f0_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F6,
.max_mcs = 12,
+ .flags.zn_regs_v2 = 1,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
@@ -3368,7 +3433,7 @@ static void __read_mc_regs_df(struct amd64_pvt *pvt)
umc_base = get_umc_base(i);
umc = &pvt->umc[i];
- amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
+ amd_smn_read(nid, umc_base + get_umc_reg(UMCCH_DIMM_CFG), &umc->dimm_cfg);
amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
@@ -3452,7 +3517,9 @@ skip:
read_dct_base_mask(pvt);
determine_memory_type(pvt);
- edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
+
+ if (!pvt->umc)
+ edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
determine_ecc_sym_sz(pvt);
}
@@ -3548,7 +3615,7 @@ static int init_csrows_df(struct mem_ctl_info *mci)
pvt->mc_node_id, cs);
dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
- dimm->mtype = pvt->dram_type;
+ dimm->mtype = pvt->umc[umc].dram_type;
dimm->edac_mode = edac_mode;
dimm->dtype = dev_type;
dimm->grain = 64;
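The cs_mask_nr change encodes the two layouts spelled out in the comment: one mask per DIMM before Family 19h Model 10h, one mask per chip select with zn_regs_v2. Reduced to a sketch:

#include <linux/types.h>

static int demo_cs_mask_nr(int csrow_nr, bool zn_regs_v2)
{
	/* old: CS0/CS1 -> MASK0, CS2/CS3 -> MASK1; new: 1:1 mapping */
	return zn_regs_v2 ? csrow_nr : csrow_nr >> 1;
}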
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 352bda9803f6..38e5ad95d010 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -273,8 +273,11 @@
#define UMCCH_BASE_ADDR_SEC 0x10
#define UMCCH_ADDR_MASK 0x20
#define UMCCH_ADDR_MASK_SEC 0x28
+#define UMCCH_ADDR_MASK_SEC_DDR5 0x30
#define UMCCH_ADDR_CFG 0x30
+#define UMCCH_ADDR_CFG_DDR5 0x40
#define UMCCH_DIMM_CFG 0x80
+#define UMCCH_DIMM_CFG_DDR5 0x90
#define UMCCH_UMC_CFG 0x100
#define UMCCH_SDP_CTRL 0x104
#define UMCCH_ECC_CTRL 0x14C
@@ -344,6 +347,9 @@ struct amd64_umc {
u32 sdp_ctrl; /* SDP Control reg */
u32 ecc_ctrl; /* DRAM ECC Control reg */
u32 umc_cap_hi; /* Capabilities High reg */
+
+ /* cache the dram_type */
+ enum mem_type dram_type;
};
struct amd64_pvt {
@@ -391,7 +397,12 @@ struct amd64_pvt {
/* place to store error injection parameters prior to issue */
struct error_injection injection;
- /* cache the dram_type */
+ /*
+ * cache the dram_type
+ *
+ * NOTE: Don't use this for Family 17h and later.
+ * Use dram_type in struct amd64_umc instead.
+ */
enum mem_type dram_type;
struct amd64_umc *umc; /* UMC registers */
@@ -480,11 +491,22 @@ struct low_ops {
unsigned cs_mode, int cs_mask_nr);
};
+struct amd64_family_flags {
+ /*
+ * Indicates that the system supports the new register offsets, etc.
+ * first introduced with Family 19h Model 10h.
+ */
+ __u64 zn_regs_v2 : 1,
+
+ __reserved : 63;
+};
+
struct amd64_family_type {
const char *ctl_name;
u16 f0_id, f1_id, f2_id, f6_id;
/* Maximum number of memory controllers per die/node. */
u8 max_mcs;
+ struct amd64_family_flags flags;
struct low_ops ops;
};
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index 5e7593753799..9a61d92bdf42 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -163,13 +163,14 @@ CTL_INFO_ATTR(poll_msec, S_IRUGO | S_IWUSR,
edac_device_ctl_poll_msec_show, edac_device_ctl_poll_msec_store);
/* Base Attributes of the EDAC_DEVICE ECC object */
-static struct ctl_info_attribute *device_ctrl_attr[] = {
- &attr_ctl_info_panic_on_ue,
- &attr_ctl_info_log_ue,
- &attr_ctl_info_log_ce,
- &attr_ctl_info_poll_msec,
+static struct attribute *device_ctrl_attrs[] = {
+ &attr_ctl_info_panic_on_ue.attr,
+ &attr_ctl_info_log_ue.attr,
+ &attr_ctl_info_log_ce.attr,
+ &attr_ctl_info_poll_msec.attr,
NULL,
};
+ATTRIBUTE_GROUPS(device_ctrl);
/*
* edac_device_ctrl_master_release
@@ -217,7 +218,7 @@ static void edac_device_ctrl_master_release(struct kobject *kobj)
static struct kobj_type ktype_device_ctrl = {
.release = edac_device_ctrl_master_release,
.sysfs_ops = &device_ctl_info_ops,
- .default_attrs = (struct attribute **)device_ctrl_attr,
+ .default_groups = device_ctrl_groups,
};
/*
@@ -389,17 +390,18 @@ INSTANCE_ATTR(ce_count, S_IRUGO, instance_ce_count_show, NULL);
INSTANCE_ATTR(ue_count, S_IRUGO, instance_ue_count_show, NULL);
/* list of edac_dev 'instance' attributes */
-static struct instance_attribute *device_instance_attr[] = {
- &attr_instance_ce_count,
- &attr_instance_ue_count,
+static struct attribute *device_instance_attrs[] = {
+ &attr_instance_ce_count.attr,
+ &attr_instance_ue_count.attr,
NULL,
};
+ATTRIBUTE_GROUPS(device_instance);
/* The 'ktype' for each edac_dev 'instance' */
static struct kobj_type ktype_instance_ctrl = {
.release = edac_device_ctrl_instance_release,
.sysfs_ops = &device_instance_ops,
- .default_attrs = (struct attribute **)device_instance_attr,
+ .default_groups = device_instance_groups,
};
/* edac_dev -> instance -> block information */
@@ -487,17 +489,18 @@ BLOCK_ATTR(ce_count, S_IRUGO, block_ce_count_show, NULL);
BLOCK_ATTR(ue_count, S_IRUGO, block_ue_count_show, NULL);
/* list of edac_dev 'block' attributes */
-static struct edac_dev_sysfs_block_attribute *device_block_attr[] = {
- &attr_block_ce_count,
- &attr_block_ue_count,
+static struct attribute *device_block_attrs[] = {
+ &attr_block_ce_count.attr,
+ &attr_block_ue_count.attr,
NULL,
};
+ATTRIBUTE_GROUPS(device_block);
/* The 'ktype' for each edac_dev 'block' */
static struct kobj_type ktype_block_ctrl = {
.release = edac_device_ctrl_block_release,
.sysfs_ops = &device_block_ops,
- .default_attrs = (struct attribute **)device_block_attr,
+ .default_groups = device_block_groups,
};
/* block ctor/dtor code */
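All three EDAC conversions above follow the same recipe for retiring kobj_type.default_attrs: declare a NULL-terminated struct attribute array named <prefix>_attrs, let ATTRIBUTE_GROUPS(<prefix>) generate <prefix>_groups, and point .default_groups at it. A self-contained sketch with a hypothetical demo attribute:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	return sysfs_emit(buf, "demo\n");
}

static struct kobj_attribute demo_attr = __ATTR(demo, 0444, demo_show, NULL);

static struct attribute *demo_attrs[] = {
	&demo_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo);		/* generates demo_groups */

static struct kobj_type demo_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups	= demo_groups,
};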
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index f5677d81bd2d..d2715774af6f 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -213,12 +213,12 @@ void *edac_align_ptr(void **p, unsigned int size, int n_elems)
else if (size > sizeof(char))
align = sizeof(short);
else
- return (char *)ptr;
+ return ptr;
r = (unsigned long)ptr % align;
if (r == 0)
- return (char *)ptr;
+ return ptr;
*p += align - r;
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 53042af7262e..888d5728ecef 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -135,17 +135,18 @@ INSTANCE_ATTR(pe_count, S_IRUGO, instance_pe_count_show, NULL);
INSTANCE_ATTR(npe_count, S_IRUGO, instance_npe_count_show, NULL);
/* pci instance attributes */
-static struct instance_attribute *pci_instance_attr[] = {
- &attr_instance_pe_count,
- &attr_instance_npe_count,
+static struct attribute *pci_instance_attrs[] = {
+ &attr_instance_pe_count.attr,
+ &attr_instance_npe_count.attr,
NULL
};
+ATTRIBUTE_GROUPS(pci_instance);
/* the ktype for a pci instance */
static struct kobj_type ktype_pci_instance = {
.release = edac_pci_instance_release,
.sysfs_ops = &pci_instance_ops,
- .default_attrs = (struct attribute **)pci_instance_attr,
+ .default_groups = pci_instance_groups,
};
/*
@@ -292,15 +293,16 @@ EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, edac_pci_int_show, NULL);
EDAC_PCI_ATTR(pci_nonparity_count, S_IRUGO, edac_pci_int_show, NULL);
/* Base Attributes of the memory ECC object */
-static struct edac_pci_dev_attribute *edac_pci_attr[] = {
- &edac_pci_attr_check_pci_errors,
- &edac_pci_attr_edac_pci_log_pe,
- &edac_pci_attr_edac_pci_log_npe,
- &edac_pci_attr_edac_pci_panic_on_pe,
- &edac_pci_attr_pci_parity_count,
- &edac_pci_attr_pci_nonparity_count,
+static struct attribute *edac_pci_attrs[] = {
+ &edac_pci_attr_check_pci_errors.attr,
+ &edac_pci_attr_edac_pci_log_pe.attr,
+ &edac_pci_attr_edac_pci_log_npe.attr,
+ &edac_pci_attr_edac_pci_panic_on_pe.attr,
+ &edac_pci_attr_pci_parity_count.attr,
+ &edac_pci_attr_pci_nonparity_count.attr,
NULL,
};
+ATTRIBUTE_GROUPS(edac_pci);
/*
* edac_pci_release_main_kobj
@@ -327,7 +329,7 @@ static void edac_pci_release_main_kobj(struct kobject *kobj)
static struct kobj_type ktype_edac_pci_main_kobj = {
.release = edac_pci_release_main_kobj,
.sysfs_ops = &edac_pci_sysfs_ops,
- .default_attrs = (struct attribute **)edac_pci_attr,
+ .default_groups = edac_pci_groups,
};
/**
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 75cb91055c17..e5cfb01353d8 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -40,6 +40,7 @@ config ARM_SCPI_POWER_DOMAIN
config ARM_SDE_INTERFACE
bool "ARM Software Delegated Exception Interface (SDEI)"
depends on ARM64
+ depends on ACPI_APEI_GHES
help
The Software Delegated Exception Interface (SDEI) is an ARM
standard for registering callbacks from the platform firmware
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
index 3d7081e84853..7794bd41eaa0 100644
--- a/drivers/firmware/arm_scmi/Kconfig
+++ b/drivers/firmware/arm_scmi/Kconfig
@@ -54,6 +54,18 @@ config ARM_SCMI_TRANSPORT_MAILBOX
If you want the ARM SCMI PROTOCOL stack to include support for a
transport based on mailboxes, answer Y.
+config ARM_SCMI_TRANSPORT_OPTEE
+ bool "SCMI transport based on OP-TEE service"
+ depends on OPTEE=y || OPTEE=ARM_SCMI_PROTOCOL
+ select ARM_SCMI_HAVE_TRANSPORT
+ select ARM_SCMI_HAVE_SHMEM
+ default y
+ help
+ This enables the OP-TEE service based transport for SCMI.
+
+ If you want the ARM SCMI PROTOCOL stack to include support for a
+ transport based on OP-TEE SCMI service, answer Y.
+
config ARM_SCMI_TRANSPORT_SMC
bool "SCMI transport based on SMC"
depends on HAVE_ARM_SMCCC_DISCOVERY
@@ -66,6 +78,20 @@ config ARM_SCMI_TRANSPORT_SMC
If you want the ARM SCMI PROTOCOL stack to include support for a
transport based on SMC, answer Y.
+config ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE
+ bool "Enable atomic mode support for SCMI SMC transport"
+ depends on ARM_SCMI_TRANSPORT_SMC
+ help
+ Enable support for atomic operations on the SCMI SMC based transport.
+
+ If you want the SCMI SMC based transport to operate in atomic
+ mode, avoiding any kind of sleeping behaviour for selected
+ transactions on the TX path, answer Y.
+ Enabling atomic mode operations allows any SCMI driver using this
+ transport to optionally ask for atomic SCMI transactions and operate
+ in atomic context too, at the price of relying on busy-waiting
+ primitives instead. If unsure, say N.
+
config ARM_SCMI_TRANSPORT_VIRTIO
bool "SCMI transport based on VirtIO"
depends on VIRTIO=y || VIRTIO=ARM_SCMI_PROTOCOL
@@ -77,6 +103,36 @@ config ARM_SCMI_TRANSPORT_VIRTIO
If you want the ARM SCMI PROTOCOL stack to include support for a
transport based on VirtIO, answer Y.
+config ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE
+ bool "SCMI VirtIO transport Version 1 compliance"
+ depends on ARM_SCMI_TRANSPORT_VIRTIO
+ default y
+ help
+ This enforces strict compliance with the VirtIO Version 1 specification.
+
+ If you want the ARM SCMI VirtIO transport layer to refuse to work
+ with Legacy VirtIO backends and instead support only VirtIO Version 1
+ devices (or above), answer Y.
+
+ If instead you want to also support old Legacy VirtIO backends (like
+ the ones implemented by kvmtool) and let the core Kernel VirtIO layer
+ take care of the needed conversions, say N.
+
+config ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE
+ bool "Enable atomic mode for SCMI VirtIO transport"
+ depends on ARM_SCMI_TRANSPORT_VIRTIO
+ help
+ Enable support for atomic operations on the SCMI VirtIO based transport.
+
+ If you want the SCMI VirtIO based transport to operate in atomic
+ mode, avoiding any kind of sleeping behaviour for selected
+ transactions on the TX path, answer Y.
+
+ Enabling atomic mode operations allows any SCMI driver using this
+ transport to optionally ask for atomic SCMI transactions and operate
+ in atomic context too, at the price of relying on busy-waiting
+ primitives instead. If unsure, say N.
+
endif #ARM_SCMI_PROTOCOL
config ARM_SCMI_POWER_DOMAIN
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 1dcf123d64ab..8d4afadda38c 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -6,8 +6,16 @@ scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o
+scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o
scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o
scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \
$(scmi-transport-y)
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
+
+ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy)
+# The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame
+# pointer in Thumb2 mode, which is forcibly enabled by Clang when profiling
+# hooks are inserted via the -pg switch.
+CFLAGS_REMOVE_smc.o += $(CC_FLAGS_FTRACE)
+endif
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 35b56c8ba0c0..cf6fed6dec77 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -27,7 +27,8 @@ struct scmi_msg_resp_clock_protocol_attributes {
struct scmi_msg_resp_clock_attributes {
__le32 attributes;
#define CLOCK_ENABLE BIT(0)
- u8 name[SCMI_MAX_STR_SIZE];
+ u8 name[SCMI_MAX_STR_SIZE];
+ __le32 clock_enable_latency;
};
struct scmi_clock_set_config {
@@ -116,10 +117,15 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
attr = t->rx.buf;
ret = ph->xops->do_xfer(ph, t);
- if (!ret)
+ if (!ret) {
strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
- else
+ /* Is the optional clock_enable_latency field provided? */
+ if (t->rx.len == sizeof(*attr))
+ clk->enable_latency =
+ le32_to_cpu(attr->clock_enable_latency);
+ } else {
clk->name[0] = '\0';
+ }
ph->xops->xfer_put(ph, t);
return ret;
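The rx.len check above is an instance of a general pattern for optional trailing fields: a platform implementing an older protocol version replies with a shorter payload, so the received length tells the two layouts apart. A minimal sketch of the idea, using hypothetical structure and function names:

/* Hypothetical layouts: v2 appends one optional trailing field to v1 */
struct resp_v1 { __le32 attributes; u8 name[16]; };
struct resp_v2 { __le32 attributes; u8 name[16]; __le32 latency; };

static u32 parse_optional_latency(const void *buf, size_t rx_len)
{
	const struct resp_v2 *r = buf;

	/* The optional field is present only if the reply is long enough */
	if (rx_len >= sizeof(*r))
		return le32_to_cpu(r->latency);

	return 0;	/* older platform: field absent, fall back to default */
}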
@@ -273,7 +279,7 @@ static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
static int
scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
- u32 config)
+ u32 config, bool atomic)
{
int ret;
struct scmi_xfer *t;
@@ -284,6 +290,8 @@ scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
if (ret)
return ret;
+ t->hdr.poll_completion = atomic;
+
cfg = t->tx.buf;
cfg->id = cpu_to_le32(clk_id);
cfg->attributes = cpu_to_le32(config);
@@ -296,12 +304,24 @@ scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
- return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE);
+ return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, false);
}
static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
- return scmi_clock_config_set(ph, clk_id, 0);
+ return scmi_clock_config_set(ph, clk_id, 0, false);
+}
+
+static int scmi_clock_enable_atomic(const struct scmi_protocol_handle *ph,
+ u32 clk_id)
+{
+ return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, true);
+}
+
+static int scmi_clock_disable_atomic(const struct scmi_protocol_handle *ph,
+ u32 clk_id)
+{
+ return scmi_clock_config_set(ph, clk_id, 0, true);
}
static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
@@ -330,6 +350,8 @@ static const struct scmi_clk_proto_ops clk_proto_ops = {
.rate_set = scmi_clock_rate_set,
.enable = scmi_clock_enable,
.disable = scmi_clock_disable,
+ .enable_atomic = scmi_clock_enable_atomic,
+ .disable_atomic = scmi_clock_disable_atomic,
};
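A consumer of the new atomic ops would be expected to pick the atomic variant only when the transport is atomic and the clock's reported enable latency fits within the system-wide threshold. A hedged sketch, with a hypothetical helper name and assuming access to both the SCMI handle and the protocol handle:

/* Hypothetical sketch: choose between sleeping and atomic enable paths */
static int example_clk_enable(const struct scmi_handle *handle,
			      const struct scmi_clk_proto_ops *ops,
			      const struct scmi_protocol_handle *ph,
			      u32 clk_id, u32 enable_latency_us)
{
	unsigned int atomic_threshold_us = 0;

	if (handle->is_transport_atomic(handle, &atomic_threshold_us) &&
	    enable_latency_us <= atomic_threshold_us)
		return ops->enable_atomic(ph, clk_id);	/* busy-waits, never sleeps */

	return ops->enable(ph, clk_id);			/* may sleep */
}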
static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index dea1bfbe1052..4fda84bfab42 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -339,11 +339,16 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
* @dev: Reference to device in the SCMI hierarchy corresponding to this
* channel
* @handle: Pointer to SCMI entity handle
+ * @no_completion_irq: Flag to indicate that this channel has no completion
+ * interrupt mechanism for synchronous commands.
+ * This can be dynamically set by transports at run-time
+ * inside their provided .chan_setup().
* @transport_info: Transport layer related information
*/
struct scmi_chan_info {
struct device *dev;
struct scmi_handle *handle;
+ bool no_completion_irq;
void *transport_info;
};
@@ -373,7 +378,8 @@ struct scmi_transport_ops {
unsigned int (*get_max_msg)(struct scmi_chan_info *base_cinfo);
int (*send_message)(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer);
- void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret);
+ void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret,
+ struct scmi_xfer *xfer);
void (*fetch_response)(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer);
void (*fetch_notification)(struct scmi_chan_info *cinfo,
@@ -402,6 +408,18 @@ struct scmi_device *scmi_child_dev_find(struct device *parent,
* be pending simultaneously in the system. May be overridden by the
* get_max_msg op.
* @max_msg_size: Maximum size of data per message that can be handled.
+ * @force_polling: Flag to force this whole transport to use SCMI core polling
+ * mechanism instead of completion interrupts even if available.
+ * @sync_cmds_completed_on_ret: Flag to indicate that the transport assures
+ * synchronous-command messages are atomically
+ * completed on .send_message: no need to poll
+ * actively waiting for a response.
+ * Used internally by the core only when polling is
+ * selected as the wait-for-reply method: i.e. if a
+ * completion irq was found, that is used anyway.
+ * @atomic_enabled: Flag to indicate that this transport, which is assured not
+ * to sleep anywhere on the TX path, can be used in atomic mode
+ * when requested.
*/
struct scmi_desc {
int (*transport_init)(void);
@@ -410,6 +428,9 @@ struct scmi_desc {
int max_rx_timeout_ms;
int max_msg;
int max_msg_size;
+ const bool force_polling;
+ const bool sync_cmds_completed_on_ret;
+ const bool atomic_enabled;
};
#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
@@ -421,6 +442,9 @@ extern const struct scmi_desc scmi_smc_desc;
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
extern const struct scmi_desc scmi_virtio_desc;
#endif
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
+extern const struct scmi_desc scmi_optee_desc;
+#endif
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv);
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index b406b3f78f46..46118300a4d1 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -131,6 +131,12 @@ struct scmi_protocol_instance {
* MAX_PROTOCOLS_IMP elements allocated by the base protocol
* @active_protocols: IDR storing device_nodes for protocols actually defined
* in the DT and confirmed as implemented by fw.
+ * @atomic_threshold: Optional system wide DT-configured threshold, expressed
+ * in microseconds, for atomic operations.
+ * Only SCMI synchronous commands reported by the platform
+ * to have an execution latency less than or equal to the
+ * threshold should be considered for atomic mode operation:
+ * the final decision is left to the SCMI drivers.
* @notify_priv: Pointer to private data structure specific to notifications.
* @node: List head
* @users: Number of users of this instance
@@ -149,6 +155,7 @@ struct scmi_info {
struct mutex protocols_mtx;
u8 *protocols_imp;
struct idr active_protocols;
+ unsigned int atomic_threshold;
void *notify_priv;
struct list_head node;
int users;
@@ -609,6 +616,25 @@ static inline void scmi_clear_channel(struct scmi_info *info,
info->desc->ops->clear_channel(cinfo);
}
+static inline bool is_polling_required(struct scmi_chan_info *cinfo,
+ struct scmi_info *info)
+{
+ return cinfo->no_completion_irq || info->desc->force_polling;
+}
+
+static inline bool is_transport_polling_capable(struct scmi_info *info)
+{
+ return info->desc->ops->poll_done ||
+ info->desc->sync_cmds_completed_on_ret;
+}
+
+static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
+ struct scmi_info *info)
+{
+ return is_polling_required(cinfo, info) &&
+ is_transport_polling_capable(info);
+}
+
static void scmi_handle_notification(struct scmi_chan_info *cinfo,
u32 msg_hdr, void *priv)
{
@@ -629,7 +655,8 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
unpack_scmi_header(msg_hdr, &xfer->hdr);
if (priv)
- xfer->priv = priv;
+ /* Ensure order between xfer->priv store and following ops */
+ smp_store_mb(xfer->priv, priv);
info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
xfer);
scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
@@ -661,7 +688,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
xfer->rx.len = info->desc->max_msg_size;
if (priv)
- xfer->priv = priv;
+ /* Ensure order between xfer->priv store and following ops */
+ smp_store_mb(xfer->priv, priv);
info->desc->ops->fetch_response(cinfo, xfer);
trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
@@ -724,8 +752,6 @@ static void xfer_put(const struct scmi_protocol_handle *ph,
__scmi_xfer_put(&info->tx_minfo, xfer);
}
-#define SCMI_MAX_POLL_TO_NS (100 * NSEC_PER_USEC)
-
static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer, ktime_t stop)
{
@@ -741,6 +767,79 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
}
/**
+ * scmi_wait_for_message_response - A helper to group all the possible ways of
+ * waiting for a synchronous message response.
+ *
+ * @cinfo: SCMI channel info
+ * @xfer: Reference to the transfer being waited for.
+ *
+ * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
+ * configuration flags like xfer->hdr.poll_completion.
+ *
+ * Return: 0 on Success, error otherwise.
+ */
+static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct device *dev = info->dev;
+ int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms;
+
+ trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ timeout_ms,
+ xfer->hdr.poll_completion);
+
+ if (xfer->hdr.poll_completion) {
+ /*
+ * Real polling is needed only if transport has NOT declared
+ * itself to support synchronous commands replies.
+ */
+ if (!info->desc->sync_cmds_completed_on_ret) {
+ /*
+ * Poll on xfer using transport provided .poll_done();
+ * assumes no completion interrupt was available.
+ */
+ ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);
+
+ spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
+ xfer, stop));
+ if (ktime_after(ktime_get(), stop)) {
+ dev_err(dev,
+ "timed out in resp(caller: %pS) - polling\n",
+ (void *)_RET_IP_);
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ if (!ret) {
+ unsigned long flags;
+
+ /*
+ * Do not fetch_response if an out-of-order delayed
+ * response is being processed.
+ */
+ spin_lock_irqsave(&xfer->lock, flags);
+ if (xfer->state == SCMI_XFER_SENT_OK) {
+ info->desc->ops->fetch_response(cinfo, xfer);
+ xfer->state = SCMI_XFER_RESP_OK;
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ }
+ } else {
+ /* And we wait for the response. */
+ if (!wait_for_completion_timeout(&xfer->done,
+ msecs_to_jiffies(timeout_ms))) {
+ dev_err(dev, "timed out in resp(caller: %pS)\n",
+ (void *)_RET_IP_);
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ return ret;
+}
+
+/**
* do_xfer() - Do one transfer
*
* @ph: Pointer to SCMI protocol handle
@@ -754,18 +853,26 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
struct scmi_xfer *xfer)
{
int ret;
- int timeout;
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
struct scmi_info *info = handle_to_scmi_info(pi->handle);
struct device *dev = info->dev;
struct scmi_chan_info *cinfo;
- if (xfer->hdr.poll_completion && !info->desc->ops->poll_done) {
+ /* Check for a polling request on custom command xfers first */
+ if (xfer->hdr.poll_completion && !is_transport_polling_capable(info)) {
dev_warn_once(dev,
"Polling mode is not supported by transport.\n");
return -EINVAL;
}
+ cinfo = idr_find(&info->tx_idr, pi->proto->id);
+ if (unlikely(!cinfo))
+ return -EINVAL;
+
+ /* True ONLY if also supported by transport. */
+ if (is_polling_enabled(cinfo, info))
+ xfer->hdr.poll_completion = true;
+
/*
* Initialise protocol id now from protocol handle to avoid it being
* overridden by mistake (or malice) by the protocol code mangling with
@@ -774,10 +881,6 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
xfer->hdr.protocol_id = pi->proto->id;
reinit_completion(&xfer->done);
- cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
- if (unlikely(!cinfo))
- return -EINVAL;
-
trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
xfer->hdr.poll_completion);
@@ -798,41 +901,12 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
return ret;
}
- if (xfer->hdr.poll_completion) {
- ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
-
- spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
- if (ktime_before(ktime_get(), stop)) {
- unsigned long flags;
-
- /*
- * Do not fetch_response if an out-of-order delayed
- * response is being processed.
- */
- spin_lock_irqsave(&xfer->lock, flags);
- if (xfer->state == SCMI_XFER_SENT_OK) {
- info->desc->ops->fetch_response(cinfo, xfer);
- xfer->state = SCMI_XFER_RESP_OK;
- }
- spin_unlock_irqrestore(&xfer->lock, flags);
- } else {
- ret = -ETIMEDOUT;
- }
- } else {
- /* And we wait for the response. */
- timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
- if (!wait_for_completion_timeout(&xfer->done, timeout)) {
- dev_err(dev, "timed out in resp(caller: %pS)\n",
- (void *)_RET_IP_);
- ret = -ETIMEDOUT;
- }
- }
-
+ ret = scmi_wait_for_message_response(cinfo, xfer);
if (!ret && xfer->hdr.status)
ret = scmi_to_linux_errno(xfer->hdr.status);
if (info->desc->ops->mark_txdone)
- info->desc->ops->mark_txdone(cinfo, ret);
+ info->desc->ops->mark_txdone(cinfo, ret, xfer);
trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq, ret);
@@ -858,6 +932,20 @@ static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
* @ph: Pointer to SCMI protocol handle
* @xfer: Transfer to initiate and wait for response
*
+ * Using asynchronous commands in atomic/polling mode should be avoided since
+ * it could cause long busy-waiting here, so ignore polling for the delayed
+ * response and WARN if it was requested for this command transaction since
+ * upper layers should refrain from issuing such kinds of requests.
+ *
+ * The only other option would have been to refrain from using any asynchronous
+ * command, even when one is available, once an atomic transport is detected,
+ * and instead forcibly use the synchronous version (something easily attained
+ * at the protocol layer); but this would also have led to longer stalls of the
+ * channel for synchronous commands and possibly to timeouts.
+ * (In other words, there is usually a good reason if a platform provides an
+ * asynchronous version of a command, and we should prefer to use it...just not
+ * when using atomic/polling mode.)
+ *
* Return: -ETIMEDOUT in case of no delayed response, if transmit error,
* return corresponding error, else if all goes well, return 0.
*/
@@ -869,12 +957,24 @@ static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
xfer->async_done = &async_response;
+ /*
+ * Delayed responses should not be polled, so an async command should
+ * not have been used when requiring an atomic/poll context; WARN and
+ * perform instead a sleeping wait.
+ * (Note Async + IgnoreDelayedResponses are sent via do_xfer)
+ */
+ WARN_ON_ONCE(xfer->hdr.poll_completion);
+
ret = do_xfer(ph, xfer);
if (!ret) {
- if (!wait_for_completion_timeout(xfer->async_done, timeout))
+ if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
+ dev_err(ph->dev,
+ "timed out in delayed resp(caller: %pS)\n",
+ (void *)_RET_IP_);
ret = -ETIMEDOUT;
- else if (xfer->hdr.status)
+ } else if (xfer->hdr.status) {
ret = scmi_to_linux_errno(xfer->hdr.status);
+ }
}
xfer->async_done = NULL;
@@ -1308,6 +1408,29 @@ static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
WARN_ON(ret);
}
+/**
+ * scmi_is_transport_atomic - Method to check if underlying transport for an
+ * SCMI instance is configured as atomic.
+ *
+ * @handle: A reference to the SCMI platform instance.
+ * @atomic_threshold: An optional return value for the system wide currently
+ * configured threshold for atomic operations.
+ *
+ * Return: True if transport is configured as atomic
+ */
+static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
+ unsigned int *atomic_threshold)
+{
+ bool ret;
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ ret = info->desc->atomic_enabled && is_transport_polling_capable(info);
+ if (ret && atomic_threshold)
+ *atomic_threshold = info->atomic_threshold;
+
+ return ret;
+}
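An SCMI driver built on top of this instance could query the capability at probe time; a minimal sketch with a hypothetical driver name:

/* Hypothetical probe sketch: query transport atomicity and the threshold */
static int example_scmi_driver_probe(struct scmi_device *sdev)
{
	unsigned int atomic_threshold_us = 0;
	const struct scmi_handle *handle = sdev->handle;

	if (handle->is_transport_atomic(handle, &atomic_threshold_us))
		dev_info(&sdev->dev, "atomic transport, threshold %u us\n",
			 atomic_threshold_us);

	return 0;
}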
+
static inline
struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
{
@@ -1499,6 +1622,16 @@ static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
if (ret)
return ret;
+ if (tx && is_polling_required(cinfo, info)) {
+ if (is_transport_polling_capable(info))
+ dev_info(dev,
+ "Enabled polling mode TX channel - prot_id:%d\n",
+ prot_id);
+ else
+ dev_warn(dev,
+ "Polling mode NOT supported by transport.\n");
+ }
+
idr_alloc:
ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
if (ret != prot_id) {
@@ -1836,6 +1969,14 @@ static int scmi_probe(struct platform_device *pdev)
handle->devm_protocol_get = scmi_devm_protocol_get;
handle->devm_protocol_put = scmi_devm_protocol_put;
+ /* System wide atomic threshold for atomic ops, if any */
+ if (!of_property_read_u32(np, "atomic-threshold-us",
+ &info->atomic_threshold))
+ dev_info(dev,
+ "SCMI System wide atomic threshold set to %d us\n",
+ info->atomic_threshold);
+ handle->is_transport_atomic = scmi_is_transport_atomic;
+
if (desc->ops->link_supplier) {
ret = desc->ops->link_supplier(dev);
if (ret)
@@ -1853,6 +1994,10 @@ static int scmi_probe(struct platform_device *pdev)
if (scmi_notification_init(handle))
dev_err(dev, "SCMI Notifications NOT available.\n");
+ if (info->desc->atomic_enabled && !is_transport_polling_capable(info))
+ dev_err(dev,
+ "Transport is not polling capable. Atomic mode not supported.\n");
+
/*
* Trigger SCMI Base protocol initialization.
* It's mandatory and won't be ever released/deinit until the
@@ -1994,6 +2139,9 @@ static const struct of_device_id scmi_of_match[] = {
#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
#endif
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
+ { .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
+#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
#endif
@@ -2112,7 +2260,7 @@ static void __exit scmi_driver_exit(void)
}
module_exit(scmi_driver_exit);
-MODULE_ALIAS("platform: arm-scmi");
+MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
index e09eb12bf421..08ff4d110beb 100644
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -140,7 +140,8 @@ static int mailbox_send_message(struct scmi_chan_info *cinfo,
return ret;
}
-static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret)
+static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
+ struct scmi_xfer *__unused)
{
struct scmi_mailbox *smbox = cinfo->transport_info;
diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c
new file mode 100644
index 000000000000..734f1eeee161
--- /dev/null
+++ b/drivers/firmware/arm_scmi/optee.c
@@ -0,0 +1,567 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019-2021 Linaro Ltd.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+#include <uapi/linux/tee.h>
+
+#include "common.h"
+
+#define SCMI_OPTEE_MAX_MSG_SIZE 128
+
+enum scmi_optee_pta_cmd {
+ /*
+ * PTA_SCMI_CMD_CAPABILITIES - Get channel capabilities
+ *
+ * [out] value[0].a: Capability bit mask (enum pta_scmi_caps)
+ * [out] value[0].b: Extended capabilities or 0
+ */
+ PTA_SCMI_CMD_CAPABILITIES = 0,
+
+ /*
+ * PTA_SCMI_CMD_PROCESS_SMT_CHANNEL - Process SCMI message in SMT buffer
+ *
+ * [in] value[0].a: Channel handle
+ *
+ * Shared memory used for SCMI message/response exchange is expected
+ * to be already identified and bound to the channel handle in both the
+ * SCMI agent and the SCMI server (OP-TEE) parts.
+ * The memory uses SMT header to carry SCMI meta-data (protocol ID and
+ * protocol message ID).
+ */
+ PTA_SCMI_CMD_PROCESS_SMT_CHANNEL = 1,
+
+ /*
+ * PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE - Process SMT/SCMI message
+ *
+ * [in] value[0].a: Channel handle
+ * [in/out] memref[1]: Message/response buffer (SMT and SCMI payload)
+ *
+ * Shared memory used for SCMI message/response is an SMT buffer
+ * referenced by param[1]. It shall be 128 bytes large to fit the
+ * response payload whatever the message payload size.
+ * The memory uses SMT header to carry SCMI meta-data (protocol ID and
+ * protocol message ID).
+ */
+ PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE = 2,
+
+ /*
+ * PTA_SCMI_CMD_GET_CHANNEL - Get channel handle
+ *
+ * SCMI shm information is 0 if the agent expects to use OP-TEE regular SHM
+ *
+ * [in] value[0].a: Channel identifier
+ * [out] value[0].a: Returned channel handle
+ * [in] value[0].b: Requested capabilities mask (enum pta_scmi_caps)
+ */
+ PTA_SCMI_CMD_GET_CHANNEL = 3,
+};
+
+/*
+ * OP-TEE SCMI service capabilities bit flags (32bit)
+ *
+ * PTA_SCMI_CAPS_SMT_HEADER
+ * When set, OP-TEE supports commands using the SMT header protocol (SCMI shmem)
+ * in shared memory buffers to carry SCMI protocol synchronisation information.
+ */
+#define PTA_SCMI_CAPS_NONE 0
+#define PTA_SCMI_CAPS_SMT_HEADER BIT(0)
+
+/**
+ * struct scmi_optee_channel - Description of an OP-TEE SCMI channel
+ *
+ * @channel_id: OP-TEE channel ID used for this transport
+ * @tee_session: TEE session identifier
+ * @caps: OP-TEE SCMI channel capabilities
+ * @mu: Mutex protection on channel access
+ * @cinfo: SCMI channel information
+ * @shmem: Virtual base address of the shared memory
+ * @tee_shm: Reference to TEE shared memory or NULL if using static shmem
+ * @link: Reference in agent's channel list
+ */
+struct scmi_optee_channel {
+ u32 channel_id;
+ u32 tee_session;
+ u32 caps;
+ struct mutex mu;
+ struct scmi_chan_info *cinfo;
+ struct scmi_shared_mem __iomem *shmem;
+ struct tee_shm *tee_shm;
+ struct list_head link;
+};
+
+/**
+ * struct scmi_optee_agent - OP-TEE transport private data
+ *
+ * @dev: Device used for communication with TEE
+ * @tee_ctx: TEE context used for communication
+ * @caps: Supported channel capabilities
+ * @mu: Mutex for protection of @channel_list
+ * @channel_list: List of all created channels for the agent
+ */
+struct scmi_optee_agent {
+ struct device *dev;
+ struct tee_context *tee_ctx;
+ u32 caps;
+ struct mutex mu;
+ struct list_head channel_list;
+};
+
+/* There can be only one SCMI service in OP-TEE that we connect to */
+static struct scmi_optee_agent *scmi_optee_private;
+
+/* Forward reference to scmi_optee transport initialization */
+static int scmi_optee_init(void);
+
+/* Open a session toward SCMI OP-TEE service with REE_KERNEL identity */
+static int open_session(struct scmi_optee_agent *agent, u32 *tee_session)
+{
+ struct device *dev = agent->dev;
+ struct tee_client_device *scmi_pta = to_tee_client_device(dev);
+ struct tee_ioctl_open_session_arg arg = { };
+ int ret;
+
+ memcpy(arg.uuid, scmi_pta->id.uuid.b, TEE_IOCTL_UUID_LEN);
+ arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL;
+
+ ret = tee_client_open_session(agent->tee_ctx, &arg, NULL);
+ if (ret < 0 || arg.ret) {
+ dev_err(dev, "Can't open tee session: %d / %#x\n", ret, arg.ret);
+ return -EOPNOTSUPP;
+ }
+
+ *tee_session = arg.session;
+
+ return 0;
+}
+
+static void close_session(struct scmi_optee_agent *agent, u32 tee_session)
+{
+ tee_client_close_session(agent->tee_ctx, tee_session);
+}
+
+static int get_capabilities(struct scmi_optee_agent *agent)
+{
+ struct tee_ioctl_invoke_arg arg = { };
+ struct tee_param param[1] = { };
+ u32 caps;
+ u32 tee_session;
+ int ret;
+
+ ret = open_session(agent, &tee_session);
+ if (ret)
+ return ret;
+
+ arg.func = PTA_SCMI_CMD_CAPABILITIES;
+ arg.session = tee_session;
+ arg.num_params = 1;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
+
+ ret = tee_client_invoke_func(agent->tee_ctx, &arg, param);
+
+ close_session(agent, tee_session);
+
+ if (ret < 0 || arg.ret) {
+ dev_err(agent->dev, "Can't get capabilities: %d / %#x\n", ret, arg.ret);
+ return -EOPNOTSUPP;
+ }
+
+ caps = param[0].u.value.a;
+
+ if (!(caps & PTA_SCMI_CAPS_SMT_HEADER)) {
+ dev_err(agent->dev, "OP-TEE SCMI PTA doesn't support SMT\n");
+ return -EOPNOTSUPP;
+ }
+
+ agent->caps = caps;
+
+ return 0;
+}
+
+static int get_channel(struct scmi_optee_channel *channel)
+{
+ struct device *dev = scmi_optee_private->dev;
+ struct tee_ioctl_invoke_arg arg = { };
+ struct tee_param param[1] = { };
+ unsigned int caps = PTA_SCMI_CAPS_SMT_HEADER;
+ int ret;
+
+ arg.func = PTA_SCMI_CMD_GET_CHANNEL;
+ arg.session = channel->tee_session;
+ arg.num_params = 1;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
+ param[0].u.value.a = channel->channel_id;
+ param[0].u.value.b = caps;
+
+ ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);
+
+ if (ret || arg.ret) {
+ dev_err(dev, "Can't get channel with caps %#x: %d / %#x\n", caps, ret, arg.ret);
+ return -EOPNOTSUPP;
+ }
+
+ /* From now on use the channel identifier provided by the OP-TEE SCMI service */
+ channel->channel_id = param[0].u.value.a;
+ channel->caps = caps;
+
+ return 0;
+}
+
+static int invoke_process_smt_channel(struct scmi_optee_channel *channel)
+{
+ struct tee_ioctl_invoke_arg arg = { };
+ struct tee_param param[2] = { };
+ int ret;
+
+ arg.session = channel->tee_session;
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ param[0].u.value.a = channel->channel_id;
+
+ if (channel->tee_shm) {
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
+ param[1].u.memref.shm = channel->tee_shm;
+ param[1].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE;
+ arg.num_params = 2;
+ arg.func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE;
+ } else {
+ arg.num_params = 1;
+ arg.func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL;
+ }
+
+ ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);
+ if (ret < 0 || arg.ret) {
+ dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n",
+ channel->channel_id, ret, arg.ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int scmi_optee_link_supplier(struct device *dev)
+{
+ if (!scmi_optee_private) {
+ if (scmi_optee_init())
+ dev_dbg(dev, "Optee bus not yet ready\n");
+
+ /* Wait for optee bus */
+ return -EPROBE_DEFER;
+ }
+
+ if (!device_link_add(dev, scmi_optee_private->dev, DL_FLAG_AUTOREMOVE_CONSUMER)) {
+ dev_err(dev, "Adding link to supplier optee device failed\n");
+ return -ECANCELED;
+ }
+
+ return 0;
+}
+
+static bool scmi_optee_chan_available(struct device *dev, int idx)
+{
+ u32 channel_id;
+
+ return !of_property_read_u32_index(dev->of_node, "linaro,optee-channel-id",
+ idx, &channel_id);
+}
+
+static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo)
+{
+ struct scmi_optee_channel *channel = cinfo->transport_info;
+
+ shmem_clear_channel(channel->shmem);
+}
+
+static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo,
+ struct scmi_optee_channel *channel)
+{
+ struct device_node *np;
+ resource_size_t size;
+ struct resource res;
+ int ret;
+
+ np = of_parse_phandle(cinfo->dev->of_node, "shmem", 0);
+ if (!of_device_is_compatible(np, "arm,scmi-shmem")) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
+ dev_err(dev, "Failed to get SCMI Tx shared memory\n");
+ goto out;
+ }
+
+ size = resource_size(&res);
+
+ channel->shmem = devm_ioremap(dev, res.start, size);
+ if (!channel->shmem) {
+ dev_err(dev, "Failed to ioremap SCMI Tx shared memory\n");
+ ret = -EADDRNOTAVAIL;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ of_node_put(np);
+
+ return ret;
+}
+
+static int setup_shmem(struct device *dev, struct scmi_chan_info *cinfo,
+ struct scmi_optee_channel *channel)
+{
+ if (of_find_property(cinfo->dev->of_node, "shmem", NULL))
+ return setup_static_shmem(dev, cinfo, channel);
+ else
+ return -ENOMEM;
+}
+
+static int scmi_optee_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx)
+{
+ struct scmi_optee_channel *channel;
+ uint32_t channel_id;
+ int ret;
+
+ if (!tx)
+ return -ENODEV;
+
+ channel = devm_kzalloc(dev, sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_index(cinfo->dev->of_node, "linaro,optee-channel-id",
+ 0, &channel_id);
+ if (ret)
+ return ret;
+
+ cinfo->transport_info = channel;
+ channel->cinfo = cinfo;
+ channel->channel_id = channel_id;
+ mutex_init(&channel->mu);
+
+ ret = setup_shmem(dev, cinfo, channel);
+ if (ret)
+ return ret;
+
+ ret = open_session(scmi_optee_private, &channel->tee_session);
+ if (ret)
+ goto err_free_shm;
+
+ ret = get_channel(channel);
+ if (ret)
+ goto err_close_sess;
+
+ /* Enable polling */
+ cinfo->no_completion_irq = true;
+
+ mutex_lock(&scmi_optee_private->mu);
+ list_add(&channel->link, &scmi_optee_private->channel_list);
+ mutex_unlock(&scmi_optee_private->mu);
+
+ return 0;
+
+err_close_sess:
+ close_session(scmi_optee_private, channel->tee_session);
+err_free_shm:
+ if (channel->tee_shm)
+ tee_shm_free(channel->tee_shm);
+
+ return ret;
+}
+
+static int scmi_optee_chan_free(int id, void *p, void *data)
+{
+ struct scmi_chan_info *cinfo = p;
+ struct scmi_optee_channel *channel = cinfo->transport_info;
+
+ mutex_lock(&scmi_optee_private->mu);
+ list_del(&channel->link);
+ mutex_unlock(&scmi_optee_private->mu);
+
+ close_session(scmi_optee_private, channel->tee_session);
+
+ if (channel->tee_shm) {
+ tee_shm_free(channel->tee_shm);
+ channel->tee_shm = NULL;
+ }
+
+ cinfo->transport_info = NULL;
+ channel->cinfo = NULL;
+
+ scmi_free_channel(cinfo, data, id);
+
+ return 0;
+}
+
+static struct scmi_shared_mem *get_channel_shm(struct scmi_optee_channel *chan,
+ struct scmi_xfer *xfer)
+{
+ if (!chan)
+ return NULL;
+
+ return chan->shmem;
+}
+
+static int scmi_optee_send_message(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_optee_channel *channel = cinfo->transport_info;
+ struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer);
+ int ret;
+
+ mutex_lock(&channel->mu);
+ shmem_tx_prepare(shmem, xfer);
+
+ ret = invoke_process_smt_channel(channel);
+ if (ret)
+ mutex_unlock(&channel->mu);
+
+ return ret;
+}
+
+static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_optee_channel *channel = cinfo->transport_info;
+ struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer);
+
+ shmem_fetch_response(shmem, xfer);
+}
+
+static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret,
+ struct scmi_xfer *__unused)
+{
+ struct scmi_optee_channel *channel = cinfo->transport_info;
+
+ mutex_unlock(&channel->mu);
+}
+
+static struct scmi_transport_ops scmi_optee_ops = {
+ .link_supplier = scmi_optee_link_supplier,
+ .chan_available = scmi_optee_chan_available,
+ .chan_setup = scmi_optee_chan_setup,
+ .chan_free = scmi_optee_chan_free,
+ .send_message = scmi_optee_send_message,
+ .mark_txdone = scmi_optee_mark_txdone,
+ .fetch_response = scmi_optee_fetch_response,
+ .clear_channel = scmi_optee_clear_channel,
+};
+
+static int scmi_optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ return ver->impl_id == TEE_IMPL_ID_OPTEE;
+}
+
+static int scmi_optee_service_probe(struct device *dev)
+{
+ struct scmi_optee_agent *agent;
+ struct tee_context *tee_ctx;
+ int ret;
+
+ /* Only one SCMI OP-TEE device allowed */
+ if (scmi_optee_private) {
+ dev_err(dev, "An SCMI OP-TEE device was already initialized: only one allowed\n");
+ return -EBUSY;
+ }
+
+ tee_ctx = tee_client_open_context(NULL, scmi_optee_ctx_match, NULL, NULL);
+ if (IS_ERR(tee_ctx))
+ return -ENODEV;
+
+ agent = devm_kzalloc(dev, sizeof(*agent), GFP_KERNEL);
+ if (!agent) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ agent->dev = dev;
+ agent->tee_ctx = tee_ctx;
+ INIT_LIST_HEAD(&agent->channel_list);
+ mutex_init(&agent->mu);
+
+ ret = get_capabilities(agent);
+ if (ret)
+ goto err;
+
+ /* Ensure agent resources are all visible before scmi_optee_private is */
+ smp_mb();
+ scmi_optee_private = agent;
+
+ return 0;
+
+err:
+ tee_client_close_context(tee_ctx);
+
+ return ret;
+}
+
+static int scmi_optee_service_remove(struct device *dev)
+{
+ struct scmi_optee_agent *agent = scmi_optee_private;
+
+ if (!scmi_optee_private)
+ return -EINVAL;
+
+ if (!list_empty(&scmi_optee_private->channel_list))
+ return -EBUSY;
+
+ /* Ensure cleared reference is visible before resources are released */
+ smp_store_mb(scmi_optee_private, NULL);
+
+ tee_client_close_context(agent->tee_ctx);
+
+ return 0;
+}
+
+static const struct tee_client_device_id scmi_optee_service_id[] = {
+ {
+ UUID_INIT(0xa8cfe406, 0xd4f5, 0x4a2e,
+ 0x9f, 0x8d, 0xa2, 0x5d, 0xc7, 0x54, 0xc0, 0x99)
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(tee, scmi_optee_service_id);
+
+static struct tee_client_driver scmi_optee_driver = {
+ .id_table = scmi_optee_service_id,
+ .driver = {
+ .name = "scmi-optee",
+ .bus = &tee_bus_type,
+ .probe = scmi_optee_service_probe,
+ .remove = scmi_optee_service_remove,
+ },
+};
+
+static int scmi_optee_init(void)
+{
+ return driver_register(&scmi_optee_driver.driver);
+}
+
+static void scmi_optee_exit(void)
+{
+ if (scmi_optee_private)
+ driver_unregister(&scmi_optee_driver.driver);
+}
+
+const struct scmi_desc scmi_optee_desc = {
+ .transport_exit = scmi_optee_exit,
+ .ops = &scmi_optee_ops,
+ .max_rx_timeout_ms = 30,
+ .max_msg = 20,
+ .max_msg_size = SCMI_OPTEE_MAX_MSG_SIZE,
+ .sync_cmds_completed_on_ret = true,
+};
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
index 4effecc3bb46..745acfdd0b3d 100644
--- a/drivers/firmware/arm_scmi/smc.c
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -7,6 +7,7 @@
*/
#include <linux/arm-smccc.h>
+#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -14,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/processor.h>
#include <linux/slab.h>
#include "common.h"
@@ -23,26 +25,29 @@
*
* @cinfo: SCMI channel info
* @shmem: Transmit/Receive shared memory area
- * @shmem_lock: Lock to protect access to Tx/Rx shared memory area
+ * @shmem_lock: Lock to protect access to Tx/Rx shared memory area.
+ * Used when NOT operating in atomic mode.
+ * @inflight: Atomic flag to protect access to Tx/Rx shared memory area.
+ * Used when operating in atomic mode.
* @func_id: smc/hvc call function id
- * @irq: Optional; employed when platforms indicates msg completion by intr.
- * @tx_complete: Optional, employed only when irq is valid.
*/
struct scmi_smc {
struct scmi_chan_info *cinfo;
struct scmi_shared_mem __iomem *shmem;
+ /* Protect access to shmem area */
struct mutex shmem_lock;
+#define INFLIGHT_NONE MSG_TOKEN_MAX
+ atomic_t inflight;
u32 func_id;
- int irq;
- struct completion tx_complete;
};
static irqreturn_t smc_msg_done_isr(int irq, void *data)
{
struct scmi_smc *scmi_info = data;
- complete(&scmi_info->tx_complete);
+ scmi_rx_callback(scmi_info->cinfo,
+ shmem_read_header(scmi_info->shmem), NULL);
return IRQ_HANDLED;
}
@@ -57,6 +62,41 @@ static bool smc_chan_available(struct device *dev, int idx)
return true;
}
+static inline void smc_channel_lock_init(struct scmi_smc *scmi_info)
+{
+ if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
+ atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
+ else
+ mutex_init(&scmi_info->shmem_lock);
+}
+
+static bool smc_xfer_inflight(struct scmi_xfer *xfer, atomic_t *inflight)
+{
+ int ret;
+
+ ret = atomic_cmpxchg(inflight, INFLIGHT_NONE, xfer->hdr.seq);
+
+ return ret == INFLIGHT_NONE;
+}
+
+static inline void
+smc_channel_lock_acquire(struct scmi_smc *scmi_info,
+ struct scmi_xfer *xfer __maybe_unused)
+{
+ if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
+ spin_until_cond(smc_xfer_inflight(xfer, &scmi_info->inflight));
+ else
+ mutex_lock(&scmi_info->shmem_lock);
+}
+
+static inline void smc_channel_lock_release(struct scmi_smc *scmi_info)
+{
+ if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
+ atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
+ else
+ mutex_unlock(&scmi_info->shmem_lock);
+}
+
static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
bool tx)
{
@@ -111,13 +151,13 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
dev_err(dev, "failed to setup SCMI smc irq\n");
return ret;
}
- init_completion(&scmi_info->tx_complete);
- scmi_info->irq = irq;
+ } else {
+ cinfo->no_completion_irq = true;
}
scmi_info->func_id = func_id;
scmi_info->cinfo = cinfo;
- mutex_init(&scmi_info->shmem_lock);
+ smc_channel_lock_init(scmi_info);
cinfo->transport_info = scmi_info;
return 0;
@@ -142,26 +182,22 @@ static int smc_send_message(struct scmi_chan_info *cinfo,
struct scmi_smc *scmi_info = cinfo->transport_info;
struct arm_smccc_res res;
- mutex_lock(&scmi_info->shmem_lock);
+ /*
+ * Channel will be released only once response has been
+ * surely fully retrieved, so after .mark_txdone()
+ */
+ smc_channel_lock_acquire(scmi_info, xfer);
shmem_tx_prepare(scmi_info->shmem, xfer);
- if (scmi_info->irq)
- reinit_completion(&scmi_info->tx_complete);
-
arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res);
- if (scmi_info->irq)
- wait_for_completion(&scmi_info->tx_complete);
-
- scmi_rx_callback(scmi_info->cinfo,
- shmem_read_header(scmi_info->shmem), NULL);
-
- mutex_unlock(&scmi_info->shmem_lock);
-
/* Only SMCCC_RET_NOT_SUPPORTED is valid error code */
- if (res.a0)
+ if (res.a0) {
+ smc_channel_lock_release(scmi_info);
return -EOPNOTSUPP;
+ }
+
return 0;
}
@@ -173,12 +209,12 @@ static void smc_fetch_response(struct scmi_chan_info *cinfo,
shmem_fetch_response(scmi_info->shmem, xfer);
}
-static bool
-smc_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
+static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret,
+ struct scmi_xfer *__unused)
{
struct scmi_smc *scmi_info = cinfo->transport_info;
- return shmem_poll_done(scmi_info->shmem, xfer);
+ smc_channel_lock_release(scmi_info);
}
static const struct scmi_transport_ops scmi_smc_ops = {
@@ -186,8 +222,8 @@ static const struct scmi_transport_ops scmi_smc_ops = {
.chan_setup = smc_chan_setup,
.chan_free = smc_chan_free,
.send_message = smc_send_message,
+ .mark_txdone = smc_mark_txdone,
.fetch_response = smc_fetch_response,
- .poll_done = smc_poll_done,
};
const struct scmi_desc scmi_smc_desc = {
@@ -195,4 +231,14 @@ const struct scmi_desc scmi_smc_desc = {
.max_rx_timeout_ms = 30,
.max_msg = 20,
.max_msg_size = 128,
+ /*
+ * Setting .sync_cmds_completed_on_ret to true for SMC assumes that,
+ * once the SMC instruction has completed successfully, the issued
+ * SCMI command will have been already fully processed by the SCMI
+ * platform firmware and so any possible response value expected
+ * for the issued command will be immediately ready to be fetched
+ * from the shared memory area.
+ */
+ .sync_cmds_completed_on_ret = true,
+ .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE),
};
diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
index eefcc4146749..14709dbc96a1 100644
--- a/drivers/firmware/arm_scmi/virtio.c
+++ b/drivers/firmware/arm_scmi/virtio.c
@@ -3,8 +3,8 @@
* Virtio Transport driver for Arm System Control and Management Interface
* (SCMI).
*
- * Copyright (C) 2020-2021 OpenSynergy.
- * Copyright (C) 2021 ARM Ltd.
+ * Copyright (C) 2020-2022 OpenSynergy.
+ * Copyright (C) 2021-2022 ARM Ltd.
*/
/**
@@ -17,7 +17,9 @@
* virtqueue. Access to each virtqueue is protected by spinlocks.
*/
+#include <linux/completion.h>
#include <linux/errno.h>
+#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
@@ -27,6 +29,7 @@
#include "common.h"
+#define VIRTIO_MAX_RX_TIMEOUT_MS 60000
#define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */
#define VIRTIO_SCMI_MAX_PDU_SIZE \
(VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
@@ -37,25 +40,46 @@
*
* @vqueue: Associated virtqueue
* @cinfo: SCMI Tx or Rx channel
+ * @free_lock: Protects access to the @free_list.
* @free_list: List of unused scmi_vio_msg, maintained for Tx channels only
+ * @deferred_tx_work: Worker for TX deferred replies processing
+ * @deferred_tx_wq: Workqueue for TX deferred replies
+ * @pending_lock: Protects access to the @pending_cmds_list.
+ * @pending_cmds_list: List of pre-fetched commands queued for later processing
* @is_rx: Whether channel is an Rx channel
- * @ready: Whether transport user is ready to hear about channel
* @max_msg: Maximum number of pending messages for this channel.
- * @lock: Protects access to all members except ready.
- * @ready_lock: Protects access to ready. If required, it must be taken before
- * lock.
+ * @lock: Protects access to all members except users, free_list and
+ * pending_cmds_list.
+ * @shutdown_done: A reference to a completion used when freeing this channel.
+ * @users: A reference count to currently active users of this channel.
*/
struct scmi_vio_channel {
struct virtqueue *vqueue;
struct scmi_chan_info *cinfo;
+ /* lock to protect access to the free list. */
+ spinlock_t free_lock;
struct list_head free_list;
+ /* lock to protect access to the pending list. */
+ spinlock_t pending_lock;
+ struct list_head pending_cmds_list;
+ struct work_struct deferred_tx_work;
+ struct workqueue_struct *deferred_tx_wq;
bool is_rx;
- bool ready;
unsigned int max_msg;
- /* lock to protect access to all members except ready. */
+ /*
+ * Lock to protect access to all members except users, free_list and
+ * pending_cmds_list
+ */
spinlock_t lock;
- /* lock to rotects access to ready flag. */
- spinlock_t ready_lock;
+ struct completion *shutdown_done;
+ refcount_t users;
+};
+
+enum poll_states {
+ VIO_MSG_NOT_POLLED,
+ VIO_MSG_POLL_TIMEOUT,
+ VIO_MSG_POLLING,
+ VIO_MSG_POLL_DONE,
};
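The new poll_states track how a message interacts with the polling path; the transitions below are inferred from the rest of this patch and are offered only as a reading aid:

/*
 * Inferred msg->poll_status lifecycle (sketch; the exact transition points
 * live in the send/poll/mark_txdone paths of this patch):
 *
 *   VIO_MSG_NOT_POLLED --(sent with hdr.poll_completion set)--> VIO_MSG_POLLING
 *   VIO_MSG_POLLING    --(own reply dequeued while polling)---> VIO_MSG_POLL_DONE
 *   VIO_MSG_POLLING    --(polling deadline expired)----------> VIO_MSG_POLL_TIMEOUT
 */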
/**
@@ -65,29 +89,154 @@ struct scmi_vio_channel {
* @input: SDU used for (delayed) responses and notifications
* @list: List which scmi_vio_msg may be part of
* @rx_len: Input SDU size in bytes, once input has been received
+ * @poll_idx: Last used index registered for polling purposes if this message
+ * transaction reply was configured for polling.
+ * @poll_status: Polling state for this message.
+ * @poll_lock: A lock to protect @poll_status
+ * @users: A reference count to track this message users and avoid premature
+ * freeing (and reuse) when polling and IRQ execution paths interleave.
*/
struct scmi_vio_msg {
struct scmi_msg_payld *request;
struct scmi_msg_payld *input;
struct list_head list;
unsigned int rx_len;
+ unsigned int poll_idx;
+ enum poll_states poll_status;
+ /* Lock to protect access to poll_status */
+ spinlock_t poll_lock;
+ refcount_t users;
};
/* Only one SCMI VirtIO device can possibly exist */
static struct virtio_device *scmi_vdev;
+static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch,
+ struct scmi_chan_info *cinfo)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vioch->lock, flags);
+ cinfo->transport_info = vioch;
+ /* Indirectly setting channel not available any more */
+ vioch->cinfo = cinfo;
+ spin_unlock_irqrestore(&vioch->lock, flags);
+
+ refcount_set(&vioch->users, 1);
+}
+
+static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch)
+{
+ return refcount_inc_not_zero(&vioch->users);
+}
+
+static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch)
+{
+ if (refcount_dec_and_test(&vioch->users)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&vioch->lock, flags);
+ if (vioch->shutdown_done) {
+ vioch->cinfo = NULL;
+ complete(vioch->shutdown_done);
+ }
+ spin_unlock_irqrestore(&vioch->lock, flags);
+ }
+}
+
+static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
+{
+ unsigned long flags;
+ DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done);
+ void *deferred_wq = NULL;
+
+ /*
+ * Prepare to wait for the last release if not already released
+ * or in progress.
+ */
+ spin_lock_irqsave(&vioch->lock, flags);
+ if (!vioch->cinfo || vioch->shutdown_done) {
+ spin_unlock_irqrestore(&vioch->lock, flags);
+ return;
+ }
+
+ vioch->shutdown_done = &vioch_shutdown_done;
+ virtio_break_device(vioch->vqueue->vdev);
+ if (!vioch->is_rx && vioch->deferred_tx_wq) {
+ deferred_wq = vioch->deferred_tx_wq;
+ /* Cannot be kicked anymore after this... */
+ vioch->deferred_tx_wq = NULL;
+ }
+ spin_unlock_irqrestore(&vioch->lock, flags);
+
+ if (deferred_wq)
+ destroy_workqueue(deferred_wq);
+
+ scmi_vio_channel_release(vioch);
+
+ /* Let any possibly concurrent RX path release the channel */
+ wait_for_completion(vioch->shutdown_done);
+}
+
+/* Assumed to be called with the vio channel already acquired */
+static struct scmi_vio_msg *
+scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch)
+{
+ unsigned long flags;
+ struct scmi_vio_msg *msg;
+
+ spin_lock_irqsave(&vioch->free_lock, flags);
+ if (list_empty(&vioch->free_list)) {
+ spin_unlock_irqrestore(&vioch->free_lock, flags);
+ return NULL;
+ }
+
+ msg = list_first_entry(&vioch->free_list, typeof(*msg), list);
+ list_del_init(&msg->list);
+ spin_unlock_irqrestore(&vioch->free_lock, flags);
+
+ /* Still no users, no need to acquire poll_lock */
+ msg->poll_status = VIO_MSG_NOT_POLLED;
+ refcount_set(&msg->users, 1);
+
+ return msg;
+}
+
+static inline bool scmi_vio_msg_acquire(struct scmi_vio_msg *msg)
+{
+ return refcount_inc_not_zero(&msg->users);
+}
+
+/* Assumed to be called with the vio channel already acquired */
+static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch,
+ struct scmi_vio_msg *msg)
+{
+ bool ret;
+
+ ret = refcount_dec_and_test(&msg->users);
+ if (ret) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&vioch->free_lock, flags);
+ list_add_tail(&msg->list, &vioch->free_list);
+ spin_unlock_irqrestore(&vioch->free_lock, flags);
+ }
+
+ return ret;
+}
+
static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
{
return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS);
}
static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
- struct scmi_vio_msg *msg,
- struct device *dev)
+ struct scmi_vio_msg *msg)
{
struct scatterlist sg_in;
int rc;
unsigned long flags;
+ struct device *dev = &vioch->vqueue->vdev->dev;
sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE);
@@ -95,7 +244,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
if (rc)
- dev_err_once(dev, "failed to add to virtqueue (%d)\n", rc);
+ dev_err(dev, "failed to add to RX virtqueue (%d)\n", rc);
else
virtqueue_kick(vioch->vqueue);
@@ -104,22 +253,22 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
return rc;
}
+/*
+ * Assumed to be called with the channel already acquired or not ready at all;
+ * vioch->lock MUST NOT have been already acquired.
+ */
static void scmi_finalize_message(struct scmi_vio_channel *vioch,
struct scmi_vio_msg *msg)
{
- if (vioch->is_rx) {
- scmi_vio_feed_vq_rx(vioch, msg, vioch->cinfo->dev);
- } else {
- /* Here IRQs are assumed to be already disabled by the caller */
- spin_lock(&vioch->lock);
- list_add(&msg->list, &vioch->free_list);
- spin_unlock(&vioch->lock);
- }
+ if (vioch->is_rx)
+ scmi_vio_feed_vq_rx(vioch, msg);
+ else
+ scmi_vio_msg_release(vioch, msg);
}
static void scmi_vio_complete_cb(struct virtqueue *vqueue)
{
- unsigned long ready_flags;
+ unsigned long flags;
unsigned int length;
struct scmi_vio_channel *vioch;
struct scmi_vio_msg *msg;
@@ -130,27 +279,25 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue)
vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];
for (;;) {
- spin_lock_irqsave(&vioch->ready_lock, ready_flags);
+ if (!scmi_vio_channel_acquire(vioch))
+ return;
- if (!vioch->ready) {
- if (!cb_enabled)
- (void)virtqueue_enable_cb(vqueue);
- goto unlock_ready_out;
- }
-
- /* IRQs already disabled here no need to irqsave */
- spin_lock(&vioch->lock);
+ spin_lock_irqsave(&vioch->lock, flags);
if (cb_enabled) {
virtqueue_disable_cb(vqueue);
cb_enabled = false;
}
+
msg = virtqueue_get_buf(vqueue, &length);
if (!msg) {
- if (virtqueue_enable_cb(vqueue))
- goto unlock_out;
+ if (virtqueue_enable_cb(vqueue)) {
+ spin_unlock_irqrestore(&vioch->lock, flags);
+ scmi_vio_channel_release(vioch);
+ return;
+ }
cb_enabled = true;
}
- spin_unlock(&vioch->lock);
+ spin_unlock_irqrestore(&vioch->lock, flags);
if (msg) {
msg->rx_len = length;
@@ -161,19 +308,57 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue)
}
/*
- * Release ready_lock and re-enable IRQs between loop iterations
- * to allow virtio_chan_free() to possibly kick in and set the
- * flag vioch->ready to false even in between processing of
- * messages, so as to force outstanding messages to be ignored
- * when system is shutting down.
+ * Release vio channel between loop iterations to allow
+ * virtio_chan_free() to eventually fully release it when
+ * shutting down; in such a case, any outstanding message will
+ * be ignored since this loop will bail out at the next
+ * iteration.
+ */
+ scmi_vio_channel_release(vioch);
+ }
+}
+
+static void scmi_vio_deferred_tx_worker(struct work_struct *work)
+{
+ unsigned long flags;
+ struct scmi_vio_channel *vioch;
+ struct scmi_vio_msg *msg, *tmp;
+
+ vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work);
+
+ if (!scmi_vio_channel_acquire(vioch))
+ return;
+
+ /*
+ * Process pre-fetched messages: these could be non-polled messages or
+ * late timed-out replies to polled messages dequeued by chance while
+ * polling for some other messages: this worker is in charge of processing
+ * the valid non-expired messages and of finally freeing all of them.
+ */
+ spin_lock_irqsave(&vioch->pending_lock, flags);
+
+ /* Scan the list of possibly pre-fetched messages during polling. */
+ list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) {
+ list_del(&msg->list);
+
+ /*
+ * Channel is acquired here (cannot vanish) and this message
+ * is no longer processed elsewhere, so no poll_lock is needed.
*/
- spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
+ if (msg->poll_status == VIO_MSG_NOT_POLLED)
+ scmi_rx_callback(vioch->cinfo,
+ msg_read_header(msg->input), msg);
+
+ /* Free the processed message once done */
+ scmi_vio_msg_release(vioch, msg);
}
-unlock_out:
- spin_unlock(&vioch->lock);
-unlock_ready_out:
- spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
+ spin_unlock_irqrestore(&vioch->pending_lock, flags);
+
+ /* Process possibly still pending messages */
+ scmi_vio_complete_cb(vioch->vqueue);
+
+ scmi_vio_channel_release(vioch);
}
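
For context, the channel lifetime helpers used above are introduced earlier in this patch, outside this excerpt. A minimal sketch of their assumed shape follows; the field names ('users', 'shutdown_done') are assumptions, not taken from this hunk:

	#include <linux/refcount.h>
	#include <linux/completion.h>

	/* Sketch only: field names are assumed, not part of this excerpt. */
	static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch)
	{
		/* Fails once the channel has been marked as shutting down */
		return refcount_inc_not_zero(&vioch->users);
	}

	static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch)
	{
		/* The last user wakes up scmi_vio_channel_cleanup_sync() */
		if (refcount_dec_and_test(&vioch->users))
			complete(vioch->shutdown_done);
	}
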
static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" };
@@ -193,8 +378,8 @@ static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo)
static int virtio_link_supplier(struct device *dev)
{
if (!scmi_vdev) {
- dev_notice_once(dev,
- "Deferring probe after not finding a bound scmi-virtio device\n");
+ dev_notice(dev,
+ "Deferring probe after not finding a bound scmi-virtio device\n");
return -EPROBE_DEFER;
}
@@ -234,7 +419,6 @@ static bool virtio_chan_available(struct device *dev, int idx)
static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
bool tx)
{
- unsigned long flags;
struct scmi_vio_channel *vioch;
int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX;
int i;
@@ -244,6 +428,19 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index];
+ /* Setup a deferred worker for polling. */
+ if (tx && !vioch->deferred_tx_wq) {
+ vioch->deferred_tx_wq =
+ alloc_workqueue(dev_name(&scmi_vdev->dev),
+ WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
+ 0);
+ if (!vioch->deferred_tx_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&vioch->deferred_tx_work,
+ scmi_vio_deferred_tx_worker);
+ }
+
for (i = 0; i < vioch->max_msg; i++) {
struct scmi_vio_msg *msg;
@@ -257,6 +454,8 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
GFP_KERNEL);
if (!msg->request)
return -ENOMEM;
+ spin_lock_init(&msg->poll_lock);
+ refcount_set(&msg->users, 1);
}
msg->input = devm_kzalloc(cinfo->dev, VIRTIO_SCMI_MAX_PDU_SIZE,
@@ -264,44 +463,23 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
if (!msg->input)
return -ENOMEM;
- if (tx) {
- spin_lock_irqsave(&vioch->lock, flags);
- list_add_tail(&msg->list, &vioch->free_list);
- spin_unlock_irqrestore(&vioch->lock, flags);
- } else {
- scmi_vio_feed_vq_rx(vioch, msg, cinfo->dev);
- }
+ scmi_finalize_message(vioch, msg);
}
- spin_lock_irqsave(&vioch->lock, flags);
- cinfo->transport_info = vioch;
- /* Indirectly setting channel not available any more */
- vioch->cinfo = cinfo;
- spin_unlock_irqrestore(&vioch->lock, flags);
-
- spin_lock_irqsave(&vioch->ready_lock, flags);
- vioch->ready = true;
- spin_unlock_irqrestore(&vioch->ready_lock, flags);
+ scmi_vio_channel_ready(vioch, cinfo);
return 0;
}
static int virtio_chan_free(int id, void *p, void *data)
{
- unsigned long flags;
struct scmi_chan_info *cinfo = p;
struct scmi_vio_channel *vioch = cinfo->transport_info;
- spin_lock_irqsave(&vioch->ready_lock, flags);
- vioch->ready = false;
- spin_unlock_irqrestore(&vioch->ready_lock, flags);
+ scmi_vio_channel_cleanup_sync(vioch);
scmi_free_channel(cinfo, data, id);
- spin_lock_irqsave(&vioch->lock, flags);
- vioch->cinfo = NULL;
- spin_unlock_irqrestore(&vioch->lock, flags);
-
return 0;
}
@@ -316,33 +494,56 @@ static int virtio_send_message(struct scmi_chan_info *cinfo,
int rc;
struct scmi_vio_msg *msg;
- spin_lock_irqsave(&vioch->lock, flags);
+ if (!scmi_vio_channel_acquire(vioch))
+ return -EINVAL;
- if (list_empty(&vioch->free_list)) {
- spin_unlock_irqrestore(&vioch->lock, flags);
+ msg = scmi_virtio_get_free_msg(vioch);
+ if (!msg) {
+ scmi_vio_channel_release(vioch);
return -EBUSY;
}
- msg = list_first_entry(&vioch->free_list, typeof(*msg), list);
- list_del(&msg->list);
-
msg_tx_prepare(msg->request, xfer);
sg_init_one(&sg_out, msg->request, msg_command_size(xfer));
sg_init_one(&sg_in, msg->input, msg_response_size(xfer));
+ spin_lock_irqsave(&vioch->lock, flags);
+
+ /*
+ * If polling was requested for this transaction:
+ * - retrieve last used index (will be used as polling reference)
+ * - bind the polled message to the xfer via .priv
+ * - grab an additional msg refcount for the poll-path
+ */
+ if (xfer->hdr.poll_completion) {
+ msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
+ /* Still no users, no need to acquire poll_lock */
+ msg->poll_status = VIO_MSG_POLLING;
+ scmi_vio_msg_acquire(msg);
+ /* Ensure initialized msg is visibly bound to xfer */
+ smp_store_mb(xfer->priv, msg);
+ }
+
rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
- if (rc) {
- list_add(&msg->list, &vioch->free_list);
- dev_err_once(vioch->cinfo->dev,
- "%s() failed to add to virtqueue (%d)\n", __func__,
- rc);
- } else {
+ if (rc)
+ dev_err(vioch->cinfo->dev,
+ "failed to add to TX virtqueue (%d)\n", rc);
+ else
virtqueue_kick(vioch->vqueue);
- }
spin_unlock_irqrestore(&vioch->lock, flags);
+ if (rc) {
+ /* Ensure order between xfer->priv clear and vq feeding */
+ smp_store_mb(xfer->priv, NULL);
+ if (xfer->hdr.poll_completion)
+ scmi_vio_msg_release(vioch, msg);
+ scmi_vio_msg_release(vioch, msg);
+ }
+
+ scmi_vio_channel_release(vioch);
+
return rc;
}
@@ -351,10 +552,8 @@ static void virtio_fetch_response(struct scmi_chan_info *cinfo,
{
struct scmi_vio_msg *msg = xfer->priv;
- if (msg) {
+ if (msg)
msg_fetch_response(msg->input, msg->rx_len, xfer);
- xfer->priv = NULL;
- }
}
static void virtio_fetch_notification(struct scmi_chan_info *cinfo,
@@ -362,10 +561,225 @@ static void virtio_fetch_notification(struct scmi_chan_info *cinfo,
{
struct scmi_vio_msg *msg = xfer->priv;
- if (msg) {
+ if (msg)
msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer);
- xfer->priv = NULL;
+}
+
+/**
+ * virtio_mark_txdone - Mark transmission done
+ *
+ * Free only completed polling transfer messages.
+ *
+ * Note that in the SCMI VirtIO transport we never explicitly release still
+ * outstanding but timed-out messages by forcibly re-adding them to the
+ * free-list inside the TX code path; we instead let the IRQ/RX callbacks or
+ * the TX deferred worker eventually clean up such messages once a late reply
+ * is finally received and discarded (if ever).
+ *
+ * This approach was deemed preferable since those pending timed-out buffers are
+ * still effectively owned by the SCMI platform VirtIO device even after timeout
+ * expiration: forcibly freeing and reusing them before they have been returned
+ * explicitly by the SCMI platform could lead to subtle bugs due to message
+ * corruption.
+ * An SCMI platform VirtIO device which never returns message buffers is
+ * broken anyway and will quickly lead to exhaustion of the available messages.
+ *
+ * For this same reason we take care, here, to free only the polled messages
+ * that have actually been replied to (and only if not by chance already
+ * processed on the IRQ path - the initial scmi_vio_msg_release() takes care
+ * of this), and also any timed-out polled message that indeed appears to have
+ * been at least dequeued from the virtqueues (VIO_MSG_POLL_DONE): this is
+ * needed since such messages won't be freed elsewhere. Any other polled
+ * message is marked as VIO_MSG_POLL_TIMEOUT.
+ *
+ * Possible late replies to timed-out polled messages will be eventually freed
+ * by RX callbacks if delivered on the IRQ path or by the deferred TX worker if
+ * dequeued on some other polling path.
+ *
+ * @cinfo: SCMI channel info
+ * @ret: Transmission return code
+ * @xfer: Transfer descriptor
+ */
+static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret,
+ struct scmi_xfer *xfer)
+{
+ unsigned long flags;
+ struct scmi_vio_channel *vioch = cinfo->transport_info;
+ struct scmi_vio_msg *msg = xfer->priv;
+
+ if (!msg || !scmi_vio_channel_acquire(vioch))
+ return;
+
+ /* Ensure msg is unbound from xfer anyway at this point */
+ smp_store_mb(xfer->priv, NULL);
+
+ /* Must be a polled xfer and not already freed on the IRQ path */
+ if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) {
+ scmi_vio_channel_release(vioch);
+ return;
}
+
+ spin_lock_irqsave(&msg->poll_lock, flags);
+ /* Do not free timed-out polled messages that are still in flight */
+ if (ret != -ETIMEDOUT || msg->poll_status == VIO_MSG_POLL_DONE)
+ scmi_vio_msg_release(vioch, msg);
+ else if (msg->poll_status == VIO_MSG_POLLING)
+ msg->poll_status = VIO_MSG_POLL_TIMEOUT;
+ spin_unlock_irqrestore(&msg->poll_lock, flags);
+
+ scmi_vio_channel_release(vioch);
+}
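
The release logic above leans on per-message reference counting: the refcount_set(&msg->users, 1) in virtio_chan_setup() pairs with scmi_vio_msg_release(), and the polling path grabs one extra reference via scmi_vio_msg_acquire(). The real helpers are defined earlier in this patch; a minimal sketch of their assumed shape (free_lock/free_list match the probe hunk further down):

	static void scmi_vio_msg_acquire(struct scmi_vio_msg *msg)
	{
		refcount_inc(&msg->users);
	}

	/* Returns true when the last user is gone and the message was recycled */
	static bool scmi_vio_msg_release(struct scmi_vio_channel *vioch,
					 struct scmi_vio_msg *msg)
	{
		bool ret;

		ret = refcount_dec_and_test(&msg->users);
		if (ret) {
			unsigned long flags;

			spin_lock_irqsave(&vioch->free_lock, flags);
			list_add_tail(&msg->list, &vioch->free_list);
			spin_unlock_irqrestore(&vioch->free_lock, flags);
		}

		return ret;
	}
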
+
+/**
+ * virtio_poll_done - Provide polling support for VirtIO transport
+ *
+ * @cinfo: SCMI channel info
+ * @xfer: Reference to the transfer being polled for.
+ *
+ * VirtIO core provides a polling mechanism based only on last used indexes:
+ * this means that it is possible to poll the virtqueues waiting for something
+ * new to arrive from the host side, but the only way to check if the freshly
+ * arrived buffer was indeed what we were waiting for is to compare the newly
+ * arrived message descriptor with the one we are polling on.
+ *
+ * As a consequence we may end up dequeuing something different from the buffer
+ * we were poll-waiting for: if that is the case, such early-fetched buffers
+ * are added to the @pending_cmds_list for later processing by a dedicated
+ * deferred worker.
+ *
+ * So, basically, once something new is spotted we proceed to dequeue all the
+ * freshly received used buffers until we find the one we were polling on, or
+ * we have 'seemingly' emptied the virtqueue; if some buffers are still pending
+ * in the vqueue at the end of the polling loop (possible due to inherent races
+ * in virtqueues handling mechanisms), we similarly kick the deferred worker
+ * and let it process those, to avoid indefinitely looping in the .poll_done
+ * busy-waiting helper.
+ *
+ * Finally, we also delegate to the deferred worker the final freeing of any
+ * timed-out reply to a polled message that we happen to dequeue.
+ *
+ * Note that, since we do NOT have per-message suppress notification mechanism,
+ * the message we are polling for could be alternatively delivered via usual
+ * IRQs callbacks on another core which happened to have IRQs enabled while we
+ * are actively polling for it here: in such a case it will be handled as such
+ * by scmi_rx_callback() and the polling loop in the SCMI Core TX path will be
+ * transparently terminated anyway.
+ *
+ * Return: True once polling has successfully completed.
+ */
+static bool virtio_poll_done(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ bool pending, found = false;
+ unsigned int length, any_prefetched = 0;
+ unsigned long flags;
+ struct scmi_vio_msg *next_msg, *msg = xfer->priv;
+ struct scmi_vio_channel *vioch = cinfo->transport_info;
+
+ if (!msg)
+ return true;
+
+ /*
+ * Already processed by some other polling loop on another CPU?
+ *
+ * Note that this message is acquired on the poll path so cannot vanish
+ * while inside this loop iteration even if concurrently processed on
+ * the IRQ path.
+ *
+ * Avoid acquiring poll_lock since poll_status can be changed
+ * in a relevant manner only later in this same thread of execution:
+ * any other possible changes made concurrently by other polling loops
+ * or by a reply delivered on the IRQ path have no meaningful impact on
+ * this loop iteration: in other words it is harmless to allow this
+ * possible race, and tolerating it lets us avoid spinlocking with
+ * IRQs off in this initial part of the polling loop.
+ */
+ if (msg->poll_status == VIO_MSG_POLL_DONE)
+ return true;
+
+ if (!scmi_vio_channel_acquire(vioch))
+ return true;
+
+ /* Has the cmdq index moved at all? */
+ pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
+ if (!pending) {
+ scmi_vio_channel_release(vioch);
+ return false;
+ }
+
+ spin_lock_irqsave(&vioch->lock, flags);
+ virtqueue_disable_cb(vioch->vqueue);
+
+ /*
+ * Process all new messages till the polled-for message is found OR
+ * the vqueue is empty.
+ */
+ while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) {
+ bool next_msg_done = false;
+
+ /*
+ * Mark any dequeued buffer message as VIO_MSG_POLL_DONE so that
+ * it can be properly freed even on timeout in mark_txdone.
+ */
+ spin_lock(&next_msg->poll_lock);
+ if (next_msg->poll_status == VIO_MSG_POLLING) {
+ next_msg->poll_status = VIO_MSG_POLL_DONE;
+ next_msg_done = true;
+ }
+ spin_unlock(&next_msg->poll_lock);
+
+ next_msg->rx_len = length;
+ /* Is this the message we were polling for? */
+ if (next_msg == msg) {
+ found = true;
+ break;
+ } else if (next_msg_done) {
+ /* Skip the rest if this was another polled msg */
+ continue;
+ }
+
+ /*
+ * Enqueue for later processing any non-polled message and any
+ * timed-out polled one that we happen to have dequeued.
+ */
+ spin_lock(&next_msg->poll_lock);
+ if (next_msg->poll_status == VIO_MSG_NOT_POLLED ||
+ next_msg->poll_status == VIO_MSG_POLL_TIMEOUT) {
+ spin_unlock(&next_msg->poll_lock);
+
+ any_prefetched++;
+ spin_lock(&vioch->pending_lock);
+ list_add_tail(&next_msg->list,
+ &vioch->pending_cmds_list);
+ spin_unlock(&vioch->pending_lock);
+ } else {
+ spin_unlock(&next_msg->poll_lock);
+ }
+ }
+
+ /*
+ * Once the polling loop has successfully terminated, anything else that
+ * was queued in the meantime will be served by the deferred worker, by
+ * the normal IRQ callback, or by other poll loops.
+ *
+ * If we are still looking for the polled reply, the polling index has
+ * to be updated to the current vqueue last used index.
+ */
+ if (found) {
+ pending = !virtqueue_enable_cb(vioch->vqueue);
+ } else {
+ msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
+ pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
+ }
+
+ if (vioch->deferred_tx_wq && (any_prefetched || pending))
+ queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work);
+
+ spin_unlock_irqrestore(&vioch->lock, flags);
+
+ scmi_vio_channel_release(vioch);
+
+ return found;
}
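
As background on the virtio primitives this polling loop builds upon: virtqueue_enable_cb_prepare() returns an opaque "last used" index, and virtqueue_poll() reports whether the device has published a newer used buffer since that index was taken. A minimal busy-wait sketch built on them (illustrative only, not part of this driver):

	#include <linux/virtio.h>

	/* Spin until the device makes a new used buffer visible on @vq. */
	static void example_wait_for_used_buf(struct virtqueue *vq)
	{
		unsigned int last_idx = virtqueue_enable_cb_prepare(vq);

		while (!virtqueue_poll(vq, last_idx))
			cpu_relax();
	}
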
static const struct scmi_transport_ops scmi_virtio_ops = {
@@ -377,6 +791,8 @@ static const struct scmi_transport_ops scmi_virtio_ops = {
.send_message = virtio_send_message,
.fetch_response = virtio_fetch_response,
.fetch_notification = virtio_fetch_notification,
+ .mark_txdone = virtio_mark_txdone,
+ .poll_done = virtio_poll_done,
};
static int scmi_vio_probe(struct virtio_device *vdev)
@@ -417,8 +833,10 @@ static int scmi_vio_probe(struct virtio_device *vdev)
unsigned int sz;
spin_lock_init(&channels[i].lock);
- spin_lock_init(&channels[i].ready_lock);
+ spin_lock_init(&channels[i].free_lock);
INIT_LIST_HEAD(&channels[i].free_list);
+ spin_lock_init(&channels[i].pending_lock);
+ INIT_LIST_HEAD(&channels[i].pending_cmds_list);
channels[i].vqueue = vqs[i];
sz = virtqueue_get_vring_size(channels[i].vqueue);
@@ -427,10 +845,10 @@ static int scmi_vio_probe(struct virtio_device *vdev)
sz /= DESCRIPTORS_PER_TX_MSG;
if (sz > MSG_TOKEN_MAX) {
- dev_info_once(dev,
- "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n",
- channels[i].is_rx ? "rx" : "tx",
- sz, MSG_TOKEN_MAX);
+ dev_info(dev,
+ "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n",
+ channels[i].is_rx ? "rx" : "tx",
+ sz, MSG_TOKEN_MAX);
sz = MSG_TOKEN_MAX;
}
channels[i].max_msg = sz;
@@ -460,12 +878,13 @@ static void scmi_vio_remove(struct virtio_device *vdev)
static int scmi_vio_validate(struct virtio_device *vdev)
{
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE
if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
dev_err(&vdev->dev,
"device does not comply with spec version 1.x\n");
return -EINVAL;
}
-
+#endif
return 0;
}
@@ -503,7 +922,9 @@ const struct scmi_desc scmi_virtio_desc = {
.transport_init = virtio_scmi_init,
.transport_exit = virtio_scmi_exit,
.ops = &scmi_virtio_ops,
- .max_rx_timeout_ms = 60000, /* for non-realtime virtio devices */
+ /* for non-realtime virtio devices */
+ .max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS,
.max_msg = 0, /* overridden by virtio_get_max_msg() */
.max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,
+ .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE),
};
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index a7e762c352f9..1e1a51510e83 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -1059,14 +1059,14 @@ static bool __init sdei_present_acpi(void)
return true;
}
-static int __init sdei_init(void)
+void __init sdei_init(void)
{
struct platform_device *pdev;
int ret;
ret = platform_driver_register(&sdei_driver);
if (ret || !sdei_present_acpi())
- return ret;
+ return;
pdev = platform_device_register_simple(sdei_driver.driver.name,
0, NULL, 0);
@@ -1076,17 +1076,8 @@ static int __init sdei_init(void)
pr_info("Failed to register ACPI:SDEI platform device %d\n",
ret);
}
-
- return ret;
}
-/*
- * On an ACPI system SDEI needs to be ready before HEST:GHES tries to register
- * its events. ACPI is initialised from a subsys_initcall(), GHES is initialised
- * by device_initcall(). We want to be called in the middle.
- */
-subsys_initcall_sync(sdei_init);
-
int sdei_event_handler(struct pt_regs *regs,
struct sdei_registered_event *arg)
{
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
index 4c3201e290e2..ea84108035eb 100644
--- a/drivers/firmware/efi/apple-properties.c
+++ b/drivers/firmware/efi/apple-properties.c
@@ -24,7 +24,7 @@ static bool dump_properties __initdata;
static int __init dump_properties_enable(char *arg)
{
dump_properties = true;
- return 0;
+ return 1;
}
__setup("dump_apple_properties", dump_properties_enable);
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 0ef086e43090..7e771c56c13c 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -266,7 +266,7 @@ static int efi_pstore_write(struct pstore_record *record)
efi_name[i] = name[i];
ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES,
- preemptible(), record->size, record->psi->buf);
+ false, record->size, record->psi->buf);
if (record->reason == KMSG_DUMP_OOPS && try_module_get(THIS_MODULE))
if (!schedule_work(&efivar_work))
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 7de3f5b6e8d0..5502e176d51b 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -212,7 +212,7 @@ static int __init efivar_ssdt_setup(char *str)
memcpy(efivar_ssdt, str, strlen(str));
else
pr_warn("efivar_ssdt: name too long: %s\n", str);
- return 0;
+ return 1;
}
__setup("efivar_ssdt=", efivar_ssdt_setup);
diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c
index 380e4e251399..9c460843442f 100644
--- a/drivers/firmware/efi/libstub/riscv-stub.c
+++ b/drivers/firmware/efi/libstub/riscv-stub.c
@@ -25,7 +25,7 @@ typedef void __noreturn (*jump_kernel_func)(unsigned int, unsigned long);
static u32 hartid;
-static u32 get_boot_hartid_from_fdt(void)
+static int get_boot_hartid_from_fdt(void)
{
const void *fdt;
int chosen_node, len;
@@ -33,23 +33,26 @@ static u32 get_boot_hartid_from_fdt(void)
fdt = get_efi_config_table(DEVICE_TREE_GUID);
if (!fdt)
- return U32_MAX;
+ return -EINVAL;
chosen_node = fdt_path_offset(fdt, "/chosen");
if (chosen_node < 0)
- return U32_MAX;
+ return -EINVAL;
prop = fdt_getprop((void *)fdt, chosen_node, "boot-hartid", &len);
if (!prop || len != sizeof(u32))
- return U32_MAX;
+ return -EINVAL;
- return fdt32_to_cpu(*prop);
+ hartid = fdt32_to_cpu(*prop);
+ return 0;
}
efi_status_t check_platform_features(void)
{
- hartid = get_boot_hartid_from_fdt();
- if (hartid == U32_MAX) {
+ int ret;
+
+ ret = get_boot_hartid_from_fdt();
+ if (ret) {
efi_err("/chosen/boot-hartid missing or invalid!\n");
return EFI_UNSUPPORTED;
}
diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c
index 38722d2009e2..5ed0602c2f75 100644
--- a/drivers/firmware/efi/mokvar-table.c
+++ b/drivers/firmware/efi/mokvar-table.c
@@ -359,4 +359,4 @@ static int __init efi_mokvar_sysfs_init(void)
}
return err;
}
-device_initcall(efi_mokvar_sysfs_init);
+fs_initcall(efi_mokvar_sysfs_init);
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index abdc8a6a3963..cae590bd08f2 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -742,6 +742,7 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
{
const struct efivar_operations *ops;
efi_status_t status;
+ unsigned long varsize;
if (!__efivars)
return -EINVAL;
@@ -764,15 +765,17 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
return efivar_entry_set_nonblocking(name, vendor, attributes,
size, data);
+ varsize = size + ucs2_strsize(name, 1024);
if (!block) {
if (down_trylock(&efivars_lock))
return -EBUSY;
+ status = check_var_size_nonblocking(attributes, varsize);
} else {
if (down_interruptible(&efivars_lock))
return -EINTR;
+ status = check_var_size(attributes, varsize);
}
- status = check_var_size(attributes, size + ucs2_strsize(name, 1024));
if (status != EFI_SUCCESS) {
up(&efivars_lock);
return -ENOSPC;
diff --git a/drivers/firmware/imx/rm.c b/drivers/firmware/imx/rm.c
index a12db6ff323b..d492b99e1c6c 100644
--- a/drivers/firmware/imx/rm.c
+++ b/drivers/firmware/imx/rm.c
@@ -43,3 +43,48 @@ bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource)
return hdr->func;
}
EXPORT_SYMBOL(imx_sc_rm_is_resource_owned);
+
+struct imx_sc_msg_rm_get_resource_owner {
+ struct imx_sc_rpc_msg hdr;
+ union {
+ struct {
+ u16 resource;
+ } req;
+ struct {
+ u8 val;
+ } resp;
+ } data;
+} __packed __aligned(4);
+
+/*
+ * This function gets the partition number owning @resource.
+ *
+ * @param[in] ipc IPC handle
+ * @param[in] resource resource the control is associated with
+ * @param[out] pt pointer to return the partition number
+ *
+ * @return Returns 0 for success and < 0 for errors.
+ */
+int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt)
+{
+ struct imx_sc_msg_rm_get_resource_owner msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_RM;
+ hdr->func = IMX_SC_RM_FUNC_GET_RESOURCE_OWNER;
+ hdr->size = 2;
+
+ msg.data.req.resource = resource;
+
+ ret = imx_scu_call_rpc(ipc, &msg, true);
+ if (ret)
+ return ret;
+
+ if (pt)
+ *pt = msg.data.resp.val;
+
+ return 0;
+}
+EXPORT_SYMBOL(imx_sc_rm_get_resource_owner);
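
A hypothetical caller of the new API (the resource choice and the surrounding function are illustrative only):

	#include <linux/device.h>
	#include <linux/firmware/imx/sci.h>

	static void example_report_owner(struct imx_sc_ipc *ipc, struct device *dev)
	{
		u8 pt;

		/* Query which partition owns the VPU decoder resource */
		if (!imx_sc_rm_get_resource_owner(ipc, IMX_SC_R_VPU_DEC_0, &pt))
			dev_info(dev, "IMX_SC_R_VPU_DEC_0 owned by partition %u\n", pt);
	}
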
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
index ff6569c4a53b..af3d057e6421 100644
--- a/drivers/firmware/imx/scu-pd.c
+++ b/drivers/firmware/imx/scu-pd.c
@@ -155,6 +155,10 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "vpu-pid", IMX_SC_R_VPU_PID0, 8, true, 0 },
{ "vpu-dec0", IMX_SC_R_VPU_DEC_0, 1, false, 0 },
{ "vpu-enc0", IMX_SC_R_VPU_ENC_0, 1, false, 0 },
+ { "vpu-enc1", IMX_SC_R_VPU_ENC_1, 1, false, 0 },
+ { "vpu-mu0", IMX_SC_R_VPU_MU_0, 1, false, 0 },
+ { "vpu-mu1", IMX_SC_R_VPU_MU_1, 1, false, 0 },
+ { "vpu-mu2", IMX_SC_R_VPU_MU_2, 1, false, 0 },
/* GPU SS */
{ "gpu0-pid", IMX_SC_R_GPU_0_PID0, 4, true, 0 },
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 7db8066b19fd..491bbf70c94a 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -49,26 +49,12 @@ struct qcom_scm_mem_map_info {
__le64 mem_size;
};
-#define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00
-#define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01
-#define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08
-#define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20
-
-#define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04
-#define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02
-#define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10
-#define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40
-
-struct qcom_scm_wb_entry {
- int flag;
- void *entry;
+/* Each bit configures cold/warm boot address for one of the 4 CPUs */
+static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
+ 0, BIT(0), BIT(3), BIT(5)
};
-
-static struct qcom_scm_wb_entry qcom_scm_wb[] = {
- { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
- { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
- { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
- { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
+static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
+ BIT(2), BIT(1), BIT(4), BIT(6)
};
static const char * const qcom_scm_convention_names[] = {
@@ -179,9 +165,8 @@ found:
/**
* qcom_scm_call() - Invoke a syscall in the secure world
* @dev: device
- * @svc_id: service identifier
- * @cmd_id: command identifier
* @desc: Descriptor structure containing arguments and return values
+ * @res: Structure containing results from SMC/HVC call
*
* Sends a command to the SCM and waits for the command to finish processing.
* This should *only* be called in pre-emptible context.
@@ -205,8 +190,6 @@ static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
/**
* qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
* @dev: device
- * @svc_id: service identifier
- * @cmd_id: command identifier
* @desc: Descriptor structure containing arguments and return values
* @res: Structure containing results from SMC/HVC call
*
@@ -260,97 +243,83 @@ static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
return ret ? false : !!res.result[0];
}
-/**
- * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the Linux entry point for the SCM to transfer control to when coming
- * out of a power down. CPU power down may be executed on cpuidle or hotplug.
- */
-int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
- int ret;
- int flags = 0;
int cpu;
+ unsigned int flags = 0;
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_BOOT,
.cmd = QCOM_SCM_BOOT_SET_ADDR,
.arginfo = QCOM_SCM_ARGS(2),
+ .owner = ARM_SMCCC_OWNER_SIP,
};
- /*
- * Reassign only if we are switching from hotplug entry point
- * to cpuidle entry point or vice versa.
- */
- for_each_cpu(cpu, cpus) {
- if (entry == qcom_scm_wb[cpu].entry)
- continue;
- flags |= qcom_scm_wb[cpu].flag;
+ for_each_present_cpu(cpu) {
+ if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
+ return -EINVAL;
+ flags |= cpu_bits[cpu];
}
- /* No change in entry function */
- if (!flags)
- return 0;
-
desc.args[0] = flags;
desc.args[1] = virt_to_phys(entry);
- ret = qcom_scm_call(__scm->dev, &desc, NULL);
- if (!ret) {
- for_each_cpu(cpu, cpus)
- qcom_scm_wb[cpu].entry = entry;
- }
-
- return ret;
+ return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
-EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
-/**
- * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the cold boot address of the cpus. Any cpu outside the supported
- * range would be removed from the cpu present mask.
- */
-int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
{
- int flags = 0;
- int cpu;
- int scm_cb_flags[] = {
- QCOM_SCM_FLAG_COLDBOOT_CPU0,
- QCOM_SCM_FLAG_COLDBOOT_CPU1,
- QCOM_SCM_FLAG_COLDBOOT_CPU2,
- QCOM_SCM_FLAG_COLDBOOT_CPU3,
- };
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_BOOT,
- .cmd = QCOM_SCM_BOOT_SET_ADDR,
- .arginfo = QCOM_SCM_ARGS(2),
+ .cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
.owner = ARM_SMCCC_OWNER_SIP,
+ .arginfo = QCOM_SCM_ARGS(6),
+ .args = {
+ virt_to_phys(entry),
+ /* Apply to all CPUs in all affinity levels */
+ ~0ULL, ~0ULL, ~0ULL, ~0ULL,
+ flags,
+ },
};
- if (!cpus || cpumask_empty(cpus))
- return -EINVAL;
+ /* Need a device for DMA of the additional arguments */
+ if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
+ return -EOPNOTSUPP;
- for_each_cpu(cpu, cpus) {
- if (cpu < ARRAY_SIZE(scm_cb_flags))
- flags |= scm_cb_flags[cpu];
- else
- set_cpu_present(cpu, false);
- }
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
- desc.args[0] = flags;
- desc.args[1] = virt_to_phys(entry);
+/**
+ * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
+ * @entry: Entry point function for the cpus
+ *
+ * Set the Linux entry point for the SCM to transfer control to when coming
+ * out of a power down. CPU power down may be executed on cpuidle or hotplug.
+ */
+int qcom_scm_set_warm_boot_addr(void *entry)
+{
+ if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
+ /* Fallback to old SCM call */
+ return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
+ return 0;
+}
+EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
- return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
+/**
+ * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
+ * @entry: Entry point function for the cpus
+ */
+int qcom_scm_set_cold_boot_addr(void *entry)
+{
+ if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
+ /* Fallback to old SCM call */
+ return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
+ return 0;
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
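
As a worked example of the legacy fallback: with all four supported CPUs present, qcom_scm_set_boot_addr() ORs the per-CPU warm-boot bits BIT(2) | BIT(1) | BIT(4) | BIT(6) = 0x56 into desc.args[0] before invoking QCOM_SCM_BOOT_SET_ADDR. A hypothetical caller only sees the simplified single-argument API:

	#include <linux/qcom_scm.h>

	/* cpu_resume_entry is a hypothetical low-level resume trampoline */
	static int example_install_warm_boot(void (*cpu_resume_entry)(void))
	{
		/* Tries the newer SET_ADDR_MC call, then falls back to SET_ADDR */
		return qcom_scm_set_warm_boot_addr((void *)cpu_resume_entry);
	}
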
/**
* qcom_scm_cpu_power_down() - Power down the cpu
- * @flags - Flags to flush cache
+ * @flags: Flags to flush cache
*
* This is an end point to power down cpu. If there was a pending interrupt,
* the control would return from this function, otherwise, the cpu jumps to the
@@ -435,10 +404,16 @@ static void qcom_scm_set_download_mode(bool enable)
* and optional blob of data used for authenticating the metadata
* and the rest of the firmware
* @size: size of the metadata
+ * @ctx: optional metadata context
*
- * Returns 0 on success.
+ * Return: 0 on success.
+ *
+ * Upon successful return, the PAS metadata context (@ctx) will be used to
+ * track the metadata allocation; the caller needs to release it by invoking
+ * qcom_scm_pas_metadata_release().
*/
-int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
+int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
+ struct qcom_scm_pas_metadata *ctx)
{
dma_addr_t mdata_phys;
void *mdata_buf;
@@ -467,7 +442,7 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
ret = qcom_scm_clk_enable();
if (ret)
- goto free_metadata;
+ goto out;
desc.args[1] = mdata_phys;
@@ -475,14 +450,37 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
qcom_scm_clk_disable();
-free_metadata:
- dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
+out:
+ if (ret < 0 || !ctx) {
+ dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
+ } else if (ctx) {
+ ctx->ptr = mdata_buf;
+ ctx->phys = mdata_phys;
+ ctx->size = size;
+ }
return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);
/**
+ * qcom_scm_pas_metadata_release() - release metadata context
+ * @ctx: metadata context
+ */
+void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
+{
+ if (!ctx->ptr)
+ return;
+
+ dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);
+
+ ctx->ptr = NULL;
+ ctx->phys = 0;
+ ctx->size = 0;
+}
+EXPORT_SYMBOL(qcom_scm_pas_metadata_release);
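
A hypothetical usage sequence for the new context parameter: keep the metadata buffer alive across the PAS boot sequence and release it afterwards (the mem-setup/auth steps are elided):

	#include <linux/qcom_scm.h>

	static int example_pas_boot(u32 pas_id, const void *mdata, size_t mdata_len)
	{
		struct qcom_scm_pas_metadata ctx = {};
		int ret;

		ret = qcom_scm_pas_init_image(pas_id, mdata, mdata_len, &ctx);
		if (ret)
			return ret;

		/* ... qcom_scm_pas_mem_setup() / auth-and-reset would go here ... */

		/* Metadata may now be dropped; safe even if ctx was never filled */
		qcom_scm_pas_metadata_release(&ctx);
		return 0;
	}
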
+
+/**
* qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
* for firmware loading
* @peripheral: peripheral id
@@ -749,12 +747,6 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
};
int ret;
- desc.args[0] = addr;
- desc.args[1] = size;
- desc.args[2] = spare;
- desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
- QCOM_SCM_VAL);
-
ret = qcom_scm_call(__scm->dev, &desc, NULL);
/* the pg table has been initialized already, ignore the error */
@@ -765,6 +757,21 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
+int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .args[0] = size,
+ .args[1] = spare,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_set_cp_pool_size);
+
int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
u32 cp_nonpixel_start,
u32 cp_nonpixel_size)
@@ -1131,6 +1138,22 @@ int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);
+int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_SMMU_PROGRAM,
+ .cmd = QCOM_SCM_SMMU_PT_FORMAT,
+ .arginfo = QCOM_SCM_ARGS(3),
+ .args[0] = sec_id,
+ .args[1] = ctx_num,
+ .args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_set_pt_format);
+
int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
struct qcom_scm_desc desc = {
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index d92156ceb3ac..0d51eef2472f 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -78,8 +78,13 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_BOOT_SET_ADDR 0x01
#define QCOM_SCM_BOOT_TERMINATE_PC 0x02
#define QCOM_SCM_BOOT_SET_DLOAD_MODE 0x10
+#define QCOM_SCM_BOOT_SET_ADDR_MC 0x11
#define QCOM_SCM_BOOT_SET_REMOTE_STATE 0x0a
#define QCOM_SCM_FLUSH_FLAG_MASK 0x3
+#define QCOM_SCM_BOOT_MAX_CPUS 4
+#define QCOM_SCM_BOOT_MC_FLAG_AARCH64 BIT(0)
+#define QCOM_SCM_BOOT_MC_FLAG_COLDBOOT BIT(1)
+#define QCOM_SCM_BOOT_MC_FLAG_WARMBOOT BIT(2)
#define QCOM_SCM_SVC_PIL 0x02
#define QCOM_SCM_PIL_PAS_INIT_IMAGE 0x01
@@ -100,6 +105,7 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_MP_RESTORE_SEC_CFG 0x02
#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE 0x03
#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT 0x04
+#define QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE 0x05
#define QCOM_SCM_MP_VIDEO_VAR 0x08
#define QCOM_SCM_MP_ASSIGN 0x16
@@ -119,6 +125,7 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_LMH_LIMIT_DCVSH 0x10
#define QCOM_SCM_SVC_SMMU_PROGRAM 0x15
+#define QCOM_SCM_SMMU_PT_FORMAT 0x01
#define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03
#define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 5ae2040b8b02..4697edc125b1 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -3412,7 +3412,7 @@ static int ti_sci_probe(struct platform_device *pdev)
ret = register_restart_handler(&info->nb);
if (ret) {
dev_err(dev, "reboot registration fail(%d)\n", ret);
- return ret;
+ goto out;
}
}
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 450c5f6a1cbf..5e5b0bb2e4e0 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -1121,6 +1121,32 @@ int zynqmp_pm_aes_engine(const u64 address, u32 *out)
EXPORT_SYMBOL_GPL(zynqmp_pm_aes_engine);
/**
+ * zynqmp_pm_sha_hash - Access the SHA engine to calculate the hash
+ * @address: Address of the data / address of the output buffer where
+ * the hash should be stored.
+ * @size: Size of the data.
+ * @flags:
+ * BIT(0) - for initializing the csudma driver and SHA3 (here the
+ * address and size inputs can be NULL).
+ * BIT(1) - to call Sha3_Update API which can be called multiple
+ * times when data is not contiguous.
+ * BIT(2) - to get final hash of the whole updated data.
+ * Hash will be overwritten at provided address with
+ * 48 bytes.
+ *
+ * Return: Returns status, either success or error code.
+ */
+int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags)
+{
+ u32 lower_addr = lower_32_bits(address);
+ u32 upper_addr = upper_32_bits(address);
+
+ return zynqmp_pm_invoke_fn(PM_SECURE_SHA, upper_addr, lower_addr,
+ size, flags, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sha_hash);
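
A hypothetical three-step use of the flags documented above (flag values shown literally; the 48-byte result corresponds to SHA3-384):

	#include <linux/bits.h>
	#include <linux/firmware/xlnx-zynqmp.h>

	static int example_sha3_384(u64 data_addr, u32 len, u64 digest_addr)
	{
		int ret;

		ret = zynqmp_pm_sha_hash(0, 0, BIT(0));		/* init engine */
		if (ret)
			return ret;

		ret = zynqmp_pm_sha_hash(data_addr, len, BIT(1));	/* update */
		if (ret)
			return ret;

		/* final: the 48-byte digest is written back to digest_addr */
		return zynqmp_pm_sha_hash(digest_addr, 48, BIT(2));
	}
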
+
+/**
* zynqmp_pm_register_notifier() - PM API for register a subsystem
* to be notified about specific
* event/error.
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index 4a55cdf089d6..e00c33310517 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -163,15 +163,13 @@ exit_destroy:
return ret;
}
-static int gen_74x164_remove(struct spi_device *spi)
+static void gen_74x164_remove(struct spi_device *spi)
{
struct gen_74x164_chip *chip = spi_get_drvdata(spi);
gpiod_set_value_cansleep(chip->gpiod_oe, 0);
gpiochip_remove(&chip->gpio_chip);
mutex_destroy(&chip->lock);
-
- return 0;
}
static const struct spi_device_id gen_74x164_spi_ids[] = {
diff --git a/drivers/gpio/gpio-max3191x.c b/drivers/gpio/gpio-max3191x.c
index 51cd6f98d1c7..161c4751c5f7 100644
--- a/drivers/gpio/gpio-max3191x.c
+++ b/drivers/gpio/gpio-max3191x.c
@@ -443,14 +443,12 @@ static int max3191x_probe(struct spi_device *spi)
return 0;
}
-static int max3191x_remove(struct spi_device *spi)
+static void max3191x_remove(struct spi_device *spi)
{
struct max3191x_chip *max3191x = spi_get_drvdata(spi);
gpiochip_remove(&max3191x->gpio);
mutex_destroy(&max3191x->lock);
-
- return 0;
}
static int __init max3191x_register_driver(struct spi_driver *sdrv)
diff --git a/drivers/gpio/gpio-max7301.c b/drivers/gpio/gpio-max7301.c
index 5862d73bf325..11813f41d460 100644
--- a/drivers/gpio/gpio-max7301.c
+++ b/drivers/gpio/gpio-max7301.c
@@ -64,11 +64,9 @@ static int max7301_probe(struct spi_device *spi)
return ret;
}
-static int max7301_remove(struct spi_device *spi)
+static void max7301_remove(struct spi_device *spi)
{
__max730x_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id max7301_id[] = {
diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c
index 31d2be1bebc8..cd9b16dbe1a9 100644
--- a/drivers/gpio/gpio-mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -134,7 +134,7 @@ exit_destroy:
return ret;
}
-static int mc33880_remove(struct spi_device *spi)
+static void mc33880_remove(struct spi_device *spi)
{
struct mc33880 *mc;
@@ -142,8 +142,6 @@ static int mc33880_remove(struct spi_device *spi)
gpiochip_remove(&mc->chip);
mutex_destroy(&mc->lock);
-
- return 0;
}
static struct spi_driver mc33880_driver = {
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index ccaad1cb3c2e..d8a26e503ca5 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -239,7 +239,6 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
rg->chip.offset = bank * MTK_BANK_WIDTH;
rg->irq_chip.name = dev_name(dev);
- rg->irq_chip.parent_device = dev;
rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index e099c39e0355..80ddc43fd875 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -986,7 +986,8 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
writel_relaxed(0, base + bank->regs->ctrl);
}
-static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
+static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc,
+ struct device *pm_dev)
{
struct gpio_irq_chip *irq;
static int gpio;
@@ -1052,6 +1053,7 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
if (ret)
return dev_err_probe(bank->chip.parent, ret, "Could not register gpio chip\n");
+ irq_domain_set_pm_device(bank->chip.irq.domain, pm_dev);
ret = devm_request_irq(bank->chip.parent, bank->irq,
omap_gpio_irq_handler,
0, dev_name(bank->chip.parent), bank);
@@ -1402,7 +1404,6 @@ static int omap_gpio_probe(struct platform_device *pdev)
irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
irqc->name = dev_name(&pdev->dev);
irqc->flags = IRQCHIP_MASK_ON_SUSPEND;
- irqc->parent_device = dev;
bank->irq = platform_get_irq(pdev, 0);
if (bank->irq <= 0) {
@@ -1466,7 +1467,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
omap_gpio_mod_init(bank);
- ret = omap_gpio_chip_init(bank, irqc);
+ ret = omap_gpio_chip_init(bank, irqc, dev);
if (ret) {
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c
index 8e04054cf07e..81a47ae09ff8 100644
--- a/drivers/gpio/gpio-pisosr.c
+++ b/drivers/gpio/gpio-pisosr.c
@@ -163,15 +163,13 @@ static int pisosr_gpio_probe(struct spi_device *spi)
return 0;
}
-static int pisosr_gpio_remove(struct spi_device *spi)
+static void pisosr_gpio_remove(struct spi_device *spi)
{
struct pisosr_gpio *gpio = spi_get_drvdata(spi);
gpiochip_remove(&gpio->chip);
mutex_destroy(&gpio->lock);
-
- return 0;
}
static const struct spi_device_id pisosr_gpio_id_table[] = {
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index bd2e16d6e21c..3a76538f27fa 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -530,7 +530,6 @@ static int gpio_rcar_probe(struct platform_device *pdev)
irq_chip = &p->irq_chip;
irq_chip->name = "gpio-rcar";
- irq_chip->parent_device = dev;
irq_chip->irq_mask = gpio_rcar_irq_disable;
irq_chip->irq_unmask = gpio_rcar_irq_enable;
irq_chip->irq_set_type = gpio_rcar_irq_set_type;
@@ -552,6 +551,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
goto err0;
}
+ irq_domain_set_pm_device(gpio_chip->irq.domain, dev);
ret = devm_request_irq(dev, p->irq_parent, gpio_rcar_irq_handler,
IRQF_SHARED, name, p);
if (ret) {
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 153fe79e1bf3..8e5d87984a48 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -547,7 +547,7 @@ struct gpio_sim_bank {
*
* So we need to store the pointer to the parent struct here. We can
* dereference it anywhere we need with no checks and no locking as
- * it's guaranteed to survive the childred and protected by configfs
+ * it's guaranteed to survive the children and protected by configfs
* locks.
*
* Same for other structures.
@@ -1322,7 +1322,7 @@ static void gpio_sim_hog_config_item_release(struct config_item *item)
kfree(hog);
}
-struct configfs_item_operations gpio_sim_hog_config_item_ops = {
+static struct configfs_item_operations gpio_sim_hog_config_item_ops = {
.release = gpio_sim_hog_config_item_release,
};
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 8d298beffd86..031fe105b58e 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -1075,6 +1075,7 @@ static const struct tegra_gpio_soc tegra241_main_soc = {
.ports = tegra241_main_ports,
.name = "tegra241-gpio",
.instance = 0,
+ .num_irqs_per_bank = 8,
};
#define TEGRA241_AON_GPIO_PORT(_name, _bank, _port, _pins) \
@@ -1095,6 +1096,7 @@ static const struct tegra_gpio_soc tegra241_aon_soc = {
.ports = tegra241_aon_ports,
.name = "tegra241-gpio-aon",
.instance = 1,
+ .num_irqs_per_bank = 8,
};
static const struct of_device_id tegra186_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index 5b103221b58d..fa4bc7481f9a 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -281,7 +281,6 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
u8 irq_status;
irq_chip->name = chip->label;
- irq_chip->parent_device = &pdev->dev;
irq_chip->irq_mask = tqmx86_gpio_irq_mask;
irq_chip->irq_unmask = tqmx86_gpio_irq_unmask;
irq_chip->irq_set_type = tqmx86_gpio_irq_set_type;
@@ -316,6 +315,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
goto out_pm_dis;
}
+ irq_domain_set_pm_device(girq->domain, dev);
+
dev_info(dev, "GPIO functionality initialized with %d pins\n",
chip->ngpio);
diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c
index d885032cf814..d918d2df4de2 100644
--- a/drivers/gpio/gpio-ts4900.c
+++ b/drivers/gpio/gpio-ts4900.c
@@ -1,7 +1,7 @@
/*
* Digital I/O driver for Technologic Systems I2C FPGA Core
*
- * Copyright (C) 2015 Technologic Systems
+ * Copyright (C) 2015, 2018 Technologic Systems
* Copyright (C) 2016 Savoir-Faire Linux
*
* This program is free software; you can redistribute it and/or
@@ -55,19 +55,33 @@ static int ts4900_gpio_direction_input(struct gpio_chip *chip,
{
struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
- /*
- * This will clear the output enable bit, the other bits are
- * dontcare when this is cleared
+ /* Only clear the OE bit here, which requires a read-modify-write. This
+ * prevents a potential issue with OE and data reaching the physical pin
+ * at different times.
*/
- return regmap_write(priv->regmap, offset, 0);
+ return regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OE, 0);
}
static int ts4900_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
+ unsigned int reg;
int ret;
+ /* If changing from an input to an output, we need to first set the
+ * proper data bit to what is requested and only then set the OE bit.
+ * This prevents a glitch that can occur on the IO line.
+ */
+ regmap_read(priv->regmap, offset, &reg);
+ if (!(reg & TS4900_GPIO_OE)) {
+ if (value)
+ reg = TS4900_GPIO_OUT;
+ else
+ reg &= ~TS4900_GPIO_OUT;
+
+ regmap_write(priv->regmap, offset, reg);
+ }
+
if (value)
ret = regmap_write(priv->regmap, offset, TS4900_GPIO_OE |
TS4900_GPIO_OUT);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index c0f6a25c3279..a5495ad31c9c 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -307,7 +307,8 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
if (IS_ERR(desc))
return desc;
- ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout);
+ /* ACPI uses hundredths-of-a-millisecond units */
+ ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10);
if (ret)
dev_warn(chip->parent,
"Failed to set debounce-timeout for pin 0x%04X, err %d\n",
@@ -1035,7 +1036,8 @@ int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int ind
if (ret < 0)
return ret;
- ret = gpio_set_debounce_timeout(desc, info.debounce);
+ /* ACPI uses hundredths-of-a-millisecond units */
+ ret = gpio_set_debounce_timeout(desc, info.debounce * 10);
if (ret)
return ret;
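
The unit conversion is the whole fix in both hunks above: the ACPI debounce_timeout field counts hundredths of a millisecond (i.e. 10 us units), while gpio_set_debounce_timeout() expects plain microseconds. A hypothetical helper making the conversion explicit:

	/* ACPI GpioInt/GpioIo debounce is in 10 us units (hundredths of a ms) */
	static inline unsigned int example_acpi_debounce_to_us(u16 acpi_debounce)
	{
		return acpi_debounce * 10;	/* e.g. 500 -> 5000 us (5 ms) */
	}
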
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index a3d14277f17c..6630d92e30ad 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2227,6 +2227,16 @@ static int gpio_set_bias(struct gpio_desc *desc)
return gpio_set_config_with_argument_optional(desc, bias, arg);
}
+/**
+ * gpio_set_debounce_timeout() - Set debounce timeout
+ * @desc: GPIO descriptor to set the debounce timeout
+ * @debounce: Debounce timeout in microseconds
+ *
+ * The function calls into the underlying GPIO driver to set the debounce
+ * timeout in the hardware.
+ *
+ * Returns 0 on success, or negative error code otherwise.
+ */
int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce)
{
return gpio_set_config_with_argument_optional(desc,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 3a29d857640b..7e3a7fcb9fe6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -24,6 +24,7 @@
#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
+#include <linux/migrate.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
@@ -221,7 +222,6 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
page = pfn_to_page(pfn);
svm_range_bo_ref(prange->svm_bo);
page->zone_device_data = prange->svm_bo;
- get_page(page);
lock_page(page);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index f36062be9ca8..9967a73d5b0f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -26,6 +26,7 @@
#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
+#include <linux/memremap.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig
index 58a242871b28..6e3f1d600541 100644
--- a/drivers/gpu/drm/arm/Kconfig
+++ b/drivers/gpu/drm/arm/Kconfig
@@ -6,6 +6,7 @@ config DRM_HDLCD
depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
depends on COMMON_CLK
select DRM_KMS_HELPER
+ select DRM_GEM_CMA_HELPER
help
Choose this option if you have an ARM High Definition Colour LCD
controller.
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index c86f5be4dfe0..007e5a282f67 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -8,7 +8,6 @@ config DRM_BRIDGE
config DRM_PANEL_BRIDGE
def_bool y
depends on DRM_BRIDGE
- depends on DRM_KMS_HELPER
select DRM_PANEL
help
DRM bridge wrapper of DRM panels
@@ -30,7 +29,7 @@ config DRM_CDNS_DSI
config DRM_CHIPONE_ICN6211
tristate "Chipone ICN6211 MIPI-DSI/RGB Converter bridge"
depends on OF
- depends on DRM_KMS_HELPER
+ select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL_BRIDGE
help
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 38616aab12ac..fb6c588b0f71 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -1758,6 +1758,7 @@ static inline void ti_sn_gpio_unregister(void) {}
static void ti_sn65dsi86_runtime_disable(void *data)
{
+ pm_runtime_dont_use_autosuspend(data);
pm_runtime_disable(data);
}
@@ -1817,11 +1818,11 @@ static int ti_sn65dsi86_probe(struct i2c_client *client,
"failed to get reference clock\n");
pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(pdata->dev, 500);
+ pm_runtime_use_autosuspend(pdata->dev);
ret = devm_add_action_or_reset(dev, ti_sn65dsi86_runtime_disable, dev);
if (ret)
return ret;
- pm_runtime_set_autosuspend_delay(pdata->dev, 500);
- pm_runtime_use_autosuspend(pdata->dev);
ti_sn65dsi86_debugfs_init(pdata);
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index c3e6e615bf09..7051c9c909c2 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -27,10 +27,10 @@
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
-
#include <linux/cc_platform.h>
#include <linux/export.h>
#include <linux/highmem.h>
+#include <linux/ioport.h>
#include <linux/iosys-map.h>
#include <xen/xen.h>
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index a50c82bc2b2f..76a8c707c34b 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -2330,6 +2330,9 @@ EXPORT_SYMBOL(drm_connector_atomic_hdr_metadata_equal);
void drm_connector_set_vrr_capable_property(
struct drm_connector *connector, bool capable)
{
+ if (!connector->vrr_capable_property)
+ return;
+
drm_object_property_set_value(&connector->base,
connector->vrr_capable_property,
capable);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 5895b89df03a..bff8c2d73cdf 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -1439,6 +1439,13 @@ static inline u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private
PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
}
+static inline u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
+{
+ return IS_ALDERLAKE_P(dev_priv) ?
+ ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
+ PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
+}
+
static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -1543,7 +1550,13 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 val = PSR2_MAN_TRK_CTL_ENABLE;
+ u32 val = 0;
+
+ if (!IS_ALDERLAKE_P(dev_priv))
+ val = PSR2_MAN_TRK_CTL_ENABLE;
+
+ /* SF partial frame enable has to be set even on full update */
+ val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
if (full_update) {
/*
@@ -1563,7 +1576,6 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
} else {
drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
- val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 9bb551b83e7a..92cb88248391 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -57,7 +57,7 @@ struct __guc_ads_blob {
struct guc_gt_system_info system_info;
struct guc_engine_usage engine_usage;
/* From here on, location is dynamic! Refer to above diagram. */
- struct guc_mmio_reg regset[0];
+ struct guc_mmio_reg regset[];
} __packed;
#define ads_blob_read(guc_, field_) \
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4da10e131216..3c87d77d2cf6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2320,6 +2320,7 @@
#define ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val) REG_FIELD_PREP(ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val)
#define ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK REG_GENMASK(12, 0)
#define ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(val) REG_FIELD_PREP(ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK, val)
+#define ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE REG_BIT(31)
#define ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME REG_BIT(14)
#define ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(13)
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index 4f7a61d5502e..4cce044efde2 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -108,6 +108,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
/* Comet Lake V PCH is based on KBP, which is SPT compatible */
return PCH_SPT;
case INTEL_PCH_ICP_DEVICE_ID_TYPE:
+ case INTEL_PCH_ICP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n");
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
return PCH_ICP;
@@ -123,7 +124,6 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
!IS_GEN9_BC(dev_priv));
return PCH_TGP;
case INTEL_PCH_JSP_DEVICE_ID_TYPE:
- case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv));
return PCH_JSP;
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index 6fd20408f7bf..b7a8cf409d48 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -50,11 +50,11 @@ enum intel_pch {
#define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680
#define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380
#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
+#define INTEL_PCH_ICP2_DEVICE_ID_TYPE 0x3880
#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
#define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380
#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
-#define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880
#define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80
#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index a8aba0141ce7..06cb1a59b9bc 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -217,14 +217,6 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge,
if (!imx_pd_format_supported(bus_fmt))
return -EINVAL;
- if (bus_flags &
- ~(DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_DE_HIGH |
- DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
- DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)) {
- dev_warn(imxpd->dev, "invalid bus_flags (%x)\n", bus_flags);
- return -EINVAL;
- }
-
bridge_state->output_bus_cfg.flags = bus_flags;
bridge_state->input_bus_cfg.flags = bus_flags;
imx_crtc_state->bus_flags = bus_flags;
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 2146299e5f52..17cd9b932298 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -11,6 +11,7 @@
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_disp_drv.h"
@@ -414,9 +415,13 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
return ret;
}
+ pm_runtime_enable(dev);
+
ret = component_add(dev, &mtk_disp_ovl_component_ops);
- if (ret)
+ if (ret) {
+ pm_runtime_disable(dev);
dev_err(dev, "Failed to add component: %d\n", ret);
+ }
return ret;
}
@@ -424,6 +429,7 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
static int mtk_disp_ovl_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_ovl_component_ops);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index d41a3970b944..662e91d9d45f 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -9,6 +9,7 @@
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_disp_drv.h"
@@ -327,9 +328,13 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
+ pm_runtime_enable(dev);
+
ret = component_add(dev, &mtk_disp_rdma_component_ops);
- if (ret)
+ if (ret) {
+ pm_runtime_disable(dev);
dev_err(dev, "Failed to add component: %d\n", ret);
+ }
return ret;
}
@@ -338,6 +343,8 @@ static int mtk_disp_rdma_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_rdma_component_ops);
+ pm_runtime_disable(&pdev->dev);
+
return 0;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 8cc0a1283d7c..ede435d2c1ef 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -12,7 +12,6 @@
#include <linux/soc/mediatek/mtk-mutex.h>
#include <asm/barrier.h>
-#include <soc/mediatek/smi.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -663,15 +662,15 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
- ret = mtk_smi_larb_get(comp->larb_dev);
- if (ret) {
- DRM_ERROR("Failed to get larb: %d\n", ret);
+ ret = pm_runtime_resume_and_get(comp->dev);
+ if (ret < 0) {
+ DRM_DEV_ERROR(comp->dev, "Failed to enable power domain: %d\n", ret);
return;
}
ret = mtk_crtc_ddp_hw_init(mtk_crtc);
if (ret) {
- mtk_smi_larb_put(comp->larb_dev);
+ pm_runtime_put(comp->dev);
return;
}
@@ -684,7 +683,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
- int i;
+ int i, ret;
DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
if (!mtk_crtc->enabled)
@@ -714,7 +713,9 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
mtk_crtc_ddp_hw_fini(mtk_crtc);
- mtk_smi_larb_put(comp->larb_dev);
+ ret = pm_runtime_put(comp->dev);
+ if (ret < 0)
+ DRM_DEV_ERROR(comp->dev, "Failed to disable power domain: %d\n", ret);
mtk_crtc->enabled = false;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index b4b682bc1991..2e99aee13dfe 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -447,37 +447,15 @@ unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm,
return ret;
}
-static int mtk_ddp_get_larb_dev(struct device_node *node, struct mtk_ddp_comp *comp,
- struct device *dev)
-{
- struct device_node *larb_node;
- struct platform_device *larb_pdev;
-
- larb_node = of_parse_phandle(node, "mediatek,larb", 0);
- if (!larb_node) {
- dev_err(dev, "Missing mediadek,larb phandle in %pOF node\n", node);
- return -EINVAL;
- }
-
- larb_pdev = of_find_device_by_node(larb_node);
- if (!larb_pdev) {
- dev_warn(dev, "Waiting for larb device %pOF\n", larb_node);
- of_node_put(larb_node);
- return -EPROBE_DEFER;
- }
- of_node_put(larb_node);
- comp->larb_dev = &larb_pdev->dev;
-
- return 0;
-}
-
int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
enum mtk_ddp_comp_id comp_id)
{
struct platform_device *comp_pdev;
enum mtk_ddp_comp_type type;
struct mtk_ddp_comp_dev *priv;
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
int ret;
+#endif
if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX)
return -EINVAL;
@@ -493,16 +471,6 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
}
comp->dev = &comp_pdev->dev;
- /* Only DMA capable components need the LARB property */
- if (type == MTK_DISP_OVL ||
- type == MTK_DISP_OVL_2L ||
- type == MTK_DISP_RDMA ||
- type == MTK_DISP_WDMA) {
- ret = mtk_ddp_get_larb_dev(node, comp, comp->dev);
- if (ret)
- return ret;
- }
-
if (type == MTK_DISP_AAL ||
type == MTK_DISP_BLS ||
type == MTK_DISP_CCORR ||
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 4c6a98662305..ad267bb8fc9b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -71,7 +71,6 @@ struct mtk_ddp_comp_funcs {
struct mtk_ddp_comp {
struct device *dev;
int irq;
- struct device *larb_dev;
enum mtk_ddp_comp_id id;
const struct mtk_ddp_comp_funcs *funcs;
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index dd029307be7d..838297f9d45d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -648,11 +648,8 @@ err_pm:
pm_runtime_disable(dev);
err_node:
of_node_put(private->mutex_node);
- for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) {
+ for (i = 0; i < DDP_COMPONENT_ID_MAX; i++)
of_node_put(private->comp_node[i]);
- if (private->ddp_comp[i].larb_dev)
- put_device(private->ddp_comp[i].larb_dev);
- }
return ret;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_pll.c b/drivers/gpu/drm/mgag200/mgag200_pll.c
index e9ae22b4f813..52be08b744ad 100644
--- a/drivers/gpu/drm/mgag200/mgag200_pll.c
+++ b/drivers/gpu/drm/mgag200/mgag200_pll.c
@@ -404,9 +404,9 @@ mgag200_pixpll_update_g200wb(struct mgag200_pll *pixpll, const struct mgag200_pl
udelay(50);
/* program pixel pll register */
- WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn);
- WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm);
- WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp);
+ WREG_DAC(MGA1064_WB_PIX_PLLC_N, xpixpllcn);
+ WREG_DAC(MGA1064_WB_PIX_PLLC_M, xpixpllcm);
+ WREG_DAC(MGA1064_WB_PIX_PLLC_P, xpixpllcp);
udelay(50);
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/hs.h b/drivers/gpu/drm/nouveau/include/nvfw/hs.h
index 64d0d32200c2..b53bbc4cd130 100644
--- a/drivers/gpu/drm/nouveau/include/nvfw/hs.h
+++ b/drivers/gpu/drm/nouveau/include/nvfw/hs.h
@@ -23,7 +23,7 @@ struct nvfw_hs_load_header {
u32 data_dma_base;
u32 data_size;
u32 num_apps;
- u32 apps[0];
+ u32 apps[];
};
const struct nvfw_hs_load_header *
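Replacing the zero-length array u32 apps[0] with a C99 flexible array member u32 apps[] gives the compiler (and fortify/UBSAN bounds checking) accurate information about the trailing array; sizeof the struct excludes the member in both spellings, so allocation sizes are unchanged. A hedged sketch of how such a header is typically allocated (struct hdr and alloc_hdr are illustrative, not from the patch):

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct hdr {
        u32 num_apps;
        u32 apps[];             /* flexible array member, not apps[0] */
    };

    static struct hdr *alloc_hdr(u32 n)
    {
        /* struct_size() saturates instead of overflowing on huge n */
        struct hdr *h = kzalloc(struct_size(h, apps, n), GFP_KERNEL);

        if (h)
            h->num_apps = n;
        return h;
    }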
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 3828aafd3ac4..7ba66ad68a8a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -39,6 +39,8 @@
#include <linux/sched/mm.h>
#include <linux/hmm.h>
+#include <linux/memremap.h>
+#include <linux/migrate.h>
/*
* FIXME: this is ugly right now we are using TTM to allocate vram and we pin
@@ -324,7 +326,6 @@ nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
return NULL;
}
- get_page(page);
lock_page(page);
return page;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 46a5a1016e37..31a5b81ee9fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -35,6 +35,7 @@
#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/hmm.h>
+#include <linux/memremap.h>
#include <linux/rmap.h>
struct nouveau_svm {
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index bb2e47229c68..ddf5f38e8731 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -107,6 +107,7 @@ config DRM_PANEL_EDP
select VIDEOMODE_HELPERS
select DRM_DP_AUX_BUS
select DRM_DP_HELPER
+ select DRM_KMS_HELPER
help
DRM panel driver for dumb eDP panels that need at most a regulator and
a GPIO to be powered up. Optionally a backlight can be attached so
diff --git a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
index f043b484055b..ed626fdc08e8 100644
--- a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
+++ b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
@@ -293,15 +293,13 @@ static int y030xx067a_probe(struct spi_device *spi)
return 0;
}
-static int y030xx067a_remove(struct spi_device *spi)
+static void y030xx067a_remove(struct spi_device *spi)
{
struct y030xx067a *priv = spi_get_drvdata(spi);
drm_panel_remove(&priv->panel);
drm_panel_disable(&priv->panel);
drm_panel_unprepare(&priv->panel);
-
- return 0;
}
static const struct drm_display_mode y030xx067a_modes[] = {
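This and the long run of panel conversions that follows track the SPI core change that made struct spi_driver::remove return void: the core always ignored the returned status, so a remove callback now simply releases resources. The converted shape, sketched with placeholder names (my_panel/my_remove are illustrative):

    static void my_remove(struct spi_device *spi)   /* was: static int */
    {
        struct my_panel *priv = spi_get_drvdata(spi);

        drm_panel_remove(&priv->panel);
        drm_panel_disable(&priv->panel);
        drm_panel_unprepare(&priv->panel);
        /* no "return 0;" -- the SPI core never used the value */
    }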
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index 8e84df9a0033..3dfafa585127 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -896,14 +896,12 @@ static int ili9322_probe(struct spi_device *spi)
return 0;
}
-static int ili9322_remove(struct spi_device *spi)
+static void ili9322_remove(struct spi_device *spi)
{
struct ili9322 *ili = spi_get_drvdata(spi);
ili9322_power_off(ili);
drm_panel_remove(&ili->panel);
-
- return 0;
}
/*
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
index 2c3378a259b1..a07ef26234e5 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
@@ -728,7 +728,7 @@ static int ili9341_probe(struct spi_device *spi)
return -1;
}
-static int ili9341_remove(struct spi_device *spi)
+static void ili9341_remove(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
struct ili9341 *ili = spi_get_drvdata(spi);
@@ -741,7 +741,6 @@ static int ili9341_remove(struct spi_device *spi)
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
}
- return 0;
}
static void ili9341_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/panel/panel-innolux-ej030na.c b/drivers/gpu/drm/panel/panel-innolux-ej030na.c
index c558de3f99be..e3b1daa0cb72 100644
--- a/drivers/gpu/drm/panel/panel-innolux-ej030na.c
+++ b/drivers/gpu/drm/panel/panel-innolux-ej030na.c
@@ -219,15 +219,13 @@ static int ej030na_probe(struct spi_device *spi)
return 0;
}
-static int ej030na_remove(struct spi_device *spi)
+static void ej030na_remove(struct spi_device *spi)
{
struct ej030na *priv = spi_get_drvdata(spi);
drm_panel_remove(&priv->panel);
drm_panel_disable(&priv->panel);
drm_panel_unprepare(&priv->panel);
-
- return 0;
}
static const struct drm_display_mode ej030na_modes[] = {
diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
index f3183b68704f..9d0d4faa3f58 100644
--- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c
+++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
@@ -203,14 +203,12 @@ static int lb035q02_probe(struct spi_device *spi)
return 0;
}
-static int lb035q02_remove(struct spi_device *spi)
+static void lb035q02_remove(struct spi_device *spi)
{
struct lb035q02_device *lcd = spi_get_drvdata(spi);
drm_panel_remove(&lcd->panel);
drm_panel_disable(&lcd->panel);
-
- return 0;
}
static const struct of_device_id lb035q02_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index 8e5160af1de5..cf246d15b7b6 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -266,14 +266,12 @@ static int lg4573_probe(struct spi_device *spi)
return 0;
}
-static int lg4573_remove(struct spi_device *spi)
+static void lg4573_remove(struct spi_device *spi)
{
struct lg4573 *ctx = spi_get_drvdata(spi);
lg4573_display_off(ctx);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id lg4573_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
index 6e5ab1debc8b..81c5c541a351 100644
--- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
@@ -212,15 +212,13 @@ static int nl8048_probe(struct spi_device *spi)
return 0;
}
-static int nl8048_remove(struct spi_device *spi)
+static void nl8048_remove(struct spi_device *spi)
{
struct nl8048_panel *lcd = spi_get_drvdata(spi);
drm_panel_remove(&lcd->panel);
drm_panel_disable(&lcd->panel);
drm_panel_unprepare(&lcd->panel);
-
- return 0;
}
static const struct of_device_id nl8048_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
index d036853db865..f58cfb10b58a 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
@@ -292,7 +292,7 @@ static int nt39016_probe(struct spi_device *spi)
return 0;
}
-static int nt39016_remove(struct spi_device *spi)
+static void nt39016_remove(struct spi_device *spi)
{
struct nt39016 *panel = spi_get_drvdata(spi);
@@ -300,8 +300,6 @@ static int nt39016_remove(struct spi_device *spi)
nt39016_disable(&panel->drm_panel);
nt39016_unprepare(&panel->drm_panel);
-
- return 0;
}
static const struct drm_display_mode kd035g6_display_modes[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-db7430.c b/drivers/gpu/drm/panel/panel-samsung-db7430.c
index ead479719f00..04640c5256a8 100644
--- a/drivers/gpu/drm/panel/panel-samsung-db7430.c
+++ b/drivers/gpu/drm/panel/panel-samsung-db7430.c
@@ -314,12 +314,11 @@ static int db7430_probe(struct spi_device *spi)
return 0;
}
-static int db7430_remove(struct spi_device *spi)
+static void db7430_remove(struct spi_device *spi)
{
struct db7430 *db = spi_get_drvdata(spi);
drm_panel_remove(&db->panel);
- return 0;
}
/*
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index c4b388850a13..01eb211f32f7 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -358,14 +358,12 @@ static int ld9040_probe(struct spi_device *spi)
return 0;
}
-static int ld9040_remove(struct spi_device *spi)
+static void ld9040_remove(struct spi_device *spi)
{
struct ld9040 *ctx = spi_get_drvdata(spi);
ld9040_power_off(ctx);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id ld9040_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
index 1696ceb36aa0..2adb223a895c 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
@@ -291,12 +291,11 @@ static int s6d27a1_probe(struct spi_device *spi)
return 0;
}
-static int s6d27a1_remove(struct spi_device *spi)
+static void s6d27a1_remove(struct spi_device *spi)
{
struct s6d27a1 *ctx = spi_get_drvdata(spi);
drm_panel_remove(&ctx->panel);
- return 0;
}
static const struct of_device_id s6d27a1_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c
index c178d962b0d5..d99afcc672ca 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c
@@ -62,10 +62,9 @@ static int s6e63m0_spi_probe(struct spi_device *spi)
s6e63m0_spi_dcs_write, false);
}
-static int s6e63m0_spi_remove(struct spi_device *spi)
+static void s6e63m0_spi_remove(struct spi_device *spi)
{
s6e63m0_remove(&spi->dev);
- return 0;
}
static const struct of_device_id s6e63m0_spi_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index c09eb5ad65fc..a34f4198a534 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2017,7 +2017,7 @@ static const struct display_timing innolux_g070y2_l01_timing = {
static const struct panel_desc innolux_g070y2_l01 = {
.timings = &innolux_g070y2_l01_timing,
.num_timings = 1,
- .bpc = 6,
+ .bpc = 8,
.size = {
.width = 152,
.height = 91,
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 61e565524542..bbc4569cbcdc 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -387,13 +387,11 @@ static int st7789v_probe(struct spi_device *spi)
return 0;
}
-static int st7789v_remove(struct spi_device *spi)
+static void st7789v_remove(struct spi_device *spi)
{
struct st7789v *ctx = spi_get_drvdata(spi);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id st7789v_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index ba0b3ead150f..0d7541a33f87 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -655,7 +655,7 @@ static int acx565akm_probe(struct spi_device *spi)
return 0;
}
-static int acx565akm_remove(struct spi_device *spi)
+static void acx565akm_remove(struct spi_device *spi)
{
struct acx565akm_panel *lcd = spi_get_drvdata(spi);
@@ -666,8 +666,6 @@ static int acx565akm_remove(struct spi_device *spi)
drm_panel_disable(&lcd->panel);
drm_panel_unprepare(&lcd->panel);
-
- return 0;
}
static const struct of_device_id acx565akm_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index ba0c00d1a001..4dbf8b88f264 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -350,15 +350,13 @@ static int td028ttec1_probe(struct spi_device *spi)
return 0;
}
-static int td028ttec1_remove(struct spi_device *spi)
+static void td028ttec1_remove(struct spi_device *spi)
{
struct td028ttec1_panel *lcd = spi_get_drvdata(spi);
drm_panel_remove(&lcd->panel);
drm_panel_disable(&lcd->panel);
drm_panel_unprepare(&lcd->panel);
-
- return 0;
}
static const struct of_device_id td028ttec1_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
index 1866cdb8f9c1..cf4609bb9b1d 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
@@ -463,7 +463,7 @@ static int td043mtea1_probe(struct spi_device *spi)
return 0;
}
-static int td043mtea1_remove(struct spi_device *spi)
+static void td043mtea1_remove(struct spi_device *spi)
{
struct td043mtea1_panel *lcd = spi_get_drvdata(spi);
@@ -472,8 +472,6 @@ static int td043mtea1_remove(struct spi_device *spi)
drm_panel_unprepare(&lcd->panel);
sysfs_remove_group(&spi->dev.kobj, &td043mtea1_attr_group);
-
- return 0;
}
static const struct of_device_id td043mtea1_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
index e3791dad6830..0b1f5a11a055 100644
--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
@@ -450,12 +450,11 @@ static int tpg110_probe(struct spi_device *spi)
return 0;
}
-static int tpg110_remove(struct spi_device *spi)
+static void tpg110_remove(struct spi_device *spi)
{
struct tpg110 *tpg = spi_get_drvdata(spi);
drm_panel_remove(&tpg->panel);
- return 0;
}
static const struct of_device_id tpg110_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-widechips-ws2401.c b/drivers/gpu/drm/panel/panel-widechips-ws2401.c
index 8bc976f54b80..236f3cb2b594 100644
--- a/drivers/gpu/drm/panel/panel-widechips-ws2401.c
+++ b/drivers/gpu/drm/panel/panel-widechips-ws2401.c
@@ -407,12 +407,11 @@ static int ws2401_probe(struct spi_device *spi)
return 0;
}
-static int ws2401_remove(struct spi_device *spi)
+static void ws2401_remove(struct spi_device *spi)
{
struct ws2401 *ws = spi_get_drvdata(spi);
drm_panel_remove(&ws->panel);
- return 0;
}
/*
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index 4740cc14beb8..d3e6c93739bf 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -586,6 +586,13 @@ static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes));
}
+static void cdn_dp_audio_handle_plugged_change(struct cdn_dp_device *dp,
+ bool plugged)
+{
+ if (dp->codec_dev)
+ dp->plugged_cb(dp->codec_dev, plugged);
+}
+
static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
{
struct cdn_dp_device *dp = encoder_to_dp(encoder);
@@ -641,6 +648,9 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
DRM_DEV_ERROR(dp->dev, "Failed to valid video %d\n", ret);
goto out;
}
+
+ cdn_dp_audio_handle_plugged_change(dp, true);
+
out:
mutex_unlock(&dp->lock);
}
@@ -651,6 +661,8 @@ static void cdn_dp_encoder_disable(struct drm_encoder *encoder)
int ret;
mutex_lock(&dp->lock);
+ cdn_dp_audio_handle_plugged_change(dp, false);
+
if (dp->active) {
ret = cdn_dp_disable(dp);
if (ret) {
@@ -846,11 +858,27 @@ static int cdn_dp_audio_get_eld(struct device *dev, void *data,
return 0;
}
+static int cdn_dp_audio_hook_plugged_cb(struct device *dev, void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct cdn_dp_device *dp = dev_get_drvdata(dev);
+
+ mutex_lock(&dp->lock);
+ dp->plugged_cb = fn;
+ dp->codec_dev = codec_dev;
+ cdn_dp_audio_handle_plugged_change(dp, dp->connected);
+ mutex_unlock(&dp->lock);
+
+ return 0;
+}
+
static const struct hdmi_codec_ops audio_codec_ops = {
.hw_params = cdn_dp_audio_hw_params,
.audio_shutdown = cdn_dp_audio_shutdown,
.mute_stream = cdn_dp_audio_mute_stream,
.get_eld = cdn_dp_audio_get_eld,
+ .hook_plugged_cb = cdn_dp_audio_hook_plugged_cb,
.no_capture_mute = 1,
};
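The new hook_plugged_cb op lets the hdmi-codec ASoC driver register a callback through which the DP bridge reports hotplug state for jack detection; the bridge stores the callback and codec device under its lock and invokes them on enable, disable, and (dis)connect. A condensed sketch of the contract (my_dp stands in for the driver's device struct):

    #include <sound/hdmi-codec.h>

    static int my_hook_plugged_cb(struct device *dev, void *data,
                                  hdmi_codec_plugged_cb fn,
                                  struct device *codec_dev)
    {
        struct my_dp *dp = dev_get_drvdata(dev);

        mutex_lock(&dp->lock);
        dp->plugged_cb = fn;
        dp->codec_dev = codec_dev;
        if (dp->codec_dev)
            dp->plugged_cb(dp->codec_dev, dp->connected); /* report now */
        mutex_unlock(&dp->lock);

        return 0;
    }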
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index 0d044146f4e9..6d0c5032ef3a 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -10,6 +10,7 @@
#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <sound/hdmi-codec.h>
#include "rockchip_drm_drv.h"
@@ -101,5 +102,8 @@ struct cdn_dp_device {
u8 dpcd[DP_RECEIVER_CAP_SIZE];
bool sink_has_audio;
+
+ hdmi_codec_plugged_cb plugged_cb;
+ struct device *codec_dev;
};
#endif /* _CDN_DP_CORE_H */
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index 145833a9d82d..5b3fbee18671 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -111,10 +111,10 @@
/* format 13 is semi-planar YUV411 VUVU */
#define SUN8I_MIXER_FBFMT_YUV411 14
/* format 15 doesn't exist */
-/* format 16 is P010 YVU */
-#define SUN8I_MIXER_FBFMT_P010_YUV 17
-/* format 18 is P210 YVU */
-#define SUN8I_MIXER_FBFMT_P210_YUV 19
+#define SUN8I_MIXER_FBFMT_P010_YUV 16
+/* format 17 is P010 YVU */
+#define SUN8I_MIXER_FBFMT_P210_YUV 18
+/* format 19 is P210 YVU */
/* format 20 is packed YVU444 10-bit */
/* format 21 is packed YUV444 10-bit */
diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c
index 9b33c05732aa..ebb025543f8d 100644
--- a/drivers/gpu/drm/tiny/hx8357d.c
+++ b/drivers/gpu/drm/tiny/hx8357d.c
@@ -263,14 +263,12 @@ static int hx8357d_probe(struct spi_device *spi)
return 0;
}
-static int hx8357d_remove(struct spi_device *spi)
+static void hx8357d_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
-
- return 0;
}
static void hx8357d_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/tiny/ili9163.c b/drivers/gpu/drm/tiny/ili9163.c
index bcc181351236..fc8ed245b0bc 100644
--- a/drivers/gpu/drm/tiny/ili9163.c
+++ b/drivers/gpu/drm/tiny/ili9163.c
@@ -193,14 +193,12 @@ static int ili9163_probe(struct spi_device *spi)
return 0;
}
-static int ili9163_remove(struct spi_device *spi)
+static void ili9163_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
-
- return 0;
}
static void ili9163_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index 976d3209f164..cc92eb9f2a07 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -411,14 +411,12 @@ static int ili9225_probe(struct spi_device *spi)
return 0;
}
-static int ili9225_remove(struct spi_device *spi)
+static void ili9225_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
-
- return 0;
}
static void ili9225_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c
index 37e0c33399c8..5b8cc770ee7b 100644
--- a/drivers/gpu/drm/tiny/ili9341.c
+++ b/drivers/gpu/drm/tiny/ili9341.c
@@ -225,14 +225,12 @@ static int ili9341_probe(struct spi_device *spi)
return 0;
}
-static int ili9341_remove(struct spi_device *spi)
+static void ili9341_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
-
- return 0;
}
static void ili9341_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
index e9a63f4b2993..6d655e18e0aa 100644
--- a/drivers/gpu/drm/tiny/ili9486.c
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -243,14 +243,12 @@ static int ili9486_probe(struct spi_device *spi)
return 0;
}
-static int ili9486_remove(struct spi_device *spi)
+static void ili9486_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
-
- return 0;
}
static void ili9486_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c
index 023de49e7a8e..5e060f6910bb 100644
--- a/drivers/gpu/drm/tiny/mi0283qt.c
+++ b/drivers/gpu/drm/tiny/mi0283qt.c
@@ -233,14 +233,12 @@ static int mi0283qt_probe(struct spi_device *spi)
return 0;
}
-static int mi0283qt_remove(struct spi_device *spi)
+static void mi0283qt_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
-
- return 0;
}
static void mi0283qt_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
index 7f8c6c51387f..c759ff9c2c87 100644
--- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c
+++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
@@ -336,14 +336,12 @@ static int panel_mipi_dbi_spi_probe(struct spi_device *spi)
return 0;
}
-static int panel_mipi_dbi_spi_remove(struct spi_device *spi)
+static void panel_mipi_dbi_spi_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
-
- return 0;
}
static void panel_mipi_dbi_spi_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 5c74e236b16d..37b6bb90e46e 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -1118,14 +1118,12 @@ static int repaper_probe(struct spi_device *spi)
return 0;
}
-static int repaper_remove(struct spi_device *spi)
+static void repaper_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
-
- return 0;
}
static void repaper_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 51b9b9fb3ead..3f38faa1cd8c 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -360,14 +360,12 @@ static int st7586_probe(struct spi_device *spi)
return 0;
}
-static int st7586_remove(struct spi_device *spi)
+static void st7586_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
-
- return 0;
}
static void st7586_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index fc40dd10efa8..29d618093e94 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -247,14 +247,12 @@ static int st7735r_probe(struct spi_device *spi)
return 0;
}
-static int st7735r_remove(struct spi_device *spi)
+static void st7735r_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
-
- return 0;
}
static void st7735r_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index a9639d098893..778bc26d3ba5 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -357,11 +357,11 @@ static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
switch (mbus_cfg->type) {
case V4L2_MBUS_PARALLEL:
csicfg->ext_vsync = 1;
- csicfg->vsync_pol = (mbus_cfg->flags &
+ csicfg->vsync_pol = (mbus_cfg->bus.parallel.flags &
V4L2_MBUS_VSYNC_ACTIVE_LOW) ? 1 : 0;
- csicfg->hsync_pol = (mbus_cfg->flags &
+ csicfg->hsync_pol = (mbus_cfg->bus.parallel.flags &
V4L2_MBUS_HSYNC_ACTIVE_LOW) ? 1 : 0;
- csicfg->pixclk_pol = (mbus_cfg->flags &
+ csicfg->pixclk_pol = (mbus_cfg->bus.parallel.flags &
V4L2_MBUS_PCLK_SAMPLE_FALLING) ? 1 : 0;
csicfg->clk_mode = IPU_CSI_CLK_MODE_GATED_CLK;
break;
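The flags move reflects the V4L2 change that split v4l2_mbus_config's single flags word into per-bus-type members, so parallel-bus polarities now live under bus.parallel. Reading them after the change looks like this (a sketch mirroring the hunk above):

    unsigned int flags = mbus_cfg->bus.parallel.flags;

    csicfg->vsync_pol  = !!(flags & V4L2_MBUS_VSYNC_ACTIVE_LOW);
    csicfg->hsync_pol  = !!(flags & V4L2_MBUS_HSYNC_ACTIVE_LOW);
    csicfg->pixclk_pol = !!(flags & V4L2_MBUS_PCLK_SAMPLE_FALLING);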
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 26c31d759914..81e7e404a5fc 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -860,7 +860,9 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_F22] = "F22", [KEY_F23] = "F23",
[KEY_F24] = "F24", [KEY_PLAYCD] = "PlayCD",
[KEY_PAUSECD] = "PauseCD", [KEY_PROG3] = "Prog3",
- [KEY_PROG4] = "Prog4", [KEY_SUSPEND] = "Suspend",
+ [KEY_PROG4] = "Prog4",
+ [KEY_ALL_APPLICATIONS] = "AllApplications",
+ [KEY_SUSPEND] = "Suspend",
[KEY_CLOSE] = "Close", [KEY_PLAY] = "Play",
[KEY_FASTFORWARD] = "FastForward", [KEY_BASSBOOST] = "BassBoost",
[KEY_PRINT] = "Print", [KEY_HP] = "HP",
@@ -969,6 +971,7 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_ASSISTANT] = "Assistant",
[KEY_KBD_LAYOUT_NEXT] = "KbdLayoutNext",
[KEY_EMOJI_PICKER] = "EmojiPicker",
+ [KEY_DICTATE] = "Dictate",
[KEY_BRIGHTNESS_MIN] = "BrightnessMin",
[KEY_BRIGHTNESS_MAX] = "BrightnessMax",
[KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
index 9b42b0cdeef0..2876cb6a7dca 100644
--- a/drivers/hid/hid-elo.c
+++ b/drivers/hid/hid-elo.c
@@ -228,7 +228,6 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct elo_priv *priv;
int ret;
- struct usb_device *udev;
if (!hid_is_usb(hdev))
return -EINVAL;
@@ -238,8 +237,7 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
return -ENOMEM;
INIT_DELAYED_WORK(&priv->work, elo_work);
- udev = interface_to_usbdev(to_usb_interface(hdev->dev.parent));
- priv->usbdev = usb_get_dev(udev);
+ priv->usbdev = interface_to_usbdev(to_usb_interface(hdev->dev.parent));
hid_set_drvdata(hdev, priv);
@@ -262,7 +260,6 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
return 0;
err_free:
- usb_put_dev(udev);
kfree(priv);
return ret;
}
@@ -271,8 +268,6 @@ static void elo_remove(struct hid_device *hdev)
{
struct elo_priv *priv = hid_get_drvdata(hdev);
- usb_put_dev(priv->usbdev);
-
hid_hw_stop(hdev);
cancel_delayed_work_sync(&priv->work);
kfree(priv);
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 112901d2d8d2..56ec27398a00 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -992,6 +992,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break;
case 0x0cf: map_key_clear(KEY_VOICECOMMAND); break;
+ case 0x0d8: map_key_clear(KEY_DICTATE); break;
case 0x0d9: map_key_clear(KEY_EMOJI_PICKER); break;
case 0x0e0: map_abs_clear(ABS_VOLUME); break;
@@ -1083,6 +1084,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x29d: map_key_clear(KEY_KBD_LAYOUT_NEXT); break;
+ case 0x2a2: map_key_clear(KEY_ALL_APPLICATIONS); break;
+
case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV); break;
case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT); break;
case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP); break;
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 7106b921b53c..c358778e070b 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1068,6 +1068,7 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
workitem.reports_supported |= STD_KEYBOARD;
break;
case 0x0f:
+ case 0x11:
device_type = "eQUAD Lightspeed 1.2";
logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
workitem.reports_supported |= STD_KEYBOARD;
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index b6a9a0f3966e..2204de889739 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -2128,6 +2128,10 @@ static int nintendo_hid_probe(struct hid_device *hdev,
spin_lock_init(&ctlr->lock);
ctlr->rumble_queue = alloc_workqueue("hid-nintendo-rumble_wq",
WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
+ if (!ctlr->rumble_queue) {
+ ret = -ENOMEM;
+ goto err;
+ }
INIT_WORK(&ctlr->rumble_worker, joycon_rumble_worker);
ret = hid_parse(hdev);
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
index 03b935ff02d5..c3e6d69fdfbd 100644
--- a/drivers/hid/hid-thrustmaster.c
+++ b/drivers/hid/hid-thrustmaster.c
@@ -64,7 +64,9 @@ struct tm_wheel_info {
*/
static const struct tm_wheel_info tm_wheels_infos[] = {
{0x0306, 0x0006, "Thrustmaster T150RS"},
+ {0x0200, 0x0005, "Thrustmaster T300RS (Missing Attachment)"},
{0x0206, 0x0005, "Thrustmaster T300RS"},
+ {0x0209, 0x0005, "Thrustmaster T300RS (Open Wheel Attachment)"},
{0x0204, 0x0005, "Thrustmaster T300 Ferrari Alcantara Edition"},
{0x0002, 0x0002, "Thrustmaster T500RS"}
//{0x0407, 0x0001, "Thrustmaster TMX"}
@@ -158,6 +160,12 @@ static void thrustmaster_interrupts(struct hid_device *hdev)
return;
}
+ if (usbif->cur_altsetting->desc.bNumEndpoints < 2) {
+ kfree(send_buf);
+ hid_err(hdev, "Wrong number of endpoints?\n");
+ return;
+ }
+
ep = &usbif->cur_altsetting->endpoint[1];
b_ep = ep->desc.bEndpointAddress;
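Indexing cur_altsetting->endpoint[1] is only safe after checking bNumEndpoints, because a malicious or malformed device can expose fewer endpoints than the driver expects. The defensive shape, as in the hunk above (send_buf handling elided):

    struct usb_interface *usbif = to_usb_interface(hdev->dev.parent);
    struct usb_host_endpoint *ep;

    if (usbif->cur_altsetting->desc.bNumEndpoints < 2) {
        hid_err(hdev, "wrong number of endpoints\n");
        return;                 /* never touch endpoint[1] */
    }
    ep = &usbif->cur_altsetting->endpoint[1];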
diff --git a/drivers/hid/hid-vivaldi.c b/drivers/hid/hid-vivaldi.c
index efa6140915f4..42ceb2058a09 100644
--- a/drivers/hid/hid-vivaldi.c
+++ b/drivers/hid/hid-vivaldi.c
@@ -144,7 +144,7 @@ out:
static int vivaldi_input_configured(struct hid_device *hdev,
struct hid_input *hidinput)
{
- return sysfs_create_group(&hdev->dev.kobj, &input_attribute_group);
+ return devm_device_add_group(&hdev->dev, &input_attribute_group);
}
static const struct hid_device_id vivaldi_table[] = {
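devm_device_add_group() ties the attribute group's lifetime to the device, so the driver no longer needs a matching sysfs_remove_group() anywhere on the teardown path. A before/after sketch (input_attribute_group as in the driver):

    /* before: requires a manual sysfs_remove_group() on teardown */
    ret = sysfs_create_group(&hdev->dev.kobj, &input_attribute_group);

    /* after: the group is removed automatically with hdev->dev */
    ret = devm_device_add_group(&hdev->dev, &input_attribute_group);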
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 614adb510dbd..2a918aeb0af1 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -747,7 +747,7 @@ static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
* copied from, so it's unsafe to allow this with elevated
* privileges (e.g. from a setuid binary) or via kernel_write().
*/
- if (file->f_cred != current_cred() || uaccess_kernel()) {
+ if (file->f_cred != current_cred()) {
pr_err_once("UHID_CREATE from different security context by process %d (%s), this is not allowed.\n",
task_tgid_vnr(current), current->comm);
ret = -EACCES;
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 60375879612f..26d269ba947c 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -459,7 +459,7 @@ void hv_process_channel_removal(struct vmbus_channel *channel)
* init_vp_index() can (re-)use the CPU.
*/
if (hv_is_perf_channel(channel))
- hv_clear_alloced_cpu(channel->target_cpu);
+ hv_clear_allocated_cpu(channel->target_cpu);
/*
* Upon suspend, an in-use hv_sock channel is marked as "rescinded" and
@@ -728,7 +728,7 @@ static void init_vp_index(struct vmbus_channel *channel)
bool perf_chn = hv_is_perf_channel(channel);
u32 i, ncpu = num_online_cpus();
cpumask_var_t available_mask;
- struct cpumask *alloced_mask;
+ struct cpumask *allocated_mask;
u32 target_cpu;
int numa_node;
@@ -745,7 +745,7 @@ static void init_vp_index(struct vmbus_channel *channel)
*/
channel->target_cpu = VMBUS_CONNECT_CPU;
if (perf_chn)
- hv_set_alloced_cpu(VMBUS_CONNECT_CPU);
+ hv_set_allocated_cpu(VMBUS_CONNECT_CPU);
return;
}
@@ -760,22 +760,21 @@ static void init_vp_index(struct vmbus_channel *channel)
continue;
break;
}
- alloced_mask = &hv_context.hv_numa_map[numa_node];
+ allocated_mask = &hv_context.hv_numa_map[numa_node];
- if (cpumask_weight(alloced_mask) ==
- cpumask_weight(cpumask_of_node(numa_node))) {
+ if (cpumask_equal(allocated_mask, cpumask_of_node(numa_node))) {
/*
* We have cycled through all the CPUs in the node;
- * reset the alloced map.
+ * reset the allocated map.
*/
- cpumask_clear(alloced_mask);
+ cpumask_clear(allocated_mask);
}
- cpumask_xor(available_mask, alloced_mask,
+ cpumask_xor(available_mask, allocated_mask,
cpumask_of_node(numa_node));
target_cpu = cpumask_first(available_mask);
- cpumask_set_cpu(target_cpu, alloced_mask);
+ cpumask_set_cpu(target_cpu, allocated_mask);
if (channel->offermsg.offer.sub_channel_index >= ncpu ||
i > ncpu || !hv_cpuself_used(target_cpu, channel))
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index f2d05bff4245..439f99b8b5de 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1563,7 +1563,7 @@ static void balloon_onchannelcallback(void *context)
break;
default:
- pr_warn("Unhandled message: type: %d\n", dm_hdr->type);
+ pr_warn_ratelimited("Unhandled message: type: %d\n", dm_hdr->type);
}
}
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 181d16bbf49d..c1dd21d0d7ef 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -79,8 +79,10 @@ int __init hv_common_init(void)
* calling crash enlightenment interface before running kdump
* kernel.
*/
- if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE)
+ if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
crash_kexec_post_notifiers = true;
+ pr_info("Hyper-V: enabling crash_kexec_post_notifiers\n");
+ }
/*
* Allocate the per-CPU state for the hypercall input arg.
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 6018b9d1b1fb..0d2184be1691 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -31,6 +31,9 @@ static const int fw_versions[] = {
UTIL_FW_VERSION
};
+/* See comment with struct hv_vss_msg regarding the max VMbus packet size */
+#define VSS_MAX_PKT_SIZE (HV_HYP_PAGE_SIZE * 2)
+
/*
* Timeout values are based on expectations from host
*/
@@ -298,7 +301,7 @@ void hv_vss_onchannelcallback(void *context)
if (vss_transaction.state > HVUTIL_READY)
return;
- if (vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen, &requestid)) {
+ if (vmbus_recvpacket(channel, recv_buffer, VSS_MAX_PKT_SIZE, &recvlen, &requestid)) {
pr_err_ratelimited("VSS request received. Could not read into recv buf\n");
return;
}
@@ -375,7 +378,7 @@ hv_vss_init(struct hv_util_service *srv)
}
recv_buffer = srv->recv_buffer;
vss_transaction.recv_channel = srv->channel;
- vss_transaction.recv_channel->max_pkt_size = HV_HYP_PAGE_SIZE * 2;
+ vss_transaction.recv_channel->max_pkt_size = VSS_MAX_PKT_SIZE;
/*
* When this driver loads, the user level daemon that
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 3a1f007b678a..6b45c22bb717 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -405,7 +405,7 @@ static inline bool hv_is_perf_channel(struct vmbus_channel *channel)
return vmbus_devs[channel->device_id].perf_device;
}
-static inline bool hv_is_alloced_cpu(unsigned int cpu)
+static inline bool hv_is_allocated_cpu(unsigned int cpu)
{
struct vmbus_channel *channel, *sc;
@@ -427,23 +427,23 @@ static inline bool hv_is_alloced_cpu(unsigned int cpu)
return false;
}
-static inline void hv_set_alloced_cpu(unsigned int cpu)
+static inline void hv_set_allocated_cpu(unsigned int cpu)
{
cpumask_set_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
}
-static inline void hv_clear_alloced_cpu(unsigned int cpu)
+static inline void hv_clear_allocated_cpu(unsigned int cpu)
{
- if (hv_is_alloced_cpu(cpu))
+ if (hv_is_allocated_cpu(cpu))
return;
cpumask_clear_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
}
-static inline void hv_update_alloced_cpus(unsigned int old_cpu,
+static inline void hv_update_allocated_cpus(unsigned int old_cpu,
unsigned int new_cpu)
{
- hv_set_alloced_cpu(new_cpu);
- hv_clear_alloced_cpu(old_cpu);
+ hv_set_allocated_cpu(new_cpu);
+ hv_clear_allocated_cpu(old_cpu);
}
#ifdef CONFIG_HYPERV_TESTING
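The cpumask_equal() change in channel_mgmt.c above states the intent directly: in init_vp_index() the allocated mask is always a subset of the node's mask, so equal weights do imply equal sets there, but cpumask_equal() makes that explicit and avoids computing two weights. A toy illustration of why weight comparison is the weaker test in general (masks assumed initialized):

    /* a = {0,1}, b = {2,3}: weights match but the sets differ */
    bool same_size = cpumask_weight(a) == cpumask_weight(b); /* true  */
    bool same_set  = cpumask_equal(a, b);                    /* false */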
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 12a2b37e87f3..60ee8b329f9e 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1129,7 +1129,7 @@ void vmbus_on_msg_dpc(unsigned long data)
}
if (entry->handler_type == VMHT_BLOCKING) {
- ctx = kmalloc(sizeof(*ctx) + payload_size, GFP_ATOMIC);
+ ctx = kmalloc(struct_size(ctx, msg.payload, payload_size), GFP_ATOMIC);
if (ctx == NULL)
return;
@@ -1874,7 +1874,7 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
/* See init_vp_index(). */
if (hv_is_perf_channel(channel))
- hv_update_alloced_cpus(origin_cpu, target_cpu);
+ hv_update_allocated_cpus(origin_cpu, target_cpu);
/* Currently set only for storvsc channels. */
if (channel->change_target_cpu_callback) {
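The kmalloc() change swaps the open-coded sizeof(*ctx) + payload_size for struct_size(), which computes the same value while saturating on overflow. This assumes msg.payload is the structure's trailing flexible array, which matches the (abridged, from memory) layout:

    #include <linux/overflow.h>

    struct onmessage_work_context {
        struct work_struct work;
        struct {
            /* header fields abridged */
            u8 payload[];
        } msg;
    };

    ctx = kmalloc(struct_size(ctx, msg.payload, payload_size), GFP_ATOMIC);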
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 8df25f1079ba..9ab4e9b3d27b 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -174,6 +174,7 @@ config SENSORS_ADM9240
config SENSORS_ADT7X10
tristate
+ select REGMAP
help
This module contains common code shared by the ADT7310/ADT7320 and
ADT7410/ADT7420 temperature monitoring chip drivers.
@@ -505,6 +506,21 @@ config SENSORS_DELL_SMM
When option I8K is also enabled this driver provides legacy /proc/i8k
userspace interface for i8kutils package.
+config I8K
+ bool "Legacy /proc/i8k interface of Dell laptop SMM BIOS hwmon driver"
+ depends on SENSORS_DELL_SMM
+ depends on PROC_FS
+ help
+ This option enables the legacy /proc/i8k userspace interface of the
+ dell-smm-hwmon driver. The character file /proc/i8k exposes the BIOS
+ version, temperatures and allows control of fan speeds of some Dell
+ laptops. Sometimes it also reports power and hotkey status.
+
+ This interface is required to run programs from the i8kutils package.
+
+ Say Y if you intend to run userspace programs that use this interface.
+ Say N otherwise.
+
config SENSORS_DA9052_ADC
tristate "Dialog DA9052/DA9053 ADC"
depends on PMIC_DA9052
@@ -1208,8 +1224,8 @@ config SENSORS_LM70
depends on SPI_MASTER
help
If you say yes here you get support for the National Semiconductor
- LM70, LM71, LM74 and Texas Instruments TMP121/TMP123 digital tempera-
- ture sensor chips.
+ LM70, LM71, LM74 and Texas Instruments TMP121/TMP123, TMP122/TMP124,
+ TMP125 digital temperature sensor chips.
This driver can also be built as a module. If so, the module
will be called lm70.
@@ -1288,6 +1304,7 @@ config SENSORS_LM80
config SENSORS_LM83
tristate "National Semiconductor LM83 and compatibles"
depends on I2C
+ select REGMAP
help
If you say yes here you get support for National Semiconductor
LM82 and LM83 sensor chips.
@@ -1979,6 +1996,17 @@ config SENSORS_TMP421
This driver can also be built as a module. If so, the module
will be called tmp421.
+config SENSORS_TMP464
+ tristate "Texas Instruments TMP464 and compatible"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for Texas Instruments TMP464
+ and TMP468 temperature sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called tmp464.
+
config SENSORS_TMP513
tristate "Texas Instruments TMP513 and compatibles"
depends on I2C
@@ -2252,16 +2280,31 @@ config SENSORS_ASUS_WMI
config SENSORS_ASUS_WMI_EC
tristate "ASUS WMI B550/X570"
- depends on ACPI_WMI
+ depends on ACPI_WMI && SENSORS_ASUS_EC=n
help
If you say yes here you get support for the ACPI embedded controller
hardware monitoring interface found in B550/X570 ASUS motherboards.
This driver will provide readings of fans, voltages and temperatures
through the system firmware.
+ This driver is deprecated in favor of the ASUS EC Sensors driver
+ which provides fully compatible output.
+
This driver can also be built as a module. If so, the module
will be called asus_wmi_ec_sensors.
+config SENSORS_ASUS_EC
+ tristate "ASUS EC Sensors"
+ depends on X86
+ help
+ If you say yes here you get support for the ACPI embedded controller
+ hardware monitoring interface found in ASUS motherboards. The driver
+ currently supports B550/X570 boards, although other ASUS boards might
+ provide this monitoring interface as well.
+
+ This driver can also be built as a module. If so, the module
+ will be called asus_ec_sensors.
+
endif # ACPI
endif # HWMON
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 185f946d698b..4ed138d0621f 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_HWMON_VID) += hwmon-vid.o
# ACPI drivers
obj-$(CONFIG_SENSORS_ACPI_POWER) += acpi_power_meter.o
obj-$(CONFIG_SENSORS_ATK0110) += asus_atk0110.o
+obj-$(CONFIG_SENSORS_ASUS_EC) += asus-ec-sensors.o
obj-$(CONFIG_SENSORS_ASUS_WMI) += asus_wmi_sensors.o
obj-$(CONFIG_SENSORS_ASUS_WMI_EC) += asus_wmi_ec_sensors.o
@@ -194,6 +195,7 @@ obj-$(CONFIG_SENSORS_TMP103) += tmp103.o
obj-$(CONFIG_SENSORS_TMP108) += tmp108.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
+obj-$(CONFIG_SENSORS_TMP464) += tmp464.o
obj-$(CONFIG_SENSORS_TMP513) += tmp513.o
obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress-hwmon.o
obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
diff --git a/drivers/hwmon/adcxx.c b/drivers/hwmon/adcxx.c
index e5bc5ce09f4e..de37bce24fa6 100644
--- a/drivers/hwmon/adcxx.c
+++ b/drivers/hwmon/adcxx.c
@@ -194,7 +194,7 @@ out_err:
return status;
}
-static int adcxx_remove(struct spi_device *spi)
+static void adcxx_remove(struct spi_device *spi)
{
struct adcxx *adc = spi_get_drvdata(spi);
int i;
@@ -205,8 +205,6 @@ static int adcxx_remove(struct spi_device *spi)
device_remove_file(&spi->dev, &ad_input[i].dev_attr);
mutex_unlock(&adc->lock);
-
- return 0;
}
static const struct spi_device_id adcxx_ids[] = {
diff --git a/drivers/hwmon/adt7310.c b/drivers/hwmon/adt7310.c
index c40cac16af68..1efc0bdcceab 100644
--- a/drivers/hwmon/adt7310.c
+++ b/drivers/hwmon/adt7310.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <asm/unaligned.h>
@@ -38,16 +39,13 @@ static const u8 adt7310_reg_table[] = {
#define AD7310_COMMAND(reg) (adt7310_reg_table[(reg)] << ADT7310_CMD_REG_OFFSET)
-static int adt7310_spi_read_word(struct device *dev, u8 reg)
+static int adt7310_spi_read_word(struct spi_device *spi, u8 reg)
{
- struct spi_device *spi = to_spi_device(dev);
-
return spi_w8r16be(spi, AD7310_COMMAND(reg) | ADT7310_CMD_READ);
}
-static int adt7310_spi_write_word(struct device *dev, u8 reg, u16 data)
+static int adt7310_spi_write_word(struct spi_device *spi, u8 reg, u16 data)
{
- struct spi_device *spi = to_spi_device(dev);
u8 buf[3];
buf[0] = AD7310_COMMAND(reg);
@@ -56,17 +54,13 @@ static int adt7310_spi_write_word(struct device *dev, u8 reg, u16 data)
return spi_write(spi, buf, sizeof(buf));
}
-static int adt7310_spi_read_byte(struct device *dev, u8 reg)
+static int adt7310_spi_read_byte(struct spi_device *spi, u8 reg)
{
- struct spi_device *spi = to_spi_device(dev);
-
return spi_w8r8(spi, AD7310_COMMAND(reg) | ADT7310_CMD_READ);
}
-static int adt7310_spi_write_byte(struct device *dev, u8 reg,
- u8 data)
+static int adt7310_spi_write_byte(struct spi_device *spi, u8 reg, u8 data)
{
- struct spi_device *spi = to_spi_device(dev);
u8 buf[2];
buf[0] = AD7310_COMMAND(reg);
@@ -75,25 +69,79 @@ static int adt7310_spi_write_byte(struct device *dev, u8 reg,
return spi_write(spi, buf, sizeof(buf));
}
-static const struct adt7x10_ops adt7310_spi_ops = {
- .read_word = adt7310_spi_read_word,
- .write_word = adt7310_spi_write_word,
- .read_byte = adt7310_spi_read_byte,
- .write_byte = adt7310_spi_write_byte,
-};
-
-static int adt7310_spi_probe(struct spi_device *spi)
+static bool adt7310_regmap_is_volatile(struct device *dev, unsigned int reg)
{
- return adt7x10_probe(&spi->dev, spi_get_device_id(spi)->name, spi->irq,
- &adt7310_spi_ops);
+ switch (reg) {
+ case ADT7X10_TEMPERATURE:
+ case ADT7X10_STATUS:
+ return true;
+ default:
+ return false;
+ }
}
-static int adt7310_spi_remove(struct spi_device *spi)
+static int adt7310_reg_read(void *context, unsigned int reg, unsigned int *val)
{
- adt7x10_remove(&spi->dev, spi->irq);
+ struct spi_device *spi = context;
+ int regval;
+
+ switch (reg) {
+ case ADT7X10_TEMPERATURE:
+ case ADT7X10_T_ALARM_HIGH:
+ case ADT7X10_T_ALARM_LOW:
+ case ADT7X10_T_CRIT:
+ regval = adt7310_spi_read_word(spi, reg);
+ break;
+ default:
+ regval = adt7310_spi_read_byte(spi, reg);
+ break;
+ }
+ if (regval < 0)
+ return regval;
+ *val = regval;
return 0;
}
+static int adt7310_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct spi_device *spi = context;
+ int ret;
+
+ switch (reg) {
+ case ADT7X10_TEMPERATURE:
+ case ADT7X10_T_ALARM_HIGH:
+ case ADT7X10_T_ALARM_LOW:
+ case ADT7X10_T_CRIT:
+ ret = adt7310_spi_write_word(spi, reg, val);
+ break;
+ default:
+ ret = adt7310_spi_write_byte(spi, reg, val);
+ break;
+ }
+ return ret;
+}
+
+static const struct regmap_config adt7310_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = adt7310_regmap_is_volatile,
+ .reg_read = adt7310_reg_read,
+ .reg_write = adt7310_reg_write,
+};
+
+static int adt7310_spi_probe(struct spi_device *spi)
+{
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init(&spi->dev, NULL, spi, &adt7310_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return adt7x10_probe(&spi->dev, spi_get_device_id(spi)->name, spi->irq,
+ regmap);
+}
+
static const struct spi_device_id adt7310_id[] = {
{ "adt7310", 0 },
{ "adt7320", 0 },
@@ -107,7 +155,6 @@ static struct spi_driver adt7310_driver = {
.pm = ADT7X10_DEV_PM_OPS,
},
.probe = adt7310_spi_probe,
- .remove = adt7310_spi_remove,
.id_table = adt7310_id,
};
module_spi_driver(adt7310_driver);
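Both the ADT7310 (SPI) and ADT7410 (I2C) conversions route register access through a "no-bus" regmap: instead of a regmap_spi/regmap_i2c bus, the config supplies reg_read/reg_write callbacks, because these chips mix 8-bit and 16-bit registers behind one command interface. The general shape, hedged (the my_* names stand in for the per-bus helpers):

    static const struct regmap_config my_regmap_config = {
        .reg_bits = 8,
        .val_bits = 16,
        .cache_type = REGCACHE_RBTREE,
        .volatile_reg = my_is_volatile, /* never cache temp/status */
        .reg_read = my_reg_read,        /* choose 8- vs 16-bit access */
        .reg_write = my_reg_write,
    };

    /* bus argument is NULL: regmap invokes the callbacks directly */
    regmap = devm_regmap_init(dev, NULL, bus_context, &my_regmap_config);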
diff --git a/drivers/hwmon/adt7410.c b/drivers/hwmon/adt7410.c
index 973db057427b..aede5baca7b9 100644
--- a/drivers/hwmon/adt7410.c
+++ b/drivers/hwmon/adt7410.c
@@ -9,49 +9,82 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
+#include <linux/regmap.h>
#include "adt7x10.h"
-static int adt7410_i2c_read_word(struct device *dev, u8 reg)
+static bool adt7410_regmap_is_volatile(struct device *dev, unsigned int reg)
{
- return i2c_smbus_read_word_swapped(to_i2c_client(dev), reg);
+ switch (reg) {
+ case ADT7X10_TEMPERATURE:
+ case ADT7X10_STATUS:
+ return true;
+ default:
+ return false;
+ }
}
-static int adt7410_i2c_write_word(struct device *dev, u8 reg, u16 data)
+static int adt7410_reg_read(void *context, unsigned int reg, unsigned int *val)
{
- return i2c_smbus_write_word_swapped(to_i2c_client(dev), reg, data);
-}
+ struct i2c_client *client = context;
+ int regval;
-static int adt7410_i2c_read_byte(struct device *dev, u8 reg)
-{
- return i2c_smbus_read_byte_data(to_i2c_client(dev), reg);
+ switch (reg) {
+ case ADT7X10_TEMPERATURE:
+ case ADT7X10_T_ALARM_HIGH:
+ case ADT7X10_T_ALARM_LOW:
+ case ADT7X10_T_CRIT:
+ regval = i2c_smbus_read_word_swapped(client, reg);
+ break;
+ default:
+ regval = i2c_smbus_read_byte_data(client, reg);
+ break;
+ }
+ if (regval < 0)
+ return regval;
+ *val = regval;
+ return 0;
}
-static int adt7410_i2c_write_byte(struct device *dev, u8 reg, u8 data)
+static int adt7410_reg_write(void *context, unsigned int reg, unsigned int val)
{
- return i2c_smbus_write_byte_data(to_i2c_client(dev), reg, data);
+ struct i2c_client *client = context;
+ int ret;
+
+ switch (reg) {
+ case ADT7X10_TEMPERATURE:
+ case ADT7X10_T_ALARM_HIGH:
+ case ADT7X10_T_ALARM_LOW:
+ case ADT7X10_T_CRIT:
+ ret = i2c_smbus_write_word_swapped(client, reg, val);
+ break;
+ default:
+ ret = i2c_smbus_write_byte_data(client, reg, val);
+ break;
+ }
+ return ret;
}
-static const struct adt7x10_ops adt7410_i2c_ops = {
- .read_word = adt7410_i2c_read_word,
- .write_word = adt7410_i2c_write_word,
- .read_byte = adt7410_i2c_read_byte,
- .write_byte = adt7410_i2c_write_byte,
+static const struct regmap_config adt7410_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = ADT7X10_ID,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = adt7410_regmap_is_volatile,
+ .reg_read = adt7410_reg_read,
+ .reg_write = adt7410_reg_write,
};
static int adt7410_i2c_probe(struct i2c_client *client)
{
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
+ struct regmap *regmap;
- return adt7x10_probe(&client->dev, NULL, client->irq, &adt7410_i2c_ops);
-}
+ regmap = devm_regmap_init(&client->dev, NULL, client,
+ &adt7410_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
-static int adt7410_i2c_remove(struct i2c_client *client)
-{
- adt7x10_remove(&client->dev, client->irq);
- return 0;
+ return adt7x10_probe(&client->dev, client->name, client->irq, regmap);
}
static const struct i2c_device_id adt7410_ids[] = {
@@ -68,7 +101,6 @@ static struct i2c_driver adt7410_driver = {
.pm = ADT7X10_DEV_PM_OPS,
},
.probe_new = adt7410_i2c_probe,
- .remove = adt7410_i2c_remove,
.id_table = adt7410_ids,
.address_list = I2C_ADDRS(0x48, 0x49, 0x4a, 0x4b),
};
diff --git a/drivers/hwmon/adt7x10.c b/drivers/hwmon/adt7x10.c
index e9d33aa78a19..ce54bffab2ec 100644
--- a/drivers/hwmon/adt7x10.c
+++ b/drivers/hwmon/adt7x10.c
@@ -8,16 +8,17 @@
* and adt7410.c from iio-staging by Sonic Zhang <sonic.zhang@analog.com>
*/
+#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/regmap.h>
#include "adt7x10.h"
@@ -53,80 +54,57 @@
/* Each client has this additional data */
struct adt7x10_data {
- const struct adt7x10_ops *ops;
- const char *name;
- struct device *hwmon_dev;
+ struct regmap *regmap;
struct mutex update_lock;
u8 config;
u8 oldconfig;
- bool valid; /* true if registers valid */
- unsigned long last_updated; /* In jiffies */
- s16 temp[4]; /* Register values,
- 0 = input
- 1 = high
- 2 = low
- 3 = critical */
- u8 hyst; /* hysteresis offset */
+ bool valid; /* true if temperature valid */
};
-static int adt7x10_read_byte(struct device *dev, u8 reg)
-{
- struct adt7x10_data *d = dev_get_drvdata(dev);
- return d->ops->read_byte(dev, reg);
-}
-
-static int adt7x10_write_byte(struct device *dev, u8 reg, u8 data)
-{
- struct adt7x10_data *d = dev_get_drvdata(dev);
- return d->ops->write_byte(dev, reg, data);
-}
-
-static int adt7x10_read_word(struct device *dev, u8 reg)
-{
- struct adt7x10_data *d = dev_get_drvdata(dev);
- return d->ops->read_word(dev, reg);
-}
-
-static int adt7x10_write_word(struct device *dev, u8 reg, u16 data)
-{
- struct adt7x10_data *d = dev_get_drvdata(dev);
- return d->ops->write_word(dev, reg, data);
-}
+enum {
+ adt7x10_temperature = 0,
+ adt7x10_t_alarm_high,
+ adt7x10_t_alarm_low,
+ adt7x10_t_crit,
+};
-static const u8 ADT7X10_REG_TEMP[4] = {
- ADT7X10_TEMPERATURE, /* input */
- ADT7X10_T_ALARM_HIGH, /* high */
- ADT7X10_T_ALARM_LOW, /* low */
- ADT7X10_T_CRIT, /* critical */
+static const u8 ADT7X10_REG_TEMP[] = {
+ [adt7x10_temperature] = ADT7X10_TEMPERATURE, /* input */
+ [adt7x10_t_alarm_high] = ADT7X10_T_ALARM_HIGH, /* high */
+ [adt7x10_t_alarm_low] = ADT7X10_T_ALARM_LOW, /* low */
+ [adt7x10_t_crit] = ADT7X10_T_CRIT, /* critical */
};
static irqreturn_t adt7x10_irq_handler(int irq, void *private)
{
struct device *dev = private;
- int status;
+ struct adt7x10_data *d = dev_get_drvdata(dev);
+ unsigned int status;
+ int ret;
- status = adt7x10_read_byte(dev, ADT7X10_STATUS);
- if (status < 0)
+ ret = regmap_read(d->regmap, ADT7X10_STATUS, &status);
+ if (ret < 0)
return IRQ_HANDLED;
if (status & ADT7X10_STAT_T_HIGH)
- sysfs_notify(&dev->kobj, NULL, "temp1_max_alarm");
+ hwmon_notify_event(dev, hwmon_temp, hwmon_temp_max_alarm, 0);
if (status & ADT7X10_STAT_T_LOW)
- sysfs_notify(&dev->kobj, NULL, "temp1_min_alarm");
+ hwmon_notify_event(dev, hwmon_temp, hwmon_temp_min_alarm, 0);
if (status & ADT7X10_STAT_T_CRIT)
- sysfs_notify(&dev->kobj, NULL, "temp1_crit_alarm");
+ hwmon_notify_event(dev, hwmon_temp, hwmon_temp_crit_alarm, 0);
return IRQ_HANDLED;
}
-static int adt7x10_temp_ready(struct device *dev)
+static int adt7x10_temp_ready(struct regmap *regmap)
{
- int i, status;
+ unsigned int status;
+ int i, ret;
for (i = 0; i < 6; i++) {
- status = adt7x10_read_byte(dev, ADT7X10_STATUS);
- if (status < 0)
- return status;
+ ret = regmap_read(regmap, ADT7X10_STATUS, &status);
+ if (ret < 0)
+ return ret;
if (!(status & ADT7X10_STAT_NOT_RDY))
return 0;
msleep(60);
@@ -134,71 +112,10 @@ static int adt7x10_temp_ready(struct device *dev)
return -ETIMEDOUT;
}
-static int adt7x10_update_temp(struct device *dev)
-{
- struct adt7x10_data *data = dev_get_drvdata(dev);
- int ret = 0;
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
- || !data->valid) {
- int temp;
-
- dev_dbg(dev, "Starting update\n");
-
- ret = adt7x10_temp_ready(dev); /* check for new value */
- if (ret)
- goto abort;
-
- temp = adt7x10_read_word(dev, ADT7X10_REG_TEMP[0]);
- if (temp < 0) {
- ret = temp;
- dev_dbg(dev, "Failed to read value: reg %d, error %d\n",
- ADT7X10_REG_TEMP[0], ret);
- goto abort;
- }
- data->temp[0] = temp;
- data->last_updated = jiffies;
- data->valid = true;
- }
-
-abort:
- mutex_unlock(&data->update_lock);
- return ret;
-}
-
-static int adt7x10_fill_cache(struct device *dev)
-{
- struct adt7x10_data *data = dev_get_drvdata(dev);
- int ret;
- int i;
-
- for (i = 1; i < ARRAY_SIZE(data->temp); i++) {
- ret = adt7x10_read_word(dev, ADT7X10_REG_TEMP[i]);
- if (ret < 0) {
- dev_dbg(dev, "Failed to read value: reg %d, error %d\n",
- ADT7X10_REG_TEMP[i], ret);
- return ret;
- }
- data->temp[i] = ret;
- }
-
- ret = adt7x10_read_byte(dev, ADT7X10_T_HYST);
- if (ret < 0) {
- dev_dbg(dev, "Failed to read value: reg %d, error %d\n",
- ADT7X10_T_HYST, ret);
- return ret;
- }
- data->hyst = ret;
-
- return 0;
-}
-
static s16 ADT7X10_TEMP_TO_REG(long temp)
{
return DIV_ROUND_CLOSEST(clamp_val(temp, ADT7X10_TEMP_MIN,
- ADT7X10_TEMP_MAX) * 128, 1000);
+ ADT7X10_TEMP_MAX) * 128, 1000);
}
static int ADT7X10_REG_TO_TEMP(struct adt7x10_data *data, s16 reg)
@@ -215,170 +132,233 @@ static int ADT7X10_REG_TO_TEMP(struct adt7x10_data *data, s16 reg)
/*-----------------------------------------------------------------------*/
-/* sysfs attributes for hwmon */
-
-static ssize_t adt7x10_temp_show(struct device *dev,
- struct device_attribute *da, char *buf)
+static int adt7x10_temp_read(struct adt7x10_data *data, int index, long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct adt7x10_data *data = dev_get_drvdata(dev);
-
-
- if (attr->index == 0) {
- int ret;
+ unsigned int regval;
+ int ret;
- ret = adt7x10_update_temp(dev);
- if (ret)
+ mutex_lock(&data->update_lock);
+ if (index == adt7x10_temperature && !data->valid) {
+ /* wait for valid temperature */
+ ret = adt7x10_temp_ready(data->regmap);
+ if (ret) {
+ mutex_unlock(&data->update_lock);
return ret;
+ }
+ data->valid = true;
}
+ mutex_unlock(&data->update_lock);
- return sprintf(buf, "%d\n", ADT7X10_REG_TO_TEMP(data,
- data->temp[attr->index]));
+ ret = regmap_read(data->regmap, ADT7X10_REG_TEMP[index], &regval);
+ if (ret)
+ return ret;
+
+ *val = ADT7X10_REG_TO_TEMP(data, regval);
+ return 0;
}
-static ssize_t adt7x10_temp_store(struct device *dev,
- struct device_attribute *da,
- const char *buf, size_t count)
+static int adt7x10_temp_write(struct adt7x10_data *data, int index, long temp)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct adt7x10_data *data = dev_get_drvdata(dev);
- int nr = attr->index;
- long temp;
int ret;
- ret = kstrtol(buf, 10, &temp);
- if (ret)
- return ret;
-
mutex_lock(&data->update_lock);
- data->temp[nr] = ADT7X10_TEMP_TO_REG(temp);
- ret = adt7x10_write_word(dev, ADT7X10_REG_TEMP[nr], data->temp[nr]);
- if (ret)
- count = ret;
+ ret = regmap_write(data->regmap, ADT7X10_REG_TEMP[index],
+ ADT7X10_TEMP_TO_REG(temp));
mutex_unlock(&data->update_lock);
- return count;
+ return ret;
}
-static ssize_t adt7x10_t_hyst_show(struct device *dev,
- struct device_attribute *da, char *buf)
+static int adt7x10_hyst_read(struct adt7x10_data *data, int index, long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct adt7x10_data *data = dev_get_drvdata(dev);
- int nr = attr->index;
- int hyst;
+ int hyst, temp, ret;
+
+ mutex_lock(&data->update_lock);
+ ret = regmap_read(data->regmap, ADT7X10_T_HYST, &hyst);
+ if (ret) {
+ mutex_unlock(&data->update_lock);
+ return ret;
+ }
+
+ ret = regmap_read(data->regmap, ADT7X10_REG_TEMP[index], &temp);
+ mutex_unlock(&data->update_lock);
+ if (ret)
+ return ret;
- hyst = (data->hyst & ADT7X10_T_HYST_MASK) * 1000;
+ hyst = (hyst & ADT7X10_T_HYST_MASK) * 1000;
/*
* hysteresis is stored as a 4 bit offset in the device, convert it
* to an absolute value
*/
- if (nr == 2) /* min has positive offset, others have negative */
+ /* min has positive offset, others have negative */
+ if (index == adt7x10_t_alarm_low)
hyst = -hyst;
- return sprintf(buf, "%d\n",
- ADT7X10_REG_TO_TEMP(data, data->temp[nr]) - hyst);
+
+ *val = ADT7X10_REG_TO_TEMP(data, temp) - hyst;
+ return 0;
}
-static ssize_t adt7x10_t_hyst_store(struct device *dev,
- struct device_attribute *da,
- const char *buf, size_t count)
+static int adt7x10_hyst_write(struct adt7x10_data *data, long hyst)
{
- struct adt7x10_data *data = dev_get_drvdata(dev);
+ unsigned int regval;
int limit, ret;
- long hyst;
- ret = kstrtol(buf, 10, &hyst);
- if (ret)
- return ret;
+ mutex_lock(&data->update_lock);
+
/* convert absolute hysteresis value to a 4 bit delta value */
- limit = ADT7X10_REG_TO_TEMP(data, data->temp[1]);
- hyst = clamp_val(hyst, ADT7X10_TEMP_MIN, ADT7X10_TEMP_MAX);
- data->hyst = clamp_val(DIV_ROUND_CLOSEST(limit - hyst, 1000),
- 0, ADT7X10_T_HYST_MASK);
- ret = adt7x10_write_byte(dev, ADT7X10_T_HYST, data->hyst);
- if (ret)
- return ret;
+ ret = regmap_read(data->regmap, ADT7X10_T_ALARM_HIGH, &regval);
+ if (ret < 0)
+ goto abort;
+
+ limit = ADT7X10_REG_TO_TEMP(data, regval);
- return count;
+ hyst = clamp_val(hyst, ADT7X10_TEMP_MIN, ADT7X10_TEMP_MAX);
+ regval = clamp_val(DIV_ROUND_CLOSEST(limit - hyst, 1000), 0,
+ ADT7X10_T_HYST_MASK);
+ ret = regmap_write(data->regmap, ADT7X10_T_HYST, regval);
+abort:
+ mutex_unlock(&data->update_lock);
+ return ret;
}
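/*
 * Editorial worked example: with temp1_max set to 70000 (70 degrees C)
 * and a requested hysteresis point of 65000, limit - hyst = 5000, so
 * DIV_ROUND_CLOSEST(5000, 1000) = 5 is written to ADT7X10_T_HYST, well
 * inside the 4-bit 0..15 range enforced by ADT7X10_T_HYST_MASK.
 */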
-static ssize_t adt7x10_alarm_show(struct device *dev,
- struct device_attribute *da, char *buf)
+static int adt7x10_alarm_read(struct adt7x10_data *data, int index, long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ unsigned int status;
int ret;
- ret = adt7x10_read_byte(dev, ADT7X10_STATUS);
+ ret = regmap_read(data->regmap, ADT7X10_STATUS, &status);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", !!(ret & attr->index));
+ *val = !!(status & index);
+
+ return 0;
+}
+
+static umode_t adt7x10_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (attr) {
+ case hwmon_temp_max:
+ case hwmon_temp_min:
+ case hwmon_temp_crit:
+ case hwmon_temp_max_hyst:
+ return 0644;
+ case hwmon_temp_input:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_min_hyst:
+ case hwmon_temp_crit_hyst:
+ return 0444;
+ default:
+ break;
+ }
+
+ return 0;
}
-static ssize_t name_show(struct device *dev, struct device_attribute *da,
- char *buf)
+static int adt7x10_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
{
struct adt7x10_data *data = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", data->name);
+ switch (attr) {
+ case hwmon_temp_input:
+ return adt7x10_temp_read(data, adt7x10_temperature, val);
+ case hwmon_temp_max:
+ return adt7x10_temp_read(data, adt7x10_t_alarm_high, val);
+ case hwmon_temp_min:
+ return adt7x10_temp_read(data, adt7x10_t_alarm_low, val);
+ case hwmon_temp_crit:
+ return adt7x10_temp_read(data, adt7x10_t_crit, val);
+ case hwmon_temp_max_hyst:
+ return adt7x10_hyst_read(data, adt7x10_t_alarm_high, val);
+ case hwmon_temp_min_hyst:
+ return adt7x10_hyst_read(data, adt7x10_t_alarm_low, val);
+ case hwmon_temp_crit_hyst:
+ return adt7x10_hyst_read(data, adt7x10_t_crit, val);
+ case hwmon_temp_min_alarm:
+ return adt7x10_alarm_read(data, ADT7X10_STAT_T_LOW, val);
+ case hwmon_temp_max_alarm:
+ return adt7x10_alarm_read(data, ADT7X10_STAT_T_HIGH, val);
+ case hwmon_temp_crit_alarm:
+ return adt7x10_alarm_read(data, ADT7X10_STAT_T_CRIT, val);
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static SENSOR_DEVICE_ATTR_RO(temp1_input, adt7x10_temp, 0);
-static SENSOR_DEVICE_ATTR_RW(temp1_max, adt7x10_temp, 1);
-static SENSOR_DEVICE_ATTR_RW(temp1_min, adt7x10_temp, 2);
-static SENSOR_DEVICE_ATTR_RW(temp1_crit, adt7x10_temp, 3);
-static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, adt7x10_t_hyst, 1);
-static SENSOR_DEVICE_ATTR_RO(temp1_min_hyst, adt7x10_t_hyst, 2);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit_hyst, adt7x10_t_hyst, 3);
-static SENSOR_DEVICE_ATTR_RO(temp1_min_alarm, adt7x10_alarm,
- ADT7X10_STAT_T_LOW);
-static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, adt7x10_alarm,
- ADT7X10_STAT_T_HIGH);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, adt7x10_alarm,
- ADT7X10_STAT_T_CRIT);
-static DEVICE_ATTR_RO(name);
-
-static struct attribute *adt7x10_attributes[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp1_min.dev_attr.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_min_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
- NULL
+static int adt7x10_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct adt7x10_data *data = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_temp_max:
+ return adt7x10_temp_write(data, adt7x10_t_alarm_high, val);
+ case hwmon_temp_min:
+ return adt7x10_temp_write(data, adt7x10_t_alarm_low, val);
+ case hwmon_temp_crit:
+ return adt7x10_temp_write(data, adt7x10_t_crit, val);
+ case hwmon_temp_max_hyst:
+ return adt7x10_hyst_write(data, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_channel_info *adt7x10_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_CRIT | HWMON_T_MAX_HYST | HWMON_T_MIN_HYST |
+ HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM),
+ NULL,
+};
+
+static const struct hwmon_ops adt7x10_hwmon_ops = {
+ .is_visible = adt7x10_is_visible,
+ .read = adt7x10_read,
+ .write = adt7x10_write,
};
-static const struct attribute_group adt7x10_group = {
- .attrs = adt7x10_attributes,
+static const struct hwmon_chip_info adt7x10_chip_info = {
+ .ops = &adt7x10_hwmon_ops,
+ .info = adt7x10_info,
};
+static void adt7x10_restore_config(void *private)
+{
+ struct adt7x10_data *data = private;
+
+ regmap_write(data->regmap, ADT7X10_CONFIG, data->oldconfig);
+}
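+/*
+ * Editorial note: registering the restore action via
+ * devm_add_action_or_reset() replaces the old adt7x10_remove() path.
+ * Devres unwinds in reverse registration order, so the hwmon device and
+ * IRQ registered below are torn down before the original configuration
+ * is written back.
+ */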
+
int adt7x10_probe(struct device *dev, const char *name, int irq,
- const struct adt7x10_ops *ops)
+ struct regmap *regmap)
{
struct adt7x10_data *data;
+ unsigned int config;
+ struct device *hdev;
int ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->ops = ops;
- data->name = name;
+ data->regmap = regmap;
dev_set_drvdata(dev, data);
mutex_init(&data->update_lock);
/* configure as specified */
- ret = adt7x10_read_byte(dev, ADT7X10_CONFIG);
+ ret = regmap_read(regmap, ADT7X10_CONFIG, &config);
if (ret < 0) {
dev_dbg(dev, "Can't read config? %d\n", ret);
return ret;
}
- data->oldconfig = ret;
+ data->oldconfig = config;
/*
	 * Set to 16-bit resolution, continuous conversion and comparator mode.
@@ -389,92 +369,49 @@ int adt7x10_probe(struct device *dev, const char *name, int irq,
data->config |= ADT7X10_FULL | ADT7X10_RESOLUTION | ADT7X10_EVENT_MODE;
if (data->config != data->oldconfig) {
- ret = adt7x10_write_byte(dev, ADT7X10_CONFIG, data->config);
+ ret = regmap_write(regmap, ADT7X10_CONFIG, data->config);
if (ret)
return ret;
- }
- dev_dbg(dev, "Config %02x\n", data->config);
-
- ret = adt7x10_fill_cache(dev);
- if (ret)
- goto exit_restore;
-
- /* Register sysfs hooks */
- ret = sysfs_create_group(&dev->kobj, &adt7x10_group);
- if (ret)
- goto exit_restore;
-
- /*
- * The I2C device will already have it's own 'name' attribute, but for
- * the SPI device we need to register it. name will only be non NULL if
- * the device doesn't register the 'name' attribute on its own.
- */
- if (name) {
- ret = device_create_file(dev, &dev_attr_name);
+ ret = devm_add_action_or_reset(dev, adt7x10_restore_config, data);
if (ret)
- goto exit_remove;
+ return ret;
}
+ dev_dbg(dev, "Config %02x\n", data->config);
- data->hwmon_dev = hwmon_device_register(dev);
- if (IS_ERR(data->hwmon_dev)) {
- ret = PTR_ERR(data->hwmon_dev);
- goto exit_remove_name;
- }
+ hdev = devm_hwmon_device_register_with_info(dev, name, data,
+ &adt7x10_chip_info, NULL);
+ if (IS_ERR(hdev))
+ return PTR_ERR(hdev);
if (irq > 0) {
- ret = request_threaded_irq(irq, NULL, adt7x10_irq_handler,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- dev_name(dev), dev);
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ adt7x10_irq_handler,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ dev_name(dev), hdev);
if (ret)
- goto exit_hwmon_device_unregister;
+ return ret;
}
return 0;
-
-exit_hwmon_device_unregister:
- hwmon_device_unregister(data->hwmon_dev);
-exit_remove_name:
- if (name)
- device_remove_file(dev, &dev_attr_name);
-exit_remove:
- sysfs_remove_group(&dev->kobj, &adt7x10_group);
-exit_restore:
- adt7x10_write_byte(dev, ADT7X10_CONFIG, data->oldconfig);
- return ret;
}
EXPORT_SYMBOL_GPL(adt7x10_probe);
-void adt7x10_remove(struct device *dev, int irq)
-{
- struct adt7x10_data *data = dev_get_drvdata(dev);
-
- if (irq > 0)
- free_irq(irq, dev);
-
- hwmon_device_unregister(data->hwmon_dev);
- if (data->name)
- device_remove_file(dev, &dev_attr_name);
- sysfs_remove_group(&dev->kobj, &adt7x10_group);
- if (data->oldconfig != data->config)
- adt7x10_write_byte(dev, ADT7X10_CONFIG, data->oldconfig);
-}
-EXPORT_SYMBOL_GPL(adt7x10_remove);
-
#ifdef CONFIG_PM_SLEEP
static int adt7x10_suspend(struct device *dev)
{
struct adt7x10_data *data = dev_get_drvdata(dev);
- return adt7x10_write_byte(dev, ADT7X10_CONFIG,
- data->config | ADT7X10_PD);
+ return regmap_write(data->regmap, ADT7X10_CONFIG,
+ data->config | ADT7X10_PD);
}
static int adt7x10_resume(struct device *dev)
{
struct adt7x10_data *data = dev_get_drvdata(dev);
- return adt7x10_write_byte(dev, ADT7X10_CONFIG, data->config);
+ return regmap_write(data->regmap, ADT7X10_CONFIG, data->config);
}
SIMPLE_DEV_PM_OPS(adt7x10_dev_pm_ops, adt7x10_suspend, adt7x10_resume);
diff --git a/drivers/hwmon/adt7x10.h b/drivers/hwmon/adt7x10.h
index a1ae682eb32e..ba22c32c8355 100644
--- a/drivers/hwmon/adt7x10.h
+++ b/drivers/hwmon/adt7x10.h
@@ -17,16 +17,8 @@
struct device;
-struct adt7x10_ops {
- int (*read_byte)(struct device *, u8 reg);
- int (*write_byte)(struct device *, u8 reg, u8 data);
- int (*read_word)(struct device *, u8 reg);
- int (*write_word)(struct device *, u8 reg, u16 data);
-};
-
int adt7x10_probe(struct device *dev, const char *name, int irq,
- const struct adt7x10_ops *ops);
-void adt7x10_remove(struct device *dev, int irq);
+ struct regmap *regmap);
#ifdef CONFIG_PM_SLEEP
extern const struct dev_pm_ops adt7x10_dev_pm_ops;
diff --git a/drivers/hwmon/aquacomputer_d5next.c b/drivers/hwmon/aquacomputer_d5next.c
index fb9341a53051..525809cf7c95 100644
--- a/drivers/hwmon/aquacomputer_d5next.c
+++ b/drivers/hwmon/aquacomputer_d5next.c
@@ -1,32 +1,41 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * hwmon driver for Aquacomputer D5 Next watercooling pump
+ * hwmon driver for Aquacomputer devices (D5 Next, Farbwerk 360)
*
- * The D5 Next sends HID reports (with ID 0x01) every second to report sensor values
- * (coolant temperature, pump and fan speed, voltage, current and power). It responds to
- * Get_Report requests, but returns a dummy value of no use.
+ * Aquacomputer devices send HID reports (with ID 0x01) every second to report
+ * sensor values.
*
* Copyright 2021 Aleksa Savic <savicaleksa83@gmail.com>
*/
-#include <asm/unaligned.h>
#include <linux/debugfs.h>
#include <linux/hid.h>
#include <linux/hwmon.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/seq_file.h>
+#include <asm/unaligned.h>
-#define DRIVER_NAME "aquacomputer-d5next"
+#define USB_VENDOR_ID_AQUACOMPUTER 0x0c70
+#define USB_PRODUCT_ID_D5NEXT 0xf00e
+#define USB_PRODUCT_ID_FARBWERK360 0xf010
-#define D5NEXT_STATUS_REPORT_ID 0x01
-#define D5NEXT_STATUS_UPDATE_INTERVAL (2 * HZ) /* In seconds */
+enum kinds { d5next, farbwerk360 };
-/* Register offsets for the D5 Next pump */
+static const char *const aqc_device_names[] = {
+ [d5next] = "d5next",
+ [farbwerk360] = "farbwerk360"
+};
-#define D5NEXT_SERIAL_FIRST_PART 3
-#define D5NEXT_SERIAL_SECOND_PART 5
-#define D5NEXT_FIRMWARE_VERSION 13
+#define DRIVER_NAME "aquacomputer_d5next"
+
+#define STATUS_REPORT_ID 0x01
+#define STATUS_UPDATE_INTERVAL		(2 * HZ)	/* 2 s, in jiffies */
+#define SERIAL_FIRST_PART 3
+#define SERIAL_SECOND_PART 5
+#define FIRMWARE_VERSION 13
+
+/* Register offsets for the D5 Next pump */
#define D5NEXT_POWER_CYCLES 24
#define D5NEXT_COOLANT_TEMP 87
@@ -44,76 +53,118 @@
#define D5NEXT_PUMP_CURRENT 112
#define D5NEXT_FAN_CURRENT 99
-/* Labels for provided values */
+/* Register offsets for the Farbwerk 360 RGB controller */
+#define FARBWERK360_NUM_SENSORS 4
+#define FARBWERK360_SENSOR_START 0x32
+#define FARBWERK360_SENSOR_SIZE 0x02
+#define FARBWERK360_SENSOR_DISCONNECTED 0x7FFF
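+/*
+ * Editorial worked example: Farbwerk 360 sensor i sits at report offset
+ * FARBWERK360_SENSOR_START + i * FARBWERK360_SENSOR_SIZE, i.e. 0x32,
+ * 0x34, 0x36 and 0x38 for the four channels; a raw big-endian reading
+ * of 0x7FFF flags the channel as disconnected and is surfaced to
+ * userspace as -ENODATA.
+ */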
-#define L_COOLANT_TEMP "Coolant temp"
+/* Labels for D5 Next */
+#define L_D5NEXT_COOLANT_TEMP "Coolant temp"
-#define L_PUMP_SPEED "Pump speed"
-#define L_FAN_SPEED "Fan speed"
-
-#define L_PUMP_POWER "Pump power"
-#define L_FAN_POWER "Fan power"
-
-#define L_PUMP_VOLTAGE "Pump voltage"
-#define L_FAN_VOLTAGE "Fan voltage"
-#define L_5V_VOLTAGE "+5V voltage"
-
-#define L_PUMP_CURRENT "Pump current"
-#define L_FAN_CURRENT "Fan current"
+static const char *const label_d5next_speeds[] = {
+ "Pump speed",
+ "Fan speed"
+};
-static const char *const label_speeds[] = {
- L_PUMP_SPEED,
- L_FAN_SPEED,
+static const char *const label_d5next_power[] = {
+ "Pump power",
+ "Fan power"
};
-static const char *const label_power[] = {
- L_PUMP_POWER,
- L_FAN_POWER,
+static const char *const label_d5next_voltages[] = {
+ "Pump voltage",
+ "Fan voltage",
+ "+5V voltage"
};
-static const char *const label_voltages[] = {
- L_PUMP_VOLTAGE,
- L_FAN_VOLTAGE,
- L_5V_VOLTAGE,
+static const char *const label_d5next_current[] = {
+ "Pump current",
+ "Fan current"
};
-static const char *const label_current[] = {
- L_PUMP_CURRENT,
- L_FAN_CURRENT,
+/* Labels for Farbwerk 360 temperature sensors */
+static const char *const label_temp_sensors[] = {
+ "Sensor 1",
+ "Sensor 2",
+ "Sensor 3",
+ "Sensor 4"
};
-struct d5next_data {
+struct aqc_data {
struct hid_device *hdev;
struct device *hwmon_dev;
struct dentry *debugfs;
- s32 temp_input;
+ enum kinds kind;
+ const char *name;
+
+ /* General info, same across all devices */
+ u32 serial_number[2];
+ u16 firmware_version;
+
+ /* D5 Next specific - how many times the device was powered on */
+ u32 power_cycles;
+
+ /* Sensor values */
+ s32 temp_input[4];
u16 speed_input[2];
u32 power_input[2];
u16 voltage_input[3];
u16 current_input[2];
- u32 serial_number[2];
- u16 firmware_version;
- u32 power_cycles; /* How many times the device was powered on */
+
unsigned long updated;
};
-static umode_t d5next_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
- int channel)
+static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
+ int channel)
{
- return 0444;
+ const struct aqc_data *priv = data;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (priv->kind) {
+ case d5next:
+ if (channel == 0)
+ return 0444;
+ break;
+ case farbwerk360:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_fan:
+ case hwmon_power:
+ case hwmon_in:
+ case hwmon_curr:
+ switch (priv->kind) {
+ case d5next:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
}
-static int d5next_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
- long *val)
+static int aqc_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
{
- struct d5next_data *priv = dev_get_drvdata(dev);
+ struct aqc_data *priv = dev_get_drvdata(dev);
- if (time_after(jiffies, priv->updated + D5NEXT_STATUS_UPDATE_INTERVAL))
+ if (time_after(jiffies, priv->updated + STATUS_UPDATE_INTERVAL))
return -ENODATA;
switch (type) {
case hwmon_temp:
- *val = priv->temp_input;
+ if (priv->temp_input[channel] == -ENODATA)
+ return -ENODATA;
+
+ *val = priv->temp_input[channel];
break;
case hwmon_fan:
*val = priv->speed_input[channel];
@@ -134,24 +185,59 @@ static int d5next_read(struct device *dev, enum hwmon_sensor_types type, u32 att
return 0;
}
-static int d5next_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
- int channel, const char **str)
+static int aqc_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
{
+ struct aqc_data *priv = dev_get_drvdata(dev);
+
switch (type) {
case hwmon_temp:
- *str = L_COOLANT_TEMP;
+ switch (priv->kind) {
+ case d5next:
+ *str = L_D5NEXT_COOLANT_TEMP;
+ break;
+ case farbwerk360:
+ *str = label_temp_sensors[channel];
+ break;
+ default:
+ break;
+ }
break;
case hwmon_fan:
- *str = label_speeds[channel];
+ switch (priv->kind) {
+ case d5next:
+ *str = label_d5next_speeds[channel];
+ break;
+ default:
+ break;
+ }
break;
case hwmon_power:
- *str = label_power[channel];
+ switch (priv->kind) {
+ case d5next:
+ *str = label_d5next_power[channel];
+ break;
+ default:
+ break;
+ }
break;
case hwmon_in:
- *str = label_voltages[channel];
+ switch (priv->kind) {
+ case d5next:
+ *str = label_d5next_voltages[channel];
+ break;
+ default:
+ break;
+ }
break;
case hwmon_curr:
- *str = label_current[channel];
+ switch (priv->kind) {
+ case d5next:
+ *str = label_d5next_current[channel];
+ break;
+ default:
+ break;
+ }
break;
default:
return -EOPNOTSUPP;
@@ -160,60 +246,89 @@ static int d5next_read_string(struct device *dev, enum hwmon_sensor_types type,
return 0;
}
-static const struct hwmon_ops d5next_hwmon_ops = {
- .is_visible = d5next_is_visible,
- .read = d5next_read,
- .read_string = d5next_read_string,
+static const struct hwmon_ops aqc_hwmon_ops = {
+ .is_visible = aqc_is_visible,
+ .read = aqc_read,
+ .read_string = aqc_read_string,
};
-static const struct hwmon_channel_info *d5next_info[] = {
- HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_LABEL),
- HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT | HWMON_F_LABEL, HWMON_F_INPUT | HWMON_F_LABEL),
- HWMON_CHANNEL_INFO(power, HWMON_P_INPUT | HWMON_P_LABEL, HWMON_P_INPUT | HWMON_P_LABEL),
- HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL,
+static const struct hwmon_channel_info *aqc_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT | HWMON_P_LABEL,
+ HWMON_P_INPUT | HWMON_P_LABEL),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
HWMON_I_INPUT | HWMON_I_LABEL),
- HWMON_CHANNEL_INFO(curr, HWMON_C_INPUT | HWMON_C_LABEL, HWMON_C_INPUT | HWMON_C_LABEL),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL),
NULL
};
-static const struct hwmon_chip_info d5next_chip_info = {
- .ops = &d5next_hwmon_ops,
- .info = d5next_info,
+static const struct hwmon_chip_info aqc_chip_info = {
+ .ops = &aqc_hwmon_ops,
+ .info = aqc_info,
};
-static int d5next_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size)
+static int aqc_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data,
+ int size)
{
- struct d5next_data *priv;
+ int i, sensor_value;
+ struct aqc_data *priv;
- if (report->id != D5NEXT_STATUS_REPORT_ID)
+ if (report->id != STATUS_REPORT_ID)
return 0;
priv = hid_get_drvdata(hdev);
/* Info provided with every report */
-
- priv->serial_number[0] = get_unaligned_be16(data + D5NEXT_SERIAL_FIRST_PART);
- priv->serial_number[1] = get_unaligned_be16(data + D5NEXT_SERIAL_SECOND_PART);
-
- priv->firmware_version = get_unaligned_be16(data + D5NEXT_FIRMWARE_VERSION);
- priv->power_cycles = get_unaligned_be32(data + D5NEXT_POWER_CYCLES);
+ priv->serial_number[0] = get_unaligned_be16(data + SERIAL_FIRST_PART);
+ priv->serial_number[1] = get_unaligned_be16(data + SERIAL_SECOND_PART);
+ priv->firmware_version = get_unaligned_be16(data + FIRMWARE_VERSION);
/* Sensor readings */
+ switch (priv->kind) {
+ case d5next:
+ priv->power_cycles = get_unaligned_be32(data + D5NEXT_POWER_CYCLES);
- priv->temp_input = get_unaligned_be16(data + D5NEXT_COOLANT_TEMP) * 10;
+ priv->temp_input[0] = get_unaligned_be16(data + D5NEXT_COOLANT_TEMP) * 10;
- priv->speed_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_SPEED);
- priv->speed_input[1] = get_unaligned_be16(data + D5NEXT_FAN_SPEED);
+ priv->speed_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_SPEED);
+ priv->speed_input[1] = get_unaligned_be16(data + D5NEXT_FAN_SPEED);
- priv->power_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_POWER) * 10000;
- priv->power_input[1] = get_unaligned_be16(data + D5NEXT_FAN_POWER) * 10000;
+ priv->power_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_POWER) * 10000;
+ priv->power_input[1] = get_unaligned_be16(data + D5NEXT_FAN_POWER) * 10000;
- priv->voltage_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_VOLTAGE) * 10;
- priv->voltage_input[1] = get_unaligned_be16(data + D5NEXT_FAN_VOLTAGE) * 10;
- priv->voltage_input[2] = get_unaligned_be16(data + D5NEXT_5V_VOLTAGE) * 10;
+ priv->voltage_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_VOLTAGE) * 10;
+ priv->voltage_input[1] = get_unaligned_be16(data + D5NEXT_FAN_VOLTAGE) * 10;
+ priv->voltage_input[2] = get_unaligned_be16(data + D5NEXT_5V_VOLTAGE) * 10;
- priv->current_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_CURRENT);
- priv->current_input[1] = get_unaligned_be16(data + D5NEXT_FAN_CURRENT);
+ priv->current_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_CURRENT);
+ priv->current_input[1] = get_unaligned_be16(data + D5NEXT_FAN_CURRENT);
+ break;
+ case farbwerk360:
+ /* Temperature sensor readings */
+ for (i = 0; i < FARBWERK360_NUM_SENSORS; i++) {
+ sensor_value = get_unaligned_be16(data + FARBWERK360_SENSOR_START +
+ i * FARBWERK360_SENSOR_SIZE);
+ if (sensor_value == FARBWERK360_SENSOR_DISCONNECTED)
+ priv->temp_input[i] = -ENODATA;
+ else
+ priv->temp_input[i] = sensor_value * 10;
+ }
+ break;
+ default:
+ break;
+ }
priv->updated = jiffies;
@@ -224,7 +339,7 @@ static int d5next_raw_event(struct hid_device *hdev, struct hid_report *report,
static int serial_number_show(struct seq_file *seqf, void *unused)
{
- struct d5next_data *priv = seqf->private;
+ struct aqc_data *priv = seqf->private;
seq_printf(seqf, "%05u-%05u\n", priv->serial_number[0], priv->serial_number[1]);
@@ -234,7 +349,7 @@ DEFINE_SHOW_ATTRIBUTE(serial_number);
static int firmware_version_show(struct seq_file *seqf, void *unused)
{
- struct d5next_data *priv = seqf->private;
+ struct aqc_data *priv = seqf->private;
seq_printf(seqf, "%u\n", priv->firmware_version);
@@ -244,7 +359,7 @@ DEFINE_SHOW_ATTRIBUTE(firmware_version);
static int power_cycles_show(struct seq_file *seqf, void *unused)
{
- struct d5next_data *priv = seqf->private;
+ struct aqc_data *priv = seqf->private;
seq_printf(seqf, "%u\n", priv->power_cycles);
@@ -252,29 +367,32 @@ static int power_cycles_show(struct seq_file *seqf, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(power_cycles);
-static void d5next_debugfs_init(struct d5next_data *priv)
+static void aqc_debugfs_init(struct aqc_data *priv)
{
- char name[32];
+ char name[64];
- scnprintf(name, sizeof(name), "%s-%s", DRIVER_NAME, dev_name(&priv->hdev->dev));
+ scnprintf(name, sizeof(name), "%s_%s-%s", "aquacomputer", priv->name,
+ dev_name(&priv->hdev->dev));
priv->debugfs = debugfs_create_dir(name, NULL);
debugfs_create_file("serial_number", 0444, priv->debugfs, priv, &serial_number_fops);
debugfs_create_file("firmware_version", 0444, priv->debugfs, priv, &firmware_version_fops);
- debugfs_create_file("power_cycles", 0444, priv->debugfs, priv, &power_cycles_fops);
+
+ if (priv->kind == d5next)
+ debugfs_create_file("power_cycles", 0444, priv->debugfs, priv, &power_cycles_fops);
}
#else
-static void d5next_debugfs_init(struct d5next_data *priv)
+static void aqc_debugfs_init(struct aqc_data *priv)
{
}
#endif
-static int d5next_probe(struct hid_device *hdev, const struct hid_device_id *id)
+static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
- struct d5next_data *priv;
+ struct aqc_data *priv;
int ret;
priv = devm_kzalloc(&hdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -284,7 +402,7 @@ static int d5next_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->hdev = hdev;
hid_set_drvdata(hdev, priv);
- priv->updated = jiffies - D5NEXT_STATUS_UPDATE_INTERVAL;
+ priv->updated = jiffies - STATUS_UPDATE_INTERVAL;
ret = hid_parse(hdev);
if (ret)
@@ -298,15 +416,28 @@ static int d5next_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret)
goto fail_and_stop;
- priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, "d5next", priv,
- &d5next_chip_info, NULL);
+ switch (hdev->product) {
+ case USB_PRODUCT_ID_D5NEXT:
+ priv->kind = d5next;
+ break;
+ case USB_PRODUCT_ID_FARBWERK360:
+ priv->kind = farbwerk360;
+ break;
+ default:
+ break;
+ }
+
+ priv->name = aqc_device_names[priv->kind];
+
+ priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, priv->name, priv,
+ &aqc_chip_info, NULL);
if (IS_ERR(priv->hwmon_dev)) {
ret = PTR_ERR(priv->hwmon_dev);
goto fail_and_close;
}
- d5next_debugfs_init(priv);
+ aqc_debugfs_init(priv);
return 0;
@@ -317,9 +448,9 @@ fail_and_stop:
return ret;
}
-static void d5next_remove(struct hid_device *hdev)
+static void aqc_remove(struct hid_device *hdev)
{
- struct d5next_data *priv = hid_get_drvdata(hdev);
+ struct aqc_data *priv = hid_get_drvdata(hdev);
debugfs_remove_recursive(priv->debugfs);
hwmon_device_unregister(priv->hwmon_dev);
@@ -328,36 +459,36 @@ static void d5next_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
}
-static const struct hid_device_id d5next_table[] = {
- { HID_USB_DEVICE(0x0c70, 0xf00e) }, /* Aquacomputer D5 Next */
- {},
+static const struct hid_device_id aqc_table[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_D5NEXT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_FARBWERK360) },
+ { }
};
-MODULE_DEVICE_TABLE(hid, d5next_table);
+MODULE_DEVICE_TABLE(hid, aqc_table);
-static struct hid_driver d5next_driver = {
+static struct hid_driver aqc_driver = {
.name = DRIVER_NAME,
- .id_table = d5next_table,
- .probe = d5next_probe,
- .remove = d5next_remove,
- .raw_event = d5next_raw_event,
+ .id_table = aqc_table,
+ .probe = aqc_probe,
+ .remove = aqc_remove,
+ .raw_event = aqc_raw_event,
};
-static int __init d5next_init(void)
+static int __init aqc_init(void)
{
- return hid_register_driver(&d5next_driver);
+ return hid_register_driver(&aqc_driver);
}
-static void __exit d5next_exit(void)
+static void __exit aqc_exit(void)
{
- hid_unregister_driver(&d5next_driver);
+ hid_unregister_driver(&aqc_driver);
}
/* Initialize after the HID bus so this driver cannot be loaded before it */
-
-late_initcall(d5next_init);
-module_exit(d5next_exit);
+late_initcall(aqc_init);
+module_exit(aqc_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Aleksa Savic <savicaleksa83@gmail.com>");
-MODULE_DESCRIPTION("Hwmon driver for Aquacomputer D5 Next pump");
+MODULE_DESCRIPTION("Hwmon driver for Aquacomputer devices");
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
new file mode 100644
index 000000000000..b5cf0136360c
--- /dev/null
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -0,0 +1,716 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * HWMON driver for ASUS motherboards that publish some sensor values
+ * via the embedded controller registers.
+ *
+ * Copyright (C) 2021 Eugene Shalygin <eugene.shalygin@gmail.com>
+ *
+ * EC provides:
+ * - Chipset temperature
+ * - CPU temperature
+ * - Motherboard temperature
+ * - T_Sensor temperature
+ * - VRM temperature
+ * - Water In temperature
+ * - Water Out temperature
+ * - CPU Optional fan RPM
+ * - Chipset fan RPM
+ * - VRM Heat Sink fan RPM
+ * - Water Flow fan RPM
+ * - CPU current
+ * - CPU core voltage
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/dev_printk.h>
+#include <linux/dmi.h>
+#include <linux/hwmon.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sort.h>
+#include <linux/units.h>
+
+#include <asm/unaligned.h>
+
+static char *mutex_path_override;
+
+/* Writing to this EC register switches EC bank */
+#define ASUS_EC_BANK_REGISTER 0xff
+#define SENSOR_LABEL_LEN 16
+
+/*
+ * Arbitrarily chosen maximum allowed bank number. Required for sorting
+ * banks; currently overkill with just two banks used at most, but set a
+ * little higher for the sake of alignment.
+ */
+#define ASUS_EC_MAX_BANK 3
+
+#define ACPI_LOCK_DELAY_MS 500
+
+/* ACPI mutex for locking access to the EC for the firmware */
+#define ASUS_HW_ACCESS_MUTEX_ASMX "\\AMW0.ASMX"
+
+/* The upper-case variant of the two known vendor-name spellings */
+#define VENDOR_ASUS_UPPER_CASE "ASUSTeK COMPUTER INC."
+
+typedef union {
+ u32 value;
+ struct {
+ u8 index;
+ u8 bank;
+ u8 size;
+ u8 dummy;
+ } components;
+} sensor_address;
+
+#define MAKE_SENSOR_ADDRESS(size, bank, index) { \
+ .value = (size << 16) + (bank << 8) + index \
+ }
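+/*
+ * Editorial worked example: MAKE_SENSOR_ADDRESS(1, 0x00, 0x3b), the CPU
+ * temperature below, packs to .value == 0x0001003b: index 0x3b in the
+ * low byte, bank 0x00 next, size 1 in the third byte. Note this matches
+ * the components struct layout only on little-endian hosts.
+ */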
+
+static u32 hwmon_attributes[hwmon_max] = {
+ [hwmon_chip] = HWMON_C_REGISTER_TZ,
+ [hwmon_temp] = HWMON_T_INPUT | HWMON_T_LABEL,
+ [hwmon_in] = HWMON_I_INPUT | HWMON_I_LABEL,
+ [hwmon_curr] = HWMON_C_INPUT | HWMON_C_LABEL,
+ [hwmon_fan] = HWMON_F_INPUT | HWMON_F_LABEL,
+};
+
+struct ec_sensor_info {
+ char label[SENSOR_LABEL_LEN];
+ enum hwmon_sensor_types type;
+ sensor_address addr;
+};
+
+#define EC_SENSOR(sensor_label, sensor_type, size, bank, index) { \
+ .label = sensor_label, .type = sensor_type, \
+ .addr = MAKE_SENSOR_ADDRESS(size, bank, index), \
+ }
+
+enum ec_sensors {
+ /* chipset temperature [℃] */
+ ec_sensor_temp_chipset,
+ /* CPU temperature [℃] */
+ ec_sensor_temp_cpu,
+ /* motherboard temperature [℃] */
+ ec_sensor_temp_mb,
+ /* "T_Sensor" temperature sensor reading [℃] */
+ ec_sensor_temp_t_sensor,
+ /* VRM temperature [℃] */
+ ec_sensor_temp_vrm,
+ /* CPU Core voltage [mV] */
+ ec_sensor_in_cpu_core,
+ /* CPU_Opt fan [RPM] */
+ ec_sensor_fan_cpu_opt,
+ /* VRM heat sink fan [RPM] */
+ ec_sensor_fan_vrm_hs,
+ /* Chipset fan [RPM] */
+ ec_sensor_fan_chipset,
+ /* Water flow sensor reading [RPM] */
+ ec_sensor_fan_water_flow,
+ /* CPU current [A] */
+ ec_sensor_curr_cpu,
+ /* "Water_In" temperature sensor reading [℃] */
+ ec_sensor_temp_water_in,
+ /* "Water_Out" temperature sensor reading [℃] */
+ ec_sensor_temp_water_out,
+};
+
+#define SENSOR_TEMP_CHIPSET BIT(ec_sensor_temp_chipset)
+#define SENSOR_TEMP_CPU BIT(ec_sensor_temp_cpu)
+#define SENSOR_TEMP_MB BIT(ec_sensor_temp_mb)
+#define SENSOR_TEMP_T_SENSOR BIT(ec_sensor_temp_t_sensor)
+#define SENSOR_TEMP_VRM BIT(ec_sensor_temp_vrm)
+#define SENSOR_IN_CPU_CORE BIT(ec_sensor_in_cpu_core)
+#define SENSOR_FAN_CPU_OPT BIT(ec_sensor_fan_cpu_opt)
+#define SENSOR_FAN_VRM_HS BIT(ec_sensor_fan_vrm_hs)
+#define SENSOR_FAN_CHIPSET BIT(ec_sensor_fan_chipset)
+#define SENSOR_FAN_WATER_FLOW BIT(ec_sensor_fan_water_flow)
+#define SENSOR_CURR_CPU BIT(ec_sensor_curr_cpu)
+#define SENSOR_TEMP_WATER_IN BIT(ec_sensor_temp_water_in)
+#define SENSOR_TEMP_WATER_OUT BIT(ec_sensor_temp_water_out)
+
+/* All the known sensors for ASUS EC controllers */
+static const struct ec_sensor_info known_ec_sensors[] = {
+ [ec_sensor_temp_chipset] =
+ EC_SENSOR("Chipset", hwmon_temp, 1, 0x00, 0x3a),
+ [ec_sensor_temp_cpu] = EC_SENSOR("CPU", hwmon_temp, 1, 0x00, 0x3b),
+ [ec_sensor_temp_mb] =
+ EC_SENSOR("Motherboard", hwmon_temp, 1, 0x00, 0x3c),
+ [ec_sensor_temp_t_sensor] =
+ EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x3d),
+ [ec_sensor_temp_vrm] = EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e),
+ [ec_sensor_in_cpu_core] =
+ EC_SENSOR("CPU Core", hwmon_in, 2, 0x00, 0xa2),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
+ [ec_sensor_fan_vrm_hs] = EC_SENSOR("VRM HS", hwmon_fan, 2, 0x00, 0xb2),
+ [ec_sensor_fan_chipset] =
+ EC_SENSOR("Chipset", hwmon_fan, 2, 0x00, 0xb4),
+ [ec_sensor_fan_water_flow] =
+ EC_SENSOR("Water_Flow", hwmon_fan, 2, 0x00, 0xbc),
+ [ec_sensor_curr_cpu] = EC_SENSOR("CPU", hwmon_curr, 1, 0x00, 0xf4),
+ [ec_sensor_temp_water_in] =
+ EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00),
+ [ec_sensor_temp_water_out] =
+ EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
+};
+
+/* Shortcuts for common combinations */
+#define SENSOR_SET_TEMP_CHIPSET_CPU_MB \
+ (SENSOR_TEMP_CHIPSET | SENSOR_TEMP_CPU | SENSOR_TEMP_MB)
+#define SENSOR_SET_TEMP_WATER (SENSOR_TEMP_WATER_IN | SENSOR_TEMP_WATER_OUT)
+
+#define DMI_EXACT_MATCH_BOARD(vendor, name, sensors) { \
+ .matches = { \
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, vendor), \
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, name), \
+ }, \
+ .driver_data = (void *)(sensors), \
+}
+
+static const struct dmi_system_id asus_ec_dmi_table[] __initconst = {
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE, "PRIME X570-PRO",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE, "Pro WS X570-ACE",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE,
+ "ROG CROSSHAIR VIII DARK HERO",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE,
+ "ROG CROSSHAIR VIII FORMULA",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE, "ROG CROSSHAIR VIII HERO",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
+ SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE,
+ "ROG CROSSHAIR VIII HERO (WI-FI)",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
+ SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE,
+ "ROG CROSSHAIR VIII IMPACT",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_FAN_CHIPSET |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE, "ROG STRIX B550-E GAMING",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_FAN_CPU_OPT),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE, "ROG STRIX B550-I GAMING",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_FAN_VRM_HS |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE, "ROG STRIX X570-E GAMING",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_FAN_CHIPSET |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE, "ROG STRIX X570-F GAMING",
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET),
+ DMI_EXACT_MATCH_BOARD(VENDOR_ASUS_UPPER_CASE, "ROG STRIX X570-I GAMING",
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_VRM_HS |
+ SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE),
+ {}
+};
+
+struct ec_sensor {
+ unsigned int info_index;
+ s32 cached_value;
+};
+
+struct ec_sensors_data {
+ unsigned long board_sensors;
+ struct ec_sensor *sensors;
+ /* EC registers to read from */
+ u16 *registers;
+ u8 *read_buffer;
+ /* sorted list of unique register banks */
+ u8 banks[ASUS_EC_MAX_BANK + 1];
+ /* in jiffies */
+ unsigned long last_updated;
+ acpi_handle aml_mutex;
+ /* number of board EC sensors */
+ u8 nr_sensors;
+ /*
+ * number of EC registers to read
+ * (sensor might span more than 1 register)
+ */
+ u8 nr_registers;
+ /* number of unique register banks */
+ u8 nr_banks;
+};
+
+static u8 register_bank(u16 reg)
+{
+ return reg >> 8;
+}
+
+static u8 register_index(u16 reg)
+{
+ return reg & 0x00ff;
+}
+
+static bool is_sensor_data_signed(const struct ec_sensor_info *si)
+{
+ /*
+ * guessed from WMI functions in DSDT code for boards
+ * of the X470 generation
+ */
+ return si->type == hwmon_temp;
+}
+
+static const struct ec_sensor_info *
+get_sensor_info(const struct ec_sensors_data *state, int index)
+{
+ return &known_ec_sensors[state->sensors[index].info_index];
+}
+
+static int find_ec_sensor_index(const struct ec_sensors_data *ec,
+ enum hwmon_sensor_types type, int channel)
+{
+ unsigned int i;
+
+ for (i = 0; i < ec->nr_sensors; i++) {
+ if (get_sensor_info(ec, i)->type == type) {
+ if (channel == 0)
+ return i;
+ channel--;
+ }
+ }
+ return -ENOENT;
+}
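+/*
+ * Editorial note: hwmon channels map positionally; channel N of a type
+ * resolves to the Nth sensor of that type in board_sensors bit order.
+ * On a board exposing SENSOR_TEMP_CPU and SENSOR_TEMP_VRM, for example,
+ * temp1 is the CPU sensor and temp2 the VRM sensor.
+ */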
+
+static int __init bank_compare(const void *a, const void *b)
+{
+ return *((const s8 *)a) - *((const s8 *)b);
+}
+
+static int __init board_sensors_count(unsigned long sensors)
+{
+ return hweight_long(sensors);
+}
+
+static void __init setup_sensor_data(struct ec_sensors_data *ec)
+{
+ struct ec_sensor *s = ec->sensors;
+ bool bank_found;
+ int i, j;
+ u8 bank;
+
+ ec->nr_banks = 0;
+ ec->nr_registers = 0;
+
+ for_each_set_bit(i, &ec->board_sensors,
+ BITS_PER_TYPE(ec->board_sensors)) {
+ s->info_index = i;
+ s->cached_value = 0;
+ ec->nr_registers +=
+ known_ec_sensors[s->info_index].addr.components.size;
+ bank_found = false;
+ bank = known_ec_sensors[s->info_index].addr.components.bank;
+ for (j = 0; j < ec->nr_banks; j++) {
+ if (ec->banks[j] == bank) {
+ bank_found = true;
+ break;
+ }
+ }
+		if (!bank_found)
+			ec->banks[ec->nr_banks++] = bank;
+ s++;
+ }
+ sort(ec->banks, ec->nr_banks, 1, bank_compare, NULL);
+}
+
+static void __init fill_ec_registers(struct ec_sensors_data *ec)
+{
+ const struct ec_sensor_info *si;
+ unsigned int i, j, register_idx = 0;
+
+ for (i = 0; i < ec->nr_sensors; ++i) {
+ si = get_sensor_info(ec, i);
+ for (j = 0; j < si->addr.components.size; ++j, ++register_idx) {
+ ec->registers[register_idx] =
+ (si->addr.components.bank << 8) +
+ si->addr.components.index + j;
+ }
+ }
+}
+
+static acpi_handle __init asus_hw_access_mutex(struct device *dev)
+{
+ const char *mutex_path;
+ acpi_handle res;
+ int status;
+
+ mutex_path = mutex_path_override ?
+ mutex_path_override : ASUS_HW_ACCESS_MUTEX_ASMX;
+
+ status = acpi_get_handle(NULL, (acpi_string)mutex_path, &res);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev,
+ "Could not get hardware access guard mutex '%s': error %d",
+ mutex_path, status);
+ return NULL;
+ }
+ return res;
+}
+
+static int asus_ec_bank_switch(u8 bank, u8 *old)
+{
+ int status = 0;
+
+	if (old)
+		status = ec_read(ASUS_EC_BANK_REGISTER, old);
+ if (status || (old && (*old == bank)))
+ return status;
+ return ec_write(ASUS_EC_BANK_REGISTER, bank);
+}
+
+static int asus_ec_block_read(const struct device *dev,
+ struct ec_sensors_data *ec)
+{
+ int ireg, ibank, status;
+ u8 bank, reg_bank, prev_bank;
+
+ bank = 0;
+ status = asus_ec_bank_switch(bank, &prev_bank);
+ if (status) {
+ dev_warn(dev, "EC bank switch failed");
+ return status;
+ }
+
+ if (prev_bank) {
+ /* oops... somebody else is working with the EC too */
+ dev_warn(dev,
+ "Concurrent access to the ACPI EC detected.\nRace condition possible.");
+ }
+
+ /* read registers minimizing bank switches. */
+ for (ibank = 0; ibank < ec->nr_banks; ibank++) {
+ if (bank != ec->banks[ibank]) {
+ bank = ec->banks[ibank];
+ if (asus_ec_bank_switch(bank, NULL)) {
+ dev_warn(dev, "EC bank switch to %d failed",
+ bank);
+ break;
+ }
+ }
+ for (ireg = 0; ireg < ec->nr_registers; ireg++) {
+ reg_bank = register_bank(ec->registers[ireg]);
+			if (reg_bank < bank)
+				continue;
+ ec_read(register_index(ec->registers[ireg]),
+ ec->read_buffer + ireg);
+ }
+ }
+
+ status = asus_ec_bank_switch(prev_bank, NULL);
+ return status;
+}
+
+static inline s32 get_sensor_value(const struct ec_sensor_info *si, u8 *data)
+{
+ if (is_sensor_data_signed(si)) {
+ switch (si->addr.components.size) {
+ case 1:
+ return (s8)*data;
+ case 2:
+ return (s16)get_unaligned_be16(data);
+ case 4:
+ return (s32)get_unaligned_be32(data);
+ default:
+ return 0;
+ }
+ } else {
+ switch (si->addr.components.size) {
+ case 1:
+ return *data;
+ case 2:
+ return get_unaligned_be16(data);
+ case 4:
+ return get_unaligned_be32(data);
+ default:
+ return 0;
+ }
+ }
+}
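+/*
+ * Editorial worked example: temperatures are treated as signed (a guess
+ * carried over from X470 DSDT code, per is_sensor_data_signed() above),
+ * so a one-byte raw reading of 0xff decodes to -1 degrees C rather than
+ * 255; fan, current and voltage registers stay unsigned.
+ */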
+
+static void update_sensor_values(struct ec_sensors_data *ec, u8 *data)
+{
+ const struct ec_sensor_info *si;
+ struct ec_sensor *s;
+
+ for (s = ec->sensors; s != ec->sensors + ec->nr_sensors; s++) {
+ si = &known_ec_sensors[s->info_index];
+ s->cached_value = get_sensor_value(si, data);
+ data += si->addr.components.size;
+ }
+}
+
+static int update_ec_sensors(const struct device *dev,
+ struct ec_sensors_data *ec)
+{
+ int status;
+
+ /*
+ * ASUS DSDT does not specify that access to the EC has to be guarded,
+ * but firmware does access it via ACPI
+ */
+ if (ACPI_FAILURE(acpi_acquire_mutex(ec->aml_mutex, NULL,
+ ACPI_LOCK_DELAY_MS))) {
+ dev_err(dev, "Failed to acquire AML mutex");
+ status = -EBUSY;
+ goto cleanup;
+ }
+
+ status = asus_ec_block_read(dev, ec);
+
+	if (!status)
+		update_sensor_values(ec, ec->read_buffer);
+	if (ACPI_FAILURE(acpi_release_mutex(ec->aml_mutex, NULL)))
+		dev_err(dev, "Failed to release AML mutex");
+cleanup:
+ return status;
+}
+
+static long scale_sensor_value(s32 value, int data_type)
+{
+ switch (data_type) {
+ case hwmon_curr:
+ case hwmon_temp:
+ return value * MILLI;
+ default:
+ return value;
+ }
+}
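+/*
+ * Editorial worked example: a raw CPU temperature of 46 is scaled by
+ * MILLI (1000, from <linux/units.h>) to the 46000 millidegrees the
+ * hwmon sysfs ABI expects; fan RPM readings pass through unscaled.
+ */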
+
+static int get_cached_value_or_update(const struct device *dev,
+ int sensor_index,
+ struct ec_sensors_data *state, s32 *value)
+{
+ if (time_after(jiffies, state->last_updated + HZ)) {
+ if (update_ec_sensors(dev, state)) {
+ dev_err(dev, "update_ec_sensors() failure\n");
+ return -EIO;
+ }
+
+ state->last_updated = jiffies;
+ }
+
+ *value = state->sensors[sensor_index].cached_value;
+ return 0;
+}
+
+/*
+ * Now follow the functions that implement the hwmon interface
+ */
+
+static int asus_ec_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ int ret;
+ s32 value = 0;
+
+ struct ec_sensors_data *state = dev_get_drvdata(dev);
+ int sidx = find_ec_sensor_index(state, type, channel);
+
+	if (sidx < 0)
+		return sidx;
+
+ ret = get_cached_value_or_update(dev, sidx, state, &value);
+ if (!ret) {
+ *val = scale_sensor_value(value,
+ get_sensor_info(state, sidx)->type);
+ }
+
+ return ret;
+}
+
+static int asus_ec_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ struct ec_sensors_data *state = dev_get_drvdata(dev);
+ int sensor_index = find_ec_sensor_index(state, type, channel);
+ *str = get_sensor_info(state, sensor_index)->label;
+
+ return 0;
+}
+
+static umode_t asus_ec_hwmon_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ const struct ec_sensors_data *state = drvdata;
+
+	return find_ec_sensor_index(state, type, channel) >= 0 ? 0444 : 0;
+}
+
+static int __init
+asus_ec_hwmon_add_chan_info(struct hwmon_channel_info *asus_ec_hwmon_chan,
+ struct device *dev, int num,
+ enum hwmon_sensor_types type, u32 config)
+{
+ int i;
+ u32 *cfg = devm_kcalloc(dev, num + 1, sizeof(*cfg), GFP_KERNEL);
+
+ if (!cfg)
+ return -ENOMEM;
+
+ asus_ec_hwmon_chan->type = type;
+ asus_ec_hwmon_chan->config = cfg;
+ for (i = 0; i < num; i++, cfg++)
+ *cfg = config;
+
+ return 0;
+}
+
+static const struct hwmon_ops asus_ec_hwmon_ops = {
+ .is_visible = asus_ec_hwmon_is_visible,
+ .read = asus_ec_hwmon_read,
+ .read_string = asus_ec_hwmon_read_string,
+};
+
+static struct hwmon_chip_info asus_ec_chip_info = {
+ .ops = &asus_ec_hwmon_ops,
+};
+
+static unsigned long __init get_board_sensors(void)
+{
+ const struct dmi_system_id *dmi_entry =
+ dmi_first_match(asus_ec_dmi_table);
+
+ return dmi_entry ? (unsigned long)dmi_entry->driver_data : 0;
+}
+
+static int __init asus_ec_probe(struct platform_device *pdev)
+{
+ const struct hwmon_channel_info **ptr_asus_ec_ci;
+ int nr_count[hwmon_max] = { 0 }, nr_types = 0;
+ struct hwmon_channel_info *asus_ec_hwmon_chan;
+ const struct hwmon_chip_info *chip_info;
+ struct device *dev = &pdev->dev;
+ struct ec_sensors_data *ec_data;
+ const struct ec_sensor_info *si;
+ enum hwmon_sensor_types type;
+ unsigned long board_sensors;
+ struct device *hwdev;
+ unsigned int i;
+
+ board_sensors = get_board_sensors();
+ if (!board_sensors)
+ return -ENODEV;
+
+ ec_data = devm_kzalloc(dev, sizeof(struct ec_sensors_data),
+ GFP_KERNEL);
+ if (!ec_data)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, ec_data);
+ ec_data->board_sensors = board_sensors;
+ ec_data->nr_sensors = board_sensors_count(ec_data->board_sensors);
+	ec_data->sensors = devm_kcalloc(dev, ec_data->nr_sensors,
+					sizeof(struct ec_sensor), GFP_KERNEL);
+	if (!ec_data->sensors)
+		return -ENOMEM;
+
+ setup_sensor_data(ec_data);
+ ec_data->registers = devm_kcalloc(dev, ec_data->nr_registers,
+ sizeof(u16), GFP_KERNEL);
+ ec_data->read_buffer = devm_kcalloc(dev, ec_data->nr_registers,
+ sizeof(u8), GFP_KERNEL);
+
+ if (!ec_data->registers || !ec_data->read_buffer)
+ return -ENOMEM;
+
+ fill_ec_registers(ec_data);
+
+ ec_data->aml_mutex = asus_hw_access_mutex(dev);
+
+ for (i = 0; i < ec_data->nr_sensors; ++i) {
+ si = get_sensor_info(ec_data, i);
+ if (!nr_count[si->type])
+ ++nr_types;
+ ++nr_count[si->type];
+ }
+
+	if (nr_count[hwmon_temp]) {
+		nr_count[hwmon_chip]++;
+		nr_types++;
+	}
+
+ asus_ec_hwmon_chan = devm_kcalloc(
+ dev, nr_types, sizeof(*asus_ec_hwmon_chan), GFP_KERNEL);
+ if (!asus_ec_hwmon_chan)
+ return -ENOMEM;
+
+ ptr_asus_ec_ci = devm_kcalloc(dev, nr_types + 1,
+ sizeof(*ptr_asus_ec_ci), GFP_KERNEL);
+ if (!ptr_asus_ec_ci)
+ return -ENOMEM;
+
+ asus_ec_chip_info.info = ptr_asus_ec_ci;
+ chip_info = &asus_ec_chip_info;
+
+ for (type = 0; type < hwmon_max; ++type) {
+ if (!nr_count[type])
+ continue;
+
+ asus_ec_hwmon_add_chan_info(asus_ec_hwmon_chan, dev,
+ nr_count[type], type,
+ hwmon_attributes[type]);
+ *ptr_asus_ec_ci++ = asus_ec_hwmon_chan++;
+ }
+
+ dev_info(dev, "board has %d EC sensors that span %d registers",
+ ec_data->nr_sensors, ec_data->nr_registers);
+
+ hwdev = devm_hwmon_device_register_with_info(dev, "asusec",
+ ec_data, chip_info, NULL);
+
+ return PTR_ERR_OR_ZERO(hwdev);
+}
+
+static const struct acpi_device_id acpi_ec_ids[] = {
+ /* Embedded Controller Device */
+ { "PNP0C09", 0 },
+ {}
+};
+
+static struct platform_driver asus_ec_sensors_platform_driver = {
+ .driver = {
+ .name = "asus-ec-sensors",
+ .acpi_match_table = acpi_ec_ids,
+ },
+};
+
+MODULE_DEVICE_TABLE(dmi, asus_ec_dmi_table);
+module_platform_driver_probe(asus_ec_sensors_platform_driver, asus_ec_probe);
+
+module_param_named(mutex_path, mutex_path_override, charp, 0);
+MODULE_PARM_DESC(mutex_path,
+ "Override ACPI mutex path used to guard access to hardware");
+
+MODULE_AUTHOR("Eugene Shalygin <eugene.shalygin@gmail.com>");
+MODULE_DESCRIPTION(
+ "HWMON driver for sensors accessible via ACPI EC in ASUS motherboards");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/asus_wmi_ec_sensors.c b/drivers/hwmon/asus_wmi_ec_sensors.c
index 22a1459305a7..a3a2f014dec0 100644
--- a/drivers/hwmon/asus_wmi_ec_sensors.c
+++ b/drivers/hwmon/asus_wmi_ec_sensors.c
@@ -112,7 +112,8 @@ struct asus_wmi_data {
/* boards with EC support */
static struct asus_wmi_data sensors_board_PW_X570_P = {
.known_board_sensors = {
- SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB, SENSOR_TEMP_VRM,
+ SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
+ SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
SENSOR_FAN_CHIPSET,
SENSOR_MAX
},
diff --git a/drivers/hwmon/asus_wmi_sensors.c b/drivers/hwmon/asus_wmi_sensors.c
index c80eee874b6c..8fdcb62ae52d 100644
--- a/drivers/hwmon/asus_wmi_sensors.c
+++ b/drivers/hwmon/asus_wmi_sensors.c
@@ -77,6 +77,7 @@ static const struct dmi_system_id asus_wmi_dmi_table[] = {
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VII HERO (WI-FI)"),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B450-E GAMING"),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B450-F GAMING"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B450-F GAMING II"),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B450-I GAMING"),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X399-E GAMING"),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X470-F GAMING"),
diff --git a/drivers/hwmon/axi-fan-control.c b/drivers/hwmon/axi-fan-control.c
index d2092c17d993..96c4a5c45291 100644
--- a/drivers/hwmon/axi-fan-control.c
+++ b/drivers/hwmon/axi-fan-control.c
@@ -339,7 +339,8 @@ static irqreturn_t axi_fan_control_irq_handler(int irq, void *data)
ctl->update_tacho_params = true;
} else {
ctl->hw_pwm_req = false;
- sysfs_notify(&ctl->hdev->kobj, NULL, "pwm1");
+ hwmon_notify_event(ctl->hdev, hwmon_pwm,
+ hwmon_pwm_input, 0);
}
}
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 9949eeb79378..84cb1ede7bc0 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -21,6 +21,7 @@
#include <linux/errno.h>
#include <linux/hwmon.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
@@ -86,8 +87,8 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("i8k");
static bool force;
-module_param(force, bool, 0);
-MODULE_PARM_DESC(force, "Force loading without checking for supported models");
+module_param_unsafe(force, bool, 0);
+MODULE_PARM_DESC(force, "Force loading without checking for supported models and features");
static bool ignore_dmi;
module_param(ignore_dmi, bool, 0);
@@ -250,46 +251,52 @@ static int i8k_smm(struct smm_regs *regs)
/*
* Read the fan status.
*/
-static int i8k_get_fan_status(const struct dell_smm_data *data, int fan)
+static int i8k_get_fan_status(const struct dell_smm_data *data, u8 fan)
{
- struct smm_regs regs = { .eax = I8K_SMM_GET_FAN, };
+ struct smm_regs regs = {
+ .eax = I8K_SMM_GET_FAN,
+ .ebx = fan,
+ };
if (data->disallow_fan_support)
return -EINVAL;
- regs.ebx = fan & 0xff;
return i8k_smm(&regs) ? : regs.eax & 0xff;
}
/*
* Read the fan speed in RPM.
*/
-static int i8k_get_fan_speed(const struct dell_smm_data *data, int fan)
+static int i8k_get_fan_speed(const struct dell_smm_data *data, u8 fan)
{
- struct smm_regs regs = { .eax = I8K_SMM_GET_SPEED, };
+ struct smm_regs regs = {
+ .eax = I8K_SMM_GET_SPEED,
+ .ebx = fan,
+ };
if (data->disallow_fan_support)
return -EINVAL;
- regs.ebx = fan & 0xff;
return i8k_smm(&regs) ? : (regs.eax & 0xffff) * data->i8k_fan_mult;
}
/*
* Read the fan type.
*/
-static int _i8k_get_fan_type(const struct dell_smm_data *data, int fan)
+static int _i8k_get_fan_type(const struct dell_smm_data *data, u8 fan)
{
- struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
+ struct smm_regs regs = {
+ .eax = I8K_SMM_GET_FAN_TYPE,
+ .ebx = fan,
+ };
if (data->disallow_fan_support || data->disallow_fan_type_call)
return -EINVAL;
- regs.ebx = fan & 0xff;
return i8k_smm(&regs) ? : regs.eax & 0xff;
}
-static int i8k_get_fan_type(struct dell_smm_data *data, int fan)
+static int i8k_get_fan_type(struct dell_smm_data *data, u8 fan)
{
/* I8K_SMM_GET_FAN_TYPE SMM call is expensive, so cache values */
if (data->fan_type[fan] == INT_MIN)
@@ -301,14 +308,16 @@ static int i8k_get_fan_type(struct dell_smm_data *data, int fan)
/*
* Read the fan nominal rpm for specific fan speed.
*/
-static int __init i8k_get_fan_nominal_speed(const struct dell_smm_data *data, int fan, int speed)
+static int __init i8k_get_fan_nominal_speed(const struct dell_smm_data *data, u8 fan, int speed)
{
- struct smm_regs regs = { .eax = I8K_SMM_GET_NOM_SPEED, };
+ struct smm_regs regs = {
+ .eax = I8K_SMM_GET_NOM_SPEED,
+ .ebx = fan | (speed << 8),
+ };
if (data->disallow_fan_support)
return -EINVAL;
- regs.ebx = (fan & 0xff) | (speed << 8);
return i8k_smm(&regs) ? : (regs.eax & 0xffff) * data->i8k_fan_mult;
}
@@ -329,7 +338,7 @@ static int i8k_enable_fan_auto_mode(const struct dell_smm_data *data, bool enabl
/*
* Set the fan speed (off, low, high, ...).
*/
-static int i8k_set_fan(const struct dell_smm_data *data, int fan, int speed)
+static int i8k_set_fan(const struct dell_smm_data *data, u8 fan, int speed)
{
struct smm_regs regs = { .eax = I8K_SMM_SET_FAN, };
@@ -337,33 +346,35 @@ static int i8k_set_fan(const struct dell_smm_data *data, int fan, int speed)
return -EINVAL;
speed = (speed < 0) ? 0 : ((speed > data->i8k_fan_max) ? data->i8k_fan_max : speed);
- regs.ebx = (fan & 0xff) | (speed << 8);
+ regs.ebx = fan | (speed << 8);
return i8k_smm(&regs);
}
-static int __init i8k_get_temp_type(int sensor)
+static int __init i8k_get_temp_type(u8 sensor)
{
- struct smm_regs regs = { .eax = I8K_SMM_GET_TEMP_TYPE, };
+ struct smm_regs regs = {
+ .eax = I8K_SMM_GET_TEMP_TYPE,
+ .ebx = sensor,
+ };
- regs.ebx = sensor & 0xff;
return i8k_smm(&regs) ? : regs.eax & 0xff;
}
/*
* Read the cpu temperature.
*/
-static int _i8k_get_temp(int sensor)
+static int _i8k_get_temp(u8 sensor)
{
struct smm_regs regs = {
.eax = I8K_SMM_GET_TEMP,
- .ebx = sensor & 0xff,
+ .ebx = sensor,
};
return i8k_smm(&regs) ? : regs.eax & 0xff;
}
-static int i8k_get_temp(int sensor)
+static int i8k_get_temp(u8 sensor)
{
int temp = _i8k_get_temp(sensor);
@@ -496,6 +507,9 @@ static long i8k_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
if (copy_from_user(&val, argp, sizeof(int)))
return -EFAULT;
+ if (val > U8_MAX || val < 0)
+ return -EINVAL;
+
val = i8k_get_fan_speed(data, val);
break;
@@ -503,6 +517,9 @@ static long i8k_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
if (copy_from_user(&val, argp, sizeof(int)))
return -EFAULT;
+ if (val > U8_MAX || val < 0)
+ return -EINVAL;
+
val = i8k_get_fan_status(data, val);
break;
@@ -513,6 +530,9 @@ static long i8k_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
if (copy_from_user(&val, argp, sizeof(int)))
return -EFAULT;
+ if (val > U8_MAX || val < 0)
+ return -EINVAL;
+
if (copy_from_user(&speed, argp + 1, sizeof(int)))
return -EFAULT;
@@ -631,6 +651,11 @@ static umode_t dell_smm_is_visible(const void *drvdata, enum hwmon_sensor_types
case hwmon_temp:
switch (attr) {
case hwmon_temp_input:
+ /* _i8k_get_temp() is fine since we do not care about the actual value */
+ if (data->temp_type[channel] >= 0 || _i8k_get_temp(channel) >= 0)
+ return 0444;
+
+ break;
case hwmon_temp_label:
if (data->temp_type[channel] >= 0)
return 0444;
@@ -920,7 +945,8 @@ static int __init dell_smm_init_hwmon(struct device *dev)
{
struct dell_smm_data *data = dev_get_drvdata(dev);
struct device *dell_smm_hwmon_dev;
- int i, state, err;
+ int state, err;
+ u8 i;
for (i = 0; i < DELL_SMM_NO_TEMP; i++) {
data->temp_type[i] = i8k_get_temp_type(i);
@@ -1131,6 +1157,13 @@ static const struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initconst
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 580 "),
},
},
+ {
+ .ident = "Dell Inspiron 3505",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 3505"),
+ },
+ },
{ }
};
@@ -1236,7 +1269,8 @@ static int __init dell_smm_probe(struct platform_device *pdev)
{
struct dell_smm_data *data;
const struct dmi_system_id *id, *fan_control;
- int fan, ret;
+ int ret;
+ u8 fan;
data = devm_kzalloc(&pdev->dev, sizeof(struct dell_smm_data), GFP_KERNEL);
if (!data)
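
The three ioctl hunks above add the same range check before the int read from userspace is narrowed to a u8 fan or sensor index. A minimal userspace sketch of that validate-then-narrow pattern; narrow_to_u8() is a hypothetical helper, and -1 stands in for the driver's -EINVAL:

#include <stdint.h>
#include <stdio.h>

#define U8_MAX 255

/* Reject any value that cannot be represented as a u8 before
 * narrowing the type, as the ioctl handlers now do. */
static int narrow_to_u8(int val, uint8_t *out)
{
	if (val > U8_MAX || val < 0)
		return -1;	/* -EINVAL in the driver */
	*out = (uint8_t)val;
	return 0;
}

int main(void)
{
	uint8_t fan;

	if (narrow_to_u8(300, &fan))
		printf("300 rejected, as the ioctl now does\n");
	if (!narrow_to_u8(1, &fan))
		printf("fan index %d accepted\n", fan);
	return 0;
}
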
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 3ae961986fc3..989e2c8496dd 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -18,6 +18,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/thermal.h>
@@ -30,6 +31,7 @@
struct hwmon_device {
const char *name;
+ const char *label;
struct device dev;
const struct hwmon_chip_info *chip;
struct list_head tzdata;
@@ -71,17 +73,29 @@ name_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(name);
+static ssize_t
+label_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%s\n", to_hwmon_device(dev)->label);
+}
+static DEVICE_ATTR_RO(label);
+
static struct attribute *hwmon_dev_attrs[] = {
&dev_attr_name.attr,
+ &dev_attr_label.attr,
NULL
};
-static umode_t hwmon_dev_name_is_visible(struct kobject *kobj,
+static umode_t hwmon_dev_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct hwmon_device *hdev = to_hwmon_device(dev);
- if (to_hwmon_device(dev)->name == NULL)
+ if (attr == &dev_attr_name.attr && hdev->name == NULL)
+ return 0;
+
+ if (attr == &dev_attr_label.attr && hdev->label == NULL)
return 0;
return attr->mode;
@@ -89,7 +103,7 @@ static umode_t hwmon_dev_name_is_visible(struct kobject *kobj,
static const struct attribute_group hwmon_dev_attr_group = {
.attrs = hwmon_dev_attrs,
- .is_visible = hwmon_dev_name_is_visible,
+ .is_visible = hwmon_dev_attr_is_visible,
};
static const struct attribute_group *hwmon_dev_attr_groups[] = {
@@ -117,6 +131,7 @@ static void hwmon_dev_release(struct device *dev)
if (hwdev->group.attrs)
hwmon_free_attrs(hwdev->group.attrs);
kfree(hwdev->groups);
+ kfree(hwdev->label);
kfree(hwdev);
}
@@ -589,6 +604,7 @@ static const char * const hwmon_pwm_attr_templates[] = {
[hwmon_pwm_enable] = "pwm%d_enable",
[hwmon_pwm_mode] = "pwm%d_mode",
[hwmon_pwm_freq] = "pwm%d_freq",
+ [hwmon_pwm_auto_channels_temp] = "pwm%d_auto_channels_temp",
};
static const char * const hwmon_intrusion_attr_templates[] = {
@@ -625,7 +641,9 @@ static const int __templates_size[] = {
int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel)
{
+ char event[MAX_SYSFS_ATTR_NAME_LENGTH + 5];
char sattr[MAX_SYSFS_ATTR_NAME_LENGTH];
+ char *envp[] = { event, NULL };
const char * const *templates;
const char *template;
int base;
@@ -641,8 +659,9 @@ int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type,
base = hwmon_attr_base(type);
scnprintf(sattr, MAX_SYSFS_ATTR_NAME_LENGTH, template, base + channel);
+ scnprintf(event, sizeof(event), "NAME=%s", sattr);
sysfs_notify(&dev->kobj, NULL, sattr);
- kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
if (type == hwmon_temp)
hwmon_thermal_notify(dev, channel);
@@ -735,6 +754,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
const struct attribute_group **groups)
{
struct hwmon_device *hwdev;
+ const char *label;
struct device *hdev;
int i, err, id;
@@ -790,6 +810,18 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
hdev->groups = groups;
}
+ if (dev && device_property_present(dev, "label")) {
+ err = device_property_read_string(dev, "label", &label);
+ if (err < 0)
+ goto free_hwmon;
+
+ hwdev->label = kstrdup(label, GFP_KERNEL);
+ if (hwdev->label == NULL) {
+ err = -ENOMEM;
+ goto free_hwmon;
+ }
+ }
+
hwdev->name = name;
hdev->class = &hwmon_class;
hdev->parent = dev;
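
The uevent hunk above attaches a NAME=<attribute> variable to the CHANGE uevent so userspace can tell which sensor fired without rescanning sysfs. A kernel-style sketch of the same pairing of sysfs_notify() and kobject_uevent_env(), assuming a struct device already in hand; notify_attr_change() and the buffer size are illustrative:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/kobject.h>

/* Notify sysfs pollers and emit a CHANGE uevent carrying NAME=<attr>. */
static void notify_attr_change(struct device *dev, const char *attr)
{
	char event[64];
	char *envp[] = { event, NULL };

	scnprintf(event, sizeof(event), "NAME=%s", attr);
	sysfs_notify(&dev->kobj, NULL, attr);
	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
}
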
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index d2a60de5b8de..c20a749fc7f2 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -34,6 +34,7 @@
#define LM70_CHIP_LM71 2 /* NS LM71 */
#define LM70_CHIP_LM74 3 /* NS LM74 */
#define LM70_CHIP_TMP122 4 /* TI TMP122/TMP124 */
+#define LM70_CHIP_TMP125 5 /* TI TMP125 */
struct lm70 {
struct spi_device *spi;
@@ -87,6 +88,12 @@ static ssize_t temp1_input_show(struct device *dev,
* LM71:
* 14 bits of 2's complement data, discard LSB 2 bits,
* resolution 0.0312 degrees celsius.
+ *
+ * TMP125:
+ * MSB/D15 is a leading zero. D14 is the sign-bit. This is
+ * followed by 9 temperature bits (D13..D5) in 2's complement
+ * data format with a resolution of 0.25 degrees celsius per unit.
+ * LSB 5 bits (D4..D0) share the same value as D5 and get discarded.
*/
switch (p_lm70->chip) {
case LM70_CHIP_LM70:
@@ -102,6 +109,10 @@ static ssize_t temp1_input_show(struct device *dev,
case LM70_CHIP_LM71:
val = ((int)raw / 4) * 3125 / 100;
break;
+
+ case LM70_CHIP_TMP125:
+ val = (sign_extend32(raw, 14) / 32) * 250;
+ break;
}
status = sprintf(buf, "%d\n", val); /* millidegrees Celsius */
@@ -136,6 +147,10 @@ static const struct of_device_id lm70_of_ids[] = {
.data = (void *) LM70_CHIP_TMP122,
},
{
+ .compatible = "ti,tmp125",
+ .data = (void *) LM70_CHIP_TMP125,
+ },
+ {
.compatible = "ti,lm71",
.data = (void *) LM70_CHIP_LM71,
},
@@ -184,6 +199,7 @@ static const struct spi_device_id lm70_ids[] = {
{ "lm70", LM70_CHIP_LM70 },
{ "tmp121", LM70_CHIP_TMP121 },
{ "tmp122", LM70_CHIP_TMP122 },
+ { "tmp125", LM70_CHIP_TMP125 },
{ "lm71", LM70_CHIP_LM71 },
{ "lm74", LM70_CHIP_LM74 },
{ },
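
Per the new comment, the TMP125 keeps the sign in D14, shares D5's value across the five LSBs, and weighs each remaining step at 0.25 degrees Celsius. A standalone check of the conversion used above, with sign_extend32() reimplemented for userspace:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's sign_extend32(). */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

/* TMP125: bit 14 is the sign, bits 13..5 carry the temperature,
 * 0.25 degC (250 mdegC) per step; /32 discards the shared LSBs. */
static int tmp125_mdegc(uint16_t raw)
{
	return (sign_extend32(raw, 14) / 32) * 250;
}

int main(void)
{
	/* 25 degC = 100 steps of 0.25 degC, left-justified by 5 bits */
	printf("%d mC\n", tmp125_mdegc(100 * 32));
	/* -0.25 degC: two's complement of one step, D15 kept zero */
	printf("%d mC\n", tmp125_mdegc((uint16_t)(-1 * 32) & 0x7fff));
	return 0;
}
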
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index 74fd7aa373a3..12370dcefa6a 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -18,15 +18,15 @@
* http://www.national.com/pf/LM/LM82.html
*/
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
+#include <linux/bits.h>
+#include <linux/err.h>
#include <linux/i2c.h>
-#include <linux/hwmon-sysfs.h>
+#include <linux/init.h>
#include <linux/hwmon.h>
-#include <linux/err.h>
+#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
#include <linux/sysfs.h>
/*
@@ -66,35 +66,35 @@ enum chips { lm83, lm82 };
#define LM83_REG_R_TCRIT 0x42
#define LM83_REG_W_TCRIT 0x5A
-/*
- * Conversions and various macros
- * The LM83 uses signed 8-bit values with LSB = 1 degree Celsius.
- */
-
-#define TEMP_FROM_REG(val) ((val) * 1000)
-#define TEMP_TO_REG(val) ((val) <= -128000 ? -128 : \
- (val) >= 127000 ? 127 : \
- (val) < 0 ? ((val) - 500) / 1000 : \
- ((val) + 500) / 1000)
-
-static const u8 LM83_REG_R_TEMP[] = {
+static const u8 LM83_REG_TEMP[] = {
LM83_REG_R_LOCAL_TEMP,
LM83_REG_R_REMOTE1_TEMP,
LM83_REG_R_REMOTE2_TEMP,
LM83_REG_R_REMOTE3_TEMP,
+};
+
+static const u8 LM83_REG_MAX[] = {
LM83_REG_R_LOCAL_HIGH,
LM83_REG_R_REMOTE1_HIGH,
LM83_REG_R_REMOTE2_HIGH,
LM83_REG_R_REMOTE3_HIGH,
- LM83_REG_R_TCRIT,
};
-static const u8 LM83_REG_W_HIGH[] = {
- LM83_REG_W_LOCAL_HIGH,
- LM83_REG_W_REMOTE1_HIGH,
- LM83_REG_W_REMOTE2_HIGH,
- LM83_REG_W_REMOTE3_HIGH,
- LM83_REG_W_TCRIT,
+/* alarm and fault registers and bits, indexed by channel */
+static const u8 LM83_ALARM_REG[] = {
+ LM83_REG_R_STATUS1, LM83_REG_R_STATUS2, LM83_REG_R_STATUS1, LM83_REG_R_STATUS2
+};
+
+static const u8 LM83_MAX_ALARM_BIT[] = {
+ BIT(6), BIT(7), BIT(4), BIT(4)
+};
+
+static const u8 LM83_CRIT_ALARM_BIT[] = {
+ BIT(0), BIT(0), BIT(1), BIT(1)
+};
+
+static const u8 LM83_FAULT_BIT[] = {
+ 0, BIT(5), BIT(2), BIT(2)
};
/*
@@ -102,180 +102,274 @@ static const u8 LM83_REG_W_HIGH[] = {
*/
struct lm83_data {
- struct i2c_client *client;
- const struct attribute_group *groups[3];
- struct mutex update_lock;
- bool valid; /* false until following fields are valid */
- unsigned long last_updated; /* in jiffies */
-
- /* registers values */
- s8 temp[9]; /* 0..3: input 1-4,
- 4..7: high limit 1-4,
- 8 : critical limit */
- u16 alarms; /* bitvector, combined */
+ struct regmap *regmap;
+ enum chips type;
};
-static struct lm83_data *lm83_update_device(struct device *dev)
+/* regmap code */
+
+static int lm83_regmap_reg_read(void *context, unsigned int reg, unsigned int *val)
{
- struct lm83_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
+ struct i2c_client *client = context;
+ int ret;
- mutex_lock(&data->update_lock);
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0)
+ return ret;
- if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) {
- int nr;
+ *val = ret;
+ return 0;
+}
- dev_dbg(&client->dev, "Updating lm83 data.\n");
- for (nr = 0; nr < 9; nr++) {
- data->temp[nr] =
- i2c_smbus_read_byte_data(client,
- LM83_REG_R_TEMP[nr]);
- }
- data->alarms =
- i2c_smbus_read_byte_data(client, LM83_REG_R_STATUS1)
- + (i2c_smbus_read_byte_data(client, LM83_REG_R_STATUS2)
- << 8);
+/*
+ * The regmap write function maps read register addresses to write register
+ * addresses. This is necessary for regmap register caching to work.
+ * An alternative would be to clear the regmap cache whenever a register is
+ * written, but that would be much more expensive.
+ */
+static int lm83_regmap_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct i2c_client *client = context;
- data->last_updated = jiffies;
- data->valid = true;
+ switch (reg) {
+ case LM83_REG_R_CONFIG:
+ case LM83_REG_R_LOCAL_HIGH:
+ case LM83_REG_R_REMOTE2_HIGH:
+ reg += 0x06;
+ break;
+ case LM83_REG_R_REMOTE1_HIGH:
+ case LM83_REG_R_REMOTE3_HIGH:
+ case LM83_REG_R_TCRIT:
+ reg += 0x18;
+ break;
+ default:
+ break;
}
- mutex_unlock(&data->update_lock);
+ return i2c_smbus_write_byte_data(client, reg, val);
+}
- return data;
+static bool lm83_regmap_is_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case LM83_REG_R_LOCAL_TEMP:
+ case LM83_REG_R_REMOTE1_TEMP:
+ case LM83_REG_R_REMOTE2_TEMP:
+ case LM83_REG_R_REMOTE3_TEMP:
+ case LM83_REG_R_STATUS1:
+ case LM83_REG_R_STATUS2:
+ return true;
+ default:
+ return false;
+ }
}
-/*
- * Sysfs stuff
- */
+static const struct regmap_config lm83_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = lm83_regmap_is_volatile,
+ .reg_read = lm83_regmap_reg_read,
+ .reg_write = lm83_regmap_reg_write,
+};
-static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
+/* hwmon API */
+
+static int lm83_temp_read(struct device *dev, u32 attr, int channel, long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct lm83_data *data = lm83_update_device(dev);
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[attr->index]));
+ struct lm83_data *data = dev_get_drvdata(dev);
+ unsigned int regval;
+ int err;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ err = regmap_read(data->regmap, LM83_REG_TEMP[channel], &regval);
+ if (err < 0)
+ return err;
+ *val = (s8)regval * 1000;
+ break;
+ case hwmon_temp_max:
+ err = regmap_read(data->regmap, LM83_REG_MAX[channel], &regval);
+ if (err < 0)
+ return err;
+ *val = (s8)regval * 1000;
+ break;
+ case hwmon_temp_crit:
+ err = regmap_read(data->regmap, LM83_REG_R_TCRIT, &regval);
+ if (err < 0)
+ return err;
+ *val = (s8)regval * 1000;
+ break;
+ case hwmon_temp_max_alarm:
+ err = regmap_read(data->regmap, LM83_ALARM_REG[channel], &regval);
+ if (err < 0)
+ return err;
+ *val = !!(regval & LM83_MAX_ALARM_BIT[channel]);
+ break;
+ case hwmon_temp_crit_alarm:
+ err = regmap_read(data->regmap, LM83_ALARM_REG[channel], &regval);
+ if (err < 0)
+ return err;
+ *val = !!(regval & LM83_CRIT_ALARM_BIT[channel]);
+ break;
+ case hwmon_temp_fault:
+ err = regmap_read(data->regmap, LM83_ALARM_REG[channel], &regval);
+ if (err < 0)
+ return err;
+ *val = !!(regval & LM83_FAULT_BIT[channel]);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static ssize_t temp_store(struct device *dev,
- struct device_attribute *devattr, const char *buf,
- size_t count)
+static int lm83_temp_write(struct device *dev, u32 attr, int channel, long val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm83_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- long val;
- int nr = attr->index;
+ unsigned int regval;
int err;
- err = kstrtol(buf, 10, &val);
- if (err < 0)
- return err;
+ regval = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
- mutex_lock(&data->update_lock);
- data->temp[nr] = TEMP_TO_REG(val);
- i2c_smbus_write_byte_data(client, LM83_REG_W_HIGH[nr - 4],
- data->temp[nr]);
- mutex_unlock(&data->update_lock);
- return count;
+ switch (attr) {
+ case hwmon_temp_max:
+ err = regmap_write(data->regmap, LM83_REG_MAX[channel], regval);
+ if (err < 0)
+ return err;
+ break;
+ case hwmon_temp_crit:
+ err = regmap_write(data->regmap, LM83_REG_R_TCRIT, regval);
+ if (err < 0)
+ return err;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static ssize_t alarms_show(struct device *dev, struct device_attribute *dummy,
- char *buf)
+static int lm83_chip_read(struct device *dev, u32 attr, int channel, long *val)
{
- struct lm83_data *data = lm83_update_device(dev);
- return sprintf(buf, "%d\n", data->alarms);
+ struct lm83_data *data = dev_get_drvdata(dev);
+ unsigned int regval;
+ int err;
+
+ switch (attr) {
+ case hwmon_chip_alarms:
+ err = regmap_read(data->regmap, LM83_REG_R_STATUS1, &regval);
+ if (err < 0)
+ return err;
+ *val = regval;
+ err = regmap_read(data->regmap, LM83_REG_R_STATUS2, &regval);
+ if (err < 0)
+ return err;
+ *val |= regval << 8;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
-static ssize_t alarm_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int lm83_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct lm83_data *data = lm83_update_device(dev);
- int bitnr = attr->index;
+ switch (type) {
+ case hwmon_chip:
+ return lm83_chip_read(dev, attr, channel, val);
+ case hwmon_temp:
+ return lm83_temp_read(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
- return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1);
+static int lm83_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_temp:
+ return lm83_temp_write(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
-static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
-static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
-static SENSOR_DEVICE_ATTR_RO(temp4_input, temp, 3);
-static SENSOR_DEVICE_ATTR_RW(temp1_max, temp, 4);
-static SENSOR_DEVICE_ATTR_RW(temp2_max, temp, 5);
-static SENSOR_DEVICE_ATTR_RW(temp3_max, temp, 6);
-static SENSOR_DEVICE_ATTR_RW(temp4_max, temp, 7);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp, 8);
-static SENSOR_DEVICE_ATTR_RO(temp2_crit, temp, 8);
-static SENSOR_DEVICE_ATTR_RW(temp3_crit, temp, 8);
-static SENSOR_DEVICE_ATTR_RO(temp4_crit, temp, 8);
-
-/* Individual alarm files */
-static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 0);
-static SENSOR_DEVICE_ATTR_RO(temp3_crit_alarm, alarm, 1);
-static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 2);
-static SENSOR_DEVICE_ATTR_RO(temp3_max_alarm, alarm, 4);
-static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 6);
-static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, 8);
-static SENSOR_DEVICE_ATTR_RO(temp4_crit_alarm, alarm, 9);
-static SENSOR_DEVICE_ATTR_RO(temp4_fault, alarm, 10);
-static SENSOR_DEVICE_ATTR_RO(temp4_max_alarm, alarm, 12);
-static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 13);
-static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, 15);
-/* Raw alarm file for compatibility */
-static DEVICE_ATTR_RO(alarms);
-
-static struct attribute *lm83_attributes[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp3_max.dev_attr.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp3_crit.dev_attr.attr,
-
- &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_fault.dev_attr.attr,
- &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
- &dev_attr_alarms.attr,
- NULL
-};
+static umode_t lm83_is_visible(const void *_data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct lm83_data *data = _data;
-static const struct attribute_group lm83_group = {
- .attrs = lm83_attributes,
-};
+ /*
+ * LM82 only supports a single external channel, modeled as channel 2.
+ */
+ if (data->type == lm82 && (channel == 1 || channel == 3))
+ return 0;
-static struct attribute *lm83_attributes_opt[] = {
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp4_input.dev_attr.attr,
- &sensor_dev_attr_temp2_max.dev_attr.attr,
- &sensor_dev_attr_temp4_max.dev_attr.attr,
- &sensor_dev_attr_temp2_crit.dev_attr.attr,
- &sensor_dev_attr_temp4_crit.dev_attr.attr,
-
- &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp4_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp4_fault.dev_attr.attr,
- &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_fault.dev_attr.attr,
- &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ switch (type) {
+ case hwmon_chip:
+ if (attr == hwmon_chip_alarms)
+ return 0444;
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ return 0444;
+ case hwmon_temp_fault:
+ if (channel)
+ return 0444;
+ break;
+ case hwmon_temp_max:
+ return 0644;
+ case hwmon_temp_crit:
+ if (channel == 2)
+ return 0644;
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *lm83_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_ALARMS),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT
+ ),
NULL
};
-static const struct attribute_group lm83_group_opt = {
- .attrs = lm83_attributes_opt,
+static const struct hwmon_ops lm83_hwmon_ops = {
+ .is_visible = lm83_is_visible,
+ .read = lm83_read,
+ .write = lm83_write,
};
-/*
- * Real code
- */
+static const struct hwmon_chip_info lm83_chip_info = {
+ .ops = &lm83_hwmon_ops,
+ .info = lm83_info,
+};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm83_detect(struct i2c_client *new_client,
+static int lm83_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
- struct i2c_adapter *adapter = new_client->adapter;
+ struct i2c_adapter *adapter = client->adapter;
const char *name;
u8 man_id, chip_id;
@@ -283,22 +377,30 @@ static int lm83_detect(struct i2c_client *new_client,
return -ENODEV;
/* Detection */
- if ((i2c_smbus_read_byte_data(new_client, LM83_REG_R_STATUS1) & 0xA8) ||
- (i2c_smbus_read_byte_data(new_client, LM83_REG_R_STATUS2) & 0x48) ||
- (i2c_smbus_read_byte_data(new_client, LM83_REG_R_CONFIG) & 0x41)) {
+ if ((i2c_smbus_read_byte_data(client, LM83_REG_R_STATUS1) & 0xA8) ||
+ (i2c_smbus_read_byte_data(client, LM83_REG_R_STATUS2) & 0x48) ||
+ (i2c_smbus_read_byte_data(client, LM83_REG_R_CONFIG) & 0x41)) {
dev_dbg(&adapter->dev, "LM83 detection failed at 0x%02x\n",
- new_client->addr);
+ client->addr);
return -ENODEV;
}
/* Identification */
- man_id = i2c_smbus_read_byte_data(new_client, LM83_REG_R_MAN_ID);
+ man_id = i2c_smbus_read_byte_data(client, LM83_REG_R_MAN_ID);
if (man_id != 0x01) /* National Semiconductor */
return -ENODEV;
- chip_id = i2c_smbus_read_byte_data(new_client, LM83_REG_R_CHIP_ID);
+ chip_id = i2c_smbus_read_byte_data(client, LM83_REG_R_CHIP_ID);
switch (chip_id) {
case 0x03:
+ /*
+ * According to the LM82 datasheet dated March 2013, recent
+ * revisions of LM82 have a die revision of 0x03. This was
+ * confirmed with a real chip. Further details in this revision
+ * of the LM82 datasheet strongly suggest that LM82 is just a
+ * repackaged LM83. It is therefore impossible to distinguish
+ * those chips from LM83, and they will be misdetected as LM83.
+ */
name = "lm83";
break;
case 0x01:
@@ -306,9 +408,9 @@ static int lm83_detect(struct i2c_client *new_client,
break;
default:
/* identification failed */
- dev_info(&adapter->dev,
- "Unsupported chip (man_id=0x%02X, chip_id=0x%02X)\n",
- man_id, chip_id);
+ dev_dbg(&adapter->dev,
+ "Unsupported chip (man_id=0x%02X, chip_id=0x%02X)\n",
+ man_id, chip_id);
return -ENODEV;
}
@@ -317,34 +419,31 @@ static int lm83_detect(struct i2c_client *new_client,
return 0;
}
-static const struct i2c_device_id lm83_id[];
+static const struct i2c_device_id lm83_id[] = {
+ { "lm83", lm83 },
+ { "lm82", lm82 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lm83_id);
-static int lm83_probe(struct i2c_client *new_client)
+static int lm83_probe(struct i2c_client *client)
{
+ struct device *dev = &client->dev;
struct device *hwmon_dev;
struct lm83_data *data;
- data = devm_kzalloc(&new_client->dev, sizeof(struct lm83_data),
- GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct lm83_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->client = new_client;
- mutex_init(&data->update_lock);
+ data->regmap = devm_regmap_init(dev, NULL, client, &lm83_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
- /*
- * Register sysfs hooks
- * The LM82 can only monitor one external diode which is
- * at the same register as the LM83 temp3 entry - so we
- * declare 1 and 3 common, and then 2 and 4 only for the LM83.
- */
- data->groups[0] = &lm83_group;
- if (i2c_match_id(lm83_id, new_client)->driver_data == lm83)
- data->groups[1] = &lm83_group_opt;
+ data->type = i2c_match_id(lm83_id, client)->driver_data;
- hwmon_dev = devm_hwmon_device_register_with_groups(&new_client->dev,
- new_client->name,
- data, data->groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data, &lm83_chip_info, NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
@@ -352,13 +451,6 @@ static int lm83_probe(struct i2c_client *new_client)
* Driver data (common to all clients)
*/
-static const struct i2c_device_id lm83_id[] = {
- { "lm83", lm83 },
- { "lm82", lm82 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, lm83_id);
-
static struct i2c_driver lm83_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
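
The rewritten lm83.c drops the packed 16-bit alarms word in favour of per-channel lookup tables naming the status register and bit for each alarm. A userspace sketch of that decode; the status byte values below are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Per-channel lookup: which status register and which bit carries the
 * "max" alarm, mirroring LM83_ALARM_REG[] and LM83_MAX_ALARM_BIT[]. */
static const int alarm_reg_idx[] = { 0, 1, 0, 1 };	/* STATUS1 or STATUS2 */
static const uint8_t max_alarm_bit[] = { BIT(6), BIT(7), BIT(4), BIT(4) };

int main(void)
{
	uint8_t status[2] = { BIT(6), BIT(4) };	/* illustrative readings */
	int ch;

	for (ch = 0; ch < 4; ch++)
		printf("temp%d_max_alarm = %d\n", ch + 1,
		       !!(status[alarm_reg_idx[ch]] & max_alarm_bit[ch]));
	return 0;
}
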
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index 5fcfd57df61e..4c5487aeb3cf 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -254,7 +254,7 @@ err_remove:
return err;
}
-static int max1111_remove(struct spi_device *spi)
+static void max1111_remove(struct spi_device *spi)
{
struct max1111_data *data = spi_get_drvdata(spi);
@@ -265,7 +265,6 @@ static int max1111_remove(struct spi_device *spi)
sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
mutex_destroy(&data->drvdata_lock);
- return 0;
}
static const struct spi_device_id max1111_ids[] = {
diff --git a/drivers/hwmon/max31722.c b/drivers/hwmon/max31722.c
index 4cf4fe6809a3..93e048ee4955 100644
--- a/drivers/hwmon/max31722.c
+++ b/drivers/hwmon/max31722.c
@@ -100,7 +100,7 @@ static int max31722_probe(struct spi_device *spi)
return 0;
}
-static int max31722_remove(struct spi_device *spi)
+static void max31722_remove(struct spi_device *spi)
{
struct max31722_data *data = spi_get_drvdata(spi);
int ret;
@@ -111,8 +111,6 @@ static int max31722_remove(struct spi_device *spi)
if (ret)
/* There is nothing we can do about this ... */
dev_warn(&spi->dev, "Failed to put device in stand-by mode\n");
-
- return 0;
}
static int __maybe_unused max31722_suspend(struct device *dev)
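
Both max1111.c and max31722.c above track the SPI core change that made remove() return void: failures on the remove path can only be logged, never propagated. A minimal sketch of the new callback shape; the driver name and empty bodies are placeholders:

#include <linux/module.h>
#include <linux/spi/spi.h>

static int example_probe(struct spi_device *spi)
{
	return 0;
}

static void example_remove(struct spi_device *spi)
{
	/*
	 * No return value anymore: anything that fails here can only
	 * be reported, e.g. with dev_warn(&spi->dev, ...).
	 */
}

static struct spi_driver example_driver = {
	.driver = { .name = "example" },
	.probe = example_probe,
	.remove = example_remove,
};
module_spi_driver(example_driver);
MODULE_LICENSE("GPL");
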
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index ccc0f047bd44..14bb7726f8d7 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -87,6 +87,9 @@ struct max6639_data {
/* Register values initialized only once */
u8 ppr; /* Pulses per rotation 0..3 for 1..4 ppr */
u8 rpm_range; /* Index in above rpm_ranges table */
+
+ /* Optional regulator for FAN supply */
+ struct regulator *reg;
};
static struct max6639_data *max6639_update_device(struct device *dev)
@@ -516,6 +519,11 @@ static int max6639_detect(struct i2c_client *client,
return 0;
}
+static void max6639_regulator_disable(void *data)
+{
+ regulator_disable(data);
+}
+
static int max6639_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -528,6 +536,28 @@ static int max6639_probe(struct i2c_client *client)
return -ENOMEM;
data->client = client;
+
+ data->reg = devm_regulator_get_optional(dev, "fan");
+ if (IS_ERR(data->reg)) {
+ if (PTR_ERR(data->reg) != -ENODEV)
+ return PTR_ERR(data->reg);
+
+ data->reg = NULL;
+ } else {
+ /* Spin up fans */
+ err = regulator_enable(data->reg);
+ if (err) {
+ dev_err(dev, "Failed to enable fan supply: %d\n", err);
+ return err;
+ }
+ err = devm_add_action_or_reset(dev, max6639_regulator_disable,
+ data->reg);
+ if (err) {
+ dev_err(dev, "Failed to register action: %d\n", err);
+ return err;
+ }
+ }
+
mutex_init(&data->update_lock);
/* Initialize the max6639 chip */
@@ -545,23 +575,39 @@ static int max6639_probe(struct i2c_client *client)
static int max6639_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
- if (data < 0)
- return data;
+ struct max6639_data *data = dev_get_drvdata(dev);
+ int ret = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
+
+ if (ret < 0)
+ return ret;
+
+ if (data->reg)
+ regulator_disable(data->reg);
return i2c_smbus_write_byte_data(client,
- MAX6639_REG_GCONFIG, data | MAX6639_GCONFIG_STANDBY);
+ MAX6639_REG_GCONFIG, ret | MAX6639_GCONFIG_STANDBY);
}
static int max6639_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
- if (data < 0)
- return data;
+ struct max6639_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ if (data->reg) {
+ ret = regulator_enable(data->reg);
+ if (ret) {
+ dev_err(dev, "Failed to enable fan supply: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
+ if (ret < 0)
+ return ret;
return i2c_smbus_write_byte_data(client,
- MAX6639_REG_GCONFIG, data & ~MAX6639_GCONFIG_STANDBY);
+ MAX6639_REG_GCONFIG, ret & ~MAX6639_GCONFIG_STANDBY);
}
#endif /* CONFIG_PM_SLEEP */
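
max6639_probe() above pairs devm_regulator_get_optional() with devm_add_action_or_reset() so the fan supply is disabled automatically on probe failure or unbind. A condensed sketch of that pattern for a generic device; enable_optional_supply() is a hypothetical wrapper and "fan" is the supply name from the hunk:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static void supply_disable(void *data)
{
	regulator_disable(data);
}

/* Enable an optional supply and register an unwind action for it. */
static int enable_optional_supply(struct device *dev, struct regulator **reg)
{
	int err;

	*reg = devm_regulator_get_optional(dev, "fan");
	if (IS_ERR(*reg)) {
		if (PTR_ERR(*reg) != -ENODEV)
			return PTR_ERR(*reg);	/* real error, bail out */
		*reg = NULL;			/* supply simply absent */
		return 0;
	}

	err = regulator_enable(*reg);
	if (err)
		return err;

	/* Disable runs automatically on probe failure or device removal. */
	return devm_add_action_or_reset(dev, supply_disable, *reg);
}
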
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
index 4a8becdb0d58..b48bd7c961d6 100644
--- a/drivers/hwmon/mlxreg-fan.c
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -18,15 +18,6 @@
#define MLXREG_FAN_MAX_STATE 10
#define MLXREG_FAN_MIN_DUTY 51 /* 20% */
#define MLXREG_FAN_MAX_DUTY 255 /* 100% */
-/*
- * Minimum and maximum FAN allowed speed in percent: from 20% to 100%. Values
- * MLXREG_FAN_MAX_STATE + x, where x is between 2 and 10 are used for
- * setting FAN speed dynamic minimum. For example, if value is set to 14 (40%)
- * cooling levels vector will be set to 4, 4, 4, 4, 4, 5, 6, 7, 8, 9, 10 to
- * introduce PWM speed in percent: 40, 40, 40, 40, 40, 50, 60. 70, 80, 90, 100.
- */
-#define MLXREG_FAN_SPEED_MIN (MLXREG_FAN_MAX_STATE + 2)
-#define MLXREG_FAN_SPEED_MAX (MLXREG_FAN_MAX_STATE * 2)
#define MLXREG_FAN_SPEED_MIN_LEVEL 2 /* 20 percent */
#define MLXREG_FAN_TACHO_SAMPLES_PER_PULSE_DEF 44
#define MLXREG_FAN_TACHO_DIV_MIN 283
@@ -87,13 +78,16 @@ struct mlxreg_fan_tacho {
* @connected: indicates if PWM is connected;
* @reg: register offset;
* @cooling: cooling device levels;
+ * @last_hwmon_state: last cooling state set by hwmon subsystem;
+ * @last_thermal_state: last cooling state set by thermal subsystem;
* @cdev: cooling device;
*/
struct mlxreg_fan_pwm {
struct mlxreg_fan *fan;
bool connected;
u32 reg;
- u8 cooling_levels[MLXREG_FAN_MAX_STATE + 1];
+ unsigned long last_hwmon_state;
+ unsigned long last_thermal_state;
struct thermal_cooling_device *cdev;
};
@@ -119,6 +113,9 @@ struct mlxreg_fan {
int divider;
};
+static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state);
+
static int
mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long *val)
@@ -213,6 +210,18 @@ mlxreg_fan_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
val > MLXREG_FAN_MAX_DUTY)
return -EINVAL;
pwm = &fan->pwm[channel];
+ /* If thermal is configured - handle PWM limit setting. */
+ if (IS_REACHABLE(CONFIG_THERMAL)) {
+ pwm->last_hwmon_state = MLXREG_FAN_PWM_DUTY2STATE(val);
+ /*
+ * Update the PWM only when the requested state is not below the
+ * last thermal state.
+ */
+ if (pwm->last_hwmon_state >= pwm->last_thermal_state)
+ return mlxreg_fan_set_cur_state(pwm->cdev,
+ pwm->last_hwmon_state);
+ return 0;
+ }
return regmap_write(fan->regmap, pwm->reg, val);
default:
return -EOPNOTSUPP;
@@ -338,58 +347,22 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
{
struct mlxreg_fan_pwm *pwm = cdev->devdata;
struct mlxreg_fan *fan = pwm->fan;
- unsigned long cur_state;
- int i, config = 0;
- u32 regval;
int err;
- /*
- * Verify if this request is for changing allowed FAN dynamical
- * minimum. If it is - update cooling levels accordingly and update
- * state, if current state is below the newly requested minimum state.
- * For example, if current state is 5, and minimal state is to be
- * changed from 4 to 6, fan->cooling_levels[0 to 5] will be changed all
- * from 4 to 6. And state 5 (fan->cooling_levels[4]) should be
- * overwritten.
- */
- if (state >= MLXREG_FAN_SPEED_MIN && state <= MLXREG_FAN_SPEED_MAX) {
- /*
- * This is configuration change, which is only supported through sysfs.
- * For configuration non-zero value is to be returned to avoid thermal
- * statistics update.
- */
- config = 1;
- state -= MLXREG_FAN_MAX_STATE;
- for (i = 0; i < state; i++)
- pwm->cooling_levels[i] = state;
- for (i = state; i <= MLXREG_FAN_MAX_STATE; i++)
- pwm->cooling_levels[i] = i;
-
- err = regmap_read(fan->regmap, pwm->reg, &regval);
- if (err) {
- dev_err(fan->dev, "Failed to query PWM duty\n");
- return err;
- }
-
- cur_state = MLXREG_FAN_PWM_DUTY2STATE(regval);
- if (state < cur_state)
- return config;
-
- state = cur_state;
- }
-
if (state > MLXREG_FAN_MAX_STATE)
return -EINVAL;
- /* Normalize the state to the valid speed range. */
- state = pwm->cooling_levels[state];
+ /* Save thermal state. */
+ pwm->last_thermal_state = state;
+
+ state = max_t(unsigned long, state, pwm->last_hwmon_state);
err = regmap_write(fan->regmap, pwm->reg,
MLXREG_FAN_PWM_STATE2DUTY(state));
if (err) {
dev_err(fan->dev, "Failed to write PWM duty\n");
return err;
}
- return config;
+ return 0;
}
static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
@@ -564,7 +537,7 @@ static int mlxreg_fan_config(struct mlxreg_fan *fan,
static int mlxreg_fan_cooling_config(struct device *dev, struct mlxreg_fan *fan)
{
- int i, j;
+ int i;
for (i = 0; i < MLXREG_FAN_MAX_PWM; i++) {
struct mlxreg_fan_pwm *pwm = &fan->pwm[i];
@@ -579,11 +552,8 @@ static int mlxreg_fan_cooling_config(struct device *dev, struct mlxreg_fan *fan)
return PTR_ERR(pwm->cdev);
}
- /* Init cooling levels per PWM state. */
- for (j = 0; j < MLXREG_FAN_SPEED_MIN_LEVEL; j++)
- pwm->cooling_levels[j] = MLXREG_FAN_SPEED_MIN_LEVEL;
- for (j = MLXREG_FAN_SPEED_MIN_LEVEL; j <= MLXREG_FAN_MAX_STATE; j++)
- pwm->cooling_levels[j] = j;
+ /* Set minimal PWM speed. */
+ pwm->last_hwmon_state = MLXREG_FAN_PWM_DUTY2STATE(MLXREG_FAN_MIN_DUTY);
}
return 0;
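
With cooling_levels[] gone, mlxreg-fan arbitrates between the last hwmon request and the last thermal request by simply taking the larger of the two before writing the duty. The same rule checked standalone; state2duty() mirrors what MLXREG_FAN_PWM_STATE2DUTY presumably computes from the 0..10 state range and 255 maximum duty:

#include <stdio.h>

#define MAX_STATE 10
#define MAX_DUTY  255

/* state (0..10) -> PWM duty (0..255), rounded to nearest */
static unsigned int state2duty(unsigned long state)
{
	return (state * MAX_DUTY + MAX_STATE / 2) / MAX_STATE;
}

int main(void)
{
	unsigned long last_hwmon_state = 2;	/* 20% floor set at probe */
	unsigned long last_thermal_state;

	for (last_thermal_state = 0; last_thermal_state <= 4;
	     last_thermal_state++) {
		/* The effective state is the larger of the two requests. */
		unsigned long state = last_thermal_state > last_hwmon_state ?
				      last_thermal_state : last_hwmon_state;

		printf("thermal=%lu -> duty=%u\n", last_thermal_state,
		       state2duty(state));
	}
	return 0;
}
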
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 098d12b9ecda..2b91f7e05126 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -308,6 +308,7 @@ static void superio_exit(struct nct6775_sio_data *sio_data)
#define NUM_TEMP 10 /* Max number of temp attribute sets w/ limits*/
#define NUM_TEMP_FIXED 6 /* Max number of fixed temp attribute sets */
+#define NUM_TSI_TEMP 8 /* Max number of TSI temp register pairs */
#define NUM_REG_ALARM 7 /* Max number of alarm registers */
#define NUM_REG_BEEP 5 /* Max number of beep registers */
@@ -498,6 +499,8 @@ static const u16 NCT6775_REG_TEMP_CRIT[32] = {
[11] = 0xa07
};
+static const u16 NCT6775_REG_TSI_TEMP[] = { 0x669 };
+
/* NCT6776 specific data */
/* STEP_UP_TIME and STEP_DOWN_TIME regs are swapped for all chips but NCT6775 */
@@ -581,6 +584,9 @@ static const u16 NCT6776_REG_TEMP_CRIT[32] = {
[12] = 0x70a,
};
+static const u16 NCT6776_REG_TSI_TEMP[] = {
+ 0x409, 0x40b, 0x40d, 0x40f, 0x411, 0x413, 0x415, 0x417 };
+
/* NCT6779 specific data */
static const u16 NCT6779_REG_IN[] = {
@@ -864,6 +870,8 @@ static const char *const nct6796_temp_label[] = {
#define NCT6796_TEMP_MASK 0xbfff0ffe
#define NCT6796_VIRT_TEMP_MASK 0x80000c00
+static const u16 NCT6796_REG_TSI_TEMP[] = { 0x409, 0x40b };
+
static const char *const nct6798_temp_label[] = {
"",
"SYSTIN",
@@ -1005,6 +1013,8 @@ static const u16 NCT6106_REG_TEMP_CRIT[32] = {
[12] = 0x205,
};
+static const u16 NCT6106_REG_TSI_TEMP[] = { 0x59, 0x5b, 0x5d, 0x5f, 0x61, 0x63, 0x65, 0x67 };
+
/* NCT6112D/NCT6114D/NCT6116D specific data */
static const u16 NCT6116_REG_FAN[] = { 0x20, 0x22, 0x24, 0x26, 0x28 };
@@ -1069,6 +1079,8 @@ static const s8 NCT6116_BEEP_BITS[] = {
34, -1 /* intrusion0, intrusion1 */
};
+static const u16 NCT6116_REG_TSI_TEMP[] = { 0x59, 0x5b };
+
static enum pwm_enable reg_to_pwm_enable(int pwm, int mode)
{
if (mode == 0 && pwm == 255)
@@ -1169,6 +1181,12 @@ static inline u8 in_to_reg(u32 val, u8 nr)
return clamp_val(DIV_ROUND_CLOSEST(val * 100, scale_in[nr]), 0, 255);
}
+/* TSI temperatures are in 8.3 format */
+static inline unsigned int tsi_temp_from_reg(unsigned int reg)
+{
+ return (reg >> 5) * 125;
+}
+
/*
* Data structures and manipulation thereof
*/
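
tsi_temp_from_reg() reads the register as 8.3 fixed point left-justified in 16 bits: the right shift by 5 drops the pad bits, and each surviving LSB is worth 1/8 degC, i.e. 125 millidegrees. A quick standalone check of that arithmetic:

#include <stdio.h>

/* 8.3 fixed point, left-justified in a 16-bit register:
 * drop 5 pad bits, then scale by 1000/8 = 125 mdegC per LSB. */
static unsigned int tsi_temp_from_reg(unsigned int reg)
{
	return (reg >> 5) * 125;
}

int main(void)
{
	/* 40.5 degC = 324/8 degC -> raw value 324 << 5 */
	printf("%u mC\n", tsi_temp_from_reg(324u << 5));
	return 0;
}
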
@@ -1179,7 +1197,7 @@ struct nct6775_data {
enum kinds kind;
const char *name;
- const struct attribute_group *groups[6];
+ const struct attribute_group *groups[7];
u16 reg_temp[5][NUM_TEMP]; /* 0=temp, 1=temp_over, 2=temp_hyst,
* 3=temp_crit, 4=temp_lcrit
@@ -1240,6 +1258,8 @@ struct nct6775_data {
const u16 *REG_ALARM;
const u16 *REG_BEEP;
+ const u16 *REG_TSI_TEMP;
+
unsigned int (*fan_from_reg)(u16 reg, unsigned int divreg);
unsigned int (*fan_from_reg_min)(u16 reg, unsigned int divreg);
@@ -1267,6 +1287,7 @@ struct nct6775_data {
s8 temp_offset[NUM_TEMP_FIXED];
s16 temp[5][NUM_TEMP]; /* 0=temp, 1=temp_over, 2=temp_hyst,
* 3=temp_crit, 4=temp_lcrit */
+ s16 tsi_temp[NUM_TSI_TEMP];
u64 alarms;
u64 beeps;
@@ -1315,6 +1336,7 @@ struct nct6775_data {
u16 have_temp;
u16 have_temp_fixed;
+ u16 have_tsi_temp;
u16 have_in;
/* Remember extra register values over suspend/resume */
@@ -1464,13 +1486,15 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
switch (data->kind) {
case nct6106:
return reg == 0x20 || reg == 0x22 || reg == 0x24 ||
+ (reg >= 0x59 && reg < 0x69 && (reg & 1)) ||
reg == 0xe0 || reg == 0xe2 || reg == 0xe4 ||
reg == 0x111 || reg == 0x121 || reg == 0x131;
case nct6116:
return reg == 0x20 || reg == 0x22 || reg == 0x24 ||
- reg == 0x26 || reg == 0x28 || reg == 0xe0 || reg == 0xe2 ||
- reg == 0xe4 || reg == 0xe6 || reg == 0xe8 || reg == 0x111 ||
- reg == 0x121 || reg == 0x131 || reg == 0x191 || reg == 0x1a1;
+ reg == 0x26 || reg == 0x28 || reg == 0x59 || reg == 0x5b ||
+ reg == 0xe0 || reg == 0xe2 || reg == 0xe4 || reg == 0xe6 ||
+ reg == 0xe8 || reg == 0x111 || reg == 0x121 || reg == 0x131 ||
+ reg == 0x191 || reg == 0x1a1;
case nct6775:
return (((reg & 0xff00) == 0x100 ||
(reg & 0xff00) == 0x200) &&
@@ -1479,7 +1503,7 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
(reg & 0x00ff) == 0x55)) ||
(reg & 0xfff0) == 0x630 ||
reg == 0x640 || reg == 0x642 ||
- reg == 0x662 ||
+ reg == 0x662 || reg == 0x669 ||
((reg & 0xfff0) == 0x650 && (reg & 0x000f) >= 0x06) ||
reg == 0x73 || reg == 0x75 || reg == 0x77;
case nct6776:
@@ -1490,6 +1514,7 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
(reg & 0x00ff) == 0x55)) ||
(reg & 0xfff0) == 0x630 ||
reg == 0x402 ||
+ (reg >= 0x409 && reg < 0x419 && (reg & 1)) ||
reg == 0x640 || reg == 0x642 ||
((reg & 0xfff0) == 0x650 && (reg & 0x000f) >= 0x06) ||
reg == 0x73 || reg == 0x75 || reg == 0x77;
@@ -1504,6 +1529,7 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
return reg == 0x150 || reg == 0x153 || reg == 0x155 ||
(reg & 0xfff0) == 0x4c0 ||
reg == 0x402 ||
+ (reg >= 0x409 && reg < 0x419 && (reg & 1)) ||
reg == 0x63a || reg == 0x63c || reg == 0x63e ||
reg == 0x640 || reg == 0x642 || reg == 0x64a ||
reg == 0x64c ||
@@ -1987,6 +2013,12 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
data->REG_TEMP_OFFSET[i]);
}
+ for (i = 0; i < NUM_TSI_TEMP; i++) {
+ if (!(data->have_tsi_temp & BIT(i)))
+ continue;
+ data->tsi_temp[i] = data->read_value(data, data->REG_TSI_TEMP[i]);
+ }
+
data->alarms = 0;
for (i = 0; i < NUM_REG_ALARM; i++) {
u8 alarm;
@@ -2670,6 +2702,44 @@ static const struct sensor_template_group nct6775_temp_template_group = {
.base = 1,
};
+static ssize_t show_tsi_temp(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct nct6775_data *data = nct6775_update_device(dev);
+ struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+
+ return sysfs_emit(buf, "%u\n", tsi_temp_from_reg(data->tsi_temp[sattr->index]));
+}
+
+static ssize_t show_tsi_temp_label(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+
+ return sysfs_emit(buf, "TSI%d_TEMP\n", sattr->index);
+}
+
+SENSOR_TEMPLATE(tsi_temp_input, "temp%d_input", 0444, show_tsi_temp, NULL, 0);
+SENSOR_TEMPLATE(tsi_temp_label, "temp%d_label", 0444, show_tsi_temp_label, NULL, 0);
+
+static umode_t nct6775_tsi_temp_is_visible(struct kobject *kobj, struct attribute *attr,
+ int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct nct6775_data *data = dev_get_drvdata(dev);
+ int temp = index / 2;
+
+ return (data->have_tsi_temp & BIT(temp)) ? attr->mode : 0;
+}
+
+/*
+ * The index calculation in nct6775_tsi_temp_is_visible() must be kept in
+ * sync with the size of this array.
+ */
+static struct sensor_device_template *nct6775_tsi_temp_template[] = {
+ &sensor_dev_template_tsi_temp_input,
+ &sensor_dev_template_tsi_temp_label,
+ NULL
+};
+
static ssize_t
show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -3948,10 +4018,11 @@ static int nct6775_probe(struct platform_device *pdev)
const u16 *reg_temp, *reg_temp_over, *reg_temp_hyst, *reg_temp_config;
const u16 *reg_temp_mon, *reg_temp_alternate, *reg_temp_crit;
const u16 *reg_temp_crit_l = NULL, *reg_temp_crit_h = NULL;
- int num_reg_temp, num_reg_temp_mon;
+ int num_reg_temp, num_reg_temp_mon, num_reg_tsi_temp;
u8 cr2a;
struct attribute_group *group;
struct device *hwmon_dev;
+ struct sensor_template_group tsi_temp_tg;
int num_attr_groups = 0;
if (sio_data->access == access_direct) {
@@ -4043,11 +4114,13 @@ static int nct6775_probe(struct platform_device *pdev)
data->ALARM_BITS = NCT6106_ALARM_BITS;
data->REG_BEEP = NCT6106_REG_BEEP;
data->BEEP_BITS = NCT6106_BEEP_BITS;
+ data->REG_TSI_TEMP = NCT6106_REG_TSI_TEMP;
reg_temp = NCT6106_REG_TEMP;
reg_temp_mon = NCT6106_REG_TEMP_MON;
num_reg_temp = ARRAY_SIZE(NCT6106_REG_TEMP);
num_reg_temp_mon = ARRAY_SIZE(NCT6106_REG_TEMP_MON);
+ num_reg_tsi_temp = ARRAY_SIZE(NCT6106_REG_TSI_TEMP);
reg_temp_over = NCT6106_REG_TEMP_OVER;
reg_temp_hyst = NCT6106_REG_TEMP_HYST;
reg_temp_config = NCT6106_REG_TEMP_CONFIG;
@@ -4116,11 +4189,13 @@ static int nct6775_probe(struct platform_device *pdev)
data->ALARM_BITS = NCT6116_ALARM_BITS;
data->REG_BEEP = NCT6106_REG_BEEP;
data->BEEP_BITS = NCT6116_BEEP_BITS;
+ data->REG_TSI_TEMP = NCT6116_REG_TSI_TEMP;
reg_temp = NCT6106_REG_TEMP;
reg_temp_mon = NCT6106_REG_TEMP_MON;
num_reg_temp = ARRAY_SIZE(NCT6106_REG_TEMP);
num_reg_temp_mon = ARRAY_SIZE(NCT6106_REG_TEMP_MON);
+ num_reg_tsi_temp = ARRAY_SIZE(NCT6116_REG_TSI_TEMP);
reg_temp_over = NCT6106_REG_TEMP_OVER;
reg_temp_hyst = NCT6106_REG_TEMP_HYST;
reg_temp_config = NCT6106_REG_TEMP_CONFIG;
@@ -4191,11 +4266,13 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_WEIGHT_TEMP[2] = NCT6775_REG_WEIGHT_TEMP_BASE;
data->REG_ALARM = NCT6775_REG_ALARM;
data->REG_BEEP = NCT6775_REG_BEEP;
+ data->REG_TSI_TEMP = NCT6775_REG_TSI_TEMP;
reg_temp = NCT6775_REG_TEMP;
reg_temp_mon = NCT6775_REG_TEMP_MON;
num_reg_temp = ARRAY_SIZE(NCT6775_REG_TEMP);
num_reg_temp_mon = ARRAY_SIZE(NCT6775_REG_TEMP_MON);
+ num_reg_tsi_temp = ARRAY_SIZE(NCT6775_REG_TSI_TEMP);
reg_temp_over = NCT6775_REG_TEMP_OVER;
reg_temp_hyst = NCT6775_REG_TEMP_HYST;
reg_temp_config = NCT6775_REG_TEMP_CONFIG;
@@ -4264,11 +4341,13 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_WEIGHT_TEMP[2] = NCT6775_REG_WEIGHT_TEMP_BASE;
data->REG_ALARM = NCT6775_REG_ALARM;
data->REG_BEEP = NCT6776_REG_BEEP;
+ data->REG_TSI_TEMP = NCT6776_REG_TSI_TEMP;
reg_temp = NCT6775_REG_TEMP;
reg_temp_mon = NCT6775_REG_TEMP_MON;
num_reg_temp = ARRAY_SIZE(NCT6775_REG_TEMP);
num_reg_temp_mon = ARRAY_SIZE(NCT6775_REG_TEMP_MON);
+ num_reg_tsi_temp = ARRAY_SIZE(NCT6776_REG_TSI_TEMP);
reg_temp_over = NCT6775_REG_TEMP_OVER;
reg_temp_hyst = NCT6775_REG_TEMP_HYST;
reg_temp_config = NCT6776_REG_TEMP_CONFIG;
@@ -4341,11 +4420,13 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_WEIGHT_TEMP[2] = NCT6775_REG_WEIGHT_TEMP_BASE;
data->REG_ALARM = NCT6779_REG_ALARM;
data->REG_BEEP = NCT6776_REG_BEEP;
+ data->REG_TSI_TEMP = NCT6776_REG_TSI_TEMP;
reg_temp = NCT6779_REG_TEMP;
reg_temp_mon = NCT6779_REG_TEMP_MON;
num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP);
num_reg_temp_mon = ARRAY_SIZE(NCT6779_REG_TEMP_MON);
+ num_reg_tsi_temp = ARRAY_SIZE(NCT6776_REG_TSI_TEMP);
reg_temp_over = NCT6779_REG_TEMP_OVER;
reg_temp_hyst = NCT6779_REG_TEMP_HYST;
reg_temp_config = NCT6779_REG_TEMP_CONFIG;
@@ -4460,6 +4541,24 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_BEEP = NCT6776_REG_BEEP;
else
data->REG_BEEP = NCT6792_REG_BEEP;
+ switch (data->kind) {
+ case nct6791:
+ case nct6792:
+ case nct6793:
+ data->REG_TSI_TEMP = NCT6776_REG_TSI_TEMP;
+ num_reg_tsi_temp = ARRAY_SIZE(NCT6776_REG_TSI_TEMP);
+ break;
+ case nct6795:
+ case nct6796:
+ case nct6797:
+ case nct6798:
+ data->REG_TSI_TEMP = NCT6796_REG_TSI_TEMP;
+ num_reg_tsi_temp = ARRAY_SIZE(NCT6796_REG_TSI_TEMP);
+ break;
+ default:
+ num_reg_tsi_temp = 0;
+ break;
+ }
reg_temp = NCT6779_REG_TEMP;
num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP);
@@ -4659,6 +4758,12 @@ static int nct6775_probe(struct platform_device *pdev)
}
#endif /* USE_ALTERNATE */
+ /* Check which TSIx_TEMP registers are active */
+ for (i = 0; i < num_reg_tsi_temp; i++) {
+ if (data->read_value(data, data->REG_TSI_TEMP[i]))
+ data->have_tsi_temp |= BIT(i);
+ }
+
/* Initialize the chip */
nct6775_init_device(data);
@@ -4766,6 +4871,18 @@ static int nct6775_probe(struct platform_device *pdev)
return PTR_ERR(group);
data->groups[num_attr_groups++] = group;
+
+ if (data->have_tsi_temp) {
+ tsi_temp_tg.templates = nct6775_tsi_temp_template;
+ tsi_temp_tg.is_visible = nct6775_tsi_temp_is_visible;
+ tsi_temp_tg.base = fls(data->have_temp) + 1;
+ group = nct6775_create_attr_group(dev, &tsi_temp_tg, fls(data->have_tsi_temp));
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ data->groups[num_attr_groups++] = group;
+ }
+
data->groups[num_attr_groups++] = &nct6775_group_other;
hwmon_dev = devm_hwmon_device_register_with_groups(dev, data->name,
@@ -4985,9 +5102,14 @@ static struct platform_device *pdev[2];
static const char * const asus_wmi_boards[] = {
"ProArt X570-CREATOR WIFI",
+ "Pro B550M-C",
"Pro WS X570-ACE",
"PRIME B360-PLUS",
"PRIME B460-PLUS",
+ "PRIME B550-PLUS",
+ "PRIME B550M-A",
+ "PRIME B550M-A (WI-FI)",
+ "PRIME X570-P",
"PRIME X570-PRO",
"ROG CROSSHAIR VIII DARK HERO",
"ROG CROSSHAIR VIII FORMULA",
@@ -4997,10 +5119,22 @@ static const char * const asus_wmi_boards[] = {
"ROG STRIX B550-E GAMING",
"ROG STRIX B550-F GAMING",
"ROG STRIX B550-F GAMING (WI-FI)",
+ "ROG STRIX B550-F GAMING WIFI II",
"ROG STRIX B550-I GAMING",
+ "ROG STRIX B550-XE GAMING (WI-FI)",
+ "ROG STRIX X570-E GAMING",
"ROG STRIX X570-F GAMING",
"ROG STRIX X570-I GAMING",
"ROG STRIX Z390-E GAMING",
+ "ROG STRIX Z390-F GAMING",
+ "ROG STRIX Z390-H GAMING",
+ "ROG STRIX Z390-I GAMING",
+ "ROG STRIX Z490-A GAMING",
+ "ROG STRIX Z490-E GAMING",
+ "ROG STRIX Z490-F GAMING",
+ "ROG STRIX Z490-G GAMING",
+ "ROG STRIX Z490-G GAMING (WI-FI)",
+ "ROG STRIX Z490-H GAMING",
"ROG STRIX Z490-I GAMING",
"TUF GAMING B550M-PLUS",
"TUF GAMING B550M-PLUS (WI-FI)",
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index 0cb4a0a6cbc1..f00cd59f1d19 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -674,6 +674,9 @@ static ssize_t occ_show_caps_3(struct device *dev,
case 7:
val = caps->user_source;
break;
+ case 8:
+ val = get_unaligned_be16(&caps->soft_min) * 1000000ULL;
+ break;
default:
return -EINVAL;
}
@@ -835,12 +838,13 @@ static int occ_setup_sensor_attrs(struct occ *occ)
case 1:
num_attrs += (sensors->caps.num_sensors * 7);
break;
- case 3:
- show_caps = occ_show_caps_3;
- fallthrough;
case 2:
num_attrs += (sensors->caps.num_sensors * 8);
break;
+ case 3:
+ show_caps = occ_show_caps_3;
+ num_attrs += (sensors->caps.num_sensors * 9);
+ break;
default:
sensors->caps.num_sensors = 0;
}
@@ -1047,6 +1051,15 @@ static int occ_setup_sensor_attrs(struct occ *occ)
attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
show_caps, NULL, 7, 0);
attr++;
+
+ if (sensors->caps.version > 2) {
+ snprintf(attr->name, sizeof(attr->name),
+ "power%d_cap_min_soft", s);
+ attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
+ show_caps, NULL,
+ 8, 0);
+ attr++;
+ }
}
}
diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h
index 5020117be740..2dd4a4d240c0 100644
--- a/drivers/hwmon/occ/common.h
+++ b/drivers/hwmon/occ/common.h
@@ -119,6 +119,8 @@ struct occ {
u8 prev_stat;
u8 prev_ext_stat;
u8 prev_occs_present;
+ u8 prev_ips_status;
+ u8 prev_mode;
};
int occ_setup(struct occ *occ, const char *name);
diff --git a/drivers/hwmon/occ/sysfs.c b/drivers/hwmon/occ/sysfs.c
index 03b16abef67f..b2f788a77746 100644
--- a/drivers/hwmon/occ/sysfs.c
+++ b/drivers/hwmon/occ/sysfs.c
@@ -19,6 +19,8 @@
#define OCC_EXT_STAT_DVFS_POWER BIT(6)
#define OCC_EXT_STAT_MEM_THROTTLE BIT(5)
#define OCC_EXT_STAT_QUICK_DROP BIT(4)
+#define OCC_EXT_STAT_DVFS_VDD BIT(3)
+#define OCC_EXT_STAT_GPU_THROTTLE GENMASK(2, 0)
static ssize_t occ_sysfs_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -63,6 +65,18 @@ static ssize_t occ_sysfs_show(struct device *dev,
else
val = 1;
break;
+ case 8:
+ val = header->ips_status;
+ break;
+ case 9:
+ val = header->mode;
+ break;
+ case 10:
+ val = !!(header->ext_status & OCC_EXT_STAT_DVFS_VDD);
+ break;
+ case 11:
+ val = header->ext_status & OCC_EXT_STAT_GPU_THROTTLE;
+ break;
default:
return -EINVAL;
}
@@ -88,6 +102,10 @@ static SENSOR_DEVICE_ATTR(occ_mem_throttle, 0444, occ_sysfs_show, NULL, 4);
static SENSOR_DEVICE_ATTR(occ_quick_pwr_drop, 0444, occ_sysfs_show, NULL, 5);
static SENSOR_DEVICE_ATTR(occ_state, 0444, occ_sysfs_show, NULL, 6);
static SENSOR_DEVICE_ATTR(occs_present, 0444, occ_sysfs_show, NULL, 7);
+static SENSOR_DEVICE_ATTR(occ_ips_status, 0444, occ_sysfs_show, NULL, 8);
+static SENSOR_DEVICE_ATTR(occ_mode, 0444, occ_sysfs_show, NULL, 9);
+static SENSOR_DEVICE_ATTR(occ_dvfs_vdd, 0444, occ_sysfs_show, NULL, 10);
+static SENSOR_DEVICE_ATTR(occ_gpu_throttle, 0444, occ_sysfs_show, NULL, 11);
static DEVICE_ATTR_RO(occ_error);
static struct attribute *occ_attributes[] = {
@@ -99,6 +117,10 @@ static struct attribute *occ_attributes[] = {
&sensor_dev_attr_occ_quick_pwr_drop.dev_attr.attr,
&sensor_dev_attr_occ_state.dev_attr.attr,
&sensor_dev_attr_occs_present.dev_attr.attr,
+ &sensor_dev_attr_occ_ips_status.dev_attr.attr,
+ &sensor_dev_attr_occ_mode.dev_attr.attr,
+ &sensor_dev_attr_occ_dvfs_vdd.dev_attr.attr,
+ &sensor_dev_attr_occ_gpu_throttle.dev_attr.attr,
&dev_attr_occ_error.attr,
NULL
};
@@ -156,12 +178,34 @@ void occ_sysfs_poll_done(struct occ *occ)
sysfs_notify(&occ->bus_dev->kobj, NULL, name);
}
+ if ((header->ext_status & OCC_EXT_STAT_DVFS_VDD) !=
+ (occ->prev_ext_stat & OCC_EXT_STAT_DVFS_VDD)) {
+ name = sensor_dev_attr_occ_dvfs_vdd.dev_attr.attr.name;
+ sysfs_notify(&occ->bus_dev->kobj, NULL, name);
+ }
+
+ if ((header->ext_status & OCC_EXT_STAT_GPU_THROTTLE) !=
+ (occ->prev_ext_stat & OCC_EXT_STAT_GPU_THROTTLE)) {
+ name = sensor_dev_attr_occ_gpu_throttle.dev_attr.attr.name;
+ sysfs_notify(&occ->bus_dev->kobj, NULL, name);
+ }
+
if ((header->status & OCC_STAT_MASTER) &&
header->occs_present != occ->prev_occs_present) {
name = sensor_dev_attr_occs_present.dev_attr.attr.name;
sysfs_notify(&occ->bus_dev->kobj, NULL, name);
}
+ if (header->ips_status != occ->prev_ips_status) {
+ name = sensor_dev_attr_occ_ips_status.dev_attr.attr.name;
+ sysfs_notify(&occ->bus_dev->kobj, NULL, name);
+ }
+
+ if (header->mode != occ->prev_mode) {
+ name = sensor_dev_attr_occ_mode.dev_attr.attr.name;
+ sysfs_notify(&occ->bus_dev->kobj, NULL, name);
+ }
+
if (occ->error && occ->error != occ->prev_error) {
name = dev_attr_occ_error.attr.name;
sysfs_notify(&occ->bus_dev->kobj, NULL, name);
@@ -174,6 +218,8 @@ done:
occ->prev_stat = header->status;
occ->prev_ext_stat = header->ext_status;
occ->prev_occs_present = header->occs_present;
+ occ->prev_ips_status = header->ips_status;
+ occ->prev_mode = header->mode;
}
int occ_setup_sysfs(struct occ *occ)
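
occ_sysfs_poll_done() repeats one idiom per field: compare the freshly polled value with the cached previous one, sysfs_notify() only on change, then store the new snapshot. A trimmed kernel-style sketch of the idiom; poll_state, poll_done() and the attribute name are hypothetical:

#include <linux/device.h>
#include <linux/sysfs.h>

struct poll_state {
	struct device *dev;
	u8 prev_mode;
};

/* Notify pollers only when the polled value actually changed. */
static void poll_done(struct poll_state *st, u8 mode)
{
	if (mode != st->prev_mode)
		sysfs_notify(&st->dev->kobj, NULL, "occ_mode");

	st->prev_mode = mode;	/* cache the new snapshot */
}
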
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 41f6cbf96d3b..a2ea1d5a8765 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -174,6 +174,13 @@ config SENSORS_LM25066
This driver can also be built as a module. If so, the module will
be called lm25066.
+config SENSORS_LM25066_REGULATOR
+ bool "Regulator support for LM25066 and compatibles"
+ depends on SENSORS_LM25066 && REGULATOR
+ help
+ If you say yes here you get regulator support for National
+ Semiconductor LM25066, LM5064, and LM5066.
+
config SENSORS_LTC2978
tristate "Linear Technologies LTC2978 and compatibles"
help
@@ -189,8 +196,8 @@ config SENSORS_LTC2978_REGULATOR
depends on SENSORS_LTC2978 && REGULATOR
help
If you say yes here you get regulator support for Linear Technology
- LTC3880, LTC3883, LTC3884, LTC3886, LTC3887, LTC3889, LTC7880,
- LTM4644, LTM4675, LTM4676, LTM4677, LTM4678, LTM4680, LTM4686,
+ LTC3880, LTC3883, LTC3884, LTC3886, LTC3887, LTC3889, LTC7880,
+ LTM4644, LTM4675, LTM4676, LTM4677, LTM4678, LTM4680, LTM4686,
and LTM4700.
config SENSORS_LTC3815
@@ -310,6 +317,22 @@ config SENSORS_PIM4328
This driver can also be built as a module. If so, the module will
be called pim4328.
+config SENSORS_PLI1209BC
+ tristate "Vicor PLI1209BC"
+ help
+ If you say yes here you get hardware monitoring support for Vicor
+ PLI1209BC Digital Supervisor.
+
+ This driver can also be built as a module. If so, the module will
+ be called pli1209bc.
+
+config SENSORS_PLI1209BC_REGULATOR
+ bool "Regulator support for PLI1209BC"
+ depends on SENSORS_PLI1209BC && REGULATOR
+ help
+ If you say yes here you get regulator support for Vicor PLI1209BC
+ Digital Supervisor.
+
config SENSORS_PM6764TR
tristate "ST PM6764TR"
help
@@ -394,6 +417,12 @@ config SENSORS_XDPE122
This driver can also be built as a module. If so, the module will
be called xdpe12284.
+config SENSORS_XDPE122_REGULATOR
+ bool "Regulator support for XDPE122 and compatibles"
+ depends on SENSORS_XDPE122 && REGULATOR
+ help
+ If you say yes here you get regulator support for Infineon XDPE122
+ and compatible digital voltage regulators.
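	SENSORS_XDPE122_REGULATOR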
+
config SENSORS_ZL6100
tristate "Intersil ZL6100 and compatibles"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index e5935f70c9e0..a4a96ac71de7 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
obj-$(CONFIG_SENSORS_MP2888) += mp2888.o
obj-$(CONFIG_SENSORS_MP2975) += mp2975.o
obj-$(CONFIG_SENSORS_MP5023) += mp5023.o
+obj-$(CONFIG_SENSORS_PLI1209BC) += pli1209bc.o
obj-$(CONFIG_SENSORS_PM6764TR) += pm6764tr.o
obj-$(CONFIG_SENSORS_PXE1610) += pxe1610.o
obj-$(CONFIG_SENSORS_Q54SJ108A2) += q54sj108a2.o
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index d311e0557401..3b07bfb43e93 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -475,6 +475,7 @@ static int adm1275_probe(struct i2c_client *client)
int vindex = -1, voindex = -1, cindex = -1, pindex = -1;
int tindex = -1;
u32 shunt;
+ u32 avg;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_BYTE_DATA
@@ -687,7 +688,7 @@ static int adm1275_probe(struct i2c_client *client)
if ((config & (ADM1278_VOUT_EN | ADM1278_TEMP1_EN)) !=
(ADM1278_VOUT_EN | ADM1278_TEMP1_EN)) {
config |= ADM1278_VOUT_EN | ADM1278_TEMP1_EN;
- ret = i2c_smbus_write_byte_data(client,
+ ret = i2c_smbus_write_word_data(client,
ADM1275_PMON_CONFIG,
config);
if (ret < 0) {
@@ -756,6 +757,43 @@ static int adm1275_probe(struct i2c_client *client)
return -ENODEV;
}
+ if (data->have_power_sampling &&
+ of_property_read_u32(client->dev.of_node,
+ "adi,power-sample-average", &avg) == 0) {
+ if (!avg || avg > ADM1275_SAMPLES_AVG_MAX ||
+ BIT(__fls(avg)) != avg) {
+ dev_err(&client->dev,
+ "Invalid number of power samples");
+ return -EINVAL;
+ }
+ ret = adm1275_write_pmon_config(data, client, true,
+ ilog2(avg));
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Setting power sample averaging failed with error %d",
+ ret);
+ return ret;
+ }
+ }
+
+ if (of_property_read_u32(client->dev.of_node,
+ "adi,volt-curr-sample-average", &avg) == 0) {
+ if (!avg || avg > ADM1275_SAMPLES_AVG_MAX ||
+ BIT(__fls(avg)) != avg) {
+ dev_err(&client->dev,
+ "Invalid number of voltage/current samples");
+ return -EINVAL;
+ }
+ ret = adm1275_write_pmon_config(data, client, false,
+ ilog2(avg));
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Setting voltage and current sample averaging failed with error %d",
+ ret);
+ return ret;
+ }
+ }
+
if (voindex < 0)
voindex = vindex;
if (vindex >= 0) {
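The two device-tree property blocks above share one validation rule: the sample count must be a non-zero power of two no larger than the chip maximum, and its base-2 logarithm becomes the register field. A standalone sketch of that check follows; the 128 ceiling is an assumption standing in for ADM1275_SAMPLES_AVG_MAX, which is defined elsewhere in the driver.

#include <stdio.h>

/* Mirrors the driver's test: avg is valid only if it is a non-zero power
 * of two no larger than the chip maximum (assumed 128 here). Returns the
 * ilog2() encoding written into PMON_CONFIG, or -1 (driver: -EINVAL). */
static int samples_to_field(unsigned int avg)
{
	const unsigned int max_avg = 128;	/* assumed ADM1275_SAMPLES_AVG_MAX */

	if (!avg || avg > max_avg || (avg & (avg - 1)))
		return -1;
	return __builtin_ctz(avg);		/* 1 -> 0, 2 -> 1, ... 128 -> 7 */
}

int main(void)
{
	const unsigned int tests[] = { 0, 1, 3, 64, 128, 256 };
	unsigned int i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("avg=%3u -> field=%d\n", tests[i], samples_to_field(tests[i]));
	return 0;
}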
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index 8402b41520eb..09792cd03d9f 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -435,6 +435,12 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
return ret;
}
+#if IS_ENABLED(CONFIG_SENSORS_LM25066_REGULATOR)
+static const struct regulator_desc lm25066_reg_desc[] = {
+ PMBUS_REGULATOR("vout", 0),
+};
+#endif
+
static const struct i2c_device_id lm25066_id[] = {
{"lm25056", lm25056},
{"lm25066", lm25066},
@@ -545,6 +551,14 @@ static int lm25066_probe(struct i2c_client *client)
info->m[PSC_CURRENT_IN] = info->m[PSC_CURRENT_IN] * shunt / 1000;
info->m[PSC_POWER] = info->m[PSC_POWER] * shunt / 1000;
+#if IS_ENABLED(CONFIG_SENSORS_LM25066_REGULATOR)
+ /* LM25056 doesn't support OPERATION */
+ if (data->id != lm25056) {
+ info->num_regulators = ARRAY_SIZE(lm25066_reg_desc);
+ info->reg_desc = lm25066_reg_desc;
+ }
+#endif
+
return pmbus_do_probe(client, info);
}
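The IS_ENABLED() gate above is a compile-time decision: when the regulator option is off, the branch and lm25066_reg_desc drop out of the build entirely. A small standalone illustration, with a fallback macro definition that exists only so the sketch compiles outside the kernel:

#include <stdio.h>

/* In the kernel, IS_ENABLED(CONFIG_FOO) expands to 1 when the option is
 * built in (or, via the _MODULE form, =m), otherwise 0, so the compiler
 * discards the dead branch. The stand-in below is for this sketch only. */
#ifndef IS_ENABLED
#define IS_ENABLED(option) 0
#endif

int main(void)
{
	if (IS_ENABLED(CONFIG_SENSORS_LM25066_REGULATOR))
		printf("regulator descriptors registered\n");
	else
		printf("regulator support compiled out\n");
	return 0;
}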
diff --git a/drivers/hwmon/pmbus/pli1209bc.c b/drivers/hwmon/pmbus/pli1209bc.c
new file mode 100644
index 000000000000..05b4ee35ba27
--- /dev/null
+++ b/drivers/hwmon/pmbus/pli1209bc.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hardware monitoring driver for Vicor PLI1209BC Digital Supervisor
+ *
+ * Copyright (c) 2022 9elements GmbH
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pmbus.h>
+#include <linux/regulator/driver.h>
+#include "pmbus.h"
+
+/*
+ * The capability command is only supported at page 0. Probing the device while
+ * the page register is set to 1 will falsely enable PEC support. Disable
+ * capability probing accordingly, since the PLI1209BC does not have any
+ * additional capabilities.
+ */
+static struct pmbus_platform_data pli1209bc_plat_data = {
+ .flags = PMBUS_NO_CAPABILITY,
+};
+
+static int pli1209bc_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ int data;
+
+ switch (reg) {
+ /* PMBUS_READ_POUT uses a direct format with R=0 */
+ case PMBUS_READ_POUT:
+ data = pmbus_read_word_data(client, page, phase, reg);
+ if (data < 0)
+ return data;
+ data = sign_extend32(data, 15) * 10;
+ return clamp_val(data, -32768, 32767) & 0xffff;
+ /*
+ * PMBUS_READ_VOUT and PMBUS_READ_TEMPERATURE_1 return invalid data
+ * when the BCM is turned off. Since it is not possible to return
+ * ENODATA error, return zero instead.
+ */
+ case PMBUS_READ_VOUT:
+ case PMBUS_READ_TEMPERATURE_1:
+ data = pmbus_read_word_data(client, page, phase,
+ PMBUS_STATUS_WORD);
+ if (data < 0)
+ return data;
+ if (data & PB_STATUS_POWER_GOOD_N)
+ return 0;
+ return pmbus_read_word_data(client, page, phase, reg);
+ default:
+ return -ENODATA;
+ }
+}
+
+#if IS_ENABLED(CONFIG_SENSORS_PLI1209BC_REGULATOR)
+static const struct regulator_desc pli1209bc_reg_desc = {
+ .name = "vout2",
+ .id = 1,
+ .of_match = of_match_ptr("vout2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .ops = &pmbus_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+#endif
+
+static struct pmbus_driver_info pli1209bc_info = {
+ .pages = 2,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_IN] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_POWER] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_IN] = 1,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 1,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 1,
+ .m[PSC_CURRENT_IN] = 1,
+ .b[PSC_CURRENT_IN] = 0,
+ .R[PSC_CURRENT_IN] = 3,
+ .m[PSC_CURRENT_OUT] = 1,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 2,
+ .m[PSC_POWER] = 1,
+ .b[PSC_POWER] = 0,
+ .R[PSC_POWER] = 1,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 0,
+ /*
+ * Page 0 sums up all attributes except voltage readings.
+ * The pli1209 digital supervisor only contains a single BCM, making
+ * page 0 redundant.
+ */
+ .func[1] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT
+ | PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT
+ | PMBUS_HAVE_PIN | PMBUS_HAVE_POUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP
+ | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_INPUT,
+ .read_word_data = pli1209bc_read_word_data,
+#if IS_ENABLED(CONFIG_SENSORS_PLI1209BC_REGULATOR)
+ .num_regulators = 1,
+ .reg_desc = &pli1209bc_reg_desc,
+#endif
+};
+
+static int pli1209bc_probe(struct i2c_client *client)
+{
+ client->dev.platform_data = &pli1209bc_plat_data;
+ return pmbus_do_probe(client, &pli1209bc_info);
+}
+
+static const struct i2c_device_id pli1209bc_id[] = {
+ {"pli1209bc", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, pli1209bc_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id pli1209bc_of_match[] = {
+ { .compatible = "vicor,pli1209bc" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pli1209bc_of_match);
+#endif
+
+static struct i2c_driver pli1209bc_driver = {
+ .driver = {
+ .name = "pli1209bc",
+ .of_match_table = of_match_ptr(pli1209bc_of_match),
+ },
+ .probe_new = pli1209bc_probe,
+ .id_table = pli1209bc_id,
+};
+
+module_i2c_driver(pli1209bc_driver);
+
+MODULE_AUTHOR("Marcello Sylvester Bauer <sylv@sylv.io>");
+MODULE_DESCRIPTION("PMBus driver for Vicor PLI1209BC");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
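The coefficient table above uses the PMBus "direct" format, X = (Y * 10^-R - b) / m. Because the info structure declares R=1 for power while the chip emits READ_POUT with R=0, the read hook pre-multiplies the raw word by 10 so the core's arithmetic comes out right. A minimal decode sketch with example values:

#include <stdio.h>
#include <stdint.h>

/* PMBus direct decode: X = (Y * 10^-R - b) / m. Coefficients here match
 * the power entries above (m=1, b=0, R=1); the raw reading is an example. */
static double direct_decode(int32_t y, int m, int b, int R)
{
	double x = y;

	while (R > 0) { x /= 10.0; R--; }
	while (R < 0) { x *= 10.0; R++; }
	return (x - b) / m;
}

int main(void)
{
	int32_t raw = 123;		/* chip reading with R=0: 123 W */
	int32_t scaled = raw * 10;	/* what pli1209bc_read_word_data() returns */

	printf("decoded: %.1f W\n", direct_decode(scaled, 1, 0, 1));
	return 0;
}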
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index e0aa8aa46d8c..e74b6ef070f3 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -319,6 +319,7 @@ enum pmbus_fan_mode { percent = 0, rpm };
/*
* STATUS_VOUT, STATUS_INPUT
*/
+#define PB_VOLTAGE_VIN_OFF BIT(3)
#define PB_VOLTAGE_UV_FAULT BIT(4)
#define PB_VOLTAGE_UV_WARNING BIT(5)
#define PB_VOLTAGE_OV_WARNING BIT(6)
@@ -464,6 +465,7 @@ extern const struct regulator_ops pmbus_regulator_ops;
#define PMBUS_REGULATOR(_name, _id) \
[_id] = { \
.name = (_name # _id), \
+ .supply_name = "vin", \
.id = (_id), \
.of_match = of_match_ptr(_name # _id), \
.regulators_node = of_match_ptr("regulators"), \
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index ac2fbee1ba9c..b2618b1d529e 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -1373,7 +1373,7 @@ static const struct pmbus_limit_attr vin_limit_attrs[] = {
.reg = PMBUS_VIN_UV_FAULT_LIMIT,
.attr = "lcrit",
.alarm = "lcrit_alarm",
- .sbit = PB_VOLTAGE_UV_FAULT,
+ .sbit = PB_VOLTAGE_UV_FAULT | PB_VOLTAGE_VIN_OFF,
}, {
.reg = PMBUS_VIN_OV_WARN_LIMIT,
.attr = "max",
@@ -2391,10 +2391,14 @@ static int pmbus_regulator_is_enabled(struct regulator_dev *rdev)
{
struct device *dev = rdev_get_dev(rdev);
struct i2c_client *client = to_i2c_client(dev->parent);
+ struct pmbus_data *data = i2c_get_clientdata(client);
u8 page = rdev_get_id(rdev);
int ret;
+ mutex_lock(&data->update_lock);
ret = pmbus_read_byte_data(client, page, PMBUS_OPERATION);
+ mutex_unlock(&data->update_lock);
+
if (ret < 0)
return ret;
@@ -2405,11 +2409,17 @@ static int _pmbus_regulator_on_off(struct regulator_dev *rdev, bool enable)
{
struct device *dev = rdev_get_dev(rdev);
struct i2c_client *client = to_i2c_client(dev->parent);
+ struct pmbus_data *data = i2c_get_clientdata(client);
u8 page = rdev_get_id(rdev);
+ int ret;
- return pmbus_update_byte_data(client, page, PMBUS_OPERATION,
- PB_OPERATION_CONTROL_ON,
- enable ? PB_OPERATION_CONTROL_ON : 0);
+ mutex_lock(&data->update_lock);
+ ret = pmbus_update_byte_data(client, page, PMBUS_OPERATION,
+ PB_OPERATION_CONTROL_ON,
+ enable ? PB_OPERATION_CONTROL_ON : 0);
+ mutex_unlock(&data->update_lock);
+
+ return ret;
}
static int pmbus_regulator_enable(struct regulator_dev *rdev)
@@ -2422,10 +2432,124 @@ static int pmbus_regulator_disable(struct regulator_dev *rdev)
return _pmbus_regulator_on_off(rdev, 0);
}
+/* A PMBus status flag and the corresponding REGULATOR_ERROR_* flag */
+struct pmbus_regulator_status_assoc {
+ int pflag, rflag;
+};
+
+/* PMBus->regulator bit mappings for a PMBus status register */
+struct pmbus_regulator_status_category {
+ int func;
+ int reg;
+ const struct pmbus_regulator_status_assoc *bits; /* zero-terminated */
+};
+
+static const struct pmbus_regulator_status_category pmbus_regulator_flag_map[] = {
+ {
+ .func = PMBUS_HAVE_STATUS_VOUT,
+ .reg = PMBUS_STATUS_VOUT,
+ .bits = (const struct pmbus_regulator_status_assoc[]) {
+ { PB_VOLTAGE_UV_WARNING, REGULATOR_ERROR_UNDER_VOLTAGE_WARN },
+ { PB_VOLTAGE_UV_FAULT, REGULATOR_ERROR_UNDER_VOLTAGE },
+ { PB_VOLTAGE_OV_WARNING, REGULATOR_ERROR_OVER_VOLTAGE_WARN },
+ { PB_VOLTAGE_OV_FAULT, REGULATOR_ERROR_REGULATION_OUT },
+ { },
+ },
+ }, {
+ .func = PMBUS_HAVE_STATUS_IOUT,
+ .reg = PMBUS_STATUS_IOUT,
+ .bits = (const struct pmbus_regulator_status_assoc[]) {
+ { PB_IOUT_OC_WARNING, REGULATOR_ERROR_OVER_CURRENT_WARN },
+ { PB_IOUT_OC_FAULT, REGULATOR_ERROR_OVER_CURRENT },
+ { PB_IOUT_OC_LV_FAULT, REGULATOR_ERROR_OVER_CURRENT },
+ { },
+ },
+ }, {
+ .func = PMBUS_HAVE_STATUS_TEMP,
+ .reg = PMBUS_STATUS_TEMPERATURE,
+ .bits = (const struct pmbus_regulator_status_assoc[]) {
+ { PB_TEMP_OT_WARNING, REGULATOR_ERROR_OVER_TEMP_WARN },
+ { PB_TEMP_OT_FAULT, REGULATOR_ERROR_OVER_TEMP },
+ { },
+ },
+ },
+};
+
+static int pmbus_regulator_get_error_flags(struct regulator_dev *rdev, unsigned int *flags)
+{
+ int i, status;
+ const struct pmbus_regulator_status_category *cat;
+ const struct pmbus_regulator_status_assoc *bit;
+ struct device *dev = rdev_get_dev(rdev);
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ u8 page = rdev_get_id(rdev);
+ int func = data->info->func[page];
+
+ *flags = 0;
+
+ mutex_lock(&data->update_lock);
+
+ for (i = 0; i < ARRAY_SIZE(pmbus_regulator_flag_map); i++) {
+ cat = &pmbus_regulator_flag_map[i];
+ if (!(func & cat->func))
+ continue;
+
+ status = pmbus_read_byte_data(client, page, cat->reg);
+ if (status < 0) {
+ mutex_unlock(&data->update_lock);
+ return status;
+ }
+
+ for (bit = cat->bits; bit->pflag; bit++) {
+ if (status & bit->pflag)
+ *flags |= bit->rflag;
+ }
+ }
+
+ /*
+ * Map what bits of STATUS_{WORD,BYTE} we can to REGULATOR_ERROR_*
+ * bits. Some of the other bits are tempting (especially for cases
+ * where we don't have the relevant PMBUS_HAVE_STATUS_*
+ * functionality), but there's an unfortunate ambiguity in that
+ * they're defined as indicating a fault *or* a warning, so we can't
+ * easily determine whether to report REGULATOR_ERROR_<foo> or
+ * REGULATOR_ERROR_<foo>_WARN.
+ */
+ status = pmbus_get_status(client, page, PMBUS_STATUS_WORD);
+ mutex_unlock(&data->update_lock);
+ if (status < 0)
+ return status;
+
+ if (pmbus_regulator_is_enabled(rdev) && (status & PB_STATUS_OFF))
+ *flags |= REGULATOR_ERROR_FAIL;
+
+ /*
+ * Unlike most other status bits, PB_STATUS_{IOUT_OC,VOUT_OV} are
+ * defined strictly as fault indicators (not warnings).
+ */
+ if (status & PB_STATUS_IOUT_OC)
+ *flags |= REGULATOR_ERROR_OVER_CURRENT;
+ if (status & PB_STATUS_VOUT_OV)
+ *flags |= REGULATOR_ERROR_REGULATION_OUT;
+
+ /*
+ * If we haven't discovered any thermal faults or warnings via
+ * PMBUS_STATUS_TEMPERATURE, map PB_STATUS_TEMPERATURE to a warning as
+ * a (conservative) best-effort interpretation.
+ */
+ if (!(*flags & (REGULATOR_ERROR_OVER_TEMP | REGULATOR_ERROR_OVER_TEMP_WARN)) &&
+ (status & PB_STATUS_TEMPERATURE))
+ *flags |= REGULATOR_ERROR_OVER_TEMP_WARN;
+
+ return 0;
+}
+
const struct regulator_ops pmbus_regulator_ops = {
.enable = pmbus_regulator_enable,
.disable = pmbus_regulator_disable,
.is_enabled = pmbus_regulator_is_enabled,
+ .get_error_flags = pmbus_regulator_get_error_flags,
};
EXPORT_SYMBOL_NS_GPL(pmbus_regulator_ops, PMBUS);
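The table-driven mapping above surfaces PMBus status bits through the generic regulator API. A hedged consumer-side sketch of how those REGULATOR_ERROR_* bits might be read back; the "vout" supply name and the consumer device are placeholders, not something this patch establishes:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/driver.h>

/* Hypothetical consumer: query whatever error bits
 * pmbus_regulator_get_error_flags() reported for its supply. */
static void report_supply_errors(struct device *dev)
{
	struct regulator *reg;
	unsigned int flags;

	reg = devm_regulator_get(dev, "vout");	/* placeholder supply name */
	if (IS_ERR(reg))
		return;

	if (regulator_get_error_flags(reg, &flags))
		return;

	if (flags & REGULATOR_ERROR_OVER_CURRENT)
		dev_warn(dev, "supply over-current fault\n");
	if (flags & REGULATOR_ERROR_OVER_TEMP_WARN)
		dev_warn(dev, "supply temperature warning\n");
}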
diff --git a/drivers/hwmon/pmbus/xdpe12284.c b/drivers/hwmon/pmbus/xdpe12284.c
index b07da06a40c9..18fffc5d749b 100644
--- a/drivers/hwmon/pmbus/xdpe12284.c
+++ b/drivers/hwmon/pmbus/xdpe12284.c
@@ -10,6 +10,8 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/regulator/driver.h>
+
#include "pmbus.h"
#define XDPE122_PROT_VR12_5MV 0x01 /* VR12.0 mode, 5-mV DAC */
@@ -76,7 +78,22 @@ static int xdpe122_identify(struct i2c_client *client,
struct pmbus_driver_info *info)
{
u8 vout_params;
- int i, ret;
+ int i, ret, vout_mode;
+
+ vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
+ if (vout_mode >= 0 && vout_mode != 0xff) {
+ switch (vout_mode >> 5) {
+ case 0:
+ info->format[PSC_VOLTAGE_OUT] = linear;
+ return 0;
+ case 1:
+ info->format[PSC_VOLTAGE_OUT] = vid;
+ info->read_word_data = xdpe122_read_word_data;
+ break;
+ default:
+ return -ENODEV;
+ }
+ }
for (i = 0; i < XDPE122_PAGE_NUM; i++) {
/* Read the register with VOUT scaling value.*/
@@ -107,10 +124,14 @@ static int xdpe122_identify(struct i2c_client *client,
return 0;
}
+static const struct regulator_desc xdpe122_reg_desc[] = {
+ PMBUS_REGULATOR("vout", 0),
+ PMBUS_REGULATOR("vout", 1),
+};
+
static struct pmbus_driver_info xdpe122_info = {
.pages = XDPE122_PAGE_NUM,
.format[PSC_VOLTAGE_IN] = linear,
- .format[PSC_VOLTAGE_OUT] = vid,
.format[PSC_TEMPERATURE] = linear,
.format[PSC_CURRENT_IN] = linear,
.format[PSC_CURRENT_OUT] = linear,
@@ -124,7 +145,10 @@ static struct pmbus_driver_info xdpe122_info = {
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT,
.identify = xdpe122_identify,
- .read_word_data = xdpe122_read_word_data,
+#if IS_ENABLED(CONFIG_SENSORS_XDPE122_REGULATOR)
+ .num_regulators = 2,
+ .reg_desc = xdpe122_reg_desc,
+#endif
};
static int xdpe122_probe(struct i2c_client *client)
@@ -140,6 +164,7 @@ static int xdpe122_probe(struct i2c_client *client)
}
static const struct i2c_device_id xdpe122_id[] = {
+ {"xdpe11280", 0},
{"xdpe12254", 0},
{"xdpe12284", 0},
{}
@@ -148,6 +173,7 @@ static const struct i2c_device_id xdpe122_id[] = {
MODULE_DEVICE_TABLE(i2c, xdpe122_id);
static const struct of_device_id __maybe_unused xdpe122_of_match[] = {
+ {.compatible = "infineon,xdpe11280"},
{.compatible = "infineon,xdpe12254"},
{.compatible = "infineon,xdpe12284"},
{}
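The identify hook above keys off the standard PMBus VOUT_MODE layout: bits [7:5] select the data format, bits [4:0] carry the linear-mode exponent as a 5-bit two's-complement value. A standalone decode sketch with an example register value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t vout_mode = 0x17;	/* example: linear format, exponent -9 */
	int exp;

	switch (vout_mode >> 5) {
	case 0:
		/* sign-extend the 5-bit exponent field */
		exp = ((int)(vout_mode & 0x1f) ^ 0x10) - 0x10;
		printf("linear, exponent %d\n", exp);
		break;
	case 1:
		printf("VID\n");
		break;
	default:
		printf("unsupported format\n");
		break;
	}
	return 0;
}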
diff --git a/drivers/hwmon/powr1220.c b/drivers/hwmon/powr1220.c
index 9e086338dcba..f77dc6db31ac 100644
--- a/drivers/hwmon/powr1220.c
+++ b/drivers/hwmon/powr1220.c
@@ -22,6 +22,8 @@
#define ADC_STEP_MV 2
#define ADC_MAX_LOW_MEASUREMENT_MV 2000
+enum powr1xxx_chips { powr1014, powr1220 };
+
enum powr1220_regs {
VMON_STATUS0,
VMON_STATUS1,
@@ -74,6 +76,7 @@ enum powr1220_adc_values {
struct powr1220_data {
struct i2c_client *client;
struct mutex update_lock;
+ u8 max_channels;
bool adc_valid[MAX_POWR1220_ADC_VALUES];
/* the next value is in jiffies */
unsigned long adc_last_updated[MAX_POWR1220_ADC_VALUES];
@@ -111,7 +114,7 @@ static int powr1220_read_adc(struct device *dev, int ch_num)
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->adc_last_updated[ch_num] + HZ) ||
- !data->adc_valid[ch_num]) {
+ !data->adc_valid[ch_num]) {
/*
* figure out if we need to use the attenuator for
* high inputs or inputs that we don't yet have a measurement
@@ -119,12 +122,12 @@ static int powr1220_read_adc(struct device *dev, int ch_num)
* max reading.
*/
if (data->adc_maxes[ch_num] > ADC_MAX_LOW_MEASUREMENT_MV ||
- data->adc_maxes[ch_num] == 0)
+ data->adc_maxes[ch_num] == 0)
adc_range = 1 << 4;
/* set the attenuator and mux */
result = i2c_smbus_write_byte_data(data->client, ADC_MUX,
- adc_range | ch_num);
+ adc_range | ch_num);
if (result)
goto exit;
@@ -167,135 +170,116 @@ exit:
return result;
}
-/* Shows the voltage associated with the specified ADC channel */
-static ssize_t powr1220_voltage_show(struct device *dev,
- struct device_attribute *dev_attr,
- char *buf)
+static umode_t
+powr1220_is_visible(const void *data, enum hwmon_sensor_types type,
+		    u32 attr, int channel)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
- int adc_val = powr1220_read_adc(dev, attr->index);
-
- if (adc_val < 0)
- return adc_val;
+ struct powr1220_data *chip_data = (struct powr1220_data *)data;
+
+ if (channel >= chip_data->max_channels)
+ return 0;
+
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_highest:
+ case hwmon_in_label:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
- return sprintf(buf, "%d\n", adc_val);
+ return 0;
}
-/* Shows the maximum setting associated with the specified ADC channel */
-static ssize_t powr1220_max_show(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+static int
+powr1220_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
- struct powr1220_data *data = dev_get_drvdata(dev);
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_label:
+ *str = input_names[channel];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
- return sprintf(buf, "%d\n", data->adc_maxes[attr->index]);
+ return -EOPNOTSUPP;
}
-/* Shows the label associated with the specified ADC channel */
-static ssize_t powr1220_label_show(struct device *dev,
- struct device_attribute *dev_attr,
- char *buf)
+static int
+powr1220_read(struct device *dev, enum hwmon_sensor_types type,
+	      u32 attr, int channel, long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ struct powr1220_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ ret = powr1220_read_adc(dev, channel);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ break;
+ case hwmon_in_highest:
+ *val = data->adc_maxes[channel];
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
- return sprintf(buf, "%s\n", input_names[attr->index]);
+ return 0;
}
-static SENSOR_DEVICE_ATTR_RO(in0_input, powr1220_voltage, VMON1);
-static SENSOR_DEVICE_ATTR_RO(in1_input, powr1220_voltage, VMON2);
-static SENSOR_DEVICE_ATTR_RO(in2_input, powr1220_voltage, VMON3);
-static SENSOR_DEVICE_ATTR_RO(in3_input, powr1220_voltage, VMON4);
-static SENSOR_DEVICE_ATTR_RO(in4_input, powr1220_voltage, VMON5);
-static SENSOR_DEVICE_ATTR_RO(in5_input, powr1220_voltage, VMON6);
-static SENSOR_DEVICE_ATTR_RO(in6_input, powr1220_voltage, VMON7);
-static SENSOR_DEVICE_ATTR_RO(in7_input, powr1220_voltage, VMON8);
-static SENSOR_DEVICE_ATTR_RO(in8_input, powr1220_voltage, VMON9);
-static SENSOR_DEVICE_ATTR_RO(in9_input, powr1220_voltage, VMON10);
-static SENSOR_DEVICE_ATTR_RO(in10_input, powr1220_voltage, VMON11);
-static SENSOR_DEVICE_ATTR_RO(in11_input, powr1220_voltage, VMON12);
-static SENSOR_DEVICE_ATTR_RO(in12_input, powr1220_voltage, VCCA);
-static SENSOR_DEVICE_ATTR_RO(in13_input, powr1220_voltage, VCCINP);
-
-static SENSOR_DEVICE_ATTR_RO(in0_highest, powr1220_max, VMON1);
-static SENSOR_DEVICE_ATTR_RO(in1_highest, powr1220_max, VMON2);
-static SENSOR_DEVICE_ATTR_RO(in2_highest, powr1220_max, VMON3);
-static SENSOR_DEVICE_ATTR_RO(in3_highest, powr1220_max, VMON4);
-static SENSOR_DEVICE_ATTR_RO(in4_highest, powr1220_max, VMON5);
-static SENSOR_DEVICE_ATTR_RO(in5_highest, powr1220_max, VMON6);
-static SENSOR_DEVICE_ATTR_RO(in6_highest, powr1220_max, VMON7);
-static SENSOR_DEVICE_ATTR_RO(in7_highest, powr1220_max, VMON8);
-static SENSOR_DEVICE_ATTR_RO(in8_highest, powr1220_max, VMON9);
-static SENSOR_DEVICE_ATTR_RO(in9_highest, powr1220_max, VMON10);
-static SENSOR_DEVICE_ATTR_RO(in10_highest, powr1220_max, VMON11);
-static SENSOR_DEVICE_ATTR_RO(in11_highest, powr1220_max, VMON12);
-static SENSOR_DEVICE_ATTR_RO(in12_highest, powr1220_max, VCCA);
-static SENSOR_DEVICE_ATTR_RO(in13_highest, powr1220_max, VCCINP);
-
-static SENSOR_DEVICE_ATTR_RO(in0_label, powr1220_label, VMON1);
-static SENSOR_DEVICE_ATTR_RO(in1_label, powr1220_label, VMON2);
-static SENSOR_DEVICE_ATTR_RO(in2_label, powr1220_label, VMON3);
-static SENSOR_DEVICE_ATTR_RO(in3_label, powr1220_label, VMON4);
-static SENSOR_DEVICE_ATTR_RO(in4_label, powr1220_label, VMON5);
-static SENSOR_DEVICE_ATTR_RO(in5_label, powr1220_label, VMON6);
-static SENSOR_DEVICE_ATTR_RO(in6_label, powr1220_label, VMON7);
-static SENSOR_DEVICE_ATTR_RO(in7_label, powr1220_label, VMON8);
-static SENSOR_DEVICE_ATTR_RO(in8_label, powr1220_label, VMON9);
-static SENSOR_DEVICE_ATTR_RO(in9_label, powr1220_label, VMON10);
-static SENSOR_DEVICE_ATTR_RO(in10_label, powr1220_label, VMON11);
-static SENSOR_DEVICE_ATTR_RO(in11_label, powr1220_label, VMON12);
-static SENSOR_DEVICE_ATTR_RO(in12_label, powr1220_label, VCCA);
-static SENSOR_DEVICE_ATTR_RO(in13_label, powr1220_label, VCCINP);
-
-static struct attribute *powr1220_attrs[] = {
- &sensor_dev_attr_in0_input.dev_attr.attr,
- &sensor_dev_attr_in1_input.dev_attr.attr,
- &sensor_dev_attr_in2_input.dev_attr.attr,
- &sensor_dev_attr_in3_input.dev_attr.attr,
- &sensor_dev_attr_in4_input.dev_attr.attr,
- &sensor_dev_attr_in5_input.dev_attr.attr,
- &sensor_dev_attr_in6_input.dev_attr.attr,
- &sensor_dev_attr_in7_input.dev_attr.attr,
- &sensor_dev_attr_in8_input.dev_attr.attr,
- &sensor_dev_attr_in9_input.dev_attr.attr,
- &sensor_dev_attr_in10_input.dev_attr.attr,
- &sensor_dev_attr_in11_input.dev_attr.attr,
- &sensor_dev_attr_in12_input.dev_attr.attr,
- &sensor_dev_attr_in13_input.dev_attr.attr,
-
- &sensor_dev_attr_in0_highest.dev_attr.attr,
- &sensor_dev_attr_in1_highest.dev_attr.attr,
- &sensor_dev_attr_in2_highest.dev_attr.attr,
- &sensor_dev_attr_in3_highest.dev_attr.attr,
- &sensor_dev_attr_in4_highest.dev_attr.attr,
- &sensor_dev_attr_in5_highest.dev_attr.attr,
- &sensor_dev_attr_in6_highest.dev_attr.attr,
- &sensor_dev_attr_in7_highest.dev_attr.attr,
- &sensor_dev_attr_in8_highest.dev_attr.attr,
- &sensor_dev_attr_in9_highest.dev_attr.attr,
- &sensor_dev_attr_in10_highest.dev_attr.attr,
- &sensor_dev_attr_in11_highest.dev_attr.attr,
- &sensor_dev_attr_in12_highest.dev_attr.attr,
- &sensor_dev_attr_in13_highest.dev_attr.attr,
-
- &sensor_dev_attr_in0_label.dev_attr.attr,
- &sensor_dev_attr_in1_label.dev_attr.attr,
- &sensor_dev_attr_in2_label.dev_attr.attr,
- &sensor_dev_attr_in3_label.dev_attr.attr,
- &sensor_dev_attr_in4_label.dev_attr.attr,
- &sensor_dev_attr_in5_label.dev_attr.attr,
- &sensor_dev_attr_in6_label.dev_attr.attr,
- &sensor_dev_attr_in7_label.dev_attr.attr,
- &sensor_dev_attr_in8_label.dev_attr.attr,
- &sensor_dev_attr_in9_label.dev_attr.attr,
- &sensor_dev_attr_in10_label.dev_attr.attr,
- &sensor_dev_attr_in11_label.dev_attr.attr,
- &sensor_dev_attr_in12_label.dev_attr.attr,
- &sensor_dev_attr_in13_label.dev_attr.attr,
+static const struct hwmon_channel_info *powr1220_info[] = {
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_HIGHEST | HWMON_I_LABEL),
NULL
};
-ATTRIBUTE_GROUPS(powr1220);
+static const struct hwmon_ops powr1220_hwmon_ops = {
+ .read = powr1220_read,
+ .read_string = powr1220_read_string,
+ .is_visible = powr1220_is_visible,
+};
+
+static const struct hwmon_chip_info powr1220_chip_info = {
+ .ops = &powr1220_hwmon_ops,
+ .info = powr1220_info,
+};
+
+static const struct i2c_device_id powr1220_ids[];
static int powr1220_probe(struct i2c_client *client)
{
@@ -309,17 +293,30 @@ static int powr1220_probe(struct i2c_client *client)
if (!data)
return -ENOMEM;
+ switch (i2c_match_id(powr1220_ids, client)->driver_data) {
+ case powr1014:
+ data->max_channels = 10;
+ break;
+ default:
+ data->max_channels = 12;
+ break;
+ }
+
mutex_init(&data->update_lock);
data->client = client;
- hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
- client->name, data, powr1220_groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(&client->dev,
+ client->name,
+ data,
+ &powr1220_chip_info,
+ NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id powr1220_ids[] = {
- { "powr1220", 0, },
+ { "powr1014", powr1014, },
+ { "powr1220", powr1220, },
{ }
};
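With the with_info API used above, one channel_info table serves both chips: the core calls is_visible() for every declared attribute, and a zero return simply suppresses its creation. An illustrative (non-kernel) sketch of that gating for the 10-input POWR1014:

#include <stdio.h>

static int max_channels = 10;	/* powr1014; 12 for the powr1220 */

/* Stand-in for powr1220_is_visible(): 0 hides the attribute, 0444 makes
 * it world-readable. */
static unsigned int is_visible(int channel)
{
	return channel >= max_channels ? 0 : 0444;
}

int main(void)
{
	int ch;

	for (ch = 0; ch < 14; ch++)
		printf("in%d_input: %s\n", ch,
		       is_visible(ch) ? "created (r--r--r--)" : "hidden");
	return 0;
}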
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index 8f1b569c69e7..25fbbd4c9a2b 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
@@ -51,6 +52,9 @@ static const u16 SCH5627_REG_FAN[SCH5627_NO_FANS] = {
static const u16 SCH5627_REG_FAN_MIN[SCH5627_NO_FANS] = {
0x62, 0x64, 0x66, 0x68 };
+static const u16 SCH5627_REG_PWM_MAP[SCH5627_NO_FANS] = {
+ 0xA0, 0xA1, 0xA2, 0xA3 };
+
static const u16 SCH5627_REG_IN_MSB[SCH5627_NO_IN] = {
0x22, 0x23, 0x24, 0x25, 0x189 };
static const u16 SCH5627_REG_IN_LSN[SCH5627_NO_IN] = {
@@ -222,6 +226,9 @@ static int reg_to_rpm(u16 reg)
static umode_t sch5627_is_visible(const void *drvdata, enum hwmon_sensor_types type, u32 attr,
int channel)
{
+ if (type == hwmon_pwm && attr == hwmon_pwm_auto_channels_temp)
+ return 0644;
+
return 0444;
}
@@ -277,6 +284,23 @@ static int sch5627_read(struct device *dev, enum hwmon_sensor_types type, u32 at
break;
}
break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_auto_channels_temp:
+ mutex_lock(&data->update_lock);
+ ret = sch56xx_read_virtual_reg(data->addr, SCH5627_REG_PWM_MAP[channel]);
+ mutex_unlock(&data->update_lock);
+
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+
+ return 0;
+ default:
+ break;
+ }
+ break;
case hwmon_in:
ret = sch5627_update_in(data);
if (ret < 0)
@@ -317,10 +341,42 @@ static int sch5627_read_string(struct device *dev, enum hwmon_sensor_types type,
return -EOPNOTSUPP;
}
+static int sch5627_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
+ long val)
+{
+ struct sch5627_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_auto_channels_temp:
+ /* registers are 8 bit wide */
+ if (val > U8_MAX || val < 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ ret = sch56xx_write_virtual_reg(data->addr, SCH5627_REG_PWM_MAP[channel],
+ val);
+ mutex_unlock(&data->update_lock);
+
+ return ret;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
static const struct hwmon_ops sch5627_ops = {
.is_visible = sch5627_is_visible,
.read = sch5627_read,
.read_string = sch5627_read_string,
+ .write = sch5627_write,
};
static const struct hwmon_channel_info *sch5627_info[] = {
@@ -341,6 +397,12 @@ static const struct hwmon_channel_info *sch5627_info[] = {
HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_FAULT,
HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_FAULT
),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_AUTO_CHANNELS_TEMP,
+ HWMON_PWM_AUTO_CHANNELS_TEMP,
+ HWMON_PWM_AUTO_CHANNELS_TEMP,
+ HWMON_PWM_AUTO_CHANNELS_TEMP
+ ),
HWMON_CHANNEL_INFO(in,
HWMON_I_INPUT | HWMON_I_LABEL,
HWMON_I_INPUT | HWMON_I_LABEL,
@@ -456,11 +518,20 @@ static int sch5627_probe(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id sch5627_device_id[] = {
+ {
+ .name = "sch5627",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, sch5627_device_id);
+
static struct platform_driver sch5627_driver = {
.driver = {
.name = DRVNAME,
},
.probe = sch5627_probe,
+ .id_table = sch5627_device_id,
};
module_platform_driver(sch5627_driver);
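The new attribute exposes the chip's PWM map registers (0xA0..0xA3) directly, so the value written is passed straight through to the hardware. A hedged userspace sketch; the hwmon path and value are illustrative, and the value semantics follow the standard hwmon pwmX_auto_channels_temp ABI (a bitmask of temperature channels):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/hwmon/hwmon0/pwm1_auto_channels_temp", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* bitmask of temperature channels; 1 selects temp channel 1 */
	fprintf(f, "1\n");
	fclose(f);
	return 0;
}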
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 39ff1c9b1df5..269757bc3a9e 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
@@ -501,12 +502,21 @@ error:
return err;
}
+static const struct platform_device_id sch5636_device_id[] = {
+ {
+ .name = "sch5636",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, sch5636_device_id);
+
static struct platform_driver sch5636_driver = {
.driver = {
.name = DRVNAME,
},
.probe = sch5636_probe,
.remove = sch5636_remove,
+ .id_table = sch5636_device_id,
};
module_platform_driver(sch5636_driver);
diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
index 40cdadad35e5..3ece53adabd6 100644
--- a/drivers/hwmon/sch56xx-common.c
+++ b/drivers/hwmon/sch56xx-common.c
@@ -7,8 +7,10 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/dmi.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/acpi.h>
@@ -19,7 +21,10 @@
#include <linux/slab.h>
#include "sch56xx-common.h"
-/* Insmod parameters */
+static bool ignore_dmi;
+module_param(ignore_dmi, bool, 0);
+MODULE_PARM_DESC(ignore_dmi, "Omit DMI check for supported devices (default=0)");
+
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
@@ -134,7 +139,7 @@ static int sch56xx_send_cmd(u16 addr, u8 cmd, u16 reg, u8 v)
/* EM Interface Polling "Algorithm" */
for (i = 0; i < max_busy_polls + max_lazy_polls; i++) {
if (i >= max_busy_polls)
- msleep(1);
+ usleep_range(1000, 2000);
/* Read Interrupt source Register */
val = inb(addr + 8);
/* Write Clear the interrupt source bits */
@@ -422,7 +427,7 @@ void sch56xx_watchdog_register(struct device *parent, u16 addr, u32 revision,
data->wddev.max_timeout = 255 * 60;
watchdog_set_nowayout(&data->wddev, nowayout);
if (output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)
- set_bit(WDOG_ACTIVE, &data->wddev.status);
+ set_bit(WDOG_HW_RUNNING, &data->wddev.status);
/* Since the watchdog uses a downcounter there is no register to read
the BIOS set timeout from (if any was set at all) ->
@@ -518,11 +523,42 @@ static int __init sch56xx_device_add(int address, const char *name)
return PTR_ERR_OR_ZERO(sch56xx_pdev);
}
+/* For autoloading only */
+static const struct dmi_system_id sch56xx_dmi_table[] __initconst = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ },
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, sch56xx_dmi_table);
+
static int __init sch56xx_init(void)
{
- int address;
const char *name = NULL;
+ int address;
+ if (!ignore_dmi) {
+ if (!dmi_check_system(sch56xx_dmi_table))
+ return -ENODEV;
+
+ /*
+ * Some machines like the Esprimo P720 and Esprimo C700 have
+ * onboard devices named " Antiope"/" Theseus" instead of
+ * "Antiope"/"Theseus", so we need to check for both.
+ */
+ if (!dmi_find_device(DMI_DEV_TYPE_OTHER, "Antiope", NULL) &&
+ !dmi_find_device(DMI_DEV_TYPE_OTHER, " Antiope", NULL) &&
+ !dmi_find_device(DMI_DEV_TYPE_OTHER, "Theseus", NULL) &&
+ !dmi_find_device(DMI_DEV_TYPE_OTHER, " Theseus", NULL))
+ return -ENODEV;
+ }
+
+ /*
+ * Some devices like the Esprimo C700 have both onboard devices,
+	 * so we still have to check manually.
+ */
address = sch56xx_find(0x4e, &name);
if (address < 0)
address = sch56xx_find(0x2e, &name);
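The init path above now gates probing in two stages: a DMI vendor match, then a scan for one of the known onboard device names, including the space-prefixed variants some BIOSes report. A standalone illustration of that filter logic:

#include <stdio.h>
#include <string.h>

static const char *onboard[] = { "Antiope", " Antiope", "Theseus", " Theseus" };

/* Stand-in for the dmi_check_system() + dmi_find_device() pair above. */
static int device_supported(const char *vendor, const char *dev_name)
{
	unsigned int i;

	if (strcmp(vendor, "FUJITSU") != 0)
		return 0;
	for (i = 0; i < sizeof(onboard) / sizeof(onboard[0]); i++)
		if (strcmp(dev_name, onboard[i]) == 0)
			return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", device_supported("FUJITSU", " Antiope"));	/* 1 */
	printf("%d\n", device_supported("OTHER", "Antiope"));		/* 0 */
	return 0;
}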
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 919877970ae3..5187c6dd5a4f 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -141,7 +141,6 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
struct scpi_ops *scpi_ops;
struct device *hwdev, *dev = &pdev->dev;
struct scpi_sensors *scpi_sensors;
- const struct of_device_id *of_id;
int idx, ret;
scpi_ops = get_scpi_ops();
@@ -171,12 +170,11 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
scpi_sensors->scpi_ops = scpi_ops;
- of_id = of_match_device(scpi_of_match, &pdev->dev);
- if (!of_id) {
+ scale = of_device_get_match_data(&pdev->dev);
+ if (!scale) {
dev_err(&pdev->dev, "Unable to initialize scpi-hwmon data\n");
return -ENODEV;
}
- scale = of_id->data;
for (i = 0, idx = 0; i < nr_sensors; i++) {
struct sensor_data *sensor = &scpi_sensors->data[idx];
diff --git a/drivers/hwmon/tc654.c b/drivers/hwmon/tc654.c
index a52ca72af120..54cd33d09688 100644
--- a/drivers/hwmon/tc654.c
+++ b/drivers/hwmon/tc654.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/thermal.h>
#include <linux/util_macros.h>
enum tc654_regs {
@@ -379,28 +380,20 @@ static ssize_t pwm_show(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", pwm);
}
-static ssize_t pwm_store(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static int _set_pwm(struct tc654_data *data, unsigned long val)
{
- struct tc654_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
- unsigned long val;
int ret;
- if (kstrtoul(buf, 10, &val))
- return -EINVAL;
- if (val > 255)
- return -EINVAL;
-
mutex_lock(&data->update_lock);
- if (val == 0)
+ if (val == 0) {
data->config |= TC654_REG_CONFIG_SDM;
- else
+ data->duty_cycle = 0;
+ } else {
data->config &= ~TC654_REG_CONFIG_SDM;
-
- data->duty_cycle = find_closest(val, tc654_pwm_map,
- ARRAY_SIZE(tc654_pwm_map));
+ data->duty_cycle = val - 1;
+ }
ret = i2c_smbus_write_byte_data(client, TC654_REG_CONFIG, data->config);
if (ret < 0)
@@ -411,6 +404,24 @@ static ssize_t pwm_store(struct device *dev, struct device_attribute *da,
out:
mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static ssize_t pwm_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct tc654_data *data = dev_get_drvdata(dev);
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(buf, 10, &val))
+ return -EINVAL;
+ if (val > 255)
+ return -EINVAL;
+ if (val > 0)
+ val = find_closest(val, tc654_pwm_map, ARRAY_SIZE(tc654_pwm_map)) + 1;
+
+ ret = _set_pwm(data, val);
return ret < 0 ? ret : count;
}
@@ -443,6 +454,58 @@ static struct attribute *tc654_attrs[] = {
ATTRIBUTE_GROUPS(tc654);
/*
+ * thermal cooling device functions
+ *
+ * Account for the "ShutDown Mode (SDM)" state by offsetting
+ * the 16 PWM duty cycle states by 1.
+ *
+ * State 0 = 0% PWM | Shutdown - Fan(s) are off
+ * State 1 = 30% PWM | duty_cycle = 0
+ * State 2 = ~35% PWM | duty_cycle = 1
+ * [...]
+ * State 15 = ~95% PWM | duty_cycle = 14
+ * State 16 = 100% PWM | duty_cycle = 15
+ */
+#define TC654_MAX_COOLING_STATE 16
+
+static int tc654_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state)
+{
+ *state = TC654_MAX_COOLING_STATE;
+ return 0;
+}
+
+static int tc654_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state)
+{
+ struct tc654_data *data = tc654_update_client(cdev->devdata);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ if (data->config & TC654_REG_CONFIG_SDM)
+ *state = 0; /* FAN is off */
+ else
+ *state = data->duty_cycle + 1; /* offset PWM States by 1 */
+
+ return 0;
+}
+
+static int tc654_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
+{
+ struct tc654_data *data = tc654_update_client(cdev->devdata);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return _set_pwm(data, clamp_val(state, 0, TC654_MAX_COOLING_STATE));
+}
+
+static const struct thermal_cooling_device_ops tc654_fan_cool_ops = {
+ .get_max_state = tc654_get_max_state,
+ .get_cur_state = tc654_get_cur_state,
+ .set_cur_state = tc654_set_cur_state,
+};
+
+/*
* device probe and removal
*/
@@ -472,7 +535,18 @@ static int tc654_probe(struct i2c_client *client)
hwmon_dev =
devm_hwmon_device_register_with_groups(dev, client->name, data,
tc654_groups);
- return PTR_ERR_OR_ZERO(hwmon_dev);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ if (IS_ENABLED(CONFIG_THERMAL)) {
+ struct thermal_cooling_device *cdev;
+
+ cdev = devm_thermal_of_cooling_device_register(dev, dev->of_node, client->name,
+ hwmon_dev, &tc654_fan_cool_ops);
+ return PTR_ERR_OR_ZERO(cdev);
+ }
+
+ return 0;
}
static const struct i2c_device_id tc654_id[] = {
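The state table documented in the comment above offsets the 16 duty-cycle steps by one so that state 0 can mean "shutdown mode". A standalone sketch of the state-to-register mapping:

#include <stdio.h>

/* State 0 sets SDM (fan off); states 1..16 map to duty_cycle 0..15. */
static int state_to_duty(unsigned long state, int *sdm)
{
	if (state == 0) {
		*sdm = 1;
		return 0;
	}
	*sdm = 0;
	return (int)state - 1;
}

int main(void)
{
	unsigned long s;

	for (s = 0; s <= 16; s++) {
		int sdm, duty = state_to_duty(s, &sdm);

		printf("state %2lu -> SDM=%d duty_cycle=%d\n", s, sdm, duty);
	}
	return 0;
}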
diff --git a/drivers/hwmon/tmp464.c b/drivers/hwmon/tmp464.c
new file mode 100644
index 000000000000..7814f39bd1a3
--- /dev/null
+++ b/drivers/hwmon/tmp464.c
@@ -0,0 +1,712 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/* Driver for the Texas Instruments TMP464 SMBus temperature sensor IC.
+ * Supported models: TMP464, TMP468
+
+ * Copyright (C) 2022 Agathe Porte <agathe.porte@nokia.com>
+ * Preliminary support by:
+ * Lionel Pouliquen <lionel.lp.pouliquen@nokia.com>
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* Addresses to scan */
+static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, I2C_CLIENT_END };
+
+#define TMP464_NUM_CHANNELS 5 /* chan 0 is internal, 1-4 are remote */
+#define TMP468_NUM_CHANNELS 9 /* chan 0 is internal, 1-8 are remote */
+
+#define MAX_CHANNELS 9
+
+#define TMP464_TEMP_REG(channel) (channel)
+#define TMP464_TEMP_OFFSET_REG(channel) (0x40 + ((channel) - 1) * 8)
+#define TMP464_N_FACTOR_REG(channel) (0x41 + ((channel) - 1) * 8)
+
+static const u8 TMP464_THERM_LIMIT[MAX_CHANNELS] = {
+ 0x39, 0x42, 0x4A, 0x52, 0x5A, 0x62, 0x6a, 0x72, 0x7a };
+static const u8 TMP464_THERM2_LIMIT[MAX_CHANNELS] = {
+ 0x3A, 0x43, 0x4B, 0x53, 0x5B, 0x63, 0x6b, 0x73, 0x7b };
+
+#define TMP464_THERM_STATUS_REG 0x21
+#define TMP464_THERM2_STATUS_REG 0x22
+#define TMP464_REMOTE_OPEN_REG 0x23
+#define TMP464_CONFIG_REG 0x30
+#define TMP464_TEMP_HYST_REG 0x38
+#define TMP464_LOCK_REG 0xc4
+
+/* Identification */
+#define TMP464_MANUFACTURER_ID_REG 0xFE
+#define TMP464_DEVICE_ID_REG 0xFF
+
+/* Flags */
+#define TMP464_CONFIG_SHUTDOWN BIT(5)
+#define TMP464_CONFIG_RANGE 0x04
+#define TMP464_CONFIG_REG_REN(x) (BIT(7 + (x)))
+#define TMP464_CONFIG_REG_REN_MASK GENMASK(15, 7)
+#define TMP464_CONFIG_CONVERSION_RATE_B0 2
+#define TMP464_CONFIG_CONVERSION_RATE_B2 4
+#define TMP464_CONFIG_CONVERSION_RATE_MASK GENMASK(TMP464_CONFIG_CONVERSION_RATE_B2, \
+ TMP464_CONFIG_CONVERSION_RATE_B0)
+
+#define TMP464_UNLOCK_VAL 0xeb19
+#define TMP464_LOCK_VAL 0x5ca6
+#define TMP464_LOCKED 0x8000
+
+/* Manufacturer / Device ID's */
+#define TMP464_MANUFACTURER_ID 0x5449
+#define TMP464_DEVICE_ID 0x1468
+#define TMP468_DEVICE_ID 0x0468
+
+static const struct i2c_device_id tmp464_id[] = {
+ { "tmp464", TMP464_NUM_CHANNELS },
+ { "tmp468", TMP468_NUM_CHANNELS },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tmp464_id);
+
+static const struct of_device_id __maybe_unused tmp464_of_match[] = {
+ {
+ .compatible = "ti,tmp464",
+ .data = (void *)TMP464_NUM_CHANNELS
+ },
+ {
+ .compatible = "ti,tmp468",
+ .data = (void *)TMP468_NUM_CHANNELS
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tmp464_of_match);
+
+struct tmp464_channel {
+ const char *label;
+ bool enabled;
+};
+
+struct tmp464_data {
+ struct regmap *regmap;
+ struct mutex update_lock;
+ int channels;
+ s16 config_orig;
+ u16 open_reg;
+ unsigned long last_updated;
+ bool valid;
+ int update_interval;
+ struct tmp464_channel channel[MAX_CHANNELS];
+};
+
+static int temp_from_reg(s16 reg)
+{
+ return DIV_ROUND_CLOSEST((reg >> 3) * 625, 10);
+}
+
+static s16 temp_to_limit_reg(long temp)
+{
+ return DIV_ROUND_CLOSEST(temp, 500) << 6;
+}
+
+static s16 temp_to_offset_reg(long temp)
+{
+ return DIV_ROUND_CLOSEST(temp * 10, 625) << 3;
+}
+
+static int tmp464_enable_channels(struct tmp464_data *data)
+{
+ struct regmap *regmap = data->regmap;
+ u16 enable = 0;
+ int i;
+
+ for (i = 0; i < data->channels; i++)
+ if (data->channel[i].enabled)
+ enable |= TMP464_CONFIG_REG_REN(i);
+
+ return regmap_update_bits(regmap, TMP464_CONFIG_REG, TMP464_CONFIG_REG_REN_MASK, enable);
+}
+
+static int tmp464_chip_read(struct device *dev, u32 attr, int channel, long *val)
+{
+ struct tmp464_data *data = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ *val = data->update_interval;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tmp464_temp_read(struct device *dev, u32 attr, int channel, long *val)
+{
+ struct tmp464_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ unsigned int regval, regval2;
+ int err = 0;
+
+ mutex_lock(&data->update_lock);
+
+ switch (attr) {
+ case hwmon_temp_max_alarm:
+ err = regmap_read(regmap, TMP464_THERM_STATUS_REG, &regval);
+ if (err < 0)
+ break;
+ *val = !!(regval & BIT(channel + 7));
+ break;
+ case hwmon_temp_crit_alarm:
+ err = regmap_read(regmap, TMP464_THERM2_STATUS_REG, &regval);
+ if (err < 0)
+ break;
+ *val = !!(regval & BIT(channel + 7));
+ break;
+ case hwmon_temp_fault:
+ /*
+ * The chip clears TMP464_REMOTE_OPEN_REG after it is read
+ * and only updates it after the next measurement cycle is
+ * complete. That means we have to cache the value internally
+ * for one measurement cycle and report the cached value.
+ */
+ if (!data->valid || time_after(jiffies, data->last_updated +
+ msecs_to_jiffies(data->update_interval))) {
+ err = regmap_read(regmap, TMP464_REMOTE_OPEN_REG, &regval);
+ if (err < 0)
+ break;
+ data->open_reg = regval;
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+ *val = !!(data->open_reg & BIT(channel + 7));
+ break;
+ case hwmon_temp_max_hyst:
+ err = regmap_read(regmap, TMP464_THERM_LIMIT[channel], &regval);
+ if (err < 0)
+ break;
+ err = regmap_read(regmap, TMP464_TEMP_HYST_REG, &regval2);
+ if (err < 0)
+ break;
+ regval -= regval2;
+ *val = temp_from_reg(regval);
+ break;
+ case hwmon_temp_max:
+ err = regmap_read(regmap, TMP464_THERM_LIMIT[channel], &regval);
+ if (err < 0)
+ break;
+ *val = temp_from_reg(regval);
+ break;
+ case hwmon_temp_crit_hyst:
+ err = regmap_read(regmap, TMP464_THERM2_LIMIT[channel], &regval);
+ if (err < 0)
+ break;
+ err = regmap_read(regmap, TMP464_TEMP_HYST_REG, &regval2);
+ if (err < 0)
+ break;
+ regval -= regval2;
+ *val = temp_from_reg(regval);
+ break;
+ case hwmon_temp_crit:
+ err = regmap_read(regmap, TMP464_THERM2_LIMIT[channel], &regval);
+ if (err < 0)
+ break;
+ *val = temp_from_reg(regval);
+ break;
+ case hwmon_temp_offset:
+ err = regmap_read(regmap, TMP464_TEMP_OFFSET_REG(channel), &regval);
+ if (err < 0)
+ break;
+ *val = temp_from_reg(regval);
+ break;
+ case hwmon_temp_input:
+ if (!data->channel[channel].enabled) {
+ err = -ENODATA;
+ break;
+ }
+ err = regmap_read(regmap, TMP464_TEMP_REG(channel), &regval);
+ if (err < 0)
+ break;
+ *val = temp_from_reg(regval);
+ break;
+ case hwmon_temp_enable:
+ *val = data->channel[channel].enabled;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return err;
+}
+
+static int tmp464_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_chip:
+ return tmp464_chip_read(dev, attr, channel, val);
+ case hwmon_temp:
+ return tmp464_temp_read(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tmp464_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct tmp464_data *data = dev_get_drvdata(dev);
+
+ *str = data->channel[channel].label;
+
+ return 0;
+}
+
+static int tmp464_set_convrate(struct tmp464_data *data, long interval)
+{
+ int rate;
+
+ /*
+	 * For valid rates, the interval in milliseconds can be calculated as
+ * interval = 125 << (7 - rate);
+ * or
+ * interval = (1 << (7 - rate)) * 125;
+ * The rate is therefore
+ * rate = 7 - __fls(interval / 125);
+ * and the rounded rate is
+ * rate = 7 - __fls(interval * 4 / (125 * 3));
+ * Use clamp_val() to avoid overflows, and to ensure valid input
+ * for __fls.
+ */
+ interval = clamp_val(interval, 125, 16000);
+ rate = 7 - __fls(interval * 4 / (125 * 3));
+ data->update_interval = 125 << (7 - rate);
+
+ return regmap_update_bits(data->regmap, TMP464_CONFIG_REG,
+ TMP464_CONFIG_CONVERSION_RATE_MASK,
+ rate << TMP464_CONFIG_CONVERSION_RATE_B0);
+}
+
+static int tmp464_chip_write(struct tmp464_data *data, u32 attr, int channel, long val)
+{
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return tmp464_set_convrate(data, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tmp464_temp_write(struct tmp464_data *data, u32 attr, int channel, long val)
+{
+ struct regmap *regmap = data->regmap;
+ unsigned int regval;
+ int err = 0;
+
+ switch (attr) {
+ case hwmon_temp_max_hyst:
+ err = regmap_read(regmap, TMP464_THERM_LIMIT[0], &regval);
+ if (err < 0)
+ break;
+ val = clamp_val(val, -256000, 256000); /* prevent overflow/underflow */
+ val = clamp_val(temp_from_reg(regval) - val, 0, 255000);
+ err = regmap_write(regmap, TMP464_TEMP_HYST_REG,
+ DIV_ROUND_CLOSEST(val, 1000) << 7);
+ break;
+ case hwmon_temp_max:
+ val = temp_to_limit_reg(clamp_val(val, -255000, 255500));
+ err = regmap_write(regmap, TMP464_THERM_LIMIT[channel], val);
+ break;
+ case hwmon_temp_crit:
+ val = temp_to_limit_reg(clamp_val(val, -255000, 255500));
+ err = regmap_write(regmap, TMP464_THERM2_LIMIT[channel], val);
+ break;
+ case hwmon_temp_offset:
+ val = temp_to_offset_reg(clamp_val(val, -128000, 127937));
+ err = regmap_write(regmap, TMP464_TEMP_OFFSET_REG(channel), val);
+ break;
+ case hwmon_temp_enable:
+ data->channel[channel].enabled = !!val;
+ err = tmp464_enable_channels(data);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int tmp464_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct tmp464_data *data = dev_get_drvdata(dev);
+ int err;
+
+ mutex_lock(&data->update_lock);
+
+ switch (type) {
+ case hwmon_chip:
+ err = tmp464_chip_write(data, attr, channel, val);
+ break;
+ case hwmon_temp:
+ err = tmp464_temp_write(data, attr, channel, val);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return err;
+}
+
+static umode_t tmp464_is_visible(const void *_data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct tmp464_data *data = _data;
+
+ if (channel >= data->channels)
+ return 0;
+
+ if (type == hwmon_chip) {
+ if (attr == hwmon_chip_update_interval)
+ return 0644;
+ return 0;
+ }
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_crit_hyst:
+ return 0444;
+ case hwmon_temp_enable:
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ return 0644;
+ case hwmon_temp_max_hyst:
+ if (!channel)
+ return 0644;
+ return 0444;
+ case hwmon_temp_label:
+ if (data->channel[channel].label)
+ return 0444;
+ return 0;
+ case hwmon_temp_fault:
+ if (channel)
+ return 0444;
+ return 0;
+ case hwmon_temp_offset:
+ if (channel)
+ return 0644;
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+static void tmp464_restore_lock(void *regmap)
+{
+ regmap_write(regmap, TMP464_LOCK_REG, TMP464_LOCK_VAL);
+}
+
+static void tmp464_restore_config(void *_data)
+{
+ struct tmp464_data *data = _data;
+
+ regmap_write(data->regmap, TMP464_CONFIG_REG, data->config_orig);
+}
+
+static int tmp464_init_client(struct device *dev, struct tmp464_data *data)
+{
+ struct regmap *regmap = data->regmap;
+ unsigned int regval;
+ int err;
+
+ err = regmap_read(regmap, TMP464_LOCK_REG, &regval);
+ if (err)
+ return err;
+ if (regval == TMP464_LOCKED) {
+ /* Explicitly unlock chip if it is locked */
+ err = regmap_write(regmap, TMP464_LOCK_REG, TMP464_UNLOCK_VAL);
+ if (err)
+ return err;
+ /* and lock it again when unloading the driver */
+ err = devm_add_action_or_reset(dev, tmp464_restore_lock, regmap);
+ if (err)
+ return err;
+ }
+
+ err = regmap_read(regmap, TMP464_CONFIG_REG, &regval);
+ if (err)
+ return err;
+ data->config_orig = regval;
+ err = devm_add_action_or_reset(dev, tmp464_restore_config, data);
+ if (err)
+ return err;
+
+ /* Default to 500 ms update interval */
+ err = regmap_update_bits(regmap, TMP464_CONFIG_REG,
+ TMP464_CONFIG_CONVERSION_RATE_MASK | TMP464_CONFIG_SHUTDOWN,
+ BIT(TMP464_CONFIG_CONVERSION_RATE_B0) |
+ BIT(TMP464_CONFIG_CONVERSION_RATE_B2));
+ if (err)
+ return err;
+
+ data->update_interval = 500;
+
+ return tmp464_enable_channels(data);
+}
+
+static int tmp464_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ char *name, *chip;
+ int reg;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ reg = i2c_smbus_read_word_swapped(client, TMP464_MANUFACTURER_ID_REG);
+ if (reg < 0)
+ return reg;
+ if (reg != TMP464_MANUFACTURER_ID)
+ return -ENODEV;
+
+ /* Check for "always return zero" bits */
+ reg = i2c_smbus_read_word_swapped(client, TMP464_THERM_STATUS_REG);
+ if (reg < 0)
+ return reg;
+ if (reg & 0x1f)
+ return -ENODEV;
+ reg = i2c_smbus_read_word_swapped(client, TMP464_THERM2_STATUS_REG);
+ if (reg < 0)
+ return reg;
+ if (reg & 0x1f)
+ return -ENODEV;
+
+ reg = i2c_smbus_read_word_swapped(client, TMP464_DEVICE_ID_REG);
+ if (reg < 0)
+ return reg;
+ switch (reg) {
+ case TMP464_DEVICE_ID:
+ name = "tmp464";
+ chip = "TMP464";
+ break;
+ case TMP468_DEVICE_ID:
+ name = "tmp468";
+ chip = "TMP468";
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ strscpy(info->type, name, I2C_NAME_SIZE);
+ dev_info(&adapter->dev, "Detected TI %s chip at 0x%02x\n", chip, client->addr);
+
+ return 0;
+}
+
+static int tmp464_probe_child_from_dt(struct device *dev,
+ struct device_node *child,
+ struct tmp464_data *data)
+
+{
+ struct regmap *regmap = data->regmap;
+ u32 channel;
+ s32 nfactor;
+ int err;
+
+ err = of_property_read_u32(child, "reg", &channel);
+ if (err) {
+ dev_err(dev, "missing reg property of %pOFn\n", child);
+ return err;
+ }
+
+ if (channel >= data->channels) {
+ dev_err(dev, "invalid reg %d of %pOFn\n", channel, child);
+ return -EINVAL;
+ }
+
+ of_property_read_string(child, "label", &data->channel[channel].label);
+
+ data->channel[channel].enabled = of_device_is_available(child);
+
+ err = of_property_read_s32(child, "ti,n-factor", &nfactor);
+ if (err && err != -EINVAL)
+ return err;
+ if (!err) {
+ if (channel == 0) {
+ dev_err(dev, "n-factor can't be set for internal channel\n");
+ return -EINVAL;
+ }
+ if (nfactor > 127 || nfactor < -128) {
+ dev_err(dev, "n-factor for channel %d invalid (%d)\n",
+ channel, nfactor);
+ return -EINVAL;
+ }
+ err = regmap_write(regmap, TMP464_N_FACTOR_REG(channel),
+ (nfactor << 8) & 0xff00);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int tmp464_probe_from_dt(struct device *dev, struct tmp464_data *data)
+{
+ const struct device_node *np = dev->of_node;
+ struct device_node *child;
+ int err;
+
+ for_each_child_of_node(np, child) {
+ if (strcmp(child->name, "channel"))
+ continue;
+
+ err = tmp464_probe_child_from_dt(dev, child, data);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops tmp464_ops = {
+ .is_visible = tmp464_is_visible,
+ .read = tmp464_read,
+ .read_string = tmp464_read_string,
+ .write = tmp464_write,
+};
+
+static const struct hwmon_channel_info *tmp464_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST | HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM |
+ HWMON_T_LABEL | HWMON_T_ENABLE,
+ HWMON_T_INPUT | HWMON_T_OFFSET | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_FAULT | HWMON_T_LABEL | HWMON_T_ENABLE,
+ HWMON_T_INPUT | HWMON_T_OFFSET | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_FAULT | HWMON_T_LABEL | HWMON_T_ENABLE,
+ HWMON_T_INPUT | HWMON_T_OFFSET | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_FAULT | HWMON_T_LABEL | HWMON_T_ENABLE,
+ HWMON_T_INPUT | HWMON_T_OFFSET | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_FAULT | HWMON_T_LABEL | HWMON_T_ENABLE,
+ HWMON_T_INPUT | HWMON_T_OFFSET | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_FAULT | HWMON_T_LABEL | HWMON_T_ENABLE,
+ HWMON_T_INPUT | HWMON_T_OFFSET | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_FAULT | HWMON_T_LABEL | HWMON_T_ENABLE,
+ HWMON_T_INPUT | HWMON_T_OFFSET | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_FAULT | HWMON_T_LABEL | HWMON_T_ENABLE,
+ HWMON_T_INPUT | HWMON_T_OFFSET | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_FAULT | HWMON_T_LABEL | HWMON_T_ENABLE),
+ NULL
+};
+
+static const struct hwmon_chip_info tmp464_chip_info = {
+ .ops = &tmp464_ops,
+ .info = tmp464_info,
+};
+
+/* regmap */
+
+static bool tmp464_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return (reg < TMP464_TEMP_REG(TMP468_NUM_CHANNELS) ||
+ reg == TMP464_THERM_STATUS_REG ||
+ reg == TMP464_THERM2_STATUS_REG ||
+ reg == TMP464_REMOTE_OPEN_REG);
+}
+
+static const struct regmap_config tmp464_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = TMP464_DEVICE_ID_REG,
+ .volatile_reg = tmp464_is_volatile_reg,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .cache_type = REGCACHE_RBTREE,
+ .use_single_read = true,
+ .use_single_write = true,
+};
+
+static int tmp464_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct tmp464_data *data;
+ int i, err;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) {
+ dev_err(&client->dev, "i2c functionality check failed\n");
+ return -ENODEV;
+ }
+ data = devm_kzalloc(dev, sizeof(struct tmp464_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ mutex_init(&data->update_lock);
+
+ if (dev->of_node)
+ data->channels = (int)(unsigned long)of_device_get_match_data(&client->dev);
+ else
+ data->channels = i2c_match_id(tmp464_id, client)->driver_data;
+
+ data->regmap = devm_regmap_init_i2c(client, &tmp464_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
+ for (i = 0; i < data->channels; i++)
+ data->channel[i].enabled = true;
+
+ err = tmp464_init_client(dev, data);
+ if (err)
+ return err;
+
+ if (dev->of_node) {
+ err = tmp464_probe_from_dt(dev, data);
+ if (err)
+ return err;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data, &tmp464_chip_info, NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct i2c_driver tmp464_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "tmp464",
+ .of_match_table = of_match_ptr(tmp464_of_match),
+ },
+ .probe_new = tmp464_probe,
+ .id_table = tmp464_id,
+ .detect = tmp464_detect,
+ .address_list = normal_i2c,
+};
+
+module_i2c_driver(tmp464_driver);
+
+MODULE_AUTHOR("Agathe Porte <agathe.porte@nokia.com>");
+MODULE_DESCRIPTION("Texas Instruments TMP464 temperature sensor driver");
+MODULE_LICENSE("GPL");
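The rounding formula in tmp464_set_convrate() above can be checked numerically: valid intervals are 125 << (7 - rate) milliseconds, and interval * 4 / (125 * 3) rounds a request to the nearest step. A worked sketch, with a local stand-in for the kernel's __fls():

#include <stdio.h>

static int fls_long(unsigned long v)	/* stand-in for __fls() */
{
	int i = -1;

	while (v) { v >>= 1; i++; }
	return i;
}

int main(void)
{
	const long reqs[] = { 125, 187, 188, 500, 15999, 16000 };
	unsigned int i;

	for (i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++) {
		long interval = reqs[i];	/* already clamped to 125..16000 */
		int rate = 7 - fls_long(interval * 4 / (125 * 3));

		printf("requested %5ld ms -> rate %d -> actual %d ms\n",
		       interval, rate, 125 << (7 - rate));
	}
	return 0;
}

For example, 187 ms still rounds down to the 125 ms step while 188 ms rounds up to 250 ms, confirming the nearest-step behavior the comment claims.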
diff --git a/drivers/hwmon/vexpress-hwmon.c b/drivers/hwmon/vexpress-hwmon.c
index 44d798be3d59..2ac5fb96bba4 100644
--- a/drivers/hwmon/vexpress-hwmon.c
+++ b/drivers/hwmon/vexpress-hwmon.c
@@ -207,7 +207,6 @@ MODULE_DEVICE_TABLE(of, vexpress_hwmon_of_match);
static int vexpress_hwmon_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct vexpress_hwmon_data *data;
const struct vexpress_hwmon_type *type;
@@ -216,10 +215,9 @@ static int vexpress_hwmon_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, data);
- match = of_match_device(vexpress_hwmon_of_match, &pdev->dev);
- if (!match)
+ type = of_device_get_match_data(&pdev->dev);
+ if (!type)
return -ENODEV;
- type = match->data;
data->reg = devm_regmap_init_vexpress_config(&pdev->dev);
if (IS_ERR(data->reg))
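Both this hunk and the scpi-hwmon one above converge on the same pattern: of_device_get_match_data() hands back the matched of_device_id's .data pointer directly, collapsing the old two-step of_match_device() lookup. A minimal sketch under assumed names (struct foo_type is a placeholder):

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_type { int variant; };	/* placeholder match data */

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_type *type = of_device_get_match_data(&pdev->dev);

	if (!type)		/* no match data: refuse to bind */
		return -ENODEV;
	return 0;
}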
diff --git a/drivers/hwtracing/coresight/coresight-config.h b/drivers/hwtracing/coresight/coresight-config.h
index 9bd44b940add..2e1670523461 100644
--- a/drivers/hwtracing/coresight/coresight-config.h
+++ b/drivers/hwtracing/coresight/coresight-config.h
@@ -231,7 +231,7 @@ struct cscfg_config_csdev {
bool enabled;
struct list_head node;
int nr_feat;
- struct cscfg_feature_csdev *feats_csdev[0];
+ struct cscfg_feature_csdev *feats_csdev[];
};
/**
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 432ade0842f6..70a07b4e9967 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -658,13 +658,11 @@ static void msc_buffer_clear_hw_header(struct msc *msc)
list_for_each_entry(win, &msc->win_list, entry) {
unsigned int blk;
- size_t hw_sz = sizeof(struct msc_block_desc) -
- offsetof(struct msc_block_desc, hw_tag);
for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
struct msc_block_desc *bdesc = sg_virt(sg);
- memset(&bdesc->hw_tag, 0, hw_sz);
+ memset_startat(bdesc, 0, hw_tag);
}
}
}
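
memset_startat() (<linux/string.h>) zeroes a struct from the named member through the end of the struct, replacing the open-coded sizeof()/offsetof() arithmetic deleted above. Roughly, for a hypothetical descriptor:

    struct demo_desc {
            u64 sw_tag;     /* preserved */
            u64 hw_tag;     /* zeroing starts here... */
            u32 payload[4]; /* ...and runs to the end of the struct */
    };

    /* equivalent to:
     * memset(&d->hw_tag, 0, sizeof(*d) - offsetof(struct demo_desc, hw_tag));
     */
    memset_startat(d, 0, hw_tag);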
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 0b66e25c0e2d..b7640cfe0020 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -64,6 +64,7 @@ static struct cpuidle_driver intel_idle_driver = {
/* intel_idle.max_cstate=0 disables driver */
static int max_cstate = CPUIDLE_STATE_MAX - 1;
static unsigned int disabled_states_mask;
+static unsigned int preferred_states_mask;
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
@@ -121,9 +122,6 @@ static unsigned int mwait_substates __initdata;
* If the local APIC timer is not known to be reliable in the target idle state,
* enable one-shot tick broadcasting for the target CPU before executing MWAIT.
*
- * Optionally call leave_mm() for the target CPU upfront to avoid wakeups due to
- * flushing user TLBs.
- *
* Must be called under local_irq_disable().
*/
static __cpuidle int intel_idle(struct cpuidle_device *dev,
@@ -761,6 +759,46 @@ static struct cpuidle_state icx_cstates[] __initdata = {
.enter = NULL }
};
+/*
+ * On Sapphire Rapids Xeon, C1 has to be disabled if C1E is enabled, and vice
+ * versa. On SPR, C1E is enabled only if the "C1E promotion" bit is set in
+ * MSR_IA32_POWER_CTL. But in that case there is effectively no C1, because C1
+ * requests are promoted to C1E. If the "C1E promotion" bit is cleared, then
+ * both C1 and C1E requests end up with C1, so there is effectively no C1E.
+ *
+ * By default we enable C1 and disable C1E, marking the latter with
+ * 'CPUIDLE_FLAG_UNUSABLE'.
+ */
+static struct cpuidle_state spr_cstates[] __initdata = {
+ {
+ .name = "C1",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 1,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C1E",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE |
+ CPUIDLE_FLAG_UNUSABLE,
+ .exit_latency = 2,
+ .target_residency = 4,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C6",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 290,
+ .target_residency = 800,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .enter = NULL }
+};
+
static struct cpuidle_state atom_cstates[] __initdata = {
{
.name = "C1E",
@@ -1104,6 +1142,12 @@ static const struct idle_cpu idle_cpu_icx __initconst = {
.use_acpi = true,
};
+static const struct idle_cpu idle_cpu_spr __initconst = {
+ .state_table = spr_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct idle_cpu idle_cpu_avn __initconst = {
.state_table = avn_cstates,
.disable_promotion_to_c1e = true,
@@ -1166,6 +1210,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &idle_cpu_skx),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &idle_cpu_icx),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &idle_cpu_icx),
+ X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt),
@@ -1353,6 +1398,8 @@ static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
+static void c1e_promotion_enable(void);
+
/**
* ivt_idle_state_table_update - Tune the idle states table for Ivy Town.
*
@@ -1523,6 +1570,41 @@ static void __init skx_idle_state_table_update(void)
}
}
+/**
+ * spr_idle_state_table_update - Adjust Sapphire Rapids idle states table.
+ */
+static void __init spr_idle_state_table_update(void)
+{
+ unsigned long long msr;
+
+ /* Check if user prefers C1E over C1. */
+ if (preferred_states_mask & BIT(2)) {
+ if (preferred_states_mask & BIT(1))
+ /* Both can't be enabled, stick to the defaults. */
+ return;
+
+ spr_cstates[0].flags |= CPUIDLE_FLAG_UNUSABLE;
+ spr_cstates[1].flags &= ~CPUIDLE_FLAG_UNUSABLE;
+
+ /* Enable C1E using the "C1E promotion" bit. */
+ c1e_promotion_enable();
+ disable_promotion_to_c1e = false;
+ }
+
+ /*
+ * By default, the C6 state assumes the worst-case scenario of package
+ * C6. However, if PC6 is disabled, we update the numbers to match
+ * core C6.
+ */
+ rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);
+
+ /* Limit values of 2 and above allow for PC6. */
+ if ((msr & 0x7) < 2) {
+ spr_cstates[2].exit_latency = 190;
+ spr_cstates[2].target_residency = 600;
+ }
+}
+
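
The two bits tested here follow the state order in spr_cstates[]: BIT(1) selects C1 (entry 0) and BIT(2) selects C1E (entry 1); requesting both is contradictory, so the defaults are kept. A condensed restatement of the logic, not the literal patch code:

    bool prefer_c1e = preferred_states_mask & BIT(2);
    bool prefer_c1  = preferred_states_mask & BIT(1);

    if (prefer_c1e && !prefer_c1) {
            spr_cstates[0].flags |= CPUIDLE_FLAG_UNUSABLE;  /* hide C1 */
            spr_cstates[1].flags &= ~CPUIDLE_FLAG_UNUSABLE; /* expose C1E */
            c1e_promotion_enable(); /* hardware promotes C1 requests to C1E */
    }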
static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
{
unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1;
@@ -1557,6 +1639,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
case INTEL_FAM6_SKYLAKE_X:
skx_idle_state_table_update();
break;
+ case INTEL_FAM6_SAPPHIRERAPIDS_X:
+ spr_idle_state_table_update();
+ break;
}
for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
@@ -1629,6 +1714,15 @@ static void auto_demotion_disable(void)
wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
}
+static void c1e_promotion_enable(void)
+{
+ unsigned long long msr_bits;
+
+ rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ msr_bits |= 0x2;
+ wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
+}
+
static void c1e_promotion_disable(void)
{
unsigned long long msr_bits;
@@ -1798,3 +1892,14 @@ module_param(max_cstate, int, 0444);
*/
module_param_named(states_off, disabled_states_mask, uint, 0444);
MODULE_PARM_DESC(states_off, "Mask of disabled idle states");
+/*
+ * Some platforms come with mutually exclusive C-states, so that if one is
+ * enabled, the other C-states must not be used. Example: C1 and C1E on
+ * Sapphire Rapids platform. This parameter allows for selecting the
+ * preferred C-states among the groups of mutually exclusive C-states - the
+ * selected C-states will be registered, the other C-states from the mutually
+ * exclusive group won't be registered. If the platform has no mutually
+ * exclusive C-states, this parameter has no effect.
+ */
+module_param_named(preferred_cstates, preferred_states_mask, uint, 0444);
+MODULE_PARM_DESC(preferred_cstates, "Mask of preferred idle states");
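
For example, to prefer C1E over C1 on Sapphire Rapids, the parameter would be set on the kernel command line (bit 2, matching the BIT(2) check in spr_idle_state_table_update() above; an illustration based on this patch's bit checks, not a quote from its documentation):

    intel_idle.preferred_cstates=4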
diff --git a/drivers/iio/accel/bma400_spi.c b/drivers/iio/accel/bma400_spi.c
index 9f622e37477b..9040a717b247 100644
--- a/drivers/iio/accel/bma400_spi.c
+++ b/drivers/iio/accel/bma400_spi.c
@@ -87,11 +87,9 @@ static int bma400_spi_probe(struct spi_device *spi)
return bma400_probe(&spi->dev, regmap, id->name);
}
-static int bma400_spi_remove(struct spi_device *spi)
+static void bma400_spi_remove(struct spi_device *spi)
{
bma400_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id bma400_spi_ids[] = {
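
This is the first of many identical conversions in this series: spi_driver::remove now returns void because the old int return value was only logged by the SPI core and the device was torn down regardless. The pattern, sketched with hypothetical names:

    static void demo_spi_remove(struct spi_device *spi)
    {
            demo_core_remove(&spi->dev); /* no trailing "return 0;" anymore */
    }

    static struct spi_driver demo_spi_driver = {
            .driver = { .name = "demo" },
            .remove = demo_spi_remove,   /* SPI core now expects void */
    };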
diff --git a/drivers/iio/accel/bmc150-accel-spi.c b/drivers/iio/accel/bmc150-accel-spi.c
index 11559567cb39..80007cc2d044 100644
--- a/drivers/iio/accel/bmc150-accel-spi.c
+++ b/drivers/iio/accel/bmc150-accel-spi.c
@@ -35,11 +35,9 @@ static int bmc150_accel_probe(struct spi_device *spi)
true);
}
-static int bmc150_accel_remove(struct spi_device *spi)
+static void bmc150_accel_remove(struct spi_device *spi)
{
bmc150_accel_core_remove(&spi->dev);
-
- return 0;
}
static const struct acpi_device_id bmc150_accel_acpi_match[] = {
diff --git a/drivers/iio/accel/bmi088-accel-spi.c b/drivers/iio/accel/bmi088-accel-spi.c
index 758ad2f12896..06d99d9949f3 100644
--- a/drivers/iio/accel/bmi088-accel-spi.c
+++ b/drivers/iio/accel/bmi088-accel-spi.c
@@ -56,11 +56,9 @@ static int bmi088_accel_probe(struct spi_device *spi)
true);
}
-static int bmi088_accel_remove(struct spi_device *spi)
+static void bmi088_accel_remove(struct spi_device *spi)
{
bmi088_accel_core_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id bmi088_accel_id[] = {
diff --git a/drivers/iio/accel/kxsd9-spi.c b/drivers/iio/accel/kxsd9-spi.c
index 441e6b764281..57c451cfb9e5 100644
--- a/drivers/iio/accel/kxsd9-spi.c
+++ b/drivers/iio/accel/kxsd9-spi.c
@@ -32,11 +32,9 @@ static int kxsd9_spi_probe(struct spi_device *spi)
spi_get_device_id(spi)->name);
}
-static int kxsd9_spi_remove(struct spi_device *spi)
+static void kxsd9_spi_remove(struct spi_device *spi)
{
kxsd9_common_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id kxsd9_spi_id[] = {
diff --git a/drivers/iio/accel/mma7455_spi.c b/drivers/iio/accel/mma7455_spi.c
index ecf690692dcc..b746031551a3 100644
--- a/drivers/iio/accel/mma7455_spi.c
+++ b/drivers/iio/accel/mma7455_spi.c
@@ -22,11 +22,9 @@ static int mma7455_spi_probe(struct spi_device *spi)
return mma7455_core_probe(&spi->dev, regmap, id->name);
}
-static int mma7455_spi_remove(struct spi_device *spi)
+static void mma7455_spi_remove(struct spi_device *spi)
{
mma7455_core_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id mma7455_spi_ids[] = {
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
index 43ecacbdc95a..83c81072511e 100644
--- a/drivers/iio/accel/sca3000.c
+++ b/drivers/iio/accel/sca3000.c
@@ -1524,7 +1524,7 @@ error_ret:
return ret;
}
-static int sca3000_remove(struct spi_device *spi)
+static void sca3000_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct sca3000_state *st = iio_priv(indio_dev);
@@ -1535,8 +1535,6 @@ static int sca3000_remove(struct spi_device *spi)
sca3000_stop_all_interrupts(st);
if (spi->irq)
free_irq(spi->irq, indio_dev);
-
- return 0;
}
static const struct spi_device_id sca3000_id[] = {
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index 1d345d66742d..c17d9b5fbaf6 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -479,7 +479,7 @@ error_disable_reg:
return ret;
}
-static int ad7266_remove(struct spi_device *spi)
+static void ad7266_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad7266_state *st = iio_priv(indio_dev);
@@ -488,8 +488,6 @@ static int ad7266_remove(struct spi_device *spi)
iio_triggered_buffer_cleanup(indio_dev);
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
-
- return 0;
}
static const struct spi_device_id ad7266_id[] = {
diff --git a/drivers/iio/adc/ltc2496.c b/drivers/iio/adc/ltc2496.c
index dd956a7c216e..5a55f79f2574 100644
--- a/drivers/iio/adc/ltc2496.c
+++ b/drivers/iio/adc/ltc2496.c
@@ -78,13 +78,11 @@ static int ltc2496_probe(struct spi_device *spi)
return ltc2497core_probe(dev, indio_dev);
}
-static int ltc2496_remove(struct spi_device *spi)
+static void ltc2496_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
ltc2497core_remove(indio_dev);
-
- return 0;
}
static const struct of_device_id ltc2496_of_match[] = {
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 8d1cff28cae0..b4c69acb33e3 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -459,15 +459,13 @@ reg_disable:
return ret;
}
-static int mcp320x_remove(struct spi_device *spi)
+static void mcp320x_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct mcp320x *adc = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
regulator_disable(adc->reg);
-
- return 0;
}
static const struct of_device_id mcp320x_dt_ids[] = {
diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
index 13535f148c4c..1cb4590fe412 100644
--- a/drivers/iio/adc/mcp3911.c
+++ b/drivers/iio/adc/mcp3911.c
@@ -321,7 +321,7 @@ reg_disable:
return ret;
}
-static int mcp3911_remove(struct spi_device *spi)
+static void mcp3911_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct mcp3911 *adc = iio_priv(indio_dev);
@@ -331,8 +331,6 @@ static int mcp3911_remove(struct spi_device *spi)
clk_disable_unprepare(adc->clki);
if (adc->vref)
regulator_disable(adc->vref);
-
- return 0;
}
static const struct of_device_id mcp3911_dt_ids[] = {
diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c
index 6eb62b564dae..59d75d09604f 100644
--- a/drivers/iio/adc/ti-adc12138.c
+++ b/drivers/iio/adc/ti-adc12138.c
@@ -503,7 +503,7 @@ err_clk_disable:
return ret;
}
-static int adc12138_remove(struct spi_device *spi)
+static void adc12138_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct adc12138 *adc = iio_priv(indio_dev);
@@ -514,8 +514,6 @@ static int adc12138_remove(struct spi_device *spi)
regulator_disable(adc->vref_n);
regulator_disable(adc->vref_p);
clk_disable_unprepare(adc->cclk);
-
- return 0;
}
static const struct of_device_id adc12138_dt_ids[] = {
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index a7efa3eada2c..e3658b969c5b 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -662,7 +662,7 @@ error_destroy_mutex:
return ret;
}
-static int ti_ads7950_remove(struct spi_device *spi)
+static void ti_ads7950_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ti_ads7950_state *st = iio_priv(indio_dev);
@@ -672,8 +672,6 @@ static int ti_ads7950_remove(struct spi_device *spi)
iio_triggered_buffer_cleanup(indio_dev);
regulator_disable(st->reg);
mutex_destroy(&st->slock);
-
- return 0;
}
static const struct spi_device_id ti_ads7950_id[] = {
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 2e24717d7f55..22c2583eedd0 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -479,7 +479,7 @@ err_regulator_disable:
return ret;
}
-static int ads8688_remove(struct spi_device *spi)
+static void ads8688_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ads8688_state *st = iio_priv(indio_dev);
@@ -489,8 +489,6 @@ static int ads8688_remove(struct spi_device *spi)
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
-
- return 0;
}
static const struct spi_device_id ads8688_id[] = {
diff --git a/drivers/iio/adc/ti-tlc4541.c b/drivers/iio/adc/ti-tlc4541.c
index 403b787f9f7e..2406eda9dfc6 100644
--- a/drivers/iio/adc/ti-tlc4541.c
+++ b/drivers/iio/adc/ti-tlc4541.c
@@ -224,7 +224,7 @@ error_disable_reg:
return ret;
}
-static int tlc4541_remove(struct spi_device *spi)
+static void tlc4541_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct tlc4541_state *st = iio_priv(indio_dev);
@@ -232,8 +232,6 @@ static int tlc4541_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
regulator_disable(st->reg);
-
- return 0;
}
static const struct of_device_id tlc4541_dt_ids[] = {
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
index cfcf18a0bce8..1134ae12e531 100644
--- a/drivers/iio/amplifiers/ad8366.c
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -298,7 +298,7 @@ error_disable_reg:
return ret;
}
-static int ad8366_remove(struct spi_device *spi)
+static void ad8366_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad8366_state *st = iio_priv(indio_dev);
@@ -308,8 +308,6 @@ static int ad8366_remove(struct spi_device *spi)
if (!IS_ERR(reg))
regulator_disable(reg);
-
- return 0;
}
static const struct spi_device_id ad8366_id[] = {
diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c
index 1aee87100038..eafaf4529df5 100644
--- a/drivers/iio/common/ssp_sensors/ssp_dev.c
+++ b/drivers/iio/common/ssp_sensors/ssp_dev.c
@@ -586,7 +586,7 @@ err_setup_irq:
return ret;
}
-static int ssp_remove(struct spi_device *spi)
+static void ssp_remove(struct spi_device *spi)
{
struct ssp_data *data = spi_get_drvdata(spi);
@@ -608,8 +608,6 @@ static int ssp_remove(struct spi_device *spi)
mutex_destroy(&data->pending_lock);
mfd_remove_devices(&spi->dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index 2d3b14c407d8..ecbc6a51d60f 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -521,7 +521,7 @@ error_free_channels:
return ret;
}
-static int ad5360_remove(struct spi_device *spi)
+static void ad5360_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5360_state *st = iio_priv(indio_dev);
@@ -531,8 +531,6 @@ static int ad5360_remove(struct spi_device *spi)
kfree(indio_dev->channels);
regulator_bulk_disable(st->chip_info->num_vrefs, st->vref_reg);
-
- return 0;
}
static const struct spi_device_id ad5360_ids[] = {
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index e38860a6a9f3..82e1d9bd773e 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -488,11 +488,9 @@ static int ad5380_spi_probe(struct spi_device *spi)
return ad5380_probe(&spi->dev, regmap, id->driver_data, id->name);
}
-static int ad5380_spi_remove(struct spi_device *spi)
+static void ad5380_spi_remove(struct spi_device *spi)
{
ad5380_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id ad5380_spi_ids[] = {
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 1c9b54c012a7..14cfabacbea5 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -491,11 +491,9 @@ static int ad5446_spi_probe(struct spi_device *spi)
&ad5446_spi_chip_info[id->driver_data]);
}
-static int ad5446_spi_remove(struct spi_device *spi)
+static void ad5446_spi_remove(struct spi_device *spi)
{
ad5446_remove(&spi->dev);
-
- return 0;
}
static struct spi_driver ad5446_spi_driver = {
diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c
index f5e93c6acc9d..bad9bdaafa94 100644
--- a/drivers/iio/dac/ad5449.c
+++ b/drivers/iio/dac/ad5449.c
@@ -330,7 +330,7 @@ error_disable_reg:
return ret;
}
-static int ad5449_spi_remove(struct spi_device *spi)
+static void ad5449_spi_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5449 *st = iio_priv(indio_dev);
@@ -338,8 +338,6 @@ static int ad5449_spi_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
regulator_bulk_disable(st->chip_info->num_channels, st->vref_reg);
-
- return 0;
}
static const struct spi_device_id ad5449_spi_ids[] = {
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index b631261efa97..8507573aa13e 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -336,7 +336,7 @@ error_disable_reg:
return ret;
}
-static int ad5504_remove(struct spi_device *spi)
+static void ad5504_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5504_state *st = iio_priv(indio_dev);
@@ -345,8 +345,6 @@ static int ad5504_remove(struct spi_device *spi)
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
-
- return 0;
}
static const struct spi_device_id ad5504_id[] = {
diff --git a/drivers/iio/dac/ad5592r.c b/drivers/iio/dac/ad5592r.c
index 6bfd7951e18c..0f7abfa75bec 100644
--- a/drivers/iio/dac/ad5592r.c
+++ b/drivers/iio/dac/ad5592r.c
@@ -130,11 +130,9 @@ static int ad5592r_spi_probe(struct spi_device *spi)
return ad5592r_probe(&spi->dev, id->name, &ad5592r_rw_ops);
}
-static int ad5592r_spi_remove(struct spi_device *spi)
+static void ad5592r_spi_remove(struct spi_device *spi)
{
ad5592r_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id ad5592r_spi_ids[] = {
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index 3c98941b9f99..371e812850eb 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -293,7 +293,7 @@ error_disable_reg:
return ret;
}
-static int ad5624r_remove(struct spi_device *spi)
+static void ad5624r_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5624r_state *st = iio_priv(indio_dev);
@@ -301,8 +301,6 @@ static int ad5624r_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
-
- return 0;
}
static const struct spi_device_id ad5624r_id[] = {
diff --git a/drivers/iio/dac/ad5686-spi.c b/drivers/iio/dac/ad5686-spi.c
index 2628810fdbb1..d26fb29b6b04 100644
--- a/drivers/iio/dac/ad5686-spi.c
+++ b/drivers/iio/dac/ad5686-spi.c
@@ -95,11 +95,9 @@ static int ad5686_spi_probe(struct spi_device *spi)
ad5686_spi_write, ad5686_spi_read);
}
-static int ad5686_spi_remove(struct spi_device *spi)
+static void ad5686_spi_remove(struct spi_device *spi)
{
ad5686_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id ad5686_spi_id[] = {
diff --git a/drivers/iio/dac/ad5761.c b/drivers/iio/dac/ad5761.c
index e37e095e94fc..4cb8471db81e 100644
--- a/drivers/iio/dac/ad5761.c
+++ b/drivers/iio/dac/ad5761.c
@@ -394,7 +394,7 @@ disable_regulator_err:
return ret;
}
-static int ad5761_remove(struct spi_device *spi)
+static void ad5761_remove(struct spi_device *spi)
{
struct iio_dev *iio_dev = spi_get_drvdata(spi);
struct ad5761_state *st = iio_priv(iio_dev);
@@ -403,8 +403,6 @@ static int ad5761_remove(struct spi_device *spi)
if (!IS_ERR_OR_NULL(st->vref_reg))
regulator_disable(st->vref_reg);
-
- return 0;
}
static const struct spi_device_id ad5761_id[] = {
diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c
index ae089b9145cb..d235a8047ba0 100644
--- a/drivers/iio/dac/ad5764.c
+++ b/drivers/iio/dac/ad5764.c
@@ -332,7 +332,7 @@ error_disable_reg:
return ret;
}
-static int ad5764_remove(struct spi_device *spi)
+static void ad5764_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5764_state *st = iio_priv(indio_dev);
@@ -341,8 +341,6 @@ static int ad5764_remove(struct spi_device *spi)
if (st->chip_info->int_vref == 0)
regulator_bulk_disable(ARRAY_SIZE(st->vref_reg), st->vref_reg);
-
- return 0;
}
static const struct spi_device_id ad5764_ids[] = {
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index 7b4579d73d18..2b14914b4050 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -428,7 +428,7 @@ error_disable_reg_pos:
return ret;
}
-static int ad5791_remove(struct spi_device *spi)
+static void ad5791_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5791_state *st = iio_priv(indio_dev);
@@ -439,8 +439,6 @@ static int ad5791_remove(struct spi_device *spi)
if (!IS_ERR(st->reg_vss))
regulator_disable(st->reg_vss);
-
- return 0;
}
static const struct spi_device_id ad5791_id[] = {
diff --git a/drivers/iio/dac/ad8801.c b/drivers/iio/dac/ad8801.c
index 5ecfdad54dec..6be35c92d435 100644
--- a/drivers/iio/dac/ad8801.c
+++ b/drivers/iio/dac/ad8801.c
@@ -193,7 +193,7 @@ error_disable_vrefh_reg:
return ret;
}
-static int ad8801_remove(struct spi_device *spi)
+static void ad8801_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad8801_state *state = iio_priv(indio_dev);
@@ -202,8 +202,6 @@ static int ad8801_remove(struct spi_device *spi)
if (state->vrefl_reg)
regulator_disable(state->vrefl_reg);
regulator_disable(state->vrefh_reg);
-
- return 0;
}
static const struct spi_device_id ad8801_ids[] = {
diff --git a/drivers/iio/dac/ltc1660.c b/drivers/iio/dac/ltc1660.c
index f6ec9bf5815e..c76233c9bb72 100644
--- a/drivers/iio/dac/ltc1660.c
+++ b/drivers/iio/dac/ltc1660.c
@@ -206,15 +206,13 @@ error_disable_reg:
return ret;
}
-static int ltc1660_remove(struct spi_device *spi)
+static void ltc1660_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ltc1660_priv *priv = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
regulator_disable(priv->vref_reg);
-
- return 0;
}
static const struct of_device_id ltc1660_dt_ids[] = {
diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c
index 53e4b887d372..aed46c80757e 100644
--- a/drivers/iio/dac/ltc2632.c
+++ b/drivers/iio/dac/ltc2632.c
@@ -372,7 +372,7 @@ static int ltc2632_probe(struct spi_device *spi)
return iio_device_register(indio_dev);
}
-static int ltc2632_remove(struct spi_device *spi)
+static void ltc2632_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ltc2632_state *st = iio_priv(indio_dev);
@@ -381,8 +381,6 @@ static int ltc2632_remove(struct spi_device *spi)
if (st->vref_reg)
regulator_disable(st->vref_reg);
-
- return 0;
}
static const struct spi_device_id ltc2632_id[] = {
diff --git a/drivers/iio/dac/mcp4922.c b/drivers/iio/dac/mcp4922.c
index 0ae414ee1716..cb9e60e71b91 100644
--- a/drivers/iio/dac/mcp4922.c
+++ b/drivers/iio/dac/mcp4922.c
@@ -172,7 +172,7 @@ error_disable_reg:
return ret;
}
-static int mcp4922_remove(struct spi_device *spi)
+static void mcp4922_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct mcp4922_state *state;
@@ -180,8 +180,6 @@ static int mcp4922_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
state = iio_priv(indio_dev);
regulator_disable(state->vref_reg);
-
- return 0;
}
static const struct spi_device_id mcp4922_id[] = {
diff --git a/drivers/iio/dac/ti-dac082s085.c b/drivers/iio/dac/ti-dac082s085.c
index 6beda2193683..4e1156e6deb2 100644
--- a/drivers/iio/dac/ti-dac082s085.c
+++ b/drivers/iio/dac/ti-dac082s085.c
@@ -313,7 +313,7 @@ err:
return ret;
}
-static int ti_dac_remove(struct spi_device *spi)
+static void ti_dac_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ti_dac_chip *ti_dac = iio_priv(indio_dev);
@@ -321,8 +321,6 @@ static int ti_dac_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
mutex_destroy(&ti_dac->lock);
regulator_disable(ti_dac->vref);
-
- return 0;
}
static const struct of_device_id ti_dac_of_id[] = {
diff --git a/drivers/iio/dac/ti-dac7311.c b/drivers/iio/dac/ti-dac7311.c
index 99f275829ec2..e10d17e60ed3 100644
--- a/drivers/iio/dac/ti-dac7311.c
+++ b/drivers/iio/dac/ti-dac7311.c
@@ -292,7 +292,7 @@ err:
return ret;
}
-static int ti_dac_remove(struct spi_device *spi)
+static void ti_dac_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ti_dac_chip *ti_dac = iio_priv(indio_dev);
@@ -300,7 +300,6 @@ static int ti_dac_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
mutex_destroy(&ti_dac->lock);
regulator_disable(ti_dac->vref);
- return 0;
}
static const struct of_device_id ti_dac_of_id[] = {
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index 3d9eba716b69..f3521330f6fb 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -589,7 +589,7 @@ error_disable_clk:
return ret;
}
-static int adf4350_remove(struct spi_device *spi)
+static void adf4350_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct adf4350_state *st = iio_priv(indio_dev);
@@ -604,8 +604,6 @@ static int adf4350_remove(struct spi_device *spi)
if (!IS_ERR(reg))
regulator_disable(reg);
-
- return 0;
}
static const struct of_device_id adf4350_of_match[] = {
diff --git a/drivers/iio/gyro/bmg160_spi.c b/drivers/iio/gyro/bmg160_spi.c
index 745962e1e423..fc2e453527b9 100644
--- a/drivers/iio/gyro/bmg160_spi.c
+++ b/drivers/iio/gyro/bmg160_spi.c
@@ -27,11 +27,9 @@ static int bmg160_spi_probe(struct spi_device *spi)
return bmg160_core_probe(&spi->dev, regmap, spi->irq, id->name);
}
-static int bmg160_spi_remove(struct spi_device *spi)
+static void bmg160_spi_remove(struct spi_device *spi)
{
bmg160_core_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id bmg160_spi_id[] = {
diff --git a/drivers/iio/gyro/fxas21002c_spi.c b/drivers/iio/gyro/fxas21002c_spi.c
index 77ceebef4e34..c3ac169facf9 100644
--- a/drivers/iio/gyro/fxas21002c_spi.c
+++ b/drivers/iio/gyro/fxas21002c_spi.c
@@ -34,11 +34,9 @@ static int fxas21002c_spi_probe(struct spi_device *spi)
return fxas21002c_core_probe(&spi->dev, regmap, spi->irq, id->name);
}
-static int fxas21002c_spi_remove(struct spi_device *spi)
+static void fxas21002c_spi_remove(struct spi_device *spi)
{
fxas21002c_core_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id fxas21002c_spi_id[] = {
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 273f16dcaff8..856ec901b091 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -570,7 +570,7 @@ err_disable_reg:
return ret;
}
-static int afe4403_remove(struct spi_device *spi)
+static void afe4403_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct afe4403_data *afe = iio_priv(indio_dev);
@@ -586,8 +586,6 @@ static int afe4403_remove(struct spi_device *spi)
ret = regulator_disable(afe->regulator);
if (ret)
dev_warn(afe->dev, "Unable to disable regulator\n");
-
- return 0;
}
static const struct spi_device_id afe4403_ids[] = {
diff --git a/drivers/iio/magnetometer/bmc150_magn_spi.c b/drivers/iio/magnetometer/bmc150_magn_spi.c
index c6ed3ea8460a..4c570412d65c 100644
--- a/drivers/iio/magnetometer/bmc150_magn_spi.c
+++ b/drivers/iio/magnetometer/bmc150_magn_spi.c
@@ -29,11 +29,9 @@ static int bmc150_magn_spi_probe(struct spi_device *spi)
return bmc150_magn_probe(&spi->dev, regmap, spi->irq, id->name);
}
-static int bmc150_magn_spi_remove(struct spi_device *spi)
+static void bmc150_magn_spi_remove(struct spi_device *spi)
{
bmc150_magn_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id bmc150_magn_spi_id[] = {
diff --git a/drivers/iio/magnetometer/hmc5843_spi.c b/drivers/iio/magnetometer/hmc5843_spi.c
index 89cf59a62c28..a99dd9b33e95 100644
--- a/drivers/iio/magnetometer/hmc5843_spi.c
+++ b/drivers/iio/magnetometer/hmc5843_spi.c
@@ -74,11 +74,9 @@ static int hmc5843_spi_probe(struct spi_device *spi)
id->driver_data, id->name);
}
-static int hmc5843_spi_remove(struct spi_device *spi)
+static void hmc5843_spi_remove(struct spi_device *spi)
{
hmc5843_common_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id hmc5843_id[] = {
diff --git a/drivers/iio/potentiometer/max5487.c b/drivers/iio/potentiometer/max5487.c
index 007c2bd324cb..42723c996c9f 100644
--- a/drivers/iio/potentiometer/max5487.c
+++ b/drivers/iio/potentiometer/max5487.c
@@ -112,7 +112,7 @@ static int max5487_spi_probe(struct spi_device *spi)
return iio_device_register(indio_dev);
}
-static int max5487_spi_remove(struct spi_device *spi)
+static void max5487_spi_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
int ret;
@@ -123,8 +123,6 @@ static int max5487_spi_remove(struct spi_device *spi)
ret = max5487_write_cmd(spi, MAX5487_COPY_AB_TO_NV);
if (ret)
dev_warn(&spi->dev, "Failed to save wiper regs to NV regs\n");
-
- return 0;
}
static const struct spi_device_id max5487_id[] = {
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index 9fa2dcd71760..7ccd960ced5d 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -107,11 +107,9 @@ static int ms5611_spi_probe(struct spi_device *spi)
spi_get_device_id(spi)->driver_data);
}
-static int ms5611_spi_remove(struct spi_device *spi)
+static void ms5611_spi_remove(struct spi_device *spi)
{
ms5611_remove(spi_get_drvdata(spi));
-
- return 0;
}
static const struct of_device_id ms5611_spi_matches[] = {
diff --git a/drivers/iio/pressure/zpa2326_spi.c b/drivers/iio/pressure/zpa2326_spi.c
index 85201a4bae44..ee8ed77536ca 100644
--- a/drivers/iio/pressure/zpa2326_spi.c
+++ b/drivers/iio/pressure/zpa2326_spi.c
@@ -57,11 +57,9 @@ static int zpa2326_probe_spi(struct spi_device *spi)
spi->irq, ZPA2326_DEVICE_ID, regmap);
}
-static int zpa2326_remove_spi(struct spi_device *spi)
+static void zpa2326_remove_spi(struct spi_device *spi)
{
zpa2326_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id zpa2326_spi_ids[] = {
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 5a3bd41b331c..4d98f931a13d 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2016 HGST, a Western Digital Company.
*/
+#include <linux/memremap.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/pci-p2pdma.h>
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index 0b61df52332a..290ea8ac3838 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -433,8 +433,7 @@ void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num)
dev->port[port_num].dbg_cc_params = dbg_cc_params;
- dbg_cc_params->root = debugfs_create_dir("cc_params",
- mdev->priv.dbg_root);
+ dbg_cc_params->root = debugfs_create_dir("cc_params", mlx5_debugfs_get_dev_root(mdev));
for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) {
dbg_cc_params->params[i].offset = i;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 08b7f6bc56c3..fc036b4794fd 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -1055,7 +1055,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
int cmd_out_len = uverbs_attr_get_len(attrs,
MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
void *cmd_out;
- int err;
+ int err, err2;
int uid;
c = devx_ufile2uctx(attrs);
@@ -1076,14 +1076,16 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
return PTR_ERR(cmd_out);
MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
- err = mlx5_cmd_exec(dev->mdev, cmd_in,
- uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
- cmd_out, cmd_out_len);
- if (err)
+ err = mlx5_cmd_do(dev->mdev, cmd_in,
+ uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
+ cmd_out, cmd_out_len);
+ if (err && err != -EREMOTEIO)
return err;
- return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
+ err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
cmd_out_len);
+
+ return err2 ?: err;
}
static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
@@ -1457,7 +1459,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
struct devx_obj *obj;
u16 obj_type = 0;
- int err;
+ int err, err2 = 0;
int uid;
u32 obj_id;
u16 opcode;
@@ -1497,15 +1499,18 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
!is_apu_cq(dev, cmd_in)) {
obj->flags |= DEVX_OBJ_FLAGS_CQ;
obj->core_cq.comp = devx_cq_comp;
- err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
- cmd_in, cmd_in_len, cmd_out,
- cmd_out_len);
+ err = mlx5_create_cq(dev->mdev, &obj->core_cq,
+ cmd_in, cmd_in_len, cmd_out,
+ cmd_out_len);
} else {
- err = mlx5_cmd_exec(dev->mdev, cmd_in,
- cmd_in_len,
- cmd_out, cmd_out_len);
+ err = mlx5_cmd_do(dev->mdev, cmd_in, cmd_in_len,
+ cmd_out, cmd_out_len);
}
+ if (err == -EREMOTEIO)
+ err2 = uverbs_copy_to(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
+ cmd_out, cmd_out_len);
if (err)
goto obj_free;
@@ -1548,7 +1553,7 @@ obj_destroy:
sizeof(out));
obj_free:
kfree(obj);
- return err;
+ return err2 ?: err;
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
@@ -1563,7 +1568,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
void *cmd_out;
- int err;
+ int err, err2;
int uid;
if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
@@ -1586,14 +1591,16 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
devx_set_umem_valid(cmd_in);
- err = mlx5_cmd_exec(mdev->mdev, cmd_in,
- uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
- cmd_out, cmd_out_len);
- if (err)
+ err = mlx5_cmd_do(mdev->mdev, cmd_in,
+ uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
+ cmd_out, cmd_out_len);
+ if (err && err != -EREMOTEIO)
return err;
- return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
+ err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
cmd_out, cmd_out_len);
+
+ return err2 ?: err;
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
@@ -1607,7 +1614,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
void *cmd_out;
- int err;
+ int err, err2;
int uid;
struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
@@ -1629,14 +1636,16 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
return PTR_ERR(cmd_out);
MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
- err = mlx5_cmd_exec(mdev->mdev, cmd_in,
- uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
- cmd_out, cmd_out_len);
- if (err)
+ err = mlx5_cmd_do(mdev->mdev, cmd_in,
+ uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
+ cmd_out, cmd_out_len);
+ if (err && err != -EREMOTEIO)
return err;
- return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
+ err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
cmd_out, cmd_out_len);
+
+ return err2 ?: err;
}
struct devx_async_event_queue {
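
The switch from mlx5_cmd_exec() to mlx5_cmd_do() in the hunks above changes the error contract: -EREMOTEIO means the command reached the firmware but the firmware reported failure, and cmd_out still holds the failure details. The handlers therefore copy cmd_out to user space even in that case and return the copy error, if any, otherwise the command error. Condensed (OUT_ATTR is a placeholder):

    err = mlx5_cmd_do(mdev, in, inlen, out, outlen);
    if (err && err != -EREMOTEIO)
            return err;     /* command produced no output at all */

    /* out is valid even on -EREMOTEIO: hand it to the caller */
    err2 = uverbs_copy_to(attrs, OUT_ATTR, out, outlen);
    return err2 ?: err;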
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 85f526c861e9..32a0ea820573 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4178,7 +4178,7 @@ static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
if (!mlx5_debugfs_root)
return 0;
- root = debugfs_create_dir("delay_drop", dev->mdev->priv.dbg_root);
+ root = debugfs_create_dir("delay_drop", mlx5_debugfs_get_dev_root(dev->mdev));
dev->delay_drop.dir_debugfs = root;
debugfs_create_atomic_t("num_timeout_events", 0400, root,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 157d862fb864..32cb7068f0ca 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -140,6 +140,19 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
}
+static void create_mkey_warn(struct mlx5_ib_dev *dev, int status, void *out)
+{
+ if (status == -ENXIO) /* core driver is not available */
+ return;
+
+ mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
+ if (status != -EREMOTEIO) /* driver-specific failure */
+ return;
+
+ /* Failed in FW, print cmd out failure details */
+ mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out);
+}
+
static void create_mkey_callback(int status, struct mlx5_async_work *context)
{
struct mlx5_ib_mr *mr =
@@ -149,7 +162,7 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
unsigned long flags;
if (status) {
- mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
+ create_mkey_warn(dev, status, mr->out);
kfree(mr);
spin_lock_irqsave(&ent->lock, flags);
ent->pending--;
@@ -683,7 +696,7 @@ static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
if (!mlx5_debugfs_root || dev->is_rep)
return;
- cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
+ cache->root = debugfs_create_dir("mr_cache", mlx5_debugfs_get_dev_root(dev->mdev));
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
ent = &cache->ent[i];
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 29475cf8c7c3..b7fe47107d76 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4465,6 +4465,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
MLX5_ST_SZ_BYTES(create_dct_in), out,
sizeof(out));
+ err = mlx5_cmd_check(dev->mdev, err, qp->dct.in, out);
if (err)
return err;
resp.dctn = qp->dct.mdct.mqp.qpn;
diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
index 8844eacf2380..542e4c63a8de 100644
--- a/drivers/infiniband/hw/mlx5/qpc.c
+++ b/drivers/infiniband/hw/mlx5/qpc.c
@@ -220,7 +220,7 @@ int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
init_completion(&dct->drained);
MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
- err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen);
+ err = mlx5_cmd_do(dev->mdev, in, inlen, out, outlen);
if (err)
return err;
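
Callers that still want the old mlx5_cmd_exec() behaviour (firmware status folded into an errno, with error logging) now request it explicitly, as the qp.c hunk above does. A sketch of the split:

    /* issue the command without automatic status translation */
    err = mlx5_cmd_do(dev->mdev, in, inlen, out, outlen);

    /* then map the FW status in "out" to an errno and log failures */
    err = mlx5_cmd_check(dev->mdev, err, in, out);
    if (err)
            return err;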
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 0c607da9ee10..9417ee0b1eff 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -556,7 +556,7 @@ config KEYBOARD_PMIC8XXX
config KEYBOARD_SAMSUNG
tristate "Samsung keypad support"
- depends on HAVE_CLK
+ depends on HAS_IOMEM && HAVE_CLK
select INPUT_MATRIXKMAP
help
Say Y here if you want to use the keypad on your Samsung mobile
diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c
index eda1b23002b5..d1f5354d5ea2 100644
--- a/drivers/input/keyboard/applespi.c
+++ b/drivers/input/keyboard/applespi.c
@@ -1858,7 +1858,7 @@ static void applespi_drain_reads(struct applespi_data *applespi)
spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags);
}
-static int applespi_remove(struct spi_device *spi)
+static void applespi_remove(struct spi_device *spi)
{
struct applespi_data *applespi = spi_get_drvdata(spi);
@@ -1871,8 +1871,6 @@ static int applespi_remove(struct spi_device *spi)
applespi_drain_reads(applespi);
debugfs_remove_recursive(applespi->debugfs_root);
-
- return 0;
}
static void applespi_shutdown(struct spi_device *spi)
diff --git a/drivers/input/misc/adxl34x-spi.c b/drivers/input/misc/adxl34x-spi.c
index 6e51c9bc619f..91e44d4c66f7 100644
--- a/drivers/input/misc/adxl34x-spi.c
+++ b/drivers/input/misc/adxl34x-spi.c
@@ -87,13 +87,11 @@ static int adxl34x_spi_probe(struct spi_device *spi)
return 0;
}
-static int adxl34x_spi_remove(struct spi_device *spi)
+static void adxl34x_spi_remove(struct spi_device *spi)
{
struct adxl34x *ac = spi_get_drvdata(spi);
adxl34x_remove(ac);
-
- return 0;
}
static int __maybe_unused adxl34x_spi_suspend(struct device *dev)
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 47af62c12267..e1758d5ffe42 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -186,55 +186,21 @@ static int elan_get_fwinfo(u16 ic_type, u8 iap_version, u16 *validpage_count,
return 0;
}
-static int elan_enable_power(struct elan_tp_data *data)
+static int elan_set_power(struct elan_tp_data *data, bool on)
{
int repeat = ETP_RETRY_COUNT;
int error;
- error = regulator_enable(data->vcc);
- if (error) {
- dev_err(&data->client->dev,
- "failed to enable regulator: %d\n", error);
- return error;
- }
-
do {
- error = data->ops->power_control(data->client, true);
+ error = data->ops->power_control(data->client, on);
if (error >= 0)
return 0;
msleep(30);
} while (--repeat > 0);
- dev_err(&data->client->dev, "failed to enable power: %d\n", error);
- return error;
-}
-
-static int elan_disable_power(struct elan_tp_data *data)
-{
- int repeat = ETP_RETRY_COUNT;
- int error;
-
- do {
- error = data->ops->power_control(data->client, false);
- if (!error) {
- error = regulator_disable(data->vcc);
- if (error) {
- dev_err(&data->client->dev,
- "failed to disable regulator: %d\n",
- error);
- /* Attempt to power the chip back up */
- data->ops->power_control(data->client, true);
- break;
- }
-
- return 0;
- }
-
- msleep(30);
- } while (--repeat > 0);
-
- dev_err(&data->client->dev, "failed to disable power: %d\n", error);
+ dev_err(&data->client->dev, "failed to set power %s: %d\n",
+ on ? "on" : "off", error);
return error;
}
@@ -1399,9 +1365,19 @@ static int __maybe_unused elan_suspend(struct device *dev)
/* Enable wake from IRQ */
data->irq_wake = (enable_irq_wake(client->irq) == 0);
} else {
- ret = elan_disable_power(data);
+ ret = elan_set_power(data, false);
+ if (ret)
+ goto err;
+
+ ret = regulator_disable(data->vcc);
+ if (ret) {
+ dev_err(dev, "error %d disabling regulator\n", ret);
+ /* Attempt to power the chip back up */
+ elan_set_power(data, true);
+ }
}
+err:
mutex_unlock(&data->sysfs_mutex);
return ret;
}
@@ -1412,12 +1388,18 @@ static int __maybe_unused elan_resume(struct device *dev)
struct elan_tp_data *data = i2c_get_clientdata(client);
int error;
- if (device_may_wakeup(dev) && data->irq_wake) {
+ if (!device_may_wakeup(dev)) {
+ error = regulator_enable(data->vcc);
+ if (error) {
+ dev_err(dev, "error %d enabling regulator\n", error);
+ goto err;
+ }
+ } else if (data->irq_wake) {
disable_irq_wake(client->irq);
data->irq_wake = false;
}
- error = elan_enable_power(data);
+ error = elan_set_power(data, true);
if (error) {
dev_err(dev, "power up when resuming failed: %d\n", error);
goto err;
diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig
index 16119f760d11..c0163b983ce6 100644
--- a/drivers/input/rmi4/Kconfig
+++ b/drivers/input/rmi4/Kconfig
@@ -110,7 +110,7 @@ config RMI4_F3A
config RMI4_F54
bool "RMI4 Function 54 (Analog diagnostics)"
- depends on VIDEO_V4L2=y || (RMI4_CORE=m && VIDEO_V4L2=m)
+ depends on VIDEO_DEV=y || (RMI4_CORE=m && VIDEO_DEV=m)
select VIDEOBUF2_VMALLOC
select RMI4_F55
help
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index fcb1b646436a..1581f6ef0927 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1787,15 +1787,13 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
input_set_abs_params(inputdev, ABS_TILT_Y, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0);
input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
- /* Verify that a device really has an endpoint */
- if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
+ err = usb_find_common_endpoints(intf->cur_altsetting,
+ NULL, NULL, &endpoint, NULL);
+ if (err) {
dev_err(&intf->dev,
- "interface has %d endpoints, but must have minimum 1\n",
- intf->cur_altsetting->desc.bNumEndpoints);
- err = -EINVAL;
+ "interface has no int in endpoints, but must have minimum 1\n");
goto fail3;
}
- endpoint = &intf->cur_altsetting->endpoint[0].desc;
/* Go set up our URB, which is called when the tablet receives
* input.
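
usb_find_common_endpoints() scans the altsetting for the first bulk-in, bulk-out, int-in and int-out endpoints, fills each non-NULL output pointer, and fails if any requested endpoint is missing; the probe above asks only for an interrupt-in endpoint. Sketch:

    struct usb_endpoint_descriptor *int_in;

    /* only the int-in slot is requested; the rest are NULL */
    if (usb_find_common_endpoints(intf->cur_altsetting,
                                  NULL, NULL, &int_in, NULL))
            return -ENODEV; /* no interrupt-in endpoint on this interface */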
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 2f6adfb7b938..ff7794cecf69 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -131,7 +131,7 @@ config TOUCHSCREEN_ATMEL_MXT
config TOUCHSCREEN_ATMEL_MXT_T37
bool "Support T37 Diagnostic Data"
depends on TOUCHSCREEN_ATMEL_MXT
- depends on VIDEO_V4L2=y || (TOUCHSCREEN_ATMEL_MXT=m && VIDEO_V4L2=m)
+ depends on VIDEO_DEV=y || (TOUCHSCREEN_ATMEL_MXT=m && VIDEO_DEV=m)
select VIDEOBUF2_VMALLOC
help
Say Y here if you want support to output data from the T37
@@ -1252,7 +1252,7 @@ config TOUCHSCREEN_SUN4I
config TOUCHSCREEN_SUR40
tristate "Samsung SUR40 (Surface 2.0/PixelSense) touchscreen"
depends on USB && MEDIA_USB_SUPPORT && HAS_DMA
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
select VIDEOBUF2_DMA_SG
help
Say Y here if you want support for the Samsung SUR40 touchscreen
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index a25a77dd9a32..bed68a68f330 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1411,13 +1411,11 @@ static int ads7846_probe(struct spi_device *spi)
return 0;
}
-static int ads7846_remove(struct spi_device *spi)
+static void ads7846_remove(struct spi_device *spi)
{
struct ads7846 *ts = spi_get_drvdata(spi);
ads7846_stop(ts);
-
- return 0;
}
static struct spi_driver ads7846_driver = {
diff --git a/drivers/input/touchscreen/cyttsp4_spi.c b/drivers/input/touchscreen/cyttsp4_spi.c
index 2aec41eb76b7..5d7db84f2749 100644
--- a/drivers/input/touchscreen/cyttsp4_spi.c
+++ b/drivers/input/touchscreen/cyttsp4_spi.c
@@ -164,12 +164,10 @@ static int cyttsp4_spi_probe(struct spi_device *spi)
return PTR_ERR_OR_ZERO(ts);
}
-static int cyttsp4_spi_remove(struct spi_device *spi)
+static void cyttsp4_spi_remove(struct spi_device *spi)
{
struct cyttsp4 *ts = spi_get_drvdata(spi);
cyttsp4_remove(ts);
-
- return 0;
}
static struct spi_driver cyttsp4_spi_driver = {
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index a3bfc7a41679..752e8ba4fecb 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -18,6 +18,7 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/platform_data/x86/soc.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/of.h>
@@ -805,21 +806,6 @@ static int goodix_reset(struct goodix_ts_data *ts)
}
#ifdef ACPI_GPIO_SUPPORT
-#include <asm/cpu_device_id.h>
-#include <asm/intel-family.h>
-
-static const struct x86_cpu_id baytrail_cpu_ids[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT, X86_FEATURE_ANY, },
- {}
-};
-
-static inline bool is_byt(void)
-{
- const struct x86_cpu_id *id = x86_match_cpu(baytrail_cpu_ids);
-
- return !!id;
-}
-
static const struct acpi_gpio_params first_gpio = { 0, 0, false };
static const struct acpi_gpio_params second_gpio = { 1, 0, false };
@@ -878,7 +864,7 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
const struct acpi_gpio_mapping *gpio_mapping = NULL;
struct device *dev = &ts->client->dev;
LIST_HEAD(resources);
- int ret;
+ int irq, ret;
ts->gpio_count = 0;
ts->gpio_int_idx = -1;
@@ -891,6 +877,20 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
acpi_dev_free_resource_list(&resources);
+ /*
+ * CHT devices should have a GpioInt + a regular GPIO ACPI resource.
+ * Some CHT devices have a bug where there is also a bogus Interrupt
+ * resource, copied from a previous BYT-based generation. i2c-core-acpi
+ * will use the non-working Interrupt resource, fix this up.
+ */
+ if (soc_intel_is_cht() && ts->gpio_count == 2 && ts->gpio_int_idx != -1) {
+ irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 0);
+ if (irq > 0 && irq != ts->client->irq) {
+ dev_warn(dev, "Overriding IRQ %d -> %d\n", ts->client->irq, irq);
+ ts->client->irq = irq;
+ }
+ }
+
if (ts->gpio_count == 2 && ts->gpio_int_idx == 0) {
ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
gpio_mapping = acpi_goodix_int_first_gpios;
@@ -903,7 +903,7 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
dev_info(dev, "Using ACPI INTI and INTO methods for IRQ pin access\n");
ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_METHOD;
gpio_mapping = acpi_goodix_reset_only_gpios;
- } else if (is_byt() && ts->gpio_count == 2 && ts->gpio_int_idx == -1) {
+ } else if (soc_intel_is_byt() && ts->gpio_count == 2 && ts->gpio_int_idx == -1) {
dev_info(dev, "No ACPI GpioInt resource, assuming that the GPIO order is reset, int\n");
ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
gpio_mapping = acpi_goodix_int_last_gpios;
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index a2f55920b9b2..555dfe98b3c4 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -64,11 +64,9 @@ static int tsc2005_probe(struct spi_device *spi)
tsc2005_cmd);
}
-static int tsc2005_remove(struct spi_device *spi)
+static void tsc2005_remove(struct spi_device *spi)
{
tsc200x_remove(&spi->dev);
-
- return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c
index 129ebc810de8..8bd03278ad9a 100644
--- a/drivers/input/touchscreen/zinitix.c
+++ b/drivers/input/touchscreen/zinitix.c
@@ -135,7 +135,7 @@ struct point_coord {
struct touch_event {
__le16 status;
- u8 finger_cnt;
+ u8 finger_mask;
u8 time_stamp;
struct point_coord point_coord[MAX_SUPPORTED_FINGER_NUM];
};
@@ -322,11 +322,32 @@ static int zinitix_send_power_on_sequence(struct bt541_ts_data *bt541)
static void zinitix_report_finger(struct bt541_ts_data *bt541, int slot,
const struct point_coord *p)
{
+ u16 x, y;
+
+ if (unlikely(!(p->sub_status &
+ (SUB_BIT_UP | SUB_BIT_DOWN | SUB_BIT_MOVE)))) {
+ dev_dbg(&bt541->client->dev, "unknown finger event %#02x\n",
+ p->sub_status);
+ return;
+ }
+
+ x = le16_to_cpu(p->x);
+ y = le16_to_cpu(p->y);
+
input_mt_slot(bt541->input_dev, slot);
- input_mt_report_slot_state(bt541->input_dev, MT_TOOL_FINGER, true);
- touchscreen_report_pos(bt541->input_dev, &bt541->prop,
- le16_to_cpu(p->x), le16_to_cpu(p->y), true);
- input_report_abs(bt541->input_dev, ABS_MT_TOUCH_MAJOR, p->width);
+ if (input_mt_report_slot_state(bt541->input_dev, MT_TOOL_FINGER,
+ !(p->sub_status & SUB_BIT_UP))) {
+ touchscreen_report_pos(bt541->input_dev,
+ &bt541->prop, x, y, true);
+ input_report_abs(bt541->input_dev,
+ ABS_MT_TOUCH_MAJOR, p->width);
+ dev_dbg(&bt541->client->dev, "finger %d %s (%u, %u)\n",
+ slot, p->sub_status & SUB_BIT_DOWN ? "down" : "move",
+ x, y);
+ } else {
+ dev_dbg(&bt541->client->dev, "finger %d up (%u, %u)\n",
+ slot, x, y);
+ }
}
static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler)
@@ -334,6 +355,7 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler)
struct bt541_ts_data *bt541 = bt541_handler;
struct i2c_client *client = bt541->client;
struct touch_event touch_event;
+ unsigned long finger_mask;
int error;
int i;
@@ -346,10 +368,14 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler)
goto out;
}
- for (i = 0; i < MAX_SUPPORTED_FINGER_NUM; i++)
- if (touch_event.point_coord[i].sub_status & SUB_BIT_EXIST)
- zinitix_report_finger(bt541, i,
- &touch_event.point_coord[i]);
+ finger_mask = touch_event.finger_mask;
+ for_each_set_bit(i, &finger_mask, MAX_SUPPORTED_FINGER_NUM) {
+ const struct point_coord *p = &touch_event.point_coord[i];
+
+ /* Only process contacts that are actually reported */
+ if (p->sub_status & SUB_BIT_EXIST)
+ zinitix_report_finger(bt541, i, p);
+ }
input_mt_sync_frame(bt541->input_dev);
input_sync(bt541->input_dev);
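
Iterating with for_each_set_bit() visits only the slots the controller flagged in finger_mask, instead of scanning every possible contact. A tiny sketch:

    unsigned long mask = 0x0a;  /* fingers 1 and 3 reported */
    unsigned int i;

    for_each_set_bit(i, &mask, MAX_SUPPORTED_FINGER_NUM)
            pr_debug("process finger %u\n", i); /* visits i = 1, then i = 3 */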
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 3eb68fa1b8cc..c79a0df090c0 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -144,8 +144,8 @@ config IOMMU_DMA
select IRQ_MSI_IOMMU
select NEED_SG_DMA_LENGTH
-# Shared Virtual Addressing library
-config IOMMU_SVA_LIB
+# Shared Virtual Addressing
+config IOMMU_SVA
bool
select IOASID
@@ -379,7 +379,7 @@ config ARM_SMMU_V3
config ARM_SMMU_V3_SVA
bool "Shared Virtual Addressing support for the ARM SMMUv3"
depends on ARM_SMMU_V3
- select IOMMU_SVA_LIB
+ select IOMMU_SVA
select MMU_NOTIFIER
help
Support for sharing process address spaces with devices using the
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index bc7f730edbb0..44475a9b3eea 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -27,6 +27,6 @@ obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
-obj-$(CONFIG_IOMMU_SVA_LIB) += iommu-sva-lib.o io-pgfault.o
+obj-$(CONFIG_IOMMU_SVA) += iommu-sva-lib.o io-pgfault.o
obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o
obj-$(CONFIG_APPLE_DART) += apple-dart.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 416815a525d6..bb95edf74415 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -14,6 +14,7 @@
extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
extern void amd_iommu_apply_erratum_63(u16 devid);
+extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
extern int amd_iommu_init_devices(void);
extern void amd_iommu_uninit_devices(void);
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index ffc89c4fb120..47108ed44fbb 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -110,6 +110,7 @@
#define PASID_MASK 0x0000ffff
/* MMIO status bits */
+#define MMIO_STATUS_EVT_OVERFLOW_INT_MASK (1 << 0)
#define MMIO_STATUS_EVT_INT_MASK (1 << 1)
#define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2)
#define MMIO_STATUS_PPR_INT_MASK (1 << 6)
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index b10fb52ea442..7bfe37e52e21 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -658,6 +658,16 @@ static int __init alloc_command_buffer(struct amd_iommu *iommu)
}
/*
+ * This function restarts event logging in case the IOMMU experienced
+ * an event log buffer overflow.
+ */
+void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
+{
+ iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
+ iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+}
+
+/*
* This function resets the command buffer if the IOMMU stopped fetching
* commands from it.
*/
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index b1bf4125b0f7..6608d1717574 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -492,18 +492,18 @@ static void v1_free_pgtable(struct io_pgtable *iop)
dom = container_of(pgtable, struct protection_domain, iop);
- /* Update data structure */
- amd_iommu_domain_clr_pt_root(dom);
-
- /* Make changes visible to IOMMUs */
- amd_iommu_domain_update(dom);
-
/* Page-table is not visible to IOMMU anymore, so free it */
BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
pgtable->mode > PAGE_MODE_6_LEVEL);
free_sub_pt(pgtable->root, pgtable->mode, &freelist);
+ /* Update data structure */
+ amd_iommu_domain_clr_pt_root(dom);
+
+ /* Make changes visible to IOMMUs */
+ amd_iommu_domain_update(dom);
+
put_pages_list(&freelist);
}
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 461f1844ed1f..a18b549951bb 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -764,7 +764,8 @@ amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
#endif /* !CONFIG_IRQ_REMAP */
#define AMD_IOMMU_INT_MASK \
- (MMIO_STATUS_EVT_INT_MASK | \
+ (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \
+ MMIO_STATUS_EVT_INT_MASK | \
MMIO_STATUS_PPR_INT_MASK | \
MMIO_STATUS_GALOG_INT_MASK)
@@ -774,7 +775,7 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
while (status & AMD_IOMMU_INT_MASK) {
- /* Enable EVT and PPR and GA interrupts again */
+ /* Enable interrupt sources again */
writel(AMD_IOMMU_INT_MASK,
iommu->mmio_base + MMIO_STATUS_OFFSET);
@@ -795,6 +796,11 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
}
#endif
+ if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) {
+ pr_info_ratelimited("IOMMU event log overflow\n");
+ amd_iommu_restart_event_logging(iommu);
+ }
+
/*
* Hardware bug: ERBT1312
* When re-enabling interrupt (by writing 1
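The status register here is write-one-to-clear, so the thread acks each source before handling it and then re-reads to catch anything that fired in between. A hedged sketch of that generic pattern (register and helper names are hypothetical, not the AMD IOMMU API):

u32 status = readl(mmio_base + STATUS_REG);

while (status & INT_SOURCE_MASK) {
	/* Ack: writing 1 clears the bit and re-arms the source */
	writel(status & INT_SOURCE_MASK, mmio_base + STATUS_REG);

	if (status & OVERFLOW_BIT)
		restart_logging();	/* recover before consuming events */

	/* Re-read so a source that fired during handling is not lost */
	status = readl(mmio_base + STATUS_REG);
}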
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index a737ba5f727e..22ddd05bbdcd 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -340,14 +340,12 @@ __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
if (IS_ERR(bond->smmu_mn)) {
ret = PTR_ERR(bond->smmu_mn);
- goto err_free_pasid;
+ goto err_free_bond;
}
list_add(&bond->list, &master->bonds);
return &bond->sva;
-err_free_pasid:
- iommu_sva_free_pasid(mm);
err_free_bond:
kfree(bond);
return ERR_PTR(ret);
@@ -377,7 +375,6 @@ void arm_smmu_sva_unbind(struct iommu_sva *handle)
if (refcount_dec_and_test(&bond->refs)) {
list_del(&bond->list);
arm_smmu_mmu_notifier_put(bond->smmu_mn);
- iommu_sva_free_pasid(bond->mm);
kfree(bond);
}
mutex_unlock(&sva_lock);
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index 247d0f2d5fdf..39a06d245f12 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -52,7 +52,7 @@ config INTEL_IOMMU_SVM
select PCI_PRI
select MMU_NOTIFIER
select IOASID
- select IOMMU_SVA_LIB
+ select IOMMU_SVA
help
Shared Virtual Memory (SVM) provides a facility for devices
to access DMA resources through process address space by
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 92fea3fbbb11..1ce1741a7fa4 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -2738,7 +2738,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
spin_unlock_irqrestore(&device_domain_lock, flags);
/* PASID table is mandatory for a PCI device in scalable mode. */
- if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
+ if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
ret = intel_pasid_alloc_table(dev);
if (ret) {
dev_err(dev, "PASID table allocation failed\n");
@@ -4781,7 +4781,7 @@ attach_failed:
link_failed:
spin_unlock_irqrestore(&device_domain_lock, flags);
if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
- ioasid_put(domain->default_pasid);
+ ioasid_free(domain->default_pasid);
return ret;
}
@@ -4811,7 +4811,7 @@ static void aux_domain_remove_dev(struct dmar_domain *domain,
spin_unlock_irqrestore(&device_domain_lock, flags);
if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
- ioasid_put(domain->default_pasid);
+ ioasid_free(domain->default_pasid);
}
static int prepare_domain_attach_device(struct iommu_domain *domain,
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 5b5d69b04fcc..51ac2096b3da 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -514,11 +514,6 @@ static int intel_svm_alloc_pasid(struct device *dev, struct mm_struct *mm,
return iommu_sva_alloc_pasid(mm, PASID_MIN, max_pasid - 1);
}
-static void intel_svm_free_pasid(struct mm_struct *mm)
-{
- iommu_sva_free_pasid(mm);
-}
-
static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
struct device *dev,
struct mm_struct *mm,
@@ -662,8 +657,6 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
kfree(svm);
}
}
- /* Drop a PASID reference and free it if no reference. */
- intel_svm_free_pasid(mm);
}
out:
return ret;
@@ -1047,8 +1040,6 @@ struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, void
}
sva = intel_svm_bind_mm(iommu, dev, mm, flags);
- if (IS_ERR_OR_NULL(sva))
- intel_svm_free_pasid(mm);
mutex_unlock(&pasid_mutex);
return sva;
diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c
index 06fee7416816..a786c034907c 100644
--- a/drivers/iommu/ioasid.c
+++ b/drivers/iommu/ioasid.c
@@ -2,7 +2,7 @@
/*
* I/O Address Space ID allocator. There is one global IOASID space, split into
* subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and
- * free IOASIDs with ioasid_alloc and ioasid_put.
+ * free IOASIDs with ioasid_alloc() and ioasid_free().
*/
#include <linux/ioasid.h>
#include <linux/module.h>
@@ -15,7 +15,6 @@ struct ioasid_data {
struct ioasid_set *set;
void *private;
struct rcu_head rcu;
- refcount_t refs;
};
/*
@@ -315,7 +314,6 @@ ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
data->set = set;
data->private = private;
- refcount_set(&data->refs, 1);
/*
* Custom allocator needs allocator data to perform platform specific
@@ -348,35 +346,11 @@ exit_free:
EXPORT_SYMBOL_GPL(ioasid_alloc);
/**
- * ioasid_get - obtain a reference to the IOASID
- * @ioasid: the ID to get
- */
-void ioasid_get(ioasid_t ioasid)
-{
- struct ioasid_data *ioasid_data;
-
- spin_lock(&ioasid_allocator_lock);
- ioasid_data = xa_load(&active_allocator->xa, ioasid);
- if (ioasid_data)
- refcount_inc(&ioasid_data->refs);
- else
- WARN_ON(1);
- spin_unlock(&ioasid_allocator_lock);
-}
-EXPORT_SYMBOL_GPL(ioasid_get);
-
-/**
- * ioasid_put - Release a reference to an ioasid
+ * ioasid_free - Free an ioasid
* @ioasid: the ID to remove
- *
- * Put a reference to the IOASID, free it when the number of references drops to
- * zero.
- *
- * Return: %true if the IOASID was freed, %false otherwise.
*/
-bool ioasid_put(ioasid_t ioasid)
+void ioasid_free(ioasid_t ioasid)
{
- bool free = false;
struct ioasid_data *ioasid_data;
spin_lock(&ioasid_allocator_lock);
@@ -386,10 +360,6 @@ bool ioasid_put(ioasid_t ioasid)
goto exit_unlock;
}
- free = refcount_dec_and_test(&ioasid_data->refs);
- if (!free)
- goto exit_unlock;
-
active_allocator->ops->free(ioasid, active_allocator->ops->pdata);
/* Custom allocator needs additional steps to free the xa element */
if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) {
@@ -399,9 +369,8 @@ bool ioasid_put(ioasid_t ioasid)
exit_unlock:
spin_unlock(&ioasid_allocator_lock);
- return free;
}
-EXPORT_SYMBOL_GPL(ioasid_put);
+EXPORT_SYMBOL_GPL(ioasid_free);
/**
* ioasid_find - Find IOASID data
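With the refcount removed, the contract becomes alloc-once/free-once, with the caller owning the lifetime outright. A minimal usage sketch under the new API (set name and range are hypothetical):

static DECLARE_IOASID_SET(example_set);

ioasid_t id;

id = ioasid_alloc(&example_set, 1, 512, NULL);
if (id == INVALID_IOASID)
	return -ENOMEM;

/* ... program the ID into hardware, use it ... */

ioasid_free(id);	/* unconditional; no ioasid_get()/ioasid_put() pairing */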
diff --git a/drivers/iommu/iommu-sva-lib.c b/drivers/iommu/iommu-sva-lib.c
index bd41405d34e9..106506143896 100644
--- a/drivers/iommu/iommu-sva-lib.c
+++ b/drivers/iommu/iommu-sva-lib.c
@@ -18,8 +18,7 @@ static DECLARE_IOASID_SET(iommu_sva_pasid);
*
* Try to allocate a PASID for this mm, or take a reference to the existing one
* provided it fits within the [@min, @max] range. On success the PASID is
- * available in mm->pasid, and must be released with iommu_sva_free_pasid().
- * @min must be greater than 0, because 0 indicates an unused mm->pasid.
+ * available in mm->pasid and remains valid for the lifetime of the mm.
*
* Returns 0 on success and < 0 on error.
*/
@@ -33,38 +32,24 @@ int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
return -EINVAL;
mutex_lock(&iommu_sva_lock);
- if (mm->pasid) {
- if (mm->pasid >= min && mm->pasid <= max)
- ioasid_get(mm->pasid);
- else
+ /* Is a PASID already associated with this mm? */
+ if (pasid_valid(mm->pasid)) {
+ if (mm->pasid < min || mm->pasid >= max)
ret = -EOVERFLOW;
- } else {
- pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm);
- if (pasid == INVALID_IOASID)
- ret = -ENOMEM;
- else
- mm->pasid = pasid;
+ goto out;
}
+
+ pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm);
+ if (!pasid_valid(pasid))
+ ret = -ENOMEM;
+ else
+ mm_pasid_set(mm, pasid);
+out:
mutex_unlock(&iommu_sva_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid);
-/**
- * iommu_sva_free_pasid - Release the mm's PASID
- * @mm: the mm
- *
- * Drop one reference to a PASID allocated with iommu_sva_alloc_pasid()
- */
-void iommu_sva_free_pasid(struct mm_struct *mm)
-{
- mutex_lock(&iommu_sva_lock);
- if (ioasid_put(mm->pasid))
- mm->pasid = 0;
- mutex_unlock(&iommu_sva_lock);
-}
-EXPORT_SYMBOL_GPL(iommu_sva_free_pasid);
-
/* ioasid_find getter() requires a void * argument */
static bool __mmget_not_zero(void *mm)
{
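Callers now treat the PASID as a property of the mm rather than a counted resource: allocate it once and never free it explicitly, since it is dropped when the mm itself is released (via the mm_pasid_set()/teardown helpers this series relies on). A hedged caller-side sketch (range values hypothetical; note the check above treats max as exclusive):

int ret;

ret = iommu_sva_alloc_pasid(mm, 1, 1 << 20);
if (ret)
	return ret;		/* -ENOMEM or -EOVERFLOW */

/* mm->pasid is now valid for the whole lifetime of the mm;
 * there is no iommu_sva_free_pasid() counterpart any more. */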
diff --git a/drivers/iommu/iommu-sva-lib.h b/drivers/iommu/iommu-sva-lib.h
index 031155010ca8..8909ea1094e3 100644
--- a/drivers/iommu/iommu-sva-lib.h
+++ b/drivers/iommu/iommu-sva-lib.h
@@ -9,7 +9,6 @@
#include <linux/mm_types.h>
int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max);
-void iommu_sva_free_pasid(struct mm_struct *mm);
struct mm_struct *iommu_sva_find(ioasid_t pasid);
/* I/O Page fault */
@@ -17,7 +16,7 @@ struct device;
struct iommu_fault;
struct iopf_queue;
-#ifdef CONFIG_IOMMU_SVA_LIB
+#ifdef CONFIG_IOMMU_SVA
int iommu_queue_iopf(struct iommu_fault *fault, void *cookie);
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
@@ -28,7 +27,7 @@ struct iopf_queue *iopf_queue_alloc(const char *name);
void iopf_queue_free(struct iopf_queue *queue);
int iopf_queue_discard_partial(struct iopf_queue *queue);
-#else /* CONFIG_IOMMU_SVA_LIB */
+#else /* CONFIG_IOMMU_SVA */
static inline int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
{
return -ENODEV;
@@ -64,5 +63,5 @@ static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
{
return -ENODEV;
}
-#endif /* CONFIG_IOMMU_SVA_LIB */
+#endif /* CONFIG_IOMMU_SVA */
#endif /* _IOMMU_SVA_LIB_H */
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 25b834104790..77df61092be3 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -562,22 +562,52 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct mtk_iommu_data *data;
+ struct device_link *link;
+ struct device *larbdev;
+ unsigned int larbid, larbidx, i;
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
return ERR_PTR(-ENODEV); /* Not an iommu client device */
data = dev_iommu_priv_get(dev);
+ /*
+ * Link the consumer device with the smi-larb device (supplier).
+ * The device that connects with each larb is an independent HW.
+ * All the ports in each device should be in the same larb.
+ */
+ larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
+ for (i = 1; i < fwspec->num_ids; i++) {
+ larbidx = MTK_M4U_TO_LARB(fwspec->ids[i]);
+ if (larbid != larbidx) {
+ dev_err(dev, "Can only use one larb. Fail@larb%d-%d.\n",
+ larbid, larbidx);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+ larbdev = data->larb_imu[larbid].dev;
+ link = device_link_add(dev, larbdev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
+ if (!link)
+ dev_err(dev, "Unable to link %s\n", dev_name(larbdev));
return &data->iommu;
}
static void mtk_iommu_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct mtk_iommu_data *data;
+ struct device *larbdev;
+ unsigned int larbid;
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
return;
+ data = dev_iommu_priv_get(dev);
+ larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
+ larbdev = data->larb_imu[larbid].dev;
+ device_link_remove(dev, larbdev);
+
iommu_fwspec_free(dev);
}
@@ -848,6 +878,10 @@ static int mtk_iommu_probe(struct platform_device *pdev)
plarbdev = of_find_device_by_node(larbnode);
if (!plarbdev) {
of_node_put(larbnode);
+ return -ENODEV;
+ }
+ if (!plarbdev->dev.driver) {
+ of_node_put(larbnode);
return -EPROBE_DEFER;
}
data->larb_imu[id].dev = &plarbdev->dev;
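device_link_add() with DL_FLAG_PM_RUNTIME makes runtime-PM activity on the consumer propagate to the supplier, which is exactly what ties the IOMMU client to its smi-larb here; DL_FLAG_STATELESS means the link must be torn down by hand, as the release path above does. A standalone sketch of the pattern (device pointers hypothetical):

#include <linux/device.h>
#include <linux/pm_runtime.h>

struct device_link *link;

link = device_link_add(consumer_dev, supplier_dev,
		       DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
if (!link)
	dev_err(consumer_dev, "failed to link supplier\n");

/* pm_runtime_get_sync(consumer_dev) now also resumes supplier_dev */

/* Stateless links are not cleaned up automatically: */
device_link_remove(consumer_dev, supplier_dev);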
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index be22fcf988ce..4052aad75a81 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -423,7 +423,18 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct of_phandle_args iommu_spec;
struct mtk_iommu_data *data;
- int err, idx = 0;
+ int err, idx = 0, larbid, larbidx;
+ struct device_link *link;
+ struct device *larbdev;
+
+ /*
+ * In the deferred case, free the existing fwspec.
+ * Always initialize the fwspec internally.
+ */
+ if (fwspec) {
+ iommu_fwspec_free(dev);
+ fwspec = dev_iommu_fwspec_get(dev);
+ }
while (!of_parse_phandle_with_args(dev->of_node, "iommus",
"#iommu-cells",
@@ -444,6 +455,23 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
data = dev_iommu_priv_get(dev);
+ /* Link the consumer device with the smi-larb device (supplier) */
+ larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
+ for (idx = 1; idx < fwspec->num_ids; idx++) {
+ larbidx = mt2701_m4u_to_larb(fwspec->ids[idx]);
+ if (larbid != larbidx) {
+ dev_err(dev, "Can only use one larb. Fail@larb%d-%d.\n",
+ larbid, larbidx);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ larbdev = data->larb_imu[larbid].dev;
+ link = device_link_add(dev, larbdev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
+ if (!link)
+ dev_err(dev, "Unable to link %s\n", dev_name(larbdev));
+
return &data->iommu;
}
@@ -464,10 +492,18 @@ static void mtk_iommu_probe_finalize(struct device *dev)
static void mtk_iommu_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct mtk_iommu_data *data;
+ struct device *larbdev;
+ unsigned int larbid;
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
return;
+ data = dev_iommu_priv_get(dev);
+ larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
+ larbdev = data->larb_imu[larbid].dev;
+ device_link_remove(dev, larbdev);
+
iommu_fwspec_free(dev);
}
@@ -595,6 +631,10 @@ static int mtk_iommu_probe(struct platform_device *pdev)
plarbdev = of_find_device_by_node(larbnode);
if (!plarbdev) {
of_node_put(larbnode);
+ return -ENODEV;
+ }
+ if (!plarbdev->dev.driver) {
+ of_node_put(larbnode);
return -EPROBE_DEFER;
}
data->larb_imu[i].dev = &plarbdev->dev;
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index e900e3c46903..2561ce8a2ce8 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -808,8 +808,10 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
return NULL;
mc = platform_get_drvdata(pdev);
- if (!mc)
+ if (!mc) {
+ put_device(&pdev->dev);
return NULL;
+ }
return mc->smmu;
}
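of_find_device_by_node() returns a device with an elevated reference count, so every exit path after the lookup must drop it; the fix adds the missing put_device() on the !mc branch. A minimal sketch of the rule (struct name hypothetical):

#include <linux/of_platform.h>

static struct example_mc *example_find(struct device_node *np)
{
	struct platform_device *pdev = of_find_device_by_node(np);
	struct example_mc *mc;

	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc) {
		put_device(&pdev->dev);	/* drop the lookup's reference */
		return NULL;
	}

	return mc;
}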
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 7038957f4a77..680d2fcf2686 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -430,6 +430,14 @@ config QCOM_PDC
Power Domain Controller driver to manage and configure wakeup
IRQs for Qualcomm Technologies Inc (QTI) mobile chips.
+config QCOM_MPM
+ tristate "QCOM MPM"
+ depends on ARCH_QCOM
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ MSM Power Manager driver to manage and configure wakeup
+ IRQs for Qualcomm Technologies Inc (QTI) mobile chips.
+
config CSKY_MPINTC
bool
depends on CSKY
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index c1f611cbfbf8..160a1d8ceaa9 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -92,8 +92,8 @@ obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
obj-$(CONFIG_ARCH_SYNQUACER) += irq-sni-exiu.o
obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o
obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o
-obj-$(CONFIG_NDS32) += irq-ativic32.o
obj-$(CONFIG_QCOM_PDC) += qcom-pdc.o
+obj-$(CONFIG_QCOM_MPM) += irq-qcom-mpm.o
obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o
obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o
obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 38091ebb9403..12dd48727a15 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -24,7 +24,7 @@
* - Default "this CPU" register view and explicit per-CPU views
*
* In addition, this driver also handles FIQs, as these are routed to the same
- * IRQ vector. These are used for Fast IPIs (TODO), the ARMv8 timer IRQs, and
+ * IRQ vector. These are used for Fast IPIs, the ARMv8 timer IRQs, and
* performance counters (TODO).
*
* Implementation notes:
@@ -52,9 +52,12 @@
#include <linux/irqchip.h>
#include <linux/irqchip/arm-vgic-info.h>
#include <linux/irqdomain.h>
+#include <linux/jump_label.h>
#include <linux/limits.h>
#include <linux/of_address.h>
#include <linux/slab.h>
+#include <asm/apple_m1_pmu.h>
+#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/sysreg.h>
#include <asm/virt.h>
@@ -62,20 +65,22 @@
#include <dt-bindings/interrupt-controller/apple-aic.h>
/*
- * AIC registers (MMIO)
+ * AIC v1 registers (MMIO)
*/
#define AIC_INFO 0x0004
-#define AIC_INFO_NR_HW GENMASK(15, 0)
+#define AIC_INFO_NR_IRQ GENMASK(15, 0)
#define AIC_CONFIG 0x0010
#define AIC_WHOAMI 0x2000
#define AIC_EVENT 0x2004
-#define AIC_EVENT_TYPE GENMASK(31, 16)
+#define AIC_EVENT_DIE GENMASK(31, 24)
+#define AIC_EVENT_TYPE GENMASK(23, 16)
#define AIC_EVENT_NUM GENMASK(15, 0)
-#define AIC_EVENT_TYPE_HW 1
+#define AIC_EVENT_TYPE_FIQ 0 /* Software use */
+#define AIC_EVENT_TYPE_IRQ 1
#define AIC_EVENT_TYPE_IPI 4
#define AIC_EVENT_IPI_OTHER 1
#define AIC_EVENT_IPI_SELF 2
@@ -91,34 +96,73 @@
#define AIC_IPI_SELF BIT(31)
#define AIC_TARGET_CPU 0x3000
-#define AIC_SW_SET 0x4000
-#define AIC_SW_CLR 0x4080
-#define AIC_MASK_SET 0x4100
-#define AIC_MASK_CLR 0x4180
#define AIC_CPU_IPI_SET(cpu) (0x5008 + ((cpu) << 7))
#define AIC_CPU_IPI_CLR(cpu) (0x500c + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_SET(cpu) (0x5024 + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_CLR(cpu) (0x5028 + ((cpu) << 7))
+#define AIC_MAX_IRQ 0x400
+
+/*
+ * AIC v2 registers (MMIO)
+ */
+
+#define AIC2_VERSION 0x0000
+#define AIC2_VERSION_VER GENMASK(7, 0)
+
+#define AIC2_INFO1 0x0004
+#define AIC2_INFO1_NR_IRQ GENMASK(15, 0)
+#define AIC2_INFO1_LAST_DIE GENMASK(27, 24)
+
+#define AIC2_INFO2 0x0008
+
+#define AIC2_INFO3 0x000c
+#define AIC2_INFO3_MAX_IRQ GENMASK(15, 0)
+#define AIC2_INFO3_MAX_DIE GENMASK(27, 24)
+
+#define AIC2_RESET 0x0010
+#define AIC2_RESET_RESET BIT(0)
+
+#define AIC2_CONFIG 0x0014
+#define AIC2_CONFIG_ENABLE BIT(0)
+#define AIC2_CONFIG_PREFER_PCPU BIT(28)
+
+#define AIC2_TIMEOUT 0x0028
+#define AIC2_CLUSTER_PRIO 0x0030
+#define AIC2_DELAY_GROUPS 0x0100
+
+#define AIC2_IRQ_CFG 0x2000
+
+/*
+ * AIC2 registers are laid out like this, starting at AIC2_IRQ_CFG:
+ *
+ * Repeat for each die:
+ * IRQ_CFG: u32 * MAX_IRQS
+ * SW_SET: u32 * (MAX_IRQS / 32)
+ * SW_CLR: u32 * (MAX_IRQS / 32)
+ * MASK_SET: u32 * (MAX_IRQS / 32)
+ * MASK_CLR: u32 * (MAX_IRQS / 32)
+ * HW_STATE: u32 * (MAX_IRQS / 32)
+ *
+ * This is followed by a set of event registers, each 16K page aligned.
+ * The first one is the AP event register we will use. Unfortunately,
+ * the actual implemented die count is not specified anywhere in the
+ * capability registers, so we have to explicitly specify the event
+ * register as a second reg entry in the device tree to remain
+ * forward-compatible.
+ */
+
+#define AIC2_IRQ_CFG_TARGET GENMASK(3, 0)
+#define AIC2_IRQ_CFG_DELAY_IDX GENMASK(7, 5)
+
#define MASK_REG(x) (4 * ((x) >> 5))
#define MASK_BIT(x) BIT((x) & GENMASK(4, 0))
/*
* IMP-DEF sysregs that control FIQ sources
- * Note: sysreg-based IPIs are not supported yet.
*/
-/* Core PMC control register */
-#define SYS_IMP_APL_PMCR0_EL1 sys_reg(3, 1, 15, 0, 0)
-#define PMCR0_IMODE GENMASK(10, 8)
-#define PMCR0_IMODE_OFF 0
-#define PMCR0_IMODE_PMI 1
-#define PMCR0_IMODE_AIC 2
-#define PMCR0_IMODE_HALT 3
-#define PMCR0_IMODE_FIQ 4
-#define PMCR0_IACT BIT(11)
-
/* IPI request registers */
#define SYS_IMP_APL_IPI_RR_LOCAL_EL1 sys_reg(3, 5, 15, 0, 0)
#define SYS_IMP_APL_IPI_RR_GLOBAL_EL1 sys_reg(3, 5, 15, 0, 1)
@@ -155,7 +199,18 @@
#define SYS_IMP_APL_UPMSR_EL1 sys_reg(3, 7, 15, 6, 4)
#define UPMSR_IACT BIT(0)
-#define AIC_NR_FIQ 4
+/* MPIDR fields */
+#define MPIDR_CPU(x) MPIDR_AFFINITY_LEVEL(x, 0)
+#define MPIDR_CLUSTER(x) MPIDR_AFFINITY_LEVEL(x, 1)
+
+#define AIC_IRQ_HWIRQ(die, irq) (FIELD_PREP(AIC_EVENT_DIE, die) | \
+ FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_IRQ) | \
+ FIELD_PREP(AIC_EVENT_NUM, irq))
+#define AIC_FIQ_HWIRQ(x) (FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_FIQ) | \
+ FIELD_PREP(AIC_EVENT_NUM, x))
+#define AIC_HWIRQ_IRQ(x) FIELD_GET(AIC_EVENT_NUM, x)
+#define AIC_HWIRQ_DIE(x) FIELD_GET(AIC_EVENT_DIE, x)
+#define AIC_NR_FIQ 6
#define AIC_NR_SWIPI 32
/*
@@ -173,11 +228,81 @@
#define AIC_TMR_EL02_PHYS AIC_TMR_GUEST_PHYS
#define AIC_TMR_EL02_VIRT AIC_TMR_GUEST_VIRT
+DEFINE_STATIC_KEY_TRUE(use_fast_ipi);
+
+struct aic_info {
+ int version;
+
+ /* Register offsets */
+ u32 event;
+ u32 target_cpu;
+ u32 irq_cfg;
+ u32 sw_set;
+ u32 sw_clr;
+ u32 mask_set;
+ u32 mask_clr;
+
+ u32 die_stride;
+
+ /* Features */
+ bool fast_ipi;
+};
+
+static const struct aic_info aic1_info = {
+ .version = 1,
+
+ .event = AIC_EVENT,
+ .target_cpu = AIC_TARGET_CPU,
+};
+
+static const struct aic_info aic1_fipi_info = {
+ .version = 1,
+
+ .event = AIC_EVENT,
+ .target_cpu = AIC_TARGET_CPU,
+
+ .fast_ipi = true,
+};
+
+static const struct aic_info aic2_info = {
+ .version = 2,
+
+ .irq_cfg = AIC2_IRQ_CFG,
+
+ .fast_ipi = true,
+};
+
+static const struct of_device_id aic_info_match[] = {
+ {
+ .compatible = "apple,t8103-aic",
+ .data = &aic1_fipi_info,
+ },
+ {
+ .compatible = "apple,aic",
+ .data = &aic1_info,
+ },
+ {
+ .compatible = "apple,aic2",
+ .data = &aic2_info,
+ },
+ {}
+};
+
struct aic_irq_chip {
void __iomem *base;
+ void __iomem *event;
struct irq_domain *hw_domain;
struct irq_domain *ipi_domain;
- int nr_hw;
+ struct {
+ cpumask_t aff;
+ } *fiq_aff[AIC_NR_FIQ];
+
+ int nr_irq;
+ int max_irq;
+ int nr_die;
+ int max_die;
+
+ struct aic_info info;
};
static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);
@@ -205,18 +330,24 @@ static void aic_ic_write(struct aic_irq_chip *ic, u32 reg, u32 val)
static void aic_irq_mask(struct irq_data *d)
{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
- aic_ic_write(ic, AIC_MASK_SET + MASK_REG(irqd_to_hwirq(d)),
- MASK_BIT(irqd_to_hwirq(d)));
+ u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
+ u32 irq = AIC_HWIRQ_IRQ(hwirq);
+
+ aic_ic_write(ic, ic->info.mask_set + off + MASK_REG(irq), MASK_BIT(irq));
}
static void aic_irq_unmask(struct irq_data *d)
{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
- aic_ic_write(ic, AIC_MASK_CLR + MASK_REG(d->hwirq),
- MASK_BIT(irqd_to_hwirq(d)));
+ u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
+ u32 irq = AIC_HWIRQ_IRQ(hwirq);
+
+ aic_ic_write(ic, ic->info.mask_clr + off + MASK_REG(irq), MASK_BIT(irq));
}
static void aic_irq_eoi(struct irq_data *d)
@@ -239,12 +370,12 @@ static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
* We cannot use a relaxed read here, as reads from DMA buffers
* need to be ordered after the IRQ fires.
*/
- event = readl(ic->base + AIC_EVENT);
+ event = readl(ic->event + ic->info.event);
type = FIELD_GET(AIC_EVENT_TYPE, event);
irq = FIELD_GET(AIC_EVENT_NUM, event);
- if (type == AIC_EVENT_TYPE_HW)
- generic_handle_domain_irq(aic_irqc->hw_domain, irq);
+ if (type == AIC_EVENT_TYPE_IRQ)
+ generic_handle_domain_irq(aic_irqc->hw_domain, event);
else if (type == AIC_EVENT_TYPE_IPI && irq == 1)
aic_handle_ipi(regs);
else if (event != 0)
@@ -271,12 +402,14 @@ static int aic_irq_set_affinity(struct irq_data *d,
struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
int cpu;
+ BUG_ON(!ic->info.target_cpu);
+
if (force)
cpu = cpumask_first(mask_val);
else
cpu = cpumask_any_and(mask_val, cpu_online_mask);
- aic_ic_write(ic, AIC_TARGET_CPU + hwirq * 4, BIT(cpu));
+ aic_ic_write(ic, ic->info.target_cpu + AIC_HWIRQ_IRQ(hwirq) * 4, BIT(cpu));
irq_data_update_effective_affinity(d, cpumask_of(cpu));
return IRQ_SET_MASK_OK;
@@ -300,15 +433,21 @@ static struct irq_chip aic_chip = {
.irq_set_type = aic_irq_set_type,
};
+static struct irq_chip aic2_chip = {
+ .name = "AIC2",
+ .irq_mask = aic_irq_mask,
+ .irq_unmask = aic_irq_unmask,
+ .irq_eoi = aic_irq_eoi,
+ .irq_set_type = aic_irq_set_type,
+};
+
/*
* FIQ irqchip
*/
static unsigned long aic_fiq_get_idx(struct irq_data *d)
{
- struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
-
- return irqd_to_hwirq(d) - ic->nr_hw;
+ return AIC_HWIRQ_IRQ(irqd_to_hwirq(d));
}
static void aic_fiq_set_mask(struct irq_data *d)
@@ -386,17 +525,21 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
*/
if (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) {
- pr_err_ratelimited("Fast IPI fired. Acking.\n");
- write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
+ if (static_branch_likely(&use_fast_ipi)) {
+ aic_handle_ipi(regs);
+ } else {
+ pr_err_ratelimited("Fast IPI fired. Acking.\n");
+ write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
+ }
}
if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
generic_handle_domain_irq(aic_irqc->hw_domain,
- aic_irqc->nr_hw + AIC_TMR_EL0_PHYS);
+ AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS));
if (TIMER_FIRING(read_sysreg(cntv_ctl_el0)))
generic_handle_domain_irq(aic_irqc->hw_domain,
- aic_irqc->nr_hw + AIC_TMR_EL0_VIRT);
+ AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT));
if (is_kernel_in_hyp_mode()) {
uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2);
@@ -404,24 +547,23 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
if ((enabled & VM_TMR_FIQ_ENABLE_P) &&
TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02)))
generic_handle_domain_irq(aic_irqc->hw_domain,
- aic_irqc->nr_hw + AIC_TMR_EL02_PHYS);
+ AIC_FIQ_HWIRQ(AIC_TMR_EL02_PHYS));
if ((enabled & VM_TMR_FIQ_ENABLE_V) &&
TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02)))
generic_handle_domain_irq(aic_irqc->hw_domain,
- aic_irqc->nr_hw + AIC_TMR_EL02_VIRT);
+ AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT));
}
- if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
- (FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) {
- /*
- * Not supported yet, let's figure out how to handle this when
- * we implement these proprietary performance counters. For now,
- * just mask it and move on.
- */
- pr_err_ratelimited("PMC FIQ fired. Masking.\n");
- sysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT,
- FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));
+ if (read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & PMCR0_IACT) {
+ int irq;
+ if (cpumask_test_cpu(smp_processor_id(),
+ &aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))
+ irq = AIC_CPU_PMU_P;
+ else
+ irq = AIC_CPU_PMU_E;
+ generic_handle_domain_irq(aic_irqc->hw_domain,
+ AIC_FIQ_HWIRQ(irq));
}
if (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ &&
@@ -455,13 +597,29 @@ static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
irq_hw_number_t hw)
{
struct aic_irq_chip *ic = id->host_data;
+ u32 type = FIELD_GET(AIC_EVENT_TYPE, hw);
+ struct irq_chip *chip = &aic_chip;
- if (hw < ic->nr_hw) {
- irq_domain_set_info(id, irq, hw, &aic_chip, id->host_data,
+ if (ic->info.version == 2)
+ chip = &aic2_chip;
+
+ if (type == AIC_EVENT_TYPE_IRQ) {
+ irq_domain_set_info(id, irq, hw, chip, id->host_data,
handle_fasteoi_irq, NULL, NULL);
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
} else {
- irq_set_percpu_devid(irq);
+ int fiq = FIELD_GET(AIC_EVENT_NUM, hw);
+
+ switch (fiq) {
+ case AIC_CPU_PMU_P:
+ case AIC_CPU_PMU_E:
+ irq_set_percpu_devid_partition(irq, &ic->fiq_aff[fiq]->aff);
+ break;
+ default:
+ irq_set_percpu_devid(irq);
+ break;
+ }
+
irq_domain_set_info(id, irq, hw, &fiq_chip, id->host_data,
handle_percpu_devid_irq, NULL, NULL);
}
@@ -475,32 +633,46 @@ static int aic_irq_domain_translate(struct irq_domain *id,
unsigned int *type)
{
struct aic_irq_chip *ic = id->host_data;
+ u32 *args;
+ u32 die = 0;
- if (fwspec->param_count != 3 || !is_of_node(fwspec->fwnode))
+ if (fwspec->param_count < 3 || fwspec->param_count > 4 ||
+ !is_of_node(fwspec->fwnode))
return -EINVAL;
+ args = &fwspec->param[1];
+
+ if (fwspec->param_count == 4) {
+ die = args[0];
+ args++;
+ }
+
switch (fwspec->param[0]) {
case AIC_IRQ:
- if (fwspec->param[1] >= ic->nr_hw)
+ if (die >= ic->nr_die)
return -EINVAL;
- *hwirq = fwspec->param[1];
+ if (args[0] >= ic->nr_irq)
+ return -EINVAL;
+ *hwirq = AIC_IRQ_HWIRQ(die, args[0]);
break;
case AIC_FIQ:
- if (fwspec->param[1] >= AIC_NR_FIQ)
+ if (die != 0)
+ return -EINVAL;
+ if (args[0] >= AIC_NR_FIQ)
return -EINVAL;
- *hwirq = ic->nr_hw + fwspec->param[1];
+ *hwirq = AIC_FIQ_HWIRQ(args[0]);
/*
* In EL1 the non-redirected registers are the guest's,
* not EL2's, so remap the hwirqs to match.
*/
if (!is_kernel_in_hyp_mode()) {
- switch (fwspec->param[1]) {
+ switch (args[0]) {
case AIC_TMR_GUEST_PHYS:
- *hwirq = ic->nr_hw + AIC_TMR_EL0_PHYS;
+ *hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS);
break;
case AIC_TMR_GUEST_VIRT:
- *hwirq = ic->nr_hw + AIC_TMR_EL0_VIRT;
+ *hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT);
break;
case AIC_TMR_HV_PHYS:
case AIC_TMR_HV_VIRT:
@@ -514,7 +686,7 @@ static int aic_irq_domain_translate(struct irq_domain *id,
return -EINVAL;
}
- *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ *type = args[1] & IRQ_TYPE_SENSE_MASK;
return 0;
}
@@ -563,6 +735,22 @@ static const struct irq_domain_ops aic_irq_domain_ops = {
* IPI irqchip
*/
+static void aic_ipi_send_fast(int cpu)
+{
+ u64 mpidr = cpu_logical_map(cpu);
+ u64 my_mpidr = read_cpuid_mpidr();
+ u64 cluster = MPIDR_CLUSTER(mpidr);
+ u64 idx = MPIDR_CPU(mpidr);
+
+ if (MPIDR_CLUSTER(my_mpidr) == cluster)
+ write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx),
+ SYS_IMP_APL_IPI_RR_LOCAL_EL1);
+ else
+ write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx) | FIELD_PREP(IPI_RR_CLUSTER, cluster),
+ SYS_IMP_APL_IPI_RR_GLOBAL_EL1);
+ isb();
+}
+
static void aic_ipi_mask(struct irq_data *d)
{
u32 irq_bit = BIT(irqd_to_hwirq(d));
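Fast IPIs are addressed by MPIDR affinity fields rather than AIC CPU numbers: Aff1 selects the cluster and Aff0 the core within it. A worked example under the MPIDR_CPU()/MPIDR_CLUSTER() macros above (the mapping value is hypothetical):

/*
 *   cpu_logical_map(cpu) == 0x10001   (Aff1 = 1, Aff0 = 1)
 *   MPIDR_CLUSTER()      -> 1         (second cluster)
 *   MPIDR_CPU()          -> 1         (second core in that cluster)
 *
 * Same cluster as the sender: write IPI_RR_LOCAL_EL1 with just the CPU
 * field; different cluster: write IPI_RR_GLOBAL_EL1 with CPU | CLUSTER.
 */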
@@ -588,8 +776,12 @@ static void aic_ipi_unmask(struct irq_data *d)
* If a pending vIPI was unmasked, raise a HW IPI to ourselves.
* No barriers needed here since this is a self-IPI.
*/
- if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit)
- aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
+ if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit) {
+ if (static_branch_likely(&use_fast_ipi))
+ aic_ipi_send_fast(smp_processor_id());
+ else
+ aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
+ }
}
static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
@@ -617,8 +809,12 @@ static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
smp_mb__after_atomic();
if (!(pending & irq_bit) &&
- (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit))
- send |= AIC_IPI_SEND_CPU(cpu);
+ (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit)) {
+ if (static_branch_likely(&use_fast_ipi))
+ aic_ipi_send_fast(cpu);
+ else
+ send |= AIC_IPI_SEND_CPU(cpu);
+ }
}
/*
@@ -650,8 +846,16 @@ static void aic_handle_ipi(struct pt_regs *regs)
/*
* Ack the IPI. We need to order this after the AIC event read, but
* that is enforced by normal MMIO ordering guarantees.
+ *
+ * For the Fast IPI case, this needs to be ordered before the vIPI
+ * handling below, so we need to isb();
*/
- aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
+ if (static_branch_likely(&use_fast_ipi)) {
+ write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
+ isb();
+ } else {
+ aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
+ }
/*
* The mask read does not need to be ordered. Only we can change
@@ -679,7 +883,8 @@ static void aic_handle_ipi(struct pt_regs *regs)
* No ordering needed here; at worst this just changes the timing of
* when the next IPI will be delivered.
*/
- aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
+ if (!static_branch_likely(&use_fast_ipi))
+ aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
}
static int aic_ipi_alloc(struct irq_domain *d, unsigned int virq,
@@ -766,20 +971,27 @@ static int aic_init_cpu(unsigned int cpu)
/* Commit all of the above */
isb();
- /*
- * Make sure the kernel's idea of logical CPU order is the same as AIC's
- * If we ever end up with a mismatch here, we will have to introduce
- * a mapping table similar to what other irqchip drivers do.
- */
- WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());
+ if (aic_irqc->info.version == 1) {
+ /*
+ * Make sure the kernel's idea of logical CPU order is the same as AIC's
+ * If we ever end up with a mismatch here, we will have to introduce
+ * a mapping table similar to what other irqchip drivers do.
+ */
+ WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());
- /*
- * Always keep IPIs unmasked at the hardware level (except auto-masking
- * by AIC during processing). We manage masks at the vIPI level.
- */
- aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
- aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
- aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
+ /*
+ * Always keep IPIs unmasked at the hardware level (except auto-masking
+ * by AIC during processing). We manage masks at the vIPI level.
+ * These registers only exist on AICv1, AICv2 always uses fast IPIs.
+ */
+ aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
+ if (static_branch_likely(&use_fast_ipi)) {
+ aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);
+ } else {
+ aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
+ aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
+ }
+ }
/* Initialize the local mask state */
__this_cpu_write(aic_fiq_unmasked, 0);
@@ -793,68 +1005,193 @@ static struct gic_kvm_info vgic_info __initdata = {
.no_hw_deactivation = true,
};
+static void build_fiq_affinity(struct aic_irq_chip *ic, struct device_node *aff)
+{
+ int i, n;
+ u32 fiq;
+
+ if (of_property_read_u32(aff, "apple,fiq-index", &fiq) ||
+ WARN_ON(fiq >= AIC_NR_FIQ) || ic->fiq_aff[fiq])
+ return;
+
+ n = of_property_count_elems_of_size(aff, "cpus", sizeof(u32));
+ if (WARN_ON(n < 0))
+ return;
+
+ ic->fiq_aff[fiq] = kzalloc(sizeof(*ic->fiq_aff[fiq]), GFP_KERNEL);
+ if (!ic->fiq_aff[fiq])
+ return;
+
+ for (i = 0; i < n; i++) {
+ struct device_node *cpu_node;
+ u32 cpu_phandle;
+ int cpu;
+
+ if (of_property_read_u32_index(aff, "cpus", i, &cpu_phandle))
+ continue;
+
+ cpu_node = of_find_node_by_phandle(cpu_phandle);
+ if (WARN_ON(!cpu_node))
+ continue;
+
+ cpu = of_cpu_node_to_id(cpu_node);
+ if (WARN_ON(cpu < 0))
+ continue;
+
+ cpumask_set_cpu(cpu, &ic->fiq_aff[fiq]->aff);
+ }
+}
+
static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent)
{
- int i;
+ int i, die;
+ u32 off, start_off;
void __iomem *regs;
- u32 info;
struct aic_irq_chip *irqc;
+ struct device_node *affs;
+ const struct of_device_id *match;
regs = of_iomap(node, 0);
if (WARN_ON(!regs))
return -EIO;
irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
- if (!irqc)
+ if (!irqc) {
+ iounmap(regs);
return -ENOMEM;
+ }
- aic_irqc = irqc;
irqc->base = regs;
- info = aic_ic_read(irqc, AIC_INFO);
- irqc->nr_hw = FIELD_GET(AIC_INFO_NR_HW, info);
+ match = of_match_node(aic_info_match, node);
+ if (!match)
+ goto err_unmap;
- irqc->hw_domain = irq_domain_create_linear(of_node_to_fwnode(node),
- irqc->nr_hw + AIC_NR_FIQ,
- &aic_irq_domain_ops, irqc);
- if (WARN_ON(!irqc->hw_domain)) {
- iounmap(irqc->base);
- kfree(irqc);
- return -ENODEV;
+ irqc->info = *(struct aic_info *)match->data;
+
+ aic_irqc = irqc;
+
+ switch (irqc->info.version) {
+ case 1: {
+ u32 info;
+
+ info = aic_ic_read(irqc, AIC_INFO);
+ irqc->nr_irq = FIELD_GET(AIC_INFO_NR_IRQ, info);
+ irqc->max_irq = AIC_MAX_IRQ;
+ irqc->nr_die = irqc->max_die = 1;
+
+ off = start_off = irqc->info.target_cpu;
+ off += sizeof(u32) * irqc->max_irq; /* TARGET_CPU */
+
+ irqc->event = irqc->base;
+
+ break;
}
+ case 2: {
+ u32 info1, info3;
+
+ info1 = aic_ic_read(irqc, AIC2_INFO1);
+ info3 = aic_ic_read(irqc, AIC2_INFO3);
+
+ irqc->nr_irq = FIELD_GET(AIC2_INFO1_NR_IRQ, info1);
+ irqc->max_irq = FIELD_GET(AIC2_INFO3_MAX_IRQ, info3);
+ irqc->nr_die = FIELD_GET(AIC2_INFO1_LAST_DIE, info1) + 1;
+ irqc->max_die = FIELD_GET(AIC2_INFO3_MAX_DIE, info3);
+
+ off = start_off = irqc->info.irq_cfg;
+ off += sizeof(u32) * irqc->max_irq; /* IRQ_CFG */
+
+ irqc->event = of_iomap(node, 1);
+ if (WARN_ON(!irqc->event))
+ goto err_unmap;
+
+ break;
+ }
+ }
+
+ irqc->info.sw_set = off;
+ off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_SET */
+ irqc->info.sw_clr = off;
+ off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_CLR */
+ irqc->info.mask_set = off;
+ off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_SET */
+ irqc->info.mask_clr = off;
+ off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_CLR */
+ off += sizeof(u32) * (irqc->max_irq >> 5); /* HW_STATE */
+
+ if (irqc->info.fast_ipi)
+ static_branch_enable(&use_fast_ipi);
+ else
+ static_branch_disable(&use_fast_ipi);
+
+ irqc->info.die_stride = off - start_off;
+
+ irqc->hw_domain = irq_domain_create_tree(of_node_to_fwnode(node),
+ &aic_irq_domain_ops, irqc);
+ if (WARN_ON(!irqc->hw_domain))
+ goto err_unmap;
irq_domain_update_bus_token(irqc->hw_domain, DOMAIN_BUS_WIRED);
- if (aic_init_smp(irqc, node)) {
- irq_domain_remove(irqc->hw_domain);
- iounmap(irqc->base);
- kfree(irqc);
- return -ENODEV;
+ if (aic_init_smp(irqc, node))
+ goto err_remove_domain;
+
+ affs = of_get_child_by_name(node, "affinities");
+ if (affs) {
+ struct device_node *chld;
+
+ for_each_child_of_node(affs, chld)
+ build_fiq_affinity(irqc, chld);
}
set_handle_irq(aic_handle_irq);
set_handle_fiq(aic_handle_fiq);
- for (i = 0; i < BITS_TO_U32(irqc->nr_hw); i++)
- aic_ic_write(irqc, AIC_MASK_SET + i * 4, U32_MAX);
- for (i = 0; i < BITS_TO_U32(irqc->nr_hw); i++)
- aic_ic_write(irqc, AIC_SW_CLR + i * 4, U32_MAX);
- for (i = 0; i < irqc->nr_hw; i++)
- aic_ic_write(irqc, AIC_TARGET_CPU + i * 4, 1);
+ off = 0;
+ for (die = 0; die < irqc->nr_die; die++) {
+ for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
+ aic_ic_write(irqc, irqc->info.mask_set + off + i * 4, U32_MAX);
+ for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
+ aic_ic_write(irqc, irqc->info.sw_clr + off + i * 4, U32_MAX);
+ if (irqc->info.target_cpu)
+ for (i = 0; i < irqc->nr_irq; i++)
+ aic_ic_write(irqc, irqc->info.target_cpu + off + i * 4, 1);
+ off += irqc->info.die_stride;
+ }
+
+ if (irqc->info.version == 2) {
+ u32 config = aic_ic_read(irqc, AIC2_CONFIG);
+
+ config |= AIC2_CONFIG_ENABLE;
+ aic_ic_write(irqc, AIC2_CONFIG, config);
+ }
if (!is_kernel_in_hyp_mode())
pr_info("Kernel running in EL1, mapping interrupts");
+ if (static_branch_likely(&use_fast_ipi))
+ pr_info("Using Fast IPIs");
+
cpuhp_setup_state(CPUHP_AP_IRQ_APPLE_AIC_STARTING,
"irqchip/apple-aic/ipi:starting",
aic_init_cpu, NULL);
vgic_set_kvm_info(&vgic_info);
- pr_info("Initialized with %d IRQs, %d FIQs, %d vIPIs\n",
- irqc->nr_hw, AIC_NR_FIQ, AIC_NR_SWIPI);
+ pr_info("Initialized with %d/%d IRQs * %d/%d die(s), %d FIQs, %d vIPIs",
+ irqc->nr_irq, irqc->max_irq, irqc->nr_die, irqc->max_die, AIC_NR_FIQ, AIC_NR_SWIPI);
return 0;
+
+err_remove_domain:
+ irq_domain_remove(irqc->hw_domain);
+err_unmap:
+ if (irqc->event && irqc->event != irqc->base)
+ iounmap(irqc->event);
+ iounmap(irqc->base);
+ kfree(irqc);
+ return -ENODEV;
}
-IRQCHIP_DECLARE(apple_m1_aic, "apple,aic", aic_of_ic_init);
+IRQCHIP_DECLARE(apple_aic, "apple,aic", aic_of_ic_init);
+IRQCHIP_DECLARE(apple_aic2, "apple,aic2", aic_of_ic_init);
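Because hwirq numbers now encode the full (die, type, number) tuple in the same format as the event register, the handler can pass the raw event straight to generic_handle_domain_irq(), and a sparse irq_domain_create_tree() replaces the old linear domain. A worked example of the encoding defined by the macros above:

/*
 *   AIC_IRQ_HWIRQ(1, 0x2a)
 *     = FIELD_PREP(AIC_EVENT_DIE, 1)                    -> 0x01000000
 *     | FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_IRQ)  -> 0x00010000
 *     | FIELD_PREP(AIC_EVENT_NUM, 0x2a)                 -> 0x0000002a
 *     = 0x0101002a
 *
 * AIC_HWIRQ_DIE() and AIC_HWIRQ_IRQ() then recover 1 and 0x2a.
 */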
diff --git a/drivers/irqchip/irq-ativic32.c b/drivers/irqchip/irq-ativic32.c
deleted file mode 100644
index 223dd2f97d28..000000000000
--- a/drivers/irqchip/irq-ativic32.c
+++ /dev/null
@@ -1,156 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2005-2017 Andes Technology Corporation
-
-#include <linux/irq.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <linux/hardirq.h>
-#include <linux/interrupt.h>
-#include <linux/irqdomain.h>
-#include <linux/irqchip.h>
-#include <nds32_intrinsic.h>
-
-#include <asm/irq_regs.h>
-
-unsigned long wake_mask;
-
-static void ativic32_ack_irq(struct irq_data *data)
-{
- __nds32__mtsr_dsb(BIT(data->hwirq), NDS32_SR_INT_PEND2);
-}
-
-static void ativic32_mask_irq(struct irq_data *data)
-{
- unsigned long int_mask2 = __nds32__mfsr(NDS32_SR_INT_MASK2);
- __nds32__mtsr_dsb(int_mask2 & (~(BIT(data->hwirq))), NDS32_SR_INT_MASK2);
-}
-
-static void ativic32_unmask_irq(struct irq_data *data)
-{
- unsigned long int_mask2 = __nds32__mfsr(NDS32_SR_INT_MASK2);
- __nds32__mtsr_dsb(int_mask2 | (BIT(data->hwirq)), NDS32_SR_INT_MASK2);
-}
-
-static int nointc_set_wake(struct irq_data *data, unsigned int on)
-{
- unsigned long int_mask = __nds32__mfsr(NDS32_SR_INT_MASK);
- static unsigned long irq_orig_bit;
- u32 bit = 1 << data->hwirq;
-
- if (on) {
- if (int_mask & bit)
- __assign_bit(data->hwirq, &irq_orig_bit, true);
- else
- __assign_bit(data->hwirq, &irq_orig_bit, false);
-
- __assign_bit(data->hwirq, &int_mask, true);
- __assign_bit(data->hwirq, &wake_mask, true);
-
- } else {
- if (!(irq_orig_bit & bit))
- __assign_bit(data->hwirq, &int_mask, false);
-
- __assign_bit(data->hwirq, &wake_mask, false);
- __assign_bit(data->hwirq, &irq_orig_bit, false);
- }
-
- __nds32__mtsr_dsb(int_mask, NDS32_SR_INT_MASK);
-
- return 0;
-}
-
-static struct irq_chip ativic32_chip = {
- .name = "ativic32",
- .irq_ack = ativic32_ack_irq,
- .irq_mask = ativic32_mask_irq,
- .irq_unmask = ativic32_unmask_irq,
- .irq_set_wake = nointc_set_wake,
-};
-
-static unsigned int __initdata nivic_map[6] = { 6, 2, 10, 16, 24, 32 };
-
-static struct irq_domain *root_domain;
-static int ativic32_irq_domain_map(struct irq_domain *id, unsigned int virq,
- irq_hw_number_t hw)
-{
-
- unsigned long int_trigger_type;
- u32 type;
- struct irq_data *irq_data;
- int_trigger_type = __nds32__mfsr(NDS32_SR_INT_TRIGGER);
- irq_data = irq_get_irq_data(virq);
- if (!irq_data)
- return -EINVAL;
-
- if (int_trigger_type & (BIT(hw))) {
- irq_set_chip_and_handler(virq, &ativic32_chip, handle_edge_irq);
- type = IRQ_TYPE_EDGE_RISING;
- } else {
- irq_set_chip_and_handler(virq, &ativic32_chip, handle_level_irq);
- type = IRQ_TYPE_LEVEL_HIGH;
- }
-
- irqd_set_trigger_type(irq_data, type);
- return 0;
-}
-
-static const struct irq_domain_ops ativic32_ops = {
- .map = ativic32_irq_domain_map,
- .xlate = irq_domain_xlate_onecell
-};
-
-static irq_hw_number_t get_intr_src(void)
-{
- return ((__nds32__mfsr(NDS32_SR_ITYPE) & ITYPE_mskVECTOR) >> ITYPE_offVECTOR)
- - NDS32_VECTOR_offINTERRUPT;
-}
-
-static void ativic32_handle_irq(struct pt_regs *regs)
-{
- irq_hw_number_t hwirq = get_intr_src();
- generic_handle_domain_irq(root_domain, hwirq);
-}
-
-/*
- * TODO: convert nds32 to GENERIC_IRQ_MULTI_HANDLER so that this entry logic
- * can live in arch code.
- */
-asmlinkage void asm_do_IRQ(struct pt_regs *regs)
-{
- struct pt_regs *old_regs;
-
- irq_enter();
- old_regs = set_irq_regs(regs);
- ativic32_handle_irq(regs);
- set_irq_regs(old_regs);
- irq_exit();
-}
-
-int __init ativic32_init_irq(struct device_node *node, struct device_node *parent)
-{
- unsigned long int_vec_base, nivic, nr_ints;
-
- if (WARN(parent, "non-root ativic32 are not supported"))
- return -EINVAL;
-
- int_vec_base = __nds32__mfsr(NDS32_SR_IVB);
-
- if (((int_vec_base & IVB_mskIVIC_VER) >> IVB_offIVIC_VER) == 0)
- panic("Unable to use atcivic32 for this cpu.\n");
-
- nivic = (int_vec_base & IVB_mskNIVIC) >> IVB_offNIVIC;
- if (nivic >= ARRAY_SIZE(nivic_map))
- panic("The number of input for ativic32 is not supported.\n");
-
- nr_ints = nivic_map[nivic];
-
- root_domain = irq_domain_add_linear(node, nr_ints,
- &ativic32_ops, NULL);
-
- if (!root_domain)
- panic("%s: unable to create IRQ domain\n", node->full_name);
-
- return 0;
-}
-IRQCHIP_DECLARE(ativic32, "andestech,ativic32", ativic32_init_irq);
diff --git a/drivers/irqchip/irq-ftintc010.c b/drivers/irqchip/irq-ftintc010.c
index 5cc268880f8e..46a3aa60e50e 100644
--- a/drivers/irqchip/irq-ftintc010.c
+++ b/drivers/irqchip/irq-ftintc010.c
@@ -11,7 +11,6 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/irqchip.h>
-#include <linux/irqchip/versatile-fpga.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 5e935d97207d..0efe1a9a9f3b 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1211,7 +1211,7 @@ static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
* Ensure that stores to Normal memory are visible to the
* other CPUs before issuing the IPI.
*/
- wmb();
+ dsb(ishst);
for_each_cpu(cpu, mask) {
u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index b8bb46c65a97..58ba835bee1f 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -34,6 +34,7 @@
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
@@ -66,7 +67,6 @@ union gic_base {
};
struct gic_chip_data {
- struct irq_chip chip;
union gic_base dist_base;
union gic_base cpu_base;
void __iomem *raw_dist_base;
@@ -397,18 +397,15 @@ static void gic_handle_cascade_irq(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static const struct irq_chip gic_chip = {
- .irq_mask = gic_mask_irq,
- .irq_unmask = gic_unmask_irq,
- .irq_eoi = gic_eoi_irq,
- .irq_set_type = gic_set_type,
- .irq_retrigger = gic_retrigger,
- .irq_get_irqchip_state = gic_irq_get_irqchip_state,
- .irq_set_irqchip_state = gic_irq_set_irqchip_state,
- .flags = IRQCHIP_SET_TYPE_MASKED |
- IRQCHIP_SKIP_SET_WAKE |
- IRQCHIP_MASK_ON_SUSPEND,
-};
+static void gic_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);
+
+ if (gic->domain->dev)
+ seq_printf(p, "%s", gic->domain->dev->of_node->name);
+ else
+ seq_printf(p, "GIC-%d", (int)(gic - &gic_data[0]));
+}
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
@@ -799,8 +796,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
+ struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);
unsigned int cpu;
+ if (unlikely(gic != &gic_data[0]))
+ return -EINVAL;
+
if (!force)
cpu = cpumask_any_and(mask_val, cpu_online_mask);
else
@@ -880,6 +881,39 @@ static __init void gic_smp_init(void)
#define gic_ipi_send_mask NULL
#endif
+static const struct irq_chip gic_chip = {
+ .irq_mask = gic_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_eoi = gic_eoi_irq,
+ .irq_set_type = gic_set_type,
+ .irq_retrigger = gic_retrigger,
+ .irq_set_affinity = gic_set_affinity,
+ .ipi_send_mask = gic_ipi_send_mask,
+ .irq_get_irqchip_state = gic_irq_get_irqchip_state,
+ .irq_set_irqchip_state = gic_irq_set_irqchip_state,
+ .irq_print_chip = gic_irq_print_chip,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static const struct irq_chip gic_chip_mode1 = {
+ .name = "GICv2",
+ .irq_mask = gic_eoimode1_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_eoi = gic_eoimode1_eoi_irq,
+ .irq_set_type = gic_set_type,
+ .irq_retrigger = gic_retrigger,
+ .irq_set_affinity = gic_set_affinity,
+ .ipi_send_mask = gic_ipi_send_mask,
+ .irq_get_irqchip_state = gic_irq_get_irqchip_state,
+ .irq_set_irqchip_state = gic_irq_set_irqchip_state,
+ .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
#ifdef CONFIG_BL_SWITCHER
/*
* gic_send_sgi - send a SGI directly to given CPU interface number
@@ -1024,15 +1058,19 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
{
struct gic_chip_data *gic = d->host_data;
struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
+ const struct irq_chip *chip;
+
+ chip = (static_branch_likely(&supports_deactivate_key) &&
+ gic == &gic_data[0]) ? &gic_chip_mode1 : &gic_chip;
switch (hw) {
case 0 ... 31:
irq_set_percpu_devid(irq);
- irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_percpu_devid_irq, NULL, NULL);
break;
default:
- irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
irq_set_probe(irq);
irqd_set_single_target(irqd);
@@ -1127,26 +1165,6 @@ static const struct irq_domain_ops gic_irq_domain_ops = {
.unmap = gic_irq_domain_unmap,
};
-static void gic_init_chip(struct gic_chip_data *gic, struct device *dev,
- const char *name, bool use_eoimode1)
-{
- /* Initialize irq_chip */
- gic->chip = gic_chip;
- gic->chip.name = name;
- gic->chip.parent_device = dev;
-
- if (use_eoimode1) {
- gic->chip.irq_mask = gic_eoimode1_mask_irq;
- gic->chip.irq_eoi = gic_eoimode1_eoi_irq;
- gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
- }
-
- if (gic == &gic_data[0]) {
- gic->chip.irq_set_affinity = gic_set_affinity;
- gic->chip.ipi_send_mask = gic_ipi_send_mask;
- }
-}
-
static int gic_init_bases(struct gic_chip_data *gic,
struct fwnode_handle *handle)
{
@@ -1246,7 +1264,6 @@ error:
static int __init __gic_init_bases(struct gic_chip_data *gic,
struct fwnode_handle *handle)
{
- char *name;
int i, ret;
if (WARN_ON(!gic || gic->domain))
@@ -1266,18 +1283,8 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
pr_info("GIC: Using split EOI/Deactivate mode\n");
}
- if (static_branch_likely(&supports_deactivate_key) && gic == &gic_data[0]) {
- name = kasprintf(GFP_KERNEL, "GICv2");
- gic_init_chip(gic, NULL, name, true);
- } else {
- name = kasprintf(GFP_KERNEL, "GIC-%d", (int)(gic-&gic_data[0]));
- gic_init_chip(gic, NULL, name, false);
- }
-
ret = gic_init_bases(gic, handle);
- if (ret)
- kfree(name);
- else if (gic == &gic_data[0])
+ if (gic == &gic_data[0])
gic_smp_init();
return ret;
@@ -1460,8 +1467,6 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
if (!*gic)
return -ENOMEM;
- gic_init_chip(*gic, dev, dev->of_node->name, false);
-
ret = gic_of_setup(*gic, dev->of_node);
if (ret)
return ret;
@@ -1472,6 +1477,7 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
return ret;
}
+ irq_domain_set_pm_device((*gic)->domain, dev);
irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic);
return 0;
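Dropping the per-instance irq_chip copy works because .irq_print_chip now generates the name on demand when /proc/interrupts is read, so the chip structure itself can be const and shared. A minimal sketch of such a callback (naming scheme hypothetical):

static void example_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
	struct example_chip_data *cd = irq_data_get_irq_chip_data(d);

	seq_printf(p, "EX-%d", cd->index);	/* shown instead of .name */
}

static const struct irq_chip example_chip = {
	.irq_print_chip	= example_irq_print_chip,
	/* mask/unmask/eoi callbacks as usual */
};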
diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c
index e86ff743e98c..80aaea82468a 100644
--- a/drivers/irqchip/irq-imx-intmux.c
+++ b/drivers/irqchip/irq-imx-intmux.c
@@ -61,7 +61,6 @@
#define CHAN_MAX_NUM 0x8
struct intmux_irqchip_data {
- struct irq_chip chip;
u32 saved_reg;
int chanidx;
int irq;
@@ -114,7 +113,7 @@ static void imx_intmux_irq_unmask(struct irq_data *d)
raw_spin_unlock_irqrestore(&data->lock, flags);
}
-static struct irq_chip imx_intmux_irq_chip = {
+static struct irq_chip imx_intmux_irq_chip __ro_after_init = {
.name = "intmux",
.irq_mask = imx_intmux_irq_mask,
.irq_unmask = imx_intmux_irq_unmask,
@@ -126,7 +125,7 @@ static int imx_intmux_irq_map(struct irq_domain *h, unsigned int irq,
struct intmux_irqchip_data *data = h->host_data;
irq_set_chip_data(irq, data);
- irq_set_chip_and_handler(irq, &data->chip, handle_level_irq);
+ irq_set_chip_and_handler(irq, &imx_intmux_irq_chip, handle_level_irq);
return 0;
}
@@ -241,8 +240,6 @@ static int imx_intmux_probe(struct platform_device *pdev)
}
for (i = 0; i < channum; i++) {
- data->irqchip_data[i].chip = imx_intmux_irq_chip;
- data->irqchip_data[i].chip.parent_device = &pdev->dev;
data->irqchip_data[i].chanidx = i;
data->irqchip_data[i].irq = irq_of_parse_and_map(np, i);
@@ -260,6 +257,7 @@ static int imx_intmux_probe(struct platform_device *pdev)
goto out;
}
data->irqchip_data[i].domain = domain;
+ irq_domain_set_pm_device(domain, &pdev->dev);
/* disable all interrupt sources of this channel firstly */
writel_relaxed(0, data->regs + CHANIER(i));
diff --git a/drivers/irqchip/irq-lpc32xx.c b/drivers/irqchip/irq-lpc32xx.c
index a29357f39450..4d70a857133f 100644
--- a/drivers/irqchip/irq-lpc32xx.c
+++ b/drivers/irqchip/irq-lpc32xx.c
@@ -11,6 +11,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/exception.h>
@@ -25,8 +26,8 @@
struct lpc32xx_irq_chip {
void __iomem *base;
+ phys_addr_t addr;
struct irq_domain *domain;
- struct irq_chip chip;
};
static struct lpc32xx_irq_chip *lpc32xx_mic_irqc;
@@ -118,6 +119,24 @@ static int lpc32xx_irq_set_type(struct irq_data *d, unsigned int type)
return 0;
}
+static void lpc32xx_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+
+ if (ic == lpc32xx_mic_irqc)
+ seq_printf(p, "%08x.mic", ic->addr);
+ else
+ seq_printf(p, "%08x.sic", ic->addr);
+}
+
+static const struct irq_chip lpc32xx_chip = {
+ .irq_ack = lpc32xx_irq_ack,
+ .irq_mask = lpc32xx_irq_mask,
+ .irq_unmask = lpc32xx_irq_unmask,
+ .irq_set_type = lpc32xx_irq_set_type,
+ .irq_print_chip = lpc32xx_irq_print_chip,
+};
+
static void __exception_irq_entry lpc32xx_handle_irq(struct pt_regs *regs)
{
struct lpc32xx_irq_chip *ic = lpc32xx_mic_irqc;
@@ -153,7 +172,7 @@ static int lpc32xx_irq_domain_map(struct irq_domain *id, unsigned int virq,
struct lpc32xx_irq_chip *ic = id->host_data;
irq_set_chip_data(virq, ic);
- irq_set_chip_and_handler(virq, &ic->chip, handle_level_irq);
+ irq_set_chip_and_handler(virq, &lpc32xx_chip, handle_level_irq);
irq_set_status_flags(virq, IRQ_LEVEL);
irq_set_noprobe(virq);
@@ -183,6 +202,7 @@ static int __init lpc32xx_of_ic_init(struct device_node *node,
if (!irqc)
return -ENOMEM;
+ irqc->addr = addr;
irqc->base = of_iomap(node, 0);
if (!irqc->base) {
pr_err("%pOF: unable to map registers\n", node);
@@ -190,21 +210,11 @@ static int __init lpc32xx_of_ic_init(struct device_node *node,
return -EINVAL;
}
- irqc->chip.irq_ack = lpc32xx_irq_ack;
- irqc->chip.irq_mask = lpc32xx_irq_mask;
- irqc->chip.irq_unmask = lpc32xx_irq_unmask;
- irqc->chip.irq_set_type = lpc32xx_irq_set_type;
- if (is_mic)
- irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.mic", addr);
- else
- irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.sic", addr);
-
irqc->domain = irq_domain_add_linear(node, NR_LPC32XX_IC_IRQS,
&lpc32xx_irq_domain_ops, irqc);
if (!irqc->domain) {
pr_err("unable to add irq domain\n");
iounmap(irqc->base);
- kfree(irqc->chip.name);
kfree(irqc);
return -ENODEV;
}
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index d90ff0b92480..2aaa9aad3e87 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -16,7 +16,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
-#define NUM_CHANNEL 8
+#define MAX_NUM_CHANNEL 64
#define MAX_INPUT_MUX 256
#define REG_EDGE_POL 0x00
@@ -26,6 +26,8 @@
/* use for A1 like chips */
#define REG_PIN_A1_SEL 0x04
+/* Used for s4 chips */
+#define REG_EDGE_POL_S4 0x1c
/*
* Note: The S905X3 datasheet reports that BOTH_EDGE is controlled by
@@ -51,15 +53,22 @@ static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
unsigned int channel,
unsigned long hwirq);
static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl);
+static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
+ unsigned int type, u32 *channel_hwirq);
+static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
+ unsigned int type, u32 *channel_hwirq);
struct irq_ctl_ops {
void (*gpio_irq_sel_pin)(struct meson_gpio_irq_controller *ctl,
unsigned int channel, unsigned long hwirq);
void (*gpio_irq_init)(struct meson_gpio_irq_controller *ctl);
+ int (*gpio_irq_set_type)(struct meson_gpio_irq_controller *ctl,
+ unsigned int type, u32 *channel_hwirq);
};
struct meson_gpio_irq_params {
unsigned int nr_hwirq;
+ unsigned int nr_channels;
bool support_edge_both;
unsigned int edge_both_offset;
unsigned int edge_single_offset;
@@ -68,28 +77,44 @@ struct meson_gpio_irq_params {
struct irq_ctl_ops ops;
};
-#define INIT_MESON_COMMON(irqs, init, sel) \
+#define INIT_MESON_COMMON(irqs, init, sel, type) \
.nr_hwirq = irqs, \
.ops = { \
.gpio_irq_init = init, \
.gpio_irq_sel_pin = sel, \
+ .gpio_irq_set_type = type, \
},
#define INIT_MESON8_COMMON_DATA(irqs) \
INIT_MESON_COMMON(irqs, meson_gpio_irq_init_dummy, \
- meson8_gpio_irq_sel_pin) \
+ meson8_gpio_irq_sel_pin, \
+ meson8_gpio_irq_set_type) \
.edge_single_offset = 0, \
.pol_low_offset = 16, \
.pin_sel_mask = 0xff, \
+ .nr_channels = 8, \
#define INIT_MESON_A1_COMMON_DATA(irqs) \
INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \
- meson_a1_gpio_irq_sel_pin) \
+ meson_a1_gpio_irq_sel_pin, \
+ meson8_gpio_irq_set_type) \
.support_edge_both = true, \
.edge_both_offset = 16, \
.edge_single_offset = 8, \
.pol_low_offset = 0, \
.pin_sel_mask = 0x7f, \
+ .nr_channels = 8, \
+
+#define INIT_MESON_S4_COMMON_DATA(irqs) \
+ INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \
+ meson_a1_gpio_irq_sel_pin, \
+ meson_s4_gpio_irq_set_type) \
+ .support_edge_both = true, \
+ .edge_both_offset = 0, \
+ .edge_single_offset = 12, \
+ .pol_low_offset = 0, \
+ .pin_sel_mask = 0xff, \
+ .nr_channels = 12, \
static const struct meson_gpio_irq_params meson8_params = {
INIT_MESON8_COMMON_DATA(134)
@@ -121,6 +146,10 @@ static const struct meson_gpio_irq_params a1_params = {
INIT_MESON_A1_COMMON_DATA(62)
};
+static const struct meson_gpio_irq_params s4_params = {
+ INIT_MESON_S4_COMMON_DATA(82)
+};
+
static const struct of_device_id meson_irq_gpio_matches[] = {
{ .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params },
{ .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params },
@@ -130,14 +159,15 @@ static const struct of_device_id meson_irq_gpio_matches[] = {
{ .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params },
{ .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params },
{ .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params },
+ { .compatible = "amlogic,meson-s4-gpio-intc", .data = &s4_params },
{ }
};
struct meson_gpio_irq_controller {
const struct meson_gpio_irq_params *params;
void __iomem *base;
- u32 channel_irqs[NUM_CHANNEL];
- DECLARE_BITMAP(channel_map, NUM_CHANNEL);
+ u32 channel_irqs[MAX_NUM_CHANNEL];
+ DECLARE_BITMAP(channel_map, MAX_NUM_CHANNEL);
spinlock_t lock;
};
@@ -207,8 +237,8 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
spin_lock_irqsave(&ctl->lock, flags);
/* Find a free channel */
- idx = find_first_zero_bit(ctl->channel_map, NUM_CHANNEL);
- if (idx >= NUM_CHANNEL) {
+ idx = find_first_zero_bit(ctl->channel_map, ctl->params->nr_channels);
+ if (idx >= ctl->params->nr_channels) {
spin_unlock_irqrestore(&ctl->lock, flags);
pr_err("No channel available\n");
return -ENOSPC;
@@ -256,9 +286,8 @@ meson_gpio_irq_release_channel(struct meson_gpio_irq_controller *ctl,
clear_bit(idx, ctl->channel_map);
}
-static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
- unsigned int type,
- u32 *channel_hwirq)
+static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
+ unsigned int type, u32 *channel_hwirq)
{
u32 val = 0;
unsigned int idx;
@@ -299,6 +328,51 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
return 0;
}
+/*
+ * GPIO IRQ related registers for S4:
+ * -PADCTRL_GPIO_IRQ_CTRL0
+ * bit[31]:    enable/disable all the IRQ lines
+ * bit[12-23]: single edge trigger
+ * bit[0-11]:  polarity trigger
+ *
+ * -PADCTRL_GPIO_IRQ_CTRL[X]
+ * bit[0-6]:   7 bits to choose the GPIO source for IRQ line 2*[X] - 2
+ * bit[16-22]: 7 bits to choose the GPIO source for IRQ line 2*[X] - 1
+ * where X = 1-6
+ *
+ * -PADCTRL_GPIO_IRQ_CTRL[7]
+ * bit[0-11]: both edge trigger
+ */
+static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
+ unsigned int type, u32 *channel_hwirq)
+{
+ u32 val = 0;
+ unsigned int idx;
+
+ idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
+
+ type &= IRQ_TYPE_SENSE_MASK;
+
+ meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4, BIT(idx), 0);
+
+ if (type == IRQ_TYPE_EDGE_BOTH) {
+ val |= BIT(ctl->params->edge_both_offset + idx);
+ meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4,
+ BIT(ctl->params->edge_both_offset + idx), val);
+ return 0;
+ }
+
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))
+ val |= BIT(ctl->params->pol_low_offset + idx);
+
+ if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
+ val |= BIT(ctl->params->edge_single_offset + idx);
+
+ meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
+ BIT(idx) | BIT(12 + idx), val);
+ return 0;
+}
+
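+/*
+ * Worked example for the single-edge path above, using the S4 parameters
+ * (pol_low_offset = 0, edge_single_offset = 12): IRQ_TYPE_EDGE_FALLING on
+ * channel idx 3 yields val = BIT(0 + 3) | BIT(12 + 3), written to
+ * REG_EDGE_POL under the mask BIT(3) | BIT(12 + 3).
+ */
+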
static unsigned int meson_gpio_irq_type_output(unsigned int type)
{
unsigned int sense = type & IRQ_TYPE_SENSE_MASK;
@@ -323,7 +397,7 @@ static int meson_gpio_irq_set_type(struct irq_data *data, unsigned int type)
u32 *channel_hwirq = irq_data_get_irq_chip_data(data);
int ret;
- ret = meson_gpio_irq_type_setup(ctl, type, channel_hwirq);
+ ret = ctl->params->ops.gpio_irq_set_type(ctl, type, channel_hwirq);
if (ret)
return ret;
@@ -450,10 +524,10 @@ static int meson_gpio_irq_parse_dt(struct device_node *node, struct meson_gpio_i
ret = of_property_read_variable_u32_array(node,
"amlogic,channel-interrupts",
ctl->channel_irqs,
- NUM_CHANNEL,
- NUM_CHANNEL);
+ ctl->params->nr_channels,
+ ctl->params->nr_channels);
if (ret < 0) {
- pr_err("can't get %d channel interrupts\n", NUM_CHANNEL);
+ pr_err("can't get %d channel interrupts\n", ctl->params->nr_channels);
return ret;
}
@@ -507,7 +581,7 @@ static int meson_gpio_irq_of_init(struct device_node *node, struct device_node *
}
pr_info("%d to %d gpio interrupt mux initialized\n",
- ctl->params->nr_hwirq, NUM_CHANNEL);
+ ctl->params->nr_hwirq, ctl->params->nr_channels);
return 0;
diff --git a/drivers/irqchip/irq-mvebu-pic.c b/drivers/irqchip/irq-mvebu-pic.c
index 870f9866b8da..ef3d3646ccc2 100644
--- a/drivers/irqchip/irq-mvebu-pic.c
+++ b/drivers/irqchip/irq-mvebu-pic.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/seq_file.h>
#define PIC_CAUSE 0x0
#define PIC_MASK 0x4
@@ -29,7 +30,7 @@ struct mvebu_pic {
void __iomem *base;
u32 parent_irq;
struct irq_domain *domain;
- struct irq_chip irq_chip;
+ struct platform_device *pdev;
};
static void mvebu_pic_reset(struct mvebu_pic *pic)
@@ -66,6 +67,20 @@ static void mvebu_pic_unmask_irq(struct irq_data *d)
writel(reg, pic->base + PIC_MASK);
}
+static void mvebu_pic_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct mvebu_pic *pic = irq_data_get_irq_chip_data(d);
+
+ seq_printf(p, "%s", dev_name(&pic->pdev->dev));
+}
+
+static const struct irq_chip mvebu_pic_chip = {
+ .irq_mask = mvebu_pic_mask_irq,
+ .irq_unmask = mvebu_pic_unmask_irq,
+ .irq_eoi = mvebu_pic_eoi_irq,
+ .irq_print_chip = mvebu_pic_print_chip,
+};
+
static int mvebu_pic_irq_map(struct irq_domain *domain, unsigned int virq,
irq_hw_number_t hwirq)
{
@@ -73,8 +88,7 @@ static int mvebu_pic_irq_map(struct irq_domain *domain, unsigned int virq,
irq_set_percpu_devid(virq);
irq_set_chip_data(virq, pic);
- irq_set_chip_and_handler(virq, &pic->irq_chip,
- handle_percpu_devid_irq);
+ irq_set_chip_and_handler(virq, &mvebu_pic_chip, handle_percpu_devid_irq);
irq_set_status_flags(virq, IRQ_LEVEL);
irq_set_probe(virq);
@@ -120,22 +134,16 @@ static int mvebu_pic_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct mvebu_pic *pic;
- struct irq_chip *irq_chip;
pic = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pic), GFP_KERNEL);
if (!pic)
return -ENOMEM;
+ pic->pdev = pdev;
pic->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pic->base))
return PTR_ERR(pic->base);
- irq_chip = &pic->irq_chip;
- irq_chip->name = dev_name(&pdev->dev);
- irq_chip->irq_mask = mvebu_pic_mask_irq;
- irq_chip->irq_unmask = mvebu_pic_unmask_irq;
- irq_chip->irq_eoi = mvebu_pic_eoi_irq;
-
pic->parent_irq = irq_of_parse_and_map(node, 0);
if (pic->parent_irq <= 0) {
dev_err(&pdev->dev, "Failed to parse parent interrupt\n");
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index ba4759b3e269..ba6332b00a0a 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -37,25 +37,12 @@
static struct irq_domain *nvic_irq_domain;
-static void __nvic_handle_irq(irq_hw_number_t hwirq)
+static void __irq_entry nvic_handle_irq(struct pt_regs *regs)
{
- generic_handle_domain_irq(nvic_irq_domain, hwirq);
-}
+ unsigned long icsr = readl_relaxed(BASEADDR_V7M_SCB + V7M_SCB_ICSR);
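+ /* vectors 0-15 are architectural exceptions; external IRQs start at 16 */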
+ irq_hw_number_t hwirq = (icsr & V7M_SCB_ICSR_VECTACTIVE) - 16;
-/*
- * TODO: restructure the ARMv7M entry logic so that this entry logic can live
- * in arch code.
- */
-asmlinkage void __exception_irq_entry
-nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs)
-{
- struct pt_regs *old_regs;
-
- irq_enter();
- old_regs = set_irq_regs(regs);
- __nvic_handle_irq(hwirq);
- set_irq_regs(old_regs);
- irq_exit();
+ generic_handle_domain_irq(nvic_irq_domain, hwirq);
}
static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -107,6 +94,7 @@ static int __init nvic_of_init(struct device_node *node,
if (!nvic_irq_domain) {
pr_warn("Failed to allocate irq domain\n");
+ iounmap(nvic_base);
return -ENOMEM;
}
@@ -116,6 +104,7 @@ static int __init nvic_of_init(struct device_node *node,
if (ret) {
pr_warn("Failed to allocate irq chips\n");
irq_domain_remove(nvic_irq_domain);
+ iounmap(nvic_base);
return ret;
}
@@ -141,6 +130,7 @@ static int __init nvic_of_init(struct device_node *node,
for (i = 0; i < irqs; i += 4)
writel_relaxed(0, nvic_base + NVIC_IPR + i);
+ set_handle_irq(nvic_handle_irq);
return 0;
}
IRQCHIP_DECLARE(armv7m_nvic, "arm,armv7m-nvic", nvic_of_init);
diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
new file mode 100644
index 000000000000..eea5a753618c
--- /dev/null
+++ b/drivers/irqchip/irq-qcom-mpm.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, Linaro Limited
+ * Copyright (c) 2010-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/irq.h>
+#include <linux/spinlock.h>
+
+/*
+ * This is the driver for the Qualcomm MPM (MSM Power Manager) interrupt
+ * controller, which is commonly found on Qualcomm SoCs built on the RPM
+ * architecture. Sitting in an always-on domain, the MPM monitors the wakeup
+ * interrupts while the SoC is asleep and wakes up the AP when one of those
+ * interrupts occurs. This driver doesn't directly access the physical MPM
+ * registers, though. Instead, the accesses are bridged via a piece of
+ * internal memory (SRAM) that is accessible to both the AP and the RPM.
+ * This piece of memory is called 'vMPM' in the driver.
+ *
+ * While the SoC is awake, vMPM is owned by the AP and all register setup
+ * done by this driver happens on vMPM. When the AP is about to be power
+ * collapsed, the driver sends a mailbox notification to the RPM, which then
+ * takes over vMPM ownership and dumps it into the physical MPM registers.
+ * On wakeup, the AP is woken up by an MPM pin/interrupt, and the RPM copies
+ * the STATUS registers into vMPM. The AP then owns vMPM again.
+ *
+ * vMPM register map:
+ *
+ * 31 0
+ * +--------------------------------+
+ * | TIMER0 | 0x00
+ * +--------------------------------+
+ * | TIMER1 | 0x04
+ * +--------------------------------+
+ * | ENABLE0 | 0x08
+ * +--------------------------------+
+ * | ... | ...
+ * +--------------------------------+
+ * | ENABLEn |
+ * +--------------------------------+
+ * | FALLING_EDGE0 |
+ * +--------------------------------+
+ * | ... |
+ * +--------------------------------+
+ * | STATUSn |
+ * +--------------------------------+
+ *
+ * n = DIV_ROUND_UP(pin_cnt, 32)
+ *
+ */
+
+#define MPM_REG_ENABLE 0
+#define MPM_REG_FALLING_EDGE 1
+#define MPM_REG_RISING_EDGE 2
+#define MPM_REG_POLARITY 3
+#define MPM_REG_STATUS 4
+
+/* MPM pin to GIC hwirq mapping */
+struct mpm_gic_map {
+ int pin;
+ irq_hw_number_t hwirq;
+};
+
+struct qcom_mpm_priv {
+ void __iomem *base;
+ raw_spinlock_t lock;
+ struct mbox_client mbox_client;
+ struct mbox_chan *mbox_chan;
+ struct mpm_gic_map *maps;
+ unsigned int map_cnt;
+ unsigned int reg_stride;
+ struct irq_domain *domain;
+ struct generic_pm_domain genpd;
+};
+
+static u32 qcom_mpm_read(struct qcom_mpm_priv *priv, unsigned int reg,
+ unsigned int index)
+{
+ unsigned int offset = (reg * priv->reg_stride + index + 2) * 4;
+
+ return readl_relaxed(priv->base + offset);
+}
+
+static void qcom_mpm_write(struct qcom_mpm_priv *priv, unsigned int reg,
+ unsigned int index, u32 val)
+{
+ unsigned int offset = (reg * priv->reg_stride + index + 2) * 4;
+
+ writel_relaxed(val, priv->base + offset);
+
+ /* Ensure the write is completed */
+ wmb();
+}
+
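+/*
+ * Offset example, assuming a 64-pin MPM (reg_stride = 2): the enable bit
+ * for pin 40 sits in word index 40 / 32 = 1, bit 40 % 32 = 8, at byte
+ * offset (MPM_REG_ENABLE * 2 + 1 + 2) * 4 = 0x0c: the ENABLE1 word right
+ * after TIMER0, TIMER1 and ENABLE0 in the vMPM map above.
+ */
+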
+static void qcom_mpm_enable_irq(struct irq_data *d, bool en)
+{
+ struct qcom_mpm_priv *priv = d->chip_data;
+ int pin = d->hwirq;
+ unsigned int index = pin / 32;
+ unsigned int shift = pin % 32;
+ unsigned long flags, val;
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ val = qcom_mpm_read(priv, MPM_REG_ENABLE, index);
+ __assign_bit(shift, &val, en);
+ qcom_mpm_write(priv, MPM_REG_ENABLE, index, val);
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void qcom_mpm_mask(struct irq_data *d)
+{
+ qcom_mpm_enable_irq(d, false);
+
+ if (d->parent_data)
+ irq_chip_mask_parent(d);
+}
+
+static void qcom_mpm_unmask(struct irq_data *d)
+{
+ qcom_mpm_enable_irq(d, true);
+
+ if (d->parent_data)
+ irq_chip_unmask_parent(d);
+}
+
+static void mpm_set_type(struct qcom_mpm_priv *priv, bool set, unsigned int reg,
+ unsigned int index, unsigned int shift)
+{
+ unsigned long flags, val;
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ val = qcom_mpm_read(priv, reg, index);
+ __assign_bit(shift, &val, set);
+ qcom_mpm_write(priv, reg, index, val);
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int qcom_mpm_set_type(struct irq_data *d, unsigned int type)
+{
+ struct qcom_mpm_priv *priv = d->chip_data;
+ int pin = d->hwirq;
+ unsigned int index = pin / 32;
+ unsigned int shift = pin % 32;
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ mpm_set_type(priv, true, MPM_REG_RISING_EDGE, index, shift);
+ else
+ mpm_set_type(priv, false, MPM_REG_RISING_EDGE, index, shift);
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ mpm_set_type(priv, true, MPM_REG_FALLING_EDGE, index, shift);
+ else
+ mpm_set_type(priv, false, MPM_REG_FALLING_EDGE, index, shift);
+
+ if (type & IRQ_TYPE_LEVEL_HIGH)
+ mpm_set_type(priv, true, MPM_REG_POLARITY, index, shift);
+ else
+ mpm_set_type(priv, false, MPM_REG_POLARITY, index, shift);
+
+ if (!d->parent_data)
+ return 0;
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ type = IRQ_TYPE_EDGE_RISING;
+
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ type = IRQ_TYPE_LEVEL_HIGH;
+
+ return irq_chip_set_type_parent(d, type);
+}
+
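+/*
+ * Note: for IRQ_TYPE_EDGE_BOTH the routine above sets both the RISING_EDGE
+ * and FALLING_EDGE vMPM bits, while the parent is only programmed with
+ * plain IRQ_TYPE_EDGE_RISING.
+ */
+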
+static struct irq_chip qcom_mpm_chip = {
+ .name = "mpm",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = qcom_mpm_mask,
+ .irq_unmask = qcom_mpm_unmask,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = qcom_mpm_set_type,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SKIP_SET_WAKE,
+};
+
+static struct mpm_gic_map *get_mpm_gic_map(struct qcom_mpm_priv *priv, int pin)
+{
+ struct mpm_gic_map *maps = priv->maps;
+ int i;
+
+ for (i = 0; i < priv->map_cnt; i++) {
+ if (maps[i].pin == pin)
+ return &maps[i];
+ }
+
+ return NULL;
+}
+
+static int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct qcom_mpm_priv *priv = domain->host_data;
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ struct mpm_gic_map *map;
+ irq_hw_number_t pin;
+ unsigned int type;
+ int ret;
+
+ ret = irq_domain_translate_twocell(domain, fwspec, &pin, &type);
+ if (ret)
+ return ret;
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, pin,
+ &qcom_mpm_chip, priv);
+ if (ret)
+ return ret;
+
+ map = get_mpm_gic_map(priv, pin);
+ if (map == NULL)
+ return irq_domain_disconnect_hierarchy(domain->parent, virq);
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ type = IRQ_TYPE_EDGE_RISING;
+
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ type = IRQ_TYPE_LEVEL_HIGH;
+
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 3;
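+ /* three-cell GIC spec: { 0 = SPI, parent hwirq, trigger type } */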
+ parent_fwspec.param[0] = 0;
+ parent_fwspec.param[1] = map->hwirq;
+ parent_fwspec.param[2] = type;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+ &parent_fwspec);
+}
+
+static const struct irq_domain_ops qcom_mpm_ops = {
+ .alloc = qcom_mpm_alloc,
+ .free = irq_domain_free_irqs_common,
+ .translate = irq_domain_translate_twocell,
+};
+
+/* Triggered by RPM when system resumes from deep sleep */
+static irqreturn_t qcom_mpm_handler(int irq, void *dev_id)
+{
+ struct qcom_mpm_priv *priv = dev_id;
+ unsigned long enable, pending;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned long flags;
+ int i, j;
+
+ for (i = 0; i < priv->reg_stride; i++) {
+ raw_spin_lock_irqsave(&priv->lock, flags);
+ enable = qcom_mpm_read(priv, MPM_REG_ENABLE, i);
+ pending = qcom_mpm_read(priv, MPM_REG_STATUS, i);
+ pending &= enable;
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+
+ for_each_set_bit(j, &pending, 32) {
+ unsigned int pin = 32 * i + j;
+ struct irq_desc *desc = irq_resolve_mapping(priv->domain, pin);
+ struct irq_data *d = &desc->irq_data;
+
+ if (!irqd_is_level_type(d))
+ irq_set_irqchip_state(d->irq,
+ IRQCHIP_STATE_PENDING, true);
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ return ret;
+}
+
+static int mpm_pd_power_off(struct generic_pm_domain *genpd)
+{
+ struct qcom_mpm_priv *priv = container_of(genpd, struct qcom_mpm_priv,
+ genpd);
+ int i, ret;
+
+ for (i = 0; i < priv->reg_stride; i++)
+ qcom_mpm_write(priv, MPM_REG_STATUS, i, 0);
+
+ /* Notify RPM to write vMPM into HW */
+ ret = mbox_send_message(priv->mbox_chan, NULL);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static bool gic_hwirq_is_mapped(struct mpm_gic_map *maps, int cnt, u32 hwirq)
+{
+ int i;
+
+ for (i = 0; i < cnt; i++)
+ if (maps[i].hwirq == hwirq)
+ return true;
+
+ return false;
+}
+
+static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
+{
+ struct platform_device *pdev = of_find_device_by_node(np);
+ struct device *dev = &pdev->dev;
+ struct irq_domain *parent_domain;
+ struct generic_pm_domain *genpd;
+ struct qcom_mpm_priv *priv;
+ unsigned int pin_cnt;
+ int i, irq;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(np, "qcom,mpm-pin-count", &pin_cnt);
+ if (ret) {
+ dev_err(dev, "failed to read qcom,mpm-pin-count: %d\n", ret);
+ return ret;
+ }
+
+ priv->reg_stride = DIV_ROUND_UP(pin_cnt, 32);
+
+ ret = of_property_count_u32_elems(np, "qcom,mpm-pin-map");
+ if (ret < 0) {
+ dev_err(dev, "failed to read qcom,mpm-pin-map: %d\n", ret);
+ return ret;
+ }
+
+ if (ret % 2) {
+ dev_err(dev, "invalid qcom,mpm-pin-map\n");
+ return -EINVAL;
+ }
+
+ priv->map_cnt = ret / 2;
+ priv->maps = devm_kcalloc(dev, priv->map_cnt, sizeof(*priv->maps),
+ GFP_KERNEL);
+ if (!priv->maps)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->map_cnt; i++) {
+ u32 pin, hwirq;
+
+ of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2, &pin);
+ of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2 + 1, &hwirq);
+
+ if (gic_hwirq_is_mapped(priv->maps, i, hwirq)) {
+ dev_warn(dev, "failed to map pin %d as GIC hwirq %d is already mapped\n",
+ pin, hwirq);
+ continue;
+ }
+
+ priv->maps[i].pin = pin;
+ priv->maps[i].hwirq = hwirq;
+ }
+
+ raw_spin_lock_init(&priv->lock);
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ for (i = 0; i < priv->reg_stride; i++) {
+ qcom_mpm_write(priv, MPM_REG_ENABLE, i, 0);
+ qcom_mpm_write(priv, MPM_REG_FALLING_EDGE, i, 0);
+ qcom_mpm_write(priv, MPM_REG_RISING_EDGE, i, 0);
+ qcom_mpm_write(priv, MPM_REG_POLARITY, i, 0);
+ qcom_mpm_write(priv, MPM_REG_STATUS, i, 0);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ genpd = &priv->genpd;
+ genpd->flags = GENPD_FLAG_IRQ_SAFE;
+ genpd->power_off = mpm_pd_power_off;
+
+ genpd->name = devm_kasprintf(dev, GFP_KERNEL, "%s", dev_name(dev));
+ if (!genpd->name)
+ return -ENOMEM;
+
+ ret = pm_genpd_init(genpd, NULL, false);
+ if (ret) {
+ dev_err(dev, "failed to init genpd: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_genpd_add_provider_simple(np, genpd);
+ if (ret) {
+ dev_err(dev, "failed to add genpd provider: %d\n", ret);
+ goto remove_genpd;
+ }
+
+ priv->mbox_client.dev = dev;
+ priv->mbox_chan = mbox_request_channel(&priv->mbox_client, 0);
+ if (IS_ERR(priv->mbox_chan)) {
+ ret = PTR_ERR(priv->mbox_chan);
+ dev_err(dev, "failed to acquire IPC channel: %d\n", ret);
+ return ret;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ dev_err(dev, "failed to find MPM parent domain\n");
+ ret = -ENXIO;
+ goto free_mbox;
+ }
+
+ priv->domain = irq_domain_create_hierarchy(parent_domain,
+ IRQ_DOMAIN_FLAG_QCOM_MPM_WAKEUP, pin_cnt,
+ of_node_to_fwnode(np), &qcom_mpm_ops, priv);
+ if (!priv->domain) {
+ dev_err(dev, "failed to create MPM domain\n");
+ ret = -ENOMEM;
+ goto free_mbox;
+ }
+
+ irq_domain_update_bus_token(priv->domain, DOMAIN_BUS_WAKEUP);
+
+ ret = devm_request_irq(dev, irq, qcom_mpm_handler, IRQF_NO_SUSPEND,
+ "qcom_mpm", priv);
+ if (ret) {
+ dev_err(dev, "failed to request irq: %d\n", ret);
+ goto remove_domain;
+ }
+
+ return 0;
+
+remove_domain:
+ irq_domain_remove(priv->domain);
+free_mbox:
+ mbox_free_channel(priv->mbox_chan);
+remove_genpd:
+ pm_genpd_remove(genpd);
+ return ret;
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_mpm)
+IRQCHIP_MATCH("qcom,mpm", qcom_mpm_init)
+IRQCHIP_PLATFORM_DRIVER_END(qcom_mpm)
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. MSM Power Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 37f9a4499fdb..e83756aca14e 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -508,7 +508,6 @@ static int intc_irqpin_probe(struct platform_device *pdev)
irq_chip = &p->irq_chip;
irq_chip->name = "intc-irqpin";
- irq_chip->parent_device = dev;
irq_chip->irq_mask = disable_fn;
irq_chip->irq_unmask = enable_fn;
irq_chip->irq_set_type = intc_irqpin_irq_set_type;
@@ -523,6 +522,8 @@ static int intc_irqpin_probe(struct platform_device *pdev)
goto err0;
}
+ irq_domain_set_pm_device(p->irq_domain, dev);
+
if (p->shared_irqs) {
/* request one shared interrupt */
if (devm_request_irq(dev, p->irq[0].requested_irq,
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index 909325f88239..1ee5e9941f67 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -188,13 +188,14 @@ static int irqc_probe(struct platform_device *pdev)
p->gc->reg_base = p->cpu_int_base;
p->gc->chip_types[0].regs.enable = IRQC_EN_SET;
p->gc->chip_types[0].regs.disable = IRQC_EN_STS;
- p->gc->chip_types[0].chip.parent_device = dev;
p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
p->gc->chip_types[0].chip.irq_set_type = irqc_irq_set_type;
p->gc->chip_types[0].chip.irq_set_wake = irqc_irq_set_wake;
p->gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+ irq_domain_set_pm_device(p->irq_domain, dev);
+
/* request interrupts one by one */
for (k = 0; k < p->number_of_irqs; k++) {
if (devm_request_irq(dev, p->irq[k].requested_irq,
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 09cc98266d30..bb87e4c3b88e 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -44,8 +44,8 @@
* Each hart context has a vector of interrupt enable bits associated with it.
* There's one bit for each interrupt source.
*/
-#define ENABLE_BASE 0x2000
-#define ENABLE_PER_HART 0x80
+#define CONTEXT_ENABLE_BASE 0x2000
+#define CONTEXT_ENABLE_SIZE 0x80
/*
* Each hart context has a set of control registers associated with it. Right
@@ -53,7 +53,7 @@
* take an interrupt, and a register to claim interrupts.
*/
#define CONTEXT_BASE 0x200000
-#define CONTEXT_PER_HART 0x1000
+#define CONTEXT_SIZE 0x1000
#define CONTEXT_THRESHOLD 0x00
#define CONTEXT_CLAIM 0x04
@@ -81,17 +81,21 @@ static int plic_parent_irq __ro_after_init;
static bool plic_cpuhp_setup_done __ro_after_init;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
-static inline void plic_toggle(struct plic_handler *handler,
- int hwirq, int enable)
+static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
{
- u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
+ u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32);
u32 hwirq_mask = 1 << (hwirq % 32);
- raw_spin_lock(&handler->enable_lock);
if (enable)
writel(readl(reg) | hwirq_mask, reg);
else
writel(readl(reg) & ~hwirq_mask, reg);
+}
+
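+/*
+ * e.g. for hwirq 37, __plic_toggle() touches bit 37 % 32 = 5 of the word
+ * at enable_base + (37 / 32) * 4 = enable_base + 4; enable_base itself is
+ * regs + CONTEXT_ENABLE_BASE + i * CONTEXT_ENABLE_SIZE for context i.
+ */
+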
+static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
+{
+ raw_spin_lock(&handler->enable_lock);
+ __plic_toggle(handler->enable_base, hwirq, enable);
raw_spin_unlock(&handler->enable_lock);
}
@@ -324,8 +328,18 @@ static int __init plic_init(struct device_node *node,
* Skip contexts other than external interrupts for our
* privilege level.
*/
- if (parent.args[0] != RV_IRQ_EXT)
+ if (parent.args[0] != RV_IRQ_EXT) {
+ /* Disable S-mode enable bits if running in M-mode. */
+ if (IS_ENABLED(CONFIG_RISCV_M_MODE)) {
+ void __iomem *enable_base = priv->regs +
+ CONTEXT_ENABLE_BASE +
+ i * CONTEXT_ENABLE_SIZE;
+
+ for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
+ __plic_toggle(enable_base, hwirq, 0);
+ }
continue;
+ }
hartid = riscv_of_parent_hartid(parent.np);
if (hartid < 0) {
@@ -361,11 +375,11 @@ static int __init plic_init(struct device_node *node,
cpumask_set_cpu(cpu, &priv->lmask);
handler->present = true;
- handler->hart_base =
- priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+ handler->hart_base = priv->regs + CONTEXT_BASE +
+ i * CONTEXT_SIZE;
raw_spin_lock_init(&handler->enable_lock);
- handler->enable_base =
- priv->regs + ENABLE_BASE + i * ENABLE_PER_HART;
+ handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE +
+ i * CONTEXT_ENABLE_SIZE;
handler->priv = priv;
done:
for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index b7cb2da71888..9d18f47040eb 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -214,6 +214,48 @@ static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
{ .exti = 73, .irq_parent = 129, .chip = &stm32_exti_h_chip },
};
+static const struct stm32_desc_irq stm32mp13_desc_irq[] = {
+ { .exti = 0, .irq_parent = 6, .chip = &stm32_exti_h_chip },
+ { .exti = 1, .irq_parent = 7, .chip = &stm32_exti_h_chip },
+ { .exti = 2, .irq_parent = 8, .chip = &stm32_exti_h_chip },
+ { .exti = 3, .irq_parent = 9, .chip = &stm32_exti_h_chip },
+ { .exti = 4, .irq_parent = 10, .chip = &stm32_exti_h_chip },
+ { .exti = 5, .irq_parent = 24, .chip = &stm32_exti_h_chip },
+ { .exti = 6, .irq_parent = 65, .chip = &stm32_exti_h_chip },
+ { .exti = 7, .irq_parent = 66, .chip = &stm32_exti_h_chip },
+ { .exti = 8, .irq_parent = 67, .chip = &stm32_exti_h_chip },
+ { .exti = 9, .irq_parent = 68, .chip = &stm32_exti_h_chip },
+ { .exti = 10, .irq_parent = 41, .chip = &stm32_exti_h_chip },
+ { .exti = 11, .irq_parent = 43, .chip = &stm32_exti_h_chip },
+ { .exti = 12, .irq_parent = 77, .chip = &stm32_exti_h_chip },
+ { .exti = 13, .irq_parent = 78, .chip = &stm32_exti_h_chip },
+ { .exti = 14, .irq_parent = 106, .chip = &stm32_exti_h_chip },
+ { .exti = 15, .irq_parent = 109, .chip = &stm32_exti_h_chip },
+ { .exti = 16, .irq_parent = 1, .chip = &stm32_exti_h_chip },
+ { .exti = 19, .irq_parent = 3, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 21, .irq_parent = 32, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 22, .irq_parent = 34, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 23, .irq_parent = 73, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 24, .irq_parent = 93, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 25, .irq_parent = 114, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 26, .irq_parent = 38, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 27, .irq_parent = 39, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 28, .irq_parent = 40, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 29, .irq_parent = 72, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 30, .irq_parent = 53, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 31, .irq_parent = 54, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 32, .irq_parent = 83, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 33, .irq_parent = 84, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 44, .irq_parent = 96, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 47, .irq_parent = 92, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 48, .irq_parent = 116, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 50, .irq_parent = 117, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 52, .irq_parent = 118, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 53, .irq_parent = 119, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 68, .irq_parent = 63, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 70, .irq_parent = 98, .chip = &stm32_exti_h_chip_direct },
+};
+
static const struct stm32_exti_drv_data stm32mp1_drv_data = {
.exti_banks = stm32mp1_exti_banks,
.bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
@@ -221,6 +263,13 @@ static const struct stm32_exti_drv_data stm32mp1_drv_data = {
.irq_nr = ARRAY_SIZE(stm32mp1_desc_irq),
};
+static const struct stm32_exti_drv_data stm32mp13_drv_data = {
+ .exti_banks = stm32mp1_exti_banks,
+ .bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
+ .desc_irqs = stm32mp13_desc_irq,
+ .irq_nr = ARRAY_SIZE(stm32mp13_desc_irq),
+};
+
static const struct
stm32_desc_irq *stm32_exti_get_desc(const struct stm32_exti_drv_data *drv_data,
irq_hw_number_t hwirq)
@@ -922,6 +971,7 @@ static int stm32_exti_probe(struct platform_device *pdev)
/* platform driver only for MP1 */
static const struct of_device_id stm32_exti_ids[] = {
{ .compatible = "st,stm32mp1-exti", .data = &stm32mp1_drv_data},
+ { .compatible = "st,stm32mp13-exti", .data = &stm32mp13_drv_data},
{},
};
MODULE_DEVICE_TABLE(of, stm32_exti_ids);
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
index f032db23b30f..b2d61d4f6fe6 100644
--- a/drivers/irqchip/irq-ts4800.c
+++ b/drivers/irqchip/irq-ts4800.c
@@ -19,14 +19,15 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/seq_file.h>
#define IRQ_MASK 0x4
#define IRQ_STATUS 0x8
struct ts4800_irq_data {
void __iomem *base;
+ struct platform_device *pdev;
struct irq_domain *domain;
- struct irq_chip irq_chip;
};
static void ts4800_irq_mask(struct irq_data *d)
@@ -47,12 +48,25 @@ static void ts4800_irq_unmask(struct irq_data *d)
writew(reg & ~mask, data->base + IRQ_MASK);
}
+static void ts4800_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct ts4800_irq_data *data = irq_data_get_irq_chip_data(d);
+
+ seq_printf(p, "%s", dev_name(&data->pdev->dev));
+}
+
+static const struct irq_chip ts4800_chip = {
+ .irq_mask = ts4800_irq_mask,
+ .irq_unmask = ts4800_irq_unmask,
+ .irq_print_chip = ts4800_irq_print_chip,
+};
+
static int ts4800_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
struct ts4800_irq_data *data = d->host_data;
- irq_set_chip_and_handler(irq, &data->irq_chip, handle_simple_irq);
+ irq_set_chip_and_handler(irq, &ts4800_chip, handle_simple_irq);
irq_set_chip_data(irq, data);
irq_set_noprobe(irq);
@@ -92,13 +106,13 @@ static int ts4800_ic_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct ts4800_irq_data *data;
- struct irq_chip *irq_chip;
int parent_irq;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ data->pdev = pdev;
data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
@@ -111,11 +125,6 @@ static int ts4800_ic_probe(struct platform_device *pdev)
return -EINVAL;
}
- irq_chip = &data->irq_chip;
- irq_chip->name = dev_name(&pdev->dev);
- irq_chip->irq_mask = ts4800_irq_mask;
- irq_chip->irq_unmask = ts4800_irq_unmask;
-
data->domain = irq_domain_add_linear(node, 8, &ts4800_ic_ops, data);
if (!data->domain) {
dev_err(&pdev->dev, "cannot add IRQ domain\n");
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index f2757b6aecc8..ba543ed9c154 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -7,12 +7,12 @@
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
-#include <linux/irqchip/versatile-fpga.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/seq_file.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>
@@ -34,14 +34,12 @@
/**
* struct fpga_irq_data - irq data container for the FPGA IRQ controller
* @base: memory offset in virtual memory
- * @chip: chip container for this instance
* @domain: IRQ domain for this instance
* @valid: mask for valid IRQs on this controller
* @used_irqs: number of active IRQs on this controller
*/
struct fpga_irq_data {
void __iomem *base;
- struct irq_chip chip;
u32 valid;
struct irq_domain *domain;
u8 used_irqs;
@@ -67,6 +65,20 @@ static void fpga_irq_unmask(struct irq_data *d)
writel(mask, f->base + IRQ_ENABLE_SET);
}
+static void fpga_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct fpga_irq_data *f = irq_data_get_irq_chip_data(d);
+
+ seq_printf(p, "%s", irq_domain_get_of_node(f->domain)->name);
+}
+
+static const struct irq_chip fpga_chip = {
+ .irq_ack = fpga_irq_mask,
+ .irq_mask = fpga_irq_mask,
+ .irq_unmask = fpga_irq_unmask,
+ .irq_print_chip = fpga_irq_print_chip,
+};
+
static void fpga_irq_handle(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -116,7 +128,7 @@ static int handle_one_fpga(struct fpga_irq_data *f, struct pt_regs *regs)
* Keep iterating over all registered FPGA IRQ controllers until there are
* no pending interrupts.
*/
-asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs)
+static asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs)
{
int i, handled;
@@ -135,8 +147,7 @@ static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq,
if (!(f->valid & BIT(hwirq)))
return -EPERM;
irq_set_chip_data(irq, f);
- irq_set_chip_and_handler(irq, &f->chip,
- handle_level_irq);
+ irq_set_chip_and_handler(irq, &fpga_chip, handle_level_irq);
irq_set_probe(irq);
return 0;
}
@@ -146,8 +157,8 @@ static const struct irq_domain_ops fpga_irqdomain_ops = {
.xlate = irq_domain_xlate_onetwocell,
};
-void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start,
- int parent_irq, u32 valid, struct device_node *node)
+static void __init fpga_irq_init(void __iomem *base, int parent_irq,
+ u32 valid, struct device_node *node)
{
struct fpga_irq_data *f;
int i;
@@ -158,10 +169,6 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start,
}
f = &fpga_irq_devices[fpga_irq_id];
f->base = base;
- f->chip.name = name;
- f->chip.irq_ack = fpga_irq_mask;
- f->chip.irq_mask = fpga_irq_mask;
- f->chip.irq_unmask = fpga_irq_unmask;
f->valid = valid;
if (parent_irq != -1) {
@@ -169,20 +176,19 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start,
f);
}
- /* This will also allocate irq descriptors */
- f->domain = irq_domain_add_simple(node, fls(valid), irq_start,
+ f->domain = irq_domain_add_linear(node, fls(valid),
&fpga_irqdomain_ops, f);
/* This will allocate all valid descriptors in the linear case */
for (i = 0; i < fls(valid); i++)
if (valid & BIT(i)) {
- if (!irq_start)
- irq_create_mapping(f->domain, i);
+ /* Is this still required? */
+ irq_create_mapping(f->domain, i);
f->used_irqs++;
}
pr_info("FPGA IRQ chip %d \"%s\" @ %p, %u irqs",
- fpga_irq_id, name, base, f->used_irqs);
+ fpga_irq_id, node->name, base, f->used_irqs);
if (parent_irq != -1)
pr_cont(", parent IRQ: %d\n", parent_irq);
else
@@ -192,8 +198,8 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start,
}
#ifdef CONFIG_OF
-int __init fpga_irq_of_init(struct device_node *node,
- struct device_node *parent)
+static int __init fpga_irq_of_init(struct device_node *node,
+ struct device_node *parent)
{
void __iomem *base;
u32 clear_mask;
@@ -222,7 +228,7 @@ int __init fpga_irq_of_init(struct device_node *node,
parent_irq = -1;
}
- fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
+ fpga_irq_init(base, parent_irq, valid_mask, node);
/*
* On Versatile AB/PB, some secondary interrupts have a direct
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
index 356a59755d63..238d3d344949 100644
--- a/drivers/irqchip/irq-xilinx-intc.c
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -32,6 +32,8 @@
#define MER_ME (1<<0)
#define MER_HIE (1<<1)
+#define SPURIOUS_IRQ (-1U)
+
static DEFINE_STATIC_KEY_FALSE(xintc_is_be);
struct xintc_irq_chip {
@@ -110,20 +112,6 @@ static struct irq_chip intc_dev = {
.irq_mask_ack = intc_mask_ack,
};
-unsigned int xintc_get_irq(void)
-{
- unsigned int irq = -1;
- u32 hwirq;
-
- hwirq = xintc_read(primary_intc, IVR);
- if (hwirq != -1U)
- irq = irq_find_mapping(primary_intc->root_domain, hwirq);
-
- pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
-
- return irq;
-}
-
static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
struct xintc_irq_chip *irqc = d->host_data;
@@ -164,6 +152,19 @@ static void xil_intc_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+static void xil_intc_handle_irq(struct pt_regs *regs)
+{
+ u32 hwirq;
+
+ do {
+ hwirq = xintc_read(primary_intc, IVR);
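+ /* the IVR reads SPURIOUS_IRQ (all ones) when no interrupt is pending */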
+ if (unlikely(hwirq == SPURIOUS_IRQ))
+ break;
+
+ generic_handle_domain_irq(primary_intc->root_domain, hwirq);
+ } while (true);
+}
+
static int __init xilinx_intc_of_init(struct device_node *intc,
struct device_node *parent)
{
@@ -233,6 +234,7 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
} else {
primary_intc = irqc;
irq_set_default_host(primary_intc->root_domain);
+ set_handle_irq(xil_intc_handle_irq);
}
return 0;
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index 173e6520e06e..d96916cf6a41 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -21,23 +21,19 @@
#include <linux/slab.h>
#include <linux/types.h>
-#define PDC_MAX_IRQS 168
#define PDC_MAX_GPIO_IRQS 256
-#define CLEAR_INTR(reg, intr) (reg & ~(1 << intr))
-#define ENABLE_INTR(reg, intr) (reg | (1 << intr))
-
#define IRQ_ENABLE_BANK 0x10
#define IRQ_i_CFG 0x110
-#define PDC_NO_PARENT_IRQ ~0UL
-
struct pdc_pin_region {
u32 pin_base;
u32 parent_base;
u32 cnt;
};
+#define pin_to_hwirq(r, p) ((r)->parent_base + (p) - (r)->pin_base)
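+/*
+ * e.g. a region { .pin_base = 32, .parent_base = 512, .cnt = 16 } (values
+ * purely illustrative) maps PDC pin 40 to parent hwirq 512 + 40 - 32 = 520.
+ */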
+
static DEFINE_RAW_SPINLOCK(pdc_lock);
static void __iomem *pdc_base;
static struct pdc_pin_region *pdc_region;
@@ -56,17 +52,18 @@ static u32 pdc_reg_read(int reg, u32 i)
static void pdc_enable_intr(struct irq_data *d, bool on)
{
int pin_out = d->hwirq;
+ unsigned long enable;
+ unsigned long flags;
u32 index, mask;
- u32 enable;
index = pin_out / 32;
mask = pin_out % 32;
- raw_spin_lock(&pdc_lock);
+ raw_spin_lock_irqsave(&pdc_lock, flags);
enable = pdc_reg_read(IRQ_ENABLE_BANK, index);
- enable = on ? ENABLE_INTR(enable, mask) : CLEAR_INTR(enable, mask);
+ __assign_bit(mask, &enable, on);
pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
- raw_spin_unlock(&pdc_lock);
+ raw_spin_unlock_irqrestore(&pdc_lock, flags);
}
static void qcom_pdc_gic_disable(struct irq_data *d)
@@ -186,34 +183,17 @@ static struct irq_chip qcom_pdc_gic_chip = {
.irq_set_affinity = irq_chip_set_affinity_parent,
};
-static irq_hw_number_t get_parent_hwirq(int pin)
+static struct pdc_pin_region *get_pin_region(int pin)
{
int i;
- struct pdc_pin_region *region;
for (i = 0; i < pdc_region_cnt; i++) {
- region = &pdc_region[i];
- if (pin >= region->pin_base &&
- pin < region->pin_base + region->cnt)
- return (region->parent_base + pin - region->pin_base);
+ if (pin >= pdc_region[i].pin_base &&
+ pin < pdc_region[i].pin_base + pdc_region[i].cnt)
+ return &pdc_region[i];
}
- return PDC_NO_PARENT_IRQ;
-}
-
-static int qcom_pdc_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
- unsigned long *hwirq, unsigned int *type)
-{
- if (is_of_node(fwspec->fwnode)) {
- if (fwspec->param_count != 2)
- return -EINVAL;
-
- *hwirq = fwspec->param[0];
- *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
- return 0;
- }
-
- return -EINVAL;
+ return NULL;
}
static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq,
@@ -221,55 +201,12 @@ static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq,
{
struct irq_fwspec *fwspec = data;
struct irq_fwspec parent_fwspec;
- irq_hw_number_t hwirq, parent_hwirq;
- unsigned int type;
- int ret;
-
- ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
- if (ret)
- return ret;
-
- ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
- &qcom_pdc_gic_chip, NULL);
- if (ret)
- return ret;
-
- parent_hwirq = get_parent_hwirq(hwirq);
- if (parent_hwirq == PDC_NO_PARENT_IRQ)
- return irq_domain_disconnect_hierarchy(domain->parent, virq);
-
- if (type & IRQ_TYPE_EDGE_BOTH)
- type = IRQ_TYPE_EDGE_RISING;
-
- if (type & IRQ_TYPE_LEVEL_MASK)
- type = IRQ_TYPE_LEVEL_HIGH;
-
- parent_fwspec.fwnode = domain->parent->fwnode;
- parent_fwspec.param_count = 3;
- parent_fwspec.param[0] = 0;
- parent_fwspec.param[1] = parent_hwirq;
- parent_fwspec.param[2] = type;
-
- return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
- &parent_fwspec);
-}
-
-static const struct irq_domain_ops qcom_pdc_ops = {
- .translate = qcom_pdc_translate,
- .alloc = qcom_pdc_alloc,
- .free = irq_domain_free_irqs_common,
-};
-
-static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs, void *data)
-{
- struct irq_fwspec *fwspec = data;
- struct irq_fwspec parent_fwspec;
- irq_hw_number_t hwirq, parent_hwirq;
+ struct pdc_pin_region *region;
+ irq_hw_number_t hwirq;
unsigned int type;
int ret;
- ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
+ ret = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type);
if (ret)
return ret;
@@ -281,8 +218,8 @@ static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq,
if (ret)
return ret;
- parent_hwirq = get_parent_hwirq(hwirq);
- if (parent_hwirq == PDC_NO_PARENT_IRQ)
+ region = get_pin_region(hwirq);
+ if (!region)
return irq_domain_disconnect_hierarchy(domain->parent, virq);
if (type & IRQ_TYPE_EDGE_BOTH)
@@ -294,23 +231,16 @@ static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq,
parent_fwspec.fwnode = domain->parent->fwnode;
parent_fwspec.param_count = 3;
parent_fwspec.param[0] = 0;
- parent_fwspec.param[1] = parent_hwirq;
+ parent_fwspec.param[1] = pin_to_hwirq(region, hwirq);
parent_fwspec.param[2] = type;
return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
&parent_fwspec);
}
-static int qcom_pdc_gpio_domain_select(struct irq_domain *d,
- struct irq_fwspec *fwspec,
- enum irq_domain_bus_token bus_token)
-{
- return bus_token == DOMAIN_BUS_WAKEUP;
-}
-
-static const struct irq_domain_ops qcom_pdc_gpio_ops = {
- .select = qcom_pdc_gpio_domain_select,
- .alloc = qcom_pdc_gpio_alloc,
+static const struct irq_domain_ops qcom_pdc_ops = {
+ .translate = irq_domain_translate_twocell,
+ .alloc = qcom_pdc_alloc,
.free = irq_domain_free_irqs_common,
};
@@ -361,7 +291,7 @@ static int pdc_setup_pin_mapping(struct device_node *np)
static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
{
- struct irq_domain *parent_domain, *pdc_domain, *pdc_gpio_domain;
+ struct irq_domain *parent_domain, *pdc_domain;
int ret;
pdc_base = of_iomap(node, 0);
@@ -383,32 +313,21 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
goto fail;
}
- pdc_domain = irq_domain_create_hierarchy(parent_domain, 0, PDC_MAX_IRQS,
- of_fwnode_handle(node),
- &qcom_pdc_ops, NULL);
- if (!pdc_domain) {
- pr_err("%pOF: GIC domain add failed\n", node);
- ret = -ENOMEM;
- goto fail;
- }
-
- pdc_gpio_domain = irq_domain_create_hierarchy(parent_domain,
+ pdc_domain = irq_domain_create_hierarchy(parent_domain,
IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP,
PDC_MAX_GPIO_IRQS,
of_fwnode_handle(node),
- &qcom_pdc_gpio_ops, NULL);
- if (!pdc_gpio_domain) {
- pr_err("%pOF: PDC domain add failed for GPIO domain\n", node);
+ &qcom_pdc_ops, NULL);
+ if (!pdc_domain) {
+ pr_err("%pOF: PDC domain add failed\n", node);
ret = -ENOMEM;
- goto remove;
+ goto fail;
}
- irq_domain_update_bus_token(pdc_gpio_domain, DOMAIN_BUS_WAKEUP);
+ irq_domain_update_bus_token(pdc_domain, DOMAIN_BUS_WAKEUP);
return 0;
-remove:
- irq_domain_remove(pdc_domain);
fail:
kfree(pdc_region);
iounmap(pdc_base);
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index bd087cca1c1d..af17459c1a5c 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2005,7 +2005,11 @@ setup_hw(struct hfc_pci *hc)
}
/* Allocate memory for FIFOS */
/* the memory needs to be on a 32k boundary within the first 4G */
- dma_set_mask(&hc->pdev->dev, 0xFFFF8000);
+ if (dma_set_mask(&hc->pdev->dev, 0xFFFF8000)) {
+ printk(KERN_WARNING
+ "HFC-PCI: No usable DMA configuration!\n");
+ return -EIO;
+ }
buffer = dma_alloc_coherent(&hc->pdev->dev, 0x8000, &hc->hw.dmahandle,
GFP_KERNEL);
/* We silently assume the address is okay if nonzero */
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index 39f841b42488..4f8d85bb3ce1 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -1062,7 +1062,7 @@ ipac_rme(struct hscx_hw *hx)
if (!hx->bch.rx_skb)
return;
if (hx->bch.rx_skb->len < 2) {
- pr_debug("%s: B%1d frame to short %d\n",
+ pr_debug("%s: B%1d frame too short %d\n",
hx->ip->name, hx->bch.nr, hx->bch.rx_skb->len);
skb_trim(hx->bch.rx_skb, 0);
} else {
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index 56943409b60d..48b3d43e2502 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -466,7 +466,7 @@ isar_rcv_frame(struct isar_ch *ch)
rcv_mbox(ch->is, ptr);
if (ch->is->cmsb & HDLC_FED) {
if (ch->bch.rx_skb->len < 3) { /* last 2 are the FCS */
- pr_debug("%s: ISAR frame to short %d\n",
+ pr_debug("%s: ISAR frame too short %d\n",
ch->is->name, ch->bch.rx_skb->len);
skb_trim(ch->bch.rx_skb, 0);
break;
@@ -542,7 +542,7 @@ isar_rcv_frame(struct isar_ch *ch)
rcv_mbox(ch->is, ptr);
if (ch->is->cmsb & HDLC_FED) {
if (ch->bch.rx_skb->len < 3) { /* last 2 are the FCS */
- pr_info("%s: ISAR frame to short %d\n",
+ pr_info("%s: ISAR frame too short %d\n",
ch->is->name, ch->bch.rx_skb->len);
skb_trim(ch->bch.rx_skb, 0);
break;
diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c
index e11ca6bbc7f4..c3b2c99b5cd5 100644
--- a/drivers/isdn/mISDN/dsp_pipeline.c
+++ b/drivers/isdn/mISDN/dsp_pipeline.c
@@ -192,7 +192,7 @@ void dsp_pipeline_destroy(struct dsp_pipeline *pipeline)
int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
{
int found = 0;
- char *dup, *tok, *name, *args;
+ char *dup, *next, *tok, *name, *args;
struct dsp_element_entry *entry, *n;
struct dsp_pipeline_entry *pipeline_entry;
struct mISDN_dsp_element *elem;
@@ -203,10 +203,10 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
if (!list_empty(&pipeline->list))
_dsp_pipeline_destroy(pipeline);
- dup = kstrdup(cfg, GFP_ATOMIC);
+ dup = next = kstrdup(cfg, GFP_ATOMIC);
if (!dup)
return 0;
- while ((tok = strsep(&dup, "|"))) {
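+ /* strsep() advances 'next'; 'dup' keeps the original pointer for freeing */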
+ while ((tok = strsep(&next, "|"))) {
if (!strlen(tok))
continue;
name = strsep(&tok, "(");
diff --git a/drivers/leds/leds-cr0014114.c b/drivers/leds/leds-cr0014114.c
index d03cfd3c0bfb..c87686bd7c18 100644
--- a/drivers/leds/leds-cr0014114.c
+++ b/drivers/leds/leds-cr0014114.c
@@ -266,14 +266,12 @@ static int cr0014114_probe(struct spi_device *spi)
return 0;
}
-static int cr0014114_remove(struct spi_device *spi)
+static void cr0014114_remove(struct spi_device *spi)
{
struct cr0014114 *priv = spi_get_drvdata(spi);
cancel_delayed_work_sync(&priv->work);
mutex_destroy(&priv->lock);
-
- return 0;
}
static const struct of_device_id cr0014114_dt_ids[] = {
diff --git a/drivers/leds/leds-dac124s085.c b/drivers/leds/leds-dac124s085.c
index 20dc9b9d7dea..cf5fb1195f87 100644
--- a/drivers/leds/leds-dac124s085.c
+++ b/drivers/leds/leds-dac124s085.c
@@ -85,15 +85,13 @@ eledcr:
return ret;
}
-static int dac124s085_remove(struct spi_device *spi)
+static void dac124s085_remove(struct spi_device *spi)
{
struct dac124s085 *dac = spi_get_drvdata(spi);
int i;
for (i = 0; i < ARRAY_SIZE(dac->leds); i++)
led_classdev_unregister(&dac->leds[i].ldev);
-
- return 0;
}
static struct spi_driver dac124s085_driver = {
diff --git a/drivers/leds/leds-el15203000.c b/drivers/leds/leds-el15203000.c
index f9eb59a25570..7e7b617bcd56 100644
--- a/drivers/leds/leds-el15203000.c
+++ b/drivers/leds/leds-el15203000.c
@@ -315,13 +315,11 @@ static int el15203000_probe(struct spi_device *spi)
return el15203000_probe_dt(priv);
}
-static int el15203000_remove(struct spi_device *spi)
+static void el15203000_remove(struct spi_device *spi)
{
struct el15203000 *priv = spi_get_drvdata(spi);
mutex_destroy(&priv->lock);
-
- return 0;
}
static const struct of_device_id el15203000_dt_ids[] = {
diff --git a/drivers/leds/leds-spi-byte.c b/drivers/leds/leds-spi-byte.c
index f1964c96fb15..2bc5c99daf51 100644
--- a/drivers/leds/leds-spi-byte.c
+++ b/drivers/leds/leds-spi-byte.c
@@ -130,13 +130,11 @@ static int spi_byte_probe(struct spi_device *spi)
return 0;
}
-static int spi_byte_remove(struct spi_device *spi)
+static void spi_byte_remove(struct spi_device *spi)
{
struct spi_byte_led *led = spi_get_drvdata(spi);
mutex_destroy(&led->mutex);
-
- return 0;
}
static struct spi_driver spi_byte_driver = {
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index b5ea378e66cb..998a5cfdbc4e 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -204,6 +204,7 @@ config BLK_DEV_DM
tristate "Device mapper support"
select BLOCK_HOLDER_DEPRECATED if SYSFS
select BLK_DEV_DM_BUILTIN
+ select BLK_MQ_STACKING
depends on DAX || DAX=n
help
Device-mapper is a low level volume manager. It works by allowing
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 88c573eeb598..ad9f16689419 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -2060,9 +2060,11 @@ int bch_btree_check(struct cache_set *c)
}
}
+ /*
+ * Must wait for all threads to stop.
+ */
wait_event_interruptible(check_state->wait,
- atomic_read(&check_state->started) == 0 ||
- test_bit(CACHE_SET_IO_DISABLE, &c->flags));
+ atomic_read(&check_state->started) == 0);
for (i = 0; i < check_state->total_threads; i++) {
if (check_state->infos[i].result) {
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9c6f9ec55b72..020712c5203f 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -26,7 +26,8 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
struct bio *bio = &b->bio;
- bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb));
+ bio_init(bio, NULL, bio->bi_inline_vecs,
+ meta_bucket_pages(&c->cache->sb), 0);
return bio;
}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 61bd79babf7a..7c2ca52ca3e4 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -53,14 +53,12 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
reread: left = ca->sb.bucket_size - offset;
len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);
- bio_reset(bio);
+ bio_reset(bio, ca->bdev, REQ_OP_READ);
bio->bi_iter.bi_sector = bucket + offset;
- bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = len << 9;
bio->bi_end_io = journal_read_endio;
bio->bi_private = &cl;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
bch_bio_map(bio, data);
closure_bio_submit(ca->set, bio, &cl);
@@ -611,11 +609,9 @@ static void do_journal_discard(struct cache *ca)
atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
- bio_init(bio, bio->bi_inline_vecs, 1);
- bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
+ bio_init(bio, ca->bdev, bio->bi_inline_vecs, 1, REQ_OP_DISCARD);
bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
ca->sb.d[ja->discard_idx]);
- bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = journal_discard_endio;
@@ -773,16 +769,14 @@ static void journal_write_unlocked(struct closure *cl)
atomic_long_add(sectors, &ca->meta_sectors_written);
- bio_reset(bio);
+ bio_reset(bio, ca->bdev, REQ_OP_WRITE |
+ REQ_SYNC | REQ_META | REQ_PREFLUSH | REQ_FUA);
bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
- bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = sectors << 9;
bio->bi_end_io = journal_write_endio;
bio->bi_private = w;
- bio_set_op_attrs(bio, REQ_OP_WRITE,
- REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
bch_bio_map(bio, w->data);
trace_bcache_journal_write(bio, w->data->keys);
bio_list_add(&list, bio);
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index b9c3d27ec093..99499d1f6e66 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -79,8 +79,8 @@ static void moving_init(struct moving_io *io)
{
struct bio *bio = &io->bio.bio;
- bio_init(bio, bio->bi_inline_vecs,
- DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS));
+ bio_init(bio, NULL, bio->bi_inline_vecs,
+ DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS), 0);
bio_get(bio);
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index d15aae6c51c1..fdd0194f84dd 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -44,10 +44,10 @@ static void bio_csum(struct bio *bio, struct bkey *k)
uint64_t csum = 0;
bio_for_each_segment(bv, bio, iter) {
- void *d = kmap(bv.bv_page) + bv.bv_offset;
+ void *d = bvec_kmap_local(&bv);
csum = crc64_be(csum, d, bv.bv_len);
- kunmap(bv.bv_page);
+ kunmap_local(d);
}
k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
@@ -685,8 +685,7 @@ static void do_bio_hook(struct search *s,
{
struct bio *bio = &s->bio.bio;
- bio_init(bio, NULL, 0);
- __bio_clone_fast(bio, orig_bio);
+ bio_init_clone(orig_bio->bi_bdev, bio, orig_bio, GFP_NOIO);
/*
* bi_end_io can be set separately somewhere else, e.g. the
* variants in,
@@ -831,11 +830,11 @@ static void cached_dev_read_done(struct closure *cl)
*/
if (s->iop.bio) {
- bio_reset(s->iop.bio);
+ bio_reset(s->iop.bio, s->cache_miss->bi_bdev, REQ_OP_READ);
s->iop.bio->bi_iter.bi_sector =
s->cache_miss->bi_iter.bi_sector;
- bio_copy_dev(s->iop.bio, s->cache_miss);
s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
+ bio_clone_blkg_association(s->iop.bio, s->cache_miss);
bch_bio_map(s->iop.bio, NULL);
bio_copy_data(s->cache_miss, s->iop.bio);
@@ -913,14 +912,13 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
/* btree_search_recurse()'s btree iterator is no good anymore */
ret = miss == bio ? MAP_DONE : -EINTR;
- cache_bio = bio_alloc_bioset(GFP_NOWAIT,
+ cache_bio = bio_alloc_bioset(miss->bi_bdev,
DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
- &dc->disk.bio_split);
+ 0, GFP_NOWAIT, &dc->disk.bio_split);
if (!cache_bio)
goto out_submit;
cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
- bio_copy_dev(cache_bio, miss);
cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
cache_bio->bi_end_io = backing_request_endio;
@@ -1025,21 +1023,21 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
*/
struct bio *flush;
- flush = bio_alloc_bioset(GFP_NOIO, 0,
- &dc->disk.bio_split);
+ flush = bio_alloc_bioset(bio->bi_bdev, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH,
+ GFP_NOIO, &dc->disk.bio_split);
if (!flush) {
s->iop.status = BLK_STS_RESOURCE;
goto insert_data;
}
- bio_copy_dev(flush, bio);
flush->bi_end_io = backing_request_endio;
flush->bi_private = cl;
- flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
/* I/O request sent to backing device */
closure_bio_submit(s->iop.c, flush, cl);
}
} else {
- s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
+ s->iop.bio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
+ &dc->disk.bio_split);
/* I/O request sent to backing device */
bio->bi_end_io = backing_request_endio;
closure_bio_submit(s->iop.c, bio, cl);
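
Note: the bio_csum() hunk at the top of this file is a separate cleanup: kmap()/kunmap() become bvec_kmap_local()/kunmap_local(), which map the bvec at its offset, avoid the global kmap pool, and are effectively free on 64-bit. The pattern, as a sketch:

    struct bio_vec bv;
    struct bvec_iter iter;
    uint64_t csum = 0;

    bio_for_each_segment(bv, bio, iter) {
        /* Maps bv.bv_page at bv.bv_offset. */
        void *d = bvec_kmap_local(&bv);

        csum = crc64_be(csum, d, bv.bv_len);
        kunmap_local(d); /* unmap before the next iteration */
    }
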
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 140f35dc0c45..bf3de149d3c9 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -18,7 +18,6 @@
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
-#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
@@ -343,8 +342,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
down(&dc->sb_write_mutex);
closure_init(cl, parent);
- bio_init(bio, dc->sb_bv, 1);
- bio_set_dev(bio, dc->bdev);
+ bio_init(bio, dc->bdev, dc->sb_bv, 1, 0);
bio->bi_end_io = write_bdev_super_endio;
bio->bi_private = dc;
@@ -387,8 +385,7 @@ void bcache_write_super(struct cache_set *c)
if (ca->sb.version < version)
ca->sb.version = version;
- bio_init(bio, ca->sb_bv, 1);
- bio_set_dev(bio, ca->bdev);
+ bio_init(bio, ca->bdev, ca->sb_bv, 1, 0);
bio->bi_end_io = write_super_endio;
bio->bi_private = ca;
@@ -2240,7 +2237,7 @@ static int cache_alloc(struct cache *ca)
__module_get(THIS_MODULE);
kobject_init(&ca->kobj, &bch_cache_ktype);
- bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);
+ bio_init(&ca->journal.bio, NULL, ca->journal.bio.bi_inline_vecs, 8, 0);
/*
* when ca->sb.njournal_buckets is not zero, journal exists,
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index c7560f66dca8..9ee0005874cd 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -292,8 +292,8 @@ static void dirty_init(struct keybuf_key *w)
struct dirty_io *io = w->private;
struct bio *bio = &io->bio;
- bio_init(bio, bio->bi_inline_vecs,
- DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
+ bio_init(bio, NULL, bio->bi_inline_vecs,
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 0);
if (!io->dc->writeback_percent)
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
@@ -585,10 +585,13 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
sectors_dirty = atomic_add_return(s,
d->stripe_sectors_dirty + stripe);
- if (sectors_dirty == d->stripe_size)
- set_bit(stripe, d->full_dirty_stripes);
- else
- clear_bit(stripe, d->full_dirty_stripes);
+ if (sectors_dirty == d->stripe_size) {
+ if (!test_bit(stripe, d->full_dirty_stripes))
+ set_bit(stripe, d->full_dirty_stripes);
+ } else {
+ if (test_bit(stripe, d->full_dirty_stripes))
+ clear_bit(stripe, d->full_dirty_stripes);
+ }
nr_sectors -= s;
stripe_offset = 0;
@@ -998,9 +1001,11 @@ void bch_sectors_dirty_init(struct bcache_device *d)
}
}
+ /*
+ * Must wait for all threads to stop.
+ */
wait_event_interruptible(state->wait,
- atomic_read(&state->started) == 0 ||
- test_bit(CACHE_SET_IO_DISABLE, &c->flags));
+ atomic_read(&state->started) == 0);
out:
kfree(state);
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 447d030036d1..89fdfb49d564 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -744,21 +744,14 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
spin_unlock_irq(&cache->lock);
}
-static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
- dm_oblock_t oblock, bool bio_has_pbd)
-{
- if (bio_has_pbd)
- check_if_tick_bio_needed(cache, bio);
- remap_to_origin(cache, bio);
- if (bio_data_dir(bio) == WRITE)
- clear_discard(cache, oblock_to_dblock(cache, oblock));
-}
-
static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
dm_oblock_t oblock)
{
// FIXME: check_if_tick_bio_needed() is called way too much through this interface
- __remap_to_origin_clear_discard(cache, bio, oblock, true);
+ check_if_tick_bio_needed(cache, bio);
+ remap_to_origin(cache, bio);
+ if (bio_data_dir(bio) == WRITE)
+ clear_discard(cache, oblock_to_dblock(cache, oblock));
}
static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
@@ -826,16 +819,15 @@ static void issue_op(struct bio *bio, void *context)
static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
dm_oblock_t oblock, dm_cblock_t cblock)
{
- struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, &cache->bs);
+ struct bio *origin_bio = bio_alloc_clone(cache->origin_dev->bdev, bio,
+ GFP_NOIO, &cache->bs);
BUG_ON(!origin_bio);
bio_chain(origin_bio, bio);
- /*
- * Passing false to __remap_to_origin_clear_discard() skips
- * all code that might use per_bio_data (since clone doesn't have it)
- */
- __remap_to_origin_clear_discard(cache, origin_bio, oblock, false);
+
+ if (bio_data_dir(origin_bio) == WRITE)
+ clear_discard(cache, oblock_to_dblock(cache, oblock));
submit_bio(origin_bio);
remap_to_cache(cache, bio, cblock);
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index b855fef4f38a..72d18c3fbf1f 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -11,7 +11,6 @@
#include <linux/kthread.h>
#include <linux/ktime.h>
-#include <linux/genhd.h>
#include <linux/blk-mq.h>
#include <linux/blk-crypto-profile.h>
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d4ae31558826..e2b0af4a2ee8 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -234,7 +234,7 @@ static volatile unsigned long dm_crypt_pages_per_client;
#define DM_CRYPT_MEMORY_PERCENT 2
#define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_VECS * 16)
-static void clone_init(struct dm_crypt_io *, struct bio *);
+static void crypt_endio(struct bio *clone);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
struct scatterlist *sg);
@@ -1364,11 +1364,10 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
}
if (r == -EBADMSG) {
- char b[BDEVNAME_SIZE];
sector_t s = le64_to_cpu(*sector);
- DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu",
- bio_devname(ctx->bio_in, b), s);
+ DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+ ctx->bio_in->bi_bdev, s);
dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
ctx->bio_in, s, 0);
}
@@ -1672,11 +1671,10 @@ retry:
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
mutex_lock(&cc->bio_alloc_lock);
- clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
- if (!clone)
- goto out;
-
- clone_init(io, clone);
+ clone = bio_alloc_bioset(cc->dev->bdev, nr_iovecs, io->base_bio->bi_opf,
+ GFP_NOIO, &cc->bs);
+ clone->bi_private = io;
+ clone->bi_end_io = crypt_endio;
remaining_size = size;
@@ -1702,7 +1700,7 @@ retry:
bio_put(clone);
clone = NULL;
}
-out:
+
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
mutex_unlock(&cc->bio_alloc_lock);
@@ -1829,34 +1827,25 @@ static void crypt_endio(struct bio *clone)
crypt_dec_pending(io);
}
-static void clone_init(struct dm_crypt_io *io, struct bio *clone)
-{
- struct crypt_config *cc = io->cc;
-
- clone->bi_private = io;
- clone->bi_end_io = crypt_endio;
- bio_set_dev(clone, cc->dev->bdev);
- clone->bi_opf = io->base_bio->bi_opf;
-}
-
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
struct crypt_config *cc = io->cc;
struct bio *clone;
/*
- * We need the original biovec array in order to decrypt
- * the whole bio data *afterwards* -- thanks to immutable
- * biovecs we don't need to worry about the block layer
- * modifying the biovec array; so leverage bio_clone_fast().
+ * We need the original biovec array in order to decrypt the whole bio
+ * data *afterwards* -- thanks to immutable biovecs we don't need to
+ * worry about the block layer modifying the biovec array; so leverage
+ * bio_alloc_clone().
*/
- clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
+ clone = bio_alloc_clone(cc->dev->bdev, io->base_bio, gfp, &cc->bs);
if (!clone)
return 1;
+ clone->bi_private = io;
+ clone->bi_end_io = crypt_endio;
crypt_inc_pending(io);
- clone_init(io, clone);
clone->bi_iter.bi_sector = cc->start + io->sector;
if (dm_crypt_integrity_io_alloc(io, clone)) {
@@ -2179,11 +2168,10 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
if (error == -EBADMSG) {
- char b[BDEVNAME_SIZE];
sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));
- DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu",
- bio_devname(ctx->bio_in, b), s);
+ DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+ ctx->bio_in->bi_bdev, s);
dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
ctx->bio_in, s, 0);
io->error = BLK_STS_PROTECTION;
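
Note: besides the allocator conversion, dm-crypt drops bio_devname(), which required a char b[BDEVNAME_SIZE] scratch buffer on the caller's stack; the %pg printk specifier formats a struct block_device * directly. Sketch of the conversion (message text illustrative):

    /* Before: caller-supplied buffer plus helper. */
    char b[BDEVNAME_SIZE];
    DMERR_LIMIT("%s: I/O error", bio_devname(bio, b));

    /* After: vsnprintf formats the bdev itself. */
    DMERR_LIMIT("%pg: I/O error", bio->bi_bdev);
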
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index eb4b5e52bd6f..c58a5111cb57 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1788,12 +1788,11 @@ again:
checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
if (unlikely(r)) {
if (r > 0) {
- char b[BDEVNAME_SIZE];
sector_t s;
s = sector - ((r + ic->tag_size - 1) / ic->tag_size);
- DMERR_LIMIT("%s: Checksum failed at sector 0x%llx",
- bio_devname(bio, b), s);
+ DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
+ bio->bi_bdev, s);
r = -EILSEQ;
atomic64_inc(&ic->number_of_mismatches);
dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2d3cda0acacb..23e038f8dc84 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -345,11 +345,10 @@ static void do_region(int op, int op_flags, unsigned region,
(PAGE_SIZE >> SECTOR_SHIFT)));
}
- bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, &io->client->bios);
+ bio = bio_alloc_bioset(where->bdev, num_bvecs, op | op_flags,
+ GFP_NOIO, &io->client->bios);
bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
- bio_set_dev(bio, where->bdev);
bio->bi_end_io = endio;
- bio_set_op_attrs(bio, op, op_flags);
store_io_and_region_in_bio(bio, io, region);
if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
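
Note: for the allocating variants the signature is bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, bio_set) — the op/flags come before the gfp mask, which is easy to get backwards since both are plain scalars. A converted call, sketched with illustrative names:

    struct bio *bio;

    bio = bio_alloc_bioset(bdev, nr_pages, REQ_OP_WRITE | REQ_SYNC,
                           GFP_NOIO, bs);
    bio->bi_iter.bi_sector = sector;
    bio->bi_end_io = my_endio; /* hypothetical completion handler */
    submit_bio(bio);
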
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 139b09b06eda..c9d036d6bb2e 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -217,18 +217,12 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
void *ptr;
size_t ret;
- bio = bio_alloc(GFP_KERNEL, 1);
- if (!bio) {
- DMERR("Couldn't alloc log bio");
- goto error;
- }
+ bio = bio_alloc(lc->logdev->bdev, 1, REQ_OP_WRITE, GFP_KERNEL);
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ?
log_end_super : log_end_io;
bio->bi_private = lc;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
page = alloc_page(GFP_KERNEL);
if (!page) {
@@ -275,18 +269,12 @@ static int write_inline_data(struct log_writes_c *lc, void *entry,
atomic_inc(&lc->io_blocks);
- bio = bio_alloc(GFP_KERNEL, bio_pages);
- if (!bio) {
- DMERR("Couldn't alloc inline data bio");
- goto error;
- }
-
+ bio = bio_alloc(lc->logdev->bdev, bio_pages, REQ_OP_WRITE,
+ GFP_KERNEL);
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
for (i = 0; i < bio_pages; i++) {
pg_datalen = min_t(int, datalen, PAGE_SIZE);
@@ -322,7 +310,6 @@ static int write_inline_data(struct log_writes_c *lc, void *entry,
error_bio:
bio_free_pages(bio);
bio_put(bio);
-error:
put_io_block(lc);
return -1;
}
@@ -363,17 +350,12 @@ static int log_one_block(struct log_writes_c *lc,
goto out;
atomic_inc(&lc->io_blocks);
- bio = bio_alloc(GFP_KERNEL, bio_max_segs(block->vec_cnt));
- if (!bio) {
- DMERR("Couldn't alloc log bio");
- goto error;
- }
+ bio = bio_alloc(lc->logdev->bdev, bio_max_segs(block->vec_cnt),
+ REQ_OP_WRITE, GFP_KERNEL);
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
for (i = 0; i < block->vec_cnt; i++) {
/*
@@ -385,18 +367,13 @@ static int log_one_block(struct log_writes_c *lc,
if (ret != block->vecs[i].bv_len) {
atomic_inc(&lc->io_blocks);
submit_bio(bio);
- bio = bio_alloc(GFP_KERNEL,
- bio_max_segs(block->vec_cnt - i));
- if (!bio) {
- DMERR("Couldn't alloc log bio");
- goto error;
- }
+ bio = bio_alloc(lc->logdev->bdev,
+ bio_max_segs(block->vec_cnt - i),
+ REQ_OP_WRITE, GFP_KERNEL);
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
ret = bio_add_page(bio, block->vecs[i].bv_page,
block->vecs[i].bv_len, 0);
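
Note: the deleted "Couldn't alloc log bio" branches are not lost error handling. With a gfp mask that allows direct reclaim (GFP_KERNEL, GFP_NOIO) and nr_vecs within BIO_MAX_VECS, bio_alloc() is backed by a mempool and is documented never to return NULL, so those checks were dead code; only non-blocking callers still need one. Sketch:

    /* Sleeping allocation: mempool-backed, cannot fail. */
    bio = bio_alloc(bdev, 1, REQ_OP_WRITE, GFP_KERNEL);

    /* Non-blocking allocation can fail and must be checked. */
    bio = bio_alloc(bdev, 1, REQ_OP_WRITE, GFP_NOWAIT);
    if (!bio)
        return -ENOMEM;
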
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 579ab6183d4d..6948d5db9092 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -303,21 +303,6 @@ static void end_clone_request(struct request *clone, blk_status_t error)
dm_complete_request(tio->orig, error);
}
-static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
-{
- blk_status_t r;
-
- if (blk_queue_io_stat(clone->q))
- clone->rq_flags |= RQF_IO_STAT;
-
- clone->start_time_ns = ktime_get_ns();
- r = blk_insert_cloned_request(clone->q, clone);
- if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
- /* must complete clone in terms of original request */
- dm_complete_request(rq, r);
- return r;
-}
-
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
void *data)
{
@@ -398,13 +383,20 @@ static int map_request(struct dm_rq_target_io *tio)
/* The target has remapped the I/O so dispatch it */
trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
blk_rq_pos(rq));
- ret = dm_dispatch_clone_request(clone, rq);
- if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
+ ret = blk_insert_cloned_request(clone);
+ switch (ret) {
+ case BLK_STS_OK:
+ break;
+ case BLK_STS_RESOURCE:
+ case BLK_STS_DEV_RESOURCE:
blk_rq_unprep_clone(clone);
blk_mq_cleanup_rq(clone);
tio->ti->type->release_clone_rq(clone, &tio->info);
tio->clone = NULL;
return DM_MAPIO_REQUEUE;
+ default:
+ /* must complete clone in terms of original request */
+ dm_complete_request(rq, ret);
}
break;
case DM_MAPIO_REQUEUE:
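
Note: dm_dispatch_clone_request() can be deleted because blk_insert_cloned_request() now takes only the clone, derives the queue from it, and sets RQF_IO_STAT and start_time_ns inside the block core; the caller just switches on the returned blk_status_t. Before/after, as a sketch:

    /* Before: caller primed stats and passed the queue explicitly. */
    if (blk_queue_io_stat(clone->q))
        clone->rq_flags |= RQF_IO_STAT;
    clone->start_time_ns = ktime_get_ns();
    ret = blk_insert_cloned_request(clone->q, clone);

    /* After: the block core handles the accounting. */
    ret = blk_insert_cloned_request(clone);
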
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index dcf34c6b05ad..0d336b5ec571 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -141,11 +141,6 @@ struct dm_snapshot {
* for them to be committed.
*/
struct bio_list bios_queued_during_merge;
-
- /*
- * Flush data after merge.
- */
- struct bio flush_bio;
};
/*
@@ -1127,17 +1122,6 @@ shut:
static void error_bios(struct bio *bio);
-static int flush_data(struct dm_snapshot *s)
-{
- struct bio *flush_bio = &s->flush_bio;
-
- bio_reset(flush_bio);
- bio_set_dev(flush_bio, s->origin->bdev);
- flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
-
- return submit_bio_wait(flush_bio);
-}
-
static void merge_callback(int read_err, unsigned long write_err, void *context)
{
struct dm_snapshot *s = context;
@@ -1151,7 +1135,7 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
goto shut;
}
- if (flush_data(s) < 0) {
+ if (blkdev_issue_flush(s->origin->bdev) < 0) {
DMERR("Flush after merge failed: shutting down merge");
goto shut;
}
@@ -1340,7 +1324,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->first_merging_chunk = 0;
s->num_merging_chunks = 0;
bio_list_init(&s->bios_queued_during_merge);
- bio_init(&s->flush_bio, NULL, 0);
/* Allocate hash table for COW data */
if (init_hash_tables(s)) {
@@ -1528,8 +1511,6 @@ static void snapshot_dtr(struct dm_target *ti)
dm_exception_store_destroy(s->store);
- bio_uninit(&s->flush_bio);
-
dm_put_device(ti, s->cow);
dm_put_device(ti, s->origin);
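
Note: instead of a long-lived flush_bio embedded in struct dm_snapshot (with the matching bio_init()/bio_uninit() in ctr/dtr), the merge path can use the stock blkdev_issue_flush(), which builds and waits on an empty preflush bio on the stack. Roughly equivalent open-coded form, sketched:

    struct bio bio;
    int ret;

    bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);
    ret = submit_bio_wait(&bio); /* blocks until completion */
    bio_uninit(&bio);

This is essentially what blkdev_issue_flush(bdev) does internally, so the per-snapshot bio bookkeeping buys nothing.
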
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index ec119d2422d5..f4234d615aa1 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -282,8 +282,6 @@ struct pool {
struct dm_bio_prison_cell **cell_sort_array;
mempool_t mapping_pool;
-
- struct bio flush_bio;
};
static void metadata_operation_failed(struct pool *pool, const char *op, int r);
@@ -1179,25 +1177,17 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
return;
}
- discard_parent = bio_alloc(GFP_NOIO, 1);
- if (!discard_parent) {
- DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
- dm_device_name(tc->pool->pool_md));
- queue_passdown_pt2(m);
-
- } else {
- discard_parent->bi_end_io = passdown_endio;
- discard_parent->bi_private = m;
-
- if (m->maybe_shared)
- passdown_double_checking_shared_status(m, discard_parent);
- else {
- struct discard_op op;
-
- begin_discard(&op, tc, discard_parent);
- r = issue_discard(&op, m->data_block, data_end);
- end_discard(&op, r);
- }
+ discard_parent = bio_alloc(NULL, 1, 0, GFP_NOIO);
+ discard_parent->bi_end_io = passdown_endio;
+ discard_parent->bi_private = m;
+ if (m->maybe_shared)
+ passdown_double_checking_shared_status(m, discard_parent);
+ else {
+ struct discard_op op;
+
+ begin_discard(&op, tc, discard_parent);
+ r = issue_discard(&op, m->data_block, data_end);
+ end_discard(&op, r);
}
}
@@ -2913,7 +2903,6 @@ static void __pool_destroy(struct pool *pool)
if (pool->next_mapping)
mempool_free(pool->next_mapping, &pool->mapping_pool);
mempool_exit(&pool->mapping_pool);
- bio_uninit(&pool->flush_bio);
dm_deferred_set_destroy(pool->shared_read_ds);
dm_deferred_set_destroy(pool->all_io_ds);
kfree(pool);
@@ -2994,7 +2983,6 @@ static struct pool *pool_create(struct mapped_device *pool_md,
pool->low_water_triggered = false;
pool->suspended = true;
pool->out_of_data_space = false;
- bio_init(&pool->flush_bio, NULL, 0);
pool->shared_read_ds = dm_deferred_set_create();
if (!pool->shared_read_ds) {
@@ -3201,13 +3189,8 @@ static void metadata_low_callback(void *context)
static int metadata_pre_commit_callback(void *context)
{
struct pool *pool = context;
- struct bio *flush_bio = &pool->flush_bio;
-
- bio_reset(flush_bio);
- bio_set_dev(flush_bio, pool->data_dev);
- flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
- return submit_bio_wait(flush_bio);
+ return blkdev_issue_flush(pool->data_dev);
}
static sector_t get_dev_size(struct block_device *bdev)
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 4f31591d2d25..5630b470ba42 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -1821,11 +1821,11 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
max_pages = e->wc_list_contiguous;
- bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
+ bio = bio_alloc_bioset(wc->dev->bdev, max_pages, REQ_OP_WRITE,
+ GFP_NOIO, &wc->bio_set);
wb = container_of(bio, struct writeback_struct, bio);
wb->wc = wc;
bio->bi_end_io = writecache_writeback_endio;
- bio_set_dev(bio, wc->dev->bdev);
bio->bi_iter.bi_sector = read_original_sector(wc, e);
if (max_pages <= WB_LIST_INLINE ||
unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
@@ -1852,7 +1852,8 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
wb->wc_list[wb->wc_list_n++] = f;
e = f;
}
- bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
+ if (WC_MODE_FUA(wc))
+ bio->bi_opf |= REQ_FUA;
if (writecache_has_error(wc)) {
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index ee4626d08557..e5f1eb27ce2e 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -550,11 +550,8 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
if (!mblk)
return ERR_PTR(-ENOMEM);
- bio = bio_alloc(GFP_NOIO, 1);
- if (!bio) {
- dmz_free_mblock(zmd, mblk);
- return ERR_PTR(-ENOMEM);
- }
+ bio = bio_alloc(dev->bdev, 1, REQ_OP_READ | REQ_META | REQ_PRIO,
+ GFP_NOIO);
spin_lock(&zmd->mblk_lock);
@@ -578,10 +575,8 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
/* Submit read BIO */
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, dev->bdev);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
- bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
submit_bio(bio);
@@ -725,19 +720,14 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
if (dmz_bdev_is_dying(dev))
return -EIO;
- bio = bio_alloc(GFP_NOIO, 1);
- if (!bio) {
- set_bit(DMZ_META_ERROR, &mblk->state);
- return -ENOMEM;
- }
+ bio = bio_alloc(dev->bdev, 1, REQ_OP_WRITE | REQ_META | REQ_PRIO,
+ GFP_NOIO);
set_bit(DMZ_META_WRITING, &mblk->state);
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, dev->bdev);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
- bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
submit_bio(bio);
@@ -759,13 +749,9 @@ static int dmz_rdwr_block(struct dmz_dev *dev, int op,
if (dmz_bdev_is_dying(dev))
return -EIO;
- bio = bio_alloc(GFP_NOIO, 1);
- if (!bio)
- return -ENOMEM;
-
+ bio = bio_alloc(dev->bdev, 1, op | REQ_SYNC | REQ_META | REQ_PRIO,
+ GFP_NOIO);
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, dev->bdev);
- bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
ret = submit_bio_wait(bio);
bio_put(bio);
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 166c4e9d99c9..a3f6d3ef3817 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -125,11 +125,10 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
if (dev->flags & DMZ_BDEV_DYING)
return -EIO;
- clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
+ clone = bio_alloc_clone(dev->bdev, bio, GFP_NOIO, &dmz->bio_set);
if (!clone)
return -ENOMEM;
- bio_set_dev(clone, dev->bdev);
bioctx->dev = dev;
clone->bi_iter.bi_sector =
dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
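
Note: bio_clone_fast() and __bio_clone_fast() are replaced by bio_alloc_clone(), which allocates the clone from a bio_set, and bio_init_clone(), which fills in a caller-provided (embedded or on-stack) bio. Both take the destination device up front, and both can fail because cloning the crypto/integrity context may allocate. Sketch, with illustrative error handling:

    /* Heap clone from a bio_set. */
    clone = bio_alloc_clone(bdev, src, GFP_NOIO, bs);
    if (!clone)
        return -ENOMEM;

    /* In-place clone into an embedded bio. */
    if (bio_init_clone(bdev, &embedded_bio, src, GFP_NOIO) < 0)
        return -ENOMEM;
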
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 997ace47bbd5..183ce0d6728f 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -79,10 +79,14 @@ struct clone_info {
#define DM_IO_BIO_OFFSET \
(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))
+static inline struct dm_target_io *clone_to_tio(struct bio *clone)
+{
+ return container_of(clone, struct dm_target_io, clone);
+}
+
void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
- struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
- if (!tio->inside_dm_io)
+ if (!clone_to_tio(bio)->inside_dm_io)
return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
return (char *)bio - DM_IO_BIO_OFFSET - data_size;
}
@@ -477,10 +481,7 @@ out:
u64 dm_start_time_ns_from_clone(struct bio *bio)
{
- struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
- struct dm_io *io = tio->io;
-
- return jiffies_to_nsecs(io->start_time);
+ return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
@@ -519,11 +520,9 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
struct dm_target_io *tio;
struct bio *clone;
- clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
- if (!clone)
- return NULL;
+ clone = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, &md->io_bs);
- tio = container_of(clone, struct dm_target_io, clone);
+ tio = clone_to_tio(clone);
tio->inside_dm_io = true;
tio->io = NULL;
@@ -545,8 +544,8 @@ static void free_io(struct mapped_device *md, struct dm_io *io)
bio_put(&io->tio.clone);
}
-static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
- unsigned target_bio_nr, gfp_t gfp_mask)
+static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
+ unsigned target_bio_nr, unsigned *len, gfp_t gfp_mask)
{
struct dm_target_io *tio;
@@ -554,11 +553,12 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *t
/* the dm_target_io embedded in ci->io is available */
tio = &ci->io->tio;
} else {
- struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
+ struct bio *clone = bio_alloc_clone(ci->bio->bi_bdev, ci->bio,
+ gfp_mask, &ci->io->md->bs);
if (!clone)
return NULL;
- tio = container_of(clone, struct dm_target_io, clone);
+ tio = clone_to_tio(clone);
tio->inside_dm_io = false;
}
@@ -566,15 +566,16 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *t
tio->io = ci->io;
tio->ti = ti;
tio->target_bio_nr = target_bio_nr;
+ tio->len_ptr = len;
- return tio;
+ return &tio->clone;
}
-static void free_tio(struct dm_target_io *tio)
+static void free_tio(struct bio *clone)
{
- if (tio->inside_dm_io)
+ if (clone_to_tio(clone)->inside_dm_io)
return;
- bio_put(&tio->clone);
+ bio_put(clone);
}
/*
@@ -879,7 +880,7 @@ static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
static void clone_endio(struct bio *bio)
{
blk_status_t error = bio->bi_status;
- struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+ struct dm_target_io *tio = clone_to_tio(bio);
struct dm_io *io = tio->io;
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
@@ -930,7 +931,7 @@ static void clone_endio(struct bio *bio)
up(&md->swap_bios_semaphore);
}
- free_tio(tio);
+ free_tio(bio);
dm_io_dec_pending(io, error);
}
@@ -1085,7 +1086,7 @@ static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
*/
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
- struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+ struct dm_target_io *tio = clone_to_tio(bio);
unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
BUG_ON(bio->bi_opf & REQ_PREFLUSH);
@@ -1115,11 +1116,11 @@ static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
mutex_unlock(&md->swap_bios_lock);
}
-static void __map_bio(struct dm_target_io *tio)
+static void __map_bio(struct bio *clone)
{
+ struct dm_target_io *tio = clone_to_tio(clone);
int r;
sector_t sector;
- struct bio *clone = &tio->clone;
struct dm_io *io = tio->io;
struct dm_target *ti = tio->ti;
@@ -1164,7 +1165,7 @@ static void __map_bio(struct dm_target_io *tio)
struct mapped_device *md = io->md;
up(&md->swap_bios_semaphore);
}
- free_tio(tio);
+ free_tio(clone);
dm_io_dec_pending(io, BLK_STS_IOERR);
break;
case DM_MAPIO_REQUEUE:
@@ -1172,7 +1173,7 @@ static void __map_bio(struct dm_target_io *tio)
struct mapped_device *md = io->md;
up(&md->swap_bios_semaphore);
}
- free_tio(tio);
+ free_tio(clone);
dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
break;
default:
@@ -1190,106 +1191,75 @@ static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
/*
* Creates a bio that consists of range of complete bvecs.
*/
-static int clone_bio(struct dm_target_io *tio, struct bio *bio,
- sector_t sector, unsigned len)
+static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
+ sector_t sector, unsigned *len)
{
- struct bio *clone = &tio->clone;
- int r;
-
- __bio_clone_fast(clone, bio);
-
- r = bio_crypt_clone(clone, bio, GFP_NOIO);
- if (r < 0)
- return r;
-
- if (bio_integrity(bio)) {
- if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
- !dm_target_passes_integrity(tio->ti->type))) {
- DMWARN("%s: the target %s doesn't support integrity data.",
- dm_device_name(tio->io->md),
- tio->ti->type->name);
- return -EIO;
- }
-
- r = bio_integrity_clone(clone, bio, GFP_NOIO);
- if (r < 0)
- return r;
- }
+ struct bio *bio = ci->bio, *clone;
+ clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
- clone->bi_iter.bi_size = to_bytes(len);
+ clone->bi_iter.bi_size = to_bytes(*len);
if (bio_integrity(bio))
bio_integrity_trim(clone);
+ __map_bio(clone);
return 0;
}
static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
- struct dm_target *ti, unsigned num_bios)
+ struct dm_target *ti, unsigned num_bios,
+ unsigned *len)
{
- struct dm_target_io *tio;
+ struct bio *bio;
int try;
- if (!num_bios)
- return;
-
- if (num_bios == 1) {
- tio = alloc_tio(ci, ti, 0, GFP_NOIO);
- bio_list_add(blist, &tio->clone);
- return;
- }
-
for (try = 0; try < 2; try++) {
int bio_nr;
- struct bio *bio;
if (try)
mutex_lock(&ci->io->md->table_devices_lock);
for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
- tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
- if (!tio)
+ bio = alloc_tio(ci, ti, bio_nr, len,
+ try ? GFP_NOIO : GFP_NOWAIT);
+ if (!bio)
break;
- bio_list_add(blist, &tio->clone);
+ bio_list_add(blist, bio);
}
if (try)
mutex_unlock(&ci->io->md->table_devices_lock);
if (bio_nr == num_bios)
return;
- while ((bio = bio_list_pop(blist))) {
- tio = container_of(bio, struct dm_target_io, clone);
- free_tio(tio);
- }
+ while ((bio = bio_list_pop(blist)))
+ free_tio(bio);
}
}
-static void __clone_and_map_simple_bio(struct clone_info *ci,
- struct dm_target_io *tio, unsigned *len)
-{
- struct bio *clone = &tio->clone;
-
- tio->len_ptr = len;
-
- __bio_clone_fast(clone, ci->bio);
- if (len)
- bio_setup_sector(clone, ci->sector, *len);
- __map_bio(tio);
-}
-
static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
unsigned num_bios, unsigned *len)
{
struct bio_list blist = BIO_EMPTY_LIST;
- struct bio *bio;
- struct dm_target_io *tio;
-
- alloc_multiple_bios(&blist, ci, ti, num_bios);
+ struct bio *clone;
- while ((bio = bio_list_pop(&blist))) {
- tio = container_of(bio, struct dm_target_io, clone);
- __clone_and_map_simple_bio(ci, tio, len);
+ switch (num_bios) {
+ case 0:
+ break;
+ case 1:
+ clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
+ if (len)
+ bio_setup_sector(clone, ci->sector, *len);
+ __map_bio(clone);
+ break;
+ default:
+ alloc_multiple_bios(&blist, ci, ti, num_bios, len);
+ while ((clone = bio_list_pop(&blist))) {
+ if (len)
+ bio_setup_sector(clone, ci->sector, *len);
+ __map_bio(clone);
+ }
+ break;
}
}
@@ -1304,9 +1274,8 @@ static int __send_empty_flush(struct clone_info *ci)
* need to reference it after submit. It's just used as
* the basis for the clone(s).
*/
- bio_init(&flush_bio, NULL, 0);
- flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
- bio_set_dev(&flush_bio, ci->io->md->disk->part0);
+ bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
ci->bio = &flush_bio;
ci->sector_count = 0;
@@ -1319,25 +1288,6 @@ static int __send_empty_flush(struct clone_info *ci)
return 0;
}
-static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
- sector_t sector, unsigned *len)
-{
- struct bio *bio = ci->bio;
- struct dm_target_io *tio;
- int r;
-
- tio = alloc_tio(ci, ti, 0, GFP_NOIO);
- tio->len_ptr = len;
- r = clone_bio(tio, bio, sector, *len);
- if (r < 0) {
- free_tio(tio);
- return r;
- }
- __map_bio(tio);
-
- return 0;
-}
-
static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
unsigned num_bios)
{
diff --git a/drivers/md/md-faulty.c b/drivers/md/md-faulty.c
index c0dc6f2ef4a3..50ad818978a4 100644
--- a/drivers/md/md-faulty.c
+++ b/drivers/md/md-faulty.c
@@ -205,9 +205,9 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
}
}
if (failit) {
- struct bio *b = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+ struct bio *b = bio_alloc_clone(conf->rdev->bdev, bio, GFP_NOIO,
+ &mddev->bio_set);
- bio_set_dev(b, conf->rdev->bdev);
b->bi_private = bio;
b->bi_end_io = faulty_fail;
bio = b;
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index e7d6486f090f..3081a936350d 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -121,11 +121,9 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
}
multipath = conf->multipaths + mp_bh->path;
- bio_init(&mp_bh->bio, NULL, 0);
- __bio_clone_fast(&mp_bh->bio, bio);
+ bio_init_clone(multipath->rdev->bdev, &mp_bh->bio, bio, GFP_NOIO);
mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
- bio_set_dev(&mp_bh->bio, multipath->rdev->bdev);
mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
mp_bh->bio.bi_end_io = multipath_end_request;
mp_bh->bio.bi_private = mp_bh;
@@ -299,7 +297,6 @@ static void multipathd(struct md_thread *thread)
md_check_recovery(mddev);
for (;;) {
- char b[BDEVNAME_SIZE];
spin_lock_irqsave(&conf->device_lock, flags);
if (list_empty(head))
break;
@@ -311,13 +308,13 @@ static void multipathd(struct md_thread *thread)
bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
if ((mp_bh->path = multipath_map (conf))<0) {
- pr_err("multipath: %s: unrecoverable IO read error for block %llu\n",
- bio_devname(bio, b),
+ pr_err("multipath: %pg: unrecoverable IO read error for block %llu\n",
+ bio->bi_bdev,
(unsigned long long)bio->bi_iter.bi_sector);
multipath_end_bh_io(mp_bh, BLK_STS_IOERR);
} else {
- pr_err("multipath: %s: redirecting sector %llu to another IO path\n",
- bio_devname(bio, b),
+ pr_err("multipath: %pg: redirecting sector %llu to another IO path\n",
+ bio->bi_bdev,
(unsigned long long)bio->bi_iter.bi_sector);
*bio = *(mp_bh->master_bio);
bio->bi_iter.bi_sector +=
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4d38bd7dadd6..309b3af906ad 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -562,11 +562,11 @@ static void submit_flushes(struct work_struct *ws)
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- bi = bio_alloc_bioset(GFP_NOIO, 0, &mddev->bio_set);
+ bi = bio_alloc_bioset(rdev->bdev, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH,
+ GFP_NOIO, &mddev->bio_set);
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
- bio_set_dev(bi, rdev->bdev);
- bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
atomic_inc(&mddev->flush_pending);
submit_bio(bi);
rcu_read_lock();
@@ -955,7 +955,6 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
* If an error occurred, call md_error
*/
struct bio *bio;
- int ff = 0;
if (!page)
return;
@@ -963,11 +962,13 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
if (test_bit(Faulty, &rdev->flags))
return;
- bio = bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
+ bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev,
+ 1,
+ REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA,
+ GFP_NOIO, &mddev->sync_set);
atomic_inc(&rdev->nr_pending);
- bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
bio->bi_iter.bi_sector = sector;
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
@@ -976,8 +977,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
test_bit(FailFast, &rdev->flags) &&
!test_bit(LastDev, &rdev->flags))
- ff = MD_FAILFAST;
- bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;
+ bio->bi_opf |= MD_FAILFAST;
atomic_inc(&mddev->pending_writes);
submit_bio(bio);
@@ -998,13 +998,11 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct bio bio;
struct bio_vec bvec;
- bio_init(&bio, &bvec, 1);
-
if (metadata_op && rdev->meta_bdev)
- bio_set_dev(&bio, rdev->meta_bdev);
+ bio_init(&bio, rdev->meta_bdev, &bvec, 1, op | op_flags);
else
- bio_set_dev(&bio, rdev->bdev);
- bio.bi_opf = op | op_flags;
+ bio_init(&bio, rdev->bdev, &bvec, 1, op | op_flags);
+
if (metadata_op)
bio.bi_iter.bi_sector = sector + rdev->sb_start;
else if (rdev->mddev->reshape_position != MaxSector &&
@@ -8636,13 +8634,14 @@ static void md_end_io_acct(struct bio *bio)
*/
void md_account_bio(struct mddev *mddev, struct bio **bio)
{
+ struct block_device *bdev = (*bio)->bi_bdev;
struct md_io_acct *md_io_acct;
struct bio *clone;
- if (!blk_queue_io_stat((*bio)->bi_bdev->bd_disk->queue))
+ if (!blk_queue_io_stat(bdev->bd_disk->queue))
return;
- clone = bio_clone_fast(*bio, GFP_NOIO, &mddev->io_acct_set);
+ clone = bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_acct_set);
md_io_acct = container_of(clone, struct md_io_acct, bio_clone);
md_io_acct->orig_bio = *bio;
md_io_acct->start_time = bio_start_io_acct(*bio);
@@ -9583,7 +9582,7 @@ static int md_notify_reboot(struct notifier_block *this,
* driver, we do want to have a safe RAID driver ...
*/
if (need_delay)
- mdelay(1000*1);
+ msleep(1000);
return NOTIFY_DONE;
}
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index 83f9a4f3d82e..e61f6cad4e08 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -28,6 +28,11 @@ struct resync_pages {
struct page *pages[RESYNC_PAGES];
};
+struct raid1_plug_cb {
+ struct blk_plug_cb cb;
+ struct bio_list pending;
+};
+
static void rbio_pool_free(void *rbio, void *data)
{
kfree(rbio);
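
Note: moving raid1_plug_cb into the shared raid1-10.c lets raid10 drop its identical raid10_plug_cb, and the pending_cnt/pending_count pair goes with it — nothing ever read the counts; bio_list_merge() under the device lock plus the wait_barrier/thread wakeups is the whole protocol. The plug hookup pattern both levels use, for reference:

    struct blk_plug_cb *cb;
    struct raid1_plug_cb *plug = NULL;

    cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
    if (cb)
        plug = container_of(cb, struct raid1_plug_cb, cb);

    if (plug)
        bio_list_add(&plug->pending, mbio); /* flushed at unplug time */
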
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e2d8acb1e988..934186724d21 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -824,7 +824,6 @@ static void flush_pending_writes(struct r1conf *conf)
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
- conf->pending_count = 0;
spin_unlock_irq(&conf->device_lock);
/*
@@ -1126,7 +1125,8 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
int i = 0;
struct bio *behind_bio = NULL;
- behind_bio = bio_alloc_bioset(GFP_NOIO, vcnt, &r1_bio->mddev->bio_set);
+ behind_bio = bio_alloc_bioset(NULL, vcnt, 0, GFP_NOIO,
+ &r1_bio->mddev->bio_set);
if (!behind_bio)
return;
@@ -1166,12 +1166,6 @@ free_pages:
bio_put(behind_bio);
}
-struct raid1_plug_cb {
- struct blk_plug_cb cb;
- struct bio_list pending;
- int pending_cnt;
-};
-
static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
@@ -1183,7 +1177,6 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
if (from_schedule || current->bio_list) {
spin_lock_irq(&conf->device_lock);
bio_list_merge(&conf->pending_bio_list, &plug->pending);
- conf->pending_count += plug->pending_cnt;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_barrier);
md_wakeup_thread(mddev->thread);
@@ -1319,13 +1312,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
if (!r1bio_existed && blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
r1_bio->start_time = bio_start_io_acct(bio);
- read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
+ read_bio = bio_alloc_clone(mirror->rdev->bdev, bio, gfp,
+ &mddev->bio_set);
r1_bio->bios[rdisk] = read_bio;
read_bio->bi_iter.bi_sector = r1_bio->sector +
mirror->rdev->data_offset;
- bio_set_dev(read_bio, mirror->rdev->bdev);
read_bio->bi_end_io = raid1_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync);
if (test_bit(FailFast, &mirror->rdev->flags) &&
@@ -1545,24 +1538,25 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
first_clone = 0;
}
- if (r1_bio->behind_master_bio)
- mbio = bio_clone_fast(r1_bio->behind_master_bio,
- GFP_NOIO, &mddev->bio_set);
- else
- mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
-
if (r1_bio->behind_master_bio) {
+ mbio = bio_alloc_clone(rdev->bdev,
+ r1_bio->behind_master_bio,
+ GFP_NOIO, &mddev->bio_set);
if (test_bit(CollisionCheck, &rdev->flags))
wait_for_serialization(rdev, r1_bio);
if (test_bit(WriteMostly, &rdev->flags))
atomic_inc(&r1_bio->behind_remaining);
- } else if (mddev->serialize_policy)
- wait_for_serialization(rdev, r1_bio);
+ } else {
+ mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
+ &mddev->bio_set);
+
+ if (mddev->serialize_policy)
+ wait_for_serialization(rdev, r1_bio);
+ }
r1_bio->bios[i] = mbio;
mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset);
- bio_set_dev(mbio, rdev->bdev);
mbio->bi_end_io = raid1_end_write_request;
mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
if (test_bit(FailFast, &rdev->flags) &&
@@ -1586,11 +1580,9 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
plug = NULL;
if (plug) {
bio_list_add(&plug->pending, mbio);
- plug->pending_cnt++;
} else {
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
md_wakeup_thread(mddev->thread);
}
@@ -2070,15 +2062,14 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
} while (!success && d != r1_bio->read_disk);
if (!success) {
- char b[BDEVNAME_SIZE];
int abort = 0;
/* Cannot read from anywhere, this block is lost.
* Record a bad block on each device. If that doesn't
* work just disable and interrupt the recovery.
* Don't fail devices as that won't really help.
*/
- pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
- mdname(mddev), bio_devname(bio, b),
+ pr_crit_ratelimited("md/raid1:%s: %pg: unrecoverable I/O read error for block %llu\n",
+ mdname(mddev), bio->bi_bdev,
(unsigned long long)r1_bio->sector);
for (d = 0; d < conf->raid_disks * 2; d++) {
rdev = conf->mirrors[d].rdev;
@@ -2165,11 +2156,10 @@ static void process_checks(struct r1bio *r1_bio)
continue;
/* fixup the bio for reuse, but preserve errno */
status = b->bi_status;
- bio_reset(b);
+ bio_reset(b, conf->mirrors[i].rdev->bdev, REQ_OP_READ);
b->bi_status = status;
b->bi_iter.bi_sector = r1_bio->sector +
conf->mirrors[i].rdev->data_offset;
- bio_set_dev(b, conf->mirrors[i].rdev->bdev);
b->bi_end_io = end_sync_read;
rp->raid_bio = r1_bio;
b->bi_private = rp;
@@ -2416,12 +2406,12 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
/* Write at 'sector' for 'sectors'*/
if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
- wbio = bio_clone_fast(r1_bio->behind_master_bio,
- GFP_NOIO,
- &mddev->bio_set);
+ wbio = bio_alloc_clone(rdev->bdev,
+ r1_bio->behind_master_bio,
+ GFP_NOIO, &mddev->bio_set);
} else {
- wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
- &mddev->bio_set);
+ wbio = bio_alloc_clone(rdev->bdev, r1_bio->master_bio,
+ GFP_NOIO, &mddev->bio_set);
}
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
@@ -2430,7 +2420,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
bio_trim(wbio, sector - r1_bio->sector, sectors);
wbio->bi_iter.bi_sector += rdev->data_offset;
- bio_set_dev(wbio, rdev->bdev);
if (submit_bio_wait(wbio) < 0)
/* failure! */
@@ -2650,7 +2639,7 @@ static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
for (i = conf->poolinfo->raid_disks; i--; ) {
bio = r1bio->bios[i];
rps = bio->bi_private;
- bio_reset(bio);
+ bio_reset(bio, NULL, 0);
bio->bi_private = rps;
}
r1bio->master_bio = NULL;
@@ -3058,7 +3047,6 @@ static struct r1conf *setup_conf(struct mddev *mddev)
init_waitqueue_head(&conf->wait_barrier);
bio_list_init(&conf->pending_bio_list);
- conf->pending_count = 0;
conf->recovery_disabled = mddev->recovery_disabled - 1;
err = -EIO;
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index ccf10e59b116..ebb6788820e7 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -87,7 +87,6 @@ struct r1conf {
/* queue pending writes to be submitted on unplug */
struct bio_list pending_bio_list;
- int pending_count;
/* for use when syncing mirrors:
* We don't allow both normal IO and resync/recovery IO at
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 2b969f70a31f..b369ebb965a9 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -861,7 +861,6 @@ static void flush_pending_writes(struct r10conf *conf)
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
- conf->pending_count = 0;
spin_unlock_irq(&conf->device_lock);
/*
@@ -1054,16 +1053,9 @@ static sector_t choose_data_offset(struct r10bio *r10_bio,
return rdev->new_data_offset;
}
-struct raid10_plug_cb {
- struct blk_plug_cb cb;
- struct bio_list pending;
- int pending_cnt;
-};
-
static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
- struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
- cb);
+ struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, cb);
struct mddev *mddev = plug->cb.data;
struct r10conf *conf = mddev->private;
struct bio *bio;
@@ -1071,7 +1063,6 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
if (from_schedule || current->bio_list) {
spin_lock_irq(&conf->device_lock);
bio_list_merge(&conf->pending_bio_list, &plug->pending);
- conf->pending_count += plug->pending_cnt;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_barrier);
md_wakeup_thread(mddev->thread);
@@ -1208,14 +1199,13 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
r10_bio->start_time = bio_start_io_acct(bio);
- read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
+ read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);
r10_bio->devs[slot].bio = read_bio;
r10_bio->devs[slot].rdev = rdev;
read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
choose_data_offset(r10_bio, rdev);
- bio_set_dev(read_bio, rdev->bdev);
read_bio->bi_end_io = raid10_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync);
if (test_bit(FailFast, &rdev->flags) &&
@@ -1239,7 +1229,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
unsigned long flags;
struct blk_plug_cb *cb;
- struct raid10_plug_cb *plug = NULL;
+ struct raid1_plug_cb *plug = NULL;
struct r10conf *conf = mddev->private;
struct md_rdev *rdev;
int devnum = r10_bio->devs[n_copy].devnum;
@@ -1255,7 +1245,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
} else
rdev = conf->mirrors[devnum].rdev;
- mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+ mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set);
if (replacement)
r10_bio->devs[n_copy].repl_bio = mbio;
else
@@ -1263,7 +1253,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
choose_data_offset(r10_bio, rdev));
- bio_set_dev(mbio, rdev->bdev);
mbio->bi_end_io = raid10_end_write_request;
bio_set_op_attrs(mbio, op, do_sync | do_fua);
if (!replacement && test_bit(FailFast,
@@ -1282,16 +1271,14 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
if (cb)
- plug = container_of(cb, struct raid10_plug_cb, cb);
+ plug = container_of(cb, struct raid1_plug_cb, cb);
else
plug = NULL;
if (plug) {
bio_list_add(&plug->pending, mbio);
- plug->pending_cnt++;
} else {
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
md_wakeup_thread(mddev->thread);
}
@@ -1812,7 +1799,8 @@ retry_discard:
*/
if (r10_bio->devs[disk].bio) {
struct md_rdev *rdev = conf->mirrors[disk].rdev;
- mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+ mbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
+ &mddev->bio_set);
mbio->bi_end_io = raid10_end_discard_request;
mbio->bi_private = r10_bio;
r10_bio->devs[disk].bio = mbio;
@@ -1825,7 +1813,8 @@ retry_discard:
}
if (r10_bio->devs[disk].repl_bio) {
struct md_rdev *rrdev = conf->mirrors[disk].replacement;
- rbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+ rbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
+ &mddev->bio_set);
rbio->bi_end_io = raid10_end_discard_request;
rbio->bi_private = r10_bio;
r10_bio->devs[disk].repl_bio = rbio;
@@ -2422,7 +2411,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
* bi_vecs, as the read request might have corrupted these
*/
rp = get_resync_pages(tbio);
- bio_reset(tbio);
+ bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE);
md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
@@ -2430,7 +2419,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
tbio->bi_private = rp;
tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
tbio->bi_end_io = end_sync_write;
- bio_set_op_attrs(tbio, REQ_OP_WRITE, 0);
bio_copy_data(tbio, fbio);
@@ -2441,7 +2429,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
tbio->bi_opf |= MD_FAILFAST;
tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
- bio_set_dev(tbio, conf->mirrors[d].rdev->bdev);
submit_bio_noacct(tbio);
}
@@ -2894,12 +2881,12 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
if (sectors > sect_to_write)
sectors = sect_to_write;
/* Write at 'sector' for 'sectors' */
- wbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+ wbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
+ &mddev->bio_set);
bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
wbio->bi_iter.bi_sector = wsector +
choose_data_offset(r10_bio, rdev);
- bio_set_dev(wbio, rdev->bdev);
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
if (submit_bio_wait(wbio) < 0)
@@ -3160,12 +3147,12 @@ static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
for (i = 0; i < nalloc; i++) {
bio = r10bio->devs[i].bio;
rp = bio->bi_private;
- bio_reset(bio);
+ bio_reset(bio, NULL, 0);
bio->bi_private = rp;
bio = r10bio->devs[i].repl_bio;
if (bio) {
rp = bio->bi_private;
- bio_reset(bio);
+ bio_reset(bio, NULL, 0);
bio->bi_private = rp;
}
}
@@ -4892,14 +4879,12 @@ read_more:
return sectors_done;
}
- read_bio = bio_alloc_bioset(GFP_KERNEL, RESYNC_PAGES, &mddev->bio_set);
-
- bio_set_dev(read_bio, rdev->bdev);
+ read_bio = bio_alloc_bioset(rdev->bdev, RESYNC_PAGES, REQ_OP_READ,
+ GFP_KERNEL, &mddev->bio_set);
read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+ rdev->data_offset);
read_bio->bi_private = r10_bio;
read_bio->bi_end_io = end_reshape_read;
- bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
r10_bio->master_bio = read_bio;
r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index c34bb196790e..5c0804d8bb1f 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -75,7 +75,6 @@ struct r10conf {
/* queue pending writes and submit them on unplug */
struct bio_list pending_bio_list;
- int pending_count;
spinlock_t resync_lock;
atomic_t nr_pending;
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 0b5dcaabbc15..a7d50ff9020a 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -735,10 +735,9 @@ static void r5l_submit_current_io(struct r5l_log *log)
static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
- struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_VECS, &log->bs);
+ struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_VECS,
+ REQ_OP_WRITE, GFP_NOIO, &log->bs);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- bio_set_dev(bio, log->rdev->bdev);
bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
return bio;
@@ -1267,6 +1266,8 @@ static void r5l_log_flush_endio(struct bio *bio)
r5l_io_run_stripes(io);
list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
spin_unlock_irqrestore(&log->io_list_lock, flags);
+
+ bio_uninit(bio);
}
/*
@@ -1302,10 +1303,9 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
if (!do_flush)
return;
- bio_reset(&log->flush_bio);
- bio_set_dev(&log->flush_bio, log->rdev->bdev);
+ bio_init(&log->flush_bio, log->rdev->bdev, NULL, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH);
log->flush_bio.bi_end_io = r5l_log_flush_endio;
- log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
submit_bio(&log->flush_bio);
}
@@ -1623,10 +1623,10 @@ struct r5l_recovery_ctx {
* just copy data from the pool.
*/
struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
+ struct bio_vec ra_bvec[R5L_RECOVERY_PAGE_POOL_SIZE];
sector_t pool_offset; /* offset of first page in the pool */
int total_pages; /* total allocated pages */
int valid_pages; /* pages with valid data */
- struct bio *ra_bio; /* bio to do the read ahead */
};
static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
@@ -1634,10 +1634,6 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
{
struct page *page;
- ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_VECS, &log->bs);
- if (!ctx->ra_bio)
- return -ENOMEM;
-
ctx->valid_pages = 0;
ctx->total_pages = 0;
while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
@@ -1649,10 +1645,8 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
ctx->total_pages += 1;
}
- if (ctx->total_pages == 0) {
- bio_put(ctx->ra_bio);
+ if (ctx->total_pages == 0)
return -ENOMEM;
- }
ctx->pool_offset = 0;
return 0;
@@ -1665,7 +1659,6 @@ static void r5l_recovery_free_ra_pool(struct r5l_log *log,
for (i = 0; i < ctx->total_pages; ++i)
put_page(ctx->ra_pool[i]);
- bio_put(ctx->ra_bio);
}
/*
@@ -1678,17 +1671,19 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
struct r5l_recovery_ctx *ctx,
sector_t offset)
{
- bio_reset(ctx->ra_bio);
- bio_set_dev(ctx->ra_bio, log->rdev->bdev);
- bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0);
- ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;
+ struct bio bio;
+ int ret;
+
+ bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
+ R5L_RECOVERY_PAGE_POOL_SIZE, REQ_OP_READ);
+ bio.bi_iter.bi_sector = log->rdev->data_offset + offset;
ctx->valid_pages = 0;
ctx->pool_offset = offset;
while (ctx->valid_pages < ctx->total_pages) {
- bio_add_page(ctx->ra_bio,
- ctx->ra_pool[ctx->valid_pages], PAGE_SIZE, 0);
+ __bio_add_page(&bio, ctx->ra_pool[ctx->valid_pages], PAGE_SIZE,
+ 0);
ctx->valid_pages += 1;
offset = r5l_ring_add(log, offset, BLOCK_SECTORS);
@@ -1697,7 +1692,9 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
break;
}
- return submit_bio_wait(ctx->ra_bio);
+ ret = submit_bio_wait(&bio);
+ bio_uninit(&bio);
+ return ret;
}
/*
@@ -3108,7 +3105,6 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
INIT_LIST_HEAD(&log->io_end_ios);
INIT_LIST_HEAD(&log->flushing_ios);
INIT_LIST_HEAD(&log->finished_ios);
- bio_init(&log->flush_bio, NULL, 0);
log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
if (!log->io_kc)
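
Note: the recovery read-ahead moves from a preallocated long-lived bio to an on-stack bio over a caller-owned bvec array. Synchronous on-stack I/O follows the bio_init() / submit_bio_wait() / bio_uninit() pattern; a single-page read as a sketch (names illustrative):

    struct bio bio;
    struct bio_vec bvec;
    int ret;

    bio_init(&bio, bdev, &bvec, 1, REQ_OP_READ);
    bio.bi_iter.bi_sector = sector;
    __bio_add_page(&bio, page, PAGE_SIZE, 0);
    ret = submit_bio_wait(&bio);
    bio_uninit(&bio); /* releases blkcg/crypt/integrity state, if any */
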
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 4ab417915d7f..ea4cd8dd4dc3 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -250,7 +250,8 @@ static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
INIT_LIST_HEAD(&io->stripe_list);
atomic_set(&io->pending_stripes, 0);
atomic_set(&io->pending_flushes, 0);
- bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);
+ bio_init(&io->bio, log->rdev->bdev, io->biovec, PPL_IO_INLINE_BVECS,
+ REQ_OP_WRITE | REQ_FUA);
pplhdr = page_address(io->header_page);
clear_page(pplhdr);
@@ -416,12 +417,10 @@ static void ppl_log_endio(struct bio *bio)
static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
{
- char b[BDEVNAME_SIZE];
-
- pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n",
+ pr_debug("%s: seq: %llu size: %u sector: %llu dev: %pg\n",
__func__, io->seq, bio->bi_iter.bi_size,
(unsigned long long)bio->bi_iter.bi_sector,
- bio_devname(bio, b));
+ bio->bi_bdev);
submit_bio(bio);
}
@@ -465,8 +464,6 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
bio->bi_end_io = ppl_log_endio;
- bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
- bio_set_dev(bio, log->rdev->bdev);
bio->bi_iter.bi_sector = log->next_io_sector;
bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
bio->bi_write_hint = ppl_conf->write_hint;
@@ -496,11 +493,10 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
struct bio *prev = bio;
- bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_VECS,
+ bio = bio_alloc_bioset(prev->bi_bdev, BIO_MAX_VECS,
+ prev->bi_opf, GFP_NOIO,
&ppl_conf->bs);
- bio->bi_opf = prev->bi_opf;
bio->bi_write_hint = prev->bi_write_hint;
- bio_copy_dev(bio, prev);
bio->bi_iter.bi_sector = bio_end_sector(prev);
bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
@@ -590,9 +586,8 @@ static void ppl_flush_endio(struct bio *bio)
struct ppl_log *log = io->log;
struct ppl_conf *ppl_conf = log->ppl_conf;
struct r5conf *conf = ppl_conf->mddev->private;
- char b[BDEVNAME_SIZE];
- pr_debug("%s: dev: %s\n", __func__, bio_devname(bio, b));
+ pr_debug("%s: dev: %pg\n", __func__, bio->bi_bdev);
if (bio->bi_status) {
struct md_rdev *rdev;
@@ -635,16 +630,14 @@ static void ppl_do_flush(struct ppl_io_unit *io)
if (bdev) {
struct bio *bio;
- char b[BDEVNAME_SIZE];
- bio = bio_alloc_bioset(GFP_NOIO, 0, &ppl_conf->flush_bs);
- bio_set_dev(bio, bdev);
+ bio = bio_alloc_bioset(bdev, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH,
+ GFP_NOIO, &ppl_conf->flush_bs);
bio->bi_private = io;
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
bio->bi_end_io = ppl_flush_endio;
- pr_debug("%s: dev: %s\n", __func__,
- bio_devname(bio, b));
+ pr_debug("%s: dev: %ps\n", __func__, bio->bi_bdev);
submit_bio(bio);
flushed_disks++;
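Like bio_init(), bio_alloc_bioset() now takes the target device, vector count and op/flags at allocation time, which is what lets the bio_set_dev() and bi_opf assignments above be deleted. A sketch of the call, following the argument order visible in the ppl_submit_iounit hunk (the helper is illustrative):

#include <linux/bio.h>

static struct bio *alloc_flush_bio(struct block_device *bdev,
				   struct bio_set *bs)
{
	/* nr_vecs is 0: a pure flush carries no data pages */
	return bio_alloc_bioset(bdev, 0, REQ_OP_WRITE | REQ_PREFLUSH,
				GFP_NOIO, bs);
}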
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ffe720c73b0a..8bd5f06390ea 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1060,6 +1060,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
int i, disks = sh->disks;
struct stripe_head *head_sh = sh;
struct bio_list pending_bios = BIO_EMPTY_LIST;
+ struct r5dev *dev;
bool should_defer;
might_sleep();
@@ -1094,8 +1095,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
op_flags |= REQ_SYNC;
again:
- bi = &sh->dev[i].req;
- rbi = &sh->dev[i].rreq; /* For writing to replacement */
+ dev = &sh->dev[i];
+ bi = &dev->req;
+ rbi = &dev->rreq; /* For writing to replacement */
rcu_read_lock();
rrdev = rcu_dereference(conf->disks[i].replacement);
@@ -1171,8 +1173,7 @@ again:
set_bit(STRIPE_IO_STARTED, &sh->state);
- bio_set_dev(bi, rdev->bdev);
- bio_set_op_attrs(bi, op, op_flags);
+ bio_init(bi, rdev->bdev, &dev->vec, 1, op | op_flags);
bi->bi_end_io = op_is_write(op)
? raid5_end_write_request
: raid5_end_read_request;
@@ -1238,8 +1239,7 @@ again:
set_bit(STRIPE_IO_STARTED, &sh->state);
- bio_set_dev(rbi, rrdev->bdev);
- bio_set_op_attrs(rbi, op, op_flags);
+ bio_init(rbi, rrdev->bdev, &dev->rvec, 1, op | op_flags);
BUG_ON(!op_is_write(op));
rbi->bi_end_io = raid5_end_write_request;
rbi->bi_private = sh;
@@ -2294,7 +2294,6 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
int disks, struct r5conf *conf)
{
struct stripe_head *sh;
- int i;
sh = kmem_cache_zalloc(sc, gfp);
if (sh) {
@@ -2307,12 +2306,6 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
atomic_set(&sh->count, 1);
sh->raid_conf = conf;
sh->log_start = MaxSector;
- for (i = 0; i < disks; i++) {
- struct r5dev *dev = &sh->dev[i];
-
- bio_init(&dev->req, &dev->vec, 1);
- bio_init(&dev->rreq, &dev->rvec, 1);
- }
if (raid5_has_ppl(conf)) {
sh->ppl_page = alloc_page(gfp);
@@ -2677,7 +2670,6 @@ static void raid5_end_read_request(struct bio * bi)
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
bi->bi_status);
if (i == disks) {
- bio_reset(bi);
BUG();
return;
}
@@ -2785,7 +2777,7 @@ static void raid5_end_read_request(struct bio * bi)
}
}
rdev_dec_pending(rdev, conf->mddev);
- bio_reset(bi);
+ bio_uninit(bi);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
raid5_release_stripe(sh);
@@ -2823,7 +2815,6 @@ static void raid5_end_write_request(struct bio *bi)
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
bi->bi_status);
if (i == disks) {
- bio_reset(bi);
BUG();
return;
}
@@ -2860,7 +2851,7 @@ static void raid5_end_write_request(struct bio *bi)
if (sh->batch_head && bi->bi_status && !replacement)
set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
- bio_reset(bi);
+ bio_uninit(bi);
if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
@@ -5438,14 +5429,14 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
return 0;
}
- align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->io_acct_set);
+ align_bio = bio_alloc_clone(rdev->bdev, raid_bio, GFP_NOIO,
+ &mddev->io_acct_set);
md_io_acct = container_of(align_bio, struct md_io_acct, bio_clone);
raid_bio->bi_next = (void *)rdev;
if (blk_queue_io_stat(raid_bio->bi_bdev->bd_disk->queue))
md_io_acct->start_time = bio_start_io_acct(raid_bio);
md_io_acct->orig_bio = raid_bio;
- bio_set_dev(align_bio, rdev->bdev);
align_bio->bi_end_io = raid5_align_endio;
align_bio->bi_private = md_io_acct;
align_bio->bi_iter.bi_sector = sector;
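bio_alloc_clone() replaces bio_clone_fast() with the same pattern: the destination device is supplied up front, so the follow-up bio_set_dev() goes away. A sketch under that assumption (the helper name is illustrative):

#include <linux/bio.h>

static struct bio *clone_to_rdev(struct block_device *bdev, struct bio *src,
				 struct bio_set *bs)
{
	/* the clone inherits src's op/flags but is aimed at bdev */
	return bio_alloc_clone(bdev, src, GFP_NOIO, bs);
}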
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index f3f24c63536b..ba6592b3dab2 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -160,6 +160,9 @@ menu "Media core support"
config VIDEO_DEV
tristate "Video4Linux core"
default MEDIA_CAMERA_SUPPORT || MEDIA_ANALOG_TV_SUPPORT || MEDIA_RADIO_SUPPORT || MEDIA_SDR_SUPPORT || MEDIA_PLATFORM_SUPPORT || MEDIA_TEST_SUPPORT
+ depends on (I2C || I2C=n)
+ select RATIONAL
+ select VIDEOBUF2_V4L2 if VIDEOBUF2_CORE
help
Enables the V4L2 API, used by cameras, analog TV, video grabbers,
radio devices and by some input devices.
@@ -216,13 +219,12 @@ menu "Media drivers"
comment "Drivers filtered as selected at 'Filter media drivers'"
depends on MEDIA_SUPPORT_FILTER
+comment "Media drivers"
+
source "drivers/media/usb/Kconfig"
source "drivers/media/pci/Kconfig"
source "drivers/media/radio/Kconfig"
-# Common driver options
-source "drivers/media/common/Kconfig"
-
if MEDIA_PLATFORM_SUPPORT
source "drivers/media/platform/Kconfig"
source "drivers/media/mmc/Kconfig"
@@ -234,6 +236,9 @@ endif
source "drivers/media/firewire/Kconfig"
+# Common driver options
+source "drivers/media/common/Kconfig"
+
endmenu
#
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index d18357bf1346..20fac24e4f0f 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -8,7 +8,7 @@
# when compiled as builtin drivers
#
obj-y += i2c/ tuners/
-obj-$(CONFIG_DVB_CORE) += dvb-frontends/
+obj-$(CONFIG_DVB_CORE) += dvb-frontends/
#
# Now, let's link-in the media controller core
@@ -18,7 +18,7 @@ ifeq ($(CONFIG_MEDIA_CONTROLLER),y)
endif
obj-$(CONFIG_VIDEO_DEV) += v4l2-core/
-obj-$(CONFIG_DVB_CORE) += dvb-core/
+obj-$(CONFIG_DVB_CORE) += dvb-core/
# There are both core and drivers at RC subtree - merge before drivers
obj-y += rc/
diff --git a/drivers/media/cec/platform/Makefile b/drivers/media/cec/platform/Makefile
index ea6f8ee8161c..26d2bc778394 100644
--- a/drivers/media/cec/platform/Makefile
+++ b/drivers/media/cec/platform/Makefile
@@ -4,12 +4,12 @@
#
# Please keep it in alphabetic order
-obj-$(CONFIG_CEC_CROS_EC) += cros-ec/
-obj-$(CONFIG_CEC_GPIO) += cec-gpio/
-obj-$(CONFIG_CEC_MESON_AO) += meson/
-obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p/
-obj-$(CONFIG_CEC_SECO) += seco/
-obj-$(CONFIG_CEC_STI) += sti/
-obj-$(CONFIG_CEC_STM32) += stm32/
-obj-$(CONFIG_CEC_TEGRA) += tegra/
+obj-$(CONFIG_CEC_CROS_EC) += cros-ec/
+obj-$(CONFIG_CEC_GPIO) += cec-gpio/
+obj-$(CONFIG_CEC_MESON_AO) += meson/
+obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p/
+obj-$(CONFIG_CEC_SECO) += seco/
+obj-$(CONFIG_CEC_STI) += sti/
+obj-$(CONFIG_CEC_STM32) += stm32/
+obj-$(CONFIG_CEC_TEGRA) += tegra/
diff --git a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
index 2d95e16cd248..8c8d8fc5e63e 100644
--- a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
+++ b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
@@ -215,6 +215,8 @@ struct cec_dmi_match {
static const struct cec_dmi_match cec_dmi_match_table[] = {
/* Google Fizz */
{ "Google", "Fizz", "0000:00:02.0", "Port B" },
+ /* Google Brask */
+ { "Google", "Brask", "0000:00:02.0", "Port B" },
};
static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev,
diff --git a/drivers/media/cec/platform/seco/seco-cec.c b/drivers/media/cec/platform/seco/seco-cec.c
index ae138cc253fd..51a6fcfd077d 100644
--- a/drivers/media/cec/platform/seco/seco-cec.c
+++ b/drivers/media/cec/platform/seco/seco-cec.c
@@ -12,7 +12,6 @@
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/gpio/consumer.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -129,7 +128,7 @@ static int secocec_adap_enable(struct cec_adapter *adap, bool enable)
if (status)
goto err;
- dev_dbg(dev, "Device enabled");
+ dev_dbg(dev, "Device enabled\n");
} else {
/* Clear the status register */
status = smb_rd16(SECOCEC_STATUS_REG_1, &val);
@@ -141,7 +140,7 @@ static int secocec_adap_enable(struct cec_adapter *adap, bool enable)
~SECOCEC_ENABLE_REG_1_CEC &
~SECOCEC_ENABLE_REG_1_IR);
- dev_dbg(dev, "Device disabled");
+ dev_dbg(dev, "Device disabled\n");
}
return 0;
@@ -264,12 +263,12 @@ static void secocec_rx_done(struct cec_adapter *adap, u16 status_val)
if (status_val & SECOCEC_STATUS_RX_OVERFLOW_MASK) {
/* NOTE: Untested, it also might not be necessary */
- dev_warn(dev, "Received more than 16 bytes. Discarding");
+ dev_warn(dev, "Received more than 16 bytes. Discarding\n");
flag_overflow = true;
}
if (status_val & SECOCEC_STATUS_RX_ERROR_MASK) {
- dev_warn(dev, "Message received with errors. Discarding");
+ dev_warn(dev, "Message received with errors. Discarding\n");
status = -EIO;
goto rxerr;
}
@@ -390,12 +389,12 @@ static int secocec_ir_probe(void *priv)
if (status != 0)
goto err;
- dev_dbg(dev, "IR enabled");
+ dev_dbg(dev, "IR enabled\n");
status = devm_rc_register_device(dev, cec->ir);
if (status) {
- dev_err(dev, "Failed to prepare input device");
+ dev_err(dev, "Failed to prepare input device\n");
cec->ir = NULL;
goto err;
}
@@ -408,7 +407,7 @@ err:
smb_wr16(SECOCEC_ENABLE_REG_1,
val & ~SECOCEC_ENABLE_REG_1_IR);
- dev_dbg(dev, "IR disabled");
+ dev_dbg(dev, "IR disabled\n");
return status;
}
@@ -431,13 +430,13 @@ static int secocec_ir_rx(struct secocec_data *priv)
rc_keydown(cec->ir, RC_PROTO_RC5, RC_SCANCODE_RC5(addr, key), toggle);
- dev_dbg(dev, "IR key pressed: 0x%02x addr 0x%02x toggle 0x%02x", key,
+ dev_dbg(dev, "IR key pressed: 0x%02x addr 0x%02x toggle 0x%02x\n", key,
addr, toggle);
return 0;
err:
- dev_err(dev, "IR Receive message failed (%d)", status);
+ dev_err(dev, "IR Receive message failed (%d)\n", status);
return -EIO;
}
#else
@@ -497,7 +496,7 @@ static irqreturn_t secocec_irq_handler(int irq, void *priv)
return IRQ_HANDLED;
err:
- dev_err_once(dev, "IRQ: R/W SMBus operation failed (%d)", status);
+ dev_err_once(dev, "IRQ: R/W SMBus operation failed %d\n", status);
/* Reset status register */
val = SECOCEC_STATUS_REG_1_CEC | SECOCEC_STATUS_REG_1_IR;
@@ -551,18 +550,18 @@ static int secocec_acpi_probe(struct secocec_data *sdev)
struct gpio_desc *gpio;
int irq = 0;
- gpio = devm_gpiod_get(dev, NULL, GPIOF_IN);
+ gpio = devm_gpiod_get(dev, NULL, GPIOD_IN);
if (IS_ERR(gpio)) {
- dev_err(dev, "Cannot request interrupt gpio");
+ dev_err(dev, "Cannot request interrupt gpio\n");
return PTR_ERR(gpio);
}
irq = gpiod_to_irq(gpio);
if (irq < 0) {
- dev_err(dev, "Cannot find valid irq");
+ dev_err(dev, "Cannot find valid irq\n");
return -ENODEV;
}
- dev_dbg(dev, "irq-gpio is bound to IRQ %d", irq);
+ dev_dbg(dev, "irq-gpio is bound to IRQ %d\n", irq);
sdev->irq = irq;
@@ -590,7 +589,7 @@ static int secocec_probe(struct platform_device *pdev)
/* Request SMBus regions */
if (!request_muxed_region(BRA_SMB_BASE_ADDR, 7, "CEC00001")) {
- dev_err(dev, "Request memory region failed");
+ dev_err(dev, "Request memory region failed\n");
return -ENXIO;
}
@@ -598,14 +597,14 @@ static int secocec_probe(struct platform_device *pdev)
secocec->dev = dev;
if (!has_acpi_companion(dev)) {
- dev_dbg(dev, "Cannot find any ACPI companion");
+ dev_dbg(dev, "Cannot find any ACPI companion\n");
ret = -ENODEV;
goto err;
}
ret = secocec_acpi_probe(secocec);
if (ret) {
- dev_err(dev, "Cannot assign gpio to IRQ");
+ dev_err(dev, "Cannot assign gpio to IRQ\n");
ret = -ENODEV;
goto err;
}
@@ -613,11 +612,11 @@ static int secocec_probe(struct platform_device *pdev)
/* Firmware version check */
ret = smb_rd16(SECOCEC_VERSION, &val);
if (ret) {
- dev_err(dev, "Cannot check fw version");
+ dev_err(dev, "Cannot check fw version\n");
goto err;
}
if (val < SECOCEC_LATEST_FW) {
- dev_err(dev, "CEC Firmware not supported (v.%04x). Use ver > v.%04x",
+ dev_err(dev, "CEC Firmware not supported (v.%04x). Use ver > v.%04x\n",
val, SECOCEC_LATEST_FW);
ret = -EINVAL;
goto err;
@@ -631,7 +630,7 @@ static int secocec_probe(struct platform_device *pdev)
dev_name(&pdev->dev), secocec);
if (ret) {
- dev_err(dev, "Cannot request IRQ %d", secocec->irq);
+ dev_err(dev, "Cannot request IRQ %d\n", secocec->irq);
ret = -EIO;
goto err;
}
@@ -666,7 +665,7 @@ static int secocec_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, secocec);
- dev_dbg(dev, "Device registered");
+ dev_dbg(dev, "Device registered\n");
return ret;
@@ -691,14 +690,14 @@ static int secocec_remove(struct platform_device *pdev)
smb_wr16(SECOCEC_ENABLE_REG_1, val & ~SECOCEC_ENABLE_REG_1_IR);
- dev_dbg(&pdev->dev, "IR disabled");
+ dev_dbg(&pdev->dev, "IR disabled\n");
}
cec_notifier_cec_adap_unregister(secocec->notifier, secocec->cec_adap);
cec_unregister_adapter(secocec->cec_adap);
release_region(BRA_SMB_BASE_ADDR, 7);
- dev_dbg(&pdev->dev, "CEC device removed");
+ dev_dbg(&pdev->dev, "CEC device removed\n");
return 0;
}
@@ -709,7 +708,7 @@ static int secocec_suspend(struct device *dev)
int status;
u16 val;
- dev_dbg(dev, "Device going to suspend, disabling");
+ dev_dbg(dev, "Device going to suspend, disabling\n");
/* Clear the status register */
status = smb_rd16(SECOCEC_STATUS_REG_1, &val);
@@ -733,7 +732,7 @@ static int secocec_suspend(struct device *dev)
return 0;
err:
- dev_err(dev, "Suspend failed (err: %d)", status);
+ dev_err(dev, "Suspend failed: %d\n", status);
return status;
}
@@ -742,7 +741,7 @@ static int secocec_resume(struct device *dev)
int status;
u16 val;
- dev_dbg(dev, "Resuming device from suspend");
+ dev_dbg(dev, "Resuming device from suspend\n");
/* Clear the status register */
status = smb_rd16(SECOCEC_STATUS_REG_1, &val);
@@ -762,12 +761,12 @@ static int secocec_resume(struct device *dev)
if (status)
goto err;
- dev_dbg(dev, "Device resumed from suspend");
+ dev_dbg(dev, "Device resumed from suspend\n");
return 0;
err:
- dev_err(dev, "Resume failed (err: %d)", status);
+ dev_err(dev, "Resume failed: %d\n", status);
return status;
}
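Beyond the added trailing newlines, the seco-cec hunks fix a flags mix-up: devm_gpiod_get() takes a gpiod_flags value such as GPIOD_IN, whereas GPIOF_IN belongs to the legacy <linux/gpio.h> API whose include is dropped above. A sketch of the corrected lookup, assuming only the consumer API (the helper name is illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int irq_from_acpi_gpio(struct device *dev)
{
	struct gpio_desc *gpio;
	int irq;

	/* GPIOD_IN is the gpiod consumer flag; GPIOF_IN is legacy-API only */
	gpio = devm_gpiod_get(dev, NULL, GPIOD_IN);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);

	irq = gpiod_to_irq(gpio);
	return irq < 0 ? -ENODEV : irq;
}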
diff --git a/drivers/media/common/Kconfig b/drivers/media/common/Kconfig
index 0f6bde0f793e..a2ae71270054 100644
--- a/drivers/media/common/Kconfig
+++ b/drivers/media/common/Kconfig
@@ -6,23 +6,23 @@ config MEDIA_COMMON_OPTIONS
comment "common driver options"
depends on MEDIA_COMMON_OPTIONS
-config VIDEO_CX2341X
- tristate
-
-config VIDEO_TVEEPROM
+config CYPRESS_FIRMWARE
tristate
- depends on I2C
+ depends on USB
config TTPCI_EEPROM
tristate
depends on I2C
-config CYPRESS_FIRMWARE
+config VIDEO_CX2341X
tristate
- depends on USB
-source "drivers/media/common/videobuf2/Kconfig"
+config VIDEO_TVEEPROM
+ tristate
+ depends on I2C
+
source "drivers/media/common/b2c2/Kconfig"
source "drivers/media/common/saa7146/Kconfig"
source "drivers/media/common/siano/Kconfig"
source "drivers/media/common/v4l2-tpg/Kconfig"
+source "drivers/media/common/videobuf2/Kconfig"
diff --git a/drivers/media/common/Makefile b/drivers/media/common/Makefile
index 55b5a1900124..ad0b1e95fb12 100644
--- a/drivers/media/common/Makefile
+++ b/drivers/media/common/Makefile
@@ -1,6 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y += b2c2/ saa7146/ siano/ v4l2-tpg/ videobuf2/
-obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
-obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
+
+# Please keep it alphabetically sorted by Kconfig name
+# (e.g. LC_ALL=C sort Makefile)
obj-$(CONFIG_CYPRESS_FIRMWARE) += cypress_firmware.o
obj-$(CONFIG_TTPCI_EEPROM) += ttpci-eeprom.o
+obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
+obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
diff --git a/drivers/media/common/saa7146/Kconfig b/drivers/media/common/saa7146/Kconfig
index 3e85c0c3fd9a..a0aa155e5d85 100644
--- a/drivers/media/common/saa7146/Kconfig
+++ b/drivers/media/common/saa7146/Kconfig
@@ -5,6 +5,6 @@ config VIDEO_SAA7146
config VIDEO_SAA7146_VV
tristate
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
select VIDEOBUF_DMA_SG
select VIDEO_SAA7146
diff --git a/drivers/media/common/videobuf2/Makefile b/drivers/media/common/videobuf2/Makefile
index 54306f8d096c..a6fe3f304685 100644
--- a/drivers/media/common/videobuf2/Makefile
+++ b/drivers/media/common/videobuf2/Makefile
@@ -6,10 +6,12 @@ ifeq ($(CONFIG_TRACEPOINTS),y)
videobuf2-common-objs += vb2-trace.o
endif
+# Please keep it alphabetically sorted by Kconfig name
+# (e.g. LC_ALL=C sort Makefile)
obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-common.o
-obj-$(CONFIG_VIDEOBUF2_V4L2) += videobuf2-v4l2.o
-obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o
-obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o
obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o
obj-$(CONFIG_VIDEOBUF2_DMA_SG) += videobuf2-dma-sg.o
obj-$(CONFIG_VIDEOBUF2_DVB) += videobuf2-dvb.o
+obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o
+obj-$(CONFIG_VIDEOBUF2_V4L2) += videobuf2-v4l2.o
+obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index ecf065cd4a67..678b359717c4 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -132,12 +132,12 @@ static void vb2_dc_prepare(void *buf_priv)
if (!buf->non_coherent_mem)
return;
- /* For both USERPTR and non-coherent MMAP */
- dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
-
/* Non-coherent MMAP only */
if (buf->vaddr)
flush_kernel_vmap_range(buf->vaddr, buf->size);
+
+ /* For both USERPTR and non-coherent MMAP */
+ dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}
static void vb2_dc_finish(void *buf_priv)
@@ -152,12 +152,12 @@ static void vb2_dc_finish(void *buf_priv)
if (!buf->non_coherent_mem)
return;
- /* For both USERPTR and non-coherent MMAP */
- dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
-
/* Non-coherent MMAP only */
if (buf->vaddr)
invalidate_kernel_vmap_range(buf->vaddr, buf->size);
+
+ /* For both USERPTR and non-coherent MMAP */
+ dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}
/*********************************************/
diff --git a/drivers/media/dvb-core/Kconfig b/drivers/media/dvb-core/Kconfig
index 6ffac618417b..8b3f2d53cd62 100644
--- a/drivers/media/dvb-core/Kconfig
+++ b/drivers/media/dvb-core/Kconfig
@@ -6,7 +6,7 @@
config DVB_MMAP
bool "Enable DVB memory-mapped API (EXPERIMENTAL)"
depends on DVB_CORE
- depends on VIDEO_V4L2=y || VIDEO_V4L2=DVB_CORE
+ depends on VIDEO_DEV=y || VIDEO_DEV=DVB_CORE
select VIDEOBUF2_VMALLOC
help
This option enables DVB experimental memory-mapped API, which
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 2c1ed98d43c5..2ef2ff2a38ff 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -11,6 +11,23 @@ menu "Customise DVB Frontends"
comment "Multistandard (satellite) frontends"
depends on DVB_CORE
+config DVB_M88DS3103
+ tristate "Montage Technology M88DS3103"
+ depends on DVB_CORE && I2C && I2C_MUX
+ select REGMAP_I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y when you want to support this frontend.
+
+config DVB_MXL5XX
+ tristate "MaxLinear MxL5xx based tuner-demodulators"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ MaxLinear MxL5xx family of DVB-S/S2 tuners/demodulators.
+
+ Say Y when you want to support these frontends.
+
config DVB_STB0899
tristate "STB0899 based"
depends on DVB_CORE && I2C
@@ -60,23 +77,6 @@ config DVB_STV6111
Say Y when you want to support these frontends.
-config DVB_MXL5XX
- tristate "MaxLinear MxL5xx based tuner-demodulators"
- depends on DVB_CORE && I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- MaxLinear MxL5xx family of DVB-S/S2 tuners/demodulators.
-
- Say Y when you want to support these frontends.
-
-config DVB_M88DS3103
- tristate "Montage Technology M88DS3103"
- depends on DVB_CORE && I2C && I2C_MUX
- select REGMAP_I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- Say Y when you want to support this frontend.
-
comment "Multistandard (cable + terrestrial) frontends"
depends on DVB_CORE
@@ -89,40 +89,40 @@ config DVB_DRXK
Say Y when you want to support this frontend.
-config DVB_TDA18271C2DD
- tristate "NXP TDA18271C2 silicon tuner"
+config DVB_MN88472
+ tristate "Panasonic MN88472"
depends on DVB_CORE && I2C
+ select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- NXP TDA18271 silicon tuner.
-
- Say Y when you want to support this tuner.
+ Say Y when you want to support this frontend.
-config DVB_SI2165
- tristate "Silicon Labs si2165 based"
+config DVB_MN88473
+ tristate "Panasonic MN88473"
depends on DVB_CORE && I2C
select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A DVB-C/T demodulator.
-
Say Y when you want to support this frontend.
-config DVB_MN88472
- tristate "Panasonic MN88472"
+config DVB_SI2165
+ tristate "Silicon Labs si2165 based"
depends on DVB_CORE && I2C
select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
+ A DVB-C/T demodulator.
+
Say Y when you want to support this frontend.
-config DVB_MN88473
- tristate "Panasonic MN88473"
+config DVB_TDA18271C2DD
+ tristate "NXP TDA18271C2 silicon tuner"
depends on DVB_CORE && I2C
- select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Say Y when you want to support this frontend.
+ NXP TDA18271 silicon tuner.
+
+ Say Y when you want to support this tuner.
comment "DVB-S (satellite) frontends"
depends on DVB_CORE
@@ -134,6 +134,27 @@ config DVB_CX24110
help
A DVB-S tuner module. Say Y when you want to support this frontend.
+config DVB_CX24116
+ tristate "Conexant CX24116 based"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ A DVB-S/S2 tuner module. Say Y when you want to support this frontend.
+
+config DVB_CX24117
+ tristate "Conexant CX24117 based"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ A Dual DVB-S/S2 tuner module. Say Y when you want to support this frontend.
+
+config DVB_CX24120
+ tristate "Conexant CX24120 based"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ A DVB-S/S2 tuner module. Say Y when you want to support this frontend.
+
config DVB_CX24123
tristate "Conexant CX24123 based"
depends on DVB_CORE && I2C
@@ -141,22 +162,23 @@ config DVB_CX24123
help
A DVB-S tuner module. Say Y when you want to support this frontend.
-config DVB_MT312
- tristate "Zarlink VP310/MT312/ZL10313 based"
+config DVB_DS3000
+ tristate "Montage Tehnology DS3000 based"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A DVB-S tuner module. Say Y when you want to support this frontend.
+ A DVB-S/S2 tuner module. Say Y when you want to support this frontend.
-config DVB_ZL10036
- tristate "Zarlink ZL10036 silicon tuner"
+config DVB_MB86A16
+ tristate "Fujitsu MB86A16 based"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A DVB-S tuner module. Say Y when you want to support this frontend.
+ A DVB-S/DSS Direct Conversion receiver.
+ Say Y when you want to support this frontend.
-config DVB_ZL10039
- tristate "Zarlink ZL10039 silicon tuner"
+config DVB_MT312
+ tristate "Zarlink VP310/MT312/ZL10313 based"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
@@ -169,8 +191,8 @@ config DVB_S5H1420
help
A DVB-S tuner module. Say Y when you want to support this frontend.
-config DVB_STV0288
- tristate "ST STV0288 based"
+config DVB_SI21XX
+ tristate "Silicon Labs SI21XX based"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
@@ -183,19 +205,19 @@ config DVB_STB6000
help
A DVB-S silicon tuner module. Say Y when you want to support this tuner.
-config DVB_STV0299
- tristate "ST STV0299 based"
+config DVB_STV0288
+ tristate "ST STV0288 based"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
A DVB-S tuner module. Say Y when you want to support this frontend.
-config DVB_STV6110
- tristate "ST STV6110 silicon tuner"
+config DVB_STV0299
+ tristate "ST STV0299 based"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A DVB-S silicon tuner module. Say Y when you want to support this tuner.
+ A DVB-S tuner module. Say Y when you want to support this frontend.
config DVB_STV0900
tristate "ST STV0900 based"
@@ -204,49 +226,42 @@ config DVB_STV0900
help
A DVB-S/S2 demodulator. Say Y when you want to support this frontend.
-config DVB_TDA8083
- tristate "Philips TDA8083 based"
- depends on DVB_CORE && I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- A DVB-S tuner module. Say Y when you want to support this frontend.
-
-config DVB_TDA10086
- tristate "Philips TDA10086 based"
+config DVB_STV6110
+ tristate "ST STV6110 silicon tuner"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A DVB-S tuner module. Say Y when you want to support this frontend.
+ A DVB-S silicon tuner module. Say Y when you want to support this tuner.
-config DVB_TDA8261
- tristate "Philips TDA8261 based"
+config DVB_TDA10071
+ tristate "NXP TDA10071"
depends on DVB_CORE && I2C
+ select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A DVB-S tuner module. Say Y when you want to support this frontend.
+ Say Y when you want to support this frontend.
-config DVB_VES1X93
- tristate "VLSI VES1893 or VES1993 based"
+config DVB_TDA10086
+ tristate "Philips TDA10086 based"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
A DVB-S tuner module. Say Y when you want to support this frontend.
-config DVB_TUNER_ITD1000
- tristate "Integrant ITD1000 Zero IF tuner for DVB-S/DSS"
+config DVB_TDA8083
+ tristate "Philips TDA8083 based"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
A DVB-S tuner module. Say Y when you want to support this frontend.
-config DVB_TUNER_CX24113
- tristate "Conexant CX24113/CX24128 tuner for DVB-S/DSS"
+config DVB_TDA8261
+ tristate "Philips TDA8261 based"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
A DVB-S tuner module. Say Y when you want to support this frontend.
-
config DVB_TDA826X
tristate "Philips TDA826X silicon tuner"
depends on DVB_CORE && I2C
@@ -254,86 +269,71 @@ config DVB_TDA826X
help
A DVB-S silicon tuner module. Say Y when you want to support this tuner.
-config DVB_TUA6100
- tristate "Infineon TUA6100 PLL"
- depends on DVB_CORE && I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- A DVB-S PLL chip.
-
-config DVB_CX24116
- tristate "Conexant CX24116 based"
- depends on DVB_CORE && I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- A DVB-S/S2 tuner module. Say Y when you want to support this frontend.
-
-config DVB_CX24117
- tristate "Conexant CX24117 based"
+config DVB_TS2020
+ tristate "Montage Tehnology TS2020 based tuners"
depends on DVB_CORE && I2C
+ select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A Dual DVB-S/S2 tuner module. Say Y when you want to support this frontend.
+ A DVB-S/S2 silicon tuner. Say Y when you want to support this tuner.
-config DVB_CX24120
- tristate "Conexant CX24120 based"
+config DVB_TUA6100
+ tristate "Infineon TUA6100 PLL"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A DVB-S/S2 tuner module. Say Y when you want to support this frontend.
+ A DVB-S PLL chip.
-config DVB_SI21XX
- tristate "Silicon Labs SI21XX based"
+config DVB_TUNER_CX24113
+ tristate "Conexant CX24113/CX24128 tuner for DVB-S/DSS"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
A DVB-S tuner module. Say Y when you want to support this frontend.
-config DVB_TS2020
- tristate "Montage Tehnology TS2020 based tuners"
+config DVB_TUNER_ITD1000
+ tristate "Integrant ITD1000 Zero IF tuner for DVB-S/DSS"
depends on DVB_CORE && I2C
- select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A DVB-S/S2 silicon tuner. Say Y when you want to support this tuner.
+ A DVB-S tuner module. Say Y when you want to support this frontend.
-config DVB_DS3000
- tristate "Montage Tehnology DS3000 based"
+config DVB_VES1X93
+ tristate "VLSI VES1893 or VES1993 based"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A DVB-S/S2 tuner module. Say Y when you want to support this frontend.
+ A DVB-S tuner module. Say Y when you want to support this frontend.
-config DVB_MB86A16
- tristate "Fujitsu MB86A16 based"
+config DVB_ZL10036
+ tristate "Zarlink ZL10036 silicon tuner"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A DVB-S/DSS Direct Conversion reveiver.
- Say Y when you want to support this frontend.
+ A DVB-S tuner module. Say Y when you want to support this frontend.
-config DVB_TDA10071
- tristate "NXP TDA10071"
+config DVB_ZL10039
+ tristate "Zarlink ZL10039 silicon tuner"
depends on DVB_CORE && I2C
- select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Say Y when you want to support this frontend.
+ A DVB-S tuner module. Say Y when you want to support this frontend.
comment "DVB-T (terrestrial) frontends"
depends on DVB_CORE
-config DVB_SP887X
- tristate "Spase sp887x based"
- depends on DVB_CORE && I2C
+config DVB_AF9013
+ tristate "Afatech AF9013 demodulator"
+ depends on DVB_CORE && I2C && I2C_MUX
+ select REGMAP
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A DVB-T tuner module. Say Y when you want to support this frontend.
+ Say Y when you want to support this frontend.
- This driver needs external firmware. Please use the command
- "<kerneldir>/scripts/get_dvb_firmware sp887x" to
- download/extract it, and then copy it to /usr/lib/hotplug/firmware
- or /lib/firmware (depending on configuration of firmware hotplug).
+config DVB_AS102_FE
+ tristate
+ depends on DVB_CORE
+ default DVB_AS102
config DVB_CX22700
tristate "Conexant CX22700 based"
@@ -349,64 +349,20 @@ config DVB_CX22702
help
A DVB-T tuner module. Say Y when you want to support this frontend.
-config DVB_S5H1432
- tristate "Samsung s5h1432 demodulator (OFDM)"
- depends on DVB_CORE && I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- A DVB-T tuner module. Say Y when you want to support this frontend.
-
-config DVB_DRXD
- tristate "Micronas DRXD driver"
- depends on DVB_CORE && I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- A DVB-T tuner module. Say Y when you want to support this frontend.
-
- Note: this driver was based on vendor driver reference code (released
- under the GPL) as opposed to the existing drx397xd driver, which
- was written via reverse engineering.
-
-config DVB_L64781
- tristate "LSI L64781"
- depends on DVB_CORE && I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- A DVB-T tuner module. Say Y when you want to support this frontend.
-
-config DVB_TDA1004X
- tristate "Philips TDA10045H/TDA10046H based"
- depends on DVB_CORE && I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- A DVB-T tuner module. Say Y when you want to support this frontend.
-
- This driver needs external firmware. Please use the commands
- "<kerneldir>/scripts/get_dvb_firmware tda10045",
- "<kerneldir>/scripts/get_dvb_firmware tda10046" to
- download/extract them, and then copy them to /usr/lib/hotplug/firmware
- or /lib/firmware (depending on configuration of firmware hotplug).
-
-config DVB_NXT6000
- tristate "NxtWave Communications NXT6000 based"
- depends on DVB_CORE && I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- A DVB-T tuner module. Say Y when you want to support this frontend.
-
-config DVB_MT352
- tristate "Zarlink MT352 based"
+config DVB_CXD2820R
+ tristate "Sony CXD2820R"
depends on DVB_CORE && I2C
+ select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A DVB-T tuner module. Say Y when you want to support this frontend.
+ Say Y when you want to support this frontend.
-config DVB_ZL10353
- tristate "Zarlink ZL10353 based"
+config DVB_CXD2841ER
+ tristate "Sony CXD2841ER"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A DVB-T tuner module. Say Y when you want to support this frontend.
+ Say Y when you want to support this frontend.
config DVB_DIB3000MB
tristate "DiBcom 3000M-B"
@@ -448,20 +404,16 @@ config DVB_DIB9000
A DVB-T tuner module. Designed for mobile usage. Say Y when you want
to support this frontend.
-config DVB_TDA10048
- tristate "Philips TDA10048HN based"
+config DVB_DRXD
+ tristate "Micronas DRXD driver"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
A DVB-T tuner module. Say Y when you want to support this frontend.
-config DVB_AF9013
- tristate "Afatech AF9013 demodulator"
- depends on DVB_CORE && I2C && I2C_MUX
- select REGMAP
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- Say Y when you want to support this frontend.
+ Note: this driver was based on vendor driver reference code (released
+ under the GPL) as opposed to the existing drx397xd driver, which
+ was written via reverse engineering.
config DVB_EC100
tristate "E3C EC100"
@@ -470,27 +422,31 @@ config DVB_EC100
help
Say Y when you want to support this frontend.
-config DVB_STV0367
- tristate "ST STV0367 based"
+config DVB_GP8PSK_FE
+ tristate
+ depends on DVB_CORE
+ default DVB_USB_GP8PSK
+
+config DVB_L64781
+ tristate "LSI L64781"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A DVB-T/C tuner module. Say Y when you want to support this frontend.
+ A DVB-T tuner module. Say Y when you want to support this frontend.
-config DVB_CXD2820R
- tristate "Sony CXD2820R"
+config DVB_MT352
+ tristate "Zarlink MT352 based"
depends on DVB_CORE && I2C
- select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Say Y when you want to support this frontend.
+ A DVB-T tuner module. Say Y when you want to support this frontend.
-config DVB_CXD2841ER
- tristate "Sony CXD2841ER"
+config DVB_NXT6000
+ tristate "NxtWave Communications NXT6000 based"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Say Y when you want to support this frontend.
+ A DVB-T tuner module. Say Y when you want to support this frontend.
config DVB_RTL2830
tristate "Realtek RTL2830 DVB-T"
@@ -510,13 +466,20 @@ config DVB_RTL2832
config DVB_RTL2832_SDR
tristate "Realtek RTL2832 SDR"
- depends on DVB_CORE && I2C && I2C_MUX && VIDEO_V4L2 && MEDIA_SDR_SUPPORT && USB
+ depends on DVB_CORE && I2C && I2C_MUX && VIDEO_DEV && MEDIA_SDR_SUPPORT && USB
select DVB_RTL2832
select VIDEOBUF2_VMALLOC
default m if !MEDIA_SUBDRV_AUTOSELECT
help
Say Y when you want to support this SDR module.
+config DVB_S5H1432
+ tristate "Samsung s5h1432 demodulator (OFDM)"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ A DVB-T tuner module. Say Y when you want to support this frontend.
+
config DVB_SI2168
tristate "Silicon Labs Si2168"
depends on DVB_CORE && I2C && I2C_MUX
@@ -524,10 +487,44 @@ config DVB_SI2168
help
Say Y when you want to support this frontend.
-config DVB_AS102_FE
- tristate
- depends on DVB_CORE
- default DVB_AS102
+config DVB_SP887X
+ tristate "Spase sp887x based"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ A DVB-T tuner module. Say Y when you want to support this frontend.
+
+ This driver needs external firmware. Please use the command
+ "<kerneldir>/scripts/get_dvb_firmware sp887x" to
+ download/extract it, and then copy it to /usr/lib/hotplug/firmware
+ or /lib/firmware (depending on configuration of firmware hotplug).
+
+config DVB_STV0367
+ tristate "ST STV0367 based"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ A DVB-T/C tuner module. Say Y when you want to support this frontend.
+
+config DVB_TDA10048
+ tristate "Philips TDA10048HN based"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ A DVB-T tuner module. Say Y when you want to support this frontend.
+
+config DVB_TDA1004X
+ tristate "Philips TDA10045H/TDA10046H based"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ A DVB-T tuner module. Say Y when you want to support this frontend.
+
+ This driver needs external firmware. Please use the commands
+ "<kerneldir>/scripts/get_dvb_firmware tda10045",
+ "<kerneldir>/scripts/get_dvb_firmware tda10046" to
+ download/extract them, and then copy them to /usr/lib/hotplug/firmware
+ or /lib/firmware (depending on configuration of firmware hotplug).
config DVB_ZD1301_DEMOD
tristate "ZyDAS ZD1301"
@@ -536,18 +533,20 @@ config DVB_ZD1301_DEMOD
help
Say Y when you want to support this frontend.
-config DVB_GP8PSK_FE
- tristate
- depends on DVB_CORE
- default DVB_USB_GP8PSK
+config DVB_ZL10353
+ tristate "Zarlink ZL10353 based"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ A DVB-T tuner module. Say Y when you want to support this frontend.
source "drivers/media/dvb-frontends/cxd2880/Kconfig"
comment "DVB-C (cable) frontends"
depends on DVB_CORE
-config DVB_VES1820
- tristate "VLSI VES1820 based"
+config DVB_STV0297
+ tristate "ST STV0297 based"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
@@ -567,8 +566,8 @@ config DVB_TDA10023
help
A DVB-C tuner module. Say Y when you want to support this frontend.
-config DVB_STV0297
- tristate "ST STV0297 based"
+config DVB_VES1820
+ tristate "VLSI VES1820 based"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
@@ -577,46 +576,27 @@ config DVB_STV0297
comment "ATSC (North American/Korean Terrestrial/Cable DTV) frontends"
depends on DVB_CORE
-config DVB_NXT200X
- tristate "NxtWave Communications NXT2002/NXT2004 based"
+config DVB_AU8522
depends on DVB_CORE && I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
- to support this frontend.
-
- This driver needs external firmware. Please use the commands
- "<kerneldir>/scripts/get_dvb_firmware nxt2002" and
- "<kerneldir>/scripts/get_dvb_firmware nxt2004" to
- download/extract them, and then copy them to /usr/lib/hotplug/firmware
- or /lib/firmware (depending on configuration of firmware hotplug).
+ tristate
-config DVB_OR51211
- tristate "Oren OR51211 based"
+config DVB_AU8522_DTV
+ tristate "Auvitek AU8522 based DTV demod"
depends on DVB_CORE && I2C
+ select DVB_AU8522
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- An ATSC 8VSB tuner module. Say Y when you want to support this frontend.
-
- This driver needs external firmware. Please use the command
- "<kerneldir>/scripts/get_dvb_firmware or51211" to
- download it, and then copy it to /usr/lib/hotplug/firmware
- or /lib/firmware (depending on configuration of firmware hotplug).
+ An ATSC 8VSB, QAM64/256 & NTSC demodulator module. Say Y when
+ you want to enable DTV demodulation support for this frontend.
-config DVB_OR51132
- tristate "Oren OR51132 based"
- depends on DVB_CORE && I2C
+config DVB_AU8522_V4L
+ tristate "Auvitek AU8522 based ATV demod"
+ depends on VIDEO_DEV && DVB_CORE && I2C
+ select DVB_AU8522
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
- to support this frontend.
-
- This driver needs external firmware. Please use the commands
- "<kerneldir>/scripts/get_dvb_firmware or51132_vsb" and/or
- "<kerneldir>/scripts/get_dvb_firmware or51132_qam" to
- download firmwares for 8VSB and QAM64/256, respectively. Copy them to
- /usr/lib/hotplug/firmware or /lib/firmware (depending on
- configuration of firmware hotplug).
+ An ATSC 8VSB, QAM64/256 & NTSC demodulator module. Say Y when
+ you want to enable ATV demodulation support for this frontend.
config DVB_BCM3510
tristate "Broadcom BCM3510"
@@ -626,12 +606,12 @@ config DVB_BCM3510
An ATSC 8VSB/16VSB and QAM64/256 tuner module. Say Y when you want to
support this frontend.
-config DVB_LGDT330X
- tristate "LG Electronics LGDT3302/LGDT3303 based"
+config DVB_LG2160
+ tristate "LG Electronics LG216x based"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
+ An ATSC/MH demodulator module. Say Y when you want
to support this frontend.
config DVB_LGDT3305
@@ -650,72 +630,83 @@ config DVB_LGDT3306A
An ATSC 8VSB and QAM-B 64/256 demodulator module. Say Y when you want
to support this frontend.
-config DVB_LG2160
- tristate "LG Electronics LG216x based"
+config DVB_LGDT330X
+ tristate "LG Electronics LGDT3302/LGDT3303 based"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- An ATSC/MH demodulator module. Say Y when you want
+ An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
to support this frontend.
-config DVB_S5H1409
- tristate "Samsung S5H1409 based"
+config DVB_MXL692
+ tristate "MaxLinear MXL692 based"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ MaxLinear MxL692 is a combo tuner-demodulator that
+ supports ATSC 8VSB and QAM modes. Say Y when you want to
+ support this frontend.
+
+config DVB_NXT200X
+ tristate "NxtWave Communications NXT2002/NXT2004 based"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
to support this frontend.
-config DVB_AU8522
- depends on DVB_CORE && I2C
- tristate
+ This driver needs external firmware. Please use the commands
+ "<kerneldir>/scripts/get_dvb_firmware nxt2002" and
+ "<kerneldir>/scripts/get_dvb_firmware nxt2004" to
+ download/extract them, and then copy them to /usr/lib/hotplug/firmware
+ or /lib/firmware (depending on configuration of firmware hotplug).
-config DVB_AU8522_DTV
- tristate "Auvitek AU8522 based DTV demod"
+config DVB_OR51132
+ tristate "Oren OR51132 based"
depends on DVB_CORE && I2C
- select DVB_AU8522
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- An ATSC 8VSB, QAM64/256 & NTSC demodulator module. Say Y when
- you want to enable DTV demodulation support for this frontend.
+ An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
+ to support this frontend.
-config DVB_AU8522_V4L
- tristate "Auvitek AU8522 based ATV demod"
- depends on VIDEO_V4L2 && DVB_CORE && I2C
- select DVB_AU8522
+ This driver needs external firmware. Please use the commands
+ "<kerneldir>/scripts/get_dvb_firmware or51132_vsb" and/or
+ "<kerneldir>/scripts/get_dvb_firmware or51132_qam" to
+ download firmwares for 8VSB and QAM64/256, respectively. Copy them to
+ /usr/lib/hotplug/firmware or /lib/firmware (depending on
+ configuration of firmware hotplug).
+
+config DVB_OR51211
+ tristate "Oren OR51211 based"
+ depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- An ATSC 8VSB, QAM64/256 & NTSC demodulator module. Say Y when
- you want to enable ATV demodulation support for this frontend.
+ An ATSC 8VSB tuner module. Say Y when you want to support this frontend.
-config DVB_S5H1411
- tristate "Samsung S5H1411 based"
+ This driver needs external firmware. Please use the command
+ "<kerneldir>/scripts/get_dvb_firmware or51211" to
+ download it, and then copy it to /usr/lib/hotplug/firmware
+ or /lib/firmware (depending on configuration of firmware hotplug).
+
+config DVB_S5H1409
+ tristate "Samsung S5H1409 based"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
to support this frontend.
-config DVB_MXL692
- tristate "MaxLinear MXL692 based"
+config DVB_S5H1411
+ tristate "Samsung S5H1411 based"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- MaxLinear MxL692 is a combo tuner-demodulator that
- supports ATSC 8VSB and QAM modes. Say Y when you want to
- support this frontend.
+ An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
+ to support this frontend.
comment "ISDB-T (terrestrial) frontends"
depends on DVB_CORE
-config DVB_S921
- tristate "Sharp S921 frontend"
- depends on DVB_CORE && I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- AN ISDB-T DQPSK, QPSK, 16QAM and 64QAM 1seg tuner module.
- Say Y when you want to support this frontend.
-
config DVB_DIB8000
tristate "DiBcom 8000MB/MC"
depends on DVB_CORE && I2C
@@ -732,17 +723,17 @@ config DVB_MB86A20S
A driver for Fujitsu mb86a20s ISDB-T/ISDB-Tsb demodulator.
Say Y when you want to support this frontend.
-comment "ISDB-S (satellite) & ISDB-T (terrestrial) frontends"
- depends on DVB_CORE
-
-config DVB_TC90522
- tristate "Toshiba TC90522"
+config DVB_S921
+ tristate "Sharp S921 frontend"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Toshiba TC90522 2xISDB-S 8PSK + 2xISDB-T OFDM demodulator.
+ An ISDB-T DQPSK, QPSK, 16QAM and 64QAM 1seg tuner module.
Say Y when you want to support this frontend.
+comment "ISDB-S (satellite) & ISDB-T (terrestrial) frontends"
+ depends on DVB_CORE
+
config DVB_MN88443X
tristate "Socionext MN88443x"
depends on DVB_CORE && I2C
@@ -753,6 +744,14 @@ config DVB_MN88443X
ISDB-S + ISDB-T demodulator.
Say Y when you want to support this frontend.
+config DVB_TC90522
+ tristate "Toshiba TC90522"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ Toshiba TC90522 2xISDB-S 8PSK + 2xISDB-T OFDM demodulator.
+ Say Y when you want to support this frontend.
+
comment "Digital terrestrial only tuners/PLL"
depends on DVB_CORE
@@ -785,42 +784,44 @@ config DVB_TUNER_DIB0090
comment "SEC control devices for DVB-S"
depends on DVB_CORE
-source "drivers/media/dvb-frontends/drx39xyj/Kconfig"
+config DVB_A8293
+ tristate "Allegro A8293"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
-config DVB_LNBH25
- tristate "LNBH25 SEC controller"
+config DVB_AF9033
+ tristate "Afatech AF9033 DVB-T demodulator"
+ depends on DVB_CORE && I2C
+ select REGMAP_I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+
+config DVB_ASCOT2E
+ tristate "Sony Ascot2E tuner"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- An SEC control chip.
- Say Y when you want to support this chip.
+ Say Y when you want to support this frontend.
-config DVB_LNBH29
- tristate "LNBH29 SEC controller"
+config DVB_ATBM8830
+ tristate "AltoBeam ATBM8830/8831 DMB-TH demodulator"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- LNB power supply and control voltage
- regulator chip with step-up converter
- and I2C interface for STMicroelectronics LNBH29.
- Say Y when you want to support this chip.
+ A DMB-TH tuner module. Say Y when you want to support this frontend.
-config DVB_LNBP21
- tristate "LNBP21/LNBH24 SEC controllers"
+config DVB_HELENE
+ tristate "Sony HELENE Sat/Ter tuner (CXD2858ER)"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- An SEC control chips.
+ Say Y when you want to support this frontend.
-config DVB_LNBP22
- tristate "LNBP22 SEC controllers"
+config DVB_HORUS3A
+ tristate "Sony Horus3A tuner"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- LNB power supply and control voltage
- regulator chip with step-up converter
- and I2C interface.
- Say Y when you want to support this chip.
+ Say Y when you want to support this frontend.
config DVB_ISL6405
tristate "ISL6405 SEC controller"
@@ -843,10 +844,12 @@ config DVB_ISL6423
help
A SEC controller chip from Intersil
-config DVB_A8293
- tristate "Allegro A8293"
+config DVB_IX2505V
+ tristate "Sharp IX2505V silicon tuner"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ A DVB-S tuner module. Say Y when you want to support this frontend.
config DVB_LGS8GL5
tristate "Silicon Legend LGS-8GL5 demodulator (OFDM)"
@@ -863,30 +866,40 @@ config DVB_LGS8GXX
help
A DMB-TH tuner module. Say Y when you want to support this frontend.
-config DVB_ATBM8830
- tristate "AltoBeam ATBM8830/8831 DMB-TH demodulator"
+config DVB_LNBH25
+ tristate "LNBH25 SEC controller"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A DMB-TH tuner module. Say Y when you want to support this frontend.
+ An SEC control chip.
+ Say Y when you want to support this chip.
-config DVB_TDA665x
- tristate "TDA665x tuner"
+config DVB_LNBH29
+ tristate "LNBH29 SEC controller"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Support for tuner modules based on Philips TDA6650/TDA6651 chips.
+ LNB power supply and control voltage
+ regulator chip with step-up converter
+ and I2C interface for STMicroelectronics LNBH29.
Say Y when you want to support this chip.
- Currently supported tuners:
- * Panasonic ENV57H12D5 (ET-50DT)
+config DVB_LNBP21
+ tristate "LNBP21/LNBH24 SEC controllers"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ SEC control chips.
-config DVB_IX2505V
- tristate "Sharp IX2505V silicon tuner"
+config DVB_LNBP22
+ tristate "LNBP22 SEC controllers"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A DVB-S tuner module. Say Y when you want to support this frontend.
+ LNB power supply and control voltage
+ regulator chip with step-up converter
+ and I2C interface.
+ Say Y when you want to support this chip.
config DVB_M88RS2000
tristate "M88RS2000 DVB-S demodulator and tuner"
@@ -896,32 +909,18 @@ config DVB_M88RS2000
A DVB-S tuner module.
Say Y when you want to support this frontend.
-config DVB_AF9033
- tristate "Afatech AF9033 DVB-T demodulator"
- depends on DVB_CORE && I2C
- select REGMAP_I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
-
-config DVB_HORUS3A
- tristate "Sony Horus3A tuner"
+config DVB_TDA665x
+ tristate "TDA665x tuner"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Say Y when you want to support this frontend.
+ Support for tuner modules based on Philips TDA6650/TDA6651 chips.
+ Say Y when you want to support this chip.
-config DVB_ASCOT2E
- tristate "Sony Ascot2E tuner"
- depends on DVB_CORE && I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- Say Y when you want to support this frontend.
+ Currently supported tuners:
+ * Panasonic ENV57H12D5 (ET-50DT)
-config DVB_HELENE
- tristate "Sony HELENE Sat/Ter tuner (CXD2858ER)"
- depends on DVB_CORE && I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- Say Y when you want to support this frontend.
+source "drivers/media/dvb-frontends/drx39xyj/Kconfig"
comment "Common Interface (EN50221) controller drivers"
depends on DVB_CORE
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index d32e4c0be576..a93146cb428c 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -10,126 +10,129 @@ ifdef CONFIG_DVB_RTL2832_SDR
ccflags-y += -I$(srctree)/drivers/media/usb/dvb-usb-v2
endif
-stb0899-objs := stb0899_drv.o stb0899_algo.o
-stv0900-objs := stv0900_core.o stv0900_sw.o
-drxd-objs := drxd_firm.o drxd_hard.o
cxd2820r-objs := cxd2820r_core.o cxd2820r_c.o cxd2820r_t.o cxd2820r_t2.o
+drxd-objs := drxd_firm.o drxd_hard.o
drxk-objs := drxk_hard.o
+stb0899-objs := stb0899_drv.o stb0899_algo.o
+stv0900-objs := stv0900_core.o stv0900_sw.o
-obj-$(CONFIG_DVB_PLL) += dvb-pll.o
-obj-$(CONFIG_DVB_STV0299) += stv0299.o
-obj-$(CONFIG_DVB_STB0899) += stb0899.o
-obj-$(CONFIG_DVB_STB6100) += stb6100.o
+# Please keep it alphabetically sorted by Kconfig name
+# (e.g. LC_ALL=C sort Makefile)
+
+obj-$(CONFIG_DVB_A8293) += a8293.o
+obj-$(CONFIG_DVB_AF9013) += af9013.o
+obj-$(CONFIG_DVB_AF9033) += af9033.o
+obj-$(CONFIG_DVB_AS102_FE) += as102_fe.o
+obj-$(CONFIG_DVB_ASCOT2E) += ascot2e.o
+obj-$(CONFIG_DVB_ATBM8830) += atbm8830.o
+obj-$(CONFIG_DVB_AU8522) += au8522_common.o
+obj-$(CONFIG_DVB_AU8522_DTV) += au8522_dig.o
+obj-$(CONFIG_DVB_AU8522_V4L) += au8522_decoder.o
+obj-$(CONFIG_DVB_BCM3510) += bcm3510.o
obj-$(CONFIG_DVB_CX22700) += cx22700.o
-obj-$(CONFIG_DVB_S5H1432) += s5h1432.o
+obj-$(CONFIG_DVB_CX22702) += cx22702.o
obj-$(CONFIG_DVB_CX24110) += cx24110.o
-obj-$(CONFIG_DVB_TDA8083) += tda8083.o
-obj-$(CONFIG_DVB_L64781) += l64781.o
+obj-$(CONFIG_DVB_CX24116) += cx24116.o
+obj-$(CONFIG_DVB_CX24117) += cx24117.o
+obj-$(CONFIG_DVB_CX24120) += cx24120.o
+obj-$(CONFIG_DVB_CX24123) += cx24123.o
+obj-$(CONFIG_DVB_CXD2099) += cxd2099.o
+obj-$(CONFIG_DVB_CXD2820R) += cxd2820r.o
+obj-$(CONFIG_DVB_CXD2841ER) += cxd2841er.o
+obj-$(CONFIG_DVB_CXD2880) += cxd2880/
obj-$(CONFIG_DVB_DIB3000MB) += dib3000mb.o
obj-$(CONFIG_DVB_DIB3000MC) += dib3000mc.o dibx000_common.o
obj-$(CONFIG_DVB_DIB7000M) += dib7000m.o dibx000_common.o
obj-$(CONFIG_DVB_DIB7000P) += dib7000p.o dibx000_common.o
obj-$(CONFIG_DVB_DIB8000) += dib8000.o dibx000_common.o
obj-$(CONFIG_DVB_DIB9000) += dib9000.o dibx000_common.o
-obj-$(CONFIG_DVB_MT312) += mt312.o
-obj-$(CONFIG_DVB_VES1820) += ves1820.o
-obj-$(CONFIG_DVB_VES1X93) += ves1x93.o
-obj-$(CONFIG_DVB_TDA1004X) += tda1004x.o
-obj-$(CONFIG_DVB_SP887X) += sp887x.o
-obj-$(CONFIG_DVB_NXT6000) += nxt6000.o
-obj-$(CONFIG_DVB_MT352) += mt352.o
-obj-$(CONFIG_DVB_ZL10036) += zl10036.o
-obj-$(CONFIG_DVB_ZL10039) += zl10039.o
-obj-$(CONFIG_DVB_ZL10353) += zl10353.o
-obj-$(CONFIG_DVB_CX22702) += cx22702.o
+obj-$(CONFIG_DVB_DRX39XYJ) += drx39xyj/
obj-$(CONFIG_DVB_DRXD) += drxd.o
-obj-$(CONFIG_DVB_TDA10021) += tda10021.o
-obj-$(CONFIG_DVB_TDA10023) += tda10023.o
-obj-$(CONFIG_DVB_STV0297) += stv0297.o
-obj-$(CONFIG_DVB_NXT200X) += nxt200x.o
-obj-$(CONFIG_DVB_OR51211) += or51211.o
-obj-$(CONFIG_DVB_OR51132) += or51132.o
-obj-$(CONFIG_DVB_BCM3510) += bcm3510.o
-obj-$(CONFIG_DVB_S5H1420) += s5h1420.o
-obj-$(CONFIG_DVB_LGDT330X) += lgdt330x.o
+obj-$(CONFIG_DVB_DRXK) += drxk.o
+obj-$(CONFIG_DVB_DS3000) += ds3000.o
+obj-$(CONFIG_DVB_DUMMY_FE) += dvb_dummy_fe.o
+obj-$(CONFIG_DVB_EC100) += ec100.o
+obj-$(CONFIG_DVB_GP8PSK_FE) += gp8psk-fe.o
+obj-$(CONFIG_DVB_HELENE) += helene.o
+obj-$(CONFIG_DVB_HORUS3A) += horus3a.o
+obj-$(CONFIG_DVB_ISL6405) += isl6405.o
+obj-$(CONFIG_DVB_ISL6421) += isl6421.o
+obj-$(CONFIG_DVB_ISL6423) += isl6423.o
+obj-$(CONFIG_DVB_IX2505V) += ix2505v.o
+obj-$(CONFIG_DVB_L64781) += l64781.o
+obj-$(CONFIG_DVB_LG2160) += lg2160.o
obj-$(CONFIG_DVB_LGDT3305) += lgdt3305.o
obj-$(CONFIG_DVB_LGDT3306A) += lgdt3306a.o
-obj-$(CONFIG_DVB_MXL692) += mxl692.o
-obj-$(CONFIG_DVB_LG2160) += lg2160.o
-obj-$(CONFIG_DVB_CX24123) += cx24123.o
+obj-$(CONFIG_DVB_LGDT330X) += lgdt330x.o
+obj-$(CONFIG_DVB_LGS8GL5) += lgs8gl5.o
+obj-$(CONFIG_DVB_LGS8GXX) += lgs8gxx.o
obj-$(CONFIG_DVB_LNBH25) += lnbh25.o
obj-$(CONFIG_DVB_LNBH29) += lnbh29.o
obj-$(CONFIG_DVB_LNBP21) += lnbp21.o
obj-$(CONFIG_DVB_LNBP22) += lnbp22.o
-obj-$(CONFIG_DVB_ISL6405) += isl6405.o
-obj-$(CONFIG_DVB_ISL6421) += isl6421.o
-obj-$(CONFIG_DVB_TDA10086) += tda10086.o
-obj-$(CONFIG_DVB_TDA826X) += tda826x.o
-obj-$(CONFIG_DVB_TDA8261) += tda8261.o
-obj-$(CONFIG_DVB_TUNER_DIB0070) += dib0070.o
-obj-$(CONFIG_DVB_TUNER_DIB0090) += dib0090.o
-obj-$(CONFIG_DVB_TUA6100) += tua6100.o
+obj-$(CONFIG_DVB_M88DS3103) += m88ds3103.o
+obj-$(CONFIG_DVB_M88RS2000) += m88rs2000.o
+obj-$(CONFIG_DVB_MB86A16) += mb86a16.o
+obj-$(CONFIG_DVB_MB86A20S) += mb86a20s.o
+obj-$(CONFIG_DVB_MN88443X) += mn88443x.o
+obj-$(CONFIG_DVB_MN88472) += mn88472.o
+obj-$(CONFIG_DVB_MN88473) += mn88473.o
+obj-$(CONFIG_DVB_MT312) += mt312.o
+obj-$(CONFIG_DVB_MT352) += mt352.o
+obj-$(CONFIG_DVB_MXL5XX) += mxl5xx.o
+obj-$(CONFIG_DVB_MXL692) += mxl692.o
+obj-$(CONFIG_DVB_NXT200X) += nxt200x.o
+obj-$(CONFIG_DVB_NXT6000) += nxt6000.o
+obj-$(CONFIG_DVB_OR51132) += or51132.o
+obj-$(CONFIG_DVB_OR51211) += or51211.o
+obj-$(CONFIG_DVB_PLL) += dvb-pll.o
+obj-$(CONFIG_DVB_RTL2830) += rtl2830.o
+obj-$(CONFIG_DVB_RTL2832) += rtl2832.o
+obj-$(CONFIG_DVB_RTL2832_SDR) += rtl2832_sdr.o
obj-$(CONFIG_DVB_S5H1409) += s5h1409.o
-obj-$(CONFIG_DVB_TUNER_ITD1000) += itd1000.o
-obj-$(CONFIG_DVB_AU8522) += au8522_common.o
-obj-$(CONFIG_DVB_AU8522_DTV) += au8522_dig.o
-obj-$(CONFIG_DVB_AU8522_V4L) += au8522_decoder.o
-obj-$(CONFIG_DVB_TDA10048) += tda10048.o
-obj-$(CONFIG_DVB_TUNER_CX24113) += cx24113.o
obj-$(CONFIG_DVB_S5H1411) += s5h1411.o
-obj-$(CONFIG_DVB_LGS8GL5) += lgs8gl5.o
-obj-$(CONFIG_DVB_TDA665x) += tda665x.o
-obj-$(CONFIG_DVB_LGS8GXX) += lgs8gxx.o
-obj-$(CONFIG_DVB_ATBM8830) += atbm8830.o
-obj-$(CONFIG_DVB_DUMMY_FE) += dvb_dummy_fe.o
-obj-$(CONFIG_DVB_AF9013) += af9013.o
-obj-$(CONFIG_DVB_CX24116) += cx24116.o
-obj-$(CONFIG_DVB_CX24117) += cx24117.o
-obj-$(CONFIG_DVB_CX24120) += cx24120.o
-obj-$(CONFIG_DVB_SI21XX) += si21xx.o
+obj-$(CONFIG_DVB_S5H1420) += s5h1420.o
+obj-$(CONFIG_DVB_S5H1432) += s5h1432.o
+obj-$(CONFIG_DVB_S921) += s921.o
+obj-$(CONFIG_DVB_SI2165) += si2165.o
obj-$(CONFIG_DVB_SI2168) += si2168.o
-obj-$(CONFIG_DVB_STV0288) += stv0288.o
+obj-$(CONFIG_DVB_SI21XX) += si21xx.o
+obj-$(CONFIG_DVB_SP2) += sp2.o
+obj-$(CONFIG_DVB_SP887X) += sp887x.o
+obj-$(CONFIG_DVB_STB0899) += stb0899.o
obj-$(CONFIG_DVB_STB6000) += stb6000.o
-obj-$(CONFIG_DVB_S921) += s921.o
-obj-$(CONFIG_DVB_STV6110) += stv6110.o
+obj-$(CONFIG_DVB_STB6100) += stb6100.o
+obj-$(CONFIG_DVB_STV0288) += stv0288.o
+obj-$(CONFIG_DVB_STV0297) += stv0297.o
+obj-$(CONFIG_DVB_STV0299) += stv0299.o
+obj-$(CONFIG_DVB_STV0367) += stv0367.o
obj-$(CONFIG_DVB_STV0900) += stv0900.o
obj-$(CONFIG_DVB_STV090x) += stv090x.o
-obj-$(CONFIG_DVB_STV6110x) += stv6110x.o
-obj-$(CONFIG_DVB_M88DS3103) += m88ds3103.o
-obj-$(CONFIG_DVB_MN88472) += mn88472.o
-obj-$(CONFIG_DVB_MN88473) += mn88473.o
-obj-$(CONFIG_DVB_ISL6423) += isl6423.o
-obj-$(CONFIG_DVB_EC100) += ec100.o
-obj-$(CONFIG_DVB_DS3000) += ds3000.o
-obj-$(CONFIG_DVB_TS2020) += ts2020.o
-obj-$(CONFIG_DVB_MB86A16) += mb86a16.o
-obj-$(CONFIG_DVB_DRX39XYJ) += drx39xyj/
-obj-$(CONFIG_DVB_MB86A20S) += mb86a20s.o
-obj-$(CONFIG_DVB_IX2505V) += ix2505v.o
-obj-$(CONFIG_DVB_STV0367) += stv0367.o
-obj-$(CONFIG_DVB_CXD2820R) += cxd2820r.o
-obj-$(CONFIG_DVB_CXD2841ER) += cxd2841er.o
-obj-$(CONFIG_DVB_DRXK) += drxk.o
-obj-$(CONFIG_DVB_TDA18271C2DD) += tda18271c2dd.o
obj-$(CONFIG_DVB_STV0910) += stv0910.o
+obj-$(CONFIG_DVB_STV6110) += stv6110.o
+obj-$(CONFIG_DVB_STV6110x) += stv6110x.o
obj-$(CONFIG_DVB_STV6111) += stv6111.o
-obj-$(CONFIG_DVB_MXL5XX) += mxl5xx.o
-obj-$(CONFIG_DVB_SI2165) += si2165.o
-obj-$(CONFIG_DVB_A8293) += a8293.o
-obj-$(CONFIG_DVB_SP2) += sp2.o
-obj-$(CONFIG_DVB_TDA10071) += tda10071.o
-obj-$(CONFIG_DVB_RTL2830) += rtl2830.o
-obj-$(CONFIG_DVB_RTL2832) += rtl2832.o
-obj-$(CONFIG_DVB_RTL2832_SDR) += rtl2832_sdr.o
-obj-$(CONFIG_DVB_M88RS2000) += m88rs2000.o
-obj-$(CONFIG_DVB_AF9033) += af9033.o
-obj-$(CONFIG_DVB_AS102_FE) += as102_fe.o
-obj-$(CONFIG_DVB_GP8PSK_FE) += gp8psk-fe.o
obj-$(CONFIG_DVB_TC90522) += tc90522.o
-obj-$(CONFIG_DVB_MN88443X) += mn88443x.o
-obj-$(CONFIG_DVB_HORUS3A) += horus3a.o
-obj-$(CONFIG_DVB_ASCOT2E) += ascot2e.o
-obj-$(CONFIG_DVB_HELENE) += helene.o
+obj-$(CONFIG_DVB_TDA10021) += tda10021.o
+obj-$(CONFIG_DVB_TDA10023) += tda10023.o
+obj-$(CONFIG_DVB_TDA10048) += tda10048.o
+obj-$(CONFIG_DVB_TDA1004X) += tda1004x.o
+obj-$(CONFIG_DVB_TDA10071) += tda10071.o
+obj-$(CONFIG_DVB_TDA10086) += tda10086.o
+obj-$(CONFIG_DVB_TDA18271C2DD) += tda18271c2dd.o
+obj-$(CONFIG_DVB_TDA665x) += tda665x.o
+obj-$(CONFIG_DVB_TDA8083) += tda8083.o
+obj-$(CONFIG_DVB_TDA8261) += tda8261.o
+obj-$(CONFIG_DVB_TDA826X) += tda826x.o
+obj-$(CONFIG_DVB_TS2020) += ts2020.o
+obj-$(CONFIG_DVB_TUA6100) += tua6100.o
+obj-$(CONFIG_DVB_TUNER_CX24113) += cx24113.o
+obj-$(CONFIG_DVB_TUNER_DIB0070) += dib0070.o
+obj-$(CONFIG_DVB_TUNER_DIB0090) += dib0090.o
+obj-$(CONFIG_DVB_TUNER_ITD1000) += itd1000.o
+obj-$(CONFIG_DVB_VES1820) += ves1820.o
+obj-$(CONFIG_DVB_VES1X93) += ves1x93.o
obj-$(CONFIG_DVB_ZD1301_DEMOD) += zd1301_demod.o
-obj-$(CONFIG_DVB_CXD2099) += cxd2099.o
-obj-$(CONFIG_DVB_CXD2880) += cxd2880/
+obj-$(CONFIG_DVB_ZL10036) += zl10036.o
+obj-$(CONFIG_DVB_ZL10039) += zl10039.o
+obj-$(CONFIG_DVB_ZL10353) += zl10353.o
diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c
index 692600ce5f23..2e11a246aae0 100644
--- a/drivers/media/dvb-frontends/dib3000mc.c
+++ b/drivers/media/dvb-frontends/dib3000mc.c
@@ -859,7 +859,7 @@ int dib3000mc_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defa
int k;
u8 new_addr;
- static u8 DIB3000MC_I2C_ADDRESS[] = {20,22,24,26};
+ static const u8 DIB3000MC_I2C_ADDRESS[] = { 20, 22, 24, 26 };
dmcst = kzalloc(sizeof(struct dib3000mc_state), GFP_KERNEL);
if (dmcst == NULL)
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index 55bee50aa871..a90d2f51868f 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -1188,8 +1188,8 @@ static int dib7000p_autosearch_is_irq(struct dvb_frontend *demod)
static void dib7000p_spur_protect(struct dib7000p_state *state, u32 rf_khz, u32 bw)
{
- static s16 notch[] = { 16143, 14402, 12238, 9713, 6902, 3888, 759, -2392 };
- static u8 sine[] = { 0, 2, 3, 5, 6, 8, 9, 11, 13, 14, 16, 17, 19, 20, 22,
+ static const s16 notch[] = { 16143, 14402, 12238, 9713, 6902, 3888, 759, -2392 };
+ static const u8 sine[] = { 0, 2, 3, 5, 6, 8, 9, 11, 13, 14, 16, 17, 19, 20, 22,
24, 25, 27, 28, 30, 31, 33, 34, 36, 38, 39, 41, 42, 44, 45, 47, 48, 50, 51,
53, 55, 56, 58, 59, 61, 62, 64, 65, 67, 68, 70, 71, 73, 74, 76, 77, 79, 80,
82, 83, 85, 86, 88, 89, 91, 92, 94, 95, 97, 98, 99, 101, 102, 104, 105,
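
The two hunks above (dib3000mc.c and dib7000p.c) add const to static lookup tables. Marking such tables static const lets the compiler place them in read-only storage (.rodata on ELF targets) and turns any accidental write into a compile-time error. Below is a minimal standalone sketch of the same pattern; the table contents echo the dib3000mc hunk, but the function and variable names are illustrative, not part of the patch.

#include <stdio.h>

/* 'static const' places the table in read-only memory and makes any
 * accidental write a compile-time error instead of a runtime bug. */
static const unsigned char demod_i2c_addr[] = { 20, 22, 24, 26 };

static int lookup_addr(unsigned int idx)
{
	if (idx >= sizeof(demod_i2c_addr) / sizeof(demod_i2c_addr[0]))
		return -1;	/* index out of range */
	return demod_i2c_addr[idx];
}

int main(void)
{
	printf("demod 2 -> I2C address %d\n", lookup_addr(2));	/* prints 24 */
	return 0;
}
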
diff --git a/drivers/media/dvb-frontends/si21xx.c b/drivers/media/dvb-frontends/si21xx.c
index 001b23588389..2d29d2c4d434 100644
--- a/drivers/media/dvb-frontends/si21xx.c
+++ b/drivers/media/dvb-frontends/si21xx.c
@@ -336,7 +336,7 @@ static int si21xx_wait_diseqc_idle(struct si21xx_state *state, int timeout)
dprintk("%s\n", __func__);
while ((si21_readreg(state, LNB_CTRL_REG_1) & 0x8) == 8) {
- if (jiffies - start > timeout) {
+ if (time_is_before_jiffies(start + timeout)) {
dprintk("%s: timeout!!\n", __func__);
return -ETIMEDOUT;
}
diff --git a/drivers/media/dvb-frontends/stv0299.c b/drivers/media/dvb-frontends/stv0299.c
index 421395ea3334..b5263a0ee5aa 100644
--- a/drivers/media/dvb-frontends/stv0299.c
+++ b/drivers/media/dvb-frontends/stv0299.c
@@ -161,8 +161,9 @@ static int stv0299_set_FEC(struct stv0299_state *state, enum fe_code_rate fec)
static enum fe_code_rate stv0299_get_fec(struct stv0299_state *state)
{
- static enum fe_code_rate fec_tab[] = { FEC_2_3, FEC_3_4, FEC_5_6,
- FEC_7_8, FEC_1_2 };
+ static const enum fe_code_rate fec_tab[] = {
+ FEC_2_3, FEC_3_4, FEC_5_6, FEC_7_8, FEC_1_2
+ };
u8 index;
dprintk ("%s\n", __func__);
@@ -183,7 +184,7 @@ static int stv0299_wait_diseqc_fifo (struct stv0299_state* state, int timeout)
dprintk ("%s\n", __func__);
while (stv0299_readreg(state, 0x0a) & 1) {
- if (jiffies - start > timeout) {
+ if (time_is_before_jiffies(start + timeout)) {
dprintk ("%s: timeout!!\n", __func__);
return -ETIMEDOUT;
}
@@ -200,7 +201,7 @@ static int stv0299_wait_diseqc_idle (struct stv0299_state* state, int timeout)
dprintk ("%s\n", __func__);
while ((stv0299_readreg(state, 0x0a) & 3) != 2 ) {
- if (jiffies - start > timeout) {
+ if (time_is_before_jiffies(start + timeout)) {
dprintk ("%s: timeout!!\n", __func__);
return -ETIMEDOUT;
}
diff --git a/drivers/media/dvb-frontends/tda8083.c b/drivers/media/dvb-frontends/tda8083.c
index 5be11fd65e3b..e3e1c3db2c85 100644
--- a/drivers/media/dvb-frontends/tda8083.c
+++ b/drivers/media/dvb-frontends/tda8083.c
@@ -162,7 +162,7 @@ static void tda8083_wait_diseqc_fifo (struct tda8083_state* state, int timeout)
{
unsigned long start = jiffies;
- while (jiffies - start < timeout &&
+ while (time_is_after_jiffies(start + timeout) &&
!(tda8083_readreg(state, 0x02) & 0x80))
{
msleep(50);
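
The si21xx.c, stv0299.c and tda8083.c hunks above replace open-coded jiffies arithmetic with the time_is_before_jiffies()/time_is_after_jiffies() helpers from <linux/jiffies.h>. These helpers compare against a deadline using a signed difference, which stays correct when the jiffies counter wraps around, and they make the intent explicit (deadline already passed vs. not yet reached). Below is a standalone sketch of the underlying comparison; fake_jiffies and time_after_sketch() are illustrative stand-ins, not kernel API.

#include <stdio.h>

static unsigned long fake_jiffies;	/* stand-in for the kernel's jiffies */

static int time_after_sketch(unsigned long a, unsigned long b)
{
	/* Same trick as the kernel's time_after(a, b): the signed view of
	 * (b - a) is negative exactly when a is ahead of b, even across a
	 * wraparound of the unsigned counter (two's complement assumed,
	 * as the kernel does). */
	return (long)(b - a) < 0;
}

int main(void)
{
	unsigned long start, deadline, timeout = 100;

	fake_jiffies = (unsigned long)-120;	/* 120 ticks before wraparound */
	start = fake_jiffies;
	deadline = start + timeout;		/* does not wrap yet */

	fake_jiffies += 150;			/* counter wraps to a small value (30) */

	/* A naive '>' is wrong here: fake_jiffies wrapped past zero but the
	 * deadline did not, so the timeout appears to never expire. */
	printf("naive check says expired: %d\n", fake_jiffies > deadline);	/* 0 */
	printf("safe  check says expired: %d\n",
	       time_after_sketch(fake_jiffies, deadline));			/* 1 */
	return 0;
}
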
diff --git a/drivers/media/firewire/Makefile b/drivers/media/firewire/Makefile
index 3670c85af6f5..d5551e6389bf 100644
--- a/drivers/media/firewire/Makefile
+++ b/drivers/media/firewire/Makefile
@@ -2,4 +2,4 @@
obj-$(CONFIG_DVB_FIREDTV) += firedtv.o
firedtv-y += firedtv-avc.o firedtv-ci.o firedtv-dvb.o firedtv-fe.o firedtv-fw.o
-firedtv-$(CONFIG_DVB_FIREDTV_INPUT) += firedtv-rc.o
+firedtv-$(CONFIG_DVB_FIREDTV_INPUT) += firedtv-rc.o
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 69c56e24a612..fae2baabb773 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -3,7 +3,7 @@
# Multimedia Video device configuration
#
-if VIDEO_V4L2
+if VIDEO_DEV
comment "IR I2C driver auto-selected by 'Autoselect ancillary drivers'"
depends on MEDIA_SUBDRV_AUTOSELECT && I2C && RC_CORE
@@ -22,705 +22,6 @@ config VIDEO_IR_I2C
In doubt, say Y.
#
-# V4L2 I2C drivers that aren't related with Camera support
-#
-
-comment "audio, video and radio I2C drivers auto-selected by 'Autoselect ancillary drivers'"
- depends on MEDIA_HIDE_ANCILLARY_SUBDRV
-#
-# Encoder / Decoder module configuration
-#
-
-menu "Audio decoders, processors and mixers"
- visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
-
-config VIDEO_TVAUDIO
- tristate "Simple audio decoder chips"
- depends on VIDEO_V4L2 && I2C
- help
- Support for several audio decoder chips found on some bt8xx boards:
- Philips: tda9840, tda9873h, tda9874h/a, tda9850, tda985x, tea6300,
- tea6320, tea6420, tda8425, ta8874z.
- Microchip: pic16c54 based design on ProVideo PV951 board.
-
- To compile this driver as a module, choose M here: the
- module will be called tvaudio.
-
-config VIDEO_TDA7432
- tristate "Philips TDA7432 audio processor"
- depends on VIDEO_V4L2 && I2C
- help
- Support for tda7432 audio decoder chip found on some bt8xx boards.
-
- To compile this driver as a module, choose M here: the
- module will be called tda7432.
-
-config VIDEO_TDA9840
- tristate "Philips TDA9840 audio processor"
- depends on I2C
- help
- Support for tda9840 audio decoder chip found on some Zoran boards.
-
- To compile this driver as a module, choose M here: the
- module will be called tda9840.
-
-config VIDEO_TDA1997X
- tristate "NXP TDA1997x HDMI receiver"
- depends on VIDEO_V4L2 && I2C
- depends on SND_SOC
- select HDMI
- select SND_PCM
- select V4L2_FWNODE
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- help
- V4L2 subdevice driver for the NXP TDA1997x HDMI receivers.
-
- To compile this driver as a module, choose M here: the
- module will be called tda1997x.
-
-config VIDEO_TEA6415C
- tristate "Philips TEA6415C audio processor"
- depends on I2C
- help
- Support for tea6415c audio decoder chip found on some bt8xx boards.
-
- To compile this driver as a module, choose M here: the
- module will be called tea6415c.
-
-config VIDEO_TEA6420
- tristate "Philips TEA6420 audio processor"
- depends on I2C
- help
- Support for tea6420 audio decoder chip found on some bt8xx boards.
-
- To compile this driver as a module, choose M here: the
- module will be called tea6420.
-
-config VIDEO_MSP3400
- tristate "Micronas MSP34xx audio decoders"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the Micronas MSP34xx series of audio decoders.
-
- To compile this driver as a module, choose M here: the
- module will be called msp3400.
-
-config VIDEO_CS3308
- tristate "Cirrus Logic CS3308 audio ADC"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the Cirrus Logic CS3308 High Performance 8-Channel
- Analog Volume Control
-
- To compile this driver as a module, choose M here: the
- module will be called cs3308.
-
-config VIDEO_CS5345
- tristate "Cirrus Logic CS5345 audio ADC"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the Cirrus Logic CS5345 24-bit, 192 kHz
- stereo A/D converter.
-
- To compile this driver as a module, choose M here: the
- module will be called cs5345.
-
-config VIDEO_CS53L32A
- tristate "Cirrus Logic CS53L32A audio ADC"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the Cirrus Logic CS53L32A low voltage
- stereo A/D converter.
-
- To compile this driver as a module, choose M here: the
- module will be called cs53l32a.
-
-config VIDEO_TLV320AIC23B
- tristate "Texas Instruments TLV320AIC23B audio codec"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the Texas Instruments TLV320AIC23B audio codec.
-
- To compile this driver as a module, choose M here: the
- module will be called tlv320aic23b.
-
-config VIDEO_UDA1342
- tristate "Philips UDA1342 audio codec"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the Philips UDA1342 audio codec.
-
- To compile this driver as a module, choose M here: the
- module will be called uda1342.
-
-config VIDEO_WM8775
- tristate "Wolfson Microelectronics WM8775 audio ADC with input mixer"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the Wolfson Microelectronics WM8775 high
- performance stereo A/D Converter with a 4 channel input mixer.
-
- To compile this driver as a module, choose M here: the
- module will be called wm8775.
-
-config VIDEO_WM8739
- tristate "Wolfson Microelectronics WM8739 stereo audio ADC"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the Wolfson Microelectronics WM8739
- stereo A/D Converter.
-
- To compile this driver as a module, choose M here: the
- module will be called wm8739.
-
-config VIDEO_VP27SMPX
- tristate "Panasonic VP27's internal MPX"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the internal MPX of the Panasonic VP27s tuner.
-
- To compile this driver as a module, choose M here: the
- module will be called vp27smpx.
-
-config VIDEO_SONY_BTF_MPX
- tristate "Sony BTF's internal MPX"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the internal MPX of the Sony BTF-PG472Z tuner.
-
- To compile this driver as a module, choose M here: the
- module will be called sony-btf-mpx.
-endmenu
-
-menu "RDS decoders"
- visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
-
-config VIDEO_SAA6588
- tristate "SAA6588 Radio Chip RDS decoder support"
- depends on VIDEO_V4L2 && I2C
-
- help
- Support for this Radio Data System (RDS) decoder. This allows
- seeing radio station identification transmitted using this
- standard.
-
- To compile this driver as a module, choose M here: the
- module will be called saa6588.
-endmenu
-
-menu "Video decoders"
- visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
-
-config VIDEO_ADV7180
- tristate "Analog Devices ADV7180 decoder"
- depends on GPIOLIB && VIDEO_V4L2 && I2C
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_ASYNC
- help
- Support for the Analog Devices ADV7180 video decoder.
-
- To compile this driver as a module, choose M here: the
- module will be called adv7180.
-
-config VIDEO_ADV7183
- tristate "Analog Devices ADV7183 decoder"
- depends on VIDEO_V4L2 && I2C
- help
- V4l2 subdevice driver for the Analog Devices
- ADV7183 video decoder.
-
- To compile this driver as a module, choose M here: the
- module will be called adv7183.
-
-config VIDEO_ADV748X
- tristate "Analog Devices ADV748x decoder"
- depends on VIDEO_V4L2 && I2C
- depends on OF
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select REGMAP_I2C
- select V4L2_FWNODE
- help
- V4L2 subdevice driver for the Analog Devices
- ADV7481 and ADV7482 HDMI/Analog video decoders.
-
- To compile this driver as a module, choose M here: the
- module will be called adv748x.
-
-config VIDEO_ADV7604
- tristate "Analog Devices ADV7604 decoder"
- depends on VIDEO_V4L2 && I2C
- depends on GPIOLIB || COMPILE_TEST
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select REGMAP_I2C
- select HDMI
- select V4L2_FWNODE
- help
- Support for the Analog Devices ADV7604 video decoder.
-
- This is a Analog Devices Component/Graphics Digitizer
- with 4:1 Multiplexed HDMI Receiver.
-
- To compile this driver as a module, choose M here: the
- module will be called adv7604.
-
-config VIDEO_ADV7604_CEC
- bool "Enable Analog Devices ADV7604 CEC support"
- depends on VIDEO_ADV7604
- select CEC_CORE
- help
- When selected the adv7604 will support the optional
- HDMI CEC feature.
-
-config VIDEO_ADV7842
- tristate "Analog Devices ADV7842 decoder"
- depends on VIDEO_V4L2 && I2C
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select HDMI
- help
- Support for the Analog Devices ADV7842 video decoder.
-
- This is a Analog Devices Component/Graphics/SD Digitizer
- with 2:1 Multiplexed HDMI Receiver.
-
- To compile this driver as a module, choose M here: the
- module will be called adv7842.
-
-config VIDEO_ADV7842_CEC
- bool "Enable Analog Devices ADV7842 CEC support"
- depends on VIDEO_ADV7842
- select CEC_CORE
- help
- When selected the adv7842 will support the optional
- HDMI CEC feature.
-
-config VIDEO_BT819
- tristate "BT819A VideoStream decoder"
- depends on VIDEO_V4L2 && I2C
- help
- Support for BT819A video decoder.
-
- To compile this driver as a module, choose M here: the
- module will be called bt819.
-
-config VIDEO_BT856
- tristate "BT856 VideoStream decoder"
- depends on VIDEO_V4L2 && I2C
- help
- Support for BT856 video decoder.
-
- To compile this driver as a module, choose M here: the
- module will be called bt856.
-
-config VIDEO_BT866
- tristate "BT866 VideoStream decoder"
- depends on VIDEO_V4L2 && I2C
- help
- Support for BT866 video decoder.
-
- To compile this driver as a module, choose M here: the
- module will be called bt866.
-
-config VIDEO_KS0127
- tristate "KS0127 video decoder"
- depends on VIDEO_V4L2 && I2C
- help
- Support for KS0127 video decoder.
-
- This chip is used on AverMedia AVS6EYES Zoran-based MJPEG
- cards.
-
- To compile this driver as a module, choose M here: the
- module will be called ks0127.
-
-config VIDEO_ML86V7667
- tristate "OKI ML86V7667 video decoder"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the OKI Semiconductor ML86V7667 video decoder.
-
- To compile this driver as a module, choose M here: the
- module will be called ml86v7667.
-
-config VIDEO_SAA7110
- tristate "Philips SAA7110 video decoder"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the Philips SAA7110 video decoders.
-
- To compile this driver as a module, choose M here: the
- module will be called saa7110.
-
-config VIDEO_SAA711X
- tristate "Philips SAA7111/3/4/5 video decoders"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the Philips SAA7111/3/4/5 video decoders.
-
- To compile this driver as a module, choose M here: the
- module will be called saa7115.
-
-config VIDEO_TC358743
- tristate "Toshiba TC358743 decoder"
- depends on VIDEO_V4L2 && I2C
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select HDMI
- select V4L2_FWNODE
- help
- Support for the Toshiba TC358743 HDMI to MIPI CSI-2 bridge.
-
- To compile this driver as a module, choose M here: the
- module will be called tc358743.
-
-config VIDEO_TC358743_CEC
- bool "Enable Toshiba TC358743 CEC support"
- depends on VIDEO_TC358743
- select CEC_CORE
- help
- When selected the tc358743 will support the optional
- HDMI CEC feature.
-
-config VIDEO_TVP514X
- tristate "Texas Instruments TVP514x video decoder"
- depends on VIDEO_V4L2 && I2C
- select V4L2_FWNODE
- help
- This is a Video4Linux2 sensor driver for the TI TVP5146/47
- decoder. It is currently working with the TI OMAP3 camera
- controller.
-
- To compile this driver as a module, choose M here: the
- module will be called tvp514x.
-
-config VIDEO_TVP5150
- tristate "Texas Instruments TVP5150 video decoder"
- depends on VIDEO_V4L2 && I2C
- select V4L2_FWNODE
- select REGMAP_I2C
- help
- Support for the Texas Instruments TVP5150 video decoder.
-
- To compile this driver as a module, choose M here: the
- module will be called tvp5150.
-
-config VIDEO_TVP7002
- tristate "Texas Instruments TVP7002 video decoder"
- depends on VIDEO_V4L2 && I2C
- select V4L2_FWNODE
- help
- Support for the Texas Instruments TVP7002 video decoder.
-
- To compile this driver as a module, choose M here: the
- module will be called tvp7002.
-
-config VIDEO_TW2804
- tristate "Techwell TW2804 multiple video decoder"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the Techwell tw2804 multiple video decoder.
-
- To compile this driver as a module, choose M here: the
- module will be called tw2804.
-
-config VIDEO_TW9903
- tristate "Techwell TW9903 video decoder"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the Techwell tw9903 multi-standard video decoder
- with high quality down scaler.
-
- To compile this driver as a module, choose M here: the
- module will be called tw9903.
-
-config VIDEO_TW9906
- tristate "Techwell TW9906 video decoder"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the Techwell tw9906 enhanced multi-standard comb filter
- video decoder with YCbCr input support.
-
- To compile this driver as a module, choose M here: the
- module will be called tw9906.
-
-config VIDEO_TW9910
- tristate "Techwell TW9910 video decoder"
- depends on VIDEO_V4L2 && I2C
- select V4L2_ASYNC
- help
- Support for Techwell TW9910 NTSC/PAL/SECAM video decoder.
-
- To compile this driver as a module, choose M here: the
- module will be called tw9910.
-
-config VIDEO_VPX3220
- tristate "vpx3220a, vpx3216b & vpx3214c video decoders"
- depends on VIDEO_V4L2 && I2C
- help
- Support for VPX322x video decoders.
-
- To compile this driver as a module, choose M here: the
- module will be called vpx3220.
-
-config VIDEO_MAX9286
- tristate "Maxim MAX9286 GMSL deserializer support"
- depends on I2C && I2C_MUX
- depends on VIDEO_V4L2
- depends on OF_GPIO
- select V4L2_FWNODE
- select VIDEO_V4L2_SUBDEV_API
- select MEDIA_CONTROLLER
- help
- This driver supports the Maxim MAX9286 GMSL deserializer.
-
- To compile this driver as a module, choose M here: the
- module will be called max9286.
-
-comment "Video and audio decoders"
-
-config VIDEO_SAA717X
- tristate "Philips SAA7171/3/4 audio/video decoders"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the Philips SAA7171/3/4 audio/video decoders.
-
- To compile this driver as a module, choose M here: the
- module will be called saa717x.
-
-source "drivers/media/i2c/cx25840/Kconfig"
-
-endmenu
-
-menu "Video encoders"
- visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
-
-config VIDEO_SAA7127
- tristate "Philips SAA7127/9 digital video encoders"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the Philips SAA7127/9 digital video encoders.
-
- To compile this driver as a module, choose M here: the
- module will be called saa7127.
-
-config VIDEO_SAA7185
- tristate "Philips SAA7185 video encoder"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the Philips SAA7185 video encoder.
-
- To compile this driver as a module, choose M here: the
- module will be called saa7185.
-
-config VIDEO_ADV7170
- tristate "Analog Devices ADV7170 video encoder"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the Analog Devices ADV7170 video encoder driver
-
- To compile this driver as a module, choose M here: the
- module will be called adv7170.
-
-config VIDEO_ADV7175
- tristate "Analog Devices ADV7175 video encoder"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the Analog Devices ADV7175 video encoder driver
-
- To compile this driver as a module, choose M here: the
- module will be called adv7175.
-
-config VIDEO_ADV7343
- tristate "ADV7343 video encoder"
- depends on I2C
- select V4L2_ASYNC
- help
- Support for Analog Devices I2C bus based ADV7343 encoder.
-
- To compile this driver as a module, choose M here: the
- module will be called adv7343.
-
-config VIDEO_ADV7393
- tristate "ADV7393 video encoder"
- depends on I2C
- help
- Support for Analog Devices I2C bus based ADV7393 encoder.
-
- To compile this driver as a module, choose M here: the
- module will be called adv7393.
-
-config VIDEO_ADV7511
- tristate "Analog Devices ADV7511 encoder"
- depends on VIDEO_V4L2 && I2C
- depends on DRM_I2C_ADV7511=n || COMPILE_TEST
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select HDMI
- help
- Support for the Analog Devices ADV7511 video encoder.
-
- This is a Analog Devices HDMI transmitter.
-
- To compile this driver as a module, choose M here: the
- module will be called adv7511.
-
-config VIDEO_ADV7511_CEC
- bool "Enable Analog Devices ADV7511 CEC support"
- depends on VIDEO_ADV7511
- select CEC_CORE
- help
- When selected the adv7511 will support the optional
- HDMI CEC feature.
-
-config VIDEO_AD9389B
- tristate "Analog Devices AD9389B encoder"
- depends on VIDEO_V4L2 && I2C
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
-
- help
- Support for the Analog Devices AD9389B video encoder.
-
- This is a Analog Devices HDMI transmitter.
-
- To compile this driver as a module, choose M here: the
- module will be called ad9389b.
-
-config VIDEO_AK881X
- tristate "AK8813/AK8814 video encoders"
- depends on I2C
- help
- Video output driver for AKM AK8813 and AK8814 TV encoders
-
-config VIDEO_THS8200
- tristate "Texas Instruments THS8200 video encoder"
- depends on VIDEO_V4L2 && I2C
- select V4L2_ASYNC
- help
- Support for the Texas Instruments THS8200 video encoder.
-
- To compile this driver as a module, choose M here: the
- module will be called ths8200.
-endmenu
-
-menu "Video improvement chips"
- visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
-
-config VIDEO_UPD64031A
- tristate "NEC Electronics uPD64031A Ghost Reduction"
- depends on VIDEO_V4L2 && I2C
- select V4L2_ASYNC
- help
- Support for the NEC Electronics uPD64031A Ghost Reduction
- video chip. It is most often found in NTSC TV cards made for
- Japan and is used to reduce the 'ghosting' effect that can
- be present in analog TV broadcasts.
-
- To compile this driver as a module, choose M here: the
- module will be called upd64031a.
-
-config VIDEO_UPD64083
- tristate "NEC Electronics uPD64083 3-Dimensional Y/C separation"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the NEC Electronics uPD64083 3-Dimensional Y/C
- separation video chip. It is used to improve the quality of
- the colors of a composite signal.
-
- To compile this driver as a module, choose M here: the
- module will be called upd64083.
-endmenu
-
-menu "Audio/Video compression chips"
- visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
-
-config VIDEO_SAA6752HS
- tristate "Philips SAA6752HS MPEG-2 Audio/Video Encoder"
- depends on VIDEO_V4L2 && I2C
- select CRC32
- help
- Support for the Philips SAA6752HS MPEG-2 video and MPEG-audio/AC-3
- audio encoder with multiplexer.
-
- To compile this driver as a module, choose M here: the
- module will be called saa6752hs.
-
-endmenu
-
-menu "SDR tuner chips"
- visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
-
-config SDR_MAX2175
- tristate "Maxim 2175 RF to Bits tuner"
- depends on VIDEO_V4L2 && MEDIA_SDR_SUPPORT && I2C
- select REGMAP_I2C
- select V4L2_ASYNC
- help
- Support for Maxim 2175 tuner. It is an advanced analog/digital
- radio receiver with RF-to-Bits front-end designed for SDR solutions.
-
- To compile this driver as a module, choose M here; the
- module will be called max2175.
-
-
-endmenu
-
-menu "Miscellaneous helper chips"
- visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
-
-config VIDEO_THS7303
- tristate "THS7303/53 Video Amplifier"
- depends on VIDEO_V4L2 && I2C
- select V4L2_ASYNC
- help
- Support for TI THS7303/53 video amplifier
-
- To compile this driver as a module, choose M here: the
- module will be called ths7303.
-
-config VIDEO_M52790
- tristate "Mitsubishi M52790 A/V switch"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the Mitsubishi M52790 A/V switch.
-
- To compile this driver as a module, choose M here: the
- module will be called m52790.
-
-config VIDEO_I2C
- tristate "I2C transport video support"
- depends on VIDEO_V4L2 && I2C
- select VIDEOBUF2_VMALLOC
- imply HWMON
- help
- Enable the I2C transport video support which supports the
- following:
- * Panasonic AMG88xx Grid-Eye Sensors
- * Melexis MLX90640 Thermal Cameras
-
- To compile this driver as a module, choose M here: the
- module will be called video-i2c
-
-config VIDEO_ST_MIPID02
- tristate "STMicroelectronics MIPID02 CSI-2 to PARALLEL bridge"
- depends on I2C && VIDEO_V4L2
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
- help
- Support for STMicroelectronics MIPID02 CSI-2 to PARALLEL bridge.
- It is used to allow usage of CSI-2 sensor with PARALLEL port
- controller.
-
- To compile this driver as a module, choose M here: the
- module will be called st-mipid02.
-endmenu
-
-#
# V4L2 I2C drivers that are related with Camera support
#
@@ -735,7 +36,7 @@ config VIDEO_CCS_PLL
config VIDEO_HI556
tristate "Hynix Hi-556 sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
@@ -748,7 +49,7 @@ config VIDEO_HI556
config VIDEO_HI846
tristate "Hynix Hi-846 sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
@@ -759,9 +60,22 @@ config VIDEO_HI846
To compile this driver as a module, choose M here: the
module will be called hi846.
+config VIDEO_HI847
+ tristate "Hynix Hi-847 sensor support"
+ depends on I2C && VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the Hynix
+ Hi-847 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hi847.
+
config VIDEO_IMX208
tristate "Sony IMX208 sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_DEV && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Sony
@@ -772,7 +86,7 @@ config VIDEO_IMX208
config VIDEO_IMX214
tristate "Sony IMX214 sensor support"
- depends on GPIOLIB && I2C && VIDEO_V4L2
+ depends on GPIOLIB && I2C && VIDEO_DEV
select V4L2_FWNODE
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
@@ -786,7 +100,7 @@ config VIDEO_IMX214
config VIDEO_IMX219
tristate "Sony IMX219 sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
@@ -799,7 +113,7 @@ config VIDEO_IMX219
config VIDEO_IMX258
tristate "Sony IMX258 sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
help
@@ -811,7 +125,7 @@ config VIDEO_IMX258
config VIDEO_IMX274
tristate "Sony IMX274 sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select REGMAP_I2C
@@ -821,7 +135,7 @@ config VIDEO_IMX274
config VIDEO_IMX290
tristate "Sony IMX290 sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select REGMAP_I2C
@@ -835,7 +149,7 @@ config VIDEO_IMX290
config VIDEO_IMX319
tristate "Sony IMX319 sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
help
@@ -848,7 +162,7 @@ config VIDEO_IMX319
config VIDEO_IMX334
tristate "Sony IMX334 sensor support"
depends on OF_GPIO
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select VIDEO_V4L2_SUBDEV_API
select MEDIA_CONTROLLER
select V4L2_FWNODE
@@ -862,7 +176,7 @@ config VIDEO_IMX334
config VIDEO_IMX335
tristate "Sony IMX335 sensor support"
depends on OF_GPIO
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select VIDEO_V4L2_SUBDEV_API
select MEDIA_CONTROLLER
select V4L2_FWNODE
@@ -875,7 +189,7 @@ config VIDEO_IMX335
config VIDEO_IMX355
tristate "Sony IMX355 sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
help
@@ -888,7 +202,7 @@ config VIDEO_IMX355
config VIDEO_IMX412
tristate "Sony IMX412 sensor support"
depends on OF_GPIO
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select VIDEO_V4L2_SUBDEV_API
select MEDIA_CONTROLLER
select V4L2_FWNODE
@@ -899,9 +213,119 @@ config VIDEO_IMX412
To compile this driver as a module, choose M here: the
module will be called imx412.
+config VIDEO_MAX9271_LIB
+ tristate
+
+config VIDEO_MT9M001
+ tristate "mt9m001 support"
+ depends on I2C && VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ This driver supports MT9M001 cameras from Micron, monochrome
+ and colour models.
+
+config VIDEO_MT9M032
+ tristate "MT9M032 camera sensor support"
+ depends on I2C && VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select VIDEO_APTINA_PLL
+ help
+ This driver supports MT9M032 camera sensors from Aptina, monochrome
+ models only.
+
+config VIDEO_MT9M111
+ tristate "mt9m111, mt9m112 and mt9m131 support"
+ depends on I2C && VIDEO_DEV
+ select V4L2_FWNODE
+ help
+ This driver supports MT9M111, MT9M112 and MT9M131 cameras from
+	  Micron/Aptina.
+
+config VIDEO_MT9P031
+ tristate "Aptina MT9P031 support"
+ depends on I2C && VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select VIDEO_APTINA_PLL
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the Aptina
+ (Micron) mt9p031 5 Mpixel camera.
+
+config VIDEO_MT9T001
+ tristate "Aptina MT9T001 support"
+ depends on I2C && VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ This is a Video4Linux2 sensor driver for the Aptina
+	  (Micron) mt9t001 3 Mpixel camera.
+
+config VIDEO_MT9T112
+ tristate "Aptina MT9T111/MT9T112 support"
+ depends on I2C && VIDEO_DEV
+ help
+ This is a Video4Linux2 sensor driver for the Aptina
+ (Micron) MT9T111 and MT9T112 3 Mpixel camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mt9t112.
+
+config VIDEO_MT9V011
+ tristate "Micron mt9v011 sensor support"
+ depends on I2C && VIDEO_DEV
+ help
+ This is a Video4Linux2 sensor driver for the Micron
+	  mt9v011 1.3 Mpixel camera. It currently only works with the
+ em28xx driver.
+
+config VIDEO_MT9V032
+ tristate "Micron MT9V032 sensor support"
+ depends on I2C && VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select REGMAP_I2C
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the Micron
+ MT9V032 752x480 CMOS sensor.
+
+config VIDEO_MT9V111
+ tristate "Aptina MT9V111 sensor support"
+ depends on I2C && VIDEO_DEV
+ help
+ This is a Video4Linux2 sensor driver for the Aptina/Micron
+ MT9V111 sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mt9v111.
+
+config VIDEO_NOON010PC30
+ tristate "Siliconfile NOON010PC30 sensor support"
+ depends on I2C && VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ help
+	  This driver supports the NOON010PC30 CIF camera from Siliconfile.
+
+config VIDEO_OG01A1B
+ tristate "OmniVision OG01A1B sensor support"
+ depends on I2C && VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OG01A1B camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called og01a1b.
+
config VIDEO_OV02A10
tristate "OmniVision OV02A10 sensor support"
- depends on VIDEO_V4L2 && I2C
+ depends on VIDEO_DEV && I2C
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
@@ -912,9 +336,42 @@ config VIDEO_OV02A10
To compile this driver as a module, choose M here: the
module will be called ov02a10.
+config VIDEO_OV08D10
+ tristate "OmniVision OV08D10 sensor support"
+ depends on I2C && VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV08D10 camera sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov08d10.
+
+config VIDEO_OV13858
+ tristate "OmniVision OV13858 sensor support"
+ depends on I2C && VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV13858 camera.
+
+config VIDEO_OV13B10
+ tristate "OmniVision OV13B10 sensor support"
+ depends on I2C && VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV13B10 camera.
+
config VIDEO_OV2640
tristate "OmniVision OV2640 sensor support"
- depends on VIDEO_V4L2 && I2C
+ depends on VIDEO_DEV && I2C
help
This is a Video4Linux2 sensor driver for the OmniVision
OV2640 camera.
@@ -924,7 +381,7 @@ config VIDEO_OV2640
config VIDEO_OV2659
tristate "OmniVision OV2659 sensor support"
- depends on VIDEO_V4L2 && I2C && GPIOLIB
+ depends on VIDEO_DEV && I2C && GPIOLIB
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -935,7 +392,7 @@ config VIDEO_OV2659
config VIDEO_OV2680
tristate "OmniVision OV2680 sensor support"
- depends on VIDEO_V4L2 && I2C
+ depends on VIDEO_DEV && I2C
select MEDIA_CONTROLLER
select V4L2_FWNODE
help
@@ -947,7 +404,7 @@ config VIDEO_OV2680
config VIDEO_OV2685
tristate "OmniVision OV2685 sensor support"
- depends on VIDEO_V4L2 && I2C
+ depends on VIDEO_DEV && I2C
select MEDIA_CONTROLLER
select V4L2_FWNODE
help
@@ -959,7 +416,7 @@ config VIDEO_OV2685
config VIDEO_OV2740
tristate "OmniVision OV2740 sensor support"
- depends on VIDEO_V4L2 && I2C
+ depends on VIDEO_DEV && I2C
depends on ACPI || COMPILE_TEST
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
@@ -975,7 +432,7 @@ config VIDEO_OV2740
config VIDEO_OV5640
tristate "OmniVision OV5640 sensor support"
depends on OF
- depends on GPIOLIB && VIDEO_V4L2 && I2C
+ depends on GPIOLIB && VIDEO_DEV && I2C
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
@@ -986,7 +443,7 @@ config VIDEO_OV5640
config VIDEO_OV5645
tristate "OmniVision OV5645 sensor support"
depends on OF
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
@@ -999,7 +456,7 @@ config VIDEO_OV5645
config VIDEO_OV5647
tristate "OmniVision OV5647 sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
@@ -1012,7 +469,7 @@ config VIDEO_OV5647
config VIDEO_OV5648
tristate "OmniVision OV5648 sensor support"
- depends on I2C && PM && VIDEO_V4L2
+ depends on I2C && PM && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
@@ -1023,19 +480,9 @@ config VIDEO_OV5648
To compile this driver as a module, choose M here: the
module will be called ov5648.
-config VIDEO_OV6650
- tristate "OmniVision OV6650 sensor support"
- depends on I2C && VIDEO_V4L2
- help
- This is a Video4Linux2 sensor driver for the OmniVision
- OV6650 camera.
-
- To compile this driver as a module, choose M here: the
- module will be called ov6650.
-
config VIDEO_OV5670
tristate "OmniVision OV5670 sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
@@ -1048,7 +495,7 @@ config VIDEO_OV5670
config VIDEO_OV5675
tristate "OmniVision OV5675 sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
@@ -1061,7 +508,7 @@ config VIDEO_OV5675
config VIDEO_OV5693
tristate "OmniVision OV5693 sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -1072,7 +519,7 @@ config VIDEO_OV5693
config VIDEO_OV5695
tristate "OmniVision OV5695 sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -1081,9 +528,19 @@ config VIDEO_OV5695
To compile this driver as a module, choose M here: the
module will be called ov5695.
+config VIDEO_OV6650
+ tristate "OmniVision OV6650 sensor support"
+ depends on I2C && VIDEO_DEV
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV6650 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov6650.
+
config VIDEO_OV7251
tristate "OmniVision OV7251 sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
@@ -1094,21 +551,9 @@ config VIDEO_OV7251
To compile this driver as a module, choose M here: the
module will be called ov7251.
-config VIDEO_OV772X
- tristate "OmniVision OV772x sensor support"
- depends on I2C && VIDEO_V4L2
- select REGMAP_SCCB
- select V4L2_FWNODE
- help
- This is a Video4Linux2 sensor driver for the OmniVision
- OV772x camera.
-
- To compile this driver as a module, choose M here: the
- module will be called ov772x.
-
config VIDEO_OV7640
tristate "OmniVision OV7640 sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
help
This is a Video4Linux2 sensor driver for the OmniVision
OV7640 camera.
@@ -1118,16 +563,28 @@ config VIDEO_OV7640
config VIDEO_OV7670
tristate "OmniVision OV7670 sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV7670 VGA camera. It currently only works with the M88ALP01
controller.
+config VIDEO_OV772X
+ tristate "OmniVision OV772x sensor support"
+ depends on I2C && VIDEO_DEV
+ select REGMAP_SCCB
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV772x camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov772x.
+
config VIDEO_OV7740
tristate "OmniVision OV7740 sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select REGMAP_SCCB
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -1135,7 +592,7 @@ config VIDEO_OV7740
config VIDEO_OV8856
tristate "OmniVision OV8856 sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
@@ -1148,7 +605,7 @@ config VIDEO_OV8856
config VIDEO_OV8865
tristate "OmniVision OV8865 sensor support"
- depends on I2C && PM && VIDEO_V4L2
+ depends on I2C && PM && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
@@ -1162,7 +619,7 @@ config VIDEO_OV8865
config VIDEO_OV9282
tristate "OmniVision OV9282 sensor support"
depends on OF_GPIO
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select VIDEO_V4L2_SUBDEV_API
select MEDIA_CONTROLLER
select V4L2_FWNODE
@@ -1175,14 +632,14 @@ config VIDEO_OV9282
config VIDEO_OV9640
tristate "OmniVision OV9640 sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
help
This is a Video4Linux2 sensor driver for the OmniVision
OV9640 camera sensor.
config VIDEO_OV9650
tristate "OmniVision OV9650/OV9652 sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select REGMAP_SCCB
@@ -1192,7 +649,7 @@ config VIDEO_OV9650
config VIDEO_OV9734
tristate "OmniVision OV9734 sensor support"
- depends on VIDEO_V4L2 && I2C
+ depends on VIDEO_DEV && I2C
depends on ACPI || COMPILE_TEST
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
@@ -1204,141 +661,6 @@ config VIDEO_OV9734
To compile this driver as a module, choose M here: the
module's name is ov9734.
-config VIDEO_OV13858
- tristate "OmniVision OV13858 sensor support"
- depends on I2C && VIDEO_V4L2
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
- help
- This is a Video4Linux2 sensor driver for the OmniVision
- OV13858 camera.
-
-config VIDEO_OV13B10
- tristate "OmniVision OV13B10 sensor support"
- depends on I2C && VIDEO_V4L2
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
- help
- This is a Video4Linux2 sensor driver for the OmniVision
- OV13B10 camera.
-
-config VIDEO_VS6624
- tristate "ST VS6624 sensor support"
- depends on VIDEO_V4L2 && I2C
- help
- This is a Video4Linux2 sensor driver for the ST VS6624
- camera.
-
- To compile this driver as a module, choose M here: the
- module will be called vs6624.
-
-config VIDEO_MT9M001
- tristate "mt9m001 support"
- depends on I2C && VIDEO_V4L2
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- help
- This driver supports MT9M001 cameras from Micron, monochrome
- and colour models.
-
-config VIDEO_MT9M032
- tristate "MT9M032 camera sensor support"
- depends on I2C && VIDEO_V4L2
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select VIDEO_APTINA_PLL
- help
- This driver supports MT9M032 camera sensors from Aptina, monochrome
- models only.
-
-config VIDEO_MT9M111
- tristate "mt9m111, mt9m112 and mt9m131 support"
- depends on I2C && VIDEO_V4L2
- select V4L2_FWNODE
- help
- This driver supports MT9M111, MT9M112 and MT9M131 cameras from
- Micron/Aptina
-
-config VIDEO_MT9P031
- tristate "Aptina MT9P031 support"
- depends on I2C && VIDEO_V4L2
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select VIDEO_APTINA_PLL
- select V4L2_FWNODE
- help
- This is a Video4Linux2 sensor driver for the Aptina
- (Micron) mt9p031 5 Mpixel camera.
-
-config VIDEO_MT9T001
- tristate "Aptina MT9T001 support"
- depends on I2C && VIDEO_V4L2
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- help
- This is a Video4Linux2 sensor driver for the Aptina
- (Micron) mt0t001 3 Mpixel camera.
-
-config VIDEO_MT9T112
- tristate "Aptina MT9T111/MT9T112 support"
- depends on I2C && VIDEO_V4L2
- help
- This is a Video4Linux2 sensor driver for the Aptina
- (Micron) MT9T111 and MT9T112 3 Mpixel camera.
-
- To compile this driver as a module, choose M here: the
- module will be called mt9t112.
-
-config VIDEO_MT9V011
- tristate "Micron mt9v011 sensor support"
- depends on I2C && VIDEO_V4L2
- help
- This is a Video4Linux2 sensor driver for the Micron
- mt0v011 1.3 Mpixel camera. It currently only works with the
- em28xx driver.
-
-config VIDEO_MT9V032
- tristate "Micron MT9V032 sensor support"
- depends on I2C && VIDEO_V4L2
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select REGMAP_I2C
- select V4L2_FWNODE
- help
- This is a Video4Linux2 sensor driver for the Micron
- MT9V032 752x480 CMOS sensor.
-
-config VIDEO_MT9V111
- tristate "Aptina MT9V111 sensor support"
- depends on I2C && VIDEO_V4L2
- help
- This is a Video4Linux2 sensor driver for the Aptina/Micron
- MT9V111 sensor.
-
- To compile this driver as a module, choose M here: the
- module will be called mt9v111.
-
-config VIDEO_SR030PC30
- tristate "Siliconfile SR030PC30 sensor support"
- depends on I2C && VIDEO_V4L2
- help
- This driver supports SR030PC30 VGA camera from Siliconfile
-
-config VIDEO_NOON010PC30
- tristate "Siliconfile NOON010PC30 sensor support"
- depends on I2C && VIDEO_V4L2
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- help
- This driver supports NOON010PC30 CIF camera from Siliconfile
-
-source "drivers/media/i2c/m5mols/Kconfig"
-
-config VIDEO_MAX9271_LIB
- tristate
-
config VIDEO_RDACM20
tristate "IMI RDACM20 camera support"
depends on I2C
@@ -1369,7 +691,7 @@ config VIDEO_RDACM21
config VIDEO_RJ54N1
tristate "Sharp RJ54N1CB0C sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
help
This is a V4L2 sensor driver for Sharp RJ54N1CB0C CMOS image
sensor.
@@ -1377,27 +699,19 @@ config VIDEO_RJ54N1
To compile this driver as a module, choose M here: the
module will be called rj54n1.
-config VIDEO_S5K6AA
- tristate "Samsung S5K6AAFX sensor support"
- depends on I2C && VIDEO_V4L2
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- help
- This is a V4L2 sensor driver for Samsung S5K6AA(FX) 1.3M
- camera sensor with an embedded SoC image signal processor.
-
-config VIDEO_S5K6A3
- tristate "Samsung S5K6A3 sensor support"
- depends on I2C && VIDEO_V4L2
+config VIDEO_S5C73M3
+ tristate "Samsung S5C73M3 sensor support"
+ depends on I2C && SPI && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
help
- This is a V4L2 sensor driver for Samsung S5K6A3 raw
- camera sensor.
+ This is a V4L2 sensor driver for Samsung S5C73M3
+ 8 Mpixel camera.
config VIDEO_S5K4ECGX
tristate "Samsung S5K4ECGX sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select CRC32
@@ -1407,7 +721,7 @@ config VIDEO_S5K4ECGX
config VIDEO_S5K5BAF
tristate "Samsung S5K5BAF sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
@@ -1415,18 +729,43 @@ config VIDEO_S5K5BAF
This is a V4L2 sensor driver for Samsung S5K5BAF 2M
camera sensor with an embedded SoC image signal processor.
-source "drivers/media/i2c/ccs/Kconfig"
-source "drivers/media/i2c/et8ek8/Kconfig"
+config VIDEO_S5K6A3
+ tristate "Samsung S5K6A3 sensor support"
+ depends on I2C && VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ This is a V4L2 sensor driver for Samsung S5K6A3 raw
+ camera sensor.
-config VIDEO_S5C73M3
- tristate "Samsung S5C73M3 sensor support"
- depends on I2C && SPI && VIDEO_V4L2
+config VIDEO_S5K6AA
+ tristate "Samsung S5K6AAFX sensor support"
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
- This is a V4L2 sensor driver for Samsung S5C73M3
- 8 Mpixel camera.
+ This is a V4L2 sensor driver for Samsung S5K6AA(FX) 1.3M
+ camera sensor with an embedded SoC image signal processor.
+
+config VIDEO_SR030PC30
+ tristate "Siliconfile SR030PC30 sensor support"
+ depends on I2C && VIDEO_DEV
+ help
+	  This driver supports the SR030PC30 VGA camera from Siliconfile.
+
+config VIDEO_VS6624
+ tristate "ST VS6624 sensor support"
+ depends on VIDEO_DEV && I2C
+ help
+ This is a Video4Linux2 sensor driver for the ST VS6624
+ camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vs6624.
+
+source "drivers/media/i2c/ccs/Kconfig"
+source "drivers/media/i2c/et8ek8/Kconfig"
+source "drivers/media/i2c/m5mols/Kconfig"
endmenu
@@ -1435,7 +774,7 @@ menu "Lens drivers"
config VIDEO_AD5820
tristate "AD5820 lens voice coil support"
- depends on GPIOLIB && I2C && VIDEO_V4L2
+ depends on GPIOLIB && I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select V4L2_ASYNC
help
@@ -1444,7 +783,7 @@ config VIDEO_AD5820
config VIDEO_AK7375
tristate "AK7375 lens voice coil support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_ASYNC
@@ -1456,7 +795,7 @@ config VIDEO_AK7375
config VIDEO_DW9714
tristate "DW9714 lens voice coil support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_ASYNC
@@ -1468,7 +807,7 @@ config VIDEO_DW9714
config VIDEO_DW9768
tristate "DW9768 lens voice coil support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
@@ -1480,7 +819,7 @@ config VIDEO_DW9768
config VIDEO_DW9807_VCM
tristate "DW9807 lens voice coil support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_ASYNC
@@ -1497,7 +836,7 @@ menu "Flash devices"
config VIDEO_ADP1653
tristate "ADP1653 flash support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select V4L2_ASYNC
help
@@ -1506,7 +845,7 @@ config VIDEO_ADP1653
config VIDEO_LM3560
tristate "LM3560 dual flash driver support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select REGMAP_I2C
select V4L2_ASYNC
@@ -1516,13 +855,727 @@ config VIDEO_LM3560
config VIDEO_LM3646
tristate "LM3646 dual flash driver support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select REGMAP_I2C
select V4L2_ASYNC
help
This is a driver for the lm3646 dual flash controllers. It controls
flash, torch LEDs.
+
+endmenu
+
+#
+# V4L2 I2C drivers that aren't related with Camera support
+#
+
+comment "audio, video and radio I2C drivers auto-selected by 'Autoselect ancillary drivers'"
+ depends on MEDIA_HIDE_ANCILLARY_SUBDRV
+#
+# Encoder / Decoder module configuration
+#
+
+menu "Audio decoders, processors and mixers"
+ visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
+
+config VIDEO_CS3308
+ tristate "Cirrus Logic CS3308 audio ADC"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the Cirrus Logic CS3308 High Performance 8-Channel
+	  Analog Volume Control.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cs3308.
+
+config VIDEO_CS5345
+ tristate "Cirrus Logic CS5345 audio ADC"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the Cirrus Logic CS5345 24-bit, 192 kHz
+ stereo A/D converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cs5345.
+
+config VIDEO_CS53L32A
+ tristate "Cirrus Logic CS53L32A audio ADC"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the Cirrus Logic CS53L32A low voltage
+ stereo A/D converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cs53l32a.
+
+config VIDEO_MSP3400
+ tristate "Micronas MSP34xx audio decoders"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the Micronas MSP34xx series of audio decoders.
+
+ To compile this driver as a module, choose M here: the
+ module will be called msp3400.
+
+config VIDEO_SONY_BTF_MPX
+ tristate "Sony BTF's internal MPX"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the internal MPX of the Sony BTF-PG472Z tuner.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sony-btf-mpx.
+
+config VIDEO_TDA1997X
+ tristate "NXP TDA1997x HDMI receiver"
+ depends on VIDEO_DEV && I2C
+ depends on SND_SOC
+ select HDMI
+ select SND_PCM
+ select V4L2_FWNODE
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ V4L2 subdevice driver for the NXP TDA1997x HDMI receivers.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tda1997x.
+
+config VIDEO_TDA7432
+ tristate "Philips TDA7432 audio processor"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for tda7432 audio decoder chip found on some bt8xx boards.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tda7432.
+
+config VIDEO_TDA9840
+ tristate "Philips TDA9840 audio processor"
+ depends on I2C
+ help
+ Support for tda9840 audio decoder chip found on some Zoran boards.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tda9840.
+
+config VIDEO_TEA6415C
+ tristate "Philips TEA6415C audio processor"
+ depends on I2C
+ help
+ Support for tea6415c audio decoder chip found on some bt8xx boards.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tea6415c.
+
+config VIDEO_TEA6420
+ tristate "Philips TEA6420 audio processor"
+ depends on I2C
+ help
+ Support for tea6420 audio decoder chip found on some bt8xx boards.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tea6420.
+
+config VIDEO_TLV320AIC23B
+ tristate "Texas Instruments TLV320AIC23B audio codec"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the Texas Instruments TLV320AIC23B audio codec.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tlv320aic23b.
+
+config VIDEO_TVAUDIO
+ tristate "Simple audio decoder chips"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for several audio decoder chips found on some bt8xx boards:
+ Philips: tda9840, tda9873h, tda9874h/a, tda9850, tda985x, tea6300,
+ tea6320, tea6420, tda8425, ta8874z.
+ Microchip: pic16c54 based design on ProVideo PV951 board.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tvaudio.
+
+config VIDEO_UDA1342
+ tristate "Philips UDA1342 audio codec"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the Philips UDA1342 audio codec.
+
+ To compile this driver as a module, choose M here: the
+ module will be called uda1342.
+
+config VIDEO_VP27SMPX
+ tristate "Panasonic VP27's internal MPX"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the internal MPX of the Panasonic VP27s tuner.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vp27smpx.
+
+config VIDEO_WM8739
+ tristate "Wolfson Microelectronics WM8739 stereo audio ADC"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the Wolfson Microelectronics WM8739
+ stereo A/D Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called wm8739.
+
+config VIDEO_WM8775
+ tristate "Wolfson Microelectronics WM8775 audio ADC with input mixer"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the Wolfson Microelectronics WM8775 high
+ performance stereo A/D Converter with a 4 channel input mixer.
+
+ To compile this driver as a module, choose M here: the
+ module will be called wm8775.
+
+endmenu
+
+menu "RDS decoders"
+ visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
+
+config VIDEO_SAA6588
+ tristate "SAA6588 Radio Chip RDS decoder support"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for this Radio Data System (RDS) decoder. This allows
+ seeing radio station identification transmitted using this
+ standard.
+
+ To compile this driver as a module, choose M here: the
+ module will be called saa6588.
+
+endmenu
+
+menu "Video decoders"
+ visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
+
+config VIDEO_ADV7180
+ tristate "Analog Devices ADV7180 decoder"
+ depends on GPIOLIB && VIDEO_DEV && I2C
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_ASYNC
+ help
+ Support for the Analog Devices ADV7180 video decoder.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adv7180.
+
+config VIDEO_ADV7183
+ tristate "Analog Devices ADV7183 decoder"
+ depends on VIDEO_DEV && I2C
+ help
+	  V4L2 subdevice driver for the Analog Devices
+ ADV7183 video decoder.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adv7183.
+
+config VIDEO_ADV748X
+ tristate "Analog Devices ADV748x decoder"
+ depends on VIDEO_DEV && I2C
+ depends on OF
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select REGMAP_I2C
+ select V4L2_FWNODE
+ help
+ V4L2 subdevice driver for the Analog Devices
+ ADV7481 and ADV7482 HDMI/Analog video decoders.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adv748x.
+
+config VIDEO_ADV7604
+ tristate "Analog Devices ADV7604 decoder"
+ depends on VIDEO_DEV && I2C
+ depends on GPIOLIB || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select REGMAP_I2C
+ select HDMI
+ select V4L2_FWNODE
+ help
+ Support for the Analog Devices ADV7604 video decoder.
+
+	  This is an Analog Devices Component/Graphics Digitizer
+ with 4:1 Multiplexed HDMI Receiver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adv7604.
+
+config VIDEO_ADV7604_CEC
+ bool "Enable Analog Devices ADV7604 CEC support"
+ depends on VIDEO_ADV7604
+ select CEC_CORE
+ help
+ When selected the adv7604 will support the optional
+ HDMI CEC feature.
+
+config VIDEO_ADV7842
+ tristate "Analog Devices ADV7842 decoder"
+ depends on VIDEO_DEV && I2C
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select HDMI
+ help
+ Support for the Analog Devices ADV7842 video decoder.
+
+ This is an Analog Devices Component/Graphics/SD Digitizer
+ with 2:1 Multiplexed HDMI Receiver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adv7842.
+
+config VIDEO_ADV7842_CEC
+ bool "Enable Analog Devices ADV7842 CEC support"
+ depends on VIDEO_ADV7842
+ select CEC_CORE
+ help
+ When selected the adv7842 will support the optional
+ HDMI CEC feature.
+
+config VIDEO_BT819
+ tristate "BT819A VideoStream decoder"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for BT819A video decoder.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bt819.
+
+config VIDEO_BT856
+ tristate "BT856 VideoStream decoder"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for BT856 video decoder.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bt856.
+
+config VIDEO_BT866
+ tristate "BT866 VideoStream decoder"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for BT866 video decoder.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bt866.
+
+config VIDEO_ISL7998X
+ tristate "Intersil ISL7998x video decoder"
+ depends on VIDEO_DEV && I2C
+ depends on OF_GPIO
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ Support for the Intersil ISL7998x analog to MIPI CSI-2 or
+ BT.656 video decoder.
+
+ To compile this driver as a module, choose M here: the
+ module will be called isl7998x.
+
+config VIDEO_KS0127
+ tristate "KS0127 video decoder"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for KS0127 video decoder.
+
+ This chip is used on AverMedia AVS6EYES Zoran-based MJPEG
+ cards.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ks0127.
+
+config VIDEO_MAX9286
+ tristate "Maxim MAX9286 GMSL deserializer support"
+ depends on I2C && I2C_MUX
+ depends on VIDEO_DEV
+ depends on OF_GPIO
+ select V4L2_FWNODE
+ select VIDEO_V4L2_SUBDEV_API
+ select MEDIA_CONTROLLER
+ help
+ This driver supports the Maxim MAX9286 GMSL deserializer.
+
+ To compile this driver as a module, choose M here: the
+ module will be called max9286.
+
+config VIDEO_ML86V7667
+ tristate "OKI ML86V7667 video decoder"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the OKI Semiconductor ML86V7667 video decoder.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ml86v7667.
+
+config VIDEO_SAA7110
+ tristate "Philips SAA7110 video decoder"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the Philips SAA7110 video decoder.
+
+ To compile this driver as a module, choose M here: the
+ module will be called saa7110.
+
+config VIDEO_SAA711X
+ tristate "Philips SAA7111/3/4/5 video decoders"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the Philips SAA7111/3/4/5 video decoders.
+
+ To compile this driver as a module, choose M here: the
+ module will be called saa7115.
+
+config VIDEO_TC358743
+ tristate "Toshiba TC358743 decoder"
+ depends on VIDEO_DEV && I2C
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select HDMI
+ select V4L2_FWNODE
+ help
+ Support for the Toshiba TC358743 HDMI to MIPI CSI-2 bridge.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tc358743.
+
+config VIDEO_TC358743_CEC
+ bool "Enable Toshiba TC358743 CEC support"
+ depends on VIDEO_TC358743
+ select CEC_CORE
+ help
+ When selected the tc358743 will support the optional
+ HDMI CEC feature.
+
+config VIDEO_TVP514X
+ tristate "Texas Instruments TVP514x video decoder"
+ depends on VIDEO_DEV && I2C
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 driver for the TI TVP5146/47 video
+ decoder. It currently works with the TI OMAP3 camera
+ controller.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tvp514x.
+
+config VIDEO_TVP5150
+ tristate "Texas Instruments TVP5150 video decoder"
+ depends on VIDEO_DEV && I2C
+ select V4L2_FWNODE
+ select REGMAP_I2C
+ help
+ Support for the Texas Instruments TVP5150 video decoder.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tvp5150.
+
+config VIDEO_TVP7002
+ tristate "Texas Instruments TVP7002 video decoder"
+ depends on VIDEO_DEV && I2C
+ select V4L2_FWNODE
+ help
+ Support for the Texas Instruments TVP7002 video decoder.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tvp7002.
+
+config VIDEO_TW2804
+ tristate "Techwell TW2804 multiple video decoder"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the Techwell tw2804 multiple video decoder.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tw2804.
+
+config VIDEO_TW9903
+ tristate "Techwell TW9903 video decoder"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the Techwell tw9903 multi-standard video decoder
+ with high quality down scaler.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tw9903.
+
+config VIDEO_TW9906
+ tristate "Techwell TW9906 video decoder"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the Techwell tw9906 enhanced multi-standard comb filter
+ video decoder with YCbCr input support.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tw9906.
+
+config VIDEO_TW9910
+ tristate "Techwell TW9910 video decoder"
+ depends on VIDEO_DEV && I2C
+ select V4L2_ASYNC
+ help
+ Support for Techwell TW9910 NTSC/PAL/SECAM video decoder.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tw9910.
+
+config VIDEO_VPX3220
+ tristate "vpx3220a, vpx3216b & vpx3214c video decoders"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for VPX322x video decoders.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vpx3220.
+
+comment "Video and audio decoders"
+
+config VIDEO_SAA717X
+ tristate "Philips SAA7171/3/4 audio/video decoders"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the Philips SAA7171/3/4 audio/video decoders.
+
+ To compile this driver as a module, choose M here: the
+ module will be called saa717x.
+
+source "drivers/media/i2c/cx25840/Kconfig"
+
+endmenu
+
+menu "Video encoders"
+ visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
+
+config VIDEO_AD9389B
+ tristate "Analog Devices AD9389B encoder"
+ depends on VIDEO_DEV && I2C
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ Support for the Analog Devices AD9389B video encoder.
+
+ This is an Analog Devices HDMI transmitter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad9389b.
+
+config VIDEO_ADV7170
+ tristate "Analog Devices ADV7170 video encoder"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the Analog Devices ADV7170 video encoder.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adv7170.
+
+config VIDEO_ADV7175
+ tristate "Analog Devices ADV7175 video encoder"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the Analog Devices ADV7175 video encoder.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adv7175.
+
+config VIDEO_ADV7343
+ tristate "ADV7343 video encoder"
+ depends on I2C
+ select V4L2_ASYNC
+ help
+ Support for Analog Devices I2C bus based ADV7343 encoder.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adv7343.
+
+config VIDEO_ADV7393
+ tristate "ADV7393 video encoder"
+ depends on I2C
+ help
+ Support for Analog Devices I2C bus based ADV7393 encoder.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adv7393.
+
+config VIDEO_ADV7511
+ tristate "Analog Devices ADV7511 encoder"
+ depends on VIDEO_DEV && I2C
+ depends on DRM_I2C_ADV7511=n || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select HDMI
+ help
+ Support for the Analog Devices ADV7511 video encoder.
+
+ This is an Analog Devices HDMI transmitter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adv7511.
+
+config VIDEO_ADV7511_CEC
+ bool "Enable Analog Devices ADV7511 CEC support"
+ depends on VIDEO_ADV7511
+ select CEC_CORE
+ help
+ When selected the adv7511 will support the optional
+ HDMI CEC feature.
+
+config VIDEO_AK881X
+ tristate "AK8813/AK8814 video encoders"
+ depends on I2C
+ help
+ Video output driver for the AKM AK8813 and AK8814 TV encoders.
+
+config VIDEO_SAA7127
+ tristate "Philips SAA7127/9 digital video encoders"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the Philips SAA7127/9 digital video encoders.
+
+ To compile this driver as a module, choose M here: the
+ module will be called saa7127.
+
+config VIDEO_SAA7185
+ tristate "Philips SAA7185 video encoder"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the Philips SAA7185 video encoder.
+
+ To compile this driver as a module, choose M here: the
+ module will be called saa7185.
+
+config VIDEO_THS8200
+ tristate "Texas Instruments THS8200 video encoder"
+ depends on VIDEO_DEV && I2C
+ select V4L2_ASYNC
+ help
+ Support for the Texas Instruments THS8200 video encoder.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ths8200.
+
+endmenu
+
+menu "Video improvement chips"
+ visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
+
+config VIDEO_UPD64031A
+ tristate "NEC Electronics uPD64031A Ghost Reduction"
+ depends on VIDEO_DEV && I2C
+ select V4L2_ASYNC
+ help
+ Support for the NEC Electronics uPD64031A Ghost Reduction
+ video chip. It is most often found in NTSC TV cards made for
+ Japan and is used to reduce the 'ghosting' effect that can
+ be present in analog TV broadcasts.
+
+ To compile this driver as a module, choose M here: the
+ module will be called upd64031a.
+
+config VIDEO_UPD64083
+ tristate "NEC Electronics uPD64083 3-Dimensional Y/C separation"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the NEC Electronics uPD64083 3-Dimensional Y/C
+ separation video chip. It is used to improve the quality of
+ the colors of a composite signal.
+
+ To compile this driver as a module, choose M here: the
+ module will be called upd64083.
+
+endmenu
+
+menu "Audio/Video compression chips"
+ visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
+
+config VIDEO_SAA6752HS
+ tristate "Philips SAA6752HS MPEG-2 Audio/Video Encoder"
+ depends on VIDEO_DEV && I2C
+ select CRC32
+ help
+ Support for the Philips SAA6752HS MPEG-2 video and MPEG-audio/AC-3
+ audio encoder with multiplexer.
+
+ To compile this driver as a module, choose M here: the
+ module will be called saa6752hs.
+
+endmenu
+
+menu "SDR tuner chips"
+ visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
+
+config SDR_MAX2175
+ tristate "Maxim 2175 RF to Bits tuner"
+ depends on VIDEO_DEV && MEDIA_SDR_SUPPORT && I2C
+ select REGMAP_I2C
+ select V4L2_ASYNC
+ help
+ Support for Maxim 2175 tuner. It is an advanced analog/digital
+ radio receiver with RF-to-Bits front-end designed for SDR solutions.
+
+ To compile this driver as a module, choose M here: the
+ module will be called max2175.
+
+endmenu
+
+menu "Miscellaneous helper chips"
+ visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
+
+config VIDEO_I2C
+ tristate "I2C transport video support"
+ depends on VIDEO_DEV && I2C
+ select VIDEOBUF2_VMALLOC
+ imply HWMON
+ help
+ Enable the I2C transport video support, which handles the
+ following devices:
+ * Panasonic AMG88xx Grid-Eye Sensors
+ * Melexis MLX90640 Thermal Cameras
+
+ To compile this driver as a module, choose M here: the
+ module will be called video-i2c.
+
+config VIDEO_M52790
+ tristate "Mitsubishi M52790 A/V switch"
+ depends on VIDEO_DEV && I2C
+ help
+ Support for the Mitsubishi M52790 A/V switch.
+
+ To compile this driver as a module, choose M here: the
+ module will be called m52790.
+
+config VIDEO_ST_MIPID02
+ tristate "STMicroelectronics MIPID02 CSI-2 to PARALLEL bridge"
+ depends on I2C && VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ Support for STMicroelectronics MIPID02 CSI-2 to PARALLEL bridge.
+ It allows a CSI-2 sensor to be used with a PARALLEL port
+ controller.
+
+ To compile this driver as a module, choose M here: the
+ module will be called st-mipid02.
+
+config VIDEO_THS7303
+ tristate "THS7303/53 Video Amplifier"
+ depends on VIDEO_DEV && I2C
+ select V4L2_ASYNC
+ help
+ Support for the TI THS7303/53 video amplifier.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ths7303.
+
endmenu
-endif # VIDEO_V4L2
+endif # VIDEO_DEV
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index b01f6cd05ee8..3e1696963e7f 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -1,31 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
-msp3400-objs := msp3400-driver.o msp3400-kthreads.o
-obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o
-obj-$(CONFIG_VIDEO_CCS) += ccs/
-obj-$(CONFIG_VIDEO_ET8EK8) += et8ek8/
-obj-$(CONFIG_VIDEO_CX25840) += cx25840/
-obj-$(CONFIG_VIDEO_M5MOLS) += m5mols/
+msp3400-objs := msp3400-driver.o msp3400-kthreads.o
-obj-$(CONFIG_VIDEO_APTINA_PLL) += aptina-pll.o
-obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o
-obj-$(CONFIG_VIDEO_TDA7432) += tda7432.o
-obj-$(CONFIG_VIDEO_SAA6588) += saa6588.o
-obj-$(CONFIG_VIDEO_TDA9840) += tda9840.o
-obj-$(CONFIG_VIDEO_TDA1997X) += tda1997x.o
-obj-$(CONFIG_VIDEO_TEA6415C) += tea6415c.o
-obj-$(CONFIG_VIDEO_TEA6420) += tea6420.o
-obj-$(CONFIG_VIDEO_SAA7110) += saa7110.o
-obj-$(CONFIG_VIDEO_SAA711X) += saa7115.o
-obj-$(CONFIG_VIDEO_SAA717X) += saa717x.o
-obj-$(CONFIG_VIDEO_SAA7127) += saa7127.o
-obj-$(CONFIG_VIDEO_SAA7185) += saa7185.o
-obj-$(CONFIG_VIDEO_SAA6752HS) += saa6752hs.o
-obj-$(CONFIG_VIDEO_AD5820) += ad5820.o
-obj-$(CONFIG_VIDEO_AK7375) += ak7375.o
-obj-$(CONFIG_VIDEO_DW9714) += dw9714.o
-obj-$(CONFIG_VIDEO_DW9768) += dw9768.o
-obj-$(CONFIG_VIDEO_DW9807_VCM) += dw9807-vcm.o
+obj-$(CONFIG_SDR_MAX2175) += max2175.o
+obj-$(CONFIG_VIDEO_AD5820) += ad5820.o
+obj-$(CONFIG_VIDEO_AD9389B) += ad9389b.o
+obj-$(CONFIG_VIDEO_ADP1653) += adp1653.o
obj-$(CONFIG_VIDEO_ADV7170) += adv7170.o
obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o
obj-$(CONFIG_VIDEO_ADV7180) += adv7180.o
@@ -33,39 +13,68 @@ obj-$(CONFIG_VIDEO_ADV7183) += adv7183.o
obj-$(CONFIG_VIDEO_ADV7343) += adv7343.o
obj-$(CONFIG_VIDEO_ADV7393) += adv7393.o
obj-$(CONFIG_VIDEO_ADV748X) += adv748x/
+obj-$(CONFIG_VIDEO_ADV7511) += adv7511-v4l2.o
obj-$(CONFIG_VIDEO_ADV7604) += adv7604.o
obj-$(CONFIG_VIDEO_ADV7842) += adv7842.o
-obj-$(CONFIG_VIDEO_AD9389B) += ad9389b.o
-obj-$(CONFIG_VIDEO_ADV7511) += adv7511-v4l2.o
-obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
-obj-$(CONFIG_VIDEO_VS6624) += vs6624.o
+obj-$(CONFIG_VIDEO_AK7375) += ak7375.o
+obj-$(CONFIG_VIDEO_AK881X) += ak881x.o
+obj-$(CONFIG_VIDEO_APTINA_PLL) += aptina-pll.o
obj-$(CONFIG_VIDEO_BT819) += bt819.o
obj-$(CONFIG_VIDEO_BT856) += bt856.o
obj-$(CONFIG_VIDEO_BT866) += bt866.o
-obj-$(CONFIG_VIDEO_KS0127) += ks0127.o
-obj-$(CONFIG_VIDEO_THS7303) += ths7303.o
-obj-$(CONFIG_VIDEO_THS8200) += ths8200.o
-obj-$(CONFIG_VIDEO_TVP5150) += tvp5150.o
-obj-$(CONFIG_VIDEO_TVP514X) += tvp514x.o
-obj-$(CONFIG_VIDEO_TVP7002) += tvp7002.o
-obj-$(CONFIG_VIDEO_TW2804) += tw2804.o
-obj-$(CONFIG_VIDEO_TW9903) += tw9903.o
-obj-$(CONFIG_VIDEO_TW9906) += tw9906.o
-obj-$(CONFIG_VIDEO_TW9910) += tw9910.o
+obj-$(CONFIG_VIDEO_CCS) += ccs/
+obj-$(CONFIG_VIDEO_CCS_PLL) += ccs-pll.o
obj-$(CONFIG_VIDEO_CS3308) += cs3308.o
obj-$(CONFIG_VIDEO_CS5345) += cs5345.o
obj-$(CONFIG_VIDEO_CS53L32A) += cs53l32a.o
+obj-$(CONFIG_VIDEO_CX25840) += cx25840/
+obj-$(CONFIG_VIDEO_DW9714) += dw9714.o
+obj-$(CONFIG_VIDEO_DW9768) += dw9768.o
+obj-$(CONFIG_VIDEO_DW9807_VCM) += dw9807-vcm.o
+obj-$(CONFIG_VIDEO_ET8EK8) += et8ek8/
+obj-$(CONFIG_VIDEO_HI556) += hi556.o
+obj-$(CONFIG_VIDEO_HI846) += hi846.o
+obj-$(CONFIG_VIDEO_HI847) += hi847.o
+obj-$(CONFIG_VIDEO_I2C) += video-i2c.o
+obj-$(CONFIG_VIDEO_IMX208) += imx208.o
+obj-$(CONFIG_VIDEO_IMX214) += imx214.o
+obj-$(CONFIG_VIDEO_IMX219) += imx219.o
+obj-$(CONFIG_VIDEO_IMX258) += imx258.o
+obj-$(CONFIG_VIDEO_IMX274) += imx274.o
+obj-$(CONFIG_VIDEO_IMX290) += imx290.o
+obj-$(CONFIG_VIDEO_IMX319) += imx319.o
+obj-$(CONFIG_VIDEO_IMX334) += imx334.o
+obj-$(CONFIG_VIDEO_IMX335) += imx335.o
+obj-$(CONFIG_VIDEO_IMX355) += imx355.o
+obj-$(CONFIG_VIDEO_IMX412) += imx412.o
+obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
+obj-$(CONFIG_VIDEO_ISL7998X) += isl7998x.o
+obj-$(CONFIG_VIDEO_KS0127) += ks0127.o
+obj-$(CONFIG_VIDEO_LM3560) += lm3560.o
+obj-$(CONFIG_VIDEO_LM3646) += lm3646.o
obj-$(CONFIG_VIDEO_M52790) += m52790.o
-obj-$(CONFIG_VIDEO_TLV320AIC23B) += tlv320aic23b.o
-obj-$(CONFIG_VIDEO_UDA1342) += uda1342.o
-obj-$(CONFIG_VIDEO_WM8775) += wm8775.o
-obj-$(CONFIG_VIDEO_WM8739) += wm8739.o
-obj-$(CONFIG_VIDEO_VP27SMPX) += vp27smpx.o
-obj-$(CONFIG_VIDEO_SONY_BTF_MPX) += sony-btf-mpx.o
-obj-$(CONFIG_VIDEO_UPD64031A) += upd64031a.o
-obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
+obj-$(CONFIG_VIDEO_M5MOLS) += m5mols/
+obj-$(CONFIG_VIDEO_MAX9271_LIB) += max9271.o
+obj-$(CONFIG_VIDEO_MAX9286) += max9286.o
+obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o
+obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o
+obj-$(CONFIG_VIDEO_MT9M001) += mt9m001.o
+obj-$(CONFIG_VIDEO_MT9M032) += mt9m032.o
+obj-$(CONFIG_VIDEO_MT9M111) += mt9m111.o
+obj-$(CONFIG_VIDEO_MT9P031) += mt9p031.o
+obj-$(CONFIG_VIDEO_MT9T001) += mt9t001.o
+obj-$(CONFIG_VIDEO_MT9T112) += mt9t112.o
+obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
+obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o
+obj-$(CONFIG_VIDEO_MT9V111) += mt9v111.o
+obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o
+obj-$(CONFIG_VIDEO_OG01A1B) += og01a1b.o
obj-$(CONFIG_VIDEO_OV02A10) += ov02a10.o
+obj-$(CONFIG_VIDEO_OV08D10) += ov08d10.o
+obj-$(CONFIG_VIDEO_OV13858) += ov13858.o
+obj-$(CONFIG_VIDEO_OV13B10) += ov13b10.o
obj-$(CONFIG_VIDEO_OV2640) += ov2640.o
+obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
obj-$(CONFIG_VIDEO_OV2680) += ov2680.o
obj-$(CONFIG_VIDEO_OV2685) += ov2685.o
obj-$(CONFIG_VIDEO_OV2740) += ov2740.o
@@ -89,51 +98,46 @@ obj-$(CONFIG_VIDEO_OV9282) += ov9282.o
obj-$(CONFIG_VIDEO_OV9640) += ov9640.o
obj-$(CONFIG_VIDEO_OV9650) += ov9650.o
obj-$(CONFIG_VIDEO_OV9734) += ov9734.o
-obj-$(CONFIG_VIDEO_OV13858) += ov13858.o
-obj-$(CONFIG_VIDEO_OV13B10) += ov13b10.o
-obj-$(CONFIG_VIDEO_MT9M001) += mt9m001.o
-obj-$(CONFIG_VIDEO_MT9M032) += mt9m032.o
-obj-$(CONFIG_VIDEO_MT9M111) += mt9m111.o
-obj-$(CONFIG_VIDEO_MT9P031) += mt9p031.o
-obj-$(CONFIG_VIDEO_MT9T001) += mt9t001.o
-obj-$(CONFIG_VIDEO_MT9T112) += mt9t112.o
-obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
-obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o
-obj-$(CONFIG_VIDEO_MT9V111) += mt9v111.o
-obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
-obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o
-obj-$(CONFIG_VIDEO_RJ54N1) += rj54n1cb0c.o
-obj-$(CONFIG_VIDEO_S5K6AA) += s5k6aa.o
-obj-$(CONFIG_VIDEO_S5K6A3) += s5k6a3.o
-obj-$(CONFIG_VIDEO_S5K4ECGX) += s5k4ecgx.o
-obj-$(CONFIG_VIDEO_S5K5BAF) += s5k5baf.o
-obj-$(CONFIG_VIDEO_S5C73M3) += s5c73m3/
-obj-$(CONFIG_VIDEO_ADP1653) += adp1653.o
-obj-$(CONFIG_VIDEO_LM3560) += lm3560.o
-obj-$(CONFIG_VIDEO_LM3646) += lm3646.o
-obj-$(CONFIG_VIDEO_CCS_PLL) += ccs-pll.o
-obj-$(CONFIG_VIDEO_AK881X) += ak881x.o
-obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
-obj-$(CONFIG_VIDEO_I2C) += video-i2c.o
-obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o
-obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
-obj-$(CONFIG_VIDEO_TC358743) += tc358743.o
-obj-$(CONFIG_VIDEO_HI556) += hi556.o
-obj-$(CONFIG_VIDEO_HI846) += hi846.o
-obj-$(CONFIG_VIDEO_IMX208) += imx208.o
-obj-$(CONFIG_VIDEO_IMX214) += imx214.o
-obj-$(CONFIG_VIDEO_IMX219) += imx219.o
-obj-$(CONFIG_VIDEO_IMX258) += imx258.o
-obj-$(CONFIG_VIDEO_IMX274) += imx274.o
-obj-$(CONFIG_VIDEO_IMX290) += imx290.o
-obj-$(CONFIG_VIDEO_IMX319) += imx319.o
-obj-$(CONFIG_VIDEO_IMX334) += imx334.o
-obj-$(CONFIG_VIDEO_IMX335) += imx335.o
-obj-$(CONFIG_VIDEO_IMX355) += imx355.o
-obj-$(CONFIG_VIDEO_IMX412) += imx412.o
-obj-$(CONFIG_VIDEO_MAX9286) += max9286.o
-obj-$(CONFIG_VIDEO_MAX9271_LIB) += max9271.o
-obj-$(CONFIG_VIDEO_RDACM20) += rdacm20.o
-obj-$(CONFIG_VIDEO_RDACM21) += rdacm21.o
+obj-$(CONFIG_VIDEO_RDACM20) += rdacm20.o
+obj-$(CONFIG_VIDEO_RDACM21) += rdacm21.o
+obj-$(CONFIG_VIDEO_RJ54N1) += rj54n1cb0c.o
+obj-$(CONFIG_VIDEO_S5C73M3) += s5c73m3/
+obj-$(CONFIG_VIDEO_S5K4ECGX) += s5k4ecgx.o
+obj-$(CONFIG_VIDEO_S5K5BAF) += s5k5baf.o
+obj-$(CONFIG_VIDEO_S5K6A3) += s5k6a3.o
+obj-$(CONFIG_VIDEO_S5K6AA) += s5k6aa.o
+obj-$(CONFIG_VIDEO_SAA6588) += saa6588.o
+obj-$(CONFIG_VIDEO_SAA6752HS) += saa6752hs.o
+obj-$(CONFIG_VIDEO_SAA7110) += saa7110.o
+obj-$(CONFIG_VIDEO_SAA711X) += saa7115.o
+obj-$(CONFIG_VIDEO_SAA7127) += saa7127.o
+obj-$(CONFIG_VIDEO_SAA717X) += saa717x.o
+obj-$(CONFIG_VIDEO_SAA7185) += saa7185.o
+obj-$(CONFIG_VIDEO_SONY_BTF_MPX) += sony-btf-mpx.o
+obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
obj-$(CONFIG_VIDEO_ST_MIPID02) += st-mipid02.o
-obj-$(CONFIG_SDR_MAX2175) += max2175.o
+obj-$(CONFIG_VIDEO_TC358743) += tc358743.o
+obj-$(CONFIG_VIDEO_TDA1997X) += tda1997x.o
+obj-$(CONFIG_VIDEO_TDA7432) += tda7432.o
+obj-$(CONFIG_VIDEO_TDA9840) += tda9840.o
+obj-$(CONFIG_VIDEO_TEA6415C) += tea6415c.o
+obj-$(CONFIG_VIDEO_TEA6420) += tea6420.o
+obj-$(CONFIG_VIDEO_THS7303) += ths7303.o
+obj-$(CONFIG_VIDEO_THS8200) += ths8200.o
+obj-$(CONFIG_VIDEO_TLV320AIC23B) += tlv320aic23b.o
+obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o
+obj-$(CONFIG_VIDEO_TVP514X) += tvp514x.o
+obj-$(CONFIG_VIDEO_TVP5150) += tvp5150.o
+obj-$(CONFIG_VIDEO_TVP7002) += tvp7002.o
+obj-$(CONFIG_VIDEO_TW2804) += tw2804.o
+obj-$(CONFIG_VIDEO_TW9903) += tw9903.o
+obj-$(CONFIG_VIDEO_TW9906) += tw9906.o
+obj-$(CONFIG_VIDEO_TW9910) += tw9910.o
+obj-$(CONFIG_VIDEO_UDA1342) += uda1342.o
+obj-$(CONFIG_VIDEO_UPD64031A) += upd64031a.o
+obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
+obj-$(CONFIG_VIDEO_VP27SMPX) += vp27smpx.o
+obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
+obj-$(CONFIG_VIDEO_VS6624) += vs6624.o
+obj-$(CONFIG_VIDEO_WM8739) += wm8739.o
+obj-$(CONFIG_VIDEO_WM8775) += wm8775.o
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index d9a99fcfacb1..4f5db195e66d 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -784,16 +784,16 @@ static int adv7180_get_mbus_config(struct v4l2_subdev *sd,
if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2) {
cfg->type = V4L2_MBUS_CSI2_DPHY;
- cfg->flags = V4L2_MBUS_CSI2_1_LANE |
- V4L2_MBUS_CSI2_CHANNEL_0 |
- V4L2_MBUS_CSI2_CONTINUOUS_CLOCK;
+ cfg->bus.mipi_csi2.num_data_lanes = 1;
+ cfg->bus.mipi_csi2.flags = 0;
} else {
/*
* The ADV7180 sensor supports BT.601/656 output modes.
* The BT.656 is default and not yet configurable by s/w.
*/
- cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING |
- V4L2_MBUS_DATA_ACTIVE_HIGH;
+ cfg->bus.parallel.flags = V4L2_MBUS_MASTER |
+ V4L2_MBUS_PCLK_SAMPLE_RISING |
+ V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_BT656;
}
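
Both this hunk and the adv748x change further down move the CSI-2 lane count out of the old V4L2_MBUS_CSI2_*_LANE flags and into the typed bus union. A minimal consumer-side sketch of how the new field is read back; the rx_get_lanes() helper and its source_sd/pad arguments are hypothetical placeholders, not part of this patch:

/*
 * Hypothetical consumer sketch: a CSI-2 receiver querying the remote
 * subdev now reads the lane count as a plain integer instead of
 * decoding V4L2_MBUS_CSI2_*_LANE flags.
 */
#include <media/v4l2-subdev.h>

static int rx_get_lanes(struct v4l2_subdev *source_sd, unsigned int pad)
{
	struct v4l2_mbus_config cfg = {};
	int ret;

	ret = v4l2_subdev_call(source_sd, pad, get_mbus_config, pad, &cfg);
	if (ret)
		return ret;

	if (cfg.type != V4L2_MBUS_CSI2_DPHY)
		return -EINVAL;

	return cfg.bus.mipi_csi2.num_data_lanes;
}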
diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c
index 92cafdea3f1f..ba746a19fd39 100644
--- a/drivers/media/i2c/adv7183.c
+++ b/drivers/media/i2c/adv7183.c
@@ -7,7 +7,7 @@
#include <linux/delay.h>
#include <linux/errno.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -28,8 +28,8 @@ struct adv7183 {
v4l2_std_id std; /* Current set standard */
u32 input;
u32 output;
- unsigned reset_pin;
- unsigned oe_pin;
+ struct gpio_desc *reset_pin;
+ struct gpio_desc *oe_pin;
struct v4l2_mbus_framefmt fmt;
};
@@ -465,9 +465,9 @@ static int adv7183_s_stream(struct v4l2_subdev *sd, int enable)
struct adv7183 *decoder = to_adv7183(sd);
if (enable)
- gpio_set_value(decoder->oe_pin, 0);
+ gpiod_set_value(decoder->oe_pin, 1);
else
- gpio_set_value(decoder->oe_pin, 1);
+ gpiod_set_value(decoder->oe_pin, 0);
udelay(1);
return 0;
}
@@ -531,7 +531,6 @@ static int adv7183_probe(struct i2c_client *client,
struct v4l2_subdev_format fmt = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
- const unsigned *pin_array;
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -540,29 +539,28 @@ static int adv7183_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%02x (%s)\n",
client->addr << 1, client->adapter->name);
- pin_array = client->dev.platform_data;
- if (pin_array == NULL)
- return -EINVAL;
-
decoder = devm_kzalloc(&client->dev, sizeof(*decoder), GFP_KERNEL);
if (decoder == NULL)
return -ENOMEM;
- decoder->reset_pin = pin_array[0];
- decoder->oe_pin = pin_array[1];
-
- if (devm_gpio_request_one(&client->dev, decoder->reset_pin,
- GPIOF_OUT_INIT_LOW, "ADV7183 Reset")) {
- v4l_err(client, "failed to request GPIO %d\n", decoder->reset_pin);
- return -EBUSY;
- }
-
- if (devm_gpio_request_one(&client->dev, decoder->oe_pin,
- GPIOF_OUT_INIT_HIGH,
- "ADV7183 Output Enable")) {
- v4l_err(client, "failed to request GPIO %d\n", decoder->oe_pin);
- return -EBUSY;
- }
+ /*
+ * Requesting high will assert reset; the line should be
+ * flagged as active low in the descriptor table or machine description.
+ */
+ decoder->reset_pin = devm_gpiod_get(&client->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(decoder->reset_pin))
+ return PTR_ERR(decoder->reset_pin);
+ gpiod_set_consumer_name(decoder->reset_pin, "ADV7183 Reset");
+ /*
+ * Requesting low will start with the output disabled; the line should
+ * be flagged as active low in the descriptor table or machine description.
+ */
+ decoder->oe_pin = devm_gpiod_get(&client->dev, "oe",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(decoder->oe_pin))
+ return PTR_ERR(decoder->oe_pin);
+ gpiod_set_consumer_name(decoder->oe_pin, "ADV7183 Output Enable");
sd = &decoder->sd;
v4l2_i2c_subdev_init(sd, client, &adv7183_ops);
@@ -594,7 +592,8 @@ static int adv7183_probe(struct i2c_client *client,
/* reset chip */
/* reset pulse width at least 5ms */
mdelay(10);
- gpio_set_value(decoder->reset_pin, 1);
+ /* De-assert reset line (descriptor tagged active low) */
+ gpiod_set_value(decoder->reset_pin, 0);
/* wait 5ms before any further i2c writes are performed */
mdelay(5);
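
The probe path above now requests its lines by the "reset" and "oe" con_ids. A hedged sketch of the machine description the comments refer to; the gpiochip label, pin numbers, and dev_id below are illustrative assumptions, not taken from this patch:

/*
 * Hypothetical board-file fragment: both lines are flagged active low,
 * as the driver comments require.
 */
#include <linux/gpio/machine.h>

static struct gpiod_lookup_table adv7183_gpios = {
	.dev_id = "1-0021",	/* example: I2C bus 1, address 0x21 */
	.table = {
		GPIO_LOOKUP("gpiochip0", 5, "reset", GPIO_ACTIVE_LOW),
		GPIO_LOOKUP("gpiochip0", 6, "oe", GPIO_ACTIVE_LOW),
		{ }
	},
};

/* Registered once from board init code: */
/* gpiod_add_lookup_table(&adv7183_gpios); */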
diff --git a/drivers/media/i2c/adv748x/adv748x-csi2.c b/drivers/media/i2c/adv748x/adv748x-csi2.c
index 589e9644fcdc..bd4f3fe0e309 100644
--- a/drivers/media/i2c/adv748x/adv748x-csi2.c
+++ b/drivers/media/i2c/adv748x/adv748x-csi2.c
@@ -222,23 +222,7 @@ static int adv748x_csi2_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad
return -EINVAL;
config->type = V4L2_MBUS_CSI2_DPHY;
- switch (tx->active_lanes) {
- case 1:
- config->flags = V4L2_MBUS_CSI2_1_LANE;
- break;
-
- case 2:
- config->flags = V4L2_MBUS_CSI2_2_LANE;
- break;
-
- case 3:
- config->flags = V4L2_MBUS_CSI2_3_LANE;
- break;
-
- case 4:
- config->flags = V4L2_MBUS_CSI2_4_LANE;
- break;
- }
+ config->bus.mipi_csi2.num_data_lanes = tx->active_lanes;
return 0;
}
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index 8e13cae40ec5..202e0cd83f90 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -17,7 +17,6 @@
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/videodev2.h>
-#include <linux/gpio.h>
#include <linux/workqueue.h>
#include <linux/hdmi.h>
#include <linux/v4l2-dv-timings.h>
@@ -522,7 +521,7 @@ static void log_infoframe(struct v4l2_subdev *sd, const struct adv7511_cfg_read_
buffer[3] = 0;
buffer[3] = hdmi_infoframe_checksum(buffer, len + 4);
- if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
+ if (hdmi_infoframe_unpack(&frame, buffer, len + 4) < 0) {
v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc);
return;
}
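
The same one-line fix recurs in adv7604.c and adv7842.c below: hdmi_infoframe_unpack() is given the number of bytes actually read back rather than the size of the scratch buffer, so it no longer validates stale bytes at the buffer tail. A minimal sketch of the pattern, with a hypothetical unpack_read_back() helper:

/*
 * "len" is the payload length read from the chip; the 4 extra bytes are
 * the 3-byte header plus the checksum staged at buffer[3] above.
 */
#include <linux/hdmi.h>

static int unpack_read_back(union hdmi_infoframe *frame,
			    const u8 *buffer, int len)
{
	return hdmi_infoframe_unpack(frame, buffer, len + 4);
}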
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index a2fa408d2d9f..bb0c8fc6d383 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -2484,7 +2484,7 @@ static int adv76xx_read_infoframe(struct v4l2_subdev *sd, int index,
buffer[i + 3] = infoframe_read(sd,
adv76xx_cri[index].payload_addr + i);
- if (hdmi_infoframe_unpack(frame, buffer, sizeof(buffer)) < 0) {
+ if (hdmi_infoframe_unpack(frame, buffer, len + 3) < 0) {
v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__,
adv76xx_cri[index].desc);
return -ENOENT;
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 9d6eed0f8281..22caa070273b 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -2583,7 +2583,7 @@ static void log_infoframe(struct v4l2_subdev *sd, const struct adv7842_cfg_read_
for (i = 0; i < len; i++)
buffer[i + 3] = infoframe_read(sd, cri->payload_addr + i);
- if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
+ if (hdmi_infoframe_unpack(&frame, buffer, len + 3) < 0) {
v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc);
return;
}
diff --git a/drivers/media/i2c/ccs/Kconfig b/drivers/media/i2c/ccs/Kconfig
index 59f35b33ddc1..71671db3d993 100644
--- a/drivers/media/i2c/ccs/Kconfig
+++ b/drivers/media/i2c/ccs/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_CCS
tristate "MIPI CCS/SMIA++/SMIA sensor support"
- depends on I2C && VIDEO_V4L2 && HAVE_CLK
+ depends on I2C && VIDEO_DEV && HAVE_CLK
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select VIDEO_CCS_PLL
diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
index 9158d3ce45c0..03e841b8443f 100644
--- a/drivers/media/i2c/ccs/ccs-core.c
+++ b/drivers/media/i2c/ccs/ccs-core.c
@@ -17,7 +17,6 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/media/i2c/cx25840/Kconfig b/drivers/media/i2c/cx25840/Kconfig
index e392f8e023f6..46f15702cf55 100644
--- a/drivers/media/i2c/cx25840/Kconfig
+++ b/drivers/media/i2c/cx25840/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_CX25840
tristate "Conexant CX2584x audio/video decoders"
- depends on VIDEO_V4L2 && I2C
+ depends on VIDEO_DEV && I2C
help
Support for the Conexant CX2584x audio/video decoders.
diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
index 3863dfeb8293..cd7008ad8f2f 100644
--- a/drivers/media/i2c/dw9714.c
+++ b/drivers/media/i2c/dw9714.c
@@ -5,6 +5,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
@@ -36,6 +37,7 @@ struct dw9714_device {
struct v4l2_ctrl_handler ctrls_vcm;
struct v4l2_subdev sd;
u16 current_val;
+ struct regulator *vcc;
};
static inline struct dw9714_device *to_dw9714_vcm(struct v4l2_ctrl *ctrl)
@@ -145,6 +147,16 @@ static int dw9714_probe(struct i2c_client *client)
if (dw9714_dev == NULL)
return -ENOMEM;
+ dw9714_dev->vcc = devm_regulator_get(&client->dev, "vcc");
+ if (IS_ERR(dw9714_dev->vcc))
+ return PTR_ERR(dw9714_dev->vcc);
+
+ rval = regulator_enable(dw9714_dev->vcc);
+ if (rval < 0) {
+ dev_err(&client->dev, "failed to enable vcc: %d\n", rval);
+ return rval;
+ }
+
v4l2_i2c_subdev_init(&dw9714_dev->sd, client, &dw9714_ops);
dw9714_dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
V4L2_SUBDEV_FL_HAS_EVENTS;
@@ -181,8 +193,18 @@ static int dw9714_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct dw9714_device *dw9714_dev = sd_to_dw9714_vcm(sd);
+ int ret;
pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev)) {
+ ret = regulator_disable(dw9714_dev->vcc);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to disable vcc: %d\n", ret);
+ return ret;
+ }
+ }
+ pm_runtime_set_suspended(&client->dev);
dw9714_subdev_cleanup(dw9714_dev);
return 0;
@@ -200,6 +222,9 @@ static int __maybe_unused dw9714_vcm_suspend(struct device *dev)
struct dw9714_device *dw9714_dev = sd_to_dw9714_vcm(sd);
int ret, val;
+ if (pm_runtime_suspended(&client->dev))
+ return 0;
+
for (val = dw9714_dev->current_val & ~(DW9714_CTRL_STEPS - 1);
val >= 0; val -= DW9714_CTRL_STEPS) {
ret = dw9714_i2c_write(client,
@@ -208,7 +233,12 @@ static int __maybe_unused dw9714_vcm_suspend(struct device *dev)
dev_err_once(dev, "%s I2C failure: %d", __func__, ret);
usleep_range(DW9714_CTRL_DELAY_US, DW9714_CTRL_DELAY_US + 10);
}
- return 0;
+
+ ret = regulator_disable(dw9714_dev->vcc);
+ if (ret)
+ dev_err(dev, "Failed to disable vcc: %d\n", ret);
+
+ return ret;
}
/*
@@ -224,6 +254,16 @@ static int __maybe_unused dw9714_vcm_resume(struct device *dev)
struct dw9714_device *dw9714_dev = sd_to_dw9714_vcm(sd);
int ret, val;
+ if (pm_runtime_suspended(&client->dev))
+ return 0;
+
+ ret = regulator_enable(dw9714_dev->vcc);
+ if (ret) {
+ dev_err(dev, "Failed to enable vcc: %d\n", ret);
+ return ret;
+ }
+ usleep_range(1000, 2000);
+
for (val = dw9714_dev->current_val % DW9714_CTRL_STEPS;
val < dw9714_dev->current_val + DW9714_CTRL_STEPS - 1;
val += DW9714_CTRL_STEPS) {
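
For context, these suspend/resume handlers are typically wired to both runtime PM and system sleep, which is why each bails out early when the device is already runtime-suspended: the regulator must not be enabled or disabled twice. A sketch of that wiring, under the assumption that the driver's dev_pm_ops follows the usual pattern:

static const struct dev_pm_ops dw9714_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dw9714_vcm_suspend, dw9714_vcm_resume)
	SET_RUNTIME_PM_OPS(dw9714_vcm_suspend, dw9714_vcm_resume, NULL)
};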
diff --git a/drivers/media/i2c/et8ek8/Kconfig b/drivers/media/i2c/et8ek8/Kconfig
index afcc4ea764f6..398dd4d21df1 100644
--- a/drivers/media/i2c/et8ek8/Kconfig
+++ b/drivers/media/i2c/et8ek8/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_ET8EK8
tristate "ET8EK8 camera sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
diff --git a/drivers/media/i2c/hi847.c b/drivers/media/i2c/hi847.c
new file mode 100644
index 000000000000..7e85349e1852
--- /dev/null
+++ b/drivers/media/i2c/hi847.c
@@ -0,0 +1,3012 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2022 Intel Corporation.
+
+#include <asm/unaligned.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define HI847_REG_VALUE_08BIT 1
+#define HI847_REG_VALUE_16BIT 2
+#define HI847_REG_VALUE_24BIT 3
+
+#define HI847_LINK_FREQ_400MHZ 400000000ULL
+#define HI847_LINK_FREQ_200MHZ 200000000ULL
+#define HI847_SCLK 72000000ULL
+#define HI847_MCLK 19200000
+#define HI847_DATA_LANES 4
+#define HI847_RGB_DEPTH 10
+
+#define HI847_REG_CHIP_ID 0x0716
+#define HI847_CHIP_ID 0x0847
+
+#define HI847_REG_MODE_SELECT 0x0B00
+#define HI847_MODE_STANDBY 0x0000
+#define HI847_MODE_STREAMING 0x0100
+
+#define HI847_REG_MODE_TG 0x027E
+#define HI847_REG_MODE_TG_ENABLE 0x0100
+#define HI847_REG_MODE_TG_DISABLE 0x0000
+
+/* vertical-timings from sensor */
+#define HI847_REG_FLL 0x020E
+#define HI847_FLL_30FPS 0x0B51
+#define HI847_FLL_30FPS_MIN 0x0B51
+#define HI847_FLL_60FPS 0x05A9
+#define HI847_FLL_60FPS_MIN 0x05A9
+#define HI847_FLL_MAX 0x7fff
+
+/* horizontal-timings from sensor */
+#define HI847_REG_LLP 0x0206
+
+/* Exposure controls from sensor */
+#define HI847_REG_EXPOSURE 0x020A
+#define HI847_EXPOSURE_MIN 4
+#define HI847_EXPOSURE_MAX_MARGIN 4
+#define HI847_EXPOSURE_STEP 1
+
+/* Analog gain controls from sensor */
+#define HI847_REG_ANALOG_GAIN 0x0212
+#define HI847_ANAL_GAIN_MIN 0
+#define HI847_ANAL_GAIN_MAX 240
+#define HI847_ANAL_GAIN_STEP 1
+
+/* Digital gain controls from sensor */
+#define HI847_REG_MWB_GR_GAIN 0x0214
+#define HI847_REG_MWB_GB_GAIN 0x0216
+#define HI847_REG_MWB_R_GAIN 0x0218
+#define HI847_REG_MWB_B_GAIN 0x021A
+#define HI847_DGTL_GAIN_MIN 1
+#define HI847_DGTL_GAIN_MAX 8191
+#define HI847_DGTL_GAIN_STEP 1
+#define HI847_DGTL_GAIN_DEFAULT 512
+
+/* Test Pattern Control */
+#define HI847_REG_ISP 0x0B04
+#define HI847_REG_ISP_TPG_EN 0x0001
+#define HI847_REG_TEST_PATTERN 0x0C0A
+
+/* Flip Mirror Controls from sensor */
+#define HI847_REG_MIRROR_FLIP 0x0202
+
+#define HI847_REG_FORMAT_X 0x0F04
+#define HI847_REG_FORMAT_Y 0x0F06
+
+enum {
+ HI847_LINK_FREQ_400MHZ_INDEX,
+ HI847_LINK_FREQ_200MHZ_INDEX,
+};
+
+struct hi847_reg {
+ u16 address;
+ u16 val;
+};
+
+struct hi847_reg_list {
+ u32 num_of_regs;
+ const struct hi847_reg *regs;
+};
+
+struct hi847_link_freq_config {
+ const struct hi847_reg_list reg_list;
+};
+
+struct hi847_mode {
+ /* Frame width in pixels */
+ u32 width;
+
+ /* Frame height in pixels */
+ u32 height;
+
+ /* Horizontal timing size */
+ u32 llp;
+
+ /* Default vertical timing size */
+ u32 fll_def;
+
+ /* Min vertical timing size */
+ u32 fll_min;
+
+ /* Link frequency needed for this resolution */
+ u32 link_freq_index;
+
+ /* Sensor register settings for this resolution */
+ const struct hi847_reg_list reg_list;
+};
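
/*
 * Illustrative helper, not part of this patch: with the fixed 72 MHz
 * pixel clock assumed above, the frame rate follows from the line
 * length (llp) and frame length (fll) as fps = SCLK / (llp * fll).
 * For example, fll = HI847_FLL_30FPS (0x0B51 = 2897) gives ~30 fps
 * when llp * fll is about 2400000.
 */
static u32 hi847_fps(u32 llp, u32 fll)
{
	return (u32)HI847_SCLK / (llp * fll);
}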
+
+#define to_hi847(_sd) container_of(_sd, struct hi847, sd)
+
+/* Sensor initialization */
+static const struct hi847_reg mipi_data_rate_lane_4[] = {
+ {0x0790, 0x0100},
+ {0x2000, 0x0000},
+ {0x2002, 0x0058},
+ {0x2006, 0x40B2},
+ {0x2008, 0xB05C},
+ {0x200A, 0x8446},
+ {0x200C, 0x40B2},
+ {0x200E, 0xB082},
+ {0x2010, 0x8450},
+ {0x2012, 0x40B2},
+ {0x2014, 0xB0AE},
+ {0x2016, 0x84C6},
+ {0x2018, 0x40B2},
+ {0x201A, 0xB11A},
+ {0x201C, 0x84BC},
+ {0x201E, 0x40B2},
+ {0x2020, 0xB34A},
+ {0x2022, 0x84B4},
+ {0x2024, 0x40B2},
+ {0x2026, 0xB386},
+ {0x2028, 0x84B0},
+ {0x202A, 0x40B2},
+ {0x202C, 0xB3B4},
+ {0x202E, 0x84B8},
+ {0x2030, 0x40B2},
+ {0x2032, 0xB0F4},
+ {0x2034, 0x8470},
+ {0x2036, 0x40B2},
+ {0x2038, 0xB3EA},
+ {0x203A, 0x847C},
+ {0x203C, 0x40B2},
+ {0x203E, 0xB658},
+ {0x2040, 0x8478},
+ {0x2042, 0x40B2},
+ {0x2044, 0xB67E},
+ {0x2046, 0x847E},
+ {0x2048, 0x40B2},
+ {0x204A, 0xB78E},
+ {0x204C, 0x843A},
+ {0x204E, 0x40B2},
+ {0x2050, 0xB980},
+ {0x2052, 0x845C},
+ {0x2054, 0x40B2},
+ {0x2056, 0xB9B0},
+ {0x2058, 0x845E},
+ {0x205A, 0x4130},
+ {0x205C, 0x1292},
+ {0x205E, 0xD016},
+ {0x2060, 0xB3D2},
+ {0x2062, 0x0B00},
+ {0x2064, 0x2002},
+ {0x2066, 0xD2E2},
+ {0x2068, 0x0381},
+ {0x206A, 0x93C2},
+ {0x206C, 0x0263},
+ {0x206E, 0x2001},
+ {0x2070, 0x4130},
+ {0x2072, 0x422D},
+ {0x2074, 0x403E},
+ {0x2076, 0x888E},
+ {0x2078, 0x403F},
+ {0x207A, 0x192A},
+ {0x207C, 0x1292},
+ {0x207E, 0x843E},
+ {0x2080, 0x3FF7},
+ {0x2082, 0x422D},
+ {0x2084, 0x403E},
+ {0x2086, 0x192A},
+ {0x2088, 0x403F},
+ {0x208A, 0x888E},
+ {0x208C, 0x1292},
+ {0x208E, 0x843E},
+ {0x2090, 0xB3D2},
+ {0x2092, 0x0267},
+ {0x2094, 0x2403},
+ {0x2096, 0xD0F2},
+ {0x2098, 0x0040},
+ {0x209A, 0x0381},
+ {0x209C, 0x90F2},
+ {0x209E, 0x0010},
+ {0x20A0, 0x0260},
+ {0x20A2, 0x2002},
+ {0x20A4, 0x1292},
+ {0x20A6, 0x84BC},
+ {0x20A8, 0x1292},
+ {0x20AA, 0xD020},
+ {0x20AC, 0x4130},
+ {0x20AE, 0x1292},
+ {0x20B0, 0x8470},
+ {0x20B2, 0x1292},
+ {0x20B4, 0x8452},
+ {0x20B6, 0x0900},
+ {0x20B8, 0x7118},
+ {0x20BA, 0x1292},
+ {0x20BC, 0x848E},
+ {0x20BE, 0x0900},
+ {0x20C0, 0x7112},
+ {0x20C2, 0x0800},
+ {0x20C4, 0x7A20},
+ {0x20C6, 0x4292},
+ {0x20C8, 0x87DE},
+ {0x20CA, 0x7334},
+ {0x20CC, 0x0F00},
+ {0x20CE, 0x7304},
+ {0x20D0, 0x421F},
+ {0x20D2, 0x8718},
+ {0x20D4, 0x1292},
+ {0x20D6, 0x846E},
+ {0x20D8, 0x1292},
+ {0x20DA, 0x8488},
+ {0x20DC, 0x0B00},
+ {0x20DE, 0x7114},
+ {0x20E0, 0x0002},
+ {0x20E2, 0x1292},
+ {0x20E4, 0x848C},
+ {0x20E6, 0x1292},
+ {0x20E8, 0x8454},
+ {0x20EA, 0x43C2},
+ {0x20EC, 0x86EE},
+ {0x20EE, 0x1292},
+ {0x20F0, 0x8444},
+ {0x20F2, 0x4130},
+ {0x20F4, 0x4392},
+ {0x20F6, 0x7360},
+ {0x20F8, 0xB3D2},
+ {0x20FA, 0x0B00},
+ {0x20FC, 0x2402},
+ {0x20FE, 0xC2E2},
+ {0x2100, 0x0381},
+ {0x2102, 0x0900},
+ {0x2104, 0x732C},
+ {0x2106, 0x4382},
+ {0x2108, 0x7360},
+ {0x210A, 0x422D},
+ {0x210C, 0x403E},
+ {0x210E, 0x87F0},
+ {0x2110, 0x403F},
+ {0x2112, 0x87E8},
+ {0x2114, 0x1292},
+ {0x2116, 0x843E},
+ {0x2118, 0x4130},
+ {0x211A, 0x120B},
+ {0x211C, 0x120A},
+ {0x211E, 0x4392},
+ {0x2120, 0x87FA},
+ {0x2122, 0x4392},
+ {0x2124, 0x760E},
+ {0x2126, 0x0900},
+ {0x2128, 0x760C},
+ {0x212A, 0x421B},
+ {0x212C, 0x760A},
+ {0x212E, 0x903B},
+ {0x2130, 0x0201},
+ {0x2132, 0x2408},
+ {0x2134, 0x903B},
+ {0x2136, 0x0102},
+ {0x2138, 0x2405},
+ {0x213A, 0x4292},
+ {0x213C, 0x030A},
+ {0x213E, 0x87F8},
+ {0x2140, 0x1292},
+ {0x2142, 0x849A},
+ {0x2144, 0x903B},
+ {0x2146, 0x0020},
+ {0x2148, 0x2010},
+ {0x214A, 0x403B},
+ {0x214C, 0x8498},
+ {0x214E, 0x422F},
+ {0x2150, 0x12AB},
+ {0x2152, 0x403F},
+ {0x2154, 0x0028},
+ {0x2156, 0x12AB},
+ {0x2158, 0x403B},
+ {0x215A, 0x84C4},
+ {0x215C, 0x407F},
+ {0x215E, 0xFFAA},
+ {0x2160, 0x12AB},
+ {0x2162, 0x407F},
+ {0x2164, 0x0055},
+ {0x2166, 0x12AB},
+ {0x2168, 0x3FDC},
+ {0x216A, 0x903B},
+ {0x216C, 0x0021},
+ {0x216E, 0x2890},
+ {0x2170, 0x903B},
+ {0x2172, 0x0100},
+ {0x2174, 0x200D},
+ {0x2176, 0x403F},
+ {0x2178, 0x0028},
+ {0x217A, 0x1292},
+ {0x217C, 0x8498},
+ {0x217E, 0x425F},
+ {0x2180, 0x0306},
+ {0x2182, 0x1292},
+ {0x2184, 0x84C4},
+ {0x2186, 0x4FC2},
+ {0x2188, 0x0318},
+ {0x218A, 0x0261},
+ {0x218C, 0x0000},
+ {0x218E, 0x3FC9},
+ {0x2190, 0x903B},
+ {0x2192, 0x0101},
+ {0x2194, 0x2858},
+ {0x2196, 0x903B},
+ {0x2198, 0x0200},
+ {0x219A, 0x2450},
+ {0x219C, 0x903B},
+ {0x219E, 0x0201},
+ {0x21A0, 0x2C47},
+ {0x21A2, 0x903B},
+ {0x21A4, 0x0102},
+ {0x21A6, 0x2041},
+ {0x21A8, 0x93E2},
+ {0x21AA, 0x0262},
+ {0x21AC, 0x240A},
+ {0x21AE, 0x425F},
+ {0x21B0, 0x0306},
+ {0x21B2, 0x1292},
+ {0x21B4, 0x84C4},
+ {0x21B6, 0x4F4E},
+ {0x21B8, 0x4EC2},
+ {0x21BA, 0x0318},
+ {0x21BC, 0x0260},
+ {0x21BE, 0x0000},
+ {0x21C0, 0x3FB0},
+ {0x21C2, 0x403A},
+ {0x21C4, 0x8030},
+ {0x21C6, 0x4382},
+ {0x21C8, 0x0326},
+ {0x21CA, 0x4382},
+ {0x21CC, 0x0328},
+ {0x21CE, 0x421B},
+ {0x21D0, 0x030C},
+ {0x21D2, 0x930B},
+ {0x21D4, 0x2420},
+ {0x21D6, 0x4A5F},
+ {0x21D8, 0x0001},
+ {0x21DA, 0x1292},
+ {0x21DC, 0x84C4},
+ {0x21DE, 0x4F4E},
+ {0x21E0, 0x4A5F},
+ {0x21E2, 0x0001},
+ {0x21E4, 0x9F0E},
+ {0x21E6, 0x2402},
+ {0x21E8, 0x5392},
+ {0x21EA, 0x0326},
+ {0x21EC, 0x4ECA},
+ {0x21EE, 0x0001},
+ {0x21F0, 0x533B},
+ {0x21F2, 0x2411},
+ {0x21F4, 0x4A6F},
+ {0x21F6, 0x1292},
+ {0x21F8, 0x84C4},
+ {0x21FA, 0x4F4E},
+ {0x21FC, 0x4A6F},
+ {0x21FE, 0x9F0E},
+ {0x2200, 0x2402},
+ {0x2202, 0x5392},
+ {0x2204, 0x0326},
+ {0x2206, 0x4ECA},
+ {0x2208, 0x0000},
+ {0x220A, 0x533B},
+ {0x220C, 0x532A},
+ {0x220E, 0x0260},
+ {0x2210, 0x0000},
+ {0x2212, 0x930B},
+ {0x2214, 0x23E0},
+ {0x2216, 0x40B2},
+ {0x2218, 0xAA55},
+ {0x221A, 0x0328},
+ {0x221C, 0xB0F2},
+ {0x221E, 0x0040},
+ {0x2220, 0x0381},
+ {0x2222, 0x277F},
+ {0x2224, 0xD3D2},
+ {0x2226, 0x0267},
+ {0x2228, 0x3F7C},
+ {0x222A, 0x0261},
+ {0x222C, 0x0000},
+ {0x222E, 0x3F79},
+ {0x2230, 0x903B},
+ {0x2232, 0x0201},
+ {0x2234, 0x23FA},
+ {0x2236, 0x1292},
+ {0x2238, 0x84C0},
+ {0x223A, 0x3F73},
+ {0x223C, 0x1292},
+ {0x223E, 0x84C0},
+ {0x2240, 0x0261},
+ {0x2242, 0x0000},
+ {0x2244, 0x3F6E},
+ {0x2246, 0x903B},
+ {0x2248, 0x0040},
+ {0x224A, 0x2018},
+ {0x224C, 0x422F},
+ {0x224E, 0x1292},
+ {0x2250, 0x8498},
+ {0x2252, 0x12B0},
+ {0x2254, 0xF0EA},
+ {0x2256, 0x907F},
+ {0x2258, 0xFFAA},
+ {0x225A, 0x240D},
+ {0x225C, 0x5392},
+ {0x225E, 0x0312},
+ {0x2260, 0x12B0},
+ {0x2262, 0xF0EA},
+ {0x2264, 0x907F},
+ {0x2266, 0x0055},
+ {0x2268, 0x2403},
+ {0x226A, 0x5392},
+ {0x226C, 0x0312},
+ {0x226E, 0x3F59},
+ {0x2270, 0x5392},
+ {0x2272, 0x0310},
+ {0x2274, 0x3F56},
+ {0x2276, 0x5392},
+ {0x2278, 0x0310},
+ {0x227A, 0x3FF2},
+ {0x227C, 0x903B},
+ {0x227E, 0x0080},
+ {0x2280, 0x23D4},
+ {0x2282, 0x4382},
+ {0x2284, 0x0312},
+ {0x2286, 0x4382},
+ {0x2288, 0x0310},
+ {0x228A, 0x0261},
+ {0x228C, 0x0000},
+ {0x228E, 0x3F49},
+ {0x2290, 0x932B},
+ {0x2292, 0x2005},
+ {0x2294, 0x403F},
+ {0x2296, 0x0028},
+ {0x2298, 0x1292},
+ {0x229A, 0x8498},
+ {0x229C, 0x3F42},
+ {0x229E, 0x903B},
+ {0x22A0, 0x0003},
+ {0x22A2, 0x284B},
+ {0x22A4, 0x923B},
+ {0x22A6, 0x2015},
+ {0x22A8, 0x403F},
+ {0x22AA, 0x0023},
+ {0x22AC, 0x1292},
+ {0x22AE, 0x8498},
+ {0x22B0, 0x421B},
+ {0x22B2, 0x87F8},
+ {0x22B4, 0x421F},
+ {0x22B6, 0x030C},
+ {0x22B8, 0x9F0B},
+ {0x22BA, 0x2F33},
+ {0x22BC, 0x1292},
+ {0x22BE, 0x84BA},
+ {0x22C0, 0x930F},
+ {0x22C2, 0x2004},
+ {0x22C4, 0x5392},
+ {0x22C6, 0x0312},
+ {0x22C8, 0x531B},
+ {0x22CA, 0x3FF4},
+ {0x22CC, 0x5392},
+ {0x22CE, 0x0310},
+ {0x22D0, 0x3FFB},
+ {0x22D2, 0x903B},
+ {0x22D4, 0x0009},
+ {0x22D6, 0x2818},
+ {0x22D8, 0x903B},
+ {0x22DA, 0x0010},
+ {0x22DC, 0x23A6},
+ {0x22DE, 0x403F},
+ {0x22E0, 0x0027},
+ {0x22E2, 0x1292},
+ {0x22E4, 0x8498},
+ {0x22E6, 0x421B},
+ {0x22E8, 0x87F8},
+ {0x22EA, 0x421F},
+ {0x22EC, 0x030C},
+ {0x22EE, 0x9F0B},
+ {0x22F0, 0x2F18},
+ {0x22F2, 0x1292},
+ {0x22F4, 0x84BA},
+ {0x22F6, 0x930F},
+ {0x22F8, 0x2004},
+ {0x22FA, 0x5392},
+ {0x22FC, 0x0312},
+ {0x22FE, 0x531B},
+ {0x2300, 0x3FF4},
+ {0x2302, 0x5392},
+ {0x2304, 0x0310},
+ {0x2306, 0x3FFB},
+ {0x2308, 0x922B},
+ {0x230A, 0x238F},
+ {0x230C, 0x421B},
+ {0x230E, 0x87F8},
+ {0x2310, 0x421F},
+ {0x2312, 0x030C},
+ {0x2314, 0x9F0B},
+ {0x2316, 0x2C0B},
+ {0x2318, 0x1292},
+ {0x231A, 0x84C2},
+ {0x231C, 0x934F},
+ {0x231E, 0x240A},
+ {0x2320, 0x5392},
+ {0x2322, 0x0312},
+ {0x2324, 0x531B},
+ {0x2326, 0x421F},
+ {0x2328, 0x030C},
+ {0x232A, 0x9F0B},
+ {0x232C, 0x2BF5},
+ {0x232E, 0x0261},
+ {0x2330, 0x0000},
+ {0x2332, 0x3EF7},
+ {0x2334, 0x5392},
+ {0x2336, 0x0310},
+ {0x2338, 0x3FF5},
+ {0x233A, 0x930B},
+ {0x233C, 0x277F},
+ {0x233E, 0x931B},
+ {0x2340, 0x277A},
+ {0x2342, 0x3F73},
+ {0x2344, 0x413A},
+ {0x2346, 0x413B},
+ {0x2348, 0x4130},
+ {0x234A, 0x4F0C},
+ {0x234C, 0x403F},
+ {0x234E, 0x0267},
+ {0x2350, 0xF0FF},
+ {0x2352, 0xFFDF},
+ {0x2354, 0x0000},
+ {0x2356, 0xF0FF},
+ {0x2358, 0xFFEF},
+ {0x235A, 0x0000},
+ {0x235C, 0x421D},
+ {0x235E, 0x84B0},
+ {0x2360, 0x403E},
+ {0x2362, 0x06F9},
+ {0x2364, 0x4C0F},
+ {0x2366, 0x1292},
+ {0x2368, 0x84AC},
+ {0x236A, 0x4F4E},
+ {0x236C, 0xB31E},
+ {0x236E, 0x2403},
+ {0x2370, 0xD0F2},
+ {0x2372, 0x0020},
+ {0x2374, 0x0267},
+ {0x2376, 0xB32E},
+ {0x2378, 0x2403},
+ {0x237A, 0xD0F2},
+ {0x237C, 0x0010},
+ {0x237E, 0x0267},
+ {0x2380, 0xC3E2},
+ {0x2382, 0x0267},
+ {0x2384, 0x4130},
+ {0x2386, 0x120B},
+ {0x2388, 0x120A},
+ {0x238A, 0x403A},
+ {0x238C, 0x1140},
+ {0x238E, 0x1292},
+ {0x2390, 0xD080},
+ {0x2392, 0x430B},
+ {0x2394, 0x4A0F},
+ {0x2396, 0x532A},
+ {0x2398, 0x1292},
+ {0x239A, 0x84A4},
+ {0x239C, 0x4F0E},
+ {0x239E, 0x430F},
+ {0x23A0, 0x5E82},
+ {0x23A2, 0x87FC},
+ {0x23A4, 0x6F82},
+ {0x23A6, 0x87FE},
+ {0x23A8, 0x531B},
+ {0x23AA, 0x923B},
+ {0x23AC, 0x2BF3},
+ {0x23AE, 0x413A},
+ {0x23B0, 0x413B},
+ {0x23B2, 0x4130},
+ {0x23B4, 0xF0F2},
+ {0x23B6, 0x007F},
+ {0x23B8, 0x0267},
+ {0x23BA, 0x421D},
+ {0x23BC, 0x84B6},
+ {0x23BE, 0x403E},
+ {0x23C0, 0x01F9},
+ {0x23C2, 0x1292},
+ {0x23C4, 0x84AC},
+ {0x23C6, 0x4F4E},
+ {0x23C8, 0xF35F},
+ {0x23CA, 0x2403},
+ {0x23CC, 0xD0F2},
+ {0x23CE, 0xFF80},
+ {0x23D0, 0x0267},
+ {0x23D2, 0xB36E},
+ {0x23D4, 0x2404},
+ {0x23D6, 0xD0F2},
+ {0x23D8, 0x0040},
+ {0x23DA, 0x0267},
+ {0x23DC, 0x3C03},
+ {0x23DE, 0xF0F2},
+ {0x23E0, 0xFFBF},
+ {0x23E2, 0x0267},
+ {0x23E4, 0xC2E2},
+ {0x23E6, 0x0267},
+ {0x23E8, 0x4130},
+ {0x23EA, 0x120B},
+ {0x23EC, 0x120A},
+ {0x23EE, 0x8231},
+ {0x23F0, 0x430B},
+ {0x23F2, 0x93C2},
+ {0x23F4, 0x0C0A},
+ {0x23F6, 0x2404},
+ {0x23F8, 0xB3D2},
+ {0x23FA, 0x0B05},
+ {0x23FC, 0x2401},
+ {0x23FE, 0x431B},
+ {0x2400, 0x422D},
+ {0x2402, 0x403E},
+ {0x2404, 0x192A},
+ {0x2406, 0x403F},
+ {0x2408, 0x888E},
+ {0x240A, 0x1292},
+ {0x240C, 0x843E},
+ {0x240E, 0x930B},
+ {0x2410, 0x20F4},
+ {0x2412, 0x93E2},
+ {0x2414, 0x0241},
+ {0x2416, 0x24EB},
+ {0x2418, 0x403A},
+ {0x241A, 0x0292},
+ {0x241C, 0x4AA2},
+ {0x241E, 0x0A00},
+ {0x2420, 0xB2E2},
+ {0x2422, 0x0361},
+ {0x2424, 0x2405},
+ {0x2426, 0x4A2F},
+ {0x2428, 0x1292},
+ {0x242A, 0x8474},
+ {0x242C, 0x4F82},
+ {0x242E, 0x0A1C},
+ {0x2430, 0x93C2},
+ {0x2432, 0x0360},
+ {0x2434, 0x34CD},
+ {0x2436, 0x430C},
+ {0x2438, 0x4C0F},
+ {0x243A, 0x5F0F},
+ {0x243C, 0x4F0D},
+ {0x243E, 0x510D},
+ {0x2440, 0x4F0E},
+ {0x2442, 0x5A0E},
+ {0x2444, 0x4E1E},
+ {0x2446, 0x0002},
+ {0x2448, 0x4F1F},
+ {0x244A, 0x192A},
+ {0x244C, 0x1202},
+ {0x244E, 0xC232},
+ {0x2450, 0x4303},
+ {0x2452, 0x4E82},
+ {0x2454, 0x0130},
+ {0x2456, 0x4F82},
+ {0x2458, 0x0138},
+ {0x245A, 0x421E},
+ {0x245C, 0x013A},
+ {0x245E, 0x421F},
+ {0x2460, 0x013C},
+ {0x2462, 0x4132},
+ {0x2464, 0x108E},
+ {0x2466, 0x108F},
+ {0x2468, 0xEF4E},
+ {0x246A, 0xEF0E},
+ {0x246C, 0xF37F},
+ {0x246E, 0xC312},
+ {0x2470, 0x100F},
+ {0x2472, 0x100E},
+ {0x2474, 0x4E8D},
+ {0x2476, 0x0000},
+ {0x2478, 0x531C},
+ {0x247A, 0x922C},
+ {0x247C, 0x2BDD},
+ {0x247E, 0xB3D2},
+ {0x2480, 0x1921},
+ {0x2482, 0x2403},
+ {0x2484, 0x410F},
+ {0x2486, 0x1292},
+ {0x2488, 0x847E},
+ {0x248A, 0x403B},
+ {0x248C, 0x843E},
+ {0x248E, 0x422D},
+ {0x2490, 0x410E},
+ {0x2492, 0x403F},
+ {0x2494, 0x1908},
+ {0x2496, 0x12AB},
+ {0x2498, 0x403D},
+ {0x249A, 0x0005},
+ {0x249C, 0x403E},
+ {0x249E, 0x0292},
+ {0x24A0, 0x403F},
+ {0x24A2, 0x86E4},
+ {0x24A4, 0x12AB},
+ {0x24A6, 0x421F},
+ {0x24A8, 0x060E},
+ {0x24AA, 0x9F82},
+ {0x24AC, 0x8720},
+ {0x24AE, 0x288D},
+ {0x24B0, 0x9382},
+ {0x24B2, 0x060E},
+ {0x24B4, 0x248A},
+ {0x24B6, 0x90BA},
+ {0x24B8, 0x0010},
+ {0x24BA, 0x0000},
+ {0x24BC, 0x2C0B},
+ {0x24BE, 0x93C2},
+ {0x24C0, 0x86EE},
+ {0x24C2, 0x2008},
+ {0x24C4, 0x403F},
+ {0x24C6, 0x06A7},
+ {0x24C8, 0xD0FF},
+ {0x24CA, 0x0007},
+ {0x24CC, 0x0000},
+ {0x24CE, 0xF0FF},
+ {0x24D0, 0xFFF8},
+ {0x24D2, 0x0000},
+ {0x24D4, 0x4392},
+ {0x24D6, 0x8720},
+ {0x24D8, 0x403F},
+ {0x24DA, 0x06A7},
+ {0x24DC, 0xD2EF},
+ {0x24DE, 0x0000},
+ {0x24E0, 0xC2EF},
+ {0x24E2, 0x0000},
+ {0x24E4, 0x93C2},
+ {0x24E6, 0x87D3},
+ {0x24E8, 0x2068},
+ {0x24EA, 0xB0F2},
+ {0x24EC, 0x0040},
+ {0x24EE, 0x0B05},
+ {0x24F0, 0x2461},
+ {0x24F2, 0xD3D2},
+ {0x24F4, 0x0410},
+ {0x24F6, 0xB3E2},
+ {0x24F8, 0x0381},
+ {0x24FA, 0x2089},
+ {0x24FC, 0x90B2},
+ {0x24FE, 0x0030},
+ {0x2500, 0x0A00},
+ {0x2502, 0x2C52},
+ {0x2504, 0x93C2},
+ {0x2506, 0x86EE},
+ {0x2508, 0x204F},
+ {0x250A, 0x430E},
+ {0x250C, 0x430C},
+ {0x250E, 0x4C0F},
+ {0x2510, 0x5F0F},
+ {0x2512, 0x5F0F},
+ {0x2514, 0x5F0F},
+ {0x2516, 0x4F1F},
+ {0x2518, 0x8668},
+ {0x251A, 0xF03F},
+ {0x251C, 0x07FF},
+ {0x251E, 0x903F},
+ {0x2520, 0x0400},
+ {0x2522, 0x343E},
+ {0x2524, 0x5F0E},
+ {0x2526, 0x531C},
+ {0x2528, 0x923C},
+ {0x252A, 0x2BF1},
+ {0x252C, 0x4E0F},
+ {0x252E, 0x930E},
+ {0x2530, 0x3834},
+ {0x2532, 0x110F},
+ {0x2534, 0x110F},
+ {0x2536, 0x110F},
+ {0x2538, 0x9382},
+ {0x253A, 0x86EE},
+ {0x253C, 0x2023},
+ {0x253E, 0x5F82},
+ {0x2540, 0x87D6},
+ {0x2542, 0x403B},
+ {0x2544, 0x87D6},
+ {0x2546, 0x4B2F},
+ {0x2548, 0x12B0},
+ {0x254A, 0xB624},
+ {0x254C, 0x4F8B},
+ {0x254E, 0x0000},
+ {0x2550, 0x430C},
+ {0x2552, 0x4C0D},
+ {0x2554, 0x5D0D},
+ {0x2556, 0x5D0D},
+ {0x2558, 0x5D0D},
+ {0x255A, 0x403A},
+ {0x255C, 0x87D8},
+ {0x255E, 0x421B},
+ {0x2560, 0x87D6},
+ {0x2562, 0x4B0F},
+ {0x2564, 0x8A2F},
+ {0x2566, 0x4F0E},
+ {0x2568, 0x4E0F},
+ {0x256A, 0x5F0F},
+ {0x256C, 0x7F0F},
+ {0x256E, 0xE33F},
+ {0x2570, 0x8E8D},
+ {0x2572, 0x8668},
+ {0x2574, 0x7F8D},
+ {0x2576, 0x866A},
+ {0x2578, 0x531C},
+ {0x257A, 0x923C},
+ {0x257C, 0x2BEA},
+ {0x257E, 0x4B8A},
+ {0x2580, 0x0000},
+ {0x2582, 0x3C45},
+ {0x2584, 0x9382},
+ {0x2586, 0x86F0},
+ {0x2588, 0x2005},
+ {0x258A, 0x4382},
+ {0x258C, 0x87D6},
+ {0x258E, 0x4382},
+ {0x2590, 0x87D8},
+ {0x2592, 0x3FD7},
+ {0x2594, 0x4F82},
+ {0x2596, 0x87D6},
+ {0x2598, 0x3FD4},
+ {0x259A, 0x503F},
+ {0x259C, 0x0007},
+ {0x259E, 0x3FC9},
+ {0x25A0, 0x5F0E},
+ {0x25A2, 0x503E},
+ {0x25A4, 0xF800},
+ {0x25A6, 0x3FBF},
+ {0x25A8, 0x430F},
+ {0x25AA, 0x12B0},
+ {0x25AC, 0xB624},
+ {0x25AE, 0x4382},
+ {0x25B0, 0x87D6},
+ {0x25B2, 0x3C2D},
+ {0x25B4, 0xC3D2},
+ {0x25B6, 0x0410},
+ {0x25B8, 0x3F9E},
+ {0x25BA, 0x430D},
+ {0x25BC, 0x403E},
+ {0x25BE, 0x0050},
+ {0x25C0, 0x403F},
+ {0x25C2, 0x85C8},
+ {0x25C4, 0x1292},
+ {0x25C6, 0x844E},
+ {0x25C8, 0x3F90},
+ {0x25CA, 0x5392},
+ {0x25CC, 0x8720},
+ {0x25CE, 0x3F84},
+ {0x25D0, 0x403B},
+ {0x25D2, 0x843E},
+ {0x25D4, 0x4A0F},
+ {0x25D6, 0x532F},
+ {0x25D8, 0x422D},
+ {0x25DA, 0x4F0E},
+ {0x25DC, 0x403F},
+ {0x25DE, 0x0E08},
+ {0x25E0, 0x12AB},
+ {0x25E2, 0x422D},
+ {0x25E4, 0x403E},
+ {0x25E6, 0x192A},
+ {0x25E8, 0x410F},
+ {0x25EA, 0x12AB},
+ {0x25EC, 0x3F48},
+ {0x25EE, 0x93C2},
+ {0x25F0, 0x86EE},
+ {0x25F2, 0x2312},
+ {0x25F4, 0x403A},
+ {0x25F6, 0x86E4},
+ {0x25F8, 0x3F11},
+ {0x25FA, 0x403D},
+ {0x25FC, 0x0200},
+ {0x25FE, 0x422E},
+ {0x2600, 0x403F},
+ {0x2602, 0x192A},
+ {0x2604, 0x1292},
+ {0x2606, 0x844E},
+ {0x2608, 0xC3D2},
+ {0x260A, 0x1921},
+ {0x260C, 0x3F02},
+ {0x260E, 0x422D},
+ {0x2610, 0x403E},
+ {0x2612, 0x888E},
+ {0x2614, 0x403F},
+ {0x2616, 0x192A},
+ {0x2618, 0x1292},
+ {0x261A, 0x843E},
+ {0x261C, 0x5231},
+ {0x261E, 0x413A},
+ {0x2620, 0x413B},
+ {0x2622, 0x4130},
+ {0x2624, 0x4382},
+ {0x2626, 0x052C},
+ {0x2628, 0x4F0D},
+ {0x262A, 0x930D},
+ {0x262C, 0x3402},
+ {0x262E, 0xE33D},
+ {0x2630, 0x531D},
+ {0x2632, 0xF03D},
+ {0x2634, 0x07F0},
+ {0x2636, 0x4D0E},
+ {0x2638, 0xC312},
+ {0x263A, 0x100E},
+ {0x263C, 0x110E},
+ {0x263E, 0x110E},
+ {0x2640, 0x110E},
+ {0x2642, 0x930F},
+ {0x2644, 0x3803},
+ {0x2646, 0x4EC2},
+ {0x2648, 0x052C},
+ {0x264A, 0x3C04},
+ {0x264C, 0x4EC2},
+ {0x264E, 0x052D},
+ {0x2650, 0xE33D},
+ {0x2652, 0x531D},
+ {0x2654, 0x4D0F},
+ {0x2656, 0x4130},
+ {0x2658, 0x1292},
+ {0x265A, 0xD048},
+ {0x265C, 0x93C2},
+ {0x265E, 0x86EE},
+ {0x2660, 0x200D},
+ {0x2662, 0xB0F2},
+ {0x2664, 0x0020},
+ {0x2666, 0x0381},
+ {0x2668, 0x2407},
+ {0x266A, 0x9292},
+ {0x266C, 0x8722},
+ {0x266E, 0x0384},
+ {0x2670, 0x2C03},
+ {0x2672, 0xD3D2},
+ {0x2674, 0x0649},
+ {0x2676, 0x4130},
+ {0x2678, 0xC3D2},
+ {0x267A, 0x0649},
+ {0x267C, 0x4130},
+ {0x267E, 0x120B},
+ {0x2680, 0x120A},
+ {0x2682, 0x1209},
+ {0x2684, 0x1208},
+ {0x2686, 0x1207},
+ {0x2688, 0x1206},
+ {0x268A, 0x1205},
+ {0x268C, 0x1204},
+ {0x268E, 0x8231},
+ {0x2690, 0x4F81},
+ {0x2692, 0x0000},
+ {0x2694, 0x4381},
+ {0x2696, 0x0002},
+ {0x2698, 0x4304},
+ {0x269A, 0x411C},
+ {0x269C, 0x0002},
+ {0x269E, 0x5C0C},
+ {0x26A0, 0x4C0F},
+ {0x26A2, 0x5F0F},
+ {0x26A4, 0x5F0F},
+ {0x26A6, 0x5F0F},
+ {0x26A8, 0x5F0F},
+ {0x26AA, 0x5F0F},
+ {0x26AC, 0x503F},
+ {0x26AE, 0x1980},
+ {0x26B0, 0x440D},
+ {0x26B2, 0x5D0D},
+ {0x26B4, 0x4D0E},
+ {0x26B6, 0x5F0E},
+ {0x26B8, 0x4E2E},
+ {0x26BA, 0x4D05},
+ {0x26BC, 0x5505},
+ {0x26BE, 0x5F05},
+ {0x26C0, 0x4516},
+ {0x26C2, 0x0008},
+ {0x26C4, 0x4517},
+ {0x26C6, 0x000A},
+ {0x26C8, 0x460A},
+ {0x26CA, 0x470B},
+ {0x26CC, 0xF30A},
+ {0x26CE, 0xF32B},
+ {0x26D0, 0x4A81},
+ {0x26D2, 0x0004},
+ {0x26D4, 0x4B81},
+ {0x26D6, 0x0006},
+ {0x26D8, 0xB03E},
+ {0x26DA, 0x2000},
+ {0x26DC, 0x2404},
+ {0x26DE, 0xF03E},
+ {0x26E0, 0x1FFF},
+ {0x26E2, 0xE33E},
+ {0x26E4, 0x531E},
+ {0x26E6, 0xF317},
+ {0x26E8, 0x503E},
+ {0x26EA, 0x2000},
+ {0x26EC, 0x4E0F},
+ {0x26EE, 0x5F0F},
+ {0x26F0, 0x7F0F},
+ {0x26F2, 0xE33F},
+ {0x26F4, 0x512C},
+ {0x26F6, 0x4C28},
+ {0x26F8, 0x4309},
+ {0x26FA, 0x4E0A},
+ {0x26FC, 0x4F0B},
+ {0x26FE, 0x480C},
+ {0x2700, 0x490D},
+ {0x2702, 0x1202},
+ {0x2704, 0xC232},
+ {0x2706, 0x12B0},
+ {0x2708, 0xFFC0},
+ {0x270A, 0x4132},
+ {0x270C, 0x108E},
+ {0x270E, 0x108F},
+ {0x2710, 0xEF4E},
+ {0x2712, 0xEF0E},
+ {0x2714, 0xF37F},
+ {0x2716, 0xC312},
+ {0x2718, 0x100F},
+ {0x271A, 0x100E},
+ {0x271C, 0x4E85},
+ {0x271E, 0x0018},
+ {0x2720, 0x4F85},
+ {0x2722, 0x001A},
+ {0x2724, 0x480A},
+ {0x2726, 0x490B},
+ {0x2728, 0x460C},
+ {0x272A, 0x470D},
+ {0x272C, 0x1202},
+ {0x272E, 0xC232},
+ {0x2730, 0x12B0},
+ {0x2732, 0xFFC0},
+ {0x2734, 0x4132},
+ {0x2736, 0x4E0C},
+ {0x2738, 0x4F0D},
+ {0x273A, 0x108C},
+ {0x273C, 0x108D},
+ {0x273E, 0xED4C},
+ {0x2740, 0xED0C},
+ {0x2742, 0xF37D},
+ {0x2744, 0xC312},
+ {0x2746, 0x100D},
+ {0x2748, 0x100C},
+ {0x274A, 0x411E},
+ {0x274C, 0x0004},
+ {0x274E, 0x411F},
+ {0x2750, 0x0006},
+ {0x2752, 0x5E0E},
+ {0x2754, 0x6F0F},
+ {0x2756, 0x5E0E},
+ {0x2758, 0x6F0F},
+ {0x275A, 0x5E0E},
+ {0x275C, 0x6F0F},
+ {0x275E, 0xDE0C},
+ {0x2760, 0xDF0D},
+ {0x2762, 0x4C85},
+ {0x2764, 0x002C},
+ {0x2766, 0x4D85},
+ {0x2768, 0x002E},
+ {0x276A, 0x5314},
+ {0x276C, 0x9224},
+ {0x276E, 0x2B95},
+ {0x2770, 0x5391},
+ {0x2772, 0x0002},
+ {0x2774, 0x92A1},
+ {0x2776, 0x0002},
+ {0x2778, 0x2B8F},
+ {0x277A, 0x5231},
+ {0x277C, 0x4134},
+ {0x277E, 0x4135},
+ {0x2780, 0x4136},
+ {0x2782, 0x4137},
+ {0x2784, 0x4138},
+ {0x2786, 0x4139},
+ {0x2788, 0x413A},
+ {0x278A, 0x413B},
+ {0x278C, 0x4130},
+ {0x278E, 0x120B},
+ {0x2790, 0x120A},
+ {0x2792, 0x1209},
+ {0x2794, 0x8031},
+ {0x2796, 0x000C},
+ {0x2798, 0x425F},
+ {0x279A, 0x0205},
+ {0x279C, 0xC312},
+ {0x279E, 0x104F},
+ {0x27A0, 0x114F},
+ {0x27A2, 0x114F},
+ {0x27A4, 0x114F},
+ {0x27A6, 0x114F},
+ {0x27A8, 0x114F},
+ {0x27AA, 0xF37F},
+ {0x27AC, 0x4F0B},
+ {0x27AE, 0xF31B},
+ {0x27B0, 0x5B0B},
+ {0x27B2, 0x5B0B},
+ {0x27B4, 0x5B0B},
+ {0x27B6, 0x503B},
+ {0x27B8, 0xD194},
+ {0x27BA, 0x4219},
+ {0x27BC, 0x0508},
+ {0x27BE, 0xF039},
+ {0x27C0, 0x2000},
+ {0x27C2, 0x4F0A},
+ {0x27C4, 0xC312},
+ {0x27C6, 0x100A},
+ {0x27C8, 0xE31A},
+ {0x27CA, 0x421F},
+ {0x27CC, 0x87DE},
+ {0x27CE, 0x503F},
+ {0x27D0, 0xFF60},
+ {0x27D2, 0x903F},
+ {0x27D4, 0x00C8},
+ {0x27D6, 0x2C02},
+ {0x27D8, 0x403F},
+ {0x27DA, 0x00C8},
+ {0x27DC, 0x4F82},
+ {0x27DE, 0x7322},
+ {0x27E0, 0xB3D2},
+ {0x27E2, 0x0381},
+ {0x27E4, 0x2009},
+ {0x27E6, 0x421F},
+ {0x27E8, 0x86F0},
+ {0x27EA, 0xD21F},
+ {0x27EC, 0x86EE},
+ {0x27EE, 0x930F},
+ {0x27F0, 0x24B9},
+ {0x27F2, 0x40F2},
+ {0x27F4, 0xFF80},
+ {0x27F6, 0x0619},
+ {0x27F8, 0x1292},
+ {0x27FA, 0xD00A},
+ {0x27FC, 0xB3D2},
+ {0x27FE, 0x0385},
+ {0x2800, 0x2405},
+ {0x2802, 0x421F},
+ {0x2804, 0x880A},
+ {0x2806, 0x4F92},
+ {0x2808, 0x0002},
+ {0x280A, 0x8714},
+ {0x280C, 0x430D},
+ {0x280E, 0x93C2},
+ {0x2810, 0x87D0},
+ {0x2812, 0x2003},
+ {0x2814, 0xB2F2},
+ {0x2816, 0x0360},
+ {0x2818, 0x2001},
+ {0x281A, 0x431D},
+ {0x281C, 0x425F},
+ {0x281E, 0x87D3},
+ {0x2820, 0xD25F},
+ {0x2822, 0x87D2},
+ {0x2824, 0xF37F},
+ {0x2826, 0x5F0F},
+ {0x2828, 0x425E},
+ {0x282A, 0x87CD},
+ {0x282C, 0xDE0F},
+ {0x282E, 0x5F0F},
+ {0x2830, 0x5B0F},
+ {0x2832, 0x4FA2},
+ {0x2834, 0x0402},
+ {0x2836, 0x930D},
+ {0x2838, 0x2007},
+ {0x283A, 0x930A},
+ {0x283C, 0x248E},
+ {0x283E, 0x4F5F},
+ {0x2840, 0x0001},
+ {0x2842, 0xF37F},
+ {0x2844, 0x4FC2},
+ {0x2846, 0x0403},
+ {0x2848, 0x93C2},
+ {0x284A, 0x87CD},
+ {0x284C, 0x2483},
+ {0x284E, 0xC2F2},
+ {0x2850, 0x0400},
+ {0x2852, 0xB2E2},
+ {0x2854, 0x0265},
+ {0x2856, 0x2407},
+ {0x2858, 0x421F},
+ {0x285A, 0x0508},
+ {0x285C, 0xF03F},
+ {0x285E, 0xFFDF},
+ {0x2860, 0xD90F},
+ {0x2862, 0x4F82},
+ {0x2864, 0x0508},
+ {0x2866, 0xB3D2},
+ {0x2868, 0x0383},
+ {0x286A, 0x2484},
+ {0x286C, 0x403F},
+ {0x286E, 0x0508},
+ {0x2870, 0x4FB1},
+ {0x2872, 0x0000},
+ {0x2874, 0x4FB1},
+ {0x2876, 0x0002},
+ {0x2878, 0x4FB1},
+ {0x287A, 0x0004},
+ {0x287C, 0x403F},
+ {0x287E, 0x0500},
+ {0x2880, 0x4FB1},
+ {0x2882, 0x0006},
+ {0x2884, 0x4FB1},
+ {0x2886, 0x0008},
+ {0x2888, 0x4FB1},
+ {0x288A, 0x000A},
+ {0x288C, 0xB3E2},
+ {0x288E, 0x0383},
+ {0x2890, 0x2412},
+ {0x2892, 0xC2E1},
+ {0x2894, 0x0002},
+ {0x2896, 0xB2E2},
+ {0x2898, 0x0383},
+ {0x289A, 0x434F},
+ {0x289C, 0x634F},
+ {0x289E, 0xF37F},
+ {0x28A0, 0x4F4E},
+ {0x28A2, 0x114E},
+ {0x28A4, 0x434E},
+ {0x28A6, 0x104E},
+ {0x28A8, 0x415F},
+ {0x28AA, 0x0007},
+ {0x28AC, 0xF07F},
+ {0x28AE, 0x007F},
+ {0x28B0, 0xDE4F},
+ {0x28B2, 0x4FC1},
+ {0x28B4, 0x0007},
+ {0x28B6, 0xB2F2},
+ {0x28B8, 0x0383},
+ {0x28BA, 0x2415},
+ {0x28BC, 0xF0F1},
+ {0x28BE, 0xFFBF},
+ {0x28C0, 0x0000},
+ {0x28C2, 0xB0F2},
+ {0x28C4, 0x0010},
+ {0x28C6, 0x0383},
+ {0x28C8, 0x434E},
+ {0x28CA, 0x634E},
+ {0x28CC, 0x5E4E},
+ {0x28CE, 0x5E4E},
+ {0x28D0, 0x5E4E},
+ {0x28D2, 0x5E4E},
+ {0x28D4, 0x5E4E},
+ {0x28D6, 0x5E4E},
+ {0x28D8, 0x415F},
+ {0x28DA, 0x0006},
+ {0x28DC, 0xF07F},
+ {0x28DE, 0xFFBF},
+ {0x28E0, 0xDE4F},
+ {0x28E2, 0x4FC1},
+ {0x28E4, 0x0006},
+ {0x28E6, 0xB0F2},
+ {0x28E8, 0x0020},
+ {0x28EA, 0x0383},
+ {0x28EC, 0x2410},
+ {0x28EE, 0xF0F1},
+ {0x28F0, 0xFFDF},
+ {0x28F2, 0x0002},
+ {0x28F4, 0xB0F2},
+ {0x28F6, 0x0040},
+ {0x28F8, 0x0383},
+ {0x28FA, 0x434E},
+ {0x28FC, 0x634E},
+ {0x28FE, 0x5E4E},
+ {0x2900, 0x5E4E},
+ {0x2902, 0x415F},
+ {0x2904, 0x0008},
+ {0x2906, 0xC26F},
+ {0x2908, 0xDE4F},
+ {0x290A, 0x4FC1},
+ {0x290C, 0x0008},
+ {0x290E, 0x93C2},
+ {0x2910, 0x0383},
+ {0x2912, 0x3412},
+ {0x2914, 0xF0F1},
+ {0x2916, 0xFFDF},
+ {0x2918, 0x0000},
+ {0x291A, 0x425E},
+ {0x291C, 0x0382},
+ {0x291E, 0xF35E},
+ {0x2920, 0x5E4E},
+ {0x2922, 0x5E4E},
+ {0x2924, 0x5E4E},
+ {0x2926, 0x5E4E},
+ {0x2928, 0x5E4E},
+ {0x292A, 0x415F},
+ {0x292C, 0x0006},
+ {0x292E, 0xF07F},
+ {0x2930, 0xFFDF},
+ {0x2932, 0xDE4F},
+ {0x2934, 0x4FC1},
+ {0x2936, 0x0006},
+ {0x2938, 0x410F},
+ {0x293A, 0x4FB2},
+ {0x293C, 0x0508},
+ {0x293E, 0x4FB2},
+ {0x2940, 0x050A},
+ {0x2942, 0x4FB2},
+ {0x2944, 0x050C},
+ {0x2946, 0x4FB2},
+ {0x2948, 0x0500},
+ {0x294A, 0x4FB2},
+ {0x294C, 0x0502},
+ {0x294E, 0x4FB2},
+ {0x2950, 0x0504},
+ {0x2952, 0x3C10},
+ {0x2954, 0xD2F2},
+ {0x2956, 0x0400},
+ {0x2958, 0x3F7C},
+ {0x295A, 0x4F6F},
+ {0x295C, 0xF37F},
+ {0x295E, 0x4FC2},
+ {0x2960, 0x0402},
+ {0x2962, 0x3F72},
+ {0x2964, 0x90F2},
+ {0x2966, 0x0011},
+ {0x2968, 0x0619},
+ {0x296A, 0x2B46},
+ {0x296C, 0x50F2},
+ {0x296E, 0xFFF0},
+ {0x2970, 0x0619},
+ {0x2972, 0x3F42},
+ {0x2974, 0x5031},
+ {0x2976, 0x000C},
+ {0x2978, 0x4139},
+ {0x297A, 0x413A},
+ {0x297C, 0x413B},
+ {0x297E, 0x4130},
+ {0x2980, 0x0900},
+ {0x2982, 0x7312},
+ {0x2984, 0x421F},
+ {0x2986, 0x0A08},
+ {0x2988, 0xF03F},
+ {0x298A, 0xF7FF},
+ {0x298C, 0x4F82},
+ {0x298E, 0x0A88},
+ {0x2990, 0x0900},
+ {0x2992, 0x7312},
+ {0x2994, 0x421F},
+ {0x2996, 0x0A0E},
+ {0x2998, 0xF03F},
+ {0x299A, 0x7FFF},
+ {0x299C, 0x4F82},
+ {0x299E, 0x0A8E},
+ {0x29A0, 0x0900},
+ {0x29A2, 0x7312},
+ {0x29A4, 0x421F},
+ {0x29A6, 0x0A1E},
+ {0x29A8, 0xC31F},
+ {0x29AA, 0x4F82},
+ {0x29AC, 0x0A9E},
+ {0x29AE, 0x4130},
+ {0x29B0, 0x4292},
+ {0x29B2, 0x0A08},
+ {0x29B4, 0x0A88},
+ {0x29B6, 0x0900},
+ {0x29B8, 0x7312},
+ {0x29BA, 0x4292},
+ {0x29BC, 0x0A0E},
+ {0x29BE, 0x0A8E},
+ {0x29C0, 0x0900},
+ {0x29C2, 0x7312},
+ {0x29C4, 0x4292},
+ {0x29C6, 0x0A1E},
+ {0x29C8, 0x0A9E},
+ {0x29CA, 0x4130},
+ {0x29CC, 0x7400},
+ {0x29CE, 0x8058},
+ {0x29D0, 0x1807},
+ {0x29D2, 0x00E0},
+ {0x29D4, 0x7002},
+ {0x29D6, 0x17C7},
+ {0x29D8, 0x0045},
+ {0x29DA, 0x0006},
+ {0x29DC, 0x17CC},
+ {0x29DE, 0x0015},
+ {0x29E0, 0x1512},
+ {0x29E2, 0x216F},
+ {0x29E4, 0x005B},
+ {0x29E6, 0x005D},
+ {0x29E8, 0x00DE},
+ {0x29EA, 0x00DD},
+ {0x29EC, 0x5023},
+ {0x29EE, 0x00DE},
+ {0x29F0, 0x005B},
+ {0x29F2, 0x0410},
+ {0x29F4, 0x0091},
+ {0x29F6, 0x0015},
+ {0x29F8, 0x0040},
+ {0x29FA, 0x7023},
+ {0x29FC, 0x1653},
+ {0x29FE, 0x0156},
+ {0x2A00, 0x0001},
+ {0x2A02, 0x2081},
+ {0x2A04, 0x7020},
+ {0x2A06, 0x2F99},
+ {0x2A08, 0x005C},
+ {0x2A0A, 0x0000},
+ {0x2A0C, 0x5040},
+ {0x2A0E, 0x0045},
+ {0x2A10, 0x213A},
+ {0x2A12, 0x0303},
+ {0x2A14, 0x0148},
+ {0x2A16, 0x0049},
+ {0x2A18, 0x0045},
+ {0x2A1A, 0x0046},
+ {0x2A1C, 0x05DD},
+ {0x2A1E, 0x00DE},
+ {0x2A20, 0x00DD},
+ {0x2A22, 0x00DC},
+ {0x2A24, 0x00DE},
+ {0x2A26, 0x04D6},
+ {0x2A28, 0x2014},
+ {0x2A2A, 0x2081},
+ {0x2A2C, 0x7087},
+ {0x2A2E, 0x2F99},
+ {0x2A30, 0x005C},
+ {0x2A32, 0x0002},
+ {0x2A34, 0x5060},
+ {0x2A36, 0x31C0},
+ {0x2A38, 0x2122},
+ {0x2A3A, 0x7800},
+ {0x2A3C, 0xC08C},
+ {0x2A3E, 0x0001},
+ {0x2A40, 0x9038},
+ {0x2A42, 0x59F7},
+ {0x2A44, 0x907A},
+ {0x2A46, 0x03D8},
+ {0x2A48, 0x8D90},
+ {0x2A4A, 0x01C0},
+ {0x2A4C, 0x7400},
+ {0x2A4E, 0x8058},
+ {0x2A50, 0x1807},
+ {0x2A52, 0x00E0},
+ {0x2A54, 0x7002},
+ {0x2A56, 0x17C7},
+ {0x2A58, 0x0045},
+ {0x2A5A, 0x0006},
+ {0x2A5C, 0x17CC},
+ {0x2A5E, 0x0015},
+ {0x2A60, 0x1512},
+ {0x2A62, 0x216F},
+ {0x2A64, 0x005B},
+ {0x2A66, 0x005D},
+ {0x2A68, 0x00DE},
+ {0x2A6A, 0x00DD},
+ {0x2A6C, 0x5023},
+ {0x2A6E, 0x00DE},
+ {0x2A70, 0x005B},
+ {0x2A72, 0x0410},
+ {0x2A74, 0x0091},
+ {0x2A76, 0x0015},
+ {0x2A78, 0x0040},
+ {0x2A7A, 0x7023},
+ {0x2A7C, 0x1653},
+ {0x2A7E, 0x0156},
+ {0x2A80, 0x0001},
+ {0x2A82, 0x2081},
+ {0x2A84, 0x7020},
+ {0x2A86, 0x2F99},
+ {0x2A88, 0x005C},
+ {0x2A8A, 0x0000},
+ {0x2A8C, 0x5040},
+ {0x2A8E, 0x0045},
+ {0x2A90, 0x213A},
+ {0x2A92, 0x0303},
+ {0x2A94, 0x0148},
+ {0x2A96, 0x0049},
+ {0x2A98, 0x0045},
+ {0x2A9A, 0x0046},
+ {0x2A9C, 0x05DD},
+ {0x2A9E, 0x00DE},
+ {0x2AA0, 0x00DD},
+ {0x2AA2, 0x00DC},
+ {0x2AA4, 0x00DE},
+ {0x2AA6, 0x0296},
+ {0x2AA8, 0x2014},
+ {0x2AAA, 0x2081},
+ {0x2AAC, 0x7087},
+ {0x2AAE, 0x2F99},
+ {0x2AB0, 0x005C},
+ {0x2AB2, 0x0002},
+ {0x2AB4, 0x5060},
+ {0x2AB6, 0x31C0},
+ {0x2AB8, 0x2122},
+ {0x2ABA, 0x7800},
+ {0x2ABC, 0xC08C},
+ {0x2ABE, 0x0001},
+ {0x2AC0, 0x9038},
+ {0x2AC2, 0x59F7},
+ {0x2AC4, 0x907A},
+ {0x2AC6, 0x03D8},
+ {0x2AC8, 0x8D90},
+ {0x2ACA, 0x01C0},
+ {0x2ACC, 0x7400},
+ {0x2ACE, 0x2002},
+ {0x2AD0, 0x70DF},
+ {0x2AD2, 0x2F21},
+ {0x2AD4, 0x04C1},
+ {0x2AD6, 0x0D80},
+ {0x2AD8, 0x7800},
+ {0x2ADA, 0x0041},
+ {0x2ADC, 0x7400},
+ {0x2ADE, 0x2004},
+ {0x2AE0, 0x70DF},
+ {0x2AE2, 0x2F21},
+ {0x2AE4, 0x04C2},
+ {0x2AE6, 0x0D80},
+ {0x2AE8, 0x7800},
+ {0x2AEA, 0x7400},
+ {0x2AEC, 0x2008},
+ {0x2AEE, 0x70DF},
+ {0x2AF0, 0x2F21},
+ {0x2AF2, 0x04C3},
+ {0x2AF4, 0x0D80},
+ {0x2AF6, 0x7800},
+ {0x2AF8, 0x7400},
+ {0x2AFA, 0x0004},
+ {0x2AFC, 0x70DF},
+ {0x2AFE, 0x2F22},
+ {0x2B00, 0x7008},
+ {0x2B02, 0x2F1F},
+ {0x2B04, 0x7021},
+ {0x2B06, 0x2F01},
+ {0x2B08, 0x7800},
+ {0x2B0A, 0x7400},
+ {0x2B0C, 0x0002},
+ {0x2B0E, 0x70DF},
+ {0x2B10, 0x3F5F},
+ {0x2B12, 0x703A},
+ {0x2B14, 0x2F01},
+ {0x2B16, 0x7800},
+ {0x2B18, 0x7400},
+ {0x2B1A, 0x2010},
+ {0x2B1C, 0x70DF},
+ {0x2B1E, 0x3F40},
+ {0x2B20, 0x700A},
+ {0x2B22, 0x0FC0},
+ {0x2B24, 0x7800},
+ {0x2B26, 0x7400},
+ {0x2B28, 0x2004},
+ {0x2B2A, 0x70DF},
+ {0x2B2C, 0x2F21},
+ {0x2B2E, 0x04C2},
+ {0x2B30, 0x0D80},
+ {0x2B32, 0x7800},
+ {0x2B34, 0x0041},
+ {0x2B36, 0x7400},
+ {0x2B38, 0x2002},
+ {0x2B3A, 0x70DF},
+ {0x2B3C, 0x2F22},
+ {0x2B3E, 0x04C1},
+ {0x2B40, 0x0D80},
+ {0x2B42, 0x7800},
+ {0x2B44, 0x7400},
+ {0x2B46, 0x0001},
+ {0x2B48, 0x70DF},
+ {0x2B4A, 0x3F5F},
+ {0x2B4C, 0x703A},
+ {0x2B4E, 0x2F01},
+ {0x2B50, 0x7800},
+ {0x2B52, 0x7400},
+ {0x2B54, 0x200A},
+ {0x2B56, 0x70DF},
+ {0x2B58, 0x3F40},
+ {0x2B5A, 0x700A},
+ {0x2B5C, 0x0FC0},
+ {0x2B5E, 0x7800},
+ {0x2B60, 0x7400},
+ {0x2B62, 0x2015},
+ {0x2B64, 0x70DF},
+ {0x2B66, 0x3F5F},
+ {0x2B68, 0x703A},
+ {0x2B6A, 0x2F01},
+ {0x2B6C, 0x7800},
+ {0x2B6E, 0x7400},
+ {0x2B70, 0x7800},
+ {0x2B72, 0x007F},
+ {0x2B74, 0x0000},
+ {0x2B76, 0xB9CC},
+ {0x2B78, 0x0000},
+ {0x2B7A, 0xB9CC},
+ {0x2B7C, 0xBA3C},
+ {0x2B7E, 0x0002},
+ {0x2B80, 0x0000},
+ {0x2B82, 0xBA4C},
+ {0x2B84, 0x0000},
+ {0x2B86, 0xBA4C},
+ {0x2B88, 0xBABC},
+ {0x2B8A, 0x0002},
+ {0x2B8C, 0x0063},
+ {0x2B8E, 0xBB26},
+ {0x2B90, 0x0063},
+ {0x2B92, 0xBB36},
+ {0x2B94, 0x0063},
+ {0x2B96, 0xBAEA},
+ {0x2B98, 0x0063},
+ {0x2B9A, 0xBAF8},
+ {0x2B9C, 0xBADA},
+ {0x2B9E, 0x0004},
+ {0x2BA0, 0x0063},
+ {0x2BA2, 0xBAEA},
+ {0x2BA4, 0x0063},
+ {0x2BA6, 0xBB18},
+ {0x2BA8, 0x0063},
+ {0x2BAA, 0xBB26},
+ {0x2BAC, 0x0063},
+ {0x2BAE, 0xBB44},
+ {0x2BB0, 0xBADA},
+ {0x2BB2, 0x0004},
+ {0x2BB4, 0x0063},
+ {0x2BB6, 0xBACC},
+ {0x2BB8, 0x0063},
+ {0x2BBA, 0xBADC},
+ {0x2BBC, 0x0063},
+ {0x2BBE, 0xBAEA},
+ {0x2BC0, 0x0063},
+ {0x2BC2, 0xBAF8},
+ {0x2BC4, 0xBADA},
+ {0x2BC6, 0x0004},
+ {0x2BC8, 0x0063},
+ {0x2BCA, 0xBAEA},
+ {0x2BCC, 0x0063},
+ {0x2BCE, 0xBB18},
+ {0x2BD0, 0x0063},
+ {0x2BD2, 0xBACC},
+ {0x2BD4, 0x0063},
+ {0x2BD6, 0xBB0A},
+ {0x2BD8, 0xBADA},
+ {0x2BDA, 0x0004},
+ {0x2BDC, 0x0063},
+ {0x2BDE, 0xBACC},
+ {0x2BE0, 0x0063},
+ {0x2BE2, 0xBADC},
+ {0x2BE4, 0x0063},
+ {0x2BE6, 0xBAEA},
+ {0x2BE8, 0x0063},
+ {0x2BEA, 0xBB18},
+ {0x2BEC, 0xBADA},
+ {0x2BEE, 0x0004},
+ {0x2BF0, 0xFFFF},
+ {0x2BF2, 0xBB6E},
+ {0x2BF4, 0x0000},
+ {0x2BF6, 0x0000},
+ {0x2BF8, 0x0000},
+ {0x2BFA, 0x0000},
+ {0x2BFC, 0x0000},
+ {0x2BFE, 0x0000},
+ {0x2C00, 0xBB72},
+ {0x2C02, 0x0001},
+ {0x2C04, 0x0063},
+ {0x2C06, 0xBB52},
+ {0x2C08, 0x0063},
+ {0x2C0A, 0xBB60},
+ {0x2C0C, 0x0000},
+ {0x2C0E, 0x0000},
+ {0x2C10, 0x0000},
+ {0x2C12, 0x0000},
+ {0x2C14, 0xBADA},
+ {0x2C16, 0x0002},
+ {0x2C18, 0x0066},
+ {0x2C1A, 0x0067},
+ {0x2C1C, 0x00AF},
+ {0x2C1E, 0x01CF},
+ {0x2C20, 0x0087},
+ {0x2C22, 0x0083},
+ {0x2C24, 0x011B},
+ {0x2C26, 0x035A},
+ {0x2C28, 0x00FA},
+ {0x2C2A, 0x00F2},
+ {0x2C2C, 0x00A6},
+ {0x2C2E, 0x00A4},
+ {0x2C30, 0xFFFF},
+ {0x2C32, 0x002C},
+ {0x2C34, 0x0058},
+ {0x2C36, 0x0000},
+ {0x2C38, 0x0000},
+ {0x2C3A, 0xBC18},
+ {0x2C3C, 0xBB74},
+ {0x2C3E, 0xBB80},
+ {0x2C40, 0xBC32},
+ {0x2C42, 0xBB8C},
+ {0x2C44, 0xBBA0},
+ {0x2C46, 0xBB8C},
+ {0x2C48, 0xBBA0},
+ {0x2C4A, 0xBC04},
+ {0x2C4C, 0xBC04},
+ {0x2C4E, 0xBBF0},
+ {0x2C50, 0xBBF0},
+ {0x2C52, 0xBBB4},
+ {0x2C54, 0xBBC8},
+ {0x2C56, 0xBBB4},
+ {0x2C58, 0xBBC8},
+ {0x2C5A, 0xBC04},
+ {0x2C5C, 0xBC04},
+ {0x2C5E, 0xBBF0},
+ {0x2C60, 0xBBF0},
+ {0x2C62, 0xBB8C},
+ {0x2C64, 0xBBA0},
+ {0x2C66, 0xBB8C},
+ {0x2C68, 0xBBA0},
+ {0x2C6A, 0xBC04},
+ {0x2C6C, 0xBC04},
+ {0x2C6E, 0xBBF0},
+ {0x2C70, 0xBBF0},
+ {0x2C72, 0xBBB4},
+ {0x2C74, 0xBBC8},
+ {0x2C76, 0xBBB4},
+ {0x2C78, 0xBBC8},
+ {0x2C7A, 0xBC04},
+ {0x2C7C, 0xBC04},
+ {0x2C7E, 0xBBF0},
+ {0x2C80, 0xBBF0},
+ {0x3800, 0x880E},
+ {0x3802, 0xBC62},
+ {0x3804, 0xBC40},
+ {0x3806, 0xD13E},
+ {0x3808, 0xBC42},
+ {0x380A, 0xBC3C},
+ {0x380C, 0x0000},
+ {0x380E, 0x0040},
+ {0x3810, 0x0040},
+ {0x3812, 0x0040},
+ {0x3814, 0x0043},
+ {0x3816, 0x0046},
+ {0x3818, 0x004B},
+ {0x381A, 0x004D},
+ {0x381C, 0x0051},
+ {0x381E, 0x0055},
+ {0x3820, 0x005A},
+ {0x3822, 0x005E},
+ {0x3824, 0x0062},
+ {0x3826, 0x0067},
+ {0x3828, 0x006C},
+ {0x382A, 0x0070},
+ {0x382C, 0x0078},
+ {0x382E, 0x0086},
+ {0x3830, 0x0090},
+ {0x3832, 0x0096},
+ {0x3834, 0x009D},
+ {0x3836, 0x00A5},
+ {0x3838, 0x00AD},
+ {0x383A, 0x00B4},
+ {0x383C, 0x00B9},
+ {0x383E, 0x00BE},
+ {0x3840, 0x00C3},
+ {0x3842, 0x00C8},
+ {0x3844, 0x00CD},
+ {0x3846, 0x00D2},
+ {0x3848, 0x00D7},
+ {0x384A, 0x00DC},
+ {0x384C, 0x00DC},
+ {0x384E, 0x0000},
+ {0x3850, 0x0000},
+ {0x3852, 0x0000},
+ {0x3854, 0x0000},
+ {0x3856, 0x0000},
+ {0x3858, 0x0000},
+ {0x385A, 0x0000},
+ {0x385C, 0x0000},
+ {0x385E, 0x0000},
+ {0x3860, 0x0000},
+ {0x3862, 0x0000},
+ {0x3864, 0x0000},
+ {0x3866, 0x0000},
+ {0x3868, 0x0000},
+ {0x386A, 0x0000},
+ {0x386C, 0x0000},
+ {0x386E, 0x0000},
+ {0x3870, 0x0000},
+ {0x3872, 0x0000},
+ {0x3874, 0x0000},
+ {0x3876, 0x0000},
+ {0x3878, 0x0000},
+ {0x387A, 0x0000},
+ {0x387C, 0x0000},
+ {0x387E, 0x0000},
+ {0x3880, 0x0000},
+ {0x3882, 0x0000},
+ {0x3884, 0x0000},
+ {0x3886, 0x0000},
+ {0x3888, 0x0000},
+ {0x388A, 0x0000},
+ {0x388C, 0x0000},
+ {0x026A, 0xFFFF},
+ {0x026C, 0x00FF},
+ {0x026E, 0x0000},
+ {0x0360, 0x1E8E},
+ {0x040E, 0x01EB},
+ {0x0600, 0x1130},
+ {0x0602, 0x3112},
+ {0x0604, 0x8048},
+ {0x0606, 0x00E9},
+ {0x067A, 0x0404},
+ {0x067C, 0x0404},
+ {0x06A8, 0x0240},
+ {0x06AA, 0x00CA},
+ {0x06AC, 0x0041},
+ {0x06B4, 0x3FFF},
+ {0x06DE, 0x0404},
+ {0x06E0, 0x0404},
+ {0x06E2, 0xFF00},
+ {0x06E4, 0x8333},
+ {0x06E6, 0x8333},
+ {0x06E8, 0x8333},
+ {0x06EA, 0x8333},
+ {0x052A, 0x0000},
+ {0x052C, 0x0000},
+ {0x0F06, 0x0002},
+ {0x0A04, 0xB4C5},
+ {0x0A06, 0xC400},
+ {0x0A08, 0x988A},
+ {0x0A0A, 0xA387},
+ {0x0A0E, 0xEEC0},
+ {0x0A12, 0x0000},
+ {0x0A18, 0x0010},
+ {0x0A1C, 0x0040},
+ {0x0A20, 0x0015},
+ {0x0C00, 0x0021},
+ {0x0C16, 0x0002},
+ {0x0708, 0x6FC0},
+ {0x070C, 0x0000},
+ {0x120C, 0x1428},
+ {0x121A, 0x0000},
+ {0x121C, 0x1896},
+ {0x121E, 0x0032},
+ {0x1220, 0x0000},
+ {0x1222, 0x96FF},
+ {0x1244, 0x0000},
+ {0x105C, 0x0F0B},
+ {0x1958, 0x0000},
+ {0x195A, 0x004C},
+ {0x195C, 0x0097},
+ {0x195E, 0x0221},
+ {0x1960, 0x03FE},
+ {0x1980, 0x00E0},
+ {0x1982, 0x0010},
+ {0x1984, 0x2018},
+ {0x1986, 0x0008},
+ {0x1988, 0x0000},
+ {0x198A, 0x0000},
+ {0x198C, 0x0880},
+ {0x198E, 0x0000},
+ {0x1990, 0x1A00},
+ {0x1992, 0x0000},
+ {0x1994, 0x2800},
+ {0x1996, 0x0002},
+ {0x1962, 0x0000},
+ {0x1964, 0x004C},
+ {0x1966, 0x0097},
+ {0x1968, 0x0221},
+ {0x196A, 0x03FE},
+ {0x19C0, 0x00E0},
+ {0x19C2, 0x0010},
+ {0x19C4, 0x2018},
+ {0x19C6, 0x0008},
+ {0x19C8, 0x0000},
+ {0x19CA, 0x0000},
+ {0x19CC, 0x0880},
+ {0x19CE, 0x0000},
+ {0x19D0, 0x1A00},
+ {0x19D2, 0x0000},
+ {0x19D4, 0x2800},
+ {0x19D6, 0x0002},
+ {0x196C, 0x0000},
+ {0x196E, 0x004C},
+ {0x1970, 0x0097},
+ {0x1972, 0x0221},
+ {0x1974, 0x03FE},
+ {0x1A00, 0x00E0},
+ {0x1A02, 0x0010},
+ {0x1A04, 0x2018},
+ {0x1A06, 0x0008},
+ {0x1A08, 0x0000},
+ {0x1A0A, 0x0000},
+ {0x1A0C, 0x0880},
+ {0x1A0E, 0x0000},
+ {0x1A10, 0x1A00},
+ {0x1A12, 0x0000},
+ {0x1A14, 0x2800},
+ {0x1A16, 0x0002},
+ {0x1976, 0x0000},
+ {0x1978, 0x004C},
+ {0x197A, 0x0097},
+ {0x197C, 0x0221},
+ {0x197E, 0x03FE},
+ {0x1A40, 0x00E0},
+ {0x1A42, 0x0010},
+ {0x1A44, 0x2018},
+ {0x1A46, 0x0008},
+ {0x1A48, 0x0000},
+ {0x1A4A, 0x0000},
+ {0x1A4C, 0x0880},
+ {0x1A4E, 0x0000},
+ {0x1A50, 0x1A00},
+ {0x1A52, 0x0000},
+ {0x1A54, 0x2800},
+ {0x1A56, 0x0002},
+ {0x192A, 0x0201},
+ {0x0384, 0x0001},
+ {0x027E, 0x0100},
+};
+
+static const struct hi847_reg mode_3264x2448_regs[] = {
+ {0x0B00, 0x0000},
+ {0x0204, 0x0000},
+ {0x0206, 0x033C},
+ {0x020A, 0x0B4D},
+ {0x020E, 0x0B51},
+ {0x0214, 0x0200},
+ {0x0216, 0x0200},
+ {0x0218, 0x0200},
+ {0x021A, 0x0200},
+ {0x0224, 0x002E},
+ {0x022A, 0x0017},
+ {0x022C, 0x0E1F},
+ {0x022E, 0x09C1},
+ {0x0234, 0x1111},
+ {0x0236, 0x1111},
+ {0x0238, 0x1111},
+ {0x023A, 0x1111},
+ {0x0250, 0x0000},
+ {0x0252, 0x0006},
+ {0x0254, 0x0000},
+ {0x0256, 0x0000},
+ {0x0258, 0x0000},
+ {0x025A, 0x0000},
+ {0x025C, 0x0000},
+ {0x025E, 0x0202},
+ {0x0268, 0x00CD},
+ {0x0440, 0x0002},
+ {0x0F00, 0x0000},
+ {0x0F04, 0x0008},
+ {0x0F06, 0x0002},
+ {0x0B02, 0x0100},
+ {0x0B04, 0x00DC},
+ {0x0B12, 0x0CC0},
+ {0x0B14, 0x0990},
+ {0x0B20, 0x0100},
+ {0x1100, 0x1100},
+ {0x1102, 0x0008},
+ {0x1108, 0x0202},
+ {0x1118, 0x0000},
+ {0x0A10, 0xB040},
+ {0x0C14, 0x0008},
+ {0x0C18, 0x0CC0},
+ {0x0C1A, 0x0990},
+ {0x0730, 0x0001},
+ {0x0732, 0x0000},
+ {0x0734, 0x0300},
+ {0x0736, 0x004B},
+ {0x0738, 0x0001},
+ {0x073C, 0x0900},
+ {0x0740, 0x0000},
+ {0x0742, 0x0000},
+ {0x0744, 0x0300},
+ {0x0746, 0x007D},
+ {0x0748, 0x0002},
+ {0x074A, 0x0900},
+ {0x074C, 0x0000},
+ {0x074E, 0x0100},
+ {0x0750, 0x0000},
+ {0x1200, 0x0946},
+ {0x1202, 0x1A00},
+ {0x120E, 0x6027},
+ {0x1210, 0x8027},
+ {0x1246, 0x0105},
+ {0x1000, 0x0300},
+ {0x1002, 0xC311},
+ {0x1004, 0x2BB0},
+ {0x1010, 0x087B},
+ {0x1012, 0x0040},
+ {0x1014, 0x0020},
+ {0x1016, 0x0020},
+ {0x101A, 0x0020},
+ {0x1020, 0xC107},
+ {0x1022, 0x081E},
+ {0x1024, 0x0509},
+ {0x1026, 0x0B0A},
+ {0x1028, 0x1409},
+ {0x102A, 0x0B05},
+ {0x102C, 0x1400},
+ {0x1038, 0x0000},
+ {0x103E, 0x0001},
+ {0x1040, 0x0000},
+ {0x1042, 0x0008},
+ {0x1044, 0x0120},
+ {0x1046, 0x01B0},
+ {0x1048, 0x0090},
+ {0x1066, 0x089C},
+ {0x1600, 0x0000},
+ {0x1608, 0x0028},
+ {0x160A, 0x0C80},
+ {0x160C, 0x001A},
+ {0x160E, 0x0960},
+ {0x0252, 0x0009},
+ {0x0202, 0x0000},
+};
+
+static const struct hi847_reg mode_1632x1224_regs[] = {
+ {0x0B00, 0x0000},
+ {0x0204, 0x0200},
+ {0x0206, 0x033C},
+ {0x020A, 0x05A5},
+ {0x020E, 0x05A9},
+ {0x0214, 0x0200},
+ {0x0216, 0x0200},
+ {0x0218, 0x0200},
+ {0x021A, 0x0200},
+ {0x0224, 0x002C},
+ {0x022A, 0x0015},
+ {0x022C, 0x0E2D},
+ {0x022E, 0x09C1},
+ {0x0234, 0x3311},
+ {0x0236, 0x3311},
+ {0x0238, 0x3311},
+ {0x023A, 0x2222},
+ {0x0250, 0x0000},
+ {0x0252, 0x0006},
+ {0x0254, 0x0000},
+ {0x0256, 0x0000},
+ {0x0258, 0x0000},
+ {0x025A, 0x0000},
+ {0x025C, 0x0000},
+ {0x025E, 0x0202},
+ {0x0268, 0x00CD},
+ {0x0440, 0x0002},
+ {0x0F00, 0x0400},
+ {0x0F04, 0x0004},
+ {0x0F06, 0x0002},
+ {0x0B02, 0x0100},
+ {0x0B04, 0x00FC},
+ {0x0B12, 0x0660},
+ {0x0B14, 0x04C8},
+ {0x0B20, 0x0200},
+ {0x1100, 0x1100},
+ {0x1102, 0x0008},
+ {0x1108, 0x0402},
+ {0x1118, 0x0000},
+ {0x0A10, 0xB060},
+ {0x0C14, 0x0008},
+ {0x0C18, 0x0CC0},
+ {0x0C1A, 0x04C8},
+ {0x0730, 0x0001},
+ {0x0732, 0x0000},
+ {0x0734, 0x0300},
+ {0x0736, 0x004B},
+ {0x0738, 0x0001},
+ {0x073C, 0x0900},
+ {0x0740, 0x0000},
+ {0x0742, 0x0000},
+ {0x0744, 0x0300},
+ {0x0746, 0x007D},
+ {0x0748, 0x0002},
+ {0x074A, 0x0900},
+ {0x074C, 0x0100},
+ {0x074E, 0x0100},
+ {0x0750, 0x0000},
+ {0x1200, 0x0946},
+ {0x1202, 0x1A00},
+ {0x120E, 0x6027},
+ {0x1210, 0x8027},
+ {0x1246, 0x0105},
+ {0x1000, 0x0300},
+ {0x1002, 0xC311},
+ {0x1004, 0x2BB0},
+ {0x1010, 0x042B},
+ {0x1012, 0x0012},
+ {0x1014, 0x0020},
+ {0x1016, 0x0020},
+ {0x101A, 0x0020},
+ {0x1020, 0xC103},
+ {0x1022, 0x040F},
+ {0x1024, 0x0304},
+ {0x1026, 0x0607},
+ {0x1028, 0x0D06},
+ {0x102A, 0x0605},
+ {0x102C, 0x0C00},
+ {0x1038, 0x0000},
+ {0x103E, 0x0101},
+ {0x1040, 0x0000},
+ {0x1042, 0x0008},
+ {0x1044, 0x0120},
+ {0x1046, 0x01B0},
+ {0x1048, 0x0090},
+ {0x1066, 0x043B},
+ {0x1600, 0x0400},
+ {0x1608, 0x0028},
+ {0x160A, 0x0C80},
+ {0x160C, 0x001A},
+ {0x160E, 0x0960},
+ {0x0252, 0x0009},
+ {0x0202, 0x0000},
+};
+
+static const char * const hi847_test_pattern_menu[] = {
+ "No Pattern",
+ "Solid Colour",
+ "100% Colour Bars",
+ "Fade To Grey Colour Bars",
+ "PN9",
+ "Horizontal Gradient Pattern",
+ "Vertical Gradient Pattern",
+ "Check Board",
+ "Slant Pattern",
+};
+
+static const s64 link_freq_menu_items[] = {
+ HI847_LINK_FREQ_400MHZ,
+ HI847_LINK_FREQ_200MHZ,
+};
+
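+/* Both link frequencies share the same mipi_data_rate_lane_4 PLL settings. */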
+static const struct hi847_link_freq_config link_freq_configs[] = {
+ [HI847_LINK_FREQ_400MHZ_INDEX] = {
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_lane_4),
+ .regs = mipi_data_rate_lane_4,
+ }
+ },
+ [HI847_LINK_FREQ_200MHZ_INDEX] = {
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_lane_4),
+ .regs = mipi_data_rate_lane_4,
+ }
+ }
+};
+
+static const struct hi847_mode supported_modes[] = {
+ {
+ .width = 3264,
+ .height = 2448,
+ .fll_def = HI847_FLL_30FPS,
+ .fll_min = HI847_FLL_30FPS_MIN,
+ .llp = 0x033C,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_3264x2448_regs),
+ .regs = mode_3264x2448_regs,
+ },
+ .link_freq_index = HI847_LINK_FREQ_400MHZ_INDEX,
+ },
+ {
+ .width = 1632,
+ .height = 1224,
+ .fll_def = HI847_FLL_60FPS,
+ .fll_min = HI847_FLL_60FPS_MIN,
+ .llp = 0x033C,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1632x1224_regs),
+ .regs = mode_1632x1224_regs,
+ },
+ .link_freq_index = HI847_LINK_FREQ_200MHZ_INDEX,
+ }
+};
+
+struct hi847 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *hflip;
+
+ /* Current mode */
+ const struct hi847_mode *cur_mode;
+
+ /* To serialize asynchronous callbacks */
+ struct mutex mutex;
+
+ /* Streaming on/off */
+ bool streaming;
+};
+
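+/*
+ * Pixel rate for a CSI-2 D-PHY link: link frequency * 2 (double data
+ * rate) * number of data lanes, divided by the bits per pixel (10-bit
+ * raw Bayer).
+ */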
+static u64 to_pixel_rate(u32 f_index)
+{
+ u64 pixel_rate = link_freq_menu_items[f_index] * 2 * HI847_DATA_LANES;
+
+ do_div(pixel_rate, HI847_RGB_DEPTH);
+
+ return pixel_rate;
+}
+
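+/*
+ * Registers are addressed with a 16-bit big-endian address.  The value
+ * is read into the tail of a 4-byte buffer so a single big-endian load
+ * returns it right-aligned for any length up to 4 bytes.
+ */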
+static int hi847_read_reg(struct hi847 *hi847, u16 reg, u16 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2];
+ u8 data_buf[4] = {0};
+ int ret;
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, addr_buf);
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = sizeof(addr_buf);
+ msgs[0].buf = addr_buf;
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_buf[4 - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
+
+static int hi847_write_reg(struct hi847 *hi847, u16 reg, u16 len, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
+ u8 buf[6];
+
+ if (len > 4)
+ return -EINVAL;
+
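+ /*
+ * Left-align the value so the first "len" bytes of the big-endian
+ * 32-bit word hold it, right after the 16-bit register address.
+ */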
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be32(val << 8 * (4 - len), buf + 2);
+ if (i2c_master_send(client, buf, len + 2) != len + 2)
+ return -EIO;
+
+ return 0;
+}
+
+static int hi847_write_reg_list(struct hi847 *hi847,
+ const struct hi847_reg_list *r_list)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < r_list->num_of_regs; i++) {
+ ret = hi847_write_reg(hi847, r_list->regs[i].address,
+ HI847_REG_VALUE_16BIT,
+ r_list->regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "failed to write reg 0x%4.4x. error = %d",
+ r_list->regs[i].address, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hi847_update_digital_gain(struct hi847 *hi847, u32 d_gain)
+{
+ int ret;
+
+ ret = hi847_write_reg(hi847, HI847_REG_MWB_GR_GAIN,
+ HI847_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ ret = hi847_write_reg(hi847, HI847_REG_MWB_GB_GAIN,
+ HI847_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ ret = hi847_write_reg(hi847, HI847_REG_MWB_R_GAIN,
+ HI847_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ return hi847_write_reg(hi847, HI847_REG_MWB_B_GAIN,
+ HI847_REG_VALUE_16BIT, d_gain);
+}
+
+static int hi847_test_pattern(struct hi847 *hi847, u32 pattern)
+{
+ int ret;
+ u32 val;
+
+ if (pattern) {
+ ret = hi847_read_reg(hi847, HI847_REG_ISP,
+ HI847_REG_VALUE_16BIT, &val);
+ if (ret)
+ return ret;
+
+ ret = hi847_write_reg(hi847, HI847_REG_ISP,
+ HI847_REG_VALUE_16BIT,
+ val | HI847_REG_ISP_TPG_EN);
+ if (ret)
+ return ret;
+ }
+
+ ret = hi847_read_reg(hi847, HI847_REG_TEST_PATTERN,
+ HI847_REG_VALUE_16BIT, &val);
+ if (ret)
+ return ret;
+
+ return hi847_write_reg(hi847, HI847_REG_TEST_PATTERN,
+ HI847_REG_VALUE_16BIT, val | pattern << 8);
+}
+
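+/*
+ * Flipping the readout direction changes which color the first output
+ * pixel samples; shift the output window start so the bus format stays
+ * GRBG for every hflip/vflip combination.
+ */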
+static int hi847_grbg_shift(struct hi847 *hi847)
+{
+ int ret;
+ int hflip, vflip;
+
+ /* Readout window shifts for the full-size (3264x2448) mode */
+ static const u32 FORMAT_X_SHIFT_1[2][2] = {
+ { 0x0008, 0x0007, },
+ { 0x0008, 0x0007, },
+ };
+
+ static const u32 FORMAT_Y_SHIFT_1[2][2] = {
+ { 0x0002, 0x0002, },
+ { 0x0001, 0x0001, },
+ };
+
+ /* Readout window shifts for the binning (1632x1224) mode */
+ static const u32 FORMAT_X_SHIFT_2[2][2] = {
+ { 0x0004, 0x0003, },
+ { 0x0004, 0x0003, },
+ };
+
+ static const u32 FORMAT_Y_SHIFT_2[2][2] = {
+ { 0x0002, 0x0002, },
+ { 0x0001, 0x0001, },
+ };
+
+ hflip = hi847->hflip->val;
+ vflip = hi847->vflip->val;
+
+ if (hi847->cur_mode->width == 3264) {
+ ret = hi847_write_reg(hi847, HI847_REG_FORMAT_X,
+ HI847_REG_VALUE_16BIT,
+ FORMAT_X_SHIFT_1[vflip][hflip]);
+ if (ret)
+ return ret;
+
+ return hi847_write_reg(hi847, HI847_REG_FORMAT_Y,
+ HI847_REG_VALUE_16BIT,
+ FORMAT_Y_SHIFT_1[vflip][hflip]);
+ } else {
+ ret = hi847_write_reg(hi847, HI847_REG_FORMAT_X,
+ HI847_REG_VALUE_16BIT,
+ FORMAT_X_SHIFT_2[vflip][hflip]);
+ if (ret)
+ return ret;
+
+ return hi847_write_reg(hi847, HI847_REG_FORMAT_Y,
+ HI847_REG_VALUE_16BIT,
+ FORMAT_Y_SHIFT_2[vflip][hflip]);
+ }
+}
+
+static int hi847_set_ctrl_hflip(struct hi847 *hi847, u32 ctrl_val)
+{
+ int ret;
+ u32 val;
+
+ ret = hi847_read_reg(hi847, HI847_REG_MIRROR_FLIP,
+ HI847_REG_VALUE_16BIT, &val);
+ if (ret)
+ return ret;
+
+ ret = hi847_grbg_shift(hi847);
+ if (ret)
+ return ret;
+
+ return hi847_write_reg(hi847, HI847_REG_MIRROR_FLIP,
+ HI847_REG_VALUE_16BIT,
+ ctrl_val ? val | BIT(8) : val & ~BIT(8));
+}
+
+static int hi847_set_ctrl_vflip(struct hi847 *hi847, u8 ctrl_val)
+{
+ int ret;
+ u32 val;
+
+ ret = hi847_read_reg(hi847, HI847_REG_MIRROR_FLIP,
+ HI847_REG_VALUE_16BIT, &val);
+ if (ret)
+ return ret;
+
+ ret = hi847_grbg_shift(hi847);
+ if (ret)
+ return ret;
+
+ return hi847_write_reg(hi847, HI847_REG_MIRROR_FLIP,
+ HI847_REG_VALUE_16BIT,
+ ctrl_val ? val | BIT(9) : val & ~BIT(9));
+}
+
+static int hi847_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hi847 *hi847 = container_of(ctrl->handler,
+ struct hi847, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
+ s64 exposure_max;
+ int ret = 0;
+
+ /* Propagate change of current control to all related controls */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Update the exposure limit to respect the new vertical blanking */
+ exposure_max = hi847->cur_mode->height + ctrl->val -
+ HI847_EXPOSURE_MAX_MARGIN;
+ __v4l2_ctrl_modify_range(hi847->exposure,
+ hi847->exposure->minimum,
+ exposure_max, hi847->exposure->step,
+ exposure_max);
+ }
+
+ /* V4L2 control values are only applied when power is already up */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = hi847_write_reg(hi847, HI847_REG_ANALOG_GAIN,
+ HI847_REG_VALUE_16BIT, ctrl->val);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = hi847_update_digital_gain(hi847, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ ret = hi847_write_reg(hi847, HI847_REG_EXPOSURE,
+ HI847_REG_VALUE_16BIT, ctrl->val);
+ break;
+
+ case V4L2_CID_VBLANK:
+ /* Update the frame length (FLL) to match the requested vertical blanking */
+ ret = hi847_write_reg(hi847, HI847_REG_FLL,
+ HI847_REG_VALUE_16BIT,
+ hi847->cur_mode->height + ctrl->val);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ ret = hi847_test_pattern(hi847, ctrl->val);
+ break;
+
+ case V4L2_CID_HFLIP:
+ ret = hi847_set_ctrl_hflip(hi847, ctrl->val);
+ break;
+
+ case V4L2_CID_VFLIP:
+ ret = hi847_set_ctrl_vflip(hi847, ctrl->val);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops hi847_ctrl_ops = {
+ .s_ctrl = hi847_set_ctrl,
+};
+
+static int hi847_init_controls(struct hi847 *hi847)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ s64 exposure_max, h_blank;
+ int ret;
+
+ ctrl_hdlr = &hi847->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ if (ret)
+ return ret;
+
+ ctrl_hdlr->lock = &hi847->mutex;
+ hi847->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &hi847_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(link_freq_menu_items) - 1,
+ 0, link_freq_menu_items);
+ if (hi847->link_freq)
+ hi847->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ hi847->pixel_rate = v4l2_ctrl_new_std
+ (ctrl_hdlr, &hi847_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ to_pixel_rate(HI847_LINK_FREQ_400MHZ_INDEX),
+ 1,
+ to_pixel_rate(HI847_LINK_FREQ_400MHZ_INDEX));
+ hi847->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &hi847_ctrl_ops,
+ V4L2_CID_VBLANK,
+ hi847->cur_mode->fll_min -
+ hi847->cur_mode->height,
+ HI847_FLL_MAX -
+ hi847->cur_mode->height, 1,
+ hi847->cur_mode->fll_def -
+ hi847->cur_mode->height);
+
+ h_blank = hi847->cur_mode->llp - hi847->cur_mode->width;
+
+ hi847->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &hi847_ctrl_ops,
+ V4L2_CID_HBLANK, h_blank, h_blank, 1,
+ h_blank);
+ if (hi847->hblank)
+ hi847->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &hi847_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ HI847_ANAL_GAIN_MIN, HI847_ANAL_GAIN_MAX,
+ HI847_ANAL_GAIN_STEP, HI847_ANAL_GAIN_MIN);
+ v4l2_ctrl_new_std(ctrl_hdlr, &hi847_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ HI847_DGTL_GAIN_MIN, HI847_DGTL_GAIN_MAX,
+ HI847_DGTL_GAIN_STEP, HI847_DGTL_GAIN_DEFAULT);
+ exposure_max = hi847->cur_mode->fll_def - HI847_EXPOSURE_MAX_MARGIN;
+ hi847->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &hi847_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ HI847_EXPOSURE_MIN, exposure_max,
+ HI847_EXPOSURE_STEP,
+ exposure_max);
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &hi847_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(hi847_test_pattern_menu) - 1,
+ 0, 0, hi847_test_pattern_menu);
+ hi847->hflip = v4l2_ctrl_new_std(ctrl_hdlr, &hi847_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ hi847->vflip = v4l2_ctrl_new_std(ctrl_hdlr, &hi847_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ if (ctrl_hdlr->error)
+ return ctrl_hdlr->error;
+
+ hi847->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+}
+
+static void hi847_assign_pad_format(const struct hi847_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+static int hi847_start_streaming(struct hi847 *hi847)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
+ const struct hi847_reg_list *reg_list;
+ int link_freq_index, ret;
+
+ link_freq_index = hi847->cur_mode->link_freq_index;
+ reg_list = &link_freq_configs[link_freq_index].reg_list;
+ ret = hi847_write_reg_list(hi847, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set plls");
+ return ret;
+ }
+
+ reg_list = &hi847->cur_mode->reg_list;
+ ret = hi847_write_reg_list(hi847, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode");
+ return ret;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(hi847->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ ret = hi847_write_reg(hi847, HI847_REG_MODE_TG,
+ HI847_REG_VALUE_16BIT, HI847_REG_MODE_TG_ENABLE);
+ if (ret) {
+ dev_err(&client->dev, "failed to enable timing generator");
+ return ret;
+ }
+
+ ret = hi847_write_reg(hi847, HI847_REG_MODE_SELECT,
+ HI847_REG_VALUE_16BIT, HI847_MODE_STREAMING);
+
+ if (ret) {
+ dev_err(&client->dev, "failed to set stream");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hi847_stop_streaming(struct hi847 *hi847)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
+
+ if (hi847_write_reg(hi847, HI847_REG_MODE_TG,
+ HI847_REG_VALUE_16BIT, HI847_REG_MODE_TG_DISABLE))
+ dev_err(&client->dev, "failed to set stream 0x%x",
+ HI847_REG_MODE_TG);
+
+ if (hi847_write_reg(hi847, HI847_REG_MODE_SELECT,
+ HI847_REG_VALUE_16BIT, HI847_MODE_STANDBY))
+ dev_err(&client->dev, "failed to set stream 0x%x",
+ HI847_REG_MODE_SELECT);
+}
+
+static int hi847_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct hi847 *hi847 = to_hi847(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (hi847->streaming == enable)
+ return 0;
+
+ mutex_lock(&hi847->mutex);
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ mutex_unlock(&hi847->mutex);
+ return ret;
+ }
+
+ ret = hi847_start_streaming(hi847);
+ if (ret) {
+ enable = 0;
+ hi847_stop_streaming(hi847);
+ pm_runtime_put(&client->dev);
+ }
+ } else {
+ hi847_stop_streaming(hi847);
+ pm_runtime_put(&client->dev);
+ }
+
+ hi847->streaming = enable;
+ mutex_unlock(&hi847->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused hi847_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi847 *hi847 = to_hi847(sd);
+
+ mutex_lock(&hi847->mutex);
+ if (hi847->streaming)
+ hi847_stop_streaming(hi847);
+
+ mutex_unlock(&hi847->mutex);
+
+ return 0;
+}
+
+static int __maybe_unused hi847_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi847 *hi847 = to_hi847(sd);
+ int ret;
+
+ mutex_lock(&hi847->mutex);
+ if (hi847->streaming) {
+ ret = hi847_start_streaming(hi847);
+ if (ret)
+ goto error;
+ }
+
+ mutex_unlock(&hi847->mutex);
+
+ return 0;
+
+error:
+ hi847_stop_streaming(hi847);
+ hi847->streaming = 0;
+ mutex_unlock(&hi847->mutex);
+ return ret;
+}
+
+static int hi847_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct hi847 *hi847 = to_hi847(sd);
+ const struct hi847_mode *mode;
+ s32 vblank_def, h_blank;
+
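+ /* Snap the requested size to the nearest supported sensor mode. */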
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes), width,
+ height, fmt->format.width,
+ fmt->format.height);
+
+ mutex_lock(&hi847->mutex);
+ hi847_assign_pad_format(mode, &fmt->format);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) =
+ fmt->format;
+ } else {
+ hi847->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(hi847->link_freq, mode->link_freq_index);
+ __v4l2_ctrl_s_ctrl_int64(hi847->pixel_rate,
+ to_pixel_rate(mode->link_freq_index));
+
+ /* Update limits and set FPS to default */
+ vblank_def = mode->fll_def - mode->height;
+ __v4l2_ctrl_modify_range(hi847->vblank,
+ mode->fll_min - mode->height,
+ HI847_FLL_MAX - mode->height, 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(hi847->vblank, vblank_def);
+
+ h_blank = hi847->cur_mode->llp - hi847->cur_mode->width;
+
+ __v4l2_ctrl_modify_range(hi847->hblank, h_blank, h_blank, 1,
+ h_blank);
+ }
+
+ mutex_unlock(&hi847->mutex);
+
+ return 0;
+}
+
+static int hi847_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct hi847 *hi847 = to_hi847(sd);
+
+ mutex_lock(&hi847->mutex);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt->format = *v4l2_subdev_get_try_format(&hi847->sd,
+ sd_state,
+ fmt->pad);
+ else
+ hi847_assign_pad_format(hi847->cur_mode, &fmt->format);
+
+ mutex_unlock(&hi847->mutex);
+
+ return 0;
+}
+
+static int hi847_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int hi847_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int hi847_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct hi847 *hi847 = to_hi847(sd);
+
+ mutex_lock(&hi847->mutex);
+ hi847_assign_pad_format(&supported_modes[0],
+ v4l2_subdev_get_try_format(sd, fh->state, 0));
+ mutex_unlock(&hi847->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops hi847_video_ops = {
+ .s_stream = hi847_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops hi847_pad_ops = {
+ .set_fmt = hi847_set_format,
+ .get_fmt = hi847_get_format,
+ .enum_mbus_code = hi847_enum_mbus_code,
+ .enum_frame_size = hi847_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops hi847_subdev_ops = {
+ .video = &hi847_video_ops,
+ .pad = &hi847_pad_ops,
+};
+
+static const struct media_entity_operations hi847_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops hi847_internal_ops = {
+ .open = hi847_open,
+};
+
+static int hi847_identify_module(struct hi847 *hi847)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
+ int ret;
+ u32 val;
+
+ ret = hi847_read_reg(hi847, HI847_REG_CHIP_ID,
+ HI847_REG_VALUE_16BIT, &val);
+ if (ret)
+ return ret;
+
+ if (val != HI847_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ HI847_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int hi847_check_hwcfg(struct device *dev)
+{
+ struct fwnode_handle *ep;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ u32 mclk;
+ int ret;
+ unsigned int i, j;
+
+ if (!fwnode)
+ return -ENXIO;
+
+ ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
+ if (ret) {
+ dev_err(dev, "can't get clock frequency");
+ return ret;
+ }
+
+ if (mclk != HI847_MCLK) {
+ dev_err(dev, "external clock %d is not supported", mclk);
+ return -EINVAL;
+ }
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -ENXIO;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != HI847_DATA_LANES) {
+ dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequencies defined");
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) {
+ for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) {
+ if (link_freq_menu_items[i] ==
+ bus_cfg.link_frequencies[j])
+ break;
+ }
+
+ if (j == bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequency %lld supported",
+ link_freq_menu_items[i]);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+ }
+
+check_hwcfg_error:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
+static int hi847_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi847 *hi847 = to_hi847(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(&client->dev);
+ mutex_destroy(&hi847->mutex);
+
+ return 0;
+}
+
+static int hi847_probe(struct i2c_client *client)
+{
+ struct hi847 *hi847;
+ int ret;
+
+ hi847 = devm_kzalloc(&client->dev, sizeof(*hi847), GFP_KERNEL);
+ if (!hi847)
+ return -ENOMEM;
+
+ ret = hi847_check_hwcfg(&client->dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to get HW configuration: %d",
+ ret);
+ return ret;
+ }
+
+ v4l2_i2c_subdev_init(&hi847->sd, client, &hi847_subdev_ops);
+ ret = hi847_identify_module(hi847);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ return ret;
+ }
+
+ mutex_init(&hi847->mutex);
+ hi847->cur_mode = &supported_modes[0];
+ ret = hi847_init_controls(hi847);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ hi847->sd.internal_ops = &hi847_internal_ops;
+ hi847->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ hi847->sd.entity.ops = &hi847_subdev_entity_ops;
+ hi847->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ hi847->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&hi847->sd.entity, 1, &hi847->pad);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor(&hi847->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ ret);
+ goto probe_error_media_entity_cleanup;
+ }
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+probe_error_media_entity_cleanup:
+ media_entity_cleanup(&hi847->sd.entity);
+
+probe_error_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(hi847->sd.ctrl_handler);
+ mutex_destroy(&hi847->mutex);
+
+ return ret;
+}
+
+static const struct dev_pm_ops hi847_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(hi847_suspend, hi847_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id hi847_acpi_ids[] = {
+ {"HYV0847"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, hi847_acpi_ids);
+#endif
+
+static struct i2c_driver hi847_i2c_driver = {
+ .driver = {
+ .name = "hi847",
+ .pm = &hi847_pm_ops,
+ .acpi_match_table = ACPI_PTR(hi847_acpi_ids),
+ },
+ .probe_new = hi847_probe,
+ .remove = hi847_remove,
+};
+
+module_i2c_driver(hi847_i2c_driver);
+
+MODULE_AUTHOR("Shawn Tu <shawnx.tu@intel.com>");
+MODULE_DESCRIPTION("Hynix HI847 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index 2aa15b9c23cc..7de1f2948e53 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -11,13 +11,11 @@
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/media/i2c/isl7998x.c b/drivers/media/i2c/isl7998x.c
new file mode 100644
index 000000000000..dc3068549dfa
--- /dev/null
+++ b/drivers/media/i2c/isl7998x.c
@@ -0,0 +1,1628 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intersil ISL7998x analog to MIPI CSI-2 or BT.656 decoder driver.
+ *
+ * Copyright (C) 2018-2019 Marek Vasut <marex@denx.de>
+ * Copyright (C) 2021 Michael Tretter <kernel@pengutronix.de>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-ioctl.h>
+
+/*
+ * This control enables or disables the test pattern on the selected
+ * output channels.
+ * This value is ISL7998x specific.
+ */
+#define V4L2_CID_TEST_PATTERN_CHANNELS (V4L2_CID_USER_ISL7998X_BASE + 0)
+
+/*
+ * This control selects the color of the test pattern.
+ * This value is ISL7998x specific.
+ */
+#define V4L2_CID_TEST_PATTERN_COLOR (V4L2_CID_USER_ISL7998X_BASE + 1)
+
+/*
+ * This control selects the bar pattern used in the test pattern.
+ * This value is ISL7998x specific.
+ */
+#define V4L2_CID_TEST_PATTERN_BARS (V4L2_CID_USER_ISL7998X_BASE + 2)
+
+#define ISL7998X_INPUTS 4
+
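+/*
+ * The register map is paged: the high byte of an address encodes the
+ * page, the low byte the offset within it.  Offset 0xff of every page
+ * is the page select register.
+ */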
+#define ISL7998X_REG(page, reg) (((page) << 8) | (reg))
+
+#define ISL7998X_REG_PN_SIZE 256
+#define ISL7998X_REG_PN_BASE(n) ((n) * ISL7998X_REG_PN_SIZE)
+
+#define ISL7998X_REG_PX_DEC_PAGE(page) ISL7998X_REG((page), 0xff)
+#define ISL7998X_REG_PX_DEC_PAGE_MASK 0xf
+#define ISL7998X_REG_P0_PRODUCT_ID_CODE ISL7998X_REG(0, 0x00)
+#define ISL7998X_REG_P0_PRODUCT_REV_CODE ISL7998X_REG(0, 0x01)
+#define ISL7998X_REG_P0_SW_RESET_CTL ISL7998X_REG(0, 0x02)
+#define ISL7998X_REG_P0_IO_BUFFER_CTL ISL7998X_REG(0, 0x03)
+#define ISL7998X_REG_P0_IO_BUFFER_CTL_1_1 ISL7998X_REG(0, 0x04)
+#define ISL7998X_REG_P0_IO_PAD_PULL_EN_CTL ISL7998X_REG(0, 0x05)
+#define ISL7998X_REG_P0_IO_BUFFER_CTL_1_2 ISL7998X_REG(0, 0x06)
+#define ISL7998X_REG_P0_VIDEO_IN_CHAN_CTL ISL7998X_REG(0, 0x07)
+#define ISL7998X_REG_P0_CLK_CTL_1 ISL7998X_REG(0, 0x08)
+#define ISL7998X_REG_P0_CLK_CTL_2 ISL7998X_REG(0, 0x09)
+#define ISL7998X_REG_P0_CLK_CTL_3 ISL7998X_REG(0, 0x0a)
+#define ISL7998X_REG_P0_CLK_CTL_4 ISL7998X_REG(0, 0x0b)
+#define ISL7998X_REG_P0_MPP1_SYNC_CTL ISL7998X_REG(0, 0x0c)
+#define ISL7998X_REG_P0_MPP2_SYNC_CTL ISL7998X_REG(0, 0x0d)
+#define ISL7998X_REG_P0_IRQ_SYNC_CTL ISL7998X_REG(0, 0x0e)
+#define ISL7998X_REG_P0_INTERRUPT_STATUS ISL7998X_REG(0, 0x10)
+#define ISL7998X_REG_P0_CHAN_1_IRQ ISL7998X_REG(0, 0x11)
+#define ISL7998X_REG_P0_CHAN_2_IRQ ISL7998X_REG(0, 0x12)
+#define ISL7998X_REG_P0_CHAN_3_IRQ ISL7998X_REG(0, 0x13)
+#define ISL7998X_REG_P0_CHAN_4_IRQ ISL7998X_REG(0, 0x14)
+#define ISL7998X_REG_P0_SHORT_DIAG_IRQ ISL7998X_REG(0, 0x15)
+#define ISL7998X_REG_P0_CHAN_1_IRQ_EN ISL7998X_REG(0, 0x16)
+#define ISL7998X_REG_P0_CHAN_2_IRQ_EN ISL7998X_REG(0, 0x17)
+#define ISL7998X_REG_P0_CHAN_3_IRQ_EN ISL7998X_REG(0, 0x18)
+#define ISL7998X_REG_P0_CHAN_4_IRQ_EN ISL7998X_REG(0, 0x19)
+#define ISL7998X_REG_P0_SHORT_DIAG_IRQ_EN ISL7998X_REG(0, 0x1a)
+#define ISL7998X_REG_P0_CHAN_1_STATUS ISL7998X_REG(0, 0x1b)
+#define ISL7998X_REG_P0_CHAN_2_STATUS ISL7998X_REG(0, 0x1c)
+#define ISL7998X_REG_P0_CHAN_3_STATUS ISL7998X_REG(0, 0x1d)
+#define ISL7998X_REG_P0_CHAN_4_STATUS ISL7998X_REG(0, 0x1e)
+#define ISL7998X_REG_P0_SHORT_DIAG_STATUS ISL7998X_REG(0, 0x1f)
+#define ISL7998X_REG_P0_CLOCK_DELAY ISL7998X_REG(0, 0x20)
+
+#define ISL7998X_REG_PX_DEC_INPUT_FMT(pg) ISL7998X_REG((pg), 0x02)
+#define ISL7998X_REG_PX_DEC_STATUS_1(pg) ISL7998X_REG((pg), 0x03)
+#define ISL7998X_REG_PX_DEC_STATUS_1_VDLOSS BIT(7)
+#define ISL7998X_REG_PX_DEC_STATUS_1_HLOCK BIT(6)
+#define ISL7998X_REG_PX_DEC_STATUS_1_VLOCK BIT(3)
+#define ISL7998X_REG_PX_DEC_HS_DELAY_CTL(pg) ISL7998X_REG((pg), 0x04)
+#define ISL7998X_REG_PX_DEC_ANCTL(pg) ISL7998X_REG((pg), 0x06)
+#define ISL7998X_REG_PX_DEC_CROP_HI(pg) ISL7998X_REG((pg), 0x07)
+#define ISL7998X_REG_PX_DEC_VDELAY_LO(pg) ISL7998X_REG((pg), 0x08)
+#define ISL7998X_REG_PX_DEC_VACTIVE_LO(pg) ISL7998X_REG((pg), 0x09)
+#define ISL7998X_REG_PX_DEC_HDELAY_LO(pg) ISL7998X_REG((pg), 0x0a)
+#define ISL7998X_REG_PX_DEC_HACTIVE_LO(pg) ISL7998X_REG((pg), 0x0b)
+#define ISL7998X_REG_PX_DEC_CNTRL1(pg) ISL7998X_REG((pg), 0x0c)
+#define ISL7998X_REG_PX_DEC_CSC_CTL(pg) ISL7998X_REG((pg), 0x0d)
+#define ISL7998X_REG_PX_DEC_BRIGHT(pg) ISL7998X_REG((pg), 0x10)
+#define ISL7998X_REG_PX_DEC_CONTRAST(pg) ISL7998X_REG((pg), 0x11)
+#define ISL7998X_REG_PX_DEC_SHARPNESS(pg) ISL7998X_REG((pg), 0x12)
+#define ISL7998X_REG_PX_DEC_SAT_U(pg) ISL7998X_REG((pg), 0x13)
+#define ISL7998X_REG_PX_DEC_SAT_V(pg) ISL7998X_REG((pg), 0x14)
+#define ISL7998X_REG_PX_DEC_HUE(pg) ISL7998X_REG((pg), 0x15)
+#define ISL7998X_REG_PX_DEC_VERT_PEAK(pg) ISL7998X_REG((pg), 0x17)
+#define ISL7998X_REG_PX_DEC_CORING(pg) ISL7998X_REG((pg), 0x18)
+#define ISL7998X_REG_PX_DEC_SDT(pg) ISL7998X_REG((pg), 0x1c)
+#define ISL7998X_REG_PX_DEC_SDT_DET BIT(7)
+#define ISL7998X_REG_PX_DEC_SDT_NOW GENMASK(6, 4)
+#define ISL7998X_REG_PX_DEC_SDT_STANDARD GENMASK(2, 0)
+#define ISL7998X_REG_PX_DEC_SDT_STANDARD_NTSC_M 0
+#define ISL7998X_REG_PX_DEC_SDT_STANDARD_PAL 1
+#define ISL7998X_REG_PX_DEC_SDT_STANDARD_SECAM 2
+#define ISL7998X_REG_PX_DEC_SDT_STANDARD_NTSC_443 3
+#define ISL7998X_REG_PX_DEC_SDT_STANDARD_PAL_M 4
+#define ISL7998X_REG_PX_DEC_SDT_STANDARD_PAL_CN 5
+#define ISL7998X_REG_PX_DEC_SDT_STANDARD_PAL_60 6
+#define ISL7998X_REG_PX_DEC_SDT_STANDARD_UNKNOWN 7
+#define ISL7998X_REG_PX_DEC_SDTR(pg) ISL7998X_REG((pg), 0x1d)
+#define ISL7998X_REG_PX_DEC_SDTR_ATSTART BIT(7)
+#define ISL7998X_REG_PX_DEC_CLMPG(pg) ISL7998X_REG((pg), 0x20)
+#define ISL7998X_REG_PX_DEC_IAGC(pg) ISL7998X_REG((pg), 0x21)
+#define ISL7998X_REG_PX_DEC_AGCGAIN(pg) ISL7998X_REG((pg), 0x22)
+#define ISL7998X_REG_PX_DEC_PEAKWT(pg) ISL7998X_REG((pg), 0x23)
+#define ISL7998X_REG_PX_DEC_CLMPL(pg) ISL7998X_REG((pg), 0x24)
+#define ISL7998X_REG_PX_DEC_SYNCT(pg) ISL7998X_REG((pg), 0x25)
+#define ISL7998X_REG_PX_DEC_MISSCNT(pg) ISL7998X_REG((pg), 0x26)
+#define ISL7998X_REG_PX_DEC_PCLAMP(pg) ISL7998X_REG((pg), 0x27)
+#define ISL7998X_REG_PX_DEC_VERT_CTL_1(pg) ISL7998X_REG((pg), 0x28)
+#define ISL7998X_REG_PX_DEC_VERT_CTL_2(pg) ISL7998X_REG((pg), 0x29)
+#define ISL7998X_REG_PX_DEC_CLR_KILL_LVL(pg) ISL7998X_REG((pg), 0x2a)
+#define ISL7998X_REG_PX_DEC_COMB_FILTER_CTL(pg) ISL7998X_REG((pg), 0x2b)
+#define ISL7998X_REG_PX_DEC_LUMA_DELAY(pg) ISL7998X_REG((pg), 0x2c)
+#define ISL7998X_REG_PX_DEC_MISC1(pg) ISL7998X_REG((pg), 0x2d)
+#define ISL7998X_REG_PX_DEC_MISC2(pg) ISL7998X_REG((pg), 0x2e)
+#define ISL7998X_REG_PX_DEC_MISC3(pg) ISL7998X_REG((pg), 0x2f)
+#define ISL7998X_REG_PX_DEC_MVSN(pg) ISL7998X_REG((pg), 0x30)
+#define ISL7998X_REG_PX_DEC_CSTATUS2(pg) ISL7998X_REG((pg), 0x31)
+#define ISL7998X_REG_PX_DEC_HFREF(pg) ISL7998X_REG((pg), 0x32)
+#define ISL7998X_REG_PX_DEC_CLMD(pg) ISL7998X_REG((pg), 0x33)
+#define ISL7998X_REG_PX_DEC_ID_DET_CTL(pg) ISL7998X_REG((pg), 0x34)
+#define ISL7998X_REG_PX_DEC_CLCNTL(pg) ISL7998X_REG((pg), 0x35)
+#define ISL7998X_REG_PX_DEC_DIFF_CLMP_CTL_1(pg) ISL7998X_REG((pg), 0x36)
+#define ISL7998X_REG_PX_DEC_DIFF_CLMP_CTL_2(pg) ISL7998X_REG((pg), 0x37)
+#define ISL7998X_REG_PX_DEC_DIFF_CLMP_CTL_3(pg) ISL7998X_REG((pg), 0x38)
+#define ISL7998X_REG_PX_DEC_DIFF_CLMP_CTL_4(pg) ISL7998X_REG((pg), 0x39)
+#define ISL7998X_REG_PX_DEC_SHORT_DET_CTL(pg) ISL7998X_REG((pg), 0x3a)
+#define ISL7998X_REG_PX_DEC_SHORT_DET_CTL_1(pg) ISL7998X_REG((pg), 0x3b)
+#define ISL7998X_REG_PX_DEC_AFE_TST_MUX_CTL(pg) ISL7998X_REG((pg), 0x3c)
+#define ISL7998X_REG_PX_DEC_DATA_CONV(pg) ISL7998X_REG((pg), 0x3d)
+#define ISL7998X_REG_PX_DEC_INTERNAL_TEST(pg) ISL7998X_REG((pg), 0x3f)
+#define ISL7998X_REG_PX_DEC_H_DELAY_CTL(pg) ISL7998X_REG((pg), 0x43)
+#define ISL7998X_REG_PX_DEC_H_DELAY_II_HI(pg) ISL7998X_REG((pg), 0x44)
+#define ISL7998X_REG_PX_DEC_H_DELAY_II_LOW(pg) ISL7998X_REG((pg), 0x45)
+
+#define ISL7998X_REG_PX_ACA_CTL_1(pg) ISL7998X_REG((pg), 0x80)
+#define ISL7998X_REG_PX_ACA_GAIN_CTL(pg) ISL7998X_REG((pg), 0x81)
+#define ISL7998X_REG_PX_ACA_Y_AVG_HI_LIMIT(pg) ISL7998X_REG((pg), 0x82)
+#define ISL7998X_REG_PX_ACA_Y_AVG_LO_LIMIT(pg) ISL7998X_REG((pg), 0x83)
+#define ISL7998X_REG_PX_ACA_Y_DET_THRESHOLD(pg) ISL7998X_REG((pg), 0x84)
+#define ISL7998X_REG_PX_ACA_BLACK_LVL(pg) ISL7998X_REG((pg), 0x85)
+#define ISL7998X_REG_PX_ACA_CENTER_LVL(pg) ISL7998X_REG((pg), 0x86)
+#define ISL7998X_REG_PX_ACA_WHITE_LVL(pg) ISL7998X_REG((pg), 0x87)
+#define ISL7998X_REG_PX_ACA_MEAN_OFF_LIMIT(pg) ISL7998X_REG((pg), 0x88)
+#define ISL7998X_REG_PX_ACA_MEAN_OFF_UPGAIN(pg) ISL7998X_REG((pg), 0x89)
+#define ISL7998X_REG_PX_ACA_MEAN_OFF_SLOPE(pg) ISL7998X_REG((pg), 0x8a)
+#define ISL7998X_REG_PX_ACA_MEAN_OFF_DNGAIN(pg) ISL7998X_REG((pg), 0x8b)
+#define ISL7998X_REG_PX_ACA_DELTA_CO_THRES(pg) ISL7998X_REG((pg), 0x8c)
+#define ISL7998X_REG_PX_ACA_DELTA_SLOPE(pg) ISL7998X_REG((pg), 0x8d)
+#define ISL7998X_REG_PX_ACA_LO_HI_AVG_THRES(pg) ISL7998X_REG((pg), 0x8e)
+#define ISL7998X_REG_PX_ACA_LO_MAX_LVL_CTL(pg) ISL7998X_REG((pg), 0x8f)
+#define ISL7998X_REG_PX_ACA_HI_MAX_LVL_CTL(pg) ISL7998X_REG((pg), 0x90)
+#define ISL7998X_REG_PX_ACA_LO_UPGAIN_CTL(pg) ISL7998X_REG((pg), 0x91)
+#define ISL7998X_REG_PX_ACA_LO_DNGAIN_CTL(pg) ISL7998X_REG((pg), 0x92)
+#define ISL7998X_REG_PX_ACA_HI_UPGAIN_CTL(pg) ISL7998X_REG((pg), 0x93)
+#define ISL7998X_REG_PX_ACA_HI_DNGAIN_CTL(pg) ISL7998X_REG((pg), 0x94)
+#define ISL7998X_REG_PX_ACA_LOPASS_FLT_COEF(pg) ISL7998X_REG((pg), 0x95)
+#define ISL7998X_REG_PX_ACA_PDF_INDEX(pg) ISL7998X_REG((pg), 0x96)
+#define ISL7998X_REG_PX_ACA_HIST_WIN_H_STT(pg) ISL7998X_REG((pg), 0x97)
+#define ISL7998X_REG_PX_ACA_HIST_WIN_H_SZ1(pg) ISL7998X_REG((pg), 0x98)
+#define ISL7998X_REG_PX_ACA_HIST_WIN_H_SZ2(pg) ISL7998X_REG((pg), 0x99)
+#define ISL7998X_REG_PX_ACA_HIST_WIN_V_STT(pg) ISL7998X_REG((pg), 0x9a)
+#define ISL7998X_REG_PX_ACA_HIST_WIN_V_SZ1(pg) ISL7998X_REG((pg), 0x9b)
+#define ISL7998X_REG_PX_ACA_HIST_WIN_V_SZ2(pg) ISL7998X_REG((pg), 0x9c)
+#define ISL7998X_REG_PX_ACA_Y_AVG(pg) ISL7998X_REG((pg), 0xa0)
+#define ISL7998X_REG_PX_ACA_Y_AVG_LIM(pg) ISL7998X_REG((pg), 0xa1)
+#define ISL7998X_REG_PX_ACA_LO_AVG(pg) ISL7998X_REG((pg), 0xa2)
+#define ISL7998X_REG_PX_ACA_HI_AVG(pg) ISL7998X_REG((pg), 0xa3)
+#define ISL7998X_REG_PX_ACA_Y_MAX(pg) ISL7998X_REG((pg), 0xa4)
+#define ISL7998X_REG_PX_ACA_Y_MIN(pg) ISL7998X_REG((pg), 0xa5)
+#define ISL7998X_REG_PX_ACA_MOFFSET(pg) ISL7998X_REG((pg), 0xa6)
+#define ISL7998X_REG_PX_ACA_LO_GAIN(pg) ISL7998X_REG((pg), 0xa7)
+#define ISL7998X_REG_PX_ACA_HI_GAIN(pg) ISL7998X_REG((pg), 0xa8)
+#define ISL7998X_REG_PX_ACA_LL_SLOPE(pg) ISL7998X_REG((pg), 0xa9)
+#define ISL7998X_REG_PX_ACA_LH_SLOPE(pg) ISL7998X_REG((pg), 0xaa)
+#define ISL7998X_REG_PX_ACA_HL_SLOPE(pg) ISL7998X_REG((pg), 0xab)
+#define ISL7998X_REG_PX_ACA_HH_SLOPE(pg) ISL7998X_REG((pg), 0xac)
+#define ISL7998X_REG_PX_ACA_X_LOW(pg) ISL7998X_REG((pg), 0xad)
+#define ISL7998X_REG_PX_ACA_X_MEAN(pg) ISL7998X_REG((pg), 0xae)
+#define ISL7998X_REG_PX_ACA_X_HIGH(pg) ISL7998X_REG((pg), 0xaf)
+#define ISL7998X_REG_PX_ACA_Y_LOW(pg) ISL7998X_REG((pg), 0xb0)
+#define ISL7998X_REG_PX_ACA_Y_MEAN(pg) ISL7998X_REG((pg), 0xb1)
+#define ISL7998X_REG_PX_ACA_Y_HIGH(pg) ISL7998X_REG((pg), 0xb2)
+#define ISL7998X_REG_PX_ACA_CTL_2(pg) ISL7998X_REG((pg), 0xb3)
+#define ISL7998X_REG_PX_ACA_CTL_3(pg) ISL7998X_REG((pg), 0xb4)
+#define ISL7998X_REG_PX_ACA_CTL_4(pg) ISL7998X_REG((pg), 0xb5)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_HIST(pg) ISL7998X_REG((pg), 0xc0)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_TL_H(pg) ISL7998X_REG((pg), 0xc1)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_TL_L(pg) ISL7998X_REG((pg), 0xc2)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_TL_H(pg) ISL7998X_REG((pg), 0xc3)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_TL_L(pg) ISL7998X_REG((pg), 0xc4)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_TR_H(pg) ISL7998X_REG((pg), 0xc5)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_TR_L(pg) ISL7998X_REG((pg), 0xc6)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_TR_H(pg) ISL7998X_REG((pg), 0xc7)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_TR_L(pg) ISL7998X_REG((pg), 0xc8)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_BL_H(pg) ISL7998X_REG((pg), 0xc9)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_BL_L(pg) ISL7998X_REG((pg), 0xca)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_BL_H(pg) ISL7998X_REG((pg), 0xcb)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_BL_L(pg) ISL7998X_REG((pg), 0xcc)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_BR_H(pg) ISL7998X_REG((pg), 0xcd)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_BR_L(pg) ISL7998X_REG((pg), 0xce)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_BR_H(pg) ISL7998X_REG((pg), 0xcf)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_BR_L(pg) ISL7998X_REG((pg), 0xd0)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_LM_H(pg) ISL7998X_REG((pg), 0xd1)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_LM_L(pg) ISL7998X_REG((pg), 0xd2)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_LM_H(pg) ISL7998X_REG((pg), 0xd3)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_LM_L(pg) ISL7998X_REG((pg), 0xd4)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_TM_H(pg) ISL7998X_REG((pg), 0xd5)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_TM_L(pg) ISL7998X_REG((pg), 0xd6)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_TM_H(pg) ISL7998X_REG((pg), 0xd7)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_TM_L(pg) ISL7998X_REG((pg), 0xd8)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_BM_H(pg) ISL7998X_REG((pg), 0xd9)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_BM_L(pg) ISL7998X_REG((pg), 0xda)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_BM_H(pg) ISL7998X_REG((pg), 0xdb)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_BM_L(pg) ISL7998X_REG((pg), 0xdc)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_RM_H(pg) ISL7998X_REG((pg), 0xdd)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_RM_L(pg) ISL7998X_REG((pg), 0xde)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_RM_H(pg) ISL7998X_REG((pg), 0xdf)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_RM_L(pg) ISL7998X_REG((pg), 0xe0)
+#define ISL7998X_REG_PX_ACA_HIST_DATA_LO(pg) ISL7998X_REG((pg), 0xe1)
+#define ISL7998X_REG_PX_ACA_HIST_DATA_MID(pg) ISL7998X_REG((pg), 0xe2)
+#define ISL7998X_REG_PX_ACA_HIST_DATA_HI(pg) ISL7998X_REG((pg), 0xe3)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_CLR(pg) ISL7998X_REG((pg), 0xe4)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_CB_CLR(pg) ISL7998X_REG((pg), 0xe5)
+#define ISL7998X_REG_PX_ACA_FLEX_WIN_CR_CLR(pg) ISL7998X_REG((pg), 0xe6)
+#define ISL7998X_REG_PX_ACA_XFER_HIST_HOST(pg) ISL7998X_REG((pg), 0xe7)
+
+#define ISL7998X_REG_P5_LI_ENGINE_CTL ISL7998X_REG(5, 0x00)
+#define ISL7998X_REG_P5_LI_ENGINE_LINE_CTL ISL7998X_REG(5, 0x01)
+#define ISL7998X_REG_P5_LI_ENGINE_PIC_WIDTH ISL7998X_REG(5, 0x02)
+#define ISL7998X_REG_P5_LI_ENGINE_SYNC_CTL ISL7998X_REG(5, 0x03)
+#define ISL7998X_REG_P5_LI_ENGINE_VC_ASSIGNMENT ISL7998X_REG(5, 0x04)
+#define ISL7998X_REG_P5_LI_ENGINE_TYPE_CTL ISL7998X_REG(5, 0x05)
+#define ISL7998X_REG_P5_LI_ENGINE_FIFO_CTL ISL7998X_REG(5, 0x06)
+#define ISL7998X_REG_P5_MIPI_READ_START_CTL ISL7998X_REG(5, 0x07)
+#define ISL7998X_REG_P5_PSEUDO_FRM_FIELD_CTL ISL7998X_REG(5, 0x08)
+#define ISL7998X_REG_P5_ONE_FIELD_MODE_CTL ISL7998X_REG(5, 0x09)
+#define ISL7998X_REG_P5_MIPI_INT_HW_TST_CTR ISL7998X_REG(5, 0x0a)
+#define ISL7998X_REG_P5_TP_GEN_BAR_PATTERN ISL7998X_REG(5, 0x0b)
+#define ISL7998X_REG_P5_MIPI_PCNT_PSFRM ISL7998X_REG(5, 0x0c)
+#define ISL7998X_REG_P5_LI_ENGINE_TP_GEN_CTL ISL7998X_REG(5, 0x0d)
+#define ISL7998X_REG_P5_MIPI_VBLANK_PSFRM ISL7998X_REG(5, 0x0e)
+#define ISL7998X_REG_P5_LI_ENGINE_CTL_2 ISL7998X_REG(5, 0x0f)
+#define ISL7998X_REG_P5_MIPI_WCNT_1 ISL7998X_REG(5, 0x10)
+#define ISL7998X_REG_P5_MIPI_WCNT_2 ISL7998X_REG(5, 0x11)
+#define ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_1 ISL7998X_REG(5, 0x12)
+#define ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_2 ISL7998X_REG(5, 0x13)
+#define ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_3 ISL7998X_REG(5, 0x14)
+#define ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_4 ISL7998X_REG(5, 0x15)
+#define ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_5 ISL7998X_REG(5, 0x16)
+#define ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_6 ISL7998X_REG(5, 0x17)
+#define ISL7998X_REG_P5_MIPI_DPHY_PARAMS_1 ISL7998X_REG(5, 0x18)
+#define ISL7998X_REG_P5_MIPI_DPHY_SOT_PERIOD ISL7998X_REG(5, 0x19)
+#define ISL7998X_REG_P5_MIPI_DPHY_EOT_PERIOD ISL7998X_REG(5, 0x1a)
+#define ISL7998X_REG_P5_MIPI_DPHY_PARAMS_2 ISL7998X_REG(5, 0x1b)
+#define ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_7 ISL7998X_REG(5, 0x1c)
+#define ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_8 ISL7998X_REG(5, 0x1d)
+#define ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_9 ISL7998X_REG(5, 0x1e)
+#define ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_10 ISL7998X_REG(5, 0x1f)
+#define ISL7998X_REG_P5_TP_GEN_MIPI ISL7998X_REG(5, 0x20)
+#define ISL7998X_REG_P5_ESC_MODE_TIME_CTL ISL7998X_REG(5, 0x21)
+#define ISL7998X_REG_P5_AUTO_TEST_ERR_DET ISL7998X_REG(5, 0x22)
+#define ISL7998X_REG_P5_MIPI_TIMING ISL7998X_REG(5, 0x23)
+#define ISL7998X_REG_P5_PIC_HEIGHT_HIGH ISL7998X_REG(5, 0x24)
+#define ISL7998X_REG_P5_PIC_HEIGHT_LOW ISL7998X_REG(5, 0x25)
+#define ISL7998X_REG_P5_MIPI_SP_HS_TRL_CTL ISL7998X_REG(5, 0x26)
+#define ISL7998X_REG_P5_FIFO_THRSH_CNT_1 ISL7998X_REG(5, 0x28)
+#define ISL7998X_REG_P5_FIFO_THRSH_CNT_2 ISL7998X_REG(5, 0x29)
+#define ISL7998X_REG_P5_TP_GEN_RND_SYNC_CTL_1 ISL7998X_REG(5, 0x2a)
+#define ISL7998X_REG_P5_TP_GEN_RND_SYNC_CTL_2 ISL7998X_REG(5, 0x2b)
+#define ISL7998X_REG_P5_PSF_FIELD_END_CTL_1 ISL7998X_REG(5, 0x2c)
+#define ISL7998X_REG_P5_PSF_FIELD_END_CTL_2 ISL7998X_REG(5, 0x2d)
+#define ISL7998X_REG_P5_PSF_FIELD_END_CTL_3 ISL7998X_REG(5, 0x2e)
+#define ISL7998X_REG_P5_PSF_FIELD_END_CTL_4 ISL7998X_REG(5, 0x2f)
+#define ISL7998X_REG_P5_MIPI_ANA_DATA_CTL_1 ISL7998X_REG(5, 0x30)
+#define ISL7998X_REG_P5_MIPI_ANA_DATA_CTL_2 ISL7998X_REG(5, 0x31)
+#define ISL7998X_REG_P5_MIPI_ANA_CLK_CTL ISL7998X_REG(5, 0x32)
+#define ISL7998X_REG_P5_PLL_ANA_STATUS ISL7998X_REG(5, 0x33)
+#define ISL7998X_REG_P5_PLL_ANA_MISC_CTL ISL7998X_REG(5, 0x34)
+#define ISL7998X_REG_P5_MIPI_ANA ISL7998X_REG(5, 0x35)
+#define ISL7998X_REG_P5_PLL_ANA ISL7998X_REG(5, 0x36)
+#define ISL7998X_REG_P5_TOTAL_PF_LINE_CNT_1 ISL7998X_REG(5, 0x38)
+#define ISL7998X_REG_P5_TOTAL_PF_LINE_CNT_2 ISL7998X_REG(5, 0x39)
+#define ISL7998X_REG_P5_H_LINE_CNT_1 ISL7998X_REG(5, 0x3a)
+#define ISL7998X_REG_P5_H_LINE_CNT_2 ISL7998X_REG(5, 0x3b)
+#define ISL7998X_REG_P5_HIST_LINE_CNT_1 ISL7998X_REG(5, 0x3c)
+#define ISL7998X_REG_P5_HIST_LINE_CNT_2 ISL7998X_REG(5, 0x3d)
+
+static const struct reg_sequence isl7998x_init_seq_1[] = {
+ { ISL7998X_REG_P0_SHORT_DIAG_IRQ_EN, 0xff },
+ { ISL7998X_REG_PX_DEC_SDT(0x1), 0x00 },
+ { ISL7998X_REG_PX_DEC_SHORT_DET_CTL_1(0x1), 0x03 },
+ { ISL7998X_REG_PX_DEC_SDT(0x2), 0x00 },
+ { ISL7998X_REG_PX_DEC_SHORT_DET_CTL_1(0x2), 0x03 },
+ { ISL7998X_REG_PX_DEC_SDT(0x3), 0x00 },
+ { ISL7998X_REG_PX_DEC_SHORT_DET_CTL_1(0x3), 0x03 },
+ { ISL7998X_REG_PX_DEC_SDT(0x4), 0x00 },
+ { ISL7998X_REG_PX_DEC_SHORT_DET_CTL_1(0x4), 0x03 },
+ { ISL7998X_REG_P5_LI_ENGINE_CTL, 0x00 },
+ { ISL7998X_REG_P0_SW_RESET_CTL, 0x1f, 10 },
+ { ISL7998X_REG_P0_IO_BUFFER_CTL, 0x00 },
+ { ISL7998X_REG_P0_MPP2_SYNC_CTL, 0xc9 },
+ { ISL7998X_REG_P0_IRQ_SYNC_CTL, 0xc9 },
+ { ISL7998X_REG_P0_CHAN_1_IRQ, 0x03 },
+ { ISL7998X_REG_P0_CHAN_2_IRQ, 0x00 },
+ { ISL7998X_REG_P0_CHAN_3_IRQ, 0x00 },
+ { ISL7998X_REG_P0_CHAN_4_IRQ, 0x00 },
+ { ISL7998X_REG_P5_LI_ENGINE_CTL, 0x02 },
+ { ISL7998X_REG_P5_LI_ENGINE_LINE_CTL, 0x85 },
+ { ISL7998X_REG_P5_LI_ENGINE_PIC_WIDTH, 0xa0 },
+ { ISL7998X_REG_P5_LI_ENGINE_SYNC_CTL, 0x18 },
+ { ISL7998X_REG_P5_LI_ENGINE_TYPE_CTL, 0x40 },
+ { ISL7998X_REG_P5_LI_ENGINE_FIFO_CTL, 0x40 },
+ { ISL7998X_REG_P5_MIPI_WCNT_1, 0x05 },
+ { ISL7998X_REG_P5_MIPI_WCNT_2, 0xa0 },
+ { ISL7998X_REG_P5_TP_GEN_MIPI, 0x00 },
+ { ISL7998X_REG_P5_ESC_MODE_TIME_CTL, 0x0c },
+ { ISL7998X_REG_P5_MIPI_SP_HS_TRL_CTL, 0x00 },
+ { ISL7998X_REG_P5_TP_GEN_RND_SYNC_CTL_1, 0x00 },
+ { ISL7998X_REG_P5_TP_GEN_RND_SYNC_CTL_2, 0x19 },
+ { ISL7998X_REG_P5_PSF_FIELD_END_CTL_1, 0x18 },
+ { ISL7998X_REG_P5_PSF_FIELD_END_CTL_2, 0xf1 },
+ { ISL7998X_REG_P5_PSF_FIELD_END_CTL_3, 0x00 },
+ { ISL7998X_REG_P5_PSF_FIELD_END_CTL_4, 0xf1 },
+ { ISL7998X_REG_P5_MIPI_ANA_DATA_CTL_1, 0x00 },
+ { ISL7998X_REG_P5_MIPI_ANA_DATA_CTL_2, 0x00 },
+ { ISL7998X_REG_P5_MIPI_ANA_CLK_CTL, 0x00 },
+ { ISL7998X_REG_P5_PLL_ANA_STATUS, 0xc0 },
+ { ISL7998X_REG_P5_PLL_ANA_MISC_CTL, 0x18 },
+ { ISL7998X_REG_P5_PLL_ANA, 0x00 },
+ { ISL7998X_REG_P0_SW_RESET_CTL, 0x10, 10 },
+ /* Page 0xf means write to all of pages 1,2,3,4 */
+ { ISL7998X_REG_PX_DEC_VDELAY_LO(0xf), 0x14 },
+ { ISL7998X_REG_PX_DEC_MISC3(0xf), 0xe6 },
+ { ISL7998X_REG_PX_DEC_CLMD(0xf), 0x85 },
+ { ISL7998X_REG_PX_DEC_H_DELAY_II_LOW(0xf), 0x11 },
+ { ISL7998X_REG_PX_ACA_XFER_HIST_HOST(0xf), 0x00 },
+ { ISL7998X_REG_P0_CLK_CTL_1, 0x1f },
+ { ISL7998X_REG_P0_CLK_CTL_2, 0x43 },
+ { ISL7998X_REG_P0_CLK_CTL_3, 0x4f },
+};
+
+static const struct reg_sequence isl7998x_init_seq_2[] = {
+ { ISL7998X_REG_P5_LI_ENGINE_SYNC_CTL, 0x10 },
+ { ISL7998X_REG_P5_LI_ENGINE_VC_ASSIGNMENT, 0xe4 },
+ { ISL7998X_REG_P5_LI_ENGINE_TYPE_CTL, 0x00 },
+ { ISL7998X_REG_P5_LI_ENGINE_FIFO_CTL, 0x60 },
+ { ISL7998X_REG_P5_MIPI_READ_START_CTL, 0x2b },
+ { ISL7998X_REG_P5_PSEUDO_FRM_FIELD_CTL, 0x02 },
+ { ISL7998X_REG_P5_ONE_FIELD_MODE_CTL, 0x00 },
+ { ISL7998X_REG_P5_MIPI_INT_HW_TST_CTR, 0x62 },
+ { ISL7998X_REG_P5_TP_GEN_BAR_PATTERN, 0x02 },
+ { ISL7998X_REG_P5_MIPI_PCNT_PSFRM, 0x36 },
+ { ISL7998X_REG_P5_LI_ENGINE_TP_GEN_CTL, 0x00 },
+ { ISL7998X_REG_P5_MIPI_VBLANK_PSFRM, 0x6c },
+ { ISL7998X_REG_P5_LI_ENGINE_CTL_2, 0x00 },
+ { ISL7998X_REG_P5_MIPI_WCNT_1, 0x05 },
+ { ISL7998X_REG_P5_MIPI_WCNT_2, 0xa0 },
+ { ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_1, 0x77 },
+ { ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_2, 0x17 },
+ { ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_3, 0x08 },
+ { ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_4, 0x38 },
+ { ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_5, 0x14 },
+ { ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_6, 0xf6 },
+ { ISL7998X_REG_P5_MIPI_DPHY_PARAMS_1, 0x00 },
+ { ISL7998X_REG_P5_MIPI_DPHY_SOT_PERIOD, 0x17 },
+ { ISL7998X_REG_P5_MIPI_DPHY_EOT_PERIOD, 0x0a },
+ { ISL7998X_REG_P5_MIPI_DPHY_PARAMS_2, 0x71 },
+ { ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_7, 0x7a },
+ { ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_8, 0x0f },
+ { ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_9, 0x8c },
+ { ISL7998X_REG_P5_MIPI_SP_HS_TRL_CTL, 0x08 },
+ { ISL7998X_REG_P5_FIFO_THRSH_CNT_1, 0x01 },
+ { ISL7998X_REG_P5_FIFO_THRSH_CNT_2, 0x0e },
+ { ISL7998X_REG_P5_TP_GEN_RND_SYNC_CTL_1, 0x00 },
+ { ISL7998X_REG_P5_TP_GEN_RND_SYNC_CTL_2, 0x00 },
+ { ISL7998X_REG_P5_TOTAL_PF_LINE_CNT_1, 0x03 },
+ { ISL7998X_REG_P5_TOTAL_PF_LINE_CNT_2, 0xc0 },
+ { ISL7998X_REG_P5_H_LINE_CNT_1, 0x06 },
+ { ISL7998X_REG_P5_H_LINE_CNT_2, 0xb3 },
+ { ISL7998X_REG_P5_HIST_LINE_CNT_1, 0x00 },
+ { ISL7998X_REG_P5_HIST_LINE_CNT_2, 0xf1 },
+ { ISL7998X_REG_P5_LI_ENGINE_FIFO_CTL, 0x00 },
+ { ISL7998X_REG_P5_MIPI_ANA, 0x00 },
+ /*
+ * Wait a bit after reset so that the chip can capture a frame
+ * and update internal line counters.
+ */
+ { ISL7998X_REG_P0_SW_RESET_CTL, 0x00, 50 },
+};
+
+enum isl7998x_pads {
+ ISL7998X_PAD_OUT,
+ ISL7998X_PAD_VIN1,
+ ISL7998X_PAD_VIN2,
+ ISL7998X_PAD_VIN3,
+ ISL7998X_PAD_VIN4,
+ ISL7998X_NUM_PADS
+};
+
+struct isl7998x_datafmt {
+ u32 code;
+ enum v4l2_colorspace colorspace;
+};
+
+static const struct isl7998x_datafmt isl7998x_colour_fmts[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_SRGB },
+};
+
+/* Menu items for LINK_FREQ V4L2 control */
+static const s64 link_freq_menu_items[] = {
+ /* 1 channel, 1 lane or 2 channels, 2 lanes */
+ 108000000,
+ /* 2 channels, 1 lane or 4 channels, 2 lanes */
+ 216000000,
+ /* 4 channels, 1 lane */
+ 432000000,
+};
+
+/* Menu items for TEST_PATTERN V4L2 control */
+static const char * const isl7998x_test_pattern_menu[] = {
+ "Disabled",
+ "Enabled",
+};
+
+static const char * const isl7998x_test_pattern_bars[] = {
+ "bbbbwb", "bbbwwb", "bbwbwb", "bbwwwb",
+};
+
+static const char * const isl7998x_test_pattern_colors[] = {
+ "Yellow", "Blue", "Green", "Pink",
+};
+
+struct isl7998x_mode {
+ unsigned int width;
+ unsigned int height;
+ enum v4l2_field field;
+};
+
+static const struct isl7998x_mode supported_modes[] = {
+ {
+ .width = 720,
+ .height = 576,
+ .field = V4L2_FIELD_SEQ_TB,
+ },
+ {
+ .width = 720,
+ .height = 480,
+ .field = V4L2_FIELD_SEQ_BT,
+ },
+};
+
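+/*
+ * Ordered so that the more specific standards (NTSC 4.43, PAL M, PAL Nc,
+ * PAL 60) are matched before the generic NTSC/PAL/SECAM entries: the
+ * lookup helpers below return the first entry whose mask overlaps the
+ * requested norm.
+ */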
+static const struct isl7998x_video_std {
+ const v4l2_std_id norm;
+ unsigned int id;
+ const struct isl7998x_mode *mode;
+} isl7998x_std_res[] = {
+ { V4L2_STD_NTSC_443,
+ ISL7998X_REG_PX_DEC_SDT_STANDARD_NTSC_443,
+ &supported_modes[1] },
+ { V4L2_STD_PAL_M,
+ ISL7998X_REG_PX_DEC_SDT_STANDARD_PAL_M,
+ &supported_modes[1] },
+ { V4L2_STD_PAL_Nc,
+ ISL7998X_REG_PX_DEC_SDT_STANDARD_PAL_CN,
+ &supported_modes[0] },
+ { V4L2_STD_PAL_N,
+ ISL7998X_REG_PX_DEC_SDT_STANDARD_PAL,
+ &supported_modes[0] },
+ { V4L2_STD_PAL_60,
+ ISL7998X_REG_PX_DEC_SDT_STANDARD_PAL_60,
+ &supported_modes[1] },
+ { V4L2_STD_NTSC,
+ ISL7998X_REG_PX_DEC_SDT_STANDARD_NTSC_M,
+ &supported_modes[1] },
+ { V4L2_STD_PAL,
+ ISL7998X_REG_PX_DEC_SDT_STANDARD_PAL,
+ &supported_modes[0] },
+ { V4L2_STD_SECAM,
+ ISL7998X_REG_PX_DEC_SDT_STANDARD_SECAM,
+ &supported_modes[0] },
+ { V4L2_STD_UNKNOWN,
+ ISL7998X_REG_PX_DEC_SDT_STANDARD_UNKNOWN,
+ &supported_modes[1] },
+};
+
+struct isl7998x {
+ struct v4l2_subdev subdev;
+ struct regmap *regmap;
+ struct gpio_desc *pd_gpio;
+ struct gpio_desc *rstb_gpio;
+ unsigned int nr_mipi_lanes;
+ u32 nr_inputs;
+
+ const struct isl7998x_datafmt *fmt;
+ v4l2_std_id norm;
+ struct media_pad pads[ISL7998X_NUM_PADS];
+
+ int enabled;
+
+ /* protect fmt, norm, enabled */
+ struct mutex lock;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ /* protect ctrl_handler */
+ struct mutex ctrl_mutex;
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ u8 test_pattern;
+ u8 test_pattern_bars;
+ u8 test_pattern_chans;
+ u8 test_pattern_color;
+};
+
+static struct isl7998x *sd_to_isl7998x(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct isl7998x, subdev);
+}
+
+static struct isl7998x *i2c_to_isl7998x(const struct i2c_client *client)
+{
+ return sd_to_isl7998x(i2c_get_clientdata(client));
+}
+
+static unsigned int isl7998x_norm_to_val(v4l2_std_id norm)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(isl7998x_std_res); i++)
+ if (isl7998x_std_res[i].norm & norm)
+ break;
+ if (i == ARRAY_SIZE(isl7998x_std_res))
+ return ISL7998X_REG_PX_DEC_SDT_STANDARD_UNKNOWN;
+
+ return isl7998x_std_res[i].id;
+}
+
+static const struct isl7998x_mode *isl7998x_norm_to_mode(v4l2_std_id norm)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(isl7998x_std_res); i++)
+ if (isl7998x_std_res[i].norm & norm)
+ break;
+ /* Use NTSC default resolution during standard detection */
+ if (i == ARRAY_SIZE(isl7998x_std_res))
+ return &supported_modes[1];
+
+ return isl7998x_std_res[i].mode;
+}
+
+static int isl7998x_get_nr_inputs(struct device_node *of_node)
+{
+ struct device_node *port;
+ unsigned int inputs = 0;
+ unsigned int i;
+
+ if (of_graph_get_endpoint_count(of_node) > ISL7998X_NUM_PADS)
+ return -EINVAL;
+
+ /*
+ * The driver does not provide means to remap the input ports. It
+ * always configures input ports to start from VID1. Ensure that the
+ * device tree is correct.
+ */
+ for (i = ISL7998X_PAD_VIN1; i <= ISL7998X_PAD_VIN4; i++) {
+ port = of_graph_get_port_by_id(of_node, i);
+ if (!port)
+ continue;
+
+ inputs |= BIT(i);
+ of_node_put(port);
+ }
+
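+	/* Only 1, 2 or 4 inputs, contiguous from VIN1, are supported. */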
+ switch (inputs) {
+ case BIT(ISL7998X_PAD_VIN1):
+ return 1;
+ case BIT(ISL7998X_PAD_VIN1) | BIT(ISL7998X_PAD_VIN2):
+ return 2;
+ case BIT(ISL7998X_PAD_VIN1) | BIT(ISL7998X_PAD_VIN2) |
+ BIT(ISL7998X_PAD_VIN3) | BIT(ISL7998X_PAD_VIN4):
+ return 4;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int isl7998x_wait_power_on(struct isl7998x *isl7998x)
+{
+ struct device *dev = isl7998x->subdev.dev;
+ u32 chip_id;
+ int ret;
+ int err;
+
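+	/* Poll the product ID until the chip responds on I2C (20 ms max). */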
+ ret = read_poll_timeout(regmap_read, err, !err, 2000, 20000, false,
+ isl7998x->regmap,
+ ISL7998X_REG_P0_PRODUCT_ID_CODE, &chip_id);
+ if (ret) {
+ dev_err(dev, "timeout while waiting for ISL7998X\n");
+ return ret;
+ }
+
+ dev_dbg(dev, "Found ISL799%x\n", chip_id);
+
+ return ret;
+}
+
+static int isl7998x_set_standard(struct isl7998x *isl7998x, v4l2_std_id norm)
+{
+ const struct isl7998x_mode *mode = isl7998x_norm_to_mode(norm);
+ unsigned int val = isl7998x_norm_to_val(norm);
+ unsigned int width = mode->width;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ISL7998X_INPUTS; i++) {
+ ret = regmap_write_bits(isl7998x->regmap,
+ ISL7998X_REG_PX_DEC_SDT(i + 1),
+ ISL7998X_REG_PX_DEC_SDT_STANDARD,
+ val);
+ if (ret)
+ return ret;
+ }
+
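+	/*
+	 * The active line width is programmed into the LI engine as a doubled
+	 * pixel count (presumably the UYVY byte count), with the top bits in
+	 * the line control register and the low byte in the picture width
+	 * register.
+	 */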
+ ret = regmap_write(isl7998x->regmap,
+ ISL7998X_REG_P5_LI_ENGINE_LINE_CTL,
+ 0x20 | ((width >> 7) & 0x1f));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(isl7998x->regmap,
+ ISL7998X_REG_P5_LI_ENGINE_PIC_WIDTH,
+ (width << 1) & 0xff);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int isl7998x_init(struct isl7998x *isl7998x)
+{
+ const unsigned int lanes = isl7998x->nr_mipi_lanes;
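+	/* Indexed by nr_inputs - 1; 3-input setups are rejected at probe. */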
+ const u32 isl7998x_video_in_chan_map[] = { 0x00, 0x11, 0x02, 0x02 };
+ const struct reg_sequence isl7998x_init_seq_custom[] = {
+ { ISL7998X_REG_P0_VIDEO_IN_CHAN_CTL,
+ isl7998x_video_in_chan_map[isl7998x->nr_inputs - 1] },
+ { ISL7998X_REG_P0_CLK_CTL_4,
+ (lanes == 1) ? 0x40 : 0x41 },
+ { ISL7998X_REG_P5_LI_ENGINE_CTL,
+ (lanes == 1) ? 0x01 : 0x02 },
+ };
+ struct device *dev = isl7998x->subdev.dev;
+ struct regmap *regmap = isl7998x->regmap;
+ int ret;
+
+ dev_dbg(dev, "configuring %d lanes for %d inputs (norm %s)\n",
+ isl7998x->nr_mipi_lanes, isl7998x->nr_inputs,
+ v4l2_norm_to_name(isl7998x->norm));
+
+ ret = regmap_register_patch(regmap, isl7998x_init_seq_1,
+ ARRAY_SIZE(isl7998x_init_seq_1));
+ if (ret)
+ return ret;
+
+ mutex_lock(&isl7998x->lock);
+ ret = isl7998x_set_standard(isl7998x, isl7998x->norm);
+ mutex_unlock(&isl7998x->lock);
+ if (ret)
+ return ret;
+
+ ret = regmap_register_patch(regmap, isl7998x_init_seq_custom,
+ ARRAY_SIZE(isl7998x_init_seq_custom));
+ if (ret)
+ return ret;
+
+ return regmap_register_patch(regmap, isl7998x_init_seq_2,
+ ARRAY_SIZE(isl7998x_init_seq_2));
+}
+
+static int isl7998x_set_test_pattern(struct isl7998x *isl7998x)
+{
+ const struct reg_sequence isl7998x_init_seq_tpg_off[] = {
+ { ISL7998X_REG_P5_LI_ENGINE_TP_GEN_CTL, 0 },
+ { ISL7998X_REG_P5_LI_ENGINE_CTL_2, 0 }
+ };
+ const struct reg_sequence isl7998x_init_seq_tpg_on[] = {
+ { ISL7998X_REG_P5_TP_GEN_BAR_PATTERN,
+ isl7998x->test_pattern_bars << 6 },
+ { ISL7998X_REG_P5_LI_ENGINE_CTL_2,
+ isl7998x->norm & V4L2_STD_PAL ? BIT(2) : 0 },
+ { ISL7998X_REG_P5_LI_ENGINE_TP_GEN_CTL,
+ (isl7998x->test_pattern_chans << 4) |
+ (isl7998x->test_pattern_color << 2) }
+ };
+ struct device *dev = isl7998x->subdev.dev;
+ struct regmap *regmap = isl7998x->regmap;
+ int ret;
+
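+	/* If the chip is powered down, the pattern is applied on stream start. */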
+ if (pm_runtime_get_if_in_use(dev) <= 0)
+ return 0;
+
+ if (isl7998x->test_pattern != 0) {
+ dev_dbg(dev, "enabling test pattern: channels 0x%x, %s, %s\n",
+ isl7998x->test_pattern_chans,
+ isl7998x_test_pattern_bars[isl7998x->test_pattern_bars],
+ isl7998x_test_pattern_colors[isl7998x->test_pattern_color]);
+ ret = regmap_register_patch(regmap, isl7998x_init_seq_tpg_on,
+ ARRAY_SIZE(isl7998x_init_seq_tpg_on));
+ } else {
+ ret = regmap_register_patch(regmap, isl7998x_init_seq_tpg_off,
+ ARRAY_SIZE(isl7998x_init_seq_tpg_off));
+ }
+
+ pm_runtime_put(dev);
+
+ return ret;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int isl7998x_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct isl7998x *isl7998x = sd_to_isl7998x(sd);
+ int ret;
+ u32 val;
+
+ ret = regmap_read(isl7998x->regmap, reg->reg, &val);
+ if (ret)
+ return ret;
+
+ reg->size = 1;
+ reg->val = val;
+
+ return 0;
+}
+
+static int isl7998x_s_register(struct v4l2_subdev *sd,
+ const struct v4l2_dbg_register *reg)
+{
+ struct isl7998x *isl7998x = sd_to_isl7998x(sd);
+
+ return regmap_write(isl7998x->regmap, reg->reg, reg->val);
+}
+#endif
+
+static int isl7998x_g_std(struct v4l2_subdev *sd, v4l2_std_id *norm)
+{
+ struct isl7998x *isl7998x = sd_to_isl7998x(sd);
+
+ mutex_lock(&isl7998x->lock);
+ *norm = isl7998x->norm;
+ mutex_unlock(&isl7998x->lock);
+
+ return 0;
+}
+
+static int isl7998x_s_std(struct v4l2_subdev *sd, v4l2_std_id norm)
+{
+ struct isl7998x *isl7998x = sd_to_isl7998x(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct device *dev = &client->dev;
+ int ret = 0;
+
+ mutex_lock(&isl7998x->lock);
+ if (isl7998x->enabled) {
+ ret = -EBUSY;
+ mutex_unlock(&isl7998x->lock);
+ return ret;
+ }
+ isl7998x->norm = norm;
+ mutex_unlock(&isl7998x->lock);
+
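+	/* If powered down, the new standard is programmed at the next resume. */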
+ if (pm_runtime_get_if_in_use(dev) <= 0)
+ return ret;
+
+ ret = isl7998x_set_standard(isl7998x, norm);
+
+ pm_runtime_put(dev);
+
+ return ret;
+}
+
+static int isl7998x_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
+{
+ struct isl7998x *isl7998x = sd_to_isl7998x(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct device *dev = &client->dev;
+ unsigned int std_id[ISL7998X_INPUTS];
+ unsigned int i;
+ int ret;
+ u32 reg;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "starting video standard detection\n");
+
+ mutex_lock(&isl7998x->lock);
+ if (isl7998x->enabled) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ ret = isl7998x_set_standard(isl7998x, V4L2_STD_UNKNOWN);
+ if (ret)
+ goto out_unlock;
+
+ for (i = 0; i < ISL7998X_INPUTS; i++) {
+ ret = regmap_write(isl7998x->regmap,
+ ISL7998X_REG_PX_DEC_SDTR(i + 1),
+ ISL7998X_REG_PX_DEC_SDTR_ATSTART);
+ if (ret)
+ goto out_reset_std;
+ }
+
+ for (i = 0; i < ISL7998X_INPUTS; i++) {
+ ret = regmap_read_poll_timeout(isl7998x->regmap,
+ ISL7998X_REG_PX_DEC_SDT(i + 1),
+ reg,
+ !(reg & ISL7998X_REG_PX_DEC_SDT_DET),
+ 2000, 500 * USEC_PER_MSEC);
+ if (ret)
+ goto out_reset_std;
+ std_id[i] = FIELD_GET(ISL7998X_REG_PX_DEC_SDT_NOW, reg);
+ }
+
+ /*
+ * According to Renesas FAE, all input cameras must have the
+ * same standard on this chip.
+ */
+ for (i = 0; i < isl7998x->nr_inputs; i++) {
+ dev_dbg(dev, "input %d: detected %s\n",
+ i, v4l2_norm_to_name(isl7998x_std_res[std_id[i]].norm));
+ if (std_id[0] != std_id[i])
+ dev_warn(dev,
+ "incompatible standards: %s on input %d (expected %s)\n",
+ v4l2_norm_to_name(isl7998x_std_res[std_id[i]].norm), i,
+ v4l2_norm_to_name(isl7998x_std_res[std_id[0]].norm));
+ }
+
+ *std = isl7998x_std_res[std_id[0]].norm;
+
+out_reset_std:
+ isl7998x_set_standard(isl7998x, isl7998x->norm);
+out_unlock:
+ mutex_unlock(&isl7998x->lock);
+ pm_runtime_put(dev);
+
+ return ret;
+}
+
+static int isl7998x_g_tvnorms(struct v4l2_subdev *sd, v4l2_std_id *std)
+{
+ *std = V4L2_STD_ALL;
+
+ return 0;
+}
+
+static int isl7998x_g_input_status(struct v4l2_subdev *sd, u32 *status)
+{
+ struct isl7998x *isl7998x = sd_to_isl7998x(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct device *dev = &client->dev;
+ unsigned int i;
+ int ret = 0;
+ u32 reg;
+
+ if (!pm_runtime_active(dev)) {
+ *status |= V4L2_IN_ST_NO_POWER;
+ return 0;
+ }
+
+ for (i = 0; i < isl7998x->nr_inputs; i++) {
+ ret = regmap_read(isl7998x->regmap,
+ ISL7998X_REG_PX_DEC_STATUS_1(i + 1), &reg);
+ if (!ret) {
+ if (reg & ISL7998X_REG_PX_DEC_STATUS_1_VDLOSS)
+ *status |= V4L2_IN_ST_NO_SIGNAL;
+ if (!(reg & ISL7998X_REG_PX_DEC_STATUS_1_HLOCK))
+ *status |= V4L2_IN_ST_NO_H_LOCK;
+ if (!(reg & ISL7998X_REG_PX_DEC_STATUS_1_VLOCK))
+ *status |= V4L2_IN_ST_NO_V_LOCK;
+ }
+ }
+
+ return ret;
+}
+
+static int isl7998x_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct isl7998x *isl7998x = sd_to_isl7998x(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct device *dev = &client->dev;
+ int ret = 0;
+ u32 reg;
+
+ dev_dbg(dev, "stream %s\n", enable ? "ON" : "OFF");
+
+ mutex_lock(&isl7998x->lock);
+ if (isl7998x->enabled == enable)
+ goto out;
+ isl7998x->enabled = enable;
+
+ if (enable) {
+ ret = isl7998x_set_test_pattern(isl7998x);
+ if (ret)
+ goto out;
+ }
+
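+	/* The output is enabled by clearing LI engine control bit 7. */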
+ regmap_read(isl7998x->regmap,
+ ISL7998X_REG_P5_LI_ENGINE_CTL, &reg);
+ if (enable)
+ reg &= ~BIT(7);
+ else
+ reg |= BIT(7);
+ ret = regmap_write(isl7998x->regmap,
+ ISL7998X_REG_P5_LI_ENGINE_CTL, reg);
+
+out:
+ mutex_unlock(&isl7998x->lock);
+
+ return ret;
+}
+
+static int isl7998x_pre_streamon(struct v4l2_subdev *sd, u32 flags)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct device *dev = &client->dev;
+
+ return pm_runtime_resume_and_get(dev);
+}
+
+static int isl7998x_post_streamoff(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct device *dev = &client->dev;
+
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
+static int isl7998x_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(isl7998x_colour_fmts))
+ return -EINVAL;
+
+ code->code = isl7998x_colour_fmts[code->index].code;
+
+ return 0;
+}
+
+static int isl7998x_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != isl7998x_colour_fmts[0].code)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int isl7998x_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct isl7998x *isl7998x = sd_to_isl7998x(sd);
+ struct v4l2_mbus_framefmt *mf = &format->format;
+ const struct isl7998x_mode *mode;
+
+ mutex_lock(&isl7998x->lock);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ format->format = *v4l2_subdev_get_try_format(sd, sd_state,
+ format->pad);
+ goto out;
+ }
+
+ mode = isl7998x_norm_to_mode(isl7998x->norm);
+
+ mf->width = mode->width;
+ mf->height = mode->height;
+ mf->code = isl7998x->fmt->code;
+ mf->field = mode->field;
+	mf->colorspace = isl7998x->fmt->colorspace;
+
+out:
+ mutex_unlock(&isl7998x->lock);
+
+ return 0;
+}
+
+static int isl7998x_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct isl7998x *isl7998x = sd_to_isl7998x(sd);
+ struct v4l2_mbus_framefmt *mf = &format->format;
+ const struct isl7998x_mode *mode;
+
+ mutex_lock(&isl7998x->lock);
+
+ mode = isl7998x_norm_to_mode(isl7998x->norm);
+
+ mf->width = mode->width;
+ mf->height = mode->height;
+ mf->code = isl7998x->fmt->code;
+ mf->field = mode->field;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ *v4l2_subdev_get_try_format(sd, sd_state, format->pad) = format->format;
+
+ mutex_unlock(&isl7998x->lock);
+
+ return 0;
+}
+
+static int isl7998x_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct isl7998x *isl7998x = container_of(ctrl->handler,
+ struct isl7998x, ctrl_handler);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_TEST_PATTERN_BARS:
+ mutex_lock(&isl7998x->lock);
+ isl7998x->test_pattern_bars = ctrl->val & 0x3;
+ ret = isl7998x_set_test_pattern(isl7998x);
+ mutex_unlock(&isl7998x->lock);
+ break;
+ case V4L2_CID_TEST_PATTERN_CHANNELS:
+ mutex_lock(&isl7998x->lock);
+ isl7998x->test_pattern_chans = ctrl->val & 0xf;
+ ret = isl7998x_set_test_pattern(isl7998x);
+ mutex_unlock(&isl7998x->lock);
+ break;
+ case V4L2_CID_TEST_PATTERN_COLOR:
+ mutex_lock(&isl7998x->lock);
+ isl7998x->test_pattern_color = ctrl->val & 0x3;
+ ret = isl7998x_set_test_pattern(isl7998x);
+ mutex_unlock(&isl7998x->lock);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ mutex_lock(&isl7998x->lock);
+ isl7998x->test_pattern = ctrl->val;
+ ret = isl7998x_set_test_pattern(isl7998x);
+ mutex_unlock(&isl7998x->lock);
+ break;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_subdev_core_ops isl7998x_subdev_core_ops = {
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = isl7998x_g_register,
+ .s_register = isl7998x_s_register,
+#endif
+};
+
+static const struct v4l2_subdev_video_ops isl7998x_subdev_video_ops = {
+ .g_std = isl7998x_g_std,
+ .s_std = isl7998x_s_std,
+ .querystd = isl7998x_querystd,
+ .g_tvnorms = isl7998x_g_tvnorms,
+ .g_input_status = isl7998x_g_input_status,
+ .s_stream = isl7998x_s_stream,
+ .pre_streamon = isl7998x_pre_streamon,
+ .post_streamoff = isl7998x_post_streamoff,
+};
+
+static const struct v4l2_subdev_pad_ops isl7998x_subdev_pad_ops = {
+ .enum_mbus_code = isl7998x_enum_mbus_code,
+ .enum_frame_size = isl7998x_enum_frame_size,
+ .get_fmt = isl7998x_get_fmt,
+ .set_fmt = isl7998x_set_fmt,
+};
+
+static const struct v4l2_subdev_ops isl7998x_subdev_ops = {
+ .core = &isl7998x_subdev_core_ops,
+ .video = &isl7998x_subdev_video_ops,
+ .pad = &isl7998x_subdev_pad_ops,
+};
+
+static const struct media_entity_operations isl7998x_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_ctrl_ops isl7998x_ctrl_ops = {
+ .s_ctrl = isl7998x_set_ctrl,
+};
+
+static const struct v4l2_ctrl_config isl7998x_ctrls[] = {
+ {
+ .ops = &isl7998x_ctrl_ops,
+ .id = V4L2_CID_TEST_PATTERN_BARS,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Test Pattern Bars",
+ .max = ARRAY_SIZE(isl7998x_test_pattern_bars) - 1,
+ .def = 0,
+ .qmenu = isl7998x_test_pattern_bars,
+ }, {
+ .ops = &isl7998x_ctrl_ops,
+ .id = V4L2_CID_TEST_PATTERN_CHANNELS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Test Pattern Channels",
+ .min = 0,
+ .max = 0xf,
+ .step = 1,
+ .def = 0xf,
+ .flags = 0,
+ }, {
+ .ops = &isl7998x_ctrl_ops,
+ .id = V4L2_CID_TEST_PATTERN_COLOR,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Test Pattern Color",
+ .max = ARRAY_SIZE(isl7998x_test_pattern_colors) - 1,
+ .def = 0,
+ .qmenu = isl7998x_test_pattern_colors,
+ },
+};
+
+#define ISL7998X_REG_DECODER_ACA_READABLE_RANGE(page) \
+ /* Decoder range */ \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_INPUT_FMT(page), \
+ ISL7998X_REG_PX_DEC_HS_DELAY_CTL(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_ANCTL(page), \
+ ISL7998X_REG_PX_DEC_CSC_CTL(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_BRIGHT(page), \
+ ISL7998X_REG_PX_DEC_HUE(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_VERT_PEAK(page), \
+ ISL7998X_REG_PX_DEC_CORING(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_SDT(page), \
+ ISL7998X_REG_PX_DEC_SDTR(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_CLMPG(page), \
+ ISL7998X_REG_PX_DEC_DATA_CONV(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_INTERNAL_TEST(page), \
+ ISL7998X_REG_PX_DEC_INTERNAL_TEST(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_H_DELAY_CTL(page), \
+ ISL7998X_REG_PX_DEC_H_DELAY_II_LOW(page)), \
+ /* ACA range */ \
+ regmap_reg_range(ISL7998X_REG_PX_ACA_CTL_1(page), \
+ ISL7998X_REG_PX_ACA_HIST_WIN_V_SZ2(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_ACA_Y_AVG(page), \
+ ISL7998X_REG_PX_ACA_CTL_4(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_ACA_FLEX_WIN_HIST(page), \
+ ISL7998X_REG_PX_ACA_XFER_HIST_HOST(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_PAGE(page), \
+ ISL7998X_REG_PX_DEC_PAGE(page))
+
+#define ISL7998X_REG_DECODER_ACA_WRITEABLE_RANGE(page) \
+ /* Decoder range */ \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_INPUT_FMT(page), \
+ ISL7998X_REG_PX_DEC_INPUT_FMT(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_HS_DELAY_CTL(page), \
+ ISL7998X_REG_PX_DEC_HS_DELAY_CTL(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_ANCTL(page), \
+ ISL7998X_REG_PX_DEC_CSC_CTL(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_BRIGHT(page), \
+ ISL7998X_REG_PX_DEC_HUE(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_VERT_PEAK(page), \
+ ISL7998X_REG_PX_DEC_CORING(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_SDT(page), \
+ ISL7998X_REG_PX_DEC_SDTR(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_CLMPG(page), \
+ ISL7998X_REG_PX_DEC_MISC3(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_CLMD(page), \
+ ISL7998X_REG_PX_DEC_DATA_CONV(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_INTERNAL_TEST(page), \
+ ISL7998X_REG_PX_DEC_INTERNAL_TEST(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_H_DELAY_CTL(page), \
+ ISL7998X_REG_PX_DEC_H_DELAY_II_LOW(page)), \
+ /* ACA range */ \
+ regmap_reg_range(ISL7998X_REG_PX_ACA_CTL_1(page), \
+ ISL7998X_REG_PX_ACA_HIST_WIN_V_SZ2(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_ACA_CTL_2(page), \
+ ISL7998X_REG_PX_ACA_CTL_4(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_ACA_FLEX_WIN_HIST(page), \
+ ISL7998X_REG_PX_ACA_HIST_DATA_LO(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_ACA_XFER_HIST_HOST(page), \
+ ISL7998X_REG_PX_ACA_XFER_HIST_HOST(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_PAGE(page), \
+ ISL7998X_REG_PX_DEC_PAGE(page))
+
+#define ISL7998X_REG_DECODER_ACA_VOLATILE_RANGE(page) \
+ /* Decoder range */ \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_STATUS_1(page), \
+ ISL7998X_REG_PX_DEC_STATUS_1(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_SDT(page), \
+ ISL7998X_REG_PX_DEC_SDT(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_DEC_MVSN(page), \
+ ISL7998X_REG_PX_DEC_HFREF(page)), \
+ /* ACA range */ \
+ regmap_reg_range(ISL7998X_REG_PX_ACA_Y_AVG(page), \
+ ISL7998X_REG_PX_ACA_Y_HIGH(page)), \
+ regmap_reg_range(ISL7998X_REG_PX_ACA_HIST_DATA_LO(page), \
+ ISL7998X_REG_PX_ACA_FLEX_WIN_CR_CLR(page))
+
+static const struct regmap_range isl7998x_readable_ranges[] = {
+ regmap_reg_range(ISL7998X_REG_P0_PRODUCT_ID_CODE,
+ ISL7998X_REG_P0_IRQ_SYNC_CTL),
+ regmap_reg_range(ISL7998X_REG_P0_INTERRUPT_STATUS,
+ ISL7998X_REG_P0_CLOCK_DELAY),
+ regmap_reg_range(ISL7998X_REG_PX_DEC_PAGE(0),
+ ISL7998X_REG_PX_DEC_PAGE(0)),
+
+ ISL7998X_REG_DECODER_ACA_READABLE_RANGE(1),
+ ISL7998X_REG_DECODER_ACA_READABLE_RANGE(2),
+ ISL7998X_REG_DECODER_ACA_READABLE_RANGE(3),
+ ISL7998X_REG_DECODER_ACA_READABLE_RANGE(4),
+
+ regmap_reg_range(ISL7998X_REG_P5_LI_ENGINE_CTL,
+ ISL7998X_REG_P5_MIPI_SP_HS_TRL_CTL),
+ regmap_reg_range(ISL7998X_REG_P5_FIFO_THRSH_CNT_1,
+ ISL7998X_REG_P5_PLL_ANA),
+ regmap_reg_range(ISL7998X_REG_P5_TOTAL_PF_LINE_CNT_1,
+ ISL7998X_REG_P5_HIST_LINE_CNT_2),
+ regmap_reg_range(ISL7998X_REG_PX_DEC_PAGE(5),
+ ISL7998X_REG_PX_DEC_PAGE(5)),
+};
+
+static const struct regmap_range isl7998x_writeable_ranges[] = {
+ regmap_reg_range(ISL7998X_REG_P0_SW_RESET_CTL,
+ ISL7998X_REG_P0_IRQ_SYNC_CTL),
+ regmap_reg_range(ISL7998X_REG_P0_CHAN_1_IRQ,
+ ISL7998X_REG_P0_SHORT_DIAG_IRQ_EN),
+ regmap_reg_range(ISL7998X_REG_P0_CLOCK_DELAY,
+ ISL7998X_REG_P0_CLOCK_DELAY),
+ regmap_reg_range(ISL7998X_REG_PX_DEC_PAGE(0),
+ ISL7998X_REG_PX_DEC_PAGE(0)),
+
+ ISL7998X_REG_DECODER_ACA_WRITEABLE_RANGE(1),
+ ISL7998X_REG_DECODER_ACA_WRITEABLE_RANGE(2),
+ ISL7998X_REG_DECODER_ACA_WRITEABLE_RANGE(3),
+ ISL7998X_REG_DECODER_ACA_WRITEABLE_RANGE(4),
+
+ regmap_reg_range(ISL7998X_REG_P5_LI_ENGINE_CTL,
+ ISL7998X_REG_P5_ESC_MODE_TIME_CTL),
+ regmap_reg_range(ISL7998X_REG_P5_MIPI_SP_HS_TRL_CTL,
+ ISL7998X_REG_P5_PLL_ANA),
+ regmap_reg_range(ISL7998X_REG_P5_TOTAL_PF_LINE_CNT_1,
+ ISL7998X_REG_P5_HIST_LINE_CNT_2),
+ regmap_reg_range(ISL7998X_REG_PX_DEC_PAGE(5),
+ ISL7998X_REG_PX_DEC_PAGE(5)),
+
+ ISL7998X_REG_DECODER_ACA_WRITEABLE_RANGE(0xf),
+};
+
+static const struct regmap_range isl7998x_volatile_ranges[] = {
+ /* Product id code register is used to check availability */
+ regmap_reg_range(ISL7998X_REG_P0_PRODUCT_ID_CODE,
+ ISL7998X_REG_P0_PRODUCT_ID_CODE),
+ regmap_reg_range(ISL7998X_REG_P0_MPP1_SYNC_CTL,
+ ISL7998X_REG_P0_IRQ_SYNC_CTL),
+ regmap_reg_range(ISL7998X_REG_P0_INTERRUPT_STATUS,
+ ISL7998X_REG_P0_INTERRUPT_STATUS),
+ regmap_reg_range(ISL7998X_REG_P0_CHAN_1_STATUS,
+ ISL7998X_REG_P0_SHORT_DIAG_STATUS),
+
+ ISL7998X_REG_DECODER_ACA_VOLATILE_RANGE(1),
+ ISL7998X_REG_DECODER_ACA_VOLATILE_RANGE(2),
+ ISL7998X_REG_DECODER_ACA_VOLATILE_RANGE(3),
+ ISL7998X_REG_DECODER_ACA_VOLATILE_RANGE(4),
+
+ regmap_reg_range(ISL7998X_REG_P5_AUTO_TEST_ERR_DET,
+ ISL7998X_REG_P5_PIC_HEIGHT_LOW),
+};
+
+static const struct regmap_access_table isl7998x_readable_table = {
+ .yes_ranges = isl7998x_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(isl7998x_readable_ranges),
+};
+
+static const struct regmap_access_table isl7998x_writeable_table = {
+ .yes_ranges = isl7998x_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(isl7998x_writeable_ranges),
+};
+
+static const struct regmap_access_table isl7998x_volatile_table = {
+ .yes_ranges = isl7998x_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(isl7998x_volatile_ranges),
+};
+
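+/*
+ * The register map is paged: ISL7998X_REG() composes a virtual address
+ * from (page, offset), and regmap translates it through the page select
+ * register, which is visible at the same offset in every page.
+ */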
+static const struct regmap_range_cfg isl7998x_ranges[] = {
+ {
+ .range_min = ISL7998X_REG_PN_BASE(0),
+ .range_max = ISL7998X_REG_PX_ACA_XFER_HIST_HOST(0xf),
+ .selector_reg = ISL7998X_REG_PX_DEC_PAGE(0),
+ .selector_mask = ISL7998X_REG_PX_DEC_PAGE_MASK,
+ .window_start = 0,
+ .window_len = 256,
+ }
+};
+
+static const struct regmap_config isl7998x_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = ISL7998X_REG_PX_ACA_XFER_HIST_HOST(0xf),
+ .ranges = isl7998x_ranges,
+ .num_ranges = ARRAY_SIZE(isl7998x_ranges),
+ .rd_table = &isl7998x_readable_table,
+ .wr_table = &isl7998x_writeable_table,
+ .volatile_table = &isl7998x_volatile_table,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int isl7998x_mc_init(struct isl7998x *isl7998x)
+{
+ unsigned int i;
+
+ isl7998x->subdev.entity.ops = &isl7998x_entity_ops;
+ isl7998x->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+
+ isl7998x->pads[ISL7998X_PAD_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ for (i = ISL7998X_PAD_VIN1; i < ISL7998X_NUM_PADS; i++)
+ isl7998x->pads[i].flags = MEDIA_PAD_FL_SINK;
+
+ return media_entity_pads_init(&isl7998x->subdev.entity,
+ ISL7998X_NUM_PADS,
+ isl7998x->pads);
+}
+
+static int get_link_freq_menu_index(unsigned int lanes,
+ unsigned int inputs)
+{
+ int ret = -EINVAL;
+
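+	/* Doubling the number of inputs doubles the per-lane link frequency. */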
+ switch (lanes) {
+ case 1:
+ if (inputs == 1)
+ ret = 0;
+ if (inputs == 2)
+ ret = 1;
+ if (inputs == 4)
+ ret = 2;
+ break;
+ case 2:
+ if (inputs == 2)
+ ret = 0;
+ if (inputs == 4)
+ ret = 1;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void isl7998x_remove_controls(struct isl7998x *isl7998x)
+{
+ v4l2_ctrl_handler_free(&isl7998x->ctrl_handler);
+ mutex_destroy(&isl7998x->ctrl_mutex);
+}
+
+static int isl7998x_init_controls(struct isl7998x *isl7998x)
+{
+ struct v4l2_subdev *sd = &isl7998x->subdev;
+ int link_freq_index;
+ unsigned int i;
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(&isl7998x->ctrl_handler,
+ 2 + ARRAY_SIZE(isl7998x_ctrls));
+ if (ret)
+ return ret;
+
+ mutex_init(&isl7998x->ctrl_mutex);
+ isl7998x->ctrl_handler.lock = &isl7998x->ctrl_mutex;
+ link_freq_index = get_link_freq_menu_index(isl7998x->nr_mipi_lanes,
+ isl7998x->nr_inputs);
+ if (link_freq_index < 0 ||
+ link_freq_index >= ARRAY_SIZE(link_freq_menu_items)) {
+ dev_err(sd->dev,
+ "failed to find MIPI link freq: %d lanes, %d inputs\n",
+ isl7998x->nr_mipi_lanes, isl7998x->nr_inputs);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ isl7998x->link_freq = v4l2_ctrl_new_int_menu(&isl7998x->ctrl_handler,
+ &isl7998x_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(link_freq_menu_items) - 1,
+ link_freq_index,
+ link_freq_menu_items);
+ if (isl7998x->link_freq)
+ isl7998x->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ for (i = 0; i < ARRAY_SIZE(isl7998x_ctrls); i++)
+ v4l2_ctrl_new_custom(&isl7998x->ctrl_handler,
+ &isl7998x_ctrls[i], NULL);
+
+ v4l2_ctrl_new_std_menu_items(&isl7998x->ctrl_handler,
+ &isl7998x_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(isl7998x_test_pattern_menu) - 1,
+ 0, 0, isl7998x_test_pattern_menu);
+
+ ret = isl7998x->ctrl_handler.error;
+ if (ret)
+ goto err;
+
+ isl7998x->subdev.ctrl_handler = &isl7998x->ctrl_handler;
+ v4l2_ctrl_handler_setup(&isl7998x->ctrl_handler);
+
+ return 0;
+
+err:
+ isl7998x_remove_controls(isl7998x);
+
+ return ret;
+}
+
+static int isl7998x_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct v4l2_fwnode_endpoint endpoint = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct fwnode_handle *ep;
+ struct isl7998x *isl7998x;
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ int nr_inputs;
+ int ret;
+
+ ret = i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA);
+ if (!ret) {
+ dev_warn(&adapter->dev,
+ "I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n");
+ return -EIO;
+ }
+
+ isl7998x = devm_kzalloc(dev, sizeof(*isl7998x), GFP_KERNEL);
+ if (!isl7998x)
+ return -ENOMEM;
+
+ isl7998x->pd_gpio = devm_gpiod_get_optional(dev, "powerdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(isl7998x->pd_gpio))
+ return dev_err_probe(dev, PTR_ERR(isl7998x->pd_gpio),
+ "Failed to retrieve/request PD GPIO\n");
+
+ isl7998x->rstb_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(isl7998x->rstb_gpio))
+ return dev_err_probe(dev, PTR_ERR(isl7998x->rstb_gpio),
+ "Failed to retrieve/request RSTB GPIO\n");
+
+ isl7998x->regmap = devm_regmap_init_i2c(client, &isl7998x_regmap);
+ if (IS_ERR(isl7998x->regmap))
+ return dev_err_probe(dev, PTR_ERR(isl7998x->regmap),
+ "Failed to allocate register map\n");
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev),
+ ISL7998X_PAD_OUT, 0, 0);
+ if (!ep)
+ return dev_err_probe(dev, -EINVAL, "Missing endpoint node\n");
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &endpoint);
+ fwnode_handle_put(ep);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to parse endpoint\n");
+
+ if (endpoint.bus.mipi_csi2.num_data_lanes == 0 ||
+ endpoint.bus.mipi_csi2.num_data_lanes > 2)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid number of MIPI lanes\n");
+
+ isl7998x->nr_mipi_lanes = endpoint.bus.mipi_csi2.num_data_lanes;
+
+ nr_inputs = isl7998x_get_nr_inputs(dev->of_node);
+ if (nr_inputs < 0)
+ return dev_err_probe(dev, nr_inputs,
+ "Invalid number of input ports\n");
+ isl7998x->nr_inputs = nr_inputs;
+
+ v4l2_i2c_subdev_init(&isl7998x->subdev, client, &isl7998x_subdev_ops);
+ isl7998x->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ ret = isl7998x_mc_init(isl7998x);
+ if (ret < 0)
+ return ret;
+
+ isl7998x->fmt = &isl7998x_colour_fmts[0];
+ isl7998x->norm = V4L2_STD_NTSC;
+ isl7998x->enabled = 0;
+
+ mutex_init(&isl7998x->lock);
+
+ ret = isl7998x_init_controls(isl7998x);
+ if (ret)
+ goto err_entity_cleanup;
+
+ ret = v4l2_async_register_subdev(&isl7998x->subdev);
+ if (ret < 0)
+ goto err_controls_cleanup;
+
+ pm_runtime_enable(dev);
+
+ return 0;
+
+err_controls_cleanup:
+ isl7998x_remove_controls(isl7998x);
+err_entity_cleanup:
+ media_entity_cleanup(&isl7998x->subdev.entity);
+
+ return ret;
+}
+
+static int isl7998x_remove(struct i2c_client *client)
+{
+ struct isl7998x *isl7998x = i2c_to_isl7998x(client);
+
+ pm_runtime_disable(&client->dev);
+ v4l2_async_unregister_subdev(&isl7998x->subdev);
+ isl7998x_remove_controls(isl7998x);
+ media_entity_cleanup(&isl7998x->subdev.entity);
+
+ return 0;
+}
+
+static const struct of_device_id isl7998x_of_match[] = {
+ { .compatible = "isil,isl79987", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, isl7998x_of_match);
+
+static const struct i2c_device_id isl7998x_id[] = {
+ { "isl79987", 0 },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(i2c, isl7998x_id);
+
+static int __maybe_unused isl7998x_runtime_resume(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct isl7998x *isl7998x = sd_to_isl7998x(sd);
+ int ret;
+
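+	/* Power up with reset asserted, then release reset to boot the chip. */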
+ gpiod_set_value(isl7998x->rstb_gpio, 1);
+ gpiod_set_value(isl7998x->pd_gpio, 0);
+ gpiod_set_value(isl7998x->rstb_gpio, 0);
+
+ ret = isl7998x_wait_power_on(isl7998x);
+ if (ret)
+ goto err;
+
+ ret = isl7998x_init(isl7998x);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ gpiod_set_value(isl7998x->pd_gpio, 1);
+
+ return ret;
+}
+
+static int __maybe_unused isl7998x_runtime_suspend(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct isl7998x *isl7998x = sd_to_isl7998x(sd);
+
+ gpiod_set_value(isl7998x->pd_gpio, 1);
+
+ return 0;
+}
+
+static const struct dev_pm_ops isl7998x_pm_ops = {
+ SET_RUNTIME_PM_OPS(isl7998x_runtime_suspend,
+ isl7998x_runtime_resume,
+ NULL)
+};
+
+static struct i2c_driver isl7998x_i2c_driver = {
+ .driver = {
+ .name = "isl7998x",
+ .of_match_table = of_match_ptr(isl7998x_of_match),
+ .pm = &isl7998x_pm_ops,
+ },
+ .probe_new = isl7998x_probe,
+ .remove = isl7998x_remove,
+ .id_table = isl7998x_id,
+};
+
+module_i2c_driver(isl7998x_i2c_driver);
+
+MODULE_DESCRIPTION("Intersil ISL7998x Analog to MIPI CSI-2/BT656 decoder");
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/m5mols/Kconfig b/drivers/media/i2c/m5mols/Kconfig
index 6f0ef33b7ee1..7f0af32f4376 100644
--- a/drivers/media/i2c/m5mols/Kconfig
+++ b/drivers/media/i2c/m5mols/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_M5MOLS
tristate "Fujitsu M-5MOLS 8MP sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
help
diff --git a/drivers/media/i2c/m5mols/m5mols.h b/drivers/media/i2c/m5mols/m5mols.h
index 60c102fa7df5..d8545d2280af 100644
--- a/drivers/media/i2c/m5mols/m5mols.h
+++ b/drivers/media/i2c/m5mols/m5mols.h
@@ -13,6 +13,7 @@
#define M5MOLS_H
#include <linux/sizes.h>
+#include <linux/gpio/consumer.h>
#include <media/v4l2-subdev.h>
#include "m5mols_reg.h"
@@ -181,6 +182,7 @@ struct m5mols_version {
* @stabilization: image stabilization control
* @jpeg_quality: JPEG compression quality control
* @set_power: optional power callback to the board code
+ * @reset: GPIO driving the reset pin of M-5MOLS
* @lock: mutex protecting the structure fields below
* @ffmt: current fmt according to resolution type
* @res_type: current resolution type
@@ -224,6 +226,7 @@ struct m5mols_info {
struct v4l2_ctrl *jpeg_quality;
int (*set_power)(struct device *dev, int on);
+ struct gpio_desc *reset;
struct mutex lock;
diff --git a/drivers/media/i2c/m5mols/m5mols_capture.c b/drivers/media/i2c/m5mols/m5mols_capture.c
index e1b1d689c044..275c5b2539fd 100644
--- a/drivers/media/i2c/m5mols/m5mols_capture.c
+++ b/drivers/media/i2c/m5mols/m5mols_capture.c
@@ -15,7 +15,6 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/videodev2.h>
#include <media/v4l2-ctrls.h>
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index e29be0242f07..c19590389bfe 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -14,7 +14,7 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/videodev2.h>
#include <linux/module.h>
@@ -752,7 +752,6 @@ static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
{
struct v4l2_subdev *sd = &info->sd;
struct i2c_client *client = v4l2_get_subdevdata(sd);
- const struct m5mols_platform_data *pdata = info->pdata;
int ret;
if (info->power == enable)
@@ -772,7 +771,7 @@ static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
return ret;
}
- gpio_set_value(pdata->gpio_reset, !pdata->reset_polarity);
+ gpiod_set_value(info->reset, 0);
info->power = 1;
return ret;
@@ -785,7 +784,7 @@ static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
if (info->set_power)
info->set_power(&client->dev, 0);
- gpio_set_value(pdata->gpio_reset, pdata->reset_polarity);
+ gpiod_set_value(info->reset, 1);
info->isp_ready = 0;
info->power = 0;
@@ -944,7 +943,6 @@ static int m5mols_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct m5mols_platform_data *pdata = client->dev.platform_data;
- unsigned long gpio_flags;
struct m5mols_info *info;
struct v4l2_subdev *sd;
int ret;
@@ -954,11 +952,6 @@ static int m5mols_probe(struct i2c_client *client,
return -EINVAL;
}
- if (!gpio_is_valid(pdata->gpio_reset)) {
- dev_err(&client->dev, "No valid RESET GPIO specified\n");
- return -EINVAL;
- }
-
if (!client->irq) {
dev_err(&client->dev, "Interrupt not assigned\n");
return -EINVAL;
@@ -968,18 +961,16 @@ static int m5mols_probe(struct i2c_client *client,
if (!info)
return -ENOMEM;
+	/* This asserts reset; the GPIO descriptor must specify the polarity */
+ info->reset = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(info->reset))
+ return PTR_ERR(info->reset);
+ /* Notice: the "N" in M5MOLS_NRST implies active low */
+ gpiod_set_consumer_name(info->reset, "M5MOLS_NRST");
+
info->pdata = pdata;
info->set_power = pdata->set_power;
- gpio_flags = pdata->reset_polarity
- ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
- ret = devm_gpio_request_one(&client->dev, pdata->gpio_reset, gpio_flags,
- "M5MOLS_NRST");
- if (ret) {
- dev_err(&client->dev, "Failed to request gpio: %d\n", ret);
- return ret;
- }
-
ret = devm_regulator_bulk_get(&client->dev, ARRAY_SIZE(supplies),
supplies);
if (ret) {
diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c
index bc46a0957b40..0eea200124d2 100644
--- a/drivers/media/i2c/max2175.c
+++ b/drivers/media/i2c/max2175.c
@@ -257,7 +257,7 @@ static const struct regmap_config max2175_regmap_config = {
.reg_defaults = max2175_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(max2175_reg_defaults),
.volatile_table = &max2175_volatile_regs,
- .cache_type = REGCACHE_FLAT,
+ .cache_type = REGCACHE_RBTREE,
};
struct max2175 {
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index eb2b8e42335b..d2a4915ed9f7 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -15,6 +15,7 @@
#include <linux/fwnode.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
#include <linux/module.h>
@@ -168,6 +169,8 @@ struct max9286_priv {
u32 init_rev_chan_mv;
u32 rev_chan_mv;
+ u32 gpio_poc[2];
+
struct v4l2_ctrl_handler ctrls;
struct v4l2_ctrl *pixelrate;
@@ -846,6 +849,10 @@ static const struct v4l2_subdev_internal_ops max9286_subdev_internal_ops = {
.open = max9286_open,
};
+static const struct media_entity_operations max9286_media_ops = {
+ .link_validate = v4l2_subdev_link_validate
+};
+
static int max9286_s_ctrl(struct v4l2_ctrl *ctrl)
{
switch (ctrl->id) {
@@ -895,6 +902,7 @@ static int max9286_v4l2_register(struct max9286_priv *priv)
goto err_async;
priv->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ priv->sd.entity.ops = &max9286_media_ops;
priv->pads[MAX9286_SRC_PAD].flags = MEDIA_PAD_FL_SOURCE;
for (i = 0; i < MAX9286_SRC_PAD; i++)
@@ -1025,20 +1033,27 @@ static int max9286_setup(struct max9286_priv *priv)
return 0;
}
-static void max9286_gpio_set(struct gpio_chip *chip,
- unsigned int offset, int value)
+static int max9286_gpio_set(struct max9286_priv *priv, unsigned int offset,
+ int value)
{
- struct max9286_priv *priv = gpiochip_get_data(chip);
-
if (value)
priv->gpio_state |= BIT(offset);
else
priv->gpio_state &= ~BIT(offset);
- max9286_write(priv, 0x0f, MAX9286_0X0F_RESERVED | priv->gpio_state);
+ return max9286_write(priv, 0x0f,
+ MAX9286_0X0F_RESERVED | priv->gpio_state);
+}
+
+static void max9286_gpiochip_set(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct max9286_priv *priv = gpiochip_get_data(chip);
+
+ max9286_gpio_set(priv, offset, value);
}
-static int max9286_gpio_get(struct gpio_chip *chip, unsigned int offset)
+static int max9286_gpiochip_get(struct gpio_chip *chip, unsigned int offset)
{
struct max9286_priv *priv = gpiochip_get_data(chip);
@@ -1057,13 +1072,10 @@ static int max9286_register_gpio(struct max9286_priv *priv)
gpio->owner = THIS_MODULE;
gpio->ngpio = 2;
gpio->base = -1;
- gpio->set = max9286_gpio_set;
- gpio->get = max9286_gpio_get;
+ gpio->set = max9286_gpiochip_set;
+ gpio->get = max9286_gpiochip_get;
gpio->can_sleep = true;
- /* GPIO values default to high */
- priv->gpio_state = BIT(0) | BIT(1);
-
ret = devm_gpiochip_add_data(dev, gpio, priv);
if (ret)
dev_err(dev, "Unable to create gpio_chip\n");
@@ -1071,6 +1083,70 @@ static int max9286_register_gpio(struct max9286_priv *priv)
return ret;
}
+static int max9286_parse_gpios(struct max9286_priv *priv)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ /* GPIO values default to high */
+ priv->gpio_state = BIT(0) | BIT(1);
+
+ /*
+ * Parse the "gpio-poc" vendor property. If the property is not
+ * specified the camera power is controlled by a regulator.
+ */
+ ret = of_property_read_u32_array(dev->of_node, "maxim,gpio-poc",
+ priv->gpio_poc, 2);
+ if (ret == -EINVAL) {
+ /*
+ * If gpio lines are not used for the camera power, register
+ * a gpio controller for consumers.
+ */
+ ret = max9286_register_gpio(priv);
+ if (ret)
+ return ret;
+
+ priv->regulator = devm_regulator_get(dev, "poc");
+ if (IS_ERR(priv->regulator)) {
+ return dev_err_probe(dev, PTR_ERR(priv->regulator),
+ "Unable to get PoC regulator (%ld)\n",
+ PTR_ERR(priv->regulator));
+ }
+
+ return 0;
+ }
+
+	/* If the property is specified, make sure it is well formed. */
+ if (ret || priv->gpio_poc[0] > 1 ||
+ (priv->gpio_poc[1] != GPIO_ACTIVE_HIGH &&
+ priv->gpio_poc[1] != GPIO_ACTIVE_LOW)) {
+ dev_err(dev, "Invalid 'gpio-poc' property\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int max9286_poc_enable(struct max9286_priv *priv, bool enable)
+{
+ int ret;
+
+ /* If the regulator is not available, use gpio to control power. */
+ if (!priv->regulator)
+ ret = max9286_gpio_set(priv, priv->gpio_poc[0],
+ enable ^ priv->gpio_poc[1]);
+ else if (enable)
+ ret = regulator_enable(priv->regulator);
+ else
+ ret = regulator_disable(priv->regulator);
+
+ if (ret < 0)
+ dev_err(&priv->client->dev, "Unable to turn power %s\n",
+ enable ? "on" : "off");
+
+ return ret;
+}
+
static int max9286_init(struct device *dev)
{
struct max9286_priv *priv;
@@ -1080,17 +1156,14 @@ static int max9286_init(struct device *dev)
client = to_i2c_client(dev);
priv = i2c_get_clientdata(client);
- /* Enable the bus power. */
- ret = regulator_enable(priv->regulator);
- if (ret < 0) {
- dev_err(&client->dev, "Unable to turn PoC on\n");
+ ret = max9286_poc_enable(priv, true);
+ if (ret)
return ret;
- }
ret = max9286_setup(priv);
if (ret) {
dev_err(dev, "Unable to setup max9286\n");
- goto err_regulator;
+ goto err_poc_disable;
}
/*
@@ -1100,7 +1173,7 @@ static int max9286_init(struct device *dev)
ret = max9286_v4l2_register(priv);
if (ret) {
dev_err(dev, "Failed to register with V4L2\n");
- goto err_regulator;
+ goto err_poc_disable;
}
ret = max9286_i2c_mux_init(priv);
@@ -1116,8 +1189,8 @@ static int max9286_init(struct device *dev)
err_v4l2_register:
max9286_v4l2_unregister(priv);
-err_regulator:
- regulator_disable(priv->regulator);
+err_poc_disable:
+ max9286_poc_enable(priv, false);
return ret;
}
@@ -1288,18 +1361,10 @@ static int max9286_probe(struct i2c_client *client)
*/
max9286_configure_i2c(priv, false);
- ret = max9286_register_gpio(priv);
+ ret = max9286_parse_gpios(priv);
if (ret)
goto err_powerdown;
- priv->regulator = devm_regulator_get(&client->dev, "poc");
- if (IS_ERR(priv->regulator)) {
- ret = PTR_ERR(priv->regulator);
- dev_err_probe(&client->dev, ret,
- "Unable to get PoC regulator\n");
- goto err_powerdown;
- }
-
ret = max9286_parse_dt(priv);
if (ret)
goto err_powerdown;
@@ -1326,7 +1391,7 @@ static int max9286_remove(struct i2c_client *client)
max9286_v4l2_unregister(priv);
- regulator_disable(priv->regulator);
+ max9286_poc_enable(priv, false);
gpiod_set_value_cansleep(priv->gpiod_pwdn, 0);
diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c
index 4a1410ebb4c8..48cc0b0922f4 100644
--- a/drivers/media/i2c/ml86v7667.c
+++ b/drivers/media/i2c/ml86v7667.c
@@ -223,9 +223,10 @@ static int ml86v7667_get_mbus_config(struct v4l2_subdev *sd,
unsigned int pad,
struct v4l2_mbus_config *cfg)
{
- cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING |
- V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_BT656;
+ cfg->bus.parallel.flags = V4L2_MBUS_MASTER |
+ V4L2_MBUS_PCLK_SAMPLE_RISING |
+ V4L2_MBUS_DATA_ACTIVE_HIGH;
return 0;
}
diff --git a/drivers/media/i2c/mt9m001.c b/drivers/media/i2c/mt9m001.c
index c9f0bd997ea7..ad13b0c890c0 100644
--- a/drivers/media/i2c/mt9m001.c
+++ b/drivers/media/i2c/mt9m001.c
@@ -695,10 +695,12 @@ static int mt9m001_get_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
/* MT9M001 has all capture_format parameters fixed */
- cfg->flags = V4L2_MBUS_PCLK_SAMPLE_FALLING |
- V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
- V4L2_MBUS_DATA_ACTIVE_HIGH | V4L2_MBUS_MASTER;
cfg->type = V4L2_MBUS_PARALLEL;
+ cfg->bus.parallel.flags = V4L2_MBUS_PCLK_SAMPLE_FALLING |
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_DATA_ACTIVE_HIGH |
+ V4L2_MBUS_MASTER;
return 0;
}
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index 91a44359bcd3..afc86efa9e3e 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -9,7 +9,6 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/log2.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
#include <linux/v4l2-mediabus.h>
@@ -1143,14 +1142,16 @@ static int mt9m111_get_mbus_config(struct v4l2_subdev *sd,
{
struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
- cfg->flags = V4L2_MBUS_MASTER |
- V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
- V4L2_MBUS_DATA_ACTIVE_HIGH;
+ cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags |= mt9m111->pclk_sample ? V4L2_MBUS_PCLK_SAMPLE_RISING :
- V4L2_MBUS_PCLK_SAMPLE_FALLING;
+ cfg->bus.parallel.flags = V4L2_MBUS_MASTER |
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_DATA_ACTIVE_HIGH;
- cfg->type = V4L2_MBUS_PARALLEL;
+ cfg->bus.parallel.flags |= mt9m111->pclk_sample ?
+ V4L2_MBUS_PCLK_SAMPLE_RISING :
+ V4L2_MBUS_PCLK_SAMPLE_FALLING;
return 0;
}
diff --git a/drivers/media/i2c/noon010pc30.c b/drivers/media/i2c/noon010pc30.c
index f3ac379ef34a..bc5187f46365 100644
--- a/drivers/media/i2c/noon010pc30.c
+++ b/drivers/media/i2c/noon010pc30.c
@@ -10,7 +10,7 @@
*/
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
@@ -130,8 +130,8 @@ struct noon010_info {
struct media_pad pad;
struct v4l2_ctrl_handler hdl;
struct regulator_bulk_data supply[NOON010_NUM_SUPPLIES];
- u32 gpio_nreset;
- u32 gpio_nstby;
+ struct gpio_desc *reset;
+ struct gpio_desc *stby;
/* Protects the struct members below */
struct mutex lock;
@@ -393,29 +393,33 @@ static int power_enable(struct noon010_info *info)
return 0;
}
- if (gpio_is_valid(info->gpio_nstby))
- gpio_set_value(info->gpio_nstby, 0);
+ /* Assert standby: line should be flagged active low in descriptor */
+ if (info->stby)
+ gpiod_set_value(info->stby, 1);
- if (gpio_is_valid(info->gpio_nreset))
- gpio_set_value(info->gpio_nreset, 0);
+ /* Assert reset: line should be flagged active low in descriptor */
+ if (info->reset)
+ gpiod_set_value(info->reset, 1);
ret = regulator_bulk_enable(NOON010_NUM_SUPPLIES, info->supply);
if (ret)
return ret;
- if (gpio_is_valid(info->gpio_nreset)) {
+ /* De-assert reset and standby */
+ if (info->reset) {
msleep(50);
- gpio_set_value(info->gpio_nreset, 1);
+ gpiod_set_value(info->reset, 0);
}
- if (gpio_is_valid(info->gpio_nstby)) {
+ if (info->stby) {
udelay(1000);
- gpio_set_value(info->gpio_nstby, 1);
+ gpiod_set_value(info->stby, 0);
}
- if (gpio_is_valid(info->gpio_nreset)) {
+ /* Cycle reset: assert and deassert */
+ if (info->reset) {
udelay(1000);
- gpio_set_value(info->gpio_nreset, 0);
+ gpiod_set_value(info->reset, 1);
msleep(100);
- gpio_set_value(info->gpio_nreset, 1);
+ gpiod_set_value(info->reset, 0);
msleep(20);
}
info->power = 1;
@@ -438,11 +442,12 @@ static int power_disable(struct noon010_info *info)
if (ret)
return ret;
- if (gpio_is_valid(info->gpio_nstby))
- gpio_set_value(info->gpio_nstby, 0);
+ /* Assert standby and reset */
+ if (info->stby)
+ gpiod_set_value(info->stby, 1);
- if (gpio_is_valid(info->gpio_nreset))
- gpio_set_value(info->gpio_nreset, 0);
+ if (info->reset)
+ gpiod_set_value(info->reset, 1);
info->power = 0;
@@ -741,34 +746,24 @@ static int noon010_probe(struct i2c_client *client,
goto np_err;
info->i2c_reg_page = -1;
- info->gpio_nreset = -EINVAL;
- info->gpio_nstby = -EINVAL;
info->curr_fmt = &noon010_formats[0];
info->curr_win = &noon010_sizes[0];
- if (gpio_is_valid(pdata->gpio_nreset)) {
- ret = devm_gpio_request_one(&client->dev, pdata->gpio_nreset,
- GPIOF_OUT_INIT_LOW,
- "NOON010PC30 NRST");
- if (ret) {
- dev_err(&client->dev, "GPIO request error: %d\n", ret);
- goto np_err;
- }
- info->gpio_nreset = pdata->gpio_nreset;
- gpio_export(info->gpio_nreset, 0);
+ /* Request the reset line asserted so the chip starts out in reset */
+ info->reset = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(info->reset)) {
+ ret = PTR_ERR(info->reset);
+ goto np_err;
}
+ gpiod_set_consumer_name(info->reset, "NOON010PC30 NRST");
- if (gpio_is_valid(pdata->gpio_nstby)) {
- ret = devm_gpio_request_one(&client->dev, pdata->gpio_nstby,
- GPIOF_OUT_INIT_LOW,
- "NOON010PC30 NSTBY");
- if (ret) {
- dev_err(&client->dev, "GPIO request error: %d\n", ret);
- goto np_err;
- }
- info->gpio_nstby = pdata->gpio_nstby;
- gpio_export(info->gpio_nstby, 0);
+ /* Request the standby line asserted so the chip starts out in standby */
+ info->stby = devm_gpiod_get(&client->dev, "standby", GPIOD_OUT_HIGH);
+ if (IS_ERR(info->stby)) {
+ ret = PTR_ERR(info->stby);
+ goto np_err;
}
+ gpiod_set_consumer_name(info->stby, "NOON010PC30 STBY");
for (i = 0; i < NOON010_NUM_SUPPLIES; i++)
info->supply[i].supply = noon010_supply_name[i];
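
One behavioural difference worth noting: devm_gpiod_get() makes both lines mandatory, whereas the removed gpio_is_valid() checks tolerated boards that wired up neither. If the old optional behaviour were intended, the probe would need the _optional variant, roughly:

	info->reset = devm_gpiod_get_optional(&client->dev, "reset",
					      GPIOD_OUT_HIGH);
	if (IS_ERR(info->reset)) {
		ret = PTR_ERR(info->reset);
		goto np_err;
	}
	/* a NULL descriptor here simply means the line is not wired */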
diff --git a/drivers/media/i2c/og01a1b.c b/drivers/media/i2c/og01a1b.c
new file mode 100644
index 000000000000..87179fc04e00
--- /dev/null
+++ b/drivers/media/i2c/og01a1b.c
@@ -0,0 +1,1128 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2022 Intel Corporation.
+
+#include <asm/unaligned.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define OG01A1B_REG_VALUE_08BIT 1
+#define OG01A1B_REG_VALUE_16BIT 2
+#define OG01A1B_REG_VALUE_24BIT 3
+
+#define OG01A1B_LINK_FREQ_500MHZ 500000000ULL
+#define OG01A1B_SCLK 120000000LL
+#define OG01A1B_MCLK 19200000
+#define OG01A1B_DATA_LANES 2
+#define OG01A1B_RGB_DEPTH 10
+
+#define OG01A1B_REG_CHIP_ID 0x300a
+#define OG01A1B_CHIP_ID 0x470141
+
+#define OG01A1B_REG_MODE_SELECT 0x0100
+#define OG01A1B_MODE_STANDBY 0x00
+#define OG01A1B_MODE_STREAMING 0x01
+
+/* vertical-timings from sensor */
+#define OG01A1B_REG_VTS 0x380e
+#define OG01A1B_VTS_120FPS 0x0498
+#define OG01A1B_VTS_120FPS_MIN 0x0498
+#define OG01A1B_VTS_MAX 0x7fff
+
+/* horizontal-timings from sensor */
+#define OG01A1B_REG_HTS 0x380c
+
+/* Exposure controls from sensor */
+#define OG01A1B_REG_EXPOSURE 0x3501
+#define OG01A1B_EXPOSURE_MIN 1
+#define OG01A1B_EXPOSURE_MAX_MARGIN 14
+#define OG01A1B_EXPOSURE_STEP 1
+
+/* Analog gain controls from sensor */
+#define OG01A1B_REG_ANALOG_GAIN 0x3508
+#define OG01A1B_ANAL_GAIN_MIN 16
+#define OG01A1B_ANAL_GAIN_MAX 248 /* Max = 15.5x */
+#define OG01A1B_ANAL_GAIN_STEP 1
+
+/* Digital gain controls from sensor */
+#define OG01A1B_REG_DIG_GAIN 0x350a
+#define OG01A1B_DGTL_GAIN_MIN 1024
+#define OG01A1B_DGTL_GAIN_MAX 16384 /* Max = 16x */
+#define OG01A1B_DGTL_GAIN_STEP 1
+#define OG01A1B_DGTL_GAIN_DEFAULT 1024
+
+/* Group Access */
+#define OG01A1B_REG_GROUP_ACCESS 0x3208
+#define OG01A1B_GROUP_HOLD_START 0x0
+#define OG01A1B_GROUP_HOLD_END 0x10
+#define OG01A1B_GROUP_HOLD_LAUNCH 0xa0
+
+/* Test Pattern Control */
+#define OG01A1B_REG_TEST_PATTERN 0x5100
+#define OG01A1B_TEST_PATTERN_ENABLE BIT(7)
+#define OG01A1B_TEST_PATTERN_BAR_SHIFT 2
+
+#define to_og01a1b(_sd) container_of(_sd, struct og01a1b, sd)
+
+enum {
+ OG01A1B_LINK_FREQ_1000MBPS,
+};
+
+struct og01a1b_reg {
+ u16 address;
+ u8 val;
+};
+
+struct og01a1b_reg_list {
+ u32 num_of_regs;
+ const struct og01a1b_reg *regs;
+};
+
+struct og01a1b_link_freq_config {
+ const struct og01a1b_reg_list reg_list;
+};
+
+struct og01a1b_mode {
+ /* Frame width in pixels */
+ u32 width;
+
+ /* Frame height in pixels */
+ u32 height;
+
+ /* Horizontal timing size */
+ u32 hts;
+
+ /* Default vertical timing size */
+ u32 vts_def;
+
+ /* Min vertical timing size */
+ u32 vts_min;
+
+ /* Link frequency needed for this resolution */
+ u32 link_freq_index;
+
+ /* Sensor register settings for this resolution */
+ const struct og01a1b_reg_list reg_list;
+};
+
+static const struct og01a1b_reg mipi_data_rate_1000mbps[] = {
+ {0x0103, 0x01},
+ {0x0303, 0x02},
+ {0x0304, 0x00},
+ {0x0305, 0xd2},
+ {0x0323, 0x02},
+ {0x0324, 0x01},
+ {0x0325, 0x77},
+};
+
+static const struct og01a1b_reg mode_1280x1024_regs[] = {
+ {0x0300, 0x0a},
+ {0x0301, 0x29},
+ {0x0302, 0x31},
+ {0x0303, 0x02},
+ {0x0304, 0x00},
+ {0x0305, 0xd2},
+ {0x0306, 0x00},
+ {0x0307, 0x01},
+ {0x0308, 0x02},
+ {0x0309, 0x00},
+ {0x0310, 0x00},
+ {0x0311, 0x00},
+ {0x0312, 0x07},
+ {0x0313, 0x00},
+ {0x0314, 0x00},
+ {0x0315, 0x00},
+ {0x0320, 0x02},
+ {0x0321, 0x01},
+ {0x0322, 0x01},
+ {0x0323, 0x02},
+ {0x0324, 0x01},
+ {0x0325, 0x77},
+ {0x0326, 0xce},
+ {0x0327, 0x04},
+ {0x0329, 0x02},
+ {0x032a, 0x04},
+ {0x032b, 0x04},
+ {0x032c, 0x02},
+ {0x032d, 0x01},
+ {0x032e, 0x00},
+ {0x300d, 0x02},
+ {0x300e, 0x04},
+ {0x3021, 0x08},
+ {0x301e, 0x03},
+ {0x3103, 0x00},
+ {0x3106, 0x08},
+ {0x3107, 0x40},
+ {0x3216, 0x01},
+ {0x3217, 0x00},
+ {0x3218, 0xc0},
+ {0x3219, 0x55},
+ {0x3500, 0x00},
+ {0x3501, 0x04},
+ {0x3502, 0x8a},
+ {0x3506, 0x01},
+ {0x3507, 0x72},
+ {0x3508, 0x01},
+ {0x3509, 0x00},
+ {0x350a, 0x01},
+ {0x350b, 0x00},
+ {0x350c, 0x00},
+ {0x3541, 0x00},
+ {0x3542, 0x40},
+ {0x3605, 0xe0},
+ {0x3606, 0x41},
+ {0x3614, 0x20},
+ {0x3620, 0x0b},
+ {0x3630, 0x07},
+ {0x3636, 0xa0},
+ {0x3637, 0xf9},
+ {0x3638, 0x09},
+ {0x3639, 0x38},
+ {0x363f, 0x09},
+ {0x3640, 0x17},
+ {0x3662, 0x04},
+ {0x3665, 0x80},
+ {0x3670, 0x68},
+ {0x3674, 0x00},
+ {0x3677, 0x3f},
+ {0x3679, 0x00},
+ {0x369f, 0x19},
+ {0x36a0, 0x03},
+ {0x36a2, 0x19},
+ {0x36a3, 0x03},
+ {0x370d, 0x66},
+ {0x370f, 0x00},
+ {0x3710, 0x03},
+ {0x3715, 0x03},
+ {0x3716, 0x03},
+ {0x3717, 0x06},
+ {0x3733, 0x00},
+ {0x3778, 0x00},
+ {0x37a8, 0x0f},
+ {0x37a9, 0x01},
+ {0x37aa, 0x07},
+ {0x37bd, 0x1c},
+ {0x37c1, 0x2f},
+ {0x37c3, 0x09},
+ {0x37c8, 0x1d},
+ {0x37ca, 0x30},
+ {0x37df, 0x00},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x05},
+ {0x3805, 0x0f},
+ {0x3806, 0x04},
+ {0x3807, 0x0f},
+ {0x3808, 0x05},
+ {0x3809, 0x00},
+ {0x380a, 0x04},
+ {0x380b, 0x00},
+ {0x380c, 0x03},
+ {0x380d, 0x50},
+ {0x380e, 0x04},
+ {0x380f, 0x98},
+ {0x3810, 0x00},
+ {0x3811, 0x08},
+ {0x3812, 0x00},
+ {0x3813, 0x08},
+ {0x3814, 0x11},
+ {0x3815, 0x11},
+ {0x3820, 0x40},
+ {0x3821, 0x04},
+ {0x3826, 0x00},
+ {0x3827, 0x00},
+ {0x382a, 0x08},
+ {0x382b, 0x52},
+ {0x382d, 0xba},
+ {0x383d, 0x14},
+ {0x384a, 0xa2},
+ {0x3866, 0x0e},
+ {0x3867, 0x07},
+ {0x3884, 0x00},
+ {0x3885, 0x08},
+ {0x3893, 0x68},
+ {0x3894, 0x2a},
+ {0x3898, 0x00},
+ {0x3899, 0x31},
+ {0x389a, 0x04},
+ {0x389b, 0x00},
+ {0x389c, 0x0b},
+ {0x389d, 0xad},
+ {0x389f, 0x08},
+ {0x38a0, 0x00},
+ {0x38a1, 0x00},
+ {0x38a8, 0x70},
+ {0x38ac, 0xea},
+ {0x38b2, 0x00},
+ {0x38b3, 0x08},
+ {0x38bc, 0x20},
+ {0x38c4, 0x0c},
+ {0x38c5, 0x3a},
+ {0x38c7, 0x3a},
+ {0x38e1, 0xc0},
+ {0x38ec, 0x3c},
+ {0x38f0, 0x09},
+ {0x38f1, 0x6f},
+ {0x38fe, 0x3c},
+ {0x391e, 0x00},
+ {0x391f, 0x00},
+ {0x3920, 0xa5},
+ {0x3921, 0x00},
+ {0x3922, 0x00},
+ {0x3923, 0x00},
+ {0x3924, 0x05},
+ {0x3925, 0x00},
+ {0x3926, 0x00},
+ {0x3927, 0x00},
+ {0x3928, 0x1a},
+ {0x3929, 0x01},
+ {0x392a, 0xb4},
+ {0x392b, 0x00},
+ {0x392c, 0x10},
+ {0x392f, 0x40},
+ {0x4000, 0xcf},
+ {0x4003, 0x40},
+ {0x4008, 0x00},
+ {0x4009, 0x07},
+ {0x400a, 0x02},
+ {0x400b, 0x54},
+ {0x400c, 0x00},
+ {0x400d, 0x07},
+ {0x4010, 0xc0},
+ {0x4012, 0x02},
+ {0x4014, 0x04},
+ {0x4015, 0x04},
+ {0x4017, 0x02},
+ {0x4042, 0x01},
+ {0x4306, 0x04},
+ {0x4307, 0x12},
+ {0x4509, 0x00},
+ {0x450b, 0x83},
+ {0x4604, 0x68},
+ {0x4608, 0x0a},
+ {0x4700, 0x06},
+ {0x4800, 0x64},
+ {0x481b, 0x3c},
+ {0x4825, 0x32},
+ {0x4833, 0x18},
+ {0x4837, 0x0f},
+ {0x4850, 0x40},
+ {0x4860, 0x00},
+ {0x4861, 0xec},
+ {0x4864, 0x00},
+ {0x4883, 0x00},
+ {0x4888, 0x90},
+ {0x4889, 0x05},
+ {0x488b, 0x04},
+ {0x4f00, 0x04},
+ {0x4f10, 0x04},
+ {0x4f21, 0x01},
+ {0x4f22, 0x40},
+ {0x4f23, 0x44},
+ {0x4f24, 0x51},
+ {0x4f25, 0x41},
+ {0x5000, 0x1f},
+ {0x500a, 0x00},
+ {0x5100, 0x00},
+ {0x5111, 0x20},
+ {0x3020, 0x20},
+ {0x3613, 0x03},
+ {0x38c9, 0x02},
+ {0x5304, 0x01},
+ {0x3620, 0x08},
+ {0x3639, 0x58},
+ {0x363a, 0x10},
+ {0x3674, 0x04},
+ {0x3780, 0xff},
+ {0x3781, 0xff},
+ {0x3782, 0x00},
+ {0x3783, 0x01},
+ {0x3798, 0xa3},
+ {0x37aa, 0x10},
+ {0x38a8, 0xf0},
+ {0x38c4, 0x09},
+ {0x38c5, 0xb0},
+ {0x38df, 0x80},
+ {0x38ff, 0x05},
+ {0x4010, 0xf1},
+ {0x4011, 0x70},
+ {0x3667, 0x80},
+ {0x4d00, 0x4a},
+ {0x4d01, 0x18},
+ {0x4d02, 0xbb},
+ {0x4d03, 0xde},
+ {0x4d04, 0x93},
+ {0x4d05, 0xff},
+ {0x4d09, 0x0a},
+ {0x37aa, 0x16},
+ {0x3606, 0x42},
+ {0x3605, 0x00},
+ {0x36a2, 0x17},
+ {0x300d, 0x0a},
+ {0x4d00, 0x4d},
+ {0x4d01, 0x95},
+ {0x3d8C, 0x70},
+ {0x3d8d, 0xE9},
+ {0x5300, 0x00},
+ {0x5301, 0x10},
+ {0x5302, 0x00},
+ {0x5303, 0xE3},
+ {0x3d88, 0x00},
+ {0x3d89, 0x10},
+ {0x3d8a, 0x00},
+ {0x3d8b, 0xE3},
+ {0x4f22, 0x00},
+};
+
+static const char * const og01a1b_test_pattern_menu[] = {
+ "Disabled",
+ "Standard Color Bar",
+ "Top-Bottom Darker Color Bar",
+ "Right-Left Darker Color Bar",
+ "Bottom-Top Darker Color Bar"
+};
+
+static const s64 link_freq_menu_items[] = {
+ OG01A1B_LINK_FREQ_500MHZ,
+};
+
+static const struct og01a1b_link_freq_config link_freq_configs[] = {
+ [OG01A1B_LINK_FREQ_1000MBPS] = {
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_1000mbps),
+ .regs = mipi_data_rate_1000mbps,
+ }
+ }
+};
+
+static const struct og01a1b_mode supported_modes[] = {
+ {
+ .width = 1280,
+ .height = 1024,
+ .hts = 848,
+ .vts_def = OG01A1B_VTS_120FPS,
+ .vts_min = OG01A1B_VTS_120FPS_MIN,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1280x1024_regs),
+ .regs = mode_1280x1024_regs,
+ },
+ .link_freq_index = OG01A1B_LINK_FREQ_1000MBPS,
+ },
+};
+
+struct og01a1b {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ /* Current mode */
+ const struct og01a1b_mode *cur_mode;
+
+ /* To serialize asynchronous callbacks */
+ struct mutex mutex;
+
+ /* Streaming on/off */
+ bool streaming;
+};
+
+static u64 to_pixel_rate(u32 f_index)
+{
+ u64 pixel_rate = link_freq_menu_items[f_index] * 2 * OG01A1B_DATA_LANES;
+
+ do_div(pixel_rate, OG01A1B_RGB_DEPTH);
+
+ return pixel_rate;
+}
+
+static u64 to_pixels_per_line(u32 hts, u32 f_index)
+{
+ u64 ppl = hts * to_pixel_rate(f_index);
+
+ do_div(ppl, OG01A1B_SCLK);
+
+ return ppl;
+}
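+/*
+ * For the single 1280x1024 mode above these helpers work out to:
+ * pixel rate = 500000000 (link) * 2 (DDR) * 2 lanes / 10 bpp
+ *            = 200 Mpixel/s, and
+ * ppl = hts 848 * 200 MHz / 120 MHz SCLK = 1413 (truncated by do_div).
+ */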
+
+static int og01a1b_read_reg(struct og01a1b *og01a1b, u16 reg, u16 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2];
+ u8 data_buf[4] = {0};
+ int ret;
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, addr_buf);
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = sizeof(addr_buf);
+ msgs[0].buf = addr_buf;
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_buf[4 - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
+
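+/*
+ * Register writes put the 16-bit address on the wire followed by the
+ * value MSB-first: e.g. writing 0x0498 to OG01A1B_REG_VTS (0x380e)
+ * with len == 2 transmits {0x38, 0x0e, 0x04, 0x98}.
+ */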
+static int og01a1b_write_reg(struct og01a1b *og01a1b, u16 reg, u16 len, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
+ u8 buf[6];
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be32(val << 8 * (4 - len), buf + 2);
+ if (i2c_master_send(client, buf, len + 2) != len + 2)
+ return -EIO;
+
+ return 0;
+}
+
+static int og01a1b_write_reg_list(struct og01a1b *og01a1b,
+ const struct og01a1b_reg_list *r_list)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < r_list->num_of_regs; i++) {
+ ret = og01a1b_write_reg(og01a1b, r_list->regs[i].address, 1,
+ r_list->regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "failed to write reg 0x%4.4x. error = %d",
+ r_list->regs[i].address, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
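+/*
+ * Test-pattern menu index 0 disables the generator; index n > 0
+ * selects bar pattern n - 1, e.g. index 2 writes
+ * (2 - 1) << OG01A1B_TEST_PATTERN_BAR_SHIFT | BIT(7) = 0x84.
+ */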
+static int og01a1b_test_pattern(struct og01a1b *og01a1b, u32 pattern)
+{
+ if (pattern)
+ pattern = (pattern - 1) << OG01A1B_TEST_PATTERN_BAR_SHIFT |
+ OG01A1B_TEST_PATTERN_ENABLE;
+
+ return og01a1b_write_reg(og01a1b, OG01A1B_REG_TEST_PATTERN,
+ OG01A1B_REG_VALUE_08BIT, pattern);
+}
+
+static int og01a1b_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct og01a1b *og01a1b = container_of(ctrl->handler,
+ struct og01a1b, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
+ s64 exposure_max;
+ int ret = 0;
+
+ /* Propagate change of current control to all related controls */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = og01a1b->cur_mode->height + ctrl->val -
+ OG01A1B_EXPOSURE_MAX_MARGIN;
+ __v4l2_ctrl_modify_range(og01a1b->exposure,
+ og01a1b->exposure->minimum,
+ exposure_max, og01a1b->exposure->step,
+ exposure_max);
+ }
+
+ /* V4L2 controls values will be applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = og01a1b_write_reg(og01a1b, OG01A1B_REG_ANALOG_GAIN,
+ OG01A1B_REG_VALUE_16BIT,
+ ctrl->val << 4);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = og01a1b_write_reg(og01a1b, OG01A1B_REG_DIG_GAIN,
+ OG01A1B_REG_VALUE_24BIT,
+ ctrl->val << 6);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ ret = og01a1b_write_reg(og01a1b, OG01A1B_REG_EXPOSURE,
+ OG01A1B_REG_VALUE_16BIT, ctrl->val);
+ break;
+
+ case V4L2_CID_VBLANK:
+ ret = og01a1b_write_reg(og01a1b, OG01A1B_REG_VTS,
+ OG01A1B_REG_VALUE_16BIT,
+ og01a1b->cur_mode->height + ctrl->val);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ ret = og01a1b_test_pattern(og01a1b, ctrl->val);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops og01a1b_ctrl_ops = {
+ .s_ctrl = og01a1b_set_ctrl,
+};
+
+static int og01a1b_init_controls(struct og01a1b *og01a1b)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ s64 exposure_max, h_blank;
+ int ret;
+
+ ctrl_hdlr = &og01a1b->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ if (ret)
+ return ret;
+
+ ctrl_hdlr->lock = &og01a1b->mutex;
+ og01a1b->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr,
+ &og01a1b_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE
+ (link_freq_menu_items) - 1,
+ 0, link_freq_menu_items);
+ if (og01a1b->link_freq)
+ og01a1b->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ og01a1b->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &og01a1b_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ to_pixel_rate
+ (OG01A1B_LINK_FREQ_1000MBPS),
+ 1,
+ to_pixel_rate
+ (OG01A1B_LINK_FREQ_1000MBPS));
+ og01a1b->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &og01a1b_ctrl_ops,
+ V4L2_CID_VBLANK,
+ og01a1b->cur_mode->vts_min -
+ og01a1b->cur_mode->height,
+ OG01A1B_VTS_MAX -
+ og01a1b->cur_mode->height, 1,
+ og01a1b->cur_mode->vts_def -
+ og01a1b->cur_mode->height);
+ h_blank = to_pixels_per_line(og01a1b->cur_mode->hts,
+ og01a1b->cur_mode->link_freq_index) -
+ og01a1b->cur_mode->width;
+ og01a1b->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &og01a1b_ctrl_ops,
+ V4L2_CID_HBLANK, h_blank, h_blank,
+ 1, h_blank);
+ if (og01a1b->hblank)
+ og01a1b->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &og01a1b_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OG01A1B_ANAL_GAIN_MIN, OG01A1B_ANAL_GAIN_MAX,
+ OG01A1B_ANAL_GAIN_STEP, OG01A1B_ANAL_GAIN_MIN);
+ v4l2_ctrl_new_std(ctrl_hdlr, &og01a1b_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ OG01A1B_DGTL_GAIN_MIN, OG01A1B_DGTL_GAIN_MAX,
+ OG01A1B_DGTL_GAIN_STEP, OG01A1B_DGTL_GAIN_DEFAULT);
+ exposure_max = (og01a1b->cur_mode->vts_def -
+ OG01A1B_EXPOSURE_MAX_MARGIN);
+ og01a1b->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &og01a1b_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OG01A1B_EXPOSURE_MIN,
+ exposure_max,
+ OG01A1B_EXPOSURE_STEP,
+ exposure_max);
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &og01a1b_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(og01a1b_test_pattern_menu) - 1,
+ 0, 0, og01a1b_test_pattern_menu);
+
+ if (ctrl_hdlr->error)
+ return ctrl_hdlr->error;
+
+ og01a1b->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+}
+
+static void og01a1b_update_pad_format(const struct og01a1b_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+static int og01a1b_start_streaming(struct og01a1b *og01a1b)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
+ const struct og01a1b_reg_list *reg_list;
+ int link_freq_index, ret;
+
+ link_freq_index = og01a1b->cur_mode->link_freq_index;
+ reg_list = &link_freq_configs[link_freq_index].reg_list;
+
+ ret = og01a1b_write_reg_list(og01a1b, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set plls");
+ return ret;
+ }
+
+ reg_list = &og01a1b->cur_mode->reg_list;
+ ret = og01a1b_write_reg_list(og01a1b, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode");
+ return ret;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(og01a1b->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ ret = og01a1b_write_reg(og01a1b, OG01A1B_REG_MODE_SELECT,
+ OG01A1B_REG_VALUE_08BIT,
+ OG01A1B_MODE_STREAMING);
+ if (ret) {
+ dev_err(&client->dev, "failed to set stream");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void og01a1b_stop_streaming(struct og01a1b *og01a1b)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
+
+ if (og01a1b_write_reg(og01a1b, OG01A1B_REG_MODE_SELECT,
+ OG01A1B_REG_VALUE_08BIT, OG01A1B_MODE_STANDBY))
+ dev_err(&client->dev, "failed to set stream");
+}
+
+static int og01a1b_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct og01a1b *og01a1b = to_og01a1b(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (og01a1b->streaming == enable)
+ return 0;
+
+ mutex_lock(&og01a1b->mutex);
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ mutex_unlock(&og01a1b->mutex);
+ return ret;
+ }
+
+ ret = og01a1b_start_streaming(og01a1b);
+ if (ret) {
+ enable = 0;
+ og01a1b_stop_streaming(og01a1b);
+ pm_runtime_put(&client->dev);
+ }
+ } else {
+ og01a1b_stop_streaming(og01a1b);
+ pm_runtime_put(&client->dev);
+ }
+
+ og01a1b->streaming = enable;
+ mutex_unlock(&og01a1b->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused og01a1b_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct og01a1b *og01a1b = to_og01a1b(sd);
+
+ mutex_lock(&og01a1b->mutex);
+ if (og01a1b->streaming)
+ og01a1b_stop_streaming(og01a1b);
+
+ mutex_unlock(&og01a1b->mutex);
+
+ return 0;
+}
+
+static int __maybe_unused og01a1b_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct og01a1b *og01a1b = to_og01a1b(sd);
+ int ret;
+
+ mutex_lock(&og01a1b->mutex);
+ if (og01a1b->streaming) {
+ ret = og01a1b_start_streaming(og01a1b);
+ if (ret) {
+ og01a1b->streaming = false;
+ og01a1b_stop_streaming(og01a1b);
+ mutex_unlock(&og01a1b->mutex);
+ return ret;
+ }
+ }
+
+ mutex_unlock(&og01a1b->mutex);
+
+ return 0;
+}
+
+static int og01a1b_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct og01a1b *og01a1b = to_og01a1b(sd);
+ const struct og01a1b_mode *mode;
+ s32 vblank_def, h_blank;
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes), width,
+ height, fmt->format.width,
+ fmt->format.height);
+
+ mutex_lock(&og01a1b->mutex);
+ og01a1b_update_pad_format(mode, &fmt->format);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, sd_state,
+ fmt->pad) = fmt->format;
+ } else {
+ og01a1b->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(og01a1b->link_freq, mode->link_freq_index);
+ __v4l2_ctrl_s_ctrl_int64(og01a1b->pixel_rate,
+ to_pixel_rate(mode->link_freq_index));
+
+ /* Update limits and set FPS to default */
+ vblank_def = mode->vts_def - mode->height;
+ __v4l2_ctrl_modify_range(og01a1b->vblank,
+ mode->vts_min - mode->height,
+ OG01A1B_VTS_MAX - mode->height, 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(og01a1b->vblank, vblank_def);
+ h_blank = to_pixels_per_line(mode->hts, mode->link_freq_index) -
+ mode->width;
+ __v4l2_ctrl_modify_range(og01a1b->hblank, h_blank, h_blank, 1,
+ h_blank);
+ }
+
+ mutex_unlock(&og01a1b->mutex);
+
+ return 0;
+}
+
+static int og01a1b_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct og01a1b *og01a1b = to_og01a1b(sd);
+
+ mutex_lock(&og01a1b->mutex);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt->format = *v4l2_subdev_get_try_format(&og01a1b->sd,
+ sd_state,
+ fmt->pad);
+ else
+ og01a1b_update_pad_format(og01a1b->cur_mode, &fmt->format);
+
+ mutex_unlock(&og01a1b->mutex);
+
+ return 0;
+}
+
+static int og01a1b_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int og01a1b_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int og01a1b_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct og01a1b *og01a1b = to_og01a1b(sd);
+
+ mutex_lock(&og01a1b->mutex);
+ og01a1b_update_pad_format(&supported_modes[0],
+ v4l2_subdev_get_try_format(sd, fh->state, 0));
+ mutex_unlock(&og01a1b->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops og01a1b_video_ops = {
+ .s_stream = og01a1b_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops og01a1b_pad_ops = {
+ .set_fmt = og01a1b_set_format,
+ .get_fmt = og01a1b_get_format,
+ .enum_mbus_code = og01a1b_enum_mbus_code,
+ .enum_frame_size = og01a1b_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops og01a1b_subdev_ops = {
+ .video = &og01a1b_video_ops,
+ .pad = &og01a1b_pad_ops,
+};
+
+static const struct media_entity_operations og01a1b_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops og01a1b_internal_ops = {
+ .open = og01a1b_open,
+};
+
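+/*
+ * The 24-bit chip ID is read as three bytes starting at 0x300a; the
+ * read helper packs them big-endian, so the result can be compared
+ * directly against OG01A1B_CHIP_ID (0x470141).
+ */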
+static int og01a1b_identify_module(struct og01a1b *og01a1b)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
+ int ret;
+ u32 val;
+
+ ret = og01a1b_read_reg(og01a1b, OG01A1B_REG_CHIP_ID,
+ OG01A1B_REG_VALUE_24BIT, &val);
+ if (ret)
+ return ret;
+
+ if (val != OG01A1B_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ OG01A1B_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int og01a1b_check_hwcfg(struct device *dev)
+{
+ struct fwnode_handle *ep;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ u32 mclk;
+ int ret;
+ unsigned int i, j;
+
+ if (!fwnode)
+ return -ENXIO;
+
+ ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
+
+ if (ret) {
+ dev_err(dev, "can't get clock frequency");
+ return ret;
+ }
+
+ if (mclk != OG01A1B_MCLK) {
+ dev_err(dev, "external clock %d is not supported", mclk);
+ return -EINVAL;
+ }
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -ENXIO;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != OG01A1B_DATA_LANES) {
+ dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequencies defined");
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) {
+ for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) {
+ if (link_freq_menu_items[i] ==
+ bus_cfg.link_frequencies[j])
+ break;
+ }
+
+ if (j == bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequency %lld supported",
+ link_freq_menu_items[i]);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+ }
+
+check_hwcfg_error:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
+static int og01a1b_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct og01a1b *og01a1b = to_og01a1b(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(&client->dev);
+ mutex_destroy(&og01a1b->mutex);
+
+ return 0;
+}
+
+static int og01a1b_probe(struct i2c_client *client)
+{
+ struct og01a1b *og01a1b;
+ int ret;
+
+ ret = og01a1b_check_hwcfg(&client->dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to check HW configuration: %d",
+ ret);
+ return ret;
+ }
+
+ og01a1b = devm_kzalloc(&client->dev, sizeof(*og01a1b), GFP_KERNEL);
+ if (!og01a1b)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&og01a1b->sd, client, &og01a1b_subdev_ops);
+ ret = og01a1b_identify_module(og01a1b);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ return ret;
+ }
+
+ mutex_init(&og01a1b->mutex);
+ og01a1b->cur_mode = &supported_modes[0];
+ ret = og01a1b_init_controls(og01a1b);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ og01a1b->sd.internal_ops = &og01a1b_internal_ops;
+ og01a1b->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ og01a1b->sd.entity.ops = &og01a1b_subdev_entity_ops;
+ og01a1b->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ og01a1b->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&og01a1b->sd.entity, 1, &og01a1b->pad);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor(&og01a1b->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ ret);
+ goto probe_error_media_entity_cleanup;
+ }
+
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+probe_error_media_entity_cleanup:
+ media_entity_cleanup(&og01a1b->sd.entity);
+
+probe_error_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(og01a1b->sd.ctrl_handler);
+ mutex_destroy(&og01a1b->mutex);
+
+ return ret;
+}
+
+static const struct dev_pm_ops og01a1b_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(og01a1b_suspend, og01a1b_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id og01a1b_acpi_ids[] = {
+ {"OVTI01AC"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, og01a1b_acpi_ids);
+#endif
+
+static struct i2c_driver og01a1b_i2c_driver = {
+ .driver = {
+ .name = "og01a1b",
+ .pm = &og01a1b_pm_ops,
+ .acpi_match_table = ACPI_PTR(og01a1b_acpi_ids),
+ },
+ .probe_new = og01a1b_probe,
+ .remove = og01a1b_remove,
+};
+
+module_i2c_driver(og01a1b_i2c_driver);
+
+MODULE_AUTHOR("Shawn Tu <shawnx.tu@intel.com>");
+MODULE_DESCRIPTION("OmniVision OG01A1B sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov08d10.c b/drivers/media/i2c/ov08d10.c
new file mode 100644
index 000000000000..e5ef6466a3ec
--- /dev/null
+++ b/drivers/media/i2c/ov08d10.c
@@ -0,0 +1,1528 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2022 Intel Corporation.
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define OV08D10_SCLK 144000000ULL
+#define OV08D10_XVCLK_19_2 19200000
+#define OV08D10_ROWCLK 36000
+#define OV08D10_DATA_LANES 2
+#define OV08D10_RGB_DEPTH 10
+
+#define OV08D10_REG_PAGE 0xfd
+#define OV08D10_REG_GLOBAL_EFFECTIVE 0x01
+#define OV08D10_REG_CHIP_ID_0 0x00
+#define OV08D10_REG_CHIP_ID_1 0x01
+#define OV08D10_ID_MASK GENMASK(15, 0)
+#define OV08D10_CHIP_ID 0x5608
+
+#define OV08D10_REG_MODE_SELECT 0xa0
+#define OV08D10_MODE_STANDBY 0x00
+#define OV08D10_MODE_STREAMING 0x01
+
+/* vertical-timings from sensor */
+#define OV08D10_REG_VTS_H 0x05
+#define OV08D10_REG_VTS_L 0x06
+#define OV08D10_VTS_MAX 0x7fff
+
+/* Exposure controls from sensor */
+#define OV08D10_REG_EXPOSURE_H 0x02
+#define OV08D10_REG_EXPOSURE_M 0x03
+#define OV08D10_REG_EXPOSURE_L 0x04
+#define OV08D10_EXPOSURE_MIN 6
+#define OV08D10_EXPOSURE_MAX_MARGIN 6
+#define OV08D10_EXPOSURE_STEP 1
+
+/* Analog gain controls from sensor */
+#define OV08D10_REG_ANALOG_GAIN 0x24
+#define OV08D10_ANAL_GAIN_MIN 128
+#define OV08D10_ANAL_GAIN_MAX 2047
+#define OV08D10_ANAL_GAIN_STEP 1
+
+/* Digital gain controls from sensor */
+#define OV08D10_REG_MWB_DGAIN_C 0x21
+#define OV08D10_REG_MWB_DGAIN_F 0x22
+#define OV08D10_DGTL_GAIN_MIN 0
+#define OV08D10_DGTL_GAIN_MAX 4095
+#define OV08D10_DGTL_GAIN_STEP 1
+#define OV08D10_DGTL_GAIN_DEFAULT 1024
+
+/* Test Pattern Control */
+#define OV08D10_REG_TEST_PATTERN 0x12
+#define OV08D10_TEST_PATTERN_ENABLE 0x01
+#define OV08D10_TEST_PATTERN_DISABLE 0x00
+
+/* Flip Mirror Controls from sensor */
+#define OV08D10_REG_FLIP_OPT 0x32
+#define OV08D10_REG_FLIP_MASK 0x3
+
+#define to_ov08d10(_sd) container_of(_sd, struct ov08d10, sd)
+
+struct ov08d10_reg {
+ u8 address;
+ u8 val;
+};
+
+struct ov08d10_reg_list {
+ u32 num_of_regs;
+ const struct ov08d10_reg *regs;
+};
+
+struct ov08d10_link_freq_config {
+ const struct ov08d10_reg_list reg_list;
+};
+
+struct ov08d10_mode {
+ /* Frame width in pixels */
+ u32 width;
+
+ /* Frame height in pixels */
+ u32 height;
+
+ /* Horizontal timing size */
+ u32 hts;
+
+ /* Default vertical timing size */
+ u32 vts_def;
+
+ /* Min vertical timing size */
+ u32 vts_min;
+
+ /* Link frequency needed for this resolution */
+ u32 link_freq_index;
+
+ /* Sensor register settings for this resolution */
+ const struct ov08d10_reg_list reg_list;
+
+ /* Number of data lanes */
+ u8 data_lanes;
+};
+
+/* 3280x2460, 3264x2448 need 720Mbps/lane, 2 lanes */
+static const struct ov08d10_reg mipi_data_rate_720mbps[] = {
+ {0xfd, 0x00},
+ {0x11, 0x2a},
+ {0x14, 0x43},
+ {0x1a, 0x04},
+ {0x1b, 0xe1},
+ {0x1e, 0x13},
+ {0xb7, 0x02}
+};
+
+/* 1632x1224 needs 360Mbps/lane, 2 lanes */
+static const struct ov08d10_reg mipi_data_rate_360mbps[] = {
+ {0xfd, 0x00},
+ {0x1a, 0x04},
+ {0x1b, 0xe1},
+ {0x1d, 0x00},
+ {0x1c, 0x19},
+ {0x11, 0x2a},
+ {0x14, 0x54},
+ {0x1e, 0x13},
+ {0xb7, 0x02}
+};
+
+static const struct ov08d10_reg lane_2_mode_3280x2460[] = {
+ /* 3280x2460 resolution */
+ {0xfd, 0x01},
+ {0x12, 0x00},
+ {0x03, 0x12},
+ {0x04, 0x58},
+ {0x07, 0x05},
+ {0x21, 0x02},
+ {0x24, 0x30},
+ {0x33, 0x03},
+ {0x01, 0x03},
+ {0x19, 0x10},
+ {0x42, 0x55},
+ {0x43, 0x00},
+ {0x47, 0x07},
+ {0x48, 0x08},
+ {0xb2, 0x7f},
+ {0xb3, 0x7b},
+ {0xbd, 0x08},
+ {0xd2, 0x57},
+ {0xd3, 0x10},
+ {0xd4, 0x08},
+ {0xd5, 0x08},
+ {0xd6, 0x06},
+ {0xb1, 0x00},
+ {0xb4, 0x00},
+ {0xb7, 0x0a},
+ {0xbc, 0x44},
+ {0xbf, 0x48},
+ {0xc1, 0x10},
+ {0xc3, 0x24},
+ {0xc8, 0x03},
+ {0xc9, 0xf8},
+ {0xe1, 0x33},
+ {0xe2, 0xbb},
+ {0x51, 0x0c},
+ {0x52, 0x0a},
+ {0x57, 0x8c},
+ {0x59, 0x09},
+ {0x5a, 0x08},
+ {0x5e, 0x10},
+ {0x60, 0x02},
+ {0x6d, 0x5c},
+ {0x76, 0x16},
+ {0x7c, 0x11},
+ {0x90, 0x28},
+ {0x91, 0x16},
+ {0x92, 0x1c},
+ {0x93, 0x24},
+ {0x95, 0x48},
+ {0x9c, 0x06},
+ {0xca, 0x0c},
+ {0xce, 0x0d},
+ {0xfd, 0x01},
+ {0xc0, 0x00},
+ {0xdd, 0x18},
+ {0xde, 0x19},
+ {0xdf, 0x32},
+ {0xe0, 0x70},
+ {0xfd, 0x01},
+ {0xc2, 0x05},
+ {0xd7, 0x88},
+ {0xd8, 0x77},
+ {0xd9, 0x00},
+ {0xfd, 0x07},
+ {0x00, 0xf8},
+ {0x01, 0x2b},
+ {0x05, 0x40},
+ {0x08, 0x06},
+ {0x09, 0x11},
+ {0x28, 0x6f},
+ {0x2a, 0x20},
+ {0x2b, 0x05},
+ {0x5e, 0x10},
+ {0x52, 0x00},
+ {0x53, 0x7c},
+ {0x54, 0x00},
+ {0x55, 0x7c},
+ {0x56, 0x00},
+ {0x57, 0x7c},
+ {0x58, 0x00},
+ {0x59, 0x7c},
+ {0xfd, 0x02},
+ {0x9a, 0x30},
+ {0xa8, 0x02},
+ {0xfd, 0x02},
+ {0xa1, 0x01},
+ {0xa2, 0x09},
+ {0xa3, 0x9c},
+ {0xa5, 0x00},
+ {0xa6, 0x0c},
+ {0xa7, 0xd0},
+ {0xfd, 0x00},
+ {0x24, 0x01},
+ {0xc0, 0x16},
+ {0xc1, 0x08},
+ {0xc2, 0x30},
+ {0x8e, 0x0c},
+ {0x8f, 0xd0},
+ {0x90, 0x09},
+ {0x91, 0x9c},
+ {0xfd, 0x05},
+ {0x04, 0x40},
+ {0x07, 0x00},
+ {0x0d, 0x01},
+ {0x0f, 0x01},
+ {0x10, 0x00},
+ {0x11, 0x00},
+ {0x12, 0x0c},
+ {0x13, 0xcf},
+ {0x14, 0x00},
+ {0x15, 0x00},
+ {0xfd, 0x00},
+ {0x20, 0x0f},
+ {0xe7, 0x03},
+ {0xe7, 0x00}
+};
+
+static const struct ov08d10_reg lane_2_mode_3264x2448[] = {
+ /* 3264x2448 resolution */
+ {0xfd, 0x01},
+ {0x12, 0x00},
+ {0x03, 0x12},
+ {0x04, 0x58},
+ {0x07, 0x05},
+ {0x21, 0x02},
+ {0x24, 0x30},
+ {0x33, 0x03},
+ {0x01, 0x03},
+ {0x19, 0x10},
+ {0x42, 0x55},
+ {0x43, 0x00},
+ {0x47, 0x07},
+ {0x48, 0x08},
+ {0xb2, 0x7f},
+ {0xb3, 0x7b},
+ {0xbd, 0x08},
+ {0xd2, 0x57},
+ {0xd3, 0x10},
+ {0xd4, 0x08},
+ {0xd5, 0x08},
+ {0xd6, 0x06},
+ {0xb1, 0x00},
+ {0xb4, 0x00},
+ {0xb7, 0x0a},
+ {0xbc, 0x44},
+ {0xbf, 0x48},
+ {0xc1, 0x10},
+ {0xc3, 0x24},
+ {0xc8, 0x03},
+ {0xc9, 0xf8},
+ {0xe1, 0x33},
+ {0xe2, 0xbb},
+ {0x51, 0x0c},
+ {0x52, 0x0a},
+ {0x57, 0x8c},
+ {0x59, 0x09},
+ {0x5a, 0x08},
+ {0x5e, 0x10},
+ {0x60, 0x02},
+ {0x6d, 0x5c},
+ {0x76, 0x16},
+ {0x7c, 0x11},
+ {0x90, 0x28},
+ {0x91, 0x16},
+ {0x92, 0x1c},
+ {0x93, 0x24},
+ {0x95, 0x48},
+ {0x9c, 0x06},
+ {0xca, 0x0c},
+ {0xce, 0x0d},
+ {0xfd, 0x01},
+ {0xc0, 0x00},
+ {0xdd, 0x18},
+ {0xde, 0x19},
+ {0xdf, 0x32},
+ {0xe0, 0x70},
+ {0xfd, 0x01},
+ {0xc2, 0x05},
+ {0xd7, 0x88},
+ {0xd8, 0x77},
+ {0xd9, 0x00},
+ {0xfd, 0x07},
+ {0x00, 0xf8},
+ {0x01, 0x2b},
+ {0x05, 0x40},
+ {0x08, 0x06},
+ {0x09, 0x11},
+ {0x28, 0x6f},
+ {0x2a, 0x20},
+ {0x2b, 0x05},
+ {0x5e, 0x10},
+ {0x52, 0x00},
+ {0x53, 0x7c},
+ {0x54, 0x00},
+ {0x55, 0x7c},
+ {0x56, 0x00},
+ {0x57, 0x7c},
+ {0x58, 0x00},
+ {0x59, 0x7c},
+ {0xfd, 0x02},
+ {0x9a, 0x30},
+ {0xa8, 0x02},
+ {0xfd, 0x02},
+ {0xa1, 0x09},
+ {0xa2, 0x09},
+ {0xa3, 0x90},
+ {0xa5, 0x08},
+ {0xa6, 0x0c},
+ {0xa7, 0xc0},
+ {0xfd, 0x00},
+ {0x24, 0x01},
+ {0xc0, 0x16},
+ {0xc1, 0x08},
+ {0xc2, 0x30},
+ {0x8e, 0x0c},
+ {0x8f, 0xc0},
+ {0x90, 0x09},
+ {0x91, 0x90},
+ {0xfd, 0x05},
+ {0x04, 0x40},
+ {0x07, 0x00},
+ {0x0d, 0x01},
+ {0x0f, 0x01},
+ {0x10, 0x00},
+ {0x11, 0x00},
+ {0x12, 0x0c},
+ {0x13, 0xcf},
+ {0x14, 0x00},
+ {0x15, 0x00},
+ {0xfd, 0x00},
+ {0x20, 0x0f},
+ {0xe7, 0x03},
+ {0xe7, 0x00}
+};
+
+static const struct ov08d10_reg lane_2_mode_1632x1224[] = {
+ /* 1632x1224 resolution */
+ {0xfd, 0x01},
+ {0x1a, 0x0a},
+ {0x1b, 0x08},
+ {0x2a, 0x01},
+ {0x2b, 0x9a},
+ {0xfd, 0x01},
+ {0x12, 0x00},
+ {0x03, 0x05},
+ {0x04, 0xe2},
+ {0x07, 0x05},
+ {0x21, 0x02},
+ {0x24, 0x30},
+ {0x33, 0x03},
+ {0x31, 0x06},
+ {0x33, 0x03},
+ {0x01, 0x03},
+ {0x19, 0x10},
+ {0x42, 0x55},
+ {0x43, 0x00},
+ {0x47, 0x07},
+ {0x48, 0x08},
+ {0xb2, 0x7f},
+ {0xb3, 0x7b},
+ {0xbd, 0x08},
+ {0xd2, 0x57},
+ {0xd3, 0x10},
+ {0xd4, 0x08},
+ {0xd5, 0x08},
+ {0xd6, 0x06},
+ {0xb1, 0x00},
+ {0xb4, 0x00},
+ {0xb7, 0x0a},
+ {0xbc, 0x44},
+ {0xbf, 0x48},
+ {0xc1, 0x10},
+ {0xc3, 0x24},
+ {0xc8, 0x03},
+ {0xc9, 0xf8},
+ {0xe1, 0x33},
+ {0xe2, 0xbb},
+ {0x51, 0x0c},
+ {0x52, 0x0a},
+ {0x57, 0x8c},
+ {0x59, 0x09},
+ {0x5a, 0x08},
+ {0x5e, 0x10},
+ {0x60, 0x02},
+ {0x6d, 0x5c},
+ {0x76, 0x16},
+ {0x7c, 0x1a},
+ {0x90, 0x28},
+ {0x91, 0x16},
+ {0x92, 0x1c},
+ {0x93, 0x24},
+ {0x95, 0x48},
+ {0x9c, 0x06},
+ {0xca, 0x0c},
+ {0xce, 0x0d},
+ {0xfd, 0x01},
+ {0xc0, 0x00},
+ {0xdd, 0x18},
+ {0xde, 0x19},
+ {0xdf, 0x32},
+ {0xe0, 0x70},
+ {0xfd, 0x01},
+ {0xc2, 0x05},
+ {0xd7, 0x88},
+ {0xd8, 0x77},
+ {0xd9, 0x00},
+ {0xfd, 0x07},
+ {0x00, 0xf8},
+ {0x01, 0x2b},
+ {0x05, 0x40},
+ {0x08, 0x03},
+ {0x09, 0x08},
+ {0x28, 0x6f},
+ {0x2a, 0x20},
+ {0x2b, 0x05},
+ {0x2c, 0x01},
+ {0x50, 0x02},
+ {0x51, 0x03},
+ {0x5e, 0x00},
+ {0x52, 0x00},
+ {0x53, 0x7c},
+ {0x54, 0x00},
+ {0x55, 0x7c},
+ {0x56, 0x00},
+ {0x57, 0x7c},
+ {0x58, 0x00},
+ {0x59, 0x7c},
+ {0xfd, 0x02},
+ {0x9a, 0x30},
+ {0xa8, 0x02},
+ {0xfd, 0x02},
+ {0xa9, 0x04},
+ {0xaa, 0xd0},
+ {0xab, 0x06},
+ {0xac, 0x68},
+ {0xa1, 0x09},
+ {0xa2, 0x04},
+ {0xa3, 0xc8},
+ {0xa5, 0x04},
+ {0xa6, 0x06},
+ {0xa7, 0x60},
+ {0xfd, 0x05},
+ {0x06, 0x80},
+ {0x18, 0x06},
+ {0x19, 0x68},
+ {0xfd, 0x00},
+ {0x24, 0x01},
+ {0xc0, 0x16},
+ {0xc1, 0x08},
+ {0xc2, 0x30},
+ {0x8e, 0x06},
+ {0x8f, 0x60},
+ {0x90, 0x04},
+ {0x91, 0xc8},
+ {0x93, 0x0e},
+ {0x94, 0x77},
+ {0x95, 0x77},
+ {0x96, 0x10},
+ {0x98, 0x88},
+ {0x9c, 0x1a},
+ {0xfd, 0x05},
+ {0x04, 0x40},
+ {0x07, 0x99},
+ {0x0d, 0x03},
+ {0x0f, 0x03},
+ {0x10, 0x00},
+ {0x11, 0x00},
+ {0x12, 0x0c},
+ {0x13, 0xcf},
+ {0x14, 0x00},
+ {0x15, 0x00},
+ {0xfd, 0x00},
+ {0x20, 0x0f},
+ {0xe7, 0x03},
+ {0xe7, 0x00},
+};
+
+static const char * const ov08d10_test_pattern_menu[] = {
+ "Disabled",
+ "Standard Color Bar",
+};
+
+struct ov08d10 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ struct clk *xvclk;
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *exposure;
+
+ /* Current mode */
+ const struct ov08d10_mode *cur_mode;
+
+ /* To serialize asynchronous callbacks */
+ struct mutex mutex;
+
+ /* Streaming on/off */
+ bool streaming;
+
+ /* Number of data lanes in use */
+ u8 nlanes;
+
+ const struct ov08d10_lane_cfg *priv_lane;
+ u8 modes_size;
+};
+
+struct ov08d10_lane_cfg {
+ const s64 link_freq_menu[2];
+ const struct ov08d10_link_freq_config link_freq_configs[2];
+ const struct ov08d10_mode sp_modes[3];
+};
+
+static const struct ov08d10_lane_cfg lane_cfg_2 = {
+ {
+ 720000000,
+ 360000000,
+ },
+ {{
+ .reg_list = {
+ .num_of_regs =
+ ARRAY_SIZE(mipi_data_rate_720mbps),
+ .regs = mipi_data_rate_720mbps,
+ }
+ },
+ {
+ .reg_list = {
+ .num_of_regs =
+ ARRAY_SIZE(mipi_data_rate_360mbps),
+ .regs = mipi_data_rate_360mbps,
+ }
+ }},
+ {{
+ .width = 3280,
+ .height = 2460,
+ .hts = 1840,
+ .vts_def = 2504,
+ .vts_min = 2504,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(lane_2_mode_3280x2460),
+ .regs = lane_2_mode_3280x2460,
+ },
+ .link_freq_index = 0,
+ .data_lanes = 2,
+ },
+ {
+ .width = 3264,
+ .height = 2448,
+ .hts = 1840,
+ .vts_def = 2504,
+ .vts_min = 2504,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(lane_2_mode_3264x2448),
+ .regs = lane_2_mode_3264x2448,
+ },
+ .link_freq_index = 0,
+ .data_lanes = 2,
+ },
+ {
+ .width = 1632,
+ .height = 1224,
+ .hts = 1912,
+ .vts_def = 3736,
+ .vts_min = 3736,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(lane_2_mode_1632x1224),
+ .regs = lane_2_mode_1632x1224,
+ },
+ .link_freq_index = 1,
+ .data_lanes = 2,
+ }}
+};
+
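+/*
+ * Flipping changes which Bayer sample comes first, so the media bus
+ * code follows the flip controls: the table is indexed
+ * [vflip][hflip], e.g. hflip alone maps SGRBG10 to SRGGB10.
+ */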
+static u32 ov08d10_get_format_code(struct ov08d10 *ov08d10)
+{
+ static const u32 codes[2][2] = {
+ { MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10},
+ { MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10},
+ };
+
+ return codes[ov08d10->vflip->val][ov08d10->hflip->val];
+}
+
+static unsigned int ov08d10_modes_num(const struct ov08d10 *ov08d10)
+{
+ unsigned int i, count = 0;
+
+ for (i = 0; i < ARRAY_SIZE(ov08d10->priv_lane->sp_modes); i++) {
+ if (ov08d10->priv_lane->sp_modes[i].width == 0)
+ break;
+ count++;
+ }
+
+ return count;
+}
+
+static u64 to_rate(const s64 *link_freq_menu,
+ u32 f_index, u8 nlanes)
+{
+ u64 pixel_rate = link_freq_menu[f_index] * 2 * nlanes;
+
+ do_div(pixel_rate, OV08D10_RGB_DEPTH);
+
+ return pixel_rate;
+}
+
+static u64 to_pixels_per_line(const s64 *link_freq_menu, u32 hts,
+ u32 f_index, u8 nlanes)
+{
+ u64 ppl = hts * to_rate(link_freq_menu, f_index, nlanes);
+
+ do_div(ppl, OV08D10_SCLK);
+
+ return ppl;
+}
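+/*
+ * For the two full-resolution modes in lane_cfg_2 these work out to:
+ * rate = 720000000 (link) * 2 (DDR) * 2 lanes / 10 bpp = 288 Mpixel/s,
+ * ppl = hts 1840 * 288 MHz / 144 MHz SCLK = 3680 pixels per line.
+ */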
+
+static int ov08d10_write_reg_list(struct ov08d10 *ov08d10,
+ const struct ov08d10_reg_list *r_list)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov08d10->sd);
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < r_list->num_of_regs; i++) {
+ ret = i2c_smbus_write_byte_data(client, r_list->regs[i].address,
+ r_list->regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "failed to write reg 0x%2.2x. error = %d",
+ r_list->regs[i].address, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
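+/*
+ * The control value is shifted down by 3 before reaching the AGAIN
+ * register, mapping the 128..2047 control range onto register values
+ * 16..255 (e.g. a control value of 256 writes 0x20).
+ */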
+static int ov08d10_update_analog_gain(struct ov08d10 *ov08d10, u32 a_gain)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov08d10->sd);
+ u8 val;
+ int ret;
+
+ val = ((a_gain >> 3) & 0xFF);
+ /* CIS control registers */
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x01);
+ if (ret < 0)
+ return ret;
+
+ /* update AGAIN */
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_ANALOG_GAIN, val);
+ if (ret < 0)
+ return ret;
+
+ return i2c_smbus_write_byte_data(client,
+ OV08D10_REG_GLOBAL_EFFECTIVE, 0x01);
+}
+
+static int ov08d10_update_digital_gain(struct ov08d10 *ov08d10, u32 d_gain)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov08d10->sd);
+ u8 val;
+ int ret;
+
+ d_gain = (d_gain >> 1);
+ /* CIS control registers */
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x01);
+ if (ret < 0)
+ return ret;
+
+ val = ((d_gain >> 8) & 0x3F);
+ /* update DGAIN */
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_MWB_DGAIN_C, val);
+ if (ret < 0)
+ return ret;
+
+ val = d_gain & 0xFF;
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_MWB_DGAIN_F, val);
+ if (ret < 0)
+ return ret;
+
+ return i2c_smbus_write_byte_data(client,
+ OV08D10_REG_GLOBAL_EFFECTIVE, 0x01);
+}
+
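+/*
+ * The requested exposure (in lines) is rescaled before programming:
+ * exp_cal = 66 * OV08D10_ROWCLK / HTS, with HTS read back from page 1
+ * registers 0x37/0x38, and the result is normalised by
+ * (VTS - OV08D10_EXPOSURE_MAX_MARGIN).
+ */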
+static int ov08d10_set_exposure(struct ov08d10 *ov08d10, u32 exposure)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov08d10->sd);
+ u8 val;
+ u8 hts_h, hts_l;
+ u32 hts, cur_vts, exp_cal;
+ int ret;
+
+ cur_vts = ov08d10->cur_mode->vts_def;
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x01);
+ if (ret < 0)
+ return ret;
+
+ hts_h = i2c_smbus_read_byte_data(client, 0x37);
+ hts_l = i2c_smbus_read_byte_data(client, 0x38);
+ hts = ((hts_h << 8) | (hts_l));
+ exp_cal = 66 * OV08D10_ROWCLK / hts;
+ exposure = exposure * exp_cal / (cur_vts - OV08D10_EXPOSURE_MAX_MARGIN);
+ /* CIS control registers */
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x01);
+ if (ret < 0)
+ return ret;
+
+ /* update exposure */
+ val = ((exposure >> 16) & 0xFF);
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_EXPOSURE_H, val);
+ if (ret < 0)
+ return ret;
+
+ val = ((exposure >> 8) & 0xFF);
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_EXPOSURE_M, val);
+ if (ret < 0)
+ return ret;
+
+ val = exposure & 0xFF;
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_EXPOSURE_L, val);
+ if (ret < 0)
+ return ret;
+
+ return i2c_smbus_write_byte_data(client,
+ OV08D10_REG_GLOBAL_EFFECTIVE, 0x01);
+}
+
+static int ov08d10_set_vblank(struct ov08d10 *ov08d10, u32 vblank)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov08d10->sd);
+ u8 val;
+ int ret;
+
+ /* CIS control registers */
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x01);
+ if (ret < 0)
+ return ret;
+
+ val = ((vblank >> 8) & 0xFF);
+ /* update vblank */
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_VTS_H, val);
+ if (ret < 0)
+ return ret;
+
+ val = vblank & 0xFF;
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_VTS_L, val);
+ if (ret < 0)
+ return ret;
+
+ return i2c_smbus_write_byte_data(client,
+ OV08D10_REG_GLOBAL_EFFECTIVE, 0x01);
+}
+
+static int ov08d10_test_pattern(struct ov08d10 *ov08d10, u32 pattern)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov08d10->sd);
+ u8 val;
+ int ret;
+
+ if (pattern)
+ val = OV08D10_TEST_PATTERN_ENABLE;
+ else
+ val = OV08D10_TEST_PATTERN_DISABLE;
+
+ /* CIS control registers */
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x01);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client,
+ OV08D10_REG_TEST_PATTERN, val);
+ if (ret < 0)
+ return ret;
+
+ return i2c_smbus_write_byte_data(client,
+ OV08D10_REG_GLOBAL_EFFECTIVE, 0x01);
+}
+
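+/*
+ * ctrl_val packs hflip into bit 0 and vflip into bit 1 (see the
+ * V4L2_CID_HFLIP/VFLIP case in ov08d10_set_ctrl()); only those two
+ * bits of FLIP_OPT are updated.
+ */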
+static int ov08d10_set_ctrl_flip(struct ov08d10 *ov08d10, u32 ctrl_val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov08d10->sd);
+ u8 val;
+ int ret;
+
+ /* System control registers */
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x01);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(client, OV08D10_REG_FLIP_OPT);
+ if (ret < 0)
+ return ret;
+
+ val = (ret & ~OV08D10_REG_FLIP_MASK) | (ctrl_val & OV08D10_REG_FLIP_MASK);
+
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x01);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_FLIP_OPT, val);
+
+ if (ret < 0)
+ return ret;
+
+ return i2c_smbus_write_byte_data(client,
+ OV08D10_REG_GLOBAL_EFFECTIVE, 0x01);
+}
+
+static int ov08d10_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov08d10 *ov08d10 = container_of(ctrl->handler,
+ struct ov08d10, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&ov08d10->sd);
+ s64 exposure_max;
+ int ret;
+
+ /* Propagate change of current control to all related controls */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = ov08d10->cur_mode->height + ctrl->val -
+ OV08D10_EXPOSURE_MAX_MARGIN;
+ __v4l2_ctrl_modify_range(ov08d10->exposure,
+ ov08d10->exposure->minimum,
+ exposure_max, ov08d10->exposure->step,
+ exposure_max);
+ }
+
+ /* V4L2 controls values will be applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = ov08d10_update_analog_gain(ov08d10, ctrl->val);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = ov08d10_update_digital_gain(ov08d10, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ ret = ov08d10_set_exposure(ov08d10, ctrl->val);
+ break;
+
+ case V4L2_CID_VBLANK:
+ ret = ov08d10_set_vblank(ov08d10, ctrl->val);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov08d10_test_pattern(ov08d10, ctrl->val);
+ break;
+
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+ ret = ov08d10_set_ctrl_flip(ov08d10,
+ ov08d10->hflip->val |
+ ov08d10->vflip->val << 1);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov08d10_ctrl_ops = {
+ .s_ctrl = ov08d10_set_ctrl,
+};
+
+static int ov08d10_init_controls(struct ov08d10 *ov08d10)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ u8 link_freq_size;
+ s64 exposure_max;
+ s64 vblank_def;
+ s64 vblank_min;
+ s64 h_blank;
+ s64 pixel_rate_max;
+ const struct ov08d10_mode *mode;
+ int ret;
+
+ ctrl_hdlr = &ov08d10->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ if (ret)
+ return ret;
+
+ ctrl_hdlr->lock = &ov08d10->mutex;
+ link_freq_size = ARRAY_SIZE(ov08d10->priv_lane->link_freq_menu);
+ ov08d10->link_freq =
+ v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov08d10_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ link_freq_size - 1,
+ 0,
+ ov08d10->priv_lane->link_freq_menu);
+ if (ov08d10->link_freq)
+ ov08d10->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ pixel_rate_max = to_rate(ov08d10->priv_lane->link_freq_menu, 0,
+ ov08d10->cur_mode->data_lanes);
+ ov08d10->pixel_rate =
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov08d10_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0, pixel_rate_max, 1,
+ pixel_rate_max);
+
+ mode = ov08d10->cur_mode;
+ vblank_def = mode->vts_def - mode->height;
+ vblank_min = mode->vts_min - mode->height;
+ ov08d10->vblank =
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov08d10_ctrl_ops,
+ V4L2_CID_VBLANK, vblank_min,
+ OV08D10_VTS_MAX - mode->height, 1,
+ vblank_def);
+
+ h_blank = to_pixels_per_line(ov08d10->priv_lane->link_freq_menu,
+ mode->hts, mode->link_freq_index,
+ mode->data_lanes) -
+ mode->width;
+ ov08d10->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov08d10_ctrl_ops,
+ V4L2_CID_HBLANK, h_blank, h_blank,
+ 1, h_blank);
+ if (ov08d10->hblank)
+ ov08d10->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov08d10_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OV08D10_ANAL_GAIN_MIN, OV08D10_ANAL_GAIN_MAX,
+ OV08D10_ANAL_GAIN_STEP, OV08D10_ANAL_GAIN_MIN);
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov08d10_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ OV08D10_DGTL_GAIN_MIN, OV08D10_DGTL_GAIN_MAX,
+ OV08D10_DGTL_GAIN_STEP, OV08D10_DGTL_GAIN_DEFAULT);
+
+ exposure_max = mode->vts_def - OV08D10_EXPOSURE_MAX_MARGIN;
+ ov08d10->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov08d10_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV08D10_EXPOSURE_MIN,
+ exposure_max,
+ OV08D10_EXPOSURE_STEP,
+ exposure_max);
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov08d10_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov08d10_test_pattern_menu) - 1,
+ 0, 0, ov08d10_test_pattern_menu);
+
+ ov08d10->hflip = v4l2_ctrl_new_std(ctrl_hdlr, &ov08d10_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ ov08d10->vflip = v4l2_ctrl_new_std(ctrl_hdlr, &ov08d10_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ if (ctrl_hdlr->error)
+ return ctrl_hdlr->error;
+
+ ov08d10->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+}
+
+static void ov08d10_update_pad_format(struct ov08d10 *ov08d10,
+ const struct ov08d10_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = ov08d10_get_format_code(ov08d10);
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+static int ov08d10_start_streaming(struct ov08d10 *ov08d10)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov08d10->sd);
+ const struct ov08d10_reg_list *reg_list;
+ int link_freq_index, ret;
+
+ link_freq_index = ov08d10->cur_mode->link_freq_index;
+ reg_list =
+ &ov08d10->priv_lane->link_freq_configs[link_freq_index].reg_list;
+
+ /* soft reset */
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x00);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to reset sensor");
+ return ret;
+ }
+ ret = i2c_smbus_write_byte_data(client, 0x20, 0x0e);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to reset sensor");
+ return ret;
+ }
+ usleep_range(3000, 4000);
+ ret = i2c_smbus_write_byte_data(client, 0x20, 0x0b);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to reset sensor");
+ return ret;
+ }
+
+ /* update sensor setting */
+ ret = ov08d10_write_reg_list(ov08d10, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set plls");
+ return ret;
+ }
+
+ reg_list = &ov08d10->cur_mode->reg_list;
+ ret = ov08d10_write_reg_list(ov08d10, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode");
+ return ret;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(ov08d10->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x00);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_MODE_SELECT,
+ OV08D10_MODE_STREAMING);
+ if (ret < 0)
+ return ret;
+
+ return i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x01);
+}
+
+static void ov08d10_stop_streaming(struct ov08d10 *ov08d10)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov08d10->sd);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x00);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to stop streaming");
+ return;
+ }
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_MODE_SELECT,
+ OV08D10_MODE_STANDBY);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to stop streaming");
+ return;
+ }
+
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x01);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to stop streaming");
+ return;
+ }
+}
+
+static int ov08d10_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov08d10 *ov08d10 = to_ov08d10(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (ov08d10->streaming == enable)
+ return 0;
+
+ mutex_lock(&ov08d10->mutex);
+ if (enable) {
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret < 0) {
+ mutex_unlock(&ov08d10->mutex);
+ return ret;
+ }
+
+ ret = ov08d10_start_streaming(ov08d10);
+ if (ret) {
+ enable = 0;
+ ov08d10_stop_streaming(ov08d10);
+ pm_runtime_put(&client->dev);
+ }
+ } else {
+ ov08d10_stop_streaming(ov08d10);
+ pm_runtime_put(&client->dev);
+ }
+
+ ov08d10->streaming = enable;
+
+ /* vflip and hflip cannot change during streaming */
+ __v4l2_ctrl_grab(ov08d10->vflip, enable);
+ __v4l2_ctrl_grab(ov08d10->hflip, enable);
+
+ mutex_unlock(&ov08d10->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused ov08d10_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov08d10 *ov08d10 = to_ov08d10(sd);
+
+ mutex_lock(&ov08d10->mutex);
+ if (ov08d10->streaming)
+ ov08d10_stop_streaming(ov08d10);
+
+ mutex_unlock(&ov08d10->mutex);
+
+ return 0;
+}
+
+static int __maybe_unused ov08d10_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov08d10 *ov08d10 = to_ov08d10(sd);
+ int ret;
+
+ mutex_lock(&ov08d10->mutex);
+
+ if (ov08d10->streaming) {
+ ret = ov08d10_start_streaming(ov08d10);
+ if (ret) {
+ ov08d10->streaming = false;
+ ov08d10_stop_streaming(ov08d10);
+ mutex_unlock(&ov08d10->mutex);
+ return ret;
+ }
+ }
+
+ mutex_unlock(&ov08d10->mutex);
+
+ return 0;
+}
+
+static int ov08d10_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov08d10 *ov08d10 = to_ov08d10(sd);
+ const struct ov08d10_mode *mode;
+ s32 vblank_def, h_blank;
+ s64 pixel_rate;
+
+ mode = v4l2_find_nearest_size(ov08d10->priv_lane->sp_modes,
+ ov08d10->modes_size,
+ width, height, fmt->format.width,
+ fmt->format.height);
+
+ mutex_lock(&ov08d10->mutex);
+ ov08d10_update_pad_format(ov08d10, mode, &fmt->format);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) =
+ fmt->format;
+ } else {
+ ov08d10->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(ov08d10->link_freq, mode->link_freq_index);
+ pixel_rate = to_rate(ov08d10->priv_lane->link_freq_menu,
+ mode->link_freq_index,
+ ov08d10->cur_mode->data_lanes);
+ __v4l2_ctrl_s_ctrl_int64(ov08d10->pixel_rate, pixel_rate);
+
+ /* Update limits and set FPS to default */
+ vblank_def = mode->vts_def - mode->height;
+ __v4l2_ctrl_modify_range(ov08d10->vblank,
+ mode->vts_min - mode->height,
+ OV08D10_VTS_MAX - mode->height, 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(ov08d10->vblank, vblank_def);
+ h_blank = to_pixels_per_line(ov08d10->priv_lane->link_freq_menu,
+ mode->hts,
+ mode->link_freq_index,
+ ov08d10->cur_mode->data_lanes)
+ - mode->width;
+ __v4l2_ctrl_modify_range(ov08d10->hblank, h_blank, h_blank, 1,
+ h_blank);
+ }
+
+ mutex_unlock(&ov08d10->mutex);
+
+ return 0;
+}
+
+static int ov08d10_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov08d10 *ov08d10 = to_ov08d10(sd);
+
+ mutex_lock(&ov08d10->mutex);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt->format = *v4l2_subdev_get_try_format(&ov08d10->sd,
+ sd_state,
+ fmt->pad);
+ else
+ ov08d10_update_pad_format(ov08d10, ov08d10->cur_mode,
+ &fmt->format);
+
+ mutex_unlock(&ov08d10->mutex);
+
+ return 0;
+}
+
+static int ov08d10_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct ov08d10 *ov08d10 = to_ov08d10(sd);
+
+ if (code->index > 0)
+ return -EINVAL;
+
+ mutex_lock(&ov08d10->mutex);
+ code->code = ov08d10_get_format_code(ov08d10);
+ mutex_unlock(&ov08d10->mutex);
+
+ return 0;
+}
+
+static int ov08d10_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct ov08d10 *ov08d10 = to_ov08d10(sd);
+
+ if (fse->index >= ov08d10->modes_size)
+ return -EINVAL;
+
+ mutex_lock(&ov08d10->mutex);
+ if (fse->code != ov08d10_get_format_code(ov08d10)) {
+ mutex_unlock(&ov08d10->mutex);
+ return -EINVAL;
+ }
+ mutex_unlock(&ov08d10->mutex);
+
+ fse->min_width = ov08d10->priv_lane->sp_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = ov08d10->priv_lane->sp_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int ov08d10_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct ov08d10 *ov08d10 = to_ov08d10(sd);
+
+ mutex_lock(&ov08d10->mutex);
+ ov08d10_update_pad_format(ov08d10, &ov08d10->priv_lane->sp_modes[0],
+ v4l2_subdev_get_try_format(sd, fh->state, 0));
+ mutex_unlock(&ov08d10->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov08d10_video_ops = {
+ .s_stream = ov08d10_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ov08d10_pad_ops = {
+ .set_fmt = ov08d10_set_format,
+ .get_fmt = ov08d10_get_format,
+ .enum_mbus_code = ov08d10_enum_mbus_code,
+ .enum_frame_size = ov08d10_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops ov08d10_subdev_ops = {
+ .video = &ov08d10_video_ops,
+ .pad = &ov08d10_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops ov08d10_internal_ops = {
+ .open = ov08d10_open,
+};
+
+static int ov08d10_identify_module(struct ov08d10 *ov08d10)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov08d10->sd);
+ u32 val;
+ u16 chip_id;
+ int ret;
+
+ /* System control registers */
+ ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x00);
+ if (ret < 0)
+ return ret;
+
+ /* Validate the chip ID */
+ ret = i2c_smbus_read_byte_data(client, OV08D10_REG_CHIP_ID_0);
+ if (ret < 0)
+ return ret;
+
+ val = ret << 8;
+
+ ret = i2c_smbus_read_byte_data(client, OV08D10_REG_CHIP_ID_1);
+ if (ret < 0)
+ return ret;
+
+ chip_id = val | ret;
+
+ if ((chip_id & OV08D10_ID_MASK) != OV08D10_CHIP_ID) {
+ dev_err(&client->dev, "unexpected sensor id(0x%04x)\n",
+ chip_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ov08d10_get_hwcfg(struct ov08d10 *ov08d10, struct device *dev)
+{
+ struct fwnode_handle *ep;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ u32 xvclk_rate;
+ unsigned int i, j;
+ int ret;
+
+ if (!fwnode)
+ return -ENXIO;
+
+ ret = fwnode_property_read_u32(fwnode, "clock-frequency", &xvclk_rate);
+ if (ret)
+ return ret;
+
+ if (xvclk_rate != OV08D10_XVCLK_19_2)
+ dev_warn(dev, "external clock rate %u is unsupported",
+ xvclk_rate);
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -ENXIO;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
+ /* Get number of data lanes */
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2) {
+ dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+
+ dev_dbg(dev, "Using %u data lanes\n", ov08d10->cur_mode->data_lanes);
+
+ ov08d10->priv_lane = &lane_cfg_2;
+ ov08d10->modes_size = ov08d10_modes_num(ov08d10);
+
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequencies defined");
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ov08d10->priv_lane->link_freq_menu); i++) {
+ for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) {
+ if (ov08d10->priv_lane->link_freq_menu[i] ==
+ bus_cfg.link_frequencies[j])
+ break;
+ }
+
+ if (j == bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequency %lld supported",
+ ov08d10->priv_lane->link_freq_menu[i]);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+ }
+
+check_hwcfg_error:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
+static int ov08d10_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov08d10 *ov08d10 = to_ov08d10(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(&client->dev);
+ mutex_destroy(&ov08d10->mutex);
+
+ return 0;
+}
+
+static int ov08d10_probe(struct i2c_client *client)
+{
+ struct ov08d10 *ov08d10;
+ int ret;
+
+ ov08d10 = devm_kzalloc(&client->dev, sizeof(*ov08d10), GFP_KERNEL);
+ if (!ov08d10)
+ return -ENOMEM;
+
+ ret = ov08d10_get_hwcfg(ov08d10, &client->dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to get HW configuration: %d",
+ ret);
+ return ret;
+ }
+
+ v4l2_i2c_subdev_init(&ov08d10->sd, client, &ov08d10_subdev_ops);
+
+ ret = ov08d10_identify_module(ov08d10);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ return ret;
+ }
+
+ mutex_init(&ov08d10->mutex);
+ ov08d10->cur_mode = &ov08d10->priv_lane->sp_modes[0];
+ ret = ov08d10_init_controls(ov08d10);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ov08d10->sd.internal_ops = &ov08d10_internal_ops;
+ ov08d10->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov08d10->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ov08d10->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&ov08d10->sd.entity, 1, &ov08d10->pad);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor(&ov08d10->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ ret);
+ goto probe_error_media_entity_cleanup;
+ }
+
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+probe_error_media_entity_cleanup:
+ media_entity_cleanup(&ov08d10->sd.entity);
+
+probe_error_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(ov08d10->sd.ctrl_handler);
+ mutex_destroy(&ov08d10->mutex);
+
+ return ret;
+}
+
+static const struct dev_pm_ops ov08d10_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ov08d10_suspend, ov08d10_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ov08d10_acpi_ids[] = {
+ { "OVTI08D1" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(acpi, ov08d10_acpi_ids);
+#endif
+
+static struct i2c_driver ov08d10_i2c_driver = {
+ .driver = {
+ .name = "ov08d10",
+ .pm = &ov08d10_pm_ops,
+ .acpi_match_table = ACPI_PTR(ov08d10_acpi_ids),
+ },
+ .probe_new = ov08d10_probe,
+ .remove = ov08d10_remove,
+};
+
+module_i2c_driver(ov08d10_i2c_driver);
+
+MODULE_AUTHOR("Su, Jimmy <jimmy.su@intel.com>");
+MODULE_DESCRIPTION("OmniVision ov08d10 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index bab720c7c1de..d5f0eabf20c6 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -1162,6 +1162,7 @@ static int ov2740_probe(struct i2c_client *client)
if (!ov2740)
return -ENOMEM;
+ v4l2_i2c_subdev_init(&ov2740->sd, client, &ov2740_subdev_ops);
full_power = acpi_dev_state_d0(&client->dev);
if (full_power) {
ret = ov2740_identify_module(ov2740);
@@ -1171,13 +1172,6 @@ static int ov2740_probe(struct i2c_client *client)
}
}
- v4l2_i2c_subdev_init(&ov2740->sd, client, &ov2740_subdev_ops);
- ret = ov2740_identify_module(ov2740);
- if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
- return ret;
- }
-
mutex_init(&ov2740->mutex);
ov2740->cur_mode = &supported_modes[0];
ret = ov2740_init_controls(ov2740);
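
The hunk above moves v4l2_i2c_subdev_init() ahead of the identification step and drops the now-duplicate identify call, so the sensor is only probed on the full-power (ACPI D0) path. The ordering matters because identification recovers the I2C client from the subdev, which only works after init has stored it. A sketch under that assumption (hypothetical helper, placeholder register):

/*
 * v4l2_i2c_subdev_init() installs the client as subdev data; any
 * register access routed through the subdev depends on that.
 */
static int identify(struct v4l2_subdev *sd)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	/* valid only after v4l2_i2c_subdev_init() ran on this subdev */
	return i2c_smbus_read_byte_data(client, 0x00 /* placeholder */);
}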
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index ddbd71394db3..db5a19babe67 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -2293,7 +2293,6 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
struct ov5640_dev *sensor = to_ov5640_dev(sd);
const struct ov5640_mode_info *new_mode;
struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
- struct v4l2_mbus_framefmt *fmt;
int ret;
if (format->pad != 0)
@@ -2311,12 +2310,10 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
if (ret)
goto out;
- if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
- else
- fmt = &sensor->fmt;
-
- *fmt = *mbus_fmt;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, sd_state, 0) = *mbus_fmt;
+ goto out;
+ }
if (new_mode != sensor->current_mode) {
sensor->current_mode = new_mode;
@@ -2325,6 +2322,9 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
if (mbus_fmt->code != sensor->fmt.code)
sensor->pending_fmt_change = true;
+ /* update format even if code is unchanged, resolution might change */
+ sensor->fmt = *mbus_fmt;
+
__v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
ov5640_calc_pixel_rate(sensor));
out:
diff --git a/drivers/media/i2c/ov5648.c b/drivers/media/i2c/ov5648.c
index 947d437ed0ef..930ff6897044 100644
--- a/drivers/media/i2c/ov5648.c
+++ b/drivers/media/i2c/ov5648.c
@@ -639,7 +639,7 @@ struct ov5648_ctrls {
struct v4l2_ctrl *pixel_rate;
struct v4l2_ctrl_handler handler;
-} __packed;
+};
struct ov5648_sensor {
struct device *dev;
@@ -1112,7 +1112,7 @@ static int ov5648_pad_configure(struct ov5648_sensor *sensor)
static int ov5648_mipi_configure(struct ov5648_sensor *sensor)
{
- struct v4l2_fwnode_bus_mipi_csi2 *bus_mipi_csi2 =
+ struct v4l2_mbus_config_mipi_csi2 *bus_mipi_csi2 =
&sensor->endpoint.bus.mipi_csi2;
unsigned int lanes_count = bus_mipi_csi2->num_data_lanes;
int ret;
@@ -1692,7 +1692,7 @@ static int ov5648_state_mipi_configure(struct ov5648_sensor *sensor,
u32 mbus_code)
{
struct ov5648_ctrls *ctrls = &sensor->ctrls;
- struct v4l2_fwnode_bus_mipi_csi2 *bus_mipi_csi2 =
+ struct v4l2_mbus_config_mipi_csi2 *bus_mipi_csi2 =
&sensor->endpoint.bus.mipi_csi2;
unsigned long mipi_clk_rate;
unsigned int bits_per_sample;
@@ -1778,8 +1778,14 @@ static int ov5648_state_configure(struct ov5648_sensor *sensor,
static int ov5648_state_init(struct ov5648_sensor *sensor)
{
- return ov5648_state_configure(sensor, &ov5648_modes[0],
- ov5648_mbus_codes[0]);
+ int ret;
+
+ mutex_lock(&sensor->mutex);
+ ret = ov5648_state_configure(sensor, &ov5648_modes[0],
+ ov5648_mbus_codes[0]);
+ mutex_unlock(&sensor->mutex);
+
+ return ret;
}
/* Sensor Base */
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
index 00925850fa7c..82ba9f56baec 100644
--- a/drivers/media/i2c/ov5675.c
+++ b/drivers/media/i2c/ov5675.c
@@ -50,14 +50,21 @@
#define OV5675_ANAL_GAIN_STEP 1
/* Digital gain controls from sensor */
+#define OV5675_REG_DIGITAL_GAIN 0x350a
#define OV5675_REG_MWB_R_GAIN 0x5019
#define OV5675_REG_MWB_G_GAIN 0x501b
#define OV5675_REG_MWB_B_GAIN 0x501d
-#define OV5675_DGTL_GAIN_MIN 0
+#define OV5675_DGTL_GAIN_MIN 1024
#define OV5675_DGTL_GAIN_MAX 4095
#define OV5675_DGTL_GAIN_STEP 1
#define OV5675_DGTL_GAIN_DEFAULT 1024
+/* Group Access */
+#define OV5675_REG_GROUP_ACCESS 0x3208
+#define OV5675_GROUP_HOLD_START 0x0
+#define OV5675_GROUP_HOLD_END 0x10
+#define OV5675_GROUP_HOLD_LAUNCH 0xa0
+
/* Test Pattern Control */
#define OV5675_REG_TEST_PATTERN 0x4503
#define OV5675_TEST_PATTERN_ENABLE BIT(7)
@@ -587,6 +594,12 @@ static int ov5675_update_digital_gain(struct ov5675 *ov5675, u32 d_gain)
{
int ret;
+ ret = ov5675_write_reg(ov5675, OV5675_REG_GROUP_ACCESS,
+ OV5675_REG_VALUE_08BIT,
+ OV5675_GROUP_HOLD_START);
+ if (ret)
+ return ret;
+
ret = ov5675_write_reg(ov5675, OV5675_REG_MWB_R_GAIN,
OV5675_REG_VALUE_16BIT, d_gain);
if (ret)
@@ -597,8 +610,21 @@ static int ov5675_update_digital_gain(struct ov5675 *ov5675, u32 d_gain)
if (ret)
return ret;
- return ov5675_write_reg(ov5675, OV5675_REG_MWB_B_GAIN,
- OV5675_REG_VALUE_16BIT, d_gain);
+ ret = ov5675_write_reg(ov5675, OV5675_REG_MWB_B_GAIN,
+ OV5675_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ ret = ov5675_write_reg(ov5675, OV5675_REG_GROUP_ACCESS,
+ OV5675_REG_VALUE_08BIT,
+ OV5675_GROUP_HOLD_END);
+ if (ret)
+ return ret;
+
+ ret = ov5675_write_reg(ov5675, OV5675_REG_GROUP_ACCESS,
+ OV5675_REG_VALUE_08BIT,
+ OV5675_GROUP_HOLD_LAUNCH);
+ return ret;
}
static int ov5675_test_pattern(struct ov5675 *ov5675, u32 pattern)
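
The added writes bracket the three white-balance gain registers in a sensor group hold, so all three latch together at a frame boundary instead of mixing old and new gains mid-frame. A sketch of the pattern; the wrapper name is hypothetical, while ov5675_write_reg() and the constants are the driver's own:

/*
 * Writes issued between HOLD_START and HOLD_END are buffered by the
 * sensor and applied as one unit when HOLD_LAUNCH is issued.
 */
static int apply_wb_gain_grouped(struct ov5675 *sensor, u32 gain)
{
	static const u16 regs[] = {
		OV5675_REG_MWB_R_GAIN,
		OV5675_REG_MWB_G_GAIN,
		OV5675_REG_MWB_B_GAIN,
	};
	unsigned int i;
	int ret;

	ret = ov5675_write_reg(sensor, OV5675_REG_GROUP_ACCESS,
			       OV5675_REG_VALUE_08BIT, OV5675_GROUP_HOLD_START);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		ret = ov5675_write_reg(sensor, regs[i],
				       OV5675_REG_VALUE_16BIT, gain);
		if (ret)
			return ret;
	}

	ret = ov5675_write_reg(sensor, OV5675_REG_GROUP_ACCESS,
			       OV5675_REG_VALUE_08BIT, OV5675_GROUP_HOLD_END);
	if (ret)
		return ret;

	/* launch: apply the held group on the next frame boundary */
	return ov5675_write_reg(sensor, OV5675_REG_GROUP_ACCESS,
				OV5675_REG_VALUE_08BIT,
				OV5675_GROUP_HOLD_LAUNCH);
}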
diff --git a/drivers/media/i2c/ov5693.c b/drivers/media/i2c/ov5693.c
index 2784fcf67f3b..117ff5403312 100644
--- a/drivers/media/i2c/ov5693.c
+++ b/drivers/media/i2c/ov5693.c
@@ -950,7 +950,6 @@ static int ov5693_set_fmt(struct v4l2_subdev *sd,
unsigned int width, height;
unsigned int hblank;
int exposure_max;
- int ret = 0;
crop = __ov5693_get_pad_crop(ov5693, state, format->pad, format->which);
@@ -982,13 +981,13 @@ static int ov5693_set_fmt(struct v4l2_subdev *sd,
format->format = *fmt;
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- return ret;
+ return 0;
mutex_lock(&ov5693->lock);
- ov5693->mode.binning_x = hratio > 1 ? true : false;
+ ov5693->mode.binning_x = hratio > 1;
ov5693->mode.inc_x_odd = hratio > 1 ? 3 : 1;
- ov5693->mode.binning_y = vratio > 1 ? true : false;
+ ov5693->mode.binning_y = vratio > 1;
ov5693->mode.inc_y_odd = vratio > 1 ? 3 : 1;
ov5693->mode.vts = __ov5693_calc_vts(fmt->height);
@@ -1012,7 +1011,7 @@ static int ov5693_set_fmt(struct v4l2_subdev *sd,
exposure_max));
mutex_unlock(&ov5693->lock);
- return ret;
+ return 0;
}
static int ov5693_get_selection(struct v4l2_subdev *sd,
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index f67412150b16..6458e96d9091 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -472,9 +472,16 @@ static int ov6650_get_selection(struct v4l2_subdev *sd,
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
+ struct v4l2_rect *rect;
- if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ /* pre-select try crop rectangle */
+ rect = &sd_state->pads->try_crop;
+
+ } else {
+ /* pre-select active crop rectangle */
+ rect = &priv->rect;
+ }
switch (sel->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
@@ -483,14 +490,33 @@ static int ov6650_get_selection(struct v4l2_subdev *sd,
sel->r.width = W_CIF;
sel->r.height = H_CIF;
return 0;
+
case V4L2_SEL_TGT_CROP:
- sel->r = priv->rect;
+ /* use selected crop rectangle */
+ sel->r = *rect;
return 0;
+
default:
return -EINVAL;
}
}
+static bool is_unscaled_ok(int width, int height, struct v4l2_rect *rect)
+{
+ return width > rect->width >> 1 || height > rect->height >> 1;
+}
+
+static void ov6650_bind_align_crop_rectangle(struct v4l2_rect *rect)
+{
+ v4l_bound_align_image(&rect->width, 2, W_CIF, 1,
+ &rect->height, 2, H_CIF, 1, 0);
+ v4l_bound_align_image(&rect->left, DEF_HSTRT << 1,
+ (DEF_HSTRT << 1) + W_CIF - (__s32)rect->width, 1,
+ &rect->top, DEF_VSTRT << 1,
+ (DEF_VSTRT << 1) + H_CIF - (__s32)rect->height,
+ 1, 0);
+}
+
static int ov6650_set_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
@@ -499,18 +525,30 @@ static int ov6650_set_selection(struct v4l2_subdev *sd,
struct ov6650 *priv = to_ov6650(client);
int ret;
- if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
- sel->target != V4L2_SEL_TGT_CROP)
+ if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- v4l_bound_align_image(&sel->r.width, 2, W_CIF, 1,
- &sel->r.height, 2, H_CIF, 1, 0);
- v4l_bound_align_image(&sel->r.left, DEF_HSTRT << 1,
- (DEF_HSTRT << 1) + W_CIF - (__s32)sel->r.width, 1,
- &sel->r.top, DEF_VSTRT << 1,
- (DEF_VSTRT << 1) + H_CIF - (__s32)sel->r.height,
- 1, 0);
+ ov6650_bind_align_crop_rectangle(&sel->r);
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ struct v4l2_rect *crop = &sd_state->pads->try_crop;
+ struct v4l2_mbus_framefmt *mf = &sd_state->pads->try_fmt;
+ /* detect current pad config scaling factor */
+ bool half_scale = !is_unscaled_ok(mf->width, mf->height, crop);
+ /* store new crop rectangle */
+ *crop = sel->r;
+
+ /* adjust frame size */
+ mf->width = crop->width >> half_scale;
+ mf->height = crop->height >> half_scale;
+
+ return 0;
+ }
+
+ /* V4L2_SUBDEV_FORMAT_ACTIVE */
+
+ /* apply new crop rectangle */
ret = ov6650_reg_write(client, REG_HSTRT, sel->r.left >> 1);
if (!ret) {
priv->rect.width += priv->rect.left - sel->r.left;
@@ -562,30 +600,13 @@ static int ov6650_get_fmt(struct v4l2_subdev *sd,
return 0;
}
-static bool is_unscaled_ok(int width, int height, struct v4l2_rect *rect)
-{
- return width > rect->width >> 1 || height > rect->height >> 1;
-}
-
#define to_clkrc(div) ((div) - 1)
/* set the format we will capture in */
-static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
+static int ov6650_s_fmt(struct v4l2_subdev *sd, u32 code, bool half_scale)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
- bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect);
- struct v4l2_subdev_selection sel = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- .target = V4L2_SEL_TGT_CROP,
- .r.left = priv->rect.left + (priv->rect.width >> 1) -
- (mf->width >> (1 - half_scale)),
- .r.top = priv->rect.top + (priv->rect.height >> 1) -
- (mf->height >> (1 - half_scale)),
- .r.width = mf->width << half_scale,
- .r.height = mf->height << half_scale,
- };
- u32 code = mf->code;
u8 coma_set = 0, coma_mask = 0, coml_set, coml_mask;
int ret;
@@ -653,9 +674,7 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
coma_mask |= COMA_QCIF;
}
- ret = ov6650_set_selection(sd, NULL, &sel);
- if (!ret)
- ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask);
+ ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask);
if (!ret) {
priv->half_scale = half_scale;
@@ -674,14 +693,12 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf = &format->format;
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
+ struct v4l2_rect *crop;
+ bool half_scale;
if (format->pad)
return -EINVAL;
- if (is_unscaled_ok(mf->width, mf->height, &priv->rect))
- v4l_bound_align_image(&mf->width, 2, W_CIF, 1,
- &mf->height, 2, H_CIF, 1, 0);
-
switch (mf->code) {
case MEDIA_BUS_FMT_Y10_1X10:
mf->code = MEDIA_BUS_FMT_Y8_1X8;
@@ -699,10 +716,17 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
break;
}
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ crop = &sd_state->pads->try_crop;
+ else
+ crop = &priv->rect;
+
+ half_scale = !is_unscaled_ok(mf->width, mf->height, crop);
+
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- /* store media bus format code and frame size in pad config */
- sd_state->pads->try_fmt.width = mf->width;
- sd_state->pads->try_fmt.height = mf->height;
+ /* store new mbus frame format code and size in pad config */
+ sd_state->pads->try_fmt.width = crop->width >> half_scale;
+ sd_state->pads->try_fmt.height = crop->height >> half_scale;
sd_state->pads->try_fmt.code = mf->code;
/* return default mbus frame format updated with pad config */
@@ -712,9 +736,11 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
mf->code = sd_state->pads->try_fmt.code;
} else {
- /* apply new media bus format code and frame size */
- int ret = ov6650_s_fmt(sd, mf);
+ int ret = 0;
+ /* apply new media bus frame format and scaling if changed */
+ if (mf->code != priv->code || half_scale != priv->half_scale)
+ ret = ov6650_s_fmt(sd, mf->code, half_scale);
if (ret)
return ret;
@@ -738,6 +764,33 @@ static int ov6650_enum_mbus_code(struct v4l2_subdev *sd,
return 0;
}
+static int ov6650_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ int i;
+
+ /* enumerate supported frame intervals not exceeding 1 second */
+ if (fie->index > CLKRC_DIV_MASK ||
+ GET_CLKRC_DIV(fie->index) > FRAME_RATE_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(ov6650_codes); i++)
+ if (fie->code == ov6650_codes[i])
+ break;
+ if (i == ARRAY_SIZE(ov6650_codes))
+ return -EINVAL;
+
+ if (!fie->width || fie->width > W_CIF ||
+ !fie->height || fie->height > H_CIF)
+ return -EINVAL;
+
+ fie->interval.numerator = GET_CLKRC_DIV(fie->index);
+ fie->interval.denominator = FRAME_RATE_MAX;
+
+ return 0;
+}
+
static int ov6650_g_frame_interval(struct v4l2_subdev *sd,
struct v4l2_subdev_frame_interval *ival)
{
@@ -890,9 +943,8 @@ static int ov6650_video_probe(struct v4l2_subdev *sd)
if (!ret)
ret = ov6650_prog_dflt(client, xclk->clkrc);
if (!ret) {
- struct v4l2_mbus_framefmt mf = ov6650_def_fmt;
-
- ret = ov6650_s_fmt(sd, &mf);
+ /* driver default frame format, no scaling */
+ ret = ov6650_s_fmt(sd, ov6650_def_fmt.code, false);
}
if (!ret)
ret = v4l2_ctrl_handler_setup(&priv->hdl);
@@ -932,54 +984,18 @@ static int ov6650_get_mbus_config(struct v4l2_subdev *sd,
if (ret)
return ret;
- cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_DATA_ACTIVE_HIGH
- | ((comj & COMJ_VSYNC_HIGH) ? V4L2_MBUS_VSYNC_ACTIVE_HIGH
- : V4L2_MBUS_VSYNC_ACTIVE_LOW)
- | ((comf & COMF_HREF_LOW) ? V4L2_MBUS_HSYNC_ACTIVE_LOW
- : V4L2_MBUS_HSYNC_ACTIVE_HIGH)
- | ((comj & COMJ_PCLK_RISING) ? V4L2_MBUS_PCLK_SAMPLE_RISING
- : V4L2_MBUS_PCLK_SAMPLE_FALLING);
cfg->type = V4L2_MBUS_PARALLEL;
+ cfg->bus.parallel.flags = V4L2_MBUS_MASTER | V4L2_MBUS_DATA_ACTIVE_HIGH
+ | ((comj & COMJ_VSYNC_HIGH) ? V4L2_MBUS_VSYNC_ACTIVE_HIGH
+ : V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ | ((comf & COMF_HREF_LOW) ? V4L2_MBUS_HSYNC_ACTIVE_LOW
+ : V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ | ((comj & COMJ_PCLK_RISING) ? V4L2_MBUS_PCLK_SAMPLE_RISING
+ : V4L2_MBUS_PCLK_SAMPLE_FALLING);
return 0;
}
-/* Alter bus settings on camera side */
-static int ov6650_set_mbus_config(struct v4l2_subdev *sd,
- unsigned int pad,
- struct v4l2_mbus_config *cfg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret = 0;
-
- if (cfg->flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
- ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_PCLK_RISING, 0);
- else if (cfg->flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
- ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_PCLK_RISING);
- if (ret)
- return ret;
-
- if (cfg->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
- ret = ov6650_reg_rmw(client, REG_COMF, COMF_HREF_LOW, 0);
- else if (cfg->flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
- ret = ov6650_reg_rmw(client, REG_COMF, 0, COMF_HREF_LOW);
- if (ret)
- return ret;
-
- if (cfg->flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
- ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_VSYNC_HIGH, 0);
- else if (cfg->flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
- ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_VSYNC_HIGH);
- if (ret)
- return ret;
-
- /*
- * Update the configuration to report what is actually applied to
- * the hardware.
- */
- return ov6650_get_mbus_config(sd, pad, cfg);
-}
-
static const struct v4l2_subdev_video_ops ov6650_video_ops = {
.s_stream = ov6650_s_stream,
.g_frame_interval = ov6650_g_frame_interval,
@@ -987,13 +1003,13 @@ static const struct v4l2_subdev_video_ops ov6650_video_ops = {
};
static const struct v4l2_subdev_pad_ops ov6650_pad_ops = {
- .enum_mbus_code = ov6650_enum_mbus_code,
- .get_selection = ov6650_get_selection,
- .set_selection = ov6650_set_selection,
- .get_fmt = ov6650_get_fmt,
- .set_fmt = ov6650_set_fmt,
- .get_mbus_config = ov6650_get_mbus_config,
- .set_mbus_config = ov6650_set_mbus_config,
+ .enum_mbus_code = ov6650_enum_mbus_code,
+ .enum_frame_interval = ov6650_enum_frame_interval,
+ .get_selection = ov6650_get_selection,
+ .set_selection = ov6650_set_selection,
+ .get_fmt = ov6650_get_fmt,
+ .set_fmt = ov6650_set_fmt,
+ .get_mbus_config = ov6650_get_mbus_config,
};
static const struct v4l2_subdev_ops ov6650_subdev_ops = {
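
The new enum_frame_interval op reports intervals of GET_CLKRC_DIV(index)/FRAME_RATE_MAX seconds and returns -EINVAL once an interval would exceed one second. Assuming GET_CLKRC_DIV(x) expands to x + 1 (the inverse of to_clkrc() above) and a 30 fps ceiling, the table it produces can be reproduced in plain C:

/*
 * Standalone illustration of the interval math; both the divider
 * formula and FRAME_RATE_MAX == 30 are assumptions here.
 */
#include <stdio.h>

int main(void)
{
	const int frame_rate_max = 30;
	int i;

	/* indices 0..29 cover 1/30 s up to 30/30 s = 1 s */
	for (i = 0; i < frame_rate_max; i++)
		printf("index %2d -> interval %2d/%d s\n",
		       i, i + 1, frame_rate_max);
	return 0;
}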
diff --git a/drivers/media/i2c/ov8865.c b/drivers/media/i2c/ov8865.c
index d9d016cfa9ac..b8f4f0d3e33d 100644
--- a/drivers/media/i2c/ov8865.c
+++ b/drivers/media/i2c/ov8865.c
@@ -457,8 +457,8 @@
#define OV8865_NATIVE_WIDTH 3296
#define OV8865_NATIVE_HEIGHT 2528
-#define OV8865_ACTIVE_START_TOP 32
-#define OV8865_ACTIVE_START_LEFT 80
+#define OV8865_ACTIVE_START_LEFT 16
+#define OV8865_ACTIVE_START_TOP 40
#define OV8865_ACTIVE_WIDTH 3264
#define OV8865_ACTIVE_HEIGHT 2448
@@ -1471,7 +1471,7 @@ static int ov8865_charge_pump_configure(struct ov8865_sensor *sensor)
static int ov8865_mipi_configure(struct ov8865_sensor *sensor)
{
- struct v4l2_fwnode_bus_mipi_csi2 *bus_mipi_csi2 =
+ struct v4l2_mbus_config_mipi_csi2 *bus_mipi_csi2 =
&sensor->endpoint.bus.mipi_csi2;
unsigned int lanes_count = bus_mipi_csi2->num_data_lanes;
int ret;
@@ -2241,7 +2241,7 @@ static int ov8865_state_mipi_configure(struct ov8865_sensor *sensor,
u32 mbus_code)
{
struct ov8865_ctrls *ctrls = &sensor->ctrls;
- struct v4l2_fwnode_bus_mipi_csi2 *bus_mipi_csi2 =
+ struct v4l2_mbus_config_mipi_csi2 *bus_mipi_csi2 =
&sensor->endpoint.bus.mipi_csi2;
unsigned long mipi_clk_rate;
unsigned int bits_per_sample;
@@ -2838,8 +2838,8 @@ static int ov8865_get_selection(struct v4l2_subdev *subdev,
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
mutex_lock(&sensor->mutex);
- __ov8865_get_pad_crop(sensor, state, sel->pad,
- sel->which, &sel->r);
+ __ov8865_get_pad_crop(sensor, state, sel->pad,
+ sel->which, &sel->r);
mutex_unlock(&sensor->mutex);
break;
case V4L2_SEL_TGT_NATIVE_SIZE:
diff --git a/drivers/media/i2c/ov9640.c b/drivers/media/i2c/ov9640.c
index 0bab8c2cf160..9f44ed52d164 100644
--- a/drivers/media/i2c/ov9640.c
+++ b/drivers/media/i2c/ov9640.c
@@ -652,10 +652,12 @@ static int ov9640_get_mbus_config(struct v4l2_subdev *sd,
unsigned int pad,
struct v4l2_mbus_config *cfg)
{
- cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
- V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
- V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
+ cfg->bus.parallel.flags = V4L2_MBUS_PCLK_SAMPLE_RISING |
+ V4L2_MBUS_MASTER |
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_DATA_ACTIVE_HIGH;
return 0;
}
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index a958bbc2c33d..15ff80e6301e 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -1129,7 +1129,7 @@ static void saa711x_set_lcr(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_forma
static int saa711x_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *sliced)
{
- static u16 lcr2vbi[] = {
+ static const u16 lcr2vbi[] = {
0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */
0, V4L2_SLICED_CAPTION_525, /* 4 */
V4L2_SLICED_WSS_625, 0, /* 5 */
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 3205cd8298dd..e18b8947ad7e 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -69,7 +69,7 @@ static const struct v4l2_dv_timings_cap tc358743_timings_cap = {
struct tc358743_state {
struct tc358743_platform_data pdata;
- struct v4l2_fwnode_bus_mipi_csi2 bus;
+ struct v4l2_mbus_config_mipi_csi2 bus;
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler hdl;
@@ -717,7 +717,7 @@ static void tc358743_set_csi(struct v4l2_subdev *sd)
((lanes > 3) ? MASK_D3M_HSTXVREGEN : 0x0));
i2c_wr32(sd, TXOPTIONCNTRL, (state->bus.flags &
- V4L2_MBUS_CSI2_CONTINUOUS_CLOCK) ? MASK_CONTCLKMODE : 0);
+ V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK) ? 0 : MASK_CONTCLKMODE);
i2c_wr32(sd, STARTCNTRL, MASK_START);
i2c_wr32(sd, CSI_START, MASK_STRT);
@@ -1613,24 +1613,8 @@ static int tc358743_get_mbus_config(struct v4l2_subdev *sd,
cfg->type = V4L2_MBUS_CSI2_DPHY;
/* Support for non-continuous CSI-2 clock is missing in the driver */
- cfg->flags = V4L2_MBUS_CSI2_CONTINUOUS_CLOCK;
-
- switch (state->csi_lanes_in_use) {
- case 1:
- cfg->flags |= V4L2_MBUS_CSI2_1_LANE;
- break;
- case 2:
- cfg->flags |= V4L2_MBUS_CSI2_2_LANE;
- break;
- case 3:
- cfg->flags |= V4L2_MBUS_CSI2_3_LANE;
- break;
- case 4:
- cfg->flags |= V4L2_MBUS_CSI2_4_LANE;
- break;
- default:
- return -EINVAL;
- }
+ cfg->bus.mipi_csi2.flags = 0;
+ cfg->bus.mipi_csi2.num_data_lanes = state->csi_lanes_in_use;
return 0;
}
@@ -2055,7 +2039,7 @@ static int tc358743_probe(struct i2c_client *client)
/* platform data */
if (pdata) {
state->pdata = *pdata;
- state->bus.flags = V4L2_MBUS_CSI2_CONTINUOUS_CLOCK;
+ state->bus.flags = 0;
} else {
err = tc358743_probe_of(state);
if (err == -ENODEV)
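
With the mbus config rework, a continuous CSI-2 clock is the default and only the non-continuous request is stored, so the TXOPTIONCNTRL test above is inverted rather than changed in meaning. Equivalence sketch (hypothetical helper name):

/*
 * old: (flags & V4L2_MBUS_CSI2_CONTINUOUS_CLOCK)    ? MASK_CONTCLKMODE : 0
 * new: (flags & V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK) ? 0 : MASK_CONTCLKMODE
 * Both select continuous-clock mode unless non-continuous was requested.
 */
static u32 txoptioncntrl_value(u32 flags)
{
	return (flags & V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK) ?
	       0 : MASK_CONTCLKMODE;
}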
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 4b16ffcaef98..65472438444b 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -1198,8 +1198,10 @@ static int tvp5150_get_mbus_config(struct v4l2_subdev *sd,
struct tvp5150 *decoder = to_tvp5150(sd);
cfg->type = decoder->mbus_type;
- cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING
- | V4L2_MBUS_FIELD_EVEN_LOW | V4L2_MBUS_DATA_ACTIVE_HIGH;
+ cfg->bus.parallel.flags = V4L2_MBUS_MASTER
+ | V4L2_MBUS_PCLK_SAMPLE_RISING
+ | V4L2_MBUS_FIELD_EVEN_LOW
+ | V4L2_MBUS_DATA_ACTIVE_HIGH;
return 0;
}
diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
index b411f9796191..8ab0913d8d82 100644
--- a/drivers/media/mc/mc-entity.c
+++ b/drivers/media/mc/mc-entity.c
@@ -396,20 +396,21 @@ __must_check int __media_pipeline_start(struct media_entity *entity,
struct media_link *link;
int ret;
- if (!pipe->streaming_count++) {
- ret = media_graph_walk_init(&pipe->graph, mdev);
- if (ret)
- goto error_graph_walk_start;
+ if (pipe->streaming_count) {
+ pipe->streaming_count++;
+ return 0;
}
+ ret = media_graph_walk_init(&pipe->graph, mdev);
+ if (ret)
+ return ret;
+
media_graph_walk_start(&pipe->graph, entity);
while ((entity = media_graph_walk_next(graph))) {
DECLARE_BITMAP(active, MEDIA_ENTITY_MAX_PADS);
DECLARE_BITMAP(has_no_links, MEDIA_ENTITY_MAX_PADS);
- entity->stream_count++;
-
if (entity->pipe && entity->pipe != pipe) {
pr_err("Pipe active for %s. Can't start for %s\n",
entity->name,
@@ -418,12 +419,12 @@ __must_check int __media_pipeline_start(struct media_entity *entity,
goto error;
}
- entity->pipe = pipe;
-
/* Already streaming --- no need to check. */
- if (entity->stream_count > 1)
+ if (entity->pipe)
continue;
+ entity->pipe = pipe;
+
if (!entity->ops || !entity->ops->link_validate)
continue;
@@ -479,6 +480,8 @@ __must_check int __media_pipeline_start(struct media_entity *entity,
}
}
+ pipe->streaming_count++;
+
return 0;
error:
@@ -489,24 +492,17 @@ error:
media_graph_walk_start(graph, entity_err);
while ((entity_err = media_graph_walk_next(graph))) {
- /* Sanity check for negative stream_count */
- if (!WARN_ON_ONCE(entity_err->stream_count <= 0)) {
- entity_err->stream_count--;
- if (entity_err->stream_count == 0)
- entity_err->pipe = NULL;
- }
+ entity_err->pipe = NULL;
/*
- * We haven't increased stream_count further than this
- * so we quit here.
+ * We haven't started entities further than this so we quit
+ * here.
*/
if (entity_err == entity)
break;
}
-error_graph_walk_start:
- if (!--pipe->streaming_count)
- media_graph_walk_cleanup(graph);
+ media_graph_walk_cleanup(graph);
return ret;
}
@@ -537,19 +533,15 @@ void __media_pipeline_stop(struct media_entity *entity)
if (WARN_ON(!pipe))
return;
+ if (--pipe->streaming_count)
+ return;
+
media_graph_walk_start(graph, entity);
- while ((entity = media_graph_walk_next(graph))) {
- /* Sanity check for negative stream_count */
- if (!WARN_ON_ONCE(entity->stream_count <= 0)) {
- entity->stream_count--;
- if (entity->stream_count == 0)
- entity->pipe = NULL;
- }
- }
+ while ((entity = media_graph_walk_next(graph)))
+ entity->pipe = NULL;
- if (!--pipe->streaming_count)
- media_graph_walk_cleanup(graph);
+ media_graph_walk_cleanup(graph);
}
EXPORT_SYMBOL_GPL(__media_pipeline_stop);
@@ -834,7 +826,8 @@ int __media_entity_setup_link(struct media_link *link, u32 flags)
sink = link->sink->entity;
if (!(link->flags & MEDIA_LNK_FL_DYNAMIC) &&
- (source->stream_count || sink->stream_count))
+ (media_entity_is_streaming(source) ||
+ media_entity_is_streaming(sink)))
return -EBUSY;
mdev = source->graph_obj.mdev;
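
The last hunk replaces raw stream_count tests with media_entity_is_streaming(), consistent with the rest of the refactor: once the per-entity stream_count is gone, belonging to a pipeline is the streaming indicator. The helper's assumed shape, from the same series rather than this diff:

/*
 * Assumed definition (media-entity.h in this series, not shown here):
 * an entity is streaming iff it is a member of a pipeline.
 */
static inline bool media_entity_is_streaming(const struct media_entity *entity)
{
	return entity->pipe;
}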
diff --git a/drivers/media/mmc/Kconfig b/drivers/media/mmc/Kconfig
index 75aa6de08d53..2f9877bc61e4 100644
--- a/drivers/media/mmc/Kconfig
+++ b/drivers/media/mmc/Kconfig
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
+
source "drivers/media/mmc/siano/Kconfig"
diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig
index 2cd8e328dda9..1224d908713a 100644
--- a/drivers/media/pci/Kconfig
+++ b/drivers/media/pci/Kconfig
@@ -12,61 +12,68 @@ if MEDIA_PCI_SUPPORT
if MEDIA_CAMERA_SUPPORT
comment "Media capture support"
+
source "drivers/media/pci/meye/Kconfig"
source "drivers/media/pci/solo6x10/Kconfig"
source "drivers/media/pci/sta2x11/Kconfig"
source "drivers/media/pci/tw5864/Kconfig"
source "drivers/media/pci/tw68/Kconfig"
source "drivers/media/pci/tw686x/Kconfig"
+
endif
if MEDIA_ANALOG_TV_SUPPORT
comment "Media capture/analog TV support"
+
+source "drivers/media/pci/dt3155/Kconfig"
source "drivers/media/pci/ivtv/Kconfig"
source "drivers/media/pci/saa7146/Kconfig"
-source "drivers/media/pci/dt3155/Kconfig"
+
endif
if MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT
comment "Media capture/analog/hybrid TV support"
+
+source "drivers/media/pci/bt8xx/Kconfig"
+source "drivers/media/pci/cobalt/Kconfig"
source "drivers/media/pci/cx18/Kconfig"
source "drivers/media/pci/cx23885/Kconfig"
source "drivers/media/pci/cx25821/Kconfig"
source "drivers/media/pci/cx88/Kconfig"
-source "drivers/media/pci/bt8xx/Kconfig"
source "drivers/media/pci/saa7134/Kconfig"
source "drivers/media/pci/saa7164/Kconfig"
-source "drivers/media/pci/cobalt/Kconfig"
endif
if MEDIA_DIGITAL_TV_SUPPORT
comment "Media digital TV PCI Adapters"
-source "drivers/media/pci/ttpci/Kconfig"
+
source "drivers/media/pci/b2c2/Kconfig"
-source "drivers/media/pci/pluto2/Kconfig"
+source "drivers/media/pci/ddbridge/Kconfig"
source "drivers/media/pci/dm1105/Kconfig"
-source "drivers/media/pci/pt1/Kconfig"
-source "drivers/media/pci/pt3/Kconfig"
source "drivers/media/pci/mantis/Kconfig"
+source "drivers/media/pci/netup_unidvb/Kconfig"
source "drivers/media/pci/ngene/Kconfig"
-source "drivers/media/pci/ddbridge/Kconfig"
+source "drivers/media/pci/pluto2/Kconfig"
+source "drivers/media/pci/pt1/Kconfig"
+source "drivers/media/pci/pt3/Kconfig"
source "drivers/media/pci/smipcie/Kconfig"
-source "drivers/media/pci/netup_unidvb/Kconfig"
-endif
+source "drivers/media/pci/ttpci/Kconfig"
-source "drivers/media/pci/intel/ipu3/Kconfig"
+endif
config VIDEO_PCI_SKELETON
tristate "Skeleton PCI V4L2 driver"
depends on SAMPLES
depends on MEDIA_TEST_SUPPORT
- depends on PCI && VIDEO_V4L2
+ depends on PCI && VIDEO_DEV
select VIDEOBUF2_MEMOPS
select VIDEOBUF2_DMA_CONTIG
help
Enable build of the skeleton PCI driver, used as a reference
when developing new drivers.
+source "drivers/media/pci/intel/ipu3/Kconfig"
+
endif #MEDIA_PCI_SUPPORT
endif #PCI
diff --git a/drivers/media/pci/Makefile b/drivers/media/pci/Makefile
index 984fa247096d..551169a3e434 100644
--- a/drivers/media/pci/Makefile
+++ b/drivers/media/pci/Makefile
@@ -3,6 +3,8 @@
# Makefile for the kernel multimedia device drivers.
#
+# Please keep it alphabetically sorted by directory
+# (e.g. LC_ALL=C sort Makefile)
obj-y += ttpci/ \
b2c2/ \
pluto2/ \
@@ -17,19 +19,23 @@ obj-y += ttpci/ \
netup_unidvb/ \
intel/
-obj-$(CONFIG_VIDEO_IVTV) += ivtv/
+# Please keep it alphabetically sorted by Kconfig name
+# (e.g. LC_ALL=C sort Makefile)
+
+obj-$(CONFIG_STA2X11_VIP) += sta2x11/
+
+obj-$(CONFIG_VIDEO_BT848) += bt8xx/
+obj-$(CONFIG_VIDEO_COBALT) += cobalt/
obj-$(CONFIG_VIDEO_CX18) += cx18/
obj-$(CONFIG_VIDEO_CX23885) += cx23885/
obj-$(CONFIG_VIDEO_CX25821) += cx25821/
obj-$(CONFIG_VIDEO_CX88) += cx88/
-obj-$(CONFIG_VIDEO_BT848) += bt8xx/
-obj-$(CONFIG_VIDEO_SAA7134) += saa7134/
-obj-$(CONFIG_VIDEO_SAA7164) += saa7164/
-obj-$(CONFIG_VIDEO_TW68) += tw68/
-obj-$(CONFIG_VIDEO_TW686X) += tw686x/
obj-$(CONFIG_VIDEO_DT3155) += dt3155/
+obj-$(CONFIG_VIDEO_IVTV) += ivtv/
obj-$(CONFIG_VIDEO_MEYE) += meye/
-obj-$(CONFIG_STA2X11_VIP) += sta2x11/
+obj-$(CONFIG_VIDEO_SAA7134) += saa7134/
+obj-$(CONFIG_VIDEO_SAA7164) += saa7164/
obj-$(CONFIG_VIDEO_SOLO6X10) += solo6x10/
-obj-$(CONFIG_VIDEO_COBALT) += cobalt/
obj-$(CONFIG_VIDEO_TW5864) += tw5864/
+obj-$(CONFIG_VIDEO_TW686X) += tw686x/
+obj-$(CONFIG_VIDEO_TW68) += tw68/
diff --git a/drivers/media/pci/bt8xx/Kconfig b/drivers/media/pci/bt8xx/Kconfig
index 3f56decbb681..927190281bd5 100644
--- a/drivers/media/pci/bt8xx/Kconfig
+++ b/drivers/media/pci/bt8xx/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_BT848
tristate "BT848 Video For Linux"
- depends on VIDEO_DEV && PCI && I2C && VIDEO_V4L2
+ depends on PCI && I2C && VIDEO_DEV
select I2C_ALGOBIT
select VIDEOBUF_DMA_SG
depends on RC_CORE
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 8cc9bec43688..5ca3d0cc653a 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -3890,7 +3890,7 @@ static int bttv_register_video(struct bttv *btv)
/* video */
vdev_init(btv, &btv->video_dev, &bttv_video_template, "video");
- btv->video_dev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
+ btv->video_dev.device_caps = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
if (btv->tuner_type != TUNER_ABSENT)
btv->video_dev.device_caps |= V4L2_CAP_TUNER;
@@ -3911,7 +3911,7 @@ static int bttv_register_video(struct bttv *btv)
/* vbi */
vdev_init(btv, &btv->vbi_dev, &bttv_video_template, "vbi");
btv->vbi_dev.device_caps = V4L2_CAP_VBI_CAPTURE | V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING | V4L2_CAP_TUNER;
+ V4L2_CAP_STREAMING;
if (btv->tuner_type != TUNER_ABSENT)
btv->vbi_dev.device_caps |= V4L2_CAP_TUNER;
diff --git a/drivers/media/pci/cobalt/Kconfig b/drivers/media/pci/cobalt/Kconfig
index d8d9ea6b09bc..e13e36141199 100644
--- a/drivers/media/pci/cobalt/Kconfig
+++ b/drivers/media/pci/cobalt/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_COBALT
tristate "Cisco Cobalt support"
- depends on VIDEO_V4L2 && I2C
+ depends on VIDEO_DEV && I2C
depends on PCI_MSI && MTD_COMPLEX_MAPPINGS
depends on (GPIOLIB && DRM_I2C_ADV7511=n) || COMPILE_TEST
depends on SND
diff --git a/drivers/media/pci/cx18/Kconfig b/drivers/media/pci/cx18/Kconfig
index 7074a1071302..a4e32fdcfd3d 100644
--- a/drivers/media/pci/cx18/Kconfig
+++ b/drivers/media/pci/cx18/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_CX18
tristate "Conexant cx23418 MPEG encoder support"
- depends on VIDEO_V4L2 && DVB_CORE && PCI && I2C
+ depends on VIDEO_DEV && DVB_CORE && PCI && I2C
select I2C_ALGOBIT
select VIDEOBUF_VMALLOC
depends on RC_CORE
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index 59497ba6bf1f..84260972c343 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -23,7 +23,7 @@
#include "cx18-mailbox.h"
#include "cx18-ioctl.h"
#include "cx18-controls.h"
-#include "tuner-xc2028.h"
+#include "xc2028.h"
#include <linux/dma-mapping.h>
#include <media/tveeprom.h>
@@ -899,7 +899,7 @@ static int cx18_probe(struct pci_dev *pci_dev,
return -ENOMEM;
}
- cx = kzalloc(sizeof(*cx), GFP_ATOMIC);
+ cx = kzalloc(sizeof(*cx), GFP_KERNEL);
if (!cx)
return -ENOMEM;
diff --git a/drivers/media/pci/cx18/cx18-dvb.c b/drivers/media/pci/cx18/cx18-dvb.c
index 4c57a294b9fa..33e5a5b5fab4 100644
--- a/drivers/media/pci/cx18/cx18-dvb.c
+++ b/drivers/media/pci/cx18/cx18-dvb.c
@@ -22,7 +22,7 @@
#include <linux/firmware.h>
#include "mt352.h"
#include "mt352_priv.h"
-#include "tuner-xc2028.h"
+#include "xc2028.h"
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c
index cf7cfda94107..160c8377e352 100644
--- a/drivers/media/pci/cx18/cx18-gpio.c
+++ b/drivers/media/pci/cx18/cx18-gpio.c
@@ -12,7 +12,7 @@
#include "cx18-io.h"
#include "cx18-cards.h"
#include "cx18-gpio.h"
-#include "tuner-xc2028.h"
+#include "xc2028.h"
/********************* GPIO stuffs *********************/
diff --git a/drivers/media/pci/cx18/cx18-queue.h b/drivers/media/pci/cx18/cx18-queue.h
index e0a34bd6539e..26f2097c0496 100644
--- a/drivers/media/pci/cx18/cx18-queue.h
+++ b/drivers/media/pci/cx18/cx18-queue.h
@@ -15,15 +15,15 @@
static inline void cx18_buf_sync_for_cpu(struct cx18_stream *s,
struct cx18_buffer *buf)
{
- pci_dma_sync_single_for_cpu(s->cx->pci_dev, buf->dma_handle,
+ dma_sync_single_for_cpu(&s->cx->pci_dev->dev, buf->dma_handle,
s->buf_size, s->dma);
}
static inline void cx18_buf_sync_for_device(struct cx18_stream *s,
struct cx18_buffer *buf)
{
- pci_dma_sync_single_for_device(s->cx->pci_dev, buf->dma_handle,
- s->buf_size, s->dma);
+ dma_sync_single_for_device(&s->cx->pci_dev->dev, buf->dma_handle,
+ s->buf_size, s->dma);
}
void _cx18_mdl_sync_for_device(struct cx18_stream *s, struct cx18_mdl *mdl);
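
The pci_dma_* calls were thin wrappers over the generic DMA API and were removed upstream, so the conversion above is mechanical: the struct device replaces the struct pci_dev and the PCI_DMA_* constants become enum dma_data_direction values (PCI_DMA_TODEVICE -> DMA_TO_DEVICE, PCI_DMA_NONE -> DMA_NONE). Sketch with a hypothetical helper name:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* sync a streaming-DMA buffer for CPU access via the generic API */
static inline void buf_sync_for_cpu(struct pci_dev *pdev, dma_addr_t handle,
				    size_t size, enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(&pdev->dev, handle, size, dir);
}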
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 0160f909f38c..9244b4320558 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -15,7 +15,7 @@
#include <linux/firmware.h>
#include <misc/altera.h>
-#include "tuner-xc2028.h"
+#include "xc2028.h"
#include "netup-eeprom.h"
#include "netup-init.h"
#include "altera-ci.h"
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 45c2f4afceb8..8fd5b6ef2428 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -28,7 +28,7 @@
#include "xc5000.h"
#include "max2165.h"
#include "tda10048.h"
-#include "tuner-xc2028.h"
+#include "xc2028.h"
#include "tuner-simple.h"
#include "dib7000p.h"
#include "dib0070.h"
diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c
index 19c34e5510ee..d2e84c6457e0 100644
--- a/drivers/media/pci/cx23885/cx23885-input.c
+++ b/drivers/media/pci/cx23885/cx23885-input.c
@@ -55,7 +55,7 @@ static void cx23885_input_process_measurements(struct cx23885_dev *dev,
} while (num != 0);
if (overrun)
- ir_raw_event_reset(kernel_ir->rc);
+ ir_raw_event_overflow(kernel_ir->rc);
else if (handle)
ir_raw_event_handle(kernel_ir->rc);
}
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index a380e0920a21..3d03f5e95786 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -24,7 +24,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
#include "cx23885-ioctl.h"
-#include "tuner-xc2028.h"
+#include "xc2028.h"
#include <media/drv-intf/cx25840.h>
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index 680e1e3fe89b..2c1d5137ac47 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -162,6 +162,9 @@ int cx8802_start_dma(struct cx8802_dev *dev,
cx_write(MO_TS_GPCNTRL, GP_COUNT_CONTROL_RESET);
q->count = 0;
+ /* clear interrupt status register */
+ cx_write(MO_TS_INTSTAT, 0x1f1111);
+
/* enable irqs */
dprintk(1, "setting the interrupt mask\n");
cx_set(MO_PCI_INTMSK, core->pci_irqmask | PCI_INT_TSINT);
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index ce4acf6de6aa..2ff3226a52ec 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -28,7 +28,7 @@
#include <media/i2c/wm8775.h>
#include "cx88-reg.h"
-#include "tuner-xc2028.h"
+#include "xc2028.h"
#include <linux/mutex.h>
diff --git a/drivers/media/pci/dt3155/Kconfig b/drivers/media/pci/dt3155/Kconfig
index a3d24b8a719b..2b76de195aa5 100644
--- a/drivers/media/pci/dt3155/Kconfig
+++ b/drivers/media/pci/dt3155/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_DT3155
tristate "DT3155 frame grabber"
- depends on PCI && VIDEO_DEV && VIDEO_V4L2
+ depends on PCI && VIDEO_DEV
select VIDEOBUF2_DMA_CONTIG
help
Enables dt3155 device driver for the DataTranslation DT3155 frame grabber.
diff --git a/drivers/media/pci/intel/ipu3/Kconfig b/drivers/media/pci/intel/ipu3/Kconfig
index dce8274c81e6..39bd3be0b43d 100644
--- a/drivers/media/pci/intel/ipu3/Kconfig
+++ b/drivers/media/pci/intel/ipu3/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_IPU3_CIO2
tristate "Intel ipu3-cio2 driver"
- depends on VIDEO_V4L2 && PCI
+ depends on VIDEO_DEV && PCI
depends on ACPI || COMPILE_TEST
depends on X86
select MEDIA_CONTROLLER
diff --git a/drivers/media/pci/ivtv/Kconfig b/drivers/media/pci/ivtv/Kconfig
index e70502902b73..9be52101bc4f 100644
--- a/drivers/media/pci/ivtv/Kconfig
+++ b/drivers/media/pci/ivtv/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_IVTV
tristate "Conexant cx23416/cx23415 MPEG encoder/decoder support"
- depends on VIDEO_V4L2 && PCI && I2C
+ depends on VIDEO_DEV && PCI && I2C
select I2C_ALGOBIT
depends on RC_CORE
select VIDEO_TUNER
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 57d4d5485d7a..f5846c22c799 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -57,7 +57,7 @@
#include <linux/dma-mapping.h>
#include <media/tveeprom.h>
#include <media/i2c/saa7115.h>
-#include "tuner-xc2028.h"
+#include "xc2028.h"
#include <uapi/linux/sched/types.h>
/* If you have already X v4l cards, then set this to X. This way
diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h
index 4cf92dee6527..ce3a7ca51736 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.h
+++ b/drivers/media/pci/ivtv/ivtv-driver.h
@@ -330,7 +330,6 @@ struct ivtv_stream {
struct ivtv *itv; /* for ease of use */
const char *name; /* name of the stream */
int type; /* stream type */
- u32 caps; /* V4L2 capabilities */
struct v4l2_fh *fh; /* pointer to the streaming filehandle */
spinlock_t qlock; /* locks access to the queues */
diff --git a/drivers/media/pci/ivtv/ivtv-gpio.c b/drivers/media/pci/ivtv/ivtv-gpio.c
index 856e7ab7f33e..6434c0d03a6d 100644
--- a/drivers/media/pci/ivtv/ivtv-gpio.c
+++ b/drivers/media/pci/ivtv/ivtv-gpio.c
@@ -10,7 +10,7 @@
#include "ivtv-driver.h"
#include "ivtv-cards.h"
#include "ivtv-gpio.h"
-#include "tuner-xc2028.h"
+#include "xc2028.h"
#include <media/tuner.h>
#include <media/v4l2-ctrls.h>
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index 0cdf6b3210c2..fee460e2ca86 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -438,7 +438,7 @@ static int ivtv_g_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_f
struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
struct v4l2_window *winfmt = &fmt->fmt.win;
- if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
+ if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
return -EINVAL;
if (!itv->osd_video_pbase)
return -EINVAL;
@@ -549,7 +549,7 @@ static int ivtv_try_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2
u32 chromakey = fmt->fmt.win.chromakey;
u8 global_alpha = fmt->fmt.win.global_alpha;
- if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
+ if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
return -EINVAL;
if (!itv->osd_video_pbase)
return -EINVAL;
@@ -1383,7 +1383,7 @@ static int ivtv_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *fb)
0,
};
- if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
+ if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
return -ENOTTY;
if (!itv->osd_video_pbase)
return -ENOTTY;
@@ -1450,7 +1450,7 @@ static int ivtv_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffe
struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
struct yuv_playback_info *yi = &itv->yuv_info;
- if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
+ if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
return -ENOTTY;
if (!itv->osd_video_pbase)
return -ENOTTY;
@@ -1470,7 +1470,7 @@ static int ivtv_overlay(struct file *file, void *fh, unsigned int on)
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
- if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
+ if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
return -ENOTTY;
if (!itv->osd_video_pbase)
return -ENOTTY;
diff --git a/drivers/media/pci/ivtv/ivtv-queue.h b/drivers/media/pci/ivtv/ivtv-queue.h
index 586b0bf63c26..983e99642364 100644
--- a/drivers/media/pci/ivtv/ivtv-queue.h
+++ b/drivers/media/pci/ivtv/ivtv-queue.h
@@ -17,20 +17,20 @@
static inline int ivtv_might_use_pio(struct ivtv_stream *s)
{
- return s->dma == PCI_DMA_NONE || (SLICED_VBI_PIO && s->type == IVTV_ENC_STREAM_TYPE_VBI);
+ return s->dma == DMA_NONE || (SLICED_VBI_PIO && s->type == IVTV_ENC_STREAM_TYPE_VBI);
}
static inline int ivtv_use_pio(struct ivtv_stream *s)
{
struct ivtv *itv = s->itv;
- return s->dma == PCI_DMA_NONE ||
+ return s->dma == DMA_NONE ||
(SLICED_VBI_PIO && s->type == IVTV_ENC_STREAM_TYPE_VBI && itv->vbi.sliced_in->service_set);
}
static inline int ivtv_might_use_dma(struct ivtv_stream *s)
{
- return s->dma != PCI_DMA_NONE;
+ return s->dma != DMA_NONE;
}
static inline int ivtv_use_dma(struct ivtv_stream *s)
@@ -41,15 +41,16 @@ static inline int ivtv_use_dma(struct ivtv_stream *s)
static inline void ivtv_buf_sync_for_cpu(struct ivtv_stream *s, struct ivtv_buffer *buf)
{
if (ivtv_use_dma(s))
- pci_dma_sync_single_for_cpu(s->itv->pdev, buf->dma_handle,
- s->buf_size + 256, s->dma);
+ dma_sync_single_for_cpu(&s->itv->pdev->dev, buf->dma_handle,
+ s->buf_size + 256, s->dma);
}
static inline void ivtv_buf_sync_for_device(struct ivtv_stream *s, struct ivtv_buffer *buf)
{
if (ivtv_use_dma(s))
- pci_dma_sync_single_for_device(s->itv->pdev, buf->dma_handle,
- s->buf_size + 256, s->dma);
+ dma_sync_single_for_device(&s->itv->pdev->dev,
+ buf->dma_handle, s->buf_size + 256,
+ s->dma);
}
int ivtv_buf_copy_from_user(struct ivtv_stream *s, struct ivtv_buffer *buf, const char __user *src, int copybytes);
@@ -70,15 +71,17 @@ void ivtv_stream_free(struct ivtv_stream *s);
static inline void ivtv_stream_sync_for_cpu(struct ivtv_stream *s)
{
if (ivtv_use_dma(s))
- pci_dma_sync_single_for_cpu(s->itv->pdev, s->sg_handle,
- sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
+ dma_sync_single_for_cpu(&s->itv->pdev->dev, s->sg_handle,
+ sizeof(struct ivtv_sg_element),
+ DMA_TO_DEVICE);
}
static inline void ivtv_stream_sync_for_device(struct ivtv_stream *s)
{
if (ivtv_use_dma(s))
- pci_dma_sync_single_for_device(s->itv->pdev, s->sg_handle,
- sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
+ dma_sync_single_for_device(&s->itv->pdev->dev, s->sg_handle,
+ sizeof(struct ivtv_sg_element),
+ DMA_TO_DEVICE);
}
#endif
diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
index 6e455948cc77..13d7d55e6594 100644
--- a/drivers/media/pci/ivtv/ivtv-streams.c
+++ b/drivers/media/pci/ivtv/ivtv-streams.c
@@ -176,7 +176,7 @@ static void ivtv_stream_init(struct ivtv *itv, int type)
s->itv = itv;
s->type = type;
s->name = ivtv_stream_info[type].name;
- s->caps = ivtv_stream_info[type].v4l2_caps;
+ s->vdev.device_caps = ivtv_stream_info[type].v4l2_caps;
if (ivtv_stream_info[type].pio)
s->dma = DMA_NONE;
@@ -299,12 +299,9 @@ static int ivtv_reg_dev(struct ivtv *itv, int type)
if (s_mpg->vdev.v4l2_dev)
num = s_mpg->vdev.num + ivtv_stream_info[type].num_offset;
}
- s->vdev.device_caps = s->caps;
- if (itv->osd_video_pbase) {
- itv->streams[IVTV_DEC_STREAM_TYPE_YUV].vdev.device_caps |=
- V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
- itv->streams[IVTV_DEC_STREAM_TYPE_MPG].vdev.device_caps |=
- V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
+ if (itv->osd_video_pbase && (type == IVTV_DEC_STREAM_TYPE_YUV ||
+ type == IVTV_DEC_STREAM_TYPE_MPG)) {
+ s->vdev.device_caps |= V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
itv->v4l2_cap |= V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
}
video_set_drvdata(&s->vdev, s);
diff --git a/drivers/media/pci/ivtv/ivtv-udma.h b/drivers/media/pci/ivtv/ivtv-udma.h
index 0eef104e03b9..12b9426b2db2 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.h
+++ b/drivers/media/pci/ivtv/ivtv-udma.h
@@ -23,14 +23,14 @@ void ivtv_udma_start(struct ivtv *itv);
static inline void ivtv_udma_sync_for_device(struct ivtv *itv)
{
- pci_dma_sync_single_for_device(itv->pdev, itv->udma.SG_handle,
- sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
+ dma_sync_single_for_device(&itv->pdev->dev, itv->udma.SG_handle,
+ sizeof(itv->udma.SGarray), DMA_TO_DEVICE);
}
static inline void ivtv_udma_sync_for_cpu(struct ivtv *itv)
{
- pci_dma_sync_single_for_cpu(itv->pdev, itv->udma.SG_handle,
- sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
+ dma_sync_single_for_cpu(&itv->pdev->dev, itv->udma.SG_handle,
+ sizeof(itv->udma.SGarray), DMA_TO_DEVICE);
}
#endif
diff --git a/drivers/media/pci/meye/Kconfig b/drivers/media/pci/meye/Kconfig
index fed1f4a01817..3e69b66f1a5b 100644
--- a/drivers/media/pci/meye/Kconfig
+++ b/drivers/media/pci/meye/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_MEYE
tristate "Sony Vaio Picturebook Motion Eye Video For Linux"
- depends on PCI && VIDEO_V4L2
+ depends on PCI && VIDEO_DEV
depends on SONY_LAPTOP
depends on X86 || COMPILE_TEST
help
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index fb24d2ed3621..d3cde05a6eba 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -1214,7 +1214,7 @@ static int alsa_device_exit(struct saa7134_dev *dev)
static int saa7134_alsa_init(void)
{
- struct saa7134_dev *dev = NULL;
+ struct saa7134_dev *dev;
saa7134_dmasound_init = alsa_device_init;
saa7134_dmasound_exit = alsa_device_exit;
@@ -1229,7 +1229,7 @@ static int saa7134_alsa_init(void)
alsa_device_init(dev);
}
- if (dev == NULL)
+ if (list_empty(&saa7134_devlist))
pr_info("saa7134 ALSA: no saa7134 cards found\n");
return 0;
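
The dropped dev == NULL test could never fire: after list_for_each_entry() the cursor is not NULL even for an empty list, it points at the bogus container_of() of the list head, so emptiness must be asked of the list itself. Sketch (hypothetical helper name):

#include <linux/list.h>
#include <linux/printk.h>

/* emptiness is a property of the list, not of the iteration cursor */
static void report_if_no_cards(struct list_head *devlist)
{
	if (list_empty(devlist))
		pr_info("saa7134 ALSA: no saa7134 cards found\n");
}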
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index 0d82a4b27d5b..99be59af3560 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -15,7 +15,7 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include "tuner-xc2028.h"
+#include "xc2028.h"
#include <media/v4l2-common.h>
#include <media/tveeprom.h>
#include "tea5767.h"
diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c
index d17a1b15faee..9c6cfef03331 100644
--- a/drivers/media/pci/saa7134/saa7134-dvb.c
+++ b/drivers/media/pci/saa7134/saa7134-dvb.c
@@ -26,7 +26,7 @@
#include "mt352_priv.h" /* FIXME */
#include "tda1004x.h"
#include "nxt200x.h"
-#include "tuner-xc2028.h"
+#include "xc2028.h"
#include "xc5000.h"
#include "tda10086.h"
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 374c8e1087de..48543ad3d595 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -823,7 +823,7 @@ static int buffer_activate(struct saa7134_dev *dev,
{
struct saa7134_dmaqueue *dmaq = buf->vb2.vb2_buf.vb2_queue->drv_priv;
unsigned long base,control,bpl;
- unsigned long bpl_uv,lines_uv,base2,base3,tmp; /* planar */
+ unsigned long bpl_uv, lines_uv, base2, base3; /* planar */
video_dbg("buffer_activate buf=%p\n", buf);
buf->top_seen = 0;
@@ -868,11 +868,8 @@ static int buffer_activate(struct saa7134_dev *dev,
lines_uv = dev->height >> dev->fmt->vshift;
base2 = base + bpl * dev->height;
base3 = base2 + bpl_uv * lines_uv;
- if (dev->fmt->uvswap) {
- tmp = base2;
- base2 = base3;
- base3 = tmp;
- }
+ if (dev->fmt->uvswap)
+ swap(base2, base3);
video_dbg("uv: bpl=%ld lines=%ld base2/3=%ld/%ld\n",
bpl_uv,lines_uv,base2,base3);
if (V4L2_FIELD_HAS_BOTH(dev->field)) {
@@ -1538,8 +1535,6 @@ int saa7134_s_std(struct file *file, void *priv, v4l2_std_id id)
return -EINVAL;
}
- id = tvnorms[i].id;
-
if (!is_empress(file) && fh == dev->overlay_owner) {
spin_lock_irqsave(&dev->slock, flags);
stop_preview(dev);
diff --git a/drivers/media/pci/saa7146/Kconfig b/drivers/media/pci/saa7146/Kconfig
index 8e83cd044075..3bbb68a0ed7b 100644
--- a/drivers/media/pci/saa7146/Kconfig
+++ b/drivers/media/pci/saa7146/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_HEXIUM_GEMINI
tristate "Hexium Gemini frame grabber"
- depends on PCI && VIDEO_V4L2 && I2C
+ depends on PCI && VIDEO_DEV && I2C
select VIDEO_SAA7146_VV
help
This is a video4linux driver for the Hexium Gemini frame
@@ -13,7 +13,7 @@ config VIDEO_HEXIUM_GEMINI
config VIDEO_HEXIUM_ORION
tristate "Hexium HV-PCI6 and Orion frame grabber"
- depends on PCI && VIDEO_V4L2 && I2C
+ depends on PCI && VIDEO_DEV && I2C
select VIDEO_SAA7146_VV
help
This is a video4linux driver for the Hexium HV-PCI6 and
@@ -24,7 +24,7 @@ config VIDEO_HEXIUM_ORION
config VIDEO_MXB
tristate "Siemens-Nixdorf 'Multimedia eXtension Board'"
- depends on PCI && VIDEO_V4L2 && I2C
+ depends on PCI && VIDEO_DEV && I2C
select VIDEO_SAA7146_VV
select VIDEO_TUNER
select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/pci/saa7164/saa7164-cmd.c b/drivers/media/pci/saa7164/saa7164-cmd.c
index a65d810ce212..42bd8e76005b 100644
--- a/drivers/media/pci/saa7164/saa7164-cmd.c
+++ b/drivers/media/pci/saa7164/saa7164-cmd.c
@@ -187,7 +187,6 @@ static int saa7164_cmd_set(struct saa7164_dev *dev, struct tmComResInfo *msg,
mutex_lock(&dev->cmds[msg->id].lock);
size = msg->size;
- idx = 0;
cmds = size / bus->m_wMaxReqSize;
if (size % bus->m_wMaxReqSize == 0)
cmds -= 1;
diff --git a/drivers/media/pci/sta2x11/Kconfig b/drivers/media/pci/sta2x11/Kconfig
index 27bb78513631..a96e170ab04e 100644
--- a/drivers/media/pci/sta2x11/Kconfig
+++ b/drivers/media/pci/sta2x11/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config STA2X11_VIP
tristate "STA2X11 VIP Video For Linux"
- depends on PCI && VIDEO_V4L2 && VIRT_TO_BUS && I2C
+ depends on PCI && VIDEO_DEV && VIRT_TO_BUS && I2C
depends on STA2X11 || COMPILE_TEST
select GPIOLIB if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_ADV7180 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/pci/tw5864/Kconfig b/drivers/media/pci/tw5864/Kconfig
index d376d4ed65b9..111da223efb0 100644
--- a/drivers/media/pci/tw5864/Kconfig
+++ b/drivers/media/pci/tw5864/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_TW5864
tristate "Techwell TW5864 video/audio grabber and encoder"
- depends on VIDEO_DEV && PCI && VIDEO_V4L2
+ depends on VIDEO_DEV && PCI
select VIDEOBUF2_DMA_CONTIG
help
Support for boards based on Techwell TW5864 chip which provides
diff --git a/drivers/media/pci/tw68/Kconfig b/drivers/media/pci/tw68/Kconfig
index af0cb60337bb..ef9c0e886a09 100644
--- a/drivers/media/pci/tw68/Kconfig
+++ b/drivers/media/pci/tw68/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_TW68
tristate "Techwell tw68x Video For Linux"
- depends on VIDEO_DEV && PCI && VIDEO_V4L2
+ depends on VIDEO_DEV && PCI
select VIDEOBUF2_DMA_SG
help
Support for Techwell tw68xx based frame grabber boards.
diff --git a/drivers/media/pci/tw686x/Kconfig b/drivers/media/pci/tw686x/Kconfig
index 631c90868b8b..a4edad6aaf89 100644
--- a/drivers/media/pci/tw686x/Kconfig
+++ b/drivers/media/pci/tw686x/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_TW686X
tristate "Intersil/Techwell TW686x video capture cards"
- depends on PCI && VIDEO_DEV && VIDEO_V4L2 && SND
+ depends on PCI && VIDEO_DEV && SND
select VIDEOBUF2_VMALLOC
select VIDEOBUF2_DMA_CONTIG
select VIDEOBUF2_DMA_SG
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 9fbdba0fd1e7..f1056ceaf5a8 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -3,677 +3,85 @@
# Platform drivers
# Most drivers here are currently for webcam support
-menuconfig V4L_PLATFORM_DRIVERS
- bool "V4L platform devices"
- help
- Say Y here to enable support for platform-specific V4L drivers.
-
-if V4L_PLATFORM_DRIVERS
-
-source "drivers/media/platform/marvell-ccic/Kconfig"
-
-config VIDEO_VIA_CAMERA
- tristate "VIAFB camera controller support"
- depends on FB_VIA && VIDEO_V4L2
- select VIDEOBUF2_DMA_SG
- select VIDEO_OV7670
- help
- Driver support for the integrated camera controller in VIA
- Chrome9 chipsets. Currently only tested on OLPC xo-1.5 systems
- with ov7670 sensors.
-
-#
-# Platform multimedia device configuration
-#
-source "drivers/media/platform/cadence/Kconfig"
-
-source "drivers/media/platform/davinci/Kconfig"
-
-source "drivers/media/platform/omap/Kconfig"
-
-config VIDEO_ASPEED
- tristate "Aspeed AST2400 and AST2500 Video Engine driver"
- depends on VIDEO_V4L2
- select VIDEOBUF2_DMA_CONTIG
- help
- Support for the Aspeed Video Engine (VE) embedded in the Aspeed
- AST2400 and AST2500 SOCs. The VE can capture and compress video data
- from digital or analog sources.
-
-config VIDEO_SH_VOU
- tristate "SuperH VOU video output driver"
- depends on VIDEO_DEV && I2C
- depends on ARCH_SHMOBILE || COMPILE_TEST
- select VIDEOBUF2_DMA_CONTIG
- help
- Support for the Video Output Unit (VOU) on SuperH SoCs.
-
-config VIDEO_VIU
- tristate "Freescale VIU Video Driver"
- depends on VIDEO_V4L2 && (PPC_MPC512x || COMPILE_TEST) && I2C
- select VIDEOBUF_DMA_CONTIG
- default y
- help
- Support for Freescale VIU video driver. This device captures
- video data, or overlays video on DIU frame buffer.
-
- Say Y here if you want to enable VIU device on MPC5121e Rev2+.
- In doubt, say N.
-
-config VIDEO_MUX
- tristate "Video Multiplexer"
- select MULTIPLEXER
- depends on VIDEO_V4L2 && OF
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select REGMAP
- select V4L2_FWNODE
- help
- This driver provides support for N:1 video bus multiplexers.
-
-config VIDEO_OMAP3
- tristate "OMAP 3 Camera support"
- depends on VIDEO_V4L2 && I2C
- depends on (ARCH_OMAP3 && OMAP_IOMMU) || COMPILE_TEST
- depends on COMMON_CLK && OF
- select ARM_DMA_USE_IOMMU if OMAP_IOMMU
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select VIDEOBUF2_DMA_CONTIG
- select MFD_SYSCON
- select V4L2_FWNODE
- help
- Driver for an OMAP 3 camera controller.
-
-config VIDEO_OMAP3_DEBUG
- bool "OMAP 3 Camera debug messages"
- depends on VIDEO_OMAP3
- help
- Enable debug messages on OMAP 3 camera controller driver.
-
-config VIDEO_PXA27x
- tristate "PXA27x Quick Capture Interface driver"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on PXA27x || COMPILE_TEST
- select VIDEOBUF2_DMA_SG
- select SG_SPLIT
- select V4L2_FWNODE
- help
- This is a v4l2 driver for the PXA27x Quick Capture Interface
-
-config VIDEO_QCOM_CAMSS
- tristate "Qualcomm V4L2 Camera Subsystem driver"
- depends on VIDEO_V4L2
- depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select VIDEOBUF2_DMA_SG
- select V4L2_FWNODE
-
-config VIDEO_S3C_CAMIF
- tristate "Samsung S3C24XX/S3C64XX SoC Camera Interface driver"
- depends on VIDEO_V4L2 && I2C && PM
- depends on ARCH_S3C64XX || PLAT_S3C24XX || COMPILE_TEST
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select VIDEOBUF2_DMA_CONTIG
- help
- This is a v4l2 driver for s3c24xx and s3c64xx SoC series camera
- host interface (CAMIF).
-
- To compile this driver as a module, choose M here: the module
- will be called s3c-camif.
-
-config VIDEO_STM32_DCMI
- tristate "STM32 Digital Camera Memory Interface (DCMI) support"
- depends on VIDEO_V4L2 && OF
- depends on ARCH_STM32 || COMPILE_TEST
- select VIDEOBUF2_DMA_CONTIG
- select MEDIA_CONTROLLER
- select V4L2_FWNODE
- help
- This module makes the STM32 Digital Camera Memory Interface (DCMI)
- available as a v4l2 device.
-
- To compile this driver as a module, choose M here: the module
- will be called stm32-dcmi.
-
-config VIDEO_RENESAS_CEU
- tristate "Renesas Capture Engine Unit (CEU) driver"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_SHMOBILE || ARCH_R7S72100 || COMPILE_TEST
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_FWNODE
+menuconfig MEDIA_PLATFORM_DRIVERS
+ bool "Media platform devices"
+ default y
help
- This is a v4l2 driver for the Renesas CEU Interface
+ Say Y here to enable support for platform-specific media drivers.
-config VIDEO_ROCKCHIP_ISP1
- tristate "Rockchip Image Signal Processing v1 Unit driver"
- depends on VIDEO_V4L2 && OF
- depends on ARCH_ROCKCHIP || COMPILE_TEST
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select VIDEOBUF2_DMA_CONTIG
- select VIDEOBUF2_VMALLOC
- select V4L2_FWNODE
- select GENERIC_PHY_MIPI_DPHY
- default n
- help
- Enable this to support the Image Signal Processing (ISP) module
- present in RK3399 SoCs.
-
- To compile this driver as a module, choose M here: the module
- will be called rockchip-isp1.
-
-source "drivers/media/platform/exynos4-is/Kconfig"
-source "drivers/media/platform/am437x/Kconfig"
-source "drivers/media/platform/xilinx/Kconfig"
-source "drivers/media/platform/rcar-vin/Kconfig"
-source "drivers/media/platform/atmel/Kconfig"
-source "drivers/media/platform/sunxi/Kconfig"
+if MEDIA_PLATFORM_DRIVERS
-config VIDEO_TI_CAL
- tristate "TI CAL (Camera Adaptation Layer) driver"
- depends on VIDEO_DEV && VIDEO_V4L2
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- depends on SOC_DRA7XX || ARCH_K3 || COMPILE_TEST
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_FWNODE
+config V4L_PLATFORM_DRIVERS
+ bool "V4L platform devices"
help
- Support for the TI CAL (Camera Adaptation Layer) block
- found on DRA72X SoC.
- In TI Technical Reference Manual this module is referred as
- Camera Interface Subsystem (CAMSS).
-
-if VIDEO_TI_CAL
+ Say Y here to enable support for platform-specific V4L drivers.
-config VIDEO_TI_CAL_MC
- bool "Media Controller centric mode by default"
- default n
+config SDR_PLATFORM_DRIVERS
+ bool "SDR platform devices"
+ depends on MEDIA_SDR_SUPPORT
help
- Enables Media Controller centric mode by default.
-
- If set, CAL driver will start in Media Controller mode by
- default. Note that this behavior can be overridden via
- module parameter 'mc_api'.
-
-endif # VIDEO_TI_CAL
+ Say Y here to enable support for platform-specific SDR drivers.
-config VIDEO_RCAR_ISP
- tristate "R-Car Image Signal Processor (ISP)"
- depends on VIDEO_V4L2 && OF
- depends on ARCH_RENESAS || COMPILE_TEST
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select RESET_CONTROLLER
- select V4L2_FWNODE
+config DVB_PLATFORM_DRIVERS
+ bool "DVB platform devices"
+ depends on MEDIA_DIGITAL_TV_SUPPORT
help
- Support for Renesas R-Car Image Signal Processor (ISP).
- Enable this to support the Renesas R-Car Image Signal
- Processor (ISP).
-
- To compile this driver as a module, choose M here: the
- module will be called rcar-isp.
-
-endif # V4L_PLATFORM_DRIVERS
+ Say Y here to enable support for platform-specific Digital TV drivers.
-menuconfig V4L_MEM2MEM_DRIVERS
+config V4L_MEM2MEM_DRIVERS
bool "Memory-to-memory multimedia devices"
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
help
Say Y here to enable selecting drivers for V4L devices that
use system memory for both source and destination buffers, as opposed
to capture and output drivers, which use memory buffers for just
one of those.
-if V4L_MEM2MEM_DRIVERS
-
-config VIDEO_ALLEGRO_DVT
- tristate "Allegro DVT Video IP Core"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_ZYNQMP || COMPILE_TEST
- select V4L2_MEM2MEM_DEV
- select VIDEOBUF2_DMA_CONTIG
- select REGMAP_MMIO
- help
- Support for the encoder video IP core by Allegro DVT. This core is
- found for example on the Xilinx ZynqMP SoC in the EV family and is
- called VCU in the reference manual.
-
- To compile this driver as a module, choose M here: the module
- will be called allegro.
-
-config VIDEO_CODA
- tristate "Chips&Media Coda multi-standard codec IP"
- depends on VIDEO_DEV && VIDEO_V4L2 && OF && (ARCH_MXC || COMPILE_TEST)
- select SRAM
- select VIDEOBUF2_DMA_CONTIG
- select VIDEOBUF2_VMALLOC
- select V4L2_JPEG_HELPER
- select V4L2_MEM2MEM_DEV
- select GENERIC_ALLOCATOR
- help
- Coda is a range of video codec IPs that supports
- H.264, MPEG-4, and other video formats.
-
-config VIDEO_IMX_VDOA
- def_tristate VIDEO_CODA if SOC_IMX6Q || COMPILE_TEST
-
-config VIDEO_IMX_PXP
- tristate "i.MX Pixel Pipeline (PXP)"
- depends on VIDEO_DEV && VIDEO_V4L2 && (ARCH_MXC || COMPILE_TEST)
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- help
- The i.MX Pixel Pipeline is a memory-to-memory engine for scaling,
- color space conversion, and rotation.
-
-source "drivers/media/platform/imx-jpeg/Kconfig"
-
-config VIDEO_MEDIATEK_JPEG
- tristate "Mediatek JPEG Codec driver"
- depends on MTK_IOMMU_V1 || MTK_IOMMU || COMPILE_TEST
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_MEDIATEK || COMPILE_TEST
- depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- help
- Mediatek jpeg codec driver provides HW capability to decode
- JPEG format
-
- To compile this driver as a module, choose M here: the
- module will be called mtk-jpeg
-
-config VIDEO_MEDIATEK_VPU
- tristate "Mediatek Video Processor Unit"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_MEDIATEK || COMPILE_TEST
- help
- This driver provides downloading VPU firmware and
- communicating with VPU. This driver for hw video
- codec embedded in Mediatek's MT8173 SOCs. It is able
- to handle video decoding/encoding in a range of formats.
-
- To compile this driver as a module, choose M here: the
- module will be called mtk-vpu.
-
-config VIDEO_MEDIATEK_MDP
- tristate "Mediatek MDP driver"
- depends on MTK_IOMMU || COMPILE_TEST
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_MEDIATEK || COMPILE_TEST
- depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- select VIDEO_MEDIATEK_VPU
- help
- It is a v4l2 driver and present in Mediatek MT8173 SoCs.
- The driver supports for scaling and color space conversion.
-
- To compile this driver as a module, choose M here: the
- module will be called mtk-mdp.
-
-config VIDEO_MEDIATEK_VCODEC
- tristate "Mediatek Video Codec driver"
- depends on MTK_IOMMU || COMPILE_TEST
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_MEDIATEK || COMPILE_TEST
- depends on VIDEO_MEDIATEK_VPU || MTK_SCP
- # The two following lines ensure we have the same state ("m" or "y") as
- # our dependencies, to avoid missing symbols during link.
- depends on VIDEO_MEDIATEK_VPU || !VIDEO_MEDIATEK_VPU
- depends on MTK_SCP || !MTK_SCP
- depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- select VIDEO_MEDIATEK_VCODEC_VPU if VIDEO_MEDIATEK_VPU
- select VIDEO_MEDIATEK_VCODEC_SCP if MTK_SCP
- select V4L2_H264
- select MEDIA_CONTROLLER
- select MEDIA_CONTROLLER_REQUEST_API
- help
- Mediatek video codec driver provides HW capability to
- encode and decode in a range of video formats on MT8173
- and MT8183.
-
- Note that support for MT8173 requires VIDEO_MEDIATEK_VPU to
- also be selected. Support for MT8183 depends on MTK_SCP.
-
- To compile this driver as modules, choose M here: the
- modules will be called mtk-vcodec-dec and mtk-vcodec-enc.
-
-config VIDEO_MEDIATEK_VCODEC_VPU
- bool
-
-config VIDEO_MEDIATEK_VCODEC_SCP
- bool
+# Ancillary drivers
config VIDEO_MEM2MEM_DEINTERLACE
tristate "Deinterlace support"
- depends on VIDEO_DEV && VIDEO_V4L2
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
help
Generic deinterlacing V4L2 driver.
-config VIDEO_MESON_GE2D
- tristate "Amlogic 2D Graphic Acceleration Unit"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_MESON || COMPILE_TEST
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- help
- This is a v4l2 driver for Amlogic GE2D 2D graphics accelerator.
- GE2D is a standalone 2D graphic acceleration unit, with color converter,
- image scaling, BitBLT & alpha blending operations.
-
- To compile this driver as a module choose m here.
-
-config VIDEO_SAMSUNG_S5P_G2D
- tristate "Samsung S5P and EXYNOS4 G2D 2d graphics accelerator driver"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- help
- This is a v4l2 driver for Samsung S5P and EXYNOS4 G2D
- 2d graphics accelerator.
-
-config VIDEO_SAMSUNG_S5P_JPEG
- tristate "Samsung S5P/Exynos3250/Exynos4 JPEG codec driver"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- help
- This is a v4l2 driver for Samsung S5P, EXYNOS3250
- and EXYNOS4 JPEG codec
-
-config VIDEO_SAMSUNG_S5P_MFC
- tristate "Samsung S5P MFC Video Codec"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
- select VIDEOBUF2_DMA_CONTIG
- help
- MFC 5.1 and 6.x driver for V4L2
-
-config VIDEO_MX2_EMMAPRP
- tristate "MX2 eMMa-PrP support"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on SOC_IMX27 || COMPILE_TEST
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- help
- MX2X chips have a PrP that can be used to process buffers from
- memory to memory. Operations include resizing and format
- conversion.
-
-config VIDEO_SAMSUNG_EXYNOS_GSC
- tristate "Samsung Exynos G-Scaler driver"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_EXYNOS || COMPILE_TEST
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- help
- This is a v4l2 driver for Samsung EXYNOS5 SoC G-Scaler.
-
-config VIDEO_STI_BDISP
- tristate "STMicroelectronics BDISP 2D blitter driver"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_STI || COMPILE_TEST
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- help
- This v4l2 mem2mem driver is a 2D blitter for STMicroelectronics SoC.
-
-config VIDEO_STI_HVA
- tristate "STMicroelectronics HVA multi-format video encoder V4L2 driver"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_STI || COMPILE_TEST
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- help
- This V4L2 driver enables HVA (Hardware Video Accelerator) multi-format
- video encoder of STMicroelectronics SoC, allowing hardware encoding of
- raw uncompressed formats in various compressed video bitstreams format.
-
- To compile this driver as a module, choose M here:
- the module will be called st-hva.
-
-config VIDEO_STI_HVA_DEBUGFS
- bool "Export STMicroelectronics HVA internals in debugfs"
- depends on VIDEO_STI_HVA
- depends on DEBUG_FS
- help
- Select this to see information about the internal state and the last
- operation of STMicroelectronics HVA multi-format video encoder in
- debugfs.
-
- Choose N unless you know you need this.
-
-config VIDEO_STI_DELTA
- tristate "STMicroelectronics DELTA multi-format video decoder V4L2 driver"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_STI || COMPILE_TEST
- help
- This V4L2 driver enables DELTA multi-format video decoder
- of STMicroelectronics STiH4xx SoC series allowing hardware
- decoding of various compressed video bitstream format in
- raw uncompressed format.
-
- Use this option to see the decoders available for such
- hardware.
-
- Please notice that the driver will only be built if
- at least one of the DELTA decoder below is selected.
-
-if VIDEO_STI_DELTA
-
-config VIDEO_STI_DELTA_MJPEG
- bool "STMicroelectronics DELTA MJPEG support"
- default y
- help
- Enables DELTA MJPEG hardware support.
-
- To compile this driver as a module, choose M here:
- the module will be called st-delta.
-
-config VIDEO_STI_DELTA_DRIVER
- tristate
- depends on VIDEO_STI_DELTA
- depends on VIDEO_STI_DELTA_MJPEG
- default VIDEO_STI_DELTA_MJPEG
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- select RPMSG
-
-endif # VIDEO_STI_DELTA
-
-config VIDEO_STM32_DMA2D
- tristate "STM32 Chrom-Art Accelerator (DMA2D)"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_STM32 || COMPILE_TEST
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- help
- Enables DMA2D hardware support on stm32.
-
- The STM32 DMA2D is a memory-to-memory engine for pixel conversion
- and specialized DMA dedicated to image manipulation.
-
-config VIDEO_RENESAS_FDP1
- tristate "Renesas Fine Display Processor"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_RENESAS || COMPILE_TEST
- depends on (!ARM64 && !VIDEO_RENESAS_FCP) || VIDEO_RENESAS_FCP
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- help
- This is a V4L2 driver for the Renesas Fine Display Processor
- providing colour space conversion, and de-interlacing features.
-
- To compile this driver as a module, choose M here: the module
- will be called rcar_fdp1.
-
-config VIDEO_RENESAS_JPU
- tristate "Renesas JPEG Processing Unit"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_RENESAS || COMPILE_TEST
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- help
- This is a V4L2 driver for the Renesas JPEG Processing Unit.
-
- To compile this driver as a module, choose M here: the module
- will be called rcar_jpu.
-
-config VIDEO_RENESAS_FCP
- tristate "Renesas Frame Compression Processor"
- depends on ARCH_RENESAS || COMPILE_TEST
- depends on OF
- help
- This is a driver for the Renesas Frame Compression Processor (FCP).
- The FCP is a companion module of video processing modules in the
- Renesas R-Car Gen3 and RZ/G2 SoCs. It handles memory access for
- the codec, VSP and FDP modules.
-
- To compile this driver as a module, choose M here: the module
- will be called rcar-fcp.
-
-config VIDEO_RENESAS_VSP1
- tristate "Renesas VSP1 Video Processing Engine"
- depends on VIDEO_V4L2
- depends on ARCH_RENESAS || COMPILE_TEST
- depends on (!ARM64 && !VIDEO_RENESAS_FCP) || VIDEO_RENESAS_FCP
+config VIDEO_MUX
+ tristate "Video Multiplexer"
+ depends on V4L_PLATFORM_DRIVERS
+ select MULTIPLEXER
+ depends on VIDEO_DEV && OF
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
- select VIDEOBUF2_DMA_CONTIG
- select VIDEOBUF2_VMALLOC
- help
- This is a V4L2 driver for the Renesas VSP1 video processing engine.
-
- To compile this driver as a module, choose M here: the module
- will be called vsp1.
-
-config VIDEO_ROCKCHIP_RGA
- tristate "Rockchip Raster 2d Graphic Acceleration Unit"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_ROCKCHIP || COMPILE_TEST
- select VIDEOBUF2_DMA_SG
- select V4L2_MEM2MEM_DEV
- help
- This is a v4l2 driver for Rockchip SOC RGA 2d graphics accelerator.
- Rockchip RGA is a separate 2D raster graphic acceleration unit.
- It accelerates 2D graphics operations, such as point/line drawing,
- image scaling, rotation, BitBLT, alpha blending and image blur/sharpness.
-
- To compile this driver as a module choose m here.
-
-config VIDEO_TI_VPE
- tristate "TI VPE (Video Processing Engine) driver"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on SOC_DRA7XX || COMPILE_TEST
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- select VIDEO_TI_VPDMA
- select VIDEO_TI_SC
- select VIDEO_TI_CSC
- help
- Support for the TI VPE(Video Processing Engine) block
- found on DRA7XX SoC.
-
-config VIDEO_TI_VPE_DEBUG
- bool "VPE debug messages"
- depends on VIDEO_TI_VPE
- help
- Enable debug messages on VPE driver.
-
-config VIDEO_QCOM_VENUS
- tristate "Qualcomm Venus V4L2 encoder/decoder driver"
- depends on VIDEO_DEV && VIDEO_V4L2 && QCOM_SMEM
- depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
- select QCOM_MDT_LOADER if ARCH_QCOM
- select QCOM_SCM
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- help
- This is a V4L2 driver for Qualcomm Venus video accelerator
- hardware. It accelerates encoding and decoding operations
- on various Qualcomm SoCs.
- To compile this driver as a module choose m here.
-
-config VIDEO_SUN8I_DEINTERLACE
- tristate "Allwinner Deinterlace driver"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_SUNXI || COMPILE_TEST
- depends on COMMON_CLK && OF
- depends on PM
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- help
- Support for the Allwinner deinterlace unit with scaling
- capability found on some SoCs, like H3.
- To compile this driver as a module choose m here.
-
-config VIDEO_SUN8I_ROTATE
- tristate "Allwinner DE2 rotation driver"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_SUNXI || COMPILE_TEST
- depends on COMMON_CLK && OF
- depends on PM
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- help
- Support for the Allwinner DE2 rotation unit.
- To compile this driver as a module choose m here.
-
-endif # V4L_MEM2MEM_DRIVERS
-
-# TI VIDEO PORT Helper Modules
-# These will be selected by VPE and VIP
-config VIDEO_TI_VPDMA
- tristate
-
-config VIDEO_TI_SC
- tristate
-
-config VIDEO_TI_CSC
- tristate
-
-menuconfig DVB_PLATFORM_DRIVERS
- bool "DVB platform devices"
- depends on MEDIA_DIGITAL_TV_SUPPORT
- help
- Say Y here to enable support for platform-specific Digital TV drivers.
-
-if DVB_PLATFORM_DRIVERS
-source "drivers/media/platform/sti/c8sectpfe/Kconfig"
-endif #DVB_PLATFORM_DRIVERS
-
-menuconfig SDR_PLATFORM_DRIVERS
- bool "SDR platform devices"
- depends on MEDIA_SDR_SUPPORT
- help
- Say Y here to enable support for platform-specific SDR Drivers.
-
-if SDR_PLATFORM_DRIVERS
-
-config VIDEO_RCAR_DRIF
- tristate "Renesas Digital Radio Interface (DRIF)"
- depends on VIDEO_V4L2
- depends on ARCH_RENESAS || COMPILE_TEST
- select VIDEOBUF2_VMALLOC
- select V4L2_ASYNC
+ select REGMAP
+ select V4L2_FWNODE
help
- Say Y if you want to enable R-Car Gen3 DRIF support. DRIF is Digital
- Radio Interface that interfaces with an RF front end chip. It is a
- receiver of digital data which uses DMA to transfer received data to
- a configured location for an application to use.
+ This driver provides support for N:1 video bus multiplexers.
- To compile this driver as a module, choose M here; the module
- will be called rcar_drif.
+# Platform drivers - Please keep it alphabetically sorted
+source "drivers/media/platform/allegro-dvt/Kconfig"
+source "drivers/media/platform/amlogic/Kconfig"
+source "drivers/media/platform/amphion/Kconfig"
+source "drivers/media/platform/aspeed/Kconfig"
+source "drivers/media/platform/atmel/Kconfig"
+source "drivers/media/platform/cadence/Kconfig"
+source "drivers/media/platform/chips-media/Kconfig"
+source "drivers/media/platform/intel/Kconfig"
+source "drivers/media/platform/marvell/Kconfig"
+source "drivers/media/platform/mediatek/Kconfig"
+source "drivers/media/platform/nvidia/Kconfig"
+source "drivers/media/platform/nxp/Kconfig"
+source "drivers/media/platform/qcom/Kconfig"
+source "drivers/media/platform/renesas/Kconfig"
+source "drivers/media/platform/rockchip/Kconfig"
+source "drivers/media/platform/samsung/Kconfig"
+source "drivers/media/platform/st/Kconfig"
+source "drivers/media/platform/sunxi/Kconfig"
+source "drivers/media/platform/ti/Kconfig"
+source "drivers/media/platform/via/Kconfig"
+source "drivers/media/platform/xilinx/Kconfig"
-endif # SDR_PLATFORM_DRIVERS
+endif # MEDIA_PLATFORM_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 19bcbced7382..a881e97bae95 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -3,88 +3,32 @@
# Makefile for the video capture/playback device drivers.
#
-obj-$(CONFIG_VIDEO_ALLEGRO_DVT) += allegro-dvt/
-obj-$(CONFIG_VIDEO_ASPEED) += aspeed-video.o
-obj-$(CONFIG_VIDEO_CADENCE) += cadence/
-obj-$(CONFIG_VIDEO_VIA_CAMERA) += via-camera.o
-obj-$(CONFIG_VIDEO_CAFE_CCIC) += marvell-ccic/
-obj-$(CONFIG_VIDEO_MMP_CAMERA) += marvell-ccic/
-
-obj-$(CONFIG_VIDEO_OMAP3) += omap3isp/
-obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
-
-obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o
-
-obj-y += ti-vpe/
-
-obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o
-obj-$(CONFIG_VIDEO_CODA) += coda/
-
-obj-$(CONFIG_VIDEO_IMX_PXP) += imx-pxp.o
-obj-$(CONFIG_VIDEO_IMX8_JPEG) += imx-jpeg/
-
+# Place here, alphabetically sorted by directory
+# (e.g. LC_ALL=C sort Makefile)
+obj-y += allegro-dvt/
+obj-y += amlogic/
+obj-y += amphion/
+obj-y += aspeed/
+obj-y += atmel/
+obj-y += cadence/
+obj-y += chips-media/
+obj-y += intel/
+obj-y += marvell/
+obj-y += mediatek/
+obj-y += nvidia/
+obj-y += nxp/
+obj-y += qcom/
+obj-y += renesas/
+obj-y += rockchip/
+obj-y += samsung/
+obj-y += st/
+obj-y += sunxi/
+obj-y += ti/
+obj-y += via/
+obj-y += xilinx/
+
+# Please place here only ancillary drivers that aren't SoC-specific
+# Please keep it alphabetically sorted by Kconfig name
+# (e.g. LC_ALL=C sort Makefile)
obj-$(CONFIG_VIDEO_MEM2MEM_DEINTERLACE) += m2m-deinterlace.o
-
obj-$(CONFIG_VIDEO_MUX) += video-mux.o
-
-obj-$(CONFIG_VIDEO_S3C_CAMIF) += s3c-camif/
-obj-$(CONFIG_VIDEO_SAMSUNG_EXYNOS4_IS) += exynos4-is/
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg/
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc/
-
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_G2D) += s5p-g2d/
-obj-$(CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC) += exynos-gsc/
-
-obj-$(CONFIG_VIDEO_STI_BDISP) += sti/bdisp/
-obj-$(CONFIG_VIDEO_STI_HVA) += sti/hva/
-obj-$(CONFIG_DVB_C8SECTPFE) += sti/c8sectpfe/
-
-obj-$(CONFIG_VIDEO_STI_DELTA) += sti/delta/
-
-obj-y += stm32/
-
-obj-y += davinci/
-
-obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o
-
-obj-$(CONFIG_VIDEO_RCAR_DRIF) += rcar_drif.o
-obj-$(CONFIG_VIDEO_RENESAS_CEU) += renesas-ceu.o
-obj-$(CONFIG_VIDEO_RENESAS_FCP) += rcar-fcp.o
-obj-$(CONFIG_VIDEO_RENESAS_FDP1) += rcar_fdp1.o
-obj-$(CONFIG_VIDEO_RENESAS_JPU) += rcar_jpu.o
-obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1/
-
-obj-$(CONFIG_VIDEO_ROCKCHIP_ISP1) += rockchip/rkisp1/
-obj-$(CONFIG_VIDEO_ROCKCHIP_RGA) += rockchip/rga/
-
-obj-y += omap/
-
-obj-$(CONFIG_VIDEO_AM437X_VPFE) += am437x/
-
-obj-$(CONFIG_VIDEO_XILINX) += xilinx/
-
-obj-$(CONFIG_VIDEO_RCAR_ISP) += rcar-isp.o
-obj-$(CONFIG_VIDEO_RCAR_VIN) += rcar-vin/
-
-obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel/
-obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel/
-obj-$(CONFIG_VIDEO_ATMEL_XISC) += atmel/
-
-obj-$(CONFIG_VIDEO_STM32_DCMI) += stm32/
-obj-$(CONFIG_VIDEO_STM32_DMA2D) += stm32/
-
-obj-$(CONFIG_VIDEO_MEDIATEK_VPU) += mtk-vpu/
-
-obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec/
-
-obj-$(CONFIG_VIDEO_MEDIATEK_MDP) += mtk-mdp/
-
-obj-$(CONFIG_VIDEO_MEDIATEK_JPEG) += mtk-jpeg/
-
-obj-$(CONFIG_VIDEO_QCOM_CAMSS) += qcom/camss/
-
-obj-$(CONFIG_VIDEO_QCOM_VENUS) += qcom/venus/
-
-obj-y += sunxi/
-
-obj-$(CONFIG_VIDEO_MESON_GE2D) += meson/ge2d/
diff --git a/drivers/media/platform/allegro-dvt/Kconfig b/drivers/media/platform/allegro-dvt/Kconfig
new file mode 100644
index 000000000000..2182e1277568
--- /dev/null
+++ b/drivers/media/platform/allegro-dvt/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Allegro DVT media platform drivers"
+
+config VIDEO_ALLEGRO_DVT
+ tristate "Allegro DVT Video IP Core"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ select V4L2_MEM2MEM_DEV
+ select VIDEOBUF2_DMA_CONTIG
+ select REGMAP_MMIO
+ help
+ Support for the encoder video IP core by Allegro DVT. This core is
+ found for example on the Xilinx ZynqMP SoC in the EV family and is
+ called VCU in the reference manual.
+
+ To compile this driver as a module, choose M here: the module
+ will be called allegro.
diff --git a/drivers/media/platform/amlogic/Kconfig b/drivers/media/platform/amlogic/Kconfig
new file mode 100644
index 000000000000..5014957404e9
--- /dev/null
+++ b/drivers/media/platform/amlogic/Kconfig
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Amlogic media platform drivers"
+
+source "drivers/media/platform/amlogic/meson-ge2d/Kconfig"
diff --git a/drivers/media/platform/amlogic/Makefile b/drivers/media/platform/amlogic/Makefile
new file mode 100644
index 000000000000..d3cdb8fa4ddb
--- /dev/null
+++ b/drivers/media/platform/amlogic/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += meson-ge2d/
diff --git a/drivers/media/platform/amlogic/meson-ge2d/Kconfig b/drivers/media/platform/amlogic/meson-ge2d/Kconfig
new file mode 100644
index 000000000000..312c4169e3c2
--- /dev/null
+++ b/drivers/media/platform/amlogic/meson-ge2d/Kconfig
@@ -0,0 +1,14 @@
+config VIDEO_MESON_GE2D
+ tristate "Amlogic 2D Graphic Acceleration Unit"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_MESON || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ This is a v4l2 driver for Amlogic GE2D 2D graphics accelerator.
+ GE2D is a standalone 2D graphics acceleration unit with color
+ conversion, image scaling, BitBLT and alpha blending operations.
+
+ To compile this driver as a module choose m here.
+
diff --git a/drivers/media/platform/meson/ge2d/Makefile b/drivers/media/platform/amlogic/meson-ge2d/Makefile
index 450586df27d7..450586df27d7 100644
--- a/drivers/media/platform/meson/ge2d/Makefile
+++ b/drivers/media/platform/amlogic/meson-ge2d/Makefile
diff --git a/drivers/media/platform/meson/ge2d/ge2d-regs.h b/drivers/media/platform/amlogic/meson-ge2d/ge2d-regs.h
index 2a76dd4c0ccb..2a76dd4c0ccb 100644
--- a/drivers/media/platform/meson/ge2d/ge2d-regs.h
+++ b/drivers/media/platform/amlogic/meson-ge2d/ge2d-regs.h
diff --git a/drivers/media/platform/meson/ge2d/ge2d.c b/drivers/media/platform/amlogic/meson-ge2d/ge2d.c
index ccda18e5a377..5e7b319f300d 100644
--- a/drivers/media/platform/meson/ge2d/ge2d.c
+++ b/drivers/media/platform/amlogic/meson-ge2d/ge2d.c
@@ -215,35 +215,35 @@ static void ge2d_hw_start(struct meson_ge2d *ge2d)
regmap_write(ge2d->map, GE2D_SRC1_CLIPY_START_END,
FIELD_PREP(GE2D_START, ctx->in.crop.top) |
- FIELD_PREP(GE2D_END, ctx->in.crop.top + ctx->in.crop.height));
+ FIELD_PREP(GE2D_END, ctx->in.crop.top + ctx->in.crop.height - 1));
regmap_write(ge2d->map, GE2D_SRC1_CLIPX_START_END,
FIELD_PREP(GE2D_START, ctx->in.crop.left) |
- FIELD_PREP(GE2D_END, ctx->in.crop.left + ctx->in.crop.width));
+ FIELD_PREP(GE2D_END, ctx->in.crop.left + ctx->in.crop.width - 1));
regmap_write(ge2d->map, GE2D_SRC2_CLIPY_START_END,
FIELD_PREP(GE2D_START, ctx->out.crop.top) |
- FIELD_PREP(GE2D_END, ctx->out.crop.top + ctx->out.crop.height));
+ FIELD_PREP(GE2D_END, ctx->out.crop.top + ctx->out.crop.height - 1));
regmap_write(ge2d->map, GE2D_SRC2_CLIPX_START_END,
FIELD_PREP(GE2D_START, ctx->out.crop.left) |
- FIELD_PREP(GE2D_END, ctx->out.crop.left + ctx->out.crop.width));
+ FIELD_PREP(GE2D_END, ctx->out.crop.left + ctx->out.crop.width - 1));
regmap_write(ge2d->map, GE2D_DST_CLIPY_START_END,
FIELD_PREP(GE2D_START, ctx->out.crop.top) |
- FIELD_PREP(GE2D_END, ctx->out.crop.top + ctx->out.crop.height));
+ FIELD_PREP(GE2D_END, ctx->out.crop.top + ctx->out.crop.height - 1));
regmap_write(ge2d->map, GE2D_DST_CLIPX_START_END,
FIELD_PREP(GE2D_START, ctx->out.crop.left) |
- FIELD_PREP(GE2D_END, ctx->out.crop.left + ctx->out.crop.width));
+ FIELD_PREP(GE2D_END, ctx->out.crop.left + ctx->out.crop.width - 1));
regmap_write(ge2d->map, GE2D_SRC1_Y_START_END,
- FIELD_PREP(GE2D_END, ctx->in.pix_fmt.height));
+ FIELD_PREP(GE2D_END, ctx->in.pix_fmt.height - 1));
regmap_write(ge2d->map, GE2D_SRC1_X_START_END,
- FIELD_PREP(GE2D_END, ctx->in.pix_fmt.width));
+ FIELD_PREP(GE2D_END, ctx->in.pix_fmt.width - 1));
regmap_write(ge2d->map, GE2D_SRC2_Y_START_END,
- FIELD_PREP(GE2D_END, ctx->out.pix_fmt.height));
+ FIELD_PREP(GE2D_END, ctx->out.pix_fmt.height - 1));
regmap_write(ge2d->map, GE2D_SRC2_X_START_END,
- FIELD_PREP(GE2D_END, ctx->out.pix_fmt.width));
+ FIELD_PREP(GE2D_END, ctx->out.pix_fmt.width - 1));
regmap_write(ge2d->map, GE2D_DST_Y_START_END,
- FIELD_PREP(GE2D_END, ctx->out.pix_fmt.height));
+ FIELD_PREP(GE2D_END, ctx->out.pix_fmt.height - 1));
regmap_write(ge2d->map, GE2D_DST_X_START_END,
- FIELD_PREP(GE2D_END, ctx->out.pix_fmt.width));
+ FIELD_PREP(GE2D_END, ctx->out.pix_fmt.width - 1));
/* Color, no blend, use source color */
reg = GE2D_ALU_DO_COLOR_OPERATION_LOGIC(LOGIC_OPERATION_COPY,
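The ge2d.c hunk above fixes an off-by-one: the GE2D start/end registers
take inclusive coordinates, so a span of width w starting at x covers
pixels x through x + w - 1, and the END field must be programmed with
x + w - 1. A sketch of the FIELD_PREP() pattern with hypothetical field
masks (the driver's real masks are defined in ge2d-regs.h):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_START	GENMASK(31, 16)	/* hypothetical register layout */
#define DEMO_END	GENMASK(15, 0)

static u32 demo_span_word(u32 left, u32 width)
{
	/* END names the last pixel inside the span, not one past it. */
	return FIELD_PREP(DEMO_START, left) |
	       FIELD_PREP(DEMO_END, left + width - 1);
}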
diff --git a/drivers/media/platform/amphion/Kconfig b/drivers/media/platform/amphion/Kconfig
new file mode 100644
index 000000000000..4a363e07ccc9
--- /dev/null
+++ b/drivers/media/platform/amphion/Kconfig
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Amphion drivers"
+
+config VIDEO_AMPHION_VPU
+ tristate "Amphion VPU (Video Processing Unit) Codec IP"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on MEDIA_SUPPORT
+ depends on VIDEO_DEV && MAILBOX
+ select MEDIA_CONTROLLER
+ select V4L2_MEM2MEM_DEV
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
+ help
+ Amphion VPU Codec IP contains two parts: Windsor and Malone.
+ Windsor is an encoder that supports H.264, and Malone is a
+ decoder that supports H.264, HEVC, and other video formats.
+ This is a V4L2 driver for NXP MXC 8Q video accelerator hardware.
+ It accelerates encoding and decoding operations on
+ various NXP SoCs.
+ To compile this driver as a module choose m here.
diff --git a/drivers/media/platform/amphion/Makefile b/drivers/media/platform/amphion/Makefile
new file mode 100644
index 000000000000..80717312835f
--- /dev/null
+++ b/drivers/media/platform/amphion/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for NXP VPU driver
+
+amphion-vpu-objs += vpu_drv.o \
+ vpu_core.o \
+ vpu_mbox.o \
+ vpu_v4l2.o \
+ vpu_helpers.o \
+ vpu_cmds.o \
+ vpu_msgs.o \
+ vpu_rpc.o \
+ vpu_imx8q.o \
+ vpu_windsor.o \
+ vpu_malone.o \
+ vpu_color.o \
+ vdec.o \
+ venc.o \
+ vpu_dbg.o
+
+obj-$(CONFIG_VIDEO_AMPHION_VPU) += amphion-vpu.o
diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c
new file mode 100644
index 000000000000..8f8dfd6ce2c6
--- /dev/null
+++ b/drivers/media/platform/amphion/vdec.c
@@ -0,0 +1,1656 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#include <linux/init.h>
+#include <linux/interconnect.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-vmalloc.h>
+#include "vpu.h"
+#include "vpu_defs.h"
+#include "vpu_core.h"
+#include "vpu_helpers.h"
+#include "vpu_v4l2.h"
+#include "vpu_cmds.h"
+#include "vpu_rpc.h"
+
+#define VDEC_FRAME_DEPTH 256
+#define VDEC_MIN_BUFFER_CAP 8
+
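+/*
+ * Bookkeeping for one pool of auxiliary frame-store buffers (MBI or
+ * DCP): the count the firmware requested, how many buffers have
+ * actually been allocated, and the index of the next one to hand out.
+ */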
+struct vdec_fs_info {
+ char name[8];
+ u32 type;
+ u32 max_count;
+ u32 req_count;
+ u32 count;
+ u32 index;
+ u32 size;
+ struct vpu_buffer buffer[32];
+ u32 tag;
+};
+
+struct vdec_t {
+ u32 seq_hdr_found;
+ struct vpu_buffer udata;
+ struct vpu_decode_params params;
+ struct vpu_dec_codec_info codec_info;
+ enum vpu_codec_state state;
+
+ struct vpu_vb2_buffer *slots[VB2_MAX_FRAME];
+ u32 req_frame_count;
+ struct vdec_fs_info mbi;
+ struct vdec_fs_info dcp;
+ u32 seq_tag;
+
+ bool reset_codec;
+ bool fixed_fmt;
+ u32 decoded_frame_count;
+ u32 display_frame_count;
+ u32 sequence;
+ u32 eos_received;
+ bool is_source_changed;
+ u32 source_change;
+ u32 drain;
+ u32 ts_pre_count;
+ u32 frame_depth;
+};
+
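+/*
+ * Capture-side entries list the tiled NV12 variants the VPU produces;
+ * output-side entries list the supported bitstream formats.  The
+ * table is terminated by an all-zero sentinel entry.
+ */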
+static const struct vpu_format vdec_formats[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_NV12M_8L128,
+ .num_planes = 2,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ },
+ {
+ .pixfmt = V4L2_PIX_FMT_NV12M_10BE_8L128,
+ .num_planes = 2,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ },
+ {
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION
+ },
+ {
+ .pixfmt = V4L2_PIX_FMT_H264_MVC,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION
+ },
+ {
+ .pixfmt = V4L2_PIX_FMT_HEVC,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION
+ },
+ {
+ .pixfmt = V4L2_PIX_FMT_VC1_ANNEX_G,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION
+ },
+ {
+ .pixfmt = V4L2_PIX_FMT_VC1_ANNEX_L,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION
+ },
+ {
+ .pixfmt = V4L2_PIX_FMT_MPEG2,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION
+ },
+ {
+ .pixfmt = V4L2_PIX_FMT_MPEG4,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION
+ },
+ {
+ .pixfmt = V4L2_PIX_FMT_XVID,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION
+ },
+ {
+ .pixfmt = V4L2_PIX_FMT_VP8,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION
+ },
+ {
+ .pixfmt = V4L2_PIX_FMT_H263,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION
+ },
+ {0, 0, 0, 0},
+};
+
+static const struct v4l2_ctrl_ops vdec_ctrl_ops = {
+ .g_volatile_ctrl = vpu_helper_g_volatile_ctrl,
+};
+
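+/*
+ * The minimum-buffers controls are marked volatile so that each read
+ * is served by vpu_helper_g_volatile_ctrl() instead of the cached
+ * control value set at init time.
+ */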
+static int vdec_ctrl_init(struct vpu_inst *inst)
+{
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 20);
+ if (ret)
+ return ret;
+
+ ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1, 2);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_OUTPUT, 1, 32, 1, 2);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ ret = v4l2_ctrl_handler_setup(&inst->ctrl_handler);
+ if (ret) {
+ dev_err(inst->dev, "[%d] setup ctrls fail, ret = %d\n", inst->id, ret);
+ v4l2_ctrl_handler_free(&inst->ctrl_handler);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vdec_set_last_buffer_dequeued(struct vpu_inst *inst)
+{
+ struct vdec_t *vdec = inst->priv;
+
+ if (vdec->eos_received) {
+ if (!vpu_set_last_buffer_dequeued(inst))
+ vdec->eos_received--;
+ }
+}
+
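+/*
+ * Postpone the source-change notification until the CAPTURE done
+ * list is empty, so userspace drains the frames of the old sequence
+ * before it reacts to the new resolution.
+ */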
+static void vdec_handle_resolution_change(struct vpu_inst *inst)
+{
+ struct vdec_t *vdec = inst->priv;
+ struct vb2_queue *q;
+
+ if (!inst->fh.m2m_ctx)
+ return;
+
+ if (inst->state != VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE)
+ return;
+ if (!vdec->source_change)
+ return;
+
+ q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
+ if (!list_empty(&q->done_list))
+ return;
+
+ vdec->source_change--;
+ vpu_notify_source_change(inst);
+}
+
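+/*
+ * Apply a codec state change.  Entering SEEK records the state to
+ * return to in vdec->state; while seeking, a resolution-change
+ * request is parked there as well instead of being applied directly
+ * (unless @force is set).
+ */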
+static int vdec_update_state(struct vpu_inst *inst, enum vpu_codec_state state, u32 force)
+{
+ struct vdec_t *vdec = inst->priv;
+ enum vpu_codec_state pre_state = inst->state;
+
+ if (state == VPU_CODEC_STATE_SEEK) {
+ if (inst->state == VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE)
+ vdec->state = inst->state;
+ else
+ vdec->state = VPU_CODEC_STATE_ACTIVE;
+ }
+ if (inst->state != VPU_CODEC_STATE_SEEK || force)
+ inst->state = state;
+ else if (state == VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE)
+ vdec->state = VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE;
+
+ if (inst->state != pre_state)
+ vpu_trace(inst->dev, "[%d] %d -> %d\n", inst->id, pre_state, inst->state);
+
+ if (inst->state == VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE)
+ vdec_handle_resolution_change(inst);
+
+ return 0;
+}
+
+static int vdec_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, "amphion-vpu", sizeof(cap->driver));
+ strscpy(cap->card, "amphion vpu decoder", sizeof(cap->card));
+ strscpy(cap->bus_info, "platform: amphion-vpu", sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static int vdec_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct vpu_inst *inst = to_inst(file);
+ struct vdec_t *vdec = inst->priv;
+ const struct vpu_format *fmt;
+ int ret = -EINVAL;
+
+ vpu_inst_lock(inst);
+ if (!V4L2_TYPE_IS_OUTPUT(f->type) && vdec->fixed_fmt) {
+ if (f->index == 0) {
+ f->pixelformat = inst->cap_format.pixfmt;
+ f->flags = inst->cap_format.flags;
+ ret = 0;
+ }
+ } else {
+ fmt = vpu_helper_enum_format(inst, f->type, f->index);
+ memset(f->reserved, 0, sizeof(f->reserved));
+ if (!fmt)
+ goto exit;
+
+ f->pixelformat = fmt->pixfmt;
+ f->flags = fmt->flags;
+ ret = 0;
+ }
+
+exit:
+ vpu_inst_unlock(inst);
+ return ret;
+}
+
+static int vdec_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_inst *inst = to_inst(file);
+ struct vdec_t *vdec = inst->priv;
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ struct vpu_format *cur_fmt;
+ int i;
+
+ cur_fmt = vpu_get_format(inst, f->type);
+
+ pixmp->pixelformat = cur_fmt->pixfmt;
+ pixmp->num_planes = cur_fmt->num_planes;
+ pixmp->width = cur_fmt->width;
+ pixmp->height = cur_fmt->height;
+ pixmp->field = cur_fmt->field;
+ pixmp->flags = cur_fmt->flags;
+ for (i = 0; i < pixmp->num_planes; i++) {
+ pixmp->plane_fmt[i].bytesperline = cur_fmt->bytesperline[i];
+ pixmp->plane_fmt[i].sizeimage = cur_fmt->sizeimage[i];
+ }
+
+ f->fmt.pix_mp.colorspace = vdec->codec_info.color_primaries;
+ f->fmt.pix_mp.xfer_func = vdec->codec_info.transfer_chars;
+ f->fmt.pix_mp.ycbcr_enc = vdec->codec_info.matrix_coeffs;
+ f->fmt.pix_mp.quantization = vdec->codec_info.full_range;
+
+ return 0;
+}
+
+static int vdec_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_inst *inst = to_inst(file);
+ struct vdec_t *vdec = inst->priv;
+
+ vpu_try_fmt_common(inst, f);
+
+ vpu_inst_lock(inst);
+ if (vdec->fixed_fmt) {
+ f->fmt.pix_mp.colorspace = vdec->codec_info.color_primaries;
+ f->fmt.pix_mp.xfer_func = vdec->codec_info.transfer_chars;
+ f->fmt.pix_mp.ycbcr_enc = vdec->codec_info.matrix_coeffs;
+ f->fmt.pix_mp.quantization = vdec->codec_info.full_range;
+ } else {
+ f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_DEFAULT;
+ f->fmt.pix_mp.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ f->fmt.pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ f->fmt.pix_mp.quantization = V4L2_QUANTIZATION_DEFAULT;
+ }
+ vpu_inst_unlock(inst);
+
+ return 0;
+}
+
+static int vdec_s_fmt_common(struct vpu_inst *inst, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ const struct vpu_format *fmt;
+ struct vpu_format *cur_fmt;
+ struct vb2_queue *q;
+ struct vdec_t *vdec = inst->priv;
+ int i;
+
+ if (!inst->fh.m2m_ctx)
+ return -EINVAL;
+
+ q = v4l2_m2m_get_vq(inst->fh.m2m_ctx, f->type);
+ if (!q)
+ return -EINVAL;
+ if (vb2_is_busy(q))
+ return -EBUSY;
+
+ fmt = vpu_try_fmt_common(inst, f);
+ if (!fmt)
+ return -EINVAL;
+
+ cur_fmt = vpu_get_format(inst, f->type);
+ if (V4L2_TYPE_IS_OUTPUT(f->type) && inst->state != VPU_CODEC_STATE_DEINIT) {
+ if (cur_fmt->pixfmt != fmt->pixfmt) {
+ vdec->reset_codec = true;
+ vdec->fixed_fmt = false;
+ }
+ }
+ cur_fmt->pixfmt = fmt->pixfmt;
+ if (V4L2_TYPE_IS_OUTPUT(f->type) || !vdec->fixed_fmt) {
+ cur_fmt->num_planes = fmt->num_planes;
+ cur_fmt->flags = fmt->flags;
+ cur_fmt->width = pixmp->width;
+ cur_fmt->height = pixmp->height;
+ for (i = 0; i < fmt->num_planes; i++) {
+ cur_fmt->sizeimage[i] = pixmp->plane_fmt[i].sizeimage;
+ cur_fmt->bytesperline[i] = pixmp->plane_fmt[i].bytesperline;
+ }
+ if (pixmp->field != V4L2_FIELD_ANY)
+ cur_fmt->field = pixmp->field;
+ } else {
+ pixmp->num_planes = cur_fmt->num_planes;
+ pixmp->width = cur_fmt->width;
+ pixmp->height = cur_fmt->height;
+ for (i = 0; i < pixmp->num_planes; i++) {
+ pixmp->plane_fmt[i].bytesperline = cur_fmt->bytesperline[i];
+ pixmp->plane_fmt[i].sizeimage = cur_fmt->sizeimage[i];
+ }
+ pixmp->field = cur_fmt->field;
+ }
+
+ if (!vdec->fixed_fmt) {
+ if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+ vdec->params.codec_format = cur_fmt->pixfmt;
+ vdec->codec_info.color_primaries = f->fmt.pix_mp.colorspace;
+ vdec->codec_info.transfer_chars = f->fmt.pix_mp.xfer_func;
+ vdec->codec_info.matrix_coeffs = f->fmt.pix_mp.ycbcr_enc;
+ vdec->codec_info.full_range = f->fmt.pix_mp.quantization;
+ } else {
+ vdec->params.output_format = cur_fmt->pixfmt;
+ inst->crop.left = 0;
+ inst->crop.top = 0;
+ inst->crop.width = cur_fmt->width;
+ inst->crop.height = cur_fmt->height;
+ }
+ }
+
+ vpu_trace(inst->dev, "[%d] %c%c%c%c %dx%d\n", inst->id,
+ f->fmt.pix_mp.pixelformat,
+ f->fmt.pix_mp.pixelformat >> 8,
+ f->fmt.pix_mp.pixelformat >> 16,
+ f->fmt.pix_mp.pixelformat >> 24,
+ f->fmt.pix_mp.width,
+ f->fmt.pix_mp.height);
+
+ return 0;
+}
+
+static int vdec_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_inst *inst = to_inst(file);
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ struct vdec_t *vdec = inst->priv;
+ int ret = 0;
+
+ vpu_inst_lock(inst);
+ ret = vdec_s_fmt_common(inst, f);
+ if (ret)
+ goto exit;
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type) && !vdec->fixed_fmt) {
+ struct v4l2_format fc;
+
+ memset(&fc, 0, sizeof(fc));
+ fc.type = inst->cap_format.type;
+ fc.fmt.pix_mp.pixelformat = inst->cap_format.pixfmt;
+ fc.fmt.pix_mp.width = pixmp->width;
+ fc.fmt.pix_mp.height = pixmp->height;
+ vdec_s_fmt_common(inst, &fc);
+ }
+
+ f->fmt.pix_mp.colorspace = vdec->codec_info.color_primaries;
+ f->fmt.pix_mp.xfer_func = vdec->codec_info.transfer_chars;
+ f->fmt.pix_mp.ycbcr_enc = vdec->codec_info.matrix_coeffs;
+ f->fmt.pix_mp.quantization = vdec->codec_info.full_range;
+
+exit:
+ vpu_inst_unlock(inst);
+ return ret;
+}
+
+static int vdec_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vpu_inst *inst = to_inst(file);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ s->r = inst->crop;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = inst->cap_format.width;
+ s->r.height = inst->cap_format.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
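+/*
+ * Flush the decoder once userspace has issued V4L2_DEC_CMD_STOP and
+ * the OUTPUT queue has drained: pad the bitstream with an EOS start
+ * code and tell the firmware this is the end of the sequence.
+ */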
+static int vdec_drain(struct vpu_inst *inst)
+{
+ struct vdec_t *vdec = inst->priv;
+
+ if (!inst->fh.m2m_ctx)
+ return 0;
+
+ if (!vdec->drain)
+ return 0;
+
+ if (v4l2_m2m_num_src_bufs_ready(inst->fh.m2m_ctx))
+ return 0;
+
+ if (!vdec->params.frame_count) {
+ vpu_set_last_buffer_dequeued(inst);
+ return 0;
+ }
+
+ vpu_iface_add_scode(inst, SCODE_PADDING_EOS);
+ vdec->params.end_flag = 1;
+ vpu_iface_set_decode_params(inst, &vdec->params, 1);
+ vdec->drain = 0;
+ vpu_trace(inst->dev, "[%d] frame_count = %d\n", inst->id, vdec->params.frame_count);
+
+ return 0;
+}
+
+static int vdec_cmd_start(struct vpu_inst *inst)
+{
+ switch (inst->state) {
+ case VPU_CODEC_STATE_STARTED:
+ case VPU_CODEC_STATE_DRAIN:
+ case VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE:
+ vdec_update_state(inst, VPU_CODEC_STATE_ACTIVE, 0);
+ break;
+ default:
+ break;
+ }
+ vpu_process_capture_buffer(inst);
+ return 0;
+}
+
+static int vdec_cmd_stop(struct vpu_inst *inst)
+{
+ struct vdec_t *vdec = inst->priv;
+
+ vpu_trace(inst->dev, "[%d]\n", inst->id);
+
+ if (inst->state == VPU_CODEC_STATE_DEINIT) {
+ vpu_set_last_buffer_dequeued(inst);
+ } else {
+ vdec->drain = 1;
+ vdec_drain(inst);
+ }
+
+ return 0;
+}
+
+static int vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
+{
+ struct vpu_inst *inst = to_inst(file);
+ int ret;
+
+ ret = v4l2_m2m_ioctl_try_decoder_cmd(file, fh, cmd);
+ if (ret)
+ return ret;
+
+ vpu_inst_lock(inst);
+ switch (cmd->cmd) {
+ case V4L2_DEC_CMD_START:
+ vdec_cmd_start(inst);
+ break;
+ case V4L2_DEC_CMD_STOP:
+ vdec_cmd_stop(inst);
+ break;
+ default:
+ break;
+ }
+ vpu_inst_unlock(inst);
+
+ return 0;
+}
+
+static int vdec_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subscribe(fh, sub);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops vdec_ioctl_ops = {
+ .vidioc_querycap = vdec_querycap,
+ .vidioc_enum_fmt_vid_cap = vdec_enum_fmt,
+ .vidioc_enum_fmt_vid_out = vdec_enum_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = vdec_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vdec_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = vdec_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = vdec_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = vdec_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = vdec_s_fmt,
+ .vidioc_g_selection = vdec_g_selection,
+ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_try_decoder_cmd,
+ .vidioc_decoder_cmd = vdec_decoder_cmd,
+ .vidioc_subscribe_event = vdec_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+};
+
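+/*
+ * Backpressure hook: stop accepting OUTPUT (bitstream) buffers once
+ * frame_depth timestamps are outstanding, and only consume CAPTURE
+ * buffers while the firmware still has pending frame requests.
+ */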
+static bool vdec_check_ready(struct vpu_inst *inst, unsigned int type)
+{
+ struct vdec_t *vdec = inst->priv;
+
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+ if (vdec->ts_pre_count >= vdec->frame_depth)
+ return false;
+ return true;
+ }
+
+ if (vdec->req_frame_count)
+ return true;
+
+ return false;
+}
+
+static int vdec_frame_decoded(struct vpu_inst *inst, void *arg)
+{
+ struct vdec_t *vdec = inst->priv;
+ struct vpu_dec_pic_info *info = arg;
+ struct vpu_vb2_buffer *vpu_buf;
+ struct vb2_v4l2_buffer *vbuf;
+ int ret = 0;
+
+ if (!info || info->id >= ARRAY_SIZE(vdec->slots))
+ return -EINVAL;
+
+ vpu_inst_lock(inst);
+ vpu_buf = vdec->slots[info->id];
+ if (!vpu_buf) {
+ dev_err(inst->dev, "[%d] decoded invalid frame[%d]\n", inst->id, info->id);
+ ret = -EINVAL;
+ goto exit;
+ }
+ vbuf = &vpu_buf->m2m_buf.vb;
+ if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_DECODED)
+ dev_info(inst->dev, "[%d] buf[%d] has been decoded\n", inst->id, info->id);
+ vpu_set_buffer_state(vbuf, VPU_BUF_STATE_DECODED);
+ vdec->decoded_frame_count++;
+ if (vdec->ts_pre_count >= info->consumed_count)
+ vdec->ts_pre_count -= info->consumed_count;
+ else
+ vdec->ts_pre_count = 0;
+exit:
+ vpu_inst_unlock(inst);
+
+ return ret;
+}
+
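+/*
+ * The firmware identifies a display frame by its luma address; map
+ * that back to the vb2 buffer occupying the matching slot, if any.
+ */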
+static struct vpu_vb2_buffer *vdec_find_buffer(struct vpu_inst *inst, u32 luma)
+{
+ struct vdec_t *vdec = inst->priv;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vdec->slots); i++) {
+ if (!vdec->slots[i])
+ continue;
+ if (luma == vdec->slots[i]->luma)
+ return vdec->slots[i];
+ }
+
+ return NULL;
+}
+
+static void vdec_buf_done(struct vpu_inst *inst, struct vpu_frame_info *frame)
+{
+ struct vdec_t *vdec = inst->priv;
+ struct vpu_vb2_buffer *vpu_buf;
+ struct vb2_v4l2_buffer *vbuf;
+ u32 sequence;
+
+ if (!frame)
+ return;
+
+ vpu_inst_lock(inst);
+ sequence = vdec->sequence++;
+ vpu_buf = vdec_find_buffer(inst, frame->luma);
+ vpu_inst_unlock(inst);
+ if (!vpu_buf) {
+ dev_err(inst->dev, "[%d] can't find buffer, id = %d, addr = 0x%x\n",
+ inst->id, frame->id, frame->luma);
+ return;
+ }
+ if (frame->skipped) {
+ dev_dbg(inst->dev, "[%d] frame skip\n", inst->id);
+ return;
+ }
+
+ vbuf = &vpu_buf->m2m_buf.vb;
+ if (vbuf->vb2_buf.index != frame->id)
+ dev_err(inst->dev, "[%d] buffer id(%d, %d) dismatch\n",
+ inst->id, vbuf->vb2_buf.index, frame->id);
+
+ if (vpu_get_buffer_state(vbuf) != VPU_BUF_STATE_DECODED)
+ dev_err(inst->dev, "[%d] buffer(%d) ready without decoded\n", inst->id, frame->id);
+ vpu_set_buffer_state(vbuf, VPU_BUF_STATE_READY);
+ vb2_set_plane_payload(&vbuf->vb2_buf, 0, inst->cap_format.sizeimage[0]);
+ vb2_set_plane_payload(&vbuf->vb2_buf, 1, inst->cap_format.sizeimage[1]);
+ vbuf->vb2_buf.timestamp = frame->timestamp;
+ vbuf->field = inst->cap_format.field;
+ vbuf->sequence = sequence;
+ dev_dbg(inst->dev, "[%d][OUTPUT TS]%32lld\n", inst->id, frame->timestamp);
+
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
+ vpu_inst_lock(inst);
+ vdec->display_frame_count++;
+ vpu_inst_unlock(inst);
+ dev_dbg(inst->dev, "[%d] decoded : %d, display : %d, sequence : %d\n",
+ inst->id, vdec->decoded_frame_count, vdec->display_frame_count, vdec->sequence);
+}
+
+static void vdec_stop_done(struct vpu_inst *inst)
+{
+ struct vdec_t *vdec = inst->priv;
+
+ vpu_inst_lock(inst);
+ vdec_update_state(inst, VPU_CODEC_STATE_DEINIT, 0);
+ vdec->seq_hdr_found = 0;
+ vdec->req_frame_count = 0;
+ vdec->reset_codec = false;
+ vdec->fixed_fmt = false;
+ vdec->params.end_flag = 0;
+ vdec->drain = 0;
+ vdec->ts_pre_count = 0;
+ vdec->params.frame_count = 0;
+ vdec->decoded_frame_count = 0;
+ vdec->display_frame_count = 0;
+ vdec->sequence = 0;
+ vdec->eos_received = 0;
+ vdec->is_source_changed = false;
+ vdec->source_change = 0;
+ vpu_inst_unlock(inst);
+}
+
+static bool vdec_check_source_change(struct vpu_inst *inst)
+{
+ struct vdec_t *vdec = inst->priv;
+ const struct vpu_format *fmt;
+ int i;
+
+ if (!inst->fh.m2m_ctx)
+ return false;
+
+ if (!vb2_is_streaming(v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx)))
+ return true;
+ fmt = vpu_helper_find_format(inst, inst->cap_format.type, vdec->codec_info.pixfmt);
+ if (inst->cap_format.pixfmt != vdec->codec_info.pixfmt)
+ return true;
+ if (inst->cap_format.width != vdec->codec_info.decoded_width)
+ return true;
+ if (inst->cap_format.height != vdec->codec_info.decoded_height)
+ return true;
+ if (vpu_get_num_buffers(inst, inst->cap_format.type) < inst->min_buffer_cap)
+ return true;
+ if (inst->crop.left != vdec->codec_info.offset_x)
+ return true;
+ if (inst->crop.top != vdec->codec_info.offset_y)
+ return true;
+ if (inst->crop.width != vdec->codec_info.width)
+ return true;
+ if (inst->crop.height != vdec->codec_info.height)
+ return true;
+ if (fmt && inst->cap_format.num_planes != fmt->num_planes)
+ return true;
+ for (i = 0; i < inst->cap_format.num_planes; i++) {
+ if (inst->cap_format.bytesperline[i] != vdec->codec_info.bytesperline[i])
+ return true;
+ if (inst->cap_format.sizeimage[i] != vdec->codec_info.sizeimage[i])
+ return true;
+ }
+
+ return false;
+}
+
+static void vdec_init_fmt(struct vpu_inst *inst)
+{
+ struct vdec_t *vdec = inst->priv;
+ const struct vpu_format *fmt;
+ int i;
+
+ fmt = vpu_helper_find_format(inst, inst->cap_format.type, vdec->codec_info.pixfmt);
+ inst->out_format.width = vdec->codec_info.width;
+ inst->out_format.height = vdec->codec_info.height;
+ inst->cap_format.width = vdec->codec_info.decoded_width;
+ inst->cap_format.height = vdec->codec_info.decoded_height;
+ inst->cap_format.pixfmt = vdec->codec_info.pixfmt;
+ if (fmt) {
+ inst->cap_format.num_planes = fmt->num_planes;
+ inst->cap_format.flags = fmt->flags;
+ }
+ for (i = 0; i < inst->cap_format.num_planes; i++) {
+ inst->cap_format.bytesperline[i] = vdec->codec_info.bytesperline[i];
+ inst->cap_format.sizeimage[i] = vdec->codec_info.sizeimage[i];
+ }
+ if (vdec->codec_info.progressive)
+ inst->cap_format.field = V4L2_FIELD_NONE;
+ else
+ inst->cap_format.field = V4L2_FIELD_SEQ_BT;
+ if (vdec->codec_info.color_primaries == V4L2_COLORSPACE_DEFAULT)
+ vdec->codec_info.color_primaries = V4L2_COLORSPACE_REC709;
+ if (vdec->codec_info.transfer_chars == V4L2_XFER_FUNC_DEFAULT)
+ vdec->codec_info.transfer_chars = V4L2_XFER_FUNC_709;
+ if (vdec->codec_info.matrix_coeffs == V4L2_YCBCR_ENC_DEFAULT)
+ vdec->codec_info.matrix_coeffs = V4L2_YCBCR_ENC_709;
+ if (vdec->codec_info.full_range == V4L2_QUANTIZATION_DEFAULT)
+ vdec->codec_info.full_range = V4L2_QUANTIZATION_LIM_RANGE;
+}
+
+static void vdec_init_crop(struct vpu_inst *inst)
+{
+ struct vdec_t *vdec = inst->priv;
+
+ inst->crop.left = vdec->codec_info.offset_x;
+ inst->crop.top = vdec->codec_info.offset_y;
+ inst->crop.width = vdec->codec_info.width;
+ inst->crop.height = vdec->codec_info.height;
+}
+
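+/*
+ * mbi and dcp are auxiliary buffer pools required by the firmware; their
+ * sizes (mbi_size/dcp_size) are reported along with the sequence header,
+ * and the pools are tagged with the current sequence tag.
+ */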
+static void vdec_init_mbi(struct vpu_inst *inst)
+{
+ struct vdec_t *vdec = inst->priv;
+
+ vdec->mbi.size = vdec->codec_info.mbi_size;
+ vdec->mbi.max_count = ARRAY_SIZE(vdec->mbi.buffer);
+ scnprintf(vdec->mbi.name, sizeof(vdec->mbi.name), "mbi");
+ vdec->mbi.type = MEM_RES_MBI;
+ vdec->mbi.tag = vdec->seq_tag;
+}
+
+static void vdec_init_dcp(struct vpu_inst *inst)
+{
+ struct vdec_t *vdec = inst->priv;
+
+ vdec->dcp.size = vdec->codec_info.dcp_size;
+ vdec->dcp.max_count = ARRAY_SIZE(vdec->dcp.buffer);
+ scnprintf(vdec->dcp.name, sizeof(vdec->dcp.name), "dcp");
+ vdec->dcp.type = MEM_RES_DCP;
+ vdec->dcp.tag = vdec->seq_tag;
+}
+
+static void vdec_request_one_fs(struct vdec_fs_info *fs)
+{
+ fs->req_count++;
+ if (fs->req_count > fs->max_count)
+ fs->req_count = fs->max_count;
+}
+
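+/*
+ * Allocate one DMA buffer for a frame-store pool, reusing an existing
+ * allocation when it is already large enough.
+ */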
+static int vdec_alloc_fs_buffer(struct vpu_inst *inst, struct vdec_fs_info *fs)
+{
+ struct vpu_buffer *buffer;
+
+ if (!fs->size)
+ return -EINVAL;
+
+ if (fs->count >= fs->req_count)
+ return -EINVAL;
+
+ buffer = &fs->buffer[fs->count];
+ if (buffer->virt && buffer->length >= fs->size)
+ return 0;
+
+ vpu_free_dma(buffer);
+ buffer->length = fs->size;
+ return vpu_alloc_dma(inst->core, buffer);
+}
+
+static void vdec_alloc_fs(struct vpu_inst *inst, struct vdec_fs_info *fs)
+{
+ int ret;
+
+ while (fs->count < fs->req_count) {
+ ret = vdec_alloc_fs_buffer(inst, fs);
+ if (ret)
+ break;
+ fs->count++;
+ }
+}
+
+static void vdec_clear_fs(struct vdec_fs_info *fs)
+{
+ u32 i;
+
+ if (!fs)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(fs->buffer); i++)
+ vpu_free_dma(&fs->buffer[i]);
+ memset(fs, 0, sizeof(*fs));
+}
+
+static int vdec_response_fs(struct vpu_inst *inst, struct vdec_fs_info *fs)
+{
+ struct vpu_fs_info info;
+ int ret;
+
+ if (fs->index >= fs->count)
+ return 0;
+
+ memset(&info, 0, sizeof(info));
+ info.id = fs->index;
+ info.type = fs->type;
+ info.tag = fs->tag;
+ info.luma_addr = fs->buffer[fs->index].phys;
+ info.luma_size = fs->buffer[fs->index].length;
+ ret = vpu_session_alloc_fs(inst, &info);
+ if (ret)
+ return ret;
+
+ fs->index++;
+ return 0;
+}
+
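+/*
+ * Answer an outstanding frame request without a real capture buffer. The
+ * tag is offset by 0xf0, apparently so the firmware can tell such a dummy
+ * response apart from a normal one.
+ */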
+static int vdec_response_frame_abnormal(struct vpu_inst *inst)
+{
+ struct vdec_t *vdec = inst->priv;
+ struct vpu_fs_info info;
+
+ if (!vdec->req_frame_count)
+ return 0;
+
+ memset(&info, 0, sizeof(info));
+ info.type = MEM_RES_FRAME;
+ info.tag = vdec->seq_tag + 0xf0;
+ vpu_session_alloc_fs(inst, &info);
+ vdec->req_frame_count--;
+
+ return 0;
+}
+
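+/*
+ * Register a capture buffer with the firmware as a frame store: the plane
+ * addresses are taken from the vb2 buffer, and the buffer is kept in the
+ * slots[] table and marked in-use until the firmware releases it.
+ */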
+static int vdec_response_frame(struct vpu_inst *inst, struct vb2_v4l2_buffer *vbuf)
+{
+ struct vdec_t *vdec = inst->priv;
+ struct vpu_vb2_buffer *vpu_buf;
+ struct vpu_fs_info info;
+ int ret;
+
+ if (inst->state != VPU_CODEC_STATE_ACTIVE)
+ return -EINVAL;
+
+ if (!vdec->req_frame_count)
+ return -EINVAL;
+
+ if (!vbuf)
+ return -EINVAL;
+
+ if (vdec->slots[vbuf->vb2_buf.index]) {
+ dev_err(inst->dev, "[%d] repeat alloc fs %d\n",
+ inst->id, vbuf->vb2_buf.index);
+ return -EINVAL;
+ }
+
+ dev_dbg(inst->dev, "[%d] state = %d, alloc fs %d, tag = 0x%x\n",
+ inst->id, inst->state, vbuf->vb2_buf.index, vdec->seq_tag);
+ vpu_buf = to_vpu_vb2_buffer(vbuf);
+
+ memset(&info, 0, sizeof(info));
+ info.id = vbuf->vb2_buf.index;
+ info.type = MEM_RES_FRAME;
+ info.tag = vdec->seq_tag;
+ info.luma_addr = vpu_get_vb_phy_addr(&vbuf->vb2_buf, 0);
+ info.luma_size = inst->cap_format.sizeimage[0];
+ info.chroma_addr = vpu_get_vb_phy_addr(&vbuf->vb2_buf, 1);
+ info.chromau_size = inst->cap_format.sizeimage[1];
+ info.bytesperline = inst->cap_format.bytesperline[0];
+ ret = vpu_session_alloc_fs(inst, &info);
+ if (ret)
+ return ret;
+
+ vpu_buf->tag = info.tag;
+ vpu_buf->luma = info.luma_addr;
+ vpu_buf->chroma_u = info.chroma_addr;
+ vpu_buf->chroma_v = 0;
+ vpu_set_buffer_state(vbuf, VPU_BUF_STATE_INUSE);
+ vdec->slots[info.id] = vpu_buf;
+ vdec->req_frame_count--;
+
+ return 0;
+}
+
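+/*
+ * Satisfy as many outstanding frame-store requests as possible. With
+ * @force set, pending frame requests are answered with dummy responses,
+ * which is used when the requests belong to a stale sequence tag.
+ */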
+static void vdec_response_fs_request(struct vpu_inst *inst, bool force)
+{
+ struct vdec_t *vdec = inst->priv;
+ int i;
+ int ret;
+
+ if (force) {
+ for (i = vdec->req_frame_count; i > 0; i--)
+ vdec_response_frame_abnormal(inst);
+ return;
+ }
+
+ for (i = vdec->req_frame_count; i > 0; i--) {
+ ret = vpu_process_capture_buffer(inst);
+ if (ret)
+ break;
+ if (vdec->eos_received)
+ break;
+ }
+
+ for (i = vdec->mbi.index; i < vdec->mbi.count; i++) {
+ if (vdec_response_fs(inst, &vdec->mbi))
+ break;
+ if (vdec->eos_received)
+ break;
+ }
+ for (i = vdec->dcp.index; i < vdec->dcp.count; i++) {
+ if (vdec_response_fs(inst, &vdec->dcp))
+ break;
+ if (vdec->eos_received)
+ break;
+ }
+}
+
+static void vdec_response_fs_release(struct vpu_inst *inst, u32 id, u32 tag)
+{
+ struct vpu_fs_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.id = id;
+ info.tag = tag;
+ vpu_session_release_fs(inst, &info);
+}
+
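+/*
+ * Give a buffer back to the m2m queue, but only if it is still active and
+ * not already queued.
+ */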
+static void vdec_recycle_buffer(struct vpu_inst *inst, struct vb2_v4l2_buffer *vbuf)
+{
+ if (!inst->fh.m2m_ctx)
+ return;
+ if (vbuf->vb2_buf.state != VB2_BUF_STATE_ACTIVE)
+ return;
+ if (vpu_find_buf_by_idx(inst, vbuf->vb2_buf.type, vbuf->vb2_buf.index))
+ return;
+ v4l2_m2m_buf_queue(inst->fh.m2m_ctx, vbuf);
+}
+
+static void vdec_clear_slots(struct vpu_inst *inst)
+{
+ struct vdec_t *vdec = inst->priv;
+ struct vpu_vb2_buffer *vpu_buf;
+ struct vb2_v4l2_buffer *vbuf;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vdec->slots); i++) {
+ if (!vdec->slots[i])
+ continue;
+
+ vpu_buf = vdec->slots[i];
+ vbuf = &vpu_buf->m2m_buf.vb;
+
+ vdec_recycle_buffer(inst, vbuf);
+ vpu_set_buffer_state(vbuf, VPU_BUF_STATE_IDLE);
+ vdec->slots[i] = NULL;
+ }
+}
+
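+/*
+ * Handle the "sequence header found" event: cache the new codec info,
+ * derive the capture format, crop rectangle and mbi/dcp requirements from
+ * it, and notify userspace of a source change when one is detected.
+ */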
+static void vdec_event_seq_hdr(struct vpu_inst *inst, struct vpu_dec_codec_info *hdr)
+{
+ struct vdec_t *vdec = inst->priv;
+
+ vpu_inst_lock(inst);
+ memcpy(&vdec->codec_info, hdr, sizeof(vdec->codec_info));
+
+ vpu_trace(inst->dev, "[%d] %d x %d, crop : (%d, %d) %d x %d, %d, %d\n",
+ inst->id,
+ vdec->codec_info.decoded_width,
+ vdec->codec_info.decoded_height,
+ vdec->codec_info.offset_x,
+ vdec->codec_info.offset_y,
+ vdec->codec_info.width,
+ vdec->codec_info.height,
+ hdr->num_ref_frms,
+ hdr->num_dpb_frms);
+ inst->min_buffer_cap = hdr->num_ref_frms + hdr->num_dpb_frms;
+ vdec->is_source_changed = vdec_check_source_change(inst);
+ vdec_init_fmt(inst);
+ vdec_init_crop(inst);
+ vdec_init_mbi(inst);
+ vdec_init_dcp(inst);
+ if (!vdec->seq_hdr_found) {
+ vdec->seq_tag = vdec->codec_info.tag;
+ if (vdec->is_source_changed) {
+ vdec_update_state(inst, VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE, 0);
+ vpu_notify_source_change(inst);
+ vdec->is_source_changed = false;
+ }
+ }
+ if (vdec->seq_tag != vdec->codec_info.tag) {
+ vdec_response_fs_request(inst, true);
+ vpu_trace(inst->dev, "[%d] seq tag change: %d -> %d\n",
+ inst->id, vdec->seq_tag, vdec->codec_info.tag);
+ }
+ vdec->seq_hdr_found++;
+ vdec->fixed_fmt = true;
+ vpu_inst_unlock(inst);
+}
+
+static void vdec_event_resolution_change(struct vpu_inst *inst)
+{
+ struct vdec_t *vdec = inst->priv;
+
+ vpu_trace(inst->dev, "[%d]\n", inst->id);
+ vpu_inst_lock(inst);
+ vdec->seq_tag = vdec->codec_info.tag;
+ vdec_clear_fs(&vdec->mbi);
+ vdec_clear_fs(&vdec->dcp);
+ vdec_clear_slots(inst);
+ vdec_init_mbi(inst);
+ vdec_init_dcp(inst);
+ if (vdec->is_source_changed) {
+ vdec_update_state(inst, VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE, 0);
+ vdec->source_change++;
+ vdec_handle_resolution_change(inst);
+ vdec->is_source_changed = false;
+ }
+ vpu_inst_unlock(inst);
+}
+
+static void vdec_event_req_fs(struct vpu_inst *inst, struct vpu_fs_info *fs)
+{
+ struct vdec_t *vdec = inst->priv;
+
+ if (!fs)
+ return;
+
+ vpu_inst_lock(inst);
+
+ switch (fs->type) {
+ case MEM_RES_FRAME:
+ vdec->req_frame_count++;
+ break;
+ case MEM_RES_MBI:
+ vdec_request_one_fs(&vdec->mbi);
+ break;
+ case MEM_RES_DCP:
+ vdec_request_one_fs(&vdec->dcp);
+ break;
+ default:
+ break;
+ }
+
+ vdec_alloc_fs(inst, &vdec->mbi);
+ vdec_alloc_fs(inst, &vdec->dcp);
+
+ vdec_response_fs_request(inst, false);
+
+ vpu_inst_unlock(inst);
+}
+
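+/*
+ * Handle a frame-store release from the firmware: detach the buffer from
+ * its slot, count frames that were decoded but never displayed, and give
+ * the buffer back to the queue unless it has already been handed to
+ * userspace as a decoded frame.
+ */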
+static void vdec_event_rel_fs(struct vpu_inst *inst, struct vpu_fs_info *fs)
+{
+ struct vdec_t *vdec = inst->priv;
+ struct vpu_vb2_buffer *vpu_buf;
+ struct vb2_v4l2_buffer *vbuf;
+
+ if (!fs || fs->id >= ARRAY_SIZE(vdec->slots))
+ return;
+ if (fs->type != MEM_RES_FRAME)
+ return;
+
+ if (fs->id >= vpu_get_num_buffers(inst, inst->cap_format.type)) {
+ dev_err(inst->dev, "[%d] invalid fs(%d) to release\n", inst->id, fs->id);
+ return;
+ }
+
+ vpu_inst_lock(inst);
+ vpu_buf = vdec->slots[fs->id];
+ vdec->slots[fs->id] = NULL;
+
+ if (!vpu_buf) {
+ dev_dbg(inst->dev, "[%d] fs[%d] has bee released\n", inst->id, fs->id);
+ goto exit;
+ }
+
+ vbuf = &vpu_buf->m2m_buf.vb;
+ if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_DECODED) {
+ dev_dbg(inst->dev, "[%d] frame skip\n", inst->id);
+ vdec->sequence++;
+ }
+
+ vdec_response_fs_release(inst, fs->id, vpu_buf->tag);
+ if (vpu_get_buffer_state(vbuf) != VPU_BUF_STATE_READY)
+ vdec_recycle_buffer(inst, vbuf);
+
+ vpu_set_buffer_state(vbuf, VPU_BUF_STATE_IDLE);
+ vpu_process_capture_buffer(inst);
+
+exit:
+ vpu_inst_unlock(inst);
+}
+
+static void vdec_event_eos(struct vpu_inst *inst)
+{
+ struct vdec_t *vdec = inst->priv;
+
+ vpu_trace(inst->dev, "[%d] input : %d, decoded : %d, display : %d, sequence : %d\n",
+ inst->id,
+ vdec->params.frame_count,
+ vdec->decoded_frame_count,
+ vdec->display_frame_count,
+ vdec->sequence);
+ vpu_inst_lock(inst);
+ vdec->eos_received++;
+ vdec->fixed_fmt = false;
+ inst->min_buffer_cap = VDEC_MIN_BUFFER_CAP;
+ vdec_update_state(inst, VPU_CODEC_STATE_DRAIN, 0);
+ vdec_set_last_buffer_dequeued(inst);
+ vpu_inst_unlock(inst);
+}
+
+static void vdec_event_notify(struct vpu_inst *inst, u32 event, void *data)
+{
+ switch (event) {
+ case VPU_MSG_ID_SEQ_HDR_FOUND:
+ vdec_event_seq_hdr(inst, data);
+ break;
+ case VPU_MSG_ID_RES_CHANGE:
+ vdec_event_resolution_change(inst);
+ break;
+ case VPU_MSG_ID_FRAME_REQ:
+ vdec_event_req_fs(inst, data);
+ break;
+ case VPU_MSG_ID_FRAME_RELEASE:
+ vdec_event_rel_fs(inst, data);
+ break;
+ case VPU_MSG_ID_PIC_EOS:
+ vdec_event_eos(inst);
+ break;
+ default:
+ break;
+ }
+}
+
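+/*
+ * Feed one compressed OUTPUT buffer to the firmware. The bitstream is
+ * copied into the stream ring buffer, so the vb2 buffer can be completed
+ * immediately. The extra 0x40000 bytes of headroom presumably leave room
+ * for the scode padding appended on drain or abort.
+ */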
+static int vdec_process_output(struct vpu_inst *inst, struct vb2_buffer *vb)
+{
+ struct vdec_t *vdec = inst->priv;
+ struct vb2_v4l2_buffer *vbuf;
+ struct vpu_rpc_buffer_desc desc;
+ u32 free_space;
+ int ret;
+
+ vbuf = to_vb2_v4l2_buffer(vb);
+ dev_dbg(inst->dev, "[%d] dec output [%d] %d : %ld\n",
+ inst->id, vbuf->sequence, vb->index, vb2_get_plane_payload(vb, 0));
+
+ if (inst->state == VPU_CODEC_STATE_DEINIT)
+ return -EINVAL;
+ if (vdec->reset_codec)
+ return -EINVAL;
+
+ if (inst->state == VPU_CODEC_STATE_STARTED)
+ vdec_update_state(inst, VPU_CODEC_STATE_ACTIVE, 0);
+
+ ret = vpu_iface_get_stream_buffer_desc(inst, &desc);
+ if (ret)
+ return ret;
+
+ free_space = vpu_helper_get_free_space(inst);
+ if (free_space < vb2_get_plane_payload(vb, 0) + 0x40000)
+ return -ENOMEM;
+
+ ret = vpu_iface_input_frame(inst, vb);
+ if (ret < 0)
+ return -ENOMEM;
+
+ dev_dbg(inst->dev, "[%d][INPUT TS]%32lld\n", inst->id, vb->timestamp);
+ vdec->ts_pre_count++;
+ vdec->params.frame_count++;
+
+ v4l2_m2m_src_buf_remove_by_buf(inst->fh.m2m_ctx, vbuf);
+ vpu_set_buffer_state(vbuf, VPU_BUF_STATE_IDLE);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
+
+ if (vdec->drain)
+ vdec_drain(inst);
+
+ return 0;
+}
+
+static int vdec_process_capture(struct vpu_inst *inst, struct vb2_buffer *vb)
+{
+ struct vdec_t *vdec = inst->priv;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ int ret;
+
+ if (inst->state == VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE)
+ return -EINVAL;
+ if (vdec->reset_codec)
+ return -EINVAL;
+
+ ret = vdec_response_frame(inst, vbuf);
+ if (ret)
+ return ret;
+ v4l2_m2m_dst_buf_remove_by_buf(inst->fh.m2m_ctx, vbuf);
+ return 0;
+}
+
+static void vdec_on_queue_empty(struct vpu_inst *inst, u32 type)
+{
+ struct vdec_t *vdec = inst->priv;
+
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return;
+
+ vdec_handle_resolution_change(inst);
+ if (vdec->eos_received)
+ vdec_set_last_buffer_dequeued(inst);
+}
+
+static void vdec_abort(struct vpu_inst *inst)
+{
+ struct vdec_t *vdec = inst->priv;
+ struct vpu_rpc_buffer_desc desc;
+ int ret;
+
+ vpu_trace(inst->dev, "[%d] state = %d\n", inst->id, inst->state);
+ vpu_iface_add_scode(inst, SCODE_PADDING_ABORT);
+ vdec->params.end_flag = 1;
+ vpu_iface_set_decode_params(inst, &vdec->params, 1);
+
+ vpu_session_abort(inst);
+
+ ret = vpu_iface_get_stream_buffer_desc(inst, &desc);
+ if (!ret)
+ vpu_iface_update_stream_buffer(inst, desc.rptr, 1);
+
+ vpu_session_rst_buf(inst);
+ vpu_trace(inst->dev, "[%d] input : %d, decoded : %d, display : %d, sequence : %d\n",
+ inst->id,
+ vdec->params.frame_count,
+ vdec->decoded_frame_count,
+ vdec->display_frame_count,
+ vdec->sequence);
+ vdec->params.end_flag = 0;
+ vdec->drain = 0;
+ vdec->ts_pre_count = 0;
+ vdec->params.frame_count = 0;
+ vdec->decoded_frame_count = 0;
+ vdec->display_frame_count = 0;
+ vdec->sequence = 0;
+}
+
+static void vdec_stop(struct vpu_inst *inst, bool free)
+{
+ struct vdec_t *vdec = inst->priv;
+
+ vdec_clear_slots(inst);
+ if (inst->state != VPU_CODEC_STATE_DEINIT)
+ vpu_session_stop(inst);
+ vdec_clear_fs(&vdec->mbi);
+ vdec_clear_fs(&vdec->dcp);
+ if (free) {
+ vpu_free_dma(&vdec->udata);
+ vpu_free_dma(&inst->stream_buffer);
+ }
+ vdec_update_state(inst, VPU_CODEC_STATE_DEINIT, 1);
+ vdec->reset_codec = false;
+}
+
+static void vdec_release(struct vpu_inst *inst)
+{
+ if (inst->id != VPU_INST_NULL_ID)
+ vpu_trace(inst->dev, "[%d]\n", inst->id);
+ vpu_inst_lock(inst);
+ vdec_stop(inst, true);
+ vpu_inst_unlock(inst);
+}
+
+static void vdec_cleanup(struct vpu_inst *inst)
+{
+ struct vdec_t *vdec;
+
+ if (!inst)
+ return;
+
+ vdec = inst->priv;
+ vfree(vdec);
+ inst->priv = NULL;
+ vfree(inst);
+}
+
+static void vdec_init_params(struct vdec_t *vdec)
+{
+ vdec->params.frame_count = 0;
+ vdec->params.end_flag = 0;
+}
+
+static int vdec_start(struct vpu_inst *inst)
+{
+ struct vdec_t *vdec = inst->priv;
+ int stream_buffer_size;
+ int ret;
+
+ if (inst->state != VPU_CODEC_STATE_DEINIT)
+ return 0;
+
+ vpu_trace(inst->dev, "[%d]\n", inst->id);
+ if (!vdec->udata.virt) {
+ vdec->udata.length = 0x1000;
+ ret = vpu_alloc_dma(inst->core, &vdec->udata);
+ if (ret) {
+ dev_err(inst->dev, "[%d] alloc udata fail\n", inst->id);
+ goto error;
+ }
+ }
+
+ if (!inst->stream_buffer.virt) {
+ stream_buffer_size = vpu_iface_get_stream_buffer_size(inst->core);
+ if (stream_buffer_size > 0) {
+ inst->stream_buffer.length = stream_buffer_size;
+ ret = vpu_alloc_dma(inst->core, &inst->stream_buffer);
+ if (ret) {
+ dev_err(inst->dev, "[%d] alloc stream buffer fail\n", inst->id);
+ goto error;
+ }
+ inst->use_stream_buffer = true;
+ }
+ }
+
+ if (inst->use_stream_buffer)
+ vpu_iface_config_stream_buffer(inst, &inst->stream_buffer);
+ vpu_iface_init_instance(inst);
+ vdec->params.udata.base = vdec->udata.phys;
+ vdec->params.udata.size = vdec->udata.length;
+ ret = vpu_iface_set_decode_params(inst, &vdec->params, 0);
+ if (ret) {
+ dev_err(inst->dev, "[%d] set decode params fail\n", inst->id);
+ goto error;
+ }
+
+ vdec_init_params(vdec);
+ ret = vpu_session_start(inst);
+ if (ret) {
+ dev_err(inst->dev, "[%d] start fail\n", inst->id);
+ goto error;
+ }
+
+ vdec_update_state(inst, VPU_CODEC_STATE_STARTED, 0);
+
+ return 0;
+error:
+ vpu_free_dma(&vdec->udata);
+ vpu_free_dma(&inst->stream_buffer);
+ return ret;
+}
+
+static int vdec_start_session(struct vpu_inst *inst, u32 type)
+{
+ struct vdec_t *vdec = inst->priv;
+ int ret = 0;
+
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+ if (vdec->reset_codec)
+ vdec_stop(inst, false);
+ if (inst->state == VPU_CODEC_STATE_DEINIT) {
+ ret = vdec_start(inst);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+ if (inst->state == VPU_CODEC_STATE_SEEK)
+ vdec_update_state(inst, vdec->state, 1);
+ vdec->eos_received = 0;
+ vpu_process_output_buffer(inst);
+ } else {
+ vdec_cmd_start(inst);
+ }
+ if (inst->state == VPU_CODEC_STATE_ACTIVE)
+ vdec_response_fs_request(inst, false);
+
+ return ret;
+}
+
+static int vdec_stop_session(struct vpu_inst *inst, u32 type)
+{
+ struct vdec_t *vdec = inst->priv;
+
+ if (inst->state == VPU_CODEC_STATE_DEINIT)
+ return 0;
+
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+ vdec_update_state(inst, VPU_CODEC_STATE_SEEK, 0);
+ vdec->drain = 0;
+ } else {
+ if (inst->state != VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE)
+ vdec_abort(inst);
+
+ vdec->eos_received = 0;
+ vdec_clear_slots(inst);
+ }
+
+ return 0;
+}
+
+static int vdec_get_debug_info(struct vpu_inst *inst, char *str, u32 size, u32 i)
+{
+ struct vdec_t *vdec = inst->priv;
+ int num = -1;
+
+ switch (i) {
+ case 0:
+ num = scnprintf(str, size,
+ "req_frame_count = %d\ninterlaced = %d\n",
+ vdec->req_frame_count,
+ vdec->codec_info.progressive ? 0 : 1);
+ break;
+ case 1:
+ num = scnprintf(str, size,
+ "mbi: size = 0x%x request = %d, alloc = %d, response = %d\n",
+ vdec->mbi.size,
+ vdec->mbi.req_count,
+ vdec->mbi.count,
+ vdec->mbi.index);
+ break;
+ case 2:
+ num = scnprintf(str, size,
+ "dcp: size = 0x%x request = %d, alloc = %d, response = %d\n",
+ vdec->dcp.size,
+ vdec->dcp.req_count,
+ vdec->dcp.count,
+ vdec->dcp.index);
+ break;
+ case 3:
+ num = scnprintf(str, size, "input_frame_count = %d\n", vdec->params.frame_count);
+ break;
+ case 4:
+ num = scnprintf(str, size, "decoded_frame_count = %d\n", vdec->decoded_frame_count);
+ break;
+ case 5:
+ num = scnprintf(str, size, "display_frame_count = %d\n", vdec->display_frame_count);
+ break;
+ case 6:
+ num = scnprintf(str, size, "sequence = %d\n", vdec->sequence);
+ break;
+ case 7:
+ num = scnprintf(str, size, "drain = %d, eos = %d, source_change = %d\n",
+ vdec->drain, vdec->eos_received, vdec->source_change);
+ break;
+ case 8:
+ num = scnprintf(str, size, "ts_pre_count = %d, frame_depth = %d\n",
+ vdec->ts_pre_count, vdec->frame_depth);
+ break;
+ case 9:
+ num = scnprintf(str, size, "fps = %d/%d\n",
+ vdec->codec_info.frame_rate.numerator,
+ vdec->codec_info.frame_rate.denominator);
+ break;
+ default:
+ break;
+ }
+
+ return num;
+}
+
+static struct vpu_inst_ops vdec_inst_ops = {
+ .ctrl_init = vdec_ctrl_init,
+ .check_ready = vdec_check_ready,
+ .buf_done = vdec_buf_done,
+ .get_one_frame = vdec_frame_decoded,
+ .stop_done = vdec_stop_done,
+ .event_notify = vdec_event_notify,
+ .release = vdec_release,
+ .cleanup = vdec_cleanup,
+ .start = vdec_start_session,
+ .stop = vdec_stop_session,
+ .process_output = vdec_process_output,
+ .process_capture = vdec_process_capture,
+ .on_queue_empty = vdec_on_queue_empty,
+ .get_debug_info = vdec_get_debug_info,
+ .wait_prepare = vpu_inst_unlock,
+ .wait_finish = vpu_inst_lock,
+};
+
+static void vdec_init(struct file *file)
+{
+ struct vpu_inst *inst = to_inst(file);
+ struct vdec_t *vdec;
+ struct v4l2_format f;
+
+ vdec = inst->priv;
+ vdec->frame_depth = VDEC_FRAME_DEPTH;
+
+ memset(&f, 0, sizeof(f));
+ f.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ f.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_H264;
+ f.fmt.pix_mp.width = 1280;
+ f.fmt.pix_mp.height = 720;
+ f.fmt.pix_mp.field = V4L2_FIELD_NONE;
+ vdec_s_fmt(file, &inst->fh, &f);
+
+ memset(&f, 0, sizeof(f));
+ f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ f.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12M_8L128;
+ f.fmt.pix_mp.width = 1280;
+ f.fmt.pix_mp.height = 720;
+ f.fmt.pix_mp.field = V4L2_FIELD_NONE;
+ vdec_s_fmt(file, &inst->fh, &f);
+}
+
+static int vdec_open(struct file *file)
+{
+ struct vpu_inst *inst;
+ struct vdec_t *vdec;
+ int ret;
+
+ inst = vzalloc(sizeof(*inst));
+ if (!inst)
+ return -ENOMEM;
+
+ vdec = vzalloc(sizeof(*vdec));
+ if (!vdec) {
+ vfree(inst);
+ return -ENOMEM;
+ }
+
+ inst->ops = &vdec_inst_ops;
+ inst->formats = vdec_formats;
+ inst->type = VPU_CORE_TYPE_DEC;
+ inst->priv = vdec;
+
+ ret = vpu_v4l2_open(file, inst);
+ if (ret)
+ return ret;
+
+ vdec->fixed_fmt = false;
+ inst->min_buffer_cap = VDEC_MIN_BUFFER_CAP;
+ vdec_init(file);
+
+ return 0;
+}
+
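+/*
+ * The m2m poll helper flags EPOLLERR while the queues are still being set
+ * up; mask it out in those transient states so that clients do not treat
+ * start-up as a fatal error.
+ */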
+static __poll_t vdec_poll(struct file *file, poll_table *wait)
+{
+ struct vpu_inst *inst = to_inst(file);
+ struct vb2_queue *src_q, *dst_q;
+ __poll_t ret;
+
+ ret = v4l2_m2m_fop_poll(file, wait);
+ src_q = v4l2_m2m_get_src_vq(inst->fh.m2m_ctx);
+ dst_q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
+ if (vb2_is_streaming(src_q) && !vb2_is_streaming(dst_q))
+ ret &= (~EPOLLERR);
+ if (!src_q->error && !dst_q->error &&
+ (vb2_is_streaming(src_q) && list_empty(&src_q->queued_list)) &&
+ (vb2_is_streaming(dst_q) && list_empty(&dst_q->queued_list)))
+ ret &= (~EPOLLERR);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations vdec_fops = {
+ .owner = THIS_MODULE,
+ .open = vdec_open,
+ .release = vpu_v4l2_close,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = vdec_poll,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+const struct v4l2_ioctl_ops *vdec_get_ioctl_ops(void)
+{
+ return &vdec_ioctl_ops;
+}
+
+const struct v4l2_file_operations *vdec_get_fops(void)
+{
+ return &vdec_fops;
+}
diff --git a/drivers/media/platform/amphion/venc.c b/drivers/media/platform/amphion/venc.c
new file mode 100644
index 000000000000..d33c2748e4b7
--- /dev/null
+++ b/drivers/media/platform/amphion/venc.c
@@ -0,0 +1,1358 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#include <linux/init.h>
+#include <linux/interconnect.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/ktime.h>
+#include <linux/rational.h>
+#include <linux/vmalloc.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-vmalloc.h>
+#include "vpu.h"
+#include "vpu_defs.h"
+#include "vpu_core.h"
+#include "vpu_helpers.h"
+#include "vpu_v4l2.h"
+#include "vpu_cmds.h"
+#include "vpu_rpc.h"
+
+#define VENC_OUTPUT_ENABLE BIT(0)
+#define VENC_CAPTURE_ENABLE BIT(1)
+#define VENC_ENABLE_MASK (VENC_OUTPUT_ENABLE | VENC_CAPTURE_ENABLE)
+#define VENC_MAX_BUF_CNT 8
+
+struct venc_t {
+ struct vpu_encode_params params;
+ u32 request_key_frame;
+ u32 input_ready;
+ u32 cpb_size;
+ bool bitrate_change;
+
+ struct vpu_buffer enc[VENC_MAX_BUF_CNT];
+ struct vpu_buffer ref[VENC_MAX_BUF_CNT];
+ struct vpu_buffer act[VENC_MAX_BUF_CNT];
+ struct list_head frames;
+ u32 frame_count;
+ u32 encode_count;
+ u32 ready_count;
+ u32 enable;
+ u32 stopped;
+
+ u32 skipped_count;
+ u32 skipped_bytes;
+
+ wait_queue_head_t wq;
+};
+
+struct venc_frame_t {
+ struct list_head list;
+ struct vpu_enc_pic_info info;
+ u32 bytesused;
+ s64 timestamp;
+};
+
+static const struct vpu_format venc_formats[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_NV12M,
+ .num_planes = 2,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ },
+ {
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ },
+ {0, 0, 0, 0},
+};
+
+static int venc_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, "amphion-vpu", sizeof(cap->driver));
+ strscpy(cap->card, "amphion vpu encoder", sizeof(cap->card));
+ strscpy(cap->bus_info, "platform: amphion-vpu", sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static int venc_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct vpu_inst *inst = to_inst(file);
+ const struct vpu_format *fmt;
+
+ memset(f->reserved, 0, sizeof(f->reserved));
+ fmt = vpu_helper_enum_format(inst, f->type, f->index);
+ if (!fmt)
+ return -EINVAL;
+
+ f->pixelformat = fmt->pixfmt;
+ f->flags = fmt->flags;
+
+ return 0;
+}
+
+static int venc_enum_framesizes(struct file *file, void *fh, struct v4l2_frmsizeenum *fsize)
+{
+ struct vpu_inst *inst = to_inst(file);
+ const struct vpu_core_resources *res;
+
+ if (!fsize || fsize->index)
+ return -EINVAL;
+
+ if (!vpu_helper_find_format(inst, 0, fsize->pixel_format))
+ return -EINVAL;
+
+ res = vpu_get_resource(inst);
+ if (!res)
+ return -EINVAL;
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.max_width = res->max_width;
+ fsize->stepwise.max_height = res->max_height;
+ fsize->stepwise.min_width = res->min_width;
+ fsize->stepwise.min_height = res->min_height;
+ fsize->stepwise.step_width = res->step_width;
+ fsize->stepwise.step_height = res->step_height;
+
+ return 0;
+}
+
+static int venc_enum_frameintervals(struct file *file, void *fh, struct v4l2_frmivalenum *fival)
+{
+ struct vpu_inst *inst = to_inst(file);
+ const struct vpu_core_resources *res;
+
+ if (!fival || fival->index)
+ return -EINVAL;
+
+ if (!vpu_helper_find_format(inst, 0, fival->pixel_format))
+ return -EINVAL;
+
+ if (!fival->width || !fival->height)
+ return -EINVAL;
+
+ res = vpu_get_resource(inst);
+ if (!res)
+ return -EINVAL;
+ if (fival->width < res->min_width || fival->width > res->max_width ||
+ fival->height < res->min_height || fival->height > res->max_height)
+ return -EINVAL;
+
+ fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
+ fival->stepwise.min.numerator = 1;
+ fival->stepwise.min.denominator = USHRT_MAX;
+ fival->stepwise.max.numerator = USHRT_MAX;
+ fival->stepwise.max.denominator = 1;
+ fival->stepwise.step.numerator = 1;
+ fival->stepwise.step.denominator = 1;
+
+ return 0;
+}
+
+static int venc_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_inst *inst = to_inst(file);
+ struct venc_t *venc = inst->priv;
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ struct vpu_format *cur_fmt;
+ int i;
+
+ cur_fmt = vpu_get_format(inst, f->type);
+
+ pixmp->pixelformat = cur_fmt->pixfmt;
+ pixmp->num_planes = cur_fmt->num_planes;
+ pixmp->width = cur_fmt->width;
+ pixmp->height = cur_fmt->height;
+ pixmp->field = cur_fmt->field;
+ pixmp->flags = cur_fmt->flags;
+ for (i = 0; i < pixmp->num_planes; i++) {
+ pixmp->plane_fmt[i].bytesperline = cur_fmt->bytesperline[i];
+ pixmp->plane_fmt[i].sizeimage = cur_fmt->sizeimage[i];
+ }
+
+ f->fmt.pix_mp.colorspace = venc->params.color.primaries;
+ f->fmt.pix_mp.xfer_func = venc->params.color.transfer;
+ f->fmt.pix_mp.ycbcr_enc = venc->params.color.matrix;
+ f->fmt.pix_mp.quantization = venc->params.color.full_range;
+
+ return 0;
+}
+
+static int venc_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_inst *inst = to_inst(file);
+
+ vpu_try_fmt_common(inst, f);
+
+ return 0;
+}
+
+static int venc_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_inst *inst = to_inst(file);
+ const struct vpu_format *fmt;
+ struct vpu_format *cur_fmt;
+ struct vb2_queue *q;
+ struct venc_t *venc = inst->priv;
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ int i;
+
+ q = v4l2_m2m_get_vq(inst->fh.m2m_ctx, f->type);
+ if (!q)
+ return -EINVAL;
+ if (vb2_is_busy(q))
+ return -EBUSY;
+
+ fmt = vpu_try_fmt_common(inst, f);
+ if (!fmt)
+ return -EINVAL;
+
+ cur_fmt = vpu_get_format(inst, f->type);
+
+ cur_fmt->pixfmt = fmt->pixfmt;
+ cur_fmt->num_planes = fmt->num_planes;
+ cur_fmt->flags = fmt->flags;
+ cur_fmt->width = pix_mp->width;
+ cur_fmt->height = pix_mp->height;
+ for (i = 0; i < fmt->num_planes; i++) {
+ cur_fmt->sizeimage[i] = pix_mp->plane_fmt[i].sizeimage;
+ cur_fmt->bytesperline[i] = pix_mp->plane_fmt[i].bytesperline;
+ }
+
+ if (pix_mp->field != V4L2_FIELD_ANY)
+ cur_fmt->field = pix_mp->field;
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+ venc->params.input_format = cur_fmt->pixfmt;
+ venc->params.src_stride = cur_fmt->bytesperline[0];
+ venc->params.src_width = cur_fmt->width;
+ venc->params.src_height = cur_fmt->height;
+ venc->params.crop.left = 0;
+ venc->params.crop.top = 0;
+ venc->params.crop.width = cur_fmt->width;
+ venc->params.crop.height = cur_fmt->height;
+ } else {
+ venc->params.codec_format = cur_fmt->pixfmt;
+ venc->params.out_width = cur_fmt->width;
+ venc->params.out_height = cur_fmt->height;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+ if (!vpu_color_check_primaries(pix_mp->colorspace)) {
+ venc->params.color.primaries = pix_mp->colorspace;
+ vpu_color_get_default(venc->params.color.primaries,
+ &venc->params.color.transfer,
+ &venc->params.color.matrix,
+ &venc->params.color.full_range);
+ }
+ if (!vpu_color_check_transfers(pix_mp->xfer_func))
+ venc->params.color.transfer = pix_mp->xfer_func;
+ if (!vpu_color_check_matrix(pix_mp->ycbcr_enc))
+ venc->params.color.matrix = pix_mp->ycbcr_enc;
+ if (!vpu_color_check_full_range(pix_mp->quantization))
+ venc->params.color.full_range = pix_mp->quantization;
+ }
+
+ pix_mp->colorspace = venc->params.color.primaries;
+ pix_mp->xfer_func = venc->params.color.transfer;
+ pix_mp->ycbcr_enc = venc->params.color.matrix;
+ pix_mp->quantization = venc->params.color.full_range;
+
+ return 0;
+}
+
+static int venc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *parm)
+{
+ struct vpu_inst *inst = to_inst(file);
+ struct venc_t *venc = inst->priv;
+ struct v4l2_fract *timeperframe = &parm->parm.capture.timeperframe;
+
+ if (!parm)
+ return -EINVAL;
+
+ if (!vpu_helper_check_type(inst, parm->type))
+ return -EINVAL;
+
+ parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ parm->parm.capture.readbuffers = 0;
+ timeperframe->numerator = venc->params.frame_rate.numerator;
+ timeperframe->denominator = venc->params.frame_rate.denominator;
+
+ return 0;
+}
+
+static int venc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *parm)
+{
+ struct vpu_inst *inst = to_inst(file);
+ struct venc_t *venc = inst->priv;
+ struct v4l2_fract *timeperframe = &parm->parm.capture.timeperframe;
+ unsigned long n, d;
+
+ if (!parm)
+ return -EINVAL;
+
+ if (!vpu_helper_check_type(inst, parm->type))
+ return -EINVAL;
+
+ if (!timeperframe->numerator)
+ timeperframe->numerator = venc->params.frame_rate.numerator;
+ if (!timeperframe->denominator)
+ timeperframe->denominator = venc->params.frame_rate.denominator;
+
+ venc->params.frame_rate.numerator = timeperframe->numerator;
+ venc->params.frame_rate.denominator = timeperframe->denominator;
+
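+ /*
+  * Bounding the approximation by the values themselves cannot change the
+  * ratio; it only reduces the fraction to lowest terms.
+  */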
+ rational_best_approximation(venc->params.frame_rate.numerator,
+ venc->params.frame_rate.denominator,
+ venc->params.frame_rate.numerator,
+ venc->params.frame_rate.denominator,
+ &n, &d);
+ venc->params.frame_rate.numerator = n;
+ venc->params.frame_rate.denominator = d;
+
+ parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ memset(parm->parm.capture.reserved, 0, sizeof(parm->parm.capture.reserved));
+
+ return 0;
+}
+
+static int venc_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vpu_inst *inst = to_inst(file);
+ struct venc_t *venc = inst->priv;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = inst->out_format.width;
+ s->r.height = inst->out_format.height;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ s->r = venc->params.crop;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int venc_valid_crop(struct venc_t *venc, const struct vpu_core_resources *res)
+{
+ struct v4l2_rect *rect = NULL;
+ u32 min_width;
+ u32 min_height;
+ u32 src_width;
+ u32 src_height;
+
+ rect = &venc->params.crop;
+ min_width = res->min_width;
+ min_height = res->min_height;
+ src_width = venc->params.src_width;
+ src_height = venc->params.src_height;
+
+ if (rect->width == 0 || rect->height == 0)
+ return -EINVAL;
+ if (rect->left > src_width - min_width || rect->top > src_height - min_height)
+ return -EINVAL;
+
+ rect->width = min(rect->width, src_width - rect->left);
+ rect->width = max_t(u32, rect->width, min_width);
+
+ rect->height = min(rect->height, src_height - rect->top);
+ rect->height = max_t(u32, rect->height, min_height);
+
+ return 0;
+}
+
+static int venc_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vpu_inst *inst = to_inst(file);
+ const struct vpu_core_resources *res;
+ struct venc_t *venc = inst->priv;
+
+ res = vpu_get_resource(inst);
+ if (!res)
+ return -EINVAL;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+ if (s->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ venc->params.crop.left = ALIGN(s->r.left, res->step_width);
+ venc->params.crop.top = ALIGN(s->r.top, res->step_height);
+ venc->params.crop.width = ALIGN(s->r.width, res->step_width);
+ venc->params.crop.height = ALIGN(s->r.height, res->step_height);
+ if (venc_valid_crop(venc, res)) {
+ venc->params.crop.left = 0;
+ venc->params.crop.top = 0;
+ venc->params.crop.width = venc->params.src_width;
+ venc->params.crop.height = venc->params.src_height;
+ }
+
+ inst->crop = venc->params.crop;
+
+ return 0;
+}
+
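+/*
+ * Once a stop has been requested and the last queued OUTPUT buffer has
+ * been consumed, ask the firmware to stop so that it flushes out the
+ * remaining encoded frames.
+ */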
+static int venc_drain(struct vpu_inst *inst)
+{
+ struct venc_t *venc = inst->priv;
+ int ret;
+
+ if (!inst->fh.m2m_ctx)
+ return 0;
+
+ if (inst->state != VPU_CODEC_STATE_DRAIN)
+ return 0;
+
+ if (v4l2_m2m_num_src_bufs_ready(inst->fh.m2m_ctx))
+ return 0;
+
+ if (!venc->input_ready)
+ return 0;
+
+ venc->input_ready = false;
+ vpu_trace(inst->dev, "[%d]\n", inst->id);
+ ret = vpu_session_stop(inst);
+ if (ret)
+ return ret;
+ inst->state = VPU_CODEC_STATE_STOP;
+ wake_up_all(&venc->wq);
+
+ return 0;
+}
+
+static int venc_request_eos(struct vpu_inst *inst)
+{
+ inst->state = VPU_CODEC_STATE_DRAIN;
+ venc_drain(inst);
+
+ return 0;
+}
+
+static int venc_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *cmd)
+{
+ struct vpu_inst *inst = to_inst(file);
+ int ret;
+
+ ret = v4l2_m2m_ioctl_try_encoder_cmd(file, fh, cmd);
+ if (ret)
+ return ret;
+
+ vpu_inst_lock(inst);
+ if (cmd->cmd == V4L2_ENC_CMD_STOP) {
+ if (inst->state == VPU_CODEC_STATE_DEINIT)
+ vpu_set_last_buffer_dequeued(inst);
+ else
+ venc_request_eos(inst);
+ }
+ vpu_inst_unlock(inst);
+
+ return 0;
+}
+
+static int venc_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct v4l2_ioctl_ops venc_ioctl_ops = {
+ .vidioc_querycap = venc_querycap,
+ .vidioc_enum_fmt_vid_cap = venc_enum_fmt,
+ .vidioc_enum_fmt_vid_out = venc_enum_fmt,
+ .vidioc_enum_framesizes = venc_enum_framesizes,
+ .vidioc_enum_frameintervals = venc_enum_frameintervals,
+ .vidioc_g_fmt_vid_cap_mplane = venc_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = venc_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = venc_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = venc_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = venc_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = venc_s_fmt,
+ .vidioc_g_parm = venc_g_parm,
+ .vidioc_s_parm = venc_s_parm,
+ .vidioc_g_selection = venc_g_selection,
+ .vidioc_s_selection = venc_s_selection,
+ .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd,
+ .vidioc_encoder_cmd = venc_encoder_cmd,
+ .vidioc_subscribe_event = venc_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+};
+
+static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vpu_inst *inst = ctrl_to_inst(ctrl);
+ struct venc_t *venc = inst->priv;
+ int ret = 0;
+
+ vpu_inst_lock(inst);
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ venc->params.profile = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ venc->params.level = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+ venc->params.rc_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ venc->params.rc_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ if (ctrl->val != venc->params.bitrate)
+ venc->bitrate_change = true;
+ venc->params.bitrate = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
+ venc->params.bitrate_max = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ venc->params.gop_length = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ venc->params.bframes = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
+ venc->params.i_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
+ venc->params.p_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:
+ venc->params.b_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
+ venc->request_key_frame = 1;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE:
+ venc->cpb_size = ctrl->val * 1024;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
+ venc->params.sar.enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
+ venc->params.sar.idc = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH:
+ venc->params.sar.width = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT:
+ venc->params.sar.height = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ vpu_inst_unlock(inst);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops venc_ctrl_ops = {
+ .s_ctrl = venc_op_s_ctrl,
+ .g_volatile_ctrl = vpu_helper_g_volatile_ctrl,
+};
+
+static int venc_ctrl_init(struct vpu_inst *inst)
+{
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 20);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)),
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ V4L2_MPEG_VIDEO_H264_LEVEL_5_1,
+ 0x0,
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE, 0, 1, 1, 1);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
+ ~((1 << V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) |
+ (1 << V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)),
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE,
+ BITRATE_MIN,
+ BITRATE_MAX,
+ BITRATE_STEP,
+ BITRATE_DEFAULT);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
+ BITRATE_MIN, BITRATE_MAX,
+ BITRATE_STEP,
+ BITRATE_DEFAULT_PEAK);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0, (1 << 16) - 1, 1, 30);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_B_FRAMES, 0, 4, 1, 0);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, 1, 51, 1, 26);
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, 1, 51, 1, 28);
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP, 1, 51, 1, 30);
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME, 0, 0, 0, 0);
+ ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1, 2);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_OUTPUT, 1, 32, 1, 2);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE, 64, 10240, 1, 1024);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE, 0, 1, 1, 1);
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED,
+ 0x0,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1);
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH,
+ 0, USHRT_MAX, 1, 1);
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT,
+ 0, USHRT_MAX, 1, 1);
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEADER_MODE,
+ V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ ~(1 << V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME),
+ V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME);
+
+ ret = v4l2_ctrl_handler_setup(&inst->ctrl_handler);
+ if (ret) {
+ dev_err(inst->dev, "[%d] setup ctrls fail, ret = %d\n", inst->id, ret);
+ v4l2_ctrl_handler_free(&inst->ctrl_handler);
+ return ret;
+ }
+
+ return 0;
+}
+
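+/*
+ * An OUTPUT buffer may be fed only when the firmware has consumed the
+ * previous input and the stream buffer has at least cpb_size bytes free;
+ * a CAPTURE buffer is ready as soon as an encoded frame is pending.
+ */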
+static bool venc_check_ready(struct vpu_inst *inst, unsigned int type)
+{
+ struct venc_t *venc = inst->priv;
+
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+ if (vpu_helper_get_free_space(inst) < venc->cpb_size)
+ return false;
+ return venc->input_ready;
+ }
+
+ if (list_empty(&venc->frames))
+ return false;
+ return true;
+}
+
+static u32 venc_get_enable_mask(u32 type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return VENC_OUTPUT_ENABLE;
+ else
+ return VENC_CAPTURE_ENABLE;
+}
+
+static void venc_set_enable(struct venc_t *venc, u32 type, int enable)
+{
+ u32 mask = venc_get_enable_mask(type);
+
+ if (enable)
+ venc->enable |= mask;
+ else
+ venc->enable &= ~mask;
+}
+
+static u32 venc_get_enable(struct venc_t *venc, u32 type)
+{
+ return venc->enable & venc_get_enable_mask(type);
+}
+
+static void venc_input_done(struct vpu_inst *inst)
+{
+ struct venc_t *venc = inst->priv;
+
+ vpu_inst_lock(inst);
+ venc->input_ready = true;
+ vpu_process_output_buffer(inst);
+ if (inst->state == VPU_CODEC_STATE_DRAIN)
+ venc_drain(inst);
+ vpu_inst_unlock(inst);
+}
+
+/*
+ * It's hardware limitation, that there may be several bytes
+ * redundant data at the beginning of frame.
+ * For android platform, the redundant data may cause cts test fail
+ * So driver will strip them
+ */
+static int venc_precheck_encoded_frame(struct vpu_inst *inst, struct venc_frame_t *frame)
+{
+ struct venc_t *venc;
+ int skipped;
+
+ if (!frame || !frame->bytesused)
+ return -EINVAL;
+
+ venc = inst->priv;
+ skipped = vpu_helper_find_startcode(&inst->stream_buffer,
+ inst->cap_format.pixfmt,
+ frame->info.wptr - inst->stream_buffer.phys,
+ frame->bytesused);
+ if (skipped > 0) {
+ frame->bytesused -= skipped;
+ frame->info.wptr = vpu_helper_step_walk(&inst->stream_buffer,
+ frame->info.wptr, skipped);
+ venc->skipped_bytes += skipped;
+ venc->skipped_count++;
+ }
+
+ return 0;
+}
+
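+/*
+ * Copy one encoded frame out of the stream ring buffer into the given
+ * capture buffer and complete it. Returns -EAGAIN when no capture buffer
+ * is available, so the frame stays queued for a later attempt.
+ */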
+static int venc_get_one_encoded_frame(struct vpu_inst *inst,
+ struct venc_frame_t *frame,
+ struct vb2_v4l2_buffer *vbuf)
+{
+ struct venc_t *venc = inst->priv;
+
+ if (!vbuf)
+ return -EAGAIN;
+
+ if (!venc_get_enable(venc, vbuf->vb2_buf.type)) {
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ return 0;
+ }
+ if (frame->bytesused > vbuf->vb2_buf.planes[0].length) {
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ return -ENOMEM;
+ }
+
+ venc_precheck_encoded_frame(inst, frame);
+
+ if (frame->bytesused) {
+ u32 rptr = frame->info.wptr;
+ void *dst = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
+
+ vpu_helper_copy_from_stream_buffer(&inst->stream_buffer,
+ &rptr, frame->bytesused, dst);
+ vpu_iface_update_stream_buffer(inst, rptr, 0);
+ }
+ vb2_set_plane_payload(&vbuf->vb2_buf, 0, frame->bytesused);
+ vbuf->sequence = frame->info.frame_id;
+ vbuf->vb2_buf.timestamp = frame->info.timestamp;
+ vbuf->field = inst->cap_format.field;
+ vbuf->flags |= frame->info.pic_type;
+ vpu_set_buffer_state(vbuf, VPU_BUF_STATE_IDLE);
+ dev_dbg(inst->dev, "[%d][OUTPUT TS]%32lld\n", inst->id, frame->info.timestamp);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
+ venc->ready_count++;
+
+ if (vbuf->flags & V4L2_BUF_FLAG_KEYFRAME)
+ dev_dbg(inst->dev, "[%d][%d]key frame\n", inst->id, frame->info.frame_id);
+
+ return 0;
+}
+
+static int venc_get_encoded_frames(struct vpu_inst *inst)
+{
+ struct venc_t *venc;
+ struct venc_frame_t *frame;
+ struct venc_frame_t *tmp;
+
+ if (!inst->fh.m2m_ctx)
+ return 0;
+ venc = inst->priv;
+ list_for_each_entry_safe(frame, tmp, &venc->frames, list) {
+ if (venc_get_one_encoded_frame(inst, frame,
+ v4l2_m2m_dst_buf_remove(inst->fh.m2m_ctx)))
+ break;
+ list_del_init(&frame->list);
+ vfree(frame);
+ }
+
+ return 0;
+}
+
+static int venc_frame_encoded(struct vpu_inst *inst, void *arg)
+{
+ struct vpu_enc_pic_info *info = arg;
+ struct venc_frame_t *frame;
+ struct venc_t *venc;
+ int ret = 0;
+
+ if (!info)
+ return -EINVAL;
+ venc = inst->priv;
+ frame = vzalloc(sizeof(*frame));
+ if (!frame)
+ return -ENOMEM;
+
+ memcpy(&frame->info, info, sizeof(frame->info));
+ frame->bytesused = info->frame_size;
+
+ vpu_inst_lock(inst);
+ list_add_tail(&frame->list, &venc->frames);
+ venc->encode_count++;
+ venc_get_encoded_frames(inst);
+ vpu_inst_unlock(inst);
+
+ return ret;
+}
+
+static void venc_buf_done(struct vpu_inst *inst, struct vpu_frame_info *frame)
+{
+ struct vb2_v4l2_buffer *vbuf;
+
+ if (!inst->fh.m2m_ctx)
+ return;
+
+ vpu_inst_lock(inst);
+ if (!venc_get_enable(inst->priv, frame->type))
+ goto exit;
+ vbuf = vpu_find_buf_by_sequence(inst, frame->type, frame->sequence);
+ if (!vbuf) {
+ dev_err(inst->dev, "[%d] can't find buf: type %d, sequence %d\n",
+ inst->id, frame->type, frame->sequence);
+ goto exit;
+ }
+
+ vpu_set_buffer_state(vbuf, VPU_BUF_STATE_IDLE);
+ if (V4L2_TYPE_IS_OUTPUT(frame->type))
+ v4l2_m2m_src_buf_remove_by_buf(inst->fh.m2m_ctx, vbuf);
+ else
+ v4l2_m2m_dst_buf_remove_by_buf(inst->fh.m2m_ctx, vbuf);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
+exit:
+ vpu_inst_unlock(inst);
+}
+
+static void venc_set_last_buffer_dequeued(struct vpu_inst *inst)
+{
+ struct venc_t *venc = inst->priv;
+
+ if (venc->stopped && list_empty(&venc->frames))
+ vpu_set_last_buffer_dequeued(inst);
+}
+
+static void venc_stop_done(struct vpu_inst *inst)
+{
+ struct venc_t *venc = inst->priv;
+
+ vpu_inst_lock(inst);
+ venc->stopped = true;
+ venc_set_last_buffer_dequeued(inst);
+ vpu_inst_unlock(inst);
+
+ wake_up_all(&venc->wq);
+}
+
+static void venc_event_notify(struct vpu_inst *inst, u32 event, void *data)
+{
+}
+
+static void venc_release(struct vpu_inst *inst)
+{
+}
+
+static void venc_cleanup(struct vpu_inst *inst)
+{
+ struct venc_t *venc;
+
+ if (!inst)
+ return;
+
+ venc = inst->priv;
+ vfree(venc);
+ inst->priv = NULL;
+ vfree(inst);
+}
+
+static int venc_start_session(struct vpu_inst *inst, u32 type)
+{
+ struct venc_t *venc = inst->priv;
+ int stream_buffer_size;
+ int ret;
+
+ venc_set_enable(venc, type, 1);
+ if ((venc->enable & VENC_ENABLE_MASK) != VENC_ENABLE_MASK)
+ return 0;
+
+ vpu_iface_init_instance(inst);
+ stream_buffer_size = vpu_iface_get_stream_buffer_size(inst->core);
+ if (stream_buffer_size > 0) {
+ inst->stream_buffer.length = max_t(u32, stream_buffer_size, venc->cpb_size * 3);
+ ret = vpu_alloc_dma(inst->core, &inst->stream_buffer);
+ if (ret)
+ goto error;
+
+ inst->use_stream_buffer = true;
+ vpu_iface_config_stream_buffer(inst, &inst->stream_buffer);
+ }
+
+ ret = vpu_iface_set_encode_params(inst, &venc->params, 0);
+ if (ret)
+ goto error;
+ ret = vpu_session_configure_codec(inst);
+ if (ret)
+ goto error;
+
+ inst->state = VPU_CODEC_STATE_CONFIGURED;
+ /* vpu_iface_config_memory_resource */
+
+ /* configure encoder expert mode parameters */
+ ret = vpu_iface_set_encode_params(inst, &venc->params, 1);
+ if (ret)
+ goto error;
+
+ ret = vpu_session_start(inst);
+ if (ret)
+ goto error;
+ inst->state = VPU_CODEC_STATE_STARTED;
+
+ venc->bitrate_change = false;
+ venc->input_ready = true;
+ venc->frame_count = 0;
+ venc->encode_count = 0;
+ venc->ready_count = 0;
+ venc->stopped = false;
+ vpu_process_output_buffer(inst);
+ if (venc->frame_count == 0)
+ dev_err(inst->dev, "[%d] there is no input when starting\n", inst->id);
+
+ return 0;
+error:
+ venc_set_enable(venc, type, 0);
+ inst->state = VPU_CODEC_STATE_DEINIT;
+
+ vpu_free_dma(&inst->stream_buffer);
+ return ret;
+}
+
+static void venc_cleanup_mem_resource(struct vpu_inst *inst)
+{
+ struct venc_t *venc;
+ u32 i;
+
+ venc = inst->priv;
+
+ for (i = 0; i < ARRAY_SIZE(venc->enc); i++)
+ vpu_free_dma(&venc->enc[i]);
+ for (i = 0; i < ARRAY_SIZE(venc->ref); i++)
+ vpu_free_dma(&venc->ref[i]);
+}
+
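+/*
+ * Firmware callback requesting the encoder's working memory: pools of enc
+ * and ref frame buffers are allocated here, while the single act buffer is
+ * expected to be the one pre-allocated in inst->act.
+ */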
+static void venc_request_mem_resource(struct vpu_inst *inst,
+ u32 enc_frame_size,
+ u32 enc_frame_num,
+ u32 ref_frame_size,
+ u32 ref_frame_num,
+ u32 act_frame_size,
+ u32 act_frame_num)
+{
+ struct venc_t *venc;
+ u32 i;
+ int ret;
+
+ venc = inst->priv;
+ if (enc_frame_num > ARRAY_SIZE(venc->enc)) {
+ dev_err(inst->dev, "[%d] enc num(%d) is out of range\n", inst->id, enc_frame_num);
+ return;
+ }
+ if (ref_frame_num > ARRAY_SIZE(venc->ref)) {
+ dev_err(inst->dev, "[%d] ref num(%d) is out of range\n", inst->id, ref_frame_num);
+ return;
+ }
+ if (act_frame_num > ARRAY_SIZE(venc->act)) {
+ dev_err(inst->dev, "[%d] act num(%d) is out of range\n", inst->id, act_frame_num);
+ return;
+ }
+
+ for (i = 0; i < enc_frame_num; i++) {
+ venc->enc[i].length = enc_frame_size;
+ ret = vpu_alloc_dma(inst->core, &venc->enc[i]);
+ if (ret) {
+ venc_cleanup_mem_resource(inst);
+ return;
+ }
+ }
+ for (i = 0; i < ref_frame_num; i++) {
+ venc->ref[i].length = ref_frame_size;
+ ret = vpu_alloc_dma(inst->core, &venc->ref[i]);
+ if (ret) {
+ venc_cleanup_mem_resource(inst);
+ return;
+ }
+ }
+ if (act_frame_num != 1 || act_frame_size > inst->act.length) {
+ venc_cleanup_mem_resource(inst);
+ return;
+ }
+ venc->act[0].length = act_frame_size;
+ venc->act[0].phys = inst->act.phys;
+ venc->act[0].virt = inst->act.virt;
+
+ for (i = 0; i < enc_frame_num; i++)
+ vpu_iface_config_memory_resource(inst, MEM_RES_ENC, i, &venc->enc[i]);
+ for (i = 0; i < ref_frame_num; i++)
+ vpu_iface_config_memory_resource(inst, MEM_RES_REF, i, &venc->ref[i]);
+ for (i = 0; i < act_frame_num; i++)
+ vpu_iface_config_memory_resource(inst, MEM_RES_ACT, i, &venc->act[i]);
+}
+
+static void venc_cleanup_frames(struct venc_t *venc)
+{
+ struct venc_frame_t *frame;
+ struct venc_frame_t *tmp;
+
+ list_for_each_entry_safe(frame, tmp, &venc->frames, list) {
+ list_del_init(&frame->list);
+ vfree(frame);
+ }
+}
+
+static int venc_stop_session(struct vpu_inst *inst, u32 type)
+{
+ struct venc_t *venc = inst->priv;
+
+ venc_set_enable(venc, type, 0);
+ if (venc->enable & VENC_ENABLE_MASK)
+ return 0;
+
+ if (inst->state == VPU_CODEC_STATE_DEINIT)
+ return 0;
+
+ if (inst->state != VPU_CODEC_STATE_STOP)
+ venc_request_eos(inst);
+
+ call_void_vop(inst, wait_prepare);
+ if (!wait_event_timeout(venc->wq, venc->stopped, VPU_TIMEOUT)) {
+ set_bit(inst->id, &inst->core->hang_mask);
+ vpu_session_debug(inst);
+ }
+ call_void_vop(inst, wait_finish);
+
+ inst->state = VPU_CODEC_STATE_DEINIT;
+ venc_cleanup_frames(venc);
+ vpu_free_dma(&inst->stream_buffer);
+ venc_cleanup_mem_resource(inst);
+
+ return 0;
+}
+
+static int venc_process_output(struct vpu_inst *inst, struct vb2_buffer *vb)
+{
+ struct venc_t *venc = inst->priv;
+ struct vb2_v4l2_buffer *vbuf;
+ u32 flags;
+
+ if (inst->state == VPU_CODEC_STATE_DEINIT)
+ return -EINVAL;
+
+ vbuf = to_vb2_v4l2_buffer(vb);
+ if (inst->state == VPU_CODEC_STATE_STARTED)
+ inst->state = VPU_CODEC_STATE_ACTIVE;
+
+ flags = vbuf->flags;
+ if (venc->request_key_frame) {
+ vbuf->flags |= V4L2_BUF_FLAG_KEYFRAME;
+ venc->request_key_frame = 0;
+ }
+ if (venc->bitrate_change) {
+ vpu_session_update_parameters(inst, &venc->params);
+ venc->bitrate_change = false;
+ }
+ dev_dbg(inst->dev, "[%d][INPUT TS]%32lld\n", inst->id, vb->timestamp);
+ vpu_iface_input_frame(inst, vb);
+ vbuf->flags = flags;
+ venc->input_ready = false;
+ venc->frame_count++;
+ vpu_set_buffer_state(vbuf, VPU_BUF_STATE_INUSE);
+
+ return 0;
+}
+
+static int venc_process_capture(struct vpu_inst *inst, struct vb2_buffer *vb)
+{
+ struct venc_t *venc;
+ struct venc_frame_t *frame = NULL;
+ struct vb2_v4l2_buffer *vbuf;
+ int ret;
+
+ venc = inst->priv;
+ if (list_empty(&venc->frames))
+ return -EINVAL;
+
+ frame = list_first_entry(&venc->frames, struct venc_frame_t, list);
+ vbuf = to_vb2_v4l2_buffer(vb);
+ v4l2_m2m_dst_buf_remove_by_buf(inst->fh.m2m_ctx, vbuf);
+ ret = venc_get_one_encoded_frame(inst, frame, vbuf);
+ if (ret)
+ return ret;
+
+ list_del_init(&frame->list);
+ vfree(frame);
+ return 0;
+}
+
+static void venc_on_queue_empty(struct vpu_inst *inst, u32 type)
+{
+ struct venc_t *venc = inst->priv;
+
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return;
+
+ if (venc->stopped)
+ venc_set_last_buffer_dequeued(inst);
+}
+
+static int venc_get_debug_info(struct vpu_inst *inst, char *str, u32 size, u32 i)
+{
+ struct venc_t *venc = inst->priv;
+ int num = -1;
+
+ switch (i) {
+ case 0:
+ num = scnprintf(str, size, "profile = %d\n", venc->params.profile);
+ break;
+ case 1:
+ num = scnprintf(str, size, "level = %d\n", venc->params.level);
+ break;
+ case 2:
+ num = scnprintf(str, size, "fps = %d/%d\n",
+ venc->params.frame_rate.numerator,
+ venc->params.frame_rate.denominator);
+ break;
+ case 3:
+ num = scnprintf(str, size, "%d x %d -> %d x %d\n",
+ venc->params.src_width,
+ venc->params.src_height,
+ venc->params.out_width,
+ venc->params.out_height);
+ break;
+ case 4:
+ num = scnprintf(str, size, "(%d, %d) %d x %d\n",
+ venc->params.crop.left,
+ venc->params.crop.top,
+ venc->params.crop.width,
+ venc->params.crop.height);
+ break;
+ case 5:
+ num = scnprintf(str, size,
+ "enable = 0x%x, input = %d, encode = %d, ready = %d, stopped = %d\n",
+ venc->enable,
+ venc->frame_count, venc->encode_count,
+ venc->ready_count,
+ venc->stopped);
+ break;
+ case 6:
+ num = scnprintf(str, size, "gop = %d\n", venc->params.gop_length);
+ break;
+ case 7:
+ num = scnprintf(str, size, "bframes = %d\n", venc->params.bframes);
+ break;
+ case 8:
+ num = scnprintf(str, size, "rc: %s, mode = %d, bitrate = %d(%d), qp = %d\n",
+ venc->params.rc_enable ? "enable" : "disable",
+ venc->params.rc_mode,
+ venc->params.bitrate,
+ venc->params.bitrate_max,
+ venc->params.i_frame_qp);
+ break;
+ case 9:
+ num = scnprintf(str, size, "sar: enable = %d, idc = %d, %d x %d\n",
+ venc->params.sar.enable,
+ venc->params.sar.idc,
+ venc->params.sar.width,
+ venc->params.sar.height);
+
+ break;
+ case 10:
+ num = scnprintf(str, size,
+ "colorspace: primaries = %d, transfer = %d, matrix = %d, full_range = %d\n",
+ venc->params.color.primaries,
+ venc->params.color.transfer,
+ venc->params.color.matrix,
+ venc->params.color.full_range);
+ break;
+ case 11:
+ num = scnprintf(str, size, "skipped: count = %d, bytes = %d\n",
+ venc->skipped_count, venc->skipped_bytes);
+ break;
+ default:
+ break;
+ }
+
+ return num;
+}
+
+static struct vpu_inst_ops venc_inst_ops = {
+ .ctrl_init = venc_ctrl_init,
+ .check_ready = venc_check_ready,
+ .input_done = venc_input_done,
+ .get_one_frame = venc_frame_encoded,
+ .buf_done = venc_buf_done,
+ .stop_done = venc_stop_done,
+ .event_notify = venc_event_notify,
+ .release = venc_release,
+ .cleanup = venc_cleanup,
+ .start = venc_start_session,
+ .mem_request = venc_request_mem_resource,
+ .stop = venc_stop_session,
+ .process_output = venc_process_output,
+ .process_capture = venc_process_capture,
+ .on_queue_empty = venc_on_queue_empty,
+ .get_debug_info = venc_get_debug_info,
+ .wait_prepare = vpu_inst_unlock,
+ .wait_finish = vpu_inst_lock,
+};
+
+static void venc_init(struct file *file)
+{
+ struct vpu_inst *inst = to_inst(file);
+ struct venc_t *venc;
+ struct v4l2_format f;
+ struct v4l2_streamparm parm;
+
+ venc = inst->priv;
+ venc->params.qp_min = 1;
+ venc->params.qp_max = 51;
+ venc->params.qp_min_i = 1;
+ venc->params.qp_max_i = 51;
+ venc->params.bitrate_min = BITRATE_MIN;
+
+ memset(&f, 0, sizeof(f));
+ f.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ f.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12M;
+ f.fmt.pix_mp.width = 1280;
+ f.fmt.pix_mp.height = 720;
+ f.fmt.pix_mp.field = V4L2_FIELD_NONE;
+ f.fmt.pix_mp.colorspace = V4L2_COLORSPACE_REC709;
+ venc_s_fmt(file, &inst->fh, &f);
+
+ memset(&f, 0, sizeof(f));
+ f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ f.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_H264;
+ f.fmt.pix_mp.width = 1280;
+ f.fmt.pix_mp.height = 720;
+ f.fmt.pix_mp.field = V4L2_FIELD_NONE;
+ venc_s_fmt(file, &inst->fh, &f);
+
+ memset(&parm, 0, sizeof(parm));
+ parm.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ parm.parm.capture.timeperframe.numerator = 1;
+ parm.parm.capture.timeperframe.denominator = 30;
+ venc_s_parm(file, &inst->fh, &parm);
+}
+
+static int venc_open(struct file *file)
+{
+ struct vpu_inst *inst;
+ struct venc_t *venc;
+ int ret;
+
+ inst = vzalloc(sizeof(*inst));
+ if (!inst)
+ return -ENOMEM;
+
+ venc = vzalloc(sizeof(*venc));
+ if (!venc) {
+ vfree(inst);
+ return -ENOMEM;
+ }
+
+ inst->ops = &venc_inst_ops;
+ inst->formats = venc_formats;
+ inst->type = VPU_CORE_TYPE_ENC;
+ inst->priv = venc;
+ INIT_LIST_HEAD(&venc->frames);
+ init_waitqueue_head(&venc->wq);
+
+ ret = vpu_v4l2_open(file, inst);
+ if (ret)
+ return ret;
+
+ venc_init(file);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations venc_fops = {
+ .owner = THIS_MODULE,
+ .open = venc_open,
+ .release = vpu_v4l2_close,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = v4l2_m2m_fop_poll,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+const struct v4l2_ioctl_ops *venc_get_ioctl_ops(void)
+{
+ return &venc_ioctl_ops;
+}
+
+const struct v4l2_file_operations *venc_get_fops(void)
+{
+ return &venc_fops;
+}
diff --git a/drivers/media/platform/amphion/vpu.h b/drivers/media/platform/amphion/vpu.h
new file mode 100644
index 000000000000..e56b96a7e5d3
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu.h
@@ -0,0 +1,362 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#ifndef _AMPHION_VPU_H
+#define _AMPHION_VPU_H
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-mem2mem.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
+#include <linux/kfifo.h>
+
+#define VPU_TIMEOUT msecs_to_jiffies(1000)
+#define VPU_INST_NULL_ID (-1L)
+#define VPU_MSG_BUFFER_SIZE (8192)
+
+enum imx_plat_type {
+ IMX8QXP = 0,
+ IMX8QM = 1,
+ IMX8DM,
+ IMX8DX,
+ PLAT_TYPE_RESERVED
+};
+
+enum vpu_core_type {
+ VPU_CORE_TYPE_ENC = 0,
+ VPU_CORE_TYPE_DEC = 0x10,
+};
+
+struct vpu_dev;
+struct vpu_resources {
+ enum imx_plat_type plat_type;
+ u32 mreg_base;
+ int (*setup)(struct vpu_dev *vpu);
+ int (*setup_encoder)(struct vpu_dev *vpu);
+ int (*setup_decoder)(struct vpu_dev *vpu);
+ int (*reset)(struct vpu_dev *vpu);
+};
+
+struct vpu_buffer {
+ void *virt;
+ dma_addr_t phys;
+ u32 length;
+ u32 bytesused;
+ struct device *dev;
+};
+
+struct vpu_func {
+ struct video_device *vfd;
+ struct v4l2_m2m_dev *m2m_dev;
+ enum vpu_core_type type;
+ int function;
+};
+
+struct vpu_dev {
+ void __iomem *base;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct mutex lock; /* protect vpu device */
+ const struct vpu_resources *res;
+ struct list_head cores;
+
+ struct v4l2_device v4l2_dev;
+ struct vpu_func encoder;
+ struct vpu_func decoder;
+ struct media_device mdev;
+
+ struct delayed_work watchdog_work;
+ void (*get_vpu)(struct vpu_dev *vpu);
+ void (*put_vpu)(struct vpu_dev *vpu);
+ void (*get_enc)(struct vpu_dev *vpu);
+ void (*put_enc)(struct vpu_dev *vpu);
+ void (*get_dec)(struct vpu_dev *vpu);
+ void (*put_dec)(struct vpu_dev *vpu);
+ atomic_t ref_vpu;
+ atomic_t ref_enc;
+ atomic_t ref_dec;
+
+ struct dentry *debugfs;
+};
+
+struct vpu_format {
+ u32 pixfmt;
+ unsigned int num_planes;
+ u32 type;
+ u32 flags;
+ u32 width;
+ u32 height;
+ u32 sizeimage[VIDEO_MAX_PLANES];
+ u32 bytesperline[VIDEO_MAX_PLANES];
+ u32 field;
+};
+
+struct vpu_core_resources {
+ enum vpu_core_type type;
+ const char *fwname;
+ u32 stride;
+ u32 max_width;
+ u32 min_width;
+ u32 step_width;
+ u32 max_height;
+ u32 min_height;
+ u32 step_height;
+ u32 rpc_size;
+ u32 fwlog_size;
+ u32 act_size;
+};
+
+struct vpu_mbox {
+ char name[20];
+ struct mbox_client cl;
+ struct mbox_chan *ch;
+ bool block;
+};
+
+enum vpu_core_state {
+ VPU_CORE_DEINIT = 0,
+ VPU_CORE_ACTIVE,
+ VPU_CORE_SNAPSHOT,
+ VPU_CORE_HANG
+};
+
+struct vpu_core {
+ void __iomem *base;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct device *parent;
+ struct device *pd;
+ struct device_link *pd_link;
+ struct mutex lock; /* protect vpu core */
+ struct mutex cmd_lock; /* Lock vpu command */
+ struct list_head list;
+ enum vpu_core_type type;
+ int id;
+ const struct vpu_core_resources *res;
+ unsigned long instance_mask;
+ u32 supported_instance_count;
+ unsigned long hang_mask;
+ u32 request_count;
+ struct list_head instances;
+ enum vpu_core_state state;
+ u32 fw_version;
+
+ struct vpu_buffer fw;
+ struct vpu_buffer rpc;
+ struct vpu_buffer log;
+ struct vpu_buffer act;
+
+ struct vpu_mbox tx_type;
+ struct vpu_mbox tx_data;
+ struct vpu_mbox rx;
+ unsigned long cmd_seq;
+
+ wait_queue_head_t ack_wq;
+ struct completion cmp;
+ struct workqueue_struct *workqueue;
+ struct work_struct msg_work;
+ struct delayed_work msg_delayed_work;
+ struct kfifo msg_fifo;
+ void *msg_buffer;
+ unsigned int msg_buffer_size;
+
+ struct vpu_dev *vpu;
+ void *iface;
+
+ struct dentry *debugfs;
+ struct dentry *debugfs_fwlog;
+};
+
+enum vpu_codec_state {
+ VPU_CODEC_STATE_DEINIT = 1,
+ VPU_CODEC_STATE_CONFIGURED,
+ VPU_CODEC_STATE_START,
+ VPU_CODEC_STATE_STARTED,
+ VPU_CODEC_STATE_ACTIVE,
+ VPU_CODEC_STATE_SEEK,
+ VPU_CODEC_STATE_STOP,
+ VPU_CODEC_STATE_DRAIN,
+ VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE,
+};
+
+struct vpu_frame_info {
+ u32 type;
+ u32 id;
+ u32 sequence;
+ u32 luma;
+ u32 chroma_u;
+ u32 chroma_v;
+ u32 data_offset;
+ u32 flags;
+ u32 skipped;
+ s64 timestamp;
+};
+
+struct vpu_inst;
+struct vpu_inst_ops {
+ int (*ctrl_init)(struct vpu_inst *inst);
+ int (*start)(struct vpu_inst *inst, u32 type);
+ int (*stop)(struct vpu_inst *inst, u32 type);
+ int (*abort)(struct vpu_inst *inst);
+ bool (*check_ready)(struct vpu_inst *inst, unsigned int type);
+ void (*buf_done)(struct vpu_inst *inst, struct vpu_frame_info *frame);
+ void (*event_notify)(struct vpu_inst *inst, u32 event, void *data);
+ void (*release)(struct vpu_inst *inst);
+ void (*cleanup)(struct vpu_inst *inst);
+ void (*mem_request)(struct vpu_inst *inst,
+ u32 enc_frame_size,
+ u32 enc_frame_num,
+ u32 ref_frame_size,
+ u32 ref_frame_num,
+ u32 act_frame_size,
+ u32 act_frame_num);
+ void (*input_done)(struct vpu_inst *inst);
+ void (*stop_done)(struct vpu_inst *inst);
+ int (*process_output)(struct vpu_inst *inst, struct vb2_buffer *vb);
+ int (*process_capture)(struct vpu_inst *inst, struct vb2_buffer *vb);
+ int (*get_one_frame)(struct vpu_inst *inst, void *info);
+ void (*on_queue_empty)(struct vpu_inst *inst, u32 type);
+ int (*get_debug_info)(struct vpu_inst *inst, char *str, u32 size, u32 i);
+ void (*wait_prepare)(struct vpu_inst *inst);
+ void (*wait_finish)(struct vpu_inst *inst);
+};
+
+struct vpu_inst {
+ struct list_head list;
+ struct mutex lock; /* v4l2 and videobuf2 lock */
+ struct vpu_dev *vpu;
+ struct vpu_core *core;
+ struct device *dev;
+ int id;
+
+ struct v4l2_fh fh;
+ struct v4l2_ctrl_handler ctrl_handler;
+ atomic_t ref_count;
+ int (*release)(struct vpu_inst *inst);
+
+ enum vpu_codec_state state;
+ enum vpu_core_type type;
+
+ struct workqueue_struct *workqueue;
+ struct work_struct msg_work;
+ struct kfifo msg_fifo;
+ u8 msg_buffer[VPU_MSG_BUFFER_SIZE];
+
+ struct vpu_buffer stream_buffer;
+ bool use_stream_buffer;
+ struct vpu_buffer act;
+
+ struct list_head cmd_q;
+ void *pending;
+
+ struct vpu_inst_ops *ops;
+ const struct vpu_format *formats;
+ struct vpu_format out_format;
+ struct vpu_format cap_format;
+ u32 min_buffer_cap;
+ u32 min_buffer_out;
+
+ struct v4l2_rect crop;
+ u32 colorspace;
+ u8 ycbcr_enc;
+ u8 quantization;
+ u8 xfer_func;
+ u32 sequence;
+ u32 extra_size;
+
+ u32 flows[16];
+ u32 flow_idx;
+
+ pid_t pid;
+ pid_t tgid;
+ struct dentry *debugfs;
+
+ void *priv;
+};
+
+#define call_vop(inst, op, args...) \
+ ((inst)->ops->op ? (inst)->ops->op(inst, ##args) : 0)
+
+#define call_void_vop(inst, op, args...) \
+ do { \
+ if ((inst)->ops->op) \
+ (inst)->ops->op(inst, ##args); \
+ } while (0)
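+/*
+ * Illustrative use: the encoder wires venc_get_debug_info() into
+ * venc_inst_ops, and vpu_dbg.c then calls
+ *   num = call_vop(inst, get_debug_info, str, sizeof(str), i);
+ * call_vop() evaluates to 0 when the hook is not implemented;
+ * call_void_vop() is the counterpart for void hooks.
+ */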
+
+enum {
+ VPU_BUF_STATE_IDLE = 0,
+ VPU_BUF_STATE_INUSE,
+ VPU_BUF_STATE_DECODED,
+ VPU_BUF_STATE_READY,
+ VPU_BUF_STATE_SKIP,
+ VPU_BUF_STATE_ERROR
+};
+
+struct vpu_vb2_buffer {
+ struct v4l2_m2m_buffer m2m_buf;
+ dma_addr_t luma;
+ dma_addr_t chroma_u;
+ dma_addr_t chroma_v;
+ unsigned int state;
+ u32 tag;
+};
+
+void vpu_writel(struct vpu_dev *vpu, u32 reg, u32 val);
+u32 vpu_readl(struct vpu_dev *vpu, u32 reg);
+
+static inline struct vpu_vb2_buffer *to_vpu_vb2_buffer(struct vb2_v4l2_buffer *vbuf)
+{
+ struct v4l2_m2m_buffer *m2m_buf = container_of(vbuf, struct v4l2_m2m_buffer, vb);
+
+ return container_of(m2m_buf, struct vpu_vb2_buffer, m2m_buf);
+}
+
+static inline const char *vpu_core_type_desc(enum vpu_core_type type)
+{
+ return type == VPU_CORE_TYPE_ENC ? "encoder" : "decoder";
+}
+
+static inline struct vpu_inst *to_inst(struct file *filp)
+{
+ return container_of(filp->private_data, struct vpu_inst, fh);
+}
+
+#define ctrl_to_inst(ctrl) \
+ container_of((ctrl)->handler, struct vpu_inst, ctrl_handler)
+
+const struct v4l2_ioctl_ops *venc_get_ioctl_ops(void);
+const struct v4l2_file_operations *venc_get_fops(void);
+const struct v4l2_ioctl_ops *vdec_get_ioctl_ops(void);
+const struct v4l2_file_operations *vdec_get_fops(void);
+
+int vpu_add_func(struct vpu_dev *vpu, struct vpu_func *func);
+void vpu_remove_func(struct vpu_func *func);
+
+struct vpu_inst *vpu_inst_get(struct vpu_inst *inst);
+void vpu_inst_put(struct vpu_inst *inst);
+struct vpu_core *vpu_request_core(struct vpu_dev *vpu, enum vpu_core_type type);
+void vpu_release_core(struct vpu_core *core);
+int vpu_inst_register(struct vpu_inst *inst);
+int vpu_inst_unregister(struct vpu_inst *inst);
+const struct vpu_core_resources *vpu_get_resource(struct vpu_inst *inst);
+
+int vpu_inst_create_dbgfs_file(struct vpu_inst *inst);
+int vpu_inst_remove_dbgfs_file(struct vpu_inst *inst);
+int vpu_core_create_dbgfs_file(struct vpu_core *core);
+int vpu_core_remove_dbgfs_file(struct vpu_core *core);
+void vpu_inst_record_flow(struct vpu_inst *inst, u32 flow);
+
+int vpu_core_driver_init(void);
+void vpu_core_driver_exit(void);
+
+extern bool debug;
+#define vpu_trace(dev, fmt, arg...) \
+ do { \
+ if (debug) \
+ dev_info(dev, "%s: " fmt, __func__, ## arg); \
+ } while (0)
+
+#endif
diff --git a/drivers/media/platform/amphion/vpu_cmds.c b/drivers/media/platform/amphion/vpu_cmds.c
new file mode 100644
index 000000000000..9b39d77a178d
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_cmds.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#include <linux/init.h>
+#include <linux/interconnect.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include "vpu.h"
+#include "vpu_defs.h"
+#include "vpu_cmds.h"
+#include "vpu_rpc.h"
+#include "vpu_mbox.h"
+
+struct vpu_cmd_request {
+ u32 request;
+ u32 response;
+ u32 handled;
+};
+
+struct vpu_cmd_t {
+ struct list_head list;
+ u32 id;
+ struct vpu_cmd_request *request;
+ struct vpu_rpc_event *pkt;
+ unsigned long key;
+};
+
+static struct vpu_cmd_request vpu_cmd_requests[] = {
+ {
+ .request = VPU_CMD_ID_CONFIGURE_CODEC,
+ .response = VPU_MSG_ID_MEM_REQUEST,
+ .handled = 1,
+ },
+ {
+ .request = VPU_CMD_ID_START,
+ .response = VPU_MSG_ID_START_DONE,
+ .handled = 0,
+ },
+ {
+ .request = VPU_CMD_ID_STOP,
+ .response = VPU_MSG_ID_STOP_DONE,
+ .handled = 0,
+ },
+ {
+ .request = VPU_CMD_ID_ABORT,
+ .response = VPU_MSG_ID_ABORT_DONE,
+ .handled = 0,
+ },
+ {
+ .request = VPU_CMD_ID_RST_BUF,
+ .response = VPU_MSG_ID_BUF_RST,
+ .handled = 1,
+ },
+};
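+/*
+ * Commands listed in this table are synchronous: vpu_request_cmd()
+ * reports them via *sync, and the sender then blocks in
+ * sync_session_response() until the matching response id (with the
+ * expected handled flag) clears the pending slot in vpu_response_cmd().
+ * Commands without a table entry are fire-and-forget.
+ */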
+
+static int vpu_cmd_send(struct vpu_core *core, struct vpu_rpc_event *pkt)
+{
+ int ret = 0;
+
+ ret = vpu_iface_send_cmd(core, pkt);
+ if (ret)
+ return ret;
+
+ /* write cmd data to the cmd buffer before triggering a cmd interrupt */
+ mb();
+ vpu_mbox_send_type(core, COMMAND);
+
+ return ret;
+}
+
+static struct vpu_cmd_t *vpu_alloc_cmd(struct vpu_inst *inst, u32 id, void *data)
+{
+ struct vpu_cmd_t *cmd;
+ int i;
+ int ret;
+
+ cmd = vzalloc(sizeof(*cmd));
+ if (!cmd)
+ return NULL;
+
+ cmd->pkt = vzalloc(sizeof(*cmd->pkt));
+ if (!cmd->pkt) {
+ vfree(cmd);
+ return NULL;
+ }
+
+ cmd->id = id;
+ ret = vpu_iface_pack_cmd(inst->core, cmd->pkt, inst->id, id, data);
+ if (ret) {
+ dev_err(inst->dev, "iface pack cmd(%d) fail\n", id);
+ vfree(cmd->pkt);
+ vfree(cmd);
+ return NULL;
+ }
+ for (i = 0; i < ARRAY_SIZE(vpu_cmd_requests); i++) {
+ if (vpu_cmd_requests[i].request == id) {
+ cmd->request = &vpu_cmd_requests[i];
+ break;
+ }
+ }
+
+ return cmd;
+}
+
+static void vpu_free_cmd(struct vpu_cmd_t *cmd)
+{
+ if (!cmd)
+ return;
+ vfree(cmd->pkt);
+ vfree(cmd);
+}
+
+static int vpu_session_process_cmd(struct vpu_inst *inst, struct vpu_cmd_t *cmd)
+{
+ int ret;
+
+ dev_dbg(inst->dev, "[%d]send cmd(0x%x)\n", inst->id, cmd->id);
+ vpu_iface_pre_send_cmd(inst);
+ ret = vpu_cmd_send(inst->core, cmd->pkt);
+ if (!ret) {
+ vpu_iface_post_send_cmd(inst);
+ vpu_inst_record_flow(inst, cmd->id);
+ } else {
+ dev_err(inst->dev, "[%d] iface send cmd(0x%x) fail\n", inst->id, cmd->id);
+ }
+
+ return ret;
+}
+
+static void vpu_process_cmd_request(struct vpu_inst *inst)
+{
+ struct vpu_cmd_t *cmd;
+ struct vpu_cmd_t *tmp;
+
+ if (!inst || inst->pending)
+ return;
+
+ list_for_each_entry_safe(cmd, tmp, &inst->cmd_q, list) {
+ list_del_init(&cmd->list);
+ if (vpu_session_process_cmd(inst, cmd))
+ dev_err(inst->dev, "[%d] process cmd(%d) fail\n", inst->id, cmd->id);
+ if (cmd->request) {
+ inst->pending = (void *)cmd;
+ break;
+ }
+ vpu_free_cmd(cmd);
+ }
+}
+
+static int vpu_request_cmd(struct vpu_inst *inst, u32 id, void *data,
+ unsigned long *key, int *sync)
+{
+ struct vpu_core *core;
+ struct vpu_cmd_t *cmd;
+
+ if (!inst || !inst->core)
+ return -EINVAL;
+
+ core = inst->core;
+ cmd = vpu_alloc_cmd(inst, id, data);
+ if (!cmd)
+ return -ENOMEM;
+
+ mutex_lock(&core->cmd_lock);
+ cmd->key = core->cmd_seq++;
+ if (key)
+ *key = cmd->key;
+ if (sync)
+ *sync = cmd->request ? true : false;
+ list_add_tail(&cmd->list, &inst->cmd_q);
+ vpu_process_cmd_request(inst);
+ mutex_unlock(&core->cmd_lock);
+
+ return 0;
+}
+
+static void vpu_clear_pending(struct vpu_inst *inst)
+{
+ if (!inst || !inst->pending)
+ return;
+
+ vpu_free_cmd(inst->pending);
+ wake_up_all(&inst->core->ack_wq);
+ inst->pending = NULL;
+}
+
+static bool vpu_check_response(struct vpu_cmd_t *cmd, u32 response, u32 handled)
+{
+ struct vpu_cmd_request *request;
+
+ if (!cmd || !cmd->request)
+ return false;
+
+ request = cmd->request;
+ if (request->response != response)
+ return false;
+ if (request->handled != handled)
+ return false;
+
+ return true;
+}
+
+int vpu_response_cmd(struct vpu_inst *inst, u32 response, u32 handled)
+{
+ struct vpu_core *core;
+
+ if (!inst || !inst->core)
+ return -EINVAL;
+
+ core = inst->core;
+ mutex_lock(&core->cmd_lock);
+ if (vpu_check_response(inst->pending, response, handled))
+ vpu_clear_pending(inst);
+
+ vpu_process_cmd_request(inst);
+ mutex_unlock(&core->cmd_lock);
+
+ return 0;
+}
+
+void vpu_clear_request(struct vpu_inst *inst)
+{
+ struct vpu_cmd_t *cmd;
+ struct vpu_cmd_t *tmp;
+
+ mutex_lock(&inst->core->cmd_lock);
+ if (inst->pending)
+ vpu_clear_pending(inst);
+
+ list_for_each_entry_safe(cmd, tmp, &inst->cmd_q, list) {
+ list_del_init(&cmd->list);
+ vpu_free_cmd(cmd);
+ }
+ mutex_unlock(&inst->core->cmd_lock);
+}
+
+static bool check_is_responsed(struct vpu_inst *inst, unsigned long key)
+{
+ struct vpu_core *core = inst->core;
+ struct vpu_cmd_t *cmd;
+ bool flag = true;
+
+ mutex_lock(&core->cmd_lock);
+ cmd = inst->pending;
+ if (cmd && key == cmd->key) {
+ flag = false;
+ goto exit;
+ }
+ list_for_each_entry(cmd, &inst->cmd_q, list) {
+ if (key == cmd->key) {
+ flag = false;
+ break;
+ }
+ }
+exit:
+ mutex_unlock(&core->cmd_lock);
+
+ return flag;
+}
+
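+/*
+ * Block (up to VPU_TIMEOUT) until the command identified by @key has
+ * been answered, i.e. it is neither pending nor still queued. On
+ * timeout the instance is flagged in core->hang_mask and its pending
+ * command dropped; the core is software-reset later, once the last
+ * instance unregisters.
+ */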
+static int sync_session_response(struct vpu_inst *inst, unsigned long key)
+{
+ struct vpu_core *core;
+
+ if (!inst || !inst->core)
+ return -EINVAL;
+
+ core = inst->core;
+
+ call_void_vop(inst, wait_prepare);
+ wait_event_timeout(core->ack_wq, check_is_responsed(inst, key), VPU_TIMEOUT);
+ call_void_vop(inst, wait_finish);
+
+ if (!check_is_responsed(inst, key)) {
+ dev_err(inst->dev, "[%d] sync session timeout\n", inst->id);
+ set_bit(inst->id, &core->hang_mask);
+ mutex_lock(&inst->core->cmd_lock);
+ vpu_clear_pending(inst);
+ mutex_unlock(&inst->core->cmd_lock);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vpu_session_send_cmd(struct vpu_inst *inst, u32 id, void *data)
+{
+ unsigned long key;
+ int sync = false;
+ int ret = -EINVAL;
+
+ if (inst->id < 0)
+ return -EINVAL;
+
+ ret = vpu_request_cmd(inst, id, data, &key, &sync);
+ if (!ret && sync)
+ ret = sync_session_response(inst, key);
+
+ if (ret)
+ dev_err(inst->dev, "[%d] send cmd(0x%x) fail\n", inst->id, id);
+
+ return ret;
+}
+
+int vpu_session_configure_codec(struct vpu_inst *inst)
+{
+ return vpu_session_send_cmd(inst, VPU_CMD_ID_CONFIGURE_CODEC, NULL);
+}
+
+int vpu_session_start(struct vpu_inst *inst)
+{
+ vpu_trace(inst->dev, "[%d]\n", inst->id);
+
+ return vpu_session_send_cmd(inst, VPU_CMD_ID_START, NULL);
+}
+
+int vpu_session_stop(struct vpu_inst *inst)
+{
+ int ret;
+
+ vpu_trace(inst->dev, "[%d]\n", inst->id);
+
+ ret = vpu_session_send_cmd(inst, VPU_CMD_ID_STOP, NULL);
+ /*
+ * Workaround for a firmware bug: if the next command follows the
+ * stop command too closely, the firmware may wrongly enter WFI.
+ */
+ usleep_range(3000, 5000);
+ return ret;
+}
+
+int vpu_session_encode_frame(struct vpu_inst *inst, s64 timestamp)
+{
+ return vpu_session_send_cmd(inst, VPU_CMD_ID_FRAME_ENCODE, &timestamp);
+}
+
+int vpu_session_alloc_fs(struct vpu_inst *inst, struct vpu_fs_info *fs)
+{
+ return vpu_session_send_cmd(inst, VPU_CMD_ID_FS_ALLOC, fs);
+}
+
+int vpu_session_release_fs(struct vpu_inst *inst, struct vpu_fs_info *fs)
+{
+ return vpu_session_send_cmd(inst, VPU_CMD_ID_FS_RELEASE, fs);
+}
+
+int vpu_session_abort(struct vpu_inst *inst)
+{
+ return vpu_session_send_cmd(inst, VPU_CMD_ID_ABORT, NULL);
+}
+
+int vpu_session_rst_buf(struct vpu_inst *inst)
+{
+ return vpu_session_send_cmd(inst, VPU_CMD_ID_RST_BUF, NULL);
+}
+
+int vpu_session_fill_timestamp(struct vpu_inst *inst, struct vpu_ts_info *info)
+{
+ return vpu_session_send_cmd(inst, VPU_CMD_ID_TIMESTAMP, info);
+}
+
+int vpu_session_update_parameters(struct vpu_inst *inst, void *arg)
+{
+ if (inst->type & VPU_CORE_TYPE_DEC)
+ vpu_iface_set_decode_params(inst, arg, 1);
+ else
+ vpu_iface_set_encode_params(inst, arg, 1);
+
+ return vpu_session_send_cmd(inst, VPU_CMD_ID_UPDATE_PARAMETER, arg);
+}
+
+int vpu_session_debug(struct vpu_inst *inst)
+{
+ return vpu_session_send_cmd(inst, VPU_CMD_ID_DEBUG, NULL);
+}
+
+int vpu_core_snapshot(struct vpu_core *core)
+{
+ struct vpu_inst *inst;
+ int ret;
+
+ if (!core || list_empty(&core->instances))
+ return 0;
+
+ inst = list_first_entry(&core->instances, struct vpu_inst, list);
+
+ reinit_completion(&core->cmp);
+ ret = vpu_session_send_cmd(inst, VPU_CMD_ID_SNAPSHOT, NULL);
+ if (ret)
+ return ret;
+ ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
+ if (!ret) {
+ dev_err(core->dev, "snapshot timeout\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int vpu_core_sw_reset(struct vpu_core *core)
+{
+ struct vpu_rpc_event pkt;
+ int ret;
+
+ memset(&pkt, 0, sizeof(pkt));
+ ret = vpu_iface_pack_cmd(core, &pkt, 0, VPU_CMD_ID_FIRM_RESET, NULL);
+ if (ret)
+ return ret;
+
+ reinit_completion(&core->cmp);
+ mutex_lock(&core->cmd_lock);
+ ret = vpu_cmd_send(core, &pkt);
+ mutex_unlock(&core->cmd_lock);
+ if (ret)
+ return ret;
+ ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
+ if (!ret) {
+ dev_err(core->dev, "sw reset timeout\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/media/platform/amphion/vpu_cmds.h b/drivers/media/platform/amphion/vpu_cmds.h
new file mode 100644
index 000000000000..bc538d277bc9
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_cmds.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#ifndef _AMPHION_VPU_CMDS_H
+#define _AMPHION_VPU_CMDS_H
+
+int vpu_session_configure_codec(struct vpu_inst *inst);
+int vpu_session_start(struct vpu_inst *inst);
+int vpu_session_stop(struct vpu_inst *inst);
+int vpu_session_abort(struct vpu_inst *inst);
+int vpu_session_rst_buf(struct vpu_inst *inst);
+int vpu_session_encode_frame(struct vpu_inst *inst, s64 timestamp);
+int vpu_session_alloc_fs(struct vpu_inst *inst, struct vpu_fs_info *fs);
+int vpu_session_release_fs(struct vpu_inst *inst, struct vpu_fs_info *fs);
+int vpu_session_fill_timestamp(struct vpu_inst *inst, struct vpu_ts_info *info);
+int vpu_session_update_parameters(struct vpu_inst *inst, void *arg);
+int vpu_core_snapshot(struct vpu_core *core);
+int vpu_core_sw_reset(struct vpu_core *core);
+int vpu_response_cmd(struct vpu_inst *inst, u32 response, u32 handled);
+void vpu_clear_request(struct vpu_inst *inst);
+int vpu_session_debug(struct vpu_inst *inst);
+
+#endif
diff --git a/drivers/media/platform/amphion/vpu_codec.h b/drivers/media/platform/amphion/vpu_codec.h
new file mode 100644
index 000000000000..528a93f08ecd
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_codec.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#ifndef _AMPHION_VPU_CODEC_H
+#define _AMPHION_VPU_CODEC_H
+
+struct vpu_encode_params {
+ u32 input_format;
+ u32 codec_format;
+ u32 profile;
+ u32 tier;
+ u32 level;
+ struct v4l2_fract frame_rate;
+ u32 src_stride;
+ u32 src_width;
+ u32 src_height;
+ struct v4l2_rect crop;
+ u32 out_width;
+ u32 out_height;
+
+ u32 gop_length;
+ u32 bframes;
+
+ u32 rc_enable;
+ u32 rc_mode;
+ u32 bitrate;
+ u32 bitrate_min;
+ u32 bitrate_max;
+
+ u32 i_frame_qp;
+ u32 p_frame_qp;
+ u32 b_frame_qp;
+ u32 qp_min;
+ u32 qp_max;
+ u32 qp_min_i;
+ u32 qp_max_i;
+
+ struct {
+ u32 enable;
+ u32 idc;
+ u32 width;
+ u32 height;
+ } sar;
+
+ struct {
+ u32 primaries;
+ u32 transfer;
+ u32 matrix;
+ u32 full_range;
+ } color;
+};
+
+struct vpu_decode_params {
+ u32 codec_format;
+ u32 output_format;
+ u32 b_dis_reorder;
+ u32 b_non_frame;
+ u32 frame_count;
+ u32 end_flag;
+ struct {
+ u32 base;
+ u32 size;
+ } udata;
+};
+
+#endif
diff --git a/drivers/media/platform/amphion/vpu_color.c b/drivers/media/platform/amphion/vpu_color.c
new file mode 100644
index 000000000000..80b9a53fd1c1
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_color.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <media/v4l2-device.h>
+#include "vpu.h"
+#include "vpu_helpers.h"
+
+static const u8 colorprimaries[] = {
+ 0,
+ V4L2_COLORSPACE_REC709, /*Rec. ITU-R BT.709-6*/
+ 0,
+ 0,
+ V4L2_COLORSPACE_470_SYSTEM_M, /*Rec. ITU-R BT.470-6 System M*/
+ V4L2_COLORSPACE_470_SYSTEM_BG, /*Rec. ITU-R BT.470-6 System B, G*/
+ V4L2_COLORSPACE_SMPTE170M, /*SMPTE170M*/
+ V4L2_COLORSPACE_SMPTE240M, /*SMPTE240M*/
+ 0, /*Generic film*/
+ V4L2_COLORSPACE_BT2020, /*Rec. ITU-R BT.2020-2*/
+ 0, /*SMPTE ST 428-1*/
+};
+
+static const u8 colortransfers[] = {
+ 0,
+ V4L2_XFER_FUNC_709, /*Rec. ITU-R BT.709-6*/
+ 0,
+ 0,
+ 0, /*Rec. ITU-R BT.470-6 System M*/
+ 0, /*Rec. ITU-R BT.470-6 System B, G*/
+ V4L2_XFER_FUNC_709, /*SMPTE170M*/
+ V4L2_XFER_FUNC_SMPTE240M, /*SMPTE240M*/
+ V4L2_XFER_FUNC_NONE, /*Linear transfer characteristics*/
+ 0,
+ 0,
+ 0, /*IEC 61966-2-4*/
+ 0, /*Rec. ITU-R BT.1361-0 extended colour gamut*/
+ V4L2_XFER_FUNC_SRGB, /*IEC 61966-2-1 sRGB or sYCC*/
+ V4L2_XFER_FUNC_709, /*Rec. ITU-R BT.2020-2 (10 bit system)*/
+ V4L2_XFER_FUNC_709, /*Rec. ITU-R BT.2020-2 (12 bit system)*/
+ V4L2_XFER_FUNC_SMPTE2084, /*SMPTE ST 2084*/
+ 0, /*SMPTE ST 428-1*/
+ 0 /*Rec. ITU-R BT.2100-0 hybrid log-gamma (HLG)*/
+};
+
+static const u8 colormatrixcoefs[] = {
+ 0,
+ V4L2_YCBCR_ENC_709, /*Rec. ITU-R BT.709-6*/
+ 0,
+ 0,
+ 0, /*Title 47 Code of Federal Regulations*/
+ V4L2_YCBCR_ENC_601, /*Rec. ITU-R BT.601-7 625*/
+ V4L2_YCBCR_ENC_601, /*Rec. ITU-R BT.601-7 525*/
+ V4L2_YCBCR_ENC_SMPTE240M, /*SMPTE240M*/
+ 0,
+ V4L2_YCBCR_ENC_BT2020, /*Rec. ITU-R BT.2020-2*/
+ V4L2_YCBCR_ENC_BT2020_CONST_LUM /*Rec. ITU-R BT.2020-2 constant*/
+};
+
+u32 vpu_color_cvrt_primaries_v2i(u32 primaries)
+{
+ return vpu_helper_find_in_array_u8(colorprimaries, ARRAY_SIZE(colorprimaries), primaries);
+}
+
+u32 vpu_color_cvrt_primaries_i2v(u32 primaries)
+{
+ return primaries < ARRAY_SIZE(colorprimaries) ? colorprimaries[primaries] : 0;
+}
+
+u32 vpu_color_cvrt_transfers_v2i(u32 transfers)
+{
+ return vpu_helper_find_in_array_u8(colortransfers, ARRAY_SIZE(colortransfers), transfers);
+}
+
+u32 vpu_color_cvrt_transfers_i2v(u32 transfers)
+{
+ return transfers < ARRAY_SIZE(colortransfers) ? colortransfers[transfers] : 0;
+}
+
+u32 vpu_color_cvrt_matrix_v2i(u32 matrix)
+{
+ return vpu_helper_find_in_array_u8(colormatrixcoefs, ARRAY_SIZE(colormatrixcoefs), matrix);
+}
+
+u32 vpu_color_cvrt_matrix_i2v(u32 matrix)
+{
+ return matrix < ARRAY_SIZE(colormatrixcoefs) ? colormatrixcoefs[matrix] : 0;
+}
+
+u32 vpu_color_cvrt_full_range_v2i(u32 full_range)
+{
+ return (full_range == V4L2_QUANTIZATION_FULL_RANGE);
+}
+
+u32 vpu_color_cvrt_full_range_i2v(u32 full_range)
+{
+ if (full_range)
+ return V4L2_QUANTIZATION_FULL_RANGE;
+
+ return V4L2_QUANTIZATION_LIM_RANGE;
+}
+
+int vpu_color_check_primaries(u32 primaries)
+{
+ return vpu_color_cvrt_primaries_v2i(primaries) ? 0 : -EINVAL;
+}
+
+int vpu_color_check_transfers(u32 transfers)
+{
+ return vpu_color_cvrt_transfers_v2i(transfers) ? 0 : -EINVAL;
+}
+
+int vpu_color_check_matrix(u32 matrix)
+{
+ return vpu_color_cvrt_matrix_v2i(matrix) ? 0 : -EINVAL;
+}
+
+int vpu_color_check_full_range(u32 full_range)
+{
+ int ret = -EINVAL;
+
+ switch (full_range) {
+ case V4L2_QUANTIZATION_FULL_RANGE:
+ case V4L2_QUANTIZATION_LIM_RANGE:
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
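+/*
+ * Derive default transfer/matrix values from the colour primaries,
+ * following the usual pairings (709 with 709, the 470/170M primaries
+ * with the 601 matrix, SMPTE240M with SMPTE240M, BT.2020 with the 709
+ * transfer); quantization always defaults to limited range.
+ */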
+int vpu_color_get_default(u32 primaries, u32 *ptransfers, u32 *pmatrix, u32 *pfull_range)
+{
+ u32 transfers;
+ u32 matrix;
+ u32 full_range;
+
+ switch (primaries) {
+ case V4L2_COLORSPACE_REC709:
+ transfers = V4L2_XFER_FUNC_709;
+ matrix = V4L2_YCBCR_ENC_709;
+ break;
+ case V4L2_COLORSPACE_470_SYSTEM_M:
+ case V4L2_COLORSPACE_470_SYSTEM_BG:
+ case V4L2_COLORSPACE_SMPTE170M:
+ transfers = V4L2_XFER_FUNC_709;
+ matrix = V4L2_YCBCR_ENC_601;
+ break;
+ case V4L2_COLORSPACE_SMPTE240M:
+ transfers = V4L2_XFER_FUNC_SMPTE240M;
+ matrix = V4L2_YCBCR_ENC_SMPTE240M;
+ break;
+ case V4L2_COLORSPACE_BT2020:
+ transfers = V4L2_XFER_FUNC_709;
+ matrix = V4L2_YCBCR_ENC_BT2020;
+ break;
+ default:
+ transfers = V4L2_XFER_FUNC_DEFAULT;
+ matrix = V4L2_YCBCR_ENC_DEFAULT;
+ break;
+ }
+ full_range = V4L2_QUANTIZATION_LIM_RANGE;
+
+ if (ptransfers)
+ *ptransfers = transfers;
+ if (pmatrix)
+ *pmatrix = matrix;
+ if (pfull_range)
+ *pfull_range = full_range;
+
+ return 0;
+}
diff --git a/drivers/media/platform/amphion/vpu_core.c b/drivers/media/platform/amphion/vpu_core.c
new file mode 100644
index 000000000000..68ad183925fd
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_core.c
@@ -0,0 +1,879 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#include <linux/init.h>
+#include <linux/interconnect.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
+#include <linux/firmware.h>
+#include <linux/vmalloc.h>
+#include "vpu.h"
+#include "vpu_defs.h"
+#include "vpu_core.h"
+#include "vpu_mbox.h"
+#include "vpu_msgs.h"
+#include "vpu_rpc.h"
+#include "vpu_cmds.h"
+
+void csr_writel(struct vpu_core *core, u32 reg, u32 val)
+{
+ writel(val, core->base + reg);
+}
+
+u32 csr_readl(struct vpu_core *core, u32 reg)
+{
+ return readl(core->base + reg);
+}
+
+static int vpu_core_load_firmware(struct vpu_core *core)
+{
+ const struct firmware *pfw = NULL;
+ int ret = 0;
+
+ if (!core->fw.virt) {
+ dev_err(core->dev, "firmware buffer is not ready\n");
+ return -EINVAL;
+ }
+
+ ret = request_firmware(&pfw, core->res->fwname, core->dev);
+ dev_dbg(core->dev, "request_firmware %s : %d\n", core->res->fwname, ret);
+ if (ret) {
+ dev_err(core->dev, "request firmware %s failed, ret = %d\n",
+ core->res->fwname, ret);
+ return ret;
+ }
+
+ if (core->fw.length < pfw->size) {
+ dev_err(core->dev, "firmware buffer size want %zu, but %d\n",
+ pfw->size, core->fw.length);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ memset(core->fw.virt, 0, core->fw.length);
+ memcpy(core->fw.virt, pfw->data, pfw->size);
+ core->fw.bytesused = pfw->size;
+ ret = vpu_iface_on_firmware_loaded(core);
+exit:
+ release_firmware(pfw);
+
+ return ret;
+}
+
+static int vpu_core_boot_done(struct vpu_core *core)
+{
+ u32 fw_version;
+
+ fw_version = vpu_iface_get_version(core);
+ dev_info(core->dev, "%s firmware version : %d.%d.%d\n",
+ vpu_core_type_desc(core->type),
+ (fw_version >> 16) & 0xff,
+ (fw_version >> 8) & 0xff,
+ fw_version & 0xff);
+ core->supported_instance_count = vpu_iface_get_max_instance_count(core);
+ if (core->res->act_size) {
+ u32 count = core->act.length / core->res->act_size;
+
+ core->supported_instance_count = min(core->supported_instance_count, count);
+ }
+ core->fw_version = fw_version;
+ core->state = VPU_CORE_ACTIVE;
+
+ return 0;
+}
+
+static int vpu_core_wait_boot_done(struct vpu_core *core)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
+ if (!ret) {
+ dev_err(core->dev, "boot timeout\n");
+ return -EINVAL;
+ }
+ return vpu_core_boot_done(core);
+}
+
+static int vpu_core_boot(struct vpu_core *core, bool load)
+{
+ int ret;
+
+ reinit_completion(&core->cmp);
+ if (load) {
+ ret = vpu_core_load_firmware(core);
+ if (ret)
+ return ret;
+ }
+
+ vpu_iface_boot_core(core);
+ return vpu_core_wait_boot_done(core);
+}
+
+static int vpu_core_shutdown(struct vpu_core *core)
+{
+ return vpu_iface_shutdown_core(core);
+}
+
+static int vpu_core_restore(struct vpu_core *core)
+{
+ int ret;
+
+ ret = vpu_core_sw_reset(core);
+ if (ret)
+ return ret;
+
+ vpu_core_boot_done(core);
+ return vpu_iface_restore_core(core);
+}
+
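+/*
+ * DMA buffers are allocated with GFP_DMA32 to keep them below 4 GiB,
+ * presumably because the firmware interface carries only 32-bit
+ * addresses (cf. the u32 luma/chroma fields in struct vpu_frame_info).
+ */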
+static int __vpu_alloc_dma(struct device *dev, struct vpu_buffer *buf)
+{
+ gfp_t gfp = GFP_KERNEL | GFP_DMA32;
+
+ if (!buf->length)
+ return 0;
+
+ buf->virt = dma_alloc_coherent(dev, buf->length, &buf->phys, gfp);
+ if (!buf->virt)
+ return -ENOMEM;
+
+ buf->dev = dev;
+
+ return 0;
+}
+
+void vpu_free_dma(struct vpu_buffer *buf)
+{
+ if (!buf->virt || !buf->dev)
+ return;
+
+ dma_free_coherent(buf->dev, buf->length, buf->virt, buf->phys);
+ buf->virt = NULL;
+ buf->phys = 0;
+ buf->length = 0;
+ buf->bytesused = 0;
+ buf->dev = NULL;
+}
+
+int vpu_alloc_dma(struct vpu_core *core, struct vpu_buffer *buf)
+{
+ return __vpu_alloc_dma(core->dev, buf);
+}
+
+static void vpu_core_check_hang(struct vpu_core *core)
+{
+ if (core->hang_mask)
+ core->state = VPU_CORE_HANG;
+}
+
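+/*
+ * Core selection policy: prefer a core still in VPU_CORE_DEINIT (it
+ * will be booted on first use); otherwise pick the active core of the
+ * requested type with the fewest outstanding requests, as a simple
+ * form of load balancing.
+ */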
+static struct vpu_core *vpu_core_find_proper_by_type(struct vpu_dev *vpu, u32 type)
+{
+ struct vpu_core *core = NULL;
+ int request_count = INT_MAX;
+ struct vpu_core *c;
+
+ list_for_each_entry(c, &vpu->cores, list) {
+ dev_dbg(c->dev, "instance_mask = 0x%lx, state = %d\n", c->instance_mask, c->state);
+ if (c->type != type)
+ continue;
+ if (c->state == VPU_CORE_DEINIT) {
+ core = c;
+ break;
+ }
+ vpu_core_check_hang(c);
+ if (c->state != VPU_CORE_ACTIVE)
+ continue;
+ if (c->request_count < request_count) {
+ request_count = c->request_count;
+ core = c;
+ }
+ if (!request_count)
+ break;
+ }
+
+ return core;
+}
+
+static bool vpu_core_is_exist(struct vpu_dev *vpu, struct vpu_core *core)
+{
+ struct vpu_core *c;
+
+ list_for_each_entry(c, &vpu->cores, list) {
+ if (c == core)
+ return true;
+ }
+
+ return false;
+}
+
+static void vpu_core_get_vpu(struct vpu_core *core)
+{
+ core->vpu->get_vpu(core->vpu);
+ if (core->type == VPU_CORE_TYPE_ENC)
+ core->vpu->get_enc(core->vpu);
+ if (core->type == VPU_CORE_TYPE_DEC)
+ core->vpu->get_dec(core->vpu);
+}
+
+static int vpu_core_register(struct device *dev, struct vpu_core *core)
+{
+ struct vpu_dev *vpu = dev_get_drvdata(dev);
+ int ret = 0;
+
+ dev_dbg(core->dev, "register core %s\n", vpu_core_type_desc(core->type));
+ if (vpu_core_is_exist(vpu, core))
+ return 0;
+
+ core->workqueue = alloc_workqueue("vpu", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ if (!core->workqueue) {
+ dev_err(core->dev, "fail to alloc workqueue\n");
+ return -ENOMEM;
+ }
+ INIT_WORK(&core->msg_work, vpu_msg_run_work);
+ INIT_DELAYED_WORK(&core->msg_delayed_work, vpu_msg_delayed_work);
+ core->msg_buffer_size = roundup_pow_of_two(VPU_MSG_BUFFER_SIZE);
+ core->msg_buffer = vzalloc(core->msg_buffer_size);
+ if (!core->msg_buffer) {
+ dev_err(core->dev, "failed allocate buffer for fifo\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+ ret = kfifo_init(&core->msg_fifo, core->msg_buffer, core->msg_buffer_size);
+ if (ret) {
+ dev_err(core->dev, "failed init kfifo\n");
+ goto error;
+ }
+
+ list_add_tail(&core->list, &vpu->cores);
+
+ vpu_core_get_vpu(core);
+
+ if (vpu_iface_get_power_state(core))
+ ret = vpu_core_restore(core);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ if (core->msg_buffer) {
+ vfree(core->msg_buffer);
+ core->msg_buffer = NULL;
+ }
+ if (core->workqueue) {
+ destroy_workqueue(core->workqueue);
+ core->workqueue = NULL;
+ }
+ return ret;
+}
+
+static void vpu_core_put_vpu(struct vpu_core *core)
+{
+ if (core->type == VPU_CORE_TYPE_ENC)
+ core->vpu->put_enc(core->vpu);
+ if (core->type == VPU_CORE_TYPE_DEC)
+ core->vpu->put_dec(core->vpu);
+ core->vpu->put_vpu(core->vpu);
+}
+
+static int vpu_core_unregister(struct device *dev, struct vpu_core *core)
+{
+ list_del_init(&core->list);
+
+ vpu_core_put_vpu(core);
+ core->vpu = NULL;
+ vfree(core->msg_buffer);
+ core->msg_buffer = NULL;
+
+ if (core->workqueue) {
+ cancel_work_sync(&core->msg_work);
+ cancel_delayed_work_sync(&core->msg_delayed_work);
+ destroy_workqueue(core->workqueue);
+ core->workqueue = NULL;
+ }
+
+ return 0;
+}
+
+static int vpu_core_acquire_instance(struct vpu_core *core)
+{
+ int id;
+
+ id = ffz(core->instance_mask);
+ if (id >= core->supported_instance_count)
+ return -EINVAL;
+
+ set_bit(id, &core->instance_mask);
+
+ return id;
+}
+
+static void vpu_core_release_instance(struct vpu_core *core, int id)
+{
+ if (id < 0 || id >= core->supported_instance_count)
+ return;
+
+ clear_bit(id, &core->instance_mask);
+}
+
+struct vpu_inst *vpu_inst_get(struct vpu_inst *inst)
+{
+ if (!inst)
+ return NULL;
+
+ atomic_inc(&inst->ref_count);
+
+ return inst;
+}
+
+void vpu_inst_put(struct vpu_inst *inst)
+{
+ if (!inst)
+ return;
+ if (atomic_dec_and_test(&inst->ref_count)) {
+ if (inst->release)
+ inst->release(inst);
+ }
+}
+
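+/*
+ * A core returned in VPU_CORE_DEINIT state is booted lazily here,
+ * including the firmware load; request_count counts the users holding
+ * the core and is dropped again in vpu_release_core().
+ */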
+struct vpu_core *vpu_request_core(struct vpu_dev *vpu, enum vpu_core_type type)
+{
+ struct vpu_core *core = NULL;
+ int ret;
+
+ mutex_lock(&vpu->lock);
+
+ core = vpu_core_find_proper_by_type(vpu, type);
+ if (!core)
+ goto exit;
+
+ mutex_lock(&core->lock);
+ pm_runtime_resume_and_get(core->dev);
+
+ if (core->state == VPU_CORE_DEINIT) {
+ ret = vpu_core_boot(core, true);
+ if (ret) {
+ pm_runtime_put_sync(core->dev);
+ mutex_unlock(&core->lock);
+ core = NULL;
+ goto exit;
+ }
+ }
+
+ core->request_count++;
+
+ mutex_unlock(&core->lock);
+exit:
+ mutex_unlock(&vpu->lock);
+
+ return core;
+}
+
+void vpu_release_core(struct vpu_core *core)
+{
+ if (!core)
+ return;
+
+ mutex_lock(&core->lock);
+ pm_runtime_put_sync(core->dev);
+ if (core->request_count)
+ core->request_count--;
+ mutex_unlock(&core->lock);
+}
+
+int vpu_inst_register(struct vpu_inst *inst)
+{
+ struct vpu_dev *vpu;
+ struct vpu_core *core;
+ int ret = 0;
+
+ vpu = inst->vpu;
+ core = inst->core;
+ if (!core) {
+ core = vpu_request_core(vpu, inst->type);
+ if (!core) {
+ dev_err(vpu->dev, "there is no vpu core for %s\n",
+ vpu_core_type_desc(inst->type));
+ return -EINVAL;
+ }
+ inst->core = core;
+ inst->dev = get_device(core->dev);
+ }
+
+ mutex_lock(&core->lock);
+ if (inst->id >= 0 && inst->id < core->supported_instance_count)
+ goto exit;
+
+ ret = vpu_core_acquire_instance(core);
+ if (ret < 0)
+ goto exit;
+
+ vpu_trace(inst->dev, "[%d] %p\n", ret, inst);
+ inst->id = ret;
+ list_add_tail(&inst->list, &core->instances);
+ ret = 0;
+ if (core->res->act_size) {
+ inst->act.phys = core->act.phys + core->res->act_size * inst->id;
+ inst->act.virt = core->act.virt + core->res->act_size * inst->id;
+ inst->act.length = core->res->act_size;
+ }
+ vpu_inst_create_dbgfs_file(inst);
+exit:
+ mutex_unlock(&core->lock);
+
+ if (ret)
+ dev_err(core->dev, "register instance fail\n");
+ return ret;
+}
+
+int vpu_inst_unregister(struct vpu_inst *inst)
+{
+ struct vpu_core *core;
+
+ if (!inst->core)
+ return 0;
+
+ core = inst->core;
+ vpu_clear_request(inst);
+ mutex_lock(&core->lock);
+ if (inst->id >= 0 && inst->id < core->supported_instance_count) {
+ vpu_inst_remove_dbgfs_file(inst);
+ list_del_init(&inst->list);
+ vpu_core_release_instance(core, inst->id);
+ inst->id = VPU_INST_NULL_ID;
+ }
+ vpu_core_check_hang(core);
+ if (core->state == VPU_CORE_HANG && !core->instance_mask) {
+ dev_info(core->dev, "reset hang core\n");
+ if (!vpu_core_sw_reset(core)) {
+ core->state = VPU_CORE_ACTIVE;
+ core->hang_mask = 0;
+ }
+ }
+ mutex_unlock(&core->lock);
+
+ return 0;
+}
+
+struct vpu_inst *vpu_core_find_instance(struct vpu_core *core, u32 index)
+{
+ struct vpu_inst *inst = NULL;
+ struct vpu_inst *tmp;
+
+ mutex_lock(&core->lock);
+ if (index >= core->supported_instance_count || !test_bit(index, &core->instance_mask))
+ goto exit;
+ list_for_each_entry(tmp, &core->instances, list) {
+ if (tmp->id == index) {
+ inst = vpu_inst_get(tmp);
+ break;
+ }
+ }
+exit:
+ mutex_unlock(&core->lock);
+
+ return inst;
+}
+
+const struct vpu_core_resources *vpu_get_resource(struct vpu_inst *inst)
+{
+ struct vpu_dev *vpu;
+ struct vpu_core *core = NULL;
+ const struct vpu_core_resources *res = NULL;
+
+ if (!inst || !inst->vpu)
+ return NULL;
+
+ if (inst->core && inst->core->res)
+ return inst->core->res;
+
+ vpu = inst->vpu;
+ mutex_lock(&vpu->lock);
+ list_for_each_entry(core, &vpu->cores, list) {
+ if (core->type == inst->type) {
+ res = core->res;
+ break;
+ }
+ }
+ mutex_unlock(&vpu->lock);
+
+ return res;
+}
+
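+/*
+ * The device tree must provide two memory-region phandles: one for the
+ * firmware boot image and one for the RPC area. The RPC area is then
+ * carved up into the rpc, fwlog and act sub-buffers according to the
+ * per-core resource sizes.
+ */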
+static int vpu_core_parse_dt(struct vpu_core *core, struct device_node *np)
+{
+ struct device_node *node;
+ struct resource res;
+ int ret;
+
+ if (of_count_phandle_with_args(np, "memory-region", NULL) < 2) {
+ dev_err(core->dev, "need 2 memory-region for boot and rpc\n");
+ return -ENODEV;
+ }
+
+ node = of_parse_phandle(np, "memory-region", 0);
+ if (!node) {
+ dev_err(core->dev, "boot-region of_parse_phandle error\n");
+ return -ENODEV;
+ }
+ if (of_address_to_resource(node, 0, &res)) {
+ dev_err(core->dev, "boot-region of_address_to_resource error\n");
+ of_node_put(node);
+ return -EINVAL;
+ }
+ core->fw.phys = res.start;
+ core->fw.length = resource_size(&res);
+
+ of_node_put(node);
+
+ node = of_parse_phandle(np, "memory-region", 1);
+ if (!node) {
+ dev_err(core->dev, "rpc-region of_parse_phandle error\n");
+ return -ENODEV;
+ }
+ if (of_address_to_resource(node, 0, &res)) {
+ dev_err(core->dev, "rpc-region of_address_to_resource error\n");
+ of_node_put(node);
+ return -EINVAL;
+ }
+ core->rpc.phys = res.start;
+ core->rpc.length = resource_size(&res);
+
+ if (core->rpc.length < core->res->rpc_size + core->res->fwlog_size) {
+ dev_err(core->dev, "the rpc-region <%pad, 0x%x> is not enough\n",
+ &core->rpc.phys, core->rpc.length);
+ of_node_put(node);
+ return -EINVAL;
+ }
+
+ core->fw.virt = memremap(core->fw.phys, core->fw.length, MEMREMAP_WC);
+ core->rpc.virt = memremap(core->rpc.phys, core->rpc.length, MEMREMAP_WC);
+ if (!core->fw.virt || !core->rpc.virt) {
+ dev_err(core->dev, "failed to remap boot/rpc region\n");
+ of_node_put(node);
+ return -ENOMEM;
+ }
+ memset(core->rpc.virt, 0, core->rpc.length);
+
+ ret = vpu_iface_check_memory_region(core, core->rpc.phys, core->rpc.length);
+ if (ret != VPU_CORE_MEMORY_UNCACHED) {
+ dev_err(core->dev, "rpc region<%pad, 0x%x> isn't uncached\n",
+ &core->rpc.phys, core->rpc.length);
+ of_node_put(node);
+ return -EINVAL;
+ }
+
+ core->log.phys = core->rpc.phys + core->res->rpc_size;
+ core->log.virt = core->rpc.virt + core->res->rpc_size;
+ core->log.length = core->res->fwlog_size;
+ core->act.phys = core->log.phys + core->log.length;
+ core->act.virt = core->log.virt + core->log.length;
+ core->act.length = core->rpc.length - core->res->rpc_size - core->log.length;
+ core->rpc.length = core->res->rpc_size;
+
+ of_node_put(node);
+
+ return 0;
+}
+
+static int vpu_core_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct vpu_core *core;
+ struct vpu_dev *vpu = dev_get_drvdata(dev->parent);
+ struct vpu_shared_addr *iface;
+ u32 iface_data_size;
+ int ret;
+
+ dev_dbg(dev, "probe\n");
+ if (!vpu)
+ return -EINVAL;
+ core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
+ if (!core)
+ return -ENOMEM;
+
+ core->pdev = pdev;
+ core->dev = dev;
+ platform_set_drvdata(pdev, core);
+ core->vpu = vpu;
+ INIT_LIST_HEAD(&core->instances);
+ mutex_init(&core->lock);
+ mutex_init(&core->cmd_lock);
+ init_completion(&core->cmp);
+ init_waitqueue_head(&core->ack_wq);
+ core->state = VPU_CORE_DEINIT;
+
+ core->res = of_device_get_match_data(dev);
+ if (!core->res)
+ return -ENODEV;
+
+ core->type = core->res->type;
+ core->id = of_alias_get_id(dev->of_node, "vpu_core");
+ if (core->id < 0) {
+ dev_err(dev, "can't get vpu core id\n");
+ return core->id;
+ }
+ dev_info(core->dev, "[%d] = %s\n", core->id, vpu_core_type_desc(core->type));
+ ret = vpu_core_parse_dt(core, dev->of_node);
+ if (ret)
+ return ret;
+
+ core->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(core->base))
+ return PTR_ERR(core->base);
+
+ if (!vpu_iface_check_codec(core)) {
+ dev_err(core->dev, "is not supported\n");
+ return -EINVAL;
+ }
+
+ ret = vpu_mbox_init(core);
+ if (ret)
+ return ret;
+
+ iface = devm_kzalloc(dev, sizeof(*iface), GFP_KERNEL);
+ if (!iface)
+ return -ENOMEM;
+
+ iface_data_size = vpu_iface_get_data_size(core);
+ if (iface_data_size) {
+ iface->priv = devm_kzalloc(dev, iface_data_size, GFP_KERNEL);
+ if (!iface->priv)
+ return -ENOMEM;
+ }
+
+ ret = vpu_iface_init(core, iface, &core->rpc, core->fw.phys);
+ if (ret) {
+ dev_err(core->dev, "init iface fail, ret = %d\n", ret);
+ return ret;
+ }
+
+ vpu_iface_config_system(core, vpu->res->mreg_base, vpu->base);
+ vpu_iface_set_log_buf(core, &core->log);
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ pm_runtime_put_noidle(dev);
+ pm_runtime_set_suspended(dev);
+ goto err_runtime_disable;
+ }
+
+ ret = vpu_core_register(dev->parent, core);
+ if (ret)
+ goto err_core_register;
+ core->parent = dev->parent;
+
+ pm_runtime_put_sync(dev);
+ vpu_core_create_dbgfs_file(core);
+
+ return 0;
+
+err_core_register:
+ pm_runtime_put_sync(dev);
+err_runtime_disable:
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int vpu_core_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct vpu_core *core = platform_get_drvdata(pdev);
+ int ret;
+
+ vpu_core_remove_dbgfs_file(core);
+ ret = pm_runtime_resume_and_get(dev);
+ WARN_ON(ret < 0);
+
+ vpu_core_shutdown(core);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ vpu_core_unregister(core->parent, core);
+ memunmap(core->fw.virt);
+ memunmap(core->rpc.virt);
+ mutex_destroy(&core->lock);
+ mutex_destroy(&core->cmd_lock);
+
+ return 0;
+}
+
+static int __maybe_unused vpu_core_runtime_resume(struct device *dev)
+{
+ struct vpu_core *core = dev_get_drvdata(dev);
+
+ return vpu_mbox_request(core);
+}
+
+static int __maybe_unused vpu_core_runtime_suspend(struct device *dev)
+{
+ struct vpu_core *core = dev_get_drvdata(dev);
+
+ vpu_mbox_free(core);
+ return 0;
+}
+
+static void vpu_core_cancel_work(struct vpu_core *core)
+{
+ struct vpu_inst *inst = NULL;
+
+ cancel_work_sync(&core->msg_work);
+ cancel_delayed_work_sync(&core->msg_delayed_work);
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list)
+ cancel_work_sync(&inst->msg_work);
+ mutex_unlock(&core->lock);
+}
+
+static void vpu_core_resume_work(struct vpu_core *core)
+{
+ struct vpu_inst *inst = NULL;
+ unsigned long delay = msecs_to_jiffies(10);
+
+ queue_work(core->workqueue, &core->msg_work);
+ queue_delayed_work(core->workqueue, &core->msg_delayed_work, delay);
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list)
+ queue_work(inst->workqueue, &inst->msg_work);
+ mutex_unlock(&core->lock);
+}
+
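+/*
+ * System sleep: suspend snapshots the firmware state (VPU_CORE_SNAPSHOT)
+ * while instances exist; resume then either boots the firmware again
+ * (power was lost) or performs a software reset (power was retained)
+ * before returning to VPU_CORE_ACTIVE.
+ */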
+static int __maybe_unused vpu_core_resume(struct device *dev)
+{
+ struct vpu_core *core = dev_get_drvdata(dev);
+ int ret = 0;
+
+ mutex_lock(&core->lock);
+ pm_runtime_resume_and_get(dev);
+ vpu_core_get_vpu(core);
+ if (core->state != VPU_CORE_SNAPSHOT)
+ goto exit;
+
+ if (!vpu_iface_get_power_state(core)) {
+ if (!list_empty(&core->instances)) {
+ ret = vpu_core_boot(core, false);
+ if (ret) {
+ dev_err(core->dev, "%s boot fail\n", __func__);
+ core->state = VPU_CORE_DEINIT;
+ goto exit;
+ }
+ } else {
+ core->state = VPU_CORE_DEINIT;
+ }
+ } else {
+ if (!list_empty(&core->instances)) {
+ ret = vpu_core_sw_reset(core);
+ if (ret) {
+ dev_err(core->dev, "%s sw_reset fail\n", __func__);
+ core->state = VPU_CORE_HANG;
+ goto exit;
+ }
+ }
+ core->state = VPU_CORE_ACTIVE;
+ }
+
+exit:
+ pm_runtime_put_sync(dev);
+ mutex_unlock(&core->lock);
+
+ vpu_core_resume_work(core);
+ return ret;
+}
+
+static int __maybe_unused vpu_core_suspend(struct device *dev)
+{
+ struct vpu_core *core = dev_get_drvdata(dev);
+ int ret = 0;
+
+ mutex_lock(&core->lock);
+ if (core->state == VPU_CORE_ACTIVE) {
+ if (!list_empty(&core->instances)) {
+ ret = vpu_core_snapshot(core);
+ if (ret) {
+ mutex_unlock(&core->lock);
+ return ret;
+ }
+ }
+
+ core->state = VPU_CORE_SNAPSHOT;
+ }
+ mutex_unlock(&core->lock);
+
+ vpu_core_cancel_work(core);
+
+ mutex_lock(&core->lock);
+ vpu_core_put_vpu(core);
+ mutex_unlock(&core->lock);
+ return ret;
+}
+
+static const struct dev_pm_ops vpu_core_pm_ops = {
+ SET_RUNTIME_PM_OPS(vpu_core_runtime_suspend, vpu_core_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(vpu_core_suspend, vpu_core_resume)
+};
+
+static struct vpu_core_resources imx8q_enc = {
+ .type = VPU_CORE_TYPE_ENC,
+ .fwname = "vpu/vpu_fw_imx8_enc.bin",
+ .stride = 16,
+ .max_width = 1920,
+ .max_height = 1920,
+ .min_width = 64,
+ .min_height = 48,
+ .step_width = 2,
+ .step_height = 2,
+ .rpc_size = 0x80000,
+ .fwlog_size = 0x80000,
+ .act_size = 0xc0000,
+};
+
+static struct vpu_core_resources imx8q_dec = {
+ .type = VPU_CORE_TYPE_DEC,
+ .fwname = "vpu/vpu_fw_imx8_dec.bin",
+ .stride = 256,
+ .max_width = 8188,
+ .max_height = 8188,
+ .min_width = 16,
+ .min_height = 16,
+ .step_width = 1,
+ .step_height = 1,
+ .rpc_size = 0x80000,
+ .fwlog_size = 0x80000,
+};
+
+static const struct of_device_id vpu_core_dt_match[] = {
+ { .compatible = "nxp,imx8q-vpu-encoder", .data = &imx8q_enc },
+ { .compatible = "nxp,imx8q-vpu-decoder", .data = &imx8q_dec },
+ {}
+};
+MODULE_DEVICE_TABLE(of, vpu_core_dt_match);
+
+static struct platform_driver amphion_vpu_core_driver = {
+ .probe = vpu_core_probe,
+ .remove = vpu_core_remove,
+ .driver = {
+ .name = "amphion-vpu-core",
+ .of_match_table = vpu_core_dt_match,
+ .pm = &vpu_core_pm_ops,
+ },
+};
+
+int __init vpu_core_driver_init(void)
+{
+ return platform_driver_register(&amphion_vpu_core_driver);
+}
+
+void __exit vpu_core_driver_exit(void)
+{
+ platform_driver_unregister(&amphion_vpu_core_driver);
+}
diff --git a/drivers/media/platform/amphion/vpu_core.h b/drivers/media/platform/amphion/vpu_core.h
new file mode 100644
index 000000000000..00a662997da4
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_core.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#ifndef _AMPHION_VPU_CORE_H
+#define _AMPHION_VPU_CORE_H
+
+void csr_writel(struct vpu_core *core, u32 reg, u32 val);
+u32 csr_readl(struct vpu_core *core, u32 reg);
+int vpu_alloc_dma(struct vpu_core *core, struct vpu_buffer *buf);
+void vpu_free_dma(struct vpu_buffer *buf);
+struct vpu_inst *vpu_core_find_instance(struct vpu_core *core, u32 index);
+
+#endif
diff --git a/drivers/media/platform/amphion/vpu_dbg.c b/drivers/media/platform/amphion/vpu_dbg.c
new file mode 100644
index 000000000000..376196bea178
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_dbg.c
@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-device.h>
+#include <linux/debugfs.h>
+#include "vpu.h"
+#include "vpu_defs.h"
+#include "vpu_helpers.h"
+#include "vpu_cmds.h"
+#include "vpu_rpc.h"
+#include "vpu_v4l2.h"
+
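+/*
+ * Header of the firmware log ring buffer as this driver interprets it:
+ * read/write are byte offsets into buffer[], and vpu_dbg_fwlog()
+ * advances read only after the data has been copied out successfully.
+ */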
+struct print_buf_desc {
+ u32 start_h_phy;
+ u32 start_h_vir;
+ u32 start_m;
+ u32 bytes;
+ u32 read;
+ u32 write;
+ char buffer[];
+};
+
+static char *vb2_stat_name[] = {
+ [VB2_BUF_STATE_DEQUEUED] = "dequeued",
+ [VB2_BUF_STATE_IN_REQUEST] = "in_request",
+ [VB2_BUF_STATE_PREPARING] = "preparing",
+ [VB2_BUF_STATE_QUEUED] = "queued",
+ [VB2_BUF_STATE_ACTIVE] = "active",
+ [VB2_BUF_STATE_DONE] = "done",
+ [VB2_BUF_STATE_ERROR] = "error",
+};
+
+static char *vpu_stat_name[] = {
+ [VPU_BUF_STATE_IDLE] = "idle",
+ [VPU_BUF_STATE_INUSE] = "inuse",
+ [VPU_BUF_STATE_DECODED] = "decoded",
+ [VPU_BUF_STATE_READY] = "ready",
+ [VPU_BUF_STATE_SKIP] = "skip",
+ [VPU_BUF_STATE_ERROR] = "error",
+};
+
+static int vpu_dbg_instance(struct seq_file *s, void *data)
+{
+ struct vpu_inst *inst = s->private;
+ char str[128];
+ int num;
+ struct vb2_queue *vq;
+ int i;
+
+ if (!inst->fh.m2m_ctx)
+ return 0;
+ num = scnprintf(str, sizeof(str), "[%s]\n", vpu_core_type_desc(inst->type));
+ if (seq_write(s, str, num))
+ return 0;
+
+ num = scnprintf(str, sizeof(str), "tgig = %d,pid = %d\n", inst->tgid, inst->pid);
+ if (seq_write(s, str, num))
+ return 0;
+ num = scnprintf(str, sizeof(str), "state = %d\n", inst->state);
+ if (seq_write(s, str, num))
+ return 0;
+ num = scnprintf(str, sizeof(str),
+ "min_buffer_out = %d, min_buffer_cap = %d\n",
+ inst->min_buffer_out, inst->min_buffer_cap);
+ if (seq_write(s, str, num))
+ return 0;
+
+ vq = v4l2_m2m_get_src_vq(inst->fh.m2m_ctx);
+ num = scnprintf(str, sizeof(str),
+ "output (%2d, %2d): fmt = %c%c%c%c %d x %d, %d;",
+ vb2_is_streaming(vq),
+ vq->num_buffers,
+ inst->out_format.pixfmt,
+ inst->out_format.pixfmt >> 8,
+ inst->out_format.pixfmt >> 16,
+ inst->out_format.pixfmt >> 24,
+ inst->out_format.width,
+ inst->out_format.height,
+ vq->last_buffer_dequeued);
+ if (seq_write(s, str, num))
+ return 0;
+ for (i = 0; i < inst->out_format.num_planes; i++) {
+ num = scnprintf(str, sizeof(str), " %d(%d)",
+ inst->out_format.sizeimage[i],
+ inst->out_format.bytesperline[i]);
+ if (seq_write(s, str, num))
+ return 0;
+ }
+ if (seq_write(s, "\n", 1))
+ return 0;
+
+ vq = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
+ num = scnprintf(str, sizeof(str),
+ "capture(%2d, %2d): fmt = %c%c%c%c %d x %d, %d;",
+ vb2_is_streaming(vq),
+ vq->num_buffers,
+ inst->cap_format.pixfmt,
+ inst->cap_format.pixfmt >> 8,
+ inst->cap_format.pixfmt >> 16,
+ inst->cap_format.pixfmt >> 24,
+ inst->cap_format.width,
+ inst->cap_format.height,
+ vq->last_buffer_dequeued);
+ if (seq_write(s, str, num))
+ return 0;
+ for (i = 0; i < inst->cap_format.num_planes; i++) {
+ num = scnprintf(str, sizeof(str), " %d(%d)",
+ inst->cap_format.sizeimage[i],
+ inst->cap_format.bytesperline[i]);
+ if (seq_write(s, str, num))
+ return 0;
+ }
+ if (seq_write(s, "\n", 1))
+ return 0;
+ num = scnprintf(str, sizeof(str), "crop: (%d, %d) %d x %d\n",
+ inst->crop.left,
+ inst->crop.top,
+ inst->crop.width,
+ inst->crop.height);
+ if (seq_write(s, str, num))
+ return 0;
+
+ vq = v4l2_m2m_get_src_vq(inst->fh.m2m_ctx);
+ for (i = 0; i < vq->num_buffers; i++) {
+ struct vb2_buffer *vb = vq->bufs[i];
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ if (vb->state == VB2_BUF_STATE_DEQUEUED)
+ continue;
+ num = scnprintf(str, sizeof(str),
+ "output [%2d] state = %10s, %8s\n",
+ i, vb2_stat_name[vb->state],
+ vpu_stat_name[vpu_get_buffer_state(vbuf)]);
+ if (seq_write(s, str, num))
+ return 0;
+ }
+
+ vq = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
+ for (i = 0; i < vq->num_buffers; i++) {
+ struct vb2_buffer *vb = vq->bufs[i];
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ if (vb->state == VB2_BUF_STATE_DEQUEUED)
+ continue;
+ num = scnprintf(str, sizeof(str),
+ "capture[%2d] state = %10s, %8s\n",
+ i, vb2_stat_name[vb->state],
+ vpu_stat_name[vpu_get_buffer_state(vbuf)]);
+ if (seq_write(s, str, num))
+ return 0;
+ }
+
+ num = scnprintf(str, sizeof(str), "sequence = %d\n", inst->sequence);
+ if (seq_write(s, str, num))
+ return 0;
+
+ if (inst->use_stream_buffer) {
+ num = scnprintf(str, sizeof(str), "stream_buffer = %d / %d, <%pad, 0x%x>\n",
+ vpu_helper_get_used_space(inst),
+ inst->stream_buffer.length,
+ &inst->stream_buffer.phys,
+ inst->stream_buffer.length);
+ if (seq_write(s, str, num))
+ return 0;
+ }
+ num = scnprintf(str, sizeof(str), "kfifo len = 0x%x\n", kfifo_len(&inst->msg_fifo));
+ if (seq_write(s, str, num))
+ return 0;
+
+ num = scnprintf(str, sizeof(str), "flow :\n");
+ if (seq_write(s, str, num))
+ return 0;
+
+ mutex_lock(&inst->core->cmd_lock);
+ for (i = 0; i < ARRAY_SIZE(inst->flows); i++) {
+ u32 idx = (inst->flow_idx + i) % (ARRAY_SIZE(inst->flows));
+
+ if (!inst->flows[idx])
+ continue;
+ num = scnprintf(str, sizeof(str), "\t[%s]0x%x\n",
+ inst->flows[idx] >= VPU_MSG_ID_NOOP ? "M" : "C",
+ inst->flows[idx]);
+ if (seq_write(s, str, num)) {
+ mutex_unlock(&inst->core->cmd_lock);
+ return 0;
+ }
+ }
+ mutex_unlock(&inst->core->cmd_lock);
+
+ i = 0;
+ while (true) {
+ num = call_vop(inst, get_debug_info, str, sizeof(str), i++);
+ if (num <= 0)
+ break;
+ if (seq_write(s, str, num))
+ return 0;
+ }
+
+ return 0;
+}
+
+static int vpu_dbg_core(struct seq_file *s, void *data)
+{
+ struct vpu_core *core = s->private;
+ struct vpu_shared_addr *iface = core->iface;
+ char str[128];
+ int num;
+
+ num = scnprintf(str, sizeof(str), "[%s]\n", vpu_core_type_desc(core->type));
+ if (seq_write(s, str, num))
+ return 0;
+
+ num = scnprintf(str, sizeof(str), "boot_region = <%pad, 0x%x>\n",
+ &core->fw.phys, core->fw.length);
+ if (seq_write(s, str, num))
+ return 0;
+ num = scnprintf(str, sizeof(str), "rpc_region = <%pad, 0x%x> used = 0x%x\n",
+ &core->rpc.phys, core->rpc.length, core->rpc.bytesused);
+ if (seq_write(s, str, num))
+ return 0;
+ num = scnprintf(str, sizeof(str), "fwlog_region = <%pad, 0x%x>\n",
+ &core->log.phys, core->log.length);
+ if (seq_write(s, str, num))
+ return 0;
+
+ num = scnprintf(str, sizeof(str), "state = %d\n", core->state);
+ if (seq_write(s, str, num))
+ return 0;
+ if (core->state == VPU_CORE_DEINIT)
+ return 0;
+ num = scnprintf(str, sizeof(str), "fw version = %d.%d.%d\n",
+ (core->fw_version >> 16) & 0xff,
+ (core->fw_version >> 8) & 0xff,
+ core->fw_version & 0xff);
+ if (seq_write(s, str, num))
+ return 0;
+ num = scnprintf(str, sizeof(str), "instances = %d/%d (0x%02lx), %d\n",
+ hweight32(core->instance_mask),
+ core->supported_instance_count,
+ core->instance_mask,
+ core->request_count);
+ if (seq_write(s, str, num))
+ return 0;
+ num = scnprintf(str, sizeof(str), "kfifo len = 0x%x\n", kfifo_len(&core->msg_fifo));
+ if (seq_write(s, str, num))
+ return 0;
+ num = scnprintf(str, sizeof(str),
+ "cmd_buf:[0x%x, 0x%x], wptr = 0x%x, rptr = 0x%x\n",
+ iface->cmd_desc->start,
+ iface->cmd_desc->end,
+ iface->cmd_desc->wptr,
+ iface->cmd_desc->rptr);
+ if (seq_write(s, str, num))
+ return 0;
+ num = scnprintf(str, sizeof(str),
+ "msg_buf:[0x%x, 0x%x], wptr = 0x%x, rptr = 0x%x\n",
+ iface->msg_desc->start,
+ iface->msg_desc->end,
+ iface->msg_desc->wptr,
+ iface->msg_desc->rptr);
+ if (seq_write(s, str, num))
+ return 0;
+
+ return 0;
+}
+
+static int vpu_dbg_fwlog(struct seq_file *s, void *data)
+{
+ struct vpu_core *core = s->private;
+ struct print_buf_desc *print_buf;
+ int length;
+ u32 rptr;
+ u32 wptr;
+ int ret = 0;
+
+ if (!core->log.virt || core->state == VPU_CORE_DEINIT)
+ return 0;
+
+ print_buf = core->log.virt;
+ rptr = print_buf->read;
+ wptr = print_buf->write;
+
+ if (rptr == wptr)
+ return 0;
+ else if (rptr < wptr)
+ length = wptr - rptr;
+ else
+ length = print_buf->bytes + wptr - rptr;
+
+ if (s->count + length >= s->size) {
+ s->count = s->size;
+ return 0;
+ }
+
+ if (rptr + length >= print_buf->bytes) {
+ int num = print_buf->bytes - rptr;
+
+ if (seq_write(s, print_buf->buffer + rptr, num))
+ ret = -1;
+ length -= num;
+ rptr = 0;
+ }
+
+ if (length) {
+ if (seq_write(s, print_buf->buffer + rptr, length))
+ ret = -1;
+ rptr += length;
+ }
+ if (!ret)
+ print_buf->read = rptr;
+
+ return 0;
+}
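+
+/*
+ * Worked example of the wrap-around handling above (illustrative values):
+ * with print_buf->bytes = 0x1000, rptr = 0xf00 and wptr = 0x100, length
+ * computes to 0x1000 + 0x100 - 0xf00 = 0x200, emitted as one 0x100-byte
+ * chunk from the tail of the log buffer followed by one 0x100-byte chunk
+ * from its head.
+ */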
+
+static int vpu_dbg_inst_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, vpu_dbg_instance, inode->i_private);
+}
+
+static ssize_t vpu_dbg_inst_write(struct file *file,
+ const char __user *user_buf, size_t size, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct vpu_inst *inst = s->private;
+
+ vpu_session_debug(inst);
+
+ return size;
+}
+
+static ssize_t vpu_dbg_core_write(struct file *file,
+ const char __user *user_buf, size_t size, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct vpu_core *core = s->private;
+
+ pm_runtime_resume_and_get(core->dev);
+ mutex_lock(&core->lock);
+ if (core->state != VPU_CORE_DEINIT && !core->instance_mask) {
+ dev_info(core->dev, "reset\n");
+ if (!vpu_core_sw_reset(core)) {
+ core->state = VPU_CORE_ACTIVE;
+ core->hang_mask = 0;
+ }
+ }
+ mutex_unlock(&core->lock);
+ pm_runtime_put_sync(core->dev);
+
+ return size;
+}
+
+static int vpu_dbg_core_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, vpu_dbg_core, inode->i_private);
+}
+
+static int vpu_dbg_fwlog_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, vpu_dbg_fwlog, inode->i_private);
+}
+
+static const struct file_operations vpu_dbg_inst_fops = {
+ .owner = THIS_MODULE,
+ .open = vpu_dbg_inst_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = vpu_dbg_inst_write,
+};
+
+static const struct file_operations vpu_dbg_core_fops = {
+ .owner = THIS_MODULE,
+ .open = vpu_dbg_core_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = vpu_dbg_core_write,
+};
+
+static const struct file_operations vpu_dbg_fwlog_fops = {
+ .owner = THIS_MODULE,
+ .open = vpu_dbg_fwlog_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+int vpu_inst_create_dbgfs_file(struct vpu_inst *inst)
+{
+ struct vpu_dev *vpu;
+ char name[64];
+
+ if (!inst || !inst->core || !inst->core->vpu)
+ return -EINVAL;
+
+ vpu = inst->core->vpu;
+ if (!vpu->debugfs)
+ return -EINVAL;
+
+ if (inst->debugfs)
+ return 0;
+
+ scnprintf(name, sizeof(name), "instance.%d.%d", inst->core->id, inst->id);
+	inst->debugfs = debugfs_create_file(name,
+ VERIFY_OCTAL_PERMISSIONS(0644),
+ vpu->debugfs,
+ inst,
+ &vpu_dbg_inst_fops);
+ if (!inst->debugfs) {
+ dev_err(inst->dev, "vpu create debugfs %s fail\n", name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int vpu_inst_remove_dbgfs_file(struct vpu_inst *inst)
+{
+ if (!inst)
+ return 0;
+
+ debugfs_remove(inst->debugfs);
+ inst->debugfs = NULL;
+
+ return 0;
+}
+
+int vpu_core_create_dbgfs_file(struct vpu_core *core)
+{
+ struct vpu_dev *vpu;
+ char name[64];
+
+ if (!core || !core->vpu)
+ return -EINVAL;
+
+ vpu = core->vpu;
+ if (!vpu->debugfs)
+ return -EINVAL;
+
+ if (!core->debugfs) {
+ scnprintf(name, sizeof(name), "core.%d", core->id);
+		core->debugfs = debugfs_create_file(name,
+ VERIFY_OCTAL_PERMISSIONS(0644),
+ vpu->debugfs,
+ core,
+ &vpu_dbg_core_fops);
+ if (!core->debugfs) {
+ dev_err(core->dev, "vpu create debugfs %s fail\n", name);
+ return -EINVAL;
+ }
+ }
+ if (!core->debugfs_fwlog) {
+ scnprintf(name, sizeof(name), "fwlog.%d", core->id);
+		core->debugfs_fwlog = debugfs_create_file(name,
+ VERIFY_OCTAL_PERMISSIONS(0444),
+ vpu->debugfs,
+ core,
+ &vpu_dbg_fwlog_fops);
+ if (!core->debugfs_fwlog) {
+ dev_err(core->dev, "vpu create debugfs %s fail\n", name);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int vpu_core_remove_dbgfs_file(struct vpu_core *core)
+{
+ if (!core)
+ return 0;
+ debugfs_remove(core->debugfs);
+ core->debugfs = NULL;
+ debugfs_remove(core->debugfs_fwlog);
+ core->debugfs_fwlog = NULL;
+
+ return 0;
+}
+
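+/*
+ * inst->flows[] is a small ring buffer: the slot at flow_idx is the next one
+ * to be overwritten, i.e. the oldest record, which is why the debugfs dump in
+ * vpu_dbg_instance() starts walking the array at flow_idx. Entries at or
+ * above VPU_MSG_ID_NOOP are firmware messages ("M"), the rest are host
+ * commands ("C").
+ */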
+void vpu_inst_record_flow(struct vpu_inst *inst, u32 flow)
+{
+ if (!inst)
+ return;
+
+ inst->flows[inst->flow_idx] = flow;
+ inst->flow_idx = (inst->flow_idx + 1) % (ARRAY_SIZE(inst->flows));
+}
diff --git a/drivers/media/platform/amphion/vpu_defs.h b/drivers/media/platform/amphion/vpu_defs.h
new file mode 100644
index 000000000000..282664202dcf
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_defs.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#ifndef _AMPHION_VPU_DEFS_H
+#define _AMPHION_VPU_DEFS_H
+
+enum MSG_TYPE {
+ INIT_DONE = 1,
+ PRC_BUF_OFFSET,
+ BOOT_ADDRESS,
+ COMMAND,
+ EVENT,
+};
+
+enum {
+ VPU_IRQ_CODE_BOOT_DONE = 0x55,
+ VPU_IRQ_CODE_SNAPSHOT_DONE = 0xa5,
+ VPU_IRQ_CODE_SYNC = 0xaa,
+};
+
+enum {
+ VPU_CMD_ID_NOOP = 0x0,
+ VPU_CMD_ID_CONFIGURE_CODEC,
+ VPU_CMD_ID_START,
+ VPU_CMD_ID_STOP,
+ VPU_CMD_ID_ABORT,
+ VPU_CMD_ID_RST_BUF,
+ VPU_CMD_ID_SNAPSHOT,
+ VPU_CMD_ID_FIRM_RESET,
+ VPU_CMD_ID_UPDATE_PARAMETER,
+ VPU_CMD_ID_FRAME_ENCODE,
+ VPU_CMD_ID_SKIP,
+ VPU_CMD_ID_PARSE_NEXT_SEQ,
+ VPU_CMD_ID_PARSE_NEXT_I,
+ VPU_CMD_ID_PARSE_NEXT_IP,
+ VPU_CMD_ID_PARSE_NEXT_ANY,
+ VPU_CMD_ID_DEC_PIC,
+ VPU_CMD_ID_FS_ALLOC,
+ VPU_CMD_ID_FS_RELEASE,
+ VPU_CMD_ID_TIMESTAMP,
+ VPU_CMD_ID_DEBUG
+};
+
+enum {
+ VPU_MSG_ID_NOOP = 0x100,
+ VPU_MSG_ID_RESET_DONE,
+ VPU_MSG_ID_START_DONE,
+ VPU_MSG_ID_STOP_DONE,
+ VPU_MSG_ID_ABORT_DONE,
+ VPU_MSG_ID_BUF_RST,
+ VPU_MSG_ID_MEM_REQUEST,
+ VPU_MSG_ID_PARAM_UPD_DONE,
+ VPU_MSG_ID_FRAME_INPUT_DONE,
+ VPU_MSG_ID_ENC_DONE,
+ VPU_MSG_ID_DEC_DONE,
+ VPU_MSG_ID_FRAME_REQ,
+ VPU_MSG_ID_FRAME_RELEASE,
+ VPU_MSG_ID_SEQ_HDR_FOUND,
+ VPU_MSG_ID_RES_CHANGE,
+ VPU_MSG_ID_PIC_HDR_FOUND,
+ VPU_MSG_ID_PIC_DECODED,
+ VPU_MSG_ID_PIC_EOS,
+ VPU_MSG_ID_FIFO_LOW,
+ VPU_MSG_ID_FIFO_HIGH,
+ VPU_MSG_ID_FIFO_EMPTY,
+ VPU_MSG_ID_FIFO_FULL,
+ VPU_MSG_ID_BS_ERROR,
+ VPU_MSG_ID_UNSUPPORTED,
+ VPU_MSG_ID_TIMESTAMP_INFO,
+
+ VPU_MSG_ID_FIRMWARE_XCPT,
+};
+
+enum VPU_ENC_MEMORY_RESOURCE {
+ MEM_RES_ENC,
+ MEM_RES_REF,
+ MEM_RES_ACT
+};
+
+enum VPU_DEC_MEMORY_RESOURCE {
+ MEM_RES_FRAME,
+ MEM_RES_MBI,
+ MEM_RES_DCP
+};
+
+enum VPU_SCODE_TYPE {
+ SCODE_PADDING_EOS = 1,
+ SCODE_PADDING_BUFFLUSH = 2,
+ SCODE_PADDING_ABORT = 3,
+ SCODE_SEQUENCE = 0x31,
+ SCODE_PICTURE = 0x32,
+ SCODE_SLICE = 0x33
+};
+
+struct vpu_pkt_mem_req_data {
+ u32 enc_frame_size;
+ u32 enc_frame_num;
+ u32 ref_frame_size;
+ u32 ref_frame_num;
+ u32 act_buf_size;
+ u32 act_buf_num;
+};
+
+struct vpu_enc_pic_info {
+ u32 frame_id;
+ u32 pic_type;
+ u32 skipped_frame;
+ u32 error_flag;
+ u32 psnr;
+ u32 frame_size;
+ u32 wptr;
+ u32 crc;
+ s64 timestamp;
+};
+
+struct vpu_dec_codec_info {
+ u32 pixfmt;
+ u32 num_ref_frms;
+ u32 num_dpb_frms;
+ u32 num_dfe_area;
+ u32 color_primaries;
+ u32 transfer_chars;
+ u32 matrix_coeffs;
+ u32 full_range;
+ u32 vui_present;
+ u32 progressive;
+ u32 width;
+ u32 height;
+ u32 decoded_width;
+ u32 decoded_height;
+ struct v4l2_fract frame_rate;
+ u32 dsp_asp_ratio;
+ u32 level_idc;
+ u32 bit_depth_luma;
+ u32 bit_depth_chroma;
+ u32 chroma_fmt;
+ u32 mvc_num_views;
+ u32 offset_x;
+ u32 offset_y;
+ u32 tag;
+ u32 sizeimage[VIDEO_MAX_PLANES];
+ u32 bytesperline[VIDEO_MAX_PLANES];
+ u32 mbi_size;
+ u32 dcp_size;
+ u32 stride;
+};
+
+struct vpu_dec_pic_info {
+ u32 id;
+ u32 luma;
+ u32 start;
+ u32 end;
+ u32 pic_size;
+ u32 stride;
+ u32 skipped;
+ s64 timestamp;
+ u32 consumed_count;
+};
+
+struct vpu_fs_info {
+ u32 id;
+ u32 type;
+ u32 tag;
+ u32 luma_addr;
+ u32 luma_size;
+ u32 chroma_addr;
+ u32 chromau_size;
+ u32 chromav_addr;
+ u32 chromav_size;
+ u32 bytesperline;
+ u32 not_displayed;
+};
+
+struct vpu_ts_info {
+ s64 timestamp;
+ u32 size;
+};
+
+#define BITRATE_STEP (1024)
+#define BITRATE_MIN (16 * BITRATE_STEP)
+#define BITRATE_MAX (240 * 1024 * BITRATE_STEP)
+#define BITRATE_DEFAULT (2 * 1024 * BITRATE_STEP)
+#define BITRATE_DEFAULT_PEAK (BITRATE_DEFAULT * 2)
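+
+/*
+ * For reference (assuming the values are in bits per second): BITRATE_MIN is
+ * 16 Kbit/s, BITRATE_MAX is 240 Mbit/s and BITRATE_DEFAULT is 2 Mbit/s, all
+ * expressed in BITRATE_STEP (1024) increments.
+ */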
+
+#endif
diff --git a/drivers/media/platform/amphion/vpu_drv.c b/drivers/media/platform/amphion/vpu_drv.c
new file mode 100644
index 000000000000..9d5a5075343d
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_drv.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#include <linux/init.h>
+#include <linux/interconnect.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dma-map-ops.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pm_runtime.h>
+#include <linux/videodev2.h>
+#include <linux/of_reserved_mem.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-ioctl.h>
+#include <linux/debugfs.h>
+#include "vpu.h"
+#include "vpu_imx8q.h"
+
+bool debug;
+module_param(debug, bool, 0644);
+
+void vpu_writel(struct vpu_dev *vpu, u32 reg, u32 val)
+{
+ writel(val, vpu->base + reg);
+}
+
+u32 vpu_readl(struct vpu_dev *vpu, u32 reg)
+{
+ return readl(vpu->base + reg);
+}
+
+static void vpu_dev_get(struct vpu_dev *vpu)
+{
+ if (atomic_inc_return(&vpu->ref_vpu) == 1 && vpu->res->setup)
+ vpu->res->setup(vpu);
+}
+
+static void vpu_dev_put(struct vpu_dev *vpu)
+{
+ atomic_dec(&vpu->ref_vpu);
+}
+
+static void vpu_enc_get(struct vpu_dev *vpu)
+{
+ if (atomic_inc_return(&vpu->ref_enc) == 1 && vpu->res->setup_encoder)
+ vpu->res->setup_encoder(vpu);
+}
+
+static void vpu_enc_put(struct vpu_dev *vpu)
+{
+ atomic_dec(&vpu->ref_enc);
+}
+
+static void vpu_dec_get(struct vpu_dev *vpu)
+{
+ if (atomic_inc_return(&vpu->ref_dec) == 1 && vpu->res->setup_decoder)
+ vpu->res->setup_decoder(vpu);
+}
+
+static void vpu_dec_put(struct vpu_dev *vpu)
+{
+ atomic_dec(&vpu->ref_dec);
+}
+
+static int vpu_init_media_device(struct vpu_dev *vpu)
+{
+ vpu->mdev.dev = vpu->dev;
+ strscpy(vpu->mdev.model, "amphion-vpu", sizeof(vpu->mdev.model));
+ strscpy(vpu->mdev.bus_info, "platform: amphion-vpu", sizeof(vpu->mdev.bus_info));
+ media_device_init(&vpu->mdev);
+ vpu->v4l2_dev.mdev = &vpu->mdev;
+
+ return 0;
+}
+
+static int vpu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct vpu_dev *vpu;
+ int ret;
+
+ dev_dbg(dev, "probe\n");
+ vpu = devm_kzalloc(dev, sizeof(*vpu), GFP_KERNEL);
+ if (!vpu)
+ return -ENOMEM;
+
+ vpu->pdev = pdev;
+ vpu->dev = dev;
+ mutex_init(&vpu->lock);
+ INIT_LIST_HEAD(&vpu->cores);
+ platform_set_drvdata(pdev, vpu);
+ atomic_set(&vpu->ref_vpu, 0);
+ atomic_set(&vpu->ref_enc, 0);
+ atomic_set(&vpu->ref_dec, 0);
+ vpu->get_vpu = vpu_dev_get;
+ vpu->put_vpu = vpu_dev_put;
+ vpu->get_enc = vpu_enc_get;
+ vpu->put_enc = vpu_enc_put;
+ vpu->get_dec = vpu_dec_get;
+ vpu->put_dec = vpu_dec_put;
+
+ vpu->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vpu->base))
+ return PTR_ERR(vpu->base);
+
+ vpu->res = of_device_get_match_data(dev);
+ if (!vpu->res)
+ return -ENODEV;
+
+ pm_runtime_enable(dev);
+
+ ret = v4l2_device_register(dev, &vpu->v4l2_dev);
+ if (ret)
+ goto err_vpu_deinit;
+
+ vpu_init_media_device(vpu);
+ vpu->encoder.type = VPU_CORE_TYPE_ENC;
+ vpu->encoder.function = MEDIA_ENT_F_PROC_VIDEO_ENCODER;
+ vpu->decoder.type = VPU_CORE_TYPE_DEC;
+ vpu->decoder.function = MEDIA_ENT_F_PROC_VIDEO_DECODER;
+ ret = vpu_add_func(vpu, &vpu->decoder);
+ if (ret)
+ goto err_add_decoder;
+ ret = vpu_add_func(vpu, &vpu->encoder);
+ if (ret)
+ goto err_add_encoder;
+ ret = media_device_register(&vpu->mdev);
+ if (ret)
+ goto err_vpu_media;
+ vpu->debugfs = debugfs_create_dir("amphion_vpu", NULL);
+
+ of_platform_populate(dev->of_node, NULL, NULL, dev);
+
+ return 0;
+
+err_vpu_media:
+ vpu_remove_func(&vpu->encoder);
+err_add_encoder:
+ vpu_remove_func(&vpu->decoder);
+err_add_decoder:
+ media_device_cleanup(&vpu->mdev);
+ v4l2_device_unregister(&vpu->v4l2_dev);
+err_vpu_deinit:
+ pm_runtime_set_suspended(dev);
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int vpu_remove(struct platform_device *pdev)
+{
+ struct vpu_dev *vpu = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ debugfs_remove_recursive(vpu->debugfs);
+ vpu->debugfs = NULL;
+
+ pm_runtime_disable(dev);
+
+ media_device_unregister(&vpu->mdev);
+ vpu_remove_func(&vpu->decoder);
+ vpu_remove_func(&vpu->encoder);
+ media_device_cleanup(&vpu->mdev);
+ v4l2_device_unregister(&vpu->v4l2_dev);
+ mutex_destroy(&vpu->lock);
+
+ return 0;
+}
+
+static int __maybe_unused vpu_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+static int __maybe_unused vpu_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int __maybe_unused vpu_resume(struct device *dev)
+{
+ return 0;
+}
+
+static int __maybe_unused vpu_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops vpu_pm_ops = {
+ SET_RUNTIME_PM_OPS(vpu_runtime_suspend, vpu_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(vpu_suspend, vpu_resume)
+};
+
+static struct vpu_resources imx8qxp_res = {
+ .plat_type = IMX8QXP,
+ .mreg_base = 0x40000000,
+ .setup = vpu_imx8q_setup,
+ .setup_encoder = vpu_imx8q_setup_enc,
+ .setup_decoder = vpu_imx8q_setup_dec,
+ .reset = vpu_imx8q_reset
+};
+
+static struct vpu_resources imx8qm_res = {
+ .plat_type = IMX8QM,
+ .mreg_base = 0x40000000,
+ .setup = vpu_imx8q_setup,
+ .setup_encoder = vpu_imx8q_setup_enc,
+ .setup_decoder = vpu_imx8q_setup_dec,
+ .reset = vpu_imx8q_reset
+};
+
+static const struct of_device_id vpu_dt_match[] = {
+ { .compatible = "nxp,imx8qxp-vpu", .data = &imx8qxp_res },
+ { .compatible = "nxp,imx8qm-vpu", .data = &imx8qm_res },
+ {}
+};
+MODULE_DEVICE_TABLE(of, vpu_dt_match);
+
+static struct platform_driver amphion_vpu_driver = {
+ .probe = vpu_probe,
+ .remove = vpu_remove,
+ .driver = {
+ .name = "amphion-vpu",
+ .of_match_table = vpu_dt_match,
+ .pm = &vpu_pm_ops,
+ },
+};
+
+static int __init vpu_driver_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&amphion_vpu_driver);
+ if (ret)
+ return ret;
+
+ return vpu_core_driver_init();
+}
+
+static void __exit vpu_driver_exit(void)
+{
+ vpu_core_driver_exit();
+ platform_driver_unregister(&amphion_vpu_driver);
+}
+module_init(vpu_driver_init);
+module_exit(vpu_driver_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("Linux VPU driver for Freescale i.MX8Q");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/amphion/vpu_helpers.c b/drivers/media/platform/amphion/vpu_helpers.c
new file mode 100644
index 000000000000..e9aeb3453dfc
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_helpers.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#include <linux/init.h>
+#include <linux/interconnect.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "vpu.h"
+#include "vpu_core.h"
+#include "vpu_rpc.h"
+#include "vpu_helpers.h"
+
+int vpu_helper_find_in_array_u8(const u8 *array, u32 size, u32 x)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (array[i] == x)
+ return i;
+ }
+
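+	/* not found: fall back to index 0, the first array entry */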
+ return 0;
+}
+
+bool vpu_helper_check_type(struct vpu_inst *inst, u32 type)
+{
+ const struct vpu_format *pfmt;
+
+ for (pfmt = inst->formats; pfmt->pixfmt; pfmt++) {
+ if (!vpu_iface_check_format(inst, pfmt->pixfmt))
+ continue;
+ if (pfmt->type == type)
+ return true;
+ }
+
+ return false;
+}
+
+const struct vpu_format *vpu_helper_find_format(struct vpu_inst *inst, u32 type, u32 pixelfmt)
+{
+ const struct vpu_format *pfmt;
+
+ if (!inst || !inst->formats)
+ return NULL;
+
+ if (!vpu_iface_check_format(inst, pixelfmt))
+ return NULL;
+
+ for (pfmt = inst->formats; pfmt->pixfmt; pfmt++) {
+ if (pfmt->pixfmt == pixelfmt && (!type || type == pfmt->type))
+ return pfmt;
+ }
+
+ return NULL;
+}
+
+const struct vpu_format *vpu_helper_enum_format(struct vpu_inst *inst, u32 type, int index)
+{
+ const struct vpu_format *pfmt;
+ int i = 0;
+
+ if (!inst || !inst->formats)
+ return NULL;
+
+ for (pfmt = inst->formats; pfmt->pixfmt; pfmt++) {
+ if (!vpu_iface_check_format(inst, pfmt->pixfmt))
+ continue;
+
+ if (pfmt->type == type) {
+ if (index == i)
+ return pfmt;
+ i++;
+ }
+ }
+
+ return NULL;
+}
+
+u32 vpu_helper_valid_frame_width(struct vpu_inst *inst, u32 width)
+{
+ const struct vpu_core_resources *res;
+
+ if (!inst)
+ return width;
+
+ res = vpu_get_resource(inst);
+ if (!res)
+ return width;
+ if (res->max_width)
+ width = clamp(width, res->min_width, res->max_width);
+ if (res->step_width)
+ width = ALIGN(width, res->step_width);
+
+ return width;
+}
+
+u32 vpu_helper_valid_frame_height(struct vpu_inst *inst, u32 height)
+{
+ const struct vpu_core_resources *res;
+
+ if (!inst)
+ return height;
+
+ res = vpu_get_resource(inst);
+ if (!res)
+ return height;
+ if (res->max_height)
+ height = clamp(height, res->min_height, res->max_height);
+ if (res->step_height)
+ height = ALIGN(height, res->step_height);
+
+ return height;
+}
+
+static u32 get_nv12_plane_size(u32 width, u32 height, int plane_no,
+ u32 stride, u32 interlaced, u32 *pbl)
+{
+ u32 bytesperline;
+ u32 size = 0;
+
+ bytesperline = ALIGN(width, stride);
+ if (pbl)
+ bytesperline = max(bytesperline, *pbl);
+ height = ALIGN(height, 2);
+ if (plane_no == 0)
+ size = bytesperline * height;
+ else if (plane_no == 1)
+ size = bytesperline * height >> 1;
+ if (pbl)
+ *pbl = bytesperline;
+
+ return size;
+}
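+
+/*
+ * Worked example of the computation above (illustrative values): for
+ * width = 1920, height = 1080 and stride = 256, bytesperline becomes
+ * ALIGN(1920, 256) = 2048, so plane 0 (luma) takes 2048 * 1080 = 2211840
+ * bytes and plane 1 (chroma) half of that.
+ */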
+
+static u32 get_tiled_8l128_plane_size(u32 fmt, u32 width, u32 height, int plane_no,
+ u32 stride, u32 interlaced, u32 *pbl)
+{
+ u32 ws = 3;
+ u32 hs = 7;
+ u32 bitdepth = 8;
+ u32 bytesperline;
+ u32 size = 0;
+
+ if (interlaced)
+ hs++;
+ if (fmt == V4L2_PIX_FMT_NV12M_10BE_8L128)
+ bitdepth = 10;
+ bytesperline = DIV_ROUND_UP(width * bitdepth, BITS_PER_BYTE);
+ bytesperline = ALIGN(bytesperline, 1 << ws);
+ bytesperline = ALIGN(bytesperline, stride);
+ if (pbl)
+ bytesperline = max(bytesperline, *pbl);
+ height = ALIGN(height, 1 << hs);
+ if (plane_no == 0)
+ size = bytesperline * height;
+ else if (plane_no == 1)
+ size = (bytesperline * ALIGN(height, 1 << (hs + 1))) >> 1;
+ if (pbl)
+ *pbl = bytesperline;
+
+ return size;
+}
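+
+/*
+ * Worked example of the computation above (illustrative values, 8-bit
+ * progressive): for width = 1920, height = 1080 and stride = 256,
+ * bytesperline becomes 2048 and the plane 0 height is padded to
+ * ALIGN(1080, 128) = 1152 tile rows, giving 2048 * 1152 = 2359296 bytes;
+ * plane 1 pads the height to 1280 instead and halves the result.
+ */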
+
+static u32 get_default_plane_size(u32 width, u32 height, int plane_no,
+ u32 stride, u32 interlaced, u32 *pbl)
+{
+ u32 bytesperline;
+ u32 size = 0;
+
+ bytesperline = ALIGN(width, stride);
+ if (pbl)
+ bytesperline = max(bytesperline, *pbl);
+ if (plane_no == 0)
+ size = bytesperline * height;
+ if (pbl)
+ *pbl = bytesperline;
+
+ return size;
+}
+
+u32 vpu_helper_get_plane_size(u32 fmt, u32 w, u32 h, int plane_no,
+ u32 stride, u32 interlaced, u32 *pbl)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_NV12M:
+ return get_nv12_plane_size(w, h, plane_no, stride, interlaced, pbl);
+ case V4L2_PIX_FMT_NV12M_8L128:
+ case V4L2_PIX_FMT_NV12M_10BE_8L128:
+ return get_tiled_8l128_plane_size(fmt, w, h, plane_no, stride, interlaced, pbl);
+ default:
+ return get_default_plane_size(w, h, plane_no, stride, interlaced, pbl);
+ }
+}
+
+int vpu_helper_copy_from_stream_buffer(struct vpu_buffer *stream_buffer,
+ u32 *rptr, u32 size, void *dst)
+{
+ u32 offset;
+ u32 start;
+ u32 end;
+ void *virt;
+
+ if (!stream_buffer || !rptr || !dst)
+ return -EINVAL;
+
+ if (!size)
+ return 0;
+
+ offset = *rptr;
+ start = stream_buffer->phys;
+ end = start + stream_buffer->length;
+ virt = stream_buffer->virt;
+
+ if (offset < start || offset > end)
+ return -EINVAL;
+
+ if (offset + size <= end) {
+ memcpy(dst, virt + (offset - start), size);
+ } else {
+ memcpy(dst, virt + (offset - start), end - offset);
+ memcpy(dst + end - offset, virt, size + offset - end);
+ }
+
+ *rptr = vpu_helper_step_walk(stream_buffer, offset, size);
+
+ return 0;
+}
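+
+/*
+ * Worked example of the wrap-around copy above (illustrative values): with
+ * phys = 0x80000000, length = 0x1000, *rptr = 0x80000f00 and size = 0x200,
+ * 0x100 bytes are copied from the tail of the buffer and the remaining
+ * 0x100 bytes from its head, after which *rptr wraps to 0x80000100.
+ */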
+
+int vpu_helper_copy_to_stream_buffer(struct vpu_buffer *stream_buffer,
+ u32 *wptr, u32 size, void *src)
+{
+ u32 offset;
+ u32 start;
+ u32 end;
+ void *virt;
+
+ if (!stream_buffer || !wptr || !src)
+ return -EINVAL;
+
+ if (!size)
+ return 0;
+
+ offset = *wptr;
+ start = stream_buffer->phys;
+ end = start + stream_buffer->length;
+ virt = stream_buffer->virt;
+ if (offset < start || offset > end)
+ return -EINVAL;
+
+ if (offset + size <= end) {
+ memcpy(virt + (offset - start), src, size);
+ } else {
+ memcpy(virt + (offset - start), src, end - offset);
+ memcpy(virt, src + end - offset, size + offset - end);
+ }
+
+ *wptr = vpu_helper_step_walk(stream_buffer, offset, size);
+
+ return 0;
+}
+
+int vpu_helper_memset_stream_buffer(struct vpu_buffer *stream_buffer,
+ u32 *wptr, u8 val, u32 size)
+{
+ u32 offset;
+ u32 start;
+ u32 end;
+ void *virt;
+
+ if (!stream_buffer || !wptr)
+ return -EINVAL;
+
+ if (!size)
+ return 0;
+
+ offset = *wptr;
+ start = stream_buffer->phys;
+ end = start + stream_buffer->length;
+ virt = stream_buffer->virt;
+ if (offset < start || offset > end)
+ return -EINVAL;
+
+ if (offset + size <= end) {
+ memset(virt + (offset - start), val, size);
+ } else {
+ memset(virt + (offset - start), val, end - offset);
+ memset(virt, val, size + offset - end);
+ }
+
+ offset += size;
+ if (offset >= end)
+ offset -= stream_buffer->length;
+
+ *wptr = offset;
+
+ return 0;
+}
+
+u32 vpu_helper_get_free_space(struct vpu_inst *inst)
+{
+ struct vpu_rpc_buffer_desc desc;
+
+ if (vpu_iface_get_stream_buffer_desc(inst, &desc))
+ return 0;
+
+ if (desc.rptr > desc.wptr)
+ return desc.rptr - desc.wptr;
+ else if (desc.rptr < desc.wptr)
+ return (desc.end - desc.start + desc.rptr - desc.wptr);
+ else
+ return desc.end - desc.start;
+}
+
+u32 vpu_helper_get_used_space(struct vpu_inst *inst)
+{
+ struct vpu_rpc_buffer_desc desc;
+
+ if (vpu_iface_get_stream_buffer_desc(inst, &desc))
+ return 0;
+
+ if (desc.wptr > desc.rptr)
+ return desc.wptr - desc.rptr;
+ else if (desc.wptr < desc.rptr)
+ return (desc.end - desc.start + desc.wptr - desc.rptr);
+ else
+ return 0;
+}
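+
+/*
+ * Worked example for the two ring occupancy helpers above (illustrative
+ * values): with start = 0x0, end = 0x1000, rptr = 0x300 and wptr = 0x900,
+ * the used space is 0x600 and the free space 0xa00; when rptr == wptr the
+ * buffer is treated as empty, so the free space equals the full capacity.
+ */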
+
+int vpu_helper_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vpu_inst *inst = ctrl_to_inst(ctrl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ ctrl->val = inst->min_buffer_cap;
+ break;
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
+ ctrl->val = inst->min_buffer_out;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int vpu_helper_find_startcode(struct vpu_buffer *stream_buffer,
+ u32 pixelformat, u32 offset, u32 bytesused)
+{
+ u32 start_code;
+ int start_code_size;
+ u32 val = 0;
+ int i;
+ int ret = -EINVAL;
+
+ if (!stream_buffer || !stream_buffer->virt)
+ return -EINVAL;
+
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_H264:
+ start_code_size = 4;
+ start_code = 0x00000001;
+ break;
+ default:
+ return 0;
+ }
+
+ for (i = 0; i < bytesused; i++) {
+ val = (val << 8) | vpu_helper_read_byte(stream_buffer, offset + i);
+ if (i < start_code_size - 1)
+ continue;
+ if (val == start_code) {
+ ret = i + 1 - start_code_size;
+ break;
+ }
+ }
+
+ return ret;
+}
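+
+/*
+ * Worked example (illustrative bytes): scanning the H.264 Annex-B stream
+ * 00 00 00 01 67 ... matches at i = 3, so the function returns
+ * 3 + 1 - 4 = 0, the offset of the start code itself.
+ */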
+
+int vpu_find_dst_by_src(struct vpu_pair *pairs, u32 cnt, u32 src)
+{
+ u32 i;
+
+ if (!pairs || !cnt)
+ return -EINVAL;
+
+ for (i = 0; i < cnt; i++) {
+ if (pairs[i].src == src)
+ return pairs[i].dst;
+ }
+
+ return -EINVAL;
+}
+
+int vpu_find_src_by_dst(struct vpu_pair *pairs, u32 cnt, u32 dst)
+{
+ u32 i;
+
+ if (!pairs || !cnt)
+ return -EINVAL;
+
+ for (i = 0; i < cnt; i++) {
+ if (pairs[i].dst == dst)
+ return pairs[i].src;
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/media/platform/amphion/vpu_helpers.h b/drivers/media/platform/amphion/vpu_helpers.h
new file mode 100644
index 000000000000..bc28350958be
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_helpers.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#ifndef _AMPHION_VPU_HELPERS_H
+#define _AMPHION_VPU_HELPERS_H
+
+struct vpu_pair {
+ u32 src;
+ u32 dst;
+};
+
+int vpu_helper_find_in_array_u8(const u8 *array, u32 size, u32 x);
+bool vpu_helper_check_type(struct vpu_inst *inst, u32 type);
+const struct vpu_format *vpu_helper_find_format(struct vpu_inst *inst, u32 type, u32 pixelfmt);
+const struct vpu_format *vpu_helper_enum_format(struct vpu_inst *inst, u32 type, int index);
+u32 vpu_helper_valid_frame_width(struct vpu_inst *inst, u32 width);
+u32 vpu_helper_valid_frame_height(struct vpu_inst *inst, u32 height);
+u32 vpu_helper_get_plane_size(u32 fmt, u32 width, u32 height, int plane_no,
+ u32 stride, u32 interlaced, u32 *pbl);
+int vpu_helper_copy_from_stream_buffer(struct vpu_buffer *stream_buffer,
+ u32 *rptr, u32 size, void *dst);
+int vpu_helper_copy_to_stream_buffer(struct vpu_buffer *stream_buffer,
+ u32 *wptr, u32 size, void *src);
+int vpu_helper_memset_stream_buffer(struct vpu_buffer *stream_buffer,
+ u32 *wptr, u8 val, u32 size);
+u32 vpu_helper_get_free_space(struct vpu_inst *inst);
+u32 vpu_helper_get_used_space(struct vpu_inst *inst);
+int vpu_helper_g_volatile_ctrl(struct v4l2_ctrl *ctrl);
+void vpu_helper_get_kmp_next(const u8 *pattern, int *next, int size);
+int vpu_helper_kmp_search(u8 *s, int s_len, const u8 *p, int p_len, int *next);
+int vpu_helper_kmp_search_in_stream_buffer(struct vpu_buffer *stream_buffer,
+ u32 offset, int bytesused,
+ const u8 *p, int p_len, int *next);
+int vpu_helper_find_startcode(struct vpu_buffer *stream_buffer,
+ u32 pixelformat, u32 offset, u32 bytesused);
+
+static inline u32 vpu_helper_step_walk(struct vpu_buffer *stream_buffer, u32 pos, u32 step)
+{
+ pos += step;
+ if (pos > stream_buffer->phys + stream_buffer->length)
+ pos -= stream_buffer->length;
+
+ return pos;
+}
+
+static inline u8 vpu_helper_read_byte(struct vpu_buffer *stream_buffer, u32 pos)
+{
+ u8 *pdata = (u8 *)stream_buffer->virt;
+
+ return pdata[pos % stream_buffer->length];
+}
+
+int vpu_color_check_primaries(u32 primaries);
+int vpu_color_check_transfers(u32 transfers);
+int vpu_color_check_matrix(u32 matrix);
+int vpu_color_check_full_range(u32 full_range);
+u32 vpu_color_cvrt_primaries_v2i(u32 primaries);
+u32 vpu_color_cvrt_primaries_i2v(u32 primaries);
+u32 vpu_color_cvrt_transfers_v2i(u32 transfers);
+u32 vpu_color_cvrt_transfers_i2v(u32 transfers);
+u32 vpu_color_cvrt_matrix_v2i(u32 matrix);
+u32 vpu_color_cvrt_matrix_i2v(u32 matrix);
+u32 vpu_color_cvrt_full_range_v2i(u32 full_range);
+u32 vpu_color_cvrt_full_range_i2v(u32 full_range);
+int vpu_color_get_default(u32 primaries, u32 *ptransfers, u32 *pmatrix, u32 *pfull_range);
+
+int vpu_find_dst_by_src(struct vpu_pair *pairs, u32 cnt, u32 src);
+int vpu_find_src_by_dst(struct vpu_pair *pairs, u32 cnt, u32 dst);
+#endif
diff --git a/drivers/media/platform/amphion/vpu_imx8q.c b/drivers/media/platform/amphion/vpu_imx8q.c
new file mode 100644
index 000000000000..f14c2b8312a8
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_imx8q.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include "vpu.h"
+#include "vpu_core.h"
+#include "vpu_imx8q.h"
+#include "vpu_rpc.h"
+
+#define IMX8Q_CSR_CM0Px_ADDR_OFFSET 0x00000000
+#define IMX8Q_CSR_CM0Px_CPUWAIT 0x00000004
+
+#ifdef CONFIG_IMX_SCU
+#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/svc/misc.h>
+
+#define VPU_DISABLE_BITS 0x7
+#define VPU_IMX_DECODER_FUSE_OFFSET 14
+#define VPU_ENCODER_MASK 0x1
+#define VPU_DECODER_MASK 0x3UL
+#define VPU_DECODER_H264_MASK 0x2UL
+#define VPU_DECODER_HEVC_MASK 0x1UL
+
+static u32 imx8q_fuse;
+
+struct vpu_sc_msg_misc {
+ struct imx_sc_rpc_msg hdr;
+ u32 word;
+} __packed;
+#endif
+
+int vpu_imx8q_setup_dec(struct vpu_dev *vpu)
+{
+ const off_t offset = DEC_MFD_XREG_SLV_BASE + MFD_BLK_CTRL;
+
+ vpu_writel(vpu, offset + MFD_BLK_CTRL_MFD_SYS_CLOCK_ENABLE_SET, 0x1f);
+ vpu_writel(vpu, offset + MFD_BLK_CTRL_MFD_SYS_RESET_SET, 0xffffffff);
+
+ return 0;
+}
+
+int vpu_imx8q_setup_enc(struct vpu_dev *vpu)
+{
+ return 0;
+}
+
+int vpu_imx8q_setup(struct vpu_dev *vpu)
+{
+ const off_t offset = SCB_XREG_SLV_BASE + SCB_SCB_BLK_CTRL;
+
+ vpu_readl(vpu, offset + 0x108);
+
+ vpu_writel(vpu, offset + SCB_BLK_CTRL_SCB_CLK_ENABLE_SET, 0x1);
+ vpu_writel(vpu, offset + 0x190, 0xffffffff);
+ vpu_writel(vpu, offset + SCB_BLK_CTRL_XMEM_RESET_SET, 0xffffffff);
+ vpu_writel(vpu, offset + SCB_BLK_CTRL_SCB_CLK_ENABLE_SET, 0xE);
+ vpu_writel(vpu, offset + SCB_BLK_CTRL_CACHE_RESET_SET, 0x7);
+ vpu_writel(vpu, XMEM_CONTROL, 0x102);
+
+ vpu_readl(vpu, offset + 0x108);
+
+ return 0;
+}
+
+static int vpu_imx8q_reset_enc(struct vpu_dev *vpu)
+{
+ return 0;
+}
+
+static int vpu_imx8q_reset_dec(struct vpu_dev *vpu)
+{
+ const off_t offset = DEC_MFD_XREG_SLV_BASE + MFD_BLK_CTRL;
+
+ vpu_writel(vpu, offset + MFD_BLK_CTRL_MFD_SYS_RESET_CLR, 0xffffffff);
+
+ return 0;
+}
+
+int vpu_imx8q_reset(struct vpu_dev *vpu)
+{
+ const off_t offset = SCB_XREG_SLV_BASE + SCB_SCB_BLK_CTRL;
+
+ vpu_writel(vpu, offset + SCB_BLK_CTRL_CACHE_RESET_CLR, 0x7);
+ vpu_imx8q_reset_enc(vpu);
+ vpu_imx8q_reset_dec(vpu);
+
+ return 0;
+}
+
+int vpu_imx8q_set_system_cfg_common(struct vpu_rpc_system_config *config, u32 regs, u32 core_id)
+{
+ if (!config)
+ return -EINVAL;
+
+ switch (core_id) {
+ case 0:
+ config->malone_base_addr[0] = regs + DEC_MFD_XREG_SLV_BASE;
+ config->num_malones = 1;
+ config->num_windsors = 0;
+ break;
+ case 1:
+ config->windsor_base_addr[0] = regs + ENC_MFD_XREG_SLV_0_BASE;
+ config->num_windsors = 1;
+ config->num_malones = 0;
+ break;
+ case 2:
+ config->windsor_base_addr[0] = regs + ENC_MFD_XREG_SLV_1_BASE;
+ config->num_windsors = 1;
+ config->num_malones = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (config->num_windsors) {
+ config->windsor_irq_pin[0x0][0x0] = WINDSOR_PAL_IRQ_PIN_L;
+ config->windsor_irq_pin[0x0][0x1] = WINDSOR_PAL_IRQ_PIN_H;
+ }
+
+ config->malone_base_addr[0x1] = 0x0;
+ config->hif_offset[0x0] = MFD_HIF;
+ config->hif_offset[0x1] = 0x0;
+
+ config->dpv_base_addr = 0x0;
+ config->dpv_irq_pin = 0x0;
+ config->pixif_base_addr = regs + DEC_MFD_XREG_SLV_BASE + MFD_PIX_IF;
+ config->cache_base_addr[0] = regs + MC_CACHE_0_BASE;
+ config->cache_base_addr[1] = regs + MC_CACHE_1_BASE;
+
+ return 0;
+}
+
+int vpu_imx8q_boot_core(struct vpu_core *core)
+{
+ csr_writel(core, IMX8Q_CSR_CM0Px_ADDR_OFFSET, core->fw.phys);
+ csr_writel(core, IMX8Q_CSR_CM0Px_CPUWAIT, 0);
+ return 0;
+}
+
+int vpu_imx8q_get_power_state(struct vpu_core *core)
+{
+ if (csr_readl(core, IMX8Q_CSR_CM0Px_CPUWAIT) == 1)
+ return 0;
+ return 1;
+}
+
+int vpu_imx8q_on_firmware_loaded(struct vpu_core *core)
+{
+ u8 *p;
+
+ p = core->fw.virt;
+ p[16] = core->vpu->res->plat_type;
+ p[17] = core->id;
+ p[18] = 1;
+
+ return 0;
+}
+
+int vpu_imx8q_check_memory_region(dma_addr_t base, dma_addr_t addr, u32 size)
+{
+ const struct vpu_rpc_region_t imx8q_regions[] = {
+ {0x00000000, 0x08000000, VPU_CORE_MEMORY_CACHED},
+ {0x08000000, 0x10000000, VPU_CORE_MEMORY_UNCACHED},
+ {0x10000000, 0x20000000, VPU_CORE_MEMORY_CACHED},
+ {0x20000000, 0x40000000, VPU_CORE_MEMORY_UNCACHED}
+ };
+ int i;
+
+ if (addr < base)
+ return VPU_CORE_MEMORY_INVALID;
+
+ addr -= base;
+ for (i = 0; i < ARRAY_SIZE(imx8q_regions); i++) {
+ const struct vpu_rpc_region_t *region = &imx8q_regions[i];
+
+ if (addr >= region->start && addr + size < region->end)
+ return region->type;
+ }
+
+ return VPU_CORE_MEMORY_INVALID;
+}
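+
+/*
+ * Worked example (illustrative values): with base = 0x80000000,
+ * addr = 0x88000000 and size = 0x1000, the relative address 0x08000000
+ * falls in [0x08000000, 0x10000000), so VPU_CORE_MEMORY_UNCACHED is
+ * returned.
+ */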
+
+#ifdef CONFIG_IMX_SCU
+static u32 vpu_imx8q_get_fuse(void)
+{
+ static u32 fuse_got;
+ struct imx_sc_ipc *ipc;
+ struct vpu_sc_msg_misc msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ if (fuse_got)
+ return imx8q_fuse;
+
+ ret = imx_scu_get_handle(&ipc);
+ if (ret) {
+ pr_err("error: get sct handle fail: %d\n", ret);
+ return 0;
+ }
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_MISC;
+ hdr->func = IMX_SC_MISC_FUNC_OTP_FUSE_READ;
+ hdr->size = 2;
+
+ msg.word = VPU_DISABLE_BITS;
+
+ ret = imx_scu_call_rpc(ipc, &msg, true);
+ if (ret)
+ return 0;
+
+ imx8q_fuse = msg.word;
+ fuse_got = 1;
+ return imx8q_fuse;
+}
+
+bool vpu_imx8q_check_codec(enum vpu_core_type type)
+{
+ u32 fuse = vpu_imx8q_get_fuse();
+
+ if (type == VPU_CORE_TYPE_ENC) {
+ if (fuse & VPU_ENCODER_MASK)
+ return false;
+ } else if (type == VPU_CORE_TYPE_DEC) {
+ fuse >>= VPU_IMX_DECODER_FUSE_OFFSET;
+ fuse &= VPU_DECODER_MASK;
+
+ if (fuse == VPU_DECODER_MASK)
+ return false;
+ }
+ return true;
+}
+
+bool vpu_imx8q_check_fmt(enum vpu_core_type type, u32 pixelfmt)
+{
+ u32 fuse = vpu_imx8q_get_fuse();
+
+ if (type == VPU_CORE_TYPE_DEC) {
+ fuse >>= VPU_IMX_DECODER_FUSE_OFFSET;
+ fuse &= VPU_DECODER_MASK;
+
+ if (fuse == VPU_DECODER_HEVC_MASK && pixelfmt == V4L2_PIX_FMT_HEVC)
+ return false;
+ if (fuse == VPU_DECODER_H264_MASK && pixelfmt == V4L2_PIX_FMT_H264)
+ return false;
+ if (fuse == VPU_DECODER_MASK)
+ return false;
+ }
+
+ return true;
+}
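+
+/*
+ * Example of the fuse decoding above (illustrative value): with
+ * imx8q_fuse = 0x1 << VPU_IMX_DECODER_FUSE_OFFSET, the decoder field reads
+ * back as VPU_DECODER_HEVC_MASK, so HEVC decoding is fused off while H.264
+ * is still reported as supported.
+ */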
+#else
+bool vpu_imx8q_check_codec(enum vpu_core_type type)
+{
+ return true;
+}
+
+bool vpu_imx8q_check_fmt(enum vpu_core_type type, u32 pixelfmt)
+{
+ return true;
+}
+#endif
diff --git a/drivers/media/platform/amphion/vpu_imx8q.h b/drivers/media/platform/amphion/vpu_imx8q.h
new file mode 100644
index 000000000000..9deffd7dde42
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_imx8q.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#ifndef _AMPHION_VPU_IMX8Q_H
+#define _AMPHION_VPU_IMX8Q_H
+
+#define SCB_XREG_SLV_BASE 0x00000000
+#define SCB_SCB_BLK_CTRL 0x00070000
+#define SCB_BLK_CTRL_XMEM_RESET_SET 0x00000090
+#define SCB_BLK_CTRL_CACHE_RESET_SET 0x000000A0
+#define SCB_BLK_CTRL_CACHE_RESET_CLR 0x000000A4
+#define SCB_BLK_CTRL_SCB_CLK_ENABLE_SET 0x00000100
+
+#define XMEM_CONTROL 0x00041000
+
+#define MC_CACHE_0_BASE 0x00060000
+#define MC_CACHE_1_BASE 0x00068000
+
+#define DEC_MFD_XREG_SLV_BASE 0x00180000
+#define ENC_MFD_XREG_SLV_0_BASE 0x00800000
+#define ENC_MFD_XREG_SLV_1_BASE 0x00A00000
+
+#define MFD_HIF 0x0001C000
+#define MFD_HIF_MSD_REG_INTERRUPT_STATUS 0x00000018
+#define MFD_SIF 0x0001D000
+#define MFD_SIF_CTRL_STATUS 0x000000F0
+#define MFD_SIF_INTR_STATUS 0x000000F4
+#define MFD_MCX 0x00020800
+#define MFD_MCX_OFF 0x00000020
+#define MFD_PIX_IF 0x00020000
+
+#define MFD_BLK_CTRL 0x00030000
+#define MFD_BLK_CTRL_MFD_SYS_RESET_SET 0x00000000
+#define MFD_BLK_CTRL_MFD_SYS_RESET_CLR 0x00000004
+#define MFD_BLK_CTRL_MFD_SYS_CLOCK_ENABLE_SET 0x00000100
+#define MFD_BLK_CTRL_MFD_SYS_CLOCK_ENABLE_CLR 0x00000104
+
+#define VID_API_NUM_STREAMS 8
+#define VID_API_MAX_BUF_PER_STR 3
+#define VID_API_MAX_NUM_MVC_VIEWS 4
+#define MEDIAIP_MAX_NUM_MALONES 2
+#define MEDIAIP_MAX_NUM_MALONE_IRQ_PINS 2
+#define MEDIAIP_MAX_NUM_WINDSORS 1
+#define MEDIAIP_MAX_NUM_WINDSOR_IRQ_PINS 2
+#define MEDIAIP_MAX_NUM_CMD_IRQ_PINS 2
+#define MEDIAIP_MAX_NUM_MSG_IRQ_PINS 1
+#define MEDIAIP_MAX_NUM_TIMER_IRQ_PINS 4
+#define MEDIAIP_MAX_NUM_TIMER_IRQ_SLOTS 4
+
+#define WINDSOR_PAL_IRQ_PIN_L 0x4
+#define WINDSOR_PAL_IRQ_PIN_H 0x5
+
+struct vpu_rpc_system_config {
+ u32 cfg_cookie;
+
+ u32 num_malones;
+ u32 malone_base_addr[MEDIAIP_MAX_NUM_MALONES];
+ u32 hif_offset[MEDIAIP_MAX_NUM_MALONES];
+ u32 malone_irq_pin[MEDIAIP_MAX_NUM_MALONES][MEDIAIP_MAX_NUM_MALONE_IRQ_PINS];
+ u32 malone_irq_target[MEDIAIP_MAX_NUM_MALONES][MEDIAIP_MAX_NUM_MALONE_IRQ_PINS];
+
+ u32 num_windsors;
+ u32 windsor_base_addr[MEDIAIP_MAX_NUM_WINDSORS];
+ u32 windsor_irq_pin[MEDIAIP_MAX_NUM_WINDSORS][MEDIAIP_MAX_NUM_WINDSOR_IRQ_PINS];
+ u32 windsor_irq_target[MEDIAIP_MAX_NUM_WINDSORS][MEDIAIP_MAX_NUM_WINDSOR_IRQ_PINS];
+
+ u32 cmd_irq_pin[MEDIAIP_MAX_NUM_CMD_IRQ_PINS];
+ u32 cmd_irq_target[MEDIAIP_MAX_NUM_CMD_IRQ_PINS];
+
+ u32 msg_irq_pin[MEDIAIP_MAX_NUM_MSG_IRQ_PINS];
+ u32 msg_irq_target[MEDIAIP_MAX_NUM_MSG_IRQ_PINS];
+
+ u32 sys_clk_freq;
+ u32 num_timers;
+ u32 timer_base_addr;
+ u32 timer_irq_pin[MEDIAIP_MAX_NUM_TIMER_IRQ_PINS];
+ u32 timer_irq_target[MEDIAIP_MAX_NUM_TIMER_IRQ_PINS];
+ u32 timer_slots[MEDIAIP_MAX_NUM_TIMER_IRQ_SLOTS];
+
+ u32 gic_base_addr;
+ u32 uart_base_addr;
+
+ u32 dpv_base_addr;
+ u32 dpv_irq_pin;
+ u32 dpv_irq_target;
+
+ u32 pixif_base_addr;
+
+ u32 pal_trace_level;
+ u32 pal_trace_destination;
+
+ u32 pal_trace_level1;
+ u32 pal_trace_destination1;
+
+ u32 heap_base;
+ u32 heap_size;
+
+ u32 cache_base_addr[2];
+};
+
+int vpu_imx8q_setup_dec(struct vpu_dev *vpu);
+int vpu_imx8q_setup_enc(struct vpu_dev *vpu);
+int vpu_imx8q_setup(struct vpu_dev *vpu);
+int vpu_imx8q_reset(struct vpu_dev *vpu);
+int vpu_imx8q_set_system_cfg_common(struct vpu_rpc_system_config *config, u32 regs, u32 core_id);
+int vpu_imx8q_boot_core(struct vpu_core *core);
+int vpu_imx8q_get_power_state(struct vpu_core *core);
+int vpu_imx8q_on_firmware_loaded(struct vpu_core *core);
+int vpu_imx8q_check_memory_region(dma_addr_t base, dma_addr_t addr, u32 size);
+bool vpu_imx8q_check_codec(enum vpu_core_type type);
+bool vpu_imx8q_check_fmt(enum vpu_core_type type, u32 pixelfmt);
+
+#endif
diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
new file mode 100644
index 000000000000..446a9de0cc11
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_malone.c
@@ -0,0 +1,1644 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#include <linux/init.h>
+#include <linux/interconnect.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/rational.h>
+#include <linux/time64.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+#include <linux/videodev2.h>
+#include "vpu.h"
+#include "vpu_rpc.h"
+#include "vpu_defs.h"
+#include "vpu_helpers.h"
+#include "vpu_v4l2.h"
+#include "vpu_cmds.h"
+#include "vpu_imx8q.h"
+#include "vpu_malone.h"
+
+#define CMD_SIZE 25600
+#define MSG_SIZE 25600
+#define CODEC_SIZE 0x1000
+#define JPEG_SIZE 0x1000
+#define SEQ_SIZE 0x1000
+#define GOP_SIZE 0x1000
+#define PIC_SIZE 0x1000
+#define QMETER_SIZE 0x1000
+#define DBGLOG_SIZE 0x10000
+#define DEBUG_SIZE 0x80000
+#define ENG_SIZE 0x1000
+#define MALONE_SKIPPED_FRAME_ID 0x555
+
+#define MALONE_ALIGN_MBI 0x800
+#define MALONE_DCP_CHUNK_BIT 16
+#define MALONE_DCP_SIZE_MAX 0x3000000
+#define MALONE_DCP_SIZE_MIN 0x100000
+#define MALONE_DCP_FIXED_MB_ALLOC 250
+
+#define CONFIG_SET(val, cfg, pos, mask) \
+ (*(cfg) |= (((val) << (pos)) & (mask)))
+// x is the source data, y is the destination data
+#define STREAM_CONFIG_FORMAT_SET(x, y) CONFIG_SET(x, y, 0, 0x0000000F)
+#define STREAM_CONFIG_STRBUFIDX_SET(x, y) CONFIG_SET(x, y, 8, 0x00000300)
+#define STREAM_CONFIG_NOSEQ_SET(x, y) CONFIG_SET(x, y, 10, 0x00000400)
+#define STREAM_CONFIG_DEBLOCK_SET(x, y) CONFIG_SET(x, y, 11, 0x00000800)
+#define STREAM_CONFIG_DERING_SET(x, y) CONFIG_SET(x, y, 12, 0x00001000)
+#define STREAM_CONFIG_IBWAIT_SET(x, y) CONFIG_SET(x, y, 13, 0x00002000)
+#define STREAM_CONFIG_FBC_SET(x, y) CONFIG_SET(x, y, 14, 0x00004000)
+#define STREAM_CONFIG_PLAY_MODE_SET(x, y) CONFIG_SET(x, y, 16, 0x00030000)
+#define STREAM_CONFIG_ENABLE_DCP_SET(x, y) CONFIG_SET(x, y, 20, 0x00100000)
+#define STREAM_CONFIG_NUM_STR_BUF_SET(x, y) CONFIG_SET(x, y, 21, 0x00600000)
+#define STREAM_CONFIG_MALONE_USAGE_SET(x, y) CONFIG_SET(x, y, 23, 0x01800000)
+#define STREAM_CONFIG_MULTI_VID_SET(x, y) CONFIG_SET(x, y, 25, 0x02000000)
+#define STREAM_CONFIG_OBFUSC_EN_SET(x, y) CONFIG_SET(x, y, 26, 0x04000000)
+#define STREAM_CONFIG_RC4_EN_SET(x, y) CONFIG_SET(x, y, 27, 0x08000000)
+#define STREAM_CONFIG_MCX_SET(x, y) CONFIG_SET(x, y, 28, 0x10000000)
+#define STREAM_CONFIG_PES_SET(x, y) CONFIG_SET(x, y, 29, 0x20000000)
+#define STREAM_CONFIG_NUM_DBE_SET(x, y) CONFIG_SET(x, y, 30, 0x40000000)
+#define STREAM_CONFIG_FS_CTRL_MODE_SET(x, y) CONFIG_SET(x, y, 31, 0x80000000)
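+
+/*
+ * Example of the bitfield packing above (illustrative values): starting from
+ * cfg = 0, STREAM_CONFIG_FORMAT_SET(MALONE_FMT_AVC, &cfg) sets bits [3:0]
+ * to 0x1 and STREAM_CONFIG_PLAY_MODE_SET(0x3, &cfg) then sets bits [17:16],
+ * leaving cfg = 0x00030001.
+ */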
+
+enum vpu_malone_stream_input_mode {
+ INVALID_MODE = 0,
+ FRAME_LVL,
+ NON_FRAME_LVL
+};
+
+enum vpu_malone_format {
+ MALONE_FMT_NULL = 0x0,
+ MALONE_FMT_AVC = 0x1,
+ MALONE_FMT_MP2 = 0x2,
+ MALONE_FMT_VC1 = 0x3,
+ MALONE_FMT_AVS = 0x4,
+ MALONE_FMT_ASP = 0x5,
+ MALONE_FMT_JPG = 0x6,
+ MALONE_FMT_RV = 0x7,
+ MALONE_FMT_VP6 = 0x8,
+ MALONE_FMT_SPK = 0x9,
+ MALONE_FMT_VP8 = 0xA,
+ MALONE_FMT_HEVC = 0xB,
+ MALONE_FMT_LAST = MALONE_FMT_HEVC
+};
+
+enum {
+ VID_API_CMD_NULL = 0x00,
+ VID_API_CMD_PARSE_NEXT_SEQ = 0x01,
+ VID_API_CMD_PARSE_NEXT_I = 0x02,
+ VID_API_CMD_PARSE_NEXT_IP = 0x03,
+ VID_API_CMD_PARSE_NEXT_ANY = 0x04,
+ VID_API_CMD_DEC_PIC = 0x05,
+ VID_API_CMD_UPDATE_ES_WR_PTR = 0x06,
+ VID_API_CMD_UPDATE_ES_RD_PTR = 0x07,
+ VID_API_CMD_UPDATE_UDATA = 0x08,
+ VID_API_CMD_GET_FSINFO = 0x09,
+ VID_API_CMD_SKIP_PIC = 0x0a,
+ VID_API_CMD_DEC_CHUNK = 0x0b,
+ VID_API_CMD_START = 0x10,
+ VID_API_CMD_STOP = 0x11,
+ VID_API_CMD_ABORT = 0x12,
+ VID_API_CMD_RST_BUF = 0x13,
+ VID_API_CMD_FS_RELEASE = 0x15,
+ VID_API_CMD_MEM_REGION_ATTACH = 0x16,
+ VID_API_CMD_MEM_REGION_DETACH = 0x17,
+ VID_API_CMD_MVC_VIEW_SELECT = 0x18,
+ VID_API_CMD_FS_ALLOC = 0x19,
+ VID_API_CMD_DBG_GET_STATUS = 0x1C,
+ VID_API_CMD_DBG_START_LOG = 0x1D,
+ VID_API_CMD_DBG_STOP_LOG = 0x1E,
+ VID_API_CMD_DBG_DUMP_LOG = 0x1F,
+ VID_API_CMD_YUV_READY = 0x20,
+ VID_API_CMD_TS = 0x21,
+
+ VID_API_CMD_FIRM_RESET = 0x40,
+
+ VID_API_CMD_SNAPSHOT = 0xAA,
+ VID_API_CMD_ROLL_SNAPSHOT = 0xAB,
+ VID_API_CMD_LOCK_SCHEDULER = 0xAC,
+ VID_API_CMD_UNLOCK_SCHEDULER = 0xAD,
+ VID_API_CMD_CQ_FIFO_DUMP = 0xAE,
+ VID_API_CMD_DBG_FIFO_DUMP = 0xAF,
+ VID_API_CMD_SVC_ILP = 0xBB,
+ VID_API_CMD_FW_STATUS = 0xF0,
+ VID_API_CMD_INVALID = 0xFF
+};
+
+enum {
+ VID_API_EVENT_NULL = 0x00,
+ VID_API_EVENT_RESET_DONE = 0x01,
+ VID_API_EVENT_SEQ_HDR_FOUND = 0x02,
+ VID_API_EVENT_PIC_HDR_FOUND = 0x03,
+ VID_API_EVENT_PIC_DECODED = 0x04,
+ VID_API_EVENT_FIFO_LOW = 0x05,
+ VID_API_EVENT_FIFO_HIGH = 0x06,
+ VID_API_EVENT_FIFO_EMPTY = 0x07,
+ VID_API_EVENT_FIFO_FULL = 0x08,
+ VID_API_EVENT_BS_ERROR = 0x09,
+ VID_API_EVENT_UDATA_FIFO_UPTD = 0x0A,
+ VID_API_EVENT_RES_CHANGE = 0x0B,
+ VID_API_EVENT_FIFO_OVF = 0x0C,
+ VID_API_EVENT_CHUNK_DECODED = 0x0D,
+ VID_API_EVENT_REQ_FRAME_BUFF = 0x10,
+ VID_API_EVENT_FRAME_BUFF_RDY = 0x11,
+ VID_API_EVENT_REL_FRAME_BUFF = 0x12,
+ VID_API_EVENT_STR_BUF_RST = 0x13,
+ VID_API_EVENT_RET_PING = 0x14,
+ VID_API_EVENT_QMETER = 0x15,
+ VID_API_EVENT_STR_FMT_CHANGE = 0x16,
+ VID_API_EVENT_FIRMWARE_XCPT = 0x17,
+ VID_API_EVENT_START_DONE = 0x18,
+ VID_API_EVENT_STOPPED = 0x19,
+ VID_API_EVENT_ABORT_DONE = 0x1A,
+ VID_API_EVENT_FINISHED = 0x1B,
+ VID_API_EVENT_DBG_STAT_UPDATE = 0x1C,
+ VID_API_EVENT_DBG_LOG_STARTED = 0x1D,
+ VID_API_EVENT_DBG_LOG_STOPPED = 0x1E,
+ VID_API_EVENT_DBG_LOG_UPDATED = 0x1F,
+ VID_API_EVENT_DBG_MSG_DEC = 0x20,
+ VID_API_EVENT_DEC_SC_ERR = 0x21,
+ VID_API_EVENT_CQ_FIFO_DUMP = 0x22,
+ VID_API_EVENT_DBG_FIFO_DUMP = 0x23,
+ VID_API_EVENT_DEC_CHECK_RES = 0x24,
+ VID_API_EVENT_DEC_CFG_INFO = 0x25,
+ VID_API_EVENT_UNSUPPORTED_STREAM = 0x26,
+ VID_API_EVENT_STR_SUSPENDED = 0x30,
+ VID_API_EVENT_SNAPSHOT_DONE = 0x40,
+ VID_API_EVENT_FW_STATUS = 0xF0,
+ VID_API_EVENT_INVALID = 0xFF
+};
+
+struct vpu_malone_buffer_desc {
+ struct vpu_rpc_buffer_desc buffer;
+ u32 low;
+ u32 high;
+};
+
+struct vpu_malone_str_buffer {
+ u32 wptr;
+ u32 rptr;
+ u32 start;
+ u32 end;
+ u32 lwm;
+};
+
+struct vpu_malone_pitch_info {
+ u32 frame_pitch;
+};
+
+struct vpu_malone_table_desc {
+ u32 array_base;
+ u32 size;
+};
+
+struct vpu_malone_dbglog_desc {
+ u32 addr;
+ u32 size;
+ u32 level;
+ u32 reserved;
+};
+
+struct vpu_malone_frame_buffer {
+ u32 addr;
+ u32 size;
+};
+
+struct vpu_malone_udata {
+ u32 base;
+ u32 total_size;
+ u32 slot_size;
+};
+
+struct vpu_malone_buffer_info {
+ u32 stream_input_mode;
+ u32 stream_pic_input_count;
+ u32 stream_pic_parsed_count;
+ u32 stream_buffer_threshold;
+ u32 stream_pic_end_flag;
+};
+
+struct vpu_malone_encrypt_info {
+ u32 rec4key[8];
+ u32 obfusc;
+};
+
+struct malone_iface {
+ u32 exec_base_addr;
+ u32 exec_area_size;
+ struct vpu_malone_buffer_desc cmd_buffer_desc;
+ struct vpu_malone_buffer_desc msg_buffer_desc;
+ u32 cmd_int_enable[VID_API_NUM_STREAMS];
+	struct vpu_malone_pitch_info stream_pitch_info[VID_API_NUM_STREAMS];
+ u32 stream_config[VID_API_NUM_STREAMS];
+ struct vpu_malone_table_desc codec_param_tab_desc;
+ struct vpu_malone_table_desc jpeg_param_tab_desc;
+ u32 stream_buffer_desc[VID_API_NUM_STREAMS][VID_API_MAX_BUF_PER_STR];
+ struct vpu_malone_table_desc seq_info_tab_desc;
+ struct vpu_malone_table_desc pic_info_tab_desc;
+ struct vpu_malone_table_desc gop_info_tab_desc;
+ struct vpu_malone_table_desc qmeter_info_tab_desc;
+ u32 stream_error[VID_API_NUM_STREAMS];
+ u32 fw_version;
+ u32 fw_offset;
+ u32 max_streams;
+ struct vpu_malone_dbglog_desc dbglog_desc;
+ struct vpu_rpc_buffer_desc api_cmd_buffer_desc[VID_API_NUM_STREAMS];
+ struct vpu_malone_udata udata_buffer[VID_API_NUM_STREAMS];
+ struct vpu_malone_buffer_desc debug_buffer_desc;
+ struct vpu_malone_buffer_desc eng_access_buff_desc[VID_API_NUM_STREAMS];
+ u32 encrypt_info[VID_API_NUM_STREAMS];
+ struct vpu_rpc_system_config system_cfg;
+ u32 api_version;
+ struct vpu_malone_buffer_info stream_buff_info[VID_API_NUM_STREAMS];
+};
+
+struct malone_jpg_params {
+ u32 rotation_angle;
+ u32 horiz_scale_factor;
+ u32 vert_scale_factor;
+ u32 rotation_mode;
+ u32 rgb_mode;
+ u32 chunk_mode; /* 0 ~ 1 */
+ u32 last_chunk; /* 0 ~ 1 */
+ u32 chunk_rows; /* 0 ~ 255 */
+ u32 num_bytes;
+ u32 jpg_crop_x;
+ u32 jpg_crop_y;
+ u32 jpg_crop_width;
+ u32 jpg_crop_height;
+ u32 jpg_mjpeg_mode;
+ u32 jpg_mjpeg_interlaced;
+};
+
+struct malone_codec_params {
+ u32 disp_imm;
+ u32 fourcc;
+ u32 codec_version;
+ u32 frame_rate;
+ u32 dbglog_enable;
+ u32 bsdma_lwm;
+ u32 bbd_coring;
+ u32 bbd_s_thr_row;
+ u32 bbd_p_thr_row;
+ u32 bbd_s_thr_logo_row;
+ u32 bbd_p_thr_logo_row;
+ u32 bbd_s_thr_col;
+ u32 bbd_p_thr_col;
+ u32 bbd_chr_thr_row;
+ u32 bbd_chr_thr_col;
+ u32 bbd_uv_mid_level;
+ u32 bbd_excl_win_mb_left;
+ u32 bbd_excl_win_mb_right;
+};
+
+struct malone_padding_scode {
+ u32 scode_type;
+ u32 pixelformat;
+ u32 data[2];
+};
+
+struct malone_fmt_mapping {
+ u32 pixelformat;
+ enum vpu_malone_format malone_format;
+};
+
+struct malone_scode_t {
+ struct vpu_inst *inst;
+ struct vb2_buffer *vb;
+ u32 wptr;
+ u32 need_data;
+};
+
+struct malone_scode_handler {
+ u32 pixelformat;
+ int (*insert_scode_seq)(struct malone_scode_t *scode);
+ int (*insert_scode_pic)(struct malone_scode_t *scode);
+};
+
+struct vpu_dec_ctrl {
+ struct malone_codec_params *codec_param;
+ struct malone_jpg_params *jpg;
+ void *seq_mem;
+ void *pic_mem;
+ void *gop_mem;
+ void *qmeter_mem;
+ void *dbglog_mem;
+ struct vpu_malone_str_buffer __iomem *str_buf[VID_API_NUM_STREAMS];
+ u32 buf_addr[VID_API_NUM_STREAMS];
+};
+
+u32 vpu_malone_get_data_size(void)
+{
+ return sizeof(struct vpu_dec_ctrl);
+}
+
+void vpu_malone_init_rpc(struct vpu_shared_addr *shared,
+ struct vpu_buffer *rpc, dma_addr_t boot_addr)
+{
+ struct malone_iface *iface;
+ struct vpu_dec_ctrl *hc;
+ unsigned long base_phy_addr;
+ unsigned long phy_addr;
+ unsigned long offset;
+ unsigned int i;
+
+ if (rpc->phys < boot_addr)
+ return;
+
+ iface = rpc->virt;
+ base_phy_addr = rpc->phys - boot_addr;
+ hc = shared->priv;
+
+ shared->iface = iface;
+ shared->boot_addr = boot_addr;
+
+ iface->exec_base_addr = base_phy_addr;
+ iface->exec_area_size = rpc->length;
+
+ offset = sizeof(struct malone_iface);
+ phy_addr = base_phy_addr + offset;
+
+ shared->cmd_desc = &iface->cmd_buffer_desc.buffer;
+ shared->cmd_mem_vir = rpc->virt + offset;
+ iface->cmd_buffer_desc.buffer.start =
+ iface->cmd_buffer_desc.buffer.rptr =
+ iface->cmd_buffer_desc.buffer.wptr = phy_addr;
+ iface->cmd_buffer_desc.buffer.end = iface->cmd_buffer_desc.buffer.start + CMD_SIZE;
+ offset += CMD_SIZE;
+ phy_addr = base_phy_addr + offset;
+
+ shared->msg_desc = &iface->msg_buffer_desc.buffer;
+ shared->msg_mem_vir = rpc->virt + offset;
+ iface->msg_buffer_desc.buffer.start =
+ iface->msg_buffer_desc.buffer.wptr =
+ iface->msg_buffer_desc.buffer.rptr = phy_addr;
+ iface->msg_buffer_desc.buffer.end = iface->msg_buffer_desc.buffer.start + MSG_SIZE;
+ offset += MSG_SIZE;
+ phy_addr = base_phy_addr + offset;
+
+ iface->codec_param_tab_desc.array_base = phy_addr;
+ hc->codec_param = rpc->virt + offset;
+ offset += CODEC_SIZE;
+ phy_addr = base_phy_addr + offset;
+
+ iface->jpeg_param_tab_desc.array_base = phy_addr;
+ hc->jpg = rpc->virt + offset;
+ offset += JPEG_SIZE;
+ phy_addr = base_phy_addr + offset;
+
+ iface->seq_info_tab_desc.array_base = phy_addr;
+ hc->seq_mem = rpc->virt + offset;
+ offset += SEQ_SIZE;
+ phy_addr = base_phy_addr + offset;
+
+ iface->pic_info_tab_desc.array_base = phy_addr;
+ hc->pic_mem = rpc->virt + offset;
+ offset += PIC_SIZE;
+ phy_addr = base_phy_addr + offset;
+
+ iface->gop_info_tab_desc.array_base = phy_addr;
+ hc->gop_mem = rpc->virt + offset;
+ offset += GOP_SIZE;
+ phy_addr = base_phy_addr + offset;
+
+ iface->qmeter_info_tab_desc.array_base = phy_addr;
+ hc->qmeter_mem = rpc->virt + offset;
+ offset += QMETER_SIZE;
+ phy_addr = base_phy_addr + offset;
+
+ iface->dbglog_desc.addr = phy_addr;
+ iface->dbglog_desc.size = DBGLOG_SIZE;
+ hc->dbglog_mem = rpc->virt + offset;
+ offset += DBGLOG_SIZE;
+ phy_addr = base_phy_addr + offset;
+
+ for (i = 0; i < VID_API_NUM_STREAMS; i++) {
+ iface->eng_access_buff_desc[i].buffer.start =
+ iface->eng_access_buff_desc[i].buffer.wptr =
+ iface->eng_access_buff_desc[i].buffer.rptr = phy_addr;
+ iface->eng_access_buff_desc[i].buffer.end =
+ iface->eng_access_buff_desc[i].buffer.start + ENG_SIZE;
+ offset += ENG_SIZE;
+ phy_addr = base_phy_addr + offset;
+ }
+
+ for (i = 0; i < VID_API_NUM_STREAMS; i++) {
+ iface->encrypt_info[i] = phy_addr;
+ offset += sizeof(struct vpu_malone_encrypt_info);
+ phy_addr = base_phy_addr + offset;
+ }
+
+ rpc->bytesused = offset;
+}
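+
+/*
+ * Resulting RPC region layout, in order: the malone_iface header, the
+ * command and message rings (CMD_SIZE/MSG_SIZE), the codec, jpeg, seq, pic,
+ * gop and qmeter parameter tables, the debug log, and finally the per-stream
+ * engine-access buffers and encrypt info slots.
+ */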
+
+void vpu_malone_set_log_buf(struct vpu_shared_addr *shared,
+ struct vpu_buffer *log)
+{
+ struct malone_iface *iface = shared->iface;
+
+ iface->debug_buffer_desc.buffer.start =
+ iface->debug_buffer_desc.buffer.wptr =
+ iface->debug_buffer_desc.buffer.rptr = log->phys - shared->boot_addr;
+ iface->debug_buffer_desc.buffer.end = iface->debug_buffer_desc.buffer.start + log->length;
+}
+
+static u32 get_str_buffer_offset(u32 instance)
+{
+ return DEC_MFD_XREG_SLV_BASE + MFD_MCX + MFD_MCX_OFF * instance;
+}
+
+void vpu_malone_set_system_cfg(struct vpu_shared_addr *shared,
+ u32 regs_base, void __iomem *regs, u32 core_id)
+{
+ struct malone_iface *iface = shared->iface;
+ struct vpu_rpc_system_config *config = &iface->system_cfg;
+ struct vpu_dec_ctrl *hc = shared->priv;
+ int i;
+
+ vpu_imx8q_set_system_cfg_common(config, regs_base, core_id);
+ for (i = 0; i < VID_API_NUM_STREAMS; i++) {
+ u32 offset = get_str_buffer_offset(i);
+
+ hc->buf_addr[i] = regs_base + offset;
+ hc->str_buf[i] = regs + offset;
+ }
+}
+
+u32 vpu_malone_get_version(struct vpu_shared_addr *shared)
+{
+ struct malone_iface *iface = shared->iface;
+
+ return iface->fw_version;
+}
+
+int vpu_malone_get_stream_buffer_size(struct vpu_shared_addr *shared)
+{
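+	/* fixed 0xc00000 (12 MiB) stream buffer */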
+ return 0xc00000;
+}
+
+int vpu_malone_config_stream_buffer(struct vpu_shared_addr *shared,
+ u32 instance,
+ struct vpu_buffer *buf)
+{
+ struct malone_iface *iface = shared->iface;
+ struct vpu_dec_ctrl *hc = shared->priv;
+ struct vpu_malone_str_buffer __iomem *str_buf = hc->str_buf[instance];
+
+ writel(buf->phys, &str_buf->start);
+ writel(buf->phys, &str_buf->rptr);
+ writel(buf->phys, &str_buf->wptr);
+ writel(buf->phys + buf->length, &str_buf->end);
+ writel(0x1, &str_buf->lwm);
+
+ iface->stream_buffer_desc[instance][0] = hc->buf_addr[instance];
+
+ return 0;
+}
+
+int vpu_malone_get_stream_buffer_desc(struct vpu_shared_addr *shared,
+ u32 instance,
+ struct vpu_rpc_buffer_desc *desc)
+{
+ struct vpu_dec_ctrl *hc = shared->priv;
+ struct vpu_malone_str_buffer __iomem *str_buf = hc->str_buf[instance];
+
+ if (desc) {
+ desc->wptr = readl(&str_buf->wptr);
+ desc->rptr = readl(&str_buf->rptr);
+ desc->start = readl(&str_buf->start);
+ desc->end = readl(&str_buf->end);
+ }
+
+ return 0;
+}
+
+static void vpu_malone_update_wptr(struct vpu_malone_str_buffer __iomem *str_buf, u32 wptr)
+{
+	/* update wptr after the data has been written */
+ mb();
+ writel(wptr, &str_buf->wptr);
+}
+
+static void vpu_malone_update_rptr(struct vpu_malone_str_buffer __iomem *str_buf, u32 rptr)
+{
+	/* update rptr after the data has been read */
+ mb();
+ writel(rptr, &str_buf->rptr);
+}
+
+int vpu_malone_update_stream_buffer(struct vpu_shared_addr *shared,
+ u32 instance, u32 ptr, bool write)
+{
+ struct vpu_dec_ctrl *hc = shared->priv;
+ struct vpu_malone_str_buffer __iomem *str_buf = hc->str_buf[instance];
+
+ if (write)
+ vpu_malone_update_wptr(str_buf, ptr);
+ else
+ vpu_malone_update_rptr(str_buf, ptr);
+
+ return 0;
+}
+
+static struct malone_fmt_mapping fmt_mappings[] = {
+ {V4L2_PIX_FMT_H264, MALONE_FMT_AVC},
+ {V4L2_PIX_FMT_H264_MVC, MALONE_FMT_AVC},
+ {V4L2_PIX_FMT_HEVC, MALONE_FMT_HEVC},
+ {V4L2_PIX_FMT_VC1_ANNEX_G, MALONE_FMT_VC1},
+ {V4L2_PIX_FMT_VC1_ANNEX_L, MALONE_FMT_VC1},
+ {V4L2_PIX_FMT_MPEG2, MALONE_FMT_MP2},
+ {V4L2_PIX_FMT_MPEG4, MALONE_FMT_ASP},
+ {V4L2_PIX_FMT_XVID, MALONE_FMT_ASP},
+ {V4L2_PIX_FMT_H263, MALONE_FMT_ASP},
+ {V4L2_PIX_FMT_JPEG, MALONE_FMT_JPG},
+ {V4L2_PIX_FMT_VP8, MALONE_FMT_VP8},
+};
+
+static enum vpu_malone_format vpu_malone_format_remap(u32 pixelformat)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(fmt_mappings); i++) {
+ if (pixelformat == fmt_mappings[i].pixelformat)
+ return fmt_mappings[i].malone_format;
+ }
+
+ return MALONE_FMT_NULL;
+}
+
+static void vpu_malone_set_stream_cfg(struct vpu_shared_addr *shared,
+ u32 instance,
+ enum vpu_malone_format malone_format)
+{
+ struct malone_iface *iface = shared->iface;
+ u32 *curr_str_cfg = &iface->stream_config[instance];
+
+ *curr_str_cfg = 0;
+ STREAM_CONFIG_FORMAT_SET(malone_format, curr_str_cfg);
+ STREAM_CONFIG_STRBUFIDX_SET(0, curr_str_cfg);
+ STREAM_CONFIG_NOSEQ_SET(0, curr_str_cfg);
+ STREAM_CONFIG_DEBLOCK_SET(0, curr_str_cfg);
+ STREAM_CONFIG_DERING_SET(0, curr_str_cfg);
+ STREAM_CONFIG_PLAY_MODE_SET(0x3, curr_str_cfg);
+ STREAM_CONFIG_FS_CTRL_MODE_SET(0x1, curr_str_cfg);
+ STREAM_CONFIG_ENABLE_DCP_SET(1, curr_str_cfg);
+ STREAM_CONFIG_NUM_STR_BUF_SET(1, curr_str_cfg);
+ STREAM_CONFIG_MALONE_USAGE_SET(1, curr_str_cfg);
+ STREAM_CONFIG_MULTI_VID_SET(0, curr_str_cfg);
+ STREAM_CONFIG_OBFUSC_EN_SET(0, curr_str_cfg);
+ STREAM_CONFIG_RC4_EN_SET(0, curr_str_cfg);
+ STREAM_CONFIG_MCX_SET(1, curr_str_cfg);
+ STREAM_CONFIG_PES_SET(0, curr_str_cfg);
+ STREAM_CONFIG_NUM_DBE_SET(1, curr_str_cfg);
+}
+
+static int vpu_malone_set_params(struct vpu_shared_addr *shared,
+ u32 instance,
+ struct vpu_decode_params *params)
+{
+ struct malone_iface *iface = shared->iface;
+ struct vpu_dec_ctrl *hc = shared->priv;
+ enum vpu_malone_format malone_format;
+
+ malone_format = vpu_malone_format_remap(params->codec_format);
+ iface->udata_buffer[instance].base = params->udata.base;
+ iface->udata_buffer[instance].slot_size = params->udata.size;
+
+ vpu_malone_set_stream_cfg(shared, instance, malone_format);
+
+ if (malone_format == MALONE_FMT_JPG) {
+		// 1: JPGD_MJPEG_MODE_A; 2: JPGD_MJPEG_MODE_B
+ hc->jpg[instance].jpg_mjpeg_mode = 1;
+		// 0: JPGD_MJPEG_PROGRESSIVE
+ hc->jpg[instance].jpg_mjpeg_interlaced = 0;
+ }
+
+ hc->codec_param[instance].disp_imm = params->b_dis_reorder ? 1 : 0;
+ hc->codec_param[instance].dbglog_enable = 0;
+ iface->dbglog_desc.level = 0;
+
+ if (params->b_non_frame)
+ iface->stream_buff_info[instance].stream_input_mode = NON_FRAME_LVL;
+ else
+ iface->stream_buff_info[instance].stream_input_mode = FRAME_LVL;
+ iface->stream_buff_info[instance].stream_buffer_threshold = 0;
+ iface->stream_buff_info[instance].stream_pic_input_count = 0;
+
+ return 0;
+}
+
+static bool vpu_malone_is_non_frame_mode(struct vpu_shared_addr *shared, u32 instance)
+{
+ struct malone_iface *iface = shared->iface;
+
+ if (iface->stream_buff_info[instance].stream_input_mode == NON_FRAME_LVL)
+ return true;
+
+ return false;
+}
+
+static int vpu_malone_update_params(struct vpu_shared_addr *shared,
+ u32 instance,
+ struct vpu_decode_params *params)
+{
+ struct malone_iface *iface = shared->iface;
+
+ if (params->end_flag)
+ iface->stream_buff_info[instance].stream_pic_end_flag = params->end_flag;
+ params->end_flag = 0;
+
+ return 0;
+}
+
+int vpu_malone_set_decode_params(struct vpu_shared_addr *shared,
+ u32 instance,
+ struct vpu_decode_params *params,
+ u32 update)
+{
+ if (!params)
+ return -EINVAL;
+
+ if (!update)
+ return vpu_malone_set_params(shared, instance, params);
+ else
+ return vpu_malone_update_params(shared, instance, params);
+}
+
+static struct vpu_pair malone_cmds[] = {
+ {VPU_CMD_ID_START, VID_API_CMD_START},
+ {VPU_CMD_ID_STOP, VID_API_CMD_STOP},
+ {VPU_CMD_ID_ABORT, VID_API_CMD_ABORT},
+ {VPU_CMD_ID_RST_BUF, VID_API_CMD_RST_BUF},
+ {VPU_CMD_ID_SNAPSHOT, VID_API_CMD_SNAPSHOT},
+ {VPU_CMD_ID_FIRM_RESET, VID_API_CMD_FIRM_RESET},
+ {VPU_CMD_ID_FS_ALLOC, VID_API_CMD_FS_ALLOC},
+ {VPU_CMD_ID_FS_RELEASE, VID_API_CMD_FS_RELEASE},
+ {VPU_CMD_ID_TIMESTAMP, VID_API_CMD_TS},
+ {VPU_CMD_ID_DEBUG, VID_API_CMD_FW_STATUS},
+};
+
+static struct vpu_pair malone_msgs[] = {
+ {VPU_MSG_ID_RESET_DONE, VID_API_EVENT_RESET_DONE},
+ {VPU_MSG_ID_START_DONE, VID_API_EVENT_START_DONE},
+ {VPU_MSG_ID_STOP_DONE, VID_API_EVENT_STOPPED},
+ {VPU_MSG_ID_ABORT_DONE, VID_API_EVENT_ABORT_DONE},
+ {VPU_MSG_ID_BUF_RST, VID_API_EVENT_STR_BUF_RST},
+ {VPU_MSG_ID_PIC_EOS, VID_API_EVENT_FINISHED},
+ {VPU_MSG_ID_SEQ_HDR_FOUND, VID_API_EVENT_SEQ_HDR_FOUND},
+ {VPU_MSG_ID_RES_CHANGE, VID_API_EVENT_RES_CHANGE},
+ {VPU_MSG_ID_PIC_HDR_FOUND, VID_API_EVENT_PIC_HDR_FOUND},
+ {VPU_MSG_ID_PIC_DECODED, VID_API_EVENT_PIC_DECODED},
+ {VPU_MSG_ID_DEC_DONE, VID_API_EVENT_FRAME_BUFF_RDY},
+ {VPU_MSG_ID_FRAME_REQ, VID_API_EVENT_REQ_FRAME_BUFF},
+ {VPU_MSG_ID_FRAME_RELEASE, VID_API_EVENT_REL_FRAME_BUFF},
+ {VPU_MSG_ID_FIFO_LOW, VID_API_EVENT_FIFO_LOW},
+ {VPU_MSG_ID_BS_ERROR, VID_API_EVENT_BS_ERROR},
+ {VPU_MSG_ID_UNSUPPORTED, VID_API_EVENT_UNSUPPORTED_STREAM},
+ {VPU_MSG_ID_FIRMWARE_XCPT, VID_API_EVENT_FIRMWARE_XCPT},
+};
+
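+ /*
+ * Pack a frame-store allocation command as seven data words: buffer
+ * id/tag, luma and chroma plane addresses (apparently with
+ * second-field offsets at half the plane size), stride and buffer type.
+ */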
+static void vpu_malone_pack_fs_alloc(struct vpu_rpc_event *pkt,
+ struct vpu_fs_info *fs)
+{
+ const u32 fs_type[] = {
+ [MEM_RES_FRAME] = 0,
+ [MEM_RES_MBI] = 1,
+ [MEM_RES_DCP] = 2,
+ };
+
+ pkt->hdr.num = 7;
+ pkt->data[0] = fs->id | (fs->tag << 24);
+ pkt->data[1] = fs->luma_addr;
+ if (fs->type == MEM_RES_FRAME) {
+ /*
+ * If luma_addr equals chroma_addr, luma (plane[0]) and chroma
+ * (plane[1]) share the same fd -- the usage of NXP codec2 -- so
+ * the chroma address must be offset manually.
+ */
+ if (fs->luma_addr == fs->chroma_addr)
+ fs->chroma_addr = fs->luma_addr + fs->luma_size;
+ pkt->data[2] = fs->luma_addr + fs->luma_size / 2;
+ pkt->data[3] = fs->chroma_addr;
+ pkt->data[4] = fs->chroma_addr + fs->chromau_size / 2;
+ pkt->data[5] = fs->bytesperline;
+ } else {
+ pkt->data[2] = fs->luma_size;
+ pkt->data[3] = 0;
+ pkt->data[4] = 0;
+ pkt->data[5] = 0;
+ }
+ pkt->data[6] = fs_type[fs->type];
+}
+
+static void vpu_malone_pack_fs_release(struct vpu_rpc_event *pkt,
+ struct vpu_fs_info *fs)
+{
+ pkt->hdr.num = 1;
+ pkt->data[0] = fs->id | (fs->tag << 24);
+}
+
+static void vpu_malone_pack_timestamp(struct vpu_rpc_event *pkt,
+ struct vpu_ts_info *info)
+{
+ struct timespec64 ts = ns_to_timespec64(info->timestamp);
+
+ pkt->hdr.num = 3;
+
+ pkt->data[0] = ts.tv_sec;
+ pkt->data[1] = ts.tv_nsec;
+ pkt->data[2] = info->size;
+}
+
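+ /*
+ * Translate a generic VPU command id into the Malone VID_API id via
+ * malone_cmds[], then pack the per-command payload where one exists.
+ */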
+int vpu_malone_pack_cmd(struct vpu_rpc_event *pkt, u32 index, u32 id, void *data)
+{
+ int ret;
+
+ ret = vpu_find_dst_by_src(malone_cmds, ARRAY_SIZE(malone_cmds), id);
+ if (ret < 0)
+ return ret;
+
+ pkt->hdr.id = ret;
+ pkt->hdr.num = 0;
+ pkt->hdr.index = index;
+
+ switch (id) {
+ case VPU_CMD_ID_FS_ALLOC:
+ vpu_malone_pack_fs_alloc(pkt, data);
+ break;
+ case VPU_CMD_ID_FS_RELEASE:
+ vpu_malone_pack_fs_release(pkt, data);
+ break;
+ case VPU_CMD_ID_TIMESTAMP:
+ vpu_malone_pack_timestamp(pkt, data);
+ break;
+ }
+
+ return 0;
+}
+
+int vpu_malone_convert_msg_id(u32 id)
+{
+ return vpu_find_src_by_dst(malone_msgs, ARRAY_SIZE(malone_msgs), id);
+}
+
+static void vpu_malone_fill_planes(struct vpu_dec_codec_info *info)
+{
+ u32 interlaced = info->progressive ? 0 : 1;
+
+ info->bytesperline[0] = 0;
+ info->sizeimage[0] = vpu_helper_get_plane_size(info->pixfmt,
+ info->decoded_width,
+ info->decoded_height,
+ 0,
+ info->stride,
+ interlaced,
+ &info->bytesperline[0]);
+ info->bytesperline[1] = 0;
+ info->sizeimage[1] = vpu_helper_get_plane_size(info->pixfmt,
+ info->decoded_width,
+ info->decoded_height,
+ 1,
+ info->stride,
+ interlaced,
+ &info->bytesperline[1]);
+}
+
+static void vpu_malone_init_seq_hdr(struct vpu_dec_codec_info *info)
+{
+ u32 chunks = info->num_dfe_area >> MALONE_DCP_CHUNK_BIT;
+
+ vpu_malone_fill_planes(info);
+
+ info->mbi_size = (info->sizeimage[0] + info->sizeimage[1]) >> 2;
+ info->mbi_size = ALIGN(info->mbi_size, MALONE_ALIGN_MBI);
+
+ info->dcp_size = MALONE_DCP_SIZE_MAX;
+ if (chunks) {
+ u32 mb_num;
+ u32 mb_w;
+ u32 mb_h;
+
+ mb_w = DIV_ROUND_UP(info->decoded_width, 16);
+ mb_h = DIV_ROUND_UP(info->decoded_height, 16);
+ mb_num = mb_w * mb_h;
+ info->dcp_size = mb_num * MALONE_DCP_FIXED_MB_ALLOC * chunks;
+ info->dcp_size = clamp_t(u32, info->dcp_size,
+ MALONE_DCP_SIZE_MIN, MALONE_DCP_SIZE_MAX);
+ }
+}
+
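+ /*
+ * The data[] indices below follow the fixed layout of the firmware's
+ * sequence-header event; they are part of the Malone message ABI.
+ */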
+static void vpu_malone_unpack_seq_hdr(struct vpu_rpc_event *pkt,
+ struct vpu_dec_codec_info *info)
+{
+ info->num_ref_frms = pkt->data[0];
+ info->num_dpb_frms = pkt->data[1];
+ info->num_dfe_area = pkt->data[2];
+ info->progressive = pkt->data[3];
+ info->width = pkt->data[5];
+ info->height = pkt->data[4];
+ info->decoded_width = pkt->data[12];
+ info->decoded_height = pkt->data[11];
+ info->frame_rate.numerator = 1000;
+ info->frame_rate.denominator = pkt->data[8];
+ info->dsp_asp_ratio = pkt->data[9];
+ info->level_idc = pkt->data[10];
+ info->bit_depth_luma = pkt->data[13];
+ info->bit_depth_chroma = pkt->data[14];
+ info->chroma_fmt = pkt->data[15];
+ info->color_primaries = vpu_color_cvrt_primaries_i2v(pkt->data[16]);
+ info->transfer_chars = vpu_color_cvrt_transfers_i2v(pkt->data[17]);
+ info->matrix_coeffs = vpu_color_cvrt_matrix_i2v(pkt->data[18]);
+ info->full_range = vpu_color_cvrt_full_range_i2v(pkt->data[19]);
+ info->vui_present = pkt->data[20];
+ info->mvc_num_views = pkt->data[21];
+ info->offset_x = pkt->data[23];
+ info->offset_y = pkt->data[25];
+ info->tag = pkt->data[27];
+ if (info->bit_depth_luma > 8)
+ info->pixfmt = V4L2_PIX_FMT_NV12M_10BE_8L128;
+ else
+ info->pixfmt = V4L2_PIX_FMT_NV12M_8L128;
+ if (info->frame_rate.numerator && info->frame_rate.denominator) {
+ unsigned long n, d;
+
+ rational_best_approximation(info->frame_rate.numerator,
+ info->frame_rate.denominator,
+ info->frame_rate.numerator,
+ info->frame_rate.denominator,
+ &n, &d);
+ info->frame_rate.numerator = n;
+ info->frame_rate.denominator = d;
+ }
+ vpu_malone_init_seq_hdr(info);
+}
+
+static void vpu_malone_unpack_pic_info(struct vpu_rpc_event *pkt,
+ struct vpu_dec_pic_info *info)
+{
+ info->id = pkt->data[7];
+ info->luma = pkt->data[0];
+ info->start = pkt->data[10];
+ info->end = pkt->data[12];
+ info->pic_size = pkt->data[11];
+ info->stride = pkt->data[5];
+ info->consumed_count = pkt->data[13];
+ if (info->id == MALONE_SKIPPED_FRAME_ID)
+ info->skipped = 1;
+ else
+ info->skipped = 0;
+}
+
+static void vpu_malone_unpack_req_frame(struct vpu_rpc_event *pkt,
+ struct vpu_fs_info *info)
+{
+ info->type = pkt->data[1];
+}
+
+static void vpu_malone_unpack_rel_frame(struct vpu_rpc_event *pkt,
+ struct vpu_fs_info *info)
+{
+ info->id = pkt->data[0];
+ info->type = pkt->data[1];
+ info->not_displayed = pkt->data[2];
+}
+
+static void vpu_malone_unpack_buff_rdy(struct vpu_rpc_event *pkt,
+ struct vpu_dec_pic_info *info)
+{
+ struct timespec64 ts = { pkt->data[9], pkt->data[10] };
+
+ info->id = pkt->data[0];
+ info->luma = pkt->data[1];
+ info->stride = pkt->data[3];
+ if (info->id == MALONE_SKIPPED_FRAME_ID)
+ info->skipped = 1;
+ else
+ info->skipped = 0;
+
+ info->timestamp = timespec64_to_ns(&ts);
+}
+
+int vpu_malone_unpack_msg_data(struct vpu_rpc_event *pkt, void *data)
+{
+ if (!pkt || !data)
+ return -EINVAL;
+
+ switch (pkt->hdr.id) {
+ case VID_API_EVENT_SEQ_HDR_FOUND:
+ vpu_malone_unpack_seq_hdr(pkt, data);
+ break;
+ case VID_API_EVENT_PIC_DECODED:
+ vpu_malone_unpack_pic_info(pkt, data);
+ break;
+ case VID_API_EVENT_REQ_FRAME_BUFF:
+ vpu_malone_unpack_req_frame(pkt, data);
+ break;
+ case VID_API_EVENT_REL_FRAME_BUFF:
+ vpu_malone_unpack_rel_frame(pkt, data);
+ break;
+ case VID_API_EVENT_FRAME_BUFF_RDY:
+ vpu_malone_unpack_buff_rdy(pkt, data);
+ break;
+ }
+
+ return 0;
+}
+
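+ /*
+ * Per-codec start codes written into the stream buffer as padding to
+ * signal end-of-stream, abort or buffer-flush to the firmware.
+ */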
+static const struct malone_padding_scode padding_scodes[] = {
+ {SCODE_PADDING_EOS, V4L2_PIX_FMT_H264, {0x0B010000, 0}},
+ {SCODE_PADDING_EOS, V4L2_PIX_FMT_H264_MVC, {0x0B010000, 0}},
+ {SCODE_PADDING_EOS, V4L2_PIX_FMT_HEVC, {0x4A010000, 0x20}},
+ {SCODE_PADDING_EOS, V4L2_PIX_FMT_VC1_ANNEX_G, {0x0a010000, 0x0}},
+ {SCODE_PADDING_EOS, V4L2_PIX_FMT_VC1_ANNEX_L, {0x0a010000, 0x0}},
+ {SCODE_PADDING_EOS, V4L2_PIX_FMT_MPEG2, {0xCC010000, 0x0}},
+ {SCODE_PADDING_EOS, V4L2_PIX_FMT_MPEG4, {0xb1010000, 0x0}},
+ {SCODE_PADDING_EOS, V4L2_PIX_FMT_XVID, {0xb1010000, 0x0}},
+ {SCODE_PADDING_EOS, V4L2_PIX_FMT_H263, {0xb1010000, 0x0}},
+ {SCODE_PADDING_EOS, V4L2_PIX_FMT_VP8, {0x34010000, 0x0}},
+ {SCODE_PADDING_EOS, V4L2_PIX_FMT_JPEG, {0xefff0000, 0x0}},
+ {SCODE_PADDING_ABORT, V4L2_PIX_FMT_H264, {0x0B010000, 0}},
+ {SCODE_PADDING_ABORT, V4L2_PIX_FMT_H264_MVC, {0x0B010000, 0}},
+ {SCODE_PADDING_ABORT, V4L2_PIX_FMT_HEVC, {0x4A010000, 0x20}},
+ {SCODE_PADDING_ABORT, V4L2_PIX_FMT_VC1_ANNEX_G, {0x0a010000, 0x0}},
+ {SCODE_PADDING_ABORT, V4L2_PIX_FMT_VC1_ANNEX_L, {0x0a010000, 0x0}},
+ {SCODE_PADDING_ABORT, V4L2_PIX_FMT_MPEG2, {0xb7010000, 0x0}},
+ {SCODE_PADDING_ABORT, V4L2_PIX_FMT_MPEG4, {0xb1010000, 0x0}},
+ {SCODE_PADDING_ABORT, V4L2_PIX_FMT_XVID, {0xb1010000, 0x0}},
+ {SCODE_PADDING_ABORT, V4L2_PIX_FMT_H263, {0xb1010000, 0x0}},
+ {SCODE_PADDING_ABORT, V4L2_PIX_FMT_VP8, {0x34010000, 0x0}},
+ {SCODE_PADDING_ABORT, V4L2_PIX_FMT_JPEG, {0x0, 0x0}},
+ {SCODE_PADDING_BUFFLUSH, V4L2_PIX_FMT_H264, {0x15010000, 0x0}},
+ {SCODE_PADDING_BUFFLUSH, V4L2_PIX_FMT_H264_MVC, {0x15010000, 0x0}},
+};
+
+static const struct malone_padding_scode padding_scode_dft = {0x0, 0x0};
+
+static const struct malone_padding_scode *get_padding_scode(u32 type, u32 fmt)
+{
+ const struct malone_padding_scode *s;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(padding_scodes); i++) {
+ s = &padding_scodes[i];
+
+ if (s->scode_type == type && s->pixelformat == fmt)
+ return s;
+ }
+
+ if (type != SCODE_PADDING_BUFFLUSH)
+ return &padding_scode_dft;
+
+ return NULL;
+}
+
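+ /*
+ * Append a padding block: align the write pointer to 4 bytes, write
+ * the codec's start code, then zero-fill up to a fixed 4096-byte
+ * block, presumably so the firmware's bitstream prefetch is flushed.
+ */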
+static int vpu_malone_add_padding_scode(struct vpu_buffer *stream_buffer,
+ struct vpu_malone_str_buffer __iomem *str_buf,
+ u32 pixelformat, u32 scode_type)
+{
+ u32 wptr;
+ int size;
+ int total_size = 0;
+ const struct malone_padding_scode *ps;
+ const u32 padding_size = 4096;
+ int ret;
+
+ ps = get_padding_scode(scode_type, pixelformat);
+ if (!ps)
+ return -EINVAL;
+
+ wptr = readl(&str_buf->wptr);
+ if (wptr < stream_buffer->phys || wptr > stream_buffer->phys + stream_buffer->length)
+ return -EINVAL;
+ if (wptr == stream_buffer->phys + stream_buffer->length)
+ wptr = stream_buffer->phys;
+ size = ALIGN(wptr, 4) - wptr;
+ if (size)
+ vpu_helper_memset_stream_buffer(stream_buffer, &wptr, 0, size);
+ total_size += size;
+
+ size = sizeof(ps->data);
+ ret = vpu_helper_copy_to_stream_buffer(stream_buffer, &wptr, size, (void *)ps->data);
+ if (ret < 0)
+ return -EINVAL;
+ total_size += size;
+
+ size = padding_size - sizeof(ps->data);
+ vpu_helper_memset_stream_buffer(stream_buffer, &wptr, 0, size);
+ total_size += size;
+
+ vpu_malone_update_wptr(str_buf, wptr);
+ return total_size;
+}
+
+int vpu_malone_add_scode(struct vpu_shared_addr *shared,
+ u32 instance,
+ struct vpu_buffer *stream_buffer,
+ u32 pixelformat,
+ u32 scode_type)
+{
+ struct vpu_dec_ctrl *hc = shared->priv;
+ struct vpu_malone_str_buffer __iomem *str_buf = hc->str_buf[instance];
+ int ret = -EINVAL;
+
+ switch (scode_type) {
+ case SCODE_PADDING_EOS:
+ case SCODE_PADDING_ABORT:
+ case SCODE_PADDING_BUFFLUSH:
+ ret = vpu_malone_add_padding_scode(stream_buffer, str_buf, pixelformat, scode_type);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+#define MALONE_PAYLOAD_HEADER_SIZE 16
+#define MALONE_CODEC_VERSION_ID 0x1
+#define MALONE_CODEC_ID_VC1_SIMPLE 0x10
+#define MALONE_CODEC_ID_VC1_MAIN 0x11
+#define MALONE_CODEC_ID_ARV8 0x28
+#define MALONE_CODEC_ID_ARV9 0x29
+#define MALONE_CODEC_ID_VP6 0x36
+#define MALONE_CODEC_ID_VP8 0x36
+#define MALONE_CODEC_ID_DIVX3 0x38
+#define MALONE_CODEC_ID_SPK 0x39
+
+#define MALONE_VP8_IVF_SEQ_HEADER_LEN 32
+#define MALONE_VP8_IVF_FRAME_HEADER_LEN 8
+
+#define MALONE_VC1_RCV_CODEC_V1_VERSION 0x85
+#define MALONE_VC1_RCV_CODEC_V2_VERSION 0xC5
+#define MALONE_VC1_RCV_NUM_FRAMES 0xFF
+#define MALONE_VC1_RCV_SEQ_EXT_DATA_SIZE 4
+#define MALONE_VC1_RCV_SEQ_HEADER_LEN 20
+#define MALONE_VC1_RCV_PIC_HEADER_LEN 4
+#define MALONE_VC1_NAL_HEADER_LEN 4
+#define MALONE_VC1_CONTAIN_NAL(data) (((data) & 0x00FFFFFF) == 0x00010000)
+
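+ /*
+ * Build the 16-byte Malone payload header: a start code, the 24-bit
+ * payload length (buffer plus header minus the 4-byte start code),
+ * codec id/version and width/height; the fixed bytes 0x4e, 0x58 and
+ * 0x50 spell "NXP".
+ */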
+static void set_payload_hdr(u8 *dst, u32 scd_type, u32 codec_id,
+ u32 buffer_size, u32 width, u32 height)
+{
+ unsigned int payload_size;
+ /* payload_size = buffer_size + itself_size(16) - start_code(4) */
+ payload_size = buffer_size + 12;
+
+ dst[0] = 0x00;
+ dst[1] = 0x00;
+ dst[2] = 0x01;
+ dst[3] = scd_type;
+
+ /* length */
+ dst[4] = ((payload_size >> 16) & 0xff);
+ dst[5] = ((payload_size >> 8) & 0xff);
+ dst[6] = 0x4e;
+ dst[7] = ((payload_size >> 0) & 0xff);
+
+ /* Codec ID and Version */
+ dst[8] = codec_id;
+ dst[9] = MALONE_CODEC_VERSION_ID;
+
+ /* width */
+ dst[10] = ((width >> 8) & 0xff);
+ dst[11] = ((width >> 0) & 0xff);
+ dst[12] = 0x58;
+
+ /* height */
+ dst[13] = ((height >> 8) & 0xff);
+ dst[14] = ((height >> 0) & 0xff);
+ dst[15] = 0x50;
+}
+
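+ /*
+ * Build a 32-byte IVF file header ("DKIF") describing a VP8 stream;
+ * the frame rate, time scale and frame count are fixed placeholder
+ * values rather than properties of the actual stream.
+ */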
+static void set_vp8_ivf_seqhdr(u8 *dst, u32 width, u32 height)
+{
+ /* 0-3 byte signature "DKIF" */
+ dst[0] = 0x44;
+ dst[1] = 0x4b;
+ dst[2] = 0x49;
+ dst[3] = 0x46;
+ /* 4-5 byte version: should be 0 */
+ dst[4] = 0x00;
+ dst[5] = 0x00;
+ /* 6-7 length of Header */
+ dst[6] = MALONE_VP8_IVF_SEQ_HEADER_LEN;
+ dst[7] = MALONE_VP8_IVF_SEQ_HEADER_LEN >> 8;
+ /* 8-11 VP8 fourcc */
+ dst[8] = 0x56;
+ dst[9] = 0x50;
+ dst[10] = 0x38;
+ dst[11] = 0x30;
+ /* 12-13 width in pixels */
+ dst[12] = width;
+ dst[13] = width >> 8;
+ /* 14-15 height in pixels */
+ dst[14] = height;
+ dst[15] = height >> 8;
+ /* 16-19 frame rate */
+ dst[16] = 0xe8;
+ dst[17] = 0x03;
+ dst[18] = 0x00;
+ dst[19] = 0x00;
+ /* 20-23 time scale */
+ dst[20] = 0x01;
+ dst[21] = 0x00;
+ dst[22] = 0x00;
+ dst[23] = 0x00;
+ /* 24-27 number of frames */
+ dst[24] = 0xdf;
+ dst[25] = 0xf9;
+ dst[26] = 0x09;
+ dst[27] = 0x00;
+ /* 28-31 reserved */
+}
+
+static void set_vp8_ivf_pichdr(u8 *dst, u32 frame_size)
+{
+ /*
+ * The firmware only parses a 64-bit timestamp (8 bytes) here. Since
+ * no timestamp is passed to the firmware, the default value (zero)
+ * is used and nothing needs to be written.
+ */
+}
+
+static void set_vc1_rcv_seqhdr(u8 *dst, u8 *src, u32 width, u32 height)
+{
+ u32 frames = MALONE_VC1_RCV_NUM_FRAMES;
+ u32 ext_data_size = MALONE_VC1_RCV_SEQ_EXT_DATA_SIZE;
+
+ /* 0-2 number of frames, use the default value 0xFF */
+ dst[0] = frames;
+ dst[1] = frames >> 8;
+ dst[2] = frames >> 16;
+
+ /* 3 RCV version, use V1 */
+ dst[3] = MALONE_VC1_RCV_CODEC_V1_VERSION;
+
+ /* 4-7 extension data size */
+ dst[4] = ext_data_size;
+ dst[5] = ext_data_size >> 8;
+ dst[6] = ext_data_size >> 16;
+ dst[7] = ext_data_size >> 24;
+ /* 8-11 extension data */
+ dst[8] = src[0];
+ dst[9] = src[1];
+ dst[10] = src[2];
+ dst[11] = src[3];
+
+ /* height */
+ dst[12] = height;
+ dst[13] = (height >> 8) & 0xff;
+ dst[14] = (height >> 16) & 0xff;
+ dst[15] = (height >> 24) & 0xff;
+ /* width */
+ dst[16] = width;
+ dst[17] = (width >> 8) & 0xff;
+ dst[18] = (width >> 16) & 0xff;
+ dst[19] = (width >> 24) & 0xff;
+}
+
+static void set_vc1_rcv_pichdr(u8 *dst, u32 buffer_size)
+{
+ dst[0] = buffer_size;
+ dst[1] = buffer_size >> 8;
+ dst[2] = buffer_size >> 16;
+ dst[3] = buffer_size >> 24;
+}
+
+static void create_vc1_nal_pichdr(u8 *dst)
+{
+ /* need to insert a NAL header: special ID */
+ dst[0] = 0x0;
+ dst[1] = 0x0;
+ dst[2] = 0x01;
+ dst[3] = 0x0D;
+}
+
+static int vpu_malone_insert_scode_seq(struct malone_scode_t *scode, u32 codec_id, u32 ext_size)
+{
+ u8 hdr[MALONE_PAYLOAD_HEADER_SIZE];
+ int ret;
+
+ set_payload_hdr(hdr,
+ SCODE_SEQUENCE,
+ codec_id,
+ ext_size,
+ scode->inst->out_format.width,
+ scode->inst->out_format.height);
+ ret = vpu_helper_copy_to_stream_buffer(&scode->inst->stream_buffer,
+ &scode->wptr,
+ sizeof(hdr),
+ hdr);
+ if (ret < 0)
+ return ret;
+ return sizeof(hdr);
+}
+
+static int vpu_malone_insert_scode_pic(struct malone_scode_t *scode, u32 codec_id, u32 ext_size)
+{
+ u8 hdr[MALONE_PAYLOAD_HEADER_SIZE];
+ int ret;
+
+ set_payload_hdr(hdr,
+ SCODE_PICTURE,
+ codec_id,
+ ext_size + vb2_get_plane_payload(scode->vb, 0),
+ scode->inst->out_format.width,
+ scode->inst->out_format.height);
+ ret = vpu_helper_copy_to_stream_buffer(&scode->inst->stream_buffer,
+ &scode->wptr,
+ sizeof(hdr),
+ hdr);
+ if (ret < 0)
+ return ret;
+ return sizeof(hdr);
+}
+
+static int vpu_malone_insert_scode_vc1_g_pic(struct malone_scode_t *scode)
+{
+ struct vb2_v4l2_buffer *vbuf;
+ u8 nal_hdr[MALONE_VC1_NAL_HEADER_LEN];
+ u32 *data = NULL;
+ int ret;
+
+ vbuf = to_vb2_v4l2_buffer(scode->vb);
+ data = vb2_plane_vaddr(scode->vb, 0);
+
+ if (vbuf->sequence == 0 || vpu_vb_is_codecconfig(vbuf))
+ return 0;
+ if (MALONE_VC1_CONTAIN_NAL(*data))
+ return 0;
+
+ create_vc1_nal_pichdr(nal_hdr);
+ ret = vpu_helper_copy_to_stream_buffer(&scode->inst->stream_buffer,
+ &scode->wptr,
+ sizeof(nal_hdr),
+ nal_hdr);
+ if (ret < 0)
+ return ret;
+ return sizeof(nal_hdr);
+}
+
+static int vpu_malone_insert_scode_vc1_l_seq(struct malone_scode_t *scode)
+{
+ int ret;
+ int size = 0;
+ u8 rcv_seqhdr[MALONE_VC1_RCV_SEQ_HEADER_LEN];
+
+ scode->need_data = 0;
+
+ ret = vpu_malone_insert_scode_seq(scode, MALONE_CODEC_ID_VC1_SIMPLE, sizeof(rcv_seqhdr));
+ if (ret < 0)
+ return ret;
+ size = ret;
+
+ set_vc1_rcv_seqhdr(rcv_seqhdr,
+ vb2_plane_vaddr(scode->vb, 0),
+ scode->inst->out_format.width,
+ scode->inst->out_format.height);
+ ret = vpu_helper_copy_to_stream_buffer(&scode->inst->stream_buffer,
+ &scode->wptr,
+ sizeof(rcv_seqhdr),
+ rcv_seqhdr);
+ if (ret < 0)
+ return ret;
+ size += sizeof(rcv_seqhdr);
+ return size;
+}
+
+static int vpu_malone_insert_scode_vc1_l_pic(struct malone_scode_t *scode)
+{
+ int ret;
+ int size = 0;
+ u8 rcv_pichdr[MALONE_VC1_RCV_PIC_HEADER_LEN];
+
+ ret = vpu_malone_insert_scode_pic(scode, MALONE_CODEC_ID_VC1_SIMPLE,
+ sizeof(rcv_pichdr));
+ if (ret < 0)
+ return ret;
+ size = ret;
+
+ set_vc1_rcv_pichdr(rcv_pichdr, vb2_get_plane_payload(scode->vb, 0));
+ ret = vpu_helper_copy_to_stream_buffer(&scode->inst->stream_buffer,
+ &scode->wptr,
+ sizeof(rcv_pichdr),
+ rcv_pichdr);
+ if (ret < 0)
+ return ret;
+ size += sizeof(rcv_pichdr);
+ return size;
+}
+
+static int vpu_malone_insert_scode_vp8_seq(struct malone_scode_t *scode)
+{
+ int ret;
+ int size = 0;
+ u8 ivf_hdr[MALONE_VP8_IVF_SEQ_HEADER_LEN];
+
+ ret = vpu_malone_insert_scode_seq(scode, MALONE_CODEC_ID_VP8, sizeof(ivf_hdr));
+ if (ret < 0)
+ return ret;
+ size = ret;
+
+ set_vp8_ivf_seqhdr(ivf_hdr,
+ scode->inst->out_format.width,
+ scode->inst->out_format.height);
+ ret = vpu_helper_copy_to_stream_buffer(&scode->inst->stream_buffer,
+ &scode->wptr,
+ sizeof(ivf_hdr),
+ ivf_hdr);
+ if (ret < 0)
+ return ret;
+ size += sizeof(ivf_hdr);
+
+ return size;
+}
+
+static int vpu_malone_insert_scode_vp8_pic(struct malone_scode_t *scode)
+{
+ int ret;
+ int size = 0;
+ u8 ivf_hdr[MALONE_VP8_IVF_FRAME_HEADER_LEN] = {0};
+
+ ret = vpu_malone_insert_scode_pic(scode, MALONE_CODEC_ID_VP8, sizeof(ivf_hdr));
+ if (ret < 0)
+ return ret;
+ size = ret;
+
+ set_vp8_ivf_pichdr(ivf_hdr, vb2_get_plane_payload(scode->vb, 0));
+ ret = vpu_helper_copy_to_stream_buffer(&scode->inst->stream_buffer,
+ &scode->wptr,
+ sizeof(ivf_hdr),
+ ivf_hdr);
+ if (ret < 0)
+ return ret;
+ size += sizeof(ivf_hdr);
+
+ return size;
+}
+
+static const struct malone_scode_handler scode_handlers[] = {
+ {
+ /* FIXME: need to swap return operation after gstreamer swap */
+ .pixelformat = V4L2_PIX_FMT_VC1_ANNEX_L,
+ .insert_scode_seq = vpu_malone_insert_scode_vc1_l_seq,
+ .insert_scode_pic = vpu_malone_insert_scode_vc1_l_pic,
+ },
+ {
+ .pixelformat = V4L2_PIX_FMT_VC1_ANNEX_G,
+ .insert_scode_pic = vpu_malone_insert_scode_vc1_g_pic,
+ },
+ {
+ .pixelformat = V4L2_PIX_FMT_VP8,
+ .insert_scode_seq = vpu_malone_insert_scode_vp8_seq,
+ .insert_scode_pic = vpu_malone_insert_scode_vp8_pic,
+ },
+};
+
+static const struct malone_scode_handler *get_scode_handler(u32 pixelformat)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(scode_handlers); i++) {
+ if (scode_handlers[i].pixelformat == pixelformat)
+ return &scode_handlers[i];
+ }
+
+ return NULL;
+}
+
+static int vpu_malone_insert_scode(struct malone_scode_t *scode, u32 type)
+{
+ const struct malone_scode_handler *handler;
+ int ret = 0;
+
+ if (!scode || !scode->inst || !scode->vb)
+ return 0;
+
+ scode->need_data = 1;
+ handler = get_scode_handler(scode->inst->out_format.pixfmt);
+ if (!handler)
+ return 0;
+
+ switch (type) {
+ case SCODE_SEQUENCE:
+ if (handler->insert_scode_seq)
+ ret = handler->insert_scode_seq(scode);
+ break;
+ case SCODE_PICTURE:
+ if (handler->insert_scode_pic)
+ ret = handler->insert_scode_pic(scode);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
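+ /*
+ * Frame-level input: insert sequence and picture start codes as the
+ * codec requires, copy the bitstream into the stream buffer, and
+ * append a buffer-flush scode when immediate display is requested.
+ */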
+static int vpu_malone_input_frame_data(struct vpu_malone_str_buffer __iomem *str_buf,
+ struct vpu_inst *inst, struct vb2_buffer *vb,
+ u32 disp_imm)
+{
+ struct malone_scode_t scode;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ u32 wptr = readl(&str_buf->wptr);
+ int size = 0;
+ int ret = 0;
+
+ /* add scode: SCODE_SEQUENCE, SCODE_PICTURE, SCODE_SLICE */
+ scode.inst = inst;
+ scode.vb = vb;
+ scode.wptr = wptr;
+ scode.need_data = 1;
+ if (vbuf->sequence == 0 || vpu_vb_is_codecconfig(vbuf))
+ ret = vpu_malone_insert_scode(&scode, SCODE_SEQUENCE);
+
+ if (ret < 0)
+ return -ENOMEM;
+ size += ret;
+ wptr = scode.wptr;
+ if (!scode.need_data) {
+ vpu_malone_update_wptr(str_buf, wptr);
+ return size;
+ }
+
+ ret = vpu_malone_insert_scode(&scode, SCODE_PICTURE);
+ if (ret < 0)
+ return -ENOMEM;
+ size += ret;
+ wptr = scode.wptr;
+
+ ret = vpu_helper_copy_to_stream_buffer(&inst->stream_buffer,
+ &wptr,
+ vb2_get_plane_payload(vb, 0),
+ vb2_plane_vaddr(vb, 0));
+ if (ret < 0)
+ return -ENOMEM;
+ size += vb2_get_plane_payload(vb, 0);
+
+ vpu_malone_update_wptr(str_buf, wptr);
+
+ if (disp_imm && !vpu_vb_is_codecconfig(vbuf)) {
+ ret = vpu_malone_add_scode(inst->core->iface,
+ inst->id,
+ &inst->stream_buffer,
+ inst->out_format.pixfmt,
+ SCODE_PADDING_BUFFLUSH);
+ if (ret < 0)
+ return ret;
+ size += ret;
+ }
+
+ return size;
+}
+
+static int vpu_malone_input_stream_data(struct vpu_malone_str_buffer __iomem *str_buf,
+ struct vpu_inst *inst, struct vb2_buffer *vb)
+{
+ u32 wptr = readl(&str_buf->wptr);
+ int ret = 0;
+
+ ret = vpu_helper_copy_to_stream_buffer(&inst->stream_buffer,
+ &wptr,
+ vb2_get_plane_payload(vb, 0),
+ vb2_plane_vaddr(vb, 0));
+ if (ret < 0)
+ return -ENOMEM;
+
+ vpu_malone_update_wptr(str_buf, wptr);
+
+ return ret;
+}
+
+static int vpu_malone_input_ts(struct vpu_inst *inst, s64 timestamp, u32 size)
+{
+ struct vpu_ts_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.timestamp = timestamp;
+ info.size = size;
+
+ return vpu_session_fill_timestamp(inst, &info);
+}
+
+int vpu_malone_input_frame(struct vpu_shared_addr *shared,
+ struct vpu_inst *inst, struct vb2_buffer *vb)
+{
+ struct vpu_dec_ctrl *hc = shared->priv;
+ struct vb2_v4l2_buffer *vbuf;
+ struct vpu_malone_str_buffer __iomem *str_buf = hc->str_buf[inst->id];
+ u32 disp_imm = hc->codec_param[inst->id].disp_imm;
+ u32 size;
+ int ret;
+
+ if (vpu_malone_is_non_frame_mode(shared, inst->id))
+ ret = vpu_malone_input_stream_data(str_buf, inst, vb);
+ else
+ ret = vpu_malone_input_frame_data(str_buf, inst, vb, disp_imm);
+ if (ret < 0)
+ return ret;
+ size = ret;
+
+ /*
+ * If the buffer only contains codec data and the timestamp is
+ * invalid, don't send the invalid timestamp to resync; merge the
+ * data into the next frame instead.
+ */
+ vbuf = to_vb2_v4l2_buffer(vb);
+ if (vpu_vb_is_codecconfig(vbuf) && (s64)vb->timestamp < 0) {
+ inst->extra_size += size;
+ return 0;
+ }
+ if (inst->extra_size) {
+ size += inst->extra_size;
+ inst->extra_size = 0;
+ }
+
+ ret = vpu_malone_input_ts(inst, vb->timestamp, size);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
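+ /*
+ * Treat the command ring as ready while it is less than half full;
+ * vpu_malone_is_ready() polls this for up to roughly 30 ms.
+ */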
+static bool vpu_malone_check_ready(struct vpu_shared_addr *shared, u32 instance)
+{
+ struct malone_iface *iface = shared->iface;
+ struct vpu_rpc_buffer_desc *desc = &iface->api_cmd_buffer_desc[instance];
+ u32 size = desc->end - desc->start;
+ u32 rptr = desc->rptr;
+ u32 wptr = desc->wptr;
+ u32 used;
+
+ if (!size)
+ return true;
+
+ used = (wptr + size - rptr) % size;
+ if (used < (size / 2))
+ return true;
+
+ return false;
+}
+
+bool vpu_malone_is_ready(struct vpu_shared_addr *shared, u32 instance)
+{
+ u32 cnt = 0;
+
+ while (!vpu_malone_check_ready(shared, instance)) {
+ if (cnt > 30)
+ return false;
+ mdelay(1);
+ cnt++;
+ }
+ return true;
+}
+
+int vpu_malone_pre_cmd(struct vpu_shared_addr *shared, u32 instance)
+{
+ if (!vpu_malone_is_ready(shared, instance))
+ return -EINVAL;
+
+ return 0;
+}
+
+int vpu_malone_post_cmd(struct vpu_shared_addr *shared, u32 instance)
+{
+ struct malone_iface *iface = shared->iface;
+ struct vpu_rpc_buffer_desc *desc = &iface->api_cmd_buffer_desc[instance];
+
+ desc->wptr++;
+ if (desc->wptr == desc->end)
+ desc->wptr = desc->start;
+
+ return 0;
+}
+
+int vpu_malone_init_instance(struct vpu_shared_addr *shared, u32 instance)
+{
+ struct malone_iface *iface = shared->iface;
+ struct vpu_rpc_buffer_desc *desc = &iface->api_cmd_buffer_desc[instance];
+
+ desc->wptr = desc->rptr;
+ if (desc->wptr == desc->end)
+ desc->wptr = desc->start;
+
+ return 0;
+}
+
+u32 vpu_malone_get_max_instance_count(struct vpu_shared_addr *shared)
+{
+ struct malone_iface *iface = shared->iface;
+
+ return iface->max_streams;
+}
diff --git a/drivers/media/platform/amphion/vpu_malone.h b/drivers/media/platform/amphion/vpu_malone.h
new file mode 100644
index 000000000000..e5a5cbe9843e
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_malone.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#ifndef _AMPHION_VPU_MALONE_H
+#define _AMPHION_VPU_MALONE_H
+
+u32 vpu_malone_get_data_size(void);
+void vpu_malone_init_rpc(struct vpu_shared_addr *shared,
+ struct vpu_buffer *rpc, dma_addr_t boot_addr);
+void vpu_malone_set_log_buf(struct vpu_shared_addr *shared,
+ struct vpu_buffer *log);
+void vpu_malone_set_system_cfg(struct vpu_shared_addr *shared,
+ u32 regs_base, void __iomem *regs, u32 core_id);
+u32 vpu_malone_get_version(struct vpu_shared_addr *shared);
+int vpu_malone_get_stream_buffer_size(struct vpu_shared_addr *shared);
+int vpu_malone_config_stream_buffer(struct vpu_shared_addr *shared,
+ u32 instance, struct vpu_buffer *buf);
+int vpu_malone_get_stream_buffer_desc(struct vpu_shared_addr *shared,
+ u32 instance,
+ struct vpu_rpc_buffer_desc *desc);
+int vpu_malone_update_stream_buffer(struct vpu_shared_addr *shared,
+ u32 instance, u32 ptr, bool write);
+int vpu_malone_set_decode_params(struct vpu_shared_addr *shared,
+ u32 instance,
+ struct vpu_decode_params *params, u32 update);
+int vpu_malone_pack_cmd(struct vpu_rpc_event *pkt, u32 index, u32 id, void *data);
+int vpu_malone_convert_msg_id(u32 msg_id);
+int vpu_malone_unpack_msg_data(struct vpu_rpc_event *pkt, void *data);
+int vpu_malone_add_scode(struct vpu_shared_addr *shared,
+ u32 instance,
+ struct vpu_buffer *stream_buffer,
+ u32 pixelformat,
+ u32 scode_type);
+int vpu_malone_input_frame(struct vpu_shared_addr *shared,
+ struct vpu_inst *inst, struct vb2_buffer *vb);
+bool vpu_malone_is_ready(struct vpu_shared_addr *shared, u32 instance);
+int vpu_malone_pre_cmd(struct vpu_shared_addr *shared, u32 instance);
+int vpu_malone_post_cmd(struct vpu_shared_addr *shared, u32 instance);
+int vpu_malone_init_instance(struct vpu_shared_addr *shared, u32 instance);
+u32 vpu_malone_get_max_instance_count(struct vpu_shared_addr *shared);
+
+#endif
diff --git a/drivers/media/platform/amphion/vpu_mbox.c b/drivers/media/platform/amphion/vpu_mbox.c
new file mode 100644
index 000000000000..bf759eb2fd46
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_mbox.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#include <linux/init.h>
+#include <linux/interconnect.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include "vpu.h"
+#include "vpu_mbox.h"
+#include "vpu_msgs.h"
+
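+ /*
+ * Mailbox rx callback: forward the received 32-bit word to the common
+ * interrupt path in vpu_isr().
+ */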
+static void vpu_mbox_rx_callback(struct mbox_client *cl, void *msg)
+{
+ struct vpu_mbox *rx = container_of(cl, struct vpu_mbox, cl);
+ struct vpu_core *core = container_of(rx, struct vpu_core, rx);
+
+ vpu_isr(core, *(u32 *)msg);
+}
+
+static int vpu_mbox_request_channel(struct device *dev, struct vpu_mbox *mbox)
+{
+ struct mbox_chan *ch;
+ struct mbox_client *cl;
+
+ if (!dev || !mbox)
+ return -EINVAL;
+ if (mbox->ch)
+ return 0;
+
+ cl = &mbox->cl;
+ cl->dev = dev;
+ if (mbox->block) {
+ cl->tx_block = true;
+ cl->tx_tout = 1000;
+ } else {
+ cl->tx_block = false;
+ }
+ cl->knows_txdone = false;
+ cl->rx_callback = vpu_mbox_rx_callback;
+
+ ch = mbox_request_channel_byname(cl, mbox->name);
+ if (IS_ERR(ch)) {
+ dev_err(dev, "Failed to request mbox chan %s, ret: %ld\n",
+ mbox->name, PTR_ERR(ch));
+ return PTR_ERR(ch);
+ }
+
+ mbox->ch = ch;
+ return 0;
+}
+
+int vpu_mbox_init(struct vpu_core *core)
+{
+ scnprintf(core->tx_type.name, sizeof(core->tx_type.name) - 1, "tx0");
+ core->tx_type.block = true;
+
+ scnprintf(core->tx_data.name, sizeof(core->tx_data.name) - 1, "tx1");
+ core->tx_data.block = false;
+
+ scnprintf(core->rx.name, sizeof(core->rx.name) - 1, "rx");
+ core->rx.block = true;
+
+ return 0;
+}
+
+int vpu_mbox_request(struct vpu_core *core)
+{
+ int ret;
+
+ ret = vpu_mbox_request_channel(core->dev, &core->tx_type);
+ if (ret)
+ goto error;
+ ret = vpu_mbox_request_channel(core->dev, &core->tx_data);
+ if (ret)
+ goto error;
+ ret = vpu_mbox_request_channel(core->dev, &core->rx);
+ if (ret)
+ goto error;
+
+ dev_dbg(core->dev, "%s request mbox\n", vpu_core_type_desc(core->type));
+ return 0;
+error:
+ vpu_mbox_free(core);
+ return ret;
+}
+
+void vpu_mbox_free(struct vpu_core *core)
+{
+ mbox_free_channel(core->tx_type.ch);
+ mbox_free_channel(core->tx_data.ch);
+ mbox_free_channel(core->rx.ch);
+ core->tx_type.ch = NULL;
+ core->tx_data.ch = NULL;
+ core->rx.ch = NULL;
+ dev_dbg(core->dev, "%s free mbox\n", vpu_core_type_desc(core->type));
+}
+
+void vpu_mbox_send_type(struct vpu_core *core, u32 type)
+{
+ mbox_send_message(core->tx_type.ch, &type);
+}
+
+void vpu_mbox_send_msg(struct vpu_core *core, u32 type, u32 data)
+{
+ mbox_send_message(core->tx_data.ch, &data);
+ mbox_send_message(core->tx_type.ch, &type);
+}
+
+void vpu_mbox_enable_rx(struct vpu_dev *dev)
+{
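+ /* nothing to do: rx delivery is handled by the mailbox framework */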
+}
diff --git a/drivers/media/platform/amphion/vpu_mbox.h b/drivers/media/platform/amphion/vpu_mbox.h
new file mode 100644
index 000000000000..79cfd874e92b
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_mbox.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#ifndef _AMPHION_VPU_MBOX_H
+#define _AMPHION_VPU_MBOX_H
+
+int vpu_mbox_init(struct vpu_core *core);
+int vpu_mbox_request(struct vpu_core *core);
+void vpu_mbox_free(struct vpu_core *core);
+void vpu_mbox_send_msg(struct vpu_core *core, u32 type, u32 data);
+void vpu_mbox_send_type(struct vpu_core *core, u32 type);
+void vpu_mbox_enable_rx(struct vpu_dev *dev);
+
+#endif
diff --git a/drivers/media/platform/amphion/vpu_msgs.c b/drivers/media/platform/amphion/vpu_msgs.c
new file mode 100644
index 000000000000..58502c51ddb3
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_msgs.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#include <linux/init.h>
+#include <linux/interconnect.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "vpu.h"
+#include "vpu_core.h"
+#include "vpu_rpc.h"
+#include "vpu_mbox.h"
+#include "vpu_defs.h"
+#include "vpu_cmds.h"
+#include "vpu_msgs.h"
+#include "vpu_v4l2.h"
+
+#define VPU_PKT_HEADER_LENGTH 3
+
+struct vpu_msg_handler {
+ u32 id;
+ void (*done)(struct vpu_inst *inst, struct vpu_rpc_event *pkt);
+};
+
+static void vpu_session_handle_start_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+{
+ vpu_trace(inst->dev, "[%d]\n", inst->id);
+}
+
+static void vpu_session_handle_mem_request(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+{
+ struct vpu_pkt_mem_req_data req_data;
+
+ vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&req_data);
+ vpu_trace(inst->dev, "[%d] %d:%d %d:%d %d:%d\n",
+ inst->id,
+ req_data.enc_frame_size,
+ req_data.enc_frame_num,
+ req_data.ref_frame_size,
+ req_data.ref_frame_num,
+ req_data.act_buf_size,
+ req_data.act_buf_num);
+ call_void_vop(inst, mem_request,
+ req_data.enc_frame_size,
+ req_data.enc_frame_num,
+ req_data.ref_frame_size,
+ req_data.ref_frame_num,
+ req_data.act_buf_size,
+ req_data.act_buf_num);
+}
+
+static void vpu_session_handle_stop_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+{
+ vpu_trace(inst->dev, "[%d]\n", inst->id);
+
+ call_void_vop(inst, stop_done);
+}
+
+static void vpu_session_handle_seq_hdr(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+{
+ struct vpu_dec_codec_info info;
+ const struct vpu_core_resources *res;
+
+ memset(&info, 0, sizeof(info));
+ res = vpu_get_resource(inst);
+ info.stride = res ? res->stride : 1;
+ vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
+ call_void_vop(inst, event_notify, VPU_MSG_ID_SEQ_HDR_FOUND, &info);
+}
+
+static void vpu_session_handle_resolution_change(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+{
+ call_void_vop(inst, event_notify, VPU_MSG_ID_RES_CHANGE, NULL);
+}
+
+static void vpu_session_handle_enc_frame_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+{
+ struct vpu_enc_pic_info info;
+
+ vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
+ dev_dbg(inst->dev, "[%d] frame id = %d, wptr = 0x%x, size = %d\n",
+ inst->id, info.frame_id, info.wptr, info.frame_size);
+ call_void_vop(inst, get_one_frame, &info);
+}
+
+static void vpu_session_handle_frame_request(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+{
+ struct vpu_fs_info fs;
+
+ vpu_iface_unpack_msg_data(inst->core, pkt, &fs);
+ call_void_vop(inst, event_notify, VPU_MSG_ID_FRAME_REQ, &fs);
+}
+
+static void vpu_session_handle_frame_release(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+{
+ if (inst->core->type == VPU_CORE_TYPE_ENC) {
+ struct vpu_frame_info info;
+
+ memset(&info, 0, sizeof(info));
+ vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info.sequence);
+ dev_dbg(inst->dev, "[%d] %d\n", inst->id, info.sequence);
+ info.type = inst->out_format.type;
+ call_void_vop(inst, buf_done, &info);
+ } else if (inst->core->type == VPU_CORE_TYPE_DEC) {
+ struct vpu_fs_info fs;
+
+ vpu_iface_unpack_msg_data(inst->core, pkt, &fs);
+ call_void_vop(inst, event_notify, VPU_MSG_ID_FRAME_RELEASE, &fs);
+ }
+}
+
+static void vpu_session_handle_input_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+{
+ dev_dbg(inst->dev, "[%d]\n", inst->id);
+ call_void_vop(inst, input_done);
+}
+
+static void vpu_session_handle_pic_decoded(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+{
+ struct vpu_dec_pic_info info;
+
+ vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
+ call_void_vop(inst, get_one_frame, &info);
+}
+
+static void vpu_session_handle_pic_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+{
+ struct vpu_dec_pic_info info;
+ struct vpu_frame_info frame;
+
+ memset(&frame, 0, sizeof(frame));
+ vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
+ if (inst->core->type == VPU_CORE_TYPE_DEC)
+ frame.type = inst->cap_format.type;
+ frame.id = info.id;
+ frame.luma = info.luma;
+ frame.skipped = info.skipped;
+ frame.timestamp = info.timestamp;
+
+ call_void_vop(inst, buf_done, &frame);
+}
+
+static void vpu_session_handle_eos(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+{
+ call_void_vop(inst, event_notify, VPU_MSG_ID_PIC_EOS, NULL);
+}
+
+static void vpu_session_handle_error(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+{
+ dev_err(inst->dev, "unsupported stream\n");
+ call_void_vop(inst, event_notify, VPU_MSG_ID_UNSUPPORTED, NULL);
+ vpu_v4l2_set_error(inst);
+}
+
+static void vpu_session_handle_firmware_xcpt(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+{
+ char *str = (char *)pkt->data;
+
+ dev_err(inst->dev, "%s firmware xcpt: %s\n",
+ vpu_core_type_desc(inst->core->type), str);
+ call_void_vop(inst, event_notify, VPU_MSG_ID_FIRMWARE_XCPT, NULL);
+ set_bit(inst->id, &inst->core->hang_mask);
+ vpu_v4l2_set_error(inst);
+}
+
+static struct vpu_msg_handler handlers[] = {
+ {VPU_MSG_ID_START_DONE, vpu_session_handle_start_done},
+ {VPU_MSG_ID_STOP_DONE, vpu_session_handle_stop_done},
+ {VPU_MSG_ID_MEM_REQUEST, vpu_session_handle_mem_request},
+ {VPU_MSG_ID_SEQ_HDR_FOUND, vpu_session_handle_seq_hdr},
+ {VPU_MSG_ID_RES_CHANGE, vpu_session_handle_resolution_change},
+ {VPU_MSG_ID_FRAME_INPUT_DONE, vpu_session_handle_input_done},
+ {VPU_MSG_ID_FRAME_REQ, vpu_session_handle_frame_request},
+ {VPU_MSG_ID_FRAME_RELEASE, vpu_session_handle_frame_release},
+ {VPU_MSG_ID_ENC_DONE, vpu_session_handle_enc_frame_done},
+ {VPU_MSG_ID_PIC_DECODED, vpu_session_handle_pic_decoded},
+ {VPU_MSG_ID_DEC_DONE, vpu_session_handle_pic_done},
+ {VPU_MSG_ID_PIC_EOS, vpu_session_handle_eos},
+ {VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error},
+ {VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt},
+};
+
+static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *msg)
+{
+ int ret;
+ u32 msg_id;
+ struct vpu_msg_handler *handler = NULL;
+ unsigned int i;
+
+ ret = vpu_iface_convert_msg_id(inst->core, msg->hdr.id);
+ if (ret < 0)
+ return -EINVAL;
+
+ msg_id = ret;
+ dev_dbg(inst->dev, "[%d] receive event(0x%x)\n", inst->id, msg_id);
+
+ for (i = 0; i < ARRAY_SIZE(handlers); i++) {
+ if (handlers[i].id == msg_id) {
+ handler = &handlers[i];
+ break;
+ }
+ }
+
+ if (handler && handler->done)
+ handler->done(inst, msg);
+
+ vpu_response_cmd(inst, msg_id, 1);
+
+ return 0;
+}
+
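+ /*
+ * Messages are staged in the instance kfifo as a header followed by
+ * hdr.num data words; read the header first, then the payload.
+ */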
+static bool vpu_inst_receive_msg(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+{
+ unsigned long bytes = sizeof(struct vpu_rpc_event_header);
+ u32 ret;
+
+ memset(pkt, 0, sizeof(*pkt));
+ if (kfifo_len(&inst->msg_fifo) < bytes)
+ return false;
+
+ ret = kfifo_out(&inst->msg_fifo, pkt, bytes);
+ if (ret != bytes)
+ return false;
+
+ if (pkt->hdr.num > 0) {
+ bytes = pkt->hdr.num * sizeof(u32);
+ ret = kfifo_out(&inst->msg_fifo, pkt->data, bytes);
+ if (ret != bytes)
+ return false;
+ }
+
+ return true;
+}
+
+void vpu_inst_run_work(struct work_struct *work)
+{
+ struct vpu_inst *inst = container_of(work, struct vpu_inst, msg_work);
+ struct vpu_rpc_event pkt;
+
+ while (vpu_inst_receive_msg(inst, &pkt))
+ vpu_session_handle_msg(inst, &pkt);
+}
+
+static void vpu_inst_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+{
+ unsigned long bytes;
+ u32 id = pkt->hdr.id;
+ int ret;
+
+ if (!inst->workqueue)
+ return;
+
+ bytes = sizeof(pkt->hdr) + pkt->hdr.num * sizeof(u32);
+ ret = kfifo_in(&inst->msg_fifo, pkt, bytes);
+ if (ret != bytes)
+ dev_err(inst->dev, "[%d:%d] overflow: %d\n", inst->core->id, inst->id, id);
+ queue_work(inst->workqueue, &inst->msg_work);
+}
+
+static int vpu_handle_msg(struct vpu_core *core)
+{
+ struct vpu_rpc_event pkt;
+ struct vpu_inst *inst;
+ int ret;
+
+ memset(&pkt, 0, sizeof(pkt));
+ while (!vpu_iface_receive_msg(core, &pkt)) {
+ dev_dbg(core->dev, "event index = %d, id = %d, num = %d\n",
+ pkt.hdr.index, pkt.hdr.id, pkt.hdr.num);
+
+ ret = vpu_iface_convert_msg_id(core, pkt.hdr.id);
+ if (ret < 0)
+ continue;
+
+ inst = vpu_core_find_instance(core, pkt.hdr.index);
+ if (inst) {
+ vpu_response_cmd(inst, ret, 0);
+ mutex_lock(&core->cmd_lock);
+ vpu_inst_record_flow(inst, ret);
+ mutex_unlock(&core->cmd_lock);
+
+ vpu_inst_handle_msg(inst, &pkt);
+ vpu_inst_put(inst);
+ }
+ memset(&pkt, 0, sizeof(pkt));
+ }
+
+ return 0;
+}
+
+static int vpu_isr_thread(struct vpu_core *core, u32 irq_code)
+{
+ dev_dbg(core->dev, "irq code = 0x%x\n", irq_code);
+ switch (irq_code) {
+ case VPU_IRQ_CODE_SYNC:
+ vpu_mbox_send_msg(core, PRC_BUF_OFFSET, core->rpc.phys - core->fw.phys);
+ vpu_mbox_send_msg(core, BOOT_ADDRESS, core->fw.phys);
+ vpu_mbox_send_msg(core, INIT_DONE, 2);
+ break;
+ case VPU_IRQ_CODE_BOOT_DONE:
+ break;
+ case VPU_IRQ_CODE_SNAPSHOT_DONE:
+ break;
+ default:
+ vpu_handle_msg(core);
+ break;
+ }
+
+ return 0;
+}
+
+static void vpu_core_run_msg_work(struct vpu_core *core)
+{
+ const unsigned int SIZE = sizeof(u32);
+
+ while (kfifo_len(&core->msg_fifo) >= SIZE) {
+ u32 data = 0;
+
+ if (kfifo_out(&core->msg_fifo, &data, SIZE) == SIZE)
+ vpu_isr_thread(core, data);
+ }
+}
+
+void vpu_msg_run_work(struct work_struct *work)
+{
+ struct vpu_core *core = container_of(work, struct vpu_core, msg_work);
+ unsigned long delay = msecs_to_jiffies(10);
+
+ vpu_core_run_msg_work(core);
+ queue_delayed_work(core->workqueue, &core->msg_delayed_work, delay);
+}
+
+void vpu_msg_delayed_work(struct work_struct *work)
+{
+ struct vpu_core *core;
+ struct delayed_work *dwork;
+ unsigned long bytes = sizeof(u32);
+ u32 i;
+
+ if (!work)
+ return;
+
+ dwork = to_delayed_work(work);
+ core = container_of(dwork, struct vpu_core, msg_delayed_work);
+ if (kfifo_len(&core->msg_fifo) >= bytes)
+ vpu_core_run_msg_work(core);
+
+ bytes = sizeof(struct vpu_rpc_event_header);
+ for (i = 0; i < core->supported_instance_count; i++) {
+ struct vpu_inst *inst = vpu_core_find_instance(core, i);
+
+ if (!inst)
+ continue;
+
+ if (inst->workqueue && kfifo_len(&inst->msg_fifo) >= bytes)
+ queue_work(inst->workqueue, &inst->msg_work);
+
+ vpu_inst_put(inst);
+ }
+}
+
+int vpu_isr(struct vpu_core *core, u32 irq)
+{
+ switch (irq) {
+ case VPU_IRQ_CODE_SYNC:
+ break;
+ case VPU_IRQ_CODE_BOOT_DONE:
+ complete(&core->cmp);
+ break;
+ case VPU_IRQ_CODE_SNAPSHOT_DONE:
+ complete(&core->cmp);
+ break;
+ default:
+ break;
+ }
+
+ if (kfifo_in(&core->msg_fifo, &irq, sizeof(irq)) != sizeof(irq))
+ dev_err(core->dev, "[%d] overflow: %d\n", core->id, irq);
+ queue_work(core->workqueue, &core->msg_work);
+
+ return 0;
+}
diff --git a/drivers/media/platform/amphion/vpu_msgs.h b/drivers/media/platform/amphion/vpu_msgs.h
new file mode 100644
index 000000000000..c466b4f62aad
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_msgs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#ifndef _AMPHION_VPU_MSGS_H
+#define _AMPHION_VPU_MSGS_H
+
+int vpu_isr(struct vpu_core *core, u32 irq);
+void vpu_inst_run_work(struct work_struct *work);
+void vpu_msg_run_work(struct work_struct *work);
+void vpu_msg_delayed_work(struct work_struct *work);
+
+#endif
diff --git a/drivers/media/platform/amphion/vpu_rpc.c b/drivers/media/platform/amphion/vpu_rpc.c
new file mode 100644
index 000000000000..18a164766409
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_rpc.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#include <linux/init.h>
+#include <linux/interconnect.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/svc/misc.h>
+#include "vpu.h"
+#include "vpu_rpc.h"
+#include "vpu_imx8q.h"
+#include "vpu_windsor.h"
+#include "vpu_malone.h"
+
+int vpu_iface_check_memory_region(struct vpu_core *core, dma_addr_t addr, u32 size)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(core);
+
+ if (!ops || !ops->check_memory_region)
+ return VPU_CORE_MEMORY_INVALID;
+
+ return ops->check_memory_region(core->fw.phys, addr, size);
+}
+
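+ /*
+ * Ring-buffer occupancy helper: returns the free byte count for writes
+ * and the used byte count for reads. Equal pointers are treated as an
+ * empty ring, so vpu_rpc_send_cmd_buf() keeps 16 bytes of slack to stop
+ * the write pointer from wrapping onto the read pointer.
+ */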
+static u32 vpu_rpc_check_buffer_space(struct vpu_rpc_buffer_desc *desc, bool write)
+{
+ u32 ptr1;
+ u32 ptr2;
+ u32 size;
+
+ size = desc->end - desc->start;
+ if (write) {
+ ptr1 = desc->wptr;
+ ptr2 = desc->rptr;
+ } else {
+ ptr1 = desc->rptr;
+ ptr2 = desc->wptr;
+ }
+
+ if (ptr1 == ptr2) {
+ if (!write)
+ return 0;
+ else
+ return size;
+ }
+
+ return (ptr2 + size - ptr1) % size;
+}
+
+static int vpu_rpc_send_cmd_buf(struct vpu_shared_addr *shared, struct vpu_rpc_event *cmd)
+{
+ struct vpu_rpc_buffer_desc *desc;
+ u32 space = 0;
+ u32 *data;
+ u32 wptr;
+ u32 i;
+
+ if (cmd->hdr.num > 0xff || cmd->hdr.num >= ARRAY_SIZE(cmd->data))
+ return -EINVAL;
+ desc = shared->cmd_desc;
+ space = vpu_rpc_check_buffer_space(desc, true);
+ if (space < (((cmd->hdr.num + 1) << 2) + 16))
+ return -EINVAL;
+ wptr = desc->wptr;
+ data = (u32 *)(shared->cmd_mem_vir + desc->wptr - desc->start);
+ *data = 0;
+ *data |= ((cmd->hdr.index & 0xff) << 24);
+ *data |= ((cmd->hdr.num & 0xff) << 16);
+ *data |= (cmd->hdr.id & 0x3fff);
+ wptr += 4;
+ data++;
+ if (wptr >= desc->end) {
+ wptr = desc->start;
+ data = shared->cmd_mem_vir;
+ }
+
+ for (i = 0; i < cmd->hdr.num; i++) {
+ *data = cmd->data[i];
+ wptr += 4;
+ data++;
+ if (wptr >= desc->end) {
+ wptr = desc->start;
+ data = shared->cmd_mem_vir;
+ }
+ }
+
+ /* update wptr after data is written */
+ mb();
+ desc->wptr = wptr;
+
+ return 0;
+}
+
+static bool vpu_rpc_check_msg(struct vpu_shared_addr *shared)
+{
+ struct vpu_rpc_buffer_desc *desc;
+ u32 space = 0;
+ u32 msgword;
+ u32 msgnum;
+
+ desc = shared->msg_desc;
+ space = vpu_rpc_check_buffer_space(desc, 0);
+ space = (space >> 2);
+
+ if (space) {
+ msgword = *(u32 *)(shared->msg_mem_vir + desc->rptr - desc->start);
+ msgnum = (msgword & 0xff0000) >> 16;
+ if (msgnum <= space)
+ return true;
+ }
+
+ return false;
+}
+
+static int vpu_rpc_receive_msg_buf(struct vpu_shared_addr *shared, struct vpu_rpc_event *msg)
+{
+ struct vpu_rpc_buffer_desc *desc;
+ u32 *data;
+ u32 msgword;
+ u32 rptr;
+ u32 i;
+
+ if (!vpu_rpc_check_msg(shared))
+ return -EINVAL;
+
+ desc = shared->msg_desc;
+ data = (u32 *)(shared->msg_mem_vir + desc->rptr - desc->start);
+ rptr = desc->rptr;
+ msgword = *data;
+ data++;
+ rptr += 4;
+ if (rptr >= desc->end) {
+ rptr = desc->start;
+ data = shared->msg_mem_vir;
+ }
+
+ msg->hdr.index = (msgword >> 24) & 0xff;
+ msg->hdr.num = (msgword >> 16) & 0xff;
+ msg->hdr.id = msgword & 0x3fff;
+
+ if (msg->hdr.num > ARRAY_SIZE(msg->data))
+ return -EINVAL;
+
+ for (i = 0; i < msg->hdr.num; i++) {
+ msg->data[i] = *data;
+ data++;
+ rptr += 4;
+ if (rptr >= desc->end) {
+ rptr = desc->start;
+ data = shared->msg_mem_vir;
+ }
+ }
+
+ /* update rptr after data is read */
+ mb();
+ desc->rptr = rptr;
+
+ return 0;
+}
+
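+ /*
+ * Per-core-type ops tables for i.MX8Q: both entries share the common
+ * imx8q and rpc helpers, with the Windsor backend for the encoder and
+ * the Malone backend for the decoder.
+ */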
+static struct vpu_iface_ops imx8q_rpc_ops[] = {
+ [VPU_CORE_TYPE_ENC] = {
+ .check_codec = vpu_imx8q_check_codec,
+ .check_fmt = vpu_imx8q_check_fmt,
+ .boot_core = vpu_imx8q_boot_core,
+ .get_power_state = vpu_imx8q_get_power_state,
+ .on_firmware_loaded = vpu_imx8q_on_firmware_loaded,
+ .get_data_size = vpu_windsor_get_data_size,
+ .check_memory_region = vpu_imx8q_check_memory_region,
+ .init_rpc = vpu_windsor_init_rpc,
+ .set_log_buf = vpu_windsor_set_log_buf,
+ .set_system_cfg = vpu_windsor_set_system_cfg,
+ .get_version = vpu_windsor_get_version,
+ .send_cmd_buf = vpu_rpc_send_cmd_buf,
+ .receive_msg_buf = vpu_rpc_receive_msg_buf,
+ .pack_cmd = vpu_windsor_pack_cmd,
+ .convert_msg_id = vpu_windsor_convert_msg_id,
+ .unpack_msg_data = vpu_windsor_unpack_msg_data,
+ .config_memory_resource = vpu_windsor_config_memory_resource,
+ .get_stream_buffer_size = vpu_windsor_get_stream_buffer_size,
+ .config_stream_buffer = vpu_windsor_config_stream_buffer,
+ .get_stream_buffer_desc = vpu_windsor_get_stream_buffer_desc,
+ .update_stream_buffer = vpu_windsor_update_stream_buffer,
+ .set_encode_params = vpu_windsor_set_encode_params,
+ .input_frame = vpu_windsor_input_frame,
+ .get_max_instance_count = vpu_windsor_get_max_instance_count,
+ },
+ [VPU_CORE_TYPE_DEC] = {
+ .check_codec = vpu_imx8q_check_codec,
+ .check_fmt = vpu_imx8q_check_fmt,
+ .boot_core = vpu_imx8q_boot_core,
+ .get_power_state = vpu_imx8q_get_power_state,
+ .on_firmware_loaded = vpu_imx8q_on_firmware_loaded,
+ .get_data_size = vpu_malone_get_data_size,
+ .check_memory_region = vpu_imx8q_check_memory_region,
+ .init_rpc = vpu_malone_init_rpc,
+ .set_log_buf = vpu_malone_set_log_buf,
+ .set_system_cfg = vpu_malone_set_system_cfg,
+ .get_version = vpu_malone_get_version,
+ .send_cmd_buf = vpu_rpc_send_cmd_buf,
+ .receive_msg_buf = vpu_rpc_receive_msg_buf,
+ .get_stream_buffer_size = vpu_malone_get_stream_buffer_size,
+ .config_stream_buffer = vpu_malone_config_stream_buffer,
+ .set_decode_params = vpu_malone_set_decode_params,
+ .pack_cmd = vpu_malone_pack_cmd,
+ .convert_msg_id = vpu_malone_convert_msg_id,
+ .unpack_msg_data = vpu_malone_unpack_msg_data,
+ .get_stream_buffer_desc = vpu_malone_get_stream_buffer_desc,
+ .update_stream_buffer = vpu_malone_update_stream_buffer,
+ .add_scode = vpu_malone_add_scode,
+ .input_frame = vpu_malone_input_frame,
+ .pre_send_cmd = vpu_malone_pre_cmd,
+ .post_send_cmd = vpu_malone_post_cmd,
+ .init_instance = vpu_malone_init_instance,
+ .get_max_instance_count = vpu_malone_get_max_instance_count,
+ },
+};
+
+static struct vpu_iface_ops *vpu_get_iface(struct vpu_dev *vpu, enum vpu_core_type type)
+{
+ struct vpu_iface_ops *rpc_ops = NULL;
+ u32 size = 0;
+
+ switch (vpu->res->plat_type) {
+ case IMX8QXP:
+ case IMX8QM:
+ rpc_ops = imx8q_rpc_ops;
+ size = ARRAY_SIZE(imx8q_rpc_ops);
+ break;
+ default:
+ return NULL;
+ }
+
+ if (type >= size)
+ return NULL;
+
+ return &rpc_ops[type];
+}
+
+struct vpu_iface_ops *vpu_core_get_iface(struct vpu_core *core)
+{
+ return vpu_get_iface(core->vpu, core->type);
+}
+
+struct vpu_iface_ops *vpu_inst_get_iface(struct vpu_inst *inst)
+{
+ if (inst->core)
+ return vpu_core_get_iface(inst->core);
+
+ return vpu_get_iface(inst->vpu, inst->type);
+}
diff --git a/drivers/media/platform/amphion/vpu_rpc.h b/drivers/media/platform/amphion/vpu_rpc.h
new file mode 100644
index 000000000000..25119e5e807e
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_rpc.h
@@ -0,0 +1,461 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#ifndef _AMPHION_VPU_RPC_H
+#define _AMPHION_VPU_RPC_H
+
+#include <media/videobuf2-core.h>
+#include "vpu_codec.h"
+
+struct vpu_rpc_buffer_desc {
+ u32 wptr;
+ u32 rptr;
+ u32 start;
+ u32 end;
+};
+
+struct vpu_shared_addr {
+ void *iface;
+ struct vpu_rpc_buffer_desc *cmd_desc;
+ void *cmd_mem_vir;
+ struct vpu_rpc_buffer_desc *msg_desc;
+ void *msg_mem_vir;
+
+ unsigned long boot_addr;
+ struct vpu_core *core;
+ void *priv;
+};
+
+struct vpu_rpc_event_header {
+ u32 index;
+ u32 id;
+ u32 num;
+};
+
+struct vpu_rpc_event {
+ struct vpu_rpc_event_header hdr;
+ u32 data[128];
+};
+
+struct vpu_iface_ops {
+ bool (*check_codec)(enum vpu_core_type type);
+ bool (*check_fmt)(enum vpu_core_type type, u32 pixelfmt);
+ u32 (*get_data_size)(void);
+ int (*check_memory_region)(dma_addr_t base, dma_addr_t addr, u32 size);
+ int (*boot_core)(struct vpu_core *core);
+ int (*shutdown_core)(struct vpu_core *core);
+ int (*restore_core)(struct vpu_core *core);
+ int (*get_power_state)(struct vpu_core *core);
+ int (*on_firmware_loaded)(struct vpu_core *core);
+ void (*init_rpc)(struct vpu_shared_addr *shared,
+ struct vpu_buffer *rpc, dma_addr_t boot_addr);
+ void (*set_log_buf)(struct vpu_shared_addr *shared,
+ struct vpu_buffer *log);
+ void (*set_system_cfg)(struct vpu_shared_addr *shared,
+ u32 regs_base, void __iomem *regs, u32 index);
+ void (*set_stream_cfg)(struct vpu_shared_addr *shared, u32 index);
+ u32 (*get_version)(struct vpu_shared_addr *shared);
+ u32 (*get_max_instance_count)(struct vpu_shared_addr *shared);
+ int (*get_stream_buffer_size)(struct vpu_shared_addr *shared);
+ int (*send_cmd_buf)(struct vpu_shared_addr *shared,
+ struct vpu_rpc_event *cmd);
+ int (*receive_msg_buf)(struct vpu_shared_addr *shared,
+ struct vpu_rpc_event *msg);
+ int (*pack_cmd)(struct vpu_rpc_event *pkt, u32 index, u32 id, void *data);
+ int (*convert_msg_id)(u32 msg_id);
+ int (*unpack_msg_data)(struct vpu_rpc_event *pkt, void *data);
+ int (*input_frame)(struct vpu_shared_addr *shared,
+ struct vpu_inst *inst, struct vb2_buffer *vb);
+ int (*config_memory_resource)(struct vpu_shared_addr *shared,
+ u32 instance,
+ u32 type,
+ u32 index,
+ struct vpu_buffer *buf);
+ int (*config_stream_buffer)(struct vpu_shared_addr *shared,
+ u32 instance,
+ struct vpu_buffer *buf);
+ int (*update_stream_buffer)(struct vpu_shared_addr *shared,
+ u32 instance, u32 ptr, bool write);
+ int (*get_stream_buffer_desc)(struct vpu_shared_addr *shared,
+ u32 instance,
+ struct vpu_rpc_buffer_desc *desc);
+ int (*set_encode_params)(struct vpu_shared_addr *shared,
+ u32 instance,
+ struct vpu_encode_params *params,
+ u32 update);
+ int (*set_decode_params)(struct vpu_shared_addr *shared,
+ u32 instance,
+ struct vpu_decode_params *params,
+ u32 update);
+ int (*add_scode)(struct vpu_shared_addr *shared,
+ u32 instance,
+ struct vpu_buffer *stream_buffer,
+ u32 pixelformat,
+ u32 scode_type);
+ int (*pre_send_cmd)(struct vpu_shared_addr *shared, u32 instance);
+ int (*post_send_cmd)(struct vpu_shared_addr *shared, u32 instance);
+ int (*init_instance)(struct vpu_shared_addr *shared, u32 instance);
+};
+
+enum {
+ VPU_CORE_MEMORY_INVALID = 0,
+ VPU_CORE_MEMORY_CACHED,
+ VPU_CORE_MEMORY_UNCACHED
+};
+
+struct vpu_rpc_region_t {
+ dma_addr_t start;
+ dma_addr_t end;
+ dma_addr_t type;
+};
+
+struct vpu_iface_ops *vpu_core_get_iface(struct vpu_core *core);
+struct vpu_iface_ops *vpu_inst_get_iface(struct vpu_inst *inst);
+int vpu_iface_check_memory_region(struct vpu_core *core, dma_addr_t addr, u32 size);
+
+static inline bool vpu_iface_check_codec(struct vpu_core *core)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(core);
+
+ if (ops && ops->check_codec)
+ return ops->check_codec(core->type);
+
+ return true;
+}
+
+static inline bool vpu_iface_check_format(struct vpu_inst *inst, u32 pixelfmt)
+{
+ struct vpu_iface_ops *ops = vpu_inst_get_iface(inst);
+
+ if (ops && ops->check_fmt)
+ return ops->check_fmt(inst->type, pixelfmt);
+
+ return true;
+}
+
+static inline int vpu_iface_boot_core(struct vpu_core *core)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(core);
+
+ if (ops && ops->boot_core)
+ return ops->boot_core(core);
+ return 0;
+}
+
+static inline int vpu_iface_get_power_state(struct vpu_core *core)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(core);
+
+ if (ops && ops->get_power_state)
+ return ops->get_power_state(core);
+ return 1;
+}
+
+static inline int vpu_iface_shutdown_core(struct vpu_core *core)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(core);
+
+ if (ops && ops->shutdown_core)
+ return ops->shutdown_core(core);
+ return 0;
+}
+
+static inline int vpu_iface_restore_core(struct vpu_core *core)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(core);
+
+ if (ops && ops->restore_core)
+ return ops->restore_core(core);
+ return 0;
+}
+
+static inline int vpu_iface_on_firmware_loaded(struct vpu_core *core)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(core);
+
+ if (ops && ops->on_firmware_loaded)
+ return ops->on_firmware_loaded(core);
+
+ return 0;
+}
+
+static inline u32 vpu_iface_get_data_size(struct vpu_core *core)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(core);
+
+ if (!ops || !ops->get_data_size)
+ return 0;
+
+ return ops->get_data_size();
+}
+
+static inline int vpu_iface_init(struct vpu_core *core,
+ struct vpu_shared_addr *shared,
+ struct vpu_buffer *rpc,
+ dma_addr_t boot_addr)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(core);
+
+ if (!ops || !ops->init_rpc)
+ return -EINVAL;
+
+ ops->init_rpc(shared, rpc, boot_addr);
+ core->iface = shared;
+ shared->core = core;
+ if (rpc->bytesused > rpc->length)
+ return -ENOSPC;
+ return 0;
+}
+
+static inline int vpu_iface_set_log_buf(struct vpu_core *core,
+ struct vpu_buffer *log)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(core);
+
+ if (!ops)
+ return -EINVAL;
+
+ if (ops->set_log_buf)
+ ops->set_log_buf(core->iface, log);
+
+ return 0;
+}
+
+static inline int vpu_iface_config_system(struct vpu_core *core, u32 regs_base, void __iomem *regs)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(core);
+
+ if (!ops)
+ return -EINVAL;
+ if (ops->set_system_cfg)
+ ops->set_system_cfg(core->iface, regs_base, regs, core->id);
+
+ return 0;
+}
+
+static inline int vpu_iface_get_stream_buffer_size(struct vpu_core *core)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(core);
+
+ if (!ops || !ops->get_stream_buffer_size)
+ return 0;
+
+ return ops->get_stream_buffer_size(core->iface);
+}
+
+static inline int vpu_iface_config_stream(struct vpu_inst *inst)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(inst->core);
+
+ if (!ops || inst->id < 0)
+ return -EINVAL;
+ if (ops->set_stream_cfg)
+ ops->set_stream_cfg(inst->core->iface, inst->id);
+ return 0;
+}
+
+static inline int vpu_iface_send_cmd(struct vpu_core *core, struct vpu_rpc_event *cmd)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(core);
+
+ if (!ops || !ops->send_cmd_buf)
+ return -EINVAL;
+
+ return ops->send_cmd_buf(core->iface, cmd);
+}
+
+static inline int vpu_iface_receive_msg(struct vpu_core *core, struct vpu_rpc_event *msg)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(core);
+
+ if (!ops || !ops->receive_msg_buf)
+ return -EINVAL;
+
+ return ops->receive_msg_buf(core->iface, msg);
+}
+
+static inline int vpu_iface_pack_cmd(struct vpu_core *core,
+ struct vpu_rpc_event *pkt,
+ u32 index, u32 id, void *data)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(core);
+
+ if (!ops || !ops->pack_cmd)
+ return -EINVAL;
+ return ops->pack_cmd(pkt, index, id, data);
+}
+
+static inline int vpu_iface_convert_msg_id(struct vpu_core *core, u32 msg_id)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(core);
+
+ if (!ops || !ops->convert_msg_id)
+ return -EINVAL;
+
+ return ops->convert_msg_id(msg_id);
+}
+
+static inline int vpu_iface_unpack_msg_data(struct vpu_core *core,
+ struct vpu_rpc_event *pkt, void *data)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(core);
+
+ if (!ops || !ops->unpack_msg_data)
+ return -EINVAL;
+
+ return ops->unpack_msg_data(pkt, data);
+}
+
+static inline int vpu_iface_input_frame(struct vpu_inst *inst,
+ struct vb2_buffer *vb)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(inst->core);
+
+ if (!ops || !ops->input_frame)
+ return -EINVAL;
+
+ return ops->input_frame(inst->core->iface, inst, vb);
+}
+
+static inline int vpu_iface_config_memory_resource(struct vpu_inst *inst,
+ u32 type,
+ u32 index,
+ struct vpu_buffer *buf)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(inst->core);
+
+ if (!ops || !ops->config_memory_resource || inst->id < 0)
+ return -EINVAL;
+
+ return ops->config_memory_resource(inst->core->iface,
+ inst->id,
+ type, index, buf);
+}
+
+static inline int vpu_iface_config_stream_buffer(struct vpu_inst *inst,
+ struct vpu_buffer *buf)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(inst->core);
+
+ if (!ops || !ops->config_stream_buffer || inst->id < 0)
+ return -EINVAL;
+
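+ /* the firmware addresses the stream buffer with 32-bit, word-aligned pointers */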
+ if ((buf->phys % 4) || (buf->length % 4))
+ return -EINVAL;
+ if (buf->phys + buf->length > (u64)UINT_MAX)
+ return -EINVAL;
+
+ return ops->config_stream_buffer(inst->core->iface, inst->id, buf);
+}
+
+static inline int vpu_iface_update_stream_buffer(struct vpu_inst *inst,
+ u32 ptr, bool write)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(inst->core);
+
+ if (!ops || !ops->update_stream_buffer || inst->id < 0)
+ return -EINVAL;
+
+ return ops->update_stream_buffer(inst->core->iface, inst->id, ptr, write);
+}
+
+static inline int vpu_iface_get_stream_buffer_desc(struct vpu_inst *inst,
+ struct vpu_rpc_buffer_desc *desc)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(inst->core);
+
+ if (!ops || !ops->get_stream_buffer_desc || inst->id < 0)
+ return -EINVAL;
+
+ if (!desc)
+ return 0;
+
+ return ops->get_stream_buffer_desc(inst->core->iface, inst->id, desc);
+}
+
+static inline u32 vpu_iface_get_version(struct vpu_core *core)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(core);
+
+ if (!ops || !ops->get_version)
+ return 0;
+
+ return ops->get_version(core->iface);
+}
+
+static inline u32 vpu_iface_get_max_instance_count(struct vpu_core *core)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(core);
+
+ if (!ops || !ops->get_max_instance_count)
+ return 0;
+
+ return ops->get_max_instance_count(core->iface);
+}
+
+static inline int vpu_iface_set_encode_params(struct vpu_inst *inst,
+ struct vpu_encode_params *params, u32 update)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(inst->core);
+
+ if (!ops || !ops->set_encode_params || inst->id < 0)
+ return -EINVAL;
+
+ return ops->set_encode_params(inst->core->iface, inst->id, params, update);
+}
+
+static inline int vpu_iface_set_decode_params(struct vpu_inst *inst,
+ struct vpu_decode_params *params, u32 update)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(inst->core);
+
+ if (!ops || !ops->set_decode_params || inst->id < 0)
+ return -EINVAL;
+
+ return ops->set_decode_params(inst->core->iface, inst->id, params, update);
+}
+
+static inline int vpu_iface_add_scode(struct vpu_inst *inst, u32 scode_type)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(inst->core);
+
+ if (!ops || !ops->add_scode || inst->id < 0)
+ return -EINVAL;
+
+ return ops->add_scode(inst->core->iface, inst->id,
+ &inst->stream_buffer,
+ inst->out_format.pixfmt,
+ scode_type);
+}
+
+static inline int vpu_iface_pre_send_cmd(struct vpu_inst *inst)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(inst->core);
+
+ if (ops && ops->pre_send_cmd && inst->id >= 0)
+ return ops->pre_send_cmd(inst->core->iface, inst->id);
+ return 0;
+}
+
+static inline int vpu_iface_post_send_cmd(struct vpu_inst *inst)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(inst->core);
+
+ if (ops && ops->post_send_cmd && inst->id >= 0)
+ return ops->post_send_cmd(inst->core->iface, inst->id);
+ return 0;
+}
+
+static inline int vpu_iface_init_instance(struct vpu_inst *inst)
+{
+ struct vpu_iface_ops *ops = vpu_core_get_iface(inst->core);
+
+ if (ops && ops->init_instance && inst->id >= 0)
+ return ops->init_instance(inst->core->iface, inst->id);
+
+ return 0;
+}
+
+#endif
diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c
new file mode 100644
index 000000000000..9c0704cd5766
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_v4l2.c
@@ -0,0 +1,713 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#include <linux/init.h>
+#include <linux/interconnect.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-vmalloc.h>
+#include "vpu.h"
+#include "vpu_core.h"
+#include "vpu_v4l2.h"
+#include "vpu_msgs.h"
+#include "vpu_helpers.h"
+
+void vpu_inst_lock(struct vpu_inst *inst)
+{
+ mutex_lock(&inst->lock);
+}
+
+void vpu_inst_unlock(struct vpu_inst *inst)
+{
+ mutex_unlock(&inst->lock);
+}
+
+dma_addr_t vpu_get_vb_phy_addr(struct vb2_buffer *vb, u32 plane_no)
+{
+ if (plane_no >= vb->num_planes)
+ return 0;
+ return vb2_dma_contig_plane_dma_addr(vb, plane_no) +
+ vb->planes[plane_no].data_offset;
+}
+
+unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no)
+{
+ if (plane_no >= vb->num_planes)
+ return 0;
+ return vb2_plane_size(vb, plane_no) - vb->planes[plane_no].data_offset;
+}
+
+void vpu_set_buffer_state(struct vb2_v4l2_buffer *vbuf, unsigned int state)
+{
+ struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);
+
+ vpu_buf->state = state;
+}
+
+unsigned int vpu_get_buffer_state(struct vb2_v4l2_buffer *vbuf)
+{
+ struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);
+
+ return vpu_buf->state;
+}
+
+void vpu_v4l2_set_error(struct vpu_inst *inst)
+{
+ struct vb2_queue *src_q;
+ struct vb2_queue *dst_q;
+
+ vpu_inst_lock(inst);
+ dev_err(inst->dev, "error occurred in codec\n");

+ if (inst->fh.m2m_ctx) {
+ src_q = v4l2_m2m_get_src_vq(inst->fh.m2m_ctx);
+ dst_q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
+ if (src_q)
+ src_q->error = 1;
+ if (dst_q)
+ dst_q->error = 1;
+ }
+ vpu_inst_unlock(inst);
+}
+
+int vpu_notify_eos(struct vpu_inst *inst)
+{
+ static const struct v4l2_event ev = {
+ .id = 0,
+ .type = V4L2_EVENT_EOS
+ };
+
+ vpu_trace(inst->dev, "[%d]\n", inst->id);
+ v4l2_event_queue_fh(&inst->fh, &ev);
+
+ return 0;
+}
+
+int vpu_notify_source_change(struct vpu_inst *inst)
+{
+ static const struct v4l2_event ev = {
+ .id = 0,
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION
+ };
+
+ vpu_trace(inst->dev, "[%d]\n", inst->id);
+ v4l2_event_queue_fh(&inst->fh, &ev);
+ return 0;
+}
+
+int vpu_set_last_buffer_dequeued(struct vpu_inst *inst)
+{
+ struct vb2_queue *q;
+
+ if (!inst || !inst->fh.m2m_ctx)
+ return -EINVAL;
+
+ q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
+ if (!list_empty(&q->done_list))
+ return -EINVAL;
+
+ if (q->last_buffer_dequeued)
+ return 0;
+ vpu_trace(inst->dev, "last buffer dequeued\n");
+ q->last_buffer_dequeued = true;
+ wake_up(&q->done_wq);
+ vpu_notify_eos(inst);
+ return 0;
+}
+
+const struct vpu_format *vpu_try_fmt_common(struct vpu_inst *inst, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ u32 type = f->type;
+ u32 stride = 1;
+ u32 bytesperline;
+ u32 sizeimage;
+ const struct vpu_format *fmt;
+ const struct vpu_core_resources *res;
+ int i;
+
+ fmt = vpu_helper_find_format(inst, type, pixmp->pixelformat);
+ if (!fmt) {
+ fmt = vpu_helper_enum_format(inst, type, 0);
+ if (!fmt)
+ return NULL;
+ pixmp->pixelformat = fmt->pixfmt;
+ }
+
+ res = vpu_get_resource(inst);
+ if (res)
+ stride = res->stride;
+ if (pixmp->width)
+ pixmp->width = vpu_helper_valid_frame_width(inst, pixmp->width);
+ if (pixmp->height)
+ pixmp->height = vpu_helper_valid_frame_height(inst, pixmp->height);
+ pixmp->flags = fmt->flags;
+ pixmp->num_planes = fmt->num_planes;
+ if (pixmp->field == V4L2_FIELD_ANY)
+ pixmp->field = V4L2_FIELD_NONE;
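+ /* raise each plane's bytesperline/sizeimage to at least the driver-computed minimum */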
+ for (i = 0; i < pixmp->num_planes; i++) {
+ bytesperline = max_t(s32, pixmp->plane_fmt[i].bytesperline, 0);
+ sizeimage = vpu_helper_get_plane_size(pixmp->pixelformat,
+ pixmp->width,
+ pixmp->height,
+ i,
+ stride,
+ pixmp->field > V4L2_FIELD_NONE ? 1 : 0,
+ &bytesperline);
+ sizeimage = max_t(s32, pixmp->plane_fmt[i].sizeimage, sizeimage);
+ pixmp->plane_fmt[i].bytesperline = bytesperline;
+ pixmp->plane_fmt[i].sizeimage = sizeimage;
+ }
+
+ return fmt;
+}
+
+static bool vpu_check_ready(struct vpu_inst *inst, u32 type)
+{
+ if (!inst)
+ return false;
+ if (inst->state == VPU_CODEC_STATE_DEINIT || inst->id < 0)
+ return false;
+ if (!inst->ops->check_ready)
+ return true;
+ return call_vop(inst, check_ready, type);
+}
+
+int vpu_process_output_buffer(struct vpu_inst *inst)
+{
+ struct v4l2_m2m_buffer *buf = NULL;
+ struct vb2_v4l2_buffer *vbuf = NULL;
+
+ if (!inst || !inst->fh.m2m_ctx)
+ return -EINVAL;
+
+ if (!vpu_check_ready(inst, inst->out_format.type))
+ return -EINVAL;
+
+ v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
+ vbuf = &buf->vb;
+ if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_IDLE)
+ break;
+ vbuf = NULL;
+ }
+
+ if (!vbuf)
+ return -EINVAL;
+
+ dev_dbg(inst->dev, "[%d] frame id = %d / %d\n",
+ inst->id, vbuf->sequence, inst->sequence);
+ return call_vop(inst, process_output, &vbuf->vb2_buf);
+}
+
+int vpu_process_capture_buffer(struct vpu_inst *inst)
+{
+ struct v4l2_m2m_buffer *buf = NULL;
+ struct vb2_v4l2_buffer *vbuf = NULL;
+
+ if (!inst || !inst->fh.m2m_ctx)
+ return -EINVAL;
+
+ if (!vpu_check_ready(inst, inst->cap_format.type))
+ return -EINVAL;
+
+ v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
+ vbuf = &buf->vb;
+ if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_IDLE)
+ break;
+ vbuf = NULL;
+ }
+ if (!vbuf)
+ return -EINVAL;
+
+ return call_vop(inst, process_capture, &vbuf->vb2_buf);
+}
+
+struct vb2_v4l2_buffer *vpu_find_buf_by_sequence(struct vpu_inst *inst, u32 type, u32 sequence)
+{
+ struct v4l2_m2m_buffer *buf = NULL;
+ struct vb2_v4l2_buffer *vbuf = NULL;
+
+ if (!inst || !inst->fh.m2m_ctx)
+ return NULL;
+
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+ v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
+ vbuf = &buf->vb;
+ if (vbuf->sequence == sequence)
+ break;
+ vbuf = NULL;
+ }
+ } else {
+ v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
+ vbuf = &buf->vb;
+ if (vbuf->sequence == sequence)
+ break;
+ vbuf = NULL;
+ }
+ }
+
+ return vbuf;
+}
+
+struct vb2_v4l2_buffer *vpu_find_buf_by_idx(struct vpu_inst *inst, u32 type, u32 idx)
+{
+ struct v4l2_m2m_buffer *buf = NULL;
+ struct vb2_v4l2_buffer *vbuf = NULL;
+
+ if (!inst || !inst->fh.m2m_ctx)
+ return NULL;
+
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+ v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
+ vbuf = &buf->vb;
+ if (vbuf->vb2_buf.index == idx)
+ break;
+ vbuf = NULL;
+ }
+ } else {
+ v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
+ vbuf = &buf->vb;
+ if (vbuf->vb2_buf.index == idx)
+ break;
+ vbuf = NULL;
+ }
+ }
+
+ return vbuf;
+}
+
+int vpu_get_num_buffers(struct vpu_inst *inst, u32 type)
+{
+ struct vb2_queue *q;
+
+ if (!inst || !inst->fh.m2m_ctx)
+ return -EINVAL;
+
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ q = v4l2_m2m_get_src_vq(inst->fh.m2m_ctx);
+ else
+ q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
+
+ return q->num_buffers;
+}
+
+static void vpu_m2m_device_run(void *priv)
+{
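+ /* nothing to do here: buffers are handed to the firmware from buf_queue */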
+}
+
+static void vpu_m2m_job_abort(void *priv)
+{
+ struct vpu_inst *inst = priv;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->fh.m2m_ctx;
+
+ v4l2_m2m_job_finish(m2m_ctx->m2m_dev, m2m_ctx);
+}
+
+static const struct v4l2_m2m_ops vpu_m2m_ops = {
+ .device_run = vpu_m2m_device_run,
+ .job_abort = vpu_m2m_job_abort
+};
+
+static int vpu_vb2_queue_setup(struct vb2_queue *vq,
+ unsigned int *buf_count,
+ unsigned int *plane_count,
+ unsigned int psize[],
+ struct device *allocators[])
+{
+ struct vpu_inst *inst = vb2_get_drv_priv(vq);
+ struct vpu_format *cur_fmt;
+ int i;
+
+ cur_fmt = vpu_get_format(inst, vq->type);
+
+ if (*plane_count) {
+ if (*plane_count != cur_fmt->num_planes)
+ return -EINVAL;
+ for (i = 0; i < cur_fmt->num_planes; i++) {
+ if (psize[i] < cur_fmt->sizeimage[i])
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ *plane_count = cur_fmt->num_planes;
+ for (i = 0; i < cur_fmt->num_planes; i++)
+ psize[i] = cur_fmt->sizeimage[i];
+
+ return 0;
+}
+
+static int vpu_vb2_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ vpu_set_buffer_state(vbuf, VPU_BUF_STATE_IDLE);
+ return 0;
+}
+
+static int vpu_vb2_buf_out_validate(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ vbuf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int vpu_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vpu_format *cur_fmt;
+ u32 i;
+
+ cur_fmt = vpu_get_format(inst, vb->type);
+ for (i = 0; i < cur_fmt->num_planes; i++) {
+ if (vpu_get_vb_length(vb, i) < cur_fmt->sizeimage[i]) {
+ dev_dbg(inst->dev, "[%d] %s buf[%d] is invalid\n",
+ inst->id, vpu_type_name(vb->type), vb->index);
+ vpu_set_buffer_state(vbuf, VPU_BUF_STATE_ERROR);
+ }
+ }
+
+ return 0;
+}
+
+static void vpu_vb2_buf_finish(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_queue *q = vb->vb2_queue;
+
+ if (vbuf->flags & V4L2_BUF_FLAG_LAST)
+ vpu_notify_eos(inst);
+
+ if (list_empty(&q->done_list))
+ call_void_vop(inst, on_queue_empty, q->type);
+}
+
+void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, enum vb2_buffer_state state)
+{
+ struct vb2_v4l2_buffer *buf;
+
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+ while ((buf = v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx))) {
+ vpu_set_buffer_state(buf, VPU_BUF_STATE_IDLE);
+ v4l2_m2m_buf_done(buf, state);
+ }
+ } else {
+ while ((buf = v4l2_m2m_dst_buf_remove(inst->fh.m2m_ctx))) {
+ vpu_set_buffer_state(buf, VPU_BUF_STATE_IDLE);
+ v4l2_m2m_buf_done(buf, state);
+ }
+ }
+}
+
+static int vpu_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct vpu_inst *inst = vb2_get_drv_priv(q);
+ struct vpu_format *fmt = vpu_get_format(inst, q->type);
+ int ret;
+
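+ /* vpu_inst_register() must not be called with the instance lock held */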
+ vpu_inst_unlock(inst);
+ ret = vpu_inst_register(inst);
+ vpu_inst_lock(inst);
+ if (ret) {
+ vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_QUEUED);
+ return ret;
+ }
+
+ vpu_trace(inst->dev, "[%d] %s %c%c%c%c %dx%d %u(%u) %u(%u) %u(%u) %d\n",
+ inst->id, vpu_type_name(q->type),
+ fmt->pixfmt,
+ fmt->pixfmt >> 8,
+ fmt->pixfmt >> 16,
+ fmt->pixfmt >> 24,
+ fmt->width, fmt->height,
+ fmt->sizeimage[0], fmt->bytesperline[0],
+ fmt->sizeimage[1], fmt->bytesperline[1],
+ fmt->sizeimage[2], fmt->bytesperline[2],
+ q->num_buffers);
+ call_void_vop(inst, start, q->type);
+ vb2_clear_last_buffer_dequeued(q);
+
+ return 0;
+}
+
+static void vpu_vb2_stop_streaming(struct vb2_queue *q)
+{
+ struct vpu_inst *inst = vb2_get_drv_priv(q);
+
+ vpu_trace(inst->dev, "[%d] %s\n", inst->id, vpu_type_name(q->type));
+
+ call_void_vop(inst, stop, q->type);
+ vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_ERROR);
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ inst->sequence = 0;
+}
+
+static void vpu_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+
+ if (V4L2_TYPE_IS_OUTPUT(vb->type))
+ vbuf->sequence = inst->sequence++;
+
+ v4l2_m2m_buf_queue(inst->fh.m2m_ctx, vbuf);
+ vpu_process_output_buffer(inst);
+ vpu_process_capture_buffer(inst);
+}
+
+static const struct vb2_ops vpu_vb2_ops = {
+ .queue_setup = vpu_vb2_queue_setup,
+ .buf_init = vpu_vb2_buf_init,
+ .buf_out_validate = vpu_vb2_buf_out_validate,
+ .buf_prepare = vpu_vb2_buf_prepare,
+ .buf_finish = vpu_vb2_buf_finish,
+ .start_streaming = vpu_vb2_start_streaming,
+ .stop_streaming = vpu_vb2_stop_streaming,
+ .buf_queue = vpu_vb2_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int vpu_m2m_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ struct vpu_inst *inst = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ inst->out_format.type = src_vq->type;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->ops = &vpu_vb2_ops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ if (inst->type == VPU_CORE_TYPE_DEC && inst->use_stream_buffer)
+ src_vq->mem_ops = &vb2_vmalloc_memops;
+ src_vq->drv_priv = inst;
+ src_vq->buf_struct_size = sizeof(struct vpu_vb2_buffer);
+ src_vq->min_buffers_needed = 1;
+ src_vq->dev = inst->vpu->dev;
+ src_vq->lock = &inst->lock;
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ inst->cap_format.type = dst_vq->type;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->ops = &vpu_vb2_ops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ if (inst->type == VPU_CORE_TYPE_ENC && inst->use_stream_buffer)
+ dst_vq->mem_ops = &vb2_vmalloc_memops;
+ dst_vq->drv_priv = inst;
+ dst_vq->buf_struct_size = sizeof(struct vpu_vb2_buffer);
+ dst_vq->min_buffers_needed = 1;
+ dst_vq->dev = inst->vpu->dev;
+ dst_vq->lock = &inst->lock;
+ ret = vb2_queue_init(dst_vq);
+ if (ret) {
+ vb2_queue_release(src_vq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vpu_v4l2_release(struct vpu_inst *inst)
+{
+ vpu_trace(inst->vpu->dev, "%p\n", inst);
+
+ vpu_release_core(inst->core);
+ put_device(inst->dev);
+
+ if (inst->workqueue) {
+ cancel_work_sync(&inst->msg_work);
+ destroy_workqueue(inst->workqueue);
+ inst->workqueue = NULL;
+ }
+
+ v4l2_ctrl_handler_free(&inst->ctrl_handler);
+ mutex_destroy(&inst->lock);
+ v4l2_fh_del(&inst->fh);
+ v4l2_fh_exit(&inst->fh);
+
+ call_void_vop(inst, cleanup);
+
+ return 0;
+}
+
+int vpu_v4l2_open(struct file *file, struct vpu_inst *inst)
+{
+ struct vpu_dev *vpu = video_drvdata(file);
+ struct vpu_func *func;
+ int ret = 0;
+
+ if (!inst || !inst->ops)
+ return -EINVAL;
+
+ if (inst->type == VPU_CORE_TYPE_ENC)
+ func = &vpu->encoder;
+ else
+ func = &vpu->decoder;
+
+ atomic_set(&inst->ref_count, 0);
+ vpu_inst_get(inst);
+ inst->vpu = vpu;
+ inst->core = vpu_request_core(vpu, inst->type);
+ if (inst->core)
+ inst->dev = get_device(inst->core->dev);
+ mutex_init(&inst->lock);
+ INIT_LIST_HEAD(&inst->cmd_q);
+ inst->id = VPU_INST_NULL_ID;
+ inst->release = vpu_v4l2_release;
+ inst->pid = current->pid;
+ inst->tgid = current->tgid;
+ inst->min_buffer_cap = 2;
+ inst->min_buffer_out = 2;
+ v4l2_fh_init(&inst->fh, func->vfd);
+ v4l2_fh_add(&inst->fh);
+
+ ret = call_vop(inst, ctrl_init);
+ if (ret)
+ goto error;
+
+ inst->fh.m2m_ctx = v4l2_m2m_ctx_init(func->m2m_dev, inst, vpu_m2m_queue_init);
+ if (IS_ERR(inst->fh.m2m_ctx)) {
+ dev_err(vpu->dev, "v4l2_m2m_ctx_init failed\n");
+ ret = PTR_ERR(inst->fh.m2m_ctx);
+ goto error;
+ }
+
+ inst->fh.ctrl_handler = &inst->ctrl_handler;
+ file->private_data = &inst->fh;
+ inst->state = VPU_CODEC_STATE_DEINIT;
+ inst->workqueue = alloc_workqueue("vpu_inst", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ if (inst->workqueue) {
+ INIT_WORK(&inst->msg_work, vpu_inst_run_work);
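+ /* kfifo_init() requires a power-of-two buffer size */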
+ ret = kfifo_init(&inst->msg_fifo,
+ inst->msg_buffer,
+ rounddown_pow_of_two(sizeof(inst->msg_buffer)));
+ if (ret) {
+ destroy_workqueue(inst->workqueue);
+ inst->workqueue = NULL;
+ }
+ }
+ vpu_trace(vpu->dev, "tgid = %d, pid = %d, type = %s, inst = %p\n",
+ inst->tgid, inst->pid, vpu_core_type_desc(inst->type), inst);
+
+ return 0;
+error:
+ vpu_inst_put(inst);
+ return ret;
+}
+
+int vpu_v4l2_close(struct file *file)
+{
+ struct vpu_dev *vpu = video_drvdata(file);
+ struct vpu_inst *inst = to_inst(file);
+
+ vpu_trace(vpu->dev, "tgid = %d, pid = %d, inst = %p\n", inst->tgid, inst->pid, inst);
+
+ vpu_inst_lock(inst);
+ if (inst->fh.m2m_ctx) {
+ v4l2_m2m_ctx_release(inst->fh.m2m_ctx);
+ inst->fh.m2m_ctx = NULL;
+ }
+ vpu_inst_unlock(inst);
+
+ call_void_vop(inst, release);
+ vpu_inst_unregister(inst);
+ vpu_inst_put(inst);
+
+ return 0;
+}
+
+int vpu_add_func(struct vpu_dev *vpu, struct vpu_func *func)
+{
+ struct video_device *vfd;
+ int ret;
+
+ if (!vpu || !func)
+ return -EINVAL;
+
+ if (func->vfd)
+ return 0;
+
+ func->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
+ if (IS_ERR(func->m2m_dev)) {
+ dev_err(vpu->dev, "v4l2_m2m_init failed\n");
+ func->vfd = NULL;
+ return PTR_ERR(func->m2m_dev);
+ }
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_m2m_release(func->m2m_dev);
+ dev_err(vpu->dev, "alloc vpu decoder video device fail\n");
+ return -ENOMEM;
+ }
+ vfd->release = video_device_release;
+ vfd->vfl_dir = VFL_DIR_M2M;
+ vfd->v4l2_dev = &vpu->v4l2_dev;
+ vfd->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ if (func->type == VPU_CORE_TYPE_ENC) {
+ strscpy(vfd->name, "amphion-vpu-encoder", sizeof(vfd->name));
+ vfd->fops = venc_get_fops();
+ vfd->ioctl_ops = venc_get_ioctl_ops();
+ } else {
+ strscpy(vfd->name, "amphion-vpu-decoder", sizeof(vfd->name));
+ vfd->fops = vdec_get_fops();
+ vfd->ioctl_ops = vdec_get_ioctl_ops();
+ }
+
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ video_device_release(vfd);
+ v4l2_m2m_release(func->m2m_dev);
+ return ret;
+ }
+ video_set_drvdata(vfd, vpu);
+ func->vfd = vfd;
+
+ ret = v4l2_m2m_register_media_controller(func->m2m_dev, func->vfd, func->function);
+ if (ret) {
+ v4l2_m2m_release(func->m2m_dev);
+ func->m2m_dev = NULL;
+ video_unregister_device(func->vfd);
+ func->vfd = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+void vpu_remove_func(struct vpu_func *func)
+{
+ if (!func)
+ return;
+
+ if (func->m2m_dev) {
+ v4l2_m2m_unregister_media_controller(func->m2m_dev);
+ v4l2_m2m_release(func->m2m_dev);
+ func->m2m_dev = NULL;
+ }
+ if (func->vfd) {
+ video_unregister_device(func->vfd);
+ func->vfd = NULL;
+ }
+}
diff --git a/drivers/media/platform/amphion/vpu_v4l2.h b/drivers/media/platform/amphion/vpu_v4l2.h
new file mode 100644
index 000000000000..90fa7ea67495
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_v4l2.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#ifndef _AMPHION_VPU_V4L2_H
+#define _AMPHION_VPU_V4L2_H
+
+#include <linux/videodev2.h>
+
+void vpu_inst_lock(struct vpu_inst *inst);
+void vpu_inst_unlock(struct vpu_inst *inst);
+void vpu_set_buffer_state(struct vb2_v4l2_buffer *vbuf, unsigned int state);
+unsigned int vpu_get_buffer_state(struct vb2_v4l2_buffer *vbuf);
+
+int vpu_v4l2_open(struct file *file, struct vpu_inst *inst);
+int vpu_v4l2_close(struct file *file);
+
+const struct vpu_format *vpu_try_fmt_common(struct vpu_inst *inst, struct v4l2_format *f);
+int vpu_process_output_buffer(struct vpu_inst *inst);
+int vpu_process_capture_buffer(struct vpu_inst *inst);
+struct vb2_v4l2_buffer *vpu_find_buf_by_sequence(struct vpu_inst *inst, u32 type, u32 sequence);
+struct vb2_v4l2_buffer *vpu_find_buf_by_idx(struct vpu_inst *inst, u32 type, u32 idx);
+void vpu_v4l2_set_error(struct vpu_inst *inst);
+int vpu_notify_eos(struct vpu_inst *inst);
+int vpu_notify_source_change(struct vpu_inst *inst);
+int vpu_set_last_buffer_dequeued(struct vpu_inst *inst);
+void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, enum vb2_buffer_state state);
+int vpu_get_num_buffers(struct vpu_inst *inst, u32 type);
+
+dma_addr_t vpu_get_vb_phy_addr(struct vb2_buffer *vb, u32 plane_no);
+unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no);
+static inline struct vpu_format *vpu_get_format(struct vpu_inst *inst, u32 type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &inst->out_format;
+ else
+ return &inst->cap_format;
+}
+
+static inline char *vpu_type_name(u32 type)
+{
+ return V4L2_TYPE_IS_OUTPUT(type) ? "output" : "capture";
+}
+
+static inline int vpu_vb_is_codecconfig(struct vb2_v4l2_buffer *vbuf)
+{
+#ifdef V4L2_BUF_FLAG_CODECCONFIG
+ return (vbuf->flags & V4L2_BUF_FLAG_CODECCONFIG) ? 1 : 0;
+#else
+ return 0;
+#endif
+}
+
+#endif
diff --git a/drivers/media/platform/amphion/vpu_windsor.c b/drivers/media/platform/amphion/vpu_windsor.c
new file mode 100644
index 000000000000..1526af2ef9da
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_windsor.c
@@ -0,0 +1,1173 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#include <linux/init.h>
+#include <linux/interconnect.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/time64.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+#include "vpu.h"
+#include "vpu_rpc.h"
+#include "vpu_defs.h"
+#include "vpu_helpers.h"
+#include "vpu_cmds.h"
+#include "vpu_v4l2.h"
+#include "vpu_imx8q.h"
+#include "vpu_windsor.h"
+
+#define CMD_SIZE 2560
+#define MSG_SIZE 25600
+#define WINDSOR_USER_DATA_WORDS 16
+#define WINDSOR_MAX_SRC_FRAMES 0x6
+#define WINDSOR_MAX_REF_FRAMES 0x3
+#define WINDSOR_BITRATE_UNIT 1024
+#define WINDSOR_H264_EXTENDED_SAR 255
+
+enum {
+ GTB_ENC_CMD_NOOP = 0x0,
+ GTB_ENC_CMD_STREAM_START,
+ GTB_ENC_CMD_FRAME_ENCODE,
+ GTB_ENC_CMD_FRAME_SKIP,
+ GTB_ENC_CMD_STREAM_STOP,
+ GTB_ENC_CMD_PARAMETER_UPD,
+ GTB_ENC_CMD_TERMINATE,
+ GTB_ENC_CMD_SNAPSHOT,
+ GTB_ENC_CMD_ROLL_SNAPSHOT,
+ GTB_ENC_CMD_LOCK_SCHEDULER,
+ GTB_ENC_CMD_UNLOCK_SCHEDULER,
+ GTB_ENC_CMD_CONFIGURE_CODEC,
+ GTB_ENC_CMD_DEAD_MARK,
+ GTB_ENC_CMD_FIRM_RESET,
+ GTB_ENC_CMD_FW_STATUS,
+ GTB_ENC_CMD_RESERVED
+};
+
+enum {
+ VID_API_EVENT_UNDEFINED = 0x0,
+ VID_API_ENC_EVENT_RESET_DONE = 0x1,
+ VID_API_ENC_EVENT_START_DONE,
+ VID_API_ENC_EVENT_STOP_DONE,
+ VID_API_ENC_EVENT_TERMINATE_DONE,
+ VID_API_ENC_EVENT_FRAME_INPUT_DONE,
+ VID_API_ENC_EVENT_FRAME_DONE,
+ VID_API_ENC_EVENT_FRAME_RELEASE,
+ VID_API_ENC_EVENT_PARA_UPD_DONE,
+ VID_API_ENC_EVENT_MEM_REQUEST,
+ VID_API_ENC_EVENT_FIRMWARE_XCPT,
+ VID_API_ENC_EVENT_RESERVED
+};
+
+enum {
+ MEDIAIP_ENC_PIC_TYPE_B_FRAME = 0,
+ MEDIAIP_ENC_PIC_TYPE_P_FRAME,
+ MEDIAIP_ENC_PIC_TYPE_I_FRAME,
+ MEDIAIP_ENC_PIC_TYPE_IDR_FRAME,
+ MEDIAIP_ENC_PIC_TYPE_BI_FRAME
+};
+
+struct windsor_iface {
+ u32 exec_base_addr;
+ u32 exec_area_size;
+ struct vpu_rpc_buffer_desc cmd_buffer_desc;
+ struct vpu_rpc_buffer_desc msg_buffer_desc;
+ u32 cmd_int_enable[VID_API_NUM_STREAMS];
+ u32 fw_version;
+ u32 mvd_fw_offset;
+ u32 max_streams;
+ u32 ctrl_iface[VID_API_NUM_STREAMS];
+ struct vpu_rpc_system_config system_config;
+ u32 api_version;
+ struct vpu_rpc_buffer_desc log_buffer_desc;
+};
+
+struct windsor_ctrl_iface {
+ u32 enc_yuv_buffer_desc;
+ u32 enc_stream_buffer_desc;
+ u32 enc_expert_mode_param;
+ u32 enc_param;
+ u32 enc_mem_pool;
+ u32 enc_encoding_status;
+ u32 enc_dsa_status;
+};
+
+struct vpu_enc_yuv_desc {
+ u32 frame_id;
+ u32 luma_base;
+ u32 chroma_base;
+ u32 param_idx;
+ u32 key_frame;
+};
+
+struct vpu_enc_calib_params {
+ u32 use_ame;
+
+ u32 cme_mvx_max;
+ u32 cme_mvy_max;
+ u32 ame_prefresh_y0;
+ u32 ame_prefresh_y1;
+ u32 fme_min_sad;
+ u32 cme_min_sad;
+
+ u32 fme_pred_int_weight;
+ u32 fme_pred_hp_weight;
+ u32 fme_pred_qp_weight;
+ u32 fme_cost_weight;
+ u32 fme_act_thold;
+ u32 fme_sad_thold;
+ u32 fme_zero_sad_thold;
+
+ u32 fme_lrg_mvx_lmt;
+ u32 fme_lrg_mvy_lmt;
+ u32 fme_force_mode;
+ u32 fme_force4mvcost;
+ u32 fme_force2mvcost;
+
+ u32 h264_inter_thrd;
+
+ u32 i16x16_mode_cost;
+ u32 i4x4_mode_lambda;
+ u32 i8x8_mode_lambda;
+
+ u32 inter_mod_mult;
+ u32 inter_sel_mult;
+ u32 inter_bid_cost;
+ u32 inter_bwd_cost;
+ u32 inter_4mv_cost;
+ s32 one_mv_i16_cost;
+ s32 one_mv_i4x4_cost;
+ s32 one_mv_i8x8_cost;
+ s32 two_mv_i16_cost;
+ s32 two_mv_i4x4_cost;
+ s32 two_mv_i8x8_cost;
+ s32 four_mv_i16_cost;
+ s32 four_mv_i4x4_cost;
+ s32 four_mv_i8x8_cost;
+
+ u32 intra_pred_enab;
+ u32 intra_chr_pred;
+ u32 intra16_pred;
+ u32 intra4x4_pred;
+ u32 intra8x8_pred;
+
+ u32 cb_base;
+ u32 cb_size;
+ u32 cb_head_room;
+
+ u32 mem_page_width;
+ u32 mem_page_height;
+ u32 mem_total_size;
+ u32 mem_chunk_phys_addr;
+ u32 mem_chunk_virt_addr;
+ u32 mem_chunk_size;
+ u32 mem_y_stride;
+ u32 mem_uv_stride;
+
+ u32 split_wr_enab;
+ u32 split_wr_req_size;
+ u32 split_rd_enab;
+ u32 split_rd_req_size;
+};
+
+struct vpu_enc_config_params {
+ u32 param_change;
+ u32 start_frame;
+ u32 end_frame;
+ u32 userdata_enable;
+ u32 userdata_id[4];
+ u32 userdata_message[WINDSOR_USER_DATA_WORDS];
+ u32 userdata_length;
+ u32 h264_profile_idc;
+ u32 h264_level_idc;
+ u32 h264_au_delimiter;
+ u32 h264_seq_end_code;
+ u32 h264_recovery_points;
+ u32 h264_vui_parameters;
+ u32 h264_aspect_ratio_present;
+ u32 h264_aspect_ratio_sar_width;
+ u32 h264_aspect_ratio_sar_height;
+ u32 h264_overscan_present;
+ u32 h264_video_type_present;
+ u32 h264_video_format;
+ u32 h264_video_full_range;
+ u32 h264_video_colour_descriptor;
+ u32 h264_video_colour_primaries;
+ u32 h264_video_transfer_char;
+ u32 h264_video_matrix_coeff;
+ u32 h264_chroma_loc_info_present;
+ u32 h264_chroma_loc_type_top;
+ u32 h264_chroma_loc_type_bot;
+ u32 h264_timing_info_present;
+ u32 h264_buffering_period_present;
+ u32 h264_low_delay_hrd_flag;
+ u32 aspect_ratio;
+ u32 test_mode; /* automated firmware test mode */
+ u32 dsa_test_mode; /* automated test mode for the DSA */
+ u32 fme_test_mode; /* automated test mode for the FME */
+ u32 cbr_row_mode; /* 0: FW mode; 1: HW mode */
+ u32 windsor_mode; /* 0: normal; 1: intra-only; 2: intra + zero-MV */
+ u32 encode_mode; /* H264, VC1, MPEG2, DIVX */
+ u32 frame_width; /* display width */
+ u32 frame_height; /* display height */
+ u32 enc_frame_width; /* encoding width, must be 16-pixel aligned */
+ u32 enc_frame_height; /* encoding height, must be 16-pixel aligned */
+ u32 frame_rate_num;
+ u32 frame_rate_den;
+ u32 vi_field_source;
+ u32 vi_frame_width;
+ u32 vi_frame_height;
+ u32 crop_frame_width;
+ u32 crop_frame_height;
+ u32 crop_x_start_posn;
+ u32 crop_y_start_posn;
+ u32 mode422;
+ u32 mode_yuy2;
+ u32 dsa_luma_en;
+ u32 dsa_chroma_en;
+ u32 dsa_ext_hfilt_en;
+ u32 dsa_di_en;
+ u32 dsa_di_top_ref;
+ u32 dsa_vertf_disable;
+ u32 dsa_disable_pwb;
+ u32 dsa_hor_phase;
+ u32 dsa_ver_phase;
+ u32 dsa_iac_enable;
+ u32 iac_sc_threshold;
+ u32 iac_vm_threshold;
+ u32 iac_skip_mode;
+ u32 iac_grp_width;
+ u32 iac_grp_height;
+ u32 rate_control_mode;
+ u32 rate_control_resolution;
+ u32 buffer_size;
+ u32 buffer_level_init;
+ u32 buffer_I_bit_budget;
+ u32 top_field_first;
+ u32 intra_lum_qoffset;
+ u32 intra_chr_qoffset;
+ u32 inter_lum_qoffset;
+ u32 inter_chr_qoffset;
+ u32 use_def_scaling_mtx;
+ u32 inter_8x8_enab;
+ u32 inter_4x4_enab;
+ u32 fme_enable_qpel;
+ u32 fme_enable_hpel;
+ u32 fme_nozeromv;
+ u32 fme_predmv_en;
+ u32 fme_pred_2mv4mv;
+ u32 fme_smallsadthresh;
+ u32 ame_en_lmvc;
+ u32 ame_x_mult;
+ u32 cme_enable_4mv;
+ u32 cme_enable_1mv;
+ u32 hme_enable_16x8mv;
+ u32 hme_enable_8x16mv;
+ u32 cme_mv_weight;
+ u32 cme_mv_cost;
+ u32 ame_mult_mv;
+ u32 ame_shift_mv;
+ u32 hme_forceto1mv_en;
+ u32 hme_2mv_cost;
+ u32 hme_pred_mode;
+ u32 hme_sc_rnge;
+ u32 hme_sw_rnge;
+ u32 output_format;
+ u32 timestamp_enab;
+ u32 initial_pts_enab;
+ u32 initial_pts;
+};
+
+struct vpu_enc_static_params {
+ u32 param_change;
+ u32 gop_length;
+ u32 rate_control_bitrate;
+ u32 rate_control_bitrate_min;
+ u32 rate_control_bitrate_max;
+ u32 rate_control_content_models;
+ u32 rate_control_iframe_maxsize;
+ u32 rate_control_qp_init;
+ u32 rate_control_islice_qp;
+ u32 rate_control_pslice_qp;
+ u32 rate_control_bslice_qp;
+ u32 adaptive_quantization;
+ u32 aq_variance;
+ u32 cost_optimization;
+ u32 fdlp_mode;
+ u32 enable_isegbframes;
+ u32 enable_adaptive_keyratio;
+ u32 keyratio_imin;
+ u32 keyratio_imax;
+ u32 keyratio_pmin;
+ u32 keyratio_pmax;
+ u32 keyratio_bmin;
+ u32 keyratio_bmax;
+ s32 keyratio_istep;
+ s32 keyratio_pstep;
+ s32 keyratio_bstep;
+ u32 enable_paff;
+ u32 enable_b_frame_ref;
+ u32 enable_adaptive_gop;
+ u32 enable_closed_gop;
+ u32 open_gop_refresh_freq;
+ u32 enable_adaptive_sc;
+ u32 enable_fade_detection;
+ s32 fade_detection_threshold;
+ u32 enable_repeat_b;
+ u32 enable_low_delay_b;
+};
+
+struct vpu_enc_dynamic_params {
+ u32 param_change;
+ u32 rows_per_slice;
+ u32 mbaff_enable;
+ u32 dbf_enable;
+ u32 field_source;
+ u32 gop_b_length;
+ u32 mb_group_size;
+ u32 cbr_rows_per_group;
+ u32 skip_enable;
+ u32 pts_bits_0_to_31;
+ u32 pts_bit_32;
+ u32 rm_expsv_cff;
+ u32 const_ipred;
+ s32 chr_qp_offset;
+ u32 intra_mb_qp_offset;
+ u32 h264_cabac_init_method;
+ u32 h264_cabac_init_idc;
+ u32 h264_cabac_enable;
+ s32 alpha_c0_offset_div2;
+ s32 beta_offset_div2;
+ u32 intra_prefresh_y0;
+ u32 intra_prefresh_y1;
+ u32 dbg_dump_rec_src;
+};
+
+struct vpu_enc_expert_mode_param {
+ struct vpu_enc_calib_params calib_param;
+ struct vpu_enc_config_params config_param;
+ struct vpu_enc_static_params static_param;
+ struct vpu_enc_dynamic_params dynamic_param;
+};
+
+enum MEDIAIP_ENC_FMT {
+ MEDIAIP_ENC_FMT_H264 = 0,
+ MEDIAIP_ENC_FMT_VC1,
+ MEDIAIP_ENC_FMT_MPEG2,
+ MEDIAIP_ENC_FMT_MPEG4SP,
+ MEDIAIP_ENC_FMT_H263,
+ MEDIAIP_ENC_FMT_MPEG1,
+ MEDIAIP_ENC_FMT_SHORT_HEADER,
+ MEDIAIP_ENC_FMT_NULL
+};
+
+enum MEDIAIP_ENC_PROFILE {
+ MEDIAIP_ENC_PROF_MPEG2_SP = 0,
+ MEDIAIP_ENC_PROF_MPEG2_MP,
+ MEDIAIP_ENC_PROF_MPEG2_HP,
+ MEDIAIP_ENC_PROF_H264_BP,
+ MEDIAIP_ENC_PROF_H264_MP,
+ MEDIAIP_ENC_PROF_H264_HP,
+ MEDIAIP_ENC_PROF_MPEG4_SP,
+ MEDIAIP_ENC_PROF_MPEG4_ASP,
+ MEDIAIP_ENC_PROF_VC1_SP,
+ MEDIAIP_ENC_PROF_VC1_MP,
+ MEDIAIP_ENC_PROF_VC1_AP
+};
+
+enum MEDIAIP_ENC_BITRATE_MODE {
+ MEDIAIP_ENC_BITRATE_MODE_VBR = 0x00000001,
+ MEDIAIP_ENC_BITRATE_MODE_CBR = 0x00000002,
+ MEDIAIP_ENC_BITRATE_MODE_CONSTANT_QP = 0x00000004
+};
+
+struct vpu_enc_memory_resource {
+ u32 phys;
+ u32 virt;
+ u32 size;
+};
+
+struct vpu_enc_param {
+ enum MEDIAIP_ENC_FMT codec_mode;
+ enum MEDIAIP_ENC_PROFILE profile;
+ u32 level;
+
+ struct vpu_enc_memory_resource enc_mem_desc;
+
+ u32 frame_rate;
+ u32 src_stride;
+ u32 src_width;
+ u32 src_height;
+ u32 src_offset_x;
+ u32 src_offset_y;
+ u32 src_crop_width;
+ u32 src_crop_height;
+ u32 out_width;
+ u32 out_height;
+ u32 iframe_interval;
+ u32 bframes;
+ u32 low_latency_mode;
+
+ enum MEDIAIP_ENC_BITRATE_MODE bitrate_mode;
+ u32 target_bitrate;
+ u32 max_bitrate;
+ u32 min_bitrate;
+ u32 init_slice_qp;
+};
+
+struct vpu_enc_mem_pool {
+ struct vpu_enc_memory_resource enc_frames[WINDSOR_MAX_SRC_FRAMES];
+ struct vpu_enc_memory_resource ref_frames[WINDSOR_MAX_REF_FRAMES];
+ struct vpu_enc_memory_resource act_frame;
+};
+
+struct vpu_enc_encoding_status {
+ u32 frame_id;
+ u32 error_flag; /* error type */
+ u32 mb_y;
+ u32 mb_x;
+ u32 reserved[12];
+};
+
+struct vpu_enc_dsa_status {
+ u32 frame_id;
+ u32 dsa_cyle;
+ u32 mb_y;
+ u32 mb_x;
+ u32 reserved[4];
+};
+
+struct vpu_enc_ctrl {
+ struct vpu_enc_yuv_desc *yuv_desc;
+ struct vpu_rpc_buffer_desc *stream_desc;
+ struct vpu_enc_expert_mode_param *expert;
+ struct vpu_enc_param *param;
+ struct vpu_enc_mem_pool *pool;
+ struct vpu_enc_encoding_status *status;
+ struct vpu_enc_dsa_status *dsa;
+};
+
+struct vpu_enc_host_ctrls {
+ struct vpu_enc_ctrl ctrls[VID_API_NUM_STREAMS];
+};
+
+struct windsor_pic_info {
+ u32 frame_id;
+ u32 pic_encod_done;
+ u32 pic_type;
+ u32 skipped_frame;
+ u32 error_flag;
+ u32 psnr;
+ u32 flush_done;
+ u32 mb_y;
+ u32 mb_x;
+ u32 frame_size;
+ u32 frame_enc_ttl_cycles;
+ u32 frame_enc_ttl_frm_cycles;
+ u32 frame_enc_ttl_slc_cycles;
+ u32 frame_enc_ttl_enc_cycles;
+ u32 frame_enc_ttl_hme_cycles;
+ u32 frame_enc_ttl_dsa_cycles;
+ u32 frame_enc_fw_cycles;
+ u32 frame_crc;
+ u32 num_interrupts_1;
+ u32 num_interrupts_2;
+ u32 poc;
+ u32 ref_info;
+ u32 pic_num;
+ u32 pic_activity;
+ u32 scene_change;
+ u32 mb_stats;
+ u32 enc_cache_count0;
+ u32 enc_cache_count1;
+ u32 mtl_wr_strb_cnt;
+ u32 mtl_rd_strb_cnt;
+ u32 str_buff_wptr;
+ u32 diagnosticEvents;
+ u32 proc_iacc_tot_rd_cnt;
+ u32 proc_dacc_tot_rd_cnt;
+ u32 proc_dacc_tot_wr_cnt;
+ u32 proc_dacc_reg_rd_cnt;
+ u32 proc_dacc_reg_wr_cnt;
+ u32 proc_dacc_rng_rd_cnt;
+ u32 proc_dacc_rng_wr_cnt;
+ s32 tv_s;
+ u32 tv_ns;
+};
+
+u32 vpu_windsor_get_data_size(void)
+{
+ return sizeof(struct vpu_enc_host_ctrls);
+}
+
+static struct vpu_enc_yuv_desc *get_yuv_desc(struct vpu_shared_addr *shared,
+ u32 instance)
+{
+ struct vpu_enc_host_ctrls *hcs = shared->priv;
+
+ return hcs->ctrls[instance].yuv_desc;
+}
+
+static struct vpu_enc_mem_pool *get_mem_pool(struct vpu_shared_addr *shared,
+ u32 instance)
+{
+ struct vpu_enc_host_ctrls *hcs = shared->priv;
+
+ return hcs->ctrls[instance].pool;
+}
+
+static struct vpu_rpc_buffer_desc *get_stream_buf_desc(struct vpu_shared_addr *shared,
+ u32 instance)
+{
+ struct vpu_enc_host_ctrls *hcs = shared->priv;
+
+ return hcs->ctrls[instance].stream_desc;
+}
+
+static struct vpu_enc_expert_mode_param *get_expert_param(struct vpu_shared_addr *shared,
+ u32 instance)
+{
+ struct vpu_enc_host_ctrls *hcs = shared->priv;
+
+ return hcs->ctrls[instance].expert;
+}
+
+static struct vpu_enc_param *get_enc_param(struct vpu_shared_addr *shared, u32 instance)
+{
+ struct vpu_enc_host_ctrls *hcs = shared->priv;
+
+ return hcs->ctrls[instance].param;
+}
+
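+/*
+ * Stream buffer pointers reported by the firmware have bit 31 cleared;
+ * OR it back in to form the full address (the encoder presumably sees
+ * DDR through an alias at 0x80000000).
+ */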
+static u32 get_ptr(u32 ptr)
+{
+ return (ptr | 0x80000000);
+}
+
+void vpu_windsor_init_rpc(struct vpu_shared_addr *shared,
+ struct vpu_buffer *rpc, dma_addr_t boot_addr)
+{
+ unsigned long base_phy_addr;
+ unsigned long phy_addr;
+ unsigned long offset;
+ struct windsor_iface *iface;
+ struct windsor_ctrl_iface *ctrl;
+ struct vpu_enc_host_ctrls *hcs;
+ unsigned int i;
+
+ if (rpc->phys < boot_addr)
+ return;
+
+ base_phy_addr = rpc->phys - boot_addr;
+ iface = rpc->virt;
+ shared->iface = iface;
+ shared->boot_addr = boot_addr;
+ hcs = shared->priv;
+
+ iface->exec_base_addr = base_phy_addr;
+ iface->exec_area_size = rpc->length;
+
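+ /*
+ * Carve the RPC buffer up in firmware (boot-relative) address space:
+ * command ring, message ring, then per-stream control interfaces and
+ * their parameter blocks.
+ */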
+ offset = sizeof(struct windsor_iface);
+ phy_addr = base_phy_addr + offset;
+ shared->cmd_desc = &iface->cmd_buffer_desc;
+ shared->cmd_mem_vir = rpc->virt + offset;
+ iface->cmd_buffer_desc.start =
+ iface->cmd_buffer_desc.rptr =
+ iface->cmd_buffer_desc.wptr = phy_addr;
+ iface->cmd_buffer_desc.end = iface->cmd_buffer_desc.start + CMD_SIZE;
+
+ offset += CMD_SIZE;
+ phy_addr = base_phy_addr + offset;
+ shared->msg_desc = &iface->msg_buffer_desc;
+ shared->msg_mem_vir = rpc->virt + offset;
+ iface->msg_buffer_desc.start =
+ iface->msg_buffer_desc.wptr =
+ iface->msg_buffer_desc.rptr = phy_addr;
+ iface->msg_buffer_desc.end = iface->msg_buffer_desc.start + MSG_SIZE;
+
+ offset += MSG_SIZE;
+ for (i = 0; i < ARRAY_SIZE(iface->ctrl_iface); i++) {
+ iface->ctrl_iface[i] = base_phy_addr + offset;
+ offset += sizeof(struct windsor_ctrl_iface);
+ }
+ for (i = 0; i < ARRAY_SIZE(iface->ctrl_iface); i++) {
+ ctrl = rpc->virt + (iface->ctrl_iface[i] - base_phy_addr);
+
+ ctrl->enc_yuv_buffer_desc = base_phy_addr + offset;
+ hcs->ctrls[i].yuv_desc = rpc->virt + offset;
+ offset += sizeof(struct vpu_enc_yuv_desc);
+
+ ctrl->enc_stream_buffer_desc = base_phy_addr + offset;
+ hcs->ctrls[i].stream_desc = rpc->virt + offset;
+ offset += sizeof(struct vpu_rpc_buffer_desc);
+
+ ctrl->enc_expert_mode_param = base_phy_addr + offset;
+ hcs->ctrls[i].expert = rpc->virt + offset;
+ offset += sizeof(struct vpu_enc_expert_mode_param);
+
+ ctrl->enc_param = base_phy_addr + offset;
+ hcs->ctrls[i].param = rpc->virt + offset;
+ offset += sizeof(struct vpu_enc_param);
+
+ ctrl->enc_mem_pool = base_phy_addr + offset;
+ hcs->ctrls[i].pool = rpc->virt + offset;
+ offset += sizeof(struct vpu_enc_mem_pool);
+
+ ctrl->enc_encoding_status = base_phy_addr + offset;
+ hcs->ctrls[i].status = rpc->virt + offset;
+ offset += sizeof(struct vpu_enc_encoding_status);
+
+ ctrl->enc_dsa_status = base_phy_addr + offset;
+ hcs->ctrls[i].dsa = rpc->virt + offset;
+ offset += sizeof(struct vpu_enc_dsa_status);
+ }
+
+ rpc->bytesused = offset;
+}
+
+void vpu_windsor_set_log_buf(struct vpu_shared_addr *shared, struct vpu_buffer *log)
+{
+ struct windsor_iface *iface = shared->iface;
+
+ iface->log_buffer_desc.start =
+ iface->log_buffer_desc.wptr =
+ iface->log_buffer_desc.rptr = log->phys - shared->boot_addr;
+ iface->log_buffer_desc.end = iface->log_buffer_desc.start + log->length;
+}
+
+void vpu_windsor_set_system_cfg(struct vpu_shared_addr *shared,
+ u32 regs_base, void __iomem *regs, u32 core_id)
+{
+ struct windsor_iface *iface = shared->iface;
+ struct vpu_rpc_system_config *config = &iface->system_config;
+
+ vpu_imx8q_set_system_cfg_common(config, regs_base, core_id);
+}
+
+int vpu_windsor_get_stream_buffer_size(struct vpu_shared_addr *shared)
+{
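+ /* fixed 3 MiB stream buffer */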
+ return 0x300000;
+}
+
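+/* map generic VPU command and message ids to windsor firmware ids */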
+static struct vpu_pair windsor_cmds[] = {
+ {VPU_CMD_ID_CONFIGURE_CODEC, GTB_ENC_CMD_CONFIGURE_CODEC},
+ {VPU_CMD_ID_START, GTB_ENC_CMD_STREAM_START},
+ {VPU_CMD_ID_STOP, GTB_ENC_CMD_STREAM_STOP},
+ {VPU_CMD_ID_FRAME_ENCODE, GTB_ENC_CMD_FRAME_ENCODE},
+ {VPU_CMD_ID_SNAPSHOT, GTB_ENC_CMD_SNAPSHOT},
+ {VPU_CMD_ID_FIRM_RESET, GTB_ENC_CMD_FIRM_RESET},
+ {VPU_CMD_ID_UPDATE_PARAMETER, GTB_ENC_CMD_PARAMETER_UPD},
+ {VPU_CMD_ID_DEBUG, GTB_ENC_CMD_FW_STATUS}
+};
+
+static struct vpu_pair windsor_msgs[] = {
+ {VPU_MSG_ID_RESET_DONE, VID_API_ENC_EVENT_RESET_DONE},
+ {VPU_MSG_ID_START_DONE, VID_API_ENC_EVENT_START_DONE},
+ {VPU_MSG_ID_STOP_DONE, VID_API_ENC_EVENT_STOP_DONE},
+ {VPU_MSG_ID_FRAME_INPUT_DONE, VID_API_ENC_EVENT_FRAME_INPUT_DONE},
+ {VPU_MSG_ID_ENC_DONE, VID_API_ENC_EVENT_FRAME_DONE},
+ {VPU_MSG_ID_FRAME_RELEASE, VID_API_ENC_EVENT_FRAME_RELEASE},
+ {VPU_MSG_ID_MEM_REQUEST, VID_API_ENC_EVENT_MEM_REQUEST},
+ {VPU_MSG_ID_PARAM_UPD_DONE, VID_API_ENC_EVENT_PARA_UPD_DONE},
+ {VPU_MSG_ID_FIRMWARE_XCPT, VID_API_ENC_EVENT_FIRMWARE_XCPT},
+};
+
+int vpu_windsor_pack_cmd(struct vpu_rpc_event *pkt, u32 index, u32 id, void *data)
+{
+ int ret;
+
+ ret = vpu_find_dst_by_src(windsor_cmds, ARRAY_SIZE(windsor_cmds), id);
+ if (ret < 0)
+ return ret;
+ pkt->hdr.id = ret;
+ pkt->hdr.num = 0;
+ pkt->hdr.index = index;
+ if (id == VPU_CMD_ID_FRAME_ENCODE) {
+ s64 timestamp = *(s64 *)data;
+ struct timespec64 ts = ns_to_timespec64(timestamp);
+
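+ /* the encode command carries the frame timestamp as a (sec, nsec) pair */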
+ pkt->hdr.num = 2;
+ pkt->data[0] = ts.tv_sec;
+ pkt->data[1] = ts.tv_nsec;
+ }
+
+ return 0;
+}
+
+int vpu_windsor_convert_msg_id(u32 id)
+{
+ return vpu_find_src_by_dst(windsor_msgs, ARRAY_SIZE(windsor_msgs), id);
+}
+
+static void vpu_windsor_unpack_pic_info(struct vpu_rpc_event *pkt, void *data)
+{
+ struct vpu_enc_pic_info *info = data;
+ struct windsor_pic_info *windsor = (struct windsor_pic_info *)pkt->data;
+ struct timespec64 ts = { windsor->tv_s, windsor->tv_ns };
+
+ info->frame_id = windsor->frame_id;
+ switch (windsor->pic_type) {
+ case MEDIAIP_ENC_PIC_TYPE_I_FRAME:
+ case MEDIAIP_ENC_PIC_TYPE_IDR_FRAME:
+ info->pic_type = V4L2_BUF_FLAG_KEYFRAME;
+ break;
+ case MEDIAIP_ENC_PIC_TYPE_P_FRAME:
+ info->pic_type = V4L2_BUF_FLAG_PFRAME;
+ break;
+ case MEDIAIP_ENC_PIC_TYPE_B_FRAME:
+ info->pic_type = V4L2_BUF_FLAG_BFRAME;
+ break;
+ default:
+ break;
+ }
+ info->skipped_frame = windsor->skipped_frame;
+ info->error_flag = windsor->error_flag;
+ info->psnr = windsor->psnr;
+ info->frame_size = windsor->frame_size;
+ info->wptr = get_ptr(windsor->str_buff_wptr);
+ info->crc = windsor->frame_crc;
+ info->timestamp = timespec64_to_ns(&ts);
+}
+
+static void vpu_windsor_unpack_mem_req(struct vpu_rpc_event *pkt, void *data)
+{
+ struct vpu_pkt_mem_req_data *req_data = data;
+
+ req_data->enc_frame_size = pkt->data[0];
+ req_data->enc_frame_num = pkt->data[1];
+ req_data->ref_frame_size = pkt->data[2];
+ req_data->ref_frame_num = pkt->data[3];
+ req_data->act_buf_size = pkt->data[4];
+ req_data->act_buf_num = 1;
+}
+
+int vpu_windsor_unpack_msg_data(struct vpu_rpc_event *pkt, void *data)
+{
+ if (!pkt || !data)
+ return -EINVAL;
+
+ switch (pkt->hdr.id) {
+ case VID_API_ENC_EVENT_FRAME_DONE:
+ vpu_windsor_unpack_pic_info(pkt, data);
+ break;
+ case VID_API_ENC_EVENT_MEM_REQUEST:
+ vpu_windsor_unpack_mem_req(pkt, data);
+ break;
+ case VID_API_ENC_EVENT_FRAME_RELEASE:
+ *(u32 *)data = pkt->data[0];
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int vpu_windsor_fill_yuv_frame(struct vpu_shared_addr *shared,
+ u32 instance,
+ struct vb2_buffer *vb)
+{
+ struct vpu_enc_yuv_desc *desc;
+ struct vb2_v4l2_buffer *vbuf;
+
+ if (instance >= VID_API_NUM_STREAMS)
+ return -EINVAL;
+
+ desc = get_yuv_desc(shared, instance);
+
+ vbuf = to_vb2_v4l2_buffer(vb);
+ desc->frame_id = vbuf->sequence;
+ if (vbuf->flags & V4L2_BUF_FLAG_KEYFRAME)
+ desc->key_frame = 1;
+ else
+ desc->key_frame = 0;
+ desc->luma_base = vpu_get_vb_phy_addr(vb, 0);
+ desc->chroma_base = vpu_get_vb_phy_addr(vb, 1);
+
+ return 0;
+}
+
+int vpu_windsor_input_frame(struct vpu_shared_addr *shared,
+ struct vpu_inst *inst, struct vb2_buffer *vb)
+{
+ vpu_windsor_fill_yuv_frame(shared, inst->id, vb);
+ return vpu_session_encode_frame(inst, vb->timestamp);
+}
+
+int vpu_windsor_config_memory_resource(struct vpu_shared_addr *shared,
+ u32 instance,
+ u32 type,
+ u32 index,
+ struct vpu_buffer *buf)
+{
+ struct vpu_enc_mem_pool *pool;
+ struct vpu_enc_memory_resource *res;
+
+ if (instance >= VID_API_NUM_STREAMS)
+ return -EINVAL;
+
+ pool = get_mem_pool(shared, instance);
+
+ switch (type) {
+ case MEM_RES_ENC:
+ if (index >= ARRAY_SIZE(pool->enc_frames))
+ return -EINVAL;
+ res = &pool->enc_frames[index];
+ break;
+ case MEM_RES_REF:
+ if (index >= ARRAY_SIZE(pool->ref_frames))
+ return -EINVAL;
+ res = &pool->ref_frames[index];
+ break;
+ case MEM_RES_ACT:
+ if (index)
+ return -EINVAL;
+ res = &pool->act_frame;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ res->phys = buf->phys;
+ res->virt = buf->phys - shared->boot_addr;
+ res->size = buf->length;
+
+ return 0;
+}
+
+int vpu_windsor_config_stream_buffer(struct vpu_shared_addr *shared,
+ u32 instance,
+ struct vpu_buffer *buf)
+{
+ struct vpu_rpc_buffer_desc *desc;
+ struct vpu_enc_expert_mode_param *expert;
+
+ desc = get_stream_buf_desc(shared, instance);
+ expert = get_expert_param(shared, instance);
+
+ desc->start = buf->phys;
+ desc->wptr = buf->phys;
+ desc->rptr = buf->phys;
+ desc->end = buf->phys + buf->length;
+
+ expert->calib_param.mem_chunk_phys_addr = 0;
+ expert->calib_param.mem_chunk_virt_addr = 0;
+ expert->calib_param.mem_chunk_size = 0;
+ expert->calib_param.cb_base = buf->phys;
+ expert->calib_param.cb_size = buf->length;
+
+ return 0;
+}
+
+int vpu_windsor_update_stream_buffer(struct vpu_shared_addr *shared,
+ u32 instance, u32 ptr, bool write)
+{
+ struct vpu_rpc_buffer_desc *desc;
+
+ desc = get_stream_buf_desc(shared, instance);
+
+ /* update wptr/rptr after data is written or read */
+ mb();
+ if (write)
+ desc->wptr = ptr;
+ else
+ desc->rptr = ptr;
+
+ return 0;
+}
+
+int vpu_windsor_get_stream_buffer_desc(struct vpu_shared_addr *shared,
+ u32 instance, struct vpu_rpc_buffer_desc *desc)
+{
+ struct vpu_rpc_buffer_desc *rpc_desc;
+
+ rpc_desc = get_stream_buf_desc(shared, instance);
+ if (desc) {
+ desc->wptr = get_ptr(rpc_desc->wptr);
+ desc->rptr = get_ptr(rpc_desc->rptr);
+ desc->start = get_ptr(rpc_desc->start);
+ desc->end = get_ptr(rpc_desc->end);
+ }
+
+ return 0;
+}
+
+u32 vpu_windsor_get_version(struct vpu_shared_addr *shared)
+{
+ struct windsor_iface *iface = shared->iface;
+
+ return iface->fw_version;
+}
+
+static int vpu_windsor_set_frame_rate(struct vpu_enc_expert_mode_param *expert,
+ struct vpu_encode_params *params)
+{
+ expert->config_param.frame_rate_num = params->frame_rate.numerator;
+ expert->config_param.frame_rate_den = params->frame_rate.denominator;
+
+ return 0;
+}
+
+static int vpu_windsor_set_format(struct vpu_enc_param *param, u32 pixelformat)
+{
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_H264:
+ param->codec_mode = MEDIAIP_ENC_FMT_H264;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vpu_windsor_set_profile(struct vpu_enc_param *param, u32 profile)
+{
+ switch (profile) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ param->profile = MEDIAIP_ENC_PROF_H264_BP;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+ param->profile = MEDIAIP_ENC_PROF_H264_MP;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+ param->profile = MEDIAIP_ENC_PROF_H264_HP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const u32 h264_level[] = {
+ [V4L2_MPEG_VIDEO_H264_LEVEL_1_0] = 10,
+ [V4L2_MPEG_VIDEO_H264_LEVEL_1B] = 14,
+ [V4L2_MPEG_VIDEO_H264_LEVEL_1_1] = 11,
+ [V4L2_MPEG_VIDEO_H264_LEVEL_1_2] = 12,
+ [V4L2_MPEG_VIDEO_H264_LEVEL_1_3] = 13,
+ [V4L2_MPEG_VIDEO_H264_LEVEL_2_0] = 20,
+ [V4L2_MPEG_VIDEO_H264_LEVEL_2_1] = 21,
+ [V4L2_MPEG_VIDEO_H264_LEVEL_2_2] = 22,
+ [V4L2_MPEG_VIDEO_H264_LEVEL_3_0] = 30,
+ [V4L2_MPEG_VIDEO_H264_LEVEL_3_1] = 31,
+ [V4L2_MPEG_VIDEO_H264_LEVEL_3_2] = 32,
+ [V4L2_MPEG_VIDEO_H264_LEVEL_4_0] = 40,
+ [V4L2_MPEG_VIDEO_H264_LEVEL_4_1] = 41,
+ [V4L2_MPEG_VIDEO_H264_LEVEL_4_2] = 42,
+ [V4L2_MPEG_VIDEO_H264_LEVEL_5_0] = 50,
+ [V4L2_MPEG_VIDEO_H264_LEVEL_5_1] = 51
+};
+
+static int vpu_windsor_set_level(struct vpu_enc_param *param, u32 level)
+{
+ if (level >= ARRAY_SIZE(h264_level))
+ return -EINVAL;
+
+ param->level = h264_level[level];
+
+ return 0;
+}
+
+static int vpu_windsor_set_size(struct vpu_enc_param *windsor,
+ struct vpu_encode_params *params)
+{
+ windsor->src_stride = params->src_stride;
+ windsor->src_width = params->src_width;
+ windsor->src_height = params->src_height;
+ windsor->src_offset_x = params->crop.left;
+ windsor->src_offset_y = params->crop.top;
+ windsor->src_crop_width = params->crop.width;
+ windsor->src_crop_height = params->crop.height;
+ windsor->out_width = params->out_width;
+ windsor->out_height = params->out_height;
+
+ return 0;
+}
+
+static int vpu_windsor_set_gop(struct vpu_enc_param *param, u32 gop)
+{
+ param->iframe_interval = gop;
+
+ return 0;
+}
+
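+/* B frames introduce reordering delay, so they are mutually exclusive with low-latency mode */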
+static int vpu_windsor_set_bframes(struct vpu_enc_param *param, u32 bframes)
+{
+ if (bframes) {
+ param->low_latency_mode = 0;
+ param->bframes = bframes;
+ } else {
+ param->low_latency_mode = 1;
+ param->bframes = 0;
+ }
+
+ return 0;
+}
+
+static int vpu_windsor_set_bitrate_mode(struct vpu_enc_param *param, u32 rc_enable, u32 mode)
+{
+ if (!rc_enable)
+ param->bitrate_mode = MEDIAIP_ENC_BITRATE_MODE_CONSTANT_QP;
+ else if (mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
+ param->bitrate_mode = MEDIAIP_ENC_BITRATE_MODE_VBR;
+ else
+ param->bitrate_mode = MEDIAIP_ENC_BITRATE_MODE_CBR;
+
+ return 0;
+}
+
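+/* the firmware expects bitrates in units of WINDSOR_BITRATE_UNIT (1024) bits per second */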
+static u32 vpu_windsor_bitrate(u32 bitrate)
+{
+ return DIV_ROUND_CLOSEST(bitrate, WINDSOR_BITRATE_UNIT);
+}
+
+static int vpu_windsor_set_bitrate(struct vpu_enc_param *windsor,
+ struct vpu_encode_params *params)
+{
+ windsor->target_bitrate = vpu_windsor_bitrate(params->bitrate);
+ windsor->min_bitrate = vpu_windsor_bitrate(params->bitrate_min);
+ windsor->max_bitrate = vpu_windsor_bitrate(params->bitrate_max);
+
+ return 0;
+}
+
+static int vpu_windsor_set_qp(struct vpu_enc_expert_mode_param *expert,
+ struct vpu_encode_params *params)
+{
+ expert->static_param.rate_control_islice_qp = params->i_frame_qp;
+ expert->static_param.rate_control_pslice_qp = params->p_frame_qp;
+ expert->static_param.rate_control_bslice_qp = params->b_frame_qp;
+
+ return 0;
+}
+
+static int vpu_windsor_set_sar(struct vpu_enc_expert_mode_param *expert,
+ struct vpu_encode_params *params)
+{
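+ /* aspect_ratio_idc 255 means extended SAR with explicit width/height (H.264 Table E-1) */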
+ expert->config_param.h264_aspect_ratio_present = params->sar.enable;
+ if (params->sar.idc == V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED)
+ expert->config_param.aspect_ratio = WINDSOR_H264_EXTENDED_SAR;
+ else
+ expert->config_param.aspect_ratio = params->sar.idc;
+ expert->config_param.h264_aspect_ratio_sar_width = params->sar.width;
+ expert->config_param.h264_aspect_ratio_sar_height = params->sar.height;
+
+ return 0;
+}
+
+static int vpu_windsor_set_color(struct vpu_enc_expert_mode_param *expert,
+ struct vpu_encode_params *params)
+{
+ expert->config_param.h264_video_type_present = 1;
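+ /* video_format 5 = unspecified (H.264 Table E-2) */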
+ expert->config_param.h264_video_format = 5;
+ expert->config_param.h264_video_colour_descriptor = 1;
+ expert->config_param.h264_video_colour_primaries =
+ vpu_color_cvrt_primaries_v2i(params->color.primaries);
+ expert->config_param.h264_video_transfer_char =
+ vpu_color_cvrt_transfers_v2i(params->color.transfer);
+ expert->config_param.h264_video_matrix_coeff =
+ vpu_color_cvrt_matrix_v2i(params->color.matrix);
+ expert->config_param.h264_video_full_range =
+ vpu_color_cvrt_full_range_v2i(params->color.full_range);
+ return 0;
+}
+
+static int vpu_windsor_update_bitrate(struct vpu_shared_addr *shared,
+ u32 instance, struct vpu_encode_params *params)
+{
+ struct vpu_enc_param *windsor;
+ struct vpu_enc_expert_mode_param *expert;
+
+ windsor = get_enc_param(shared, instance);
+ expert = get_expert_param(shared, instance);
+
+ if (windsor->bitrate_mode != MEDIAIP_ENC_BITRATE_MODE_CBR)
+ return 0;
+ if (!params->rc_enable)
+ return 0;
+ if (vpu_windsor_bitrate(params->bitrate) == windsor->target_bitrate)
+ return 0;
+
+ vpu_windsor_set_bitrate(windsor, params);
+ expert->static_param.rate_control_bitrate = windsor->target_bitrate;
+ expert->static_param.rate_control_bitrate_min = windsor->min_bitrate;
+ expert->static_param.rate_control_bitrate_max = windsor->max_bitrate;
+
+ return 0;
+}
+
+static int vpu_windsor_set_params(struct vpu_shared_addr *shared,
+ u32 instance, struct vpu_encode_params *params)
+{
+ struct vpu_enc_param *windsor;
+ int ret;
+
+ windsor = get_enc_param(shared, instance);
+
+ if (params->input_format != V4L2_PIX_FMT_NV12 &&
+ params->input_format != V4L2_PIX_FMT_NV12M)
+ return -EINVAL;
+
+ ret = vpu_windsor_set_format(windsor, params->codec_format);
+ if (ret)
+ return ret;
+ vpu_windsor_set_profile(windsor, params->profile);
+ vpu_windsor_set_level(windsor, params->level);
+ vpu_windsor_set_size(windsor, params);
+ vpu_windsor_set_gop(windsor, params->gop_length);
+ vpu_windsor_set_bframes(windsor, params->bframes);
+ vpu_windsor_set_bitrate_mode(windsor, params->rc_enable, params->rc_mode);
+ vpu_windsor_set_bitrate(windsor, params);
+ windsor->init_slice_qp = params->i_frame_qp;
+
+ if (!params->frame_rate.numerator)
+ return -EINVAL;
+ windsor->frame_rate = params->frame_rate.denominator / params->frame_rate.numerator;
+
+ return 0;
+}
+
+static int vpu_windsor_update_params(struct vpu_shared_addr *shared,
+ u32 instance, struct vpu_encode_params *params)
+{
+ struct vpu_enc_expert_mode_param *expert;
+
+ expert = get_expert_param(shared, instance);
+
+ vpu_windsor_set_frame_rate(expert, params);
+ vpu_windsor_set_qp(expert, params);
+ vpu_windsor_set_sar(expert, params);
+ vpu_windsor_set_color(expert, params);
+ vpu_windsor_update_bitrate(shared, instance, params);
+ /* expert->config_param.iac_sc_threshold = 0; */
+
+ return 0;
+}
+
+int vpu_windsor_set_encode_params(struct vpu_shared_addr *shared,
+ u32 instance, struct vpu_encode_params *params, u32 update)
+{
+ if (!params)
+ return -EINVAL;
+
+ if (!update)
+ return vpu_windsor_set_params(shared, instance, params);
+
+ return vpu_windsor_update_params(shared, instance, params);
+}
+
+u32 vpu_windsor_get_max_instance_count(struct vpu_shared_addr *shared)
+{
+ struct windsor_iface *iface = shared->iface;
+
+ return iface->max_streams;
+}
diff --git a/drivers/media/platform/amphion/vpu_windsor.h b/drivers/media/platform/amphion/vpu_windsor.h
new file mode 100644
index 000000000000..3fbb6556dbca
--- /dev/null
+++ b/drivers/media/platform/amphion/vpu_windsor.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020-2021 NXP
+ */
+
+#ifndef _AMPHION_VPU_WINDSOR_H
+#define _AMPHION_VPU_WINDSOR_H
+
+u32 vpu_windsor_get_data_size(void);
+void vpu_windsor_init_rpc(struct vpu_shared_addr *shared,
+ struct vpu_buffer *rpc, dma_addr_t boot_addr);
+void vpu_windsor_set_log_buf(struct vpu_shared_addr *shared, struct vpu_buffer *log);
+void vpu_windsor_set_system_cfg(struct vpu_shared_addr *shared,
+ u32 regs_base, void __iomem *regs, u32 core_id);
+int vpu_windsor_get_stream_buffer_size(struct vpu_shared_addr *shared);
+int vpu_windsor_pack_cmd(struct vpu_rpc_event *pkt, u32 index, u32 id, void *data);
+int vpu_windsor_convert_msg_id(u32 msg_id);
+int vpu_windsor_unpack_msg_data(struct vpu_rpc_event *pkt, void *data);
+int vpu_windsor_config_memory_resource(struct vpu_shared_addr *shared,
+ u32 instance, u32 type, u32 index,
+ struct vpu_buffer *buf);
+int vpu_windsor_config_stream_buffer(struct vpu_shared_addr *shared,
+ u32 instance, struct vpu_buffer *buf);
+int vpu_windsor_update_stream_buffer(struct vpu_shared_addr *shared,
+ u32 instance, u32 ptr, bool write);
+int vpu_windsor_get_stream_buffer_desc(struct vpu_shared_addr *shared,
+ u32 instance, struct vpu_rpc_buffer_desc *desc);
+u32 vpu_windsor_get_version(struct vpu_shared_addr *shared);
+int vpu_windsor_set_encode_params(struct vpu_shared_addr *shared,
+ u32 instance,
+ struct vpu_encode_params *params,
+ u32 update);
+int vpu_windsor_input_frame(struct vpu_shared_addr *shared,
+ struct vpu_inst *inst, struct vb2_buffer *vb);
+u32 vpu_windsor_get_max_instance_count(struct vpu_shared_addr *shared);
+
+#endif
diff --git a/drivers/media/platform/aspeed/Kconfig b/drivers/media/platform/aspeed/Kconfig
new file mode 100644
index 000000000000..c871eda33570
--- /dev/null
+++ b/drivers/media/platform/aspeed/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Aspeed media platform drivers"
+
+config VIDEO_ASPEED
+ tristate "Aspeed AST2400 and AST2500 Video Engine driver"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ Support for the Aspeed Video Engine (VE) embedded in the Aspeed
+ AST2400 and AST2500 SoCs. The VE can capture and compress video data
+ from digital or analog sources.
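For example, a .config fragment along these lines (illustrative; the selected VIDEOBUF2_DMA_CONTIG dependency is resolved automatically by Kconfig) builds the driver as a module:

	CONFIG_V4L_PLATFORM_DRIVERS=y
	CONFIG_VIDEO_DEV=y
	CONFIG_VIDEO_ASPEED=m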
diff --git a/drivers/media/platform/aspeed/Makefile b/drivers/media/platform/aspeed/Makefile
new file mode 100644
index 000000000000..1979af63dadd
--- /dev/null
+++ b/drivers/media/platform/aspeed/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_VIDEO_ASPEED) += aspeed-video.o
diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed/aspeed-video.c
index 7a24daf7165a..b937dbcbe9e0 100644
--- a/drivers/media/platform/aspeed-video.c
+++ b/drivers/media/platform/aspeed/aspeed-video.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
@@ -33,6 +34,8 @@
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-contig.h>
+#define ASPEED_VIDEO_V4L2_MIN_BUF_REQ 3
+
#define DEVICE_NAME "aspeed-video"
#define ASPEED_VIDEO_JPEG_NUM_QUALITIES 12
@@ -85,8 +88,6 @@
#define VE_CTRL_SOURCE BIT(2)
#define VE_CTRL_INT_DE BIT(4)
#define VE_CTRL_DIRECT_FETCH BIT(5)
-#define VE_CTRL_YUV BIT(6)
-#define VE_CTRL_RGB BIT(7)
#define VE_CTRL_CAPTURE_FMT GENMASK(7, 6)
#define VE_CTRL_AUTO_OR_CURSOR BIT(8)
#define VE_CTRL_CLK_INVERSE BIT(11)
@@ -143,26 +144,29 @@
#define VE_SRC_LR_EDGE_DET_NO_H BIT(13)
#define VE_SRC_LR_EDGE_DET_NO_DISP BIT(14)
#define VE_SRC_LR_EDGE_DET_NO_CLK BIT(15)
-#define VE_SRC_LR_EDGE_DET_RT_SHF 16
-#define VE_SRC_LR_EDGE_DET_RT GENMASK(27, VE_SRC_LR_EDGE_DET_RT_SHF)
+#define VE_SRC_LR_EDGE_DET_RT GENMASK(27, 16)
#define VE_SRC_LR_EDGE_DET_INTERLACE BIT(31)
#define VE_SRC_TB_EDGE_DET 0x094
#define VE_SRC_TB_EDGE_DET_TOP GENMASK(12, 0)
-#define VE_SRC_TB_EDGE_DET_BOT_SHF 16
-#define VE_SRC_TB_EDGE_DET_BOT GENMASK(28, VE_SRC_TB_EDGE_DET_BOT_SHF)
+#define VE_SRC_TB_EDGE_DET_BOT GENMASK(28, 16)
#define VE_MODE_DETECT_STATUS 0x098
-#define VE_MODE_DETECT_H_PIXELS GENMASK(11, 0)
-#define VE_MODE_DETECT_V_LINES_SHF 16
-#define VE_MODE_DETECT_V_LINES GENMASK(27, VE_MODE_DETECT_V_LINES_SHF)
+#define VE_MODE_DETECT_H_PERIOD GENMASK(11, 0)
+#define VE_MODE_DETECT_EXTSRC_ADC BIT(12)
+#define VE_MODE_DETECT_H_STABLE BIT(13)
+#define VE_MODE_DETECT_V_STABLE BIT(14)
+#define VE_MODE_DETECT_V_LINES GENMASK(27, 16)
#define VE_MODE_DETECT_STATUS_VSYNC BIT(28)
#define VE_MODE_DETECT_STATUS_HSYNC BIT(29)
+#define VE_MODE_DETECT_VSYNC_RDY BIT(30)
+#define VE_MODE_DETECT_HSYNC_RDY BIT(31)
#define VE_SYNC_STATUS 0x09c
#define VE_SYNC_STATUS_HSYNC GENMASK(11, 0)
-#define VE_SYNC_STATUS_VSYNC_SHF 16
-#define VE_SYNC_STATUS_VSYNC GENMASK(27, VE_SYNC_STATUS_VSYNC_SHF)
+#define VE_SYNC_STATUS_VSYNC GENMASK(27, 16)
+
+#define VE_H_TOTAL_PIXELS 0x0A0
#define VE_INTERRUPT_CTRL 0x304
#define VE_INTERRUPT_STATUS 0x308
@@ -179,9 +183,24 @@
#define VE_INTERRUPT_VSYNC_DESC BIT(11)
#define VE_MODE_DETECT 0x30c
+#define VE_MODE_DT_HOR_TOLER GENMASK(31, 28)
+#define VE_MODE_DT_VER_TOLER GENMASK(27, 24)
+#define VE_MODE_DT_HOR_STABLE GENMASK(23, 20)
+#define VE_MODE_DT_VER_STABLE GENMASK(19, 16)
+#define VE_MODE_DT_EDG_THROD GENMASK(15, 8)
+
#define VE_MEM_RESTRICT_START 0x310
#define VE_MEM_RESTRICT_END 0x314
+/*
+ * VIDEO_MODE_DETECT_DONE: set when the input signal is locked
+ * VIDEO_RES_CHANGE: set while resolution-change work is in progress
+ * VIDEO_RES_DETECT: set while resolution detection is in progress
+ * VIDEO_STREAMING: set when the user has requested stream-on
+ * VIDEO_FRAME_INPRG: set while the hardware is working on a frame
+ * VIDEO_STOPPED: set when the device is released
+ * VIDEO_CLOCKS_ON: set when the clocks are on
+ */
enum {
VIDEO_MODE_DETECT_DONE,
VIDEO_RES_CHANGE,
@@ -192,6 +211,15 @@ enum {
VIDEO_CLOCKS_ON,
};
+// for VE_CTRL_CAPTURE_FMT
+enum aspeed_video_capture_format {
+ VIDEO_CAP_FMT_YUV_STUDIO_SWING = 0,
+ VIDEO_CAP_FMT_YUV_FULL_SWING,
+ VIDEO_CAP_FMT_RGB,
+ VIDEO_CAP_FMT_GRAY,
+ VIDEO_CAP_FMT_MAX
+};
+
struct aspeed_video_addr {
unsigned int size;
dma_addr_t dma;
@@ -214,6 +242,25 @@ struct aspeed_video_perf {
#define to_aspeed_video_buffer(x) \
container_of((x), struct aspeed_video_buffer, vb)
+/*
+ * struct aspeed_video - driver data
+ *
+ * res_work: delayed_work for resolution detection after signal unlock
+ * buffers: list of buffers queued by the user
+ * flags: state of the video engine
+ * sequence: sequence number of the last completed frame
+ * max_compressed_size: maximum size of the compressed stream
+ * srcs: buffer information for the source (capture) buffers
+ * jpeg: buffer information for the jpeg header
+ * yuv420: a flag raised if JPEG subsampling is 4:2:0
+ * frame_rate: the configured frame rate
+ * jpeg_quality: JPEG quality (0~11)
+ * frame_bottom: end position of video data in vertical direction
+ * frame_left: start position of video data in horizontal direction
+ * frame_right: end position of video data in horizontal direction
+ * frame_top: start position of video data in vertical direction
+ * perf: statistics, primarily for debugfs
+ */
struct aspeed_video {
void __iomem *base;
struct clk *eclk;
@@ -411,6 +458,8 @@ static const struct v4l2_dv_timings_cap aspeed_video_timings_cap = {
},
};
+static unsigned int debug;
+
static void aspeed_video_init_jpeg_table(u32 *table, bool yuv420)
{
int i;
@@ -458,33 +507,38 @@ static void aspeed_video_update(struct aspeed_video *video, u32 reg, u32 clear,
t &= ~clear;
t |= bits;
writel(t, video->base + reg);
- dev_dbg(video->dev, "update %03x[%08x -> %08x]\n", reg, before,
- readl(video->base + reg));
+ v4l2_dbg(3, debug, &video->v4l2_dev, "update %03x[%08x -> %08x]\n",
+ reg, before, readl(video->base + reg));
}
static u32 aspeed_video_read(struct aspeed_video *video, u32 reg)
{
u32 t = readl(video->base + reg);
- dev_dbg(video->dev, "read %03x[%08x]\n", reg, t);
+ v4l2_dbg(3, debug, &video->v4l2_dev, "read %03x[%08x]\n", reg, t);
return t;
}
static void aspeed_video_write(struct aspeed_video *video, u32 reg, u32 val)
{
writel(val, video->base + reg);
- dev_dbg(video->dev, "write %03x[%08x]\n", reg,
- readl(video->base + reg));
+ v4l2_dbg(3, debug, &video->v4l2_dev, "write %03x[%08x]\n", reg,
+ readl(video->base + reg));
}
static void update_perf(struct aspeed_video_perf *p)
{
+ struct aspeed_video *v = container_of(p, struct aspeed_video,
+ perf);
+
p->duration =
ktime_to_ms(ktime_sub(ktime_get(), p->last_sample));
p->totaltime += p->duration;
p->duration_max = max(p->duration, p->duration_max);
p->duration_min = min(p->duration, p->duration_min);
+ v4l2_dbg(2, debug, &v->v4l2_dev, "time consumed: %d ms\n",
+ p->duration);
}
static int aspeed_video_start_frame(struct aspeed_video *video)
@@ -495,13 +549,13 @@ static int aspeed_video_start_frame(struct aspeed_video *video)
u32 seq_ctrl = aspeed_video_read(video, VE_SEQ_CTRL);
if (video->v4l2_input_status) {
- dev_dbg(video->dev, "No signal; don't start frame\n");
+ v4l2_warn(&video->v4l2_dev, "No signal; don't start frame\n");
return 0;
}
if (!(seq_ctrl & VE_SEQ_CTRL_COMP_BUSY) ||
!(seq_ctrl & VE_SEQ_CTRL_CAP_BUSY)) {
- dev_dbg(video->dev, "Engine busy; don't start frame\n");
+ v4l2_warn(&video->v4l2_dev, "Engine busy; don't start frame\n");
return -EBUSY;
}
@@ -510,7 +564,7 @@ static int aspeed_video_start_frame(struct aspeed_video *video)
struct aspeed_video_buffer, link);
if (!buf) {
spin_unlock_irqrestore(&video->lock, flags);
- dev_dbg(video->dev, "No buffers; don't start frame\n");
+ v4l2_warn(&video->v4l2_dev, "No buffers; don't start frame\n");
return -EPROTO;
}
@@ -590,7 +644,7 @@ static void aspeed_video_bufs_done(struct aspeed_video *video,
static void aspeed_video_irq_res_change(struct aspeed_video *video, ulong delay)
{
- dev_dbg(video->dev, "Resolution changed; resetting\n");
+ v4l2_dbg(1, debug, &video->v4l2_dev, "Resolution changed; resetting\n");
set_bit(VIDEO_RES_CHANGE, &video->flags);
clear_bit(VIDEO_FRAME_INPRG, &video->flags);
@@ -614,6 +668,12 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg)
*/
sts &= aspeed_video_read(video, VE_INTERRUPT_CTRL);
+ v4l2_dbg(2, debug, &video->v4l2_dev, "irq sts=%#x %s%s%s%s\n", sts,
+ sts & VE_INTERRUPT_MODE_DETECT_WD ? ", unlock" : "",
+ sts & VE_INTERRUPT_MODE_DETECT ? ", lock" : "",
+ sts & VE_INTERRUPT_CAPTURE_COMPLETE ? ", capture-done" : "",
+ sts & VE_INTERRUPT_COMP_COMPLETE ? ", comp-done" : "");
+
/*
* Resolution changed or signal was lost; reset the engine and
* re-initialize
@@ -787,8 +847,101 @@ static void aspeed_video_calc_compressed_size(struct aspeed_video *video,
aspeed_video_write(video, VE_STREAM_BUF_SIZE,
compression_buffer_size_reg);
- dev_dbg(video->dev, "Max compressed size: %x\n",
- video->max_compressed_size);
+ v4l2_dbg(1, debug, &video->v4l2_dev, "Max compressed size: %#x\n",
+ video->max_compressed_size);
+}
+
+/*
+ * Update v4l2_bt_timings per current status.
+ * frame_top/frame_bottom/frame_left/frame_right need to be ready.
+ *
+ * The following registers start counting from sync's rising edge:
+ * 1. VR090: frame edge's left and right
+ * 2. VR094: frame edge's top and bottom
+ * 3. VR09C: counting from sync's rising edge to falling edge
+ *
+ * [Vertical timing]
+ * +--+ +-------------------+ +--+
+ * | | | v i d e o | | |
+ * +--+ +-----+ +-----+ +---+
+ * vsync+--+
+ * frame_top+--------+
+ * frame_bottom+----------------------------+
+ *
+ * +-------------------+
+ * | v i d e o |
+ * +--+ +-----+ +-----+ +---+
+ * | | | |
+ * +--+ +--+
+ * vsync+-------------------------------+
+ * frame_top+-----+
+ * frame_bottom+-------------------------+
+ *
+ * [Horizontal timing]
+ * +--+ +-------------------+ +--+
+ * | | | v i d e o | | |
+ * +--+ +-----+ +-----+ +---+
+ * hsync+--+
+ * frame_left+--------+
+ * frame_right+----------------------------+
+ *
+ * +-------------------+
+ * | v i d e o |
+ * +--+ +-----+ +-----+ +---+
+ * | | | |
+ * +--+ +--+
+ * hsync+-------------------------------+
+ * frame_left+-----+
+ * frame_right+-------------------------+
+ *
+ * @v: the struct of aspeed_video
+ * @det: v4l2_bt_timings to be updated.
+ */
+static void aspeed_video_get_timings(struct aspeed_video *v,
+ struct v4l2_bt_timings *det)
+{
+ u32 mds, sync, htotal, vtotal, vsync, hsync;
+
+ mds = aspeed_video_read(v, VE_MODE_DETECT_STATUS);
+ sync = aspeed_video_read(v, VE_SYNC_STATUS);
+ htotal = aspeed_video_read(v, VE_H_TOTAL_PIXELS);
+ vtotal = FIELD_GET(VE_MODE_DETECT_V_LINES, mds);
+ vsync = FIELD_GET(VE_SYNC_STATUS_VSYNC, sync);
+ hsync = FIELD_GET(VE_SYNC_STATUS_HSYNC, sync);
+
+ /*
+ * This is a workaround for polarity detection.
+ * Because the ASPEED SoC counts sync from the sync's rising edge,
+ * the measured sync value will exceed the video's active area when
+ * the sync polarity is negative.
+ */
+ if (vsync > det->height)
+ det->polarities &= ~V4L2_DV_VSYNC_POS_POL;
+ else
+ det->polarities |= V4L2_DV_VSYNC_POS_POL;
+ if (hsync > det->width)
+ det->polarities &= ~V4L2_DV_HSYNC_POS_POL;
+ else
+ det->polarities |= V4L2_DV_HSYNC_POS_POL;
+
+ if (det->polarities & V4L2_DV_VSYNC_POS_POL) {
+ det->vbackporch = v->frame_top - vsync;
+ det->vfrontporch = vtotal - v->frame_bottom;
+ det->vsync = vsync;
+ } else {
+ det->vbackporch = v->frame_top;
+ det->vfrontporch = vsync - v->frame_bottom;
+ det->vsync = vtotal - vsync;
+ }
+
+ if (det->polarities & V4L2_DV_HSYNC_POS_POL) {
+ det->hbackporch = v->frame_left - hsync;
+ det->hfrontporch = htotal - v->frame_right;
+ det->hsync = hsync;
+ } else {
+ det->hbackporch = v->frame_left;
+ det->hfrontporch = hsync - v->frame_right;
+ det->hsync = htotal - hsync;
+ }
}
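To make the branch arithmetic above concrete (hypothetical numbers, not from the patch): with a measured vsync of 5 lines, frame_top = 45, frame_bottom = 1121 and vtotal = 1125, the measured count does not exceed the frame height, so the positive-polarity branch applies and yields vbackporch = 45 - 5 = 40, vfrontporch = 1125 - 1121 = 4 and a 5-line sync width; had the measured count exceeded the height, the negative branch would report vtotal - vsync as the sync width instead.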
#define res_check(v) test_and_clear_bit(VIDEO_MODE_DETECT_DONE, &(v)->flags)
@@ -801,7 +954,6 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
u32 mds;
u32 src_lr_edge;
u32 src_tb_edge;
- u32 sync;
struct v4l2_bt_timings *det = &video->detected_timings;
det->width = MIN_WIDTH;
@@ -825,11 +977,18 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
res_check(video),
MODE_DETECT_TIMEOUT);
if (!rc) {
- dev_dbg(video->dev, "Timed out; first mode detect\n");
+ v4l2_warn(&video->v4l2_dev, "Timed out; first mode detect\n");
clear_bit(VIDEO_RES_DETECT, &video->flags);
return;
}
+ mds = aspeed_video_read(video, VE_MODE_DETECT_STATUS);
+ // try detection again if current signal isn't stable
+ if (!(mds & VE_MODE_DETECT_H_STABLE) ||
+ !(mds & VE_MODE_DETECT_V_STABLE) ||
+ (mds & VE_MODE_DETECT_EXTSRC_ADC))
+ continue;
+
aspeed_video_check_and_set_polarity(video);
aspeed_video_enable_mode_detect(video);
@@ -839,33 +998,22 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
MODE_DETECT_TIMEOUT);
clear_bit(VIDEO_RES_DETECT, &video->flags);
if (!rc) {
- dev_dbg(video->dev, "Timed out; second mode detect\n");
+ v4l2_warn(&video->v4l2_dev, "Timed out; second mode detect\n");
return;
}
src_lr_edge = aspeed_video_read(video, VE_SRC_LR_EDGE_DET);
src_tb_edge = aspeed_video_read(video, VE_SRC_TB_EDGE_DET);
- mds = aspeed_video_read(video, VE_MODE_DETECT_STATUS);
- sync = aspeed_video_read(video, VE_SYNC_STATUS);
-
- video->frame_bottom = (src_tb_edge & VE_SRC_TB_EDGE_DET_BOT) >>
- VE_SRC_TB_EDGE_DET_BOT_SHF;
- video->frame_top = src_tb_edge & VE_SRC_TB_EDGE_DET_TOP;
- det->vfrontporch = video->frame_top;
- det->vbackporch = ((mds & VE_MODE_DETECT_V_LINES) >>
- VE_MODE_DETECT_V_LINES_SHF) - video->frame_bottom;
- det->vsync = (sync & VE_SYNC_STATUS_VSYNC) >>
- VE_SYNC_STATUS_VSYNC_SHF;
+
+ video->frame_bottom = FIELD_GET(VE_SRC_TB_EDGE_DET_BOT, src_tb_edge);
+ video->frame_top = FIELD_GET(VE_SRC_TB_EDGE_DET_TOP, src_tb_edge);
+
if (video->frame_top > video->frame_bottom)
continue;
- video->frame_right = (src_lr_edge & VE_SRC_LR_EDGE_DET_RT) >>
- VE_SRC_LR_EDGE_DET_RT_SHF;
- video->frame_left = src_lr_edge & VE_SRC_LR_EDGE_DET_LEFT;
- det->hfrontporch = video->frame_left;
- det->hbackporch = (mds & VE_MODE_DETECT_H_PIXELS) -
- video->frame_right;
- det->hsync = sync & VE_SYNC_STATUS_HSYNC;
+ video->frame_right = FIELD_GET(VE_SRC_LR_EDGE_DET_RT, src_lr_edge);
+ video->frame_left = FIELD_GET(VE_SRC_LR_EDGE_DET_LEFT, src_lr_edge);
+
if (video->frame_left > video->frame_right)
continue;
@@ -873,7 +1021,7 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
} while (invalid_resolution && (tries++ < INVALID_RESOLUTION_RETRIES));
if (invalid_resolution) {
- dev_dbg(video->dev, "Invalid resolution detected\n");
+ v4l2_warn(&video->v4l2_dev, "Invalid resolution detected\n");
return;
}
@@ -881,6 +1029,8 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
det->width = (video->frame_right - video->frame_left) + 1;
video->v4l2_input_status = 0;
+ aspeed_video_get_timings(video, det);
+
/*
* Enable mode-detect watchdog, resolution-change watchdog and
* automatic compression after frame capture.
@@ -890,8 +1040,8 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
aspeed_video_update(video, VE_SEQ_CTRL, 0,
VE_SEQ_CTRL_AUTO_COMP | VE_SEQ_CTRL_EN_WATCHDOG);
- dev_dbg(video->dev, "Got resolution: %dx%d\n", det->width,
- det->height);
+ v4l2_dbg(1, debug, &video->v4l2_dev, "Got resolution: %dx%d\n",
+ det->width, det->height);
}
static void aspeed_video_set_resolution(struct aspeed_video *video)
@@ -902,18 +1052,19 @@ static void aspeed_video_set_resolution(struct aspeed_video *video)
/* Set capture/compression frame sizes */
aspeed_video_calc_compressed_size(video, size);
- if (video->active_timings.width == 1680) {
+ if (!IS_ALIGNED(act->width, 64)) {
/*
- * This is a workaround to fix a silicon bug on A1 and A2
- * revisions. Since it doesn't break capturing operation of
+ * This is a workaround to fix an AST2500 silicon bug on the A1 and
+ * A2 revisions. Since it doesn't break the capturing operation of
* other revisions, use it for all revisions without checking
- * the revision ID. It picked 1728 which is a very next
- * 64-pixels aligned value to 1680 to minimize memory bandwidth
+ * the revision ID. Pick the next 64-pixel-aligned width to minimize
+ * memory bandwidth and to get better access speed from the video
+ * engine.
*/
- aspeed_video_write(video, VE_CAP_WINDOW,
- 1728 << 16 | act->height);
- size += (1728 - 1680) * video->active_timings.height;
+ u32 width = ALIGN(act->width, 64);
+
+ aspeed_video_write(video, VE_CAP_WINDOW, width << 16 | act->height);
+ size = width * act->height;
} else {
aspeed_video_write(video, VE_CAP_WINDOW,
act->width << 16 | act->height);
@@ -924,6 +1075,7 @@ static void aspeed_video_set_resolution(struct aspeed_video *video)
/* Don't use direct mode below 1024 x 768 (irqs don't fire) */
if (size < DIRECT_FETCH_THRESHOLD) {
+ v4l2_dbg(1, debug, &video->v4l2_dev, "Capture: Sync Mode\n");
aspeed_video_write(video, VE_TGS_0,
FIELD_PREP(VE_TGS_FIRST,
video->frame_left - 1) |
@@ -935,6 +1087,7 @@ static void aspeed_video_set_resolution(struct aspeed_video *video)
video->frame_bottom + 1));
aspeed_video_update(video, VE_CTRL, 0, VE_CTRL_INT_DE);
} else {
+ v4l2_dbg(1, debug, &video->v4l2_dev, "Capture: Direct Mode\n");
aspeed_video_update(video, VE_CTRL, 0, VE_CTRL_DIRECT_FETCH);
}
@@ -951,6 +1104,10 @@ static void aspeed_video_set_resolution(struct aspeed_video *video)
if (!aspeed_video_alloc_buf(video, &video->srcs[1], size))
goto err_mem;
+ v4l2_dbg(1, debug, &video->v4l2_dev, "src buf0 addr(%pad) size(%d)\n",
+ &video->srcs[0].dma, video->srcs[0].size);
+ v4l2_dbg(1, debug, &video->v4l2_dev, "src buf1 addr(%pad) size(%d)\n",
+ &video->srcs[1].dma, video->srcs[1].size);
aspeed_video_write(video, VE_SRC0_ADDR, video->srcs[0].dma);
aspeed_video_write(video, VE_SRC1_ADDR, video->srcs[1].dma);
}
@@ -969,7 +1126,8 @@ static void aspeed_video_init_regs(struct aspeed_video *video)
u32 comp_ctrl = VE_COMP_CTRL_RSVD |
FIELD_PREP(VE_COMP_CTRL_DCT_LUM, video->jpeg_quality) |
FIELD_PREP(VE_COMP_CTRL_DCT_CHR, video->jpeg_quality | 0x10);
- u32 ctrl = VE_CTRL_AUTO_OR_CURSOR;
+ u32 ctrl = VE_CTRL_AUTO_OR_CURSOR |
+ FIELD_PREP(VE_CTRL_CAPTURE_FMT, VIDEO_CAP_FMT_YUV_FULL_SWING);
u32 seq_ctrl = video->jpeg_mode;
if (video->frame_rate)
@@ -1004,7 +1162,12 @@ static void aspeed_video_init_regs(struct aspeed_video *video)
aspeed_video_write(video, VE_SCALING_FILTER3, 0x00200000);
/* Set mode detection defaults */
- aspeed_video_write(video, VE_MODE_DETECT, 0x22666500);
+ aspeed_video_write(video, VE_MODE_DETECT,
+ FIELD_PREP(VE_MODE_DT_HOR_TOLER, 2) |
+ FIELD_PREP(VE_MODE_DT_VER_TOLER, 2) |
+ FIELD_PREP(VE_MODE_DT_HOR_STABLE, 6) |
+ FIELD_PREP(VE_MODE_DT_VER_STABLE, 6) |
+ FIELD_PREP(VE_MODE_DT_EDG_THROD, 0x65));
}
static void aspeed_video_start(struct aspeed_video *video)
@@ -1111,7 +1274,7 @@ static int aspeed_video_get_parm(struct file *file, void *fh,
struct aspeed_video *video = video_drvdata(file);
a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
- a->parm.capture.readbuffers = 3;
+ a->parm.capture.readbuffers = ASPEED_VIDEO_V4L2_MIN_BUF_REQ;
a->parm.capture.timeperframe.numerator = 1;
if (!video->frame_rate)
a->parm.capture.timeperframe.denominator = MAX_FRAME_RATE;
@@ -1128,7 +1291,7 @@ static int aspeed_video_set_parm(struct file *file, void *fh,
struct aspeed_video *video = video_drvdata(file);
a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
- a->parm.capture.readbuffers = 3;
+ a->parm.capture.readbuffers = ASPEED_VIDEO_V4L2_MIN_BUF_REQ;
if (a->parm.capture.timeperframe.numerator)
frame_rate = a->parm.capture.timeperframe.denominator /
@@ -1215,6 +1378,9 @@ static int aspeed_video_set_dv_timings(struct file *file, void *fh,
timings->type = V4L2_DV_BT_656_1120;
+ v4l2_dbg(1, debug, &video->v4l2_dev, "set new timings(%dx%d)\n",
+ timings->bt.width, timings->bt.height);
+
return 0;
}
@@ -1395,6 +1561,7 @@ static void aspeed_video_resolution_work(struct work_struct *work)
.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
};
+ v4l2_dbg(1, debug, &video->v4l2_dev, "fire source change event\n");
v4l2_event_queue(&video->vdev, &ev);
} else if (test_bit(VIDEO_STREAMING, &video->flags)) {
/* No resolution change so just restart streaming */
@@ -1516,7 +1683,7 @@ static void aspeed_video_stop_streaming(struct vb2_queue *q)
!test_bit(VIDEO_FRAME_INPRG, &video->flags),
STOP_TIMEOUT);
if (!rc) {
- dev_dbg(video->dev, "Timed out when stopping streaming\n");
+ v4l2_warn(&video->v4l2_dev, "Timed out when stopping streaming\n");
/*
* Need to force stop any DMA and try and get HW into a good
@@ -1676,7 +1843,7 @@ static int aspeed_video_setup_video(struct aspeed_video *video)
vbq->drv_priv = video;
vbq->buf_struct_size = sizeof(struct aspeed_video_buffer);
vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- vbq->min_buffers_needed = 3;
+ vbq->min_buffers_needed = ASPEED_VIDEO_V4L2_MIN_BUF_REQ;
rc = vb2_queue_init(vbq);
if (rc) {
@@ -1730,6 +1897,7 @@ static int aspeed_video_init(struct aspeed_video *video)
dev_err(dev, "Unable to request IRQ %d\n", irq);
return rc;
}
+ dev_info(video->dev, "irq %d\n", irq);
video->eclk = devm_clk_get(dev, "eclk");
if (IS_ERR(video->eclk)) {
@@ -1766,6 +1934,8 @@ static int aspeed_video_init(struct aspeed_video *video)
rc = -ENOMEM;
goto err_release_reserved_mem;
}
+ dev_info(video->dev, "alloc mem size(%d) at %pad for jpeg header\n",
+ VE_JPEG_HEADER_SIZE, &video->jpeg.dma);
aspeed_video_init_jpeg_table(video->jpeg.virt, video->yuv420);
@@ -1791,7 +1961,6 @@ MODULE_DEVICE_TABLE(of, aspeed_video_of_match);
static int aspeed_video_probe(struct platform_device *pdev)
{
const struct aspeed_video_config *config;
- const struct of_device_id *match;
struct aspeed_video *video;
int rc;
@@ -1803,11 +1972,10 @@ static int aspeed_video_probe(struct platform_device *pdev)
if (IS_ERR(video->base))
return PTR_ERR(video->base);
- match = of_match_node(aspeed_video_of_match, pdev->dev.of_node);
- if (!match)
- return -EINVAL;
+ config = of_device_get_match_data(&pdev->dev);
+ if (!config)
+ return -ENODEV;
- config = match->data;
video->jpeg_mode = config->jpeg_mode;
video->comp_size_read = config->comp_size_read;
@@ -1875,6 +2043,9 @@ static struct platform_driver aspeed_video_driver = {
module_platform_driver(aspeed_video_driver);
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level (0=off,1=info,2=debug,3=reg ops)");
+
MODULE_DESCRIPTION("ASPEED Video Engine Driver");
MODULE_AUTHOR("Eddie James");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/atmel/Kconfig b/drivers/media/platform/atmel/Kconfig
index dda2f27da317..83aebee0c8eb 100644
--- a/drivers/media/platform/atmel/Kconfig
+++ b/drivers/media/platform/atmel/Kconfig
@@ -1,7 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Atmel media platform drivers"
+
config VIDEO_ATMEL_ISC
tristate "ATMEL Image Sensor Controller (ISC) support"
- depends on VIDEO_V4L2 && COMMON_CLK
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && COMMON_CLK
depends on ARCH_AT91 || COMPILE_TEST
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
@@ -15,7 +19,8 @@ config VIDEO_ATMEL_ISC
config VIDEO_ATMEL_XISC
tristate "ATMEL eXtended Image Sensor Controller (XISC) support"
- depends on VIDEO_V4L2 && COMMON_CLK && VIDEO_V4L2_SUBDEV_API
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && COMMON_CLK && VIDEO_V4L2_SUBDEV_API
depends on ARCH_AT91 || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select REGMAP_MMIO
@@ -33,10 +38,27 @@ config VIDEO_ATMEL_ISC_BASE
config VIDEO_ATMEL_ISI
tristate "ATMEL Image Sensor Interface (ISI) support"
- depends on VIDEO_V4L2 && OF
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF
depends on ARCH_AT91 || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
help
This module makes the ATMEL Image Sensor Interface available
as a v4l2 device.
+
+config VIDEO_MICROCHIP_CSI2DC
+ tristate "Microchip CSI2 Demux Controller"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && COMMON_CLK && OF
+ depends on ARCH_AT91 || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ CSI2 Demux Controller driver. CSI2DC is a helper chip
+ that converts an IDI interface byte stream to a parallel pixel stream.
+ It supports various RAW formats as input.
+
+ To compile this driver as a module, choose M here: the
+ module will be called microchip-csi2dc.
diff --git a/drivers/media/platform/atmel/Makefile b/drivers/media/platform/atmel/Makefile
index 46d264ab7948..794e8f739287 100644
--- a/drivers/media/platform/atmel/Makefile
+++ b/drivers/media/platform/atmel/Makefile
@@ -1,8 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
atmel-isc-objs = atmel-sama5d2-isc.o
atmel-xisc-objs = atmel-sama7g5-isc.o
+atmel-isc-common-objs = atmel-isc-base.o atmel-isc-clk.o
obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o
-obj-$(CONFIG_VIDEO_ATMEL_ISC_BASE) += atmel-isc-base.o
+obj-$(CONFIG_VIDEO_ATMEL_ISC_BASE) += atmel-isc-common.o
obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel-isc.o
obj-$(CONFIG_VIDEO_ATMEL_XISC) += atmel-xisc.o
+obj-$(CONFIG_VIDEO_MICROCHIP_CSI2DC) += microchip-csi2dc.o
diff --git a/drivers/media/platform/atmel/atmel-isc-base.c b/drivers/media/platform/atmel/atmel-isc-base.c
index 660cd0ab6749..db15770d5b88 100644
--- a/drivers/media/platform/atmel/atmel-isc-base.c
+++ b/drivers/media/platform/atmel/atmel-isc-base.c
@@ -8,10 +8,6 @@
* Author: Eugen Hristev <eugen.hristev@microchip.com>
*
*/
-
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/math64.h>
@@ -100,297 +96,6 @@ static inline void isc_reset_awb_ctrls(struct isc_device *isc)
}
}
-static int isc_wait_clk_stable(struct clk_hw *hw)
-{
- struct isc_clk *isc_clk = to_isc_clk(hw);
- struct regmap *regmap = isc_clk->regmap;
- unsigned long timeout = jiffies + usecs_to_jiffies(1000);
- unsigned int status;
-
- while (time_before(jiffies, timeout)) {
- regmap_read(regmap, ISC_CLKSR, &status);
- if (!(status & ISC_CLKSR_SIP))
- return 0;
-
- usleep_range(10, 250);
- }
-
- return -ETIMEDOUT;
-}
-
-static int isc_clk_prepare(struct clk_hw *hw)
-{
- struct isc_clk *isc_clk = to_isc_clk(hw);
- int ret;
-
- ret = pm_runtime_resume_and_get(isc_clk->dev);
- if (ret < 0)
- return ret;
-
- return isc_wait_clk_stable(hw);
-}
-
-static void isc_clk_unprepare(struct clk_hw *hw)
-{
- struct isc_clk *isc_clk = to_isc_clk(hw);
-
- isc_wait_clk_stable(hw);
-
- pm_runtime_put_sync(isc_clk->dev);
-}
-
-static int isc_clk_enable(struct clk_hw *hw)
-{
- struct isc_clk *isc_clk = to_isc_clk(hw);
- u32 id = isc_clk->id;
- struct regmap *regmap = isc_clk->regmap;
- unsigned long flags;
- unsigned int status;
-
- dev_dbg(isc_clk->dev, "ISC CLK: %s, id = %d, div = %d, parent id = %d\n",
- __func__, id, isc_clk->div, isc_clk->parent_id);
-
- spin_lock_irqsave(&isc_clk->lock, flags);
- regmap_update_bits(regmap, ISC_CLKCFG,
- ISC_CLKCFG_DIV_MASK(id) | ISC_CLKCFG_SEL_MASK(id),
- (isc_clk->div << ISC_CLKCFG_DIV_SHIFT(id)) |
- (isc_clk->parent_id << ISC_CLKCFG_SEL_SHIFT(id)));
-
- regmap_write(regmap, ISC_CLKEN, ISC_CLK(id));
- spin_unlock_irqrestore(&isc_clk->lock, flags);
-
- regmap_read(regmap, ISC_CLKSR, &status);
- if (status & ISC_CLK(id))
- return 0;
- else
- return -EINVAL;
-}
-
-static void isc_clk_disable(struct clk_hw *hw)
-{
- struct isc_clk *isc_clk = to_isc_clk(hw);
- u32 id = isc_clk->id;
- unsigned long flags;
-
- spin_lock_irqsave(&isc_clk->lock, flags);
- regmap_write(isc_clk->regmap, ISC_CLKDIS, ISC_CLK(id));
- spin_unlock_irqrestore(&isc_clk->lock, flags);
-}
-
-static int isc_clk_is_enabled(struct clk_hw *hw)
-{
- struct isc_clk *isc_clk = to_isc_clk(hw);
- u32 status;
- int ret;
-
- ret = pm_runtime_resume_and_get(isc_clk->dev);
- if (ret < 0)
- return 0;
-
- regmap_read(isc_clk->regmap, ISC_CLKSR, &status);
-
- pm_runtime_put_sync(isc_clk->dev);
-
- return status & ISC_CLK(isc_clk->id) ? 1 : 0;
-}
-
-static unsigned long
-isc_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
-{
- struct isc_clk *isc_clk = to_isc_clk(hw);
-
- return DIV_ROUND_CLOSEST(parent_rate, isc_clk->div + 1);
-}
-
-static int isc_clk_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- struct isc_clk *isc_clk = to_isc_clk(hw);
- long best_rate = -EINVAL;
- int best_diff = -1;
- unsigned int i, div;
-
- for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
- struct clk_hw *parent;
- unsigned long parent_rate;
-
- parent = clk_hw_get_parent_by_index(hw, i);
- if (!parent)
- continue;
-
- parent_rate = clk_hw_get_rate(parent);
- if (!parent_rate)
- continue;
-
- for (div = 1; div < ISC_CLK_MAX_DIV + 2; div++) {
- unsigned long rate;
- int diff;
-
- rate = DIV_ROUND_CLOSEST(parent_rate, div);
- diff = abs(req->rate - rate);
-
- if (best_diff < 0 || best_diff > diff) {
- best_rate = rate;
- best_diff = diff;
- req->best_parent_rate = parent_rate;
- req->best_parent_hw = parent;
- }
-
- if (!best_diff || rate < req->rate)
- break;
- }
-
- if (!best_diff)
- break;
- }
-
- dev_dbg(isc_clk->dev,
- "ISC CLK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
- __func__, best_rate,
- __clk_get_name((req->best_parent_hw)->clk),
- req->best_parent_rate);
-
- if (best_rate < 0)
- return best_rate;
-
- req->rate = best_rate;
-
- return 0;
-}
-
-static int isc_clk_set_parent(struct clk_hw *hw, u8 index)
-{
- struct isc_clk *isc_clk = to_isc_clk(hw);
-
- if (index >= clk_hw_get_num_parents(hw))
- return -EINVAL;
-
- isc_clk->parent_id = index;
-
- return 0;
-}
-
-static u8 isc_clk_get_parent(struct clk_hw *hw)
-{
- struct isc_clk *isc_clk = to_isc_clk(hw);
-
- return isc_clk->parent_id;
-}
-
-static int isc_clk_set_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long parent_rate)
-{
- struct isc_clk *isc_clk = to_isc_clk(hw);
- u32 div;
-
- if (!rate)
- return -EINVAL;
-
- div = DIV_ROUND_CLOSEST(parent_rate, rate);
- if (div > (ISC_CLK_MAX_DIV + 1) || !div)
- return -EINVAL;
-
- isc_clk->div = div - 1;
-
- return 0;
-}
-
-static const struct clk_ops isc_clk_ops = {
- .prepare = isc_clk_prepare,
- .unprepare = isc_clk_unprepare,
- .enable = isc_clk_enable,
- .disable = isc_clk_disable,
- .is_enabled = isc_clk_is_enabled,
- .recalc_rate = isc_clk_recalc_rate,
- .determine_rate = isc_clk_determine_rate,
- .set_parent = isc_clk_set_parent,
- .get_parent = isc_clk_get_parent,
- .set_rate = isc_clk_set_rate,
-};
-
-static int isc_clk_register(struct isc_device *isc, unsigned int id)
-{
- struct regmap *regmap = isc->regmap;
- struct device_node *np = isc->dev->of_node;
- struct isc_clk *isc_clk;
- struct clk_init_data init;
- const char *clk_name = np->name;
- const char *parent_names[3];
- int num_parents;
-
- if (id == ISC_ISPCK && !isc->ispck_required)
- return 0;
-
- num_parents = of_clk_get_parent_count(np);
- if (num_parents < 1 || num_parents > 3)
- return -EINVAL;
-
- if (num_parents > 2 && id == ISC_ISPCK)
- num_parents = 2;
-
- of_clk_parent_fill(np, parent_names, num_parents);
-
- if (id == ISC_MCK)
- of_property_read_string(np, "clock-output-names", &clk_name);
- else
- clk_name = "isc-ispck";
-
- init.parent_names = parent_names;
- init.num_parents = num_parents;
- init.name = clk_name;
- init.ops = &isc_clk_ops;
- init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
-
- isc_clk = &isc->isc_clks[id];
- isc_clk->hw.init = &init;
- isc_clk->regmap = regmap;
- isc_clk->id = id;
- isc_clk->dev = isc->dev;
- spin_lock_init(&isc_clk->lock);
-
- isc_clk->clk = clk_register(isc->dev, &isc_clk->hw);
- if (IS_ERR(isc_clk->clk)) {
- dev_err(isc->dev, "%s: clock register fail\n", clk_name);
- return PTR_ERR(isc_clk->clk);
- } else if (id == ISC_MCK)
- of_clk_add_provider(np, of_clk_src_simple_get, isc_clk->clk);
-
- return 0;
-}
-
-int isc_clk_init(struct isc_device *isc)
-{
- unsigned int i;
- int ret;
-
- for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++)
- isc->isc_clks[i].clk = ERR_PTR(-EINVAL);
-
- for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++) {
- ret = isc_clk_register(isc, i);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(isc_clk_init);
-
-void isc_clk_cleanup(struct isc_device *isc)
-{
- unsigned int i;
-
- of_clk_del_provider(isc->dev->of_node);
-
- for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++) {
- struct isc_clk *isc_clk = &isc->isc_clks[i];
-
- if (!IS_ERR(isc_clk->clk))
- clk_unregister(isc_clk->clk);
- }
-}
-EXPORT_SYMBOL_GPL(isc_clk_cleanup);
static int isc_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
@@ -912,6 +617,7 @@ static int isc_try_configure_rlp_dma(struct isc_device *isc, bool direct_dump)
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED8;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 8;
+ isc->try_config.bpp_v4l2 = 8;
break;
case V4L2_PIX_FMT_SBGGR10:
case V4L2_PIX_FMT_SGBRG10:
@@ -921,6 +627,7 @@ static int isc_try_configure_rlp_dma(struct isc_device *isc, bool direct_dump)
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 16;
+ isc->try_config.bpp_v4l2 = 16;
break;
case V4L2_PIX_FMT_SBGGR12:
case V4L2_PIX_FMT_SGBRG12:
@@ -930,24 +637,28 @@ static int isc_try_configure_rlp_dma(struct isc_device *isc, bool direct_dump)
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 16;
+ isc->try_config.bpp_v4l2 = 16;
break;
case V4L2_PIX_FMT_RGB565:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_RGB565;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 16;
+ isc->try_config.bpp_v4l2 = 16;
break;
case V4L2_PIX_FMT_ARGB444:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB444;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 16;
+ isc->try_config.bpp_v4l2 = 16;
break;
case V4L2_PIX_FMT_ARGB555:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB555;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 16;
+ isc->try_config.bpp_v4l2 = 16;
break;
case V4L2_PIX_FMT_ABGR32:
case V4L2_PIX_FMT_XBGR32:
@@ -955,42 +666,49 @@ static int isc_try_configure_rlp_dma(struct isc_device *isc, bool direct_dump)
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED32;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 32;
+ isc->try_config.bpp_v4l2 = 32;
break;
case V4L2_PIX_FMT_YUV420:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_YC420P;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PLANAR;
isc->try_config.bpp = 12;
+ isc->try_config.bpp_v4l2 = 8; /* only first plane */
break;
case V4L2_PIX_FMT_YUV422P:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_YC422P;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PLANAR;
isc->try_config.bpp = 16;
+ isc->try_config.bpp_v4l2 = 8; /* only first plane */
break;
case V4L2_PIX_FMT_YUYV:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YCYC | ISC_RLP_CFG_YMODE_YUYV;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED32;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 16;
+ isc->try_config.bpp_v4l2 = 16;
break;
case V4L2_PIX_FMT_UYVY:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YCYC | ISC_RLP_CFG_YMODE_UYVY;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED32;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 16;
+ isc->try_config.bpp_v4l2 = 16;
break;
case V4L2_PIX_FMT_VYUY:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YCYC | ISC_RLP_CFG_YMODE_VYUY;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED32;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 16;
+ isc->try_config.bpp_v4l2 = 16;
break;
case V4L2_PIX_FMT_GREY:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DATY8;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED8;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 8;
+ isc->try_config.bpp_v4l2 = 8;
break;
case V4L2_PIX_FMT_Y16:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DATY10 | ISC_RLP_CFG_LSH;
@@ -1000,6 +718,7 @@ static int isc_try_configure_rlp_dma(struct isc_device *isc, bool direct_dump)
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 16;
+ isc->try_config.bpp_v4l2 = 16;
break;
default:
return -EINVAL;
@@ -1248,8 +967,9 @@ static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
pixfmt->height = isc->max_height;
pixfmt->field = V4L2_FIELD_NONE;
- pixfmt->bytesperline = (pixfmt->width * isc->try_config.bpp) >> 3;
- pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+ pixfmt->bytesperline = (pixfmt->width * isc->try_config.bpp_v4l2) >> 3;
+ pixfmt->sizeimage = ((pixfmt->width * isc->try_config.bpp) >> 3) *
+ pixfmt->height;
if (code)
*code = mbus_code;
@@ -1369,14 +1089,12 @@ static int isc_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
struct isc_device *isc = video_drvdata(file);
- struct v4l2_subdev_frame_size_enum fse = {
- .code = isc->config.sd_format->mbus_code,
- .index = fsize->index,
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- };
int ret = -EINVAL;
int i;
+ if (fsize->index)
+ return -EINVAL;
+
for (i = 0; i < isc->num_user_formats; i++)
if (isc->user_formats[i]->fourcc == fsize->pixel_format)
ret = 0;
@@ -1388,50 +1106,14 @@ static int isc_enum_framesizes(struct file *file, void *fh,
if (ret)
return ret;
- ret = v4l2_subdev_call(isc->current_subdev->sd, pad, enum_frame_size,
- NULL, &fse);
- if (ret)
- return ret;
-
- fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
- fsize->discrete.width = fse.max_width;
- fsize->discrete.height = fse.max_height;
-
- return 0;
-}
-
-static int isc_enum_frameintervals(struct file *file, void *fh,
- struct v4l2_frmivalenum *fival)
-{
- struct isc_device *isc = video_drvdata(file);
- struct v4l2_subdev_frame_interval_enum fie = {
- .code = isc->config.sd_format->mbus_code,
- .index = fival->index,
- .width = fival->width,
- .height = fival->height,
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- };
- int ret = -EINVAL;
- unsigned int i;
-
- for (i = 0; i < isc->num_user_formats; i++)
- if (isc->user_formats[i]->fourcc == fival->pixel_format)
- ret = 0;
-
- for (i = 0; i < isc->controller_formats_size; i++)
- if (isc->controller_formats[i].fourcc == fival->pixel_format)
- ret = 0;
-
- if (ret)
- return ret;
-
- ret = v4l2_subdev_call(isc->current_subdev->sd, pad,
- enum_frame_interval, NULL, &fie);
- if (ret)
- return ret;
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
- fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
- fival->discrete = fie.interval;
+ fsize->stepwise.min_width = 16;
+ fsize->stepwise.max_width = isc->max_width;
+ fsize->stepwise.min_height = 16;
+ fsize->stepwise.max_height = isc->max_height;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.step_height = 1;
return 0;
}
@@ -1460,7 +1142,6 @@ static const struct v4l2_ioctl_ops isc_ioctl_ops = {
.vidioc_g_parm = isc_g_parm,
.vidioc_s_parm = isc_s_parm,
.vidioc_enum_framesizes = isc_enum_framesizes,
- .vidioc_enum_frameintervals = isc_enum_frameintervals,
.vidioc_log_status = v4l2_ctrl_log_status,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
@@ -1608,10 +1289,15 @@ static void isc_hist_count(struct isc_device *isc, u32 *min, u32 *max)
if (!*min)
*min = 1;
+
+ v4l2_dbg(1, debug, &isc->v4l2_dev,
+ "isc wb: hist_id %u, hist_count %u",
+ ctrls->hist_id, *hist_count);
}
static void isc_wb_update(struct isc_ctrls *ctrls)
{
+ struct isc_device *isc = container_of(ctrls, struct isc_device, ctrls);
u32 *hist_count = &ctrls->hist_count[0];
u32 c, offset[4];
u64 avg = 0;
@@ -1628,6 +1314,9 @@ static void isc_wb_update(struct isc_ctrls *ctrls)
(u64)hist_count[ISC_HIS_CFG_MODE_GB];
avg >>= 1;
+ v4l2_dbg(1, debug, &isc->v4l2_dev,
+ "isc wb: green components average %llu\n", avg);
+
/* Green histogram is null, nothing to do */
if (!avg)
return;
@@ -1680,9 +1369,19 @@ static void isc_wb_update(struct isc_ctrls *ctrls)
else
gw_gain[c] = 1 << 9;
+ v4l2_dbg(1, debug, &isc->v4l2_dev,
+ "isc wb: component %d, s_gain %u, gw_gain %u\n",
+ c, s_gain[c], gw_gain[c]);
/* multiply both gains and adjust for decimals */
ctrls->gain[c] = s_gain[c] * gw_gain[c];
ctrls->gain[c] >>= 9;
+
+ /* make sure we are not out of range */
+ ctrls->gain[c] = clamp_val(ctrls->gain[c], 0, GENMASK(12, 0));
+
+ v4l2_dbg(1, debug, &isc->v4l2_dev,
+ "isc wb: component %d, final gain %u\n",
+ c, ctrls->gain[c]);
}
}
@@ -1706,6 +1405,10 @@ static void isc_awb_work(struct work_struct *w)
return;
isc_hist_count(isc, &min, &max);
+
+ v4l2_dbg(1, debug, &isc->v4l2_dev,
+ "isc wb mode %d: hist min %u , max %u\n", hist_id, min, max);
+
ctrls->hist_minmax[hist_id][HIST_MIN_INDEX] = min;
ctrls->hist_minmax[hist_id][HIST_MAX_INDEX] = max;
@@ -2181,7 +1884,7 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
}
/* Register video device */
- strscpy(vdev->name, "microchip-isc", sizeof(vdev->name));
+ strscpy(vdev->name, KBUILD_MODNAME, sizeof(vdev->name));
vdev->release = video_device_release_empty;
vdev->fops = &isc_fops;
vdev->ioctl_ops = &isc_ioctl_ops;
diff --git a/drivers/media/platform/atmel/atmel-isc-clk.c b/drivers/media/platform/atmel/atmel-isc-clk.c
new file mode 100644
index 000000000000..2059fe376b00
--- /dev/null
+++ b/drivers/media/platform/atmel/atmel-isc-clk.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Microchip Image Sensor Controller (ISC) common clock driver setup
+ *
+ * Copyright (C) 2016 Microchip Technology, Inc.
+ *
+ * Author: Songjun Wu
+ * Author: Eugen Hristev <eugen.hristev@microchip.com>
+ *
+ */
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include "atmel-isc-regs.h"
+#include "atmel-isc.h"
+
+static int isc_wait_clk_stable(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ struct regmap *regmap = isc_clk->regmap;
+ unsigned long timeout = jiffies + usecs_to_jiffies(1000);
+ unsigned int status;
+
+ while (time_before(jiffies, timeout)) {
+ regmap_read(regmap, ISC_CLKSR, &status);
+ if (!(status & ISC_CLKSR_SIP))
+ return 0;
+
+ usleep_range(10, 250);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int isc_clk_prepare(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(isc_clk->dev);
+ if (ret < 0)
+ return ret;
+
+ return isc_wait_clk_stable(hw);
+}
+
+static void isc_clk_unprepare(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ isc_wait_clk_stable(hw);
+
+ pm_runtime_put_sync(isc_clk->dev);
+}
+
+static int isc_clk_enable(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ u32 id = isc_clk->id;
+ struct regmap *regmap = isc_clk->regmap;
+ unsigned long flags;
+ unsigned int status;
+
+ dev_dbg(isc_clk->dev, "ISC CLK: %s, id = %d, div = %d, parent id = %d\n",
+ __func__, id, isc_clk->div, isc_clk->parent_id);
+
+ spin_lock_irqsave(&isc_clk->lock, flags);
+ regmap_update_bits(regmap, ISC_CLKCFG,
+ ISC_CLKCFG_DIV_MASK(id) | ISC_CLKCFG_SEL_MASK(id),
+ (isc_clk->div << ISC_CLKCFG_DIV_SHIFT(id)) |
+ (isc_clk->parent_id << ISC_CLKCFG_SEL_SHIFT(id)));
+
+ regmap_write(regmap, ISC_CLKEN, ISC_CLK(id));
+ spin_unlock_irqrestore(&isc_clk->lock, flags);
+
+ regmap_read(regmap, ISC_CLKSR, &status);
+ if (status & ISC_CLK(id))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static void isc_clk_disable(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ u32 id = isc_clk->id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&isc_clk->lock, flags);
+ regmap_write(isc_clk->regmap, ISC_CLKDIS, ISC_CLK(id));
+ spin_unlock_irqrestore(&isc_clk->lock, flags);
+}
+
+static int isc_clk_is_enabled(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ u32 status;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(isc_clk->dev);
+ if (ret < 0)
+ return 0;
+
+ regmap_read(isc_clk->regmap, ISC_CLKSR, &status);
+
+ pm_runtime_put_sync(isc_clk->dev);
+
+ return status & ISC_CLK(isc_clk->id) ? 1 : 0;
+}
+
+static unsigned long
+isc_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ return DIV_ROUND_CLOSEST(parent_rate, isc_clk->div + 1);
+}
+
+static int isc_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ long best_rate = -EINVAL;
+ int best_diff = -1;
+ unsigned int i, div;
+
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ struct clk_hw *parent;
+ unsigned long parent_rate;
+
+ parent = clk_hw_get_parent_by_index(hw, i);
+ if (!parent)
+ continue;
+
+ parent_rate = clk_hw_get_rate(parent);
+ if (!parent_rate)
+ continue;
+
+ for (div = 1; div < ISC_CLK_MAX_DIV + 2; div++) {
+ unsigned long rate;
+ int diff;
+
+ rate = DIV_ROUND_CLOSEST(parent_rate, div);
+ diff = abs(req->rate - rate);
+
+ if (best_diff < 0 || best_diff > diff) {
+ best_rate = rate;
+ best_diff = diff;
+ req->best_parent_rate = parent_rate;
+ req->best_parent_hw = parent;
+ }
+
+ if (!best_diff || rate < req->rate)
+ break;
+ }
+
+ if (!best_diff)
+ break;
+ }
+
+ dev_dbg(isc_clk->dev,
+ "ISC CLK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
+ __func__, best_rate,
+ __clk_get_name((req->best_parent_hw)->clk),
+ req->best_parent_rate);
+
+ if (best_rate < 0)
+ return best_rate;
+
+ req->rate = best_rate;
+
+ return 0;
+}
+
+static int isc_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ if (index >= clk_hw_get_num_parents(hw))
+ return -EINVAL;
+
+ isc_clk->parent_id = index;
+
+ return 0;
+}
+
+static u8 isc_clk_get_parent(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ return isc_clk->parent_id;
+}
+
+static int isc_clk_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ u32 div;
+
+ if (!rate)
+ return -EINVAL;
+
+ div = DIV_ROUND_CLOSEST(parent_rate, rate);
+ if (div > (ISC_CLK_MAX_DIV + 1) || !div)
+ return -EINVAL;
+
+ isc_clk->div = div - 1;
+
+ return 0;
+}
+
+static const struct clk_ops isc_clk_ops = {
+ .prepare = isc_clk_prepare,
+ .unprepare = isc_clk_unprepare,
+ .enable = isc_clk_enable,
+ .disable = isc_clk_disable,
+ .is_enabled = isc_clk_is_enabled,
+ .recalc_rate = isc_clk_recalc_rate,
+ .determine_rate = isc_clk_determine_rate,
+ .set_parent = isc_clk_set_parent,
+ .get_parent = isc_clk_get_parent,
+ .set_rate = isc_clk_set_rate,
+};
+
+static int isc_clk_register(struct isc_device *isc, unsigned int id)
+{
+ struct regmap *regmap = isc->regmap;
+ struct device_node *np = isc->dev->of_node;
+ struct isc_clk *isc_clk;
+ struct clk_init_data init;
+ const char *clk_name = np->name;
+ const char *parent_names[3];
+ int num_parents;
+
+ if (id == ISC_ISPCK && !isc->ispck_required)
+ return 0;
+
+ num_parents = of_clk_get_parent_count(np);
+ if (num_parents < 1 || num_parents > 3)
+ return -EINVAL;
+
+ if (num_parents > 2 && id == ISC_ISPCK)
+ num_parents = 2;
+
+ of_clk_parent_fill(np, parent_names, num_parents);
+
+ if (id == ISC_MCK)
+ of_property_read_string(np, "clock-output-names", &clk_name);
+ else
+ clk_name = "isc-ispck";
+
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.name = clk_name;
+ init.ops = &isc_clk_ops;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+
+ isc_clk = &isc->isc_clks[id];
+ isc_clk->hw.init = &init;
+ isc_clk->regmap = regmap;
+ isc_clk->id = id;
+ isc_clk->dev = isc->dev;
+ spin_lock_init(&isc_clk->lock);
+
+ isc_clk->clk = clk_register(isc->dev, &isc_clk->hw);
+ if (IS_ERR(isc_clk->clk)) {
+ dev_err(isc->dev, "%s: clock register fail\n", clk_name);
+ return PTR_ERR(isc_clk->clk);
+ } else if (id == ISC_MCK) {
+ of_clk_add_provider(np, of_clk_src_simple_get, isc_clk->clk);
+ }
+
+ return 0;
+}
+
+int isc_clk_init(struct isc_device *isc)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++)
+ isc->isc_clks[i].clk = ERR_PTR(-EINVAL);
+
+ for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++) {
+ ret = isc_clk_register(isc, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(isc_clk_init);
+
+void isc_clk_cleanup(struct isc_device *isc)
+{
+ unsigned int i;
+
+ of_clk_del_provider(isc->dev->of_node);
+
+ for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++) {
+ struct isc_clk *isc_clk = &isc->isc_clks[i];
+
+ if (!IS_ERR(isc_clk->clk))
+ clk_unregister(isc_clk->clk);
+ }
+}
+EXPORT_SYMBOL_GPL(isc_clk_cleanup);
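The determine_rate callback above scans every parent and divisor and keeps the closest achievable rate, stopping early on an exact hit or once the candidate rate drops below the target. A minimal user-space sketch of that inner loop for a single parent (hypothetical and stripped of the clk framework; DIV_ROUND_CLOSEST() is open-coded):

	#include <stdio.h>
	#include <stdlib.h>

	#define ISC_CLK_MAX_DIV 255	/* mirrors the driver constant */

	/* Hypothetical stand-alone model of isc_clk_determine_rate()'s
	 * divisor search for one parent clock. */
	static unsigned long example_best_rate(unsigned long parent_rate,
					       unsigned long wanted)
	{
		unsigned long best_rate = 0;
		long best_diff = -1;
		unsigned int div;

		for (div = 1; div < ISC_CLK_MAX_DIV + 2; div++) {
			/* DIV_ROUND_CLOSEST() equivalent */
			unsigned long rate = (parent_rate + div / 2) / div;
			long diff = labs((long)wanted - (long)rate);

			if (best_diff < 0 || diff < best_diff) {
				best_rate = rate;
				best_diff = diff;
			}
			/* exact hit, or already below the target */
			if (!best_diff || rate < wanted)
				break;
		}
		return best_rate;
	}

	int main(void)
	{
		/* e.g. 100 MHz parent, 33 MHz target -> div 3 -> 33333333 Hz */
		printf("%lu\n", example_best_rate(100000000UL, 33000000UL));
		return 0;
	}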
diff --git a/drivers/media/platform/atmel/atmel-isc.h b/drivers/media/platform/atmel/atmel-isc.h
index 2bfcb135ef13..07fa6dbf8460 100644
--- a/drivers/media/platform/atmel/atmel-isc.h
+++ b/drivers/media/platform/atmel/atmel-isc.h
@@ -10,6 +10,13 @@
*/
#ifndef _ATMEL_ISC_H_
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-dma-contig.h>
+
#define ISC_CLK_MAX_DIV 255
enum isc_clk_id {
@@ -95,6 +102,9 @@ struct isc_format {
configuration.
* @fourcc: Fourcc code for this format.
* @bpp: Bytes per pixel in the current format.
+ * @bpp_v4l2: Bytes per pixel in the current format, for v4l2.
+ This differs from 'bpp' in the sense that in planar
+ formats, it refers only to the first plane.
* @rlp_cfg_mode: Configuration of the RLP (rounding, limiting packaging)
* @dcfg_imode: Configuration of the input of the DMA module
* @dctrl_dview: Configuration of the output of the DMA module
@@ -105,6 +115,7 @@ struct fmt_config {
u32 fourcc;
u8 bpp;
+ u8 bpp_v4l2;
u32 rlp_cfg_mode;
u32 dcfg_imode;
diff --git a/drivers/media/platform/atmel/atmel-sama5d2-isc.c b/drivers/media/platform/atmel/atmel-sama5d2-isc.c
index 1b2063cce0f7..c5b9563e36cb 100644
--- a/drivers/media/platform/atmel/atmel-sama5d2-isc.c
+++ b/drivers/media/platform/atmel/atmel-sama5d2-isc.c
@@ -88,6 +88,30 @@ static const struct isc_format sama5d2_controller_formats[] = {
{
.fourcc = V4L2_PIX_FMT_Y10,
},
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ },
};
/* This is a list of formats that the ISC can receive as *input* */
diff --git a/drivers/media/platform/atmel/atmel-sama7g5-isc.c b/drivers/media/platform/atmel/atmel-sama7g5-isc.c
index 5d1c76f680f3..07a80b08bc54 100644
--- a/drivers/media/platform/atmel/atmel-sama7g5-isc.c
+++ b/drivers/media/platform/atmel/atmel-sama7g5-isc.c
@@ -100,6 +100,30 @@ static const struct isc_format sama7g5_controller_formats[] = {
{
.fourcc = V4L2_PIX_FMT_Y16,
},
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ },
};
/* This is a list of formats that the ISC can receive as *input* */
@@ -188,7 +212,7 @@ static struct isc_format sama7g5_formats_list[] = {
},
{
.fourcc = V4L2_PIX_FMT_UYVY,
- .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
},
{
@@ -556,7 +580,6 @@ static int microchip_xisc_remove(struct platform_device *pdev)
v4l2_device_unregister(&isc->v4l2_dev);
- clk_disable_unprepare(isc->ispck);
clk_disable_unprepare(isc->hclock);
isc_clk_cleanup(isc);
@@ -568,7 +591,6 @@ static int __maybe_unused xisc_runtime_suspend(struct device *dev)
{
struct isc_device *isc = dev_get_drvdata(dev);
- clk_disable_unprepare(isc->ispck);
clk_disable_unprepare(isc->hclock);
return 0;
@@ -583,10 +605,6 @@ static int __maybe_unused xisc_runtime_resume(struct device *dev)
if (ret)
return ret;
- ret = clk_prepare_enable(isc->ispck);
- if (ret)
- clk_disable_unprepare(isc->hclock);
-
return ret;
}
diff --git a/drivers/media/platform/atmel/microchip-csi2dc.c b/drivers/media/platform/atmel/microchip-csi2dc.c
new file mode 100644
index 000000000000..2487978db1f1
--- /dev/null
+++ b/drivers/media/platform/atmel/microchip-csi2dc.c
@@ -0,0 +1,792 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Microchip CSI2 Demux Controller (CSI2DC) driver
+ *
+ * Copyright (C) 2018 Microchip Technology, Inc.
+ *
+ * Author: Eugen Hristev <eugen.hristev@microchip.com>
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+/* Global configuration register */
+#define CSI2DC_GCFG 0x0
+
+/* MIPI sensor pixel clock is free running */
+#define CSI2DC_GCFG_MIPIFRN BIT(0)
+/* GPIO parallel interface selection */
+#define CSI2DC_GCFG_GPIOSEL BIT(1)
+/* Output waveform inter-line minimum delay */
+#define CSI2DC_GCFG_HLC(v) ((v) << 4)
+#define CSI2DC_GCFG_HLC_MASK GENMASK(7, 4)
+/* SAMA7G5 requires an HLC delay of 15 */
+#define SAMA7G5_HLC (15)
+
+/* Global control register */
+#define CSI2DC_GCTLR 0x04
+#define CSI2DC_GCTLR_SWRST BIT(0)
+
+/* Global status register */
+#define CSI2DC_GS 0x08
+
+/* SSP interrupt status register */
+#define CSI2DC_SSPIS 0x28
+/* Pipe update register */
+#define CSI2DC_PU 0xc0
+/* Video pipe attributes update */
+#define CSI2DC_PU_VP BIT(0)
+
+/* Pipe update status register */
+#define CSI2DC_PUS 0xc4
+
+/* Video pipeline Interrupt Status Register */
+#define CSI2DC_VPISR 0xf4
+
+/* Video pipeline enable register */
+#define CSI2DC_VPE 0xf8
+#define CSI2DC_VPE_ENABLE BIT(0)
+
+/* Video pipeline configuration register */
+#define CSI2DC_VPCFG 0xfc
+/* Data type */
+#define CSI2DC_VPCFG_DT(v) ((v) << 0)
+#define CSI2DC_VPCFG_DT_MASK GENMASK(5, 0)
+/* Virtual channel identifier */
+#define CSI2DC_VPCFG_VC(v) ((v) << 6)
+#define CSI2DC_VPCFG_VC_MASK GENMASK(7, 6)
+/* Decompression enable */
+#define CSI2DC_VPCFG_DE BIT(8)
+/* Decoder mode */
+#define CSI2DC_VPCFG_DM(v) ((v) << 9)
+#define CSI2DC_VPCFG_DM_DECODER8TO12 0
+/* Decoder predictor 2 selection */
+#define CSI2DC_VPCFG_DP2 BIT(12)
+/* Recommended memory storage */
+#define CSI2DC_VPCFG_RMS BIT(13)
+/* Post adjustment */
+#define CSI2DC_VPCFG_PA BIT(14)
+
+/* Video pipeline column register */
+#define CSI2DC_VPCOL 0x100
+/* Column number */
+#define CSI2DC_VPCOL_COL(v) ((v) << 0)
+#define CSI2DC_VPCOL_COL_MASK GENMASK(15, 0)
+
+/* Video pipeline row register */
+#define CSI2DC_VPROW 0x104
+/* Row number */
+#define CSI2DC_VPROW_ROW(v) ((v) << 0)
+#define CSI2DC_VPROW_ROW_MASK GENMASK(15, 0)
+
+/* Version register */
+#define CSI2DC_VERSION 0x1fc
+
+/* register read/write helpers */
+#define csi2dc_readl(st, reg) readl_relaxed((st)->base + (reg))
+#define csi2dc_writel(st, reg, val) writel_relaxed((val), \
+ (st)->base + (reg))
+
+/* supported RAW data types */
+#define CSI2DC_DT_RAW6 0x28
+#define CSI2DC_DT_RAW7 0x29
+#define CSI2DC_DT_RAW8 0x2a
+#define CSI2DC_DT_RAW10 0x2b
+#define CSI2DC_DT_RAW12 0x2c
+#define CSI2DC_DT_RAW14 0x2d
+/* YUV data types */
+#define CSI2DC_DT_YUV422_8B 0x1e
+
+/**
+ * struct csi2dc_format - CSI2DC format type struct
+ * @mbus_code: Media bus code for the format
+ * @dt: Data type constant for this format
+ */
+struct csi2dc_format {
+ u32 mbus_code;
+ u32 dt;
+};
+
+static const struct csi2dc_format csi2dc_formats[] = {
+ {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .dt = CSI2DC_DT_RAW8,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .dt = CSI2DC_DT_RAW8,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .dt = CSI2DC_DT_RAW8,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .dt = CSI2DC_DT_RAW8,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .dt = CSI2DC_DT_RAW10,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .dt = CSI2DC_DT_RAW10,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .dt = CSI2DC_DT_RAW10,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .dt = CSI2DC_DT_RAW10,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .dt = CSI2DC_DT_YUV422_8B,
+ },
+};
+
+enum mipi_csi_pads {
+ CSI2DC_PAD_SINK = 0,
+ CSI2DC_PAD_SOURCE = 1,
+ CSI2DC_PADS_NUM = 2,
+};
+
+/**
+ * struct csi2dc_device - CSI2DC device driver data/config struct
+ * @base: Register map base address
+ * @csi2dc_sd: v4l2 subdevice for the csi2dc device
+ *			This is the subdevice that the csi2dc device itself
+ *			registers with the v4l2 subsystem
+ * @dev: struct device for this csi2dc device
+ * @pclk: Peripheral clock reference
+ * Input clock that clocks the hardware block internal
+ * logic
+ * @scck: Sensor Controller clock reference
+ * Input clock that is used to generate the pixel clock
+ * @format:		Active media bus format, updated by get/set_fmt
+ * @cur_fmt:		Entry of csi2dc_formats matching the active format
+ * @try_fmt:		Entry of csi2dc_formats matching the format being tried
+ * @pads:		Media entity pads for the csi2dc subdevice
+ * @clk_gated:		Whether the clock is gated or free running
+ * @video_pipe:		Whether the video pipeline is configured
+ * @parallel_mode:	The underlying subdevice is connected on a parallel bus
+ * @vc:			Currently selected virtual channel
+ * @notifier:		Async notifier used to bind the underlying
+ *			subdevice to the csi2dc subdevice
+ * @input_sd: Reference to the underlying subdevice bound to the
+ * csi2dc subdevice
+ * @remote_pad: Pad number of the underlying subdevice that is linked
+ * to the csi2dc subdevice sink pad.
+ */
+struct csi2dc_device {
+ void __iomem *base;
+ struct v4l2_subdev csi2dc_sd;
+ struct device *dev;
+ struct clk *pclk;
+ struct clk *scck;
+
+ struct v4l2_mbus_framefmt format;
+
+ const struct csi2dc_format *cur_fmt;
+ const struct csi2dc_format *try_fmt;
+
+ struct media_pad pads[CSI2DC_PADS_NUM];
+
+ bool clk_gated;
+ bool video_pipe;
+ bool parallel_mode;
+ u32 vc;
+
+ struct v4l2_async_notifier notifier;
+
+ struct v4l2_subdev *input_sd;
+
+ u32 remote_pad;
+};
+
+static inline struct csi2dc_device *
+csi2dc_sd_to_csi2dc_device(struct v4l2_subdev *csi2dc_sd)
+{
+ return container_of(csi2dc_sd, struct csi2dc_device, csi2dc_sd);
+}
+
+static int csi2dc_enum_mbus_code(struct v4l2_subdev *csi2dc_sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(csi2dc_formats))
+ return -EINVAL;
+
+ code->code = csi2dc_formats[code->index].mbus_code;
+
+ return 0;
+}
+
+static int csi2dc_get_fmt(struct v4l2_subdev *csi2dc_sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct csi2dc_device *csi2dc = csi2dc_sd_to_csi2dc_device(csi2dc_sd);
+ struct v4l2_mbus_framefmt *v4l2_try_fmt;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ v4l2_try_fmt = v4l2_subdev_get_try_format(csi2dc_sd, sd_state,
+ format->pad);
+ format->format = *v4l2_try_fmt;
+
+ return 0;
+ }
+
+ format->format = csi2dc->format;
+
+ return 0;
+}
+
+static int csi2dc_set_fmt(struct v4l2_subdev *csi2dc_sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *req_fmt)
+{
+ struct csi2dc_device *csi2dc = csi2dc_sd_to_csi2dc_device(csi2dc_sd);
+ const struct csi2dc_format *fmt, *try_fmt = NULL;
+ struct v4l2_mbus_framefmt *v4l2_try_fmt;
+ unsigned int i;
+
+	/*
+	 * Setting the format on the source pad is not allowed:
+	 * the sink format is propagated unchanged to the source.
+	 */
+ if (req_fmt->pad == CSI2DC_PAD_SOURCE)
+ return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(csi2dc_formats); i++) {
+		fmt = &csi2dc_formats[i];
+		if (req_fmt->format.code == fmt->mbus_code)
+			try_fmt = fmt;
+	}
+
+ /* in case we could not find the desired format, default to something */
+ if (!try_fmt) {
+ try_fmt = &csi2dc_formats[0];
+
+ dev_dbg(csi2dc->dev,
+ "CSI2DC unsupported format 0x%x, defaulting to 0x%x\n",
+ req_fmt->format.code, csi2dc_formats[0].mbus_code);
+ }
+
+ req_fmt->format.code = try_fmt->mbus_code;
+ req_fmt->format.colorspace = V4L2_COLORSPACE_SRGB;
+ req_fmt->format.field = V4L2_FIELD_NONE;
+
+ if (req_fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ v4l2_try_fmt = v4l2_subdev_get_try_format(csi2dc_sd, sd_state,
+ req_fmt->pad);
+ *v4l2_try_fmt = req_fmt->format;
+ /* Trying on the sink pad makes the source pad change too */
+ v4l2_try_fmt = v4l2_subdev_get_try_format(csi2dc_sd,
+ sd_state,
+ CSI2DC_PAD_SOURCE);
+ *v4l2_try_fmt = req_fmt->format;
+
+ /* if we are just trying, we are done */
+ return 0;
+ }
+
+ /* save the format for later requests */
+ csi2dc->format = req_fmt->format;
+
+ /* update config */
+ csi2dc->cur_fmt = try_fmt;
+
+ dev_dbg(csi2dc->dev, "new format set: 0x%x @%dx%d\n",
+ csi2dc->format.code, csi2dc->format.width,
+ csi2dc->format.height);
+
+ return 0;
+}
+
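+/*
+ * A minimal usage sketch, not part of the driver: a bridge driver holding
+ * a pointer "sd" to this subdev (name assumed for illustration) would
+ * negotiate the sink format roughly like this:
+ *
+ *	struct v4l2_subdev_format fmt = {
+ *		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ *		.pad = CSI2DC_PAD_SINK,
+ *		.format = {
+ *			.code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ *			.width = 640,
+ *			.height = 480,
+ *		},
+ *	};
+ *	ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &fmt);
+ *
+ * On return, fmt.format.code holds the code actually accepted; unsupported
+ * codes fall back to csi2dc_formats[0], as implemented above.
+ */
+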
+static int csi2dc_power(struct csi2dc_device *csi2dc, int on)
+{
+ int ret = 0;
+
+ if (on) {
+ ret = clk_prepare_enable(csi2dc->pclk);
+ if (ret) {
+ dev_err(csi2dc->dev, "failed to enable pclk:%d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(csi2dc->scck);
+ if (ret) {
+ dev_err(csi2dc->dev, "failed to enable scck:%d\n", ret);
+ clk_disable_unprepare(csi2dc->pclk);
+ return ret;
+ }
+
+ /* if powering up, deassert reset line */
+ csi2dc_writel(csi2dc, CSI2DC_GCTLR, CSI2DC_GCTLR_SWRST);
+ } else {
+ /* if powering down, assert reset line */
+ csi2dc_writel(csi2dc, CSI2DC_GCTLR, 0);
+
+ clk_disable_unprepare(csi2dc->scck);
+ clk_disable_unprepare(csi2dc->pclk);
+ }
+
+ return ret;
+}
+
+static int csi2dc_get_mbus_config(struct csi2dc_device *csi2dc)
+{
+ struct v4l2_mbus_config mbus_config = { 0 };
+ int ret;
+
+ ret = v4l2_subdev_call(csi2dc->input_sd, pad, get_mbus_config,
+ csi2dc->remote_pad, &mbus_config);
+ if (ret == -ENOIOCTLCMD) {
+ dev_dbg(csi2dc->dev,
+ "no remote mbus configuration available\n");
+ return 0;
+ }
+
+ if (ret) {
+ dev_err(csi2dc->dev,
+ "failed to get remote mbus configuration\n");
+ return 0;
+ }
+
+ dev_dbg(csi2dc->dev, "subdev sending on channel %d\n", csi2dc->vc);
+
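+	/*
+	 * bus.parallel and bus.mipi_csi2 alias the same storage in the
+	 * v4l2_mbus_config bus union and both start with a flags word,
+	 * so the CSI-2 clock flag can be read through .parallel.flags.
+	 */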
+ csi2dc->clk_gated = mbus_config.bus.parallel.flags &
+ V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;
+
+ dev_dbg(csi2dc->dev, "mbus_config: %s clock\n",
+ csi2dc->clk_gated ? "gated" : "free running");
+
+ return 0;
+}
+
+static void csi2dc_vp_update(struct csi2dc_device *csi2dc)
+{
+ u32 vp, gcfg;
+
+ if (!csi2dc->video_pipe) {
+ dev_err(csi2dc->dev, "video pipeline unavailable\n");
+ return;
+ }
+
+ if (csi2dc->parallel_mode) {
+ /* In parallel mode, GPIO parallel interface must be selected */
+ gcfg = csi2dc_readl(csi2dc, CSI2DC_GCFG);
+ gcfg |= CSI2DC_GCFG_GPIOSEL;
+ csi2dc_writel(csi2dc, CSI2DC_GCFG, gcfg);
+ return;
+ }
+
+ /* serial video pipeline */
+
+	csi2dc_writel(csi2dc, CSI2DC_GCFG,
+		      (CSI2DC_GCFG_HLC(SAMA7G5_HLC) & CSI2DC_GCFG_HLC_MASK) |
+		      (csi2dc->clk_gated ? 0 : CSI2DC_GCFG_MIPIFRN));
+
+ vp = CSI2DC_VPCFG_DT(csi2dc->cur_fmt->dt) & CSI2DC_VPCFG_DT_MASK;
+ vp |= CSI2DC_VPCFG_VC(csi2dc->vc) & CSI2DC_VPCFG_VC_MASK;
+ vp &= ~CSI2DC_VPCFG_DE;
+ vp |= CSI2DC_VPCFG_DM(CSI2DC_VPCFG_DM_DECODER8TO12);
+ vp &= ~CSI2DC_VPCFG_DP2;
+ vp &= ~CSI2DC_VPCFG_RMS;
+ vp |= CSI2DC_VPCFG_PA;
+
+ csi2dc_writel(csi2dc, CSI2DC_VPCFG, vp);
+ csi2dc_writel(csi2dc, CSI2DC_VPE, CSI2DC_VPE_ENABLE);
+ csi2dc_writel(csi2dc, CSI2DC_PU, CSI2DC_PU_VP);
+}
+
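+/*
+ * Worked example from the defines above: for the default RAW8 format on
+ * virtual channel 0, the value csi2dc_vp_update() writes to CSI2DC_VPCFG
+ * works out to
+ *
+ *	CSI2DC_VPCFG_DT(0x2a)	= 0x002a	(RAW8 data type)
+ *	CSI2DC_VPCFG_VC(0)	= 0x0000
+ *	CSI2DC_VPCFG_PA		= 0x4000	(post adjustment)
+ *
+ * i.e. 0x402a, with decompression, predictor 2 and recommended memory
+ * storage left disabled.
+ */
+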
+static int csi2dc_s_stream(struct v4l2_subdev *csi2dc_sd, int enable)
+{
+ struct csi2dc_device *csi2dc = csi2dc_sd_to_csi2dc_device(csi2dc_sd);
+ int ret;
+
+ if (enable) {
+ ret = pm_runtime_resume_and_get(csi2dc->dev);
+ if (ret < 0)
+ return ret;
+
+ csi2dc_get_mbus_config(csi2dc);
+
+ csi2dc_vp_update(csi2dc);
+
+ return v4l2_subdev_call(csi2dc->input_sd, video, s_stream,
+ true);
+ }
+
+ dev_dbg(csi2dc->dev,
+		"Last frame received: VPCOLR = %u, VPROWR = %u, VPISR = %x\n",
+ csi2dc_readl(csi2dc, CSI2DC_VPCOL),
+ csi2dc_readl(csi2dc, CSI2DC_VPROW),
+ csi2dc_readl(csi2dc, CSI2DC_VPISR));
+
+ /* stop streaming scenario */
+ ret = v4l2_subdev_call(csi2dc->input_sd, video, s_stream, false);
+
+ pm_runtime_put_sync(csi2dc->dev);
+
+ return ret;
+}
+
+static int csi2dc_init_cfg(struct v4l2_subdev *csi2dc_sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ struct v4l2_mbus_framefmt *v4l2_try_fmt =
+ v4l2_subdev_get_try_format(csi2dc_sd, sd_state, 0);
+
+ v4l2_try_fmt->height = 480;
+ v4l2_try_fmt->width = 640;
+ v4l2_try_fmt->code = csi2dc_formats[0].mbus_code;
+ v4l2_try_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ v4l2_try_fmt->field = V4L2_FIELD_NONE;
+ v4l2_try_fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ v4l2_try_fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
+ v4l2_try_fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops csi2dc_pad_ops = {
+ .enum_mbus_code = csi2dc_enum_mbus_code,
+ .set_fmt = csi2dc_set_fmt,
+ .get_fmt = csi2dc_get_fmt,
+ .init_cfg = csi2dc_init_cfg,
+};
+
+static const struct v4l2_subdev_video_ops csi2dc_video_ops = {
+ .s_stream = csi2dc_s_stream,
+};
+
+static const struct v4l2_subdev_ops csi2dc_subdev_ops = {
+ .pad = &csi2dc_pad_ops,
+ .video = &csi2dc_video_ops,
+};
+
+static int csi2dc_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct csi2dc_device *csi2dc = container_of(notifier,
+ struct csi2dc_device, notifier);
+ int pad;
+ int ret;
+
+ csi2dc->input_sd = subdev;
+
+ pad = media_entity_get_fwnode_pad(&subdev->entity, asd->match.fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (pad < 0) {
+ dev_err(csi2dc->dev, "Failed to find pad for %s\n",
+ subdev->name);
+ return pad;
+ }
+
+ csi2dc->remote_pad = pad;
+
+ ret = media_create_pad_link(&csi2dc->input_sd->entity,
+ csi2dc->remote_pad,
+ &csi2dc->csi2dc_sd.entity, 0,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_err(csi2dc->dev,
+ "Failed to create pad link: %s to %s\n",
+ csi2dc->input_sd->entity.name,
+ csi2dc->csi2dc_sd.entity.name);
+ return ret;
+ }
+
+ dev_dbg(csi2dc->dev, "link with %s pad: %d\n",
+ csi2dc->input_sd->name, csi2dc->remote_pad);
+
+ return ret;
+}
+
+static const struct v4l2_async_notifier_operations csi2dc_async_ops = {
+ .bound = csi2dc_async_bound,
+};
+
+static int csi2dc_prepare_notifier(struct csi2dc_device *csi2dc,
+ struct fwnode_handle *input_fwnode)
+{
+ struct v4l2_async_subdev *asd;
+ int ret = 0;
+
+ v4l2_async_nf_init(&csi2dc->notifier);
+
+ asd = v4l2_async_nf_add_fwnode_remote(&csi2dc->notifier,
+ input_fwnode,
+ struct v4l2_async_subdev);
+
+	if (IS_ERR(asd)) {
+		ret = PTR_ERR(asd);
+		dev_err(csi2dc->dev,
+			"failed to add async notifier for node %pOF: %d\n",
+			to_of_node(input_fwnode), ret);
+		fwnode_handle_put(input_fwnode);
+		v4l2_async_nf_cleanup(&csi2dc->notifier);
+		return ret;
+	}
+
+	/* drop the endpoint reference only after its last use above */
+	fwnode_handle_put(input_fwnode);
+
+ csi2dc->notifier.ops = &csi2dc_async_ops;
+
+ ret = v4l2_async_subdev_nf_register(&csi2dc->csi2dc_sd,
+ &csi2dc->notifier);
+ if (ret) {
+		dev_err(csi2dc->dev, "failed to register async notifier: %d\n",
+ ret);
+ v4l2_async_nf_cleanup(&csi2dc->notifier);
+ }
+
+ return ret;
+}
+
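+/*
+ * Flow summary: once the notifier is registered, the v4l2 async framework
+ * waits for the subdevice behind the input endpoint to probe; when it does,
+ * csi2dc_async_bound() runs and links its source pad to our sink pad.
+ */
+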
+static int csi2dc_of_parse(struct csi2dc_device *csi2dc,
+ struct device_node *of_node)
+{
+ struct fwnode_handle *input_fwnode, *output_fwnode;
+ struct v4l2_fwnode_endpoint input_endpoint = { 0 },
+ output_endpoint = { 0 };
+ int ret;
+
+ input_fwnode = fwnode_graph_get_next_endpoint(of_fwnode_handle(of_node),
+ NULL);
+ if (!input_fwnode) {
+ dev_err(csi2dc->dev,
+ "missing port node at %pOF, input node is mandatory.\n",
+ of_node);
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(input_fwnode, &input_endpoint);
+ if (ret) {
+ dev_err(csi2dc->dev, "endpoint not defined at %pOF\n", of_node);
+ goto csi2dc_of_parse_err;
+ }
+
+ if (input_endpoint.bus_type == V4L2_MBUS_PARALLEL ||
+ input_endpoint.bus_type == V4L2_MBUS_BT656) {
+ csi2dc->parallel_mode = true;
+ dev_dbg(csi2dc->dev,
+ "subdevice connected on parallel interface\n");
+ }
+
+ if (input_endpoint.bus_type == V4L2_MBUS_CSI2_DPHY) {
+ csi2dc->clk_gated = input_endpoint.bus.mipi_csi2.flags &
+ V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;
+ dev_dbg(csi2dc->dev,
+ "subdevice connected on serial interface\n");
+ dev_dbg(csi2dc->dev, "DT: %s clock\n",
+ csi2dc->clk_gated ? "gated" : "free running");
+ }
+
+ output_fwnode = fwnode_graph_get_next_endpoint
+ (of_fwnode_handle(of_node), input_fwnode);
+
+ if (output_fwnode)
+ ret = v4l2_fwnode_endpoint_parse(output_fwnode,
+ &output_endpoint);
+
+ fwnode_handle_put(output_fwnode);
+
+ if (!output_fwnode || ret) {
+ dev_info(csi2dc->dev,
+			 "missing output node at %pOF, only the data pipe is available.\n",
+ of_node);
+ } else {
+ if (output_endpoint.bus_type != V4L2_MBUS_PARALLEL &&
+ output_endpoint.bus_type != V4L2_MBUS_BT656) {
+ dev_err(csi2dc->dev,
+ "output port must be parallel/bt656.\n");
+ ret = -EINVAL;
+ goto csi2dc_of_parse_err;
+ }
+
+ csi2dc->video_pipe = true;
+
+ dev_dbg(csi2dc->dev,
+ "block %pOF [%d.%d]->[%d.%d] video pipeline\n",
+ of_node, input_endpoint.base.port,
+ input_endpoint.base.id, output_endpoint.base.port,
+ output_endpoint.base.id);
+ }
+
+ /* prepare async notifier for subdevice completion */
+ return csi2dc_prepare_notifier(csi2dc, input_fwnode);
+
+csi2dc_of_parse_err:
+ fwnode_handle_put(input_fwnode);
+ return ret;
+}
+
+static void csi2dc_default_format(struct csi2dc_device *csi2dc)
+{
+ csi2dc->cur_fmt = &csi2dc_formats[0];
+
+ csi2dc->format.height = 480;
+ csi2dc->format.width = 640;
+ csi2dc->format.code = csi2dc_formats[0].mbus_code;
+ csi2dc->format.colorspace = V4L2_COLORSPACE_SRGB;
+ csi2dc->format.field = V4L2_FIELD_NONE;
+ csi2dc->format.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ csi2dc->format.quantization = V4L2_QUANTIZATION_DEFAULT;
+ csi2dc->format.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+}
+
+static int csi2dc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct csi2dc_device *csi2dc;
+ int ret = 0;
+ u32 ver;
+
+ csi2dc = devm_kzalloc(dev, sizeof(*csi2dc), GFP_KERNEL);
+ if (!csi2dc)
+ return -ENOMEM;
+
+ csi2dc->dev = dev;
+
+ csi2dc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(csi2dc->base)) {
+		dev_err(dev, "failed to map register base\n");
+ return PTR_ERR(csi2dc->base);
+ }
+
+ csi2dc->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(csi2dc->pclk)) {
+ ret = PTR_ERR(csi2dc->pclk);
+ dev_err(dev, "failed to get pclk: %d\n", ret);
+ return ret;
+ }
+
+ csi2dc->scck = devm_clk_get(dev, "scck");
+ if (IS_ERR(csi2dc->scck)) {
+ ret = PTR_ERR(csi2dc->scck);
+ dev_err(dev, "failed to get scck: %d\n", ret);
+ return ret;
+ }
+
+ v4l2_subdev_init(&csi2dc->csi2dc_sd, &csi2dc_subdev_ops);
+
+ csi2dc->csi2dc_sd.owner = THIS_MODULE;
+ csi2dc->csi2dc_sd.dev = dev;
+ snprintf(csi2dc->csi2dc_sd.name, sizeof(csi2dc->csi2dc_sd.name),
+ "csi2dc");
+
+ csi2dc->csi2dc_sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ csi2dc->csi2dc_sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+
+ platform_set_drvdata(pdev, csi2dc);
+
+ ret = csi2dc_of_parse(csi2dc, dev->of_node);
+ if (ret)
+ goto csi2dc_probe_cleanup_entity;
+
+ csi2dc->pads[CSI2DC_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ if (csi2dc->video_pipe)
+ csi2dc->pads[CSI2DC_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&csi2dc->csi2dc_sd.entity,
+ csi2dc->video_pipe ? CSI2DC_PADS_NUM : 1,
+ csi2dc->pads);
+ if (ret < 0) {
+ dev_err(dev, "media entity init failed\n");
+ goto csi2dc_probe_cleanup_notifier;
+ }
+
+ csi2dc_default_format(csi2dc);
+
+ /* turn power on to validate capabilities */
+ ret = csi2dc_power(csi2dc, true);
+ if (ret < 0)
+ goto csi2dc_probe_cleanup_notifier;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ ver = csi2dc_readl(csi2dc, CSI2DC_VERSION);
+
+	/*
+	 * The subdev must be registered after runtime PM has been enabled:
+	 * otherwise it could be bound immediately and trigger a
+	 * pm_runtime_resume() before pm_runtime_enable() has run.
+	 */
+ ret = v4l2_async_register_subdev(&csi2dc->csi2dc_sd);
+ if (ret) {
+ dev_err(csi2dc->dev, "failed to register the subdevice\n");
+ goto csi2dc_probe_cleanup_notifier;
+ }
+
+ dev_info(dev, "Microchip CSI2DC version %x\n", ver);
+
+ return 0;
+
+csi2dc_probe_cleanup_notifier:
+ v4l2_async_nf_cleanup(&csi2dc->notifier);
+csi2dc_probe_cleanup_entity:
+ media_entity_cleanup(&csi2dc->csi2dc_sd.entity);
+
+ return ret;
+}
+
+static int csi2dc_remove(struct platform_device *pdev)
+{
+ struct csi2dc_device *csi2dc = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ v4l2_async_unregister_subdev(&csi2dc->csi2dc_sd);
+ v4l2_async_nf_unregister(&csi2dc->notifier);
+ v4l2_async_nf_cleanup(&csi2dc->notifier);
+ media_entity_cleanup(&csi2dc->csi2dc_sd.entity);
+
+ return 0;
+}
+
+static int __maybe_unused csi2dc_runtime_suspend(struct device *dev)
+{
+ struct csi2dc_device *csi2dc = dev_get_drvdata(dev);
+
+ return csi2dc_power(csi2dc, false);
+}
+
+static int __maybe_unused csi2dc_runtime_resume(struct device *dev)
+{
+ struct csi2dc_device *csi2dc = dev_get_drvdata(dev);
+
+ return csi2dc_power(csi2dc, true);
+}
+
+static const struct dev_pm_ops csi2dc_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(csi2dc_runtime_suspend, csi2dc_runtime_resume, NULL)
+};
+
+static const struct of_device_id csi2dc_of_match[] = {
+ { .compatible = "microchip,sama7g5-csi2dc" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, csi2dc_of_match);
+
+static struct platform_driver csi2dc_driver = {
+ .probe = csi2dc_probe,
+ .remove = csi2dc_remove,
+ .driver = {
+ .name = "microchip-csi2dc",
+ .pm = &csi2dc_dev_pm_ops,
+ .of_match_table = of_match_ptr(csi2dc_of_match),
+ },
+};
+
+module_platform_driver(csi2dc_driver);
+
+MODULE_AUTHOR("Eugen Hristev <eugen.hristev@microchip.com>");
+MODULE_DESCRIPTION("Microchip CSI2 Demux Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/cadence/Kconfig b/drivers/media/platform/cadence/Kconfig
index 80cf601323ce..480325d053de 100644
--- a/drivers/media/platform/cadence/Kconfig
+++ b/drivers/media/platform/cadence/Kconfig
@@ -1,18 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
-config VIDEO_CADENCE
- bool "Cadence Video Devices"
- help
- If you have a media device designed by Cadence, say Y.
-
- Note that this option doesn't include new drivers in the kernel:
- saying N will just cause Kconfig to skip all the questions about
- Cadence media devices.
-if VIDEO_CADENCE
+comment "Cadence media platform drivers"
config VIDEO_CADENCE_CSI2RX
tristate "Cadence MIPI-CSI2 RX Controller"
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
@@ -24,7 +16,7 @@ config VIDEO_CADENCE_CSI2RX
config VIDEO_CADENCE_CSI2TX
tristate "Cadence MIPI-CSI2 TX Controller"
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
@@ -33,5 +25,3 @@ config VIDEO_CADENCE_CSI2TX
To compile this driver as a module, choose M here: the module will be
called cdns-csi2tx.
-
-endif
diff --git a/drivers/media/platform/chips-media/Kconfig b/drivers/media/platform/chips-media/Kconfig
new file mode 100644
index 000000000000..57f8f8a22df8
--- /dev/null
+++ b/drivers/media/platform/chips-media/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Chips&Media media platform drivers"
+
+config VIDEO_CODA
+ tristate "Chips&Media Coda multi-standard codec IP"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV && OF && (ARCH_MXC || COMPILE_TEST)
+ select SRAM
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
+ select V4L2_JPEG_HELPER
+ select V4L2_MEM2MEM_DEV
+ select GENERIC_ALLOCATOR
+ help
+	  Coda is a range of video codec IPs that support
+ H.264, MPEG-4, and other video formats.
+
+config VIDEO_IMX_VDOA
+ def_tristate VIDEO_CODA if SOC_IMX6Q || COMPILE_TEST
diff --git a/drivers/media/platform/coda/Makefile b/drivers/media/platform/chips-media/Makefile
index bbb16425a875..bbb16425a875 100644
--- a/drivers/media/platform/coda/Makefile
+++ b/drivers/media/platform/chips-media/Makefile
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/chips-media/coda-bit.c
index c484c008ab02..c484c008ab02 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/chips-media/coda-bit.c
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/chips-media/coda-common.c
index 3cd47ba26357..a57822b05070 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/chips-media/coda-common.c
@@ -409,6 +409,7 @@ static struct vdoa_data *coda_get_vdoa_data(void)
if (!vdoa_data)
vdoa_data = ERR_PTR(-EPROBE_DEFER);
+ put_device(&vdoa_pdev->dev);
out:
of_node_put(vdoa_node);
diff --git a/drivers/media/platform/coda/coda-gdi.c b/drivers/media/platform/chips-media/coda-gdi.c
index 59d65daca153..59d65daca153 100644
--- a/drivers/media/platform/coda/coda-gdi.c
+++ b/drivers/media/platform/chips-media/coda-gdi.c
diff --git a/drivers/media/platform/coda/coda-h264.c b/drivers/media/platform/chips-media/coda-h264.c
index 8bd0aa8af114..8bd0aa8af114 100644
--- a/drivers/media/platform/coda/coda-h264.c
+++ b/drivers/media/platform/chips-media/coda-h264.c
diff --git a/drivers/media/platform/coda/coda-jpeg.c b/drivers/media/platform/chips-media/coda-jpeg.c
index a72f4655e5ad..a72f4655e5ad 100644
--- a/drivers/media/platform/coda/coda-jpeg.c
+++ b/drivers/media/platform/chips-media/coda-jpeg.c
diff --git a/drivers/media/platform/coda/coda-mpeg2.c b/drivers/media/platform/chips-media/coda-mpeg2.c
index 6f3f6721d286..6f3f6721d286 100644
--- a/drivers/media/platform/coda/coda-mpeg2.c
+++ b/drivers/media/platform/chips-media/coda-mpeg2.c
diff --git a/drivers/media/platform/coda/coda-mpeg4.c b/drivers/media/platform/chips-media/coda-mpeg4.c
index 483a4fba1b4f..483a4fba1b4f 100644
--- a/drivers/media/platform/coda/coda-mpeg4.c
+++ b/drivers/media/platform/chips-media/coda-mpeg4.c
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/chips-media/coda.h
index dcf35641c603..dcf35641c603 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/chips-media/coda.h
diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/chips-media/coda_regs.h
index da5bb3212528..db81a904cf3f 100644
--- a/drivers/media/platform/coda/coda_regs.h
+++ b/drivers/media/platform/chips-media/coda_regs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * linux/drivers/media/platform/coda/coda_regs.h
+ * linux/drivers/media/platform/chips-media/coda_regs.h
*
* Copyright (C) 2012 Vista Silicon SL
* Javier Martin <javier.martin@vista-silicon.com>
diff --git a/drivers/media/platform/coda/imx-vdoa.c b/drivers/media/platform/chips-media/imx-vdoa.c
index 00643f37b3e6..c70871bae193 100644
--- a/drivers/media/platform/coda/imx-vdoa.c
+++ b/drivers/media/platform/chips-media/imx-vdoa.c
@@ -284,7 +284,6 @@ EXPORT_SYMBOL(vdoa_context_configure);
static int vdoa_probe(struct platform_device *pdev)
{
struct vdoa_data *vdoa;
- struct resource *res;
int ret;
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
@@ -309,10 +308,10 @@ static int vdoa_probe(struct platform_device *pdev)
if (IS_ERR(vdoa->regs))
return PTR_ERR(vdoa->regs);
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res)
- return -EINVAL;
- ret = devm_request_threaded_irq(&pdev->dev, res->start, NULL,
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ ret = devm_request_threaded_irq(&pdev->dev, ret, NULL,
vdoa_irq_handler, IRQF_ONESHOT,
"vdoa", vdoa);
if (ret < 0) {
diff --git a/drivers/media/platform/coda/imx-vdoa.h b/drivers/media/platform/chips-media/imx-vdoa.h
index a62eab476d58..a62eab476d58 100644
--- a/drivers/media/platform/coda/imx-vdoa.h
+++ b/drivers/media/platform/chips-media/imx-vdoa.h
diff --git a/drivers/media/platform/coda/trace.h b/drivers/media/platform/chips-media/trace.h
index c0791c847f7c..19f98e6dafb9 100644
--- a/drivers/media/platform/coda/trace.h
+++ b/drivers/media/platform/chips-media/trace.h
@@ -167,7 +167,7 @@ DEFINE_EVENT(coda_buf_class, coda_jpeg_done,
#endif /* __CODA_TRACE_H__ */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../drivers/media/platform/coda
+#define TRACE_INCLUDE_PATH ../../drivers/media/platform/chips-media
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
diff --git a/drivers/media/platform/intel/Kconfig b/drivers/media/platform/intel/Kconfig
new file mode 100644
index 000000000000..724e80a9086d
--- /dev/null
+++ b/drivers/media/platform/intel/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Intel media platform drivers"
+
+config VIDEO_PXA27x
+ tristate "PXA27x Quick Capture Interface driver"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV
+ depends on PXA27x || COMPILE_TEST
+ select VIDEOBUF2_DMA_SG
+ select SG_SPLIT
+ select V4L2_FWNODE
+ help
+	  This is a v4l2 driver for the PXA27x Quick Capture Interface.
diff --git a/drivers/media/platform/intel/Makefile b/drivers/media/platform/intel/Makefile
new file mode 100644
index 000000000000..7e8889cbd2df
--- /dev/null
+++ b/drivers/media/platform/intel/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/intel/pxa_camera.c
index 3ba00b0f9320..35145e3348f0 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/intel/pxa_camera.c
@@ -1573,39 +1573,40 @@ static int pxa_camera_set_bus_param(struct pxa_camera_dev *pcdev)
mbus_config |= V4L2_MBUS_PCLK_SAMPLE_FALLING;
mbus_config |= V4L2_MBUS_DATA_ACTIVE_HIGH;
- cfg.flags = mbus_config;
- ret = sensor_call(pcdev, pad, set_mbus_config, 0, &cfg);
+ ret = sensor_call(pcdev, pad, get_mbus_config, 0, &cfg);
if (ret < 0 && ret != -ENOIOCTLCMD) {
dev_err(pcdev_to_dev(pcdev),
- "Failed to call set_mbus_config: %d\n", ret);
+ "Failed to call get_mbus_config: %d\n", ret);
return ret;
}
/*
- * If the requested media bus configuration has not been fully applied
- * make sure it is supported by the platform.
+ * If the media bus configuration of the sensor differs, make sure it
+ * is supported by the platform.
*
* PXA does not support V4L2_MBUS_DATA_ACTIVE_LOW and the bus mastering
* roles should match.
*/
- if (cfg.flags != mbus_config) {
+ if (cfg.bus.parallel.flags != mbus_config) {
unsigned int pxa_mbus_role = mbus_config & (V4L2_MBUS_MASTER |
V4L2_MBUS_SLAVE);
- if (pxa_mbus_role != (cfg.flags & (V4L2_MBUS_MASTER |
- V4L2_MBUS_SLAVE))) {
+ unsigned int flags = cfg.bus.parallel.flags;
+
+ if (pxa_mbus_role != (flags & (V4L2_MBUS_MASTER |
+ V4L2_MBUS_SLAVE))) {
dev_err(pcdev_to_dev(pcdev),
"Unsupported mbus configuration: bus mastering\n");
return -EINVAL;
}
- if (cfg.flags & V4L2_MBUS_DATA_ACTIVE_LOW) {
+ if (flags & V4L2_MBUS_DATA_ACTIVE_LOW) {
dev_err(pcdev_to_dev(pcdev),
"Unsupported mbus configuration: DATA_ACTIVE_LOW\n");
return -EINVAL;
}
}
- pxa_camera_setup_cicr(pcdev, cfg.flags, pixfmt);
+ pxa_camera_setup_cicr(pcdev, cfg.bus.parallel.flags, pixfmt);
return 0;
}
diff --git a/drivers/media/platform/marvell-ccic/Kconfig b/drivers/media/platform/marvell/Kconfig
index 3e3f86264762..ec1a16734a28 100644
--- a/drivers/media/platform/marvell-ccic/Kconfig
+++ b/drivers/media/platform/marvell/Kconfig
@@ -1,7 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Marvell media platform drivers"
+
config VIDEO_CAFE_CCIC
tristate "Marvell 88ALP01 (Cafe) CMOS Camera Controller support"
- depends on PCI && I2C && VIDEO_V4L2
+ depends on V4L_PLATFORM_DRIVERS
+ depends on PCI && I2C && VIDEO_DEV
depends on COMMON_CLK
select VIDEO_OV7670
select VIDEOBUF2_VMALLOC
@@ -14,7 +18,8 @@ config VIDEO_CAFE_CCIC
config VIDEO_MMP_CAMERA
tristate "Marvell Armada 610 integrated camera controller support"
- depends on I2C && VIDEO_V4L2
+ depends on V4L_PLATFORM_DRIVERS
+ depends on I2C && VIDEO_DEV
depends on ARCH_MMP || COMPILE_TEST
depends on COMMON_CLK
select VIDEO_OV7670
diff --git a/drivers/media/platform/marvell-ccic/Makefile b/drivers/media/platform/marvell/Makefile
index 90c3c2bc6dde..90c3c2bc6dde 100644
--- a/drivers/media/platform/marvell-ccic/Makefile
+++ b/drivers/media/platform/marvell/Makefile
diff --git a/drivers/media/platform/marvell-ccic/cafe-driver.c b/drivers/media/platform/marvell/cafe-driver.c
index 03dcf8bf705e..03dcf8bf705e 100644
--- a/drivers/media/platform/marvell-ccic/cafe-driver.c
+++ b/drivers/media/platform/marvell/cafe-driver.c
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell/mcam-core.c
index ad4a7922d0d7..ad4a7922d0d7 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell/mcam-core.c
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell/mcam-core.h
index f324d808d737..f324d808d737 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.h
+++ b/drivers/media/platform/marvell/mcam-core.h
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell/mmp-driver.c
index 343ab4f7d807..df16899ab1cb 100644
--- a/drivers/media/platform/marvell-ccic/mmp-driver.c
+++ b/drivers/media/platform/marvell/mmp-driver.c
@@ -270,12 +270,10 @@ static int mmpcam_probe(struct platform_device *pdev)
* Finally, set up our IRQ now that the core is ready to
* deal with it.
*/
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
- ret = -ENODEV;
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
goto out;
- }
- cam->irq = res->start;
+ cam->irq = ret;
ret = devm_request_irq(&pdev->dev, cam->irq, mmpcam_irq, IRQF_SHARED,
"mmp-camera", mcam);
if (ret)
diff --git a/drivers/media/platform/mediatek/Kconfig b/drivers/media/platform/mediatek/Kconfig
new file mode 100644
index 000000000000..af47d9888552
--- /dev/null
+++ b/drivers/media/platform/mediatek/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Mediatek media platform drivers"
+
+source "drivers/media/platform/mediatek/jpeg/Kconfig"
+source "drivers/media/platform/mediatek/mdp/Kconfig"
+source "drivers/media/platform/mediatek/vcodec/Kconfig"
+source "drivers/media/platform/mediatek/vpu/Kconfig"
diff --git a/drivers/media/platform/mediatek/Makefile b/drivers/media/platform/mediatek/Makefile
new file mode 100644
index 000000000000..d3850a13f128
--- /dev/null
+++ b/drivers/media/platform/mediatek/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += jpeg/
+obj-y += mdp/
+obj-y += vcodec/
+obj-y += vpu/
diff --git a/drivers/media/platform/mediatek/jpeg/Kconfig b/drivers/media/platform/mediatek/jpeg/Kconfig
new file mode 100644
index 000000000000..39c4d1bc66ce
--- /dev/null
+++ b/drivers/media/platform/mediatek/jpeg/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_MEDIATEK_JPEG
+ tristate "Mediatek JPEG Codec driver"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on MTK_IOMMU_V1 || MTK_IOMMU || COMPILE_TEST
+ depends on VIDEO_DEV
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+	  Mediatek JPEG codec driver provides hardware support for
+	  decoding JPEG images.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mtk-jpeg.
diff --git a/drivers/media/platform/mtk-jpeg/Makefile b/drivers/media/platform/mediatek/jpeg/Makefile
index 76c33aad0f3f..76c33aad0f3f 100644
--- a/drivers/media/platform/mtk-jpeg/Makefile
+++ b/drivers/media/platform/mediatek/jpeg/Makefile
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
index f332beb06d51..ab5485dfc20c 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
@@ -22,7 +22,6 @@
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
-#include <soc/mediatek/smi.h>
#include "mtk_jpeg_enc_hw.h"
#include "mtk_jpeg_dec_hw.h"
@@ -681,7 +680,7 @@ static int mtk_jpeg_buf_prepare(struct vb2_buffer *vb)
{
struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct mtk_jpeg_q_data *q_data = NULL;
- struct v4l2_plane_pix_format plane_fmt = {};
+ struct v4l2_plane_pix_format plane_fmt;
int i;
q_data = mtk_jpeg_get_q_data(ctx, vb->vb2_queue->type);
@@ -1055,10 +1054,6 @@ static void mtk_jpeg_clk_on(struct mtk_jpeg_dev *jpeg)
{
int ret;
- ret = mtk_smi_larb_get(jpeg->larb);
- if (ret)
- dev_err(jpeg->dev, "mtk_smi_larb_get larbvdec fail %d\n", ret);
-
ret = clk_bulk_prepare_enable(jpeg->variant->num_clks,
jpeg->variant->clks);
if (ret)
@@ -1069,7 +1064,6 @@ static void mtk_jpeg_clk_off(struct mtk_jpeg_dev *jpeg)
{
clk_bulk_disable_unprepare(jpeg->variant->num_clks,
jpeg->variant->clks);
- mtk_smi_larb_put(jpeg->larb);
}
static irqreturn_t mtk_jpeg_enc_done(struct mtk_jpeg_dev *jpeg)
@@ -1284,35 +1278,6 @@ static struct clk_bulk_data mtk_jpeg_clocks[] = {
{ .id = "jpgenc" },
};
-static int mtk_jpeg_clk_init(struct mtk_jpeg_dev *jpeg)
-{
- struct device_node *node;
- struct platform_device *pdev;
- int ret;
-
- node = of_parse_phandle(jpeg->dev->of_node, "mediatek,larb", 0);
- if (!node)
- return -EINVAL;
- pdev = of_find_device_by_node(node);
- if (WARN_ON(!pdev)) {
- of_node_put(node);
- return -EINVAL;
- }
- of_node_put(node);
-
- jpeg->larb = &pdev->dev;
-
- ret = devm_clk_bulk_get(jpeg->dev, jpeg->variant->num_clks,
- jpeg->variant->clks);
- if (ret) {
- dev_err(&pdev->dev, "failed to get jpeg clock:%d\n", ret);
- put_device(&pdev->dev);
- return ret;
- }
-
- return 0;
-}
-
static void mtk_jpeg_job_timeout_work(struct work_struct *work)
{
struct mtk_jpeg_dev *jpeg = container_of(work, struct mtk_jpeg_dev,
@@ -1333,11 +1298,6 @@ static void mtk_jpeg_job_timeout_work(struct work_struct *work)
v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
}
-static inline void mtk_jpeg_clk_release(struct mtk_jpeg_dev *jpeg)
-{
- put_device(jpeg->larb);
-}
-
static int mtk_jpeg_probe(struct platform_device *pdev)
{
struct mtk_jpeg_dev *jpeg;
@@ -1372,7 +1332,8 @@ static int mtk_jpeg_probe(struct platform_device *pdev)
goto err_req_irq;
}
- ret = mtk_jpeg_clk_init(jpeg);
+ ret = devm_clk_bulk_get(jpeg->dev, jpeg->variant->num_clks,
+ jpeg->variant->clks);
if (ret) {
dev_err(&pdev->dev, "Failed to init clk, err %d\n", ret);
goto err_clk_init;
@@ -1438,7 +1399,6 @@ err_m2m_init:
v4l2_device_unregister(&jpeg->v4l2_dev);
err_dev_register:
- mtk_jpeg_clk_release(jpeg);
err_clk_init:
@@ -1456,7 +1416,6 @@ static int mtk_jpeg_remove(struct platform_device *pdev)
video_device_release(jpeg->vdev);
v4l2_m2m_release(jpeg->m2m_dev);
v4l2_device_unregister(&jpeg->v4l2_dev);
- mtk_jpeg_clk_release(jpeg);
return 0;
}
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.h b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h
index 595f7f10c9fd..3e4811a41ba2 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.h
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h
@@ -85,7 +85,6 @@ struct mtk_jpeg_variant {
* @alloc_ctx: videobuf2 memory allocator's context
* @vdev: video device node for jpeg mem2mem mode
* @reg_base: JPEG registers mapping
- * @larb: SMI device
* @job_timeout_work: IRQ timeout structure
* @variant: driver variant to be used
*/
@@ -99,7 +98,6 @@ struct mtk_jpeg_dev {
void *alloc_ctx;
struct video_device *vdev;
void __iomem *reg_base;
- struct device *larb;
struct delayed_work job_timeout_work;
const struct mtk_jpeg_variant *variant;
};
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
index afbbfd5d02bc..afbbfd5d02bc 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_hw.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_hw.h b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.h
index fa0d45fd7c34..fa0d45fd7c34 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_hw.h
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.h
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_parse.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_parse.c
index b95c45791c29..b95c45791c29 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_parse.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_parse.c
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_parse.h b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_parse.h
index 2918f15811f8..2918f15811f8 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_parse.h
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_parse.h
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_reg.h b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_reg.h
index 21ec8f96797f..21ec8f96797f 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_reg.h
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_reg.h
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_enc_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
index 1cf037bf72dd..1cf037bf72dd 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_enc_hw.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_enc_hw.h b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.h
index 61c60e4e58ea..61c60e4e58ea 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_enc_hw.h
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.h
diff --git a/drivers/media/platform/mediatek/mdp/Kconfig b/drivers/media/platform/mediatek/mdp/Kconfig
new file mode 100644
index 000000000000..9f13a42899bd
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_MEDIATEK_MDP
+ tristate "Mediatek MDP driver"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on MTK_IOMMU || COMPILE_TEST
+ depends on VIDEO_DEV
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ select VIDEO_MEDIATEK_VPU
+ help
+	  It is a v4l2 driver for the MDP block present in Mediatek
+	  MT8173 SoCs. It supports scaling and color space conversion.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mtk-mdp.
diff --git a/drivers/media/platform/mtk-mdp/Makefile b/drivers/media/platform/mediatek/mdp/Makefile
index 5982d65c9971..b7c16ebecc80 100644
--- a/drivers/media/platform/mtk-mdp/Makefile
+++ b/drivers/media/platform/mediatek/mdp/Makefile
@@ -7,4 +7,4 @@ mtk-mdp-y += mtk_mdp_vpu.o
obj-$(CONFIG_VIDEO_MEDIATEK_MDP) += mtk-mdp.o
-ccflags-y += -I$(srctree)/drivers/media/platform/mtk-vpu
+ccflags-y += -I$(srctree)/drivers/media/platform/mediatek/vpu
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c b/drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c
index b3426a551bea..1e3833f1c9ae 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c
@@ -9,7 +9,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
-#include <soc/mediatek/smi.h>
#include "mtk_mdp_comp.h"
@@ -18,14 +17,6 @@ void mtk_mdp_comp_clock_on(struct device *dev, struct mtk_mdp_comp *comp)
{
int i, err;
- if (comp->larb_dev) {
- err = mtk_smi_larb_get(comp->larb_dev);
- if (err)
- dev_err(dev,
- "failed to get larb, err %d. type:%d\n",
- err, comp->type);
- }
-
for (i = 0; i < ARRAY_SIZE(comp->clk); i++) {
if (IS_ERR(comp->clk[i]))
continue;
@@ -46,17 +37,12 @@ void mtk_mdp_comp_clock_off(struct device *dev, struct mtk_mdp_comp *comp)
continue;
clk_disable_unprepare(comp->clk[i]);
}
-
- if (comp->larb_dev)
- mtk_smi_larb_put(comp->larb_dev);
}
int mtk_mdp_comp_init(struct device *dev, struct device_node *node,
struct mtk_mdp_comp *comp,
enum mtk_mdp_comp_type comp_type)
{
- struct device_node *larb_node;
- struct platform_device *larb_pdev;
int ret;
int i;
@@ -77,32 +63,6 @@ int mtk_mdp_comp_init(struct device *dev, struct device_node *node,
break;
}
- /* Only DMA capable components need the LARB property */
- comp->larb_dev = NULL;
- if (comp->type != MTK_MDP_RDMA &&
- comp->type != MTK_MDP_WDMA &&
- comp->type != MTK_MDP_WROT)
- return 0;
-
- larb_node = of_parse_phandle(node, "mediatek,larb", 0);
- if (!larb_node) {
- dev_err(dev,
- "Missing mediadek,larb phandle in %pOF node\n", node);
- ret = -EINVAL;
- goto put_dev;
- }
-
- larb_pdev = of_find_device_by_node(larb_node);
- if (!larb_pdev) {
- dev_warn(dev, "Waiting for larb device %pOF\n", larb_node);
- of_node_put(larb_node);
- ret = -EPROBE_DEFER;
- goto put_dev;
- }
- of_node_put(larb_node);
-
- comp->larb_dev = &larb_pdev->dev;
-
return 0;
put_dev:
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.h b/drivers/media/platform/mediatek/mdp/mtk_mdp_comp.h
index 7897766c96bb..ae41dd3cd72a 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.h
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_comp.h
@@ -26,14 +26,12 @@ enum mtk_mdp_comp_type {
* @node: list node to track sibing MDP components
* @dev_node: component device node
* @clk: clocks required for component
- * @larb_dev: SMI device required for component
* @type: component type
*/
struct mtk_mdp_comp {
struct list_head node;
struct device_node *dev_node;
struct clk *clk[2];
- struct device *larb_dev;
enum mtk_mdp_comp_type type;
};
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
index 3d38793aaa25..d83c4964eaf9 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
@@ -17,7 +17,6 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>
-#include <soc/mediatek/smi.h>
#include "mtk_mdp_core.h"
#include "mtk_mdp_m2m.h"
@@ -246,10 +245,8 @@ static int mtk_mdp_remove(struct platform_device *pdev)
mtk_mdp_unregister_m2m_device(mdp);
v4l2_device_unregister(&mdp->v4l2_dev);
- flush_workqueue(mdp->wdt_wq);
destroy_workqueue(mdp->wdt_wq);
- flush_workqueue(mdp->job_wq);
destroy_workqueue(mdp->job_wq);
list_for_each_entry_safe(comp, comp_temp, &mdp->comp_list, node) {
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.h b/drivers/media/platform/mediatek/mdp/mtk_mdp_core.h
index a6e6dc36307b..a6e6dc36307b 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.h
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_core.h
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h b/drivers/media/platform/mediatek/mdp/mtk_mdp_ipi.h
index 2cb8cecb3077..2cb8cecb3077 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_ipi.h
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c b/drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
index f14779e7596e..f14779e7596e 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.h b/drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.h
index 485dbdbbf51d..485dbdbbf51d 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.h
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.h
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_regs.c b/drivers/media/platform/mediatek/mdp/mtk_mdp_regs.c
index ba476d50ae43..ba476d50ae43 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_regs.c
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_regs.c
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_regs.h b/drivers/media/platform/mediatek/mdp/mtk_mdp_regs.h
index 32cf202f2399..32cf202f2399 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_regs.h
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_regs.h
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c b/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c
index b065ccd06914..b065ccd06914 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h b/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.h
index 5a1020508446..5a1020508446 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.h
diff --git a/drivers/media/platform/mediatek/vcodec/Kconfig b/drivers/media/platform/mediatek/vcodec/Kconfig
new file mode 100644
index 000000000000..c5c76753c626
--- /dev/null
+++ b/drivers/media/platform/mediatek/vcodec/Kconfig
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_MEDIATEK_VCODEC_SCP
+ bool
+
+config VIDEO_MEDIATEK_VCODEC_VPU
+ bool
+
+config VIDEO_MEDIATEK_VCODEC
+ tristate "Mediatek Video Codec driver"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on MTK_IOMMU || COMPILE_TEST
+ depends on VIDEO_DEV
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on VIDEO_MEDIATEK_VPU || MTK_SCP
+	# The following two lines ensure we have the same state ("m" or "y") as
+ # our dependencies, to avoid missing symbols during link.
+ depends on VIDEO_MEDIATEK_VPU || !VIDEO_MEDIATEK_VPU
+ depends on MTK_SCP || !MTK_SCP
+ depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ select VIDEO_MEDIATEK_VCODEC_VPU if VIDEO_MEDIATEK_VPU
+ select VIDEO_MEDIATEK_VCODEC_SCP if MTK_SCP
+ select V4L2_H264
+ select MEDIA_CONTROLLER
+ select MEDIA_CONTROLLER_REQUEST_API
+ help
+	  Mediatek video codec driver provides hardware acceleration for
+	  encoding and decoding a range of video formats on MT8173
+	  and MT8183 SoCs.
+
+ Note that support for MT8173 requires VIDEO_MEDIATEK_VPU to
+ also be selected. Support for MT8183 depends on MTK_SCP.
+
+ To compile this driver as modules, choose M here: the
+ modules will be called mtk-vcodec-dec and mtk-vcodec-enc.
diff --git a/drivers/media/platform/mtk-vcodec/Makefile b/drivers/media/platform/mediatek/vcodec/Makefile
index ca8e9e7a9c4e..359619653a0e 100644
--- a/drivers/media/platform/mtk-vcodec/Makefile
+++ b/drivers/media/platform/mediatek/vcodec/Makefile
@@ -2,7 +2,8 @@
obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec-dec.o \
mtk-vcodec-enc.o \
- mtk-vcodec-common.o
+ mtk-vcodec-common.o \
+ mtk-vcodec-dec-hw.o
mtk-vcodec-dec-y := vdec/vdec_h264_if.o \
vdec/vdec_vp8_if.o \
@@ -11,11 +12,14 @@ mtk-vcodec-dec-y := vdec/vdec_h264_if.o \
mtk_vcodec_dec_drv.o \
vdec_drv_if.o \
vdec_vpu_if.o \
+ vdec_msg_queue.o \
mtk_vcodec_dec.o \
mtk_vcodec_dec_stateful.o \
mtk_vcodec_dec_stateless.o \
mtk_vcodec_dec_pm.o \
+mtk-vcodec-dec-hw-y := mtk_vcodec_dec_hw.o
+
mtk-vcodec-enc-y := venc/venc_vp8_if.o \
venc/venc_h264_if.o \
mtk_vcodec_enc.o \
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
index 2b334a8a81c6..130ecef2e766 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
@@ -105,12 +105,12 @@ static int vidioc_decoder_cmd(struct file *file, void *priv,
void mtk_vdec_unlock(struct mtk_vcodec_ctx *ctx)
{
- mutex_unlock(&ctx->dev->dec_mutex);
+ mutex_unlock(&ctx->dev->dec_mutex[ctx->hw_id]);
}
void mtk_vdec_lock(struct mtk_vcodec_ctx *ctx)
{
- mutex_lock(&ctx->dev->dec_mutex);
+ mutex_lock(&ctx->dev->dec_mutex[ctx->hw_id]);
}
void mtk_vcodec_dec_release(struct mtk_vcodec_ctx *ctx)
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.h
index e08886a600a3..66cd6d2242c3 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.h
@@ -68,6 +68,7 @@ extern const struct v4l2_m2m_ops mtk_vdec_m2m_ops;
extern const struct media_device_ops mtk_vcodec_media_ops;
extern const struct mtk_vcodec_dec_pdata mtk_vdec_8173_pdata;
extern const struct mtk_vcodec_dec_pdata mtk_vdec_8183_pdata;
+extern const struct mtk_vcodec_dec_pdata mtk_lat_sig_core_pdata;
/*
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
index 40c39e1e596b..df7b25e9cbc8 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
+#include <linux/pm_runtime.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
@@ -18,21 +19,23 @@
#include "mtk_vcodec_drv.h"
#include "mtk_vcodec_dec.h"
+#include "mtk_vcodec_dec_hw.h"
#include "mtk_vcodec_dec_pm.h"
#include "mtk_vcodec_intr.h"
#include "mtk_vcodec_util.h"
#include "mtk_vcodec_fw.h"
-#define VDEC_HW_ACTIVE 0x10
-#define VDEC_IRQ_CFG 0x11
-#define VDEC_IRQ_CLR 0x10
-#define VDEC_IRQ_CFG_REG 0xa4
-
-/* Wake up context wait_queue */
-static void wake_up_ctx(struct mtk_vcodec_ctx *ctx)
+static int mtk_vcodec_get_hw_count(struct mtk_vcodec_dev *dev)
{
- ctx->int_cond = 1;
- wake_up_interruptible(&ctx->queue);
+ switch (dev->vdec_pdata->hw_arch) {
+ case MTK_VDEC_PURE_SINGLE_CORE:
+ return MTK_VDEC_ONE_CORE;
+ case MTK_VDEC_LAT_SINGLE_CORE:
+ return MTK_VDEC_ONE_LAT_ONE_CORE;
+ default:
+ mtk_v4l2_err("hw arch %d not supported", dev->vdec_pdata->hw_arch);
+ return MTK_VDEC_NO_HW;
+ }
}
static irqreturn_t mtk_vcodec_dec_irq_handler(int irq, void *priv)
@@ -44,7 +47,7 @@ static irqreturn_t mtk_vcodec_dec_irq_handler(int irq, void *priv)
void __iomem *vdec_misc_addr = dev->reg_base[VDEC_MISC] +
VDEC_IRQ_CFG_REG;
- ctx = mtk_vcodec_get_curr_ctx(dev);
+ ctx = mtk_vcodec_get_curr_ctx(dev, MTK_VDEC_CORE);
/* check if HW active or not */
cg_status = readl(dev->reg_base[0]);
@@ -66,7 +69,7 @@ static irqreturn_t mtk_vcodec_dec_irq_handler(int irq, void *priv)
writel((readl(vdec_misc_addr) & ~VDEC_IRQ_CLR),
dev->reg_base[VDEC_MISC] + VDEC_IRQ_CFG_REG);
- wake_up_ctx(ctx);
+ wake_up_ctx(ctx, MTK_INST_IRQ_RECEIVED, 0);
mtk_v4l2_debug(3,
"mtk_vcodec_dec_irq_handler :wake up ctx %d, dec_done_status=%x",
@@ -75,11 +78,70 @@ static irqreturn_t mtk_vcodec_dec_irq_handler(int irq, void *priv)
return IRQ_HANDLED;
}
+static int mtk_vcodec_get_reg_bases(struct mtk_vcodec_dev *dev)
+{
+ struct platform_device *pdev = dev->plat_dev;
+ int reg_num, i;
+
+	/* Each register base is described by four u32 cells in "reg". */
+ reg_num = of_property_count_elems_of_size(pdev->dev.of_node, "reg",
+ sizeof(u32) * 4);
+ if (reg_num <= 0 || reg_num > NUM_MAX_VDEC_REG_BASE) {
+ dev_err(&pdev->dev, "Invalid register property size: %d\n", reg_num);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reg_num; i++) {
+ dev->reg_base[i] = devm_platform_ioremap_resource(pdev, i);
+ if (IS_ERR(dev->reg_base[i]))
+ return PTR_ERR(dev->reg_base[i]);
+
+ mtk_v4l2_debug(2, "reg[%d] base=%p", i, dev->reg_base[i]);
+ }
+
+ return 0;
+}
+
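+/*
+ * A sketch of the devicetree "reg" layout the element count above expects
+ * (addresses illustrative, not from a real SoC): with #address-cells and
+ * #size-cells both set to 2, each register base is four u32 cells, e.g.
+ *
+ *	reg = <0 0x16000000 0 0x1000>,		(VDEC_SYS)
+ *	      <0 0x16020000 0 0x1000>;		(VDEC_MISC)
+ */
+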
+static int mtk_vcodec_init_dec_resources(struct mtk_vcodec_dev *dev)
+{
+ struct platform_device *pdev = dev->plat_dev;
+ int ret;
+
+ ret = mtk_vcodec_get_reg_bases(dev);
+ if (ret)
+ return ret;
+
+ if (dev->vdec_pdata->is_subdev_supported)
+ return 0;
+
+ dev->dec_irq = platform_get_irq(pdev, 0);
+ if (dev->dec_irq < 0)
+ return dev->dec_irq;
+
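+	/*
+	 * Keep the IRQ disabled at request time (IRQ_NOAUTOEN); the driver
+	 * enables it explicitly once the decoder hardware is powered on.
+	 */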
+ irq_set_status_flags(dev->dec_irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(&pdev->dev, dev->dec_irq,
+ mtk_vcodec_dec_irq_handler, 0, pdev->name, dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to install dev->dec_irq %d (%d)",
+ dev->dec_irq, ret);
+ return ret;
+ }
+
+ ret = mtk_vcodec_init_dec_clk(pdev, &dev->pm);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get mt vcodec clock source");
+ return ret;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ return 0;
+}
+
static int fops_vcodec_open(struct file *file)
{
struct mtk_vcodec_dev *dev = video_drvdata(file);
struct mtk_vcodec_ctx *ctx = NULL;
- int ret = 0;
+ int ret = 0, i, hw_count;
struct vb2_queue *src_vq;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -93,7 +155,22 @@ static int fops_vcodec_open(struct file *file)
v4l2_fh_add(&ctx->fh);
INIT_LIST_HEAD(&ctx->list);
ctx->dev = dev;
- init_waitqueue_head(&ctx->queue);
+ if (ctx->dev->vdec_pdata->is_subdev_supported) {
+ hw_count = mtk_vcodec_get_hw_count(dev);
+ if (!hw_count || !dev->subdev_prob_done) {
+ ret = -EINVAL;
+ goto err_ctrls_setup;
+ }
+
+ ret = dev->subdev_prob_done(dev);
+ if (ret)
+ goto err_ctrls_setup;
+
+ for (i = 0; i < hw_count; i++)
+ init_waitqueue_head(&ctx->queue[i]);
+ } else {
+ init_waitqueue_head(&ctx->queue[0]);
+ }
mutex_init(&ctx->lock);
ctx->type = MTK_INST_DECODER;
@@ -116,7 +193,7 @@ static int fops_vcodec_open(struct file *file)
mtk_vcodec_dec_set_default_params(ctx);
if (v4l2_fh_is_singular(&ctx->fh)) {
- ret = mtk_vcodec_dec_pw_on(&dev->pm);
+ ret = mtk_vcodec_dec_pw_on(dev, MTK_VDEC_LAT0);
if (ret < 0)
goto err_load_fw;
/*
@@ -176,7 +253,7 @@ static int fops_vcodec_release(struct file *file)
mtk_vcodec_dec_release(ctx);
if (v4l2_fh_is_singular(&ctx->fh))
- mtk_vcodec_dec_pw_off(&dev->pm);
+ mtk_vcodec_dec_pw_off(dev, MTK_VDEC_LAT0);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
@@ -200,7 +277,6 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
{
struct mtk_vcodec_dev *dev;
struct video_device *vfd_dec;
- struct resource *res;
phandle rproc_phandle;
enum mtk_vcodec_fw_type fw_type;
int i, ret;
@@ -229,40 +305,34 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
if (IS_ERR(dev->fw_handler))
return PTR_ERR(dev->fw_handler);
- ret = mtk_vcodec_init_dec_pm(dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to get mt vcodec clock source");
+ ret = mtk_vcodec_init_dec_resources(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to init dec resources");
goto err_dec_pm;
}
- for (i = 0; i < NUM_MAX_VDEC_REG_BASE; i++) {
- dev->reg_base[i] = devm_platform_ioremap_resource(pdev, i);
- if (IS_ERR((__force void *)dev->reg_base[i])) {
- ret = PTR_ERR((__force void *)dev->reg_base[i]);
+ if (IS_VDEC_LAT_ARCH(dev->vdec_pdata->hw_arch)) {
+ vdec_msg_queue_init_ctx(&dev->msg_queue_core_ctx, MTK_VDEC_CORE);
+ dev->core_workqueue =
+ alloc_ordered_workqueue("core-decoder",
+ WQ_MEM_RECLAIM | WQ_FREEZABLE);
+ if (!dev->core_workqueue) {
+ mtk_v4l2_err("Failed to create core workqueue");
+ ret = -EINVAL;
goto err_res;
}
- mtk_v4l2_debug(2, "reg[%d] base=%p", i, dev->reg_base[i]);
- }
-
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "failed to get irq resource");
- ret = -ENOENT;
- goto err_res;
}
- dev->dec_irq = platform_get_irq(pdev, 0);
- irq_set_status_flags(dev->dec_irq, IRQ_NOAUTOEN);
- ret = devm_request_irq(&pdev->dev, dev->dec_irq,
- mtk_vcodec_dec_irq_handler, 0, pdev->name, dev);
- if (ret) {
- dev_err(&pdev->dev, "Failed to install dev->dec_irq %d (%d)",
- dev->dec_irq,
- ret);
- goto err_res;
+ if (of_get_property(pdev->dev.of_node, "dma-ranges", NULL)) {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(34));
+ if (ret) {
+ mtk_v4l2_err("Failed to set mask");
+ goto err_core_workq;
+ }
}
- mutex_init(&dev->dec_mutex);
+ for (i = 0; i < MTK_VDEC_HW_MAX; i++)
+ mutex_init(&dev->dec_mutex[i]);
mutex_init(&dev->dev_mutex);
spin_lock_init(&dev->irqlock);
@@ -272,7 +342,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret) {
mtk_v4l2_err("v4l2_device_register err=%d", ret);
- goto err_res;
+ goto err_core_workq;
}
init_waitqueue_head(&dev->queue);
@@ -302,7 +372,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
if (IS_ERR((__force void *)dev->m2m_dev_dec)) {
mtk_v4l2_err("Failed to init mem2mem dec device");
ret = PTR_ERR((__force void *)dev->m2m_dev_dec);
- goto err_dec_mem_init;
+ goto err_dec_alloc;
}
dev->decode_workqueue =
@@ -314,6 +384,21 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
goto err_event_workq;
}
+ if (dev->vdec_pdata->is_subdev_supported) {
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL,
+ &pdev->dev);
+ if (ret) {
+ mtk_v4l2_err("Main device of_platform_populate failed.");
+ goto err_reg_cont;
+ }
+ }
+
+ ret = video_register_device(vfd_dec, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ mtk_v4l2_err("Failed to register video device");
+ goto err_reg_cont;
+ }
+
if (dev->vdec_pdata->uses_stateless_api) {
dev->mdev_dec.dev = &pdev->dev;
strscpy(dev->mdev_dec.model, MTK_VCODEC_DEC_NAME,
@@ -327,7 +412,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
MEDIA_ENT_F_PROC_VIDEO_DECODER);
if (ret) {
mtk_v4l2_err("Failed to register media controller");
- goto err_reg_cont;
+ goto err_dec_mem_init;
}
ret = media_device_register(&dev->mdev_dec);
@@ -338,34 +423,28 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
mtk_v4l2_debug(0, "media registered as /dev/media%d", vfd_dec->minor);
}
- ret = video_register_device(vfd_dec, VFL_TYPE_VIDEO, 0);
- if (ret) {
- mtk_v4l2_err("Failed to register video device");
- goto err_dec_reg;
- }
mtk_v4l2_debug(0, "decoder registered as /dev/video%d", vfd_dec->minor);
return 0;
-err_dec_reg:
- if (dev->vdec_pdata->uses_stateless_api)
- media_device_unregister(&dev->mdev_dec);
err_media_reg:
- if (dev->vdec_pdata->uses_stateless_api)
- v4l2_m2m_unregister_media_controller(dev->m2m_dev_dec);
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev_dec);
+err_dec_mem_init:
+ video_unregister_device(vfd_dec);
err_reg_cont:
if (dev->vdec_pdata->uses_stateless_api)
media_device_cleanup(&dev->mdev_dec);
destroy_workqueue(dev->decode_workqueue);
err_event_workq:
v4l2_m2m_release(dev->m2m_dev_dec);
-err_dec_mem_init:
- video_unregister_device(vfd_dec);
err_dec_alloc:
v4l2_device_unregister(&dev->v4l2_dev);
+err_core_workq:
+ if (IS_VDEC_LAT_ARCH(dev->vdec_pdata->hw_arch))
+ destroy_workqueue(dev->core_workqueue);
err_res:
- mtk_vcodec_release_dec_pm(dev);
+ pm_runtime_disable(dev->pm.dev);
err_dec_pm:
mtk_vcodec_fw_release(dev->fw_handler);
return ret;
@@ -380,6 +459,10 @@ static const struct of_device_id mtk_vcodec_match[] = {
.compatible = "mediatek,mt8183-vcodec-dec",
.data = &mtk_vdec_8183_pdata,
},
+ {
+ .compatible = "mediatek,mt8192-vcodec-dec",
+ .data = &mtk_lat_sig_core_pdata,
+ },
{},
};
@@ -404,7 +487,7 @@ static int mtk_vcodec_dec_remove(struct platform_device *pdev)
video_unregister_device(dev->vfd_dec);
v4l2_device_unregister(&dev->v4l2_dev);
- mtk_vcodec_release_dec_pm(dev);
+ pm_runtime_disable(dev->pm.dev);
mtk_vcodec_fw_release(dev->fw_handler);
return 0;
}
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c
new file mode 100644
index 000000000000..14bed2bd4283
--- /dev/null
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Yunfei Dong <yunfei.dong@mediatek.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec.h"
+#include "mtk_vcodec_dec_hw.h"
+#include "mtk_vcodec_dec_pm.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+
+static const struct of_device_id mtk_vdec_hw_match[] = {
+ {
+ .compatible = "mediatek,mtk-vcodec-lat",
+ .data = (void *)MTK_VDEC_LAT0,
+ },
+ {
+ .compatible = "mediatek,mtk-vcodec-core",
+ .data = (void *)MTK_VDEC_CORE,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_vdec_hw_match);
+
+static int mtk_vdec_hw_prob_done(struct mtk_vcodec_dev *vdec_dev)
+{
+ struct platform_device *pdev = vdec_dev->plat_dev;
+ struct device_node *subdev_node;
+ enum mtk_vdec_hw_id hw_idx;
+ const struct of_device_id *of_id;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mtk_vdec_hw_match); i++) {
+ of_id = &mtk_vdec_hw_match[i];
+ subdev_node = of_find_compatible_node(NULL, NULL,
+ of_id->compatible);
+ if (!subdev_node)
+ continue;
+
+ of_node_put(subdev_node);
+
+ hw_idx = (enum mtk_vdec_hw_id)(uintptr_t)of_id->data;
+ if (!test_bit(hw_idx, vdec_dev->subdev_bitmap)) {
+ dev_err(&pdev->dev, "vdec %d is not ready", hw_idx);
+ return -EAGAIN;
+ }
+ }
+
+ return 0;
+}
+
+static irqreturn_t mtk_vdec_hw_irq_handler(int irq, void *priv)
+{
+ struct mtk_vdec_hw_dev *dev = priv;
+ struct mtk_vcodec_ctx *ctx;
+ u32 cg_status;
+ unsigned int dec_done_status;
+ void __iomem *vdec_misc_addr = dev->reg_base[VDEC_HW_MISC] +
+ VDEC_IRQ_CFG_REG;
+
+ ctx = mtk_vcodec_get_curr_ctx(dev->main_dev, dev->hw_idx);
+
+ /* check if HW active or not */
+ cg_status = readl(dev->reg_base[VDEC_HW_SYS]);
+ if (cg_status & VDEC_HW_ACTIVE) {
+ mtk_v4l2_err("vdec active is not 0x0 (0x%08x)",
+ cg_status);
+ return IRQ_HANDLED;
+ }
+
+ dec_done_status = readl(vdec_misc_addr);
+ if ((dec_done_status & MTK_VDEC_IRQ_STATUS_DEC_SUCCESS) !=
+ MTK_VDEC_IRQ_STATUS_DEC_SUCCESS)
+ return IRQ_HANDLED;
+
+ /* clear interrupt */
+ writel(dec_done_status | VDEC_IRQ_CFG, vdec_misc_addr);
+ writel(dec_done_status & ~VDEC_IRQ_CLR, vdec_misc_addr);
+
+ wake_up_ctx(ctx, MTK_INST_IRQ_RECEIVED, dev->hw_idx);
+
+ mtk_v4l2_debug(3, "wake up ctx %d, dec_done_status=%x",
+ ctx->id, dec_done_status);
+
+ return IRQ_HANDLED;
+}
+
+static int mtk_vdec_hw_init_irq(struct mtk_vdec_hw_dev *dev)
+{
+ struct platform_device *pdev = dev->plat_dev;
+ int ret;
+
+ dev->dec_irq = platform_get_irq(pdev, 0);
+ if (dev->dec_irq < 0)
+ return dev->dec_irq;
+
+ irq_set_status_flags(dev->dec_irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(&pdev->dev, dev->dec_irq,
+ mtk_vdec_hw_irq_handler, 0, pdev->name, dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to install dev->dec_irq %d (%d)",
+ dev->dec_irq, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mtk_vdec_hw_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_vdec_hw_dev *subdev_dev;
+ struct mtk_vcodec_dev *main_dev;
+ const struct of_device_id *of_id;
+ int hw_idx;
+ int ret;
+
+ if (!dev->parent) {
+ dev_err(dev, "no parent for hardware devices.\n");
+ return -ENODEV;
+ }
+
+ main_dev = dev_get_drvdata(dev->parent);
+ if (!main_dev) {
+ dev_err(dev, "failed to get parent driver data");
+ return -EINVAL;
+ }
+
+ subdev_dev = devm_kzalloc(dev, sizeof(*subdev_dev), GFP_KERNEL);
+ if (!subdev_dev)
+ return -ENOMEM;
+
+ subdev_dev->plat_dev = pdev;
+ ret = mtk_vcodec_init_dec_clk(pdev, &subdev_dev->pm);
+ if (ret)
+ return ret;
+ pm_runtime_enable(&pdev->dev);
+
+ of_id = of_match_device(mtk_vdec_hw_match, dev);
+ if (!of_id) {
+ dev_err(dev, "Can't get vdec subdev id.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ hw_idx = (enum mtk_vdec_hw_id)(uintptr_t)of_id->data;
+ if (hw_idx >= MTK_VDEC_HW_MAX) {
+ dev_err(dev, "Hardware index %d not correct.\n", hw_idx);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ main_dev->subdev_dev[hw_idx] = subdev_dev;
+ subdev_dev->hw_idx = hw_idx;
+ subdev_dev->main_dev = main_dev;
+ subdev_dev->reg_base[VDEC_HW_SYS] = main_dev->reg_base[VDEC_HW_SYS];
+ set_bit(subdev_dev->hw_idx, main_dev->subdev_bitmap);
+
+ ret = mtk_vdec_hw_init_irq(subdev_dev);
+ if (ret)
+ goto err;
+
+ subdev_dev->reg_base[VDEC_HW_MISC] =
+ devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR((__force void *)subdev_dev->reg_base[VDEC_HW_MISC])) {
+ ret = PTR_ERR((__force void *)subdev_dev->reg_base[VDEC_HW_MISC]);
+ goto err;
+ }
+
+ if (!main_dev->subdev_prob_done)
+ main_dev->subdev_prob_done = mtk_vdec_hw_prob_done;
+
+ platform_set_drvdata(pdev, subdev_dev);
+ return 0;
+err:
+ pm_runtime_disable(subdev_dev->pm.dev);
+ return ret;
+}
+
+static struct platform_driver mtk_vdec_driver = {
+ .probe = mtk_vdec_hw_probe,
+ .driver = {
+ .name = "mtk-vdec-comp",
+ .of_match_table = mtk_vdec_hw_match,
+ },
+};
+module_platform_driver(mtk_vdec_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mediatek video decoder hardware driver");
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.h b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.h
new file mode 100644
index 000000000000..a63e4b1b81c3
--- /dev/null
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Yunfei Dong <yunfei.dong@mediatek.com>
+ */
+
+#ifndef _MTK_VCODEC_DEC_HW_H_
+#define _MTK_VCODEC_DEC_HW_H_
+
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include "mtk_vcodec_drv.h"
+
+#define VDEC_HW_ACTIVE 0x10
+#define VDEC_IRQ_CFG 0x11
+#define VDEC_IRQ_CLR 0x10
+#define VDEC_IRQ_CFG_REG 0xa4
+
+/**
+ * enum mtk_vdec_hw_reg_idx - subdev hardware register base index
+ * @VDEC_HW_SYS : vdec soc register index
+ * @VDEC_HW_MISC: vdec misc register index
+ * @VDEC_HW_MAX : vdec supported max register index
+ */
+enum mtk_vdec_hw_reg_idx {
+ VDEC_HW_SYS,
+ VDEC_HW_MISC,
+ VDEC_HW_MAX
+};
+
+/**
+ * struct mtk_vdec_hw_dev - vdec hardware driver data
+ * @plat_dev: platform device
+ * @main_dev: main device
+ * @reg_base: mapped address of MTK Vcodec registers.
+ *
+ * @curr_ctx: the context that is waiting for codec hardware
+ *
+ * @dec_irq : decoder irq resource
+ * @pm : power management control
+ * @hw_idx : index of this hardware subdevice
+ */
+struct mtk_vdec_hw_dev {
+ struct platform_device *plat_dev;
+ struct mtk_vcodec_dev *main_dev;
+ void __iomem *reg_base[VDEC_HW_MAX];
+
+ struct mtk_vcodec_ctx *curr_ctx;
+
+ int dec_irq;
+ struct mtk_vcodec_pm pm;
+ int hw_idx;
+};
+
+#endif /* _MTK_VCODEC_DEC_HW_H_ */
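For reviewers unfamiliar with the subdev split, here is a minimal sketch (not part of the patch) of how a caller reaches a subdev's MISC register bank through the lookup helper this series adds to mtk_vcodec_util.c; vdec_hw_read_irq_cfg is a hypothetical name, while the types and register indices are the ones declared above:

static u32 vdec_hw_read_irq_cfg(struct mtk_vcodec_dev *vdec_dev, int hw_idx)
{
	struct mtk_vdec_hw_dev *subdev_dev;

	/* Returns NULL (and logs) for an out-of-range or unprobed index. */
	subdev_dev = mtk_vcodec_get_hw_dev(vdec_dev, hw_idx);
	if (!subdev_dev)
		return 0;

	return readl(subdev_dev->reg_base[VDEC_HW_MISC] + VDEC_IRQ_CFG_REG);
}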
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_pm.c
new file mode 100644
index 000000000000..7e0c2644bf7b
--- /dev/null
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_pm.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Tiffany Lin <tiffany.lin@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+
+#include "mtk_vcodec_dec_hw.h"
+#include "mtk_vcodec_dec_pm.h"
+#include "mtk_vcodec_util.h"
+
+int mtk_vcodec_init_dec_clk(struct platform_device *pdev, struct mtk_vcodec_pm *pm)
+{
+ struct mtk_vcodec_clk *dec_clk;
+ struct mtk_vcodec_clk_info *clk_info;
+ int i = 0, ret;
+
+ dec_clk = &pm->vdec_clk;
+ pm->dev = &pdev->dev;
+
+ dec_clk->clk_num =
+ of_property_count_strings(pdev->dev.of_node, "clock-names");
+ if (dec_clk->clk_num > 0) {
+ dec_clk->clk_info = devm_kcalloc(&pdev->dev,
+ dec_clk->clk_num, sizeof(*clk_info),
+ GFP_KERNEL);
+ if (!dec_clk->clk_info)
+ return -ENOMEM;
+ } else {
+ mtk_v4l2_err("Failed to get vdec clock count");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dec_clk->clk_num; i++) {
+ clk_info = &dec_clk->clk_info[i];
+ ret = of_property_read_string_index(pdev->dev.of_node,
+ "clock-names", i, &clk_info->clk_name);
+ if (ret) {
+ mtk_v4l2_err("Failed to get clock name id = %d", i);
+ return ret;
+ }
+ clk_info->vcodec_clk = devm_clk_get(&pdev->dev,
+ clk_info->clk_name);
+ if (IS_ERR(clk_info->vcodec_clk)) {
+ mtk_v4l2_err("devm_clk_get (%d)%s fail", i,
+ clk_info->clk_name);
+ return PTR_ERR(clk_info->vcodec_clk);
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_vcodec_init_dec_clk);
+
+int mtk_vcodec_dec_pw_on(struct mtk_vcodec_dev *vdec_dev, int hw_idx)
+{
+ struct mtk_vdec_hw_dev *subdev_dev;
+ struct mtk_vcodec_pm *pm;
+ int ret;
+
+ if (vdec_dev->vdec_pdata->is_subdev_supported) {
+ subdev_dev = mtk_vcodec_get_hw_dev(vdec_dev, hw_idx);
+ if (!subdev_dev) {
+ mtk_v4l2_err("Failed to get hw dev\n");
+ return -EINVAL;
+ }
+ pm = &subdev_dev->pm;
+ } else {
+ pm = &vdec_dev->pm;
+ }
+
+ ret = pm_runtime_resume_and_get(pm->dev);
+ if (ret)
+ mtk_v4l2_err("pm_runtime_resume_and_get fail %d", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mtk_vcodec_dec_pw_on);
+
+void mtk_vcodec_dec_pw_off(struct mtk_vcodec_dev *vdec_dev, int hw_idx)
+{
+ struct mtk_vdec_hw_dev *subdev_dev;
+ struct mtk_vcodec_pm *pm;
+ int ret;
+
+ if (vdec_dev->vdec_pdata->is_subdev_supported) {
+ subdev_dev = mtk_vcodec_get_hw_dev(vdec_dev, hw_idx);
+ if (!subdev_dev) {
+ mtk_v4l2_err("Failed to get hw dev\n");
+ return;
+ }
+ pm = &subdev_dev->pm;
+ } else {
+ pm = &vdec_dev->pm;
+ }
+
+ ret = pm_runtime_put_sync(pm->dev);
+ if (ret)
+ mtk_v4l2_err("pm_runtime_put_sync fail %d", ret);
+}
+EXPORT_SYMBOL_GPL(mtk_vcodec_dec_pw_off);
+
+void mtk_vcodec_dec_clock_on(struct mtk_vcodec_dev *vdec_dev, int hw_idx)
+{
+ struct mtk_vdec_hw_dev *subdev_dev;
+ struct mtk_vcodec_pm *pm;
+ struct mtk_vcodec_clk *dec_clk;
+ int ret, i;
+
+ if (vdec_dev->vdec_pdata->is_subdev_supported) {
+ subdev_dev = mtk_vcodec_get_hw_dev(vdec_dev, hw_idx);
+ if (!subdev_dev) {
+ mtk_v4l2_err("Failed to get hw dev\n");
+ return;
+ }
+ pm = &subdev_dev->pm;
+ enable_irq(subdev_dev->dec_irq);
+ } else {
+ pm = &vdec_dev->pm;
+ enable_irq(vdec_dev->dec_irq);
+ }
+
+ dec_clk = &pm->vdec_clk;
+ for (i = 0; i < dec_clk->clk_num; i++) {
+ ret = clk_prepare_enable(dec_clk->clk_info[i].vcodec_clk);
+ if (ret) {
+ mtk_v4l2_err("clk_prepare_enable %d %s fail %d", i,
+ dec_clk->clk_info[i].clk_name, ret);
+ goto error;
+ }
+ }
+
+ return;
+error:
+ for (i -= 1; i >= 0; i--)
+ clk_disable_unprepare(dec_clk->clk_info[i].vcodec_clk);
+}
+EXPORT_SYMBOL_GPL(mtk_vcodec_dec_clock_on);
+
+void mtk_vcodec_dec_clock_off(struct mtk_vcodec_dev *vdec_dev, int hw_idx)
+{
+ struct mtk_vdec_hw_dev *subdev_dev;
+ struct mtk_vcodec_pm *pm;
+ struct mtk_vcodec_clk *dec_clk;
+ int i;
+
+ if (vdec_dev->vdec_pdata->is_subdev_supported) {
+ subdev_dev = mtk_vcodec_get_hw_dev(vdec_dev, hw_idx);
+ if (!subdev_dev) {
+ mtk_v4l2_err("Failed to get hw dev\n");
+ return;
+ }
+ pm = &subdev_dev->pm;
+ disable_irq(subdev_dev->dec_irq);
+ } else {
+ pm = &vdec_dev->pm;
+ disable_irq(vdec_dev->dec_irq);
+ }
+
+ dec_clk = &pm->vdec_clk;
+ for (i = dec_clk->clk_num - 1; i >= 0; i--)
+ clk_disable_unprepare(dec_clk->clk_info[i].vcodec_clk);
+}
+EXPORT_SYMBOL_GPL(mtk_vcodec_dec_clock_off);
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_pm.h b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_pm.h
new file mode 100644
index 000000000000..3cc721bbfaf6
--- /dev/null
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_pm.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Tiffany Lin <tiffany.lin@mediatek.com>
+ */
+
+#ifndef _MTK_VCODEC_DEC_PM_H_
+#define _MTK_VCODEC_DEC_PM_H_
+
+#include "mtk_vcodec_drv.h"
+
+int mtk_vcodec_init_dec_clk(struct platform_device *pdev, struct mtk_vcodec_pm *pm);
+
+int mtk_vcodec_dec_pw_on(struct mtk_vcodec_dev *vdec_dev, int hw_idx);
+void mtk_vcodec_dec_pw_off(struct mtk_vcodec_dev *vdec_dev, int hw_idx);
+void mtk_vcodec_dec_clock_on(struct mtk_vcodec_dev *vdec_dev, int hw_idx);
+void mtk_vcodec_dec_clock_off(struct mtk_vcodec_dev *vdec_dev, int hw_idx);
+
+#endif /* _MTK_VCODEC_DEC_PM_H_ */
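Taken together, the new PM entry points are meant to nest as below. A hedged sketch, assuming the caller holds the per-hardware dec_mutex as vdec_if_decode does; vdec_run_hw_once is a hypothetical helper:

static int vdec_run_hw_once(struct mtk_vcodec_ctx *ctx)
{
	int ret;

	/* Runtime PM reference, taken once per opened instance. */
	ret = mtk_vcodec_dec_pw_on(ctx->dev, ctx->hw_id);
	if (ret < 0)
		return ret;

	/* Clocks and the decoder IRQ are toggled around each run. */
	mtk_vcodec_dec_clock_on(ctx->dev, ctx->hw_id);
	/* ... program the hardware and wait for its interrupt ... */
	mtk_vcodec_dec_clock_off(ctx->dev, ctx->hw_id);

	mtk_vcodec_dec_pw_off(ctx->dev, ctx->hw_id);
	return 0;
}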
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateful.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
index bef49244e61b..04ca43c77e5f 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateful.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
@@ -625,4 +625,6 @@ const struct mtk_vcodec_dec_pdata mtk_vdec_8173_pdata = {
.num_framesizes = NUM_SUPPORTED_FRAMESIZE,
.worker = mtk_vdec_worker,
.flush_decoder = mtk_vdec_flush_decoder,
+ .is_subdev_supported = false,
+ .hw_arch = MTK_VDEC_PURE_SINGLE_CORE,
};
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
index 3d9f47555884..23d997ac114d 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateless.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
@@ -356,4 +356,25 @@ const struct mtk_vcodec_dec_pdata mtk_vdec_8183_pdata = {
.uses_stateless_api = true,
.worker = mtk_vdec_worker,
.flush_decoder = mtk_vdec_flush_decoder,
+ .is_subdev_supported = false,
+ .hw_arch = MTK_VDEC_PURE_SINGLE_CORE,
+};
+
+/* This platform data is used for the one-LAT, one-core architecture. */
+const struct mtk_vcodec_dec_pdata mtk_lat_sig_core_pdata = {
+ .chip = MTK_MT8192,
+ .init_vdec_params = mtk_init_vdec_params,
+ .ctrls_setup = mtk_vcodec_dec_ctrls_setup,
+ .vdec_vb2_ops = &mtk_vdec_request_vb2_ops,
+ .vdec_formats = mtk_video_formats,
+ .num_formats = NUM_FORMATS,
+ .default_out_fmt = &mtk_video_formats[DEFAULT_OUT_FMT_IDX],
+ .default_cap_fmt = &mtk_video_formats[DEFAULT_CAP_FMT_IDX],
+ .vdec_framesizes = mtk_vdec_framesizes,
+ .num_framesizes = NUM_SUPPORTED_FRAMESIZE,
+ .uses_stateless_api = true,
+ .worker = mtk_vdec_worker,
+ .flush_decoder = mtk_vdec_flush_decoder,
+ .is_subdev_supported = true,
+ .hw_arch = MTK_VDEC_LAT_SINGLE_CORE,
};
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h
index 581522177308..813901c4be5e 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h
@@ -15,7 +15,9 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>
+
#include "mtk_vcodec_util.h"
+#include "vdec_msg_queue.h"
#define MTK_VCODEC_DRV_NAME "mtk_vcodec_drv"
#define MTK_VCODEC_DEC_NAME "mtk-vcodec-dec"
@@ -25,6 +27,7 @@
#define MTK_VCODEC_MAX_PLANES 3
#define MTK_V4L2_BENCHMARK 0
#define WAIT_INTR_TIMEOUT_MS 1000
+#define IS_VDEC_LAT_ARCH(hw_arch) ((hw_arch) >= MTK_VDEC_LAT_SINGLE_CORE)
/*
* enum mtk_hw_reg_idx - MTK hw register base index
@@ -94,6 +97,27 @@ enum mtk_fmt_type {
};
/*
+ * enum mtk_vdec_hw_id - hardware IDs used to tell the decoder
+ * engines apart
+ */
+enum mtk_vdec_hw_id {
+ MTK_VDEC_CORE,
+ MTK_VDEC_LAT0,
+ MTK_VDEC_LAT1,
+ MTK_VDEC_HW_MAX,
+};
+
+/*
+ * enum mtk_vdec_hw_count - Supported hardware count
+ */
+enum mtk_vdec_hw_count {
+ MTK_VDEC_NO_HW = 0,
+ MTK_VDEC_ONE_CORE,
+ MTK_VDEC_ONE_LAT_ONE_CORE,
+ MTK_VDEC_MAX_HW_COUNT,
+};
+
+/*
* struct mtk_video_fmt - Structure used to store information about pixelformats
*/
struct mtk_video_fmt {
@@ -190,12 +214,8 @@ struct mtk_vcodec_clk {
*/
struct mtk_vcodec_pm {
struct mtk_vcodec_clk vdec_clk;
- struct device *larbvdec;
-
struct mtk_vcodec_clk venc_clk;
- struct device *larbvenc;
struct device *dev;
- struct mtk_vcodec_dev *mtkdev;
};
/**
@@ -262,6 +282,9 @@ struct vdec_pic_info {
* @decoded_frame_cnt: number of decoded frames
* @lock: protect variables accessed by V4L2 threads and worker thread such as
* mtk_video_dec_buf.
+ * @hw_id: hardware index used to identify different hardware.
+ *
+ * @msg_queue: msg queue used to store lat buffer information.
*/
struct mtk_vcodec_ctx {
enum mtk_instance_type type;
@@ -283,9 +306,9 @@ struct mtk_vcodec_ctx {
struct vdec_pic_info picinfo;
int dpb_size;
- int int_cond;
- int int_type;
- wait_queue_head_t queue;
+ int int_cond[MTK_VDEC_HW_MAX];
+ int int_type[MTK_VDEC_HW_MAX];
+ wait_queue_head_t queue[MTK_VDEC_HW_MAX];
unsigned int irq_status;
struct v4l2_ctrl_handler ctrl_hdl;
@@ -304,7 +327,9 @@ struct mtk_vcodec_ctx {
int decoded_frame_cnt;
struct mutex lock;
+ int hw_id;
+ struct vdec_msg_queue msg_queue;
};
enum mtk_chip {
@@ -314,6 +339,14 @@ enum mtk_chip {
MTK_MT8195,
};
+/*
+ * enum mtk_vdec_hw_arch - supported decoder hardware architectures
+ */
+enum mtk_vdec_hw_arch {
+ MTK_VDEC_PURE_SINGLE_CORE,
+ MTK_VDEC_LAT_SINGLE_CORE,
+};
+
/**
* struct mtk_vcodec_dec_pdata - compatible data for each IC
* @init_vdec_params: init vdec params
@@ -332,7 +365,9 @@ enum mtk_chip {
* @num_framesizes: count of video decoder frame sizes
*
* @chip: chip this decoder is compatible with
* @hw_arch: hardware architecture, pure single core or LAT plus single core
*
+ * @is_subdev_supported: whether the parent/child node (subdev) architecture is supported
* @uses_stateless_api: whether the decoder uses the stateless API with requests
*/
@@ -353,7 +388,9 @@ struct mtk_vcodec_dec_pdata {
const int num_framesizes;
enum mtk_chip chip;
+ enum mtk_vdec_hw_arch hw_arch;
+ bool is_subdev_supported;
bool uses_stateless_api;
};
@@ -424,6 +461,13 @@ struct mtk_vcodec_enc_pdata {
* @pm: power management control
* @dec_capability: used to identify decode capability, ex: 4k
* @enc_capability: used to identify encode capability
+ *
+ * @core_workqueue: queue used for core hardware decode
+ * @msg_queue_core_ctx: msg queue context used for core workqueue
+ *
+ * @subdev_dev: subdev hardware device
+ * @subdev_prob_done: callback to check whether all required hardware subdevices have probed
+ * @subdev_bitmap: bitmap recording which hardware subdevices are ready
*/
struct mtk_vcodec_dev {
struct v4l2_device v4l2_dev;
@@ -455,12 +499,20 @@ struct mtk_vcodec_dev {
int dec_irq;
int enc_irq;
- struct mutex dec_mutex;
+ /* decoder hardware mutex lock */
+ struct mutex dec_mutex[MTK_VDEC_HW_MAX];
struct mutex enc_mutex;
struct mtk_vcodec_pm pm;
unsigned int dec_capability;
unsigned int enc_capability;
+
+ struct workqueue_struct *core_workqueue;
+ struct vdec_msg_queue_ctx msg_queue_core_ctx;
+
+ void *subdev_dev[MTK_VDEC_HW_MAX];
+ int (*subdev_prob_done)(struct mtk_vcodec_dev *vdec_dev);
+ DECLARE_BITMAP(subdev_bitmap, MTK_VDEC_HW_MAX);
};
static inline struct mtk_vcodec_ctx *fh_to_ctx(struct v4l2_fh *fh)
@@ -473,4 +525,13 @@ static inline struct mtk_vcodec_ctx *ctrl_to_ctx(struct v4l2_ctrl *ctrl)
return container_of(ctrl->handler, struct mtk_vcodec_ctx, ctrl_hdl);
}
+/* Wake up context wait_queue */
+static inline void
+wake_up_ctx(struct mtk_vcodec_ctx *ctx, unsigned int reason, unsigned int hw_id)
+{
+ ctx->int_cond[hw_id] = 1;
+ ctx->int_type[hw_id] = reason;
+ wake_up_interruptible(&ctx->queue[hw_id]);
+}
+
#endif /* _MTK_VCODEC_DRV_H_ */
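Since int_cond, int_type and queue are now arrays, every slot must be initialized before any hardware can complete. A sketch of what an open path for a LAT-capable decoder would do, under that assumption (the encoder side keeps using slot 0, as the fops_vcodec_open hunk below shows):

static void vdec_init_ctx_wait_state(struct mtk_vcodec_ctx *ctx)
{
	int i;

	/* One waitqueue per decoder engine (CORE, LAT0, LAT1). */
	for (i = 0; i < MTK_VDEC_HW_MAX; i++)
		init_waitqueue_head(&ctx->queue[i]);
}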
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
index ffb046eec610..c21367038c34 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
@@ -8,7 +8,6 @@
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
-#include <soc/mediatek/smi.h>
#include <linux/pm_runtime.h>
#include "mtk_vcodec_drv.h"
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.h
index 513ee7993e34..513ee7993e34 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.h
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c
index aeaecb8d416e..5172cfe0db4a 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c
@@ -11,10 +11,10 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
+#include <linux/pm_runtime.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
-#include <linux/pm_runtime.h>
#include "mtk_vcodec_drv.h"
#include "mtk_vcodec_enc.h"
@@ -62,14 +62,6 @@ static const struct mtk_video_fmt mtk_video_formats_capture_vp8[] = {
},
};
-/* Wake up context wait_queue */
-static void wake_up_ctx(struct mtk_vcodec_ctx *ctx, unsigned int reason)
-{
- ctx->int_cond = 1;
- ctx->int_type = reason;
- wake_up_interruptible(&ctx->queue);
-}
-
static void clean_irq_status(unsigned int irq_status, void __iomem *addr)
{
if (irq_status & MTK_VENC_IRQ_STATUS_PAUSE)
@@ -111,7 +103,7 @@ static irqreturn_t mtk_vcodec_enc_irq_handler(int irq, void *priv)
clean_irq_status(ctx->irq_status, addr);
- wake_up_ctx(ctx, MTK_INST_IRQ_RECEIVED);
+ wake_up_ctx(ctx, MTK_INST_IRQ_RECEIVED, 0);
return IRQ_HANDLED;
}
@@ -137,7 +129,7 @@ static int fops_vcodec_open(struct file *file)
v4l2_fh_add(&ctx->fh);
INIT_LIST_HEAD(&ctx->list);
ctx->dev = dev;
- init_waitqueue_head(&ctx->queue);
+ init_waitqueue_head(&ctx->queue[0]);
ctx->type = MTK_INST_ENCODER;
ret = mtk_vcodec_enc_ctrls_setup(ctx);
@@ -265,7 +257,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
return PTR_ERR(dev->fw_handler);
dev->venc_pdata = of_device_get_match_data(&pdev->dev);
- ret = mtk_vcodec_init_enc_pm(dev);
+ ret = mtk_vcodec_init_enc_clk(dev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get mtk vcodec clock source!");
goto err_enc_pm;
@@ -357,7 +349,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
if (of_get_property(pdev->dev.of_node, "dma-ranges", NULL))
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(34));
- ret = video_register_device(vfd_enc, VFL_TYPE_VIDEO, 1);
+ ret = video_register_device(vfd_enc, VFL_TYPE_VIDEO, -1);
if (ret) {
mtk_v4l2_err("Failed to register video device");
goto err_enc_reg;
@@ -377,7 +369,7 @@ err_enc_mem_init:
err_enc_alloc:
v4l2_device_unregister(&dev->v4l2_dev);
err_res:
- mtk_vcodec_release_enc_pm(dev);
+ pm_runtime_disable(dev->pm.dev);
err_enc_pm:
mtk_vcodec_fw_release(dev->fw_handler);
return ret;
@@ -466,7 +458,7 @@ static int mtk_vcodec_enc_remove(struct platform_device *pdev)
video_unregister_device(dev->vfd_enc);
v4l2_device_unregister(&dev->v4l2_dev);
- mtk_vcodec_release_enc_pm(dev);
+ pm_runtime_disable(dev->pm.dev);
mtk_vcodec_fw_release(dev->fw_handler);
return 0;
}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_pm.c
index 1b2e4930ed27..7055954eb2af 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_pm.c
@@ -8,58 +8,35 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
-#include <soc/mediatek/smi.h>
#include "mtk_vcodec_enc_pm.h"
#include "mtk_vcodec_util.h"
-int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev)
+int mtk_vcodec_init_enc_clk(struct mtk_vcodec_dev *mtkdev)
{
- struct device_node *node;
struct platform_device *pdev;
struct mtk_vcodec_pm *pm;
struct mtk_vcodec_clk *enc_clk;
struct mtk_vcodec_clk_info *clk_info;
- int ret = 0, i = 0;
- struct device *dev;
+ int ret, i;
pdev = mtkdev->plat_dev;
pm = &mtkdev->pm;
memset(pm, 0, sizeof(struct mtk_vcodec_pm));
- pm->mtkdev = mtkdev;
pm->dev = &pdev->dev;
- dev = &pdev->dev;
enc_clk = &pm->venc_clk;
- node = of_parse_phandle(dev->of_node, "mediatek,larb", 0);
- if (!node) {
- mtk_v4l2_err("no mediatek,larb found");
- return -ENODEV;
- }
- pdev = of_find_device_by_node(node);
- of_node_put(node);
- if (!pdev) {
- mtk_v4l2_err("no mediatek,larb device found");
- return -ENODEV;
- }
- pm->larbvenc = &pdev->dev;
- pdev = mtkdev->plat_dev;
- pm->dev = &pdev->dev;
-
enc_clk->clk_num = of_property_count_strings(pdev->dev.of_node,
"clock-names");
if (enc_clk->clk_num > 0) {
enc_clk->clk_info = devm_kcalloc(&pdev->dev,
enc_clk->clk_num, sizeof(*clk_info),
GFP_KERNEL);
- if (!enc_clk->clk_info) {
- ret = -ENOMEM;
- goto put_larbvenc;
- }
+ if (!enc_clk->clk_info)
+ return -ENOMEM;
} else {
mtk_v4l2_err("Failed to get venc clock count");
- ret = -EINVAL;
- goto put_larbvenc;
+ return -EINVAL;
}
for (i = 0; i < enc_clk->clk_num; i++) {
@@ -68,32 +45,20 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev)
"clock-names", i, &clk_info->clk_name);
if (ret) {
mtk_v4l2_err("venc failed to get clk name %d", i);
- goto put_larbvenc;
+ return ret;
}
clk_info->vcodec_clk = devm_clk_get(&pdev->dev,
clk_info->clk_name);
if (IS_ERR(clk_info->vcodec_clk)) {
mtk_v4l2_err("venc devm_clk_get (%d)%s fail", i,
clk_info->clk_name);
- ret = PTR_ERR(clk_info->vcodec_clk);
- goto put_larbvenc;
+ return PTR_ERR(clk_info->vcodec_clk);
}
}
return 0;
-
-put_larbvenc:
- put_device(pm->larbvenc);
- return ret;
-}
-
-void mtk_vcodec_release_enc_pm(struct mtk_vcodec_dev *mtkdev)
-{
- pm_runtime_disable(mtkdev->pm.dev);
- put_device(mtkdev->pm.larbvenc);
}
-
void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm)
{
struct mtk_vcodec_clk *enc_clk = &pm->venc_clk;
@@ -108,11 +73,6 @@ void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm)
}
}
- ret = mtk_smi_larb_get(pm->larbvenc);
- if (ret) {
- mtk_v4l2_err("mtk_smi_larb_get larb3 fail %d", ret);
- goto clkerr;
- }
return;
clkerr:
@@ -125,7 +85,6 @@ void mtk_vcodec_enc_clock_off(struct mtk_vcodec_pm *pm)
struct mtk_vcodec_clk *enc_clk = &pm->venc_clk;
int i = 0;
- mtk_smi_larb_put(pm->larbvenc);
for (i = enc_clk->clk_num - 1; i >= 0; i--)
clk_disable_unprepare(enc_clk->clk_info[i].vcodec_clk);
}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.h b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_pm.h
index b7ecdfd74823..bc455cefc0cd 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.h
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_pm.h
@@ -9,8 +9,7 @@
#include "mtk_vcodec_drv.h"
-int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *dev);
-void mtk_vcodec_release_enc_pm(struct mtk_vcodec_dev *dev);
+int mtk_vcodec_init_enc_clk(struct mtk_vcodec_dev *dev);
void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm);
void mtk_vcodec_enc_clock_off(struct mtk_vcodec_pm *pm);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw.c
index 94b39ae5c2e1..94b39ae5c2e1 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw.c
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.h b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw.h
index 539bb626772c..15ab6b8e3ae2 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.h
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw.h
@@ -6,7 +6,7 @@
#include <linux/remoteproc.h>
#include <linux/remoteproc/mtk_scp.h>
-#include "../mtk-vpu/mtk_vpu.h"
+#include "../vpu/mtk_vpu.h"
struct mtk_vcodec_dev;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_priv.h b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_priv.h
index b41e66185cec..b41e66185cec 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_priv.h
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_priv.h
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_scp.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_scp.c
index d8e66b645bd8..d8e66b645bd8 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_scp.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_scp.c
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_vpu.c
index cd27f637dbe7..cfc7ebed8fb7 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_vpu.c
@@ -102,6 +102,8 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_vpu_init(struct mtk_vcodec_dev *dev,
vpu_wdt_reg_handler(fw_pdev, mtk_vcodec_vpu_reset_handler, dev, rst_id);
fw = devm_kzalloc(&dev->plat_dev->dev, sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return ERR_PTR(-ENOMEM);
fw->type = VPU;
fw->ops = &mtk_vcodec_vpu_msg;
fw->pdev = fw_pdev;
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_intr.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_intr.c
new file mode 100644
index 000000000000..552b4c93d972
--- /dev/null
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_intr.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Tiffany Lin <tiffany.lin@mediatek.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/wait.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+
+int mtk_vcodec_wait_for_done_ctx(struct mtk_vcodec_ctx *ctx,
+ int command, unsigned int timeout_ms,
+ unsigned int hw_id)
+{
+ long timeout_jiff, ret;
+ int status = 0;
+
+ timeout_jiff = msecs_to_jiffies(timeout_ms);
+ ret = wait_event_interruptible_timeout(ctx->queue[hw_id],
+ ctx->int_cond[hw_id],
+ timeout_jiff);
+
+ if (!ret) {
+ status = -1; /* timeout */
+ mtk_v4l2_err("[%d] cmd=%d, type=%d, dec timeout=%ums (%d %d)",
+ ctx->id, command, ctx->type, timeout_ms,
+ ctx->int_cond[hw_id], ctx->int_type[hw_id]);
+ } else if (ret == -ERESTARTSYS) {
+ status = -1;
+ mtk_v4l2_err("[%d] cmd=%d, type=%d, dec interrupted by signal (%d %d)",
+ ctx->id, command, ctx->type,
+ ctx->int_cond[hw_id], ctx->int_type[hw_id]);
+ }
+
+ ctx->int_cond[hw_id] = 0;
+ ctx->int_type[hw_id] = 0;
+
+ return status;
+}
+EXPORT_SYMBOL(mtk_vcodec_wait_for_done_ctx);
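The hw_id argument pairs this wait with the wake_up_ctx() call made by that hardware's IRQ handler, so a LAT completion can no longer satisfy a CORE wait. The single-core codec paths pass 0, which is MTK_VDEC_CORE. A hypothetical wrapper for the LAT side:

static int vdec_wait_lat_done(struct mtk_vcodec_ctx *ctx)
{
	/* The matching wake is wake_up_ctx(ctx, MTK_INST_IRQ_RECEIVED,
	 * MTK_VDEC_LAT0) from the LAT engine's IRQ handler. */
	return mtk_vcodec_wait_for_done_ctx(ctx, MTK_INST_IRQ_RECEIVED,
					    WAIT_INTR_TIMEOUT_MS,
					    MTK_VDEC_LAT0);
}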
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_intr.h
index 638cd1f3526a..9681f492813b 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_intr.h
@@ -12,7 +12,8 @@
struct mtk_vcodec_ctx;
/* timeout is ms */
-int mtk_vcodec_wait_for_done_ctx(struct mtk_vcodec_ctx *data, int command,
- unsigned int timeout_ms);
+int mtk_vcodec_wait_for_done_ctx(struct mtk_vcodec_ctx *ctx,
+ int command, unsigned int timeout_ms,
+ unsigned int hw_id);
#endif /* _MTK_VCODEC_INTR_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.c
index 5bac820a47fc..ace78c4b5b9e 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.c
@@ -6,7 +6,10 @@
*/
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include "mtk_vcodec_dec_hw.h"
#include "mtk_vcodec_drv.h"
#include "mtk_vcodec_util.h"
@@ -71,25 +74,59 @@ void mtk_vcodec_mem_free(struct mtk_vcodec_ctx *data,
}
EXPORT_SYMBOL(mtk_vcodec_mem_free);
-void mtk_vcodec_set_curr_ctx(struct mtk_vcodec_dev *dev,
- struct mtk_vcodec_ctx *ctx)
+void *mtk_vcodec_get_hw_dev(struct mtk_vcodec_dev *dev, int hw_idx)
{
- unsigned long flags;
+ if (hw_idx >= MTK_VDEC_HW_MAX || hw_idx < 0 || !dev->subdev_dev[hw_idx]) {
+ mtk_v4l2_err("hw idx is out of range:%d", hw_idx);
+ return NULL;
+ }
+
+ return dev->subdev_dev[hw_idx];
+}
+EXPORT_SYMBOL(mtk_vcodec_get_hw_dev);
- spin_lock_irqsave(&dev->irqlock, flags);
- dev->curr_ctx = ctx;
- spin_unlock_irqrestore(&dev->irqlock, flags);
+void mtk_vcodec_set_curr_ctx(struct mtk_vcodec_dev *vdec_dev,
+ struct mtk_vcodec_ctx *ctx, int hw_idx)
+{
+ unsigned long flags;
+ struct mtk_vdec_hw_dev *subdev_dev;
+
+ spin_lock_irqsave(&vdec_dev->irqlock, flags);
+ if (vdec_dev->vdec_pdata->is_subdev_supported) {
+ subdev_dev = mtk_vcodec_get_hw_dev(vdec_dev, hw_idx);
+ if (!subdev_dev) {
+ mtk_v4l2_err("Failed to get hw dev");
+ spin_unlock_irqrestore(&vdec_dev->irqlock, flags);
+ return;
+ }
+ subdev_dev->curr_ctx = ctx;
+ } else {
+ vdec_dev->curr_ctx = ctx;
+ }
+ spin_unlock_irqrestore(&vdec_dev->irqlock, flags);
}
EXPORT_SYMBOL(mtk_vcodec_set_curr_ctx);
-struct mtk_vcodec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dev *dev)
+struct mtk_vcodec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dev *vdec_dev,
+ unsigned int hw_idx)
{
unsigned long flags;
struct mtk_vcodec_ctx *ctx;
-
- spin_lock_irqsave(&dev->irqlock, flags);
- ctx = dev->curr_ctx;
- spin_unlock_irqrestore(&dev->irqlock, flags);
+ struct mtk_vdec_hw_dev *subdev_dev;
+
+ spin_lock_irqsave(&vdec_dev->irqlock, flags);
+ if (vdec_dev->vdec_pdata->is_subdev_supported) {
+ subdev_dev = mtk_vcodec_get_hw_dev(vdec_dev, hw_idx);
+ if (!subdev_dev) {
+ mtk_v4l2_err("Failed to get hw dev");
+ spin_unlock_irqrestore(&vdec_dev->irqlock, flags);
+ return NULL;
+ }
+ ctx = subdev_dev->curr_ctx;
+ } else {
+ ctx = vdec_dev->curr_ctx;
+ }
+ spin_unlock_irqrestore(&vdec_dev->irqlock, flags);
return ctx;
}
EXPORT_SYMBOL(mtk_vcodec_get_curr_ctx);
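With the per-subdev curr_ctx, two engines can track different contexts at once while the irqlock still serializes each update. An illustrative, hypothetical check, valid on a platform with is_subdev_supported set and both engines probed:

static void vdec_curr_ctx_example(struct mtk_vcodec_dev *dev,
				  struct mtk_vcodec_ctx *lat_ctx,
				  struct mtk_vcodec_ctx *core_ctx)
{
	mtk_vcodec_set_curr_ctx(dev, lat_ctx, MTK_VDEC_LAT0);
	mtk_vcodec_set_curr_ctx(dev, core_ctx, MTK_VDEC_CORE);

	/* Each engine resolves to its own context. */
	WARN_ON(mtk_vcodec_get_curr_ctx(dev, MTK_VDEC_LAT0) != lat_ctx);
	WARN_ON(mtk_vcodec_get_curr_ctx(dev, MTK_VDEC_CORE) != core_ctx);
}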
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.h
index 87c3d6d4bfa7..71956627a0e2 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.h
@@ -54,8 +54,10 @@ int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data,
struct mtk_vcodec_mem *mem);
void mtk_vcodec_mem_free(struct mtk_vcodec_ctx *data,
struct mtk_vcodec_mem *mem);
-void mtk_vcodec_set_curr_ctx(struct mtk_vcodec_dev *dev,
- struct mtk_vcodec_ctx *ctx);
-struct mtk_vcodec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dev *dev);
+void mtk_vcodec_set_curr_ctx(struct mtk_vcodec_dev *vdec_dev,
+ struct mtk_vcodec_ctx *ctx, int hw_idx);
+struct mtk_vcodec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dev *vdec_dev,
+ unsigned int hw_idx);
+void *mtk_vcodec_get_hw_dev(struct mtk_vcodec_dev *dev, int hw_idx);
#endif /* _MTK_VCODEC_UTIL_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_if.c
index 40d6e6c5ac7a..481655bb6016 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_if.c
@@ -413,7 +413,7 @@ static int vdec_h264_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
/* wait decoder done interrupt */
err = mtk_vcodec_wait_for_done_ctx(inst->ctx,
MTK_INST_IRQ_RECEIVED,
- WAIT_INTR_TIMEOUT_MS);
+ WAIT_INTR_TIMEOUT_MS, 0);
if (err)
goto err_free_fb_out;
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_req_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_if.c
index fada4d146703..43542de11e9c 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_req_if.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_if.c
@@ -727,7 +727,7 @@ static int vdec_h264_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
/* wait decoder done interrupt */
err = mtk_vcodec_wait_for_done_ctx(inst->ctx,
MTK_INST_IRQ_RECEIVED,
- WAIT_INTR_TIMEOUT_MS);
+ WAIT_INTR_TIMEOUT_MS, 0);
if (err)
goto err_free_fb_out;
vpu_dec_end(vpu);
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp8_if.c
index e5393f841080..88c046731754 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp8_if.c
@@ -488,7 +488,7 @@ static int vdec_vp8_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
/* wait decoder done interrupt */
mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
- WAIT_INTR_TIMEOUT_MS);
+ WAIT_INTR_TIMEOUT_MS, 0);
if (inst->vsi->load_data)
load_dec_table(inst);
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_if.c
index 71cdc3ddafcb..70b8383f7c8e 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_if.c
@@ -539,7 +539,7 @@ static bool vp9_wait_dec_end(struct vdec_vp9_inst *inst)
mtk_vcodec_wait_for_done_ctx(inst->ctx,
MTK_INST_IRQ_RECEIVED,
- WAIT_INTR_TIMEOUT_MS);
+ WAIT_INTR_TIMEOUT_MS, 0);
if (ctx->irq_status & MTK_VDEC_IRQ_STATUS_DEC_SUCCESS)
return true;
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_base.h b/drivers/media/platform/mediatek/vcodec/vdec_drv_base.h
index e913f963b7db..e913f963b7db 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_drv_base.h
+++ b/drivers/media/platform/mediatek/vcodec/vdec_drv_base.h
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.c b/drivers/media/platform/mediatek/vcodec/vdec_drv_if.c
index 42008243ceac..05a5b240e906 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec_drv_if.c
@@ -24,21 +24,24 @@ int vdec_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
break;
case V4L2_PIX_FMT_H264:
ctx->dec_if = &vdec_h264_if;
+ ctx->hw_id = MTK_VDEC_CORE;
break;
case V4L2_PIX_FMT_VP8:
ctx->dec_if = &vdec_vp8_if;
+ ctx->hw_id = MTK_VDEC_CORE;
break;
case V4L2_PIX_FMT_VP9:
ctx->dec_if = &vdec_vp9_if;
+ ctx->hw_id = MTK_VDEC_CORE;
break;
default:
return -EINVAL;
}
mtk_vdec_lock(ctx);
- mtk_vcodec_dec_clock_on(&ctx->dev->pm);
+ mtk_vcodec_dec_clock_on(ctx->dev, ctx->hw_id);
ret = ctx->dec_if->init(ctx);
- mtk_vcodec_dec_clock_off(&ctx->dev->pm);
+ mtk_vcodec_dec_clock_off(ctx->dev, ctx->hw_id);
mtk_vdec_unlock(ctx);
return ret;
@@ -69,13 +72,11 @@ int vdec_if_decode(struct mtk_vcodec_ctx *ctx, struct mtk_vcodec_mem *bs,
mtk_vdec_lock(ctx);
- mtk_vcodec_set_curr_ctx(ctx->dev, ctx);
- mtk_vcodec_dec_clock_on(&ctx->dev->pm);
- enable_irq(ctx->dev->dec_irq);
+ mtk_vcodec_set_curr_ctx(ctx->dev, ctx, ctx->hw_id);
+ mtk_vcodec_dec_clock_on(ctx->dev, ctx->hw_id);
ret = ctx->dec_if->decode(ctx->drv_handle, bs, fb, res_chg);
- disable_irq(ctx->dev->dec_irq);
- mtk_vcodec_dec_clock_off(&ctx->dev->pm);
- mtk_vcodec_set_curr_ctx(ctx->dev, NULL);
+ mtk_vcodec_dec_clock_off(ctx->dev, ctx->hw_id);
+ mtk_vcodec_set_curr_ctx(ctx->dev, NULL, ctx->hw_id);
mtk_vdec_unlock(ctx);
@@ -103,9 +104,9 @@ void vdec_if_deinit(struct mtk_vcodec_ctx *ctx)
return;
mtk_vdec_lock(ctx);
- mtk_vcodec_dec_clock_on(&ctx->dev->pm);
+ mtk_vcodec_dec_clock_on(ctx->dev, ctx->hw_id);
ctx->dec_if->deinit(ctx->drv_handle);
- mtk_vcodec_dec_clock_off(&ctx->dev->pm);
+ mtk_vcodec_dec_clock_off(ctx->dev, ctx->hw_id);
mtk_vdec_unlock(ctx);
ctx->drv_handle = NULL;
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h b/drivers/media/platform/mediatek/vcodec/vdec_drv_if.h
index d467e8af4a84..d467e8af4a84 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
+++ b/drivers/media/platform/mediatek/vcodec/vdec_drv_if.h
diff --git a/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h b/drivers/media/platform/mediatek/vcodec/vdec_ipi_msg.h
index 5f45a537beb4..bf54d6d9a857 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
+++ b/drivers/media/platform/mediatek/vcodec/vdec_ipi_msg.h
@@ -18,12 +18,16 @@ enum vdec_ipi_msgid {
AP_IPIMSG_DEC_END = 0xA002,
AP_IPIMSG_DEC_DEINIT = 0xA003,
AP_IPIMSG_DEC_RESET = 0xA004,
+ AP_IPIMSG_DEC_CORE = 0xA005,
+ AP_IPIMSG_DEC_CORE_END = 0xA006,
VPU_IPIMSG_DEC_INIT_ACK = 0xB000,
VPU_IPIMSG_DEC_START_ACK = 0xB001,
VPU_IPIMSG_DEC_END_ACK = 0xB002,
VPU_IPIMSG_DEC_DEINIT_ACK = 0xB003,
VPU_IPIMSG_DEC_RESET_ACK = 0xB004,
+ VPU_IPIMSG_DEC_CORE_ACK = 0xB005,
+ VPU_IPIMSG_DEC_CORE_END_ACK = 0xB006,
};
/**
@@ -31,6 +35,8 @@ enum vdec_ipi_msgid {
* @msg_id : vdec_ipi_msgid
* @vpu_inst_addr : VPU decoder instance address. Used if ABI version < 2.
* @inst_id : instance ID. Used if the ABI version >= 2.
+ * @codec_type : codec fourcc
+ * @reserved : reserved param
*/
struct vdec_ap_ipi_cmd {
uint32_t msg_id;
@@ -38,6 +44,8 @@ struct vdec_ap_ipi_cmd {
uint32_t vpu_inst_addr;
uint32_t inst_id;
};
+ u32 codec_type;
+ u32 reserved;
};
/**
@@ -55,12 +63,12 @@ struct vdec_vpu_ipi_ack {
/**
* struct vdec_ap_ipi_init - for AP_IPIMSG_DEC_INIT
* @msg_id : AP_IPIMSG_DEC_INIT
- * @reserved : Reserved field
+ * @codec_type : codec fourcc
* @ap_inst_addr : AP video decoder instance address
*/
struct vdec_ap_ipi_init {
uint32_t msg_id;
- uint32_t reserved;
+ u32 codec_type;
uint64_t ap_inst_addr;
};
@@ -73,7 +81,7 @@ struct vdec_ap_ipi_init {
* H264 decoder [0]:buf_sz [1]:nal_start
* VP8 decoder [0]:width/height
* VP9 decoder [0]:profile, [1][2] width/height
- * @reserved : Reserved field
+ * @codec_type : codec fourcc
*/
struct vdec_ap_ipi_dec_start {
uint32_t msg_id;
@@ -82,7 +90,7 @@ struct vdec_ap_ipi_dec_start {
uint32_t inst_id;
};
uint32_t data[3];
- uint32_t reserved;
+ u32 codec_type;
};
/**
diff --git a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
new file mode 100644
index 000000000000..4b062a8128b4
--- /dev/null
+++ b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Yunfei Dong <yunfei.dong@mediatek.com>
+ */
+
+#include <linux/freezer.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+
+#include "mtk_vcodec_dec_pm.h"
+#include "mtk_vcodec_drv.h"
+#include "vdec_msg_queue.h"
+
+#define VDEC_MSG_QUEUE_TIMEOUT_MS 1500
+
+/* the size used to store lat slice header information */
+#define VDEC_LAT_SLICE_HEADER_SZ (640 * SZ_1K)
+
+/* the size used to store avc error information */
+#define VDEC_ERR_MAP_SZ_AVC (17 * SZ_1K)
+
+/* The core hardware re-reads the trans buffer decoded by the lat hardware.
+ * FHD and 4K bitstreams need different trans buffer sizes.
+ */
+static int vdec_msg_queue_get_trans_size(int width, int height)
+{
+ if (width > 1920 || height > 1088)
+ return 30 * SZ_1M;
+ else
+ return 6 * SZ_1M;
+}
+
+void vdec_msg_queue_init_ctx(struct vdec_msg_queue_ctx *ctx, int hardware_index)
+{
+ init_waitqueue_head(&ctx->ready_to_use);
+ INIT_LIST_HEAD(&ctx->ready_queue);
+ spin_lock_init(&ctx->ready_lock);
+ ctx->ready_num = 0;
+ ctx->hardware_index = hardware_index;
+}
+
+static struct list_head *vdec_get_buf_list(int hardware_index, struct vdec_lat_buf *buf)
+{
+ switch (hardware_index) {
+ case MTK_VDEC_CORE:
+ return &buf->core_list;
+ case MTK_VDEC_LAT0:
+ return &buf->lat_list;
+ default:
+ return NULL;
+ }
+}
+
+int vdec_msg_queue_qbuf(struct vdec_msg_queue_ctx *msg_ctx, struct vdec_lat_buf *buf)
+{
+ struct list_head *head;
+
+ head = vdec_get_buf_list(msg_ctx->hardware_index, buf);
+ if (!head) {
+ mtk_v4l2_err("fail to qbuf: %d", msg_ctx->hardware_index);
+ return -EINVAL;
+ }
+
+ spin_lock(&msg_ctx->ready_lock);
+ list_add_tail(head, &msg_ctx->ready_queue);
+ msg_ctx->ready_num++;
+
+ if (msg_ctx->hardware_index != MTK_VDEC_CORE)
+ wake_up_all(&msg_ctx->ready_to_use);
+ else
+ queue_work(buf->ctx->dev->core_workqueue,
+ &buf->ctx->msg_queue.core_work);
+
+ mtk_v4l2_debug(3, "enqueue buf type: %d addr: 0x%p num: %d",
+ msg_ctx->hardware_index, buf, msg_ctx->ready_num);
+ spin_unlock(&msg_ctx->ready_lock);
+
+ return 0;
+}
+
+static bool vdec_msg_queue_wait_event(struct vdec_msg_queue_ctx *msg_ctx)
+{
+ int ret;
+
+ ret = wait_event_timeout(msg_ctx->ready_to_use,
+ !list_empty(&msg_ctx->ready_queue),
+ msecs_to_jiffies(VDEC_MSG_QUEUE_TIMEOUT_MS));
+ if (!ret)
+ return false;
+
+ return true;
+}
+
+struct vdec_lat_buf *vdec_msg_queue_dqbuf(struct vdec_msg_queue_ctx *msg_ctx)
+{
+ struct vdec_lat_buf *buf;
+ struct list_head *head;
+ int ret;
+
+ spin_lock(&msg_ctx->ready_lock);
+ if (list_empty(&msg_ctx->ready_queue)) {
+ mtk_v4l2_debug(3, "queue is NULL, type:%d num: %d",
+ msg_ctx->hardware_index, msg_ctx->ready_num);
+ spin_unlock(&msg_ctx->ready_lock);
+
+ if (msg_ctx->hardware_index == MTK_VDEC_CORE)
+ return NULL;
+
+ ret = vdec_msg_queue_wait_event(msg_ctx);
+ if (!ret)
+ return NULL;
+ spin_lock(&msg_ctx->ready_lock);
+ }
+
+ if (msg_ctx->hardware_index == MTK_VDEC_CORE)
+ buf = list_first_entry(&msg_ctx->ready_queue,
+ struct vdec_lat_buf, core_list);
+ else
+ buf = list_first_entry(&msg_ctx->ready_queue,
+ struct vdec_lat_buf, lat_list);
+
+ head = vdec_get_buf_list(msg_ctx->hardware_index, buf);
+ if (!head) {
+ spin_unlock(&msg_ctx->ready_lock);
+ mtk_v4l2_err("fail to dqbuf: %d", msg_ctx->hardware_index);
+ return NULL;
+ }
+ list_del(head);
+
+ msg_ctx->ready_num--;
+ mtk_v4l2_debug(3, "dqueue buf type:%d addr: 0x%p num: %d",
+ msg_ctx->hardware_index, buf, msg_ctx->ready_num);
+ spin_unlock(&msg_ctx->ready_lock);
+
+ return buf;
+}
+
+void vdec_msg_queue_update_ube_rptr(struct vdec_msg_queue *msg_queue, uint64_t ube_rptr)
+{
+ spin_lock(&msg_queue->lat_ctx.ready_lock);
+ msg_queue->wdma_rptr_addr = ube_rptr;
+ mtk_v4l2_debug(3, "update ube rprt (0x%llx)", ube_rptr);
+ spin_unlock(&msg_queue->lat_ctx.ready_lock);
+}
+
+void vdec_msg_queue_update_ube_wptr(struct vdec_msg_queue *msg_queue, uint64_t ube_wptr)
+{
+ spin_lock(&msg_queue->lat_ctx.ready_lock);
+ msg_queue->wdma_wptr_addr = ube_wptr;
+ mtk_v4l2_debug(3, "update ube wprt: (0x%llx 0x%llx) offset: 0x%llx",
+ msg_queue->wdma_rptr_addr, msg_queue->wdma_wptr_addr,
+ ube_wptr);
+ spin_unlock(&msg_queue->lat_ctx.ready_lock);
+}
+
+bool vdec_msg_queue_wait_lat_buf_full(struct vdec_msg_queue *msg_queue)
+{
+ long timeout_jiff;
+ int ret;
+
+ timeout_jiff = msecs_to_jiffies(1000 * (NUM_BUFFER_COUNT + 2));
+ ret = wait_event_timeout(msg_queue->lat_ctx.ready_to_use,
+ msg_queue->lat_ctx.ready_num == NUM_BUFFER_COUNT,
+ timeout_jiff);
+ if (ret) {
+ mtk_v4l2_debug(3, "success to get lat buf: %d",
+ msg_queue->lat_ctx.ready_num);
+ return true;
+ }
+ mtk_v4l2_err("failed with lat buf isn't full: %d",
+ msg_queue->lat_ctx.ready_num);
+ return false;
+}
+
+void vdec_msg_queue_deinit(struct vdec_msg_queue *msg_queue,
+ struct mtk_vcodec_ctx *ctx)
+{
+ struct vdec_lat_buf *lat_buf;
+ struct mtk_vcodec_mem *mem;
+ int i;
+
+ mem = &msg_queue->wdma_addr;
+ if (mem->va)
+ mtk_vcodec_mem_free(ctx, mem);
+ for (i = 0; i < NUM_BUFFER_COUNT; i++) {
+ lat_buf = &msg_queue->lat_buf[i];
+
+ mem = &lat_buf->wdma_err_addr;
+ if (mem->va)
+ mtk_vcodec_mem_free(ctx, mem);
+
+ mem = &lat_buf->slice_bc_addr;
+ if (mem->va)
+ mtk_vcodec_mem_free(ctx, mem);
+
+ kfree(lat_buf->private_data);
+ }
+}
+
+static void vdec_msg_queue_core_work(struct work_struct *work)
+{
+ struct vdec_msg_queue *msg_queue =
+ container_of(work, struct vdec_msg_queue, core_work);
+ struct mtk_vcodec_ctx *ctx =
+ container_of(msg_queue, struct mtk_vcodec_ctx, msg_queue);
+ struct mtk_vcodec_dev *dev = ctx->dev;
+ struct vdec_lat_buf *lat_buf;
+
+ lat_buf = vdec_msg_queue_dqbuf(&dev->msg_queue_core_ctx);
+ if (!lat_buf)
+ return;
+
+ ctx = lat_buf->ctx;
+ mtk_vcodec_set_curr_ctx(dev, ctx, MTK_VDEC_CORE);
+
+ lat_buf->core_decode(lat_buf);
+
+ mtk_vcodec_set_curr_ctx(dev, NULL, MTK_VDEC_CORE);
+ vdec_msg_queue_qbuf(&ctx->msg_queue.lat_ctx, lat_buf);
+
+ if (!list_empty(&ctx->msg_queue.lat_ctx.ready_queue)) {
+ mtk_v4l2_debug(3, "re-schedule to decode for core: %d",
+ dev->msg_queue_core_ctx.ready_num);
+ queue_work(dev->core_workqueue, &msg_queue->core_work);
+ }
+}
+
+int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
+ struct mtk_vcodec_ctx *ctx, core_decode_cb_t core_decode,
+ int private_size)
+{
+ struct vdec_lat_buf *lat_buf;
+ int i, err;
+
+ /* msg queue already initialized */
+ if (msg_queue->wdma_addr.size)
+ return 0;
+
+ vdec_msg_queue_init_ctx(&msg_queue->lat_ctx, MTK_VDEC_LAT0);
+ INIT_WORK(&msg_queue->core_work, vdec_msg_queue_core_work);
+ msg_queue->wdma_addr.size =
+ vdec_msg_queue_get_trans_size(ctx->picinfo.buf_w,
+ ctx->picinfo.buf_h);
+
+ err = mtk_vcodec_mem_alloc(ctx, &msg_queue->wdma_addr);
+ if (err) {
+ mtk_v4l2_err("failed to allocate wdma_addr buf");
+ return -ENOMEM;
+ }
+ msg_queue->wdma_rptr_addr = msg_queue->wdma_addr.dma_addr;
+ msg_queue->wdma_wptr_addr = msg_queue->wdma_addr.dma_addr;
+
+ for (i = 0; i < NUM_BUFFER_COUNT; i++) {
+ lat_buf = &msg_queue->lat_buf[i];
+
+ lat_buf->wdma_err_addr.size = VDEC_ERR_MAP_SZ_AVC;
+ err = mtk_vcodec_mem_alloc(ctx, &lat_buf->wdma_err_addr);
+ if (err) {
+ mtk_v4l2_err("failed to allocate wdma_err_addr buf[%d]", i);
+ goto mem_alloc_err;
+ }
+
+ lat_buf->slice_bc_addr.size = VDEC_LAT_SLICE_HEADER_SZ;
+ err = mtk_vcodec_mem_alloc(ctx, &lat_buf->slice_bc_addr);
+ if (err) {
+ mtk_v4l2_err("failed to allocate wdma_addr buf[%d]", i);
+ goto mem_alloc_err;
+ }
+
+ lat_buf->private_data = kzalloc(private_size, GFP_KERNEL);
+ if (!lat_buf->private_data) {
+ err = -ENOMEM;
+ goto mem_alloc_err;
+ }
+
+ lat_buf->ctx = ctx;
+ lat_buf->core_decode = core_decode;
+ err = vdec_msg_queue_qbuf(&msg_queue->lat_ctx, lat_buf);
+ if (err) {
+ mtk_v4l2_err("failed to qbuf buf[%d]", i);
+ goto mem_alloc_err;
+ }
+ }
+ return 0;
+
+mem_alloc_err:
+ vdec_msg_queue_deinit(msg_queue, ctx);
+ return err;
+}
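The intended runtime flow between the two queues, as a hedged sketch (vdec_lat_pass_sketch and the LAT-run placeholder are hypothetical; the queue calls are the ones above):

static int vdec_lat_pass_sketch(struct mtk_vcodec_ctx *ctx)
{
	struct vdec_lat_buf *lat_buf;

	/* Blocks up to VDEC_MSG_QUEUE_TIMEOUT_MS until a lat buffer is
	 * handed back by a finished core pass. */
	lat_buf = vdec_msg_queue_dqbuf(&ctx->msg_queue.lat_ctx);
	if (!lat_buf)
		return -EAGAIN;

	/* ... run the LAT hardware, fill lat_buf->private_data ... */

	/* Queueing onto the core context also schedules core_work on
	 * dev->core_workqueue (see vdec_msg_queue_qbuf above). */
	return vdec_msg_queue_qbuf(&ctx->dev->msg_queue_core_ctx, lat_buf);
}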
diff --git a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h
new file mode 100644
index 000000000000..b6ba66d3e026
--- /dev/null
+++ b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Yunfei Dong <yunfei.dong@mediatek.com>
+ */
+
+#ifndef _VDEC_MSG_QUEUE_H_
+#define _VDEC_MSG_QUEUE_H_
+
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "mtk_vcodec_util.h"
+
+#define NUM_BUFFER_COUNT 3
+
+struct vdec_lat_buf;
+struct mtk_vcodec_ctx;
+struct mtk_vcodec_dev;
+typedef int (*core_decode_cb_t)(struct vdec_lat_buf *lat_buf);
+
+/**
+ * struct vdec_msg_queue_ctx - represents a queue for buffers ready to be processed
+ * @ready_to_use: wait queue signaled when a new buffer is queued
+ * @ready_queue: list of lat buffers ready to be processed
+ * @ready_lock: spin lock to protect the lat buffer usage
+ * @ready_num: number of buffers ready to be processed
+ * @hardware_index: hardware id that this queue is used for
+ */
+struct vdec_msg_queue_ctx {
+ wait_queue_head_t ready_to_use;
+ struct list_head ready_queue;
+ /* protect lat buffer */
+ spinlock_t ready_lock;
+ int ready_num;
+ int hardware_index;
+};
+
+/**
+ * struct vdec_lat_buf - lat buffer message used to store lat info for core decode
+ * @wdma_err_addr: wdma error address used for lat hardware
+ * @slice_bc_addr: slice bc address used for lat hardware
+ * @ts_info: used to carry the timestamp from the output buffer over to
+ * the capture buffer
+ *
+ * @private_data: codec data shared between the lat and the core hardware
+ * @ctx: mtk vcodec context information
+ * @core_decode: per-codec core decode callback
+ * @lat_list: list node for the lat queue
+ * @core_list: list node for the core queue
+ */
+struct vdec_lat_buf {
+ struct mtk_vcodec_mem wdma_err_addr;
+ struct mtk_vcodec_mem slice_bc_addr;
+ struct vb2_v4l2_buffer ts_info;
+
+ void *private_data;
+ struct mtk_vcodec_ctx *ctx;
+ core_decode_cb_t core_decode;
+ struct list_head lat_list;
+ struct list_head core_list;
+};
+
+/**
+ * struct vdec_msg_queue - used to store lat buffer message
+ * @lat_buf: array of lat buffers
+ * @wdma_addr: wdma buffer used for the ube
+ * @wdma_rptr_addr: ube read pointer
+ * @wdma_wptr_addr: ube write pointer
+ * @core_work: core hardware decode work
+ * @lat_ctx: queue context holding the free lat buffers
+ */
+struct vdec_msg_queue {
+ struct vdec_lat_buf lat_buf[NUM_BUFFER_COUNT];
+
+ struct mtk_vcodec_mem wdma_addr;
+ u64 wdma_rptr_addr;
+ u64 wdma_wptr_addr;
+
+ struct work_struct core_work;
+ struct vdec_msg_queue_ctx lat_ctx;
+};
+
+/**
+ * vdec_msg_queue_init - init lat buffer information.
+ * @msg_queue: used to store the lat buffer information
+ * @ctx: v4l2 ctx
+ * @core_decode: core decode callback for each codec
+ * @private_size: the private data size used to share with core
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
+ struct mtk_vcodec_ctx *ctx, core_decode_cb_t core_decode,
+ int private_size);
+
+/**
+ * vdec_msg_queue_init_ctx - used to init msg queue context information.
+ * @ctx: message queue context
+ * @hardware_index: hardware index
+ */
+void vdec_msg_queue_init_ctx(struct vdec_msg_queue_ctx *ctx, int hardware_index);
+
+/**
+ * vdec_msg_queue_qbuf - enqueue lat buffer to queue list.
+ * @ctx: message queue context
+ * @buf: current lat buffer
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int vdec_msg_queue_qbuf(struct vdec_msg_queue_ctx *ctx, struct vdec_lat_buf *buf);
+
+/**
+ * vdec_msg_queue_dqbuf - dequeue lat buffer from queue list.
+ * @ctx: message queue context
+ *
+ * Return: the dequeued lat buffer, or NULL on failure or timeout.
+ */
+struct vdec_lat_buf *vdec_msg_queue_dqbuf(struct vdec_msg_queue_ctx *ctx);
+
+/**
+ * vdec_msg_queue_update_ube_rptr - update the ube read pointer.
+ * @msg_queue: used to store the lat buffer information
+ * @ube_rptr: current ube read pointer
+ */
+void vdec_msg_queue_update_ube_rptr(struct vdec_msg_queue *msg_queue, uint64_t ube_rptr);
+
+/**
+ * vdec_msg_queue_update_ube_wptr - update the ube write pointer.
+ * @msg_queue: used to store the lat buffer information
+ * @ube_wptr: current ube write pointer
+ */
+void vdec_msg_queue_update_ube_wptr(struct vdec_msg_queue *msg_queue, uint64_t ube_wptr);
+
+/**
+ * vdec_msg_queue_wait_lat_buf_full - wait until all lat buffers are
+ * back in the lat list.
+ * @msg_queue: used to store the lat buffer information
+ *
+ * Return: true if all lat buffers are queued, false on timeout.
+ */
+bool vdec_msg_queue_wait_lat_buf_full(struct vdec_msg_queue *msg_queue);
+
+/**
+ * vdec_msg_queue_deinit - deinit lat buffer information.
+ * @msg_queue: used to store the lat buffer information
+ * @ctx: v4l2 ctx
+ */
+void vdec_msg_queue_deinit(struct vdec_msg_queue *msg_queue,
+ struct mtk_vcodec_ctx *ctx);
+
+#endif
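A minimal sketch of how a codec backend would hook into this queue, assuming a hypothetical mycodec with a shared-info struct of its own (only the two API calls are from this patch):

struct mycodec_share_info {	/* hypothetical LAT/core shared data */
	u32 trans_start;
	u32 trans_end;
};

static int mycodec_core_decode(struct vdec_lat_buf *lat_buf)
{
	/* Runs from core_work on dev->core_workqueue, with curr_ctx
	 * already set to lat_buf->ctx for MTK_VDEC_CORE. */
	struct mycodec_share_info *info = lat_buf->private_data;

	return info ? 0 : -EINVAL;
}

static int mycodec_start(struct mtk_vcodec_ctx *ctx)
{
	/* Safe to call more than once: init returns early when the
	 * trans buffer is already allocated. */
	return vdec_msg_queue_init(&ctx->msg_queue, ctx,
				   mycodec_core_decode,
				   sizeof(struct mycodec_share_info));
}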
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c b/drivers/media/platform/mediatek/vcodec/vdec_vpu_if.c
index 5dffc459a33d..dd35d2c5f920 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec_vpu_if.c
@@ -85,6 +85,8 @@ static void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
case VPU_IPIMSG_DEC_END_ACK:
case VPU_IPIMSG_DEC_DEINIT_ACK:
case VPU_IPIMSG_DEC_RESET_ACK:
+ case VPU_IPIMSG_DEC_CORE_ACK:
+ case VPU_IPIMSG_DEC_CORE_END_ACK:
break;
default:
@@ -98,18 +100,29 @@ static void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
static int vcodec_vpu_send_msg(struct vdec_vpu_inst *vpu, void *msg, int len)
{
- int err;
+ int err, id, msgid;
- mtk_vcodec_debug(vpu, "id=%X", *(uint32_t *)msg);
+ msgid = *(uint32_t *)msg;
+ mtk_vcodec_debug(vpu, "id=%X", msgid);
vpu->failure = 0;
vpu->signaled = 0;
- err = mtk_vcodec_fw_ipi_send(vpu->ctx->dev->fw_handler, vpu->id, msg,
+ if (vpu->ctx->dev->vdec_pdata->hw_arch == MTK_VDEC_LAT_SINGLE_CORE) {
+ if (msgid == AP_IPIMSG_DEC_CORE ||
+ msgid == AP_IPIMSG_DEC_CORE_END)
+ id = vpu->core_id;
+ else
+ id = vpu->id;
+ } else {
+ id = vpu->id;
+ }
+
+ err = mtk_vcodec_fw_ipi_send(vpu->ctx->dev->fw_handler, id, msg,
len, 2000);
if (err) {
mtk_vcodec_err(vpu, "send fail vpu_id=%d msg_id=%X status=%d",
- vpu->id, *(uint32_t *)msg, err);
+ id, msgid, err);
return err;
}
@@ -129,6 +142,7 @@ static int vcodec_send_ap_ipi(struct vdec_vpu_inst *vpu, unsigned int msg_id)
msg.vpu_inst_addr = vpu->inst_addr;
else
msg.inst_id = vpu->inst_id;
+ msg.codec_type = vpu->codec_type;
err = vcodec_vpu_send_msg(vpu, &msg, sizeof(msg));
mtk_vcodec_debug(vpu, "- id=%X ret=%d", msg_id, err);
@@ -147,14 +161,25 @@ int vpu_dec_init(struct vdec_vpu_inst *vpu)
err = mtk_vcodec_fw_ipi_register(vpu->ctx->dev->fw_handler, vpu->id,
vpu->handler, "vdec", NULL);
- if (err != 0) {
+ if (err) {
mtk_vcodec_err(vpu, "vpu_ipi_register fail status=%d", err);
return err;
}
+ if (vpu->ctx->dev->vdec_pdata->hw_arch == MTK_VDEC_LAT_SINGLE_CORE) {
+ err = mtk_vcodec_fw_ipi_register(vpu->ctx->dev->fw_handler,
+ vpu->core_id, vpu->handler,
+ "vdec", NULL);
+ if (err) {
+ mtk_vcodec_err(vpu, "vpu_ipi_register core fail status=%d", err);
+ return err;
+ }
+ }
+
memset(&msg, 0, sizeof(msg));
msg.msg_id = AP_IPIMSG_DEC_INIT;
msg.ap_inst_addr = (unsigned long)vpu;
+ msg.codec_type = vpu->codec_type;
mtk_vcodec_debug(vpu, "vdec_inst=%p", vpu);
@@ -185,17 +210,28 @@ int vpu_dec_start(struct vdec_vpu_inst *vpu, uint32_t *data, unsigned int len)
for (i = 0; i < len; i++)
msg.data[i] = data[i];
+ msg.codec_type = vpu->codec_type;
err = vcodec_vpu_send_msg(vpu, (void *)&msg, sizeof(msg));
mtk_vcodec_debug(vpu, "- ret=%d", err);
return err;
}
+int vpu_dec_core(struct vdec_vpu_inst *vpu)
+{
+ return vcodec_send_ap_ipi(vpu, AP_IPIMSG_DEC_CORE);
+}
+
int vpu_dec_end(struct vdec_vpu_inst *vpu)
{
return vcodec_send_ap_ipi(vpu, AP_IPIMSG_DEC_END);
}
+int vpu_dec_core_end(struct vdec_vpu_inst *vpu)
+{
+ return vcodec_send_ap_ipi(vpu, AP_IPIMSG_DEC_CORE_END);
+}
+
int vpu_dec_deinit(struct vdec_vpu_inst *vpu)
{
return vcodec_send_ap_ipi(vpu, AP_IPIMSG_DEC_DEINIT);
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h b/drivers/media/platform/mediatek/vcodec/vdec_vpu_if.h
index c2ed5b6cab8b..4cb3c7f5a3ad 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
+++ b/drivers/media/platform/mediatek/vcodec/vdec_vpu_if.h
@@ -14,6 +14,7 @@ struct mtk_vcodec_ctx;
/**
* struct vdec_vpu_inst - VPU instance for video codec
* @id : ipi msg id for each decoder
+ * @core_id : core ID used to distinguish different hardware blocks
* @vsi : driver structure allocated by VPU side and shared to AP side
* for control and info share
* @failure : VPU execution result status, 0: success, others: fail
@@ -26,9 +27,11 @@ struct mtk_vcodec_ctx;
* @dev : platform device of VPU
* @wq : wait queue to wait VPU message ack
* @handler : ipi handler for each decoder
+ * @codec_type : codec type, used to distinguish different codecs
*/
struct vdec_vpu_inst {
int id;
+ int core_id;
void *vsi;
int32_t failure;
uint32_t inst_addr;
@@ -38,6 +41,7 @@ struct vdec_vpu_inst {
struct mtk_vcodec_ctx *ctx;
wait_queue_head_t wq;
mtk_vcodec_ipi_handler handler;
+ unsigned int codec_type;
};
/**
@@ -82,4 +86,22 @@ int vpu_dec_deinit(struct vdec_vpu_inst *vpu);
*/
int vpu_dec_reset(struct vdec_vpu_inst *vpu);
+/**
+ * vpu_dec_core - start decoding on the core hardware; basically the function
+ * will be invoked once per frame.
+ *
+ * @vpu : instance for vdec_vpu_inst
+ */
+int vpu_dec_core(struct vdec_vpu_inst *vpu);
+
+/**
+ * vpu_dec_core_end - end decoding on the core hardware; basically the function
+ * will be invoked once the core HW decoding is done and its interrupt has
+ * been received. The decoder in the VPU will update the hardware
+ * information, deinitialize the hardware and check whether a newly
+ * decoded frame is available for display.
+ *
+ * @vpu : instance for vdec_vpu_inst
+ */
+int vpu_dec_core_end(struct vdec_vpu_inst *vpu);
+
#endif
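
A sketch of how the two new entry points pair up around the core-hardware
interrupt, following the kernel-doc above; the wait step is only a
placeholder for the driver's actual interrupt handling, and the exact
interleaving with the LAT-stage calls may differ in the real driver:

	static int example_core_stage(struct vdec_vpu_inst *vpu)
	{
		int ret;

		ret = vpu_dec_core(vpu);	/* kick the core HW, once per frame */
		if (ret)
			return ret;

		/* ... wait for the core hardware decode interrupt here ... */

		return vpu_dec_core_end(vpu);	/* ack completion, update HW info */
	}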
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c b/drivers/media/platform/mediatek/vcodec/venc/venc_h264_if.c
index bf03888a824f..4d9b8798dffe 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
+++ b/drivers/media/platform/mediatek/vcodec/venc/venc_h264_if.c
@@ -335,7 +335,7 @@ static unsigned int h264_enc_wait_venc_done(struct venc_h264_inst *inst)
struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)inst->ctx;
if (!mtk_vcodec_wait_for_done_ctx(ctx, MTK_INST_IRQ_RECEIVED,
- WAIT_INTR_TIMEOUT_MS)) {
+ WAIT_INTR_TIMEOUT_MS, 0)) {
irq_status = ctx->irq_status;
mtk_vcodec_debug(inst, "irq_status %x <-", irq_status);
}
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c b/drivers/media/platform/mediatek/vcodec/venc/venc_vp8_if.c
index 6b66957d5192..56ce58f761f1 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
+++ b/drivers/media/platform/mediatek/vcodec/venc/venc_vp8_if.c
@@ -222,7 +222,7 @@ static unsigned int vp8_enc_wait_venc_done(struct venc_vp8_inst *inst)
struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)inst->ctx;
if (!mtk_vcodec_wait_for_done_ctx(ctx, MTK_INST_IRQ_RECEIVED,
- WAIT_INTR_TIMEOUT_MS)) {
+ WAIT_INTR_TIMEOUT_MS, 0)) {
irq_status = ctx->irq_status;
mtk_vcodec_debug(inst, "isr return %x", irq_status);
}
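
Both encoder paths above gain a trailing 0 argument to
mtk_vcodec_wait_for_done_ctx(); given the multi-hardware rework in this
series, that parameter presumably selects which hardware's interrupt to
wait on (an inference, the hunks themselves don't say). The call pattern,
assuming the old helper's return convention (0 on success) is unchanged:

	/* hw index 0 is assumed to address the (only) encoder hardware */
	if (!mtk_vcodec_wait_for_done_ctx(ctx, MTK_INST_IRQ_RECEIVED,
					  WAIT_INTR_TIMEOUT_MS, 0))
		irq_status = ctx->irq_status;	/* IRQ fired before the timeout */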
diff --git a/drivers/media/platform/mtk-vcodec/venc_drv_base.h b/drivers/media/platform/mediatek/vcodec/venc_drv_base.h
index 3d718411dc73..3d718411dc73 100644
--- a/drivers/media/platform/mtk-vcodec/venc_drv_base.h
+++ b/drivers/media/platform/mediatek/vcodec/venc_drv_base.h
diff --git a/drivers/media/platform/mtk-vcodec/venc_drv_if.c b/drivers/media/platform/mediatek/vcodec/venc_drv_if.c
index ce0bce811615..ce0bce811615 100644
--- a/drivers/media/platform/mtk-vcodec/venc_drv_if.c
+++ b/drivers/media/platform/mediatek/vcodec/venc_drv_if.c
diff --git a/drivers/media/platform/mtk-vcodec/venc_drv_if.h b/drivers/media/platform/mediatek/vcodec/venc_drv_if.h
index 0b04a1020873..0b04a1020873 100644
--- a/drivers/media/platform/mtk-vcodec/venc_drv_if.h
+++ b/drivers/media/platform/mediatek/vcodec/venc_drv_if.h
diff --git a/drivers/media/platform/mtk-vcodec/venc_ipi_msg.h b/drivers/media/platform/mediatek/vcodec/venc_ipi_msg.h
index 587a2cf15b76..587a2cf15b76 100644
--- a/drivers/media/platform/mtk-vcodec/venc_ipi_msg.h
+++ b/drivers/media/platform/mediatek/vcodec/venc_ipi_msg.h
diff --git a/drivers/media/platform/mtk-vcodec/venc_vpu_if.c b/drivers/media/platform/mediatek/vcodec/venc_vpu_if.c
index e7899d8a3e4e..e7899d8a3e4e 100644
--- a/drivers/media/platform/mtk-vcodec/venc_vpu_if.c
+++ b/drivers/media/platform/mediatek/vcodec/venc_vpu_if.c
diff --git a/drivers/media/platform/mtk-vcodec/venc_vpu_if.h b/drivers/media/platform/mediatek/vcodec/venc_vpu_if.h
index f83bc1b3f2bf..f83bc1b3f2bf 100644
--- a/drivers/media/platform/mtk-vcodec/venc_vpu_if.h
+++ b/drivers/media/platform/mediatek/vcodec/venc_vpu_if.h
diff --git a/drivers/media/platform/mediatek/vpu/Kconfig b/drivers/media/platform/mediatek/vpu/Kconfig
new file mode 100644
index 000000000000..2a8443a93ce0
--- /dev/null
+++ b/drivers/media/platform/mediatek/vpu/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config VIDEO_MEDIATEK_VPU
+ tristate "Mediatek Video Processor Unit"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ This driver handles downloading the VPU firmware and
+ communicating with the VPU. It supports the hardware
+ video codec embedded in MediaTek's MT8173 SoCs, which
+ can decode and encode video in a range of formats.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mtk-vpu.
diff --git a/drivers/media/platform/mtk-vpu/Makefile b/drivers/media/platform/mediatek/vpu/Makefile
index ecd2d392b818..ecd2d392b818 100644
--- a/drivers/media/platform/mtk-vpu/Makefile
+++ b/drivers/media/platform/mediatek/vpu/Makefile
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
index 7bd715fc844d..47b684b92f81 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
@@ -810,7 +810,6 @@ static int mtk_vpu_probe(struct platform_device *pdev)
{
struct mtk_vpu *vpu;
struct device *dev;
- struct resource *res;
int ret = 0;
dev_dbg(&pdev->dev, "initialization\n");
@@ -908,13 +907,10 @@ static int mtk_vpu_probe(struct platform_device *pdev)
init_waitqueue_head(&vpu->run.wq);
init_waitqueue_head(&vpu->ack_wq);
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "get IRQ resource failed.\n");
- ret = -ENXIO;
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
goto free_p_mem;
- }
- vpu->reg.irq = platform_get_irq(pdev, 0);
+ vpu->reg.irq = ret;
ret = devm_request_irq(dev, vpu->reg.irq, vpu_irq_handler, 0,
pdev->name, vpu);
if (ret) {
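
The hunk above replaces the open-coded IORESOURCE_IRQ lookup with
platform_get_irq(), which both maps the interrupt and logs failures itself.
The resulting idiom as a standalone sketch (the function and parameter names
are illustrative, not taken from the driver):

	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	static int example_request_irq(struct platform_device *pdev,
				       irq_handler_t handler, void *data)
	{
		int irq = platform_get_irq(pdev, 0);	/* logs its own error */

		if (irq < 0)
			return irq;

		return devm_request_irq(&pdev->dev, irq, handler, 0,
					pdev->name, data);
	}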
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.h b/drivers/media/platform/mediatek/vpu/mtk_vpu.h
index a56053ff135a..a56053ff135a 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.h
+++ b/drivers/media/platform/mediatek/vpu/mtk_vpu.h
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
deleted file mode 100644
index 6038db96f71c..000000000000
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
+++ /dev/null
@@ -1,145 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2016 MediaTek Inc.
- * Author: Tiffany Lin <tiffany.lin@mediatek.com>
- */
-
-#include <linux/clk.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/pm_runtime.h>
-#include <soc/mediatek/smi.h>
-
-#include "mtk_vcodec_dec_pm.h"
-#include "mtk_vcodec_util.h"
-
-int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev)
-{
- struct device_node *node;
- struct platform_device *pdev;
- struct mtk_vcodec_pm *pm;
- struct mtk_vcodec_clk *dec_clk;
- struct mtk_vcodec_clk_info *clk_info;
- int i = 0, ret = 0;
-
- pdev = mtkdev->plat_dev;
- pm = &mtkdev->pm;
- pm->mtkdev = mtkdev;
- dec_clk = &pm->vdec_clk;
- node = of_parse_phandle(pdev->dev.of_node, "mediatek,larb", 0);
- if (!node) {
- mtk_v4l2_err("of_parse_phandle mediatek,larb fail!");
- return -1;
- }
-
- pdev = of_find_device_by_node(node);
- of_node_put(node);
- if (WARN_ON(!pdev)) {
- return -1;
- }
- pm->larbvdec = &pdev->dev;
- pdev = mtkdev->plat_dev;
- pm->dev = &pdev->dev;
-
- dec_clk->clk_num =
- of_property_count_strings(pdev->dev.of_node, "clock-names");
- if (dec_clk->clk_num > 0) {
- dec_clk->clk_info = devm_kcalloc(&pdev->dev,
- dec_clk->clk_num, sizeof(*clk_info),
- GFP_KERNEL);
- if (!dec_clk->clk_info) {
- ret = -ENOMEM;
- goto put_device;
- }
- } else {
- mtk_v4l2_err("Failed to get vdec clock count");
- ret = -EINVAL;
- goto put_device;
- }
-
- for (i = 0; i < dec_clk->clk_num; i++) {
- clk_info = &dec_clk->clk_info[i];
- ret = of_property_read_string_index(pdev->dev.of_node,
- "clock-names", i, &clk_info->clk_name);
- if (ret) {
- mtk_v4l2_err("Failed to get clock name id = %d", i);
- goto put_device;
- }
- clk_info->vcodec_clk = devm_clk_get(&pdev->dev,
- clk_info->clk_name);
- if (IS_ERR(clk_info->vcodec_clk)) {
- mtk_v4l2_err("devm_clk_get (%d)%s fail", i,
- clk_info->clk_name);
- ret = PTR_ERR(clk_info->vcodec_clk);
- goto put_device;
- }
- }
-
- pm_runtime_enable(&pdev->dev);
- return 0;
-put_device:
- put_device(pm->larbvdec);
- return ret;
-}
-
-void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev)
-{
- pm_runtime_disable(dev->pm.dev);
- put_device(dev->pm.larbvdec);
-}
-
-int mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm)
-{
- int ret;
-
- ret = pm_runtime_resume_and_get(pm->dev);
- if (ret)
- mtk_v4l2_err("pm_runtime_resume_and_get fail %d", ret);
-
- return ret;
-}
-
-void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm)
-{
- int ret;
-
- ret = pm_runtime_put_sync(pm->dev);
- if (ret)
- mtk_v4l2_err("pm_runtime_put_sync fail %d", ret);
-}
-
-void mtk_vcodec_dec_clock_on(struct mtk_vcodec_pm *pm)
-{
- struct mtk_vcodec_clk *dec_clk = &pm->vdec_clk;
- int ret, i = 0;
-
- for (i = 0; i < dec_clk->clk_num; i++) {
- ret = clk_prepare_enable(dec_clk->clk_info[i].vcodec_clk);
- if (ret) {
- mtk_v4l2_err("clk_prepare_enable %d %s fail %d", i,
- dec_clk->clk_info[i].clk_name, ret);
- goto error;
- }
- }
-
- ret = mtk_smi_larb_get(pm->larbvdec);
- if (ret) {
- mtk_v4l2_err("mtk_smi_larb_get larbvdec fail %d", ret);
- goto error;
- }
- return;
-
-error:
- for (i -= 1; i >= 0; i--)
- clk_disable_unprepare(dec_clk->clk_info[i].vcodec_clk);
-}
-
-void mtk_vcodec_dec_clock_off(struct mtk_vcodec_pm *pm)
-{
- struct mtk_vcodec_clk *dec_clk = &pm->vdec_clk;
- int i = 0;
-
- mtk_smi_larb_put(pm->larbvdec);
- for (i = dec_clk->clk_num - 1; i >= 0; i--)
- clk_disable_unprepare(dec_clk->clk_info[i].vcodec_clk);
-}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
deleted file mode 100644
index 280aeaefdb65..000000000000
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2016 MediaTek Inc.
- * Author: Tiffany Lin <tiffany.lin@mediatek.com>
- */
-
-#ifndef _MTK_VCODEC_DEC_PM_H_
-#define _MTK_VCODEC_DEC_PM_H_
-
-#include "mtk_vcodec_drv.h"
-
-int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *dev);
-void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev);
-
-int mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm);
-void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm);
-void mtk_vcodec_dec_clock_on(struct mtk_vcodec_pm *pm);
-void mtk_vcodec_dec_clock_off(struct mtk_vcodec_pm *pm);
-
-#endif /* _MTK_VCODEC_DEC_PM_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c
deleted file mode 100644
index 70580c2525ba..000000000000
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c
+++ /dev/null
@@ -1,45 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
-* Copyright (c) 2016 MediaTek Inc.
-* Author: Tiffany Lin <tiffany.lin@mediatek.com>
-*/
-
-#include <linux/errno.h>
-#include <linux/wait.h>
-
-#include "mtk_vcodec_drv.h"
-#include "mtk_vcodec_intr.h"
-#include "mtk_vcodec_util.h"
-
-int mtk_vcodec_wait_for_done_ctx(struct mtk_vcodec_ctx *ctx, int command,
- unsigned int timeout_ms)
-{
- wait_queue_head_t *waitqueue;
- long timeout_jiff, ret;
- int status = 0;
-
- waitqueue = (wait_queue_head_t *)&ctx->queue;
- timeout_jiff = msecs_to_jiffies(timeout_ms);
-
- ret = wait_event_interruptible_timeout(*waitqueue,
- ctx->int_cond,
- timeout_jiff);
-
- if (!ret) {
- status = -1; /* timeout */
- mtk_v4l2_err("[%d] ctx->type=%d, cmd=%d, wait_event_interruptible_timeout time=%ums out %d %d!",
- ctx->id, ctx->type, command, timeout_ms,
- ctx->int_cond, ctx->int_type);
- } else if (-ERESTARTSYS == ret) {
- mtk_v4l2_err("[%d] ctx->type=%d, cmd=%d, wait_event_interruptible_timeout interrupted by a signal %d %d",
- ctx->id, ctx->type, command, ctx->int_cond,
- ctx->int_type);
- status = -1;
- }
-
- ctx->int_cond = 0;
- ctx->int_type = 0;
-
- return status;
-}
-EXPORT_SYMBOL(mtk_vcodec_wait_for_done_ctx);
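
The removed helper collapsed the three-way result of
wait_event_interruptible_timeout() into -1; its replacement takes an extra
hardware index, but the underlying return convention is worth spelling out.
A minimal sketch of that pattern, using proper errnos (unlike the removed
code, which returned a bare -1):

	#include <linux/errno.h>
	#include <linux/jiffies.h>
	#include <linux/wait.h>

	static int example_wait_done(wait_queue_head_t *wq, bool *done,
				     unsigned int timeout_ms)
	{
		long ret = wait_event_interruptible_timeout(*wq, *done,
						msecs_to_jiffies(timeout_ms));

		if (!ret)
			return -ETIMEDOUT;	/* timed out, condition still false */
		if (ret < 0)
			return ret;		/* -ERESTARTSYS: interrupted by signal */
		return 0;			/* condition became true in time */
	}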
diff --git a/drivers/media/platform/nvidia/Kconfig b/drivers/media/platform/nvidia/Kconfig
new file mode 100644
index 000000000000..b211b46877f6
--- /dev/null
+++ b/drivers/media/platform/nvidia/Kconfig
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "NVidia media platform drivers"
+
+source "drivers/media/platform/nvidia/tegra-vde/Kconfig"
diff --git a/drivers/media/platform/nvidia/Makefile b/drivers/media/platform/nvidia/Makefile
new file mode 100644
index 000000000000..428415ff83de
--- /dev/null
+++ b/drivers/media/platform/nvidia/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-y += tegra-vde/
diff --git a/drivers/media/platform/nvidia/tegra-vde/Kconfig b/drivers/media/platform/nvidia/tegra-vde/Kconfig
new file mode 100644
index 000000000000..f7454823bbbb
--- /dev/null
+++ b/drivers/media/platform/nvidia/tegra-vde/Kconfig
@@ -0,0 +1,17 @@
+config VIDEO_TEGRA_VDE
+ tristate "NVIDIA Tegra Video Decoder Engine driver"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on VIDEO_DEV
+ select DMA_SHARED_BUFFER
+ select IOMMU_IOVA
+ select MEDIA_CONTROLLER
+ select MEDIA_CONTROLLER_REQUEST_API
+ select SRAM
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_DMA_SG
+ select V4L2_H264
+ select V4L2_MEM2MEM_DEV
+ help
+ Support for the NVIDIA Tegra video decoder unit.
+ To compile this driver as a module, choose M here.
diff --git a/drivers/media/platform/nvidia/tegra-vde/Makefile b/drivers/media/platform/nvidia/tegra-vde/Makefile
new file mode 100644
index 000000000000..4e96f3305567
--- /dev/null
+++ b/drivers/media/platform/nvidia/tegra-vde/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+tegra-vde-y := vde.o iommu.o dmabuf-cache.o h264.o v4l2.o
+obj-$(CONFIG_VIDEO_TEGRA_VDE) += tegra-vde.o
diff --git a/drivers/staging/media/tegra-vde/dmabuf-cache.c b/drivers/media/platform/nvidia/tegra-vde/dmabuf-cache.c
index a98d03419b8f..69c346148070 100644
--- a/drivers/staging/media/tegra-vde/dmabuf-cache.c
+++ b/drivers/media/platform/nvidia/tegra-vde/dmabuf-cache.c
@@ -66,9 +66,9 @@ int tegra_vde_dmabuf_cache_map(struct tegra_vde *vde,
struct dma_buf_attachment **ap,
dma_addr_t *addrp)
{
- struct device *dev = vde->miscdev.parent;
struct dma_buf_attachment *attachment;
struct tegra_vde_cache_entry *entry;
+ struct device *dev = vde->dev;
struct sg_table *sgt;
struct iova *iova;
int err;
diff --git a/drivers/media/platform/nvidia/tegra-vde/h264.c b/drivers/media/platform/nvidia/tegra-vde/h264.c
new file mode 100644
index 000000000000..d8e5534e80c8
--- /dev/null
+++ b/drivers/media/platform/nvidia/tegra-vde/h264.c
@@ -0,0 +1,946 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NVIDIA Tegra Video decoder driver
+ *
+ * Copyright (C) 2016-2022 Dmitry Osipenko <digetx@gmail.com>
+ *
+ */
+
+#include <linux/iopoll.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-h264.h>
+
+#include "trace.h"
+#include "vde.h"
+
+#define FLAG_B_FRAME 0x1
+#define FLAG_REFERENCE 0x2
+
+struct tegra_vde_h264_frame {
+ unsigned int frame_num;
+ unsigned int flags;
+};
+
+struct tegra_vde_h264_decoder_ctx {
+ unsigned int dpb_frames_nb;
+ unsigned int dpb_ref_frames_with_earlier_poc_nb;
+ unsigned int baseline_profile;
+ unsigned int level_idc;
+ unsigned int log2_max_pic_order_cnt_lsb;
+ unsigned int log2_max_frame_num;
+ unsigned int pic_order_cnt_type;
+ unsigned int direct_8x8_inference_flag;
+ unsigned int pic_width_in_mbs;
+ unsigned int pic_height_in_mbs;
+ unsigned int pic_init_qp;
+ unsigned int deblocking_filter_control_present_flag;
+ unsigned int constrained_intra_pred_flag;
+ unsigned int chroma_qp_index_offset;
+ unsigned int pic_order_present_flag;
+ unsigned int num_ref_idx_l0_active_minus1;
+ unsigned int num_ref_idx_l1_active_minus1;
+};
+
+struct h264_reflists {
+ u8 p[V4L2_H264_NUM_DPB_ENTRIES];
+ u8 b0[V4L2_H264_NUM_DPB_ENTRIES];
+ u8 b1[V4L2_H264_NUM_DPB_ENTRIES];
+};
+
+static int tegra_vde_wait_mbe(struct tegra_vde *vde)
+{
+ u32 tmp;
+
+ return readl_relaxed_poll_timeout(vde->mbe + 0x8C, tmp,
+ tmp >= 0x10, 1, 100);
+}
+
+static int tegra_vde_setup_mbe_frame_idx(struct tegra_vde *vde,
+ unsigned int refs_nb,
+ bool setup_refs)
+{
+ u32 value, frame_idx_enb_mask = 0;
+ unsigned int frame_idx;
+ unsigned int idx;
+ int err;
+
+ tegra_vde_writel(vde, 0xD0000000 | (0 << 23), vde->mbe, 0x80);
+ tegra_vde_writel(vde, 0xD0200000 | (0 << 23), vde->mbe, 0x80);
+
+ err = tegra_vde_wait_mbe(vde);
+ if (err)
+ return err;
+
+ if (!setup_refs)
+ return 0;
+
+ for (idx = 0, frame_idx = 1; idx < refs_nb; idx++, frame_idx++) {
+ tegra_vde_writel(vde, 0xD0000000 | (frame_idx << 23),
+ vde->mbe, 0x80);
+ tegra_vde_writel(vde, 0xD0200000 | (frame_idx << 23),
+ vde->mbe, 0x80);
+
+ frame_idx_enb_mask |= frame_idx << (6 * (idx % 4));
+
+ if (idx % 4 == 3 || idx == refs_nb - 1) {
+ value = 0xC0000000;
+ value |= (idx >> 2) << 24;
+ value |= frame_idx_enb_mask;
+
+ tegra_vde_writel(vde, value, vde->mbe, 0x80);
+
+ err = tegra_vde_wait_mbe(vde);
+ if (err)
+ return err;
+
+ frame_idx_enb_mask = 0;
+ }
+ }
+
+ return 0;
+}
+
+static void tegra_vde_mbe_set_0xa_reg(struct tegra_vde *vde, int reg, u32 val)
+{
+ tegra_vde_writel(vde, 0xA0000000 | (reg << 24) | (val & 0xFFFF),
+ vde->mbe, 0x80);
+ tegra_vde_writel(vde, 0xA0000000 | ((reg + 1) << 24) | (val >> 16),
+ vde->mbe, 0x80);
+}
+
+static int tegra_vde_wait_bsev(struct tegra_vde *vde, bool wait_dma)
+{
+ struct device *dev = vde->dev;
+ u32 value;
+ int err;
+
+ err = readl_relaxed_poll_timeout(vde->bsev + INTR_STATUS, value,
+ !(value & BIT(2)), 1, 100);
+ if (err) {
+ dev_err(dev, "BSEV unknown bit timeout\n");
+ return err;
+ }
+
+ err = readl_relaxed_poll_timeout(vde->bsev + INTR_STATUS, value,
+ (value & BSE_ICMDQUE_EMPTY), 1, 100);
+ if (err) {
+ dev_err(dev, "BSEV ICMDQUE flush timeout\n");
+ return err;
+ }
+
+ if (!wait_dma)
+ return 0;
+
+ err = readl_relaxed_poll_timeout(vde->bsev + INTR_STATUS, value,
+ !(value & BSE_DMA_BUSY), 1, 1000);
+ if (err) {
+ dev_err(dev, "BSEV DMA timeout\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_vde_push_to_bsev_icmdqueue(struct tegra_vde *vde,
+ u32 value, bool wait_dma)
+{
+ tegra_vde_writel(vde, value, vde->bsev, ICMDQUE_WR);
+
+ return tegra_vde_wait_bsev(vde, wait_dma);
+}
+
+static void tegra_vde_setup_frameid(struct tegra_vde *vde,
+ struct tegra_video_frame *frame,
+ unsigned int frameid,
+ u32 mbs_width, u32 mbs_height)
+{
+ u32 y_addr = frame ? frame->y_addr : 0x6CDEAD00;
+ u32 cb_addr = frame ? frame->cb_addr : 0x6CDEAD00;
+ u32 cr_addr = frame ? frame->cr_addr : 0x6CDEAD00;
+ u32 value1 = frame ? ((frame->luma_atoms_pitch << 16) | mbs_height) : 0;
+ u32 value2 = frame ? ((frame->chroma_atoms_pitch << 6) | 1) : 0;
+
+ tegra_vde_writel(vde, y_addr >> 8, vde->frameid, 0x000 + frameid * 4);
+ tegra_vde_writel(vde, cb_addr >> 8, vde->frameid, 0x100 + frameid * 4);
+ tegra_vde_writel(vde, cr_addr >> 8, vde->frameid, 0x180 + frameid * 4);
+ tegra_vde_writel(vde, value1, vde->frameid, 0x080 + frameid * 4);
+ tegra_vde_writel(vde, value2, vde->frameid, 0x280 + frameid * 4);
+}
+
+static void tegra_setup_frameidx(struct tegra_vde *vde,
+ struct tegra_video_frame *frames,
+ unsigned int frames_nb,
+ u32 mbs_width, u32 mbs_height)
+{
+ unsigned int idx;
+
+ for (idx = 0; idx < frames_nb; idx++)
+ tegra_vde_setup_frameid(vde, &frames[idx], idx,
+ mbs_width, mbs_height);
+
+ for (; idx < 17; idx++)
+ tegra_vde_setup_frameid(vde, NULL, idx, 0, 0);
+}
+
+static void tegra_vde_setup_iram_entry(struct tegra_vde *vde,
+ unsigned int table,
+ unsigned int row,
+ u32 value1, u32 value2)
+{
+ u32 *iram_tables = vde->iram;
+
+ trace_vde_setup_iram_entry(table, row, value1, value2);
+
+ iram_tables[0x20 * table + row * 2 + 0] = value1;
+ iram_tables[0x20 * table + row * 2 + 1] = value2;
+}
+
+static void tegra_vde_setup_iram_tables(struct tegra_vde *vde,
+ struct tegra_video_frame *dpb_frames,
+ unsigned int ref_frames_nb,
+ unsigned int with_earlier_poc_nb)
+{
+ struct tegra_video_frame *frame;
+ int with_later_poc_nb;
+ u32 value, aux_addr;
+ unsigned int i, k;
+
+ trace_vde_ref_l0(dpb_frames[0].frame_num);
+
+ for (i = 0; i < 16; i++) {
+ if (i < ref_frames_nb) {
+ frame = &dpb_frames[i + 1];
+
+ aux_addr = frame->aux_addr;
+
+ value = (i + 1) << 26;
+ value |= !(frame->flags & FLAG_B_FRAME) << 25;
+ value |= 1 << 24;
+ value |= frame->frame_num;
+ } else {
+ aux_addr = 0x6ADEAD00;
+ value = 0x3f;
+ }
+
+ tegra_vde_setup_iram_entry(vde, 0, i, value, aux_addr);
+ tegra_vde_setup_iram_entry(vde, 1, i, value, aux_addr);
+ tegra_vde_setup_iram_entry(vde, 2, i, value, aux_addr);
+ tegra_vde_setup_iram_entry(vde, 3, i, value, aux_addr);
+ }
+
+ if (!(dpb_frames[0].flags & FLAG_B_FRAME))
+ return;
+
+ if (with_earlier_poc_nb >= ref_frames_nb)
+ return;
+
+ with_later_poc_nb = ref_frames_nb - with_earlier_poc_nb;
+
+ trace_vde_ref_l1(with_later_poc_nb, with_earlier_poc_nb);
+
+ for (i = 0, k = with_earlier_poc_nb; i < with_later_poc_nb; i++, k++) {
+ frame = &dpb_frames[k + 1];
+
+ aux_addr = frame->aux_addr;
+
+ value = (k + 1) << 26;
+ value |= !(frame->flags & FLAG_B_FRAME) << 25;
+ value |= 1 << 24;
+ value |= frame->frame_num;
+
+ tegra_vde_setup_iram_entry(vde, 2, i, value, aux_addr);
+ }
+
+ for (k = 0; i < ref_frames_nb; i++, k++) {
+ frame = &dpb_frames[k + 1];
+
+ aux_addr = frame->aux_addr;
+
+ value = (k + 1) << 26;
+ value |= !(frame->flags & FLAG_B_FRAME) << 25;
+ value |= 1 << 24;
+ value |= frame->frame_num;
+
+ tegra_vde_setup_iram_entry(vde, 2, i, value, aux_addr);
+ }
+}
+
+static int tegra_vde_setup_hw_context(struct tegra_vde *vde,
+ struct tegra_vde_h264_decoder_ctx *ctx,
+ struct tegra_video_frame *dpb_frames,
+ dma_addr_t bitstream_data_addr,
+ size_t bitstream_data_size,
+ unsigned int macroblocks_nb)
+{
+ struct device *dev = vde->dev;
+ u32 value;
+ int err;
+
+ tegra_vde_set_bits(vde, 0x000A, vde->sxe, 0xF0);
+ tegra_vde_set_bits(vde, 0x000B, vde->bsev, CMDQUE_CONTROL);
+ tegra_vde_set_bits(vde, 0x8002, vde->mbe, 0x50);
+ tegra_vde_set_bits(vde, 0x000A, vde->mbe, 0xA0);
+ tegra_vde_set_bits(vde, 0x000A, vde->ppe, 0x14);
+ tegra_vde_set_bits(vde, 0x000A, vde->ppe, 0x28);
+ tegra_vde_set_bits(vde, 0x0A00, vde->mce, 0x08);
+ tegra_vde_set_bits(vde, 0x000A, vde->tfe, 0x00);
+ tegra_vde_set_bits(vde, 0x0005, vde->vdma, 0x04);
+
+ tegra_vde_writel(vde, 0x00000000, vde->vdma, 0x1C);
+ tegra_vde_writel(vde, 0x00000000, vde->vdma, 0x00);
+ tegra_vde_writel(vde, 0x00000007, vde->vdma, 0x04);
+ tegra_vde_writel(vde, 0x00000007, vde->frameid, 0x200);
+ tegra_vde_writel(vde, 0x00000005, vde->tfe, 0x04);
+ tegra_vde_writel(vde, 0x00000000, vde->mbe, 0x84);
+ tegra_vde_writel(vde, 0x00000010, vde->sxe, 0x08);
+ tegra_vde_writel(vde, 0x00000150, vde->sxe, 0x54);
+ tegra_vde_writel(vde, 0x0000054C, vde->sxe, 0x58);
+ tegra_vde_writel(vde, 0x00000E34, vde->sxe, 0x5C);
+ tegra_vde_writel(vde, 0x063C063C, vde->mce, 0x10);
+ tegra_vde_writel(vde, 0x0003FC00, vde->bsev, INTR_STATUS);
+ tegra_vde_writel(vde, 0x0000150D, vde->bsev, BSE_CONFIG);
+ tegra_vde_writel(vde, 0x00000100, vde->bsev, BSE_INT_ENB);
+ tegra_vde_writel(vde, 0x00000000, vde->bsev, 0x98);
+ tegra_vde_writel(vde, 0x00000060, vde->bsev, 0x9C);
+
+ memset(vde->iram + 128, 0, macroblocks_nb / 2);
+
+ tegra_setup_frameidx(vde, dpb_frames, ctx->dpb_frames_nb,
+ ctx->pic_width_in_mbs, ctx->pic_height_in_mbs);
+
+ tegra_vde_setup_iram_tables(vde, dpb_frames,
+ ctx->dpb_frames_nb - 1,
+ ctx->dpb_ref_frames_with_earlier_poc_nb);
+
+ /*
+ * The IRAM mapping is write-combine, ensure that CPU buffers have
+ * been flushed at this point.
+ */
+ wmb();
+
+ tegra_vde_writel(vde, 0x00000000, vde->bsev, 0x8C);
+ tegra_vde_writel(vde, bitstream_data_addr + bitstream_data_size,
+ vde->bsev, 0x54);
+
+ vde->bitstream_data_addr = bitstream_data_addr;
+
+ value = ctx->pic_width_in_mbs << 11 | ctx->pic_height_in_mbs << 3;
+
+ tegra_vde_writel(vde, value, vde->bsev, 0x88);
+
+ err = tegra_vde_wait_bsev(vde, false);
+ if (err)
+ return err;
+
+ err = tegra_vde_push_to_bsev_icmdqueue(vde, 0x800003FC, false);
+ if (err)
+ return err;
+
+ value = 0x01500000;
+ value |= ((vde->iram_lists_addr + 512) >> 2) & 0xFFFF;
+
+ err = tegra_vde_push_to_bsev_icmdqueue(vde, value, true);
+ if (err)
+ return err;
+
+ err = tegra_vde_push_to_bsev_icmdqueue(vde, 0x840F054C, false);
+ if (err)
+ return err;
+
+ err = tegra_vde_push_to_bsev_icmdqueue(vde, 0x80000080, false);
+ if (err)
+ return err;
+
+ value = 0x0E340000 | ((vde->iram_lists_addr >> 2) & 0xFFFF);
+
+ err = tegra_vde_push_to_bsev_icmdqueue(vde, value, true);
+ if (err)
+ return err;
+
+ value = 0x00800005;
+ value |= ctx->pic_width_in_mbs << 11;
+ value |= ctx->pic_height_in_mbs << 3;
+
+ tegra_vde_writel(vde, value, vde->sxe, 0x10);
+
+ value = !ctx->baseline_profile << 17;
+ value |= ctx->level_idc << 13;
+ value |= ctx->log2_max_pic_order_cnt_lsb << 7;
+ value |= ctx->pic_order_cnt_type << 5;
+ value |= ctx->log2_max_frame_num;
+
+ tegra_vde_writel(vde, value, vde->sxe, 0x40);
+
+ value = ctx->pic_init_qp << 25;
+ value |= !!(ctx->deblocking_filter_control_present_flag) << 2;
+ value |= !!ctx->pic_order_present_flag;
+
+ tegra_vde_writel(vde, value, vde->sxe, 0x44);
+
+ value = ctx->chroma_qp_index_offset;
+ value |= ctx->num_ref_idx_l0_active_minus1 << 5;
+ value |= ctx->num_ref_idx_l1_active_minus1 << 10;
+ value |= !!ctx->constrained_intra_pred_flag << 15;
+
+ tegra_vde_writel(vde, value, vde->sxe, 0x48);
+
+ value = 0x0C000000;
+ value |= !!(dpb_frames[0].flags & FLAG_B_FRAME) << 24;
+
+ tegra_vde_writel(vde, value, vde->sxe, 0x4C);
+
+ value = 0x03800000;
+ value |= bitstream_data_size & GENMASK(19, 15);
+
+ tegra_vde_writel(vde, value, vde->sxe, 0x68);
+
+ tegra_vde_writel(vde, bitstream_data_addr, vde->sxe, 0x6C);
+
+ if (vde->soc->supports_ref_pic_marking)
+ tegra_vde_writel(vde, vde->secure_bo->dma_addr, vde->sxe, 0x7c);
+
+ value = 0x10000005;
+ value |= ctx->pic_width_in_mbs << 11;
+ value |= ctx->pic_height_in_mbs << 3;
+
+ tegra_vde_writel(vde, value, vde->mbe, 0x80);
+
+ value = 0x26800000;
+ value |= ctx->level_idc << 4;
+ value |= !ctx->baseline_profile << 1;
+ value |= !!ctx->direct_8x8_inference_flag;
+
+ tegra_vde_writel(vde, value, vde->mbe, 0x80);
+
+ tegra_vde_writel(vde, 0xF4000001, vde->mbe, 0x80);
+ tegra_vde_writel(vde, 0x20000000, vde->mbe, 0x80);
+ tegra_vde_writel(vde, 0xF4000101, vde->mbe, 0x80);
+
+ value = 0x20000000;
+ value |= ctx->chroma_qp_index_offset << 8;
+
+ tegra_vde_writel(vde, value, vde->mbe, 0x80);
+
+ err = tegra_vde_setup_mbe_frame_idx(vde,
+ ctx->dpb_frames_nb - 1,
+ ctx->pic_order_cnt_type == 0);
+ if (err) {
+ dev_err(dev, "MBE frames setup failed %d\n", err);
+ return err;
+ }
+
+ tegra_vde_mbe_set_0xa_reg(vde, 0, 0x000009FC);
+ tegra_vde_mbe_set_0xa_reg(vde, 2, 0x61DEAD00);
+ tegra_vde_mbe_set_0xa_reg(vde, 4, 0x62DEAD00);
+ tegra_vde_mbe_set_0xa_reg(vde, 6, 0x63DEAD00);
+ tegra_vde_mbe_set_0xa_reg(vde, 8, dpb_frames[0].aux_addr);
+
+ value = 0xFC000000;
+ value |= !!(dpb_frames[0].flags & FLAG_B_FRAME) << 2;
+
+ if (!ctx->baseline_profile)
+ value |= !!(dpb_frames[0].flags & FLAG_REFERENCE) << 1;
+
+ tegra_vde_writel(vde, value, vde->mbe, 0x80);
+
+ err = tegra_vde_wait_mbe(vde);
+ if (err) {
+ dev_err(dev, "MBE programming failed %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void tegra_vde_decode_frame(struct tegra_vde *vde,
+ unsigned int macroblocks_nb)
+{
+ reinit_completion(&vde->decode_completion);
+
+ tegra_vde_writel(vde, 0x00000001, vde->bsev, 0x8C);
+ tegra_vde_writel(vde, 0x20000000 | (macroblocks_nb - 1),
+ vde->sxe, 0x00);
+}
+
+static int tegra_vde_validate_h264_ctx(struct device *dev,
+ struct tegra_vde_h264_decoder_ctx *ctx)
+{
+ if (ctx->dpb_frames_nb == 0 || ctx->dpb_frames_nb > 17) {
+ dev_err(dev, "Bad DPB size %u\n", ctx->dpb_frames_nb);
+ return -EINVAL;
+ }
+
+ if (ctx->level_idc > 15) {
+ dev_err(dev, "Bad level value %u\n", ctx->level_idc);
+ return -EINVAL;
+ }
+
+ if (ctx->pic_init_qp > 52) {
+ dev_err(dev, "Bad pic_init_qp value %u\n", ctx->pic_init_qp);
+ return -EINVAL;
+ }
+
+ if (ctx->log2_max_pic_order_cnt_lsb > 16) {
+ dev_err(dev, "Bad log2_max_pic_order_cnt_lsb value %u\n",
+ ctx->log2_max_pic_order_cnt_lsb);
+ return -EINVAL;
+ }
+
+ if (ctx->log2_max_frame_num > 16) {
+ dev_err(dev, "Bad log2_max_frame_num value %u\n",
+ ctx->log2_max_frame_num);
+ return -EINVAL;
+ }
+
+ if (ctx->chroma_qp_index_offset > 31) {
+ dev_err(dev, "Bad chroma_qp_index_offset value %u\n",
+ ctx->chroma_qp_index_offset);
+ return -EINVAL;
+ }
+
+ if (ctx->pic_order_cnt_type > 2) {
+ dev_err(dev, "Bad pic_order_cnt_type value %u\n",
+ ctx->pic_order_cnt_type);
+ return -EINVAL;
+ }
+
+ if (ctx->num_ref_idx_l0_active_minus1 > 15) {
+ dev_err(dev, "Bad num_ref_idx_l0_active_minus1 value %u\n",
+ ctx->num_ref_idx_l0_active_minus1);
+ return -EINVAL;
+ }
+
+ if (ctx->num_ref_idx_l1_active_minus1 > 15) {
+ dev_err(dev, "Bad num_ref_idx_l1_active_minus1 value %u\n",
+ ctx->num_ref_idx_l1_active_minus1);
+ return -EINVAL;
+ }
+
+ if (!ctx->pic_width_in_mbs || ctx->pic_width_in_mbs > 127) {
+ dev_err(dev, "Bad pic_width_in_mbs value %u\n",
+ ctx->pic_width_in_mbs);
+ return -EINVAL;
+ }
+
+ if (!ctx->pic_height_in_mbs || ctx->pic_height_in_mbs > 127) {
+ dev_err(dev, "Bad pic_height_in_mbs value %u\n",
+ ctx->pic_height_in_mbs);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tegra_vde_decode_begin(struct tegra_vde *vde,
+ struct tegra_vde_h264_decoder_ctx *ctx,
+ struct tegra_video_frame *dpb_frames,
+ dma_addr_t bitstream_data_addr,
+ size_t bitstream_data_size)
+{
+ struct device *dev = vde->dev;
+ unsigned int macroblocks_nb;
+ int err;
+
+ err = mutex_lock_interruptible(&vde->lock);
+ if (err)
+ return err;
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err < 0)
+ goto unlock;
+
+ /*
+ * We rely on the VDE registers having their reset values,
+ * otherwise the VDE causes a bus lockup.
+ */
+ err = reset_control_assert(vde->rst_mc);
+ if (err) {
+ dev_err(dev, "DEC start: Failed to assert MC reset: %d\n",
+ err);
+ goto put_runtime_pm;
+ }
+
+ err = reset_control_reset(vde->rst);
+ if (err) {
+ dev_err(dev, "DEC start: Failed to reset HW: %d\n", err);
+ goto put_runtime_pm;
+ }
+
+ err = reset_control_deassert(vde->rst_mc);
+ if (err) {
+ dev_err(dev, "DEC start: Failed to deassert MC reset: %d\n",
+ err);
+ goto put_runtime_pm;
+ }
+
+ macroblocks_nb = ctx->pic_width_in_mbs * ctx->pic_height_in_mbs;
+
+ err = tegra_vde_setup_hw_context(vde, ctx, dpb_frames,
+ bitstream_data_addr,
+ bitstream_data_size,
+ macroblocks_nb);
+ if (err)
+ goto put_runtime_pm;
+
+ tegra_vde_decode_frame(vde, macroblocks_nb);
+
+ return 0;
+
+put_runtime_pm:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+unlock:
+ mutex_unlock(&vde->lock);
+
+ return err;
+}
+
+static void tegra_vde_decode_abort(struct tegra_vde *vde)
+{
+ struct device *dev = vde->dev;
+ int err;
+
+ /*
+ * Reset the memory client first, to avoid resetting the VDE HW in
+ * the middle of a DMA transfer, which could corrupt memory or hang
+ * the whole system.
+ */
+ err = reset_control_assert(vde->rst_mc);
+ if (err)
+ dev_err(dev, "DEC end: Failed to assert MC reset: %d\n", err);
+
+ err = reset_control_assert(vde->rst);
+ if (err)
+ dev_err(dev, "DEC end: Failed to assert HW reset: %d\n", err);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ mutex_unlock(&vde->lock);
+}
+
+static int tegra_vde_decode_end(struct tegra_vde *vde)
+{
+ unsigned int read_bytes, macroblocks_nb;
+ struct device *dev = vde->dev;
+ dma_addr_t bsev_ptr;
+ long timeout;
+ int ret;
+
+ timeout = wait_for_completion_interruptible_timeout(
+ &vde->decode_completion, msecs_to_jiffies(1000));
+ if (timeout == 0) {
+ bsev_ptr = tegra_vde_readl(vde, vde->bsev, 0x10);
+ macroblocks_nb = tegra_vde_readl(vde, vde->sxe, 0xC8) & 0x1FFF;
+ read_bytes = bsev_ptr ? bsev_ptr - vde->bitstream_data_addr : 0;
+
+ dev_err(dev, "Decoding failed: read 0x%X bytes, %u macroblocks parsed\n",
+ read_bytes, macroblocks_nb);
+
+ ret = -EIO;
+ } else if (timeout < 0) {
+ ret = timeout;
+ } else {
+ ret = 0;
+ }
+
+ tegra_vde_decode_abort(vde);
+
+ return ret;
+}
+
+static struct vb2_buffer *get_ref_buf(struct tegra_ctx *ctx,
+ struct vb2_v4l2_buffer *dst,
+ unsigned int dpb_idx)
+{
+ const struct v4l2_h264_dpb_entry *dpb = ctx->h264.decode_params->dpb;
+ struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
+ int buf_idx = -1;
+
+ if (dpb[dpb_idx].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
+ buf_idx = vb2_find_timestamp(cap_q,
+ dpb[dpb_idx].reference_ts, 0);
+
+ /*
+ * If a DPB entry is unused or invalid, the address of the current
+ * destination buffer is returned.
+ */
+ if (buf_idx < 0)
+ return &dst->vb2_buf;
+
+ return vb2_get_buffer(cap_q, buf_idx);
+}
+
+static int tegra_vde_validate_vb_size(struct tegra_ctx *ctx,
+ struct vb2_buffer *vb,
+ unsigned int plane_id,
+ size_t min_size)
+{
+ u64 offset = vb->planes[plane_id].data_offset;
+ struct device *dev = ctx->vde->dev;
+
+ if (offset + min_size > vb2_plane_size(vb, plane_id)) {
+ dev_err(dev, "Too small plane[%u] size %lu @0x%llX, should be at least %zu\n",
+ plane_id, vb2_plane_size(vb, plane_id), offset, min_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tegra_vde_h264_setup_frame(struct tegra_ctx *ctx,
+ struct tegra_vde_h264_decoder_ctx *h264,
+ struct v4l2_h264_reflist_builder *b,
+ struct vb2_buffer *vb,
+ unsigned int ref_id,
+ unsigned int id)
+{
+ struct v4l2_pix_format_mplane *pixfmt = &ctx->decoded_fmt.fmt.pix_mp;
+ struct tegra_m2m_buffer *tb = vb_to_tegra_buf(vb);
+ struct tegra_ctx_h264 *h = &ctx->h264;
+ struct tegra_vde *vde = ctx->vde;
+ struct device *dev = vde->dev;
+ unsigned int cstride, lstride;
+ unsigned int flags = 0;
+ size_t lsize, csize;
+ int err, frame_num;
+
+ lsize = h264->pic_width_in_mbs * 16 * h264->pic_height_in_mbs * 16;
+ csize = h264->pic_width_in_mbs * 8 * h264->pic_height_in_mbs * 8;
+ lstride = pixfmt->plane_fmt[0].bytesperline;
+ cstride = pixfmt->plane_fmt[1].bytesperline;
+
+ err = tegra_vde_validate_vb_size(ctx, vb, 0, lsize);
+ if (err)
+ return err;
+
+ err = tegra_vde_validate_vb_size(ctx, vb, 1, csize);
+ if (err)
+ return err;
+
+ err = tegra_vde_validate_vb_size(ctx, vb, 2, csize);
+ if (err)
+ return err;
+
+ if (!tb->aux || tb->aux->size < csize) {
+ dev_err(dev, "Too small aux size %zd, should be at least %zu\n",
+ tb->aux ? tb->aux->size : -1, csize);
+ return -EINVAL;
+ }
+
+ if (id == 0) {
+ frame_num = h->decode_params->frame_num;
+
+ if (h->decode_params->nal_ref_idc)
+ flags |= FLAG_REFERENCE;
+ } else {
+ frame_num = b->refs[ref_id].frame_num;
+ }
+
+ if (tb->b_frame)
+ flags |= FLAG_B_FRAME;
+
+ vde->frames[id].flags = flags;
+ vde->frames[id].y_addr = tb->dma_addr[0];
+ vde->frames[id].cb_addr = tb->dma_addr[1];
+ vde->frames[id].cr_addr = tb->dma_addr[2];
+ vde->frames[id].aux_addr = tb->aux->dma_addr;
+ vde->frames[id].frame_num = frame_num & 0x7fffff;
+ vde->frames[id].luma_atoms_pitch = lstride / VDE_ATOM;
+ vde->frames[id].chroma_atoms_pitch = cstride / VDE_ATOM;
+
+ return 0;
+}
+
+static int tegra_vde_h264_setup_frames(struct tegra_ctx *ctx,
+ struct tegra_vde_h264_decoder_ctx *h264)
+{
+ struct vb2_v4l2_buffer *src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ struct vb2_v4l2_buffer *dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ const struct v4l2_h264_dpb_entry *dpb = ctx->h264.decode_params->dpb;
+ struct tegra_m2m_buffer *tb = vb_to_tegra_buf(&dst->vb2_buf);
+ struct tegra_ctx_h264 *h = &ctx->h264;
+ struct v4l2_h264_reflist_builder b;
+ struct h264_reflists reflists;
+ struct vb2_buffer *ref;
+ unsigned int i;
+ u8 *dpb_id;
+ int err;
+
+ /*
+ * The Tegra hardware requires information about a frame's type,
+ * assuming that the frame consists of slices of the same type.
+ * Userspace must tag the frame's type appropriately.
+ *
+ * Decoding of non-uniform frames isn't supported by the hardware and
+ * would require software preprocessing that we don't implement, so
+ * decoding is expected to fail in this case. Such video streams are
+ * rare in practice, so this is not a big deal.
+ *
+ * If userspace doesn't tell us the frame's type, we will try to
+ * decode it as-is.
+ */
+ v4l2_m2m_buf_copy_metadata(src, dst, true);
+
+ if (h->decode_params->flags & V4L2_H264_DECODE_PARAM_FLAG_BFRAME)
+ tb->b_frame = true;
+ else
+ tb->b_frame = false;
+
+ err = tegra_vde_h264_setup_frame(ctx, h264, NULL, &dst->vb2_buf, 0,
+ h264->dpb_frames_nb++);
+ if (err)
+ return err;
+
+ if (!(h->decode_params->flags & (V4L2_H264_DECODE_PARAM_FLAG_PFRAME |
+ V4L2_H264_DECODE_PARAM_FLAG_BFRAME)))
+ return 0;
+
+ v4l2_h264_init_reflist_builder(&b, h->decode_params, h->sps, dpb);
+
+ if (h->decode_params->flags & V4L2_H264_DECODE_PARAM_FLAG_BFRAME) {
+ v4l2_h264_build_b_ref_lists(&b, reflists.b0, reflists.b1);
+ dpb_id = reflists.b0;
+ } else {
+ v4l2_h264_build_p_ref_list(&b, reflists.p);
+ dpb_id = reflists.p;
+ }
+
+ for (i = 0; i < b.num_valid; i++) {
+ ref = get_ref_buf(ctx, dst, dpb_id[i]);
+
+ err = tegra_vde_h264_setup_frame(ctx, h264, &b, ref, dpb_id[i],
+ h264->dpb_frames_nb++);
+ if (err)
+ return err;
+
+ if (b.refs[dpb_id[i]].pic_order_count < b.cur_pic_order_count)
+ h264->dpb_ref_frames_with_earlier_poc_nb++;
+ }
+
+ return 0;
+}
+
+static unsigned int to_tegra_vde_h264_level_idc(unsigned int level_idc)
+{
+ switch (level_idc) {
+ case 11:
+ return 2;
+ case 12:
+ return 3;
+ case 13:
+ return 4;
+ case 20:
+ return 5;
+ case 21:
+ return 6;
+ case 22:
+ return 7;
+ case 30:
+ return 8;
+ case 31:
+ return 9;
+ case 32:
+ return 10;
+ case 40:
+ return 11;
+ case 41:
+ return 12;
+ case 42:
+ return 13;
+ case 50:
+ return 14;
+ default:
+ break;
+ }
+
+ return 15;
+}
+
+static int tegra_vde_h264_setup_context(struct tegra_ctx *ctx,
+ struct tegra_vde_h264_decoder_ctx *h264)
+{
+ struct tegra_ctx_h264 *h = &ctx->h264;
+ struct tegra_vde *vde = ctx->vde;
+ struct device *dev = vde->dev;
+ int err;
+
+ memset(h264, 0, sizeof(*h264));
+ memset(vde->frames, 0, sizeof(vde->frames));
+
+ tegra_vde_prepare_control_data(ctx, V4L2_CID_STATELESS_H264_DECODE_PARAMS);
+ tegra_vde_prepare_control_data(ctx, V4L2_CID_STATELESS_H264_SPS);
+ tegra_vde_prepare_control_data(ctx, V4L2_CID_STATELESS_H264_PPS);
+
+ /* CABAC unsupported by hardware, requires software preprocessing */
+ if (h->pps->flags & V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE)
+ return -EOPNOTSUPP;
+
+ if (h->sps->profile_idc == 66)
+ h264->baseline_profile = 1;
+
+ if (h->sps->flags & V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE)
+ h264->direct_8x8_inference_flag = 1;
+
+ if (h->pps->flags & V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED)
+ h264->constrained_intra_pred_flag = 1;
+
+ if (h->pps->flags & V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT)
+ h264->deblocking_filter_control_present_flag = 1;
+
+ if (h->pps->flags & V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT)
+ h264->pic_order_present_flag = 1;
+
+ h264->level_idc = to_tegra_vde_h264_level_idc(h->sps->level_idc);
+ h264->log2_max_pic_order_cnt_lsb = h->sps->log2_max_pic_order_cnt_lsb_minus4 + 4;
+ h264->log2_max_frame_num = h->sps->log2_max_frame_num_minus4 + 4;
+ h264->pic_order_cnt_type = h->sps->pic_order_cnt_type;
+ h264->pic_width_in_mbs = h->sps->pic_width_in_mbs_minus1 + 1;
+ h264->pic_height_in_mbs = h->sps->pic_height_in_map_units_minus1 + 1;
+
+ h264->num_ref_idx_l0_active_minus1 = h->pps->num_ref_idx_l0_default_active_minus1;
+ h264->num_ref_idx_l1_active_minus1 = h->pps->num_ref_idx_l1_default_active_minus1;
+ h264->chroma_qp_index_offset = h->pps->chroma_qp_index_offset & 0x1f;
+ h264->pic_init_qp = h->pps->pic_init_qp_minus26 + 26;
+
+ err = tegra_vde_h264_setup_frames(ctx, h264);
+ if (err)
+ return err;
+
+ err = tegra_vde_validate_h264_ctx(dev, h264);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int tegra_vde_h264_decode_run(struct tegra_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ struct tegra_m2m_buffer *bitstream = vb_to_tegra_buf(&src->vb2_buf);
+ size_t bitstream_size = vb2_get_plane_payload(&src->vb2_buf, 0);
+ struct tegra_vde_h264_decoder_ctx h264;
+ struct tegra_vde *vde = ctx->vde;
+ int err;
+
+ err = tegra_vde_h264_setup_context(ctx, &h264);
+ if (err)
+ return err;
+
+ err = tegra_vde_decode_begin(vde, &h264, vde->frames,
+ bitstream->dma_addr[0],
+ bitstream_size);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int tegra_vde_h264_decode_wait(struct tegra_ctx *ctx)
+{
+ return tegra_vde_decode_end(ctx->vde);
+}
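
to_tegra_vde_h264_level_idc() above encodes a fixed mapping as a dense
switch; the same data can be written as a lookup table, which some find
easier to audit. An equivalent sketch (not part of the patch, names are
illustrative):

	#include <linux/kernel.h>
	#include <linux/types.h>

	/* H.264 level_idc -> Tegra VDE level code; 15 means "unknown/maximum" */
	static const struct { u8 idc, vde; } tegra_h264_levels[] = {
		{ 11,  2 }, { 12,  3 }, { 13,  4 }, { 20,  5 }, { 21,  6 },
		{ 22,  7 }, { 30,  8 }, { 31,  9 }, { 32, 10 }, { 40, 11 },
		{ 41, 12 }, { 42, 13 }, { 50, 14 },
	};

	static unsigned int example_level_idc(unsigned int level_idc)
	{
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(tegra_h264_levels); i++)
			if (tegra_h264_levels[i].idc == level_idc)
				return tegra_h264_levels[i].vde;

		return 15;
	}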
diff --git a/drivers/staging/media/tegra-vde/iommu.c b/drivers/media/platform/nvidia/tegra-vde/iommu.c
index adf8dc7ee25c..5521ed3e465f 100644
--- a/drivers/staging/media/tegra-vde/iommu.c
+++ b/drivers/media/platform/nvidia/tegra-vde/iommu.c
@@ -60,7 +60,7 @@ void tegra_vde_iommu_unmap(struct tegra_vde *vde, struct iova *iova)
int tegra_vde_iommu_init(struct tegra_vde *vde)
{
- struct device *dev = vde->miscdev.parent;
+ struct device *dev = vde->dev;
struct iova *iova;
unsigned long order;
unsigned long shift;
diff --git a/drivers/staging/media/tegra-vde/trace.h b/drivers/media/platform/nvidia/tegra-vde/trace.h
index e5714107db58..7853ab095ca4 100644
--- a/drivers/staging/media/tegra-vde/trace.h
+++ b/drivers/media/platform/nvidia/tegra-vde/trace.h
@@ -90,6 +90,6 @@ TRACE_EVENT(vde_ref_l1,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../drivers/staging/media/tegra-vde
+#define TRACE_INCLUDE_PATH ../../drivers/media/platform/nvidia/tegra-vde
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>
diff --git a/drivers/media/platform/nvidia/tegra-vde/v4l2.c b/drivers/media/platform/nvidia/tegra-vde/v4l2.c
new file mode 100644
index 000000000000..bd8c207d5b54
--- /dev/null
+++ b/drivers/media/platform/nvidia/tegra-vde/v4l2.c
@@ -0,0 +1,1018 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NVIDIA Tegra Video decoder driver
+ *
+ * Copyright (C) 2019-2022 Dmitry Osipenko <digetx@gmail.com>
+ *
+ * Based on Cedrus driver by Bootlin.
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ *
+ * Based on Rockchip driver by Collabora.
+ * Copyright (C) 2019 Boris Brezillon <boris.brezillon@collabora.com>
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include "vde.h"
+
+static const struct v4l2_ctrl_config ctrl_cfgs[] = {
+ { .id = V4L2_CID_STATELESS_H264_DECODE_PARAMS, },
+ { .id = V4L2_CID_STATELESS_H264_SPS, },
+ { .id = V4L2_CID_STATELESS_H264_PPS, },
+ {
+ .id = V4L2_CID_STATELESS_H264_DECODE_MODE,
+ .min = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+ .max = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+ .def = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+ },
+ {
+ .id = V4L2_CID_STATELESS_H264_START_CODE,
+ .min = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+ .max = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+ .def = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ .min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .max = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
+ .def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ .min = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ .max = V4L2_MPEG_VIDEO_H264_LEVEL_5_1,
+ },
+};
+
+static inline struct tegra_ctx *fh_to_tegra_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct tegra_ctx, fh);
+}
+
+static void tegra_set_control_data(struct tegra_ctx *ctx, void *data, u32 id)
+{
+ switch (id) {
+ case V4L2_CID_STATELESS_H264_DECODE_PARAMS:
+ ctx->h264.decode_params = data;
+ break;
+ case V4L2_CID_STATELESS_H264_SPS:
+ ctx->h264.sps = data;
+ break;
+ case V4L2_CID_STATELESS_H264_PPS:
+ ctx->h264.pps = data;
+ break;
+ }
+}
+
+void tegra_vde_prepare_control_data(struct tegra_ctx *ctx, u32 id)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ctrl_cfgs); i++) {
+ if (ctx->ctrls[i]->id == id) {
+ tegra_set_control_data(ctx, ctx->ctrls[i]->p_cur.p, id);
+ return;
+ }
+ }
+
+ tegra_set_control_data(ctx, NULL, id);
+}
+
+static int tegra_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbufs,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct tegra_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_format *f;
+ unsigned int i;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ f = &ctx->coded_fmt;
+ else
+ f = &ctx->decoded_fmt;
+
+ if (*num_planes) {
+ if (*num_planes != f->fmt.pix_mp.num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < f->fmt.pix_mp.num_planes; i++) {
+ if (sizes[i] < f->fmt.pix_mp.plane_fmt[i].sizeimage)
+ return -EINVAL;
+ }
+ } else {
+ *num_planes = f->fmt.pix_mp.num_planes;
+
+ for (i = 0; i < f->fmt.pix_mp.num_planes; i++)
+ sizes[i] = f->fmt.pix_mp.plane_fmt[i].sizeimage;
+ }
+
+ return 0;
+}
+
+static int tegra_buf_out_validate(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ vbuf->field = V4L2_FIELD_NONE;
+ return 0;
+}
+
+static void __tegra_buf_cleanup(struct vb2_buffer *vb, unsigned int i)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct tegra_ctx *ctx = vb2_get_drv_priv(vq);
+ struct tegra_m2m_buffer *tb = vb_to_tegra_buf(vb);
+
+ while (i--) {
+ if (tb->a[i]) {
+ tegra_vde_dmabuf_cache_unmap(ctx->vde, tb->a[i], true);
+ tb->a[i] = NULL;
+ }
+
+ if (tb->iova[i]) {
+ tegra_vde_iommu_unmap(ctx->vde, tb->iova[i]);
+ tb->iova[i] = NULL;
+ }
+ }
+
+ if (tb->aux) {
+ tegra_vde_free_bo(tb->aux);
+ tb->aux = NULL;
+ }
+}
+
+static int tegra_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct tegra_ctx *ctx = vb2_get_drv_priv(vq);
+ struct tegra_m2m_buffer *tb = vb_to_tegra_buf(vb);
+ struct tegra_vde *vde = ctx->vde;
+ enum dma_data_direction dma_dir;
+ struct sg_table *sgt;
+ unsigned int i;
+ int err;
+
+ if (V4L2_TYPE_IS_CAPTURE(vq->type) && vb->num_planes > 1) {
+ /*
+ * Tegra decoder writes auxiliary data for I/P frames.
+ * This data is needed for decoding of B frames.
+ */
+ err = tegra_vde_alloc_bo(vde, &tb->aux, DMA_FROM_DEVICE,
+ vb2_plane_size(vb, 1));
+ if (err)
+ return err;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ dma_dir = DMA_TO_DEVICE;
+ else
+ dma_dir = DMA_FROM_DEVICE;
+
+ for (i = 0; i < vb->num_planes; i++) {
+ if (vq->memory == VB2_MEMORY_DMABUF) {
+ get_dma_buf(vb->planes[i].dbuf);
+
+ err = tegra_vde_dmabuf_cache_map(vde, vb->planes[i].dbuf,
+ dma_dir, &tb->a[i],
+ &tb->dma_base[i]);
+ if (err) {
+ dma_buf_put(vb->planes[i].dbuf);
+ goto cleanup;
+ }
+
+ continue;
+ }
+
+ if (vde->domain) {
+ sgt = vb2_dma_sg_plane_desc(vb, i);
+
+ err = tegra_vde_iommu_map(vde, sgt, &tb->iova[i],
+ vb2_plane_size(vb, i));
+ if (err)
+ goto cleanup;
+
+ tb->dma_base[i] = iova_dma_addr(&vde->iova, tb->iova[i]);
+ } else {
+ tb->dma_base[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+ }
+ }
+
+ return 0;
+
+cleanup:
+ __tegra_buf_cleanup(vb, i);
+
+ return err;
+}
+
+static void tegra_buf_cleanup(struct vb2_buffer *vb)
+{
+ __tegra_buf_cleanup(vb, vb->num_planes);
+}
+
+static int tegra_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct tegra_ctx *ctx = vb2_get_drv_priv(vq);
+ struct tegra_m2m_buffer *tb = vb_to_tegra_buf(vb);
+ size_t hw_align, hw_size, hw_payload, size, offset;
+ struct v4l2_pix_format_mplane *pixfmt;
+ unsigned int i;
+ void *vb_data;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ hw_align = BSEV_ALIGN;
+ pixfmt = &ctx->coded_fmt.fmt.pix_mp;
+ } else {
+ hw_align = FRAMEID_ALIGN;
+ pixfmt = &ctx->decoded_fmt.fmt.pix_mp;
+ }
+
+ for (i = 0; i < vb->num_planes; i++) {
+ offset = vb->planes[i].data_offset;
+
+ if (offset & (hw_align - 1))
+ return -EINVAL;
+
+ if (V4L2_TYPE_IS_CAPTURE(vq->type)) {
+ size = pixfmt->plane_fmt[i].sizeimage;
+ hw_payload = ALIGN(size, VDE_ATOM);
+ } else {
+ size = vb2_get_plane_payload(vb, i) - offset;
+ hw_payload = ALIGN(size + VDE_ATOM, SXE_BUFFER);
+ }
+
+ hw_size = offset + hw_payload;
+
+ if (vb2_plane_size(vb, i) < hw_size)
+ return -EINVAL;
+
+ vb2_set_plane_payload(vb, i, hw_payload);
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ vb_data = vb2_plane_vaddr(vb, i);
+
+ /*
+ * Hardware requires zero-padding of coded data.
+ * Otherwise it will fail to parse the trailing
+ * data and abort the decoding.
+ */
+ if (vb_data)
+ memset(vb_data + offset + size, 0,
+ hw_size - offset - size);
+ }
+
+ tb->dma_addr[i] = tb->dma_base[i] + offset;
+ }
+
+ switch (pixfmt->pixelformat) {
+ case V4L2_PIX_FMT_YVU420M:
+ swap(tb->dma_addr[1], tb->dma_addr[2]);
+ break;
+ }
+
+ return 0;
+}
+
+static void tegra_buf_queue(struct vb2_buffer *vb)
+{
+ struct tegra_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static void tegra_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct tegra_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->hdl);
+}
+
+static int tegra_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ return 0;
+}
+
+static void tegra_stop_streaming(struct vb2_queue *vq)
+{
+ struct tegra_ctx *ctx = vb2_get_drv_priv(vq);
+
+ while (true) {
+ struct vb2_v4l2_buffer *vbuf;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (!vbuf)
+ break;
+
+ v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req, &ctx->hdl);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static const struct vb2_ops tegra_qops = {
+ .queue_setup = tegra_queue_setup,
+ .buf_init = tegra_buf_init,
+ .buf_cleanup = tegra_buf_cleanup,
+ .buf_prepare = tegra_buf_prepare,
+ .buf_queue = tegra_buf_queue,
+ .buf_out_validate = tegra_buf_out_validate,
+ .buf_request_complete = tegra_buf_request_complete,
+ .start_streaming = tegra_start_streaming,
+ .stop_streaming = tegra_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int tegra_queue_init(void *priv,
+ struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct tegra_ctx *ctx = priv;
+ struct tegra_vde *vde = ctx->vde;
+ const struct vb2_mem_ops *mem_ops;
+ unsigned long dma_attrs;
+ int err;
+
+ /*
+ * TODO: Switch to using vb2_dma_contig_memops uniformly once we
+ * add IOMMU_DOMAIN support for the video decoder to the tegra-smmu
+ * driver. For now we need to stick with the SG ops in order to get
+ * the SG table easily. This is suboptimal since SG mappings waste
+ * CPU cache and we don't need that caching.
+ */
+ if (vde->domain)
+ mem_ops = &vb2_dma_sg_memops;
+ else
+ mem_ops = &vb2_dma_contig_memops;
+
+ dma_attrs = DMA_ATTR_WRITE_COMBINE;
+
+ src_vq->buf_struct_size = sizeof(struct tegra_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ src_vq->supports_requests = true;
+ src_vq->requires_requests = true;
+ src_vq->lock = &vde->v4l2_lock;
+ src_vq->dma_attrs = dma_attrs;
+ src_vq->mem_ops = mem_ops;
+ src_vq->ops = &tegra_qops;
+ src_vq->drv_priv = ctx;
+ src_vq->dev = vde->dev;
+
+ err = vb2_queue_init(src_vq);
+ if (err) {
+ v4l2_err(&vde->v4l2_dev,
+ "failed to initialize src queue: %d\n", err);
+ return err;
+ }
+
+ /*
+ * We may need to zero the end of the bitstream in the kernel if
+ * userspace doesn't do that, hence a kernel mapping (kmap) is needed
+ * for the coded data. It's not needed for framebuffers.
+ */
+ dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
+
+ dst_vq->buf_struct_size = sizeof(struct tegra_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ dst_vq->lock = &vde->v4l2_lock;
+ dst_vq->dma_attrs = dma_attrs;
+ dst_vq->mem_ops = mem_ops;
+ dst_vq->ops = &tegra_qops;
+ dst_vq->drv_priv = ctx;
+ dst_vq->dev = vde->dev;
+
+ err = vb2_queue_init(dst_vq);
+ if (err) {
+ v4l2_err(&vde->v4l2_dev,
+ "failed to initialize dst queue: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void tegra_reset_fmt(struct tegra_ctx *ctx, struct v4l2_format *f,
+ u32 fourcc)
+{
+ memset(f, 0, sizeof(*f));
+ f->fmt.pix_mp.pixelformat = fourcc;
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+ f->fmt.pix_mp.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ f->fmt.pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_REC709;
+ f->fmt.pix_mp.quantization = V4L2_QUANTIZATION_DEFAULT;
+}
+
+static void tegra_reset_coded_fmt(struct tegra_ctx *ctx)
+{
+ const struct tegra_vde_soc *soc = ctx->vde->soc;
+ struct v4l2_format *f = &ctx->coded_fmt;
+
+ ctx->coded_fmt_desc = &soc->coded_fmts[0];
+ tegra_reset_fmt(ctx, f, ctx->coded_fmt_desc->fourcc);
+
+ f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ f->fmt.pix_mp.width = ctx->coded_fmt_desc->frmsize.min_width;
+ f->fmt.pix_mp.height = ctx->coded_fmt_desc->frmsize.min_height;
+}
+
+static void tegra_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
+ u32 pixelformat, u32 width, u32 height)
+{
+ const struct v4l2_format_info *info = v4l2_format_info(pixelformat);
+ struct v4l2_plane_pix_format *plane;
+ unsigned int i;
+
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_YUV420M:
+ case V4L2_PIX_FMT_YVU420M:
+ pixfmt->width = width;
+ pixfmt->height = height;
+ pixfmt->pixelformat = pixelformat;
+ pixfmt->num_planes = info->mem_planes;
+
+ for (i = 0; i < pixfmt->num_planes; i++) {
+ unsigned int hdiv = (i == 0) ? 1 : 2;
+ unsigned int vdiv = (i == 0) ? 1 : 2;
+
+ /*
+ * VDE is connected to the graphics memory via a 128-bit port,
+ * and all memory accesses are made using 16-byte atoms.
+ *
+ * V4L2 requires the Cb/Cr strides to be exactly half of the
+ * Y stride, hence we align Y to 16 bytes x 2.
+ */
+ plane = &pixfmt->plane_fmt[i];
+ plane->bytesperline = ALIGN(width, VDE_ATOM * 2) / hdiv;
+ plane->sizeimage = plane->bytesperline * height / vdiv;
+ }
+
+ break;
+ }
+}
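+
+/*
+ * Illustrative worked example (editorial note, not part of the original
+ * patch): for a 720x576 YUV420M frame,
+ * ALIGN(720, VDE_ATOM * 2) = ALIGN(720, 32) = 736, so the Y plane gets
+ * bytesperline = 736 and sizeimage = 736 * 576, while each chroma plane
+ * gets bytesperline = 736 / 2 = 368 and sizeimage = 368 * 576 / 2,
+ * preserving the 2:1 luma/chroma stride ratio required by V4L2.
+ */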
+
+static void tegra_reset_decoded_fmt(struct tegra_ctx *ctx)
+{
+ struct v4l2_format *f = &ctx->decoded_fmt;
+
+ tegra_reset_fmt(ctx, f, ctx->coded_fmt_desc->decoded_fmts[0]);
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ tegra_fill_pixfmt_mp(&f->fmt.pix_mp,
+ ctx->coded_fmt_desc->decoded_fmts[0],
+ ctx->coded_fmt.fmt.pix_mp.width,
+ ctx->coded_fmt.fmt.pix_mp.height);
+}
+
+static void tegra_job_finish(struct tegra_ctx *ctx,
+ enum vb2_buffer_state result)
+{
+ v4l2_m2m_buf_done_and_job_finish(ctx->vde->m2m, ctx->fh.m2m_ctx,
+ result);
+}
+
+static void tegra_decode_complete(struct work_struct *work)
+{
+ struct tegra_ctx *ctx = container_of(work, struct tegra_ctx, work);
+ int err;
+
+ err = ctx->coded_fmt_desc->decode_wait(ctx);
+ if (err)
+ tegra_job_finish(ctx, VB2_BUF_STATE_ERROR);
+ else
+ tegra_job_finish(ctx, VB2_BUF_STATE_DONE);
+}
+
+static int tegra_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->bus_info, "platform:tegra-vde", sizeof(cap->bus_info));
+ strscpy(cap->driver, "tegra-vde", sizeof(cap->driver));
+ strscpy(cap->card, "tegra-vde", sizeof(cap->card));
+
+ return 0;
+}
+
+static int tegra_enum_decoded_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+
+ if (WARN_ON(!ctx->coded_fmt_desc))
+ return -EINVAL;
+
+ if (f->index >= ctx->coded_fmt_desc->num_decoded_fmts)
+ return -EINVAL;
+
+ f->pixelformat = ctx->coded_fmt_desc->decoded_fmts[f->index];
+
+ return 0;
+}
+
+static int tegra_g_decoded_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+
+ *f = ctx->decoded_fmt;
+ return 0;
+}
+
+static int tegra_try_decoded_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ const struct tegra_coded_fmt_desc *coded_desc;
+ unsigned int i;
+
+ /*
+ * The codec context should always point to a coded format desc;
+ * if the format on the coded side has not been set yet, it points
+ * to the default value.
+ */
+ coded_desc = ctx->coded_fmt_desc;
+ if (WARN_ON(!coded_desc))
+ return -EINVAL;
+
+ if (!coded_desc->num_decoded_fmts)
+ return -EINVAL;
+
+ for (i = 0; i < coded_desc->num_decoded_fmts; i++) {
+ if (coded_desc->decoded_fmts[i] == pix_mp->pixelformat)
+ break;
+ }
+
+ if (i == coded_desc->num_decoded_fmts)
+ pix_mp->pixelformat = coded_desc->decoded_fmts[0];
+
+ /* always apply the frmsize constraint of the coded end */
+ v4l2_apply_frmsize_constraints(&pix_mp->width,
+ &pix_mp->height,
+ &coded_desc->frmsize);
+
+ tegra_fill_pixfmt_mp(pix_mp, pix_mp->pixelformat,
+ pix_mp->width, pix_mp->height);
+ pix_mp->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int tegra_s_decoded_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ struct vb2_queue *vq;
+ int err;
+
+ /* change not allowed if queue is busy */
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ err = tegra_try_decoded_fmt(file, priv, f);
+ if (err)
+ return err;
+
+ ctx->decoded_fmt = *f;
+
+ return 0;
+}
+
+static int tegra_enum_coded_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ const struct tegra_vde_soc *soc = ctx->vde->soc;
+
+ if (f->index >= soc->num_coded_fmts)
+ return -EINVAL;
+
+ f->pixelformat = soc->coded_fmts[f->index].fourcc;
+
+ return 0;
+}
+
+static int tegra_g_coded_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+
+ *f = ctx->coded_fmt;
+ return 0;
+}
+
+static const struct tegra_coded_fmt_desc *
+tegra_find_coded_fmt_desc(struct tegra_ctx *ctx, u32 fourcc)
+{
+ const struct tegra_vde_soc *soc = ctx->vde->soc;
+ unsigned int i;
+
+ for (i = 0; i < soc->num_coded_fmts; i++) {
+ if (soc->coded_fmts[i].fourcc == fourcc)
+ return &soc->coded_fmts[i];
+ }
+
+ return NULL;
+}
+
+static int tegra_try_coded_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ const struct tegra_vde_soc *soc = ctx->vde->soc;
+ int size = pix_mp->plane_fmt[0].sizeimage;
+ const struct tegra_coded_fmt_desc *desc;
+
+ desc = tegra_find_coded_fmt_desc(ctx, pix_mp->pixelformat);
+ if (!desc) {
+ pix_mp->pixelformat = soc->coded_fmts[0].fourcc;
+ desc = &soc->coded_fmts[0];
+ }
+
+ v4l2_apply_frmsize_constraints(&pix_mp->width,
+ &pix_mp->height,
+ &desc->frmsize);
+
+ pix_mp->plane_fmt[0].sizeimage = max(ALIGN(size, SXE_BUFFER), SZ_2M);
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->num_planes = 1;
+
+ return 0;
+}
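+
+/*
+ * Illustrative worked example (editorial note, not part of the original
+ * patch): with SXE_BUFFER = SZ_32K, a userspace request of
+ * sizeimage = 100000 becomes
+ * max(ALIGN(100000, SZ_32K), SZ_2M) = max(131072, 2097152) = 2 MiB, i.e.
+ * the coded buffer is never smaller than 2 MiB and larger requests are
+ * rounded up to a 32 KiB multiple.
+ */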
+
+static int tegra_s_coded_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+ const struct tegra_coded_fmt_desc *desc;
+ struct vb2_queue *peer_vq, *vq;
+ struct v4l2_format *cap_fmt;
+ int err;
+
+ /*
+ * In order to support dynamic resolution change, the decoder admits
+ * a resolution change as long as the pixelformat remains the same.
+ * This can't be done while streaming.
+ */
+ vq = v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (vb2_is_streaming(vq) ||
+ (vb2_is_busy(vq) &&
+ f->fmt.pix_mp.pixelformat != ctx->coded_fmt.fmt.pix_mp.pixelformat))
+ return -EBUSY;
+
+ /*
+ * Since format change on the OUTPUT queue will reset the CAPTURE
+ * queue, we can't allow doing so when the CAPTURE queue has buffers
+ * allocated.
+ */
+ peer_vq = v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (vb2_is_busy(peer_vq))
+ return -EBUSY;
+
+ err = tegra_try_coded_fmt(file, priv, f);
+ if (err)
+ return err;
+
+ desc = tegra_find_coded_fmt_desc(ctx, f->fmt.pix_mp.pixelformat);
+ if (!desc)
+ return -EINVAL;
+
+ ctx->coded_fmt_desc = desc;
+ ctx->coded_fmt = *f;
+
+ /*
+ * The current decoded format might have become invalid with the
+ * newly selected codec, so reset it to the default just to be safe
+ * and keep the internal driver state sane. The user is required to
+ * set the decoded format again after we return, so we don't need
+ * anything smarter.
+ *
+ * Note that this also propagates any size changes to the decoded
+ * format.
+ */
+ tegra_reset_decoded_fmt(ctx);
+
+ /* propagate colorspace information to capture */
+ cap_fmt = &ctx->decoded_fmt;
+ cap_fmt->fmt.pix_mp.xfer_func = f->fmt.pix_mp.xfer_func;
+ cap_fmt->fmt.pix_mp.ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ cap_fmt->fmt.pix_mp.colorspace = f->fmt.pix_mp.colorspace;
+ cap_fmt->fmt.pix_mp.quantization = f->fmt.pix_mp.quantization;
+
+ return 0;
+}
+
+static int tegra_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ const struct tegra_coded_fmt_desc *fmt;
+
+ if (fsize->index)
+ return -EINVAL;
+
+ fmt = tegra_find_coded_fmt_desc(ctx, fsize->pixel_format);
+ if (!fmt)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise = fmt->frmsize;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops tegra_v4l2_ioctl_ops = {
+ .vidioc_querycap = tegra_querycap,
+ .vidioc_enum_framesizes = tegra_enum_framesizes,
+
+ .vidioc_try_fmt_vid_out_mplane = tegra_try_coded_fmt,
+ .vidioc_g_fmt_vid_out_mplane = tegra_g_coded_fmt,
+ .vidioc_s_fmt_vid_out_mplane = tegra_s_coded_fmt,
+ .vidioc_enum_fmt_vid_out = tegra_enum_coded_fmt,
+
+ .vidioc_try_fmt_vid_cap_mplane = tegra_try_decoded_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = tegra_g_decoded_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = tegra_s_decoded_fmt,
+ .vidioc_enum_fmt_vid_cap = tegra_enum_decoded_fmt,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int tegra_init_ctrls(struct tegra_ctx *ctx)
+{
+ unsigned int i;
+ int err;
+
+ err = v4l2_ctrl_handler_init(&ctx->hdl, ARRAY_SIZE(ctrl_cfgs));
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(ctrl_cfgs); i++) {
+ ctx->ctrls[i] = v4l2_ctrl_new_custom(&ctx->hdl, &ctrl_cfgs[i],
+ NULL);
+ if (ctx->hdl.error) {
+ err = ctx->hdl.error;
+ goto free_ctrls;
+ }
+ }
+
+ err = v4l2_ctrl_handler_setup(&ctx->hdl);
+ if (err)
+ goto free_ctrls;
+
+ ctx->fh.ctrl_handler = &ctx->hdl;
+
+ return 0;
+
+free_ctrls:
+ v4l2_ctrl_handler_free(&ctx->hdl);
+
+ return err;
+}
+
+static int tegra_init_m2m(struct tegra_ctx *ctx)
+{
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(ctx->vde->m2m,
+ ctx, tegra_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx))
+ return PTR_ERR(ctx->fh.m2m_ctx);
+
+ return 0;
+}
+
+static int tegra_open(struct file *file)
+{
+ struct tegra_vde *vde = video_drvdata(file);
+ struct tegra_ctx *ctx;
+ int err;
+
+ ctx = kzalloc(offsetof(struct tegra_ctx, ctrls[ARRAY_SIZE(ctrl_cfgs)]),
+ GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->vde = vde;
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ INIT_WORK(&ctx->work, tegra_decode_complete);
+
+ err = tegra_init_ctrls(ctx);
+ if (err) {
+ v4l2_err(&vde->v4l2_dev, "failed to add controls: %d\n", err);
+ goto free_ctx;
+ }
+
+ err = tegra_init_m2m(ctx);
+ if (err) {
+ v4l2_err(&vde->v4l2_dev, "failed to initialize m2m: %d\n", err);
+ goto free_ctrls;
+ }
+
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ tegra_reset_coded_fmt(ctx);
+ tegra_try_coded_fmt(file, file->private_data, &ctx->coded_fmt);
+
+ tegra_reset_decoded_fmt(ctx);
+ tegra_try_decoded_fmt(file, file->private_data, &ctx->decoded_fmt);
+
+ return 0;
+
+free_ctrls:
+ v4l2_ctrl_handler_free(&ctx->hdl);
+free_ctx:
+ kfree(ctx);
+
+ return err;
+}
+
+static int tegra_release(struct file *file)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct tegra_ctx *ctx = fh_to_tegra_ctx(fh);
+ struct tegra_vde *vde = ctx->vde;
+
+ v4l2_fh_del(fh);
+ v4l2_m2m_ctx_release(fh->m2m_ctx);
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ v4l2_fh_exit(fh);
+ kfree(ctx);
+
+ tegra_vde_dmabuf_cache_unmap_sync(vde);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations tegra_v4l2_fops = {
+ .owner = THIS_MODULE,
+ .open = tegra_open,
+ .poll = v4l2_m2m_fop_poll,
+ .mmap = v4l2_m2m_fop_mmap,
+ .release = tegra_release,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static void tegra_device_run(void *priv)
+{
+ struct tegra_ctx *ctx = priv;
+ struct vb2_v4l2_buffer *src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ struct media_request *src_req = src->vb2_buf.req_obj.req;
+ int err;
+
+ v4l2_ctrl_request_setup(src_req, &ctx->hdl);
+
+ err = ctx->coded_fmt_desc->decode_run(ctx);
+
+ v4l2_ctrl_request_complete(src_req, &ctx->hdl);
+
+ if (err)
+ tegra_job_finish(ctx, VB2_BUF_STATE_ERROR);
+ else
+ queue_work(ctx->vde->wq, &ctx->work);
+}
+
+static const struct v4l2_m2m_ops tegra_v4l2_m2m_ops = {
+ .device_run = tegra_device_run,
+};
+
+static int tegra_request_validate(struct media_request *req)
+{
+ unsigned int count;
+
+ count = vb2_request_buffer_cnt(req);
+ if (!count)
+ return -ENOENT;
+ else if (count > 1)
+ return -EINVAL;
+
+ return vb2_request_validate(req);
+}
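+
+/*
+ * Note (editorial): a request is rejected unless it carries exactly one
+ * buffer, matching the stateless decoder model in which each media
+ * request pairs a single bitstream buffer with its per-frame controls.
+ */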
+
+static const struct media_device_ops tegra_media_device_ops = {
+ .req_validate = tegra_request_validate,
+ .req_queue = v4l2_m2m_request_queue,
+};
+
+int tegra_vde_v4l2_init(struct tegra_vde *vde)
+{
+ struct device *dev = vde->dev;
+ int err;
+
+ mutex_init(&vde->v4l2_lock);
+ media_device_init(&vde->mdev);
+ video_set_drvdata(&vde->vdev, vde);
+
+ vde->vdev.lock = &vde->v4l2_lock;
+ vde->vdev.fops = &tegra_v4l2_fops;
+ vde->vdev.vfl_dir = VFL_DIR_M2M;
+ vde->vdev.release = video_device_release_empty;
+ vde->vdev.v4l2_dev = &vde->v4l2_dev;
+ vde->vdev.ioctl_ops = &tegra_v4l2_ioctl_ops;
+ vde->vdev.device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+
+ vde->v4l2_dev.mdev = &vde->mdev;
+ vde->mdev.ops = &tegra_media_device_ops;
+ vde->mdev.dev = dev;
+
+ strscpy(vde->mdev.model, "tegra-vde", sizeof(vde->mdev.model));
+ strscpy(vde->vdev.name, "tegra-vde", sizeof(vde->vdev.name));
+ strscpy(vde->mdev.bus_info, "platform:tegra-vde",
+ sizeof(vde->mdev.bus_info));
+
+ vde->wq = create_workqueue("tegra-vde");
+ if (!vde->wq)
+ return -ENOMEM;
+
+ err = media_device_register(&vde->mdev);
+ if (err) {
+ dev_err(dev, "failed to register media device: %d\n", err);
+ goto clean_up_media_device;
+ }
+
+ err = v4l2_device_register(dev, &vde->v4l2_dev);
+ if (err) {
+ dev_err(dev, "failed to register v4l2 device: %d\n", err);
+ goto unreg_media_device;
+ }
+
+ err = video_register_device(&vde->vdev, VFL_TYPE_VIDEO, -1);
+ if (err) {
+ dev_err(dev, "failed to register video device: %d\n", err);
+ goto unreg_v4l2;
+ }
+
+ vde->m2m = v4l2_m2m_init(&tegra_v4l2_m2m_ops);
+ err = PTR_ERR_OR_ZERO(vde->m2m);
+ if (err) {
+ dev_err(dev, "failed to initialize m2m device: %d\n", err);
+ goto unreg_video_device;
+ }
+
+ err = v4l2_m2m_register_media_controller(vde->m2m, &vde->vdev,
+ MEDIA_ENT_F_PROC_VIDEO_DECODER);
+ if (err) {
+ dev_err(dev, "failed to register media controller: %d\n", err);
+ goto release_m2m;
+ }
+
+ v4l2_info(&vde->v4l2_dev, "v4l2 device registered as /dev/video%d\n",
+ vde->vdev.num);
+
+ return 0;
+
+release_m2m:
+ v4l2_m2m_release(vde->m2m);
+unreg_video_device:
+ video_unregister_device(&vde->vdev);
+unreg_v4l2:
+ v4l2_device_unregister(&vde->v4l2_dev);
+unreg_media_device:
+ media_device_unregister(&vde->mdev);
+clean_up_media_device:
+ media_device_cleanup(&vde->mdev);
+
+ destroy_workqueue(vde->wq);
+
+ return err;
+}
+
+void tegra_vde_v4l2_deinit(struct tegra_vde *vde)
+{
+ v4l2_m2m_unregister_media_controller(vde->m2m);
+ v4l2_m2m_release(vde->m2m);
+
+ video_unregister_device(&vde->vdev);
+ v4l2_device_unregister(&vde->v4l2_dev);
+
+ media_device_unregister(&vde->mdev);
+ media_device_cleanup(&vde->mdev);
+
+ destroy_workqueue(vde->wq);
+}
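+
+/*
+ * Note (editorial): the teardown above mirrors tegra_vde_v4l2_init() in
+ * reverse order, which keeps the media controller, video device, v4l2
+ * device, media device and workqueue unwound safely.
+ */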
diff --git a/drivers/media/platform/nvidia/tegra-vde/vde.c b/drivers/media/platform/nvidia/tegra-vde/vde.c
new file mode 100644
index 000000000000..f3e863a94c5a
--- /dev/null
+++ b/drivers/media/platform/nvidia/tegra-vde/vde.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NVIDIA Tegra Video decoder driver
+ *
+ * Copyright (C) 2016-2017 Dmitry Osipenko <digetx@gmail.com>
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-buf.h>
+#include <linux/genalloc.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <soc/tegra/common.h>
+#include <soc/tegra/pmc.h>
+
+#include "vde.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+void tegra_vde_writel(struct tegra_vde *vde, u32 value,
+ void __iomem *base, u32 offset)
+{
+ trace_vde_writel(vde, base, offset, value);
+
+ writel_relaxed(value, base + offset);
+}
+
+u32 tegra_vde_readl(struct tegra_vde *vde, void __iomem *base, u32 offset)
+{
+ u32 value = readl_relaxed(base + offset);
+
+ trace_vde_readl(vde, base, offset, value);
+
+ return value;
+}
+
+void tegra_vde_set_bits(struct tegra_vde *vde, u32 mask,
+ void __iomem *base, u32 offset)
+{
+ u32 value = tegra_vde_readl(vde, base, offset);
+
+ tegra_vde_writel(vde, value | mask, base, offset);
+}
+
+int tegra_vde_alloc_bo(struct tegra_vde *vde,
+ struct tegra_vde_bo **ret_bo,
+ enum dma_data_direction dma_dir,
+ size_t size)
+{
+ struct device *dev = vde->dev;
+ struct tegra_vde_bo *bo;
+ int err;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return -ENOMEM;
+
+ bo->vde = vde;
+ bo->size = size;
+ bo->dma_dir = dma_dir;
+ bo->dma_attrs = DMA_ATTR_WRITE_COMBINE |
+ DMA_ATTR_NO_KERNEL_MAPPING;
+
+ if (!vde->domain)
+ bo->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
+
+ bo->dma_cookie = dma_alloc_attrs(dev, bo->size, &bo->dma_handle,
+ GFP_KERNEL, bo->dma_attrs);
+ if (!bo->dma_cookie) {
+ dev_err(dev, "Failed to allocate DMA buffer of size: %zu\n",
+ bo->size);
+ err = -ENOMEM;
+ goto free_bo;
+ }
+
+ err = dma_get_sgtable_attrs(dev, &bo->sgt, bo->dma_cookie,
+ bo->dma_handle, bo->size, bo->dma_attrs);
+ if (err) {
+ dev_err(dev, "Failed to get DMA buffer SG table: %d\n", err);
+ goto free_attrs;
+ }
+
+ err = dma_map_sgtable(dev, &bo->sgt, bo->dma_dir, bo->dma_attrs);
+ if (err) {
+ dev_err(dev, "Failed to map DMA buffer SG table: %d\n", err);
+ goto free_table;
+ }
+
+ if (vde->domain) {
+ err = tegra_vde_iommu_map(vde, &bo->sgt, &bo->iova, bo->size);
+ if (err) {
+ dev_err(dev, "Failed to map DMA buffer IOVA: %d\n", err);
+ goto unmap_sgtable;
+ }
+
+ bo->dma_addr = iova_dma_addr(&vde->iova, bo->iova);
+ } else {
+ bo->dma_addr = sg_dma_address(bo->sgt.sgl);
+ }
+
+ *ret_bo = bo;
+
+ return 0;
+
+unmap_sgtable:
+ dma_unmap_sgtable(dev, &bo->sgt, bo->dma_dir, bo->dma_attrs);
+free_table:
+ sg_free_table(&bo->sgt);
+free_attrs:
+ dma_free_attrs(dev, bo->size, bo->dma_cookie, bo->dma_handle,
+ bo->dma_attrs);
+free_bo:
+ kfree(bo);
+
+ return err;
+}
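+
+/*
+ * Usage note (editorial): the probe path below allocates the 4 KiB
+ * "secure" BO with tegra_vde_alloc_bo(vde, &vde->secure_bo,
+ * DMA_FROM_DEVICE, 4096). The helper forces a contiguous allocation
+ * when no IOMMU domain is available and otherwise maps the SG table
+ * into the decoder's IOVA space.
+ */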
+
+void tegra_vde_free_bo(struct tegra_vde_bo *bo)
+{
+ struct tegra_vde *vde = bo->vde;
+ struct device *dev = vde->dev;
+
+ if (vde->domain)
+ tegra_vde_iommu_unmap(vde, bo->iova);
+
+ dma_unmap_sgtable(dev, &bo->sgt, bo->dma_dir, bo->dma_attrs);
+
+ sg_free_table(&bo->sgt);
+
+ dma_free_attrs(dev, bo->size, bo->dma_cookie, bo->dma_handle,
+ bo->dma_attrs);
+ kfree(bo);
+}
+
+static irqreturn_t tegra_vde_isr(int irq, void *data)
+{
+ struct tegra_vde *vde = data;
+
+ if (completion_done(&vde->decode_completion))
+ return IRQ_NONE;
+
+ tegra_vde_set_bits(vde, 0, vde->frameid, 0x208);
+ complete(&vde->decode_completion);
+
+ return IRQ_HANDLED;
+}
+
+static __maybe_unused int tegra_vde_runtime_suspend(struct device *dev)
+{
+ struct tegra_vde *vde = dev_get_drvdata(dev);
+ int err;
+
+ if (!dev->pm_domain) {
+ err = tegra_powergate_power_off(TEGRA_POWERGATE_VDEC);
+ if (err) {
+ dev_err(dev, "Failed to power down HW: %d\n", err);
+ return err;
+ }
+ }
+
+ clk_disable_unprepare(vde->clk);
+ reset_control_release(vde->rst);
+ reset_control_release(vde->rst_mc);
+
+ return 0;
+}
+
+static __maybe_unused int tegra_vde_runtime_resume(struct device *dev)
+{
+ struct tegra_vde *vde = dev_get_drvdata(dev);
+ int err;
+
+ err = reset_control_acquire(vde->rst_mc);
+ if (err) {
+ dev_err(dev, "Failed to acquire mc reset: %d\n", err);
+ return err;
+ }
+
+ err = reset_control_acquire(vde->rst);
+ if (err) {
+ dev_err(dev, "Failed to acquire reset: %d\n", err);
+ goto release_mc_reset;
+ }
+
+ if (!dev->pm_domain) {
+ err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_VDEC,
+ vde->clk, vde->rst);
+ if (err) {
+ dev_err(dev, "Failed to power up HW : %d\n", err);
+ goto release_reset;
+ }
+ } else {
+ /*
+ * tegra_powergate_sequence_power_up() leaves the clocks enabled,
+ * while GENPD does not.
+ */
+ err = clk_prepare_enable(vde->clk);
+ if (err) {
+ dev_err(dev, "Failed to enable clock: %d\n", err);
+ goto release_reset;
+ }
+ }
+
+ return 0;
+
+release_reset:
+ reset_control_release(vde->rst);
+release_mc_reset:
+ reset_control_release(vde->rst_mc);
+
+ return err;
+}
+
+static int tegra_vde_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra_vde *vde;
+ int irq, err;
+
+ vde = devm_kzalloc(dev, sizeof(*vde), GFP_KERNEL);
+ if (!vde)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, vde);
+
+ vde->soc = of_device_get_match_data(&pdev->dev);
+ vde->dev = dev;
+
+ vde->sxe = devm_platform_ioremap_resource_byname(pdev, "sxe");
+ if (IS_ERR(vde->sxe))
+ return PTR_ERR(vde->sxe);
+
+ vde->bsev = devm_platform_ioremap_resource_byname(pdev, "bsev");
+ if (IS_ERR(vde->bsev))
+ return PTR_ERR(vde->bsev);
+
+ vde->mbe = devm_platform_ioremap_resource_byname(pdev, "mbe");
+ if (IS_ERR(vde->mbe))
+ return PTR_ERR(vde->mbe);
+
+ vde->ppe = devm_platform_ioremap_resource_byname(pdev, "ppe");
+ if (IS_ERR(vde->ppe))
+ return PTR_ERR(vde->ppe);
+
+ vde->mce = devm_platform_ioremap_resource_byname(pdev, "mce");
+ if (IS_ERR(vde->mce))
+ return PTR_ERR(vde->mce);
+
+ vde->tfe = devm_platform_ioremap_resource_byname(pdev, "tfe");
+ if (IS_ERR(vde->tfe))
+ return PTR_ERR(vde->tfe);
+
+ vde->ppb = devm_platform_ioremap_resource_byname(pdev, "ppb");
+ if (IS_ERR(vde->ppb))
+ return PTR_ERR(vde->ppb);
+
+ vde->vdma = devm_platform_ioremap_resource_byname(pdev, "vdma");
+ if (IS_ERR(vde->vdma))
+ return PTR_ERR(vde->vdma);
+
+ vde->frameid = devm_platform_ioremap_resource_byname(pdev, "frameid");
+ if (IS_ERR(vde->frameid))
+ return PTR_ERR(vde->frameid);
+
+ vde->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(vde->clk)) {
+ err = PTR_ERR(vde->clk);
+ dev_err(dev, "Could not get VDE clk %d\n", err);
+ return err;
+ }
+
+ vde->rst = devm_reset_control_get_exclusive_released(dev, NULL);
+ if (IS_ERR(vde->rst)) {
+ err = PTR_ERR(vde->rst);
+ dev_err(dev, "Could not get VDE reset %d\n", err);
+ return err;
+ }
+
+ vde->rst_mc = devm_reset_control_get_optional_exclusive_released(dev, "mc");
+ if (IS_ERR(vde->rst_mc)) {
+ err = PTR_ERR(vde->rst_mc);
+ dev_err(dev, "Could not get MC reset %d\n", err);
+ return err;
+ }
+
+ irq = platform_get_irq_byname(pdev, "sync-token");
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, tegra_vde_isr, 0,
+ dev_name(dev), vde);
+ if (err) {
+ dev_err(dev, "Could not request IRQ %d\n", err);
+ return err;
+ }
+
+ err = devm_tegra_core_dev_init_opp_table_common(dev);
+ if (err) {
+ dev_err(dev, "Could initialize OPP table %d\n", err);
+ return err;
+ }
+
+ vde->iram_pool = of_gen_pool_get(dev->of_node, "iram", 0);
+ if (!vde->iram_pool) {
+ dev_err(dev, "Could not get IRAM pool\n");
+ return -EPROBE_DEFER;
+ }
+
+ vde->iram = gen_pool_dma_alloc(vde->iram_pool,
+ gen_pool_size(vde->iram_pool),
+ &vde->iram_lists_addr);
+ if (!vde->iram) {
+ dev_err(dev, "Could not reserve IRAM\n");
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&vde->map_list);
+ mutex_init(&vde->map_lock);
+ mutex_init(&vde->lock);
+ init_completion(&vde->decode_completion);
+
+ err = tegra_vde_iommu_init(vde);
+ if (err) {
+ dev_err(dev, "Failed to initialize IOMMU: %d\n", err);
+ goto err_gen_free;
+ }
+
+ pm_runtime_enable(dev);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 300);
+
+ /*
+ * The VDE partition may be left ON by the bootloader, hence let's
+ * power-cycle it in order to put the hardware into a predictable
+ * low-power state.
+ */
+ err = pm_runtime_resume_and_get(dev);
+ if (err)
+ goto err_pm_runtime;
+
+ pm_runtime_put(dev);
+
+ err = tegra_vde_alloc_bo(vde, &vde->secure_bo, DMA_FROM_DEVICE, 4096);
+ if (err) {
+ dev_err(dev, "Failed to allocate secure BO: %d\n", err);
+ goto err_pm_runtime;
+ }
+
+ err = tegra_vde_v4l2_init(vde);
+ if (err) {
+ dev_err(dev, "Failed to initialize V4L2: %d\n", err);
+ goto err_free_secure_bo;
+ }
+
+ return 0;
+
+err_free_secure_bo:
+ tegra_vde_free_bo(vde->secure_bo);
+err_pm_runtime:
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_disable(dev);
+
+ tegra_vde_iommu_deinit(vde);
+
+err_gen_free:
+ gen_pool_free(vde->iram_pool, (unsigned long)vde->iram,
+ gen_pool_size(vde->iram_pool));
+
+ return err;
+}
+
+static int tegra_vde_remove(struct platform_device *pdev)
+{
+ struct tegra_vde *vde = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ tegra_vde_v4l2_deinit(vde);
+ tegra_vde_free_bo(vde->secure_bo);
+
+ /*
+ * As it increments the RPM usage_count even on errors, we don't
+ * need to check the return value here.
+ */
+ pm_runtime_get_sync(dev);
+
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_disable(dev);
+
+ /*
+ * Balance the RPM state; the VDE power domain is left ON and the
+ * hardware is clock-gated. It's safe to reboot the machine now.
+ */
+ pm_runtime_put_noidle(dev);
+ clk_disable_unprepare(vde->clk);
+
+ tegra_vde_dmabuf_cache_unmap_all(vde);
+ tegra_vde_iommu_deinit(vde);
+
+ gen_pool_free(vde->iram_pool, (unsigned long)vde->iram,
+ gen_pool_size(vde->iram_pool));
+
+ return 0;
+}
+
+static void tegra_vde_shutdown(struct platform_device *pdev)
+{
+ /*
+ * On some devices the bootloader isn't prepared to handle a
+ * power-gated VDE on a warm reboot; the machine will hang in that
+ * case.
+ */
+ pm_runtime_get_sync(&pdev->dev);
+}
+
+static __maybe_unused int tegra_vde_pm_suspend(struct device *dev)
+{
+ struct tegra_vde *vde = dev_get_drvdata(dev);
+ int err;
+
+ mutex_lock(&vde->lock);
+
+ err = pm_runtime_force_suspend(dev);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static __maybe_unused int tegra_vde_pm_resume(struct device *dev)
+{
+ struct tegra_vde *vde = dev_get_drvdata(dev);
+ int err;
+
+ err = pm_runtime_force_resume(dev);
+ if (err < 0)
+ return err;
+
+ mutex_unlock(&vde->lock);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_vde_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_vde_runtime_suspend,
+ tegra_vde_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_vde_pm_suspend,
+ tegra_vde_pm_resume)
+};
+
+static const u32 tegra124_decoded_fmts[] = {
+ /* TBD: T124 supports only a non-standard Tegra tiled format */
+};
+
+static const struct tegra_coded_fmt_desc tegra124_coded_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .frmsize = {
+ .min_width = 16,
+ .max_width = 1920,
+ .step_width = 16,
+ .min_height = 16,
+ .max_height = 2032,
+ .step_height = 16,
+ },
+ .num_decoded_fmts = ARRAY_SIZE(tegra124_decoded_fmts),
+ .decoded_fmts = tegra124_decoded_fmts,
+ .decode_run = tegra_vde_h264_decode_run,
+ .decode_wait = tegra_vde_h264_decode_wait,
+ },
+};
+
+static const u32 tegra20_decoded_fmts[] = {
+ V4L2_PIX_FMT_YUV420M,
+ V4L2_PIX_FMT_YVU420M,
+};
+
+static const struct tegra_coded_fmt_desc tegra20_coded_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .frmsize = {
+ .min_width = 16,
+ .max_width = 1920,
+ .step_width = 16,
+ .min_height = 16,
+ .max_height = 2032,
+ .step_height = 16,
+ },
+ .num_decoded_fmts = ARRAY_SIZE(tegra20_decoded_fmts),
+ .decoded_fmts = tegra20_decoded_fmts,
+ .decode_run = tegra_vde_h264_decode_run,
+ .decode_wait = tegra_vde_h264_decode_wait,
+ },
+};
+
+static const struct tegra_vde_soc tegra124_vde_soc = {
+ .supports_ref_pic_marking = true,
+ .coded_fmts = tegra124_coded_fmts,
+ .num_coded_fmts = ARRAY_SIZE(tegra124_coded_fmts),
+};
+
+static const struct tegra_vde_soc tegra114_vde_soc = {
+ .supports_ref_pic_marking = true,
+ .coded_fmts = tegra20_coded_fmts,
+ .num_coded_fmts = ARRAY_SIZE(tegra20_coded_fmts),
+};
+
+static const struct tegra_vde_soc tegra30_vde_soc = {
+ .supports_ref_pic_marking = false,
+ .coded_fmts = tegra20_coded_fmts,
+ .num_coded_fmts = ARRAY_SIZE(tegra20_coded_fmts),
+};
+
+static const struct tegra_vde_soc tegra20_vde_soc = {
+ .supports_ref_pic_marking = false,
+ .coded_fmts = tegra20_coded_fmts,
+ .num_coded_fmts = ARRAY_SIZE(tegra20_coded_fmts),
+};
+
+static const struct of_device_id tegra_vde_of_match[] = {
+ { .compatible = "nvidia,tegra124-vde", .data = &tegra124_vde_soc },
+ { .compatible = "nvidia,tegra114-vde", .data = &tegra114_vde_soc },
+ { .compatible = "nvidia,tegra30-vde", .data = &tegra30_vde_soc },
+ { .compatible = "nvidia,tegra20-vde", .data = &tegra20_vde_soc },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_vde_of_match);
+
+static struct platform_driver tegra_vde_driver = {
+ .probe = tegra_vde_probe,
+ .remove = tegra_vde_remove,
+ .shutdown = tegra_vde_shutdown,
+ .driver = {
+ .name = "tegra-vde",
+ .of_match_table = tegra_vde_of_match,
+ .pm = &tegra_vde_pm_ops,
+ },
+};
+module_platform_driver(tegra_vde_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra Video Decoder driver");
+MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/nvidia/tegra-vde/vde.h b/drivers/media/platform/nvidia/tegra-vde/vde.h
new file mode 100644
index 000000000000..0fbb1f3d2c88
--- /dev/null
+++ b/drivers/media/platform/nvidia/tegra-vde/vde.h
@@ -0,0 +1,242 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * NVIDIA Tegra Video decoder driver
+ *
+ * Copyright (C) 2016-2019 GRATE-DRIVER project
+ */
+
+#ifndef TEGRA_VDE_H
+#define TEGRA_VDE_H
+
+#include <linux/completion.h>
+#include <linux/dma-direction.h>
+#include <linux/iova.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <media/media-device.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-dma-sg.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+
+#define ICMDQUE_WR 0x00
+#define CMDQUE_CONTROL 0x08
+#define INTR_STATUS 0x18
+#define BSE_INT_ENB 0x40
+#define BSE_CONFIG 0x44
+
+#define BSE_ICMDQUE_EMPTY BIT(3)
+#define BSE_DMA_BUSY BIT(23)
+
+#define BSEV_ALIGN SZ_1
+#define FRAMEID_ALIGN SZ_256
+#define SXE_BUFFER SZ_32K
+#define VDE_ATOM SZ_16
+
+struct clk;
+struct dma_buf;
+struct gen_pool;
+struct tegra_ctx;
+struct iommu_group;
+struct iommu_domain;
+struct reset_control;
+struct dma_buf_attachment;
+struct tegra_vde_h264_frame;
+struct tegra_vde_h264_decoder_ctx;
+
+struct tegra_video_frame {
+ struct dma_buf_attachment *y_dmabuf_attachment;
+ struct dma_buf_attachment *cb_dmabuf_attachment;
+ struct dma_buf_attachment *cr_dmabuf_attachment;
+ struct dma_buf_attachment *aux_dmabuf_attachment;
+ dma_addr_t y_addr;
+ dma_addr_t cb_addr;
+ dma_addr_t cr_addr;
+ dma_addr_t aux_addr;
+ u32 frame_num;
+ u32 flags;
+ u32 luma_atoms_pitch;
+ u32 chroma_atoms_pitch;
+};
+
+struct tegra_coded_fmt_desc {
+ u32 fourcc;
+ struct v4l2_frmsize_stepwise frmsize;
+ unsigned int num_decoded_fmts;
+ const u32 *decoded_fmts;
+ int (*decode_run)(struct tegra_ctx *ctx);
+ int (*decode_wait)(struct tegra_ctx *ctx);
+};
+
+struct tegra_vde_soc {
+ bool supports_ref_pic_marking;
+ const struct tegra_coded_fmt_desc *coded_fmts;
+ u32 num_coded_fmts;
+};
+
+struct tegra_vde_bo {
+ struct iova *iova;
+ struct sg_table sgt;
+ struct tegra_vde *vde;
+ enum dma_data_direction dma_dir;
+ unsigned long dma_attrs;
+ dma_addr_t dma_handle;
+ dma_addr_t dma_addr;
+ void *dma_cookie;
+ size_t size;
+};
+
+struct tegra_vde {
+ void __iomem *sxe;
+ void __iomem *bsev;
+ void __iomem *mbe;
+ void __iomem *ppe;
+ void __iomem *mce;
+ void __iomem *tfe;
+ void __iomem *ppb;
+ void __iomem *vdma;
+ void __iomem *frameid;
+ struct device *dev;
+ struct mutex lock;
+ struct mutex map_lock;
+ struct list_head map_list;
+ struct reset_control *rst;
+ struct reset_control *rst_mc;
+ struct gen_pool *iram_pool;
+ struct completion decode_completion;
+ struct clk *clk;
+ struct iommu_domain *domain;
+ struct iommu_group *group;
+ struct iova_domain iova;
+ struct iova *iova_resv_static_addresses;
+ struct iova *iova_resv_last_page;
+ const struct tegra_vde_soc *soc;
+ struct tegra_vde_bo *secure_bo;
+ dma_addr_t bitstream_data_addr;
+ dma_addr_t iram_lists_addr;
+ u32 *iram;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_m2m_dev *m2m;
+ struct media_device mdev;
+ struct video_device vdev;
+ struct mutex v4l2_lock;
+ struct workqueue_struct *wq;
+ struct tegra_video_frame frames[V4L2_H264_NUM_DPB_ENTRIES + 1];
+};
+
+int tegra_vde_alloc_bo(struct tegra_vde *vde,
+ struct tegra_vde_bo **ret_bo,
+ enum dma_data_direction dma_dir,
+ size_t size);
+void tegra_vde_free_bo(struct tegra_vde_bo *bo);
+
+struct tegra_ctx_h264 {
+ const struct v4l2_ctrl_h264_decode_params *decode_params;
+ const struct v4l2_ctrl_h264_sps *sps;
+ const struct v4l2_ctrl_h264_pps *pps;
+};
+
+struct tegra_ctx {
+ struct tegra_vde *vde;
+ struct tegra_ctx_h264 h264;
+ struct work_struct work;
+ struct v4l2_fh fh;
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_format coded_fmt;
+ struct v4l2_format decoded_fmt;
+ const struct tegra_coded_fmt_desc *coded_fmt_desc;
+ struct v4l2_ctrl *ctrls[];
+};
+
+struct tegra_m2m_buffer {
+ struct v4l2_m2m_buffer m2m;
+ struct dma_buf_attachment *a[VB2_MAX_PLANES];
+ dma_addr_t dma_base[VB2_MAX_PLANES];
+ dma_addr_t dma_addr[VB2_MAX_PLANES];
+ struct iova *iova[VB2_MAX_PLANES];
+ struct tegra_vde_bo *aux;
+ bool b_frame;
+};
+
+static inline struct tegra_m2m_buffer *
+vb_to_tegra_buf(struct vb2_buffer *vb)
+{
+ struct v4l2_m2m_buffer *m2m = container_of(vb, struct v4l2_m2m_buffer,
+ vb.vb2_buf);
+
+ return container_of(m2m, struct tegra_m2m_buffer, m2m);
+}
+
+void tegra_vde_prepare_control_data(struct tegra_ctx *ctx, u32 id);
+
+void tegra_vde_writel(struct tegra_vde *vde, u32 value, void __iomem *base,
+ u32 offset);
+u32 tegra_vde_readl(struct tegra_vde *vde, void __iomem *base, u32 offset);
+void tegra_vde_set_bits(struct tegra_vde *vde, u32 mask, void __iomem *base,
+ u32 offset);
+
+int tegra_vde_h264_decode_run(struct tegra_ctx *ctx);
+int tegra_vde_h264_decode_wait(struct tegra_ctx *ctx);
+
+int tegra_vde_iommu_init(struct tegra_vde *vde);
+void tegra_vde_iommu_deinit(struct tegra_vde *vde);
+int tegra_vde_iommu_map(struct tegra_vde *vde,
+ struct sg_table *sgt,
+ struct iova **iovap,
+ size_t size);
+void tegra_vde_iommu_unmap(struct tegra_vde *vde, struct iova *iova);
+
+int tegra_vde_dmabuf_cache_map(struct tegra_vde *vde,
+ struct dma_buf *dmabuf,
+ enum dma_data_direction dma_dir,
+ struct dma_buf_attachment **ap,
+ dma_addr_t *addrp);
+void tegra_vde_dmabuf_cache_unmap(struct tegra_vde *vde,
+ struct dma_buf_attachment *a,
+ bool release);
+void tegra_vde_dmabuf_cache_unmap_sync(struct tegra_vde *vde);
+void tegra_vde_dmabuf_cache_unmap_all(struct tegra_vde *vde);
+
+static __maybe_unused char const *
+tegra_vde_reg_base_name(struct tegra_vde *vde, void __iomem *base)
+{
+ if (vde->sxe == base)
+ return "SXE";
+
+ if (vde->bsev == base)
+ return "BSEV";
+
+ if (vde->mbe == base)
+ return "MBE";
+
+ if (vde->ppe == base)
+ return "PPE";
+
+ if (vde->mce == base)
+ return "MCE";
+
+ if (vde->tfe == base)
+ return "TFE";
+
+ if (vde->ppb == base)
+ return "PPB";
+
+ if (vde->vdma == base)
+ return "VDMA";
+
+ if (vde->frameid == base)
+ return "FRAMEID";
+
+ return "???";
+}
+
+int tegra_vde_v4l2_init(struct tegra_vde *vde);
+void tegra_vde_v4l2_deinit(struct tegra_vde *vde);
+
+#endif /* TEGRA_VDE_H */
diff --git a/drivers/media/platform/nxp/Kconfig b/drivers/media/platform/nxp/Kconfig
new file mode 100644
index 000000000000..28f2bafc14d2
--- /dev/null
+++ b/drivers/media/platform/nxp/Kconfig
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+# V4L drivers
+
+comment "NXP media platform drivers"
+
+config VIDEO_IMX_MIPI_CSIS
+ tristate "NXP MIPI CSI-2 CSIS receiver found on i.MX7 and i.MX8 models"
+ depends on ARCH_MXC || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ select VIDEO_V4L2_SUBDEV_API
+ default n
+ help
+ Video4Linux2 sub-device driver for the MIPI CSI-2 CSIS receiver
+ v3.3/v3.6.3 found on some i.MX7 and i.MX8 SoCs.
+
+config VIDEO_VIU
+ tristate "NXP VIU Video Driver"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && (PPC_MPC512x || COMPILE_TEST) && I2C
+ select VIDEOBUF_DMA_CONTIG
+ default y
+ help
+ Support for the Freescale VIU video driver. This device captures
+ video data or overlays video on the DIU frame buffer.
+
+ Say Y here if you want to enable the VIU device on MPC5121e Rev2+.
+ If in doubt, say N.
+
+# mem2mem drivers
+
+config VIDEO_IMX_PXP
+ tristate "NXP i.MX Pixel Pipeline (PXP)"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV && (ARCH_MXC || COMPILE_TEST)
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ The i.MX Pixel Pipeline is a memory-to-memory engine for scaling,
+ color space conversion, and rotation.
+
+config VIDEO_MX2_EMMAPRP
+ tristate "NXP MX2 eMMa-PrP support"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on SOC_IMX27 || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ MX2X chips have a PrP that can be used to process buffers from
+ memory to memory. Operations include resizing and format
+ conversion.
+
+source "drivers/media/platform/nxp/imx-jpeg/Kconfig"
diff --git a/drivers/media/platform/nxp/Makefile b/drivers/media/platform/nxp/Makefile
new file mode 100644
index 000000000000..efc38c6578ce
--- /dev/null
+++ b/drivers/media/platform/nxp/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-y += imx-jpeg/
+
+obj-$(CONFIG_VIDEO_IMX_MIPI_CSIS) += imx-mipi-csis.o
+obj-$(CONFIG_VIDEO_IMX_PXP) += imx-pxp.o
+obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o
+obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/nxp/fsl-viu.c
index a4bfa70b49b2..afc96f6db2a1 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/nxp/fsl-viu.c
@@ -1407,7 +1407,7 @@ static int viu_of_probe(struct platform_device *op)
}
/* Prepare our private structure */
- viu_dev = devm_kzalloc(&op->dev, sizeof(struct viu_dev), GFP_ATOMIC);
+ viu_dev = devm_kzalloc(&op->dev, sizeof(struct viu_dev), GFP_KERNEL);
if (!viu_dev) {
dev_err(&op->dev, "Can't allocate private structure\n");
ret = -ENOMEM;
diff --git a/drivers/media/platform/imx-jpeg/Kconfig b/drivers/media/platform/nxp/imx-jpeg/Kconfig
index 2fdd648cda80..5214dcd7fab5 100644
--- a/drivers/media/platform/imx-jpeg/Kconfig
+++ b/drivers/media/platform/nxp/imx-jpeg/Kconfig
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
config VIDEO_IMX8_JPEG
tristate "IMX8 JPEG Encoder/Decoder"
+ depends on V4L_MEM2MEM_DRIVERS
depends on ARCH_MXC || COMPILE_TEST
- depends on VIDEO_DEV && VIDEO_V4L2
+ depends on VIDEO_DEV
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
select V4L2_JPEG_HELPER
diff --git a/drivers/media/platform/imx-jpeg/Makefile b/drivers/media/platform/nxp/imx-jpeg/Makefile
index bf19c82e61b4..bf19c82e61b4 100644
--- a/drivers/media/platform/imx-jpeg/Makefile
+++ b/drivers/media/platform/nxp/imx-jpeg/Makefile
diff --git a/drivers/media/platform/imx-jpeg/mxc-jpeg-hw.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.c
index 29c604b1b179..29c604b1b179 100644
--- a/drivers/media/platform/imx-jpeg/mxc-jpeg-hw.c
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.c
diff --git a/drivers/media/platform/imx-jpeg/mxc-jpeg-hw.h b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h
index ae70d3a0dc24..ae70d3a0dc24 100644
--- a/drivers/media/platform/imx-jpeg/mxc-jpeg-hw.h
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h
diff --git a/drivers/media/platform/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
index 4ca96cf9def7..d1ec1f4b506b 100644
--- a/drivers/media/platform/imx-jpeg/mxc-jpeg.c
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
@@ -35,7 +35,7 @@
* it, enable dynamic debug for this module and:
* echo 1 > /sys/module/mxc_jpeg_encdec/parameters/jpeg_tracing
*
- * This is inspired by the drivers/media/platform/s5p-jpeg driver
+ * This is inspired by the drivers/media/platform/samsung/s5p-jpeg driver
*
* Copyright 2018-2019 NXP
*/
@@ -96,7 +96,7 @@ static const struct mxc_jpeg_fmt mxc_formats[] = {
},
{
.name = "YUV420", /* 1st plane = Y, 2nd plane = UV */
- .fourcc = V4L2_PIX_FMT_NV12,
+ .fourcc = V4L2_PIX_FMT_NV12M,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
.nc = 3,
.depth = 12, /* 6 bytes (4Y + UV) for 4 pixels */
@@ -404,7 +404,7 @@ static enum mxc_jpeg_image_format mxc_jpeg_fourcc_to_imgfmt(u32 fourcc)
return MXC_JPEG_GRAY;
case V4L2_PIX_FMT_YUYV:
return MXC_JPEG_YUV422;
- case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV12M:
return MXC_JPEG_YUV420;
case V4L2_PIX_FMT_YUV24:
return MXC_JPEG_YUV444;
@@ -673,7 +673,7 @@ static int mxc_jpeg_fixup_sof(struct mxc_jpeg_sof *sof,
_bswap16(&sof->width);
switch (fourcc) {
- case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV12M:
sof->components_no = 3;
sof->comp[0].v = 0x2;
sof->comp[0].h = 0x2;
@@ -709,7 +709,7 @@ static int mxc_jpeg_fixup_sos(struct mxc_jpeg_sos *sos,
u8 *sof_u8 = (u8 *)sos;
switch (fourcc) {
- case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV12M:
sos->components_no = 3;
break;
case V4L2_PIX_FMT_YUYV:
@@ -947,8 +947,13 @@ static void mxc_jpeg_device_run(void *priv)
v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, true);
jpeg_src_buf = vb2_to_mxc_buf(&src_buf->vb2_buf);
+ if (q_data_cap->fmt->colplanes != dst_buf->vb2_buf.num_planes) {
+ dev_err(dev, "Capture format %s has %d planes, but capture buffer has %d planes\n",
+ q_data_cap->fmt->name, q_data_cap->fmt->colplanes,
+ dst_buf->vb2_buf.num_planes);
+ jpeg_src_buf->jpeg_parse_error = true;
+ }
if (jpeg_src_buf->jpeg_parse_error) {
- jpeg->slot_data[ctx->slot].used = false;
v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
@@ -992,6 +997,20 @@ end:
spin_unlock_irqrestore(&ctx->mxc_jpeg->hw_lock, flags);
}
+static void mxc_jpeg_set_last_buffer_dequeued(struct mxc_jpeg_ctx *ctx)
+{
+ struct vb2_queue *q;
+
+ ctx->stopped = 1;
+ q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
+ if (!list_empty(&q->done_list))
+ return;
+
+ q->last_buffer_dequeued = true;
+ wake_up(&q->done_wq);
+ ctx->stopped = 0;
+}
+
static int mxc_jpeg_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *cmd)
{
@@ -1009,6 +1028,7 @@ static int mxc_jpeg_decoder_cmd(struct file *file, void *priv,
if (v4l2_m2m_num_src_bufs_ready(fh->m2m_ctx) == 0) {
/* No more src bufs, notify app EOS */
notify_eos(ctx);
+ mxc_jpeg_set_last_buffer_dequeued(ctx);
} else {
/* will send EOS later*/
ctx->stopping = 1;
@@ -1035,6 +1055,7 @@ static int mxc_jpeg_encoder_cmd(struct file *file, void *priv,
if (v4l2_m2m_num_src_bufs_ready(fh->m2m_ctx) == 0) {
/* No more src bufs, notify app EOS */
notify_eos(ctx);
+ mxc_jpeg_set_last_buffer_dequeued(ctx);
} else {
/* will send EOS later*/
ctx->stopping = 1;
@@ -1111,6 +1132,10 @@ static void mxc_jpeg_stop_streaming(struct vb2_queue *q)
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
}
pm_runtime_put_sync(&ctx->mxc_jpeg->pdev->dev);
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ ctx->stopping = 0;
+ ctx->stopped = 0;
+ }
}
static int mxc_jpeg_valid_comp_id(struct device *dev,
@@ -1183,7 +1208,7 @@ static void mxc_jpeg_bytesperline(struct mxc_jpeg_q_data *q,
/* bytesperline unused for compressed formats */
q->bytesperline[0] = 0;
q->bytesperline[1] = 0;
- } else if (q->fmt->fourcc == V4L2_PIX_FMT_NV12) {
+ } else if (q->fmt->fourcc == V4L2_PIX_FMT_NV12M) {
/* When the image format is planar the bytesperline value
* applies to the first plane and is divided by the same factor
* as the width field for the other planes
@@ -1215,7 +1240,7 @@ static void mxc_jpeg_sizeimage(struct mxc_jpeg_q_data *q)
} else {
q->sizeimage[0] = q->bytesperline[0] * q->h;
q->sizeimage[1] = 0;
- if (q->fmt->fourcc == V4L2_PIX_FMT_NV12)
+ if (q->fmt->fourcc == V4L2_PIX_FMT_NV12M)
q->sizeimage[1] = q->sizeimage[0] / 2;
}
}
@@ -1402,12 +1427,29 @@ static int mxc_jpeg_buf_prepare(struct vb2_buffer *vb)
return 0;
}
+static void mxc_jpeg_buf_finish(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct mxc_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_queue *q = vb->vb2_queue;
+
+ if (V4L2_TYPE_IS_OUTPUT(vb->type))
+ return;
+ if (!ctx->stopped)
+ return;
+ if (list_empty(&q->done_list)) {
+ vbuf->flags |= V4L2_BUF_FLAG_LAST;
+ ctx->stopped = 0;
+ }
+}
+
static const struct vb2_ops mxc_jpeg_qops = {
.queue_setup = mxc_jpeg_queue_setup,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
.buf_out_validate = mxc_jpeg_buf_out_validate,
.buf_prepare = mxc_jpeg_buf_prepare,
+ .buf_finish = mxc_jpeg_buf_finish,
.start_streaming = mxc_jpeg_start_streaming,
.stop_streaming = mxc_jpeg_stop_streaming,
.buf_queue = mxc_jpeg_buf_queue,
@@ -1843,14 +1885,14 @@ static int mxc_jpeg_dqbuf(struct file *file, void *priv,
int ret;
dev_dbg(dev, "DQBUF type=%d, index=%d", buf->type, buf->index);
- if (ctx->stopping == 1 && num_src_ready == 0) {
+ if (ctx->stopping == 1 && num_src_ready == 0) {
/* No more src bufs, notify app EOS */
notify_eos(ctx);
ctx->stopping = 0;
+ mxc_jpeg_set_last_buffer_dequeued(ctx);
}
ret = v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
-
return ret;
}
@@ -2016,7 +2058,6 @@ static int mxc_jpeg_probe(struct platform_device *pdev)
for (slot = 0; slot < MXC_MAX_SLOTS; slot++) {
dec_irq = platform_get_irq(pdev, slot);
if (dec_irq < 0) {
- dev_err(&pdev->dev, "Failed to get irq %d\n", dec_irq);
ret = dec_irq;
goto err_irq;
}
diff --git a/drivers/media/platform/imx-jpeg/mxc-jpeg.h b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
index 9fb2a5aaa941..f53f004ba851 100644
--- a/drivers/media/platform/imx-jpeg/mxc-jpeg.h
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
@@ -91,6 +91,7 @@ struct mxc_jpeg_ctx {
struct v4l2_fh fh;
enum mxc_jpeg_enc_state enc_state;
unsigned int stopping;
+ unsigned int stopped;
unsigned int slot;
};
diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/media/platform/nxp/imx-mipi-csis.c
index 2b73fa55c938..0a72734db55e 100644
--- a/drivers/staging/media/imx/imx7-mipi-csis.c
+++ b/drivers/media/platform/nxp/imx-mipi-csis.c
@@ -1,6 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Freescale i.MX7 SoC series MIPI-CSI V3.3 receiver driver
+ * Samsung CSIS MIPI CSI-2 receiver driver.
+ *
+ * The Samsung CSIS IP is a MIPI CSI-2 receiver found in various NXP i.MX7 and
+ * i.MX8 SoCs. The i.MX7 features version 3.3 of the IP, while i.MX8 features
+ * version 3.6.3.
*
* Copyright (C) 2019 Linaro Ltd
* Copyright (C) 2015-2016 Freescale Semiconductor, Inc. All Rights Reserved.
@@ -31,8 +35,7 @@
#include <media/v4l2-mc.h>
#include <media/v4l2-subdev.h>
-#define CSIS_DRIVER_NAME "imx7-mipi-csis"
-#define CSIS_SUBDEV_NAME CSIS_DRIVER_NAME
+#define CSIS_DRIVER_NAME "imx-mipi-csis"
#define CSIS_PAD_SINK 0
#define CSIS_PAD_SOURCE 1
@@ -170,6 +173,7 @@
#define MIPI_CSIS_ISPCFG_PIXEL_MODE_SINGLE (0 << 12)
#define MIPI_CSIS_ISPCFG_PIXEL_MODE_DUAL (1 << 12)
#define MIPI_CSIS_ISPCFG_PIXEL_MODE_QUAD (2 << 12) /* i.MX8M[MNP] only */
+#define MIPI_CSIS_ISPCFG_PIXEL_MASK (3 << 12)
#define MIPI_CSIS_ISPCFG_ALIGN_32BIT BIT(11)
#define MIPI_CSIS_ISPCFG_FMT(fmt) ((fmt) << 2)
#define MIPI_CSIS_ISPCFG_FMT_MASK (0x3f << 2)
@@ -211,6 +215,8 @@
#define MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_FALL BIT(4)
#define MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_RISE BIT(0)
+#define MIPI_CSIS_FRAME_COUNTER_CH(n) (0x0100 + (n) * 4)
+
/* Non-image packet data buffers */
#define MIPI_CSIS_PKTDATA_ODD 0x2000
#define MIPI_CSIS_PKTDATA_EVEN 0x3000
@@ -311,27 +317,30 @@ struct csi_state {
struct reset_control *mrst;
struct regulator *mipi_phy_regulator;
const struct mipi_csis_info *info;
- u8 index;
struct v4l2_subdev sd;
struct media_pad pads[CSIS_PADS_NUM];
struct v4l2_async_notifier notifier;
struct v4l2_subdev *src_sd;
- struct v4l2_fwnode_bus_mipi_csi2 bus;
+ struct v4l2_mbus_config_mipi_csi2 bus;
u32 clk_frequency;
u32 hs_settle;
u32 clk_settle;
struct mutex lock; /* Protect csis_fmt, format_mbus and state */
const struct csis_pix_format *csis_fmt;
- struct v4l2_mbus_framefmt format_mbus;
+ struct v4l2_mbus_framefmt format_mbus[CSIS_PADS_NUM];
u32 state;
spinlock_t slock; /* Protect events */
struct mipi_csis_event events[MIPI_CSIS_NUM_EVENTS];
struct dentry *debugfs_root;
- bool debug;
+ struct {
+ bool enable;
+ u32 hs_settle;
+ u32 clk_settle;
+ } debug;
};
/* -----------------------------------------------------------------------------
@@ -340,6 +349,7 @@ struct csi_state {
struct csis_pix_format {
u32 code;
+ u32 output;
u32 data_type;
u8 width;
};
@@ -348,84 +358,116 @@ static const struct csis_pix_format mipi_csis_formats[] = {
/* YUV formats. */
{
.code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .output = MEDIA_BUS_FMT_UYVY8_1X16,
.data_type = MIPI_CSI2_DATA_TYPE_YUV422_8,
.width = 16,
},
+ /* RGB formats. */
+ {
+ .code = MEDIA_BUS_FMT_RGB565_1X16,
+ .output = MEDIA_BUS_FMT_RGB565_1X16,
+ .data_type = MIPI_CSI2_DATA_TYPE_RGB565,
+ .width = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_BGR888_1X24,
+ .output = MEDIA_BUS_FMT_RGB888_1X24,
+ .data_type = MIPI_CSI2_DATA_TYPE_RGB888,
+ .width = 24,
+ },
/* RAW (Bayer and greyscale) formats. */
{
.code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .output = MEDIA_BUS_FMT_SBGGR8_1X8,
.data_type = MIPI_CSI2_DATA_TYPE_RAW8,
.width = 8,
}, {
.code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .output = MEDIA_BUS_FMT_SGBRG8_1X8,
.data_type = MIPI_CSI2_DATA_TYPE_RAW8,
.width = 8,
}, {
.code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .output = MEDIA_BUS_FMT_SGRBG8_1X8,
.data_type = MIPI_CSI2_DATA_TYPE_RAW8,
.width = 8,
}, {
.code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .output = MEDIA_BUS_FMT_SRGGB8_1X8,
.data_type = MIPI_CSI2_DATA_TYPE_RAW8,
.width = 8,
}, {
.code = MEDIA_BUS_FMT_Y8_1X8,
+ .output = MEDIA_BUS_FMT_Y8_1X8,
.data_type = MIPI_CSI2_DATA_TYPE_RAW8,
.width = 8,
}, {
.code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .output = MEDIA_BUS_FMT_SBGGR10_1X10,
.data_type = MIPI_CSI2_DATA_TYPE_RAW10,
.width = 10,
}, {
.code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .output = MEDIA_BUS_FMT_SGBRG10_1X10,
.data_type = MIPI_CSI2_DATA_TYPE_RAW10,
.width = 10,
}, {
.code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .output = MEDIA_BUS_FMT_SGRBG10_1X10,
.data_type = MIPI_CSI2_DATA_TYPE_RAW10,
.width = 10,
}, {
.code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .output = MEDIA_BUS_FMT_SRGGB10_1X10,
.data_type = MIPI_CSI2_DATA_TYPE_RAW10,
.width = 10,
}, {
.code = MEDIA_BUS_FMT_Y10_1X10,
+ .output = MEDIA_BUS_FMT_Y10_1X10,
.data_type = MIPI_CSI2_DATA_TYPE_RAW10,
.width = 10,
}, {
.code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .output = MEDIA_BUS_FMT_SBGGR12_1X12,
.data_type = MIPI_CSI2_DATA_TYPE_RAW12,
.width = 12,
}, {
.code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .output = MEDIA_BUS_FMT_SGBRG12_1X12,
.data_type = MIPI_CSI2_DATA_TYPE_RAW12,
.width = 12,
}, {
.code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .output = MEDIA_BUS_FMT_SGRBG12_1X12,
.data_type = MIPI_CSI2_DATA_TYPE_RAW12,
.width = 12,
}, {
.code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .output = MEDIA_BUS_FMT_SRGGB12_1X12,
.data_type = MIPI_CSI2_DATA_TYPE_RAW12,
.width = 12,
}, {
.code = MEDIA_BUS_FMT_Y12_1X12,
+ .output = MEDIA_BUS_FMT_Y12_1X12,
.data_type = MIPI_CSI2_DATA_TYPE_RAW12,
.width = 12,
}, {
.code = MEDIA_BUS_FMT_SBGGR14_1X14,
+ .output = MEDIA_BUS_FMT_SBGGR14_1X14,
.data_type = MIPI_CSI2_DATA_TYPE_RAW14,
.width = 14,
}, {
.code = MEDIA_BUS_FMT_SGBRG14_1X14,
+ .output = MEDIA_BUS_FMT_SGBRG14_1X14,
.data_type = MIPI_CSI2_DATA_TYPE_RAW14,
.width = 14,
}, {
.code = MEDIA_BUS_FMT_SGRBG14_1X14,
+ .output = MEDIA_BUS_FMT_SGRBG14_1X14,
.data_type = MIPI_CSI2_DATA_TYPE_RAW14,
.width = 14,
}, {
.code = MEDIA_BUS_FMT_SRGGB14_1X14,
+ .output = MEDIA_BUS_FMT_SRGGB14_1X14,
.data_type = MIPI_CSI2_DATA_TYPE_RAW14,
.width = 14,
}
@@ -493,12 +535,30 @@ static void mipi_csis_system_enable(struct csi_state *state, int on)
/* Called with the state.lock mutex held */
static void __mipi_csis_set_format(struct csi_state *state)
{
- struct v4l2_mbus_framefmt *mf = &state->format_mbus;
+ struct v4l2_mbus_framefmt *mf = &state->format_mbus[CSIS_PAD_SINK];
u32 val;
/* Color format */
val = mipi_csis_read(state, MIPI_CSIS_ISP_CONFIG_CH(0));
- val &= ~(MIPI_CSIS_ISPCFG_ALIGN_32BIT | MIPI_CSIS_ISPCFG_FMT_MASK);
+ val &= ~(MIPI_CSIS_ISPCFG_ALIGN_32BIT | MIPI_CSIS_ISPCFG_FMT_MASK
+ | MIPI_CSIS_ISPCFG_PIXEL_MASK);
+
+ /*
+ * YUV 4:2:2 can be transferred with 8 or 16 bits per clock sample
+ * (referred to in the documentation as single and dual pixel modes
+ * respectively, although the 8-bit mode transfers half a pixel per
+ * clock sample and the 16-bit mode one pixel). While both modes work
+ * when the CSIS is connected to a receiver that supports either option,
+ * single pixel mode requires clock rates twice as high. As all SoCs
+ * that integrate the CSIS can operate in 16-bit mode, and some do
+ * not support 8-bit mode (this is the case for the i.MX8MP), use dual
+ * pixel mode unconditionally.
+ *
+ * TODO: Verify which other formats require DUAL (or QUAD) modes.
+ */
+ if (state->csis_fmt->data_type == MIPI_CSI2_DATA_TYPE_YUV422_8)
+ val |= MIPI_CSIS_ISPCFG_PIXEL_MODE_DUAL;
+
val |= MIPI_CSIS_ISPCFG_FMT(state->csis_fmt->data_type);
mipi_csis_write(state, MIPI_CSIS_ISP_CONFIG_CH(0), val);
@@ -541,6 +601,18 @@ static int mipi_csis_calculate_params(struct csi_state *state)
dev_dbg(state->dev, "lane rate %u, Tclk_settle %u, Ths_settle %u\n",
lane_rate, state->clk_settle, state->hs_settle);
+ if (state->debug.hs_settle < 0xff) {
+ dev_dbg(state->dev, "overriding Ths_settle with %u\n",
+ state->debug.hs_settle);
+ state->hs_settle = state->debug.hs_settle;
+ }
+
+ if (state->debug.clk_settle < 4) {
+ dev_dbg(state->dev, "overriding Tclk_settle with %u\n",
+ state->debug.clk_settle);
+ state->clk_settle = state->debug.clk_settle;
+ }
+
return 0;
}
@@ -657,7 +729,7 @@ static irqreturn_t mipi_csis_irq_handler(int irq, void *dev_id)
spin_lock_irqsave(&state->slock, flags);
/* Update the event/error counters */
- if ((status & MIPI_CSIS_INT_SRC_ERRORS) || state->debug) {
+ if ((status & MIPI_CSIS_INT_SRC_ERRORS) || state->debug.enable) {
for (i = 0; i < MIPI_CSIS_NUM_EVENTS; i++) {
struct mipi_csis_event *event = &state->events[i];
@@ -747,7 +819,7 @@ static void mipi_csis_log_counters(struct csi_state *state, bool non_errors)
spin_lock_irqsave(&state->slock, flags);
for (i = 0; i < num_events; ++i) {
- if (state->events[i].counter > 0 || state->debug)
+ if (state->events[i].counter > 0 || state->debug.enable)
dev_info(state->dev, "%s events: %d\n",
state->events[i].name,
state->events[i].counter);
@@ -773,6 +845,7 @@ static int mipi_csis_dump_regs(struct csi_state *state)
{ MIPI_CSIS_SDW_CONFIG_CH(0), "SDW_CONFIG_CH0" },
{ MIPI_CSIS_SDW_RESOL_CH(0), "SDW_RESOL_CH0" },
{ MIPI_CSIS_DBG_CTRL, "DBG_CTRL" },
+ { MIPI_CSIS_FRAME_COUNTER_CH(0), "FRAME_COUNTER_CH0" },
};
unsigned int i;
@@ -798,12 +871,19 @@ DEFINE_SHOW_ATTRIBUTE(mipi_csis_dump_regs);
static void mipi_csis_debugfs_init(struct csi_state *state)
{
+ state->debug.hs_settle = UINT_MAX;
+ state->debug.clk_settle = UINT_MAX;
+
state->debugfs_root = debugfs_create_dir(dev_name(state->dev), NULL);
debugfs_create_bool("debug_enable", 0600, state->debugfs_root,
- &state->debug);
+ &state->debug.enable);
debugfs_create_file("dump_regs", 0600, state->debugfs_root, state,
&mipi_csis_dump_regs_fops);
+ debugfs_create_u32("tclk_settle", 0600, state->debugfs_root,
+ &state->debug.clk_settle);
+ debugfs_create_u32("ths_settle", 0600, state->debugfs_root,
+ &state->debug.hs_settle);
}
static void mipi_csis_debugfs_exit(struct csi_state *state)
@@ -864,7 +944,7 @@ static int mipi_csis_s_stream(struct v4l2_subdev *sd, int enable)
ret = 0;
mipi_csis_stop_stream(state);
state->state &= ~ST_STREAMING;
- if (state->debug)
+ if (state->debug.enable)
mipi_csis_log_counters(state, true);
}
@@ -887,7 +967,7 @@ mipi_csis_get_format(struct csi_state *state,
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_format(&state->sd, sd_state, pad);
- return &state->format_mbus;
+ return &state->format_mbus[pad];
}
static int mipi_csis_init_cfg(struct v4l2_subdev *sd,
@@ -1038,6 +1118,10 @@ static int mipi_csis_set_fmt(struct v4l2_subdev *sd,
fmt->code = csis_fmt->code;
fmt->width = sdformat->format.width;
fmt->height = sdformat->format.height;
+ fmt->colorspace = sdformat->format.colorspace;
+ fmt->quantization = sdformat->format.quantization;
+ fmt->xfer_func = sdformat->format.xfer_func;
+ fmt->ycbcr_enc = sdformat->format.ycbcr_enc;
sdformat->format = *fmt;
@@ -1046,6 +1130,9 @@ static int mipi_csis_set_fmt(struct v4l2_subdev *sd,
CSIS_PAD_SOURCE);
*fmt = sdformat->format;
+ /* The format on the source pad might change due to unpacking. */
+ fmt->code = csis_fmt->output;
+
/* Store the CSIS format descriptor for active formats. */
if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
state->csis_fmt = csis_fmt;
@@ -1061,7 +1148,7 @@ static int mipi_csis_log_status(struct v4l2_subdev *sd)
mutex_lock(&state->lock);
mipi_csis_log_counters(state, true);
- if (state->debug && (state->state & ST_POWERED))
+ if (state->debug.enable && (state->state & ST_POWERED))
mipi_csis_dump_regs(state);
mutex_unlock(&state->lock);
@@ -1303,8 +1390,8 @@ static int mipi_csis_subdev_init(struct csi_state *state)
v4l2_subdev_init(sd, &mipi_csis_subdev_ops);
sd->owner = THIS_MODULE;
- snprintf(sd->name, sizeof(sd->name), "%s.%d",
- CSIS_SUBDEV_NAME, state->index);
+ snprintf(sd->name, sizeof(sd->name), "csis-%s",
+ dev_name(state->dev));
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
sd->ctrl_handler = NULL;
@@ -1491,4 +1578,4 @@ module_platform_driver(mipi_csis_driver);
MODULE_DESCRIPTION("i.MX7 & i.MX8 MIPI CSI-2 receiver driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:imx7-mipi-csi2");
+MODULE_ALIAS("platform:imx-mipi-csi2");
diff --git a/drivers/media/platform/imx-pxp.c b/drivers/media/platform/nxp/imx-pxp.c
index 689ae5e6ac62..689ae5e6ac62 100644
--- a/drivers/media/platform/imx-pxp.c
+++ b/drivers/media/platform/nxp/imx-pxp.c
diff --git a/drivers/media/platform/imx-pxp.h b/drivers/media/platform/nxp/imx-pxp.h
index 44f95c749d2e..44f95c749d2e 100644
--- a/drivers/media/platform/imx-pxp.h
+++ b/drivers/media/platform/nxp/imx-pxp.h
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/nxp/mx2_emmaprp.c
index 3ce84d0f078c..3ce84d0f078c 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/nxp/mx2_emmaprp.c
diff --git a/drivers/media/platform/qcom/Kconfig b/drivers/media/platform/qcom/Kconfig
new file mode 100644
index 000000000000..cc5799b9ea00
--- /dev/null
+++ b/drivers/media/platform/qcom/Kconfig
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Qualcomm media platform drivers"
+
+source "drivers/media/platform/qcom/camss/Kconfig"
+source "drivers/media/platform/qcom/venus/Kconfig"
diff --git a/drivers/media/platform/qcom/Makefile b/drivers/media/platform/qcom/Makefile
new file mode 100644
index 000000000000..4f055c396e04
--- /dev/null
+++ b/drivers/media/platform/qcom/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += camss/
+obj-y += venus/
diff --git a/drivers/media/platform/qcom/camss/Kconfig b/drivers/media/platform/qcom/camss/Kconfig
new file mode 100644
index 000000000000..4eda48cb1adf
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/Kconfig
@@ -0,0 +1,9 @@
+config VIDEO_QCOM_CAMSS
+ tristate "Qualcomm V4L2 Camera Subsystem driver"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV
+ depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_DMA_SG
+ select V4L2_FWNODE
diff --git a/drivers/media/platform/qcom/camss/Makefile b/drivers/media/platform/qcom/camss/Makefile
index 0752c46ea37b..4e2222358973 100644
--- a/drivers/media/platform/qcom/camss/Makefile
+++ b/drivers/media/platform/qcom/camss/Makefile
@@ -6,7 +6,7 @@ qcom-camss-objs += \
camss-csid.o \
camss-csid-4-1.o \
camss-csid-4-7.o \
- camss-csid-170.o \
+ camss-csid-gen2.o \
camss-csiphy-2ph-1-0.o \
camss-csiphy-3ph-1-0.o \
camss-csiphy.o \
@@ -15,6 +15,7 @@ qcom-camss-objs += \
camss-vfe-4-7.o \
camss-vfe-4-8.o \
camss-vfe-170.o \
+ camss-vfe-480.o \
camss-vfe-gen1.o \
camss-vfe.o \
camss-video.o \
diff --git a/drivers/media/platform/qcom/camss/camss-csid-170.c b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
index ac22ff29d2a9..2031bde13a93 100644
--- a/drivers/media/platform/qcom/camss/camss-csid-170.c
+++ b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
@@ -21,7 +21,7 @@
* interface support. As a result of that it has an
* alternate register layout.
*/
-#define IS_LITE (csid->id == 2 ? 1 : 0)
+#define IS_LITE (csid->id >= 2 ? 1 : 0)
#define CSID_HW_VERSION 0x0
#define HW_VERSION_STEPPING 0
@@ -105,7 +105,8 @@
#define CSID_RDI_CTRL(rdi) ((IS_LITE ? 0x208 : 0x308)\
+ 0x100 * (rdi))
#define RDI_CTRL_HALT_CMD 0
-#define ALT_CMD_RESUME_AT_FRAME_BOUNDARY 1
+#define HALT_CMD_HALT_AT_FRAME_BOUNDARY 0
+#define HALT_CMD_RESUME_AT_FRAME_BOUNDARY 1
#define RDI_CTRL_HALT_MODE 2
#define CSID_RDI_FRM_DROP_PATTERN(rdi) ((IS_LITE ? 0x20C : 0x30C)\
@@ -262,6 +263,13 @@ static const struct csid_format csid_formats[] = {
1,
},
{
+ MEDIA_BUS_FMT_Y8_1X8,
+ DATA_TYPE_RAW_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 1,
+ },
+ {
MEDIA_BUS_FMT_Y10_1X10,
DATA_TYPE_RAW_10BIT,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
@@ -366,7 +374,7 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable)
val |= input_format->width & 0x1fff << TPG_DT_n_CFG_0_FRAME_WIDTH;
writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_0(0));
- val = DATA_TYPE_RAW_10BIT << TPG_DT_n_CFG_1_DATA_TYPE;
+ val = format->data_type << TPG_DT_n_CFG_1_DATA_TYPE;
writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_1(0));
val = tg->mode << TPG_DT_n_CFG_2_PAYLOAD_MODE;
@@ -382,8 +390,9 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable)
val = 1 << RDI_CFG0_BYTE_CNTR_EN;
val |= 1 << RDI_CFG0_FORMAT_MEASURE_EN;
val |= 1 << RDI_CFG0_TIMESTAMP_EN;
+ /* note: for non-RDI path, this should be format->decode_format */
val |= DECODE_FORMAT_PAYLOAD_ONLY << RDI_CFG0_DECODE_FORMAT;
- val |= DATA_TYPE_RAW_10BIT << RDI_CFG0_DATA_TYPE;
+ val |= format->data_type << RDI_CFG0_DATA_TYPE;
val |= vc << RDI_CFG0_VIRTUAL_CHANNEL;
val |= dt_id << RDI_CFG0_DT_ID;
writel_relaxed(val, csid->base + CSID_RDI_CFG0(0));
@@ -396,7 +405,7 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable)
writel_relaxed(val, csid->base + CSID_RDI_FRM_DROP_PERIOD(0));
val = 0;
- writel_relaxed(0, csid->base + CSID_RDI_FRM_DROP_PATTERN(0));
+ writel_relaxed(val, csid->base + CSID_RDI_FRM_DROP_PATTERN(0));
val = 1;
writel_relaxed(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PERIOD(0));
@@ -441,15 +450,12 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable)
val = 1 << CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN;
val |= 1 << CSI2_RX_CFG1_MISR_EN;
- writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG1); // csi2_vc_mode_shift_val ?
-
- /* error irqs start at BIT(11) */
- writel_relaxed(~0u, csid->base + CSID_CSI2_RX_IRQ_MASK);
-
- /* RDI irq */
- writel_relaxed(~0u, csid->base + CSID_TOP_IRQ_MASK);
+ writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG1);
- val = 1 << RDI_CTRL_HALT_CMD;
+ if (enable)
+ val = HALT_CMD_RESUME_AT_FRAME_BOUNDARY << RDI_CTRL_HALT_CMD;
+ else
+ val = HALT_CMD_HALT_AT_FRAME_BOUNDARY << RDI_CTRL_HALT_CMD;
writel_relaxed(val, csid->base + CSID_RDI_CTRL(0));
}
@@ -588,7 +594,7 @@ static void csid_subdev_init(struct csid_device *csid)
csid->testgen.nmodes = CSID_PAYLOAD_MODE_NUM_SUPPORTED_GEN2;
}
-const struct csid_hw_ops csid_ops_170 = {
+const struct csid_hw_ops csid_ops_gen2 = {
.configure_stream = csid_configure_stream,
.configure_testgen_pattern = csid_configure_testgen_pattern,
.hw_version = csid_hw_version,
diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c
index a1637b78568b..f993f349b66b 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.c
+++ b/drivers/media/platform/qcom/camss/camss-csid.c
@@ -25,6 +25,10 @@
#include "camss-csid-gen1.h"
#include "camss.h"
+/* offset of CSID registers in VFE region for VFE 480 */
+#define VFE_480_CSID_OFFSET 0x1200
+#define VFE_480_LITE_CSID_OFFSET 0x200
+
#define MSM_CSID_NAME "msm_csid"
const char * const csid_testgen_modes[] = {
@@ -152,15 +156,25 @@ static int csid_set_clock_rates(struct csid_device *csid)
static int csid_set_power(struct v4l2_subdev *sd, int on)
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
- struct device *dev = csid->camss->dev;
- int ret;
+ struct camss *camss = csid->camss;
+ struct device *dev = camss->dev;
+ struct vfe_device *vfe = &camss->vfe[csid->id];
+ u32 version = camss->version;
+ int ret = 0;
if (on) {
+ if (version == CAMSS_8250 || version == CAMSS_845) {
+ ret = vfe_get(vfe);
+ if (ret < 0)
+ return ret;
+ }
+
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
- ret = regulator_enable(csid->vdda);
+ ret = regulator_bulk_enable(csid->num_supplies,
+ csid->supplies);
if (ret < 0) {
pm_runtime_put_sync(dev);
return ret;
@@ -168,14 +182,16 @@ static int csid_set_power(struct v4l2_subdev *sd, int on)
ret = csid_set_clock_rates(csid);
if (ret < 0) {
- regulator_disable(csid->vdda);
+ regulator_bulk_disable(csid->num_supplies,
+ csid->supplies);
pm_runtime_put_sync(dev);
return ret;
}
ret = camss_enable_clocks(csid->nclocks, csid->clock, dev);
if (ret < 0) {
- regulator_disable(csid->vdda);
+ regulator_bulk_disable(csid->num_supplies,
+ csid->supplies);
pm_runtime_put_sync(dev);
return ret;
}
@@ -186,7 +202,8 @@ static int csid_set_power(struct v4l2_subdev *sd, int on)
if (ret < 0) {
disable_irq(csid->irq);
camss_disable_clocks(csid->nclocks, csid->clock);
- regulator_disable(csid->vdda);
+ regulator_bulk_disable(csid->num_supplies,
+ csid->supplies);
pm_runtime_put_sync(dev);
return ret;
}
@@ -195,8 +212,11 @@ static int csid_set_power(struct v4l2_subdev *sd, int on)
} else {
disable_irq(csid->irq);
camss_disable_clocks(csid->nclocks, csid->clock);
- ret = regulator_disable(csid->vdda);
+ regulator_bulk_disable(csid->num_supplies,
+ csid->supplies);
pm_runtime_put_sync(dev);
+ if (version == CAMSS_8250 || version == CAMSS_845)
+ vfe_put(vfe);
}
return ret;
@@ -544,7 +564,6 @@ int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid,
{
struct device *dev = camss->dev;
struct platform_device *pdev = to_platform_device(dev);
- struct resource *r;
int i, j;
int ret;
@@ -556,8 +575,9 @@ int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid,
} else if (camss->version == CAMSS_8x96 ||
camss->version == CAMSS_660) {
csid->ops = &csid_ops_4_7;
- } else if (camss->version == CAMSS_845) {
- csid->ops = &csid_ops_170;
+ } else if (camss->version == CAMSS_845 ||
+ camss->version == CAMSS_8250) {
+ csid->ops = &csid_ops_gen2;
} else {
return -EINVAL;
}
@@ -565,20 +585,28 @@ int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid,
/* Memory */
- csid->base = devm_platform_ioremap_resource_byname(pdev, res->reg[0]);
- if (IS_ERR(csid->base))
- return PTR_ERR(csid->base);
+ if (camss->version == CAMSS_8250) {
+		/*
+		 * For Titan 480, the CSID registers are inside the VFE
+		 * region, between the VFE "top" and "bus" registers. This
+		 * requires the VFE to be initialized before the CSID.
+		 */
+ if (id >= 2) /* VFE/CSID lite */
+ csid->base = camss->vfe[id].base + VFE_480_LITE_CSID_OFFSET;
+ else
+ csid->base = camss->vfe[id].base + VFE_480_CSID_OFFSET;
+ } else {
+ csid->base = devm_platform_ioremap_resource_byname(pdev, res->reg[0]);
+ if (IS_ERR(csid->base))
+ return PTR_ERR(csid->base);
+ }
/* Interrupt */
- r = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
- res->interrupt[0]);
- if (!r) {
- dev_err(dev, "missing IRQ\n");
- return -EINVAL;
- }
+ ret = platform_get_irq_byname(pdev, res->interrupt[0]);
+ if (ret < 0)
+ return ret;
- csid->irq = r->start;
+ csid->irq = ret;
snprintf(csid->irq_name, sizeof(csid->irq_name), "%s_%s%d",
dev_name(dev), MSM_CSID_NAME, csid->id);
ret = devm_request_irq(dev, csid->irq, csid->ops->isr,
@@ -630,13 +658,28 @@ int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid,
}
/* Regulator */
+ for (i = 0; i < ARRAY_SIZE(res->regulators); i++) {
+ if (res->regulators[i])
+ csid->num_supplies++;
+ }
- csid->vdda = devm_regulator_get(dev, res->regulator[0]);
- if (IS_ERR(csid->vdda)) {
- dev_err(dev, "could not get regulator\n");
- return PTR_ERR(csid->vdda);
+ if (csid->num_supplies) {
+ csid->supplies = devm_kmalloc_array(camss->dev,
+ csid->num_supplies,
+						    sizeof(*csid->supplies),
+ GFP_KERNEL);
+ if (!csid->supplies)
+ return -ENOMEM;
}
+ for (i = 0; i < csid->num_supplies; i++)
+ csid->supplies[i].supply = res->regulators[i];
+
+ ret = devm_regulator_bulk_get(camss->dev, csid->num_supplies,
+ csid->supplies);
+ if (ret)
+ return ret;
+
init_completion(&csid->reset_complete);
return 0;
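For reference, the switch above from a single "vdda" regulator to the regulator_bulk_*() API follows the standard consumer pattern: count the supplies named in the resource table, allocate one regulator_bulk_data per name, fill in .supply, and fetch them all in one call. A condensed, self-contained sketch (the helper name is illustrative):

static int get_bulk_supplies(struct device *dev, const char * const *names,
			     int num, struct regulator_bulk_data **out)
{
	struct regulator_bulk_data *supplies;
	int i;

	supplies = devm_kmalloc_array(dev, num, sizeof(*supplies),
				      GFP_KERNEL);
	if (!supplies)
		return -ENOMEM;

	for (i = 0; i < num; i++)
		supplies[i].supply = names[i];	/* e.g. "vdda-phy" */

	*out = supplies;
	/* gets all supplies at once; fails if any one is missing */
	return devm_regulator_bulk_get(dev, num, supplies);
}

Enable and disable then become single regulator_bulk_enable()/_disable() calls, which is what keeps the error unwinding in csid_set_power() manageable.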
diff --git a/drivers/media/platform/qcom/camss/camss-csid.h b/drivers/media/platform/qcom/camss/camss-csid.h
index 814ebc7c29d6..f06040e44c51 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.h
+++ b/drivers/media/platform/qcom/camss/camss-csid.h
@@ -152,7 +152,8 @@ struct csid_device {
char irq_name[30];
struct camss_clock *clock;
int nclocks;
- struct regulator *vdda;
+ struct regulator_bulk_data *supplies;
+ int num_supplies;
struct completion reset_complete;
struct csid_testgen_config testgen;
struct csid_phy_config phy;
@@ -205,7 +206,7 @@ extern const char * const csid_testgen_modes[];
extern const struct csid_hw_ops csid_ops_4_1;
extern const struct csid_hw_ops csid_ops_4_7;
-extern const struct csid_hw_ops csid_ops_170;
+extern const struct csid_hw_ops csid_ops_gen2;
#endif /* QC_MSM_CAMSS_CSID_H */
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
index 30b454c369ab..cd4a8c369234 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
@@ -16,6 +16,7 @@
#define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n))
#define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n))
+#define CAMSS_CSI_PHY_LN_CLK 1
#define CAMSS_CSI_PHY_GLBL_RESET 0x140
#define CAMSS_CSI_PHY_GLBL_PWR_CFG 0x144
#define CAMSS_CSI_PHY_GLBL_IRQ_CMD 0x164
@@ -26,6 +27,19 @@
#define CAMSS_CSI_PHY_GLBL_T_INIT_CFG0 0x1ec
#define CAMSS_CSI_PHY_T_WAKEUP_CFG0 0x1f4
+static u8 csiphy_get_lane_mask(struct csiphy_lanes_cfg *lane_cfg)
+{
+ u8 lane_mask;
+ int i;
+
+ lane_mask = 1 << CAMSS_CSI_PHY_LN_CLK;
+
+ for (i = 0; i < lane_cfg->num_data; i++)
+ lane_mask |= 1 << lane_cfg->data[i].pos;
+
+ return lane_mask;
+}
+
static void csiphy_hw_version_read(struct csiphy_device *csiphy,
struct device *dev)
{
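A quick worked example for the helper just added: with the clock lane hardwired at position 1 and four data lanes at positions 0, 2, 3 and 4, it returns

	/*
	 * lane_mask = BIT(CAMSS_CSI_PHY_LN_CLK)			(clock, bit 1)
	 *	     | BIT(0) | BIT(2) | BIT(3) | BIT(4)	(data)
	 *	     = 0x1f
	 */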
@@ -105,7 +119,7 @@ static void csiphy_lanes_enable(struct csiphy_device *csiphy,
for (i = 0; i <= c->num_data; i++) {
if (i == c->num_data)
- l = c->clk.pos;
+ l = CAMSS_CSI_PHY_LN_CLK;
else
l = c->data[i].pos;
@@ -129,7 +143,7 @@ static void csiphy_lanes_disable(struct csiphy_device *csiphy,
for (i = 0; i <= c->num_data; i++) {
if (i == c->num_data)
- l = c->clk.pos;
+ l = CAMSS_CSI_PHY_LN_CLK;
else
l = c->data[i].pos;
@@ -167,6 +181,7 @@ static irqreturn_t csiphy_isr(int irq, void *dev)
}
const struct csiphy_hw_ops csiphy_ops_2ph_1_0 = {
+ .get_lane_mask = csiphy_get_lane_mask,
.hw_version_read = csiphy_hw_version_read,
.reset = csiphy_reset,
.lanes_enable = csiphy_lanes_enable,
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
index e318c822ab04..451a4c9b3d30 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
@@ -43,6 +43,7 @@
#define CSIPHY_3PH_LNn_CSI_LANE_CTRL15_SWI_SOT_SYMBOL 0xb8
#define CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(n) (0x800 + 0x4 * (n))
+#define CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE BIT(7)
#define CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_COMMON_PWRDN_B BIT(0)
#define CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_SHOW_REV_ID BIT(1)
#define CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(n) (0x8b0 + 0x4 * (n))
@@ -62,6 +63,7 @@ struct csiphy_reg_t {
u32 csiphy_param_type;
};
+/* GEN2 1.0 2PH */
static const struct
csiphy_reg_t lane_regs_sdm845[5][14] = {
{
@@ -146,6 +148,121 @@ csiphy_reg_t lane_regs_sdm845[5][14] = {
},
};
+/* GEN2 1.2.1 2PH */
+static const struct
+csiphy_reg_t lane_regs_sm8250[5][20] = {
+ {
+ {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0900, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0908, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0904, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0904, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0034, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0010, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x001C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0000, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0024, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0730, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C80, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C88, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C84, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0734, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0710, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x071C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0708, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x070c, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0724, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0230, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A00, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A08, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0234, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0210, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x021C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0208, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0200, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x020c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0224, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B00, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B08, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0434, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0410, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x041C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0400, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x040c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0424, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0630, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C00, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C08, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0634, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0610, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x061C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0608, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0600, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x060c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0624, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+};
+
static void csiphy_hw_version_read(struct csiphy_device *csiphy,
struct device *dev)
{
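The per-SoC tables above are mostly fixed register writes; only the entries tagged CSIPHY_SETTLE_CNT_LOWER_BYTE are patched at runtime with a settle count derived from the CSI-2 link frequency. The computation lives in an existing helper that this diff does not touch; roughly, and assuming the usual D-PHY timing bounds, it looks like:

/* Sketch: pick T_hs_settle midway between THS-PREPARE (max) and
 * THS-PREPARE + THS-ZERO (min), then express it in timer-clock ticks.
 * All times in picoseconds; ui is the half-rate unit interval.
 */
static u8 settle_cnt_calc(s64 link_freq, u32 timer_clk_rate)
{
	u32 ui, timer_period, t_hs_settle;

	if (link_freq <= 0)
		return 0;

	ui = div_u64(1000000000000LL, link_freq) / 2;
	t_hs_settle = ((85000 + 6 * ui) + (145000 + 10 * ui)) / 2;
	timer_period = div_u64(1000000000000LL, timer_clk_rate);

	return t_hs_settle / timer_period;
}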
@@ -163,7 +280,7 @@ static void csiphy_hw_version_read(struct csiphy_device *csiphy,
hw_version |= readl_relaxed(csiphy->base +
CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(15)) << 24;
- dev_err(dev, "CSIPHY 3PH HW Version = 0x%08x\n", hw_version);
+ dev_dbg(dev, "CSIPHY 3PH HW Version = 0x%08x\n", hw_version);
}
/*
@@ -298,13 +415,25 @@ static void csiphy_gen1_config_lanes(struct csiphy_device *csiphy,
static void csiphy_gen2_config_lanes(struct csiphy_device *csiphy,
u8 settle_cnt)
{
- int i, l;
+ const struct csiphy_reg_t *r;
+ int i, l, array_size;
u32 val;
- for (l = 0; l < 5; l++) {
- for (i = 0; i < 14; i++) {
- const struct csiphy_reg_t *r = &lane_regs_sdm845[l][i];
+ switch (csiphy->camss->version) {
+ case CAMSS_845:
+ r = &lane_regs_sdm845[0][0];
+ array_size = ARRAY_SIZE(lane_regs_sdm845[0]);
+ break;
+ case CAMSS_8250:
+ r = &lane_regs_sm8250[0][0];
+ array_size = ARRAY_SIZE(lane_regs_sm8250[0]);
+ break;
+ default:
+ unreachable();
+ }
+ for (l = 0; l < 5; l++) {
+ for (i = 0; i < array_size; i++, r++) {
switch (r->csiphy_param_type) {
case CSIPHY_SETTLE_CNT_LOWER_BYTE:
val = settle_cnt & 0xff;
@@ -320,18 +449,33 @@ static void csiphy_gen2_config_lanes(struct csiphy_device *csiphy,
}
}
+static u8 csiphy_get_lane_mask(struct csiphy_lanes_cfg *lane_cfg)
+{
+ u8 lane_mask;
+ int i;
+
+ lane_mask = CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE;
+
+ for (i = 0; i < lane_cfg->num_data; i++)
+ lane_mask |= 1 << lane_cfg->data[i].pos;
+
+ return lane_mask;
+}
+
static void csiphy_lanes_enable(struct csiphy_device *csiphy,
struct csiphy_config *cfg,
s64 link_freq, u8 lane_mask)
{
struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg;
+ bool is_gen2 = (csiphy->camss->version == CAMSS_845 ||
+ csiphy->camss->version == CAMSS_8250);
u8 settle_cnt;
u8 val;
int i;
settle_cnt = csiphy_settle_cnt_calc(link_freq, csiphy->timer_clk_rate);
- val = BIT(c->clk.pos);
+	val = CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE;
for (i = 0; i < c->num_data; i++)
val |= BIT(c->data[i].pos * 2);
@@ -346,44 +490,14 @@ static void csiphy_lanes_enable(struct csiphy_device *csiphy,
val = 0x00;
writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0));
- if (csiphy->camss->version == CAMSS_8x16 ||
- csiphy->camss->version == CAMSS_8x96)
- csiphy_gen1_config_lanes(csiphy, cfg, settle_cnt);
- else if (csiphy->camss->version == CAMSS_845)
+ if (is_gen2)
csiphy_gen2_config_lanes(csiphy, settle_cnt);
+ else
+ csiphy_gen1_config_lanes(csiphy, cfg, settle_cnt);
- val = 0xff;
- writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(11));
-
- val = 0xff;
- writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(12));
-
- val = 0xfb;
- writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(13));
-
- val = 0xff;
- writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(14));
-
- val = 0x7f;
- writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(15));
-
- val = 0xff;
- writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(16));
-
- val = 0xff;
- writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(17));
-
- val = 0xef;
- writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(18));
-
- val = 0xff;
- writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(19));
-
- val = 0xff;
- writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(20));
-
- val = 0xff;
- writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(21));
+ /* IRQ_MASK registers - disable all interrupts */
+ for (i = 11; i < 22; i++)
+ writel_relaxed(0, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(i));
}
static void csiphy_lanes_disable(struct csiphy_device *csiphy,
@@ -397,6 +511,7 @@ static void csiphy_lanes_disable(struct csiphy_device *csiphy,
}
const struct csiphy_hw_ops csiphy_ops_3ph_1_0 = {
+ .get_lane_mask = csiphy_get_lane_mask,
.hw_version_read = csiphy_hw_version_read,
.reset = csiphy_reset,
.lanes_enable = csiphy_lanes_enable,
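Both PHY generations now expose get_lane_mask() through csiphy_hw_ops, so the common code no longer needs to know where the clock lane sits: the 2ph variant puts it at the fixed position 1, while the 3ph variant folds in the CTRL5 clock-enable bit, BIT(7). The common call site (in the camss-csiphy.c hunk below) then reduces to:

	u8 lane_mask = csiphy->ops->get_lane_mask(&cfg->csi2->lane_cfg);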
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index 24eec16197e7..75fcfc627400 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -94,6 +94,7 @@ static const struct csiphy_format csiphy_formats_sdm845[] = {
{ MEDIA_BUS_FMT_SGBRG14_1X14, 14 },
{ MEDIA_BUS_FMT_SGRBG14_1X14, 14 },
{ MEDIA_BUS_FMT_SRGGB14_1X14, 14 },
+ { MEDIA_BUS_FMT_Y8_1X8, 8 },
{ MEDIA_BUS_FMT_Y10_1X10, 10 },
};
@@ -230,25 +231,6 @@ static int csiphy_set_power(struct v4l2_subdev *sd, int on)
}
/*
- * csiphy_get_lane_mask - Calculate CSI2 lane mask configuration parameter
- * @lane_cfg - CSI2 lane configuration
- *
- * Return lane mask
- */
-static u8 csiphy_get_lane_mask(struct csiphy_lanes_cfg *lane_cfg)
-{
- u8 lane_mask;
- int i;
-
- lane_mask = 1 << lane_cfg->clk.pos;
-
- for (i = 0; i < lane_cfg->num_data; i++)
- lane_mask |= 1 << lane_cfg->data[i].pos;
-
- return lane_mask;
-}
-
-/*
* csiphy_stream_on - Enable streaming on CSIPHY module
* @csiphy: CSIPHY device
*
@@ -261,7 +243,7 @@ static int csiphy_stream_on(struct csiphy_device *csiphy)
{
struct csiphy_config *cfg = &csiphy->cfg;
s64 link_freq;
- u8 lane_mask = csiphy_get_lane_mask(&cfg->csi2->lane_cfg);
+ u8 lane_mask = csiphy->ops->get_lane_mask(&cfg->csi2->lane_cfg);
u8 bpp = csiphy_get_bpp(csiphy->formats, csiphy->nformats,
csiphy->fmt[MSM_CSIPHY_PAD_SINK].code);
u8 num_lanes = csiphy->cfg.csi2->lane_cfg.num_data;
@@ -568,7 +550,6 @@ int msm_csiphy_subdev_init(struct camss *camss,
{
struct device *dev = camss->dev;
struct platform_device *pdev = to_platform_device(dev);
- struct resource *r;
int i, j;
int ret;
@@ -585,7 +566,8 @@ int msm_csiphy_subdev_init(struct camss *camss,
csiphy->ops = &csiphy_ops_3ph_1_0;
csiphy->formats = csiphy_formats_8x96;
csiphy->nformats = ARRAY_SIZE(csiphy_formats_8x96);
- } else if (camss->version == CAMSS_845) {
+ } else if (camss->version == CAMSS_845 ||
+ camss->version == CAMSS_8250) {
csiphy->ops = &csiphy_ops_3ph_1_0;
csiphy->formats = csiphy_formats_sdm845;
csiphy->nformats = ARRAY_SIZE(csiphy_formats_sdm845);
@@ -611,14 +593,11 @@ int msm_csiphy_subdev_init(struct camss *camss,
/* Interrupt */
- r = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
- res->interrupt[0]);
- if (!r) {
- dev_err(dev, "missing IRQ\n");
- return -EINVAL;
- }
+ ret = platform_get_irq_byname(pdev, res->interrupt[0]);
+ if (ret < 0)
+ return ret;
- csiphy->irq = r->start;
+ csiphy->irq = ret;
snprintf(csiphy->irq_name, sizeof(csiphy->irq_name), "%s_%s%d",
dev_name(dev), MSM_CSIPHY_NAME, csiphy->id);
@@ -679,7 +658,10 @@ int msm_csiphy_subdev_init(struct camss *camss,
if (!strcmp(clock->name, "csiphy0_timer") ||
!strcmp(clock->name, "csiphy1_timer") ||
- !strcmp(clock->name, "csiphy2_timer"))
+ !strcmp(clock->name, "csiphy2_timer") ||
+ !strcmp(clock->name, "csiphy3_timer") ||
+ !strcmp(clock->name, "csiphy4_timer") ||
+ !strcmp(clock->name, "csiphy5_timer"))
csiphy->rate_set[i] = true;
if (camss->version == CAMSS_660 &&
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.h b/drivers/media/platform/qcom/camss/camss-csiphy.h
index d71b8bc6ec00..1c14947f92d3 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.h
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.h
@@ -45,6 +45,13 @@ struct csiphy_config {
struct csiphy_device;
struct csiphy_hw_ops {
+ /*
+ * csiphy_get_lane_mask - Calculate CSI2 lane mask configuration parameter
+ * @lane_cfg - CSI2 lane configuration
+ *
+ * Return lane mask
+ */
+ u8 (*get_lane_mask)(struct csiphy_lanes_cfg *lane_cfg);
void (*hw_version_read)(struct csiphy_device *csiphy,
struct device *dev);
void (*reset)(struct csiphy_device *csiphy);
diff --git a/drivers/media/platform/qcom/camss/camss-ispif.c b/drivers/media/platform/qcom/camss/camss-ispif.c
index ba5d65f6ef34..4ee11bb979cd 100644
--- a/drivers/media/platform/qcom/camss/camss-ispif.c
+++ b/drivers/media/platform/qcom/camss/camss-ispif.c
@@ -1100,7 +1100,6 @@ int msm_ispif_subdev_init(struct camss *camss,
struct device *dev = camss->dev;
struct ispif_device *ispif = camss->ispif;
struct platform_device *pdev = to_platform_device(dev);
- struct resource *r;
int i;
int ret;
@@ -1153,14 +1152,11 @@ int msm_ispif_subdev_init(struct camss *camss,
/* Interrupt */
- r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res->interrupt);
-
- if (!r) {
- dev_err(dev, "missing IRQ\n");
- return -EINVAL;
- }
+ ret = platform_get_irq_byname(pdev, res->interrupt);
+ if (ret < 0)
+ return ret;
- ispif->irq = r->start;
+ ispif->irq = ret;
snprintf(ispif->irq_name, sizeof(ispif->irq_name), "%s_%s",
dev_name(dev), MSM_ISPIF_NAME);
if (camss->version == CAMSS_8x16)
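This is the same IRQ lookup conversion applied to the CSID, CSIPHY and VFE init paths elsewhere in this diff. platform_get_irq_byname() maps the interrupt and returns either a positive IRQ number or a negative errno, so the pattern is simply:

	ret = platform_get_irq_byname(pdev, res->interrupt);
	if (ret < 0)
		return ret;	/* propagate, may be -EPROBE_DEFER */
	ispif->irq = ret;

Unlike the removed platform_get_resource_byname() variant, this also works when the IRQ is not populated as a resource at device creation time.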
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-170.c b/drivers/media/platform/qcom/camss/camss-vfe-170.c
index f524af712a84..600150cfc4f7 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-170.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-170.c
@@ -395,17 +395,7 @@ static irqreturn_t vfe_isr(int irq, void *dev)
*/
static int vfe_halt(struct vfe_device *vfe)
{
- unsigned long time;
-
- reinit_completion(&vfe->halt_complete);
-
- time = wait_for_completion_timeout(&vfe->halt_complete,
- msecs_to_jiffies(VFE_HALT_TIMEOUT_MS));
- if (!time) {
- dev_err(vfe->camss->dev, "VFE halt timeout\n");
- return -EIO;
- }
-
+ /* rely on vfe_disable_output() to stop the VFE */
return 0;
}
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-480.c b/drivers/media/platform/qcom/camss/camss-vfe-480.c
new file mode 100644
index 000000000000..129585110393
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-vfe-480.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * camss-vfe-480.c
+ *
+ * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v480 (SM8250)
+ *
+ * Copyright (C) 2020-2021 Linaro Ltd.
+ * Copyright (C) 2021 Jonathan Marek
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+
+#include "camss.h"
+#include "camss-vfe.h"
+
+/* VFE 2/3 are lite and have a different register layout */
+#define IS_LITE (vfe->id >= 2 ? 1 : 0)
+
+#define VFE_HW_VERSION (0x00)
+
+#define VFE_GLOBAL_RESET_CMD (IS_LITE ? 0x0c : 0x1c)
+#define GLOBAL_RESET_HW_AND_REG (IS_LITE ? BIT(1) : BIT(0))
+
+#define VFE_REG_UPDATE_CMD (IS_LITE ? 0x20 : 0x34)
+static inline int reg_update_rdi(struct vfe_device *vfe, int n)
+{
+ return IS_LITE ? BIT(n) : BIT(1 + (n));
+}
+
+#define REG_UPDATE_RDI reg_update_rdi
+#define VFE_IRQ_CMD (IS_LITE ? 0x24 : 0x38)
+#define IRQ_CMD_GLOBAL_CLEAR BIT(0)
+
+#define VFE_IRQ_MASK(n) ((IS_LITE ? 0x28 : 0x3c) + (n) * 4)
+#define IRQ_MASK_0_RESET_ACK (IS_LITE ? BIT(17) : BIT(0))
+#define IRQ_MASK_0_BUS_TOP_IRQ (IS_LITE ? BIT(4) : BIT(7))
+#define VFE_IRQ_CLEAR(n) ((IS_LITE ? 0x34 : 0x48) + (n) * 4)
+#define VFE_IRQ_STATUS(n) ((IS_LITE ? 0x40 : 0x54) + (n) * 4)
+
+#define BUS_REG_BASE (IS_LITE ? 0x1a00 : 0xaa00)
+
+#define VFE_BUS_WM_CGC_OVERRIDE (BUS_REG_BASE + 0x08)
+#define WM_CGC_OVERRIDE_ALL (0x3FFFFFF)
+
+#define VFE_BUS_WM_TEST_BUS_CTRL (BUS_REG_BASE + 0xdc)
+
+#define VFE_BUS_IRQ_MASK(n) (BUS_REG_BASE + 0x18 + (n) * 4)
+static inline int bus_irq_mask_0_rdi_rup(struct vfe_device *vfe, int n)
+{
+ return IS_LITE ? BIT(n) : BIT(3 + (n));
+}
+
+#define BUS_IRQ_MASK_0_RDI_RUP bus_irq_mask_0_rdi_rup
+static inline int bus_irq_mask_0_comp_done(struct vfe_device *vfe, int n)
+{
+ return IS_LITE ? BIT(4 + (n)) : BIT(6 + (n));
+}
+
+#define BUS_IRQ_MASK_0_COMP_DONE bus_irq_mask_0_comp_done
+#define VFE_BUS_IRQ_CLEAR(n) (BUS_REG_BASE + 0x20 + (n) * 4)
+#define VFE_BUS_IRQ_STATUS(n) (BUS_REG_BASE + 0x28 + (n) * 4)
+#define VFE_BUS_IRQ_CLEAR_GLOBAL (BUS_REG_BASE + 0x30)
+
+#define VFE_BUS_WM_CFG(n) (BUS_REG_BASE + 0x200 + (n) * 0x100)
+#define WM_CFG_EN (0)
+#define WM_CFG_MODE (16)
+#define MODE_QCOM_PLAIN (0)
+#define MODE_MIPI_RAW (1)
+#define VFE_BUS_WM_IMAGE_ADDR(n) (BUS_REG_BASE + 0x204 + (n) * 0x100)
+#define VFE_BUS_WM_FRAME_INCR(n) (BUS_REG_BASE + 0x208 + (n) * 0x100)
+#define VFE_BUS_WM_IMAGE_CFG_0(n) (BUS_REG_BASE + 0x20c + (n) * 0x100)
+#define WM_IMAGE_CFG_0_DEFAULT_WIDTH (0xFFFF)
+#define VFE_BUS_WM_IMAGE_CFG_1(n) (BUS_REG_BASE + 0x210 + (n) * 0x100)
+#define VFE_BUS_WM_IMAGE_CFG_2(n) (BUS_REG_BASE + 0x214 + (n) * 0x100)
+#define VFE_BUS_WM_PACKER_CFG(n) (BUS_REG_BASE + 0x218 + (n) * 0x100)
+#define VFE_BUS_WM_HEADER_ADDR(n) (BUS_REG_BASE + 0x220 + (n) * 0x100)
+#define VFE_BUS_WM_HEADER_INCR(n) (BUS_REG_BASE + 0x224 + (n) * 0x100)
+#define VFE_BUS_WM_HEADER_CFG(n) (BUS_REG_BASE + 0x228 + (n) * 0x100)
+
+#define VFE_BUS_WM_IRQ_SUBSAMPLE_PERIOD(n) (BUS_REG_BASE + 0x230 + (n) * 0x100)
+#define VFE_BUS_WM_IRQ_SUBSAMPLE_PATTERN(n) (BUS_REG_BASE + 0x234 + (n) * 0x100)
+#define VFE_BUS_WM_FRAMEDROP_PERIOD(n) (BUS_REG_BASE + 0x238 + (n) * 0x100)
+#define VFE_BUS_WM_FRAMEDROP_PATTERN(n) (BUS_REG_BASE + 0x23c + (n) * 0x100)
+
+#define VFE_BUS_WM_SYSTEM_CACHE_CFG(n) (BUS_REG_BASE + 0x260 + (n) * 0x100)
+#define VFE_BUS_WM_BURST_LIMIT(n) (BUS_REG_BASE + 0x264 + (n) * 0x100)
+
+/*
+ * For Titan 480, each bus client is hardcoded to a specific path and is
+ * part of a hardcoded "comp group".
+ */
+#define RDI_WM(n) ((IS_LITE ? 0 : 23) + (n))
+#define RDI_COMP_GROUP(n) ((IS_LITE ? 0 : 11) + (n))
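+/* e.g. a full VFE maps RDI0 to WM 23 and comp group 11, while a VFE
+ * lite maps RDI0 to WM 0 and comp group 0
+ */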
+
+static u32 vfe_hw_version(struct vfe_device *vfe)
+{
+ u32 hw_version = readl_relaxed(vfe->base + VFE_HW_VERSION);
+
+ u32 gen = (hw_version >> 28) & 0xF;
+ u32 rev = (hw_version >> 16) & 0xFFF;
+ u32 step = hw_version & 0xFFFF;
+
+ dev_dbg(vfe->camss->dev, "VFE HW Version = %u.%u.%u\n", gen, rev, step);
+
+ return hw_version;
+}
+
+static void vfe_global_reset(struct vfe_device *vfe)
+{
+ writel_relaxed(IRQ_MASK_0_RESET_ACK, vfe->base + VFE_IRQ_MASK(0));
+ writel_relaxed(GLOBAL_RESET_HW_AND_REG, vfe->base + VFE_GLOBAL_RESET_CMD);
+}
+
+static void vfe_wm_start(struct vfe_device *vfe, u8 wm, struct vfe_line *line)
+{
+ struct v4l2_pix_format_mplane *pix =
+ &line->video_out.active_fmt.fmt.pix_mp;
+
+	wm = RDI_WM(wm); /* map the RDI index to the actual WM used */
+
+ /* no clock gating at bus input */
+ writel_relaxed(WM_CGC_OVERRIDE_ALL, vfe->base + VFE_BUS_WM_CGC_OVERRIDE);
+
+ writel_relaxed(0x0, vfe->base + VFE_BUS_WM_TEST_BUS_CTRL);
+
+ writel_relaxed(pix->plane_fmt[0].bytesperline * pix->height,
+ vfe->base + VFE_BUS_WM_FRAME_INCR(wm));
+ writel_relaxed(0xf, vfe->base + VFE_BUS_WM_BURST_LIMIT(wm));
+ writel_relaxed(WM_IMAGE_CFG_0_DEFAULT_WIDTH,
+ vfe->base + VFE_BUS_WM_IMAGE_CFG_0(wm));
+ writel_relaxed(pix->plane_fmt[0].bytesperline,
+ vfe->base + VFE_BUS_WM_IMAGE_CFG_2(wm));
+ writel_relaxed(0, vfe->base + VFE_BUS_WM_PACKER_CFG(wm));
+
+ /* no dropped frames, one irq per frame */
+ writel_relaxed(0, vfe->base + VFE_BUS_WM_FRAMEDROP_PERIOD(wm));
+ writel_relaxed(1, vfe->base + VFE_BUS_WM_FRAMEDROP_PATTERN(wm));
+ writel_relaxed(0, vfe->base + VFE_BUS_WM_IRQ_SUBSAMPLE_PERIOD(wm));
+ writel_relaxed(1, vfe->base + VFE_BUS_WM_IRQ_SUBSAMPLE_PATTERN(wm));
+
+ writel_relaxed(1 << WM_CFG_EN | MODE_MIPI_RAW << WM_CFG_MODE,
+ vfe->base + VFE_BUS_WM_CFG(wm));
+}
+
+static void vfe_wm_stop(struct vfe_device *vfe, u8 wm)
+{
+	wm = RDI_WM(wm); /* map the RDI index to the actual WM used */
+ writel_relaxed(0, vfe->base + VFE_BUS_WM_CFG(wm));
+}
+
+static void vfe_wm_update(struct vfe_device *vfe, u8 wm, u32 addr,
+ struct vfe_line *line)
+{
+	wm = RDI_WM(wm); /* map the RDI index to the actual WM used */
+ writel_relaxed(addr, vfe->base + VFE_BUS_WM_IMAGE_ADDR(wm));
+}
+
+static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
+{
+ vfe->reg_update |= REG_UPDATE_RDI(vfe, line_id);
+ writel_relaxed(vfe->reg_update, vfe->base + VFE_REG_UPDATE_CMD);
+}
+
+static inline void vfe_reg_update_clear(struct vfe_device *vfe,
+ enum vfe_line_id line_id)
+{
+ vfe->reg_update &= ~REG_UPDATE_RDI(vfe, line_id);
+}
+
+static void vfe_enable_irq_common(struct vfe_device *vfe)
+{
+ /* enable only the IRQs used: rup and comp_done irqs for RDI0 */
+ writel_relaxed(IRQ_MASK_0_RESET_ACK | IRQ_MASK_0_BUS_TOP_IRQ,
+ vfe->base + VFE_IRQ_MASK(0));
+ writel_relaxed(BUS_IRQ_MASK_0_RDI_RUP(vfe, 0) |
+ BUS_IRQ_MASK_0_COMP_DONE(vfe, RDI_COMP_GROUP(0)),
+ vfe->base + VFE_BUS_IRQ_MASK(0));
+}
+
+static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id);
+static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm);
+
+/*
+ * vfe_isr - VFE module interrupt handler
+ * @irq: Interrupt line
+ * @dev: VFE device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t vfe_isr(int irq, void *dev)
+{
+ struct vfe_device *vfe = dev;
+ u32 status;
+
+ status = readl_relaxed(vfe->base + VFE_IRQ_STATUS(0));
+ writel_relaxed(status, vfe->base + VFE_IRQ_CLEAR(0));
+ writel_relaxed(IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_IRQ_CMD);
+
+ if (status & IRQ_MASK_0_RESET_ACK)
+ vfe_isr_reset_ack(vfe);
+
+ if (status & IRQ_MASK_0_BUS_TOP_IRQ) {
+		u32 bus_status = readl_relaxed(vfe->base + VFE_BUS_IRQ_STATUS(0));
+
+		writel_relaxed(bus_status, vfe->base + VFE_BUS_IRQ_CLEAR(0));
+		writel_relaxed(1, vfe->base + VFE_BUS_IRQ_CLEAR_GLOBAL);
+
+		if (bus_status & BUS_IRQ_MASK_0_RDI_RUP(vfe, 0))
+			vfe_isr_reg_update(vfe, 0);
+
+		if (bus_status & BUS_IRQ_MASK_0_COMP_DONE(vfe, RDI_COMP_GROUP(0)))
+ vfe_isr_wm_done(vfe, 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * vfe_halt - Trigger halt on VFE module and wait to complete
+ * @vfe: VFE device
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int vfe_halt(struct vfe_device *vfe)
+{
+ /* rely on vfe_disable_output() to stop the VFE */
+ return 0;
+}
+
+static int vfe_get_output(struct vfe_line *line)
+{
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output;
+ unsigned long flags;
+ int wm_idx;
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ output = &line->output;
+ if (output->state != VFE_OUTPUT_OFF) {
+ dev_err(vfe->camss->dev, "Output is running\n");
+ goto error;
+ }
+
+ output->wm_num = 1;
+
+ wm_idx = vfe_reserve_wm(vfe, line->id);
+ if (wm_idx < 0) {
+ dev_err(vfe->camss->dev, "Can not reserve wm\n");
+ goto error_get_wm;
+ }
+ output->wm_idx[0] = wm_idx;
+
+ output->drop_update_idx = 0;
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ return 0;
+
+error_get_wm:
+ vfe_release_wm(vfe, output->wm_idx[0]);
+ output->state = VFE_OUTPUT_OFF;
+error:
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ return -EINVAL;
+}
+
+static int vfe_enable_output(struct vfe_line *line)
+{
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output = &line->output;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ vfe_reg_update_clear(vfe, line->id);
+
+ if (output->state != VFE_OUTPUT_OFF) {
+ dev_err(vfe->camss->dev, "Output is not in reserved state %d\n",
+ output->state);
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+ return -EINVAL;
+ }
+
+ WARN_ON(output->gen2.active_num);
+
+ output->state = VFE_OUTPUT_ON;
+
+ output->sequence = 0;
+ output->wait_reg_update = 0;
+ reinit_completion(&output->reg_update);
+
+ vfe_wm_start(vfe, output->wm_idx[0], line);
+
+ for (i = 0; i < 2; i++) {
+ output->buf[i] = vfe_buf_get_pending(output);
+ if (!output->buf[i])
+ break;
+ output->gen2.active_num++;
+ vfe_wm_update(vfe, output->wm_idx[0], output->buf[i]->addr[0], line);
+ }
+
+ vfe_reg_update(vfe, line->id);
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ return 0;
+}
+
+static int vfe_disable_output(struct vfe_line *line)
+{
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output = &line->output;
+ unsigned long flags;
+ unsigned int i;
+ bool done;
+ int timeout = 0;
+
+ do {
+ spin_lock_irqsave(&vfe->output_lock, flags);
+ done = !output->gen2.active_num;
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+ usleep_range(10000, 20000);
+
+ if (timeout++ == 100) {
+ dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
+ vfe_reset(vfe);
+ output->gen2.active_num = 0;
+ return 0;
+ }
+ } while (!done);
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+ for (i = 0; i < output->wm_num; i++)
+ vfe_wm_stop(vfe, output->wm_idx[i]);
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ return 0;
+}
+
+/*
+ * vfe_enable - Enable streaming on VFE line
+ * @line: VFE line
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int vfe_enable(struct vfe_line *line)
+{
+ struct vfe_device *vfe = to_vfe(line);
+ int ret;
+
+ mutex_lock(&vfe->stream_lock);
+
+ if (!vfe->stream_count)
+ vfe_enable_irq_common(vfe);
+
+ vfe->stream_count++;
+
+ mutex_unlock(&vfe->stream_lock);
+
+ ret = vfe_get_output(line);
+ if (ret < 0)
+ goto error_get_output;
+
+ ret = vfe_enable_output(line);
+ if (ret < 0)
+ goto error_enable_output;
+
+ vfe->was_streaming = 1;
+
+ return 0;
+
+error_enable_output:
+ vfe_put_output(line);
+
+error_get_output:
+ mutex_lock(&vfe->stream_lock);
+
+ vfe->stream_count--;
+
+ mutex_unlock(&vfe->stream_lock);
+
+ return ret;
+}
+
+/*
+ * vfe_disable - Disable streaming on VFE line
+ * @line: VFE line
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int vfe_disable(struct vfe_line *line)
+{
+ struct vfe_device *vfe = to_vfe(line);
+
+ vfe_disable_output(line);
+
+ vfe_put_output(line);
+
+ mutex_lock(&vfe->stream_lock);
+
+ vfe->stream_count--;
+
+ mutex_unlock(&vfe->stream_lock);
+
+ return 0;
+}
+
+/*
+ * vfe_isr_reg_update - Process reg update interrupt
+ * @vfe: VFE Device
+ * @line_id: VFE line
+ */
+static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
+{
+ struct vfe_output *output;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+ vfe_reg_update_clear(vfe, line_id);
+
+ output = &vfe->line[line_id].output;
+
+ if (output->wait_reg_update) {
+ output->wait_reg_update = 0;
+ complete(&output->reg_update);
+ }
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+}
+
+/*
+ * vfe_isr_wm_done - Process write master done interrupt
+ * @vfe: VFE Device
+ * @wm: Write master id
+ */
+static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
+{
+ struct vfe_line *line = &vfe->line[vfe->wm_output_map[wm]];
+ struct camss_buffer *ready_buf;
+ struct vfe_output *output;
+ unsigned long flags;
+ u32 index;
+ u64 ts = ktime_get_ns();
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
+ dev_err_ratelimited(vfe->camss->dev,
+ "Received wm done for unmapped index\n");
+ goto out_unlock;
+ }
+ output = &vfe->line[vfe->wm_output_map[wm]].output;
+
+ ready_buf = output->buf[0];
+ if (!ready_buf) {
+ dev_err_ratelimited(vfe->camss->dev,
+ "Missing ready buf %d!\n", output->state);
+ goto out_unlock;
+ }
+
+ ready_buf->vb.vb2_buf.timestamp = ts;
+ ready_buf->vb.sequence = output->sequence++;
+
+ index = 0;
+ output->buf[0] = output->buf[1];
+ if (output->buf[0])
+ index = 1;
+
+ output->buf[index] = vfe_buf_get_pending(output);
+
+ if (output->buf[index])
+ vfe_wm_update(vfe, output->wm_idx[0], output->buf[index]->addr[0], line);
+ else
+ output->gen2.active_num--;
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+
+ return;
+
+out_unlock:
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+}
+
+/*
+ * vfe_pm_domain_off - Disable power domains specific to this VFE.
+ * @vfe: VFE Device
+ */
+static void vfe_pm_domain_off(struct vfe_device *vfe)
+{
+ /* nop */
+}
+
+/*
+ * vfe_pm_domain_on - Enable power domains specific to this VFE.
+ * @vfe: VFE Device
+ */
+static int vfe_pm_domain_on(struct vfe_device *vfe)
+{
+ return 0;
+}
+
+/*
+ * vfe_queue_buffer - Add empty buffer
+ * @vid: Video device structure
+ * @buf: Buffer to be enqueued
+ *
+ * Add an empty buffer - depending on the current number of buffers it will be
+ * put in the pending buffer queue or given directly to the hardware to be
+ * filled.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int vfe_queue_buffer(struct camss_video *vid,
+ struct camss_buffer *buf)
+{
+ struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output;
+ unsigned long flags;
+
+ output = &line->output;
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ if (output->state == VFE_OUTPUT_ON && output->gen2.active_num < 2) {
+ output->buf[output->gen2.active_num++] = buf;
+ vfe_wm_update(vfe, output->wm_idx[0], buf->addr[0], line);
+ } else {
+ vfe_buf_add_pending(output, buf);
+ }
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ return 0;
+}
+
+static const struct camss_video_ops vfe_video_ops_480 = {
+ .queue_buffer = vfe_queue_buffer,
+ .flush_buffers = vfe_flush_buffers,
+};
+
+static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
+{
+ vfe->video_ops = vfe_video_ops_480;
+ vfe->line_num = 1;
+}
+
+const struct vfe_hw_ops vfe_ops_480 = {
+ .global_reset = vfe_global_reset,
+ .hw_version = vfe_hw_version,
+ .isr = vfe_isr,
+ .pm_domain_off = vfe_pm_domain_off,
+ .pm_domain_on = vfe_pm_domain_on,
+ .subdev_init = vfe_subdev_init,
+ .vfe_disable = vfe_disable,
+ .vfe_enable = vfe_enable,
+ .vfe_halt = vfe_halt,
+};
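The gen2 output path keeps at most two buffers in flight: buf[0] is the frame the write master is currently filling and buf[1] is queued behind it. On every wm_done interrupt the finished buf[0] goes back to vb2, buf[1] slides down, and a pending buffer, if any, is programmed into the freed slot; otherwise gen2.active_num drops so vfe_disable_output() can see the pipeline drain. Condensed from vfe_isr_wm_done() above (the real code also handles the single-buffer case via its index variable):

	done = output->buf[0];			/* just completed */
	output->buf[0] = output->buf[1];	/* already programmed */
	output->buf[1] = vfe_buf_get_pending(output);
	if (output->buf[1])
		vfe_wm_update(vfe, output->wm_idx[0],
			      output->buf[1]->addr[0], line);
	else
		output->gen2.active_num--;	/* start draining */
	vb2_buffer_done(&done->vb.vb2_buf, VB2_BUF_STATE_DONE);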
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index 71f78b40e7f5..5b148e9f8134 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -118,6 +118,7 @@ static const struct vfe_format formats_rdi_845[] = {
{ MEDIA_BUS_FMT_SGBRG14_1X14, 14 },
{ MEDIA_BUS_FMT_SGRBG14_1X14, 14 },
{ MEDIA_BUS_FMT_SRGGB14_1X14, 14 },
+ { MEDIA_BUS_FMT_Y8_1X8, 8 },
{ MEDIA_BUS_FMT_Y10_1X10, 10 },
{ MEDIA_BUS_FMT_Y10_2X8_PADHI_LE, 16 },
};
@@ -219,7 +220,8 @@ static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
}
else if (vfe->camss->version == CAMSS_8x96 ||
vfe->camss->version == CAMSS_660 ||
- vfe->camss->version == CAMSS_845)
+ vfe->camss->version == CAMSS_845 ||
+ vfe->camss->version == CAMSS_8250)
switch (sink_code) {
case MEDIA_BUS_FMT_YUYV8_2X8:
{
@@ -573,7 +575,7 @@ static int vfe_check_clock_rates(struct vfe_device *vfe)
*
* Return 0 on success or a negative error code otherwise
*/
-static int vfe_get(struct vfe_device *vfe)
+int vfe_get(struct vfe_device *vfe)
{
int ret;
@@ -635,7 +637,7 @@ error_pm_domain:
* vfe_put - Power down VFE module
* @vfe: VFE Device
*/
-static void vfe_put(struct vfe_device *vfe)
+void vfe_put(struct vfe_device *vfe)
{
mutex_lock(&vfe->power_lock);
@@ -1279,7 +1281,6 @@ int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
{
struct device *dev = camss->dev;
struct platform_device *pdev = to_platform_device(dev);
- struct resource *r;
int i, j;
int ret;
@@ -1293,10 +1294,12 @@ int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
case CAMSS_660:
vfe->ops = &vfe_ops_4_8;
break;
-
case CAMSS_845:
vfe->ops = &vfe_ops_170;
break;
+ case CAMSS_8250:
+ vfe->ops = &vfe_ops_480;
+ break;
default:
return -EINVAL;
}
@@ -1312,16 +1315,13 @@ int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
/* Interrupt */
- r = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
- res->interrupt[0]);
- if (!r) {
- dev_err(dev, "missing IRQ\n");
- return -EINVAL;
- }
+ ret = platform_get_irq_byname(pdev, res->interrupt[0]);
+ if (ret < 0)
+ return ret;
- vfe->irq = r->start;
+ vfe->irq = ret;
snprintf(vfe->irq_name, sizeof(vfe->irq_name), "%s_%s%d",
- dev_name(dev), MSM_VFE_NAME, vfe->id);
+ dev_name(dev), MSM_VFE_NAME, id);
ret = devm_request_irq(dev, vfe->irq, vfe->ops->isr,
IRQF_TRIGGER_RISING, vfe->irq_name, vfe);
if (ret < 0) {
@@ -1407,7 +1407,8 @@ int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
l->formats = formats_rdi_8x96;
l->nformats = ARRAY_SIZE(formats_rdi_8x96);
}
- } else if (camss->version == CAMSS_845) {
+ } else if (camss->version == CAMSS_845 ||
+ camss->version == CAMSS_8250) {
l->formats = formats_rdi_845;
l->nformats = ARRAY_SIZE(formats_rdi_845);
} else {
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.h b/drivers/media/platform/qcom/camss/camss-vfe.h
index f166d176cb77..0eba04eb9b77 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.h
+++ b/drivers/media/platform/qcom/camss/camss-vfe.h
@@ -201,5 +201,9 @@ extern const struct vfe_hw_ops vfe_ops_4_1;
extern const struct vfe_hw_ops vfe_ops_4_7;
extern const struct vfe_hw_ops vfe_ops_4_8;
extern const struct vfe_hw_ops vfe_ops_170;
+extern const struct vfe_hw_ops vfe_ops_480;
+
+int vfe_get(struct vfe_device *vfe);
+void vfe_put(struct vfe_device *vfe);
#endif /* QC_MSM_CAMSS_VFE_H */
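vfe_get() and vfe_put() lose their static qualifier because, on Titan (SDM845/SM8250), the CSID register block sits inside the VFE region and power domain, so csid_set_power() has to bring the matching VFE up first, as in the camss-csid.c hunk earlier in this diff:

	if (version == CAMSS_8250 || version == CAMSS_845) {
		ret = vfe_get(vfe);	/* CSID registers live in the VFE region */
		if (ret < 0)
			return ret;
	}

with the corresponding vfe_put() on the power-off path.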
diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
index f282275af626..5dc1ddbe6d65 100644
--- a/drivers/media/platform/qcom/camss/camss-video.c
+++ b/drivers/media/platform/qcom/camss/camss-video.c
@@ -176,6 +176,8 @@ static const struct camss_format_info formats_rdi_845[] = {
{ { 1, 1 } }, { { 1, 1 } }, { 14 } },
{ MEDIA_BUS_FMT_SRGGB14_1X14, V4L2_PIX_FMT_SRGGB14P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 14 } },
+ { MEDIA_BUS_FMT_Y8_1X8, V4L2_PIX_FMT_GREY, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 8 } },
{ MEDIA_BUS_FMT_Y10_1X10, V4L2_PIX_FMT_Y10P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 10 } },
{ MEDIA_BUS_FMT_Y10_2X8_PADHI_LE, V4L2_PIX_FMT_Y10, 1,
@@ -1009,7 +1011,8 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
video->formats = formats_rdi_8x96;
video->nformats = ARRAY_SIZE(formats_rdi_8x96);
}
- } else if (video->camss->version == CAMSS_845) {
+ } else if (video->camss->version == CAMSS_845 ||
+ video->camss->version == CAMSS_8250) {
video->formats = formats_rdi_845;
video->nformats = ARRAY_SIZE(formats_rdi_845);
} else {
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index be091c50a3c0..79ad82e233cb 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -8,6 +8,7 @@
* Copyright (C) 2015-2018 Linaro Ltd.
*/
#include <linux/clk.h>
+#include <linux/interconnect.h>
#include <linux/media-bus-format.h>
#include <linux/media.h>
#include <linux/module.h>
@@ -33,7 +34,7 @@
static const struct resources csiphy_res_8x16[] = {
/* CSIPHY0 */
{
- .regulator = { NULL },
+ .regulators = {},
.clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy0_timer" },
.clock_rate = { { 0 },
{ 0 },
@@ -45,7 +46,7 @@ static const struct resources csiphy_res_8x16[] = {
/* CSIPHY1 */
{
- .regulator = { NULL },
+ .regulators = {},
.clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy1_timer" },
.clock_rate = { { 0 },
{ 0 },
@@ -59,7 +60,7 @@ static const struct resources csiphy_res_8x16[] = {
static const struct resources csid_res_8x16[] = {
/* CSID0 */
{
- .regulator = { "vdda" },
+ .regulators = { "vdda" },
.clock = { "top_ahb", "ispif_ahb", "csi0_ahb", "ahb",
"csi0", "csi0_phy", "csi0_pix", "csi0_rdi" },
.clock_rate = { { 0 },
@@ -76,7 +77,7 @@ static const struct resources csid_res_8x16[] = {
/* CSID1 */
{
- .regulator = { "vdda" },
+ .regulators = { "vdda" },
.clock = { "top_ahb", "ispif_ahb", "csi1_ahb", "ahb",
"csi1", "csi1_phy", "csi1_pix", "csi1_rdi" },
.clock_rate = { { 0 },
@@ -106,7 +107,7 @@ static const struct resources_ispif ispif_res_8x16 = {
static const struct resources vfe_res_8x16[] = {
/* VFE0 */
{
- .regulator = { NULL },
+ .regulators = {},
.clock = { "top_ahb", "vfe0", "csi_vfe0",
"vfe_ahb", "vfe_axi", "ahb" },
.clock_rate = { { 0 },
@@ -128,7 +129,7 @@ static const struct resources vfe_res_8x16[] = {
static const struct resources csiphy_res_8x96[] = {
/* CSIPHY0 */
{
- .regulator = { NULL },
+ .regulators = {},
.clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy0_timer" },
.clock_rate = { { 0 },
{ 0 },
@@ -140,7 +141,7 @@ static const struct resources csiphy_res_8x96[] = {
/* CSIPHY1 */
{
- .regulator = { NULL },
+ .regulators = {},
.clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy1_timer" },
.clock_rate = { { 0 },
{ 0 },
@@ -152,7 +153,7 @@ static const struct resources csiphy_res_8x96[] = {
/* CSIPHY2 */
{
- .regulator = { NULL },
+ .regulators = {},
.clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy2_timer" },
.clock_rate = { { 0 },
{ 0 },
@@ -166,7 +167,7 @@ static const struct resources csiphy_res_8x96[] = {
static const struct resources csid_res_8x96[] = {
/* CSID0 */
{
- .regulator = { "vdda" },
+ .regulators = { "vdda" },
.clock = { "top_ahb", "ispif_ahb", "csi0_ahb", "ahb",
"csi0", "csi0_phy", "csi0_pix", "csi0_rdi" },
.clock_rate = { { 0 },
@@ -183,7 +184,7 @@ static const struct resources csid_res_8x96[] = {
/* CSID1 */
{
- .regulator = { "vdda" },
+ .regulators = { "vdda" },
.clock = { "top_ahb", "ispif_ahb", "csi1_ahb", "ahb",
"csi1", "csi1_phy", "csi1_pix", "csi1_rdi" },
.clock_rate = { { 0 },
@@ -200,7 +201,7 @@ static const struct resources csid_res_8x96[] = {
/* CSID2 */
{
- .regulator = { "vdda" },
+ .regulators = { "vdda" },
.clock = { "top_ahb", "ispif_ahb", "csi2_ahb", "ahb",
"csi2", "csi2_phy", "csi2_pix", "csi2_rdi" },
.clock_rate = { { 0 },
@@ -217,7 +218,7 @@ static const struct resources csid_res_8x96[] = {
/* CSID3 */
{
- .regulator = { "vdda" },
+ .regulators = { "vdda" },
.clock = { "top_ahb", "ispif_ahb", "csi3_ahb", "ahb",
"csi3", "csi3_phy", "csi3_pix", "csi3_rdi" },
.clock_rate = { { 0 },
@@ -248,7 +249,7 @@ static const struct resources_ispif ispif_res_8x96 = {
static const struct resources vfe_res_8x96[] = {
/* VFE0 */
{
- .regulator = { NULL },
+ .regulators = {},
.clock = { "top_ahb", "ahb", "vfe0", "csi_vfe0", "vfe_ahb",
"vfe0_ahb", "vfe_axi", "vfe0_stream"},
.clock_rate = { { 0 },
@@ -266,7 +267,7 @@ static const struct resources vfe_res_8x96[] = {
/* VFE1 */
{
- .regulator = { NULL },
+ .regulators = {},
.clock = { "top_ahb", "ahb", "vfe1", "csi_vfe1", "vfe_ahb",
"vfe1_ahb", "vfe_axi", "vfe1_stream"},
.clock_rate = { { 0 },
@@ -286,7 +287,7 @@ static const struct resources vfe_res_8x96[] = {
static const struct resources csiphy_res_660[] = {
/* CSIPHY0 */
{
- .regulator = { NULL },
+ .regulators = {},
.clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy0_timer",
"csi0_phy", "csiphy_ahb2crif" },
.clock_rate = { { 0 },
@@ -300,7 +301,7 @@ static const struct resources csiphy_res_660[] = {
/* CSIPHY1 */
{
- .regulator = { NULL },
+ .regulators = {},
.clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy1_timer",
"csi1_phy", "csiphy_ahb2crif" },
.clock_rate = { { 0 },
@@ -314,7 +315,7 @@ static const struct resources csiphy_res_660[] = {
/* CSIPHY2 */
{
- .regulator = { NULL },
+ .regulators = {},
.clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy2_timer",
"csi2_phy", "csiphy_ahb2crif" },
.clock_rate = { { 0 },
@@ -330,7 +331,7 @@ static const struct resources csiphy_res_660[] = {
static const struct resources csid_res_660[] = {
/* CSID0 */
{
- .regulator = { "vdda", "vdd_sec" },
+ .regulators = { "vdda", "vdd_sec" },
.clock = { "top_ahb", "ispif_ahb", "csi0_ahb", "ahb",
"csi0", "csi0_phy", "csi0_pix", "csi0_rdi",
"cphy_csid0" },
@@ -350,7 +351,7 @@ static const struct resources csid_res_660[] = {
/* CSID1 */
{
- .regulator = { "vdda", "vdd_sec" },
+ .regulators = { "vdda", "vdd_sec" },
.clock = { "top_ahb", "ispif_ahb", "csi1_ahb", "ahb",
"csi1", "csi1_phy", "csi1_pix", "csi1_rdi",
"cphy_csid1" },
@@ -370,7 +371,7 @@ static const struct resources csid_res_660[] = {
/* CSID2 */
{
- .regulator = { "vdda", "vdd_sec" },
+ .regulators = { "vdda", "vdd_sec" },
.clock = { "top_ahb", "ispif_ahb", "csi2_ahb", "ahb",
"csi2", "csi2_phy", "csi2_pix", "csi2_rdi",
"cphy_csid2" },
@@ -390,7 +391,7 @@ static const struct resources csid_res_660[] = {
/* CSID3 */
{
- .regulator = { "vdda", "vdd_sec" },
+ .regulators = { "vdda", "vdd_sec" },
.clock = { "top_ahb", "ispif_ahb", "csi3_ahb", "ahb",
"csi3", "csi3_phy", "csi3_pix", "csi3_rdi",
"cphy_csid3" },
@@ -424,7 +425,7 @@ static const struct resources_ispif ispif_res_660 = {
static const struct resources vfe_res_660[] = {
/* VFE0 */
{
- .regulator = { NULL },
+ .regulators = {},
.clock = { "throttle_axi", "top_ahb", "ahb", "vfe0",
"csi_vfe0", "vfe_ahb", "vfe0_ahb", "vfe_axi",
"vfe0_stream"},
@@ -445,7 +446,7 @@ static const struct resources vfe_res_660[] = {
/* VFE1 */
{
- .regulator = { NULL },
+ .regulators = {},
.clock = { "throttle_axi", "top_ahb", "ahb", "vfe1",
"csi_vfe1", "vfe_ahb", "vfe1_ahb", "vfe_axi",
"vfe1_stream"},
@@ -468,7 +469,7 @@ static const struct resources vfe_res_660[] = {
static const struct resources csiphy_res_845[] = {
/* CSIPHY0 */
{
- .regulator = { NULL },
+ .regulators = {},
.clock = { "camnoc_axi", "soc_ahb", "slow_ahb_src",
"cpas_ahb", "cphy_rx_src", "csiphy0",
"csiphy0_timer_src", "csiphy0_timer" },
@@ -486,7 +487,7 @@ static const struct resources csiphy_res_845[] = {
/* CSIPHY1 */
{
- .regulator = { NULL },
+ .regulators = {},
.clock = { "camnoc_axi", "soc_ahb", "slow_ahb_src",
"cpas_ahb", "cphy_rx_src", "csiphy1",
"csiphy1_timer_src", "csiphy1_timer" },
@@ -504,7 +505,7 @@ static const struct resources csiphy_res_845[] = {
/* CSIPHY2 */
{
- .regulator = { NULL },
+ .regulators = {},
.clock = { "camnoc_axi", "soc_ahb", "slow_ahb_src",
"cpas_ahb", "cphy_rx_src", "csiphy2",
"csiphy2_timer_src", "csiphy2_timer" },
@@ -522,7 +523,7 @@ static const struct resources csiphy_res_845[] = {
/* CSIPHY3 */
{
- .regulator = { NULL },
+ .regulators = {},
.clock = { "camnoc_axi", "soc_ahb", "slow_ahb_src",
"cpas_ahb", "cphy_rx_src", "csiphy3",
"csiphy3_timer_src", "csiphy3_timer" },
@@ -542,7 +543,7 @@ static const struct resources csiphy_res_845[] = {
static const struct resources csid_res_845[] = {
/* CSID0 */
{
- .regulator = { "vdda-csi0" },
+ .regulators = { "vdda-phy", "vdda-pll" },
.clock = { "cpas_ahb", "cphy_rx_src", "slow_ahb_src",
"soc_ahb", "vfe0", "vfe0_src",
"vfe0_cphy_rx", "csi0",
@@ -562,7 +563,7 @@ static const struct resources csid_res_845[] = {
/* CSID1 */
{
- .regulator = { "vdda-csi1" },
+ .regulators = { "vdda-phy", "vdda-pll" },
.clock = { "cpas_ahb", "cphy_rx_src", "slow_ahb_src",
"soc_ahb", "vfe1", "vfe1_src",
"vfe1_cphy_rx", "csi1",
@@ -582,7 +583,7 @@ static const struct resources csid_res_845[] = {
/* CSID2 */
{
- .regulator = { "vdda-csi2" },
+ .regulators = { "vdda-phy", "vdda-pll" },
.clock = { "cpas_ahb", "cphy_rx_src", "slow_ahb_src",
"soc_ahb", "vfe_lite", "vfe_lite_src",
"vfe_lite_cphy_rx", "csi2",
@@ -604,7 +605,7 @@ static const struct resources csid_res_845[] = {
static const struct resources vfe_res_845[] = {
/* VFE0 */
{
- .regulator = { NULL },
+ .regulators = {},
.clock = { "camnoc_axi", "cpas_ahb", "slow_ahb_src",
"soc_ahb", "vfe0", "vfe0_axi",
"vfe0_src", "csi0",
@@ -624,7 +625,7 @@ static const struct resources vfe_res_845[] = {
/* VFE1 */
{
- .regulator = { NULL },
+ .regulators = {},
.clock = { "camnoc_axi", "cpas_ahb", "slow_ahb_src",
"soc_ahb", "vfe1", "vfe1_axi",
"vfe1_src", "csi1",
@@ -644,7 +645,7 @@ static const struct resources vfe_res_845[] = {
/* VFE-lite */
{
- .regulator = { NULL },
+ .regulators = {},
.clock = { "camnoc_axi", "cpas_ahb", "slow_ahb_src",
"soc_ahb", "vfe_lite",
"vfe_lite_src", "csi2",
@@ -662,6 +663,208 @@ static const struct resources vfe_res_845[] = {
}
};
+static const struct resources csiphy_res_8250[] = {
+ /* CSIPHY0 */
+ {
+ .regulators = {},
+ .clock = { "csiphy0", "csiphy0_timer" },
+ .clock_rate = { { 400000000 },
+ { 300000000 } },
+ .reg = { "csiphy0" },
+ .interrupt = { "csiphy0" }
+ },
+ /* CSIPHY1 */
+ {
+ .regulators = {},
+ .clock = { "csiphy1", "csiphy1_timer" },
+ .clock_rate = { { 400000000 },
+ { 300000000 } },
+ .reg = { "csiphy1" },
+ .interrupt = { "csiphy1" }
+ },
+ /* CSIPHY2 */
+ {
+ .regulators = {},
+ .clock = { "csiphy2", "csiphy2_timer" },
+ .clock_rate = { { 400000000 },
+ { 300000000 } },
+ .reg = { "csiphy2" },
+ .interrupt = { "csiphy2" }
+ },
+ /* CSIPHY3 */
+ {
+ .regulators = {},
+ .clock = { "csiphy3", "csiphy3_timer" },
+ .clock_rate = { { 400000000 },
+ { 300000000 } },
+ .reg = { "csiphy3" },
+ .interrupt = { "csiphy3" }
+ },
+ /* CSIPHY4 */
+ {
+ .regulators = {},
+ .clock = { "csiphy4", "csiphy4_timer" },
+ .clock_rate = { { 400000000 },
+ { 300000000 } },
+ .reg = { "csiphy4" },
+ .interrupt = { "csiphy4" }
+ },
+ /* CSIPHY5 */
+ {
+ .regulators = {},
+ .clock = { "csiphy5", "csiphy5_timer" },
+ .clock_rate = { { 400000000 },
+ { 300000000 } },
+ .reg = { "csiphy5" },
+ .interrupt = { "csiphy5" }
+ }
+};
+
+static const struct resources csid_res_8250[] = {
+ /* CSID0 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "vfe0_csid", "vfe0_cphy_rx", "vfe0", "vfe0_areg", "vfe0_ahb" },
+ .clock_rate = { { 400000000 },
+ { 400000000 },
+ { 350000000, 475000000, 576000000, 720000000 },
+ { 100000000, 200000000, 300000000, 400000000 },
+ { 0 } },
+ .reg = { "csid0" },
+ .interrupt = { "csid0" }
+ },
+ /* CSID1 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "vfe1_csid", "vfe1_cphy_rx", "vfe1", "vfe1_areg", "vfe1_ahb" },
+ .clock_rate = { { 400000000 },
+ { 400000000 },
+ { 350000000, 475000000, 576000000, 720000000 },
+ { 100000000, 200000000, 300000000, 400000000 },
+ { 0 } },
+ .reg = { "csid1" },
+ .interrupt = { "csid1" }
+ },
+ /* CSID2 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "vfe_lite_csid", "vfe_lite_cphy_rx", "vfe_lite", "vfe_lite_ahb" },
+ .clock_rate = { { 400000000 },
+ { 400000000 },
+ { 400000000, 480000000 },
+ { 0 } },
+ .reg = { "csid2" },
+ .interrupt = { "csid2" }
+ },
+ /* CSID3 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "vfe_lite_csid", "vfe_lite_cphy_rx", "vfe_lite", "vfe_lite_ahb" },
+ .clock_rate = { { 400000000 },
+ { 400000000 },
+ { 400000000, 480000000 },
+ { 0 } },
+ .reg = { "csid3" },
+ .interrupt = { "csid3" }
+ }
+};
+
+static const struct resources vfe_res_8250[] = {
+ /* VFE0 */
+ {
+ .regulators = {},
+ .clock = { "camnoc_axi_src", "slow_ahb_src", "cpas_ahb",
+ "camnoc_axi", "vfe0_ahb", "vfe0_areg", "vfe0",
+ "vfe0_axi", "cam_hf_axi" },
+ .clock_rate = { { 19200000, 300000000, 400000000, 480000000 },
+ { 19200000, 80000000 },
+ { 19200000 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 300000000, 400000000 },
+ { 350000000, 475000000, 576000000, 720000000 },
+ { 0 },
+ { 0 } },
+ .reg = { "vfe0" },
+ .interrupt = { "vfe0" }
+ },
+ /* VFE1 */
+ {
+ .regulators = {},
+ .clock = { "camnoc_axi_src", "slow_ahb_src", "cpas_ahb",
+ "camnoc_axi", "vfe1_ahb", "vfe1_areg", "vfe1",
+ "vfe1_axi", "cam_hf_axi" },
+ .clock_rate = { { 19200000, 300000000, 400000000, 480000000 },
+ { 19200000, 80000000 },
+ { 19200000 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 300000000, 400000000 },
+ { 350000000, 475000000, 576000000, 720000000 },
+ { 0 },
+ { 0 } },
+ .reg = { "vfe1" },
+ .interrupt = { "vfe1" }
+ },
+ /* VFE2 (lite) */
+ {
+ .regulators = {},
+ .clock = { "camnoc_axi_src", "slow_ahb_src", "cpas_ahb",
+ "camnoc_axi", "vfe_lite_ahb", "vfe_lite_axi",
+ "vfe_lite", "cam_hf_axi" },
+ .clock_rate = { { 19200000, 300000000, 400000000, 480000000 },
+ { 19200000, 80000000 },
+ { 19200000 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 400000000, 480000000 },
+ { 0 } },
+ .reg = { "vfe_lite0" },
+ .interrupt = { "vfe_lite0" }
+ },
+ /* VFE3 (lite) */
+ {
+ .regulators = {},
+ .clock = { "camnoc_axi_src", "slow_ahb_src", "cpas_ahb",
+ "camnoc_axi", "vfe_lite_ahb", "vfe_lite_axi",
+ "vfe_lite", "cam_hf_axi" },
+ .clock_rate = { { 19200000, 300000000, 400000000, 480000000 },
+ { 19200000, 80000000 },
+ { 19200000 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 400000000, 480000000 },
+ { 0 } },
+ .reg = { "vfe_lite1" },
+ .interrupt = { "vfe_lite1" }
+ },
+};
+
+static const struct resources_icc icc_res_sm8250[] = {
+ {
+ .name = "cam_ahb",
+ .icc_bw_tbl.avg = 38400,
+ .icc_bw_tbl.peak = 76800,
+ },
+ {
+ .name = "cam_hf_0_mnoc",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 2097152,
+ },
+ {
+ .name = "cam_sf_0_mnoc",
+ .icc_bw_tbl.avg = 0,
+ .icc_bw_tbl.peak = 2097152,
+ },
+ {
+ .name = "cam_sf_icp_mnoc",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 2097152,
+ },
+};
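/*
 * A note on units: the interconnect framework expresses bandwidth in kBps
 * (the unit icc_set_bw() takes), so the 2097152 values above correspond to
 * 2 GBps average/peak votes.
 */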
+
/*
* camss_add_clock_margin - Add margin to clock frequency rate
* @rate: Clock frequency rate
@@ -832,7 +1035,7 @@ static int camss_of_parse_endpoint_node(struct device *dev,
struct camss_async_subdev *csd)
{
struct csiphy_lanes_cfg *lncfg = &csd->interface.csi2.lane_cfg;
- struct v4l2_fwnode_bus_mipi_csi2 *mipi_csi2;
+ struct v4l2_mbus_config_mipi_csi2 *mipi_csi2;
struct v4l2_fwnode_endpoint vep = { { 0 } };
unsigned int i;
@@ -945,6 +1148,12 @@ static int camss_init_subdevices(struct camss *camss)
/* Titan VFEs don't have an ISPIF */
ispif_res = NULL;
vfe_res = vfe_res_845;
+ } else if (camss->version == CAMSS_8250) {
+ csiphy_res = csiphy_res_8250;
+ csid_res = csid_res_8250;
+ /* Titan VFEs don't have an ISPIF */
+ ispif_res = NULL;
+ vfe_res = vfe_res_8250;
} else {
return -EINVAL;
}
@@ -960,6 +1169,17 @@ static int camss_init_subdevices(struct camss *camss)
}
}
+ /* note: SM8250 requires VFE to be initialized before CSID */
+ for (i = 0; i < camss->vfe_num; i++) {
+ ret = msm_vfe_subdev_init(camss, &camss->vfe[i],
+ &vfe_res[i], i);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Fail to init vfe%d sub-device: %d\n", i, ret);
+ return ret;
+ }
+ }
+
for (i = 0; i < camss->csid_num; i++) {
ret = msm_csid_subdev_init(camss, &camss->csid[i],
&csid_res[i], i);
@@ -978,16 +1198,6 @@ static int camss_init_subdevices(struct camss *camss)
return ret;
}
- for (i = 0; i < camss->vfe_num; i++) {
- ret = msm_vfe_subdev_init(camss, &camss->vfe[i],
- &vfe_res[i], i);
- if (ret < 0) {
- dev_err(camss->dev,
- "Fail to init vfe%d sub-device: %d\n", i, ret);
- return ret;
- }
- }
-
return 0;
}
@@ -1250,7 +1460,8 @@ static int camss_configure_pd(struct camss *camss)
if (camss->version == CAMSS_8x96 ||
camss->version == CAMSS_660)
nbr_pm_domains = PM_DOMAIN_GEN1_COUNT;
- else if (camss->version == CAMSS_845)
+ else if (camss->version == CAMSS_845 ||
+ camss->version == CAMSS_8250)
nbr_pm_domains = PM_DOMAIN_GEN2_COUNT;
for (i = 0; i < nbr_pm_domains; i++) {
@@ -1283,6 +1494,29 @@ fail_pm:
return ret;
}
+static int camss_icc_get(struct camss *camss)
+{
+ const struct resources_icc *icc_res;
+ int nbr_icc_paths = 0;
+ int i;
+
+ if (camss->version == CAMSS_8250) {
+ icc_res = &icc_res_sm8250[0];
+ nbr_icc_paths = ICC_SM8250_COUNT;
+ }
+
+ for (i = 0; i < nbr_icc_paths; i++) {
+ camss->icc_path[i] = devm_of_icc_get(camss->dev,
+ icc_res[i].name);
+ if (IS_ERR(camss->icc_path[i]))
+ return PTR_ERR(camss->icc_path[i]);
+
+ camss->icc_bw_tbl[i] = icc_res[i].icc_bw_tbl;
+ }
+
+ return 0;
+}
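/*
 * Worth noting: devm_of_icc_get() returns an ERR_PTR on failure, including
 * -EPROBE_DEFER when the interconnect provider has not registered yet, so
 * the PTR_ERR() return above lets probe deferral propagate to the caller.
 */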
+
/*
* camss_probe - Probe CAMSS platform device
* @pdev: Pointer to CAMSS platform device
@@ -1326,6 +1560,12 @@ static int camss_probe(struct platform_device *pdev)
camss->csiphy_num = 4;
camss->csid_num = 3;
camss->vfe_num = 3;
+ } else if (of_device_is_compatible(dev->of_node,
+ "qcom,sm8250-camss")) {
+ camss->version = CAMSS_8250;
+ camss->csiphy_num = 6;
+ camss->csid_num = 4;
+ camss->vfe_num = 4;
} else {
ret = -EINVAL;
goto err_free;
@@ -1369,6 +1609,10 @@ static int camss_probe(struct platform_device *pdev)
goto err_cleanup;
}
+ ret = camss_icc_get(camss);
+ if (ret < 0)
+ goto err_cleanup;
+
ret = camss_init_subdevices(camss);
if (ret < 0)
goto err_cleanup;
@@ -1457,7 +1701,8 @@ void camss_delete(struct camss *camss)
if (camss->version == CAMSS_8x96 ||
camss->version == CAMSS_660)
nbr_pm_domains = PM_DOMAIN_GEN1_COUNT;
- else if (camss->version == CAMSS_845)
+ else if (camss->version == CAMSS_845 ||
+ camss->version == CAMSS_8250)
nbr_pm_domains = PM_DOMAIN_GEN2_COUNT;
for (i = 0; i < nbr_pm_domains; i++) {
@@ -1493,6 +1738,7 @@ static const struct of_device_id camss_dt_match[] = {
{ .compatible = "qcom,msm8996-camss" },
{ .compatible = "qcom,sdm660-camss" },
{ .compatible = "qcom,sdm845-camss" },
+ { .compatible = "qcom,sm8250-camss" },
{ }
};
@@ -1500,11 +1746,41 @@ MODULE_DEVICE_TABLE(of, camss_dt_match);
static int __maybe_unused camss_runtime_suspend(struct device *dev)
{
+ struct camss *camss = dev_get_drvdata(dev);
+ int nbr_icc_paths = 0;
+ int i;
+ int ret;
+
+ if (camss->version == CAMSS_8250)
+ nbr_icc_paths = ICC_SM8250_COUNT;
+
+ for (i = 0; i < nbr_icc_paths; i++) {
+ ret = icc_set_bw(camss->icc_path[i], 0, 0);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
static int __maybe_unused camss_runtime_resume(struct device *dev)
{
+ struct camss *camss = dev_get_drvdata(dev);
+ int nbr_icc_paths = 0;
+ int i;
+ int ret;
+
+ if (camss->version == CAMSS_8250)
+ nbr_icc_paths = ICC_SM8250_COUNT;
+
+ for (i = 0; i < nbr_icc_paths; i++) {
+ ret = icc_set_bw(camss->icc_path[i],
+ camss->icc_bw_tbl[i].avg,
+ camss->icc_bw_tbl[i].peak);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/media/platform/qcom/camss/camss.h b/drivers/media/platform/qcom/camss/camss.h
index dc8b4154f92b..c9b3e0df5be8 100644
--- a/drivers/media/platform/qcom/camss/camss.h
+++ b/drivers/media/platform/qcom/camss/camss.h
@@ -42,7 +42,7 @@
#define CAMSS_RES_MAX 17
struct resources {
- char *regulator[CAMSS_RES_MAX];
+ char *regulators[CAMSS_RES_MAX];
char *clock[CAMSS_RES_MAX];
u32 clock_rate[CAMSS_RES_MAX][CAMSS_RES_MAX];
char *reg[CAMSS_RES_MAX];
@@ -56,6 +56,16 @@ struct resources_ispif {
char *interrupt;
};
+struct icc_bw_tbl {
+ u32 avg;
+ u32 peak;
+};
+
+struct resources_icc {
+ char *name;
+ struct icc_bw_tbl icc_bw_tbl;
+};
+
enum pm_domain {
PM_DOMAIN_VFE0 = 0,
PM_DOMAIN_VFE1 = 1,
@@ -69,6 +79,12 @@ enum camss_version {
CAMSS_8x96,
CAMSS_660,
CAMSS_845,
+ CAMSS_8250,
+};
+
+enum icc_count {
+ ICC_DEFAULT_COUNT = 0,
+ ICC_SM8250_COUNT = 4,
};
struct camss {
@@ -87,6 +103,8 @@ struct camss {
atomic_t ref_count;
struct device *genpd[PM_DOMAIN_GEN2_COUNT];
struct device_link *genpd_link[PM_DOMAIN_GEN2_COUNT];
+ struct icc_path *icc_path[ICC_SM8250_COUNT];
+ struct icc_bw_tbl icc_bw_tbl[ICC_SM8250_COUNT];
};
struct camss_camera_interface {
diff --git a/drivers/media/platform/qcom/venus/Kconfig b/drivers/media/platform/qcom/venus/Kconfig
new file mode 100644
index 000000000000..bfd50e8f3421
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/Kconfig
@@ -0,0 +1,14 @@
+config VIDEO_QCOM_VENUS
+ tristate "Qualcomm Venus V4L2 encoder/decoder driver"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV && QCOM_SMEM
+ depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
+ select QCOM_MDT_LOADER if ARCH_QCOM
+ select QCOM_SCM
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ This is a V4L2 driver for Qualcomm Venus video accelerator
+ hardware. It accelerates encoding and decoding operations
+ on various Qualcomm SoCs.
+ To compile this driver as a module, choose M here.
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 7c3bac01cd49..c3023340d95c 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -127,6 +127,7 @@ struct venus_format {
* @done: a completion for sync HFI operations
* @error: an error returned during last HFI sync operations
* @sys_error: an error flag that signals a system error event
+ * @sys_err_done: a waitqueue to wait for system error recovery end
* @core_ops: the core operations
* @pm_ops: a pointer to pm operations
* @pm_lock: a lock for PM operations
@@ -346,6 +347,7 @@ enum venus_inst_modes {
* @width: current capture width
* @height: current capture height
* @crop: current crop rectangle
+ * @fw_min_cnt: firmware minimum buffer count
* @out_width: current output width
* @out_height: current output height
* @colorspace: current color space
@@ -390,6 +392,8 @@ enum venus_inst_modes {
* @pic_struct: bitstream progressive vs interlaced
* @next_buf_last: a flag to mark next queued capture buffer as last
* @drain_active: Drain sequence is in progress
+ * @flags: bitmask flags describing current instance mode
+ * @dpb_ids: DPB buffer IDs
*/
struct venus_inst {
struct list_head list;
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 84c3a511ec31..0bca95d01650 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -189,7 +189,6 @@ int venus_helper_alloc_dpb_bufs(struct venus_inst *inst)
buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL,
buf->attrs);
if (!buf->va) {
- kfree(buf);
ret = -ENOMEM;
goto fail;
}
@@ -209,6 +208,7 @@ int venus_helper_alloc_dpb_bufs(struct venus_inst *inst)
return 0;
fail:
+ kfree(buf);
venus_helper_free_dpb_bufs(inst);
return ret;
}
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
index 5aea07307e02..4ecd444050bb 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.c
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
@@ -1054,6 +1054,8 @@ static int pkt_session_set_property_1x(struct hfi_session_set_property_pkt *pkt,
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*info);
break;
}
+ case HFI_PROPERTY_PARAM_VENC_HDR10_PQ_SEI:
+ return -ENOTSUPP;
/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index 84bafc3118cc..adea4c3b8c20 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -662,8 +662,8 @@ static int venc_set_properties(struct venus_inst *inst)
ptype = HFI_PROPERTY_PARAM_VENC_H264_TRANSFORM_8X8;
h264_transform.enable_type = 0;
- if (ctr->profile.h264 == HFI_H264_PROFILE_HIGH ||
- ctr->profile.h264 == HFI_H264_PROFILE_CONSTRAINED_HIGH)
+ if (ctr->profile.h264 == V4L2_MPEG_VIDEO_H264_PROFILE_HIGH ||
+ ctr->profile.h264 == V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH)
h264_transform.enable_type = ctr->h264_8x8_transform;
ret = hfi_session_set_property(inst, ptype, &h264_transform);
diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c
index 1ada42df314d..ea5805e71c14 100644
--- a/drivers/media/platform/qcom/venus/venc_ctrls.c
+++ b/drivers/media/platform/qcom/venus/venc_ctrls.c
@@ -320,8 +320,8 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
ctr->intra_refresh_period = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
- if (ctr->profile.h264 != HFI_H264_PROFILE_HIGH &&
- ctr->profile.h264 != HFI_H264_PROFILE_CONSTRAINED_HIGH)
+ if (ctr->profile.h264 != V4L2_MPEG_VIDEO_H264_PROFILE_HIGH &&
+ ctr->profile.h264 != V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH)
return -EINVAL;
/*
@@ -457,7 +457,7 @@ int venc_ctrl_init(struct venus_inst *inst)
V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP, 1, 51, 1, 1);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM, 0, 1, 1, 0);
+ V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM, 0, 1, 1, 1);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP, 1, 51, 1, 1);
diff --git a/drivers/media/platform/renesas/Kconfig b/drivers/media/platform/renesas/Kconfig
new file mode 100644
index 000000000000..9fd90672ea2d
--- /dev/null
+++ b/drivers/media/platform/renesas/Kconfig
@@ -0,0 +1,121 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Renesas media platform drivers"
+
+# V4L drivers
+
+config VIDEO_RENESAS_CEU
+ tristate "Renesas Capture Engine Unit (CEU) driver"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_SHMOBILE || ARCH_R7S72100 || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ help
+ This is a V4L2 driver for the Renesas CEU interface.
+
+config VIDEO_RCAR_ISP
+ tristate "R-Car Image Signal Processor (ISP)"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select RESET_CONTROLLER
+ select V4L2_FWNODE
+ help
+ Enable this to support the Renesas R-Car Image Signal
+ Processor (ISP).
+
+ To compile this driver as a module, choose M here: the
+ module will be called rcar-isp.
+
+config VIDEO_SH_VOU
+ tristate "SuperH VOU video output driver"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && I2C
+ depends on ARCH_SHMOBILE || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ Support for the Video Output Unit (VOU) on SuperH SoCs.
+
+source "drivers/media/platform/renesas/rcar-vin/Kconfig"
+
+# Mem2mem drivers
+
+config VIDEO_RENESAS_FCP
+ tristate "Renesas Frame Compression Processor"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on OF
+ help
+ This is a driver for the Renesas Frame Compression Processor (FCP).
+ The FCP is a companion module of video processing modules in the
+ Renesas R-Car Gen3 and RZ/G2 SoCs. It handles memory access for
+ the codec, VSP and FDP modules.
+
+ To compile this driver as a module, choose M here: the module
+ will be called rcar-fcp.
+
+config VIDEO_RENESAS_FDP1
+ tristate "Renesas Fine Display Processor"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on (!ARM64 && !VIDEO_RENESAS_FCP) || VIDEO_RENESAS_FCP
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ This is a V4L2 driver for the Renesas Fine Display Processor
+ providing colour space conversion and de-interlacing features.
+
+ To compile this driver as a module, choose M here: the module
+ will be called rcar_fdp1.
+
+config VIDEO_RENESAS_JPU
+ tristate "Renesas JPEG Processing Unit"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ This is a V4L2 driver for the Renesas JPEG Processing Unit.
+
+ To compile this driver as a module, choose M here: the module
+ will be called rcar_jpu.
+
+config VIDEO_RENESAS_VSP1
+ tristate "Renesas VSP1 Video Processing Engine"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on (!ARM64 && !VIDEO_RENESAS_FCP) || VIDEO_RENESAS_FCP
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
+ help
+ This is a V4L2 driver for the Renesas VSP1 video processing engine.
+
+ To compile this driver as a module, choose M here: the module
+ will be called vsp1.
+
+# SDR drivers
+
+config VIDEO_RCAR_DRIF
+ tristate "Renesas Digital Radio Interface (DRIF)"
+ depends on SDR_PLATFORM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select VIDEOBUF2_VMALLOC
+ select V4L2_ASYNC
+ help
+ Say Y if you want to enable R-Car Gen3 DRIF support. The DRIF is a
+ digital radio interface that connects to an RF front-end chip. It is
+ a receiver of digital data which uses DMA to transfer received data
+ to a configured location for an application to use.
+
+ To compile this driver as a module, choose M here; the module
+ will be called rcar_drif.
diff --git a/drivers/media/platform/renesas/Makefile b/drivers/media/platform/renesas/Makefile
new file mode 100644
index 000000000000..3ec226ef5fd2
--- /dev/null
+++ b/drivers/media/platform/renesas/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Renesas capture/playback device drivers.
+#
+
+obj-y += rcar-vin/
+obj-y += vsp1/
+
+obj-$(CONFIG_VIDEO_RCAR_DRIF) += rcar_drif.o
+obj-$(CONFIG_VIDEO_RCAR_ISP) += rcar-isp.o
+obj-$(CONFIG_VIDEO_RENESAS_CEU) += renesas-ceu.o
+obj-$(CONFIG_VIDEO_RENESAS_FCP) += rcar-fcp.o
+obj-$(CONFIG_VIDEO_RENESAS_FDP1) += rcar_fdp1.o
+obj-$(CONFIG_VIDEO_RENESAS_JPU) += rcar_jpu.o
+obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o
diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/renesas/rcar-fcp.c
index eb59a3ba6d0f..eb59a3ba6d0f 100644
--- a/drivers/media/platform/rcar-fcp.c
+++ b/drivers/media/platform/renesas/rcar-fcp.c
diff --git a/drivers/media/platform/rcar-isp.c b/drivers/media/platform/renesas/rcar-isp.c
index 2ffab30bc011..10b3474f93a4 100644
--- a/drivers/media/platform/rcar-isp.c
+++ b/drivers/media/platform/renesas/rcar-isp.c
@@ -17,6 +17,7 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <media/mipi-csi2.h>
#include <media/v4l2-subdev.h>
#define ISPINPUTSEL0_REG 0x0008
@@ -51,12 +52,31 @@ struct rcar_isp_format {
};
static const struct rcar_isp_format rcar_isp_formats[] = {
- { .code = MEDIA_BUS_FMT_RGB888_1X24, .datatype = 0x24, .procmode = 0x15 },
- { .code = MEDIA_BUS_FMT_Y10_1X10, .datatype = 0x2b, .procmode = 0x10 },
- { .code = MEDIA_BUS_FMT_UYVY8_1X16, .datatype = 0x1e, .procmode = 0x0c },
- { .code = MEDIA_BUS_FMT_YUYV8_1X16, .datatype = 0x1e, .procmode = 0x0c },
- { .code = MEDIA_BUS_FMT_UYVY8_2X8, .datatype = 0x1e, .procmode = 0x0c },
- { .code = MEDIA_BUS_FMT_YUYV10_2X10, .datatype = 0x1e, .procmode = 0x0c },
+ {
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .datatype = MIPI_CSI2_DT_RGB888,
+ .procmode = 0x15
+ }, {
+ .code = MEDIA_BUS_FMT_Y10_1X10,
+ .datatype = MIPI_CSI2_DT_RAW10,
+ .procmode = 0x10,
+ }, {
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .datatype = MIPI_CSI2_DT_YUV422_8B,
+ .procmode = 0x0c,
+ }, {
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .datatype = MIPI_CSI2_DT_YUV422_8B,
+ .procmode = 0x0c,
+ }, {
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .datatype = MIPI_CSI2_DT_YUV422_8B,
+ .procmode = 0x0c,
+ }, {
+ .code = MEDIA_BUS_FMT_YUYV10_2X10,
+ .datatype = MIPI_CSI2_DT_YUV422_8B,
+ .procmode = 0x0c,
+ },
};
static const struct rcar_isp_format *risp_code_to_fmt(unsigned int code)
diff --git a/drivers/media/platform/rcar-vin/Kconfig b/drivers/media/platform/renesas/rcar-vin/Kconfig
index 030312d862e7..de55fe63d84c 100644
--- a/drivers/media/platform/rcar-vin/Kconfig
+++ b/drivers/media/platform/renesas/rcar-vin/Kconfig
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
config VIDEO_RCAR_CSI2
tristate "R-Car MIPI CSI-2 Receiver"
- depends on VIDEO_V4L2 && OF
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF
depends on ARCH_RENESAS || COMPILE_TEST
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
@@ -16,7 +17,8 @@ config VIDEO_RCAR_CSI2
config VIDEO_RCAR_VIN
tristate "R-Car Video Input (VIN) Driver"
- depends on VIDEO_V4L2 && OF
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF
depends on ARCH_RENESAS || COMPILE_TEST
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
diff --git a/drivers/media/platform/rcar-vin/Makefile b/drivers/media/platform/renesas/rcar-vin/Makefile
index 00d809f5d2c1..00d809f5d2c1 100644
--- a/drivers/media/platform/rcar-vin/Makefile
+++ b/drivers/media/platform/renesas/rcar-vin/Makefile
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
index 0186ae235113..64cb05b3907c 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
@@ -743,27 +743,6 @@ static int rvin_parallel_init(struct rvin_dev *vin)
* CSI-2
*/
-static unsigned int rvin_csi2_get_mask(struct rvin_dev *vin,
- enum rvin_csi_id csi_id,
- unsigned char channel)
-{
- const struct rvin_group_route *route;
- unsigned int mask = 0;
-
- for (route = vin->info->routes; route->mask; route++) {
- if (route->vin == vin->id &&
- route->csi == csi_id &&
- route->channel == channel) {
- vin_dbg(vin,
- "Adding route: vin: %d csi: %d channel: %d\n",
- route->vin, route->csi, route->channel);
- mask |= route->mask;
- }
- }
-
- return mask;
-}
-
/*
* Link setup for the links between a VIN and a CSI-2 receiver is a bit
* complex. The reason for this is that the register controlling routing
@@ -793,12 +772,10 @@ static int rvin_csi2_link_notify(struct media_link *link, u32 flags,
{
struct rvin_group *group = container_of(link->graph_obj.mdev,
struct rvin_group, mdev);
- unsigned int master_id, channel, mask_new, i;
- unsigned int mask = ~0;
struct media_entity *entity;
struct video_device *vdev;
- struct media_pad *csi_pad;
- struct rvin_dev *vin = NULL;
+ struct rvin_dev *vin;
+ unsigned int i;
int csi_id, ret;
ret = v4l2_pipeline_link_notify(link, flags, notification);
@@ -816,41 +793,16 @@ static int rvin_csi2_link_notify(struct media_link *link, u32 flags,
* running streams.
*/
media_device_for_each_entity(entity, &group->mdev)
- if (entity->stream_count)
+ if (media_entity_is_streaming(entity))
return -EBUSY;
- mutex_lock(&group->lock);
-
/* Find the master VIN that controls the routes. */
vdev = media_entity_to_video_device(link->sink->entity);
vin = container_of(vdev, struct rvin_dev, vdev);
- master_id = rvin_group_id_to_master(vin->id);
- if (WARN_ON(!group->vin[master_id])) {
- ret = -ENODEV;
- goto out;
- }
-
- /* Build a mask for already enabled links. */
- for (i = master_id; i < master_id + 4; i++) {
- if (!group->vin[i])
- continue;
-
- /* Get remote CSI-2, if any. */
- csi_pad = media_entity_remote_pad(
- &group->vin[i]->vdev.entity.pads[0]);
- if (!csi_pad)
- continue;
-
- csi_id = rvin_group_entity_to_remote_id(group, csi_pad->entity);
- channel = rvin_group_csi_pad_to_channel(csi_pad->index);
-
- mask &= rvin_csi2_get_mask(group->vin[i], csi_id, channel);
- }
+ mutex_lock(&group->lock);
- /* Add the new link to the existing mask and check if it works. */
csi_id = rvin_group_entity_to_remote_id(group, link->source->entity);
-
if (csi_id == -ENODEV) {
struct v4l2_subdev *sd;
@@ -875,25 +827,58 @@ static int rvin_csi2_link_notify(struct media_link *link, u32 flags,
vin_err(vin, "Subdevice %s not registered to any VIN\n",
link->source->entity->name);
ret = -ENODEV;
- goto out;
- }
+ } else {
+ const struct rvin_group_route *route;
+ unsigned int chsel = UINT_MAX;
+ unsigned int master_id;
- channel = rvin_group_csi_pad_to_channel(link->source->index);
- mask_new = mask & rvin_csi2_get_mask(vin, csi_id, channel);
- vin_dbg(vin, "Try link change mask: 0x%x new: 0x%x\n", mask, mask_new);
+ master_id = rvin_group_id_to_master(vin->id);
- if (!mask_new) {
- ret = -EMLINK;
- goto out;
- }
+ if (WARN_ON(!group->vin[master_id])) {
+ ret = -ENODEV;
+ goto out;
+ }
- /* New valid CHSEL found, set the new value. */
- ret = rvin_set_channel_routing(group->vin[master_id], __ffs(mask_new));
- if (ret)
- goto out;
+ /* Make sure group is connected to same CSI-2 */
+ for (i = master_id; i < master_id + 4; i++) {
+ struct media_pad *csi_pad;
+
+ if (!group->vin[i])
+ continue;
+
+ /* Get remote CSI-2, if any. */
+ csi_pad = media_entity_remote_pad(
+ &group->vin[i]->vdev.entity.pads[0]);
+ if (!csi_pad)
+ continue;
+
+ if (csi_pad->entity != link->source->entity) {
+ vin_dbg(vin, "Already attached to %s\n",
+ csi_pad->entity->name);
+ ret = -EBUSY;
+ goto out;
+ }
+ }
+
+ for (route = vin->info->routes; route->chsel; route++) {
+ if (route->master == master_id && route->csi == csi_id) {
+ chsel = route->chsel;
+ break;
+ }
+ }
- vin->is_csi = true;
+ if (chsel == UINT_MAX) {
+ vin_err(vin, "No CHSEL value found\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = rvin_set_channel_routing(group->vin[master_id], chsel);
+ if (ret)
+ goto out;
+ vin->is_csi = true;
+ }
out:
mutex_unlock(&group->lock);
@@ -904,48 +889,60 @@ static const struct media_device_ops rvin_csi2_media_ops = {
.link_notify = rvin_csi2_link_notify,
};
+static int rvin_csi2_create_link(struct rvin_group *group, unsigned int id,
+ const struct rvin_group_route *route)
+{
+ struct media_entity *source = &group->remotes[route->csi].subdev->entity;
+ struct media_entity *sink = &group->vin[id]->vdev.entity;
+ struct media_pad *sink_pad = &sink->pads[0];
+ unsigned int channel;
+ int ret;
+
+ for (channel = 0; channel < 4; channel++) {
+ unsigned int source_idx = rvin_group_csi_channel_to_pad(channel);
+ struct media_pad *source_pad = &source->pads[source_idx];
+
+ /* Skip if link already exists. */
+ if (media_entity_find_link(source_pad, sink_pad))
+ continue;
+
+ ret = media_create_pad_link(source, source_idx, sink, 0, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int rvin_csi2_setup_links(struct rvin_dev *vin)
{
const struct rvin_group_route *route;
+ unsigned int id;
int ret = -EINVAL;
/* Create all media device links between VINs and CSI-2's. */
mutex_lock(&vin->group->lock);
- for (route = vin->info->routes; route->mask; route++) {
- struct media_pad *source_pad, *sink_pad;
- struct media_entity *source, *sink;
- unsigned int source_idx;
-
- /* Check that VIN is part of the group. */
- if (!vin->group->vin[route->vin])
- continue;
-
+ for (route = vin->info->routes; route->chsel; route++) {
/* Check that VIN' master is part of the group. */
- if (!vin->group->vin[rvin_group_id_to_master(route->vin)])
+ if (!vin->group->vin[route->master])
continue;
/* Check that CSI-2 is part of the group. */
if (!vin->group->remotes[route->csi].subdev)
continue;
- source = &vin->group->remotes[route->csi].subdev->entity;
- source_idx = rvin_group_csi_channel_to_pad(route->channel);
- source_pad = &source->pads[source_idx];
-
- sink = &vin->group->vin[route->vin]->vdev.entity;
- sink_pad = &sink->pads[0];
-
- /* Skip if link already exists. */
- if (media_entity_find_link(source_pad, sink_pad))
- continue;
+ for (id = route->master; id < route->master + 4; id++) {
+ /* Check that VIN is part of the group. */
+ if (!vin->group->vin[id])
+ continue;
- ret = media_create_pad_link(source, source_idx, sink, 0, 0);
- if (ret) {
- vin_err(vin, "Error adding link from %s to %s\n",
- source->name, sink->name);
- break;
+ ret = rvin_csi2_create_link(vin->group, id, route);
+ if (ret)
+ goto out;
}
}
+out:
mutex_unlock(&vin->group->lock);
return ret;
@@ -1155,30 +1152,9 @@ static const struct rvin_info rcar_info_gen2 = {
};
static const struct rvin_group_route rcar_info_r8a774e1_routes[] = {
- { .csi = RVIN_CSI40, .channel = 0, .vin = 0, .mask = BIT(0) | BIT(3) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 0, .mask = BIT(1) | BIT(4) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 0, .mask = BIT(2) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 1, .mask = BIT(0) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 1, .mask = BIT(1) | BIT(3) },
- { .csi = RVIN_CSI40, .channel = 0, .vin = 1, .mask = BIT(2) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 1, .mask = BIT(4) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 2, .mask = BIT(0) },
- { .csi = RVIN_CSI40, .channel = 0, .vin = 2, .mask = BIT(1) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 2, .mask = BIT(2) },
- { .csi = RVIN_CSI40, .channel = 2, .vin = 2, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 2, .vin = 2, .mask = BIT(4) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 3, .mask = BIT(0) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 3, .mask = BIT(1) | BIT(2) },
- { .csi = RVIN_CSI40, .channel = 3, .vin = 3, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 3, .vin = 3, .mask = BIT(4) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 4, .mask = BIT(1) | BIT(4) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 5, .mask = BIT(0) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 5, .mask = BIT(4) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 6, .mask = BIT(0) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 6, .mask = BIT(2) },
- { .csi = RVIN_CSI20, .channel = 2, .vin = 6, .mask = BIT(4) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 7, .mask = BIT(1) | BIT(2) },
- { .csi = RVIN_CSI20, .channel = 3, .vin = 7, .mask = BIT(4) },
+ { .master = 0, .csi = RVIN_CSI20, .chsel = 0x04 },
+ { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 },
+ { .master = 4, .csi = RVIN_CSI20, .chsel = 0x04 },
{ /* Sentinel */ }
};
@@ -1191,38 +1167,10 @@ static const struct rvin_info rcar_info_r8a774e1 = {
};
static const struct rvin_group_route rcar_info_r8a7795_routes[] = {
- { .csi = RVIN_CSI40, .channel = 0, .vin = 0, .mask = BIT(0) | BIT(3) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 0, .mask = BIT(1) | BIT(4) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 0, .mask = BIT(2) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 1, .mask = BIT(0) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 1, .mask = BIT(1) | BIT(3) },
- { .csi = RVIN_CSI40, .channel = 0, .vin = 1, .mask = BIT(2) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 1, .mask = BIT(4) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 2, .mask = BIT(0) },
- { .csi = RVIN_CSI40, .channel = 0, .vin = 2, .mask = BIT(1) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 2, .mask = BIT(2) },
- { .csi = RVIN_CSI40, .channel = 2, .vin = 2, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 2, .vin = 2, .mask = BIT(4) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 3, .mask = BIT(0) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 3, .mask = BIT(1) | BIT(2) },
- { .csi = RVIN_CSI40, .channel = 3, .vin = 3, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 3, .vin = 3, .mask = BIT(4) },
- { .csi = RVIN_CSI41, .channel = 0, .vin = 4, .mask = BIT(0) | BIT(3) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 4, .mask = BIT(1) | BIT(4) },
- { .csi = RVIN_CSI41, .channel = 1, .vin = 4, .mask = BIT(2) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 5, .mask = BIT(0) },
- { .csi = RVIN_CSI41, .channel = 1, .vin = 5, .mask = BIT(1) | BIT(3) },
- { .csi = RVIN_CSI41, .channel = 0, .vin = 5, .mask = BIT(2) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 5, .mask = BIT(4) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 6, .mask = BIT(0) },
- { .csi = RVIN_CSI41, .channel = 0, .vin = 6, .mask = BIT(1) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 6, .mask = BIT(2) },
- { .csi = RVIN_CSI41, .channel = 2, .vin = 6, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 2, .vin = 6, .mask = BIT(4) },
- { .csi = RVIN_CSI41, .channel = 1, .vin = 7, .mask = BIT(0) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 7, .mask = BIT(1) | BIT(2) },
- { .csi = RVIN_CSI41, .channel = 3, .vin = 7, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 3, .vin = 7, .mask = BIT(4) },
+ { .master = 0, .csi = RVIN_CSI20, .chsel = 0x04 },
+ { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 },
+ { .master = 4, .csi = RVIN_CSI20, .chsel = 0x04 },
+ { .master = 4, .csi = RVIN_CSI41, .chsel = 0x03 },
{ /* Sentinel */ }
};
@@ -1236,48 +1184,12 @@ static const struct rvin_info rcar_info_r8a7795 = {
};
static const struct rvin_group_route rcar_info_r8a7795es1_routes[] = {
- { .csi = RVIN_CSI40, .channel = 0, .vin = 0, .mask = BIT(0) | BIT(3) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 0, .mask = BIT(1) | BIT(4) },
- { .csi = RVIN_CSI21, .channel = 0, .vin = 0, .mask = BIT(2) | BIT(5) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 1, .mask = BIT(0) },
- { .csi = RVIN_CSI21, .channel = 0, .vin = 1, .mask = BIT(1) },
- { .csi = RVIN_CSI40, .channel = 0, .vin = 1, .mask = BIT(2) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 1, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 1, .mask = BIT(4) },
- { .csi = RVIN_CSI21, .channel = 1, .vin = 1, .mask = BIT(5) },
- { .csi = RVIN_CSI21, .channel = 0, .vin = 2, .mask = BIT(0) },
- { .csi = RVIN_CSI40, .channel = 0, .vin = 2, .mask = BIT(1) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 2, .mask = BIT(2) },
- { .csi = RVIN_CSI40, .channel = 2, .vin = 2, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 2, .vin = 2, .mask = BIT(4) },
- { .csi = RVIN_CSI21, .channel = 2, .vin = 2, .mask = BIT(5) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 3, .mask = BIT(0) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 3, .mask = BIT(1) },
- { .csi = RVIN_CSI21, .channel = 1, .vin = 3, .mask = BIT(2) },
- { .csi = RVIN_CSI40, .channel = 3, .vin = 3, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 3, .vin = 3, .mask = BIT(4) },
- { .csi = RVIN_CSI21, .channel = 3, .vin = 3, .mask = BIT(5) },
- { .csi = RVIN_CSI41, .channel = 0, .vin = 4, .mask = BIT(0) | BIT(3) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 4, .mask = BIT(1) | BIT(4) },
- { .csi = RVIN_CSI21, .channel = 0, .vin = 4, .mask = BIT(2) | BIT(5) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 5, .mask = BIT(0) },
- { .csi = RVIN_CSI21, .channel = 0, .vin = 5, .mask = BIT(1) },
- { .csi = RVIN_CSI41, .channel = 0, .vin = 5, .mask = BIT(2) },
- { .csi = RVIN_CSI41, .channel = 1, .vin = 5, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 5, .mask = BIT(4) },
- { .csi = RVIN_CSI21, .channel = 1, .vin = 5, .mask = BIT(5) },
- { .csi = RVIN_CSI21, .channel = 0, .vin = 6, .mask = BIT(0) },
- { .csi = RVIN_CSI41, .channel = 0, .vin = 6, .mask = BIT(1) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 6, .mask = BIT(2) },
- { .csi = RVIN_CSI41, .channel = 2, .vin = 6, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 2, .vin = 6, .mask = BIT(4) },
- { .csi = RVIN_CSI21, .channel = 2, .vin = 6, .mask = BIT(5) },
- { .csi = RVIN_CSI41, .channel = 1, .vin = 7, .mask = BIT(0) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 7, .mask = BIT(1) },
- { .csi = RVIN_CSI21, .channel = 1, .vin = 7, .mask = BIT(2) },
- { .csi = RVIN_CSI41, .channel = 3, .vin = 7, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 3, .vin = 7, .mask = BIT(4) },
- { .csi = RVIN_CSI21, .channel = 3, .vin = 7, .mask = BIT(5) },
+ { .master = 0, .csi = RVIN_CSI20, .chsel = 0x04 },
+ { .master = 0, .csi = RVIN_CSI21, .chsel = 0x05 },
+ { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 },
+ { .master = 4, .csi = RVIN_CSI20, .chsel = 0x04 },
+ { .master = 4, .csi = RVIN_CSI21, .chsel = 0x05 },
+ { .master = 4, .csi = RVIN_CSI41, .chsel = 0x03 },
{ /* Sentinel */ }
};
@@ -1290,34 +1202,10 @@ static const struct rvin_info rcar_info_r8a7795es1 = {
};
static const struct rvin_group_route rcar_info_r8a7796_routes[] = {
- { .csi = RVIN_CSI40, .channel = 0, .vin = 0, .mask = BIT(0) | BIT(3) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 0, .mask = BIT(1) | BIT(4) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 1, .mask = BIT(0) },
- { .csi = RVIN_CSI40, .channel = 0, .vin = 1, .mask = BIT(2) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 1, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 1, .mask = BIT(4) },
- { .csi = RVIN_CSI40, .channel = 0, .vin = 2, .mask = BIT(1) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 2, .mask = BIT(2) },
- { .csi = RVIN_CSI40, .channel = 2, .vin = 2, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 2, .vin = 2, .mask = BIT(4) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 3, .mask = BIT(0) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 3, .mask = BIT(1) },
- { .csi = RVIN_CSI40, .channel = 3, .vin = 3, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 3, .vin = 3, .mask = BIT(4) },
- { .csi = RVIN_CSI40, .channel = 0, .vin = 4, .mask = BIT(0) | BIT(3) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 4, .mask = BIT(1) | BIT(4) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 5, .mask = BIT(0) },
- { .csi = RVIN_CSI40, .channel = 0, .vin = 5, .mask = BIT(2) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 5, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 5, .mask = BIT(4) },
- { .csi = RVIN_CSI40, .channel = 0, .vin = 6, .mask = BIT(1) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 6, .mask = BIT(2) },
- { .csi = RVIN_CSI40, .channel = 2, .vin = 6, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 2, .vin = 6, .mask = BIT(4) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 7, .mask = BIT(0) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 7, .mask = BIT(1) },
- { .csi = RVIN_CSI40, .channel = 3, .vin = 7, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 3, .vin = 7, .mask = BIT(4) },
+ { .master = 0, .csi = RVIN_CSI20, .chsel = 0x04 },
+ { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 },
+ { .master = 4, .csi = RVIN_CSI20, .chsel = 0x04 },
+ { .master = 4, .csi = RVIN_CSI40, .chsel = 0x03 },
{ /* Sentinel */ }
};
@@ -1331,38 +1219,10 @@ static const struct rvin_info rcar_info_r8a7796 = {
};
static const struct rvin_group_route rcar_info_r8a77965_routes[] = {
- { .csi = RVIN_CSI40, .channel = 0, .vin = 0, .mask = BIT(0) | BIT(3) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 0, .mask = BIT(1) | BIT(4) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 0, .mask = BIT(2) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 1, .mask = BIT(0) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 1, .mask = BIT(1) | BIT(3) },
- { .csi = RVIN_CSI40, .channel = 0, .vin = 1, .mask = BIT(2) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 1, .mask = BIT(4) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 2, .mask = BIT(0) },
- { .csi = RVIN_CSI40, .channel = 0, .vin = 2, .mask = BIT(1) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 2, .mask = BIT(2) },
- { .csi = RVIN_CSI40, .channel = 2, .vin = 2, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 2, .vin = 2, .mask = BIT(4) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 3, .mask = BIT(0) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 3, .mask = BIT(1) | BIT(2) },
- { .csi = RVIN_CSI40, .channel = 3, .vin = 3, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 3, .vin = 3, .mask = BIT(4) },
- { .csi = RVIN_CSI40, .channel = 0, .vin = 4, .mask = BIT(0) | BIT(3) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 4, .mask = BIT(1) | BIT(4) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 4, .mask = BIT(2) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 5, .mask = BIT(0) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 5, .mask = BIT(1) | BIT(3) },
- { .csi = RVIN_CSI40, .channel = 0, .vin = 5, .mask = BIT(2) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 5, .mask = BIT(4) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 6, .mask = BIT(0) },
- { .csi = RVIN_CSI40, .channel = 0, .vin = 6, .mask = BIT(1) },
- { .csi = RVIN_CSI20, .channel = 0, .vin = 6, .mask = BIT(2) },
- { .csi = RVIN_CSI40, .channel = 2, .vin = 6, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 2, .vin = 6, .mask = BIT(4) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 7, .mask = BIT(0) },
- { .csi = RVIN_CSI20, .channel = 1, .vin = 7, .mask = BIT(1) | BIT(2) },
- { .csi = RVIN_CSI40, .channel = 3, .vin = 7, .mask = BIT(3) },
- { .csi = RVIN_CSI20, .channel = 3, .vin = 7, .mask = BIT(4) },
+ { .master = 0, .csi = RVIN_CSI20, .chsel = 0x04 },
+ { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 },
+ { .master = 4, .csi = RVIN_CSI20, .chsel = 0x04 },
+ { .master = 4, .csi = RVIN_CSI40, .chsel = 0x03 },
{ /* Sentinel */ }
};
@@ -1376,13 +1236,7 @@ static const struct rvin_info rcar_info_r8a77965 = {
};
static const struct rvin_group_route rcar_info_r8a77970_routes[] = {
- { .csi = RVIN_CSI40, .channel = 0, .vin = 0, .mask = BIT(0) | BIT(3) },
- { .csi = RVIN_CSI40, .channel = 0, .vin = 1, .mask = BIT(2) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 1, .mask = BIT(3) },
- { .csi = RVIN_CSI40, .channel = 0, .vin = 2, .mask = BIT(1) },
- { .csi = RVIN_CSI40, .channel = 2, .vin = 2, .mask = BIT(3) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 3, .mask = BIT(0) },
- { .csi = RVIN_CSI40, .channel = 3, .vin = 3, .mask = BIT(3) },
+ { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 },
{ /* Sentinel */ }
};
@@ -1395,22 +1249,8 @@ static const struct rvin_info rcar_info_r8a77970 = {
};
static const struct rvin_group_route rcar_info_r8a77980_routes[] = {
- { .csi = RVIN_CSI40, .channel = 0, .vin = 0, .mask = BIT(0) | BIT(3) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 0, .mask = BIT(2) },
- { .csi = RVIN_CSI40, .channel = 0, .vin = 1, .mask = BIT(2) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 1, .mask = BIT(1) | BIT(3) },
- { .csi = RVIN_CSI40, .channel = 0, .vin = 2, .mask = BIT(1) },
- { .csi = RVIN_CSI40, .channel = 2, .vin = 2, .mask = BIT(3) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 3, .mask = BIT(0) },
- { .csi = RVIN_CSI40, .channel = 3, .vin = 3, .mask = BIT(3) },
- { .csi = RVIN_CSI41, .channel = 0, .vin = 4, .mask = BIT(0) | BIT(3) },
- { .csi = RVIN_CSI41, .channel = 1, .vin = 4, .mask = BIT(2) },
- { .csi = RVIN_CSI41, .channel = 0, .vin = 5, .mask = BIT(2) },
- { .csi = RVIN_CSI41, .channel = 1, .vin = 5, .mask = BIT(1) | BIT(3) },
- { .csi = RVIN_CSI41, .channel = 0, .vin = 6, .mask = BIT(1) },
- { .csi = RVIN_CSI41, .channel = 2, .vin = 6, .mask = BIT(3) },
- { .csi = RVIN_CSI41, .channel = 1, .vin = 7, .mask = BIT(0) },
- { .csi = RVIN_CSI41, .channel = 3, .vin = 7, .mask = BIT(3) },
+ { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 },
+ { .master = 4, .csi = RVIN_CSI41, .chsel = 0x03 },
{ /* Sentinel */ }
};
@@ -1424,10 +1264,7 @@ static const struct rvin_info rcar_info_r8a77980 = {
};
static const struct rvin_group_route rcar_info_r8a77990_routes[] = {
- { .csi = RVIN_CSI40, .channel = 0, .vin = 4, .mask = BIT(0) | BIT(3) },
- { .csi = RVIN_CSI40, .channel = 0, .vin = 5, .mask = BIT(2) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 4, .mask = BIT(2) },
- { .csi = RVIN_CSI40, .channel = 1, .vin = 5, .mask = BIT(1) | BIT(3) },
+ { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 },
{ /* Sentinel */ }
};
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
index 8c939cb3073d..fea8f00a9152 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
@@ -17,6 +17,7 @@
#include <linux/reset.h>
#include <linux/sys_soc.h>
+#include <media/mipi-csi2.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -412,17 +413,51 @@ struct rcar_csi2_format {
};
static const struct rcar_csi2_format rcar_csi2_formats[] = {
- { .code = MEDIA_BUS_FMT_RGB888_1X24, .datatype = 0x24, .bpp = 24 },
- { .code = MEDIA_BUS_FMT_UYVY8_1X16, .datatype = 0x1e, .bpp = 16 },
- { .code = MEDIA_BUS_FMT_YUYV8_1X16, .datatype = 0x1e, .bpp = 16 },
- { .code = MEDIA_BUS_FMT_UYVY8_2X8, .datatype = 0x1e, .bpp = 16 },
- { .code = MEDIA_BUS_FMT_YUYV10_2X10, .datatype = 0x1e, .bpp = 20 },
- { .code = MEDIA_BUS_FMT_Y10_1X10, .datatype = 0x2b, .bpp = 10 },
- { .code = MEDIA_BUS_FMT_SBGGR8_1X8, .datatype = 0x2a, .bpp = 8 },
- { .code = MEDIA_BUS_FMT_SGBRG8_1X8, .datatype = 0x2a, .bpp = 8 },
- { .code = MEDIA_BUS_FMT_SGRBG8_1X8, .datatype = 0x2a, .bpp = 8 },
- { .code = MEDIA_BUS_FMT_SRGGB8_1X8, .datatype = 0x2a, .bpp = 8 },
- { .code = MEDIA_BUS_FMT_Y8_1X8, .datatype = 0x2a, .bpp = 8 },
+ {
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .datatype = MIPI_CSI2_DT_RGB888,
+ .bpp = 24,
+ }, {
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .datatype = MIPI_CSI2_DT_YUV422_8B,
+ .bpp = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .datatype = MIPI_CSI2_DT_YUV422_8B,
+ .bpp = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .datatype = MIPI_CSI2_DT_YUV422_8B,
+ .bpp = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_YUYV10_2X10,
+ .datatype = MIPI_CSI2_DT_YUV422_8B,
+ .bpp = 20,
+ }, {
+ .code = MEDIA_BUS_FMT_Y10_1X10,
+ .datatype = MIPI_CSI2_DT_RAW10,
+ .bpp = 10,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .datatype = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .datatype = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .datatype = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .datatype = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .datatype = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ },
};
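/*
 * For reference, a minimal sketch of the lookup this table feeds; the
 * actual rcsi2_code_to_fmt() below may differ in detail:
 */
static const struct rcar_csi2_format *rcsi2_code_to_fmt_sketch(unsigned int code)
{
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(rcar_csi2_formats); i++)
        if (rcar_csi2_formats[i].code == code)
            return &rcar_csi2_formats[i];

    return NULL;
}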
static const struct rcar_csi2_format *rcsi2_code_to_fmt(unsigned int code)
@@ -468,6 +503,8 @@ struct rcar_csi2 {
struct v4l2_subdev *remote;
unsigned int remote_pad;
+ int channel_vc[4];
+
struct mutex lock; /* Protects mf and stream_count. */
struct v4l2_mbus_framefmt mf;
int stream_count;
@@ -603,7 +640,6 @@ static int rcsi2_get_active_lanes(struct rcar_csi2 *priv,
unsigned int *lanes)
{
struct v4l2_mbus_config mbus_config = { 0 };
- unsigned int num_lanes = UINT_MAX;
int ret;
*lanes = priv->lanes;
@@ -626,23 +662,14 @@ static int rcsi2_get_active_lanes(struct rcar_csi2 *priv,
return -EINVAL;
}
- if (mbus_config.flags & V4L2_MBUS_CSI2_1_LANE)
- num_lanes = 1;
- else if (mbus_config.flags & V4L2_MBUS_CSI2_2_LANE)
- num_lanes = 2;
- else if (mbus_config.flags & V4L2_MBUS_CSI2_3_LANE)
- num_lanes = 3;
- else if (mbus_config.flags & V4L2_MBUS_CSI2_4_LANE)
- num_lanes = 4;
-
- if (num_lanes > priv->lanes) {
+ if (mbus_config.bus.mipi_csi2.num_data_lanes > priv->lanes) {
dev_err(priv->dev,
"Unsupported mbus config: too many data lanes %u\n",
- num_lanes);
+ mbus_config.bus.mipi_csi2.num_data_lanes);
return -EINVAL;
}
- *lanes = num_lanes;
+ *lanes = mbus_config.bus.mipi_csi2.num_data_lanes;
return 0;
}
@@ -675,8 +702,11 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
for (i = 0; i < priv->info->num_channels; i++) {
u32 vcdt_part;
- vcdt_part = VCDT_SEL_VC(i) | VCDT_VCDTN_EN | VCDT_SEL_DTN_ON |
- VCDT_SEL_DT(format->datatype);
+ if (priv->channel_vc[i] < 0)
+ continue;
+
+ vcdt_part = VCDT_SEL_VC(priv->channel_vc[i]) | VCDT_VCDTN_EN |
+ VCDT_SEL_DTN_ON | VCDT_SEL_DT(format->datatype);
/* Store in correct reg and offset. */
if (i < 2)
@@ -1258,7 +1288,52 @@ static int rcsi2_init_phtw_v3u(struct rcar_csi2 *priv,
* Platform Device Driver.
*/
+static int rcsi2_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct rcar_csi2 *priv = sd_to_csi2(sd);
+ struct video_device *vdev;
+ int channel, vc;
+ u32 id;
+
+ if (!is_media_entity_v4l2_video_device(remote->entity)) {
+ dev_err(priv->dev, "Remote is not a video device\n");
+ return -EINVAL;
+ }
+
+ vdev = media_entity_to_video_device(remote->entity);
+
+ if (of_property_read_u32(vdev->dev_parent->of_node, "renesas,id", &id)) {
+ dev_err(priv->dev, "No renesas,id, can't configure routing\n");
+ return -EINVAL;
+ }
+
+ channel = id % 4;
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (media_entity_remote_pad(local)) {
+ dev_dbg(priv->dev,
+ "Each VC can only be routed to one output channel\n");
+ return -EINVAL;
+ }
+
+ vc = local->index - 1;
+
+ dev_dbg(priv->dev, "Route VC%d to VIN%u on output channel %d\n",
+ vc, id, channel);
+ } else {
+ vc = -1;
+ }
+
+ priv->channel_vc[channel] = vc;
+
+ return 0;
+}
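/*
 * The channel_vc[] values recorded here are consumed in
 * rcsi2_start_receiver() earlier in this patch: channels left at -1 are
 * skipped, the others are programmed via VCDT_SEL_VC(priv->channel_vc[i]).
 */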
+
static const struct media_entity_operations rcar_csi2_entity_ops = {
+ .link_setup = rcsi2_link_setup,
.link_validate = v4l2_subdev_link_validate,
};
@@ -1414,7 +1489,7 @@ static const struct soc_device_attribute r8a7795[] = {
.soc_id = "r8a7795", .revision = "ES2.*",
.data = &rcar_csi2_info_r8a7795es2,
},
- { /* sentinel */ },
+ { /* sentinel */ }
};
static int rcsi2_probe(struct platform_device *pdev)
@@ -1477,6 +1552,9 @@ static int rcsi2_probe(struct platform_device *pdev)
if (ret)
goto error_async;
+ for (i = 0; i < ARRAY_SIZE(priv->channel_vc); i++)
+ priv->channel_vc[i] = -1;
+
pm_runtime_enable(&pdev->dev);
ret = v4l2_async_register_subdev(&priv->subdev);
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
index 8136bc75e7c4..2272f1c96aaf 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
@@ -1507,7 +1507,7 @@ int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel)
* register. IFMD_DES1 controls data expansion mode for CSI20/21,
* IFMD_DES0 controls data expansion mode for CSI40/41.
*/
- for (route = vin->info->routes; route->mask; route++) {
+ for (route = vin->info->routes; route->chsel; route++) {
if (route->csi == RVIN_CSI20 || route->csi == RVIN_CSI21)
ifmd |= VNCSI_IFMD_DES1;
else
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
index 2e60b9fce03b..2e60b9fce03b 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
diff --git a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
index 6c06320174a2..1f94589d9ef1 100644
--- a/drivers/media/platform/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
@@ -118,7 +118,7 @@ struct rvin_parallel_entity {
struct v4l2_subdev *subdev;
enum v4l2_mbus_type mbus_type;
- struct v4l2_fwnode_bus_parallel bus;
+ struct v4l2_mbus_config_parallel bus;
unsigned int source_pad;
unsigned int sink_pad;
@@ -128,11 +128,9 @@ struct rvin_parallel_entity {
* struct rvin_group_route - describes a route from a channel of a
* CSI-2 receiver to a VIN
*
+ * @master: VIN group master ID.
* @csi: CSI-2 receiver ID.
- * @channel: Output channel of the CSI-2 receiver.
- * @vin: VIN ID.
- * @mask: Bitmask of the different CHSEL register values that
- * allow for a route from @csi + @chan to @vin.
+ * @chsel: CHSEL register value that connects the VIN group to the CSI-2 receiver.
*
* .. note::
* Each R-Car CSI-2 receiver has four output channels facing the VIN
@@ -140,19 +138,11 @@ struct rvin_parallel_entity {
* There is no correlation between channel number and CSI-2 VC. It's
* up to the CSI-2 receiver driver to configure which VC is output
* on which channel; the VIN devices only care about output channels.
- *
- * There are in some cases multiple CHSEL register settings which would
- * allow for the same route from @csi + @channel to @vin. For example
- * on R-Car H3 both the CHSEL values 0 and 3 allow for a route from
- * CSI40/VC0 to VIN0. All possible CHSEL values for a route need to be
- * recorded as a bitmask in @mask, in this example bit 0 and 3 should
- * be set.
*/
struct rvin_group_route {
+ unsigned int master;
enum rvin_csi_id csi;
- unsigned int channel;
- unsigned int vin;
- unsigned int mask;
+ unsigned int chsel;
};
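/*
 * Illustrative only, mirroring the tables reworked in rcar-core.c above:
 * one entry per (master, CSI-2 receiver) pair plus a sentinel.
 */
static const struct rvin_group_route example_routes[] = {
    { .master = 0, .csi = RVIN_CSI20, .chsel = 0x04 },
    { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 },
    { /* Sentinel */ }
};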
/**
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/renesas/rcar_drif.c
index 9a0982fa5c6b..9a0982fa5c6b 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/renesas/rcar_drif.c
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/renesas/rcar_fdp1.c
index 37ecf489d112..37ecf489d112 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/renesas/rcar_fdp1.c
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/renesas/rcar_jpu.c
index 56bb464629ed..293beba131e2 100644
--- a/drivers/media/platform/rcar_jpu.c
+++ b/drivers/media/platform/renesas/rcar_jpu.c
@@ -4,7 +4,7 @@
* Copyright (C) 2014-2015 Cogent Embedded, Inc. <source@cogentembedded.com>
* Copyright (C) 2014-2015 Renesas Electronics Corporation
*
- * This is based on the drivers/media/platform/s5p-jpeg driver by
+ * This is based on the drivers/media/platform/samsung/s5p-jpeg driver by
* Andrzej Pietrasiewicz and Jacek Anaszewski.
* Some portions of code inspired by VSP1 driver by Laurent Pinchart.
*
diff --git a/drivers/media/platform/renesas-ceu.c b/drivers/media/platform/renesas/renesas-ceu.c
index 2e8dbacc414e..2e8dbacc414e 100644
--- a/drivers/media/platform/renesas-ceu.c
+++ b/drivers/media/platform/renesas/renesas-ceu.c
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/renesas/sh_vou.c
index ca4310e26c49..ca4310e26c49 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/renesas/sh_vou.c
diff --git a/drivers/media/platform/vsp1/Makefile b/drivers/media/platform/renesas/vsp1/Makefile
index 4bb4dcbef7b5..4bb4dcbef7b5 100644
--- a/drivers/media/platform/vsp1/Makefile
+++ b/drivers/media/platform/renesas/vsp1/Makefile
diff --git a/drivers/media/platform/vsp1/vsp1.h b/drivers/media/platform/renesas/vsp1/vsp1.h
index 37cf33c7e6ca..37cf33c7e6ca 100644
--- a/drivers/media/platform/vsp1/vsp1.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1.h
diff --git a/drivers/media/platform/vsp1/vsp1_brx.c b/drivers/media/platform/renesas/vsp1/vsp1_brx.c
index 89385b4cabe5..89385b4cabe5 100644
--- a/drivers/media/platform/vsp1/vsp1_brx.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_brx.c
diff --git a/drivers/media/platform/vsp1/vsp1_brx.h b/drivers/media/platform/renesas/vsp1/vsp1_brx.h
index 6abbb8c3343c..6abbb8c3343c 100644
--- a/drivers/media/platform/vsp1/vsp1_brx.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_brx.h
diff --git a/drivers/media/platform/vsp1/vsp1_clu.c b/drivers/media/platform/renesas/vsp1/vsp1_clu.c
index c5217fee24f1..c5217fee24f1 100644
--- a/drivers/media/platform/vsp1/vsp1_clu.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_clu.c
diff --git a/drivers/media/platform/vsp1/vsp1_clu.h b/drivers/media/platform/renesas/vsp1/vsp1_clu.h
index cef2f44481ba..cef2f44481ba 100644
--- a/drivers/media/platform/vsp1/vsp1_clu.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_clu.h
diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/renesas/vsp1/vsp1_dl.c
index ad3fa1c9cc73..ad3fa1c9cc73 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_dl.c
diff --git a/drivers/media/platform/vsp1/vsp1_dl.h b/drivers/media/platform/renesas/vsp1/vsp1_dl.h
index bebe16483ca5..bebe16483ca5 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_dl.h
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/renesas/vsp1/vsp1_drm.c
index 0c2507dc03d6..0c2507dc03d6 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drm.c
diff --git a/drivers/media/platform/vsp1/vsp1_drm.h b/drivers/media/platform/renesas/vsp1/vsp1_drm.h
index ab8b7e3161a2..ab8b7e3161a2 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drm.h
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
index c9044785b903..502c7d9d6890 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
@@ -550,6 +550,16 @@ static int vsp1_device_init(struct vsp1_device *vsp1)
return 0;
}
+static void vsp1_mask_all_interrupts(struct vsp1_device *vsp1)
+{
+ unsigned int i;
+
+ for (i = 0; i < vsp1->info->lif_count; ++i)
+ vsp1_write(vsp1, VI6_DISP_IRQ_ENB(i), 0);
+ for (i = 0; i < vsp1->info->wpf_count; ++i)
+ vsp1_write(vsp1, VI6_WPF_IRQ_ENB(i), 0);
+}
+
/*
* vsp1_device_get - Acquire the VSP1 device
*
@@ -794,9 +804,9 @@ static int vsp1_probe(struct platform_device *pdev)
{
struct vsp1_device *vsp1;
struct device_node *fcp_node;
- struct resource *irq;
unsigned int i;
int ret;
+ int irq;
vsp1 = devm_kzalloc(&pdev->dev, sizeof(*vsp1), GFP_KERNEL);
if (vsp1 == NULL)
@@ -813,18 +823,9 @@ static int vsp1_probe(struct platform_device *pdev)
if (IS_ERR(vsp1->mmio))
return PTR_ERR(vsp1->mmio);
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq) {
- dev_err(&pdev->dev, "missing IRQ\n");
- return -EINVAL;
- }
-
- ret = devm_request_irq(&pdev->dev, irq->start, vsp1_irq_handler,
- IRQF_SHARED, dev_name(&pdev->dev), vsp1);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to request IRQ\n");
- return ret;
- }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
/* FCP (optional). */
fcp_node = of_parse_phandle(pdev->dev.of_node, "renesas,fcp", 0);
@@ -855,7 +856,6 @@ static int vsp1_probe(struct platform_device *pdev)
goto done;
vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION);
- vsp1_device_put(vsp1);
for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) {
if ((vsp1->version & VI6_IP_VERSION_MODEL_MASK) ==
@@ -868,12 +868,31 @@ static int vsp1_probe(struct platform_device *pdev)
if (!vsp1->info) {
dev_err(&pdev->dev, "unsupported IP version 0x%08x\n",
vsp1->version);
+ vsp1_device_put(vsp1);
ret = -ENXIO;
goto done;
}
dev_dbg(&pdev->dev, "IP version 0x%08x\n", vsp1->version);
+ /*
+ * Previous use of the hardware (e.g. by the bootloader) could leave
+ * some interrupts enabled and pending.
+ *
+ * TODO: Investigate if this shouldn't be better handled by using the
+ * device reset provided by the CPG.
+ */
+ vsp1_mask_all_interrupts(vsp1);
+
+ vsp1_device_put(vsp1);
+
+ ret = devm_request_irq(&pdev->dev, irq, vsp1_irq_handler,
+ IRQF_SHARED, dev_name(&pdev->dev), vsp1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ goto done;
+ }
+
/* Instantiate entities. */
ret = vsp1_create_entities(vsp1);
if (ret < 0) {
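
This probe conversion combines two changes: platform_get_irq() replaces the open-coded platform_get_resource(IORESOURCE_IRQ) lookup (it already prints an error and returns a negative errno, so the local error message goes away), and the IRQ is requested only after vsp1_mask_all_interrupts() has cleared anything the bootloader left enabled, since a shared handler may be invoked as soon as it is installed. The idiom in isolation, as a sketch with hypothetical names rather than the vsp1 code itself:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t sketch_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int sketch_probe(struct platform_device *pdev)
{
	int irq, ret;

	irq = platform_get_irq(pdev, 0);	/* logs on failure */
	if (irq < 0)
		return irq;

	/* Quiesce the hardware first: a shared IRQ may fire immediately. */

	ret = devm_request_irq(&pdev->dev, irq, sketch_irq_handler,
			       IRQF_SHARED, dev_name(&pdev->dev), pdev);
	if (ret < 0)
		dev_err(&pdev->dev, "failed to request IRQ\n");

	return ret;
}
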
diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/renesas/vsp1/vsp1_entity.c
index 823c15facd1b..823c15facd1b 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_entity.c
diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/renesas/vsp1/vsp1_entity.h
index f22724439cdc..f22724439cdc 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_entity.h
diff --git a/drivers/media/platform/vsp1/vsp1_hgo.c b/drivers/media/platform/renesas/vsp1/vsp1_hgo.c
index bf3f981f93a1..bf3f981f93a1 100644
--- a/drivers/media/platform/vsp1/vsp1_hgo.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_hgo.c
diff --git a/drivers/media/platform/vsp1/vsp1_hgo.h b/drivers/media/platform/renesas/vsp1/vsp1_hgo.h
index 6b0c8580e1bf..6b0c8580e1bf 100644
--- a/drivers/media/platform/vsp1/vsp1_hgo.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_hgo.h
diff --git a/drivers/media/platform/vsp1/vsp1_hgt.c b/drivers/media/platform/renesas/vsp1/vsp1_hgt.c
index aa1c718e0453..aa1c718e0453 100644
--- a/drivers/media/platform/vsp1/vsp1_hgt.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_hgt.c
diff --git a/drivers/media/platform/vsp1/vsp1_hgt.h b/drivers/media/platform/renesas/vsp1/vsp1_hgt.h
index 38ec237bdd2d..38ec237bdd2d 100644
--- a/drivers/media/platform/vsp1/vsp1_hgt.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_hgt.h
diff --git a/drivers/media/platform/vsp1/vsp1_histo.c b/drivers/media/platform/renesas/vsp1/vsp1_histo.c
index 5e5013d2cd2a..5e5013d2cd2a 100644
--- a/drivers/media/platform/vsp1/vsp1_histo.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_histo.c
diff --git a/drivers/media/platform/vsp1/vsp1_histo.h b/drivers/media/platform/renesas/vsp1/vsp1_histo.h
index 06f029846244..06f029846244 100644
--- a/drivers/media/platform/vsp1/vsp1_histo.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_histo.h
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.c b/drivers/media/platform/renesas/vsp1/vsp1_hsit.c
index 361a870380c2..361a870380c2 100644
--- a/drivers/media/platform/vsp1/vsp1_hsit.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_hsit.c
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.h b/drivers/media/platform/renesas/vsp1/vsp1_hsit.h
index a658b1aa49e7..a658b1aa49e7 100644
--- a/drivers/media/platform/vsp1/vsp1_hsit.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_hsit.h
diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/renesas/vsp1/vsp1_lif.c
index 6a6857ac9327..6a6857ac9327 100644
--- a/drivers/media/platform/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_lif.c
diff --git a/drivers/media/platform/vsp1/vsp1_lif.h b/drivers/media/platform/renesas/vsp1/vsp1_lif.h
index 71a4eda9c2b2..71a4eda9c2b2 100644
--- a/drivers/media/platform/vsp1/vsp1_lif.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_lif.h
diff --git a/drivers/media/platform/vsp1/vsp1_lut.c b/drivers/media/platform/renesas/vsp1/vsp1_lut.c
index ac6802a325f5..ac6802a325f5 100644
--- a/drivers/media/platform/vsp1/vsp1_lut.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_lut.c
diff --git a/drivers/media/platform/vsp1/vsp1_lut.h b/drivers/media/platform/renesas/vsp1/vsp1_lut.h
index 8cb0df1b7e27..8cb0df1b7e27 100644
--- a/drivers/media/platform/vsp1/vsp1_lut.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_lut.h
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.c b/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
index f72ac01c21ea..f72ac01c21ea 100644
--- a/drivers/media/platform/vsp1/vsp1_pipe.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.h b/drivers/media/platform/renesas/vsp1/vsp1_pipe.h
index ae646c9ef337..ae646c9ef337 100644
--- a/drivers/media/platform/vsp1/vsp1_pipe.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_pipe.h
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/renesas/vsp1/vsp1_regs.h
index fae7286eb01e..fae7286eb01e 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_regs.h
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
index 85587c1b6a37..85587c1b6a37 100644
--- a/drivers/media/platform/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
index 22a82d218152..22a82d218152 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.h b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.h
index eac5c04c2239..eac5c04c2239 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.h
diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/renesas/vsp1/vsp1_sru.c
index b614a2aea461..b614a2aea461 100644
--- a/drivers/media/platform/vsp1/vsp1_sru.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_sru.c
diff --git a/drivers/media/platform/vsp1/vsp1_sru.h b/drivers/media/platform/renesas/vsp1/vsp1_sru.h
index ddb00eadd1ea..ddb00eadd1ea 100644
--- a/drivers/media/platform/vsp1/vsp1_sru.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_sru.h
diff --git a/drivers/media/platform/vsp1/vsp1_uds.c b/drivers/media/platform/renesas/vsp1/vsp1_uds.c
index 1c290cda005a..1c290cda005a 100644
--- a/drivers/media/platform/vsp1/vsp1_uds.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_uds.c
diff --git a/drivers/media/platform/vsp1/vsp1_uds.h b/drivers/media/platform/renesas/vsp1/vsp1_uds.h
index c34f95a666d2..c34f95a666d2 100644
--- a/drivers/media/platform/vsp1/vsp1_uds.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_uds.h
diff --git a/drivers/media/platform/vsp1/vsp1_uif.c b/drivers/media/platform/renesas/vsp1/vsp1_uif.c
index 83d7f17df80e..83d7f17df80e 100644
--- a/drivers/media/platform/vsp1/vsp1_uif.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_uif.c
diff --git a/drivers/media/platform/vsp1/vsp1_uif.h b/drivers/media/platform/renesas/vsp1/vsp1_uif.h
index c71ab5f6a6f8..c71ab5f6a6f8 100644
--- a/drivers/media/platform/vsp1/vsp1_uif.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_uif.h
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/renesas/vsp1/vsp1_video.c
index 044eb5778820..044eb5778820 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_video.c
diff --git a/drivers/media/platform/vsp1/vsp1_video.h b/drivers/media/platform/renesas/vsp1/vsp1_video.h
index f3cf5e2fdf5a..f3cf5e2fdf5a 100644
--- a/drivers/media/platform/vsp1/vsp1_video.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_video.h
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
index 94e91d7bb56c..94e91d7bb56c 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
diff --git a/drivers/media/platform/rockchip/Kconfig b/drivers/media/platform/rockchip/Kconfig
new file mode 100644
index 000000000000..b41d3960c1b4
--- /dev/null
+++ b/drivers/media/platform/rockchip/Kconfig
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Rockchip media platform drivers"
+
+source "drivers/media/platform/rockchip/rga/Kconfig"
+source "drivers/media/platform/rockchip/rkisp1/Kconfig"
diff --git a/drivers/media/platform/rockchip/Makefile b/drivers/media/platform/rockchip/Makefile
new file mode 100644
index 000000000000..4f782b876ac9
--- /dev/null
+++ b/drivers/media/platform/rockchip/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += rga/
+obj-y += rkisp1/
diff --git a/drivers/media/platform/rockchip/rga/Kconfig b/drivers/media/platform/rockchip/rga/Kconfig
new file mode 100644
index 000000000000..727a0f6ea466
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/Kconfig
@@ -0,0 +1,14 @@
+config VIDEO_ROCKCHIP_RGA
+ tristate "Rockchip Raster 2d Graphic Acceleration Unit"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ select VIDEOBUF2_DMA_SG
+ select V4L2_MEM2MEM_DEV
+ help
+ This is a v4l2 driver for the Rockchip SoC RGA 2D graphics accelerator.
+ Rockchip RGA is a separate 2D raster graphic acceleration unit.
+ It accelerates 2D graphics operations, such as point/line drawing,
+ image scaling, rotation, BitBLT, alpha blending and image blur/sharpness.
+
+ To compile this driver as a module, choose M here.
diff --git a/drivers/media/platform/rockchip/rkisp1/Kconfig b/drivers/media/platform/rockchip/rkisp1/Kconfig
new file mode 100644
index 000000000000..dabd7e42c193
--- /dev/null
+++ b/drivers/media/platform/rockchip/rkisp1/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_ROCKCHIP_ISP1
+ tristate "Rockchip Image Signal Processing v1 Unit driver"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
+ select V4L2_FWNODE
+ select GENERIC_PHY_MIPI_DPHY
+ default n
+ help
+ Enable this to support the Image Signal Processing (ISP) module
+ present in RK3399 SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called rockchip-isp1.
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
index 768987d5f2dd..fee2aaacb26b 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
@@ -249,7 +249,7 @@ static const struct rkisp1_capture_fmt_cfg rkisp1_sp_fmts[] = {
.fourcc = V4L2_PIX_FMT_GREY,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
- .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV400,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
},
/* rgb */
@@ -631,12 +631,26 @@ static void rkisp1_set_next_buf(struct rkisp1_capture *cap)
rkisp1_write(cap->rkisp1,
buff_addr[RKISP1_PLANE_Y],
cap->config->mi.y_base_ad_init);
- rkisp1_write(cap->rkisp1,
- buff_addr[RKISP1_PLANE_CB],
- cap->config->mi.cb_base_ad_init);
- rkisp1_write(cap->rkisp1,
- buff_addr[RKISP1_PLANE_CR],
- cap->config->mi.cr_base_ad_init);
+ /*
+ * To support the greyscale format we capture the YUV422
+ * planar format from the camera and set the U and V
+ * planes to the dummy buffer.
+ */
+ if (cap->pix.cfg->fourcc == V4L2_PIX_FMT_GREY) {
+ rkisp1_write(cap->rkisp1,
+ cap->buf.dummy.dma_addr,
+ cap->config->mi.cb_base_ad_init);
+ rkisp1_write(cap->rkisp1,
+ cap->buf.dummy.dma_addr,
+ cap->config->mi.cr_base_ad_init);
+ } else {
+ rkisp1_write(cap->rkisp1,
+ buff_addr[RKISP1_PLANE_CB],
+ cap->config->mi.cb_base_ad_init);
+ rkisp1_write(cap->rkisp1,
+ buff_addr[RKISP1_PLANE_CR],
+ cap->config->mi.cr_base_ad_init);
+ }
} else {
/*
* Use the dummy space allocated by dma_alloc_coherent to
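
Reduced to its three register writes, the GREY path above programs the ISP for planar YUV422 output, points the luma base address at the user buffer, and points both chroma base addresses at one throwaway DMA buffer so the unwanted chroma lands in scratch memory. A sketch using the driver's own rkisp1_write() accessor (the helper name here is hypothetical):

static void sketch_set_grey_bufs(struct rkisp1_capture *cap,
				 dma_addr_t y_addr)
{
	/* Only the luma plane reaches the V4L2 buffer. */
	rkisp1_write(cap->rkisp1, y_addr,
		     cap->config->mi.y_base_ad_init);
	/* Both chroma planes are written to the shared dummy buffer. */
	rkisp1_write(cap->rkisp1, cap->buf.dummy.dma_addr,
		     cap->config->mi.cb_base_ad_init);
	rkisp1_write(cap->rkisp1, cap->buf.dummy.dma_addr,
		     cap->config->mi.cr_base_ad_init);
}
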
diff --git a/drivers/media/platform/samsung/Kconfig b/drivers/media/platform/samsung/Kconfig
new file mode 100644
index 000000000000..0e34c5fc1dfc
--- /dev/null
+++ b/drivers/media/platform/samsung/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Samsung media platform drivers"
+
+source "drivers/media/platform/samsung/exynos-gsc/Kconfig"
+source "drivers/media/platform/samsung/exynos4-is/Kconfig"
+source "drivers/media/platform/samsung/s3c-camif/Kconfig"
+source "drivers/media/platform/samsung/s5p-g2d/Kconfig"
+source "drivers/media/platform/samsung/s5p-jpeg/Kconfig"
+source "drivers/media/platform/samsung/s5p-mfc/Kconfig"
diff --git a/drivers/media/platform/samsung/Makefile b/drivers/media/platform/samsung/Makefile
new file mode 100644
index 000000000000..21fea3330e4b
--- /dev/null
+++ b/drivers/media/platform/samsung/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += exynos-gsc/
+obj-y += exynos4-is/
+obj-y += s3c-camif/
+obj-y += s5p-g2d/
+obj-y += s5p-jpeg/
+obj-y += s5p-mfc/
diff --git a/drivers/media/platform/samsung/exynos-gsc/Kconfig b/drivers/media/platform/samsung/exynos-gsc/Kconfig
new file mode 100644
index 000000000000..7244d63c9646
--- /dev/null
+++ b/drivers/media/platform/samsung/exynos-gsc/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_SAMSUNG_EXYNOS_GSC
+ tristate "Samsung Exynos G-Scaler driver"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ This is a v4l2 driver for Samsung EXYNOS5 SoC G-Scaler.
diff --git a/drivers/media/platform/exynos-gsc/Makefile b/drivers/media/platform/samsung/exynos-gsc/Makefile
index bcefbad17a73..bcefbad17a73 100644
--- a/drivers/media/platform/exynos-gsc/Makefile
+++ b/drivers/media/platform/samsung/exynos-gsc/Makefile
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/samsung/exynos-gsc/gsc-core.c
index cfd6ae70b8d8..e3559b047092 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/samsung/exynos-gsc/gsc-core.c
@@ -1106,9 +1106,9 @@ MODULE_DEVICE_TABLE(of, exynos_gsc_match);
static int gsc_probe(struct platform_device *pdev)
{
struct gsc_dev *gsc;
- struct resource *res;
struct device *dev = &pdev->dev;
const struct gsc_driverdata *drv_data = of_device_get_match_data(dev);
+ int irq;
int ret;
int i;
@@ -1141,11 +1141,9 @@ static int gsc_probe(struct platform_device *pdev)
if (IS_ERR(gsc->regs))
return PTR_ERR(gsc->regs);
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "failed to get IRQ resource\n");
- return -ENXIO;
- }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
for (i = 0; i < gsc->num_clocks; i++) {
gsc->clock[i] = devm_clk_get(dev, drv_data->clk_names[i]);
@@ -1167,8 +1165,8 @@ static int gsc_probe(struct platform_device *pdev)
}
}
- ret = devm_request_irq(dev, res->start, gsc_irq_handler,
- 0, pdev->name, gsc);
+ ret = devm_request_irq(dev, irq, gsc_irq_handler,
+ 0, pdev->name, gsc);
if (ret) {
dev_err(dev, "failed to install irq (%d)\n", ret);
goto err_clk;
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/samsung/exynos-gsc/gsc-core.h
index e894e85e84a4..e894e85e84a4 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/samsung/exynos-gsc/gsc-core.h
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c
index f1cf847d1cc2..f1cf847d1cc2 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c
diff --git a/drivers/media/platform/exynos-gsc/gsc-regs.c b/drivers/media/platform/samsung/exynos-gsc/gsc-regs.c
index 995a1f0f875d..995a1f0f875d 100644
--- a/drivers/media/platform/exynos-gsc/gsc-regs.c
+++ b/drivers/media/platform/samsung/exynos-gsc/gsc-regs.c
diff --git a/drivers/media/platform/exynos-gsc/gsc-regs.h b/drivers/media/platform/samsung/exynos-gsc/gsc-regs.h
index d4f7ead6b322..d4f7ead6b322 100644
--- a/drivers/media/platform/exynos-gsc/gsc-regs.h
+++ b/drivers/media/platform/samsung/exynos-gsc/gsc-regs.h
diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/samsung/exynos4-is/Kconfig
index 136d3b2a0fbb..da33faa7132e 100644
--- a/drivers/media/platform/exynos4-is/Kconfig
+++ b/drivers/media/platform/samsung/exynos4-is/Kconfig
@@ -2,7 +2,8 @@
config VIDEO_SAMSUNG_EXYNOS4_IS
tristate "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver"
- depends on VIDEO_V4L2 && OF && COMMON_CLK
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF && COMMON_CLK
depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
diff --git a/drivers/media/platform/exynos4-is/Makefile b/drivers/media/platform/samsung/exynos4-is/Makefile
index a5ab01c73b95..a5ab01c73b95 100644
--- a/drivers/media/platform/exynos4-is/Makefile
+++ b/drivers/media/platform/samsung/exynos4-is/Makefile
diff --git a/drivers/media/platform/exynos4-is/common.c b/drivers/media/platform/samsung/exynos4-is/common.c
index 944b224eb621..023f624d29d5 100644
--- a/drivers/media/platform/exynos4-is/common.c
+++ b/drivers/media/platform/samsung/exynos4-is/common.c
@@ -10,7 +10,10 @@
#include <media/drv-intf/exynos-fimc.h>
#include "common.h"
-/* Called with the media graph mutex held or entity->stream_count > 0. */
+/*
+ * Called with the media graph mutex held or media_entity_is_streaming(entity)
+ * true.
+ */
struct v4l2_subdev *fimc_find_remote_sensor(struct media_entity *entity)
{
struct media_pad *pad = &entity->pads[0];
diff --git a/drivers/media/platform/exynos4-is/common.h b/drivers/media/platform/samsung/exynos4-is/common.h
index 0389b66e5144..0389b66e5144 100644
--- a/drivers/media/platform/exynos4-is/common.h
+++ b/drivers/media/platform/samsung/exynos4-is/common.h
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/samsung/exynos4-is/fimc-capture.c
index 7ff4024003f4..7ff4024003f4 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-capture.c
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/samsung/exynos4-is/fimc-core.c
index bfdee771cef9..91cc8d58a663 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-core.c
@@ -926,6 +926,7 @@ static int fimc_probe(struct platform_device *pdev)
struct fimc_dev *fimc;
struct resource *res;
int ret = 0;
+ int irq;
fimc = devm_kzalloc(dev, sizeof(*fimc), GFP_KERNEL);
if (!fimc)
@@ -965,11 +966,9 @@ static int fimc_probe(struct platform_device *pdev)
if (IS_ERR(fimc->regs))
return PTR_ERR(fimc->regs);
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
- dev_err(dev, "Failed to get IRQ resource\n");
- return -ENXIO;
- }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
ret = fimc_clk_get(fimc);
if (ret)
@@ -986,7 +985,7 @@ static int fimc_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- ret = devm_request_irq(dev, res->start, fimc_irq_handler,
+ ret = devm_request_irq(dev, irq, fimc_irq_handler,
0, dev_name(dev), fimc);
if (ret < 0) {
dev_err(dev, "failed to install irq (%d)\n", ret);
diff --git a/drivers/media/platform/exynos4-is/fimc-core.h b/drivers/media/platform/samsung/exynos4-is/fimc-core.h
index 7a058f3e6298..7a058f3e6298 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-core.h
diff --git a/drivers/media/platform/exynos4-is/fimc-is-command.h b/drivers/media/platform/samsung/exynos4-is/fimc-is-command.h
index 87978609ad55..87978609ad55 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-command.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-command.h
diff --git a/drivers/media/platform/exynos4-is/fimc-is-errno.c b/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.c
index 5d9f4c1cdc5e..5d9f4c1cdc5e 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-errno.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.c
diff --git a/drivers/media/platform/exynos4-is/fimc-is-errno.h b/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.h
index da36b48b8f9f..da36b48b8f9f 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-errno.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.h
diff --git a/drivers/media/platform/exynos4-is/fimc-is-i2c.c b/drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.c
index 83a28ef8e099..83a28ef8e099 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-i2c.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.c
diff --git a/drivers/media/platform/exynos4-is/fimc-is-i2c.h b/drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.h
index a23bd20be6c8..a23bd20be6c8 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-i2c.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.h
diff --git a/drivers/media/platform/exynos4-is/fimc-is-param.c b/drivers/media/platform/samsung/exynos4-is/fimc-is-param.c
index 9c816ae3b3e5..9c816ae3b3e5 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-param.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-param.c
diff --git a/drivers/media/platform/exynos4-is/fimc-is-param.h b/drivers/media/platform/samsung/exynos4-is/fimc-is-param.h
index 206904674927..206904674927 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-param.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-param.h
diff --git a/drivers/media/platform/exynos4-is/fimc-is-regs.c b/drivers/media/platform/samsung/exynos4-is/fimc-is-regs.c
index 366e6393817d..366e6393817d 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-regs.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-regs.c
diff --git a/drivers/media/platform/exynos4-is/fimc-is-regs.h b/drivers/media/platform/samsung/exynos4-is/fimc-is-regs.h
index 5d8b01bc84a2..5d8b01bc84a2 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-regs.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-regs.h
diff --git a/drivers/media/platform/exynos4-is/fimc-is-sensor.c b/drivers/media/platform/samsung/exynos4-is/fimc-is-sensor.c
index 0e5b9fede4ae..0e5b9fede4ae 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-sensor.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-sensor.c
diff --git a/drivers/media/platform/exynos4-is/fimc-is-sensor.h b/drivers/media/platform/samsung/exynos4-is/fimc-is-sensor.h
index 9aefc63889de..9aefc63889de 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-sensor.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-sensor.h
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/samsung/exynos4-is/fimc-is.c
index e55e411038f4..e55e411038f4 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is.c
diff --git a/drivers/media/platform/exynos4-is/fimc-is.h b/drivers/media/platform/samsung/exynos4-is/fimc-is.h
index 06586e455b1d..06586e455b1d 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is.h
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
index 83688a7982f7..83688a7982f7 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.h b/drivers/media/platform/samsung/exynos4-is/fimc-isp-video.h
index edcb3a5e3cb9..edcb3a5e3cb9 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-isp-video.h
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/samsung/exynos4-is/fimc-isp.c
index 855235bea46d..b85986e50f46 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-isp.c
@@ -226,7 +226,7 @@ static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd,
}
}
} else {
- if (sd->entity.stream_count == 0) {
+ if (!media_entity_is_streaming(&sd->entity)) {
if (fmt->pad == FIMC_ISP_SD_PAD_SINK) {
struct v4l2_subdev_format format = *fmt;
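
The pattern behind these exynos4-is changes: the entity.stream_count field is gone from the media core, so code that tested stream_count == 0 or > 0 now asks media_entity_is_streaming(), which reports whether the entity belongs to a started pipeline. The guard shape, sketched with an assumed function name:

static int sketch_set_fmt_guard(struct v4l2_subdev *sd)
{
	/* Formats may only be changed while the entity is idle. */
	if (media_entity_is_streaming(&sd->entity))
		return -EBUSY;

	return 0;
}
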
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.h b/drivers/media/platform/samsung/exynos4-is/fimc-isp.h
index 12017cd924d9..12017cd924d9 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-isp.h
diff --git a/drivers/media/platform/exynos4-is/fimc-lite-reg.c b/drivers/media/platform/samsung/exynos4-is/fimc-lite-reg.c
index 57996b4104b4..57996b4104b4 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite-reg.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-lite-reg.c
diff --git a/drivers/media/platform/exynos4-is/fimc-lite-reg.h b/drivers/media/platform/samsung/exynos4-is/fimc-lite-reg.h
index c5656e902750..c5656e902750 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite-reg.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-lite-reg.h
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
index aaa3af0493ce..2e8f476efc5c 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
@@ -1073,7 +1073,7 @@ static int fimc_lite_subdev_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&fimc->lock);
if ((atomic_read(&fimc->out_path) == FIMC_IO_ISP &&
- sd->entity.stream_count > 0) ||
+ media_entity_is_streaming(&sd->entity)) ||
(atomic_read(&fimc->out_path) == FIMC_IO_DMA &&
vb2_is_busy(&fimc->vb_queue))) {
mutex_unlock(&fimc->lock);
@@ -1197,8 +1197,8 @@ static int fimc_lite_subdev_s_stream(struct v4l2_subdev *sd, int on)
* Find sensor subdev linked to FIMC-LITE directly or through
* MIPI-CSIS. This is required for configuration where FIMC-LITE
* is used as a subdev only and feeds data internally to FIMC-IS.
- * The pipeline links are protected through entity.stream_count
- * so there is no need to take the media graph mutex here.
+ * The pipeline links are protected through entity.pipe so there is no
+ * need to take the media graph mutex here.
*/
fimc->sensor = fimc_find_remote_sensor(&sd->entity);
@@ -1454,6 +1454,7 @@ static int fimc_lite_probe(struct platform_device *pdev)
struct fimc_lite *fimc;
struct resource *res;
int ret;
+ int irq;
if (!dev->of_node)
return -ENODEV;
@@ -1485,17 +1486,15 @@ static int fimc_lite_probe(struct platform_device *pdev)
if (IS_ERR(fimc->regs))
return PTR_ERR(fimc->regs);
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
- dev_err(dev, "Failed to get IRQ resource\n");
- return -ENXIO;
- }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
ret = fimc_lite_clk_get(fimc);
if (ret)
return ret;
- ret = devm_request_irq(dev, res->start, flite_irq_handler,
+ ret = devm_request_irq(dev, irq, flite_irq_handler,
0, dev_name(dev), fimc);
if (ret) {
dev_err(dev, "Failed to install irq (%d)\n", ret);
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.h b/drivers/media/platform/samsung/exynos4-is/fimc-lite.h
index ddf29e0b5b1c..ddf29e0b5b1c 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-lite.h
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/samsung/exynos4-is/fimc-m2m.c
index df8e2aa454d8..df8e2aa454d8 100644
--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-m2m.c
diff --git a/drivers/media/platform/exynos4-is/fimc-reg.c b/drivers/media/platform/samsung/exynos4-is/fimc-reg.c
index 95165a2cc7d1..95165a2cc7d1 100644
--- a/drivers/media/platform/exynos4-is/fimc-reg.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-reg.c
diff --git a/drivers/media/platform/exynos4-is/fimc-reg.h b/drivers/media/platform/samsung/exynos4-is/fimc-reg.h
index b9b33aa1f12f..b9b33aa1f12f 100644
--- a/drivers/media/platform/exynos4-is/fimc-reg.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-reg.h
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/samsung/exynos4-is/media-dev.c
index 544b54e428c9..544b54e428c9 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/samsung/exynos4-is/media-dev.c
diff --git a/drivers/media/platform/exynos4-is/media-dev.h b/drivers/media/platform/samsung/exynos4-is/media-dev.h
index 62ad5d7e035a..62ad5d7e035a 100644
--- a/drivers/media/platform/exynos4-is/media-dev.h
+++ b/drivers/media/platform/samsung/exynos4-is/media-dev.h
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
index 27a214936cb0..27a214936cb0 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.h b/drivers/media/platform/samsung/exynos4-is/mipi-csis.h
index 193f253c7907..193f253c7907 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.h
+++ b/drivers/media/platform/samsung/exynos4-is/mipi-csis.h
diff --git a/drivers/media/platform/samsung/s3c-camif/Kconfig b/drivers/media/platform/samsung/s3c-camif/Kconfig
new file mode 100644
index 000000000000..8cb8d1ac3edc
--- /dev/null
+++ b/drivers/media/platform/samsung/s3c-camif/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_S3C_CAMIF
+ tristate "Samsung S3C24XX/S3C64XX SoC Camera Interface driver"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && I2C && PM
+ depends on ARCH_S3C64XX || PLAT_S3C24XX || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ This is a v4l2 driver for s3c24xx and s3c64xx SoC series camera
+ host interface (CAMIF).
+
+ To compile this driver as a module, choose M here: the module
+ will be called s3c-camif.
diff --git a/drivers/media/platform/s3c-camif/Makefile b/drivers/media/platform/samsung/s3c-camif/Makefile
index 70ee042a3dae..70ee042a3dae 100644
--- a/drivers/media/platform/s3c-camif/Makefile
+++ b/drivers/media/platform/samsung/s3c-camif/Makefile
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
index 140854ab4dd8..140854ab4dd8 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/samsung/s3c-camif/camif-core.c
index 6e8ef86566b7..6e8ef86566b7 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/samsung/s3c-camif/camif-core.c
diff --git a/drivers/media/platform/s3c-camif/camif-core.h b/drivers/media/platform/samsung/s3c-camif/camif-core.h
index f3442e251bc9..f3442e251bc9 100644
--- a/drivers/media/platform/s3c-camif/camif-core.h
+++ b/drivers/media/platform/samsung/s3c-camif/camif-core.h
diff --git a/drivers/media/platform/s3c-camif/camif-regs.c b/drivers/media/platform/samsung/s3c-camif/camif-regs.c
index e80204f5720c..e80204f5720c 100644
--- a/drivers/media/platform/s3c-camif/camif-regs.c
+++ b/drivers/media/platform/samsung/s3c-camif/camif-regs.c
diff --git a/drivers/media/platform/s3c-camif/camif-regs.h b/drivers/media/platform/samsung/s3c-camif/camif-regs.h
index 052948a7b669..052948a7b669 100644
--- a/drivers/media/platform/s3c-camif/camif-regs.h
+++ b/drivers/media/platform/samsung/s3c-camif/camif-regs.h
diff --git a/drivers/media/platform/samsung/s5p-g2d/Kconfig b/drivers/media/platform/samsung/s5p-g2d/Kconfig
new file mode 100644
index 000000000000..28ab88fc2d7c
--- /dev/null
+++ b/drivers/media/platform/samsung/s5p-g2d/Kconfig
@@ -0,0 +1,11 @@
+config VIDEO_SAMSUNG_S5P_G2D
+ tristate "Samsung S5P and EXYNOS4 G2D 2d graphics accelerator driver"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ This is a v4l2 driver for Samsung S5P and EXYNOS4 G2D
+ 2d graphics accelerator.
+
diff --git a/drivers/media/platform/s5p-g2d/Makefile b/drivers/media/platform/samsung/s5p-g2d/Makefile
index ad2c5bf66a5f..ad2c5bf66a5f 100644
--- a/drivers/media/platform/s5p-g2d/Makefile
+++ b/drivers/media/platform/samsung/s5p-g2d/Makefile
diff --git a/drivers/media/platform/s5p-g2d/g2d-hw.c b/drivers/media/platform/samsung/s5p-g2d/g2d-hw.c
index b69d3fb12502..b69d3fb12502 100644
--- a/drivers/media/platform/s5p-g2d/g2d-hw.c
+++ b/drivers/media/platform/samsung/s5p-g2d/g2d-hw.c
diff --git a/drivers/media/platform/s5p-g2d/g2d-regs.h b/drivers/media/platform/samsung/s5p-g2d/g2d-regs.h
index b2630c6133f1..b2630c6133f1 100644
--- a/drivers/media/platform/s5p-g2d/g2d-regs.h
+++ b/drivers/media/platform/samsung/s5p-g2d/g2d-regs.h
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/samsung/s5p-g2d/g2d.c
index fa0bb31bd2b9..dd8864779a7c 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/samsung/s5p-g2d/g2d.c
@@ -623,7 +623,6 @@ static int g2d_probe(struct platform_device *pdev)
{
struct g2d_dev *dev;
struct video_device *vfd;
- struct resource *res;
const struct of_device_id *of_id;
int ret = 0;
@@ -664,14 +663,11 @@ static int g2d_probe(struct platform_device *pdev)
goto put_clk_gate;
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to find IRQ\n");
- ret = -ENXIO;
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
goto unprep_clk_gate;
- }
- dev->irq = res->start;
+ dev->irq = ret;
ret = devm_request_irq(&pdev->dev, dev->irq, g2d_isr,
0, pdev->name, dev);
diff --git a/drivers/media/platform/s5p-g2d/g2d.h b/drivers/media/platform/samsung/s5p-g2d/g2d.h
index c2309c1370da..c2309c1370da 100644
--- a/drivers/media/platform/s5p-g2d/g2d.h
+++ b/drivers/media/platform/samsung/s5p-g2d/g2d.h
diff --git a/drivers/media/platform/samsung/s5p-jpeg/Kconfig b/drivers/media/platform/samsung/s5p-jpeg/Kconfig
new file mode 100644
index 000000000000..11f6e99dec39
--- /dev/null
+++ b/drivers/media/platform/samsung/s5p-jpeg/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config VIDEO_SAMSUNG_S5P_JPEG
+ tristate "Samsung S5P/Exynos3250/Exynos4 JPEG codec driver"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ This is a v4l2 driver for Samsung S5P, EXYNOS3250
+ and EXYNOS4 JPEG codec.
diff --git a/drivers/media/platform/s5p-jpeg/Makefile b/drivers/media/platform/samsung/s5p-jpeg/Makefile
index 8b0f92e27e70..8b0f92e27e70 100644
--- a/drivers/media/platform/s5p-jpeg/Makefile
+++ b/drivers/media/platform/samsung/s5p-jpeg/Makefile
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
index a8d9159d5ed8..5479bc8d474d 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* linux/drivers/media/platform/s5p-jpeg/jpeg-core.c
+/* linux/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
*
* Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.h
index 4a5fb1b15455..5570c79f122f 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* linux/drivers/media/platform/s5p-jpeg/jpeg-core.h
+/* linux/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.h
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c b/drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos3250.c
index 637a5104d948..637a5104d948 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c
+++ b/drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos3250.c
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.h b/drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos3250.h
index 68160befce39..15af928fad76 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.h
+++ b/drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos3250.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* linux/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.h
+/* linux/drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos3250.h
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c b/drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos4.c
index 0828cfa783fe..0828cfa783fe 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
+++ b/drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos4.c
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h b/drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos4.h
index 3e2887526960..3e2887526960 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h
+++ b/drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos4.h
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c b/drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-s5p.c
index 491e9248286c..01b47b3df1e7 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
+++ b/drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-s5p.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* linux/drivers/media/platform/s5p-jpeg/jpeg-hw.h
+/* linux/drivers/media/platform/samsung/s5p-jpeg/jpeg-hw.h
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h b/drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-s5p.h
index 98ddf7097562..f068d52c66b7 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h
+++ b/drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-s5p.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* linux/drivers/media/platform/s5p-jpeg/jpeg-hw.h
+/* linux/drivers/media/platform/samsung/s5p-jpeg/jpeg-hw.h
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-regs.h b/drivers/media/platform/samsung/s5p-jpeg/jpeg-regs.h
index 86f376b50581..c2298b680022 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-regs.h
+++ b/drivers/media/platform/samsung/s5p-jpeg/jpeg-regs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* linux/drivers/media/platform/s5p-jpeg/jpeg-regs.h
+/* linux/drivers/media/platform/samsung/s5p-jpeg/jpeg-regs.h
*
* Register definition file for Samsung JPEG codec driver
*
diff --git a/drivers/media/platform/samsung/s5p-mfc/Kconfig b/drivers/media/platform/samsung/s5p-mfc/Kconfig
new file mode 100644
index 000000000000..7ee3b0c8d98b
--- /dev/null
+++ b/drivers/media/platform/samsung/s5p-mfc/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_SAMSUNG_S5P_MFC
+ tristate "Samsung S5P MFC Video Codec"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ MFC 5.1 and 6.x driver for V4L2
diff --git a/drivers/media/platform/s5p-mfc/Makefile b/drivers/media/platform/samsung/s5p-mfc/Makefile
index 0b324af2ab00..0b324af2ab00 100644
--- a/drivers/media/platform/s5p-mfc/Makefile
+++ b/drivers/media/platform/samsung/s5p-mfc/Makefile
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v10.h b/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v10.h
index fadd9139b489..fadd9139b489 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc-v10.h
+++ b/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v10.h
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v6.h b/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v6.h
index fa49fe580e1a..fa49fe580e1a 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
+++ b/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v6.h
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v7.h b/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v7.h
index 4a7adfdaa359..4a7adfdaa359 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc-v7.h
+++ b/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v7.h
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v8.h b/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v8.h
index 162e3c7e920f..162e3c7e920f 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc-v8.h
+++ b/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v8.h
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc.h b/drivers/media/platform/samsung/s5p-mfc/regs-mfc.h
index 9171e8181c18..9171e8181c18 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc.h
+++ b/drivers/media/platform/samsung/s5p-mfc/regs-mfc.h
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
index f6732f031e96..761341934925 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
@@ -1268,7 +1268,6 @@ static int s5p_mfc_probe(struct platform_device *pdev)
{
struct s5p_mfc_dev *dev;
struct video_device *vfd;
- struct resource *res;
int ret;
pr_debug("%s++\n", __func__);
@@ -1294,12 +1293,10 @@ static int s5p_mfc_probe(struct platform_device *pdev)
if (IS_ERR(dev->regs_base))
return PTR_ERR(dev->regs_base);
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get irq resource\n");
- return -ENOENT;
- }
- dev->irq = res->start;
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ dev->irq = ret;
ret = devm_request_irq(&pdev->dev, dev->irq, s5p_mfc_irq,
0, pdev->name, dev);
if (ret) {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd.c
index 0e88c28f4ad3..774c573dc075 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c
+ * linux/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd.c
*
* Copyright (C) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd.h
index ed4e32a12552..945d12fdceb7 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.h
+ * linux/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd.h
*
* Copyright (C) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v5.c
index 1ea4eda9c8e0..327e54e70611 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v5.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
+ * linux/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v5.c
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v5.h
index 917854bffe9f..6eafa514aebc 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v5.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.h
+ * linux/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v5.h
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c
index 1f42130cc865..f8588e52dfc8 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
+ * linux/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.h
index c19884ea2bfc..9dc44460cc38 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.h
+ * linux/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.h
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h
index c3ef4f6a42d2..5304f42c8c72 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h
@@ -732,6 +732,7 @@ struct s5p_mfc_fmt {
enum s5p_mfc_fmt_type type;
u32 num_planes;
u32 versions;
+ u32 flags;
};
/*
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c
index da138c314963..72d70984e99a 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * linux/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+ * linux/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.h
index 7f32ef8a6b61..653ba5f3d048 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * linux/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h
+ * linux/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.h
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_debug.h
index 752bbe4fe48e..bba5dad6dbff 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_debug.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
+ * drivers/media/platform/samsung/s5p-mfc/s5p_mfc_debug.h
*
* Header file for Samsung MFC (Multi Function Codec - FIMV) driver
* This file contains debug macros
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c
index c1d3bda8385b..4b89df8bfd18 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * linux/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+ * linux/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
@@ -62,6 +62,8 @@ static struct s5p_mfc_fmt formats[] = {
.type = MFC_FMT_DEC,
.num_planes = 1,
.versions = MFC_V5PLUS_BITS,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION |
+ V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM,
},
{
.fourcc = V4L2_PIX_FMT_H264_MVC,
@@ -69,6 +71,8 @@ static struct s5p_mfc_fmt formats[] = {
.type = MFC_FMT_DEC,
.num_planes = 1,
.versions = MFC_V6PLUS_BITS,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION |
+ V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM,
},
{
.fourcc = V4L2_PIX_FMT_H263,
@@ -76,6 +80,7 @@ static struct s5p_mfc_fmt formats[] = {
.type = MFC_FMT_DEC,
.num_planes = 1,
.versions = MFC_V5PLUS_BITS,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
{
.fourcc = V4L2_PIX_FMT_MPEG1,
@@ -83,6 +88,8 @@ static struct s5p_mfc_fmt formats[] = {
.type = MFC_FMT_DEC,
.num_planes = 1,
.versions = MFC_V5PLUS_BITS,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION |
+ V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM,
},
{
.fourcc = V4L2_PIX_FMT_MPEG2,
@@ -90,6 +97,8 @@ static struct s5p_mfc_fmt formats[] = {
.type = MFC_FMT_DEC,
.num_planes = 1,
.versions = MFC_V5PLUS_BITS,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION |
+ V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM,
},
{
.fourcc = V4L2_PIX_FMT_MPEG4,
@@ -97,6 +106,8 @@ static struct s5p_mfc_fmt formats[] = {
.type = MFC_FMT_DEC,
.num_planes = 1,
.versions = MFC_V5PLUS_BITS,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION |
+ V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM,
},
{
.fourcc = V4L2_PIX_FMT_XVID,
@@ -104,6 +115,7 @@ static struct s5p_mfc_fmt formats[] = {
.type = MFC_FMT_DEC,
.num_planes = 1,
.versions = MFC_V5PLUS_BITS,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
{
.fourcc = V4L2_PIX_FMT_VC1_ANNEX_G,
@@ -111,6 +123,7 @@ static struct s5p_mfc_fmt formats[] = {
.type = MFC_FMT_DEC,
.num_planes = 1,
.versions = MFC_V5PLUS_BITS,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
{
.fourcc = V4L2_PIX_FMT_VC1_ANNEX_L,
@@ -118,6 +131,7 @@ static struct s5p_mfc_fmt formats[] = {
.type = MFC_FMT_DEC,
.num_planes = 1,
.versions = MFC_V5PLUS_BITS,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
{
.fourcc = V4L2_PIX_FMT_VP8,
@@ -125,6 +139,7 @@ static struct s5p_mfc_fmt formats[] = {
.type = MFC_FMT_DEC,
.num_planes = 1,
.versions = MFC_V6PLUS_BITS,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
{
.fourcc = V4L2_PIX_FMT_HEVC,
@@ -132,6 +147,8 @@ static struct s5p_mfc_fmt formats[] = {
.type = MFC_FMT_DEC,
.num_planes = 1,
.versions = MFC_V10_BIT,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION |
+ V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM,
},
{
.fourcc = V4L2_PIX_FMT_VP9,
@@ -139,6 +156,7 @@ static struct s5p_mfc_fmt formats[] = {
.type = MFC_FMT_DEC,
.num_planes = 1,
.versions = MFC_V10_BIT,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
};
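
The new flags member on struct s5p_mfc_fmt carries V4L2 format-description flags: V4L2_FMT_FLAG_DYN_RESOLUTION marks codecs whose decoder detects mid-stream resolution changes itself, and V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM marks bitstreams userspace may feed in arbitrary chunks without frame-level framing. Such flags typically reach userspace through VIDIOC_ENUM_FMT; a sketch of how they would be surfaced (the driver's actual enum_fmt handler is not part of this hunk):

static void sketch_fill_fmtdesc(struct v4l2_fmtdesc *f,
				const struct s5p_mfc_fmt *fmt)
{
	f->pixelformat = fmt->fourcc;
	/* Advertise per-codec capabilities to userspace. */
	f->flags = fmt->flags;
}
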
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.h
index 0e9a0e3bbbe7..0c52ab46cff7 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * linux/drivers/media/platform/s5p-mfc/s5p_mfc_dec.h
+ * linux/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.h
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
index 1fad99edb091..a8877d805b29 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * linux/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+ * linux/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.h
index cacd1ca43e19..3f1b1a037a4f 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * linux/drivers/media/platform/s5p-mfc/s5p_mfc_enc.h
+ * linux/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.h
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_intr.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_intr.c
index 0a38f6d70ee9..0a38f6d70ee9 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_intr.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_intr.c
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_intr.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_intr.h
index d32860db17d2..d32860db17d2 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_intr.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_intr.h
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_iommu.h
index 1a32266b7ddc..1a32266b7ddc 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_iommu.h
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr.c
index bb65671eea91..673962301173 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
+ * drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr.c
*
* Samsung MFC (Multi Function Codec - FIMV) driver
* This file contains hw related functions.
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr.h
index 1c5d2d4c0543..b9831275f3ab 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
+ * drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr.h
*
* Header file for Samsung MFC (Multi Function Codec - FIMV) driver
* Contains declarations of hw related functions.
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v5.c
index 28a06dc343fd..28a06dc343fd 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v5.c
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v5.h
index b53d376ead60..b53d376ead60 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v5.h
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
index a1453053e31a..8227004f6746 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+ * drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
*
* Samsung MFC (Multi Function Codec - FIMV) driver
* This file contains hw related functions.
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.h
index 8ca514bf5e37..e4dd03c5454c 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.h
+ * drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.h
*
* Header file for Samsung MFC (Multi Function Codec - FIMV) driver
* Contains declarations of hw related functions.
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.c
index 88b7d33c9197..72a901e99450 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * linux/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+ * linux/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.h
index 3d26443189a2..4159d2364e87 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * linux/drivers/media/platform/s5p-mfc/s5p_mfc_pm.h
+ * linux/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.h
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
diff --git a/drivers/media/platform/st/Kconfig b/drivers/media/platform/st/Kconfig
new file mode 100644
index 000000000000..b29c258ea5fc
--- /dev/null
+++ b/drivers/media/platform/st/Kconfig
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "STMicroelectronics media platform drivers"
+
+source "drivers/media/platform/st/sti/Kconfig"
+source "drivers/media/platform/st/stm32/Kconfig"
diff --git a/drivers/media/platform/st/Makefile b/drivers/media/platform/st/Makefile
new file mode 100644
index 000000000000..a1f75b2a8225
--- /dev/null
+++ b/drivers/media/platform/st/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-y += sti/bdisp/
+obj-y += sti/c8sectpfe/
+obj-y += sti/delta/
+obj-y += sti/hva/
+obj-y += stm32/
diff --git a/drivers/media/platform/st/sti/Kconfig b/drivers/media/platform/st/sti/Kconfig
new file mode 100644
index 000000000000..60068e8b47b8
--- /dev/null
+++ b/drivers/media/platform/st/sti/Kconfig
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+source "drivers/media/platform/st/sti/bdisp/Kconfig"
+source "drivers/media/platform/st/sti/c8sectpfe/Kconfig"
+source "drivers/media/platform/st/sti/delta/Kconfig"
+source "drivers/media/platform/st/sti/hva/Kconfig"
diff --git a/drivers/media/platform/st/sti/Makefile b/drivers/media/platform/st/sti/Makefile
new file mode 100644
index 000000000000..f9ce8169b040
--- /dev/null
+++ b/drivers/media/platform/st/sti/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += bdisp/
+obj-y += c8sectpfe/
+obj-y += delta/
+obj-y += hva/
+obj-y += stm32/
diff --git a/drivers/media/platform/st/sti/bdisp/Kconfig b/drivers/media/platform/st/sti/bdisp/Kconfig
new file mode 100644
index 000000000000..496f8aedf0a4
--- /dev/null
+++ b/drivers/media/platform/st/sti/bdisp/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_STI_BDISP
+ tristate "STMicroelectronics BDISP 2D blitter driver"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_STI || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ This V4L2 mem2mem driver is a 2D blitter for STMicroelectronics SoCs.
diff --git a/drivers/media/platform/sti/bdisp/Makefile b/drivers/media/platform/st/sti/bdisp/Makefile
index 39ade0a34723..39ade0a34723 100644
--- a/drivers/media/platform/sti/bdisp/Makefile
+++ b/drivers/media/platform/st/sti/bdisp/Makefile
diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/st/sti/bdisp/bdisp-debug.c
index a27f638df11c..a27f638df11c 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-debug.c
+++ b/drivers/media/platform/st/sti/bdisp/bdisp-debug.c
diff --git a/drivers/media/platform/sti/bdisp/bdisp-filter.h b/drivers/media/platform/st/sti/bdisp/bdisp-filter.h
index 9e1a95fd27ed..9e1a95fd27ed 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-filter.h
+++ b/drivers/media/platform/st/sti/bdisp/bdisp-filter.h
diff --git a/drivers/media/platform/sti/bdisp/bdisp-hw.c b/drivers/media/platform/st/sti/bdisp/bdisp-hw.c
index a74e9fd65238..a74e9fd65238 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-hw.c
+++ b/drivers/media/platform/st/sti/bdisp/bdisp-hw.c
diff --git a/drivers/media/platform/sti/bdisp/bdisp-reg.h b/drivers/media/platform/st/sti/bdisp/bdisp-reg.h
index b07ecc903707..b07ecc903707 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-reg.h
+++ b/drivers/media/platform/st/sti/bdisp/bdisp-reg.h
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
index 01ce7b711774..5aa79d9277c8 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
@@ -1284,7 +1284,6 @@ static int bdisp_remove(struct platform_device *pdev)
static int bdisp_probe(struct platform_device *pdev)
{
struct bdisp_dev *bdisp;
- struct resource *res;
struct device *dev = &pdev->dev;
int ret;
@@ -1335,14 +1334,11 @@ static int bdisp_probe(struct platform_device *pdev)
goto err_wq;
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "failed to get IRQ resource\n");
- ret = -EINVAL;
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
goto err_clk;
- }
- ret = devm_request_threaded_irq(dev, res->start, bdisp_irq_handler,
+ ret = devm_request_threaded_irq(dev, ret, bdisp_irq_handler,
bdisp_irq_thread, IRQF_ONESHOT,
pdev->name, bdisp);
if (ret) {
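
The bdisp conversion above illustrates the preferred IRQ lookup: platform_get_irq() both returns the Linux IRQ number and logs a failure itself, so the manual platform_get_resource(IORESOURCE_IRQ, 0) lookup and its dev_err() can be dropped. A minimal sketch of the resulting probe-time pattern, assuming <linux/interrupt.h> and <linux/platform_device.h>; the foo_* names are placeholders, not part of this patch:

	static int foo_probe(struct platform_device *pdev)
	{
		int irq, ret;

		/* platform_get_irq() prints its own error message on failure */
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		ret = devm_request_threaded_irq(&pdev->dev, irq, foo_irq_handler,
						foo_irq_thread, IRQF_ONESHOT,
						pdev->name, NULL);
		if (ret)
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);

		return ret;
	}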
diff --git a/drivers/media/platform/sti/bdisp/bdisp.h b/drivers/media/platform/st/sti/bdisp/bdisp.h
index 3fb009d24791..3fb009d24791 100644
--- a/drivers/media/platform/sti/bdisp/bdisp.h
+++ b/drivers/media/platform/st/sti/bdisp/bdisp.h
diff --git a/drivers/media/platform/sti/c8sectpfe/Kconfig b/drivers/media/platform/st/sti/c8sectpfe/Kconfig
index 369509e03071..702b910509c9 100644
--- a/drivers/media/platform/sti/c8sectpfe/Kconfig
+++ b/drivers/media/platform/st/sti/c8sectpfe/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DVB_C8SECTPFE
tristate "STMicroelectronics C8SECTPFE DVB support"
+ depends on DVB_PLATFORM_DRIVERS
depends on PINCTRL && DVB_CORE && I2C
depends on ARCH_STI || ARCH_MULTIPLATFORM || COMPILE_TEST
select FW_LOADER
diff --git a/drivers/media/platform/sti/c8sectpfe/Makefile b/drivers/media/platform/st/sti/c8sectpfe/Makefile
index aedfc725cc19..aedfc725cc19 100644
--- a/drivers/media/platform/sti/c8sectpfe/Makefile
+++ b/drivers/media/platform/st/sti/c8sectpfe/Makefile
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-common.c
index 5df67da25525..5df67da25525 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-common.c
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.h b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-common.h
index 5ab7ca448cf9..5ab7ca448cf9 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.h
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-common.h
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
index 7bb1384e4bad..7bb1384e4bad 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.h
index c9d6021904cd..c9d6021904cd 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.h
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.c
index 301fa10f419b..301fa10f419b 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.c
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.h b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.h
index d2c35fb32d7e..d2c35fb32d7e 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.h
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.h
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-dvb.c
index feb48cb546d7..feb48cb546d7 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-dvb.c
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.h b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-dvb.h
index 3d87a9ae8702..3d87a9ae8702 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.h
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-dvb.h
diff --git a/drivers/media/platform/st/sti/delta/Kconfig b/drivers/media/platform/st/sti/delta/Kconfig
new file mode 100644
index 000000000000..efa936b1cc8a
--- /dev/null
+++ b/drivers/media/platform/st/sti/delta/Kconfig
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_STI_DELTA
+ tristate "STMicroelectronics DELTA multi-format video decoder V4L2 driver"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_STI || COMPILE_TEST
+ help
+ This V4L2 driver enables the DELTA multi-format video decoder
+ of the STMicroelectronics STiH4xx SoC series, allowing hardware
+ decoding of various compressed video bitstream formats into
+ raw uncompressed formats.
+
+ Use this option to see the decoders available for such
+ hardware.
+
+ Please note that the driver will only be built if
+ at least one of the DELTA decoders below is selected.
+
+config VIDEO_STI_DELTA_MJPEG
+ bool "STMicroelectronics DELTA MJPEG support"
+ default y
+ depends on VIDEO_STI_DELTA
+ help
+ Enables DELTA MJPEG hardware support.
+
+ To compile this driver as a module, choose M here:
+ the module will be called st-delta.
+
+config VIDEO_STI_DELTA_DRIVER
+ tristate
+ depends on VIDEO_STI_DELTA
+ depends on VIDEO_STI_DELTA_MJPEG
+ default VIDEO_STI_DELTA_MJPEG
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ select RPMSG
diff --git a/drivers/media/platform/sti/delta/Makefile b/drivers/media/platform/st/sti/delta/Makefile
index 32412fa4c632..32412fa4c632 100644
--- a/drivers/media/platform/sti/delta/Makefile
+++ b/drivers/media/platform/st/sti/delta/Makefile
diff --git a/drivers/media/platform/sti/delta/delta-cfg.h b/drivers/media/platform/st/sti/delta/delta-cfg.h
index f47c6e6ff083..f47c6e6ff083 100644
--- a/drivers/media/platform/sti/delta/delta-cfg.h
+++ b/drivers/media/platform/st/sti/delta/delta-cfg.h
diff --git a/drivers/media/platform/sti/delta/delta-debug.c b/drivers/media/platform/st/sti/delta/delta-debug.c
index 4b2eb6b63aa2..4b2eb6b63aa2 100644
--- a/drivers/media/platform/sti/delta/delta-debug.c
+++ b/drivers/media/platform/st/sti/delta/delta-debug.c
diff --git a/drivers/media/platform/sti/delta/delta-debug.h b/drivers/media/platform/st/sti/delta/delta-debug.h
index fa90252623e1..fa90252623e1 100644
--- a/drivers/media/platform/sti/delta/delta-debug.h
+++ b/drivers/media/platform/st/sti/delta/delta-debug.h
diff --git a/drivers/media/platform/sti/delta/delta-ipc.c b/drivers/media/platform/st/sti/delta/delta-ipc.c
index 21d3e08e259a..21d3e08e259a 100644
--- a/drivers/media/platform/sti/delta/delta-ipc.c
+++ b/drivers/media/platform/st/sti/delta/delta-ipc.c
diff --git a/drivers/media/platform/sti/delta/delta-ipc.h b/drivers/media/platform/st/sti/delta/delta-ipc.h
index 9fba6b5d169a..9fba6b5d169a 100644
--- a/drivers/media/platform/sti/delta/delta-ipc.h
+++ b/drivers/media/platform/st/sti/delta/delta-ipc.h
diff --git a/drivers/media/platform/sti/delta/delta-mem.c b/drivers/media/platform/st/sti/delta/delta-mem.c
index aeccd50583da..aeccd50583da 100644
--- a/drivers/media/platform/sti/delta/delta-mem.c
+++ b/drivers/media/platform/st/sti/delta/delta-mem.c
diff --git a/drivers/media/platform/sti/delta/delta-mem.h b/drivers/media/platform/st/sti/delta/delta-mem.h
index ff7d02f00b28..ff7d02f00b28 100644
--- a/drivers/media/platform/sti/delta/delta-mem.h
+++ b/drivers/media/platform/st/sti/delta/delta-mem.h
diff --git a/drivers/media/platform/sti/delta/delta-mjpeg-dec.c b/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c
index 0533d4a083d2..0533d4a083d2 100644
--- a/drivers/media/platform/sti/delta/delta-mjpeg-dec.c
+++ b/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c
diff --git a/drivers/media/platform/sti/delta/delta-mjpeg-fw.h b/drivers/media/platform/st/sti/delta/delta-mjpeg-fw.h
index 5a9404f4d055..5a9404f4d055 100644
--- a/drivers/media/platform/sti/delta/delta-mjpeg-fw.h
+++ b/drivers/media/platform/st/sti/delta/delta-mjpeg-fw.h
diff --git a/drivers/media/platform/sti/delta/delta-mjpeg-hdr.c b/drivers/media/platform/st/sti/delta/delta-mjpeg-hdr.c
index 90e5b2f72c82..90e5b2f72c82 100644
--- a/drivers/media/platform/sti/delta/delta-mjpeg-hdr.c
+++ b/drivers/media/platform/st/sti/delta/delta-mjpeg-hdr.c
diff --git a/drivers/media/platform/sti/delta/delta-mjpeg.h b/drivers/media/platform/st/sti/delta/delta-mjpeg.h
index 43f7a88b6e59..43f7a88b6e59 100644
--- a/drivers/media/platform/sti/delta/delta-mjpeg.h
+++ b/drivers/media/platform/st/sti/delta/delta-mjpeg.h
diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/st/sti/delta/delta-v4l2.c
index c887a31ebb54..c887a31ebb54 100644
--- a/drivers/media/platform/sti/delta/delta-v4l2.c
+++ b/drivers/media/platform/st/sti/delta/delta-v4l2.c
diff --git a/drivers/media/platform/sti/delta/delta.h b/drivers/media/platform/st/sti/delta/delta.h
index 914556030e70..914556030e70 100644
--- a/drivers/media/platform/sti/delta/delta.h
+++ b/drivers/media/platform/st/sti/delta/delta.h
diff --git a/drivers/media/platform/st/sti/hva/Kconfig b/drivers/media/platform/st/sti/hva/Kconfig
new file mode 100644
index 000000000000..46d6f82f648e
--- /dev/null
+++ b/drivers/media/platform/st/sti/hva/Kconfig
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_STI_HVA
+ tristate "STMicroelectronics HVA multi-format video encoder V4L2 driver"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_STI || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ This V4L2 driver enables the HVA (Hardware Video Accelerator) multi-format
+ video encoder of STMicroelectronics SoCs, allowing hardware encoding of
+ raw uncompressed formats into various compressed video bitstream formats.
+
+ To compile this driver as a module, choose M here:
+ the module will be called st-hva.
+
+config VIDEO_STI_HVA_DEBUGFS
+ bool "Export STMicroelectronics HVA internals in debugfs"
+ depends on VIDEO_STI_HVA
+ depends on DEBUG_FS
+ help
+ Select this to see information about the internal state and the last
+ operation of the STMicroelectronics HVA multi-format video encoder in
+ debugfs.
+
+ Choose N unless you know you need this.
diff --git a/drivers/media/platform/sti/hva/Makefile b/drivers/media/platform/st/sti/hva/Makefile
index b5a5478bdd01..b5a5478bdd01 100644
--- a/drivers/media/platform/sti/hva/Makefile
+++ b/drivers/media/platform/st/sti/hva/Makefile
diff --git a/drivers/media/platform/sti/hva/hva-debugfs.c b/drivers/media/platform/st/sti/hva/hva-debugfs.c
index a86a07b6fbc7..a86a07b6fbc7 100644
--- a/drivers/media/platform/sti/hva/hva-debugfs.c
+++ b/drivers/media/platform/st/sti/hva/hva-debugfs.c
diff --git a/drivers/media/platform/sti/hva/hva-h264.c b/drivers/media/platform/st/sti/hva/hva-h264.c
index 98cb00d2d868..98cb00d2d868 100644
--- a/drivers/media/platform/sti/hva/hva-h264.c
+++ b/drivers/media/platform/st/sti/hva/hva-h264.c
diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/st/sti/hva/hva-hw.c
index fe4ea2e7f37e..fe4ea2e7f37e 100644
--- a/drivers/media/platform/sti/hva/hva-hw.c
+++ b/drivers/media/platform/st/sti/hva/hva-hw.c
diff --git a/drivers/media/platform/sti/hva/hva-hw.h b/drivers/media/platform/st/sti/hva/hva-hw.h
index b298990264d5..b298990264d5 100644
--- a/drivers/media/platform/sti/hva/hva-hw.h
+++ b/drivers/media/platform/st/sti/hva/hva-hw.h
diff --git a/drivers/media/platform/sti/hva/hva-mem.c b/drivers/media/platform/st/sti/hva/hva-mem.c
index 68047b60b66c..68047b60b66c 100644
--- a/drivers/media/platform/sti/hva/hva-mem.c
+++ b/drivers/media/platform/st/sti/hva/hva-mem.c
diff --git a/drivers/media/platform/sti/hva/hva-mem.h b/drivers/media/platform/st/sti/hva/hva-mem.h
index fec549dff2b3..fec549dff2b3 100644
--- a/drivers/media/platform/sti/hva/hva-mem.h
+++ b/drivers/media/platform/st/sti/hva/hva-mem.h
diff --git a/drivers/media/platform/sti/hva/hva-v4l2.c b/drivers/media/platform/st/sti/hva/hva-v4l2.c
index bb34d6997d99..bb34d6997d99 100644
--- a/drivers/media/platform/sti/hva/hva-v4l2.c
+++ b/drivers/media/platform/st/sti/hva/hva-v4l2.c
diff --git a/drivers/media/platform/sti/hva/hva.h b/drivers/media/platform/st/sti/hva/hva.h
index ba6b893416ec..ba6b893416ec 100644
--- a/drivers/media/platform/sti/hva/hva.h
+++ b/drivers/media/platform/st/sti/hva/hva.h
diff --git a/drivers/media/platform/st/stm32/Kconfig b/drivers/media/platform/st/stm32/Kconfig
new file mode 100644
index 000000000000..b22dd4753496
--- /dev/null
+++ b/drivers/media/platform/st/stm32/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+# V4L drivers
+config VIDEO_STM32_DCMI
+ tristate "STM32 Digital Camera Memory Interface (DCMI) support"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF
+ depends on ARCH_STM32 || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ help
+ This module makes the STM32 Digital Camera Memory Interface (DCMI)
+ available as a v4l2 device.
+
+ To compile this driver as a module, choose M here: the module
+ will be called stm32-dcmi.
+
+# Mem2mem drivers
+config VIDEO_STM32_DMA2D
+ tristate "STM32 Chrom-Art Accelerator (DMA2D)"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_STM32 || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ Enables DMA2D hardware support on STM32.
+
+ The STM32 DMA2D is a memory-to-memory engine for pixel conversion
+ and specialized DMA dedicated to image manipulation.
diff --git a/drivers/media/platform/stm32/Makefile b/drivers/media/platform/st/stm32/Makefile
index 896ef98a73ab..896ef98a73ab 100644
--- a/drivers/media/platform/stm32/Makefile
+++ b/drivers/media/platform/st/stm32/Makefile
diff --git a/drivers/media/platform/stm32/dma2d/dma2d-hw.c b/drivers/media/platform/st/stm32/dma2d/dma2d-hw.c
index ea4cc84d8a39..ea4cc84d8a39 100644
--- a/drivers/media/platform/stm32/dma2d/dma2d-hw.c
+++ b/drivers/media/platform/st/stm32/dma2d/dma2d-hw.c
diff --git a/drivers/media/platform/stm32/dma2d/dma2d-regs.h b/drivers/media/platform/st/stm32/dma2d/dma2d-regs.h
index 6444592d415b..6444592d415b 100644
--- a/drivers/media/platform/stm32/dma2d/dma2d-regs.h
+++ b/drivers/media/platform/st/stm32/dma2d/dma2d-regs.h
diff --git a/drivers/media/platform/stm32/dma2d/dma2d.c b/drivers/media/platform/st/stm32/dma2d/dma2d.c
index 17af90d86898..9706aa41b5d2 100644
--- a/drivers/media/platform/stm32/dma2d/dma2d.c
+++ b/drivers/media/platform/st/stm32/dma2d/dma2d.c
@@ -633,14 +633,11 @@ static int dma2d_probe(struct platform_device *pdev)
goto put_clk_gate;
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to find IRQ\n");
- ret = -ENXIO;
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
goto unprep_clk_gate;
- }
- dev->irq = res->start;
+ dev->irq = ret;
ret = devm_request_irq(&pdev->dev, dev->irq, dma2d_isr,
0, pdev->name, dev);
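
The dma2d conversion above is the same platform_get_irq() pattern sketched after the bdisp hunk earlier in this series, with the returned number additionally cached in dev->irq before being requested.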
diff --git a/drivers/media/platform/stm32/dma2d/dma2d.h b/drivers/media/platform/st/stm32/dma2d/dma2d.h
index 3f03a7ca9ee3..3f03a7ca9ee3 100644
--- a/drivers/media/platform/stm32/dma2d/dma2d.h
+++ b/drivers/media/platform/st/stm32/dma2d/dma2d.h
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/st/stm32/stm32-dcmi.c
index e1b17c05229c..c4c65d852525 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmi.c
@@ -113,7 +113,7 @@ struct dcmi_framesize {
struct dcmi_buf {
struct vb2_v4l2_buffer vb;
bool prepared;
- dma_addr_t paddr;
+ struct sg_table sgt;
size_t size;
struct list_head list;
};
@@ -150,13 +150,14 @@ struct stm32_dcmi {
struct mutex lock;
struct vb2_queue queue;
- struct v4l2_fwnode_bus_parallel bus;
+ struct v4l2_mbus_config_parallel bus;
enum v4l2_mbus_type bus_type;
struct completion complete;
struct clk *mclk;
enum state state;
struct dma_chan *dma_chan;
dma_cookie_t dma_cookie;
+ u32 dma_max_burst;
u32 misr;
int errors_count;
int overrun_count;
@@ -326,13 +327,11 @@ static int dcmi_start_dma(struct stm32_dcmi *dcmi,
mutex_lock(&dcmi->dma_lock);
/* Prepare a DMA transaction */
- desc = dmaengine_prep_slave_single(dcmi->dma_chan, buf->paddr,
- buf->size,
- DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT);
+ desc = dmaengine_prep_slave_sg(dcmi->dma_chan, buf->sgt.sgl, buf->sgt.nents,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
if (!desc) {
- dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_single failed for buffer phy=%pad size=%zu\n",
- __func__, &buf->paddr, buf->size);
+ dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_sg failed\n", __func__);
mutex_unlock(&dcmi->dma_lock);
return -EINVAL;
}
@@ -524,6 +523,10 @@ static int dcmi_buf_prepare(struct vb2_buffer *vb)
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);
unsigned long size;
+ unsigned int num_sgs = 1;
+ dma_addr_t dma_buf;
+ struct scatterlist *sg;
+ int i, ret;
size = dcmi->fmt.fmt.pix.sizeimage;
@@ -537,15 +540,33 @@ static int dcmi_buf_prepare(struct vb2_buffer *vb)
if (!buf->prepared) {
/* Get memory addresses */
- buf->paddr =
- vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
buf->size = vb2_plane_size(&buf->vb.vb2_buf, 0);
- buf->prepared = true;
+ if (buf->size > dcmi->dma_max_burst)
+ num_sgs = DIV_ROUND_UP(buf->size, dcmi->dma_max_burst);
- vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
+ ret = sg_alloc_table(&buf->sgt, num_sgs, GFP_ATOMIC);
+ if (ret) {
+ dev_err(dcmi->dev, "sg table alloc failed\n");
+ return ret;
+ }
+
+ dma_buf = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
dev_dbg(dcmi->dev, "buffer[%d] phy=%pad size=%zu\n",
- vb->index, &buf->paddr, buf->size);
+ vb->index, &dma_buf, buf->size);
+
+ for_each_sg(buf->sgt.sgl, sg, num_sgs, i) {
+ size_t bytes = min_t(size_t, size, dcmi->dma_max_burst);
+
+ sg_dma_address(sg) = dma_buf;
+ sg_dma_len(sg) = bytes;
+ dma_buf += bytes;
+ size -= bytes;
+ }
+
+ buf->prepared = true;
+
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
}
return 0;
@@ -1866,6 +1887,7 @@ static int dcmi_probe(struct platform_device *pdev)
struct stm32_dcmi *dcmi;
struct vb2_queue *q;
struct dma_chan *chan;
+ struct dma_slave_caps caps;
struct clk *mclk;
int irq;
int ret = 0;
@@ -1953,6 +1975,11 @@ static int dcmi_probe(struct platform_device *pdev)
return ret;
}
+ dcmi->dma_max_burst = UINT_MAX;
+ ret = dma_get_slave_caps(chan, &caps);
+ if (!ret && caps.max_sg_burst)
+ dcmi->dma_max_burst = caps.max_sg_burst * DMA_SLAVE_BUSWIDTH_4_BYTES;
+
spin_lock_init(&dcmi->irqlock);
mutex_init(&dcmi->lock);
mutex_init(&dcmi->dma_lock);
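
The stm32-dcmi change above replaces a single-segment DMA transfer with a scatter-gather list whose segments respect the DMA engine's burst limit: dma_max_burst is caps.max_sg_burst scaled by the 4-byte bus width, falling back to UINT_MAX when the engine reports no limit. A condensed sketch of the chunking logic, with error handling elided and names mirroring the patch:

	/*
	 * Split a physically contiguous buffer of 'size' bytes at
	 * 'dma_buf' into sg entries of at most 'dma_max_burst' bytes.
	 */
	unsigned int num_sgs = DIV_ROUND_UP(size, dma_max_burst);
	struct scatterlist *sg;
	struct sg_table sgt;
	int i;

	if (sg_alloc_table(&sgt, num_sgs, GFP_KERNEL))
		return -ENOMEM;

	for_each_sg(sgt.sgl, sg, num_sgs, i) {
		size_t bytes = min_t(size_t, size, dma_max_burst);

		sg_dma_address(sg) = dma_buf;
		sg_dma_len(sg) = bytes;
		dma_buf += bytes;
		size -= bytes;
	}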
diff --git a/drivers/media/platform/sunxi/Kconfig b/drivers/media/platform/sunxi/Kconfig
index 7151cc249afa..46b7b9bf989c 100644
--- a/drivers/media/platform/sunxi/Kconfig
+++ b/drivers/media/platform/sunxi/Kconfig
@@ -1,4 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
+comment "Sunxi media platform drivers"
+
source "drivers/media/platform/sunxi/sun4i-csi/Kconfig"
source "drivers/media/platform/sunxi/sun6i-csi/Kconfig"
+source "drivers/media/platform/sunxi/sun8i-di/Kconfig"
+source "drivers/media/platform/sunxi/sun8i-rotate/Kconfig"
diff --git a/drivers/media/platform/sunxi/sun4i-csi/Kconfig b/drivers/media/platform/sunxi/sun4i-csi/Kconfig
index 903c6152f6e8..7960e6836f41 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/Kconfig
+++ b/drivers/media/platform/sunxi/sun4i-csi/Kconfig
@@ -2,7 +2,8 @@
config VIDEO_SUN4I_CSI
tristate "Allwinner A10 CMOS Sensor Interface Support"
- depends on VIDEO_V4L2 && COMMON_CLK && HAS_DMA
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && COMMON_CLK && HAS_DMA
depends on ARCH_SUNXI || COMPILE_TEST
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
index a5f61ee0ec4d..8eeed87bfb13 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
@@ -124,7 +124,7 @@ struct sun4i_csi {
dma_addr_t paddr;
} scratch;
- struct v4l2_fwnode_bus_parallel bus;
+ struct v4l2_mbus_config_parallel bus;
/* Main Device */
struct v4l2_device v4l;
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
index 2c39cd7f2862..0912a1b6d525 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
@@ -226,7 +226,7 @@ static void return_all_buffers(struct sun4i_csi *csi,
static int sun4i_csi_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct sun4i_csi *csi = vb2_get_drv_priv(vq);
- struct v4l2_fwnode_bus_parallel *bus = &csi->bus;
+ struct v4l2_mbus_config_parallel *bus = &csi->bus;
const struct sun4i_csi_format *csi_fmt;
unsigned long href_pol, pclk_pol, vref_pol;
unsigned long flags;
diff --git a/drivers/media/platform/sunxi/sun6i-csi/Kconfig b/drivers/media/platform/sunxi/sun6i-csi/Kconfig
index 586e3fb3a80d..0345901617d4 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/Kconfig
+++ b/drivers/media/platform/sunxi/sun6i-csi/Kconfig
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_SUN6I_CSI
tristate "Allwinner V3s Camera Sensor Interface driver"
- depends on VIDEO_V4L2 && COMMON_CLK && HAS_DMA
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && COMMON_CLK && HAS_DMA
depends on ARCH_SUNXI || COMPILE_TEST
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
index 607a8d39fbe2..682c26536034 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
@@ -368,7 +368,11 @@ static int sun6i_video_try_fmt(struct sun6i_video *video,
if (pixfmt->field == V4L2_FIELD_ANY)
pixfmt->field = V4L2_FIELD_NONE;
- pixfmt->colorspace = V4L2_COLORSPACE_RAW;
+ if (pixfmt->pixelformat == V4L2_PIX_FMT_JPEG)
+ pixfmt->colorspace = V4L2_COLORSPACE_JPEG;
+ else
+ pixfmt->colorspace = V4L2_COLORSPACE_SRGB;
+
pixfmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
pixfmt->quantization = V4L2_QUANTIZATION_DEFAULT;
pixfmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
diff --git a/drivers/media/platform/sunxi/sun8i-di/Kconfig b/drivers/media/platform/sunxi/sun8i-di/Kconfig
new file mode 100644
index 000000000000..ff71e06ee2df
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-di/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_SUN8I_DEINTERLACE
+ tristate "Allwinner Deinterlace driver"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on COMMON_CLK && OF
+ depends on PM
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ Support for the Allwinner deinterlace unit with scaling
+ capability found on some SoCs, like the H3.
+ To compile this driver as a module, choose M here.
diff --git a/drivers/media/platform/sunxi/sun8i-rotate/Kconfig b/drivers/media/platform/sunxi/sun8i-rotate/Kconfig
new file mode 100644
index 000000000000..cfba29072d75
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-rotate/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config VIDEO_SUN8I_ROTATE
+ tristate "Allwinner DE2 rotation driver"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on COMMON_CLK && OF
+ depends on PM
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ Support for the Allwinner DE2 rotation unit.
+ To compile this driver as a module, choose M here.
diff --git a/drivers/media/platform/ti/Kconfig b/drivers/media/platform/ti/Kconfig
new file mode 100644
index 000000000000..e1ab56c3be1f
--- /dev/null
+++ b/drivers/media/platform/ti/Kconfig
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Texas Instruments drivers"
+
+# TI VIDEO PORT Helper Modules
+# These will be selected by VPE and VIP
+config VIDEO_TI_VPDMA
+ tristate
+
+config VIDEO_TI_SC
+ tristate
+
+config VIDEO_TI_CSC
+ tristate
+
+# V4L drivers
+
+config VIDEO_TI_CAL
+ tristate "TI CAL (Camera Adaptation Layer) driver"
+ depends on VIDEO_DEV
+ depends on V4L_PLATFORM_DRIVERS
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ depends on SOC_DRA7XX || ARCH_K3 || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ help
+ Support for the TI CAL (Camera Adaptation Layer) block
+ found on the DRA72X SoC.
+ In the TI Technical Reference Manual this module is referred
+ to as the Camera Interface Subsystem (CAMSS).
+
+config VIDEO_TI_CAL_MC
+ bool "Media Controller centric mode by default"
+ depends on VIDEO_TI_CAL
+ default n
+ help
+ Enables Media Controller centric mode by default.
+
+ If set, the CAL driver will start in Media Controller mode by
+ default. Note that this behavior can be overridden via the
+ module parameter 'mc_api'.
+
+# Mem2mem drivers
+
+config VIDEO_TI_VPE
+ tristate "TI VPE (Video Processing Engine) driver"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on SOC_DRA7XX || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ select VIDEO_TI_VPDMA
+ select VIDEO_TI_SC
+ select VIDEO_TI_CSC
+ help
+ Support for the TI VPE (Video Processing Engine) block
+ found on the DRA7XX SoC.
+
+config VIDEO_TI_VPE_DEBUG
+ bool "VPE debug messages"
+ depends on VIDEO_TI_VPE
+ help
+ Enable debug messages in the VPE driver.
+
+source "drivers/media/platform/ti/am437x/Kconfig"
+source "drivers/media/platform/ti/davinci/Kconfig"
+source "drivers/media/platform/ti/omap/Kconfig"
+source "drivers/media/platform/ti/omap3isp/Kconfig"
diff --git a/drivers/media/platform/ti/Makefile b/drivers/media/platform/ti/Makefile
new file mode 100644
index 000000000000..98c5fe5c40d6
--- /dev/null
+++ b/drivers/media/platform/ti/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += am437x/
+obj-y += cal/
+obj-y += vpe/
+obj-y += davinci/
+obj-y += omap/
+obj-y += omap3isp/
diff --git a/drivers/media/platform/am437x/Kconfig b/drivers/media/platform/ti/am437x/Kconfig
index 9ef898f512de..2e24fff7e625 100644
--- a/drivers/media/platform/am437x/Kconfig
+++ b/drivers/media/platform/ti/am437x/Kconfig
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_AM437X_VPFE
tristate "TI AM437x VPFE video capture driver"
- depends on VIDEO_V4L2
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV
depends on SOC_AM43XX || COMPILE_TEST
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
diff --git a/drivers/media/platform/am437x/Makefile b/drivers/media/platform/ti/am437x/Makefile
index 541043487268..541043487268 100644
--- a/drivers/media/platform/am437x/Makefile
+++ b/drivers/media/platform/ti/am437x/Makefile
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/ti/am437x/am437x-vpfe.c
index 2dfae9bc0bba..2dfae9bc0bba 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/ti/am437x/am437x-vpfe.c
diff --git a/drivers/media/platform/am437x/am437x-vpfe.h b/drivers/media/platform/ti/am437x/am437x-vpfe.h
index 05ee37db0273..05ee37db0273 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.h
+++ b/drivers/media/platform/ti/am437x/am437x-vpfe.h
diff --git a/drivers/media/platform/am437x/am437x-vpfe_regs.h b/drivers/media/platform/ti/am437x/am437x-vpfe_regs.h
index 63ecdca3b908..63ecdca3b908 100644
--- a/drivers/media/platform/am437x/am437x-vpfe_regs.h
+++ b/drivers/media/platform/ti/am437x/am437x-vpfe_regs.h
diff --git a/drivers/media/platform/ti/cal/Makefile b/drivers/media/platform/ti/cal/Makefile
new file mode 100644
index 000000000000..45ac35585f0b
--- /dev/null
+++ b/drivers/media/platform/ti/cal/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_TI_CAL) += ti-cal.o
+ti-cal-y := cal.o cal-camerarx.o cal-video.o
diff --git a/drivers/media/platform/ti-vpe/cal-camerarx.c b/drivers/media/platform/ti/cal/cal-camerarx.c
index 4bf7a8c2e711..6b43a1525b45 100644
--- a/drivers/media/platform/ti-vpe/cal-camerarx.c
+++ b/drivers/media/platform/ti/cal/cal-camerarx.c
@@ -47,7 +47,7 @@ static inline void camerarx_write(struct cal_camerarx *phy, u32 offset, u32 val)
static s64 cal_camerarx_get_ext_link_freq(struct cal_camerarx *phy)
{
- struct v4l2_fwnode_bus_mipi_csi2 *mipi_csi2 = &phy->endpoint.bus.mipi_csi2;
+ struct v4l2_mbus_config_mipi_csi2 *mipi_csi2 = &phy->endpoint.bus.mipi_csi2;
u32 num_lanes = mipi_csi2->num_data_lanes;
const struct cal_format_info *fmtinfo;
u32 bpp;
@@ -76,7 +76,7 @@ static void cal_camerarx_lane_config(struct cal_camerarx *phy)
u32 val = cal_read(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance));
u32 lane_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK;
u32 polarity_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK;
- struct v4l2_fwnode_bus_mipi_csi2 *mipi_csi2 =
+ struct v4l2_mbus_config_mipi_csi2 *mipi_csi2 =
&phy->endpoint.bus.mipi_csi2;
int lane;
@@ -518,7 +518,7 @@ static int cal_camerarx_regmap_init(struct cal_dev *cal,
static int cal_camerarx_parse_dt(struct cal_camerarx *phy)
{
struct v4l2_fwnode_endpoint *endpoint = &phy->endpoint;
- char data_lanes[V4L2_FWNODE_CSI2_MAX_DATA_LANES * 2];
+ char data_lanes[V4L2_MBUS_CSI2_MAX_DATA_LANES * 2];
struct device_node *ep_node;
unsigned int i;
int ret;
diff --git a/drivers/media/platform/ti-vpe/cal-video.c b/drivers/media/platform/ti/cal/cal-video.c
index 7799da1cc261..3e936a2ca36c 100644
--- a/drivers/media/platform/ti-vpe/cal-video.c
+++ b/drivers/media/platform/ti/cal/cal-video.c
@@ -823,6 +823,9 @@ static int cal_ctx_v4l2_init_formats(struct cal_ctx *ctx)
/* Enumerate sub device formats and enable all matching local formats */
ctx->active_fmt = devm_kcalloc(ctx->cal->dev, cal_num_formats,
sizeof(*ctx->active_fmt), GFP_KERNEL);
+ if (!ctx->active_fmt)
+ return -ENOMEM;
+
ctx->num_active_fmt = 0;
for (j = 0, i = 0; ; ++j) {
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti/cal/cal.c
index 4a4a6c5983f7..4a4a6c5983f7 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti/cal/cal.c
diff --git a/drivers/media/platform/ti-vpe/cal.h b/drivers/media/platform/ti/cal/cal.h
index 527e22d022f3..527e22d022f3 100644
--- a/drivers/media/platform/ti-vpe/cal.h
+++ b/drivers/media/platform/ti/cal/cal.h
diff --git a/drivers/media/platform/ti-vpe/cal_regs.h b/drivers/media/platform/ti/cal/cal_regs.h
index 40e4f972fcb7..40e4f972fcb7 100644
--- a/drivers/media/platform/ti-vpe/cal_regs.h
+++ b/drivers/media/platform/ti/cal/cal_regs.h
diff --git a/drivers/media/platform/davinci/Kconfig b/drivers/media/platform/ti/davinci/Kconfig
index 9d2a9eeb3499..c61e697aeb12 100644
--- a/drivers/media/platform/davinci/Kconfig
+++ b/drivers/media/platform/ti/davinci/Kconfig
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_DAVINCI_VPIF_DISPLAY
tristate "TI DaVinci VPIF V4L2-Display driver"
- depends on VIDEO_V4L2
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV
depends on ARCH_DAVINCI || COMPILE_TEST
depends on I2C
select VIDEOBUF2_DMA_CONTIG
@@ -17,7 +18,8 @@ config VIDEO_DAVINCI_VPIF_DISPLAY
config VIDEO_DAVINCI_VPIF_CAPTURE
tristate "TI DaVinci VPIF video capture driver"
- depends on VIDEO_V4L2
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV
depends on ARCH_DAVINCI || COMPILE_TEST
depends on I2C
select VIDEOBUF2_DMA_CONTIG
@@ -32,7 +34,8 @@ config VIDEO_DAVINCI_VPIF_CAPTURE
config VIDEO_DM6446_CCDC
tristate "TI DM6446 CCDC video capture driver"
- depends on VIDEO_V4L2
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV
depends on ARCH_DAVINCI || COMPILE_TEST
depends on I2C
select VIDEOBUF_DMA_CONTIG
@@ -48,7 +51,8 @@ config VIDEO_DM6446_CCDC
config VIDEO_DM355_CCDC
tristate "TI DM355 CCDC video capture driver"
- depends on VIDEO_V4L2
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV
depends on ARCH_DAVINCI || COMPILE_TEST
depends on I2C
select VIDEOBUF_DMA_CONTIG
@@ -64,7 +68,8 @@ config VIDEO_DM355_CCDC
config VIDEO_DM365_ISIF
tristate "TI DM365 ISIF video capture driver"
- depends on VIDEO_V4L2
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV
depends on ARCH_DAVINCI || COMPILE_TEST
depends on I2C
select VIDEOBUF_DMA_CONTIG
@@ -78,7 +83,8 @@ config VIDEO_DM365_ISIF
config VIDEO_DAVINCI_VPBE_DISPLAY
tristate "TI DaVinci VPBE V4L2-Display driver"
- depends on VIDEO_V4L2
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV
depends on ARCH_DAVINCI || COMPILE_TEST
depends on I2C
select VIDEOBUF2_DMA_CONTIG
diff --git a/drivers/media/platform/davinci/Makefile b/drivers/media/platform/ti/davinci/Makefile
index 05c45bf371aa..05c45bf371aa 100644
--- a/drivers/media/platform/davinci/Makefile
+++ b/drivers/media/platform/ti/davinci/Makefile
diff --git a/drivers/media/platform/davinci/ccdc_hw_device.h b/drivers/media/platform/ti/davinci/ccdc_hw_device.h
index a545052a95a9..a545052a95a9 100644
--- a/drivers/media/platform/davinci/ccdc_hw_device.h
+++ b/drivers/media/platform/ti/davinci/ccdc_hw_device.h
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/ti/davinci/dm355_ccdc.c
index e06d113dfe96..e06d113dfe96 100644
--- a/drivers/media/platform/davinci/dm355_ccdc.c
+++ b/drivers/media/platform/ti/davinci/dm355_ccdc.c
diff --git a/drivers/media/platform/davinci/dm355_ccdc_regs.h b/drivers/media/platform/ti/davinci/dm355_ccdc_regs.h
index eb381f075245..eb381f075245 100644
--- a/drivers/media/platform/davinci/dm355_ccdc_regs.h
+++ b/drivers/media/platform/ti/davinci/dm355_ccdc_regs.h
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/ti/davinci/dm644x_ccdc.c
index c6378c4e0074..c6378c4e0074 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc.c
+++ b/drivers/media/platform/ti/davinci/dm644x_ccdc.c
diff --git a/drivers/media/platform/davinci/dm644x_ccdc_regs.h b/drivers/media/platform/ti/davinci/dm644x_ccdc_regs.h
index c4894f6a254e..c4894f6a254e 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc_regs.h
+++ b/drivers/media/platform/ti/davinci/dm644x_ccdc_regs.h
diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/ti/davinci/isif.c
index c53cecd072b1..c53cecd072b1 100644
--- a/drivers/media/platform/davinci/isif.c
+++ b/drivers/media/platform/ti/davinci/isif.c
diff --git a/drivers/media/platform/davinci/isif_regs.h b/drivers/media/platform/ti/davinci/isif_regs.h
index d68d38841ae7..d68d38841ae7 100644
--- a/drivers/media/platform/davinci/isif_regs.h
+++ b/drivers/media/platform/ti/davinci/isif_regs.h
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/ti/davinci/vpbe.c
index 5f0aeb744e81..5f0aeb744e81 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/ti/davinci/vpbe.c
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/ti/davinci/vpbe_display.c
index bf3c3e76b921..bf3c3e76b921 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/ti/davinci/vpbe_display.c
diff --git a/drivers/media/platform/davinci/vpbe_osd.c b/drivers/media/platform/ti/davinci/vpbe_osd.c
index 32f7ef547c82..32f7ef547c82 100644
--- a/drivers/media/platform/davinci/vpbe_osd.c
+++ b/drivers/media/platform/ti/davinci/vpbe_osd.c
diff --git a/drivers/media/platform/davinci/vpbe_osd_regs.h b/drivers/media/platform/ti/davinci/vpbe_osd_regs.h
index cecd5991d4c5..cecd5991d4c5 100644
--- a/drivers/media/platform/davinci/vpbe_osd_regs.h
+++ b/drivers/media/platform/ti/davinci/vpbe_osd_regs.h
diff --git a/drivers/media/platform/davinci/vpbe_venc.c b/drivers/media/platform/ti/davinci/vpbe_venc.c
index 4c8e31de12b1..4c8e31de12b1 100644
--- a/drivers/media/platform/davinci/vpbe_venc.c
+++ b/drivers/media/platform/ti/davinci/vpbe_venc.c
diff --git a/drivers/media/platform/davinci/vpbe_venc_regs.h b/drivers/media/platform/ti/davinci/vpbe_venc_regs.h
index 29d8fc3af662..29d8fc3af662 100644
--- a/drivers/media/platform/davinci/vpbe_venc_regs.h
+++ b/drivers/media/platform/ti/davinci/vpbe_venc_regs.h
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/ti/davinci/vpfe_capture.c
index 0a2226b321d7..0a2226b321d7 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/ti/davinci/vpfe_capture.c
diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/ti/davinci/vpif.c
index 5a89d885d0e3..97ef770266af 100644
--- a/drivers/media/platform/davinci/vpif.c
+++ b/drivers/media/platform/ti/davinci/vpif.c
@@ -20,8 +20,10 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
@@ -41,6 +43,11 @@ MODULE_ALIAS("platform:" VPIF_DRIVER_NAME);
#define VPIF_CH2_MAX_MODES 15
#define VPIF_CH3_MAX_MODES 2
+struct vpif_data {
+ struct platform_device *capture;
+ struct platform_device *display;
+};
+
DEFINE_SPINLOCK(vpif_lock);
EXPORT_SYMBOL_GPL(vpif_lock);
@@ -423,21 +430,35 @@ int vpif_channel_getfid(u8 channel_id)
}
EXPORT_SYMBOL(vpif_channel_getfid);
+static void vpif_pdev_release(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ kfree(pdev);
+}
+
static int vpif_probe(struct platform_device *pdev)
{
- static struct resource *res_irq;
+ static struct resource res_irq;
struct platform_device *pdev_capture, *pdev_display;
struct device_node *endpoint = NULL;
+ struct vpif_data *data;
+ int ret;
+ int irq;
vpif_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vpif_base))
return PTR_ERR(vpif_base);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, data);
+
pm_runtime_enable(&pdev->dev);
pm_runtime_get(&pdev->dev);
- dev_info(&pdev->dev, "vpif probe success\n");
-
/*
* If VPIF Node has endpoints, assume "new" DT support,
* where capture and display drivers don't have DT nodes
@@ -453,49 +474,83 @@ static int vpif_probe(struct platform_device *pdev)
* For DT platforms, manually create platform_devices for
* capture/display drivers.
*/
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res_irq) {
- dev_warn(&pdev->dev, "Missing IRQ resource.\n");
- pm_runtime_put(&pdev->dev);
- return -EINVAL;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_put_rpm;
}
+ res_irq = (struct resource)DEFINE_RES_IRQ_NAMED(irq, of_node_full_name(pdev->dev.of_node));
+ res_irq.flags |= irq_get_trigger_type(irq);
- pdev_capture = devm_kzalloc(&pdev->dev, sizeof(*pdev_capture),
- GFP_KERNEL);
- if (pdev_capture) {
- pdev_capture->name = "vpif_capture";
- pdev_capture->id = -1;
- pdev_capture->resource = res_irq;
- pdev_capture->num_resources = 1;
- pdev_capture->dev.dma_mask = pdev->dev.dma_mask;
- pdev_capture->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
- pdev_capture->dev.parent = &pdev->dev;
- platform_device_register(pdev_capture);
- } else {
- dev_warn(&pdev->dev, "Unable to allocate memory for pdev_capture.\n");
+ pdev_capture = kzalloc(sizeof(*pdev_capture), GFP_KERNEL);
+ if (!pdev_capture) {
+ ret = -ENOMEM;
+ goto err_put_rpm;
}
- pdev_display = devm_kzalloc(&pdev->dev, sizeof(*pdev_display),
- GFP_KERNEL);
- if (pdev_display) {
- pdev_display->name = "vpif_display";
- pdev_display->id = -1;
- pdev_display->resource = res_irq;
- pdev_display->num_resources = 1;
- pdev_display->dev.dma_mask = pdev->dev.dma_mask;
- pdev_display->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
- pdev_display->dev.parent = &pdev->dev;
- platform_device_register(pdev_display);
- } else {
- dev_warn(&pdev->dev, "Unable to allocate memory for pdev_display.\n");
+ pdev_capture->name = "vpif_capture";
+ pdev_capture->id = -1;
+ pdev_capture->resource = &res_irq;
+ pdev_capture->num_resources = 1;
+ pdev_capture->dev.dma_mask = pdev->dev.dma_mask;
+ pdev_capture->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
+ pdev_capture->dev.parent = &pdev->dev;
+ pdev_capture->dev.release = vpif_pdev_release;
+
+ ret = platform_device_register(pdev_capture);
+ if (ret)
+ goto err_put_pdev_capture;
+
+ pdev_display = kzalloc(sizeof(*pdev_display), GFP_KERNEL);
+ if (!pdev_display) {
+ ret = -ENOMEM;
+ goto err_put_pdev_capture;
}
+ pdev_display->name = "vpif_display";
+ pdev_display->id = -1;
+ pdev_display->resource = &res_irq;
+ pdev_display->num_resources = 1;
+ pdev_display->dev.dma_mask = pdev->dev.dma_mask;
+ pdev_display->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
+ pdev_display->dev.parent = &pdev->dev;
+ pdev_display->dev.release = vpif_pdev_release;
+
+ ret = platform_device_register(pdev_display);
+ if (ret)
+ goto err_put_pdev_display;
+
+ data->capture = pdev_capture;
+ data->display = pdev_display;
+
return 0;
+
+err_put_pdev_display:
+ platform_device_put(pdev_display);
+err_put_pdev_capture:
+ platform_device_put(pdev_capture);
+err_put_rpm:
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ kfree(data);
+
+ return ret;
}
static int vpif_remove(struct platform_device *pdev)
{
+ struct vpif_data *data = platform_get_drvdata(pdev);
+
+ if (data->capture)
+ platform_device_unregister(data->capture);
+ if (data->display)
+ platform_device_unregister(data->display);
+
+ pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+
+ kfree(data);
+
return 0;
}
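
The vpif.c rewrite above moves the child devices from devm_kzalloc() to plain kzalloc() plus a .release callback: once a platform_device is registered, its lifetime is governed by refcounting, so platform_device_put() and platform_device_unregister() must be able to free it through release(). A minimal sketch of that pattern, with the dma_mask copying elided and illustrative names:

	static void child_pdev_release(struct device *dev)
	{
		kfree(to_platform_device(dev));
	}

	static struct platform_device *create_child(struct device *parent,
						    const char *name,
						    struct resource *res)
	{
		struct platform_device *child;
		int ret;

		child = kzalloc(sizeof(*child), GFP_KERNEL);
		if (!child)
			return ERR_PTR(-ENOMEM);

		child->name = name;
		child->id = -1;
		child->resource = res;
		child->num_resources = 1;
		child->dev.parent = parent;
		child->dev.release = child_pdev_release;

		ret = platform_device_register(child);
		if (ret) {
			platform_device_put(child);	/* frees via release() */
			return ERR_PTR(ret);
		}
		return child;
	}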
diff --git a/drivers/media/platform/davinci/vpif.h b/drivers/media/platform/ti/davinci/vpif.h
index c6d1d890478a..c6d1d890478a 100644
--- a/drivers/media/platform/davinci/vpif.h
+++ b/drivers/media/platform/ti/davinci/vpif.h
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/ti/davinci/vpif_capture.c
index 8fe55374c5a3..bf76c5c83743 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/ti/davinci/vpif_capture.c
@@ -1607,7 +1607,6 @@ static __init int vpif_probe(struct platform_device *pdev)
{
struct vpif_subdev_info *subdevdata;
struct i2c_adapter *i2c_adap;
- struct resource *res;
int subdev_count;
int res_idx = 0;
int i, err;
@@ -1632,17 +1631,23 @@ static __init int vpif_probe(struct platform_device *pdev)
goto vpif_free;
}
- while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) {
- err = devm_request_irq(&pdev->dev, res->start, vpif_channel_isr,
- IRQF_SHARED, VPIF_DRIVER_NAME,
- (void *)(&vpif_obj.dev[res_idx]->
- channel_id));
- if (err) {
- err = -EINVAL;
+ do {
+ int irq;
+
+ err = platform_get_irq_optional(pdev, res_idx);
+ if (err < 0 && err != -ENXIO)
goto vpif_unregister;
- }
- res_idx++;
- }
+ if (err > 0)
+ irq = err;
+ else
+ break;
+
+ err = devm_request_irq(&pdev->dev, irq, vpif_channel_isr,
+ IRQF_SHARED, VPIF_DRIVER_NAME,
+ (void *)(&vpif_obj.dev[res_idx]->channel_id));
+ if (err)
+ goto vpif_unregister;
+ } while (++res_idx);
vpif_obj.config = pdev->dev.platform_data;
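
Both vpif_capture above and vpif_display below switch from walking IORESOURCE_IRQ resources to probing IRQ indices with platform_get_irq_optional(), which returns -ENXIO once the device has no IRQ at the requested index. A condensed sketch of the loop; my_isr and ctx are placeholders:

	int idx = 0, err;

	do {
		int irq = platform_get_irq_optional(pdev, idx);

		if (irq == -ENXIO)
			break;			/* no more IRQs: done */
		if (irq < 0)
			return irq;		/* genuine lookup error */

		err = devm_request_irq(&pdev->dev, irq, my_isr, IRQF_SHARED,
				       VPIF_DRIVER_NAME, &ctx[idx]);
		if (err)
			return err;
	} while (++idx);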
diff --git a/drivers/media/platform/davinci/vpif_capture.h b/drivers/media/platform/ti/davinci/vpif_capture.h
index d5951f61df47..d5951f61df47 100644
--- a/drivers/media/platform/davinci/vpif_capture.h
+++ b/drivers/media/platform/ti/davinci/vpif_capture.h
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/ti/davinci/vpif_display.c
index 59f6b782e104..fca148b66471 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/ti/davinci/vpif_display.c
@@ -1221,7 +1221,6 @@ static __init int vpif_probe(struct platform_device *pdev)
{
struct vpif_subdev_info *subdevdata;
struct i2c_adapter *i2c_adap;
- struct resource *res;
int subdev_count;
int res_idx = 0;
int i, err;
@@ -1245,18 +1244,25 @@ static __init int vpif_probe(struct platform_device *pdev)
goto vpif_free;
}
- while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) {
- err = devm_request_irq(&pdev->dev, res->start, vpif_channel_isr,
- IRQF_SHARED, VPIF_DRIVER_NAME,
- (void *)(&vpif_obj.dev[res_idx]->
- channel_id));
+ do {
+ int irq;
+
+ err = platform_get_irq_optional(pdev, res_idx);
+ if (err < 0 && err != -ENXIO)
+ goto vpif_unregister;
+ if (err > 0)
+ irq = err;
+ else
+ break;
+
+ err = devm_request_irq(&pdev->dev, irq, vpif_channel_isr,
+ IRQF_SHARED, VPIF_DRIVER_NAME,
+ (void *)(&vpif_obj.dev[res_idx]->channel_id));
if (err) {
- err = -EINVAL;
vpif_err("VPIF IRQ request failed\n");
goto vpif_unregister;
}
- res_idx++;
- }
+ } while (++res_idx);
vpif_obj.config = pdev->dev.platform_data;
subdev_count = vpif_obj.config->subdev_count;
diff --git a/drivers/media/platform/davinci/vpif_display.h b/drivers/media/platform/ti/davinci/vpif_display.h
index f98062e79167..f98062e79167 100644
--- a/drivers/media/platform/davinci/vpif_display.h
+++ b/drivers/media/platform/ti/davinci/vpif_display.h
diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/ti/davinci/vpss.c
index d15b991ab17c..d15b991ab17c 100644
--- a/drivers/media/platform/davinci/vpss.c
+++ b/drivers/media/platform/ti/davinci/vpss.c
diff --git a/drivers/media/platform/omap/Kconfig b/drivers/media/platform/ti/omap/Kconfig
index de16de46c0f4..a9dbe1097775 100644
--- a/drivers/media/platform/omap/Kconfig
+++ b/drivers/media/platform/ti/omap/Kconfig
@@ -6,10 +6,11 @@ config VIDEO_OMAP2_VOUT_VRFB
config VIDEO_OMAP2_VOUT
tristate "OMAP2/OMAP3 V4L2-Display driver"
+ depends on V4L_PLATFORM_DRIVERS
depends on MMU
depends on FB_OMAP2 || (COMPILE_TEST && FB_OMAP2=n)
depends on ARCH_OMAP2 || ARCH_OMAP3 || COMPILE_TEST
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
select VIDEOBUF2_DMA_CONTIG
select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
help
diff --git a/drivers/media/platform/omap/Makefile b/drivers/media/platform/ti/omap/Makefile
index b17a0ac10c00..b17a0ac10c00 100644
--- a/drivers/media/platform/omap/Makefile
+++ b/drivers/media/platform/ti/omap/Makefile
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/ti/omap/omap_vout.c
index 3e0d9af7ffec..3e0d9af7ffec 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/ti/omap/omap_vout.c
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/ti/omap/omap_vout_vrfb.c
index 0cfa0169875f..0cfa0169875f 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/ti/omap/omap_vout_vrfb.c
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.h b/drivers/media/platform/ti/omap/omap_vout_vrfb.h
index 40bc9e54ecc6..40bc9e54ecc6 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.h
+++ b/drivers/media/platform/ti/omap/omap_vout_vrfb.h
diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/ti/omap/omap_voutdef.h
index b586193341d2..b586193341d2 100644
--- a/drivers/media/platform/omap/omap_voutdef.h
+++ b/drivers/media/platform/ti/omap/omap_voutdef.h
diff --git a/drivers/media/platform/omap/omap_voutlib.c b/drivers/media/platform/ti/omap/omap_voutlib.c
index 480a7e95533d..480a7e95533d 100644
--- a/drivers/media/platform/omap/omap_voutlib.c
+++ b/drivers/media/platform/ti/omap/omap_voutlib.c
diff --git a/drivers/media/platform/omap/omap_voutlib.h b/drivers/media/platform/ti/omap/omap_voutlib.h
index f9d1c0779f33..f9d1c0779f33 100644
--- a/drivers/media/platform/omap/omap_voutlib.h
+++ b/drivers/media/platform/ti/omap/omap_voutlib.h
diff --git a/drivers/media/platform/ti/omap3isp/Kconfig b/drivers/media/platform/ti/omap3isp/Kconfig
new file mode 100644
index 000000000000..f0a680938d5e
--- /dev/null
+++ b/drivers/media/platform/ti/omap3isp/Kconfig
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_OMAP3
+ tristate "OMAP 3 Camera support"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && I2C
+ depends on (ARCH_OMAP3 && OMAP_IOMMU) || COMPILE_TEST
+ depends on COMMON_CLK && OF
+ select ARM_DMA_USE_IOMMU if OMAP_IOMMU
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_DMA_CONTIG
+ select MFD_SYSCON
+ select V4L2_FWNODE
+ help
+ Driver for an OMAP 3 camera controller.
+
+config VIDEO_OMAP3_DEBUG
+ bool "OMAP 3 Camera debug messages"
+ depends on VIDEO_OMAP3
+ help
+ Enable debug messages in the OMAP 3 camera controller driver.
diff --git a/drivers/media/platform/omap3isp/Makefile b/drivers/media/platform/ti/omap3isp/Makefile
index 56e99b4f7d23..56e99b4f7d23 100644
--- a/drivers/media/platform/omap3isp/Makefile
+++ b/drivers/media/platform/ti/omap3isp/Makefile
diff --git a/drivers/media/platform/omap3isp/cfa_coef_table.h b/drivers/media/platform/ti/omap3isp/cfa_coef_table.h
index 786200c5e4fa..786200c5e4fa 100644
--- a/drivers/media/platform/omap3isp/cfa_coef_table.h
+++ b/drivers/media/platform/ti/omap3isp/cfa_coef_table.h
diff --git a/drivers/media/platform/omap3isp/gamma_table.h b/drivers/media/platform/ti/omap3isp/gamma_table.h
index 442c82c2eb22..442c82c2eb22 100644
--- a/drivers/media/platform/omap3isp/gamma_table.h
+++ b/drivers/media/platform/ti/omap3isp/gamma_table.h
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/ti/omap3isp/isp.c
index 4c937f3f323e..4c937f3f323e 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/ti/omap3isp/isp.c
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/ti/omap3isp/isp.h
index a9d760fbf349..a9d760fbf349 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/ti/omap3isp/isp.h
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/ti/omap3isp/ispccdc.c
index 108b5e9f82cb..108b5e9f82cb 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/ti/omap3isp/ispccdc.c
diff --git a/drivers/media/platform/omap3isp/ispccdc.h b/drivers/media/platform/ti/omap3isp/ispccdc.h
index 7883365d7203..7883365d7203 100644
--- a/drivers/media/platform/omap3isp/ispccdc.h
+++ b/drivers/media/platform/ti/omap3isp/ispccdc.h
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/ti/omap3isp/ispccp2.c
index acb58b6ddba1..acb58b6ddba1 100644
--- a/drivers/media/platform/omap3isp/ispccp2.c
+++ b/drivers/media/platform/ti/omap3isp/ispccp2.c
diff --git a/drivers/media/platform/omap3isp/ispccp2.h b/drivers/media/platform/ti/omap3isp/ispccp2.h
index 03e6af3de1d9..03e6af3de1d9 100644
--- a/drivers/media/platform/omap3isp/ispccp2.h
+++ b/drivers/media/platform/ti/omap3isp/ispccp2.h
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/ti/omap3isp/ispcsi2.c
index 6302e0c94034..6302e0c94034 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/ti/omap3isp/ispcsi2.c
diff --git a/drivers/media/platform/omap3isp/ispcsi2.h b/drivers/media/platform/ti/omap3isp/ispcsi2.h
index 036b97f8470e..036b97f8470e 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.h
+++ b/drivers/media/platform/ti/omap3isp/ispcsi2.h
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.c b/drivers/media/platform/ti/omap3isp/ispcsiphy.c
index 6dc7359c5131..6dc7359c5131 100644
--- a/drivers/media/platform/omap3isp/ispcsiphy.c
+++ b/drivers/media/platform/ti/omap3isp/ispcsiphy.c
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.h b/drivers/media/platform/ti/omap3isp/ispcsiphy.h
index ed9b8d221e3f..ed9b8d221e3f 100644
--- a/drivers/media/platform/omap3isp/ispcsiphy.h
+++ b/drivers/media/platform/ti/omap3isp/ispcsiphy.h
diff --git a/drivers/media/platform/omap3isp/isph3a.h b/drivers/media/platform/ti/omap3isp/isph3a.h
index 5144f7689dda..5144f7689dda 100644
--- a/drivers/media/platform/omap3isp/isph3a.h
+++ b/drivers/media/platform/ti/omap3isp/isph3a.h
diff --git a/drivers/media/platform/omap3isp/isph3a_aewb.c b/drivers/media/platform/ti/omap3isp/isph3a_aewb.c
index e6c54c4bbfca..e6c54c4bbfca 100644
--- a/drivers/media/platform/omap3isp/isph3a_aewb.c
+++ b/drivers/media/platform/ti/omap3isp/isph3a_aewb.c
diff --git a/drivers/media/platform/omap3isp/isph3a_af.c b/drivers/media/platform/ti/omap3isp/isph3a_af.c
index de7b116d0122..de7b116d0122 100644
--- a/drivers/media/platform/omap3isp/isph3a_af.c
+++ b/drivers/media/platform/ti/omap3isp/isph3a_af.c
diff --git a/drivers/media/platform/omap3isp/isphist.c b/drivers/media/platform/ti/omap3isp/isphist.c
index 0ef78aace6da..0ef78aace6da 100644
--- a/drivers/media/platform/omap3isp/isphist.c
+++ b/drivers/media/platform/ti/omap3isp/isphist.c
diff --git a/drivers/media/platform/omap3isp/isphist.h b/drivers/media/platform/ti/omap3isp/isphist.h
index 93cd27a3b617..93cd27a3b617 100644
--- a/drivers/media/platform/omap3isp/isphist.h
+++ b/drivers/media/platform/ti/omap3isp/isphist.h
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/ti/omap3isp/isppreview.c
index 53aedec7990d..53aedec7990d 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/ti/omap3isp/isppreview.c
diff --git a/drivers/media/platform/omap3isp/isppreview.h b/drivers/media/platform/ti/omap3isp/isppreview.h
index 5fff1ec3624f..5fff1ec3624f 100644
--- a/drivers/media/platform/omap3isp/isppreview.h
+++ b/drivers/media/platform/ti/omap3isp/isppreview.h
diff --git a/drivers/media/platform/omap3isp/ispreg.h b/drivers/media/platform/ti/omap3isp/ispreg.h
index 86b6ebb0438d..86b6ebb0438d 100644
--- a/drivers/media/platform/omap3isp/ispreg.h
+++ b/drivers/media/platform/ti/omap3isp/ispreg.h
diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/ti/omap3isp/ispresizer.c
index ed2fb0c7a57e..ed2fb0c7a57e 100644
--- a/drivers/media/platform/omap3isp/ispresizer.c
+++ b/drivers/media/platform/ti/omap3isp/ispresizer.c
diff --git a/drivers/media/platform/omap3isp/ispresizer.h b/drivers/media/platform/ti/omap3isp/ispresizer.h
index 28cc89940ead..28cc89940ead 100644
--- a/drivers/media/platform/omap3isp/ispresizer.h
+++ b/drivers/media/platform/ti/omap3isp/ispresizer.h
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/ti/omap3isp/ispstat.c
index 5b9b57f4d9bf..68cf68dbcace 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/ti/omap3isp/ispstat.c
@@ -512,7 +512,7 @@ int omap3isp_stat_request_statistics(struct ispstat *stat,
int omap3isp_stat_request_statistics_time32(struct ispstat *stat,
struct omap3isp_stat_data_time32 *data)
{
- struct omap3isp_stat_data data64;
+ struct omap3isp_stat_data data64 = { };
int ret;
ret = omap3isp_stat_request_statistics(stat, &data64);
@@ -521,7 +521,8 @@ int omap3isp_stat_request_statistics_time32(struct ispstat *stat,
data->ts.tv_sec = data64.ts.tv_sec;
data->ts.tv_usec = data64.ts.tv_usec;
- memcpy(&data->buf, &data64.buf, sizeof(*data) - sizeof(data->ts));
+ data->buf = (uintptr_t)data64.buf;
+ memcpy(&data->frame, &data64.frame, sizeof(data->frame));
return 0;
}
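The hunk above makes two distinct fixes: data64 is zero-initialized so no uninitialized kernel stack bytes can reach userspace through the later copy out, and the blanket memcpy of "everything after ts" is replaced by explicit per-member copies, since the time32 and native structs do not share member sizes or offsets past the timestamp. A minimal sketch of the idea, with hypothetical layouts standing in for the real UAPI structs:

    struct stat_data64 {
            struct { s64 tv_sec; s64 tv_usec; } ts;
            void *buf;      /* 8 bytes on 64-bit kernels */
            u32 frame;
    };

    struct stat_data_time32 {
            struct { s32 tv_sec; s32 tv_usec; } ts;
            u32 buf;        /* userspace-visible 32-bit handle */
            u32 frame;
    };

    static void convert(struct stat_data_time32 *dst,
                        const struct stat_data64 *src)
    {
            dst->ts.tv_sec = src->ts.tv_sec;
            dst->ts.tv_usec = src->ts.tv_usec;
            /* buf and frame sit at different offsets in the two
             * structs, so each member must be copied explicitly */
            dst->buf = (uintptr_t)src->buf;
            dst->frame = src->frame;
    }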
diff --git a/drivers/media/platform/omap3isp/ispstat.h b/drivers/media/platform/ti/omap3isp/ispstat.h
index b548e617cf62..b548e617cf62 100644
--- a/drivers/media/platform/omap3isp/ispstat.h
+++ b/drivers/media/platform/ti/omap3isp/ispstat.h
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/ti/omap3isp/ispvideo.c
index 8811d6dd4ee7..8811d6dd4ee7 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/ti/omap3isp/ispvideo.c
diff --git a/drivers/media/platform/omap3isp/ispvideo.h b/drivers/media/platform/ti/omap3isp/ispvideo.h
index a0908670c0cf..a0908670c0cf 100644
--- a/drivers/media/platform/omap3isp/ispvideo.h
+++ b/drivers/media/platform/ti/omap3isp/ispvideo.h
diff --git a/drivers/media/platform/omap3isp/luma_enhance_table.h b/drivers/media/platform/ti/omap3isp/luma_enhance_table.h
index d5fbf9241f48..d5fbf9241f48 100644
--- a/drivers/media/platform/omap3isp/luma_enhance_table.h
+++ b/drivers/media/platform/ti/omap3isp/luma_enhance_table.h
diff --git a/drivers/media/platform/omap3isp/noise_filter_table.h b/drivers/media/platform/ti/omap3isp/noise_filter_table.h
index da66bd0a3b9f..da66bd0a3b9f 100644
--- a/drivers/media/platform/omap3isp/noise_filter_table.h
+++ b/drivers/media/platform/ti/omap3isp/noise_filter_table.h
diff --git a/drivers/media/platform/omap3isp/omap3isp.h b/drivers/media/platform/ti/omap3isp/omap3isp.h
index 4a003c8263ed..4a003c8263ed 100644
--- a/drivers/media/platform/omap3isp/omap3isp.h
+++ b/drivers/media/platform/ti/omap3isp/omap3isp.h
diff --git a/drivers/media/platform/ti-vpe/Makefile b/drivers/media/platform/ti/vpe/Makefile
index ad624056e039..3fadfe084f87 100644
--- a/drivers/media/platform/ti-vpe/Makefile
+++ b/drivers/media/platform/ti/vpe/Makefile
@@ -10,7 +10,3 @@ ti-sc-y := sc.o
ti-csc-y := csc.o
ccflags-$(CONFIG_VIDEO_TI_VPE_DEBUG) += -DDEBUG
-
-obj-$(CONFIG_VIDEO_TI_CAL) += ti-cal.o
-
-ti-cal-y := cal.o cal-camerarx.o cal-video.o
diff --git a/drivers/media/platform/ti-vpe/csc.c b/drivers/media/platform/ti/vpe/csc.c
index ff15bc589f1b..ff15bc589f1b 100644
--- a/drivers/media/platform/ti-vpe/csc.c
+++ b/drivers/media/platform/ti/vpe/csc.c
diff --git a/drivers/media/platform/ti-vpe/csc.h b/drivers/media/platform/ti/vpe/csc.h
index af2e86bccf57..af2e86bccf57 100644
--- a/drivers/media/platform/ti-vpe/csc.h
+++ b/drivers/media/platform/ti/vpe/csc.h
diff --git a/drivers/media/platform/ti-vpe/sc.c b/drivers/media/platform/ti/vpe/sc.c
index 0202d278523f..0202d278523f 100644
--- a/drivers/media/platform/ti-vpe/sc.c
+++ b/drivers/media/platform/ti/vpe/sc.c
diff --git a/drivers/media/platform/ti-vpe/sc.h b/drivers/media/platform/ti/vpe/sc.h
index d55de44d5257..d55de44d5257 100644
--- a/drivers/media/platform/ti-vpe/sc.h
+++ b/drivers/media/platform/ti/vpe/sc.h
diff --git a/drivers/media/platform/ti-vpe/sc_coeff.h b/drivers/media/platform/ti/vpe/sc_coeff.h
index c525d1764099..c525d1764099 100644
--- a/drivers/media/platform/ti-vpe/sc_coeff.h
+++ b/drivers/media/platform/ti/vpe/sc_coeff.h
diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti/vpe/vpdma.c
index f8998a8ad371..f8998a8ad371 100644
--- a/drivers/media/platform/ti-vpe/vpdma.c
+++ b/drivers/media/platform/ti/vpe/vpdma.c
diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti/vpe/vpdma.h
index 393fcbb3cb40..393fcbb3cb40 100644
--- a/drivers/media/platform/ti-vpe/vpdma.h
+++ b/drivers/media/platform/ti/vpe/vpdma.h
diff --git a/drivers/media/platform/ti-vpe/vpdma_priv.h b/drivers/media/platform/ti/vpe/vpdma_priv.h
index 0bbee45338bd..0bbee45338bd 100644
--- a/drivers/media/platform/ti-vpe/vpdma_priv.h
+++ b/drivers/media/platform/ti/vpe/vpdma_priv.h
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti/vpe/vpe.c
index 5b1c5d96a407..5b1c5d96a407 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti/vpe/vpe.c
diff --git a/drivers/media/platform/ti-vpe/vpe_regs.h b/drivers/media/platform/ti/vpe/vpe_regs.h
index 1a1ad5ae1228..1a1ad5ae1228 100644
--- a/drivers/media/platform/ti-vpe/vpe_regs.h
+++ b/drivers/media/platform/ti/vpe/vpe_regs.h
diff --git a/drivers/media/platform/via/Kconfig b/drivers/media/platform/via/Kconfig
new file mode 100644
index 000000000000..8926eb0803b2
--- /dev/null
+++ b/drivers/media/platform/via/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "VIA media platform drivers"
+
+config VIDEO_VIA_CAMERA
+ tristate "VIAFB camera controller support"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on FB_VIA && VIDEO_DEV
+ select VIDEOBUF2_DMA_SG
+ select VIDEO_OV7670
+ help
+ Driver support for the integrated camera controller in VIA
+ Chrome9 chipsets. Currently only tested on OLPC xo-1.5 systems
+ with ov7670 sensors.
diff --git a/drivers/media/platform/via/Makefile b/drivers/media/platform/via/Makefile
new file mode 100644
index 000000000000..80f747f3fffc
--- /dev/null
+++ b/drivers/media/platform/via/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_VIDEO_VIA_CAMERA) += via-camera.o
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via/via-camera.c
index 95483c84c3f2..95483c84c3f2 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via/via-camera.c
diff --git a/drivers/media/platform/via-camera.h b/drivers/media/platform/via/via-camera.h
index 54f16318b1b3..54f16318b1b3 100644
--- a/drivers/media/platform/via-camera.h
+++ b/drivers/media/platform/via/via-camera.h
diff --git a/drivers/media/platform/xilinx/Kconfig b/drivers/media/platform/xilinx/Kconfig
index 44587dccacf1..93ef78bf62e6 100644
--- a/drivers/media/platform/xilinx/Kconfig
+++ b/drivers/media/platform/xilinx/Kconfig
@@ -1,8 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
+comment "Xilinx media platform drivers"
+
config VIDEO_XILINX
tristate "Xilinx Video IP (EXPERIMENTAL)"
- depends on VIDEO_V4L2 && OF && HAS_DMA
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF && HAS_DMA
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select VIDEOBUF2_DMA_CONTIG
@@ -10,9 +13,8 @@ config VIDEO_XILINX
help
Driver for Xilinx Video IP Pipelines
-if VIDEO_XILINX
-
config VIDEO_XILINX_CSI2RXSS
+ depends on VIDEO_XILINX
tristate "Xilinx CSI-2 Rx Subsystem"
help
Driver for Xilinx MIPI CSI-2 Rx Subsystem. This is a V4L sub-device
@@ -31,5 +33,3 @@ config VIDEO_XILINX_VTC
depends on VIDEO_XILINX
help
Driver for the Xilinx Video Timing Controller
-
-endif #VIDEO_XILINX
diff --git a/drivers/media/platform/xilinx/xilinx-csi2rxss.c b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
index b1baf9d7b6ec..051c60cba1e0 100644
--- a/drivers/media/platform/xilinx/xilinx-csi2rxss.c
+++ b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/v4l2-subdev.h>
#include <media/media-entity.h>
+#include <media/mipi-csi2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>
@@ -115,23 +116,6 @@
#define XCSI_DEFAULT_WIDTH 1920
#define XCSI_DEFAULT_HEIGHT 1080
-/* MIPI CSI-2 Data Types from spec */
-#define XCSI_DT_YUV4228B 0x1e
-#define XCSI_DT_YUV42210B 0x1f
-#define XCSI_DT_RGB444 0x20
-#define XCSI_DT_RGB555 0x21
-#define XCSI_DT_RGB565 0x22
-#define XCSI_DT_RGB666 0x23
-#define XCSI_DT_RGB888 0x24
-#define XCSI_DT_RAW6 0x28
-#define XCSI_DT_RAW7 0x29
-#define XCSI_DT_RAW8 0x2a
-#define XCSI_DT_RAW10 0x2b
-#define XCSI_DT_RAW12 0x2c
-#define XCSI_DT_RAW14 0x2d
-#define XCSI_DT_RAW16 0x2e
-#define XCSI_DT_RAW20 0x2f
-
#define XCSI_VCX_START 4
#define XCSI_MAX_VC 4
#define XCSI_MAX_VCX 16
@@ -183,32 +167,32 @@ static const struct xcsi2rxss_event xcsi2rxss_events[] = {
* and media bus formats
*/
static const u32 xcsi2dt_mbus_lut[][2] = {
- { XCSI_DT_YUV4228B, MEDIA_BUS_FMT_UYVY8_1X16 },
- { XCSI_DT_YUV42210B, MEDIA_BUS_FMT_UYVY10_1X20 },
- { XCSI_DT_RGB444, 0 },
- { XCSI_DT_RGB555, 0 },
- { XCSI_DT_RGB565, 0 },
- { XCSI_DT_RGB666, 0 },
- { XCSI_DT_RGB888, MEDIA_BUS_FMT_RBG888_1X24 },
- { XCSI_DT_RAW6, 0 },
- { XCSI_DT_RAW7, 0 },
- { XCSI_DT_RAW8, MEDIA_BUS_FMT_SRGGB8_1X8 },
- { XCSI_DT_RAW8, MEDIA_BUS_FMT_SBGGR8_1X8 },
- { XCSI_DT_RAW8, MEDIA_BUS_FMT_SGBRG8_1X8 },
- { XCSI_DT_RAW8, MEDIA_BUS_FMT_SGRBG8_1X8 },
- { XCSI_DT_RAW10, MEDIA_BUS_FMT_SRGGB10_1X10 },
- { XCSI_DT_RAW10, MEDIA_BUS_FMT_SBGGR10_1X10 },
- { XCSI_DT_RAW10, MEDIA_BUS_FMT_SGBRG10_1X10 },
- { XCSI_DT_RAW10, MEDIA_BUS_FMT_SGRBG10_1X10 },
- { XCSI_DT_RAW12, MEDIA_BUS_FMT_SRGGB12_1X12 },
- { XCSI_DT_RAW12, MEDIA_BUS_FMT_SBGGR12_1X12 },
- { XCSI_DT_RAW12, MEDIA_BUS_FMT_SGBRG12_1X12 },
- { XCSI_DT_RAW12, MEDIA_BUS_FMT_SGRBG12_1X12 },
- { XCSI_DT_RAW16, MEDIA_BUS_FMT_SRGGB16_1X16 },
- { XCSI_DT_RAW16, MEDIA_BUS_FMT_SBGGR16_1X16 },
- { XCSI_DT_RAW16, MEDIA_BUS_FMT_SGBRG16_1X16 },
- { XCSI_DT_RAW16, MEDIA_BUS_FMT_SGRBG16_1X16 },
- { XCSI_DT_RAW20, 0 },
+ { MIPI_CSI2_DT_YUV422_8B, MEDIA_BUS_FMT_UYVY8_1X16 },
+ { MIPI_CSI2_DT_YUV422_10B, MEDIA_BUS_FMT_UYVY10_1X20 },
+ { MIPI_CSI2_DT_RGB444, 0 },
+ { MIPI_CSI2_DT_RGB555, 0 },
+ { MIPI_CSI2_DT_RGB565, 0 },
+ { MIPI_CSI2_DT_RGB666, 0 },
+ { MIPI_CSI2_DT_RGB888, MEDIA_BUS_FMT_RBG888_1X24 },
+ { MIPI_CSI2_DT_RAW6, 0 },
+ { MIPI_CSI2_DT_RAW7, 0 },
+ { MIPI_CSI2_DT_RAW8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { MIPI_CSI2_DT_RAW8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { MIPI_CSI2_DT_RAW8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { MIPI_CSI2_DT_RAW8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { MIPI_CSI2_DT_RAW10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { MIPI_CSI2_DT_RAW10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { MIPI_CSI2_DT_RAW10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { MIPI_CSI2_DT_RAW10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { MIPI_CSI2_DT_RAW12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+ { MIPI_CSI2_DT_RAW12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { MIPI_CSI2_DT_RAW12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { MIPI_CSI2_DT_RAW12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { MIPI_CSI2_DT_RAW16, MEDIA_BUS_FMT_SRGGB16_1X16 },
+ { MIPI_CSI2_DT_RAW16, MEDIA_BUS_FMT_SBGGR16_1X16 },
+ { MIPI_CSI2_DT_RAW16, MEDIA_BUS_FMT_SGBRG16_1X16 },
+ { MIPI_CSI2_DT_RAW16, MEDIA_BUS_FMT_SGRBG16_1X16 },
+ { MIPI_CSI2_DT_RAW20, 0 },
};
/**
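The table above pairs each MIPI CSI-2 data type with a media bus code (zero where unsupported); the change swaps the driver-private XCSI_DT_* values for the shared MIPI_CSI2_DT_* constants from <media/mipi-csi2.h> while leaving the table's structure intact. The helpers used later in this patch, xcsi2rxss_get_dt() and xcsi2rxss_get_nth_mbus(), search this table; a sketch of how such two-column lookups typically work (assumed shape, not the driver's exact code):

    static u32 lut_get_dt(u32 mbus_code)
    {
            unsigned int i;

            for (i = 0; i < ARRAY_SIZE(xcsi2dt_mbus_lut); i++)
                    if (xcsi2dt_mbus_lut[i][1] == mbus_code)
                            return xcsi2dt_mbus_lut[i][0];
            return 0;
    }

    /* nth media bus code registered for a given data type */
    static u32 lut_get_nth_mbus(u32 dt, unsigned int n)
    {
            unsigned int i;

            for (i = 0; i < ARRAY_SIZE(xcsi2dt_mbus_lut); i++)
                    if (xcsi2dt_mbus_lut[i][0] == dt && n-- == 0)
                            return xcsi2dt_mbus_lut[i][1];
            return 0;
    }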
@@ -791,7 +775,7 @@ static int xcsi2rxss_set_format(struct v4l2_subdev *sd,
* other RAW, YUV422 8/10 or RGB888, set appropriate media bus format.
*/
dt = xcsi2rxss_get_dt(fmt->format.code);
- if (dt != xcsi2rxss->datatype && dt != XCSI_DT_RAW8) {
+ if (dt != xcsi2rxss->datatype && dt != MIPI_CSI2_DT_RAW8) {
dev_dbg(xcsi2rxss->dev, "Unsupported media bus format");
/* set the default format for the data type */
fmt->format.code = xcsi2rxss_get_nth_mbus(xcsi2rxss->datatype,
@@ -823,8 +807,8 @@ static int xcsi2rxss_enum_mbus_code(struct v4l2_subdev *sd,
/* RAW8 dt packets are available in all DT configurations */
if (code->index < 4) {
n = code->index;
- dt = XCSI_DT_RAW8;
- } else if (state->datatype != XCSI_DT_RAW8) {
+ dt = MIPI_CSI2_DT_RAW8;
+ } else if (state->datatype != MIPI_CSI2_DT_RAW8) {
n = code->index - 4;
dt = state->datatype;
} else {
@@ -895,22 +879,22 @@ static int xcsi2rxss_parse_of(struct xcsi2rxss_state *xcsi2rxss)
}
switch (xcsi2rxss->datatype) {
- case XCSI_DT_YUV4228B:
- case XCSI_DT_RGB444:
- case XCSI_DT_RGB555:
- case XCSI_DT_RGB565:
- case XCSI_DT_RGB666:
- case XCSI_DT_RGB888:
- case XCSI_DT_RAW6:
- case XCSI_DT_RAW7:
- case XCSI_DT_RAW8:
- case XCSI_DT_RAW10:
- case XCSI_DT_RAW12:
- case XCSI_DT_RAW14:
+ case MIPI_CSI2_DT_YUV422_8B:
+ case MIPI_CSI2_DT_RGB444:
+ case MIPI_CSI2_DT_RGB555:
+ case MIPI_CSI2_DT_RGB565:
+ case MIPI_CSI2_DT_RGB666:
+ case MIPI_CSI2_DT_RGB888:
+ case MIPI_CSI2_DT_RAW6:
+ case MIPI_CSI2_DT_RAW7:
+ case MIPI_CSI2_DT_RAW8:
+ case MIPI_CSI2_DT_RAW10:
+ case MIPI_CSI2_DT_RAW12:
+ case MIPI_CSI2_DT_RAW14:
break;
- case XCSI_DT_YUV42210B:
- case XCSI_DT_RAW16:
- case XCSI_DT_RAW20:
+ case MIPI_CSI2_DT_YUV422_10B:
+ case MIPI_CSI2_DT_RAW16:
+ case MIPI_CSI2_DT_RAW20:
if (!en_csi_v20) {
ret = -EINVAL;
dev_dbg(dev, "enable csi v2 for this pixel format");
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index d29e29645e04..cca03bd2cc42 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -5,69 +5,17 @@
menuconfig RADIO_ADAPTERS
bool "Radio Adapters"
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
depends on MEDIA_RADIO_SUPPORT
default y
help
Say Y here to enable selecting AM/FM radio adapters.
-if RADIO_ADAPTERS && VIDEO_V4L2
-
-config RADIO_TEA575X
- tristate
-
-source "drivers/media/radio/si470x/Kconfig"
-
-config RADIO_SI4713
- tristate "Silicon Labs Si4713 FM Radio with RDS Transmitter support"
- depends on VIDEO_V4L2
-
-source "drivers/media/radio/si4713/Kconfig"
-
-config RADIO_SI476X
- tristate "Silicon Laboratories Si476x I2C FM Radio"
- depends on I2C && VIDEO_V4L2
- depends on MFD_SI476X_CORE
- depends on SND_SOC
- select SND_SOC_SI476X
- help
- Choose Y here if you have this FM radio chip.
-
- In order to control your radio card, you will need to use programs
- that are compatible with the Video For Linux 2 API. Information on
- this API and pointers to "v4l2" programs may be found at
- <file:Documentation/userspace-api/media/index.rst>.
-
- To compile this driver as a module, choose M here: the
- module will be called radio-si476x.
-
-config USB_MR800
- tristate "AverMedia MR 800 USB FM radio support"
- depends on USB && VIDEO_V4L2
- help
- Say Y here if you want to connect this type of radio to your
- computer's USB port. Note that the audio is not digital, and
- you must connect the line out connector to a sound card or a
- set of speakers.
-
- To compile this driver as a module, choose M here: the
- module will be called radio-mr800.
-
-config USB_DSBR
- tristate "D-Link/GemTek USB FM radio support"
- depends on USB && VIDEO_V4L2
- help
- Say Y here if you want to connect this type of radio to your
- computer's USB port. Note that the audio is not digital, and
- you must connect the line out connector to a sound card or a
- set of speakers.
-
- To compile this driver as a module, choose M here: the
- module will be called dsbr100.
+if RADIO_ADAPTERS
config RADIO_MAXIRADIO
tristate "Guillemot MAXI Radio FM 2000 radio"
- depends on VIDEO_V4L2 && PCI
+ depends on PCI
select RADIO_TEA575X
help
Choose Y here if you have this radio card. This card may also be
@@ -81,6 +29,18 @@ config RADIO_MAXIRADIO
To compile this driver as a module, choose M here: the
module will be called radio-maxiradio.
+config RADIO_SAA7706H
+ tristate "SAA7706H Car Radio DSP"
+ depends on I2C
+ help
+ Say Y here if you want to use the SAA7706H car radio Digital
+ Signal Processor, found for instance on the Russellville development
+ board. On the Russellville the device is connected to the internal
+ Timberdale I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called saa7706h.
+
config RADIO_SHARK
tristate "Griffin radioSHARK USB radio receiver"
depends on USB
@@ -116,45 +76,32 @@ config RADIO_SHARK2
To compile this driver as a module, choose M here: the
module will be called radio-shark2.
-config USB_KEENE
- tristate "Keene FM Transmitter USB support"
- depends on USB && VIDEO_V4L2
- help
- Say Y here if you want to connect this type of FM transmitter
- to your computer's USB port.
-
- To compile this driver as a module, choose M here: the
- module will be called radio-keene.
+config RADIO_SI4713
+ tristate "Silicon Labs Si4713 FM Radio with RDS Transmitter support"
-config USB_RAREMONO
- tristate "Thanko's Raremono AM/FM/SW radio support"
- depends on USB && VIDEO_V4L2
+config RADIO_SI476X
+ tristate "Silicon Laboratories Si476x I2C FM Radio"
+ depends on I2C
+ depends on MFD_SI476X_CORE
+ depends on SND_SOC
+ select SND_SOC_SI476X
help
- The 'Thanko's Raremono' device contains the Si4734 chip from Silicon Labs Inc.
- It is one of the very few or perhaps the only consumer USB radio device
- to receive the AM/FM/SW bands.
+ Choose Y here if you have this FM radio chip.
- Say Y here if you want to connect this type of AM/FM/SW receiver
- to your computer's USB port.
+ In order to control your radio card, you will need to use programs
+ that are compatible with the Video For Linux 2 API. Information on
+ this API and pointers to "v4l2" programs may be found at
+ <file:Documentation/userspace-api/media/index.rst>.
To compile this driver as a module, choose M here: the
- module will be called radio-raremono.
-
-config USB_MA901
- tristate "Masterkit MA901 USB FM radio support"
- depends on USB && VIDEO_V4L2
- help
- Say Y here if you want to connect this type of radio to your
- computer's USB port. Note that the audio is not digital, and
- you must connect the line out connector to a sound card or a
- set of speakers or headphones.
+ module will be called radio-si476x.
- To compile this driver as a module, choose M here: the
- module will be called radio-ma901.
+config RADIO_TEA575X
+ tristate
config RADIO_TEA5764
tristate "TEA5764 I2C FM radio support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C
help
Say Y here if you want to use the TEA5764 FM chip found in
EZX phones. This FM chip is present in EZX phones from Motorola,
@@ -171,21 +118,9 @@ config RADIO_TEA5764_XTAL
Say Y here if TEA5764 have a 32768 Hz crystal in circuit, say N
here if TEA5764 reference frequency is connected in FREQIN.
-config RADIO_SAA7706H
- tristate "SAA7706H Car Radio DSP"
- depends on I2C && VIDEO_V4L2
- help
- Say Y here if you want to use the SAA7706H Car radio Digital
- Signal Processor, found for instance on the Russellville development
- board. On the russellville the device is connected to internal
- timberdale I2C bus.
-
- To compile this driver as a module, choose M here: the
- module will be called SAA7706H.
-
config RADIO_TEF6862
tristate "TEF6862 Car Radio Enhanced Selectivity Tuner"
- depends on I2C && VIDEO_V4L2
+ depends on I2C
help
Say Y here if you want to use the TEF6862 Car Radio Enhanced
Selectivity Tuner, found for instance on the Russellville development
@@ -197,7 +132,7 @@ config RADIO_TEF6862
config RADIO_TIMBERDALE
tristate "Enable the Timberdale radio driver"
- depends on MFD_TIMBERDALE && VIDEO_V4L2
+ depends on MFD_TIMBERDALE
depends on I2C # for RADIO_SAA7706H
select RADIO_TEF6862
select RADIO_SAA7706H
@@ -208,7 +143,7 @@ config RADIO_TIMBERDALE
config RADIO_WL1273
tristate "Texas Instruments WL1273 I2C FM Radio"
- depends on I2C && VIDEO_V4L2
+ depends on I2C
select MFD_CORE
select MFD_WL1273_CORE
select FW_LOADER
@@ -223,96 +158,88 @@ config RADIO_WL1273
To compile this driver as a module, choose M here: the
module will be called radio-wl1273.
-# TI's ST based wl128x FM radio
-source "drivers/media/radio/wl128x/Kconfig"
+config USB_DSBR
+ tristate "D-Link/GemTek USB FM radio support"
+ depends on USB
+ help
+ Say Y here if you want to connect this type of radio to your
+ computer's USB port. Note that the audio is not digital, and
+ you must connect the line out connector to a sound card or a
+ set of speakers.
-#
-# ISA drivers configuration
-#
+ To compile this driver as a module, choose M here: the
+ module will be called dsbr100.
-menuconfig V4L_RADIO_ISA_DRIVERS
- bool "ISA radio devices"
- depends on ISA || COMPILE_TEST
+config USB_KEENE
+ tristate "Keene FM Transmitter USB support"
+ depends on USB
help
- Say Y here to enable support for these ISA drivers.
-
-if V4L_RADIO_ISA_DRIVERS
+ Say Y here if you want to connect this type of FM transmitter
+ to your computer's USB port.
-config RADIO_ISA
- depends on ISA || COMPILE_TEST
- tristate
+ To compile this driver as a module, choose M here: the
+ module will be called radio-keene.
-config RADIO_CADET
- tristate "ADS Cadet AM/FM Tuner"
- depends on ISA || COMPILE_TEST
- depends on VIDEO_V4L2
+config USB_MA901
+ tristate "Masterkit MA901 USB FM radio support"
+ depends on USB
help
- Choose Y here if you have one of these AM/FM radio cards, and then
- fill in the port address below.
+ Say Y here if you want to connect this type of radio to your
+ computer's USB port. Note that the audio is not digital, and
+ you must connect the line out connector to a sound card or a
+ set of speakers or headphones.
To compile this driver as a module, choose M here: the
- module will be called radio-cadet.
+ module will be called radio-ma901.
-config RADIO_RTRACK
- tristate "AIMSlab RadioTrack (aka RadioReveal) support"
- depends on ISA || COMPILE_TEST
- depends on VIDEO_V4L2
- select RADIO_ISA
+config USB_MR800
+ tristate "AverMedia MR 800 USB FM radio support"
+ depends on USB
help
- Choose Y here if you have one of these FM radio cards, and then fill
- in the port address below.
+ Say Y here if you want to connect this type of radio to your
+ computer's USB port. Note that the audio is not digital, and
+ you must connect the line out connector to a sound card or a
+ set of speakers.
- Note that newer AIMSlab RadioTrack cards have a different chipset
- and are not supported by this driver. For these cards, use the
- RadioTrack II driver below.
+ To compile this driver as a module, choose M here: the
+ module will be called radio-mr800.
- If you have a GemTeks combined (PnP) sound- and radio card you must
- use this driver as a module and setup the card with isapnptools.
- You must also pass the module a suitable io parameter, 0x248 has
- been reported to be used by these cards.
+config USB_RAREMONO
+ tristate "Thanko's Raremono AM/FM/SW radio support"
+ depends on USB
+ help
+ The 'Thanko's Raremono' device contains the Si4734 chip from Silicon Labs Inc.
+ It is one of the very few, or perhaps the only, consumer USB radio devices
+ able to receive the AM/FM/SW bands.
- More information is contained in the file
- <file:Documentation/driver-api/media/drivers/radiotrack.rst>.
+ Say Y here if you want to connect this type of AM/FM/SW receiver
+ to your computer's USB port.
To compile this driver as a module, choose M here: the
- module will be called radio-aimslab.
+ module will be called radio-raremono.
-config RADIO_RTRACK_PORT
- hex "RadioTrack i/o port (0x20f or 0x30f)"
- depends on RADIO_RTRACK=y
- default "30f"
- help
- Enter either 0x30f or 0x20f here. The card default is 0x30f, if you
- haven't changed the jumper setting on the card.
+source "drivers/media/radio/si470x/Kconfig"
+source "drivers/media/radio/si4713/Kconfig"
-config RADIO_RTRACK2
- tristate "AIMSlab RadioTrack II support"
- depends on ISA || COMPILE_TEST
- depends on VIDEO_V4L2
- select RADIO_ISA
- help
- Choose Y here if you have this FM radio card, and then fill in the
- port address below.
+# TI's ST based wl128x FM radio
- Note: this driver hasn't been tested since a long time due to lack
- of hardware. If you have this hardware, then please contact the
- linux-media mailinglist.
+source "drivers/media/radio/wl128x/Kconfig"
- To compile this driver as a module, choose M here: the
- module will be called radio-rtrack2.
+#
+# ISA drivers configuration
+#
-config RADIO_RTRACK2_PORT
- hex "RadioTrack II i/o port (0x20c or 0x30c)"
- depends on RADIO_RTRACK2=y
- default "30c"
+menuconfig V4L_RADIO_ISA_DRIVERS
+ bool "ISA radio devices"
+ depends on ISA || COMPILE_TEST
help
- Enter either 0x30c or 0x20c here. The card default is 0x30c, if you
- haven't changed the jumper setting on the card.
+ Say Y here to enable support for these ISA drivers.
+
+if V4L_RADIO_ISA_DRIVERS
config RADIO_AZTECH
tristate "Aztech/Packard Bell Radio"
depends on ISA || COMPILE_TEST
- depends on VIDEO_V4L2
select RADIO_ISA
help
Choose Y here if you have one of these FM radio cards, and then fill
@@ -330,10 +257,19 @@ config RADIO_AZTECH_PORT
haven't changed the setting of jumper JP3 on the card. Removing the
jumper sets the card to 0x358.
+config RADIO_CADET
+ tristate "ADS Cadet AM/FM Tuner"
+ depends on ISA || COMPILE_TEST
+ help
+ Choose Y here if you have one of these AM/FM radio cards, and then
+ fill in the port address below.
+
+ To compile this driver as a module, choose M here: the
+ module will be called radio-cadet.
+
config RADIO_GEMTEK
tristate "GemTek Radio card (or compatible) support"
depends on ISA || COMPILE_TEST
- depends on VIDEO_V4L2
select RADIO_ISA
help
Choose Y here if you have this FM radio card, and then fill in the
@@ -371,10 +307,14 @@ config RADIO_GEMTEK_PROBE
following ports will be probed: 0x20c, 0x30c, 0x24c, 0x34c, 0x248 and
0x28c.
+config RADIO_ISA
+ depends on ISA || COMPILE_TEST
+ tristate
+
config RADIO_MIROPCM20
tristate "miroSOUND PCM20 radio"
depends on ISA || COMPILE_TEST
- depends on ISA_DMA_API && VIDEO_V4L2 && SND
+ depends on ISA_DMA_API && SND
select SND_ISA
select SND_MIRO
help
@@ -386,10 +326,63 @@ config RADIO_MIROPCM20
To compile this driver as a module, choose M here: the
module will be called radio-miropcm20.
+config RADIO_RTRACK
+ tristate "AIMSlab RadioTrack (aka RadioReveal) support"
+ depends on ISA || COMPILE_TEST
+ select RADIO_ISA
+ help
+ Choose Y here if you have one of these FM radio cards, and then fill
+ in the port address below.
+
+ Note that newer AIMSlab RadioTrack cards have a different chipset
+ and are not supported by this driver. For these cards, use the
+ RadioTrack II driver below.
+
+ If you have a GemTeks combined (PnP) sound- and radio card you must
+ use this driver as a module and setup the card with isapnptools.
+ You must also pass the module a suitable io parameter, 0x248 has
+ been reported to be used by these cards.
+
+ More information is contained in the file
+ <file:Documentation/driver-api/media/drivers/radiotrack.rst>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called radio-aimslab.
+
+config RADIO_RTRACK2
+ tristate "AIMSlab RadioTrack II support"
+ depends on ISA || COMPILE_TEST
+ select RADIO_ISA
+ help
+ Choose Y here if you have this FM radio card, and then fill in the
+ port address below.
+
+ Note: this driver hasn't been tested for a long time due to lack
+ of hardware. If you have this hardware, please contact the
+ linux-media mailing list.
+
+ To compile this driver as a module, choose M here: the
+ module will be called radio-rtrack2.
+
+config RADIO_RTRACK2_PORT
+ hex "RadioTrack II i/o port (0x20c or 0x30c)"
+ depends on RADIO_RTRACK2=y
+ default "30c"
+ help
+ Enter either 0x30c or 0x20c here. The card default is 0x30c, if you
+ haven't changed the jumper setting on the card.
+
+config RADIO_RTRACK_PORT
+ hex "RadioTrack i/o port (0x20f or 0x30f)"
+ depends on RADIO_RTRACK=y
+ default "30f"
+ help
+ Enter either 0x30f or 0x20f here. The card default is 0x30f, if you
+ haven't changed the jumper setting on the card.
+
config RADIO_SF16FMI
tristate "SF16-FMI/SF16-FMP/SF16-FMD Radio"
depends on ISA || COMPILE_TEST
- depends on VIDEO_V4L2
help
Choose Y here if you have one of these FM radio cards.
@@ -399,7 +392,6 @@ config RADIO_SF16FMI
config RADIO_SF16FMR2
tristate "SF16-FMR2/SF16-FMD2 Radio"
depends on ISA || COMPILE_TEST
- depends on VIDEO_V4L2
select RADIO_TEA575X
help
Choose Y here if you have one of these FM radio cards.
@@ -410,7 +402,6 @@ config RADIO_SF16FMR2
config RADIO_TERRATEC
tristate "TerraTec ActiveRadio ISA Standalone"
depends on ISA || COMPILE_TEST
- depends on VIDEO_V4L2
select RADIO_ISA
help
Choose Y here if you have this FM radio card.
@@ -425,7 +416,6 @@ config RADIO_TERRATEC
config RADIO_TRUST
tristate "Trust FM radio card"
depends on ISA || COMPILE_TEST
- depends on VIDEO_V4L2
select RADIO_ISA
help
This is a driver for the Trust FM radio cards. Say Y if you have
@@ -449,7 +439,6 @@ config RADIO_TRUST_PORT
config RADIO_TYPHOON
tristate "Typhoon Radio (a.k.a. EcoRadio)"
depends on ISA || COMPILE_TEST
- depends on VIDEO_V4L2
select RADIO_ISA
help
Choose Y here if you have one of these FM radio cards, and then fill
@@ -462,13 +451,6 @@ config RADIO_TYPHOON
To compile this driver as a module, choose M here: the
module will be called radio-typhoon.
-config RADIO_TYPHOON_PORT
- hex "Typhoon I/O port (0x316 or 0x336)"
- depends on RADIO_TYPHOON=y
- default "316"
- help
- Enter the I/O port of your Typhoon or EcoRadio radio card.
-
config RADIO_TYPHOON_MUTEFREQ
int "Typhoon frequency set when muting the device (kHz)"
depends on RADIO_TYPHOON=y
@@ -481,10 +463,16 @@ config RADIO_TYPHOON_MUTEFREQ
the device is muted. There should be no local radio station at that
frequency.
+config RADIO_TYPHOON_PORT
+ hex "Typhoon I/O port (0x316 or 0x336)"
+ depends on RADIO_TYPHOON=y
+ default "316"
+ help
+ Enter the I/O port of your Typhoon or EcoRadio radio card.
+
config RADIO_ZOLTRIX
tristate "Zoltrix Radio"
depends on ISA || COMPILE_TEST
- depends on VIDEO_V4L2
select RADIO_ISA
help
Choose Y here if you have one of these FM radio cards, and then fill
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index 53c7ae135460..cfb6af7d3bc3 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -3,36 +3,39 @@
# Makefile for the kernel character device drivers.
#
-obj-$(CONFIG_RADIO_ISA) += radio-isa.o
+shark2-objs := radio-shark2.o radio-tea5777.o
+
+# Please keep it alphabetically sorted by Kconfig name
+# (e.g. LC_ALL=C sort Makefile)
obj-$(CONFIG_RADIO_AZTECH) += radio-aztech.o
+obj-$(CONFIG_RADIO_CADET) += radio-cadet.o
+obj-$(CONFIG_RADIO_GEMTEK) += radio-gemtek.o
+obj-$(CONFIG_RADIO_ISA) += radio-isa.o
+obj-$(CONFIG_RADIO_MAXIRADIO) += radio-maxiradio.o
+obj-$(CONFIG_RADIO_MIROPCM20) += radio-miropcm20.o
obj-$(CONFIG_RADIO_RTRACK2) += radio-rtrack2.o
+obj-$(CONFIG_RADIO_RTRACK) += radio-aimslab.o
+obj-$(CONFIG_RADIO_SAA7706H) += saa7706h.o
obj-$(CONFIG_RADIO_SF16FMI) += radio-sf16fmi.o
obj-$(CONFIG_RADIO_SF16FMR2) += radio-sf16fmr2.o
-obj-$(CONFIG_RADIO_CADET) += radio-cadet.o
-obj-$(CONFIG_RADIO_TYPHOON) += radio-typhoon.o
-obj-$(CONFIG_RADIO_TERRATEC) += radio-terratec.o
-obj-$(CONFIG_RADIO_MAXIRADIO) += radio-maxiradio.o
-obj-$(CONFIG_RADIO_SHARK) += radio-shark.o
obj-$(CONFIG_RADIO_SHARK2) += shark2.o
-obj-$(CONFIG_RADIO_RTRACK) += radio-aimslab.o
-obj-$(CONFIG_RADIO_ZOLTRIX) += radio-zoltrix.o
-obj-$(CONFIG_RADIO_GEMTEK) += radio-gemtek.o
-obj-$(CONFIG_RADIO_TRUST) += radio-trust.o
-obj-$(CONFIG_RADIO_SI476X) += radio-si476x.o
-obj-$(CONFIG_RADIO_MIROPCM20) += radio-miropcm20.o
-obj-$(CONFIG_USB_DSBR) += dsbr100.o
+obj-$(CONFIG_RADIO_SHARK) += radio-shark.o
obj-$(CONFIG_RADIO_SI470X) += si470x/
obj-$(CONFIG_RADIO_SI4713) += si4713/
-obj-$(CONFIG_USB_MR800) += radio-mr800.o
-obj-$(CONFIG_USB_KEENE) += radio-keene.o
-obj-$(CONFIG_USB_MA901) += radio-ma901.o
+obj-$(CONFIG_RADIO_SI476X) += radio-si476x.o
+obj-$(CONFIG_RADIO_TEA575X) += tea575x.o
obj-$(CONFIG_RADIO_TEA5764) += radio-tea5764.o
-obj-$(CONFIG_RADIO_SAA7706H) += saa7706h.o
obj-$(CONFIG_RADIO_TEF6862) += tef6862.o
+obj-$(CONFIG_RADIO_TERRATEC) += radio-terratec.o
obj-$(CONFIG_RADIO_TIMBERDALE) += radio-timb.o
+obj-$(CONFIG_RADIO_TRUST) += radio-trust.o
+obj-$(CONFIG_RADIO_TYPHOON) += radio-typhoon.o
obj-$(CONFIG_RADIO_WL1273) += radio-wl1273.o
obj-$(CONFIG_RADIO_WL128X) += wl128x/
-obj-$(CONFIG_RADIO_TEA575X) += tea575x.o
-obj-$(CONFIG_USB_RAREMONO) += radio-raremono.o
+obj-$(CONFIG_RADIO_ZOLTRIX) += radio-zoltrix.o
-shark2-objs := radio-shark2.o radio-tea5777.o
+obj-$(CONFIG_USB_DSBR) += dsbr100.o
+obj-$(CONFIG_USB_KEENE) += radio-keene.o
+obj-$(CONFIG_USB_MA901) += radio-ma901.o
+obj-$(CONFIG_USB_MR800) += radio-mr800.o
+obj-$(CONFIG_USB_RAREMONO) += radio-raremono.o
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index 54a40d60e4fd..1fb88c2b916c 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -275,7 +275,7 @@ static int __init fmi_init(void)
struct v4l2_device *v4l2_dev = &fmi->v4l2_dev;
struct v4l2_ctrl_handler *hdl = &fmi->hdl;
int res, i;
- int probe_ports[] = { 0, 0x284, 0x384 };
+ static const int probe_ports[] = { 0, 0x284, 0x384 };
if (io < 0) {
for (i = 0; i < ARRAY_SIZE(probe_ports); i++) {
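The one-line change above marks the probe table static const, so it is placed in read-only data once at compile time instead of being rebuilt on the function's stack at every call, and any stray write to it faults rather than silently corrupting the probe list. The pattern in isolation, as a hypothetical helper reusing the same table:

    static int first_probe_port(void)
    {
            static const int probe_ports[] = { 0, 0x284, 0x384 };
            unsigned int i;

            for (i = 0; i < ARRAY_SIZE(probe_ports); i++)
                    if (probe_ports[i])
                            return probe_ports[i];  /* first real port */
            return -1;
    }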
diff --git a/drivers/media/radio/si470x/Kconfig b/drivers/media/radio/si470x/Kconfig
index 7161bd6cd13c..9f7d35b04a13 100644
--- a/drivers/media/radio/si470x/Kconfig
+++ b/drivers/media/radio/si470x/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config RADIO_SI470X
tristate "Silicon Labs Si470x FM Radio Receiver support"
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
help
This is a driver for devices with the Silicon Labs SI470x
chip (either via USB or I2C buses).
diff --git a/drivers/media/radio/wl128x/Kconfig b/drivers/media/radio/wl128x/Kconfig
index d5ae3388d3db..3e7713872e3f 100644
--- a/drivers/media/radio/wl128x/Kconfig
+++ b/drivers/media/radio/wl128x/Kconfig
@@ -4,7 +4,7 @@
#
config RADIO_WL128X
tristate "Texas Instruments WL128x FM Radio"
- depends on VIDEO_V4L2 && RFKILL && TTY && TI_ST
+ depends on VIDEO_DEV && RFKILL && TTY && TI_ST
depends on GPIOLIB || COMPILE_TEST
help
Choose Y here if you have this FM radio chip.
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index 6142484d5cb4..8a316de70e6c 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -23,6 +23,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/nospec.h>
+#include <linux/jiffies.h>
#include "fmdrv.h"
#include "fmdrv_v4l2.h"
@@ -342,7 +343,7 @@ static void send_tasklet(struct tasklet_struct *t)
return;
/* Check, is there any timeout happened to last transmitted packet */
- if ((jiffies - fmdev->last_tx_jiffies) > FM_DRV_TX_TIMEOUT) {
+ if (time_is_before_jiffies(fmdev->last_tx_jiffies + FM_DRV_TX_TIMEOUT)) {
fmerr("TX timeout occurred\n");
atomic_set(&fmdev->tx_cnt, 1);
}
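time_is_before_jiffies() expands to a time_after()-style signed-difference comparison; the open-coded unsigned subtraction it replaces was mostly wraparound-safe already, but the helper states the intent explicitly and also behaves sanely if the stored timestamp lies in the future. The equivalent pattern in isolation, as a hypothetical wrapper around the same macro:

    #include <linux/jiffies.h>

    static bool tx_timed_out(unsigned long last_tx, unsigned long timeout)
    {
            /* true once last_tx + timeout lies in the past */
            return time_is_before_jiffies(last_tx + timeout);
    }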
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index c111af820ae4..f560fc38895f 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -15,15 +15,6 @@ menuconfig RC_CORE
Say Y when you have a TV or an IR device.
if RC_CORE
-source "drivers/media/rc/keymaps/Kconfig"
-
-config LIRC
- bool "LIRC user interface"
- help
- Enable this option to enable the Linux Infrared Remote
- Control user interface (e.g. /dev/lirc*). This interface
- passes raw IR to and from userspace, which is needed for
- IR transmitting (aka "blasting") and for the lirc daemon.
config BPF_LIRC_MODE2
bool "Support for eBPF programs attached to lirc devices"
@@ -38,10 +29,45 @@ config BPF_LIRC_MODE2
These eBPF programs can be used to decode IR into scancodes, for
IR protocols not supported by the kernel decoders.
+config LIRC
+ bool "LIRC user interface"
+ help
+ Enable this option to enable the Linux Infrared Remote
+ Control user interface (e.g. /dev/lirc*). This interface
+ passes raw IR to and from userspace, which is needed for
+ IR transmitting (aka "blasting") and for the lirc daemon.
+
+source "drivers/media/rc/keymaps/Kconfig"
+
menuconfig RC_DECODERS
bool "Remote controller decoders"
if RC_DECODERS
+
+config IR_IMON_DECODER
+ tristate "Enable IR raw decoder for the iMON protocol"
+ help
+ Enable this option if you have iMON PAD or Antec Veris infrared
+ remote control and you would like to use it with a raw IR
+ receiver, or if you wish to use an encoder to transmit this IR.
+
+config IR_JVC_DECODER
+ tristate "Enable IR raw decoder for the JVC protocol"
+ select BITREVERSE
+
+ help
+ Enable this option if you have an infrared remote control which
+ uses the JVC protocol, and you need software decoding support.
+
+config IR_MCE_KBD_DECODER
+ tristate "Enable IR raw decoder for the MCE keyboard/mouse protocol"
+ select BITREVERSE
+
+ help
+ Enable this option if you have a Microsoft Remote Keyboard for
+ Windows Media Center Edition, which you would like to use with
+ a raw IR receiver in your system.
+
config IR_NEC_DECODER
tristate "Enable IR raw decoder for the NEC protocol"
select BITREVERSE
@@ -66,21 +92,17 @@ config IR_RC6_DECODER
Enable this option if you have an infrared remote control which
uses the RC6 protocol, and you need software decoding support.
-config IR_JVC_DECODER
- tristate "Enable IR raw decoder for the JVC protocol"
- select BITREVERSE
-
+config IR_RCMM_DECODER
+ tristate "Enable IR raw decoder for the RC-MM protocol"
help
- Enable this option if you have an infrared remote control which
- uses the JVC protocol, and you need software decoding support.
-
-config IR_SONY_DECODER
- tristate "Enable IR raw decoder for the Sony protocol"
- select BITREVERSE
+ Enable this option if you have IR with the RC-MM protocol and
+ you need the software decoder. The driver supports the 12,
+ 24 and 32 bit RC-MM variants. You can enable or disable the
+ different modes using the following RC protocol keywords:
+ 'rc-mm-12', 'rc-mm-24' and 'rc-mm-32'.
- help
- Enable this option if you have an infrared remote control which
- uses the Sony protocol, and you need software decoding support.
+ To compile this driver as a module, choose M here: the module
+ will be called ir-rcmm-decoder.
config IR_SANYO_DECODER
tristate "Enable IR raw decoder for the Sanyo protocol"
@@ -100,14 +122,13 @@ config IR_SHARP_DECODER
uses the Sharp protocol (Sharp, Denon), and you need software
decoding support.
-config IR_MCE_KBD_DECODER
- tristate "Enable IR raw decoder for the MCE keyboard/mouse protocol"
+config IR_SONY_DECODER
+ tristate "Enable IR raw decoder for the Sony protocol"
select BITREVERSE
help
- Enable this option if you have a Microsoft Remote Keyboard for
- Windows Media Center Edition, which you would like to use with
- a raw IR receiver in your system.
+ Enable this option if you have an infrared remote control which
+ uses the Sony protocol, and you need software decoding support.
config IR_XMP_DECODER
tristate "Enable IR raw decoder for the XMP protocol"
@@ -117,25 +138,6 @@ config IR_XMP_DECODER
Enable this option if you have IR with XMP protocol, and
if the IR is decoded in software
-config IR_IMON_DECODER
- tristate "Enable IR raw decoder for the iMON protocol"
- help
- Enable this option if you have iMON PAD or Antec Veris infrared
- remote control and you would like to use it with a raw IR
- receiver, or if you wish to use an encoder to transmit this IR.
-
-config IR_RCMM_DECODER
- tristate "Enable IR raw decoder for the RC-MM protocol"
- help
- Enable this option when you have IR with RC-MM protocol, and
- you need the software decoder. The driver supports 12,
- 24 and 32 bits RC-MM variants. You can enable or disable the
- different modes using the following RC protocol keywords:
- 'rc-mm-12', 'rc-mm-24' and 'rc-mm-32'.
-
- To compile this driver as a module, choose M here: the module
- will be called ir-rcmm-decoder.
-
endif #RC_DECODERS
menuconfig RC_DEVICES
@@ -143,23 +145,6 @@ menuconfig RC_DEVICES
if RC_DEVICES
-config RC_ATI_REMOTE
- tristate "ATI / X10 based USB RF remote controls"
- depends on USB
- help
- Say Y here if you want to use an X10 based USB remote control.
- These are RF remotes with USB receivers.
-
- Such devices include the ATI remote that comes with many of ATI's
- All-In-Wonder video cards, the X10 "Lola" remote, NVIDIA RF remote,
- Medion RF remote, and SnapStream FireFly remote.
-
- This driver provides mouse pointer, left and right mouse buttons,
- and maps all the other remote buttons to keypress events.
-
- To compile this driver as a module, choose M here: the module will be
- called ati_remote.
-
config IR_ENE
tristate "ENE eHome Receiver/Transceiver (pnp id: ENE0100/ENE02xxx)"
depends on PNP || COMPILE_TEST
@@ -173,6 +158,37 @@ config IR_ENE
To compile this driver as a module, choose M here: the
module will be called ene_ir.
+config IR_FINTEK
+ tristate "Fintek Consumer Infrared Transceiver"
+ depends on PNP || COMPILE_TEST
+ help
+ Say Y here to enable support for the integrated infrared
+ receiver/transceiver made by Fintek. This chip is found on
+ assorted Jetway motherboards (and of course, possibly others).
+
+ To compile this driver as a module, choose M here: the
+ module will be called fintek-cir.
+
+config IR_GPIO_CIR
+ tristate "GPIO IR remote control"
+ depends on (OF && GPIOLIB) || COMPILE_TEST
+ help
+ Say Y if you want to use a GPIO based IR receiver.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gpio-ir-recv.
+
+config IR_GPIO_TX
+ tristate "GPIO IR Bit Banging Transmitter"
+ depends on LIRC
+ depends on (OF && GPIOLIB) || COMPILE_TEST
+ help
+ Say Y if you want to use a GPIO based IR transmitter. This is a
+ bit banging driver.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gpio-ir-tx.
+
config IR_HIX5HD2
tristate "Hisilicon hix5hd2 IR remote control"
depends on (OF && HAS_IOMEM) || COMPILE_TEST
@@ -183,6 +199,33 @@ config IR_HIX5HD2
If you're not sure, select N here
+config IR_IGORPLUGUSB
+ tristate "IgorPlug-USB IR Receiver"
+ depends on USB
+ help
+ Say Y here if you want to use the IgorPlug-USB IR Receiver by
+ Igor Cesko. This device is included on the Fit-PC2.
+
+ Note that this device can only record bursts of 36 IR pulses and
+ spaces, which is not enough for the NEC, Sanyo and RC-6 protocols.
+
+ To compile this driver as a module, choose M here: the module will
+ be called igorplugusb.
+
+config IR_IGUANA
+ tristate "IguanaWorks USB IR Transceiver"
+ depends on USB
+ help
+ Say Y here if you want to use the IguanaWorks USB IR Transceiver.
+ Both infrared receive and send are supported. If you want to
+ change the ID or the pin config, use the user space driver from
+ IguanaWorks.
+
+ Only firmware 0x0205 and later is supported.
+
+ To compile this driver as a module, choose M here: the module will
+ be called iguanair.
+
config IR_IMON
tristate "SoundGraph iMON Receiver and Display"
depends on USB
@@ -203,16 +246,6 @@ config IR_IMON_RAW
To compile this driver as a module, choose M here: the
module will be called imon_raw.
-config IR_MCEUSB
- tristate "Windows Media Center Ed. eHome Infrared Transceiver"
- depends on USB
- help
- Say Y here if you want to use a Windows Media Center Edition
- eHome Infrared Transceiver.
-
- To compile this driver as a module, choose M here: the
- module will be called mceusb.
-
config IR_ITE_CIR
tristate "ITE Tech Inc. IT8712/IT8512 Consumer Infrared Transceiver"
depends on PNP || COMPILE_TEST
@@ -225,16 +258,15 @@ config IR_ITE_CIR
To compile this driver as a module, choose M here: the
module will be called ite-cir.
-config IR_FINTEK
- tristate "Fintek Consumer Infrared Transceiver"
- depends on PNP || COMPILE_TEST
+config IR_MCEUSB
+ tristate "Windows Media Center Ed. eHome Infrared Transceiver"
+ depends on USB
help
- Say Y here to enable support for integrated infrared receiver
- /transceiver made by Fintek. This chip is found on assorted
- Jetway motherboards (and of course, possibly others).
+ Say Y here if you want to use a Windows Media Center Edition
+ eHome Infrared Transceiver.
To compile this driver as a module, choose M here: the
- module will be called fintek-cir.
+ module will be called mceusb.
config IR_MESON
tristate "Amlogic Meson IR remote receiver"
@@ -278,6 +310,18 @@ config IR_NUVOTON
To compile this driver as a module, choose M here: the
module will be called nuvoton-cir.
+config IR_PWM_TX
+ tristate "PWM IR transmitter"
+ depends on LIRC
+ depends on PWM
+ depends on OF || COMPILE_TEST
+ help
+ Say Y if you want to use a PWM based IR transmitter. This is
+ more power efficient than the bit banging gpio driver.
+
+ To compile this driver as a module, choose M here: the module will
+ be called pwm-ir-tx.
+
config IR_REDRAT3
tristate "RedRat3 IR Transceiver"
depends on USB
@@ -289,6 +333,31 @@ config IR_REDRAT3
To compile this driver as a module, choose M here: the
module will be called redrat3.
+config IR_RX51
+ tristate "Nokia N900 IR transmitter diode"
+ depends on (OMAP_DM_TIMER && PWM_OMAP_DMTIMER && ARCH_OMAP2PLUS || COMPILE_TEST) && RC_CORE
+ help
+ Say Y or M here if you want to enable support for the IR
+ transmitter diode built in the Nokia N900 (RX51) device.
+
+ The driver uses omap DM timers for generating the carrier
+ wave and pulses.
+
+config IR_SERIAL
+ tristate "Homebrew Serial Port Receiver"
+ help
+ Say Y if you want to use Homebrew Serial Port Receivers and
+ Transceivers.
+
+ To compile this driver as a module, choose M here: the module will
+ be called serial-ir.
+
+config IR_SERIAL_TRANSMITTER
+ bool "Serial Port Transmitter"
+ depends on IR_SERIAL
+ help
+ Serial Port Transmitter support
+
config IR_SPI
tristate "SPI connected IR LED"
depends on SPI && LIRC
@@ -309,47 +378,24 @@ config IR_STREAMZAP
To compile this driver as a module, choose M here: the
module will be called streamzap.
-config IR_WINBOND_CIR
- tristate "Winbond IR remote control"
- depends on (X86 && PNP) || COMPILE_TEST
- select NEW_LEDS
- select LEDS_CLASS
- select BITREVERSE
- help
- Say Y here if you want to use the IR remote functionality found
- in some Winbond SuperI/O chips. Currently only the WPCD376I
- chip is supported (included in some Intel Media series
- motherboards).
-
- To compile this driver as a module, choose M here: the module will
- be called winbond_cir.
-
-config IR_IGORPLUGUSB
- tristate "IgorPlug-USB IR Receiver"
- depends on USB
+config IR_SUNXI
+ tristate "SUNXI IR remote control"
+ depends on ARCH_SUNXI || COMPILE_TEST
help
- Say Y here if you want to use the IgorPlug-USB IR Receiver by
- Igor Cesko. This device is included on the Fit-PC2.
-
- Note that this device can only record bursts of 36 IR pulses and
- spaces, which is not enough for the NEC, Sanyo and RC-6 protocol.
+ Say Y if you want to use the sunXi internal IR controller.
To compile this driver as a module, choose M here: the module will
- be called igorplugusb.
+ be called sunxi-ir.
-config IR_IGUANA
- tristate "IguanaWorks USB IR Transceiver"
+config IR_TOY
+ tristate "Infrared Toy and IR Droid"
depends on USB
help
- Say Y here if you want to use the IguanaWorks USB IR Transceiver.
- Both infrared receive and send are supported. If you want to
- change the ID or the pin config, use the user space driver from
- IguanaWorks.
-
- Only firmware 0x0205 and later is supported.
+ Say Y here if you want to use the USB versions of the Infrared
+ Toy or IR Droid.
- To compile this driver as a module, choose M here: the module will
- be called iguanair.
+ To compile this driver as a module, choose M here: the module will be
+ called ir_toy.
config IR_TTUSBIR
tristate "TechnoTrend USB IR Receiver"
@@ -363,17 +409,37 @@ config IR_TTUSBIR
To compile this driver as a module, choose M here: the module will
be called ttusbir.
-config IR_RX51
- tristate "Nokia N900 IR transmitter diode"
- depends on (OMAP_DM_TIMER && PWM_OMAP_DMTIMER && ARCH_OMAP2PLUS || COMPILE_TEST) && RC_CORE
+config IR_WINBOND_CIR
+ tristate "Winbond IR remote control"
+ depends on (X86 && PNP) || COMPILE_TEST
+ select NEW_LEDS
+ select LEDS_CLASS
+ select BITREVERSE
help
- Say Y or M here if you want to enable support for the IR
- transmitter diode built in the Nokia N900 (RX51) device.
+ Say Y here if you want to use the IR remote functionality found
+ in some Winbond SuperI/O chips. Currently only the WPCD376I
+ chip is supported (included in some Intel Media series
+ motherboards).
- The driver uses omap DM timers for generating the carrier
- wave and pulses.
+ To compile this driver as a module, choose M here: the module will
+ be called winbond_cir.
-source "drivers/media/rc/img-ir/Kconfig"
+config RC_ATI_REMOTE
+ tristate "ATI / X10 based USB RF remote controls"
+ depends on USB
+ help
+ Say Y here if you want to use an X10 based USB remote control.
+ These are RF remotes with USB receivers.
+
+ Such devices include the ATI remote that comes with many of ATI's
+ All-In-Wonder video cards, the X10 "Lola" remote, NVIDIA RF remote,
+ Medion RF remote, and SnapStream FireFly remote.
+
+ This driver provides mouse pointer, left and right mouse buttons,
+ and maps all the other remote buttons to keypress events.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ati_remote.
config RC_LOOPBACK
tristate "Remote Control Loopback Driver"
@@ -387,38 +453,6 @@ config RC_LOOPBACK
To compile this driver as a module, choose M here: the module will
be called rc_loopback.
-config IR_GPIO_CIR
- tristate "GPIO IR remote control"
- depends on (OF && GPIOLIB) || COMPILE_TEST
- help
- Say Y if you want to use GPIO based IR Receiver.
-
- To compile this driver as a module, choose M here: the module will
- be called gpio-ir-recv.
-
-config IR_GPIO_TX
- tristate "GPIO IR Bit Banging Transmitter"
- depends on LIRC
- depends on (OF && GPIOLIB) || COMPILE_TEST
- help
- Say Y if you want to a GPIO based IR transmitter. This is a
- bit banging driver.
-
- To compile this driver as a module, choose M here: the module will
- be called gpio-ir-tx.
-
-config IR_PWM_TX
- tristate "PWM IR transmitter"
- depends on LIRC
- depends on PWM
- depends on OF || COMPILE_TEST
- help
- Say Y if you want to use a PWM based IR transmitter. This is
- more power efficient than the bit banging gpio driver.
-
- To compile this driver as a module, choose M here: the module will
- be called pwm-ir-tx.
-
config RC_ST
tristate "ST remote control receiver"
depends on ARCH_STI || COMPILE_TEST
@@ -429,30 +463,6 @@ config RC_ST
If you're not sure, select N here.
-config IR_SUNXI
- tristate "SUNXI IR remote control"
- depends on ARCH_SUNXI || COMPILE_TEST
- help
- Say Y if you want to use sunXi internal IR Controller
-
- To compile this driver as a module, choose M here: the module will
- be called sunxi-ir.
-
-config IR_SERIAL
- tristate "Homebrew Serial Port Receiver"
- help
- Say Y if you want to use Homebrew Serial Port Receivers and
- Transceivers.
-
- To compile this driver as a module, choose M here: the module will
- be called serial-ir.
-
-config IR_SERIAL_TRANSMITTER
- bool "Serial Port Transmitter"
- depends on IR_SERIAL
- help
- Serial Port Transmitter support
-
config RC_XBOX_DVD
tristate "Xbox DVD Movie Playback Kit"
depends on USB
@@ -463,15 +473,7 @@ config RC_XBOX_DVD
To compile this driver as a module, choose M here: the module will be
called xbox_remote.
-config IR_TOY
- tristate "Infrared Toy and IR Droid"
- depends on USB
- help
- Say Y here if you want to use the Infrared Toy or IR Droid, USB
- versions.
-
- To compile this driver as a module, choose M here: the module will be
- called ir_toy.
+source "drivers/media/rc/img-ir/Kconfig"
endif #RC_DEVICES
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 378d62d21e06..a9285266e944 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -2,51 +2,56 @@
obj-y += keymaps/
-obj-$(CONFIG_RC_CORE) += rc-core.o
rc-core-y := rc-main.o rc-ir-raw.o
rc-core-$(CONFIG_LIRC) += lirc_dev.o
rc-core-$(CONFIG_MEDIA_CEC_RC) += keymaps/rc-cec.o
rc-core-$(CONFIG_BPF_LIRC_MODE2) += bpf-lirc.o
+
+obj-$(CONFIG_RC_CORE) += rc-core.o
+
+# IR decoders - please keep it alphabetically sorted by Kconfig name
+# (e.g. LC_ALL=C sort Makefile)
+obj-$(CONFIG_IR_IMON_DECODER) += ir-imon-decoder.o
+obj-$(CONFIG_IR_JVC_DECODER) += ir-jvc-decoder.o
+obj-$(CONFIG_IR_MCE_KBD_DECODER) += ir-mce_kbd-decoder.o
obj-$(CONFIG_IR_NEC_DECODER) += ir-nec-decoder.o
obj-$(CONFIG_IR_RC5_DECODER) += ir-rc5-decoder.o
obj-$(CONFIG_IR_RC6_DECODER) += ir-rc6-decoder.o
-obj-$(CONFIG_IR_JVC_DECODER) += ir-jvc-decoder.o
-obj-$(CONFIG_IR_SONY_DECODER) += ir-sony-decoder.o
+obj-$(CONFIG_IR_RCMM_DECODER) += ir-rcmm-decoder.o
obj-$(CONFIG_IR_SANYO_DECODER) += ir-sanyo-decoder.o
obj-$(CONFIG_IR_SHARP_DECODER) += ir-sharp-decoder.o
-obj-$(CONFIG_IR_MCE_KBD_DECODER) += ir-mce_kbd-decoder.o
+obj-$(CONFIG_IR_SONY_DECODER) += ir-sony-decoder.o
obj-$(CONFIG_IR_XMP_DECODER) += ir-xmp-decoder.o
-obj-$(CONFIG_IR_IMON_DECODER) += ir-imon-decoder.o
-obj-$(CONFIG_IR_RCMM_DECODER) += ir-rcmm-decoder.o
-# stand-alone IR receivers/transmitters
-obj-$(CONFIG_RC_ATI_REMOTE) += ati_remote.o
+# stand-alone IR receivers/transmitters - please keep it alphabetically
+# sorted by Kconfig name (e. g. LC_ALL=C sort Makefile)
+obj-$(CONFIG_IR_ENE) += ene_ir.o
+obj-$(CONFIG_IR_FINTEK) += fintek-cir.o
+obj-$(CONFIG_IR_GPIO_CIR) += gpio-ir-recv.o
+obj-$(CONFIG_IR_GPIO_TX) += gpio-ir-tx.o
obj-$(CONFIG_IR_HIX5HD2) += ir-hix5hd2.o
+obj-$(CONFIG_IR_IGORPLUGUSB) += igorplugusb.o
+obj-$(CONFIG_IR_IGUANA) += iguanair.o
+obj-$(CONFIG_IR_IMG) += img-ir/
obj-$(CONFIG_IR_IMON) += imon.o
obj-$(CONFIG_IR_IMON_RAW) += imon_raw.o
obj-$(CONFIG_IR_ITE_CIR) += ite-cir.o
obj-$(CONFIG_IR_MCEUSB) += mceusb.o
-obj-$(CONFIG_IR_FINTEK) += fintek-cir.o
obj-$(CONFIG_IR_MESON) += meson-ir.o
obj-$(CONFIG_IR_MESON_TX) += meson-ir-tx.o
+obj-$(CONFIG_IR_MTK) += mtk-cir.o
obj-$(CONFIG_IR_NUVOTON) += nuvoton-cir.o
-obj-$(CONFIG_IR_ENE) += ene_ir.o
+obj-$(CONFIG_IR_PWM_TX) += pwm-ir-tx.o
obj-$(CONFIG_IR_REDRAT3) += redrat3.o
obj-$(CONFIG_IR_RX51) += ir-rx51.o
+obj-$(CONFIG_IR_SERIAL) += serial_ir.o
obj-$(CONFIG_IR_SPI) += ir-spi.o
obj-$(CONFIG_IR_STREAMZAP) += streamzap.o
+obj-$(CONFIG_IR_SUNXI) += sunxi-cir.o
+obj-$(CONFIG_IR_TOY) += ir_toy.o
+obj-$(CONFIG_IR_TTUSBIR) += ttusbir.o
obj-$(CONFIG_IR_WINBOND_CIR) += winbond-cir.o
+obj-$(CONFIG_RC_ATI_REMOTE) += ati_remote.o
obj-$(CONFIG_RC_LOOPBACK) += rc-loopback.o
-obj-$(CONFIG_IR_GPIO_CIR) += gpio-ir-recv.o
-obj-$(CONFIG_IR_GPIO_TX) += gpio-ir-tx.o
-obj-$(CONFIG_IR_PWM_TX) += pwm-ir-tx.o
-obj-$(CONFIG_IR_IGORPLUGUSB) += igorplugusb.o
-obj-$(CONFIG_IR_IGUANA) += iguanair.o
-obj-$(CONFIG_IR_TTUSBIR) += ttusbir.o
obj-$(CONFIG_RC_ST) += st_rc.o
-obj-$(CONFIG_IR_SUNXI) += sunxi-cir.o
-obj-$(CONFIG_IR_IMG) += img-ir/
-obj-$(CONFIG_IR_SERIAL) += serial_ir.o
-obj-$(CONFIG_IR_MTK) += mtk-cir.o
obj-$(CONFIG_RC_XBOX_DVD) += xbox_remote.o
-obj-$(CONFIG_IR_TOY) += ir_toy.o
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index b0d580566e4e..3fb0968efd57 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -287,7 +287,7 @@ static void fintek_process_rx_ir_data(struct fintek_dev *fintek)
if (fintek->rem)
fintek->parser_state = PARSE_IRDATA;
else
- ir_raw_event_reset(fintek->rdev);
+ ir_raw_event_overflow(fintek->rdev);
break;
case SUBCMD:
fintek->rem = fintek_cmdsize(fintek->cmd, sample);
diff --git a/drivers/media/rc/gpio-ir-tx.c b/drivers/media/rc/gpio-ir-tx.c
index c6cd2e6d8e65..a50701cfbbd7 100644
--- a/drivers/media/rc/gpio-ir-tx.c
+++ b/drivers/media/rc/gpio-ir-tx.c
@@ -48,11 +48,29 @@ static int gpio_ir_tx_set_carrier(struct rc_dev *dev, u32 carrier)
return 0;
}
+static void delay_until(ktime_t until)
+{
+ /*
+	 * delta should never exceed 0.5 seconds (IR_MAX_DURATION), and on
+	 * m68k ndelay(s64) does not compile, so use s32 rather than s64.
+ */
+ s32 delta;
+
+ while (true) {
+ delta = ktime_us_delta(until, ktime_get());
+ if (delta <= 0)
+ return;
+
+ /* udelay more than 1ms may not work */
+ delta = min(delta, 1000);
+ udelay(delta);
+ }
+}
+
static void gpio_ir_tx_unmodulated(struct gpio_ir *gpio_ir, uint *txbuf,
uint count)
{
ktime_t edge;
- s32 delta;
int i;
local_irq_disable();
@@ -63,9 +81,7 @@ static void gpio_ir_tx_unmodulated(struct gpio_ir *gpio_ir, uint *txbuf,
gpiod_set_value(gpio_ir->gpio, !(i % 2));
edge = ktime_add_us(edge, txbuf[i]);
- delta = ktime_us_delta(edge, ktime_get());
- if (delta > 0)
- udelay(delta);
+ delay_until(edge);
}
gpiod_set_value(gpio_ir->gpio, 0);
@@ -97,9 +113,7 @@ static void gpio_ir_tx_modulated(struct gpio_ir *gpio_ir, uint *txbuf,
if (i % 2) {
// space
edge = ktime_add_us(edge, txbuf[i]);
- delta = ktime_us_delta(edge, ktime_get());
- if (delta > 0)
- udelay(delta);
+ delay_until(edge);
} else {
// pulse
ktime_t last = ktime_add_us(edge, txbuf[i]);
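
The delay_until() helper added above spins to an absolute deadline in bounded steps: udelay() beyond about 1 ms is not dependable, and delta stays an s32 because ndelay(s64) does not build on m68k. Below is a rough userspace analog of the same chunked-deadline pattern, as a sketch only: the function names and the 5 ms example deadline are illustrative, and it uses nanosleep() where the driver busy-waits with udelay().

#include <stdint.h>
#include <time.h>

static int64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

static void sleep_us(int32_t us)
{
	struct timespec ts = { 0, (long)us * 1000 };

	nanosleep(&ts, NULL);
}

static void delay_until(int64_t until_us)
{
	for (;;) {
		int64_t delta = until_us - now_us();

		if (delta <= 0)
			return;
		/* bounded steps, mirroring the driver's 1 ms udelay() cap */
		sleep_us(delta > 1000 ? 1000 : (int32_t)delta);
	}
}

int main(void)
{
	delay_until(now_us() + 5000);	/* wait until 5 ms from now */
	return 0;
}
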
diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c
index 3e9988ee785f..b40dbf500186 100644
--- a/drivers/media/rc/igorplugusb.c
+++ b/drivers/media/rc/igorplugusb.c
@@ -67,7 +67,7 @@ static void igorplugusb_irdata(struct igorplugusb *ir, unsigned len)
if (overflow > 0) {
dev_warn(ir->dev, "receive overflow, at least %u lost",
overflow);
- ir_raw_event_reset(ir->rc);
+ ir_raw_event_overflow(ir->rc);
}
do {
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index f8d080e41f4c..c9cb8277723f 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -109,7 +109,7 @@ static void process_ir_data(struct iguanair *ir, unsigned len)
break;
case CMD_RX_OVERFLOW:
dev_warn(ir->dev, "receive overflow\n");
- ir_raw_event_reset(ir->rc);
+ ir_raw_event_overflow(ir->rc);
break;
default:
dev_warn(ir->dev, "control code %02x received\n",
diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c
index e0be6471afe5..4ff954b11dc7 100644
--- a/drivers/media/rc/ir-hix5hd2.c
+++ b/drivers/media/rc/ir-hix5hd2.c
@@ -194,7 +194,7 @@ static irqreturn_t hix5hd2_ir_rx_interrupt(int irq, void *data)
 * IR_INTS stays asserted since the logic does not clear the
 * fifo on overflow; the driver does that job
*/
- ir_raw_event_reset(priv->rdev);
+ ir_raw_event_overflow(priv->rdev);
symb_num = readl_relaxed(priv->base + IR_DATAH);
for (i = 0; i < symb_num; i++)
readl_relaxed(priv->base + IR_DATAL);
diff --git a/drivers/media/rc/ir-imon-decoder.c b/drivers/media/rc/ir-imon-decoder.c
index 41dbbef27fa6..dc68f64e7b51 100644
--- a/drivers/media/rc/ir-imon-decoder.c
+++ b/drivers/media/rc/ir-imon-decoder.c
@@ -95,7 +95,7 @@ static int ir_imon_decode(struct rc_dev *dev, struct ir_raw_event ev)
struct imon_dec *data = &dev->raw->imon;
if (!is_timing_event(ev)) {
- if (ev.reset)
+ if (ev.overflow)
data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/rc/ir-jvc-decoder.c b/drivers/media/rc/ir-jvc-decoder.c
index 470f2e1fd507..8b10954d2b6b 100644
--- a/drivers/media/rc/ir-jvc-decoder.c
+++ b/drivers/media/rc/ir-jvc-decoder.c
@@ -40,7 +40,7 @@ static int ir_jvc_decode(struct rc_dev *dev, struct ir_raw_event ev)
struct jvc_dec *data = &dev->raw->jvc;
if (!is_timing_event(ev)) {
- if (ev.reset)
+ if (ev.overflow)
data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c
index 1524dc0fc566..66e8feb9a569 100644
--- a/drivers/media/rc/ir-mce_kbd-decoder.c
+++ b/drivers/media/rc/ir-mce_kbd-decoder.c
@@ -221,7 +221,7 @@ static int ir_mce_kbd_decode(struct rc_dev *dev, struct ir_raw_event ev)
struct lirc_scancode lsc = {};
if (!is_timing_event(ev)) {
- if (ev.reset)
+ if (ev.overflow)
data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c
index b4c3e4baf34d..37b99432ad0d 100644
--- a/drivers/media/rc/ir-nec-decoder.c
+++ b/drivers/media/rc/ir-nec-decoder.c
@@ -44,7 +44,7 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
u8 address, not_address, command, not_command;
if (!is_timing_event(ev)) {
- if (ev.reset)
+ if (ev.overflow)
data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
index d58b6226afeb..82d7f6ad2338 100644
--- a/drivers/media/rc/ir-rc5-decoder.c
+++ b/drivers/media/rc/ir-rc5-decoder.c
@@ -45,7 +45,7 @@ static int ir_rc5_decode(struct rc_dev *dev, struct ir_raw_event ev)
enum rc_proto protocol;
if (!is_timing_event(ev)) {
- if (ev.reset)
+ if (ev.overflow)
data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index 0657ad5eef48..3b2c8bab3e73 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -85,7 +85,7 @@ static int ir_rc6_decode(struct rc_dev *dev, struct ir_raw_event ev)
enum rc_proto protocol;
if (!is_timing_event(ev)) {
- if (ev.reset)
+ if (ev.overflow)
data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/rc/ir-rcmm-decoder.c b/drivers/media/rc/ir-rcmm-decoder.c
index fd9ec69a3718..a8a34436fe85 100644
--- a/drivers/media/rc/ir-rcmm-decoder.c
+++ b/drivers/media/rc/ir-rcmm-decoder.c
@@ -69,7 +69,7 @@ static int ir_rcmm_decode(struct rc_dev *dev, struct ir_raw_event ev)
return 0;
if (!is_timing_event(ev)) {
- if (ev.reset)
+ if (ev.overflow)
data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/rc/ir-sanyo-decoder.c b/drivers/media/rc/ir-sanyo-decoder.c
index bfc181be1044..2bc98c342882 100644
--- a/drivers/media/rc/ir-sanyo-decoder.c
+++ b/drivers/media/rc/ir-sanyo-decoder.c
@@ -51,8 +51,8 @@ static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev)
u8 command, not_command;
if (!is_timing_event(ev)) {
- if (ev.reset) {
- dev_dbg(&dev->dev, "SANYO event reset received. reset to state 0\n");
+ if (ev.overflow) {
+ dev_dbg(&dev->dev, "SANYO event overflow received. reset to state 0\n");
data->state = STATE_INACTIVE;
}
return 0;
diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c
index d09c38c07dbd..3d8488c39c56 100644
--- a/drivers/media/rc/ir-sharp-decoder.c
+++ b/drivers/media/rc/ir-sharp-decoder.c
@@ -41,7 +41,7 @@ static int ir_sharp_decode(struct rc_dev *dev, struct ir_raw_event ev)
u32 msg, echo, address, command, scancode;
if (!is_timing_event(ev)) {
- if (ev.reset)
+ if (ev.overflow)
data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/rc/ir-sony-decoder.c b/drivers/media/rc/ir-sony-decoder.c
index d760d52abaa2..bb25867ecb5e 100644
--- a/drivers/media/rc/ir-sony-decoder.c
+++ b/drivers/media/rc/ir-sony-decoder.c
@@ -39,7 +39,7 @@ static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev)
u8 device, subdevice, function;
if (!is_timing_event(ev)) {
- if (ev.reset)
+ if (ev.overflow)
data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/rc/ir-xmp-decoder.c b/drivers/media/rc/ir-xmp-decoder.c
index ff94f48bda32..dc36b68739cb 100644
--- a/drivers/media/rc/ir-xmp-decoder.c
+++ b/drivers/media/rc/ir-xmp-decoder.c
@@ -37,7 +37,7 @@ static int ir_xmp_decode(struct rc_dev *dev, struct ir_raw_event ev)
struct xmp_dec *data = &dev->raw->xmp;
if (!is_timing_event(ev)) {
- if (ev.reset)
+ if (ev.overflow)
data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c
index 7e98e7e3aace..196806709259 100644
--- a/drivers/media/rc/ir_toy.c
+++ b/drivers/media/rc/ir_toy.c
@@ -458,7 +458,7 @@ static int irtoy_probe(struct usb_interface *intf,
err = usb_submit_urb(irtoy->urb_in, GFP_KERNEL);
if (err != 0) {
dev_err(irtoy->dev, "fail to submit in urb: %d\n", err);
- return err;
+ goto free_rcdev;
}
err = irtoy_setup(irtoy);
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 4f77d4ebacdc..fcfadd7ea31c 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -238,7 +238,7 @@ static irqreturn_t ite_cir_isr(int irq, void *data)
/* Check for RX overflow */
if (iflags & ITE_IRQ_RX_FIFO_OVERRUN) {
dev_warn(&dev->rdev->dev, "receive overflow\n");
- ir_raw_event_reset(dev->rdev);
+ ir_raw_event_overflow(dev->rdev);
}
/* check for the receive interrupt */
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index 5fe5c9e1a46d..f513ff5caf4e 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -1,5 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
+
+# Please keep keymaps alphabetically sorted by directory name
+#(e. g. LC_ALL=C sort Makefile)
+obj-$(CONFIG_RC_MAP) += \
+ rc-adstech-dvb-t-pci.o \
rc-alink-dtu-m.o \
rc-anysee.o \
rc-apac-viewcomp.o \
@@ -9,17 +13,17 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-ati-tv-wonder-hd-600.o \
rc-ati-x10.o \
rc-avermedia-a16d.o \
- rc-avermedia.o \
rc-avermedia-cardbus.o \
rc-avermedia-dvbt.o \
rc-avermedia-m135a.o \
rc-avermedia-m733a-rm-k6.o \
+ rc-avermedia.o \
rc-avermedia-rm-ks.o \
rc-avertv-303.o \
rc-azurewave-ad-tu700.o \
rc-beelink-gs1.o \
- rc-behold.o \
rc-behold-columbus.o \
+ rc-behold.o \
rc-budget-ci-old.o \
rc-cinergy-1400.o \
rc-cinergy.o \
@@ -39,8 +43,8 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-dvico-portable.o \
rc-em-terratec.o \
rc-encore-enltv2.o \
- rc-encore-enltv.o \
rc-encore-enltv-fm53.o \
+ rc-encore-enltv.o \
rc-evga-indtube.o \
rc-eztv.o \
rc-flydvb.o \
@@ -50,6 +54,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-geekbox.o \
rc-genius-tvgo-a11mce.o \
rc-gotview7135.o \
+ rc-hauppauge.o \
rc-hisi-poplar.o \
rc-hisi-tv-demo.o \
rc-imon-mce.o \
@@ -67,14 +72,14 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-leadtek-y04g0051.o \
rc-lme2510.o \
rc-manli.o \
- rc-mecool-kii-pro.o \
rc-mecool-kiii-pro.o \
- rc-medion-x10.o \
+ rc-mecool-kii-pro.o \
rc-medion-x10-digitainer.o \
+ rc-medion-x10.o \
rc-medion-x10-or2x.o \
rc-minix-neo.o \
- rc-msi-digivox-ii.o \
rc-msi-digivox-iii.o \
+ rc-msi-digivox-ii.o \
rc-msi-tvanywhere.o \
rc-msi-tvanywhere-plus.o \
rc-nebula.o \
@@ -87,20 +92,20 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-pinnacle-color.o \
rc-pinnacle-grey.o \
rc-pinnacle-pctv-hd.o \
- rc-pixelview.o \
- rc-pixelview-mk12.o \
rc-pixelview-002t.o \
+ rc-pixelview-mk12.o \
rc-pixelview-new.o \
+ rc-pixelview.o \
rc-powercolor-real-angel.o \
rc-proteus-2309.o \
rc-purpletv.o \
rc-pv951.o \
- rc-hauppauge.o \
rc-rc6-mce.o \
rc-real-audio-220-32-keys.o \
rc-reddo.o \
rc-snapstream-firefly.o \
rc-streamzap.o \
+ rc-su3000.o \
rc-tanix-tx3mini.o \
rc-tanix-tx5max.o \
rc-tbs-nec.o \
@@ -109,16 +114,16 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-terratec-cinergy-c-pci.o \
rc-terratec-cinergy-s2-hd.o \
rc-terratec-cinergy-xs.o \
- rc-terratec-slim.o \
rc-terratec-slim-2.o \
+ rc-terratec-slim.o \
rc-tevii-nec.o \
rc-tivo.o \
- rc-total-media-in-hand.o \
rc-total-media-in-hand-02.o \
+ rc-total-media-in-hand.o \
rc-trekstor.o \
rc-tt-1500.o \
- rc-twinhan-dtv-cab-ci.o \
rc-twinhan1027.o \
+ rc-twinhan-dtv-cab-ci.o \
rc-vega-s9x.o \
rc-videomate-m1f.o \
rc-videomate-s350.o \
@@ -128,8 +133,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-wetek-play2.o \
rc-winfast.o \
rc-winfast-usbii-deluxe.o \
- rc-su3000.o \
+ rc-x96max.o \
rc-xbox-360.o \
rc-xbox-dvd.o \
- rc-x96max.o \
rc-zx-irdec.o
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index c7c5157725f8..765375bda0c6 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -41,17 +41,16 @@ void lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev)
struct lirc_fh *fh;
int sample;
- /* Packet start */
- if (ev.reset) {
+ /* Receiver overflow, data missing */
+ if (ev.overflow) {
/*
- * Userspace expects a long space event before the start of
- * the signal to use as a sync. This may be done with repeat
- * packets and normal samples. But if a reset has been sent
- * then we assume that a long time has passed, so we send a
- * space with the maximum time value.
+		 * Send the lirc overflow message. This message is unknown to
+		 * lircd, but it will interpret it as a long space as long as
+		 * the value is set to a high value. This resets its decoder
+		 * state.
*/
- sample = LIRC_SPACE(LIRC_VALUE_MASK);
- dev_dbg(&dev->dev, "delivering reset sync space to lirc_dev\n");
+ sample = LIRC_OVERFLOW(LIRC_VALUE_MASK);
+ dev_dbg(&dev->dev, "delivering overflow to lirc_dev\n");
/* Carrier reports */
} else if (ev.carrier_report) {
@@ -60,32 +59,25 @@ void lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev)
/* Packet end */
} else if (ev.timeout) {
- if (dev->gap)
- return;
-
dev->gap_start = ktime_get();
- dev->gap = true;
- dev->gap_duration = ev.duration;
sample = LIRC_TIMEOUT(ev.duration);
dev_dbg(&dev->dev, "timeout report (duration: %d)\n", sample);
/* Normal sample */
} else {
- if (dev->gap) {
- dev->gap_duration += ktime_to_us(ktime_sub(ktime_get(),
- dev->gap_start));
+ if (dev->gap_start) {
+ u64 duration = ktime_us_delta(ktime_get(),
+ dev->gap_start);
/* Cap by LIRC_VALUE_MASK */
- dev->gap_duration = min_t(u64, dev->gap_duration,
- LIRC_VALUE_MASK);
+ duration = min_t(u64, duration, LIRC_VALUE_MASK);
spin_lock_irqsave(&dev->lirc_fh_lock, flags);
list_for_each_entry(fh, &dev->lirc_fh, list)
- kfifo_put(&fh->rawir,
- LIRC_SPACE(dev->gap_duration));
+ kfifo_put(&fh->rawir, LIRC_SPACE(duration));
spin_unlock_irqrestore(&dev->lirc_fh_lock, flags);
- dev->gap = false;
+ dev->gap_start = 0;
}
sample = ev.pulse ? LIRC_PULSE(ev.duration) :
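
With this change a receive overflow reaches userspace as an explicit LIRC_OVERFLOW sample instead of a synthetic long space; an old lircd still treats the high value as a long gap. Below is a minimal mode2 reader sketch, assuming a uapi <linux/lirc.h> new enough to define LIRC_MODE2_OVERFLOW (introduced alongside this series) and a hypothetical /dev/lirc0 left in its default mode2 receive mode.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/lirc.h>

int main(void)
{
	unsigned int sample;
	int fd = open("/dev/lirc0", O_RDONLY);	/* hypothetical device node */

	if (fd < 0)
		return 1;

	while (read(fd, &sample, sizeof(sample)) == sizeof(sample)) {
		switch (LIRC_MODE2(sample)) {
		case LIRC_MODE2_PULSE:
			printf("pulse %u\n", LIRC_VALUE(sample));
			break;
		case LIRC_MODE2_SPACE:
			printf("space %u\n", LIRC_VALUE(sample));
			break;
		case LIRC_MODE2_TIMEOUT:
			printf("timeout %u\n", LIRC_VALUE(sample));
			break;
		case LIRC_MODE2_OVERFLOW:
			/* receiver lost data; flush any decoder state */
			printf("overflow\n");
			break;
		}
	}
	close(fd);
	return 0;
}
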
diff --git a/drivers/media/rc/meson-ir-tx.c b/drivers/media/rc/meson-ir-tx.c
index c22cd26a5c07..63e1dbf0a4e9 100644
--- a/drivers/media/rc/meson-ir-tx.c
+++ b/drivers/media/rc/meson-ir-tx.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* meson-ir-tx.c - Amlogic Meson IR TX driver
*
* Copyright (c) 2021, SberDevices. All Rights Reserved.
diff --git a/drivers/media/rc/mtk-cir.c b/drivers/media/rc/mtk-cir.c
index 840e7aec5c21..27b7412d02a5 100644
--- a/drivers/media/rc/mtk-cir.c
+++ b/drivers/media/rc/mtk-cir.c
@@ -24,7 +24,8 @@
 * Register for setting the ok count, whose unit is the hardware sampling
 * period; it indicates IR receive completion and then makes the IRQ fire
*/
-#define MTK_OK_COUNT(x) (((x) & GENMASK(23, 16)) << 16)
+#define MTK_OK_COUNT_MASK (GENMASK(22, 16))
+#define MTK_OK_COUNT(x) ((x) << 16)
/* Bit to enable IR hardware function */
#define MTK_IR_EN BIT(0)
@@ -202,25 +203,24 @@ static inline void mtk_irq_enable(struct mtk_ir *ir, u32 mask)
static irqreturn_t mtk_ir_irq(int irqno, void *dev_id)
{
+ struct ir_raw_event rawir = {};
struct mtk_ir *ir = dev_id;
- u8 wid = 0;
u32 i, j, val;
- struct ir_raw_event rawir = {};
+ u8 wid;
/*
- * Reset decoder state machine explicitly is required
- * because 1) the longest duration for space MTK IR hardware
- * could record is not safely long. e.g 12ms if rx resolution
- * is 46us by default. There is still the risk to satisfying
- * every decoder to reset themselves through long enough
- * trailing spaces and 2) the IRQ handler guarantees that
- * start of IR message is always contained in and starting
- * from register mtk_chkdata_reg(ir, i).
+ * Each pulse and space is encoded as a single byte, each byte
+ * alternating between pulse and space. If a pulse or space is longer
+ * than can be encoded in a single byte, it is encoded as the maximum
+ * value 0xff.
+ *
+ * If a space is longer than ok_count (about 23ms), the value is
+ * encoded as zero, and all following bytes are zero. Any IR that
+ * follows will be presented in the next interrupt.
+ *
+ * If there are more than 68 (=MTK_CHKDATA_SZ * 4) pulses and spaces,
+	 * then only the first 68 will be presented; the rest is lost.
*/
- ir_raw_event_reset(ir->rc);
-
- /* First message must be pulse */
- rawir.pulse = false;
/* Handle all pulse and space IR controller captures */
for (i = 0 ; i < MTK_CHKDATA_SZ ; i++) {
@@ -228,7 +228,8 @@ static irqreturn_t mtk_ir_irq(int irqno, void *dev_id)
dev_dbg(ir->dev, "@reg%d=0x%08x\n", i, val);
for (j = 0 ; j < 4 ; j++) {
- wid = (val & (MTK_WIDTH_MASK << j * 8)) >> j * 8;
+ wid = val & MTK_WIDTH_MASK;
+ val >>= 8;
rawir.pulse = !rawir.pulse;
rawir.duration = wid * (MTK_IR_SAMPLE + 1);
ir_raw_event_store_with_filter(ir->rc, &rawir);
@@ -268,7 +269,7 @@ static irqreturn_t mtk_ir_irq(int irqno, void *dev_id)
static const struct mtk_ir_data mt7623_data = {
.regs = mt7623_regs,
.fields = mt7623_fields,
- .ok_count = 0xf,
+ .ok_count = 3,
.hw_period = 0xff,
.div = 4,
};
@@ -276,7 +277,7 @@ static const struct mtk_ir_data mt7623_data = {
static const struct mtk_ir_data mt7622_data = {
.regs = mt7622_regs,
.fields = mt7622_fields,
- .ok_count = 0xf,
+ .ok_count = 3,
.hw_period = 0xffff,
.div = 32,
};
@@ -400,7 +401,7 @@ static int mtk_ir_probe(struct platform_device *pdev)
mtk_w32_mask(ir, MTK_DG_CNT(1), MTK_DG_CNT_MASK, MTK_IRTHD);
/* Enable IR and PWM */
- val = mtk_r32(ir, MTK_CONFIG_HIGH_REG);
+ val = mtk_r32(ir, MTK_CONFIG_HIGH_REG) & ~MTK_OK_COUNT_MASK;
val |= MTK_OK_COUNT(ir->data->ok_count) | MTK_PWM_EN | MTK_IR_EN;
mtk_w32(ir, val, MTK_CONFIG_HIGH_REG);
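
Note the macro fix above: the old MTK_OK_COUNT() masked its argument with GENMASK(23, 16) before shifting, so small counts such as the previous 0xf were written to the register as 0; the new form shifts the raw value into bits 22:16, and probe now clears MTK_OK_COUNT_MASK before setting it. The rewritten interrupt comment also documents the byte packing of pulses and spaces, sketched below under the assumptions that MTK_WIDTH_MASK is the low byte and MTK_IR_SAMPLE + 1 is the sample period in microseconds (the shift-by-8 loop itself mirrors the hunk):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MTK_WIDTH_MASK	0xffu	/* assumption: low byte per sample */
#define MTK_IR_SAMPLE	45	/* assumption: ~46 us resolution */

static void unpack_chkdata(uint32_t val, bool *pulse)
{
	for (int j = 0; j < 4; j++) {
		uint8_t wid = val & MTK_WIDTH_MASK;

		val >>= 8;
		*pulse = !*pulse;	/* bytes alternate pulse/space */
		printf("%s %u us\n", *pulse ? "pulse" : "space",
		       (unsigned int)(wid * (MTK_IR_SAMPLE + 1)));
	}
}

int main(void)
{
	bool pulse = false;	/* toggled before use: first event is a pulse */

	unpack_chkdata(0x20ff4010, &pulse);	/* example register value */
	return 0;
}
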
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 8a37f083fe3d..2214d41ef579 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -742,7 +742,7 @@ static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
nvt->pkts = 0;
nvt_clear_cir_fifo(nvt);
- ir_raw_event_reset(nvt->rdev);
+ ir_raw_event_overflow(nvt->rdev);
}
/* copy data from hardware rx fifo into driver buffer */
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 62f032dffd33..ef1e95e1af7f 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -190,7 +190,7 @@ static inline void decrease_duration(struct ir_raw_event *ev, unsigned duration)
/* Returns true if event is normal pulse/space event */
static inline bool is_timing_event(struct ir_raw_event ev)
{
- return !ev.carrier_report && !ev.reset;
+ return !ev.carrier_report && !ev.overflow;
}
#define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space")
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index c65bba4ec473..16e33d7eaaa2 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -35,8 +35,6 @@ static int ir_raw_event_thread(void *data)
!is_transition(&ev, &raw->prev_ev))
dev_warn_once(&dev->dev, "two consecutive events of type %s",
TO_STR(ev.pulse));
- if (raw->prev_ev.reset && ev.pulse == 0)
- dev_warn_once(&dev->dev, "timing event after reset should be pulse");
}
list_for_each_entry(handler, &ir_raw_handler_list, list)
if (dev->enabled_protocols &
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index 6441879fcba1..b356041c5c00 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -112,7 +112,11 @@ static int loop_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
rawir.pulse = i % 2 ? false : true;
rawir.duration = txbuf[i];
- ir_raw_event_store_with_filter(dev, &rawir);
+		/* simulate overflow if a ridiculously long pulse was sent */
+ if (rawir.pulse && rawir.duration > MS_TO_US(50))
+ ir_raw_event_overflow(dev);
+ else
+ ir_raw_event_store_with_filter(dev, &rawir);
}
if (lodev->carrierreport) {
diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c
index 4e419dbbacd3..19e987a048cc 100644
--- a/drivers/media/rc/st_rc.c
+++ b/drivers/media/rc/st_rc.c
@@ -111,7 +111,7 @@ static irqreturn_t st_rc_rx_interrupt(int irq, void *data)
int_status = readl(dev->rx_base + IRB_RX_INT_STATUS);
if (unlikely(int_status & IRB_RX_OVERRUN_INT)) {
/* discard the entire collection in case of errors! */
- ir_raw_event_reset(dev->rdev);
+ ir_raw_event_overflow(dev->rdev);
dev_info(dev->dev, "IR RX overrun\n");
writel(IRB_RX_OVERRUN_INT,
dev->rx_base + IRB_RX_INT_CLEAR);
diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
index 391a591c1b75..b631a81e58bb 100644
--- a/drivers/media/rc/sunxi-cir.c
+++ b/drivers/media/rc/sunxi-cir.c
@@ -126,7 +126,7 @@ static irqreturn_t sunxi_ir_irq(int irqno, void *dev_id)
}
if (status & REG_RXSTA_ROI) {
- ir_raw_event_reset(ir->rc);
+ ir_raw_event_overflow(ir->rc);
} else if (status & REG_RXSTA_RPE) {
ir_raw_event_set_idle(ir->rc, true);
ir_raw_event_handle(ir->rc);
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 94efb035d21b..25884a79985c 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -470,7 +470,7 @@ wbcir_irq_handler(int irqno, void *cookie)
/* RX overflow? (read clears bit) */
if (inb(data->sbase + WBCIR_REG_SP3_LSR) & WBCIR_RX_OVERRUN) {
data->rxstate = WBCIR_RXSTATE_ERROR;
- ir_raw_event_reset(data->dev);
+ ir_raw_event_overflow(data->dev);
}
/* TX underflow? */
diff --git a/drivers/media/spi/Kconfig b/drivers/media/spi/Kconfig
index 857ef4ace6e9..4656afae5bb4 100644
--- a/drivers/media/spi/Kconfig
+++ b/drivers/media/spi/Kconfig
@@ -1,25 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
-if VIDEO_V4L2
+if VIDEO_DEV && SPI
comment "SPI I2C drivers auto-selected by 'Autoselect ancillary drivers'"
depends on MEDIA_HIDE_ANCILLARY_SUBDRV && SPI
-menu "SPI helper chips"
- visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
-
-config VIDEO_GS1662
- tristate "Gennum Serializers video"
- depends on SPI && VIDEO_V4L2
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- help
- Enable the GS1662 driver which serializes video streams.
-
-endmenu
-
-endif
-
-if SPI
menu "Media SPI Adapters"
config CXD2880_SPI_DRV
@@ -29,6 +13,14 @@ config CXD2880_SPI_DRV
help
Choose if you would like to have SPI interface support for Sony CXD2880.
+config VIDEO_GS1662
+ tristate "Gennum Serializers video"
+ depends on SPI && VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ Enable the GS1662 driver which serializes video streams.
+
endmenu
endif
diff --git a/drivers/media/spi/Makefile b/drivers/media/spi/Makefile
index 9f45787d680d..6ac7adc64124 100644
--- a/drivers/media/spi/Makefile
+++ b/drivers/media/spi/Makefile
@@ -1,5 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_VIDEO_GS1662) += gs1662.o
-obj-$(CONFIG_CXD2880_SPI_DRV) += cxd2880-spi.o
ccflags-y += -I $(srctree)/drivers/media/dvb-frontends/cxd2880
+
+# Please keep it alphabetically sorted by Kconfig name
+# (e. g. LC_ALL=C sort Makefile)
+obj-$(CONFIG_CXD2880_SPI_DRV) += cxd2880-spi.o
+obj-$(CONFIG_VIDEO_GS1662) += gs1662.o
diff --git a/drivers/media/spi/cxd2880-spi.c b/drivers/media/spi/cxd2880-spi.c
index 6f2a66bc87fb..6be4e5528879 100644
--- a/drivers/media/spi/cxd2880-spi.c
+++ b/drivers/media/spi/cxd2880-spi.c
@@ -625,7 +625,7 @@ fail_regulator:
return ret;
}
-static int
+static void
cxd2880_spi_remove(struct spi_device *spi)
{
struct cxd2880_dvb_spi *dvb_spi = spi_get_drvdata(spi);
@@ -643,8 +643,6 @@ cxd2880_spi_remove(struct spi_device *spi)
kfree(dvb_spi);
pr_info("cxd2880_spi remove ok.\n");
-
- return 0;
}
static const struct spi_device_id cxd2880_spi_id[] = {
diff --git a/drivers/media/spi/gs1662.c b/drivers/media/spi/gs1662.c
index f86ef1ca1288..75c21a93e6d0 100644
--- a/drivers/media/spi/gs1662.c
+++ b/drivers/media/spi/gs1662.c
@@ -458,13 +458,11 @@ static int gs_probe(struct spi_device *spi)
return ret;
}
-static int gs_remove(struct spi_device *spi)
+static void gs_remove(struct spi_device *spi)
{
struct v4l2_subdev *sd = spi_get_drvdata(spi);
v4l2_device_unregister_subdev(sd);
-
- return 0;
}
static struct spi_driver gs_driver = {
diff --git a/drivers/media/test-drivers/Kconfig b/drivers/media/test-drivers/Kconfig
index e27d6602545d..51cf27834df0 100644
--- a/drivers/media/test-drivers/Kconfig
+++ b/drivers/media/test-drivers/Kconfig
@@ -6,13 +6,9 @@ menuconfig V4L_TEST_DRIVERS
if V4L_TEST_DRIVERS
-source "drivers/media/test-drivers/vimc/Kconfig"
-
-source "drivers/media/test-drivers/vivid/Kconfig"
-
config VIDEO_VIM2M
tristate "Virtual Memory-to-Memory Driver"
- depends on VIDEO_DEV && VIDEO_V4L2
+ depends on VIDEO_DEV
select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
select MEDIA_CONTROLLER
@@ -22,6 +18,8 @@ config VIDEO_VIM2M
framework.
source "drivers/media/test-drivers/vicodec/Kconfig"
+source "drivers/media/test-drivers/vimc/Kconfig"
+source "drivers/media/test-drivers/vivid/Kconfig"
endif #V4L_TEST_DRIVERS
diff --git a/drivers/media/test-drivers/Makefile b/drivers/media/test-drivers/Makefile
index 9f0e4ebb2efe..ff390b687189 100644
--- a/drivers/media/test-drivers/Makefile
+++ b/drivers/media/test-drivers/Makefile
@@ -3,8 +3,12 @@
# Makefile for the test drivers.
#
-obj-$(CONFIG_VIDEO_VIMC) += vimc/
-obj-$(CONFIG_VIDEO_VIVID) += vivid/
-obj-$(CONFIG_VIDEO_VIM2M) += vim2m.o
-obj-$(CONFIG_VIDEO_VICODEC) += vicodec/
-obj-$(CONFIG_DVB_VIDTV) += vidtv/
+# Please keep it alphabetically sorted by Kconfig name
+# (e. g. LC_ALL=C sort Makefile)
+
+obj-$(CONFIG_DVB_VIDTV) += vidtv/
+
+obj-$(CONFIG_VIDEO_VICODEC) += vicodec/
+obj-$(CONFIG_VIDEO_VIM2M) += vim2m.o
+obj-$(CONFIG_VIDEO_VIMC) += vimc/
+obj-$(CONFIG_VIDEO_VIVID) += vivid/
diff --git a/drivers/media/test-drivers/vicodec/Kconfig b/drivers/media/test-drivers/vicodec/Kconfig
index d77c67810c73..a7a828eec2a4 100644
--- a/drivers/media/test-drivers/vicodec/Kconfig
+++ b/drivers/media/test-drivers/vicodec/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_VICODEC
tristate "Virtual Codec Driver"
- depends on VIDEO_DEV && VIDEO_V4L2
+ depends on VIDEO_DEV
select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
select MEDIA_CONTROLLER
diff --git a/drivers/media/test-drivers/vidtv/Kconfig b/drivers/media/test-drivers/vidtv/Kconfig
index 22c4fd39461f..e511e51c0b5b 100644
--- a/drivers/media/test-drivers/vidtv/Kconfig
+++ b/drivers/media/test-drivers/vidtv/Kconfig
@@ -7,5 +7,4 @@ config DVB_VIDTV
validate the existing APIs in the media subsystem. It can also aid developers
working on userspace applications.
-
When in doubt, say N.
diff --git a/drivers/media/test-drivers/vidtv/vidtv_s302m.c b/drivers/media/test-drivers/vidtv/vidtv_s302m.c
index d79b65854627..9da18eac04b5 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_s302m.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_s302m.c
@@ -455,6 +455,9 @@ struct vidtv_encoder
e->name = kstrdup(args.name, GFP_KERNEL);
e->encoder_buf = vzalloc(VIDTV_S302M_BUF_SZ);
+ if (!e->encoder_buf)
+ goto out_kfree_e;
+
e->encoder_buf_sz = VIDTV_S302M_BUF_SZ;
e->encoder_buf_offset = 0;
@@ -467,10 +470,8 @@ struct vidtv_encoder
e->is_video_encoder = false;
ctx = kzalloc(priv_sz, GFP_KERNEL);
- if (!ctx) {
- kfree(e);
- return NULL;
- }
+ if (!ctx)
+ goto out_kfree_buf;
e->ctx = ctx;
ctx->last_duration = 0;
@@ -498,6 +499,14 @@ struct vidtv_encoder
e->next = NULL;
return e;
+
+out_kfree_buf:
+ vfree(e->encoder_buf);
+
+out_kfree_e:
+ kfree(e->name);
+ kfree(e);
+ return NULL;
}
void vidtv_s302m_encoder_destroy(struct vidtv_encoder *e)
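
The fix above completes the encoder's error unwinding: the buffer allocation gains a failure check, and both failure paths now release everything allocated so far in reverse order. A generic, standalone sketch of that label-per-allocation pattern (not the driver code):

#include <stdlib.h>
#include <string.h>

struct enc {
	char *name;
	void *buf;
	void *ctx;
};

static struct enc *enc_alloc(const char *name, size_t buf_sz, size_t ctx_sz)
{
	struct enc *e = calloc(1, sizeof(*e));

	if (!e)
		return NULL;
	e->name = strdup(name);
	e->buf = calloc(1, buf_sz);
	if (!e->buf)
		goto out_free_e;
	e->ctx = calloc(1, ctx_sz);
	if (!e->ctx)
		goto out_free_buf;
	return e;

out_free_buf:
	free(e->buf);
out_free_e:
	free(e->name);	/* free(NULL) is a no-op, like kfree() */
	free(e);
	return NULL;
}

int main(void)
{
	struct enc *e = enc_alloc("s302m", 4096, 256);

	if (e) {
		free(e->ctx);
		free(e->buf);
		free(e->name);
		free(e);
	}
	return 0;
}
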
diff --git a/drivers/media/test-drivers/vimc/Kconfig b/drivers/media/test-drivers/vimc/Kconfig
index da4b2ad6e40c..0d5169819cac 100644
--- a/drivers/media/test-drivers/vimc/Kconfig
+++ b/drivers/media/test-drivers/vimc/Kconfig
@@ -1,12 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_VIMC
tristate "Virtual Media Controller Driver (VIMC)"
- depends on VIDEO_DEV && VIDEO_V4L2
+ depends on VIDEO_DEV
select FONT_SUPPORT
select FONT_8x16
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select VIDEOBUF2_VMALLOC
+ select VIDEOBUF2_DMA_CONTIG
select VIDEO_V4L2_TPG
help
Skeleton driver for Virtual Media Controller
diff --git a/drivers/media/test-drivers/vimc/vimc-capture.c b/drivers/media/test-drivers/vimc/vimc-capture.c
index 5e9fd902cd37..d1e2d0739c00 100644
--- a/drivers/media/test-drivers/vimc/vimc-capture.c
+++ b/drivers/media/test-drivers/vimc/vimc-capture.c
@@ -7,6 +7,7 @@
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-vmalloc.h>
#include "vimc-common.h"
@@ -423,14 +424,18 @@ static struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc,
/* Initialize the vb2 queue */
q = &vcap->queue;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_USERPTR;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ if (vimc_allocator == VIMC_ALLOCATOR_VMALLOC)
+ q->io_modes |= VB2_USERPTR;
q->drv_priv = vcap;
q->buf_struct_size = sizeof(struct vimc_cap_buffer);
q->ops = &vimc_cap_qops;
- q->mem_ops = &vb2_vmalloc_memops;
+ q->mem_ops = vimc_allocator == VIMC_ALLOCATOR_DMA_CONTIG
+ ? &vb2_dma_contig_memops : &vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->min_buffers_needed = 2;
q->lock = &vcap->lock;
+ q->dev = v4l2_dev->dev;
ret = vb2_queue_init(q);
if (ret) {
diff --git a/drivers/media/test-drivers/vimc/vimc-common.h b/drivers/media/test-drivers/vimc/vimc-common.h
index a289434e75ba..ba1930772589 100644
--- a/drivers/media/test-drivers/vimc/vimc-common.h
+++ b/drivers/media/test-drivers/vimc/vimc-common.h
@@ -35,6 +35,13 @@
#define VIMC_PIX_FMT_MAX_CODES 8
+extern unsigned int vimc_allocator;
+
+enum vimc_allocator_type {
+ VIMC_ALLOCATOR_VMALLOC = 0,
+ VIMC_ALLOCATOR_DMA_CONTIG = 1,
+};
+
/**
* vimc_colorimetry_clamp - Adjust colorimetry parameters
*
diff --git a/drivers/media/test-drivers/vimc/vimc-core.c b/drivers/media/test-drivers/vimc/vimc-core.c
index 4b0ae6f51d76..06edf9d4d92c 100644
--- a/drivers/media/test-drivers/vimc/vimc-core.c
+++ b/drivers/media/test-drivers/vimc/vimc-core.c
@@ -5,6 +5,7 @@
* Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
*/
+#include <linux/dma-mapping.h>
#include <linux/font.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -15,6 +16,12 @@
#include "vimc-common.h"
+unsigned int vimc_allocator;
+module_param_named(allocator, vimc_allocator, uint, 0444);
+MODULE_PARM_DESC(allocator, " memory allocator selection, default is 0.\n"
+ "\t\t 0 == vmalloc\n"
+ "\t\t 1 == dma-contig");
+
#define VIMC_MDEV_MODEL_NAME "VIMC MDEV"
#define VIMC_ENT_LINK(src, srcpad, sink, sinkpad, link_flags) { \
@@ -278,6 +285,9 @@ static int vimc_probe(struct platform_device *pdev)
tpg_set_font(font->data);
+ if (vimc_allocator == VIMC_ALLOCATOR_DMA_CONTIG)
+ dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+
vimc = kzalloc(sizeof(*vimc), GFP_KERNEL);
if (!vimc)
return -ENOMEM;
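
The new parameter is read once at probe, so the allocator is chosen at load time, e.g. modprobe vimc allocator=1 (or vimc.allocator=1 on the kernel command line when built in). Choosing dma-contig also forces a 32-bit DMA mask here and, in vimc-capture.c above, drops VB2_USERPTR and switches mem_ops to vb2_dma_contig_memops. A reduced sketch of the selection logic, with the memops shown as name strings only:

#include <stdio.h>

enum vimc_allocator_type {
	VIMC_ALLOCATOR_VMALLOC = 0,
	VIMC_ALLOCATOR_DMA_CONTIG = 1,
};

static unsigned int vimc_allocator = VIMC_ALLOCATOR_DMA_CONTIG;

int main(void)
{
	const char *mem_ops = vimc_allocator == VIMC_ALLOCATOR_DMA_CONTIG
			    ? "vb2_dma_contig_memops" : "vb2_vmalloc_memops";
	/* the USERPTR io mode is only offered with the vmalloc allocator */
	int userptr = vimc_allocator == VIMC_ALLOCATOR_VMALLOC;

	printf("mem_ops=%s userptr=%d\n", mem_ops, userptr);
	return 0;
}
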
diff --git a/drivers/media/test-drivers/vivid/Kconfig b/drivers/media/test-drivers/vivid/Kconfig
index c3abde2986b2..318799d317ba 100644
--- a/drivers/media/test-drivers/vivid/Kconfig
+++ b/drivers/media/test-drivers/vivid/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_VIVID
tristate "Virtual Video Test Driver"
- depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64 && FB
+ depends on VIDEO_DEV && !SPARC32 && !SPARC64 && FB
depends on HAS_DMA
select FONT_SUPPORT
select FONT_8x16
diff --git a/drivers/media/test-drivers/vivid/vivid-core.h b/drivers/media/test-drivers/vivid/vivid-core.h
index 45f96706edde..176b72cb143b 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.h
+++ b/drivers/media/test-drivers/vivid/vivid-core.h
@@ -307,7 +307,7 @@ struct vivid_dev {
bool dqbuf_error;
bool req_validate_error;
bool seq_wrap;
- bool time_wrap;
+ u64 time_wrap;
u64 time_wrap_offset;
unsigned perc_dropped_buffers;
enum vivid_signal_mode std_signal_mode[MAX_INPUTS];
@@ -437,6 +437,7 @@ struct vivid_dev {
bool touch_cap_seq_resync;
u32 touch_cap_seq_start;
u32 touch_cap_seq_count;
+ u32 touch_cap_with_seq_wrap_count;
bool touch_cap_streaming;
struct v4l2_fract timeperframe_tch_cap;
struct v4l2_pix_format tch_format;
@@ -524,7 +525,9 @@ struct vivid_dev {
struct task_struct *kthread_sdr_cap;
unsigned long jiffies_sdr_cap;
u32 sdr_cap_seq_offset;
+ u32 sdr_cap_seq_start;
u32 sdr_cap_seq_count;
+ u32 sdr_cap_with_seq_wrap_count;
bool sdr_cap_seq_resync;
/* RDS generator */
diff --git a/drivers/media/test-drivers/vivid/vivid-ctrls.c b/drivers/media/test-drivers/vivid/vivid-ctrls.c
index 8dc50fe22972..e7516dc1227b 100644
--- a/drivers/media/test-drivers/vivid/vivid-ctrls.c
+++ b/drivers/media/test-drivers/vivid/vivid-ctrls.c
@@ -1084,7 +1084,6 @@ static const struct v4l2_ctrl_config vivid_ctrl_display_present = {
static int vivid_streaming_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_streaming);
- u64 rem;
switch (ctrl->id) {
case VIVID_CID_DQBUF_ERROR:
@@ -1122,20 +1121,10 @@ static int vivid_streaming_s_ctrl(struct v4l2_ctrl *ctrl)
break;
case VIVID_CID_TIME_WRAP:
dev->time_wrap = ctrl->val;
- if (ctrl->val == 0) {
- dev->time_wrap_offset = 0;
- break;
- }
- /*
- * We want to set the time 16 seconds before the 32 bit tv_sec
- * value of struct timeval would wrap around. So first we
- * calculate ktime_get_ns() % ((1 << 32) * NSEC_PER_SEC), and
- * then we set the offset to ((1 << 32) - 16) * NSEC_PER_SEC).
- */
- div64_u64_rem(ktime_get_ns(),
- 0x100000000ULL * NSEC_PER_SEC, &rem);
- dev->time_wrap_offset =
- (0x100000000ULL - 16) * NSEC_PER_SEC - rem;
+ if (dev->time_wrap == 1)
+ dev->time_wrap = (1ULL << 63) - NSEC_PER_SEC * 16ULL;
+ else if (dev->time_wrap == 2)
+ dev->time_wrap = ((1ULL << 31) - 16) * NSEC_PER_SEC;
break;
}
return 0;
@@ -1208,13 +1197,20 @@ static const struct v4l2_ctrl_config vivid_ctrl_seq_wrap = {
.step = 1,
};
+static const char * const vivid_ctrl_time_wrap_strings[] = {
+ "None",
+ "64 Bit",
+ "32 Bit",
+ NULL,
+};
+
static const struct v4l2_ctrl_config vivid_ctrl_time_wrap = {
.ops = &vivid_streaming_ctrl_ops,
.id = VIVID_CID_TIME_WRAP,
.name = "Wrap Timestamp",
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .max = 1,
- .step = 1,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = ARRAY_SIZE(vivid_ctrl_time_wrap_strings) - 2,
+ .qmenu = vivid_ctrl_time_wrap_strings,
};
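
The control now stores a wrap target rather than a flag: menu entry 1 picks a point 16 seconds before the signed 64-bit nanosecond clock wraps, and entry 2 a point 16 seconds before a signed 32-bit tv_sec wraps. Each capture/output thread then sets time_wrap_offset = target - ktime_get_ns() at stream start, so reported timestamps reach the wrap while streaming. A sketch computing the two targets, plain C arithmetic mirroring the values in the hunk:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	/* menu entry 1: 16 s before the signed 64-bit ns clock wraps */
	uint64_t wrap64 = (1ULL << 63) - NSEC_PER_SEC * 16ULL;
	/* menu entry 2: 16 s before a signed 32-bit tv_sec wraps */
	uint64_t wrap32 = ((1ULL << 31) - 16) * NSEC_PER_SEC;

	printf("64-bit target: %llu ns\n", (unsigned long long)wrap64);
	printf("32-bit target: %llu ns\n", (unsigned long long)wrap32);
	return 0;
}
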
diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-cap.c b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
index 9da730ccfa94..690daada7db4 100644
--- a/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
@@ -18,6 +18,7 @@
#include <linux/freezer.h>
#include <linux/random.h>
#include <linux/v4l2-dv-timings.h>
+#include <linux/jiffies.h>
#include <asm/div64.h>
#include <media/videobuf2-vmalloc.h>
#include <media/v4l2-dv-timings.h>
@@ -719,8 +720,7 @@ static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
if (!vid_cap_buf && !vbi_cap_buf && !meta_cap_buf)
goto update_mv;
- f_time = dev->cap_frame_period * dev->vid_cap_seq_count +
- dev->cap_stream_start + dev->time_wrap_offset;
+ f_time = ktime_get_ns() + dev->time_wrap_offset;
if (vid_cap_buf) {
v4l2_ctrl_request_setup(vid_cap_buf->vb.vb2_buf.req_obj.req,
@@ -813,6 +813,10 @@ static int vivid_thread_vid_cap(void *data)
dev->cap_seq_resync = false;
dev->jiffies_vid_cap = jiffies;
dev->cap_stream_start = ktime_get_ns();
+ if (dev->time_wrap)
+ dev->time_wrap_offset = dev->time_wrap - dev->cap_stream_start;
+ else
+ dev->time_wrap_offset = 0;
vivid_cap_update_frame_period(dev);
for (;;) {
@@ -890,7 +894,7 @@ static int vivid_thread_vid_cap(void *data)
next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start;
- while (jiffies - cur_jiffies < wait_jiffies &&
+ while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
!kthread_should_stop())
schedule();
}
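
The vivid kthreads above switch from the open-coded jiffies - cur_jiffies < wait_jiffies test to time_is_after_jiffies(cur_jiffies + wait_jiffies), which <linux/jiffies.h> defines as time_before(jiffies, deadline). A standalone sketch of why the signed-subtraction comparison survives counter wrap (the names here are illustrative, not the kernel macros):

#include <stdio.h>

static int time_is_after(unsigned long now, unsigned long a)
{
	return (long)(now - a) < 0;	/* "a" is still in the future */
}

int main(void)
{
	unsigned long now = (unsigned long)-5;	/* 5 ticks before wrap */
	unsigned long deadline = now + 10;	/* wraps past zero */

	/* an unsigned now < deadline compare would be false; this is true */
	printf("deadline pending: %d\n", time_is_after(now, deadline));
	return 0;
}
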
diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-out.c b/drivers/media/test-drivers/vivid/vivid-kthread-out.c
index 79c57d14ac4e..0833e021bb11 100644
--- a/drivers/media/test-drivers/vivid/vivid-kthread-out.c
+++ b/drivers/media/test-drivers/vivid/vivid-kthread-out.c
@@ -18,6 +18,7 @@
#include <linux/freezer.h>
#include <linux/random.h>
#include <linux/v4l2-dv-timings.h>
+#include <linux/jiffies.h>
#include <asm/div64.h>
#include <media/videobuf2-vmalloc.h>
#include <media/v4l2-dv-timings.h>
@@ -154,12 +155,13 @@ static int vivid_thread_vid_out(void *data)
/* Resets frame counters */
dev->out_seq_offset = 0;
- if (dev->seq_wrap)
- dev->out_seq_count = 0xffffff80U;
+ dev->out_seq_count = 0;
dev->jiffies_vid_out = jiffies;
- dev->vid_out_seq_start = dev->vbi_out_seq_start = 0;
- dev->meta_out_seq_start = 0;
dev->out_seq_resync = false;
+ if (dev->time_wrap)
+ dev->time_wrap_offset = dev->time_wrap - ktime_get_ns();
+ else
+ dev->time_wrap_offset = 0;
for (;;) {
try_to_freeze();
@@ -233,7 +235,7 @@ static int vivid_thread_vid_out(void *data)
next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start;
- while (jiffies - cur_jiffies < wait_jiffies &&
+ while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
!kthread_should_stop())
schedule();
}
diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-touch.c b/drivers/media/test-drivers/vivid/vivid-kthread-touch.c
index 38fdfee79498..fa711ee36a3f 100644
--- a/drivers/media/test-drivers/vivid/vivid-kthread-touch.c
+++ b/drivers/media/test-drivers/vivid/vivid-kthread-touch.c
@@ -5,6 +5,7 @@
*/
#include <linux/freezer.h>
+#include <linux/jiffies.h>
#include "vivid-core.h"
#include "vivid-kthread-touch.h"
#include "vivid-touch-cap.h"
@@ -62,6 +63,10 @@ static int vivid_thread_touch_cap(void *data)
dev->touch_cap_seq_count = 0;
dev->touch_cap_seq_resync = false;
dev->jiffies_touch_cap = jiffies;
+ if (dev->time_wrap)
+ dev->time_wrap_offset = dev->time_wrap - ktime_get_ns();
+ else
+ dev->time_wrap_offset = 0;
for (;;) {
try_to_freeze();
@@ -102,6 +107,8 @@ static int vivid_thread_touch_cap(void *data)
}
dropped_bufs = buffers_since_start + dev->touch_cap_seq_offset - dev->touch_cap_seq_count;
dev->touch_cap_seq_count = buffers_since_start + dev->touch_cap_seq_offset;
+ dev->touch_cap_with_seq_wrap_count =
+ dev->touch_cap_seq_count - dev->touch_cap_seq_start;
vivid_thread_tch_cap_tick(dev, dropped_bufs);
@@ -128,7 +135,7 @@ static int vivid_thread_touch_cap(void *data)
next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start;
- while (jiffies - cur_jiffies < wait_jiffies &&
+ while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
!kthread_should_stop())
schedule();
}
@@ -143,6 +150,7 @@ int vivid_start_generating_touch_cap(struct vivid_dev *dev)
return 0;
}
+ dev->touch_cap_seq_start = dev->seq_wrap * 128;
dev->kthread_touch_cap = kthread_run(vivid_thread_touch_cap, dev,
"%s-tch-cap", dev->v4l2_dev.name);
diff --git a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
index 265db2114671..0ae5628b86c9 100644
--- a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
@@ -17,6 +17,7 @@
#include <media/v4l2-event.h>
#include <media/v4l2-dv-timings.h>
#include <linux/fixp-arith.h>
+#include <linux/jiffies.h>
#include "vivid-core.h"
#include "vivid-ctrls.h"
@@ -101,7 +102,7 @@ static void vivid_thread_sdr_cap_tick(struct vivid_dev *dev)
spin_unlock(&dev->slock);
if (sdr_cap_buf) {
- sdr_cap_buf->vb.sequence = dev->sdr_cap_seq_count;
+ sdr_cap_buf->vb.sequence = dev->sdr_cap_with_seq_wrap_count;
v4l2_ctrl_request_setup(sdr_cap_buf->vb.vb2_buf.req_obj.req,
&dev->ctrl_hdl_sdr_cap);
v4l2_ctrl_request_complete(sdr_cap_buf->vb.vb2_buf.req_obj.req,
@@ -131,10 +132,13 @@ static int vivid_thread_sdr_cap(void *data)
/* Resets frame counters */
dev->sdr_cap_seq_offset = 0;
- if (dev->seq_wrap)
- dev->sdr_cap_seq_offset = 0xffffff80U;
+ dev->sdr_cap_seq_count = 0;
dev->jiffies_sdr_cap = jiffies;
dev->sdr_cap_seq_resync = false;
+ if (dev->time_wrap)
+ dev->time_wrap_offset = dev->time_wrap - ktime_get_ns();
+ else
+ dev->time_wrap_offset = 0;
for (;;) {
try_to_freeze();
@@ -174,6 +178,7 @@ static int vivid_thread_sdr_cap(void *data)
}
dev->sdr_cap_seq_count =
buffers_since_start + dev->sdr_cap_seq_offset;
+ dev->sdr_cap_with_seq_wrap_count = dev->sdr_cap_seq_count - dev->sdr_cap_seq_start;
vivid_thread_sdr_cap_tick(dev);
mutex_unlock(&dev->mutex);
@@ -201,7 +206,7 @@ static int vivid_thread_sdr_cap(void *data)
next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start;
- while (jiffies - cur_jiffies < wait_jiffies &&
+ while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
!kthread_should_stop())
schedule();
}
@@ -263,7 +268,7 @@ static int sdr_cap_start_streaming(struct vb2_queue *vq, unsigned count)
int err = 0;
dprintk(dev, 1, "%s\n", __func__);
- dev->sdr_cap_seq_count = 0;
+ dev->sdr_cap_seq_start = dev->seq_wrap * 128;
if (dev->start_streaming_error) {
dev->start_streaming_error = false;
err = -EINVAL;
diff --git a/drivers/media/test-drivers/vivid/vivid-touch-cap.c b/drivers/media/test-drivers/vivid/vivid-touch-cap.c
index ebb00b128030..64e3e4cb30c2 100644
--- a/drivers/media/test-drivers/vivid/vivid-touch-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-touch-cap.c
@@ -262,7 +262,7 @@ void vivid_fillbuff_tch(struct vivid_dev *dev, struct vivid_buffer *buf)
__s16 *tch_buf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
- buf->vb.sequence = dev->touch_cap_seq_count;
+ buf->vb.sequence = dev->touch_cap_with_seq_wrap_count;
test_pattern = (buf->vb.sequence / TCH_SEQ_COUNT) % TEST_CASE_MAX;
test_pat_idx = buf->vb.sequence % TCH_SEQ_COUNT;
diff --git a/drivers/media/tuners/Kconfig b/drivers/media/tuners/Kconfig
index 4605bb377574..0c01b0298099 100644
--- a/drivers/media/tuners/Kconfig
+++ b/drivers/media/tuners/Kconfig
@@ -23,80 +23,80 @@ menu "Customize TV tuners"
visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT || MEDIA_SDR_SUPPORT
-config MEDIA_TUNER_SIMPLE
- tristate "Simple tuner support"
- depends on MEDIA_SUPPORT && I2C
- select MEDIA_TUNER_TDA9887
+config MEDIA_TUNER_E4000
+ tristate "Elonics E4000 silicon tuner"
+ depends on MEDIA_SUPPORT && I2C && VIDEO_DEV
+ select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Say Y here to include support for various simple tuners.
+ Elonics E4000 silicon tuner driver.
-config MEDIA_TUNER_TDA18250
- tristate "NXP TDA18250 silicon tuner"
+config MEDIA_TUNER_FC0011
+ tristate "Fitipower FC0011 silicon tuner"
depends on MEDIA_SUPPORT && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Say Y here to include support for TDA18250 tuner.
+ Fitipower FC0011 silicon tuner driver.
-config MEDIA_TUNER_TDA8290
- tristate "TDA 8290/8295 + 8275(a)/18271 tuner combo"
+config MEDIA_TUNER_FC0012
+ tristate "Fitipower FC0012 silicon tuner"
depends on MEDIA_SUPPORT && I2C
- select MEDIA_TUNER_TDA827X
- select MEDIA_TUNER_TDA18271
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Say Y here to include support for Philips TDA8290+8275(a) tuner.
+ Fitipower FC0012 silicon tuner driver.
-config MEDIA_TUNER_TDA827X
- tristate "Philips TDA827X silicon tuner"
+config MEDIA_TUNER_FC0013
+ tristate "Fitipower FC0013 silicon tuner"
depends on MEDIA_SUPPORT && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A DVB-T silicon tuner module. Say Y when you want to support this tuner.
+ Fitipower FC0013 silicon tuner driver.
-config MEDIA_TUNER_TDA18271
- tristate "NXP TDA18271 silicon tuner"
+config MEDIA_TUNER_FC2580
+ tristate "FCI FC2580 silicon tuner"
+ depends on MEDIA_SUPPORT && I2C && VIDEO_DEV
+ select REGMAP_I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ FCI FC2580 silicon tuner driver.
+
+config MEDIA_TUNER_IT913X
+ tristate "ITE Tech IT913x silicon tuner"
depends on MEDIA_SUPPORT && I2C
+ select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A silicon tuner module. Say Y when you want to support this tuner.
+ ITE Tech IT913x silicon tuner driver.
-config MEDIA_TUNER_TDA9887
- tristate "TDA 9885/6/7 analog IF demodulator"
+config MEDIA_TUNER_M88RS6000T
+ tristate "Montage M88RS6000 internal tuner"
depends on MEDIA_SUPPORT && I2C
+ select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Say Y here to include support for Philips TDA9885/6/7
- analog IF demodulator.
+ Montage M88RS6000 internal tuner.
-config MEDIA_TUNER_TEA5761
- tristate "TEA 5761 radio tuner"
+config MEDIA_TUNER_MAX2165
+ tristate "Maxim MAX2165 silicon tuner"
depends on MEDIA_SUPPORT && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Say Y here to include support for the Philips TEA5761 radio tuner.
+ A driver for the silicon tuner MAX2165 from Maxim.
-config MEDIA_TUNER_TEA5767
- tristate "TEA 5767 radio tuner"
+config MEDIA_TUNER_MC44S803
+ tristate "Freescale MC44S803 Low Power CMOS Broadband tuners"
depends on MEDIA_SUPPORT && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Say Y here to include support for the Philips TEA5767 radio tuner.
+ Say Y here to support the Freescale MC44S803 based tuners
config MEDIA_TUNER_MSI001
tristate "Mirics MSi001"
- depends on MEDIA_SUPPORT && SPI && VIDEO_V4L2
+ depends on MEDIA_SUPPORT && SPI && VIDEO_DEV
default m if !MEDIA_SUBDRV_AUTOSELECT
help
Mirics MSi001 silicon tuner driver.
-config MEDIA_TUNER_MT20XX
- tristate "Microtune 2032 / 2050 tuners"
- depends on MEDIA_SUPPORT && I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- Say Y here to include support for the MT2032 / MT2050 tuner.
-
config MEDIA_TUNER_MT2060
tristate "Microtune MT2060 silicon IF tuner"
depends on MEDIA_SUPPORT && I2C
@@ -111,12 +111,12 @@ config MEDIA_TUNER_MT2063
help
A driver for the silicon IF tuner MT2063 from Microtune.
-config MEDIA_TUNER_MT2266
- tristate "Microtune MT2266 silicon tuner"
+config MEDIA_TUNER_MT20XX
+ tristate "Microtune 2032 / 2050 tuners"
depends on MEDIA_SUPPORT && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A driver for the silicon baseband tuner MT2266 from Microtune.
+ Say Y here to include support for the MT2032 / MT2050 tuner.
config MEDIA_TUNER_MT2131
tristate "Microtune MT2131 silicon tuner"
@@ -125,37 +125,19 @@ config MEDIA_TUNER_MT2131
help
A driver for the silicon baseband tuner MT2131 from Microtune.
-config MEDIA_TUNER_QT1010
- tristate "Quantek QT1010 silicon tuner"
- depends on MEDIA_SUPPORT && I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- A driver for the silicon tuner QT1010 from Quantek.
-
-config MEDIA_TUNER_XC2028
- tristate "XCeive xc2028/xc3028 tuners"
- depends on MEDIA_SUPPORT && I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- Say Y here to include support for the xc2028/xc3028 tuners.
-
-config MEDIA_TUNER_XC5000
- tristate "Xceive XC5000 silicon tuner"
+config MEDIA_TUNER_MT2266
+ tristate "Microtune MT2266 silicon tuner"
depends on MEDIA_SUPPORT && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A driver for the silicon tuner XC5000 from Xceive.
- This device is only used inside a SiP called together with a
- demodulator for now.
+ A driver for the silicon baseband tuner MT2266 from Microtune.
-config MEDIA_TUNER_XC4000
- tristate "Xceive XC4000 silicon tuner"
+config MEDIA_TUNER_MXL301RF
+ tristate "MaxLinear MxL301RF tuner"
depends on MEDIA_SUPPORT && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A driver for the silicon tuner XC4000 from Xceive.
- This device is only used inside a SiP called together with a
- demodulator for now.
+ MaxLinear MxL301RF OFDM tuner driver.
config MEDIA_TUNER_MXL5005S
tristate "MaxLinear MSL5005S silicon tuner"
@@ -171,47 +153,49 @@ config MEDIA_TUNER_MXL5007T
help
A driver for the silicon tuner MxL5007T from MaxLinear.
-config MEDIA_TUNER_MC44S803
- tristate "Freescale MC44S803 Low Power CMOS Broadband tuners"
+config MEDIA_TUNER_QM1D1B0004
+ tristate "Sharp QM1D1B0004 tuner"
depends on MEDIA_SUPPORT && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Say Y here to support the Freescale MC44S803 based tuners
+ Sharp QM1D1B0004 ISDB-S tuner driver.
-config MEDIA_TUNER_MAX2165
- tristate "Maxim MAX2165 silicon tuner"
+config MEDIA_TUNER_QM1D1C0042
+ tristate "Sharp QM1D1C0042 tuner"
depends on MEDIA_SUPPORT && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A driver for the silicon tuner MAX2165 from Maxim.
+ Sharp QM1D1C0042 trellis coded 8PSK tuner driver.
-config MEDIA_TUNER_TDA18218
- tristate "NXP TDA18218 silicon tuner"
+config MEDIA_TUNER_QT1010
+ tristate "Quantek QT1010 silicon tuner"
depends on MEDIA_SUPPORT && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- NXP TDA18218 silicon tuner driver.
+ A driver for the silicon tuner QT1010 from Quantek.
-config MEDIA_TUNER_FC0011
- tristate "Fitipower FC0011 silicon tuner"
+config MEDIA_TUNER_R820T
+ tristate "Rafael Micro R820T silicon tuner"
depends on MEDIA_SUPPORT && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
+ select BITREVERSE
help
- Fitipower FC0011 silicon tuner driver.
+ Rafael Micro R820T silicon tuner driver.
-config MEDIA_TUNER_FC0012
- tristate "Fitipower FC0012 silicon tuner"
+config MEDIA_TUNER_SI2157
+ tristate "Silicon Labs Si2157 silicon tuner"
depends on MEDIA_SUPPORT && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Fitipower FC0012 silicon tuner driver.
+ Silicon Labs Si2157 silicon tuner driver.
-config MEDIA_TUNER_FC0013
- tristate "Fitipower FC0013 silicon tuner"
+config MEDIA_TUNER_SIMPLE
+ tristate "Simple tuner support"
depends on MEDIA_SUPPORT && I2C
+ select MEDIA_TUNER_TDA9887
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Fitipower FC0013 silicon tuner driver.
+ Say Y here to include support for various simple tuners.
config MEDIA_TUNER_TDA18212
tristate "NXP TDA18212 silicon tuner"
@@ -221,79 +205,96 @@ config MEDIA_TUNER_TDA18212
help
NXP TDA18212 silicon tuner driver.
-config MEDIA_TUNER_E4000
- tristate "Elonics E4000 silicon tuner"
- depends on MEDIA_SUPPORT && I2C && VIDEO_V4L2
- select REGMAP_I2C
+config MEDIA_TUNER_TDA18218
+ tristate "NXP TDA18218 silicon tuner"
+ depends on MEDIA_SUPPORT && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Elonics E4000 silicon tuner driver.
+ NXP TDA18218 silicon tuner driver.
-config MEDIA_TUNER_FC2580
- tristate "FCI FC2580 silicon tuner"
- depends on MEDIA_SUPPORT && I2C && VIDEO_V4L2
- select REGMAP_I2C
+config MEDIA_TUNER_TDA18250
+ tristate "NXP TDA18250 silicon tuner"
+ depends on MEDIA_SUPPORT && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- FCI FC2580 silicon tuner driver.
+ Say Y here to include support for TDA18250 tuner.
-config MEDIA_TUNER_M88RS6000T
- tristate "Montage M88RS6000 internal tuner"
+config MEDIA_TUNER_TDA18271
+ tristate "NXP TDA18271 silicon tuner"
depends on MEDIA_SUPPORT && I2C
- select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Montage M88RS6000 internal tuner.
+ A silicon tuner module. Say Y when you want to support this tuner.
-config MEDIA_TUNER_TUA9001
- tristate "Infineon TUA9001 silicon tuner"
+config MEDIA_TUNER_TDA827X
+ tristate "Philips TDA827X silicon tuner"
depends on MEDIA_SUPPORT && I2C
- select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Infineon TUA 9001 silicon tuner driver.
+ A DVB-T silicon tuner module. Say Y when you want to support this tuner.
-config MEDIA_TUNER_SI2157
- tristate "Silicon Labs Si2157 silicon tuner"
+config MEDIA_TUNER_TDA8290
+ tristate "TDA 8290/8295 + 8275(a)/18271 tuner combo"
depends on MEDIA_SUPPORT && I2C
+ select MEDIA_TUNER_TDA827X
+ select MEDIA_TUNER_TDA18271
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Silicon Labs Si2157 silicon tuner driver.
+ Say Y here to include support for Philips TDA8290+8275(a) tuner.
-config MEDIA_TUNER_IT913X
- tristate "ITE Tech IT913x silicon tuner"
+config MEDIA_TUNER_TDA9887
+ tristate "TDA 9885/6/7 analog IF demodulator"
depends on MEDIA_SUPPORT && I2C
- select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- ITE Tech IT913x silicon tuner driver.
+ Say Y here to include support for Philips TDA9885/6/7
+ analog IF demodulator.
-config MEDIA_TUNER_R820T
- tristate "Rafael Micro R820T silicon tuner"
+config MEDIA_TUNER_TEA5761
+ tristate "TEA 5761 radio tuner"
depends on MEDIA_SUPPORT && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
- select BITREVERSE
help
- Rafael Micro R820T silicon tuner driver.
+ Say Y here to include support for the Philips TEA5761 radio tuner.
-config MEDIA_TUNER_MXL301RF
- tristate "MaxLinear MxL301RF tuner"
+config MEDIA_TUNER_TEA5767
+ tristate "TEA 5767 radio tuner"
depends on MEDIA_SUPPORT && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- MaxLinear MxL301RF OFDM tuner driver.
+ Say Y here to include support for the Philips TEA5767 radio tuner.
-config MEDIA_TUNER_QM1D1C0042
- tristate "Sharp QM1D1C0042 tuner"
+config MEDIA_TUNER_TUA9001
+ tristate "Infineon TUA9001 silicon tuner"
depends on MEDIA_SUPPORT && I2C
+ select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Sharp QM1D1C0042 trellis coded 8PSK tuner driver.
+ Infineon TUA 9001 silicon tuner driver.
-config MEDIA_TUNER_QM1D1B0004
- tristate "Sharp QM1D1B0004 tuner"
+config MEDIA_TUNER_XC2028
+ tristate "XCeive xc2028/xc3028 tuners"
depends on MEDIA_SUPPORT && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Sharp QM1D1B0004 ISDB-S tuner driver.
+ Say Y here to include support for the xc2028/xc3028 tuners.
+
+config MEDIA_TUNER_XC4000
+ tristate "Xceive XC4000 silicon tuner"
+ depends on MEDIA_SUPPORT && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ A driver for the silicon tuner XC4000 from Xceive.
+	  This device is only used inside a SiP together with a
+ demodulator for now.
+
+config MEDIA_TUNER_XC5000
+ tristate "Xceive XC5000 silicon tuner"
+ depends on MEDIA_SUPPORT && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ A driver for the silicon tuner XC5000 from Xceive.
+	  This device is only used inside a SiP together with a
+ demodulator for now.
+
endmenu
diff --git a/drivers/media/tuners/Makefile b/drivers/media/tuners/Makefile
index 7b4f8423501e..bd350a285aad 100644
--- a/drivers/media/tuners/Makefile
+++ b/drivers/media/tuners/Makefile
@@ -3,46 +3,46 @@
# Makefile for common V4L/DVB tuners
#
+ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
tda18271-objs := tda18271-maps.o tda18271-common.o tda18271-fe.o
-obj-$(CONFIG_MEDIA_TUNER_XC2028) += tuner-xc2028.o
-obj-$(CONFIG_MEDIA_TUNER_SIMPLE) += tuner-simple.o
-# tuner-types will be merged into tuner-simple, in the future
-obj-$(CONFIG_MEDIA_TUNER_SIMPLE) += tuner-types.o
-obj-$(CONFIG_MEDIA_TUNER_MT20XX) += mt20xx.o
-obj-$(CONFIG_MEDIA_TUNER_TDA8290) += tda8290.o
-obj-$(CONFIG_MEDIA_TUNER_TEA5767) += tea5767.o
-obj-$(CONFIG_MEDIA_TUNER_TEA5761) += tea5761.o
-obj-$(CONFIG_MEDIA_TUNER_TDA9887) += tda9887.o
-obj-$(CONFIG_MEDIA_TUNER_TDA827X) += tda827x.o
-obj-$(CONFIG_MEDIA_TUNER_TDA18271) += tda18271.o
-obj-$(CONFIG_MEDIA_TUNER_XC5000) += xc5000.o
-obj-$(CONFIG_MEDIA_TUNER_XC4000) += xc4000.o
-obj-$(CONFIG_MEDIA_TUNER_MSI001) += msi001.o
-obj-$(CONFIG_MEDIA_TUNER_MT2060) += mt2060.o
-obj-$(CONFIG_MEDIA_TUNER_MT2063) += mt2063.o
-obj-$(CONFIG_MEDIA_TUNER_MT2266) += mt2266.o
-obj-$(CONFIG_MEDIA_TUNER_QT1010) += qt1010.o
-obj-$(CONFIG_MEDIA_TUNER_MT2131) += mt2131.o
-obj-$(CONFIG_MEDIA_TUNER_MXL5005S) += mxl5005s.o
-obj-$(CONFIG_MEDIA_TUNER_MXL5007T) += mxl5007t.o
-obj-$(CONFIG_MEDIA_TUNER_MC44S803) += mc44s803.o
-obj-$(CONFIG_MEDIA_TUNER_MAX2165) += max2165.o
-obj-$(CONFIG_MEDIA_TUNER_TDA18218) += tda18218.o
-obj-$(CONFIG_MEDIA_TUNER_TDA18212) += tda18212.o
+# Please keep it alphabetically sorted by Kconfig name
+# (e. g. LC_ALL=C sort Makefile)
obj-$(CONFIG_MEDIA_TUNER_E4000) += e4000.o
-obj-$(CONFIG_MEDIA_TUNER_FC2580) += fc2580.o
-obj-$(CONFIG_MEDIA_TUNER_TUA9001) += tua9001.o
-obj-$(CONFIG_MEDIA_TUNER_SI2157) += si2157.o
obj-$(CONFIG_MEDIA_TUNER_FC0011) += fc0011.o
obj-$(CONFIG_MEDIA_TUNER_FC0012) += fc0012.o
obj-$(CONFIG_MEDIA_TUNER_FC0013) += fc0013.o
+obj-$(CONFIG_MEDIA_TUNER_FC2580) += fc2580.o
obj-$(CONFIG_MEDIA_TUNER_IT913X) += it913x.o
-obj-$(CONFIG_MEDIA_TUNER_R820T) += r820t.o
+obj-$(CONFIG_MEDIA_TUNER_M88RS6000T) += m88rs6000t.o
+obj-$(CONFIG_MEDIA_TUNER_MAX2165) += max2165.o
+obj-$(CONFIG_MEDIA_TUNER_MC44S803) += mc44s803.o
+obj-$(CONFIG_MEDIA_TUNER_MSI001) += msi001.o
+obj-$(CONFIG_MEDIA_TUNER_MT2060) += mt2060.o
+obj-$(CONFIG_MEDIA_TUNER_MT2063) += mt2063.o
+obj-$(CONFIG_MEDIA_TUNER_MT20XX) += mt20xx.o
+obj-$(CONFIG_MEDIA_TUNER_MT2131) += mt2131.o
+obj-$(CONFIG_MEDIA_TUNER_MT2266) += mt2266.o
obj-$(CONFIG_MEDIA_TUNER_MXL301RF) += mxl301rf.o
-obj-$(CONFIG_MEDIA_TUNER_QM1D1C0042) += qm1d1c0042.o
+obj-$(CONFIG_MEDIA_TUNER_MXL5005S) += mxl5005s.o
+obj-$(CONFIG_MEDIA_TUNER_MXL5007T) += mxl5007t.o
obj-$(CONFIG_MEDIA_TUNER_QM1D1B0004) += qm1d1b0004.o
-obj-$(CONFIG_MEDIA_TUNER_M88RS6000T) += m88rs6000t.o
+obj-$(CONFIG_MEDIA_TUNER_QM1D1C0042) += qm1d1c0042.o
+obj-$(CONFIG_MEDIA_TUNER_QT1010) += qt1010.o
+obj-$(CONFIG_MEDIA_TUNER_R820T) += r820t.o
+obj-$(CONFIG_MEDIA_TUNER_SI2157) += si2157.o
+obj-$(CONFIG_MEDIA_TUNER_SIMPLE) += tuner-simple.o
+obj-$(CONFIG_MEDIA_TUNER_SIMPLE) += tuner-types.o
+obj-$(CONFIG_MEDIA_TUNER_TDA18212) += tda18212.o
+obj-$(CONFIG_MEDIA_TUNER_TDA18218) += tda18218.o
obj-$(CONFIG_MEDIA_TUNER_TDA18250) += tda18250.o
-
-ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
+obj-$(CONFIG_MEDIA_TUNER_TDA18271) += tda18271.o
+obj-$(CONFIG_MEDIA_TUNER_TDA827X) += tda827x.o
+obj-$(CONFIG_MEDIA_TUNER_TDA8290) += tda8290.o
+obj-$(CONFIG_MEDIA_TUNER_TDA9887) += tda9887.o
+obj-$(CONFIG_MEDIA_TUNER_TEA5761) += tea5761.o
+obj-$(CONFIG_MEDIA_TUNER_TEA5767) += tea5767.o
+obj-$(CONFIG_MEDIA_TUNER_TUA9001) += tua9001.o
+obj-$(CONFIG_MEDIA_TUNER_XC2028) += xc2028.o
+obj-$(CONFIG_MEDIA_TUNER_XC4000) += xc4000.o
+obj-$(CONFIG_MEDIA_TUNER_XC5000) += xc5000.o
diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
index 3f1f9af92bc9..a3a8d051dc6c 100644
--- a/drivers/media/tuners/e4000.c
+++ b/drivers/media/tuners/e4000.c
@@ -257,7 +257,7 @@ err:
/*
* V4L2 API
*/
-#if IS_ENABLED(CONFIG_VIDEO_V4L2)
+#if IS_ENABLED(CONFIG_VIDEO_DEV)
static const struct v4l2_frequency_band bands[] = {
{
.type = V4L2_TUNER_RF,
@@ -654,7 +654,7 @@ static int e4000_probe(struct i2c_client *client,
if (ret)
goto err_kfree;
-#if IS_ENABLED(CONFIG_VIDEO_V4L2)
+#if IS_ENABLED(CONFIG_VIDEO_DEV)
/* Register controls */
v4l2_ctrl_handler_init(&dev->hdl, 9);
dev->bandwidth_auto = v4l2_ctrl_new_std(&dev->hdl, &e4000_ctrl_ops,
@@ -713,7 +713,7 @@ static int e4000_remove(struct i2c_client *client)
dev_dbg(&client->dev, "\n");
-#if IS_ENABLED(CONFIG_VIDEO_V4L2)
+#if IS_ENABLED(CONFIG_VIDEO_DEV)
v4l2_ctrl_handler_free(&dev->hdl);
#endif
kfree(dev);
diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
index 7639a305048f..1b5961bdf2d5 100644
--- a/drivers/media/tuners/fc2580.c
+++ b/drivers/media/tuners/fc2580.c
@@ -357,7 +357,7 @@ static const struct dvb_tuner_ops fc2580_dvb_tuner_ops = {
/*
* V4L2 API
*/
-#if IS_ENABLED(CONFIG_VIDEO_V4L2)
+#if IS_ENABLED(CONFIG_VIDEO_DEV)
static const struct v4l2_frequency_band bands[] = {
{
.type = V4L2_TUNER_RF,
@@ -552,7 +552,7 @@ static int fc2580_probe(struct i2c_client *client,
goto err_kfree;
}
-#if IS_ENABLED(CONFIG_VIDEO_V4L2)
+#if IS_ENABLED(CONFIG_VIDEO_DEV)
/* Register controls */
v4l2_ctrl_handler_init(&dev->hdl, 2);
dev->bandwidth_auto = v4l2_ctrl_new_std(&dev->hdl, &fc2580_ctrl_ops,
@@ -594,7 +594,7 @@ static int fc2580_remove(struct i2c_client *client)
dev_dbg(&client->dev, "\n");
-#if IS_ENABLED(CONFIG_VIDEO_V4L2)
+#if IS_ENABLED(CONFIG_VIDEO_DEV)
v4l2_ctrl_handler_free(&dev->hdl);
#endif
kfree(dev);
diff --git a/drivers/media/tuners/msi001.c b/drivers/media/tuners/msi001.c
index 44247049a319..ad6c72c1ed04 100644
--- a/drivers/media/tuners/msi001.c
+++ b/drivers/media/tuners/msi001.c
@@ -472,7 +472,7 @@ err:
return ret;
}
-static int msi001_remove(struct spi_device *spi)
+static void msi001_remove(struct spi_device *spi)
{
struct v4l2_subdev *sd = spi_get_drvdata(spi);
struct msi001_dev *dev = sd_to_msi001_dev(sd);
@@ -486,7 +486,6 @@ static int msi001_remove(struct spi_device *spi)
v4l2_device_unregister_subdev(&dev->sd);
v4l2_ctrl_handler_free(&dev->hdl);
kfree(dev);
- return 0;
}
static const struct spi_device_id msi001_id_table[] = {
diff --git a/drivers/media/tuners/tuner-types.c b/drivers/media/tuners/tuner-types.c
index 0ed2c5bc082e..ff5a6c0acdd4 100644
--- a/drivers/media/tuners/tuner-types.c
+++ b/drivers/media/tuners/tuner-types.c
@@ -1831,7 +1831,7 @@ struct tunertype tuners[] = {
},
[TUNER_XC2028] = { /* Xceive 2028 */
.name = "Xceive xc2028/xc3028 tuner",
- /* see tuner-xc2028.c for details */
+ /* see xc2028.c for details */
},
[TUNER_THOMSON_FE6600] = { /* Thomson PAL / DVB-T */
.name = "Thomson FE6600",
diff --git a/drivers/media/tuners/tuner-xc2028-types.h b/drivers/media/tuners/xc2028-types.h
index fcca39d3e006..63a03de1e97b 100644
--- a/drivers/media/tuners/tuner-xc2028-types.h
+++ b/drivers/media/tuners/xc2028-types.h
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * tuner-xc2028_types
+ * xc2028_types
*
- * This file includes internal tipes to be used inside tuner-xc2028.
- * Shouldn't be included outside tuner-xc2028
+ * This file includes internal types to be used inside xc2028.
+ * Shouldn't be included outside xc2028.
*
* Copyright (c) 2007-2008 Mauro Carvalho Chehab <mchehab@kernel.org>
*/
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/xc2028.c
index 574c3bb135d7..69c2e1b99bf1 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/xc2028.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-// tuner-xc2028
+// xc2028
//
// Copyright (c) 2007-2008 Mauro Carvalho Chehab <mchehab@kernel.org>
//
@@ -16,8 +16,8 @@
#include <linux/slab.h>
#include <asm/unaligned.h>
#include "tuner-i2c.h"
-#include "tuner-xc2028.h"
-#include "tuner-xc2028-types.h"
+#include "xc2028.h"
+#include "xc2028-types.h"
#include <linux/dvb/frontend.h>
#include <media/dvb_frontend.h>
diff --git a/drivers/media/tuners/tuner-xc2028.h b/drivers/media/tuners/xc2028.h
index 2dd45d0765d7..072faae7a954 100644
--- a/drivers/media/tuners/tuner-xc2028.h
+++ b/drivers/media/tuners/xc2028.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * tuner-xc2028
+ * xc2028
*
* Copyright (c) 2007-2008 Mauro Carvalho Chehab <mchehab@kernel.org>
*/
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index d9606738ce43..a04dfd5799f7 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -22,7 +22,7 @@
#include "xc4000.h"
#include "tuner-i2c.h"
-#include "tuner-xc2028-types.h"
+#include "xc2028-types.h"
static int debug;
module_param(debug, int, 0644);
diff --git a/drivers/media/usb/Kconfig b/drivers/media/usb/Kconfig
index f97153df3c84..8de08704f8e4 100644
--- a/drivers/media/usb/Kconfig
+++ b/drivers/media/usb/Kconfig
@@ -12,53 +12,64 @@ if MEDIA_USB_SUPPORT
if MEDIA_CAMERA_SUPPORT
comment "Webcam devices"
-source "drivers/media/usb/uvc/Kconfig"
+
+source "drivers/media/usb/cpia2/Kconfig"
source "drivers/media/usb/gspca/Kconfig"
source "drivers/media/usb/pwc/Kconfig"
-source "drivers/media/usb/cpia2/Kconfig"
-source "drivers/media/usb/zr364xx/Kconfig"
-source "drivers/media/usb/stkwebcam/Kconfig"
source "drivers/media/usb/s2255/Kconfig"
+source "drivers/media/usb/stkwebcam/Kconfig"
source "drivers/media/usb/usbtv/Kconfig"
+source "drivers/media/usb/uvc/Kconfig"
+source "drivers/media/usb/zr364xx/Kconfig"
+
endif
if MEDIA_ANALOG_TV_SUPPORT
comment "Analog TV USB devices"
-source "drivers/media/usb/pvrusb2/Kconfig"
+
+source "drivers/media/usb/go7007/Kconfig"
source "drivers/media/usb/hdpvr/Kconfig"
+source "drivers/media/usb/pvrusb2/Kconfig"
source "drivers/media/usb/stk1160/Kconfig"
-source "drivers/media/usb/go7007/Kconfig"
+
endif
if (MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT)
comment "Analog/digital TV USB devices"
+
source "drivers/media/usb/au0828/Kconfig"
source "drivers/media/usb/cx231xx/Kconfig"
source "drivers/media/usb/tm6000/Kconfig"
-endif
+endif
if I2C && MEDIA_DIGITAL_TV_SUPPORT
comment "Digital TV USB devices"
-source "drivers/media/usb/dvb-usb/Kconfig"
+
+source "drivers/media/usb/as102/Kconfig"
+source "drivers/media/usb/b2c2/Kconfig"
source "drivers/media/usb/dvb-usb-v2/Kconfig"
+source "drivers/media/usb/dvb-usb/Kconfig"
+source "drivers/media/usb/siano/Kconfig"
source "drivers/media/usb/ttusb-budget/Kconfig"
source "drivers/media/usb/ttusb-dec/Kconfig"
-source "drivers/media/usb/siano/Kconfig"
-source "drivers/media/usb/b2c2/Kconfig"
-source "drivers/media/usb/as102/Kconfig"
+
endif
if (MEDIA_CAMERA_SUPPORT || MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT)
comment "Webcam, TV (analog/digital) USB devices"
+
source "drivers/media/usb/em28xx/Kconfig"
+
endif
if MEDIA_SDR_SUPPORT
comment "Software defined radio USB devices"
+
source "drivers/media/usb/airspy/Kconfig"
source "drivers/media/usb/hackrf/Kconfig"
source "drivers/media/usb/msi2500/Kconfig"
+
endif
endif #MEDIA_USB_SUPPORT
diff --git a/drivers/media/usb/Makefile b/drivers/media/usb/Makefile
index 3eaff3149ef4..044bd46c799c 100644
--- a/drivers/media/usb/Makefile
+++ b/drivers/media/usb/Makefile
@@ -3,24 +3,34 @@
# Makefile for the USB media device drivers
#
-# DVB USB-only drivers
-obj-y += ttusb-dec/ ttusb-budget/ dvb-usb/ dvb-usb-v2/ siano/ b2c2/
-obj-y += zr364xx/ stkwebcam/ s2255/
+# DVB USB-only drivers. Please keep it alphabetically sorted by directory name
+# (e. g. LC_ALL=C sort Makefile)
+obj-y += b2c2/
+obj-y += dvb-usb/
+obj-y += dvb-usb-v2/
+obj-y += s2255/
+obj-y += siano/
+obj-y += stkwebcam/
+obj-y += ttusb-budget/
+obj-y += ttusb-dec/
+obj-y += zr364xx/
-obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/
-obj-$(CONFIG_USB_GSPCA) += gspca/
-obj-$(CONFIG_USB_PWC) += pwc/
-obj-$(CONFIG_USB_AIRSPY) += airspy/
-obj-$(CONFIG_USB_HACKRF) += hackrf/
-obj-$(CONFIG_USB_MSI2500) += msi2500/
-obj-$(CONFIG_VIDEO_CPIA2) += cpia2/
+# Please keep it alphabetically sorted by Kconfig name
+# (e. g. LC_ALL=C sort Makefile)
+obj-$(CONFIG_DVB_AS102) += as102/
+obj-$(CONFIG_USB_AIRSPY) += airspy/
+obj-$(CONFIG_USB_GSPCA) += gspca/
+obj-$(CONFIG_USB_HACKRF) += hackrf/
+obj-$(CONFIG_USB_MSI2500) += msi2500/
+obj-$(CONFIG_USB_PWC) += pwc/
+obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/
obj-$(CONFIG_VIDEO_AU0828) += au0828/
-obj-$(CONFIG_VIDEO_HDPVR) += hdpvr/
+obj-$(CONFIG_VIDEO_CPIA2) += cpia2/
+obj-$(CONFIG_VIDEO_CX231XX) += cx231xx/
+obj-$(CONFIG_VIDEO_EM28XX) += em28xx/
+obj-$(CONFIG_VIDEO_GO7007) += go7007/
+obj-$(CONFIG_VIDEO_HDPVR) += hdpvr/
obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2/
obj-$(CONFIG_VIDEO_STK1160) += stk1160/
-obj-$(CONFIG_VIDEO_CX231XX) += cx231xx/
obj-$(CONFIG_VIDEO_TM6000) += tm6000/
-obj-$(CONFIG_VIDEO_EM28XX) += em28xx/
obj-$(CONFIG_VIDEO_USBTV) += usbtv/
-obj-$(CONFIG_VIDEO_GO7007) += go7007/
-obj-$(CONFIG_DVB_AS102) += as102/
diff --git a/drivers/media/usb/airspy/Kconfig b/drivers/media/usb/airspy/Kconfig
index 458345217f78..0662d8701c44 100644
--- a/drivers/media/usb/airspy/Kconfig
+++ b/drivers/media/usb/airspy/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config USB_AIRSPY
tristate "AirSpy"
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
select VIDEOBUF2_VMALLOC
help
This is a video4linux2 driver for AirSpy SDR device.
diff --git a/drivers/media/usb/au0828/Kconfig b/drivers/media/usb/au0828/Kconfig
index 05cc6c48c26f..31799b6ff91f 100644
--- a/drivers/media/usb/au0828/Kconfig
+++ b/drivers/media/usb/au0828/Kconfig
@@ -2,12 +2,12 @@
config VIDEO_AU0828
tristate "Auvitek AU0828 support"
- depends on I2C && INPUT && DVB_CORE && USB && VIDEO_V4L2
+ depends on I2C && INPUT && DVB_CORE && USB && VIDEO_DEV
select MEDIA_CONTROLLER
select MEDIA_CONTROLLER_DVB
select I2C_ALGOBIT
select VIDEO_TVEEPROM
- select VIDEOBUF2_VMALLOC if VIDEO_V4L2
+ select VIDEOBUF2_VMALLOC if VIDEO_DEV
select DVB_AU8522_DTV if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_XC5000 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MXL5007T if MEDIA_SUBDRV_AUTOSELECT
@@ -22,7 +22,7 @@ config VIDEO_AU0828
config VIDEO_AU0828_V4L2
bool "Auvitek AU0828 v4l2 analog video support"
depends on VIDEO_AU0828
- depends on VIDEO_V4L2=y || VIDEO_V4L2=VIDEO_AU0828
+ depends on VIDEO_DEV=y || VIDEO_DEV=VIDEO_AU0828
select DVB_AU8522_V4L if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TUNER
default y
diff --git a/drivers/media/usb/cpia2/Kconfig b/drivers/media/usb/cpia2/Kconfig
index e2c18ab0262b..da2c6862b4a2 100644
--- a/drivers/media/usb/cpia2/Kconfig
+++ b/drivers/media/usb/cpia2/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_CPIA2
tristate "CPiA2 Video For Linux"
- depends on VIDEO_DEV && USB && VIDEO_V4L2
+ depends on USB && VIDEO_DEV
help
This is the video4linux driver for cameras based on Vision's CPiA2
(Colour Processor Interface ASIC), such as the Digital Blue QX5
diff --git a/drivers/media/usb/dvb-usb-v2/Kconfig b/drivers/media/usb/dvb-usb-v2/Kconfig
index 60ca8b9d070b..4eadc9539b4c 100644
--- a/drivers/media/usb/dvb-usb-v2/Kconfig
+++ b/drivers/media/usb/dvb-usb-v2/Kconfig
@@ -14,6 +14,8 @@ config DVB_USB_V2
Say Y if you own a USB DVB device.
+if DVB_USB_V2
+
config DVB_USB_AF9015
tristate "Afatech AF9015 DVB-T USB2.0 support"
depends on DVB_USB_V2 && I2C_MUX
@@ -40,7 +42,7 @@ config DVB_USB_AF9035
select MEDIA_TUNER_FC0011 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MXL5007T if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA18218 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_FC2580 if (MEDIA_SUBDRV_AUTOSELECT && VIDEO_V4L2)
+ select MEDIA_TUNER_FC2580 if (MEDIA_SUBDRV_AUTOSELECT && VIDEO_DEV)
select MEDIA_TUNER_IT913X if MEDIA_SUBDRV_AUTOSELECT
help
Say Y here to support the Afatech AF9035 based DVB USB receiver.
@@ -87,6 +89,17 @@ config DVB_USB_CE6230
help
Say Y here to support the Intel CE6230 DVB-T USB2.0 receiver
+config DVB_USB_DVBSKY
+ tristate "DVBSky USB support"
+ depends on DVB_USB_V2
+ select DVB_M88DS3103 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_SI2168 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_SP2 if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y here to support the USB receivers from DVBSky.
+
config DVB_USB_EC168
tristate "E3C EC168 DVB-T USB2.0 support"
depends on DVB_USB_V2
@@ -139,12 +152,12 @@ config DVB_USB_RTL28XXU
select DVB_CXD2841ER if MEDIA_SUBDRV_AUTOSELECT
select DVB_RTL2830
select DVB_RTL2832
- select DVB_RTL2832_SDR if (MEDIA_SUBDRV_AUTOSELECT && MEDIA_SDR_SUPPORT && VIDEO_V4L2)
+ select DVB_RTL2832_SDR if (MEDIA_SUBDRV_AUTOSELECT && MEDIA_SDR_SUPPORT && VIDEO_DEV)
select DVB_SI2168 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_E4000 if (MEDIA_SUBDRV_AUTOSELECT && VIDEO_V4L2)
+ select MEDIA_TUNER_E4000 if (MEDIA_SUBDRV_AUTOSELECT && VIDEO_DEV)
select MEDIA_TUNER_FC0012 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_FC0013 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_FC2580 if (MEDIA_SUBDRV_AUTOSELECT && VIDEO_V4L2)
+ select MEDIA_TUNER_FC2580 if (MEDIA_SUBDRV_AUTOSELECT && VIDEO_DEV)
select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MXL5005S if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_QT1010 if MEDIA_SUBDRV_AUTOSELECT
@@ -154,17 +167,6 @@ config DVB_USB_RTL28XXU
help
Say Y here to support the Realtek RTL28xxU DVB USB receiver.
-config DVB_USB_DVBSKY
- tristate "DVBSky USB support"
- depends on DVB_USB_V2
- select DVB_M88DS3103 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_SI2168 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_SP2 if MEDIA_SUBDRV_AUTOSELECT
- help
- Say Y here to support the USB receivers from DVBSky.
-
config DVB_USB_ZD1301
tristate "ZyDAS ZD1301"
depends on DVB_USB_V2
@@ -172,3 +174,5 @@ config DVB_USB_ZD1301
select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
help
Say Y here to support the ZyDAS ZD1301 DVB USB receiver.
+
+endif
diff --git a/drivers/media/usb/dvb-usb/Kconfig b/drivers/media/usb/dvb-usb/Kconfig
index 7498110142e4..f10fe27e2a4d 100644
--- a/drivers/media/usb/dvb-usb/Kconfig
+++ b/drivers/media/usb/dvb-usb/Kconfig
@@ -22,15 +22,7 @@ config DVB_USB_DEBUG
Say Y if you want to enable debugging. See modinfo dvb-usb (and the
appropriate drivers) for debug levels.
-config DVB_USB_DIB3000MC
- tristate
- depends on DVB_USB
- select DVB_DIB3000MC
- help
- This is a module with helper functions for accessing the
- DIB3000MC from USB DVB devices. It must be a separate module
- in case DVB_USB is built-in and DVB_DIB3000MC is a module,
- and gets selected automatically when needed.
+if DVB_USB
config DVB_USB_A800
tristate "AVerMedia AverTV DVB-T USB 2.0 (A800)"
@@ -41,84 +33,37 @@ config DVB_USB_A800
help
Say Y here to support the AVerMedia AverTV DVB-T USB 2.0 (A800) receiver.
-config DVB_USB_DIBUSB_MB
- tristate "DiBcom USB DVB-T devices (based on the DiB3000M-B) (see help for device list)"
+config DVB_USB_AF9005
+ tristate "Afatech AF9005 DVB-T USB1.1 support"
depends on DVB_USB
- select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
- select DVB_DIB3000MB
- depends on DVB_DIB3000MC || !DVB_DIB3000MC
select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_QT1010 if MEDIA_SUBDRV_AUTOSELECT
help
- Support for USB 1.1 and 2.0 DVB-T receivers based on reference designs made by
- DiBcom (<http://www.dibcom.fr>) equipped with a DiB3000M-B demodulator.
-
- For an up-to-date list of devices supported by this driver, have a look
- on the Linux-DVB Wiki at www.linuxtv.org.
-
- Say Y if you own such a device and want to use it. You should build it as
- a module.
+ Say Y here to support the Afatech AF9005 based DVB-T USB1.1 receiver
+ and the TerraTec Cinergy T USB XE (Rev.1).
-config DVB_USB_DIBUSB_MB_FAULTY
- bool "Support faulty USB IDs"
- depends on DVB_USB_DIBUSB_MB
+config DVB_USB_AF9005_REMOTE
+ tristate "Afatech AF9005 default remote control support"
+ depends on DVB_USB_AF9005
help
- Support for faulty USB IDs due to an invalid EEPROM on some Artec devices.
+ Say Y here to support the default remote control decoding for the
+ Afatech AF9005 based receiver.
-config DVB_USB_DIBUSB_MC
- tristate "DiBcom USB DVB-T devices (based on the DiB3000M-C/P) (see help for device list)"
+config DVB_USB_AZ6027
+ tristate "Azurewave DVB-S/S2 USB2.0 AZ6027 support"
depends on DVB_USB
- select DVB_USB_DIB3000MC
- select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STB0899 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STB6100 if MEDIA_SUBDRV_AUTOSELECT
help
- Support for USB2.0 DVB-T receivers based on reference designs made by
- DiBcom (<http://www.dibcom.fr>) equipped with a DiB3000M-C/P demodulator.
-
- For an up-to-date list of devices supported by this driver, have a look
- on the Linux-DVB Wiki at www.linuxtv.org.
-
- Say Y if you own such a device and want to use it. You should build it as
- a module.
+ Say Y here to support the AZ6027 device.
-config DVB_USB_DIB0700
- tristate "DiBcom DiB0700 USB DVB devices (see help for supported devices)"
+config DVB_USB_CINERGY_T2
+ tristate "Terratec CinergyT2/qanu USB 2.0 DVB-T receiver"
depends on DVB_USB
- select DVB_DIB7000P if MEDIA_SUBDRV_AUTOSELECT
- select DVB_DIB7000M if MEDIA_SUBDRV_AUTOSELECT
- select DVB_DIB8000 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_USB_DIB3000MC if MEDIA_SUBDRV_AUTOSELECT
- select DVB_S5H1411 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_LGDT3305 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_MN88472 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_TUNER_DIB0070 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_TUNER_DIB0090 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_MT2266 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_XC2028 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_XC5000 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_XC4000 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_MXL5007T if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_TDA18250 if MEDIA_SUBDRV_AUTOSELECT
help
- Support for USB2.0/1.1 DVB receivers based on the DiB0700 USB bridge. The
- USB bridge is also present in devices having the DiB7700 DVB-T-USB
- silicon. This chip can be found in devices offered by Hauppauge,
- Avermedia and other big and small companies.
-
- For an up-to-date list of devices supported by this driver, have a look
- on the LinuxTV Wiki at www.linuxtv.org.
-
- Say Y if you own such a device and want to use it. You should build it as
- a module.
+ Support for "TerraTec CinergyT2" USB2.0 Highspeed DVB Receivers
-config DVB_USB_UMT_010
- tristate "HanfTek UMT-010 DVB-T USB2.0 support"
- depends on DVB_USB
- select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
- select DVB_USB_DIB3000MC
- select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_MT352 if MEDIA_SUBDRV_AUTOSELECT
- help
- Say Y here to support the HanfTek UMT-010 USB2.0 stick-sized DVB-T receiver.
+ Say Y if you own such a device and want to use it.
config DVB_USB_CXUSB
tristate "Conexant USB2.0 hybrid reference design support"
@@ -150,8 +95,8 @@ config DVB_USB_CXUSB
config DVB_USB_CXUSB_ANALOG
bool "Analog support for the Conexant USB2.0 hybrid reference design"
- depends on DVB_USB_CXUSB && VIDEO_V4L2
- depends on VIDEO_V4L2=y || VIDEO_V4L2=DVB_USB_CXUSB
+ depends on DVB_USB_CXUSB && VIDEO_DEV
+ depends on VIDEO_DEV=y || VIDEO_DEV=DVB_USB_CXUSB
select VIDEO_CX25840
select VIDEOBUF2_VMALLOC
help
@@ -159,87 +104,93 @@ config DVB_USB_CXUSB_ANALOG
USB2.0 hybrid reference design.
Currently this mode is supported only on a Medion MD95700 device.
-config DVB_USB_M920X
- tristate "Uli m920x DVB-T USB2.0 support"
+config DVB_USB_DIB0700
+ tristate "DiBcom DiB0700 USB DVB devices (see help for supported devices)"
depends on DVB_USB
- select DVB_MT352 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_TDA1004X if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_QT1010 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_TDA827X if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_SIMPLE if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_DIB7000P if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_DIB7000M if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_DIB8000 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_USB_DIB3000MC if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_S5H1411 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_LGDT3305 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_MN88472 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TUNER_DIB0070 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TUNER_DIB0090 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_MT2266 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_XC2028 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_XC5000 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_XC4000 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_MXL5007T if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_TDA18250 if MEDIA_SUBDRV_AUTOSELECT
help
- Say Y here to support the MSI Mega Sky 580 USB2.0 DVB-T receiver.
- Currently, only devices with a product id of
- "DTV USB MINI" (in cold state) are supported.
- Firmware required.
+ Support for USB2.0/1.1 DVB receivers based on the DiB0700 USB bridge. The
+ USB bridge is also present in devices having the DiB7700 DVB-T-USB
+ silicon. This chip can be found in devices offered by Hauppauge,
+ Avermedia and other big and small companies.
-config DVB_USB_DIGITV
- tristate "Nebula Electronics uDigiTV DVB-T USB2.0 support"
- depends on DVB_USB
- select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
- select DVB_NXT6000 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_MT352 if MEDIA_SUBDRV_AUTOSELECT
- help
- Say Y here to support the Nebula Electronics uDigitV USB2.0 DVB-T receiver.
+ For an up-to-date list of devices supported by this driver, have a look
+ on the LinuxTV Wiki at www.linuxtv.org.
-config DVB_USB_VP7045
- tristate "TwinhanDTV Alpha/MagicBoxII, DNTV tinyUSB2, Beetle USB2.0 support"
+ Say Y if you own such a device and want to use it. You should build it as
+ a module.
+
+config DVB_USB_DIB3000MC
+ tristate
depends on DVB_USB
+ select DVB_DIB3000MC
help
- Say Y here to support the
-
- TwinhanDTV Alpha (stick) (VP-7045),
- TwinhanDTV MagicBox II (VP-7046),
- DigitalNow TinyUSB 2 DVB-t,
- DigitalRise USB 2.0 Ter (Beetle) and
- TYPHOON DVB-T USB DRIVE
-
- DVB-T USB2.0 receivers.
+ This is a module with helper functions for accessing the
+ DIB3000MC from USB DVB devices. It must be a separate module
+ in case DVB_USB is built-in and DVB_DIB3000MC is a module,
+ and gets selected automatically when needed.
-config DVB_USB_VP702X
- tristate "TwinhanDTV StarBox and clones DVB-S USB2.0 support"
+config DVB_USB_DIBUSB_MB
+ tristate "DiBcom USB DVB-T devices (based on the DiB3000M-B) (see help for device list)"
depends on DVB_USB
+ select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_DIB3000MB
+ depends on DVB_DIB3000MC || !DVB_DIB3000MC
+ select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
help
- Say Y here to support the
+ Support for USB 1.1 and 2.0 DVB-T receivers based on reference designs made by
+ DiBcom (<http://www.dibcom.fr>) equipped with a DiB3000M-B demodulator.
- TwinhanDTV StarBox,
- DigitalRise USB Starbox and
- TYPHOON DVB-S USB 2.0 BOX
+ For an up-to-date list of devices supported by this driver, have a look
+ on the Linux-DVB Wiki at www.linuxtv.org.
- DVB-S USB2.0 receivers.
+ Say Y if you own such a device and want to use it. You should build it as
+ a module.
-config DVB_USB_GP8PSK
- tristate "GENPIX 8PSK->USB module support"
- depends on DVB_USB
+config DVB_USB_DIBUSB_MB_FAULTY
+ bool "Support faulty USB IDs"
+ depends on DVB_USB_DIBUSB_MB
help
- Say Y here to support the
- GENPIX 8psk module
-
- DVB-S USB2.0 receivers.
+ Support for faulty USB IDs due to an invalid EEPROM on some Artec devices.
-config DVB_USB_NOVA_T_USB2
- tristate "Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 support"
+config DVB_USB_DIBUSB_MC
+ tristate "DiBcom USB DVB-T devices (based on the DiB3000M-C/P) (see help for device list)"
depends on DVB_USB
select DVB_USB_DIB3000MC
- select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
help
- Say Y here to support the Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 receiver.
+ Support for USB2.0 DVB-T receivers based on reference designs made by
+ DiBcom (<http://www.dibcom.fr>) equipped with a DiB3000M-C/P demodulator.
-config DVB_USB_TTUSB2
- tristate "Pinnacle 400e DVB-S USB2.0 support"
+ For an up-to-date list of devices supported by this driver, have a look
+ on the Linux-DVB Wiki at www.linuxtv.org.
+
+ Say Y if you own such a device and want to use it. You should build it as
+ a module.
+
+config DVB_USB_DIGITV
+ tristate "Nebula Electronics uDigiTV DVB-T USB2.0 support"
depends on DVB_USB
- select DVB_TDA10086 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_LNBP21 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_TDA826X if MEDIA_SUBDRV_AUTOSELECT
- select DVB_TDA10023 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_TDA10048 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_TDA827X if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_NXT6000 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_MT352 if MEDIA_SUBDRV_AUTOSELECT
help
- Say Y here to support the Pinnacle 400e DVB-S USB2.0 receiver and
- the TechnoTrend CT-3650 CI DVB-C/T USB2.0 receiver. The
- firmware protocol used by this module is similar to the one used by the
- old ttusb-driver - that's why the module is called dvb-usb-ttusb2.
+ Say Y here to support the Nebula Electronics uDigiTV USB2.0 DVB-T receiver.
config DVB_USB_DTT200U
tristate "WideView WT-200U and WT-220U (pen) DVB-T USB2.0 support (Yakumo/Hama/Typhoon/Yuan)"
@@ -251,43 +202,13 @@ config DVB_USB_DTT200U
The WT-220U and its clones are pen-sized.
-config DVB_USB_OPERA1
- tristate "Opera1 DVB-S USB2.0 receiver"
- depends on DVB_USB
- select DVB_STV0299 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
- help
- Say Y here to support the Opera DVB-S USB2.0 receiver.
-
-config DVB_USB_AF9005
- tristate "Afatech AF9005 DVB-T USB1.1 support"
+config DVB_USB_DTV5100
+ tristate "AME DTV-5100 USB2.0 DVB-T support"
depends on DVB_USB
- select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_ZL10353 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_QT1010 if MEDIA_SUBDRV_AUTOSELECT
help
- Say Y here to support the Afatech AF9005 based DVB-T USB1.1 receiver
- and the TerraTec Cinergy T USB XE (Rev.1)
-
-config DVB_USB_AF9005_REMOTE
- tristate "Afatech AF9005 default remote control support"
- depends on DVB_USB_AF9005
- help
- Say Y here to support the default remote control decoding for the
- Afatech AF9005 based receiver.
-
-config DVB_USB_PCTV452E
- tristate "Pinnacle PCTV HDTV Pro USB device/TT Connect S2-3600"
- depends on DVB_USB
- select TTPCI_EEPROM
- select DVB_ISL6423 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_LNBP22 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_STB0899 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_STB6100 if MEDIA_SUBDRV_AUTOSELECT
- help
- Support for external USB adapter designed by Pinnacle,
- shipped under the brand name 'PCTV HDTV Pro USB'.
- Also supports TT Connect S2-3600/3650 cards.
- Say Y if you own such a device and want to use it.
+ Say Y here to support the AME DTV-5100 USB2.0 DVB-T receiver.
config DVB_USB_DW2102
tristate "DvbWorld & TeVii DVB-S/S2 USB2.0 support"
@@ -312,29 +233,59 @@ config DVB_USB_DW2102
Say Y here to support the DvbWorld, TeVii, Prof, TechnoTrend
DVB-S/S2 USB2.0 receivers.
-config DVB_USB_CINERGY_T2
- tristate "Terratec CinergyT2/qanu USB 2.0 DVB-T receiver"
+config DVB_USB_GP8PSK
+ tristate "GENPIX 8PSK->USB module support"
depends on DVB_USB
help
- Support for "TerraTec CinergyT2" USB2.0 Highspeed DVB Receivers
+ Say Y here to support the
+ GENPIX 8psk module
- Say Y if you own such a device and want to use it.
+ DVB-S USB2.0 receivers.
-config DVB_USB_DTV5100
- tristate "AME DTV-5100 USB2.0 DVB-T support"
+config DVB_USB_M920X
+ tristate "Uli m920x DVB-T USB2.0 support"
depends on DVB_USB
- select DVB_ZL10353 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_MT352 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TDA1004X if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_QT1010 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_TDA827X if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_SIMPLE if MEDIA_SUBDRV_AUTOSELECT
help
- Say Y here to support the AME DTV-5100 USB2.0 DVB-T receiver.
+ Say Y here to support the MSI Mega Sky 580 USB2.0 DVB-T receiver.
+ Currently, only devices with a product id of
+ "DTV USB MINI" (in cold state) are supported.
+ Firmware required.
-config DVB_USB_AZ6027
- tristate "Azurewave DVB-S/S2 USB2.0 AZ6027 support"
+config DVB_USB_NOVA_T_USB2
+ tristate "Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 support"
depends on DVB_USB
+ select DVB_USB_DIB3000MC
+ select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y here to support the Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 receiver.
+
+config DVB_USB_OPERA1
+ tristate "Opera1 DVB-S USB2.0 receiver"
+ depends on DVB_USB
+ select DVB_STV0299 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y here to support the Opera DVB-S USB2.0 receiver.
+
+config DVB_USB_PCTV452E
+ tristate "Pinnacle PCTV HDTV Pro USB device/TT Connect S2-3600"
+ depends on DVB_USB
+ select TTPCI_EEPROM
+ select DVB_ISL6423 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_LNBP22 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STB0899 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STB6100 if MEDIA_SUBDRV_AUTOSELECT
help
- Say Y here to support the AZ6027 device
+ Support for external USB adapter designed by Pinnacle,
+ shipped under the brand name 'PCTV HDTV Pro USB'.
+ Also supports TT Connect S2-3600/3650 cards.
+ Say Y if you own such a device and want to use it.
config DVB_USB_TECHNISAT_USB2
tristate "Technisat DVB-S/S2 USB2.0 support"
@@ -343,3 +294,56 @@ config DVB_USB_TECHNISAT_USB2
select DVB_STV6110x if MEDIA_SUBDRV_AUTOSELECT
help
Say Y here to support the Technisat USB2 DVB-S/S2 device
+
+config DVB_USB_TTUSB2
+ tristate "Pinnacle 400e DVB-S USB2.0 support"
+ depends on DVB_USB
+ select DVB_TDA10086 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_LNBP21 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TDA826X if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TDA10023 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TDA10048 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_TDA827X if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y here to support the Pinnacle 400e DVB-S USB2.0 receiver and
+ the TechnoTrend CT-3650 CI DVB-C/T USB2.0 receiver. The
+ firmware protocol used by this module is similar to the one used by the
+ old ttusb-driver - that's why the module is called dvb-usb-ttusb2.
+
+config DVB_USB_UMT_010
+ tristate "HanfTek UMT-010 DVB-T USB2.0 support"
+ depends on DVB_USB
+ select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_USB_DIB3000MC
+ select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_MT352 if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y here to support the HanfTek UMT-010 USB2.0 stick-sized DVB-T receiver.
+
+config DVB_USB_VP702X
+ tristate "TwinhanDTV StarBox and clones DVB-S USB2.0 support"
+ depends on DVB_USB
+ help
+ Say Y here to support the
+
+ TwinhanDTV StarBox,
+ DigitalRise USB Starbox and
+ TYPHOON DVB-S USB 2.0 BOX
+
+ DVB-S USB2.0 receivers.
+
+config DVB_USB_VP7045
+ tristate "TwinhanDTV Alpha/MagicBoxII, DNTV tinyUSB2, Beetle USB2.0 support"
+ depends on DVB_USB
+ help
+ Say Y here to support the
+
+ TwinhanDTV Alpha (stick) (VP-7045),
+ TwinhanDTV MagicBox II (VP-7046),
+ DigitalNow TinyUSB 2 DVB-t,
+ DigitalRise USB 2.0 Ter (Beetle) and
+ TYPHOON DVB-T USB DRIVE
+
+ DVB-T USB2.0 receivers.
+
+endif
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 7707de7bae7c..265b960db499 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -35,7 +35,7 @@
#include "mt352.h"
#include "mt352_priv.h"
#include "zl10353.h"
-#include "tuner-xc2028.h"
+#include "xc2028.h"
#include "tuner-simple.h"
#include "mxl5005s.h"
#include "max2165.h"
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 710c1afe3e85..08fcf120daf1 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -12,7 +12,7 @@
#include "dib9000.h"
#include "mt2060.h"
#include "mt2266.h"
-#include "tuner-xc2028.h"
+#include "xc2028.h"
#include "xc5000.h"
#include "xc4000.h"
#include "s5h1411.h"
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index b451ce3cb169..ae25d2cbfdfe 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -3936,6 +3936,8 @@ static int em28xx_usb_probe(struct usb_interface *intf,
goto err_free;
}
+ kref_init(&dev->ref);
+
dev->devno = nr;
dev->model = id->driver_info;
dev->alt = -1;
@@ -4036,6 +4038,8 @@ static int em28xx_usb_probe(struct usb_interface *intf,
}
if (dev->board.has_dual_ts && em28xx_duplicate_dev(dev) == 0) {
+ kref_init(&dev->dev_next->ref);
+
dev->dev_next->ts = SECONDARY_TS;
dev->dev_next->alt = -1;
dev->dev_next->is_audio_only = has_vendor_audio &&
@@ -4090,12 +4094,8 @@ static int em28xx_usb_probe(struct usb_interface *intf,
em28xx_write_reg(dev, 0x0b, 0x82);
mdelay(100);
}
-
- kref_init(&dev->dev_next->ref);
}
- kref_init(&dev->ref);
-
request_modules(dev);
/*
@@ -4150,11 +4150,8 @@ static void em28xx_usb_disconnect(struct usb_interface *intf)
em28xx_close_extension(dev);
- if (dev->dev_next) {
- em28xx_close_extension(dev->dev_next);
+ if (dev->dev_next)
em28xx_release_resources(dev->dev_next);
- }
-
em28xx_release_resources(dev);
if (dev->dev_next) {
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 255395959255..b9a8d3fbad1a 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -26,7 +26,7 @@
#include <linux/i2c.h>
#include <linux/jiffies.h>
-#include "tuner-xc2028.h"
+#include "xc2028.h"
#include <media/v4l2-common.h>
#include <media/tuner.h>
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index ab167cd1f400..7fc0b68a4a22 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -41,7 +41,7 @@
#include <media/v4l2-fh.h>
#include <media/i2c/ir-kbd-i2c.h>
#include <media/rc-core.h>
-#include "tuner-xc2028.h"
+#include "xc2028.h"
#include "xc5000.h"
#include "em28xx-reg.h"
diff --git a/drivers/media/usb/go7007/s2250-board.c b/drivers/media/usb/go7007/s2250-board.c
index c742cc88fac5..1fa6f10ee157 100644
--- a/drivers/media/usb/go7007/s2250-board.c
+++ b/drivers/media/usb/go7007/s2250-board.c
@@ -504,6 +504,7 @@ static int s2250_probe(struct i2c_client *client,
u8 *data;
struct go7007 *go = i2c_get_adapdata(adapter);
struct go7007_usb *usb = go->hpi_context;
+ int err = -EIO;
audio = i2c_new_dummy_device(adapter, TLV320_ADDRESS >> 1);
if (IS_ERR(audio))
@@ -532,11 +533,8 @@ static int s2250_probe(struct i2c_client *client,
V4L2_CID_HUE, -512, 511, 1, 0);
sd->ctrl_handler = &state->hdl;
if (state->hdl.error) {
- int err = state->hdl.error;
-
- v4l2_ctrl_handler_free(&state->hdl);
- kfree(state);
- return err;
+ err = state->hdl.error;
+ goto fail;
}
state->std = V4L2_STD_NTSC;
@@ -600,7 +598,7 @@ fail:
i2c_unregister_device(audio);
v4l2_ctrl_handler_free(&state->hdl);
kfree(state);
- return -EIO;
+ return err;
}
static int s2250_remove(struct i2c_client *client)
diff --git a/drivers/media/usb/go7007/snd-go7007.c b/drivers/media/usb/go7007/snd-go7007.c
index 2ce85ab38db5..9a6bd87fce03 100644
--- a/drivers/media/usb/go7007/snd-go7007.c
+++ b/drivers/media/usb/go7007/snd-go7007.c
@@ -191,7 +191,7 @@ static int go7007_snd_free(struct snd_device *device)
return 0;
}
-static struct snd_device_ops go7007_snd_device_ops = {
+static const struct snd_device_ops go7007_snd_device_ops = {
.dev_free = go7007_snd_free,
};
diff --git a/drivers/media/usb/gspca/Kconfig b/drivers/media/usb/gspca/Kconfig
index dca4e16ed133..9c1939ce6be4 100644
--- a/drivers/media/usb/gspca/Kconfig
+++ b/drivers/media/usb/gspca/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig USB_GSPCA
tristate "GSPCA based webcams"
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
depends on INPUT || INPUT=n
select VIDEOBUF2_VMALLOC
help
@@ -16,16 +16,11 @@ menuconfig USB_GSPCA
To compile this driver as modules, choose M here: the
module will be called gspca_main.
-
-if USB_GSPCA && VIDEO_V4L2
-
-source "drivers/media/usb/gspca/m5602/Kconfig"
-source "drivers/media/usb/gspca/stv06xx/Kconfig"
-source "drivers/media/usb/gspca/gl860/Kconfig"
+if USB_GSPCA && VIDEO_DEV
config USB_GSPCA_BENQ
tristate "Benq USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for the Benq DC E300 camera.
@@ -34,7 +29,7 @@ config USB_GSPCA_BENQ
config USB_GSPCA_CONEX
tristate "Conexant Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the Conexant chip.
@@ -43,7 +38,7 @@ config USB_GSPCA_CONEX
config USB_GSPCA_CPIA1
tristate "cpia CPiA (version 1) Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for USB cameras based on the cpia
CPiA chip. Note that you need at least version 0.6.4 of libv4l for
@@ -54,7 +49,7 @@ config USB_GSPCA_CPIA1
config USB_GSPCA_DTCS033
tristate "DTCS033 (Scopium) USB Astro-Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for the Scopium camera
for planetary astrophotography.
@@ -64,7 +59,7 @@ config USB_GSPCA_DTCS033
config USB_GSPCA_ETOMS
tristate "Etoms USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the Etoms chip.
@@ -73,7 +68,7 @@ config USB_GSPCA_ETOMS
config USB_GSPCA_FINEPIX
tristate "Fujifilm FinePix USB V4L2 driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the FinePix chip.
@@ -82,7 +77,7 @@ config USB_GSPCA_FINEPIX
config USB_GSPCA_JEILINJ
tristate "Jeilin JPEG USB V4L2 driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on this Jeilin chip.
@@ -91,7 +86,7 @@ config USB_GSPCA_JEILINJ
config USB_GSPCA_JL2005BCD
tristate "JL2005B/C/D USB V4L2 driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based the
JL2005B, JL2005C, or JL2005D chip.
@@ -101,7 +96,7 @@ config USB_GSPCA_JL2005BCD
config USB_GSPCA_KINECT
tristate "Kinect sensor device USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for the Microsoft Kinect sensor device.
@@ -110,7 +105,7 @@ config USB_GSPCA_KINECT
config USB_GSPCA_KONICA
tristate "Konica USB Camera V4L2 driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the Konica chip.
@@ -119,7 +114,7 @@ config USB_GSPCA_KONICA
config USB_GSPCA_MARS
tristate "Mars USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the Mars chip.
@@ -128,7 +123,7 @@ config USB_GSPCA_MARS
config USB_GSPCA_MR97310A
tristate "Mars-Semi MR97310A USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the MR97310A chip.
@@ -137,7 +132,7 @@ config USB_GSPCA_MR97310A
config USB_GSPCA_NW80X
tristate "Divio based (NW80x) USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the NW80x chips.
@@ -146,7 +141,7 @@ config USB_GSPCA_NW80X
config USB_GSPCA_OV519
tristate "OV51x / OVFX2 / W996xCF USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on one of these:
OV511(+), OV518(+), OV519, OVFX2, W9967CF, W9968CF
@@ -156,7 +151,7 @@ config USB_GSPCA_OV519
config USB_GSPCA_OV534
tristate "OV534 OV772x USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the OV534 chip
and sensor OV772x (e.g. Sony Playstation EYE)
@@ -166,7 +161,7 @@ config USB_GSPCA_OV534
config USB_GSPCA_OV534_9
tristate "OV534 OV965x USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the OV534 chip
and sensor OV965x (e.g. Hercules Dualpix)
@@ -176,7 +171,7 @@ config USB_GSPCA_OV534_9
config USB_GSPCA_PAC207
tristate "Pixart PAC207 USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the PAC207 chip.
@@ -185,7 +180,7 @@ config USB_GSPCA_PAC207
config USB_GSPCA_PAC7302
tristate "Pixart PAC7302 USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the PAC7302 chip.
@@ -194,7 +189,7 @@ config USB_GSPCA_PAC7302
config USB_GSPCA_PAC7311
tristate "Pixart PAC7311 USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the PAC7311 chip.
@@ -203,7 +198,7 @@ config USB_GSPCA_PAC7311
config USB_GSPCA_SE401
tristate "SE401 USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the
Endpoints (formerly known as AOX) se401 chip.
@@ -213,7 +208,7 @@ config USB_GSPCA_SE401
config USB_GSPCA_SN9C2028
tristate "SONIX Dual-Mode USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want streaming support for Sonix SN9C2028 cameras.
These are supported as stillcams in libgphoto2/camlibs/sonix.
@@ -223,7 +218,7 @@ config USB_GSPCA_SN9C2028
config USB_GSPCA_SN9C20X
tristate "SN9C20X USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the
sn9c20x chips (SN9C201 and SN9C202).
@@ -233,7 +228,7 @@ config USB_GSPCA_SN9C20X
config USB_GSPCA_SONIXB
tristate "SONIX Bayer USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the Sonix
chips with Bayer format (SN9C101, SN9C102 and SN9C103).
@@ -243,7 +238,7 @@ config USB_GSPCA_SONIXB
config USB_GSPCA_SONIXJ
tristate "SONIX JPEG USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the Sonix
chips with JPEG format (SN9C102P, SN9C105 and >= SN9C110).
@@ -251,9 +246,18 @@ config USB_GSPCA_SONIXJ
To compile this driver as a module, choose M here: the
module will be called gspca_sonixj
+config USB_GSPCA_SPCA1528
+ tristate "SPCA1528 USB Camera Driver"
+ depends on VIDEO_DEV && USB_GSPCA
+ help
+ Say Y here if you want support for cameras based on the SPCA1528 chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gspca_spca1528.
+
config USB_GSPCA_SPCA500
tristate "SPCA500 USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the SPCA500 chip.
@@ -262,7 +266,7 @@ config USB_GSPCA_SPCA500
config USB_GSPCA_SPCA501
tristate "SPCA501 USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the SPCA501 chip.
@@ -271,7 +275,7 @@ config USB_GSPCA_SPCA501
config USB_GSPCA_SPCA505
tristate "SPCA505 USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the SPCA505 chip.
@@ -280,7 +284,7 @@ config USB_GSPCA_SPCA505
config USB_GSPCA_SPCA506
tristate "SPCA506 USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the SPCA506 chip.
@@ -289,7 +293,7 @@ config USB_GSPCA_SPCA506
config USB_GSPCA_SPCA508
tristate "SPCA508 USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the SPCA508 chip.
@@ -298,25 +302,16 @@ config USB_GSPCA_SPCA508
config USB_GSPCA_SPCA561
tristate "SPCA561 USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the SPCA561 chip.
To compile this driver as a module, choose M here: the
module will be called gspca_spca561.
-config USB_GSPCA_SPCA1528
- tristate "SPCA1528 USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
- help
- Say Y here if you want support for cameras based on the SPCA1528 chip.
-
- To compile this driver as a module, choose M here: the
- module will be called gspca_spca1528.
-
config USB_GSPCA_SQ905
tristate "SQ Technologies SQ905 based USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the SQ905 chip.
@@ -325,7 +320,7 @@ config USB_GSPCA_SQ905
config USB_GSPCA_SQ905C
tristate "SQ Technologies SQ905C based USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the SQ905C chip.
@@ -334,7 +329,7 @@ config USB_GSPCA_SQ905C
config USB_GSPCA_SQ930X
tristate "SQ Technologies SQ930X based USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the SQ930X chip.
@@ -343,7 +338,7 @@ config USB_GSPCA_SQ930X
config USB_GSPCA_STK014
tristate "Syntek DV4000 (STK014) USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the STK014 chip.
@@ -352,7 +347,7 @@ config USB_GSPCA_STK014
config USB_GSPCA_STK1135
tristate "Syntek STK1135 USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the STK1135 chip.
@@ -361,7 +356,7 @@ config USB_GSPCA_STK1135
config USB_GSPCA_STV0680
tristate "STV0680 USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the STV0680 chip.
@@ -370,7 +365,7 @@ config USB_GSPCA_STV0680
config USB_GSPCA_SUNPLUS
tristate "SUNPLUS USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the Sunplus
SPCA504(abc) SPCA533 SPCA536 chips.
@@ -380,7 +375,7 @@ config USB_GSPCA_SUNPLUS
config USB_GSPCA_T613
tristate "T613 (JPEG Compliance) USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the T613 chip.
@@ -389,7 +384,7 @@ config USB_GSPCA_T613
config USB_GSPCA_TOPRO
tristate "TOPRO USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the
TP6800 and TP6810 Topro chips.
@@ -399,7 +394,7 @@ config USB_GSPCA_TOPRO
config USB_GSPCA_TOUPTEK
tristate "Touptek USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the ToupTek UCMOS
/ AmScope MU series camera.
@@ -409,7 +404,7 @@ config USB_GSPCA_TOUPTEK
config USB_GSPCA_TV8532
tristate "TV8532 USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the TV8531 chip.
@@ -418,7 +413,7 @@ config USB_GSPCA_TV8532
config USB_GSPCA_VC032X
tristate "VC032X USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the VC032X chip.
@@ -427,7 +422,7 @@ config USB_GSPCA_VC032X
config USB_GSPCA_VICAM
tristate "ViCam USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for the 3com homeconnect camera
(vicam).
@@ -437,7 +432,7 @@ config USB_GSPCA_VICAM
config USB_GSPCA_XIRLINK_CIT
tristate "Xirlink C-It USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for Xirlink C-It bases cameras.
@@ -446,11 +441,15 @@ config USB_GSPCA_XIRLINK_CIT
config USB_GSPCA_ZC3XX
tristate "ZC3XX USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the ZC3XX chip.
To compile this driver as a module, choose M here: the
module will be called gspca_zc3xx.
+source "drivers/media/usb/gspca/gl860/Kconfig"
+source "drivers/media/usb/gspca/m5602/Kconfig"
+source "drivers/media/usb/gspca/stv06xx/Kconfig"
+
endif
diff --git a/drivers/media/usb/gspca/Makefile b/drivers/media/usb/gspca/Makefile
index 3e3ecbffdf9f..a35c45006130 100644
--- a/drivers/media/usb/gspca/Makefile
+++ b/drivers/media/usb/gspca/Makefile
@@ -1,51 +1,51 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_USB_GSPCA) += gspca_main.o
-obj-$(CONFIG_USB_GSPCA_BENQ) += gspca_benq.o
-obj-$(CONFIG_USB_GSPCA_CONEX) += gspca_conex.o
-obj-$(CONFIG_USB_GSPCA_CPIA1) += gspca_cpia1.o
-obj-$(CONFIG_USB_GSPCA_DTCS033) += gspca_dtcs033.o
-obj-$(CONFIG_USB_GSPCA_ETOMS) += gspca_etoms.o
-obj-$(CONFIG_USB_GSPCA_FINEPIX) += gspca_finepix.o
-obj-$(CONFIG_USB_GSPCA_JEILINJ) += gspca_jeilinj.o
+obj-$(CONFIG_USB_GSPCA) += gspca_main.o
+obj-$(CONFIG_USB_GSPCA_BENQ) += gspca_benq.o
+obj-$(CONFIG_USB_GSPCA_CONEX) += gspca_conex.o
+obj-$(CONFIG_USB_GSPCA_CPIA1) += gspca_cpia1.o
+obj-$(CONFIG_USB_GSPCA_DTCS033) += gspca_dtcs033.o
+obj-$(CONFIG_USB_GSPCA_ETOMS) += gspca_etoms.o
+obj-$(CONFIG_USB_GSPCA_FINEPIX) += gspca_finepix.o
+obj-$(CONFIG_USB_GSPCA_JEILINJ) += gspca_jeilinj.o
obj-$(CONFIG_USB_GSPCA_JL2005BCD) += gspca_jl2005bcd.o
-obj-$(CONFIG_USB_GSPCA_KINECT) += gspca_kinect.o
-obj-$(CONFIG_USB_GSPCA_KONICA) += gspca_konica.o
-obj-$(CONFIG_USB_GSPCA_MARS) += gspca_mars.o
+obj-$(CONFIG_USB_GSPCA_KINECT) += gspca_kinect.o
+obj-$(CONFIG_USB_GSPCA_KONICA) += gspca_konica.o
+obj-$(CONFIG_USB_GSPCA_MARS) += gspca_mars.o
obj-$(CONFIG_USB_GSPCA_MR97310A) += gspca_mr97310a.o
-obj-$(CONFIG_USB_GSPCA_NW80X) += gspca_nw80x.o
-obj-$(CONFIG_USB_GSPCA_OV519) += gspca_ov519.o
-obj-$(CONFIG_USB_GSPCA_OV534) += gspca_ov534.o
-obj-$(CONFIG_USB_GSPCA_OV534_9) += gspca_ov534_9.o
-obj-$(CONFIG_USB_GSPCA_PAC207) += gspca_pac207.o
-obj-$(CONFIG_USB_GSPCA_PAC7302) += gspca_pac7302.o
-obj-$(CONFIG_USB_GSPCA_PAC7311) += gspca_pac7311.o
-obj-$(CONFIG_USB_GSPCA_SE401) += gspca_se401.o
+obj-$(CONFIG_USB_GSPCA_NW80X) += gspca_nw80x.o
+obj-$(CONFIG_USB_GSPCA_OV519) += gspca_ov519.o
+obj-$(CONFIG_USB_GSPCA_OV534) += gspca_ov534.o
+obj-$(CONFIG_USB_GSPCA_OV534_9) += gspca_ov534_9.o
+obj-$(CONFIG_USB_GSPCA_PAC207) += gspca_pac207.o
+obj-$(CONFIG_USB_GSPCA_PAC7302) += gspca_pac7302.o
+obj-$(CONFIG_USB_GSPCA_PAC7311) += gspca_pac7311.o
+obj-$(CONFIG_USB_GSPCA_SE401) += gspca_se401.o
obj-$(CONFIG_USB_GSPCA_SN9C2028) += gspca_sn9c2028.o
-obj-$(CONFIG_USB_GSPCA_SN9C20X) += gspca_sn9c20x.o
-obj-$(CONFIG_USB_GSPCA_SONIXB) += gspca_sonixb.o
-obj-$(CONFIG_USB_GSPCA_SONIXJ) += gspca_sonixj.o
-obj-$(CONFIG_USB_GSPCA_SPCA500) += gspca_spca500.o
-obj-$(CONFIG_USB_GSPCA_SPCA501) += gspca_spca501.o
-obj-$(CONFIG_USB_GSPCA_SPCA505) += gspca_spca505.o
-obj-$(CONFIG_USB_GSPCA_SPCA506) += gspca_spca506.o
-obj-$(CONFIG_USB_GSPCA_SPCA508) += gspca_spca508.o
-obj-$(CONFIG_USB_GSPCA_SPCA561) += gspca_spca561.o
+obj-$(CONFIG_USB_GSPCA_SN9C20X) += gspca_sn9c20x.o
+obj-$(CONFIG_USB_GSPCA_SONIXB) += gspca_sonixb.o
+obj-$(CONFIG_USB_GSPCA_SONIXJ) += gspca_sonixj.o
+obj-$(CONFIG_USB_GSPCA_SPCA500) += gspca_spca500.o
+obj-$(CONFIG_USB_GSPCA_SPCA501) += gspca_spca501.o
+obj-$(CONFIG_USB_GSPCA_SPCA505) += gspca_spca505.o
+obj-$(CONFIG_USB_GSPCA_SPCA506) += gspca_spca506.o
+obj-$(CONFIG_USB_GSPCA_SPCA508) += gspca_spca508.o
+obj-$(CONFIG_USB_GSPCA_SPCA561) += gspca_spca561.o
obj-$(CONFIG_USB_GSPCA_SPCA1528) += gspca_spca1528.o
-obj-$(CONFIG_USB_GSPCA_SQ905) += gspca_sq905.o
-obj-$(CONFIG_USB_GSPCA_SQ905C) += gspca_sq905c.o
-obj-$(CONFIG_USB_GSPCA_SQ930X) += gspca_sq930x.o
-obj-$(CONFIG_USB_GSPCA_SUNPLUS) += gspca_sunplus.o
-obj-$(CONFIG_USB_GSPCA_STK014) += gspca_stk014.o
-obj-$(CONFIG_USB_GSPCA_STK1135) += gspca_stk1135.o
-obj-$(CONFIG_USB_GSPCA_STV0680) += gspca_stv0680.o
-obj-$(CONFIG_USB_GSPCA_T613) += gspca_t613.o
-obj-$(CONFIG_USB_GSPCA_TOPRO) += gspca_topro.o
-obj-$(CONFIG_USB_GSPCA_TOUPTEK) += gspca_touptek.o
-obj-$(CONFIG_USB_GSPCA_TV8532) += gspca_tv8532.o
-obj-$(CONFIG_USB_GSPCA_VC032X) += gspca_vc032x.o
-obj-$(CONFIG_USB_GSPCA_VICAM) += gspca_vicam.o
+obj-$(CONFIG_USB_GSPCA_SQ905) += gspca_sq905.o
+obj-$(CONFIG_USB_GSPCA_SQ905C) += gspca_sq905c.o
+obj-$(CONFIG_USB_GSPCA_SQ930X) += gspca_sq930x.o
+obj-$(CONFIG_USB_GSPCA_SUNPLUS) += gspca_sunplus.o
+obj-$(CONFIG_USB_GSPCA_STK014) += gspca_stk014.o
+obj-$(CONFIG_USB_GSPCA_STK1135) += gspca_stk1135.o
+obj-$(CONFIG_USB_GSPCA_STV0680) += gspca_stv0680.o
+obj-$(CONFIG_USB_GSPCA_T613) += gspca_t613.o
+obj-$(CONFIG_USB_GSPCA_TOPRO) += gspca_topro.o
+obj-$(CONFIG_USB_GSPCA_TOUPTEK) += gspca_touptek.o
+obj-$(CONFIG_USB_GSPCA_TV8532) += gspca_tv8532.o
+obj-$(CONFIG_USB_GSPCA_VC032X) += gspca_vc032x.o
+obj-$(CONFIG_USB_GSPCA_VICAM) += gspca_vicam.o
obj-$(CONFIG_USB_GSPCA_XIRLINK_CIT) += gspca_xirlink_cit.o
-obj-$(CONFIG_USB_GSPCA_ZC3XX) += gspca_zc3xx.o
+obj-$(CONFIG_USB_GSPCA_ZC3XX) += gspca_zc3xx.o
gspca_main-objs := gspca.o autogain_functions.o
gspca_benq-objs := benq.o
@@ -95,6 +95,6 @@ gspca_vicam-objs := vicam.o
gspca_xirlink_cit-objs := xirlink_cit.o
gspca_zc3xx-objs := zc3xx.o
-obj-$(CONFIG_USB_M5602) += m5602/
+obj-$(CONFIG_USB_M5602) += m5602/
obj-$(CONFIG_USB_STV06XX) += stv06xx/
-obj-$(CONFIG_USB_GL860) += gl860/
+obj-$(CONFIG_USB_GL860) += gl860/
diff --git a/drivers/media/usb/gspca/gl860/Kconfig b/drivers/media/usb/gspca/gl860/Kconfig
index 2dfd2704c915..e5a35ca72b60 100644
--- a/drivers/media/usb/gspca/gl860/Kconfig
+++ b/drivers/media/usb/gspca/gl860/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config USB_GL860
tristate "GL860 USB Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the GL860 chip.
diff --git a/drivers/media/usb/gspca/jl2005bcd.c b/drivers/media/usb/gspca/jl2005bcd.c
index ca12f33f3e12..a408fcc3a060 100644
--- a/drivers/media/usb/gspca/jl2005bcd.c
+++ b/drivers/media/usb/gspca/jl2005bcd.c
@@ -166,7 +166,9 @@ static int jl2005c_get_firmware_id(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *)gspca_dev;
int i = 0;
int retval;
- unsigned char regs_to_read[] = {0x57, 0x02, 0x03, 0x5d, 0x5e, 0x5f};
+ static const unsigned char regs_to_read[] = {
+ 0x57, 0x02, 0x03, 0x5d, 0x5e, 0x5f
+ };
gspca_dbg(gspca_dev, D_PROBE, "Running jl2005c_get_firmware_id\n");
/* Read the first ID byte once for warmup */
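The jl2005bcd hunk above promotes a per-call register list to static const, so the table is emitted once into .rodata instead of being rebuilt on the stack at every call. A minimal sketch of the pattern, with an illustrative accessor that is not the driver's:

	/* read_reg() is a placeholder for the device register accessor */
	static void read_id_registers(void)
	{
		static const unsigned char regs_to_read[] = {
			0x57, 0x02, 0x03, 0x5d, 0x5e, 0x5f
		};
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(regs_to_read); i++)
			read_reg(regs_to_read[i]);
	}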
diff --git a/drivers/media/usb/gspca/m5602/Kconfig b/drivers/media/usb/gspca/m5602/Kconfig
index 0a250652d717..d616408b67d9 100644
--- a/drivers/media/usb/gspca/m5602/Kconfig
+++ b/drivers/media/usb/gspca/m5602/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config USB_M5602
tristate "ALi USB m5602 Camera Driver"
- depends on VIDEO_V4L2 && USB_GSPCA
+ depends on VIDEO_DEV && USB_GSPCA
help
Say Y here if you want support for cameras based on the
ALi m5602 connected to various image sensors.
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index 2e8c3ef51ca3..608be0d64f94 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -794,7 +794,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
n = (sof - data) - (footer_length + sizeof pac_sof_marker);
if (n < 0) {
gspca_dev->image_len += n;
- n = 0;
} else {
gspca_frame_add(gspca_dev, INTER_PACKET, data, n);
}
diff --git a/drivers/media/usb/hackrf/Kconfig b/drivers/media/usb/hackrf/Kconfig
index 2267cebfdecb..1cf9b4d3a514 100644
--- a/drivers/media/usb/hackrf/Kconfig
+++ b/drivers/media/usb/hackrf/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config USB_HACKRF
tristate "HackRF"
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
select VIDEOBUF2_VMALLOC
help
This is a video4linux2 driver for HackRF SDR device.
diff --git a/drivers/media/usb/hdpvr/Kconfig b/drivers/media/usb/hdpvr/Kconfig
index 617400b27314..ee45a89aa607 100644
--- a/drivers/media/usb/hdpvr/Kconfig
+++ b/drivers/media/usb/hdpvr/Kconfig
@@ -2,7 +2,7 @@
config VIDEO_HDPVR
tristate "Hauppauge HD PVR support"
- depends on VIDEO_DEV && VIDEO_V4L2
+ depends on VIDEO_DEV
help
This is a video4linux driver for Hauppauge's HD PVR USB device.
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 563128d11731..60e57e0f1927 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -308,7 +308,6 @@ static int hdpvr_start_streaming(struct hdpvr_device *dev)
dev->status = STATUS_STREAMING;
- INIT_WORK(&dev->worker, hdpvr_transmit_buffers);
schedule_work(&dev->worker);
v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev,
@@ -1165,6 +1164,9 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent,
bool ac3 = dev->flags & HDPVR_FLAG_AC3_CAP;
int res;
+	/* Initialize dev->worker once, before the device node can be used */
+ INIT_WORK(&dev->worker, hdpvr_transmit_buffers);
+
dev->cur_std = V4L2_STD_525_60;
dev->width = 720;
dev->height = 480;
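Both hdpvr hunks above enforce one rule: INIT_WORK() must run exactly once, before the work item can ever be scheduled, because re-initializing a work item that is already queued corrupts its pending state. A minimal sketch of the init-once pattern (names are illustrative, not the driver's):

	#include <linux/workqueue.h>

	struct demo_dev {
		struct work_struct worker;
	};

	static void demo_work_fn(struct work_struct *work)
	{
		/* drain/transmit buffers here */
	}

	static void demo_register(struct demo_dev *dev)
	{
		INIT_WORK(&dev->worker, demo_work_fn);	/* once, at setup */
	}

	static void demo_start_streaming(struct demo_dev *dev)
	{
		schedule_work(&dev->worker);		/* safe to repeat */
	}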
diff --git a/drivers/media/usb/msi2500/Kconfig b/drivers/media/usb/msi2500/Kconfig
index b403603bcc81..c2ded6482a5b 100644
--- a/drivers/media/usb/msi2500/Kconfig
+++ b/drivers/media/usb/msi2500/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config USB_MSI2500
tristate "Mirics MSi2500"
- depends on VIDEO_V4L2 && SPI
+ depends on VIDEO_DEV && SPI
select VIDEOBUF2_VMALLOC
select MEDIA_TUNER_MSI001
diff --git a/drivers/media/usb/pvrusb2/Kconfig b/drivers/media/usb/pvrusb2/Kconfig
index e6a4f730591b..f2b64e49c5a2 100644
--- a/drivers/media/usb/pvrusb2/Kconfig
+++ b/drivers/media/usb/pvrusb2/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_PVRUSB2
tristate "Hauppauge WinTV-PVR USB2 support"
- depends on VIDEO_V4L2 && I2C
+ depends on VIDEO_DEV && I2C
select VIDEO_TUNER
select VIDEO_TVEEPROM
select VIDEO_CX2341X
diff --git a/drivers/media/usb/pwc/Kconfig b/drivers/media/usb/pwc/Kconfig
index 7cebf6314a67..2078bd5ecf41 100644
--- a/drivers/media/usb/pwc/Kconfig
+++ b/drivers/media/usb/pwc/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config USB_PWC
tristate "USB Philips Cameras"
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
select VIDEOBUF2_VMALLOC
help
Say Y or M here if you want to use one of these Philips & OEM
diff --git a/drivers/media/usb/pwc/pwc-uncompress.c b/drivers/media/usb/pwc/pwc-uncompress.c
index 68bc3829c6b3..faf44cdeb268 100644
--- a/drivers/media/usb/pwc/pwc-uncompress.c
+++ b/drivers/media/usb/pwc/pwc-uncompress.c
@@ -41,7 +41,7 @@ int pwc_decompress(struct pwc_device *pdev, struct pwc_frame_buf *fbuf)
memcpy(raw_frame->cmd, pdev->cmd_buf, 4);
memcpy(raw_frame+1, yuv, pdev->frame_size);
vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0,
- pdev->frame_size + sizeof(struct pwc_raw_frame));
+ struct_size(raw_frame, rawframe, pdev->frame_size));
return 0;
}
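struct_size() from <linux/overflow.h> computes the size of a structure plus n trailing flexible-array elements and saturates to SIZE_MAX on arithmetic overflow, which is why it replaces the open-coded addition above. A sketch with a hypothetical packet type:

	#include <linux/overflow.h>

	struct raw_pkt {
		u32 cmd;
		u8 payload[];	/* flexible array member */
	};

	static size_t raw_pkt_bytes(const struct raw_pkt *pkt, size_t n)
	{
		/* sizeof(struct raw_pkt) + n * sizeof(u8), SIZE_MAX on overflow */
		return struct_size(pkt, payload, n);
	}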
diff --git a/drivers/media/usb/s2255/Kconfig b/drivers/media/usb/s2255/Kconfig
index e4a0c914d9c3..889593b21889 100644
--- a/drivers/media/usb/s2255/Kconfig
+++ b/drivers/media/usb/s2255/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config USB_S2255
tristate "USB Sensoray 2255 video capture device"
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
select VIDEOBUF2_VMALLOC
help
Say Y here if you want support for the Sensoray 2255 USB device.
diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c
index 4e1698f78818..ce717502ea4c 100644
--- a/drivers/media/usb/stk1160/stk1160-core.c
+++ b/drivers/media/usb/stk1160/stk1160-core.c
@@ -403,7 +403,7 @@ static void stk1160_disconnect(struct usb_interface *interface)
/* Here is the only place where isoc gets released */
stk1160_uninit_isoc(dev);
- stk1160_clear_queue(dev);
+ stk1160_clear_queue(dev, VB2_BUF_STATE_ERROR);
video_unregister_device(&dev->vdev);
v4l2_device_disconnect(&dev->v4l2_dev);
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index 6a4eb616d516..a1f785a5ffd8 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -232,7 +232,11 @@ static int stk1160_start_streaming(struct stk1160 *dev)
/* submit urbs and enables IRQ */
for (i = 0; i < dev->isoc_ctl.num_bufs; i++) {
- rc = usb_submit_urb(dev->isoc_ctl.urb[i], GFP_KERNEL);
+ struct stk1160_urb *stk_urb = &dev->isoc_ctl.urb_ctl[i];
+
+ dma_sync_sgtable_for_device(stk1160_get_dmadev(dev), stk_urb->sgt,
+ DMA_FROM_DEVICE);
+ rc = usb_submit_urb(dev->isoc_ctl.urb_ctl[i].urb, GFP_KERNEL);
if (rc) {
stk1160_err("cannot submit urb[%d] (%d)\n", i, rc);
goto out_uninit;
@@ -258,7 +262,7 @@ out_uninit:
stk1160_uninit_isoc(dev);
out_stop_hw:
usb_set_interface(dev->udev, 0, 0);
- stk1160_clear_queue(dev);
+ stk1160_clear_queue(dev, VB2_BUF_STATE_QUEUED);
mutex_unlock(&dev->v4l_lock);
@@ -306,7 +310,7 @@ static int stk1160_stop_streaming(struct stk1160 *dev)
stk1160_stop_hw(dev);
- stk1160_clear_queue(dev);
+ stk1160_clear_queue(dev, VB2_BUF_STATE_ERROR);
stk1160_dbg("streaming stopped\n");
@@ -745,7 +749,7 @@ static const struct video_device v4l_template = {
/********************************************************************/
/* Must be called with both v4l_lock and vb_queue_lock hold */
-void stk1160_clear_queue(struct stk1160 *dev)
+void stk1160_clear_queue(struct stk1160 *dev, enum vb2_buffer_state vb2_state)
{
struct stk1160_buffer *buf;
unsigned long flags;
@@ -756,7 +760,7 @@ void stk1160_clear_queue(struct stk1160 *dev)
buf = list_first_entry(&dev->avail_bufs,
struct stk1160_buffer, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, vb2_state);
stk1160_dbg("buffer [%p/%d] aborted\n",
buf, buf->vb.vb2_buf.index);
}
@@ -766,7 +770,7 @@ void stk1160_clear_queue(struct stk1160 *dev)
buf = dev->isoc_ctl.buf;
dev->isoc_ctl.buf = NULL;
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, vb2_state);
stk1160_dbg("buffer [%p/%d] aborted\n",
buf, buf->vb.vb2_buf.index);
}
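Threading the vb2 buffer state through stk1160_clear_queue() lets each caller report the correct outcome to userspace. A usage sketch of the convention the three call sites above follow:

	/* start_streaming() failed before any data moved: hand the
	 * buffers back as QUEUED so vb2 can reuse them cleanly. */
	stk1160_clear_queue(dev, VB2_BUF_STATE_QUEUED);

	/* stop_streaming() or disconnect: the payloads are invalid. */
	stk1160_clear_queue(dev, VB2_BUF_STATE_ERROR);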
diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
index 202b084f65a2..4e966f6bf608 100644
--- a/drivers/media/usb/stk1160/stk1160-video.c
+++ b/drivers/media/usb/stk1160/stk1160-video.c
@@ -295,7 +295,9 @@ static void stk1160_process_isoc(struct stk1160 *dev, struct urb *urb)
static void stk1160_isoc_irq(struct urb *urb)
{
int i, rc;
- struct stk1160 *dev = urb->context;
+ struct stk1160_urb *stk_urb = urb->context;
+ struct stk1160 *dev = stk_urb->dev;
+ struct device *dma_dev = stk1160_get_dmadev(dev);
switch (urb->status) {
case 0:
@@ -310,6 +312,10 @@ static void stk1160_isoc_irq(struct urb *urb)
return;
}
+ invalidate_kernel_vmap_range(stk_urb->transfer_buffer,
+ urb->transfer_buffer_length);
+ dma_sync_sgtable_for_cpu(dma_dev, stk_urb->sgt, DMA_FROM_DEVICE);
+
stk1160_process_isoc(dev, urb);
/* Reset urb buffers */
@@ -318,6 +324,7 @@ static void stk1160_isoc_irq(struct urb *urb)
urb->iso_frame_desc[i].actual_length = 0;
}
+ dma_sync_sgtable_for_device(dma_dev, stk_urb->sgt, DMA_FROM_DEVICE);
rc = usb_submit_urb(urb, GFP_ATOMIC);
if (rc)
stk1160_err("urb re-submit failed (%d)\n", rc);
@@ -347,49 +354,41 @@ void stk1160_cancel_isoc(struct stk1160 *dev)
* We don't care about a NULL pointer here, since
* usb_kill_urb() allows one.
*/
- usb_kill_urb(dev->isoc_ctl.urb[i]);
+ usb_kill_urb(dev->isoc_ctl.urb_ctl[i].urb);
}
stk1160_dbg("all urbs killed\n");
}
+static void stk_free_urb(struct stk1160 *dev, struct stk1160_urb *stk_urb)
+{
+ struct device *dma_dev = stk1160_get_dmadev(dev);
+
+ dma_vunmap_noncontiguous(dma_dev, stk_urb->transfer_buffer);
+ dma_free_noncontiguous(dma_dev, stk_urb->urb->transfer_buffer_length,
+ stk_urb->sgt, DMA_FROM_DEVICE);
+ usb_free_urb(stk_urb->urb);
+
+ stk_urb->transfer_buffer = NULL;
+ stk_urb->sgt = NULL;
+ stk_urb->urb = NULL;
+ stk_urb->dev = NULL;
+ stk_urb->dma = 0;
+}
+
/*
* Releases the urbs and transfer buffers
* Obviously, the associated urb must be killed before releasing it.
*/
void stk1160_free_isoc(struct stk1160 *dev)
{
- struct urb *urb;
int i, num_bufs = dev->isoc_ctl.num_bufs;
stk1160_dbg("freeing %d urb buffers...\n", num_bufs);
- for (i = 0; i < num_bufs; i++) {
-
- urb = dev->isoc_ctl.urb[i];
- if (urb) {
-
- if (dev->isoc_ctl.transfer_buffer[i]) {
-#ifndef CONFIG_DMA_NONCOHERENT
- usb_free_coherent(dev->udev,
- urb->transfer_buffer_length,
- dev->isoc_ctl.transfer_buffer[i],
- urb->transfer_dma);
-#else
- kfree(dev->isoc_ctl.transfer_buffer[i]);
-#endif
- }
- usb_free_urb(urb);
- dev->isoc_ctl.urb[i] = NULL;
- }
- dev->isoc_ctl.transfer_buffer[i] = NULL;
- }
+ for (i = 0; i < num_bufs; i++)
+ stk_free_urb(dev, &dev->isoc_ctl.urb_ctl[i]);
- kfree(dev->isoc_ctl.urb);
- kfree(dev->isoc_ctl.transfer_buffer);
-
- dev->isoc_ctl.urb = NULL;
- dev->isoc_ctl.transfer_buffer = NULL;
dev->isoc_ctl.num_bufs = 0;
stk1160_dbg("all urb buffers freed\n");
@@ -405,6 +404,41 @@ void stk1160_uninit_isoc(struct stk1160 *dev)
stk1160_free_isoc(dev);
}
+static int stk1160_fill_urb(struct stk1160 *dev, struct stk1160_urb *stk_urb,
+ int sb_size, int max_packets)
+{
+ struct device *dma_dev = stk1160_get_dmadev(dev);
+
+ stk_urb->urb = usb_alloc_urb(max_packets, GFP_KERNEL);
+ if (!stk_urb->urb)
+ return -ENOMEM;
+ stk_urb->sgt = dma_alloc_noncontiguous(dma_dev, sb_size,
+ DMA_FROM_DEVICE, GFP_KERNEL, 0);
+
+ /*
+	 * If the buffer allocation fails, we bail out but still return 0,
+	 * since the driver is allowed to keep working with fewer buffers
+ */
+ if (!stk_urb->sgt)
+ goto free_urb;
+
+ stk_urb->transfer_buffer = dma_vmap_noncontiguous(dma_dev, sb_size,
+ stk_urb->sgt);
+ if (!stk_urb->transfer_buffer)
+ goto free_sgt;
+
+ stk_urb->dma = stk_urb->sgt->sgl->dma_address;
+ stk_urb->dev = dev;
+ return 0;
+free_sgt:
+ dma_free_noncontiguous(dma_dev, sb_size, stk_urb->sgt, DMA_FROM_DEVICE);
+ stk_urb->sgt = NULL;
+free_urb:
+ usb_free_urb(stk_urb->urb);
+ stk_urb->urb = NULL;
+
+ return 0;
+}
/*
* Allocate URBs
*/
@@ -412,6 +446,7 @@ int stk1160_alloc_isoc(struct stk1160 *dev)
{
struct urb *urb;
int i, j, k, sb_size, max_packets, num_bufs;
+ int ret;
/*
* It may be necessary to release isoc here,
@@ -429,62 +464,39 @@ int stk1160_alloc_isoc(struct stk1160 *dev)
dev->isoc_ctl.buf = NULL;
dev->isoc_ctl.max_pkt_size = dev->max_pkt_size;
- dev->isoc_ctl.urb = kcalloc(num_bufs, sizeof(void *), GFP_KERNEL);
- if (!dev->isoc_ctl.urb) {
- stk1160_err("out of memory for urb array\n");
- return -ENOMEM;
- }
-
- dev->isoc_ctl.transfer_buffer = kcalloc(num_bufs, sizeof(void *),
- GFP_KERNEL);
- if (!dev->isoc_ctl.transfer_buffer) {
- stk1160_err("out of memory for usb transfers\n");
- kfree(dev->isoc_ctl.urb);
- return -ENOMEM;
- }
/* allocate urbs and transfer buffers */
for (i = 0; i < num_bufs; i++) {
- urb = usb_alloc_urb(max_packets, GFP_KERNEL);
- if (!urb)
+ ret = stk1160_fill_urb(dev, &dev->isoc_ctl.urb_ctl[i],
+ sb_size, max_packets);
+ if (ret)
goto free_i_bufs;
- dev->isoc_ctl.urb[i] = urb;
-
-#ifndef CONFIG_DMA_NONCOHERENT
- dev->isoc_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->udev,
- sb_size, GFP_KERNEL, &urb->transfer_dma);
-#else
- dev->isoc_ctl.transfer_buffer[i] = kmalloc(sb_size, GFP_KERNEL);
-#endif
- if (!dev->isoc_ctl.transfer_buffer[i]) {
- stk1160_err("cannot alloc %d bytes for tx[%d] buffer\n",
- sb_size, i);
+ urb = dev->isoc_ctl.urb_ctl[i].urb;
+
+ if (!urb) {
/* Not enough transfer buffers, so just give up */
if (i < STK1160_MIN_BUFS)
goto free_i_bufs;
goto nomore_tx_bufs;
}
- memset(dev->isoc_ctl.transfer_buffer[i], 0, sb_size);
+ memset(dev->isoc_ctl.urb_ctl[i].transfer_buffer, 0, sb_size);
/*
* FIXME: Where can I get the endpoint?
*/
urb->dev = dev->udev;
urb->pipe = usb_rcvisocpipe(dev->udev, STK1160_EP_VIDEO);
- urb->transfer_buffer = dev->isoc_ctl.transfer_buffer[i];
+ urb->transfer_buffer = dev->isoc_ctl.urb_ctl[i].transfer_buffer;
urb->transfer_buffer_length = sb_size;
urb->complete = stk1160_isoc_irq;
- urb->context = dev;
+ urb->context = &dev->isoc_ctl.urb_ctl[i];
urb->interval = 1;
urb->start_frame = 0;
urb->number_of_packets = max_packets;
-#ifndef CONFIG_DMA_NONCOHERENT
urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
-#else
- urb->transfer_flags = URB_ISO_ASAP;
-#endif
+ urb->transfer_dma = dev->isoc_ctl.urb_ctl[i].dma;
k = 0;
for (j = 0; j < max_packets; j++) {
@@ -508,18 +520,16 @@ nomore_tx_bufs:
* enough to work fine, so we just store the allocated
* count and keep going, fingers crossed!
*/
- usb_free_urb(dev->isoc_ctl.urb[i]);
- dev->isoc_ctl.urb[i] = NULL;
- stk1160_warn("%d urbs allocated. Trying to continue...\n", i - 1);
+ stk1160_warn("%d urbs allocated. Trying to continue...\n", i);
- dev->isoc_ctl.num_bufs = i - 1;
+ dev->isoc_ctl.num_bufs = i;
return 0;
free_i_bufs:
/* Save the allocated buffers so far, so we can properly free them */
- dev->isoc_ctl.num_bufs = i+1;
+ dev->isoc_ctl.num_bufs = i;
stk1160_free_isoc(dev);
return -ENOMEM;
}
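The rework above trades usb_alloc_coherent()/kmalloc() for the non-contiguous DMA API, which requires explicit ownership hand-offs around every transfer. A condensed sketch of the lifecycle (error handling trimmed; dma_dev stands for the host controller's DMA-capable device):

	#include <linux/dma-mapping.h>
	#include <linux/highmem.h>

	struct sg_table *sgt;
	void *vaddr;

	sgt = dma_alloc_noncontiguous(dma_dev, size, DMA_FROM_DEVICE,
				      GFP_KERNEL, 0);
	vaddr = dma_vmap_noncontiguous(dma_dev, size, sgt); /* CPU view */

	/* before handing the buffer to the hardware */
	dma_sync_sgtable_for_device(dma_dev, sgt, DMA_FROM_DEVICE);

	/* after the hardware is done, before the CPU parses the data */
	invalidate_kernel_vmap_range(vaddr, size);
	dma_sync_sgtable_for_cpu(dma_dev, sgt, DMA_FROM_DEVICE);

	dma_vunmap_noncontiguous(dma_dev, vaddr);
	dma_free_noncontiguous(dma_dev, size, sgt, DMA_FROM_DEVICE);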
diff --git a/drivers/media/usb/stk1160/stk1160.h b/drivers/media/usb/stk1160/stk1160.h
index a31ea1c80f25..7b498d14ed7a 100644
--- a/drivers/media/usb/stk1160/stk1160.h
+++ b/drivers/media/usb/stk1160/stk1160.h
@@ -16,6 +16,8 @@
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#define STK1160_VERSION "0.9.5"
#define STK1160_VERSION_NUM 0x000905
@@ -84,6 +86,14 @@ struct stk1160_buffer {
unsigned int pos; /* current pos inside buffer */
};
+struct stk1160_urb {
+ struct urb *urb;
+ char *transfer_buffer;
+ struct sg_table *sgt;
+ struct stk1160 *dev;
+ dma_addr_t dma;
+};
+
struct stk1160_isoc_ctl {
/* max packet size of isoc transaction */
int max_pkt_size;
@@ -91,11 +101,7 @@ struct stk1160_isoc_ctl {
/* number of allocated urbs */
int num_bufs;
- /* urb for isoc transfers */
- struct urb **urb;
-
- /* transfer buffers for isoc transfer */
- char **transfer_buffer;
+ struct stk1160_urb urb_ctl[STK1160_NUM_BUFS];
/* current buffer */
struct stk1160_buffer *buf;
@@ -166,7 +172,7 @@ struct regval {
int stk1160_vb2_setup(struct stk1160 *dev);
int stk1160_video_register(struct stk1160 *dev);
void stk1160_video_unregister(struct stk1160 *dev);
-void stk1160_clear_queue(struct stk1160 *dev);
+void stk1160_clear_queue(struct stk1160 *dev, enum vb2_buffer_state vb2_state);
/* Provided by stk1160-video.c */
int stk1160_alloc_isoc(struct stk1160 *dev);
@@ -189,3 +195,8 @@ void stk1160_select_input(struct stk1160 *dev);
/* Provided by stk1160-ac97.c */
void stk1160_ac97_setup(struct stk1160 *dev);
+
+static inline struct device *stk1160_get_dmadev(struct stk1160 *dev)
+{
+ return bus_to_hcd(dev->udev->bus)->self.sysdev;
+}
diff --git a/drivers/media/usb/stkwebcam/Kconfig b/drivers/media/usb/stkwebcam/Kconfig
index 775a5151539c..d94d023f1aa0 100644
--- a/drivers/media/usb/stkwebcam/Kconfig
+++ b/drivers/media/usb/stkwebcam/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config USB_STKWEBCAM
tristate "USB Syntek DC1125 Camera support"
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
help
Say Y here if you want to use this type of camera.
Supported devices are typically found in some Asus laptops,
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 9f445e6ab5fa..5b822214ccc5 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -114,6 +114,13 @@ static const struct dmi_system_id stk_upside_down_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "A6VM")
}
},
+ {
+ .ident = "ASUS A6JC",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "A6JC")
+ }
+ },
{}
};
diff --git a/drivers/media/usb/tm6000/tm6000-cards.c b/drivers/media/usb/tm6000/tm6000-cards.c
index 5358cd8c4603..98f4a63adc2a 100644
--- a/drivers/media/usb/tm6000/tm6000-cards.c
+++ b/drivers/media/usb/tm6000/tm6000-cards.c
@@ -17,7 +17,7 @@
#include "tm6000.h"
#include "tm6000-regs.h"
-#include "tuner-xc2028.h"
+#include "xc2028.h"
#include "xc5000.h"
#define TM6000_BOARD_UNKNOWN 0
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index 4990fa886d7a..8c2725e4105b 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -16,7 +16,7 @@
#include <media/tuner.h>
-#include "tuner-xc2028.h"
+#include "xc2028.h"
#include "xc5000.h"
MODULE_DESCRIPTION("DVB driver extension module for tm5600/6000/6010 based TV cards");
diff --git a/drivers/media/usb/tm6000/tm6000-i2c.c b/drivers/media/usb/tm6000/tm6000-i2c.c
index b37782d6f79c..7554b93b82e6 100644
--- a/drivers/media/usb/tm6000/tm6000-i2c.c
+++ b/drivers/media/usb/tm6000/tm6000-i2c.c
@@ -15,7 +15,7 @@
#include "tm6000-regs.h"
#include <media/v4l2-common.h>
#include <media/tuner.h>
-#include "tuner-xc2028.h"
+#include "xc2028.h"
/* ----------------------------------------------------------- */
diff --git a/drivers/media/usb/usbtv/Kconfig b/drivers/media/usb/usbtv/Kconfig
index 84799c7203d3..578a0e693f8b 100644
--- a/drivers/media/usb/usbtv/Kconfig
+++ b/drivers/media/usb/usbtv/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_USBTV
tristate "USBTV007 video capture support"
- depends on VIDEO_V4L2 && SND
+ depends on VIDEO_DEV && SND
select SND_PCM
select VIDEOBUF2_VMALLOC
diff --git a/drivers/media/usb/uvc/Kconfig b/drivers/media/usb/uvc/Kconfig
index 4c2f4a3216f2..ca51ee8e45f3 100644
--- a/drivers/media/usb/uvc/Kconfig
+++ b/drivers/media/usb/uvc/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config USB_VIDEO_CLASS
tristate "USB Video Class (UVC)"
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
select VIDEOBUF2_VMALLOC
help
Support for the USB Video Class (UVC). Currently only video
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 5f394d4efc21..dda0f0aa78b8 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -2845,6 +2845,15 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceProtocol = 0,
.driver_info = UVC_INFO_QUIRK(UVC_QUIRK_PROBE_MINMAX
| UVC_QUIRK_BUILTIN_ISIGHT) },
+ /* Apple FaceTime HD Camera (Built-In) */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x05ac,
+ .idProduct = 0x8514,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = (kernel_ulong_t)&uvc_quirk_probe_def },
/* Apple Built-In iSight via iBridge */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/usb/zr364xx/Kconfig b/drivers/media/usb/zr364xx/Kconfig
index 49b4257487bb..a9fb02566c4b 100644
--- a/drivers/media/usb/zr364xx/Kconfig
+++ b/drivers/media/usb/zr364xx/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config USB_ZR364XX
tristate "USB ZR364XX Camera support"
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
select VIDEOBUF_GEN
select VIDEOBUF_VMALLOC
help
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 6ee75c6c820e..1be9a2cc947a 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -3,17 +3,9 @@
# Generic video config states
#
-# Enable the V4L2 core and API
-config VIDEO_V4L2
- tristate
- depends on (I2C || I2C=n) && VIDEO_DEV
- select RATIONAL
- select VIDEOBUF2_V4L2 if VIDEOBUF2_CORE
- default (I2C || I2C=n) && VIDEO_DEV
-
config VIDEO_V4L2_I2C
bool
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
default y
config VIDEO_V4L2_SUBDEV_API
@@ -64,7 +56,7 @@ config V4L2_MEM2MEM_DEV
# Used by LED subsystem flash drivers
config V4L2_FLASH_LED_CLASS
tristate "V4L2 flash API for LED flash class devices"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_DEV && VIDEO_V4L2_SUBDEV_API
depends on LEDS_CLASS_FLASH
select V4L2_ASYNC
help
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index 83fac5c746f5..41d91bd10cf2 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -3,37 +3,39 @@
# Makefile for the V4L2 core
#
+ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
+ccflags-y += -I$(srctree)/drivers/media/tuners
+
tuner-objs := tuner-core.o
videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \
v4l2-event.o v4l2-subdev.o v4l2-common.o \
v4l2-ctrls-core.o v4l2-ctrls-api.o \
v4l2-ctrls-request.o v4l2-ctrls-defs.o
+
+# Please keep it alphabetically sorted by Kconfig name
+# (e.g. LC_ALL=C sort Makefile)
videodev-$(CONFIG_COMPAT) += v4l2-compat-ioctl32.o
-videodev-$(CONFIG_TRACEPOINTS) += v4l2-trace.o
videodev-$(CONFIG_MEDIA_CONTROLLER) += v4l2-mc.o
videodev-$(CONFIG_SPI) += v4l2-spi.o
+videodev-$(CONFIG_TRACEPOINTS) += v4l2-trace.o
videodev-$(CONFIG_VIDEO_V4L2_I2C) += v4l2-i2c.o
-obj-$(CONFIG_VIDEO_V4L2) += videodev.o
-obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o
-obj-$(CONFIG_V4L2_ASYNC) += v4l2-async.o
-obj-$(CONFIG_VIDEO_V4L2) += v4l2-dv-timings.o
-
-obj-$(CONFIG_VIDEO_TUNER) += tuner.o
-
-obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o
-obj-$(CONFIG_V4L2_H264) += v4l2-h264.o
-obj-$(CONFIG_V4L2_VP9) += v4l2-vp9.o
+# Please keep it alphabetically sorted by Kconfig name
+# (e.g. LC_ALL=C sort Makefile)
+obj-$(CONFIG_V4L2_ASYNC) += v4l2-async.o
obj-$(CONFIG_V4L2_FLASH_LED_CLASS) += v4l2-flash-led-class.o
-
+obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o
+obj-$(CONFIG_V4L2_H264) += v4l2-h264.o
obj-$(CONFIG_V4L2_JPEG_HELPER) += v4l2-jpeg.o
+obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o
+obj-$(CONFIG_V4L2_VP9) += v4l2-vp9.o
-obj-$(CONFIG_VIDEOBUF_GEN) += videobuf-core.o
-obj-$(CONFIG_VIDEOBUF_DMA_SG) += videobuf-dma-sg.o
obj-$(CONFIG_VIDEOBUF_DMA_CONTIG) += videobuf-dma-contig.o
+obj-$(CONFIG_VIDEOBUF_DMA_SG) += videobuf-dma-sg.o
+obj-$(CONFIG_VIDEOBUF_GEN) += videobuf-core.o
obj-$(CONFIG_VIDEOBUF_VMALLOC) += videobuf-vmalloc.o
-ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
-ccflags-y += -I$(srctree)/drivers/media/tuners
+obj-$(CONFIG_VIDEO_TUNER) += tuner.o
+obj-$(CONFIG_VIDEO_DEV) += v4l2-dv-timings.o videodev.o
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index 12d1e0c33c3c..ad9224a18853 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -35,7 +35,7 @@
#include "tda8290.h"
#include "tea5761.h"
#include "tea5767.h"
-#include "tuner-xc2028.h"
+#include "xc2028.h"
#include "tuner-simple.h"
#include "tda9887.h"
#include "xc5000.h"
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
index 54abe5245dcc..8968cec8454e 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
@@ -112,7 +112,9 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture;
struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quant;
struct v4l2_ctrl_vp8_frame *p_vp8_frame;
+ struct v4l2_ctrl_vp9_frame *p_vp9_frame;
struct v4l2_ctrl_fwht_params *p_fwht_params;
+ struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix;
void *p = ptr.p + idx * ctrl->elem_size;
if (ctrl->p_def.p_const)
@@ -152,6 +154,13 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
p_vp8_frame = p;
p_vp8_frame->num_dct_parts = 1;
break;
+ case V4L2_CTRL_TYPE_VP9_FRAME:
+ p_vp9_frame = p;
+ p_vp9_frame->profile = 0;
+ p_vp9_frame->bit_depth = 8;
+ p_vp9_frame->flags |= V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING |
+ V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING;
+ break;
case V4L2_CTRL_TYPE_FWHT_PARAMS:
p_fwht_params = p;
p_fwht_params->version = V4L2_FWHT_VERSION;
@@ -160,6 +169,15 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
p_fwht_params->flags = V4L2_FWHT_FL_PIXENC_YUV |
(2 << V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET);
break;
+ case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
+ p_h264_scaling_matrix = p;
+ /*
+ * The default (flat) H.264 scaling matrix, used when none is
+ * specified in the bitstream; this follows formulas
+ * (7-8) and (7-9) of the specification.
+ */
+ memset(p_h264_scaling_matrix, 16, sizeof(*p_h264_scaling_matrix));
+ break;
}
}
@@ -382,7 +400,7 @@ validate_vp9_seg_params(struct v4l2_vp9_segmentation *seg)
}
for (i = 0; i < ARRAY_SIZE(seg->feature_data); i++) {
- const int range[] = { 255, 63, 3, 0 };
+ static const int range[] = { 255, 63, 3, 0 };
for (j = 0; j < ARRAY_SIZE(seg->feature_data[j]); j++) {
if (seg->feature_data[i][j] < -range[j] ||
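The new H.264 default above works because v4l2_ctrl_h264_scaling_matrix contains nothing but byte-sized coefficients, so a single memset() yields the flat matrix the spec prescribes. A sketch:

	struct v4l2_ctrl_h264_scaling_matrix m;

	/* Every scaling_list_4x4[][16] and scaling_list_8x8[][64] entry
	 * becomes 16, the flat default of Rec. ITU-T H.264 (7-8)/(7-9). */
	memset(&m, 16, sizeof(m));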
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index d03ace324db0..d00237ee4cae 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -511,7 +511,7 @@ static int get_index(struct video_device *vdev)
for (i = 0; i < VIDEO_NUM_DEVICES; i++) {
if (video_devices[i] != NULL &&
video_devices[i]->v4l2_dev == vdev->v4l2_dev) {
- set_bit(video_devices[i]->index, used);
+ __set_bit(video_devices[i]->index, used);
}
}
@@ -519,7 +519,7 @@ static int get_index(struct video_device *vdev)
}
#define SET_VALID_IOCTL(ops, cmd, op) \
- do { if ((ops)->op) set_bit(_IOC_NR(cmd), valid_ioctls); } while (0)
+ do { if ((ops)->op) __set_bit(_IOC_NR(cmd), valid_ioctls); } while (0)
/* This determines which ioctls are actually implemented in the driver.
It's a one-time thing which simplifies video_ioctl2 as it can just do
@@ -562,73 +562,73 @@ static void determine_valid_ioctls(struct video_device *vdev)
/* vfl_type and vfl_dir independent ioctls */
SET_VALID_IOCTL(ops, VIDIOC_QUERYCAP, vidioc_querycap);
- set_bit(_IOC_NR(VIDIOC_G_PRIORITY), valid_ioctls);
- set_bit(_IOC_NR(VIDIOC_S_PRIORITY), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_G_PRIORITY), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_S_PRIORITY), valid_ioctls);
/* Note: the control handler can also be passed through the filehandle,
and that can't be tested here. If the bit for these control ioctls
is set, then the ioctl is valid. But if it is 0, then it can still
be valid if the filehandle passed the control handler. */
if (vdev->ctrl_handler || ops->vidioc_queryctrl)
- set_bit(_IOC_NR(VIDIOC_QUERYCTRL), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_QUERYCTRL), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_query_ext_ctrl)
- set_bit(_IOC_NR(VIDIOC_QUERY_EXT_CTRL), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_QUERY_EXT_CTRL), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_g_ctrl || ops->vidioc_g_ext_ctrls)
- set_bit(_IOC_NR(VIDIOC_G_CTRL), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_G_CTRL), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_s_ctrl || ops->vidioc_s_ext_ctrls)
- set_bit(_IOC_NR(VIDIOC_S_CTRL), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_S_CTRL), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_g_ext_ctrls)
- set_bit(_IOC_NR(VIDIOC_G_EXT_CTRLS), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_G_EXT_CTRLS), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_s_ext_ctrls)
- set_bit(_IOC_NR(VIDIOC_S_EXT_CTRLS), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_S_EXT_CTRLS), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_try_ext_ctrls)
- set_bit(_IOC_NR(VIDIOC_TRY_EXT_CTRLS), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_TRY_EXT_CTRLS), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_querymenu)
- set_bit(_IOC_NR(VIDIOC_QUERYMENU), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_QUERYMENU), valid_ioctls);
if (!is_tch) {
SET_VALID_IOCTL(ops, VIDIOC_G_FREQUENCY, vidioc_g_frequency);
SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency);
}
SET_VALID_IOCTL(ops, VIDIOC_LOG_STATUS, vidioc_log_status);
#ifdef CONFIG_VIDEO_ADV_DEBUG
- set_bit(_IOC_NR(VIDIOC_DBG_G_CHIP_INFO), valid_ioctls);
- set_bit(_IOC_NR(VIDIOC_DBG_G_REGISTER), valid_ioctls);
- set_bit(_IOC_NR(VIDIOC_DBG_S_REGISTER), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_DBG_G_CHIP_INFO), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_DBG_G_REGISTER), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_DBG_S_REGISTER), valid_ioctls);
#endif
/* yes, really vidioc_subscribe_event */
SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event);
SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event);
SET_VALID_IOCTL(ops, VIDIOC_UNSUBSCRIBE_EVENT, vidioc_unsubscribe_event);
if (ops->vidioc_enum_freq_bands || ops->vidioc_g_tuner || ops->vidioc_g_modulator)
- set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls);
if (is_vid) {
/* video specific ioctls */
if ((is_rx && (ops->vidioc_enum_fmt_vid_cap ||
ops->vidioc_enum_fmt_vid_overlay)) ||
(is_tx && ops->vidioc_enum_fmt_vid_out))
- set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_g_fmt_vid_cap ||
ops->vidioc_g_fmt_vid_cap_mplane ||
ops->vidioc_g_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_g_fmt_vid_out ||
ops->vidioc_g_fmt_vid_out_mplane ||
ops->vidioc_g_fmt_vid_out_overlay)))
- set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_s_fmt_vid_cap ||
ops->vidioc_s_fmt_vid_cap_mplane ||
ops->vidioc_s_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_s_fmt_vid_out ||
ops->vidioc_s_fmt_vid_out_mplane ||
ops->vidioc_s_fmt_vid_out_overlay)))
- set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_try_fmt_vid_cap ||
ops->vidioc_try_fmt_vid_cap_mplane ||
ops->vidioc_try_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_try_fmt_vid_out ||
ops->vidioc_try_fmt_vid_out_mplane ||
ops->vidioc_try_fmt_vid_out_overlay)))
- set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_OVERLAY, vidioc_overlay);
SET_VALID_IOCTL(ops, VIDIOC_G_FBUF, vidioc_g_fbuf);
SET_VALID_IOCTL(ops, VIDIOC_S_FBUF, vidioc_s_fbuf);
@@ -642,11 +642,11 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes);
SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals);
if (ops->vidioc_g_selection) {
- set_bit(_IOC_NR(VIDIOC_G_CROP), valid_ioctls);
- set_bit(_IOC_NR(VIDIOC_CROPCAP), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_G_CROP), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_CROPCAP), valid_ioctls);
}
if (ops->vidioc_s_selection)
- set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_G_SELECTION, vidioc_g_selection);
SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection);
}
@@ -669,17 +669,17 @@ static void determine_valid_ioctls(struct video_device *vdev)
ops->vidioc_g_fmt_sliced_vbi_cap)) ||
(is_tx && (ops->vidioc_g_fmt_vbi_out ||
ops->vidioc_g_fmt_sliced_vbi_out)))
- set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_s_fmt_vbi_cap ||
ops->vidioc_s_fmt_sliced_vbi_cap)) ||
(is_tx && (ops->vidioc_s_fmt_vbi_out ||
ops->vidioc_s_fmt_sliced_vbi_out)))
- set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_try_fmt_vbi_cap ||
ops->vidioc_try_fmt_sliced_vbi_cap)) ||
(is_tx && (ops->vidioc_try_fmt_vbi_out ||
ops->vidioc_try_fmt_sliced_vbi_out)))
- set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_G_SLICED_VBI_CAP, vidioc_g_sliced_vbi_cap);
} else if (is_tch) {
/* touch specific ioctls */
@@ -724,15 +724,15 @@ static void determine_valid_ioctls(struct video_device *vdev)
if (is_vid || is_vbi || is_meta) {
/* ioctls valid for video, vbi and metadata */
if (ops->vidioc_s_std)
- set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_S_STD, vidioc_s_std);
SET_VALID_IOCTL(ops, VIDIOC_G_STD, vidioc_g_std);
if (is_rx) {
SET_VALID_IOCTL(ops, VIDIOC_QUERYSTD, vidioc_querystd);
if (is_io_mc) {
- set_bit(_IOC_NR(VIDIOC_ENUMINPUT), valid_ioctls);
- set_bit(_IOC_NR(VIDIOC_G_INPUT), valid_ioctls);
- set_bit(_IOC_NR(VIDIOC_S_INPUT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_ENUMINPUT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_G_INPUT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_S_INPUT), valid_ioctls);
} else {
SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input);
@@ -746,9 +746,9 @@ static void determine_valid_ioctls(struct video_device *vdev)
}
if (is_tx) {
if (is_io_mc) {
- set_bit(_IOC_NR(VIDIOC_ENUMOUTPUT), valid_ioctls);
- set_bit(_IOC_NR(VIDIOC_G_OUTPUT), valid_ioctls);
- set_bit(_IOC_NR(VIDIOC_S_OUTPUT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_ENUMOUTPUT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_G_OUTPUT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_S_OUTPUT), valid_ioctls);
} else {
SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output);
SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output);
@@ -759,7 +759,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_S_AUDOUT, vidioc_s_audout);
}
if (ops->vidioc_g_parm || ops->vidioc_g_std)
- set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings);
SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings);
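All the set_bit() → __set_bit() conversions above exploit the same fact: determine_valid_ioctls() runs once, before the bitmap is visible to anyone else, so the atomic read-modify-write (and its locked bus cycle) buys nothing. A sketch of the distinction:

	#include <linux/bitops.h>

	DECLARE_BITMAP(valid, 64);

	__set_bit(3, valid);	/* non-atomic: fine while the map is private */
	set_bit(3, valid);	/* atomic RMW: only needed under concurrency */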
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 00457e1e93f6..afceb35e500c 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -119,11 +119,11 @@ static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
struct v4l2_fwnode_endpoint *vep,
enum v4l2_mbus_type bus_type)
{
- struct v4l2_fwnode_bus_mipi_csi2 *bus = &vep->bus.mipi_csi2;
+ struct v4l2_mbus_config_mipi_csi2 *bus = &vep->bus.mipi_csi2;
bool have_clk_lane = false, have_data_lanes = false,
have_lane_polarities = false;
unsigned int flags = 0, lanes_used = 0;
- u32 array[1 + V4L2_FWNODE_CSI2_MAX_DATA_LANES];
+ u32 array[1 + V4L2_MBUS_CSI2_MAX_DATA_LANES];
u32 clock_lane = 0;
unsigned int num_data_lanes = 0;
bool use_default_lane_mapping = false;
@@ -136,7 +136,7 @@ static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
use_default_lane_mapping = true;
num_data_lanes = min_t(u32, bus->num_data_lanes,
- V4L2_FWNODE_CSI2_MAX_DATA_LANES);
+ V4L2_MBUS_CSI2_MAX_DATA_LANES);
clock_lane = bus->clock_lane;
if (clock_lane)
@@ -155,7 +155,7 @@ static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
rval = fwnode_property_count_u32(fwnode, "data-lanes");
if (rval > 0) {
num_data_lanes =
- min_t(int, V4L2_FWNODE_CSI2_MAX_DATA_LANES, rval);
+ min_t(int, V4L2_MBUS_CSI2_MAX_DATA_LANES, rval);
fwnode_property_read_u32_array(fwnode, "data-lanes", array,
num_data_lanes);
@@ -207,13 +207,11 @@ static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
if (fwnode_property_present(fwnode, "clock-noncontinuous")) {
flags |= V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;
pr_debug("non-continuous clock\n");
- } else {
- flags |= V4L2_MBUS_CSI2_CONTINUOUS_CLOCK;
}
if (bus_type == V4L2_MBUS_CSI2_DPHY ||
- bus_type == V4L2_MBUS_CSI2_CPHY || lanes_used ||
- have_clk_lane || (flags & ~V4L2_MBUS_CSI2_CONTINUOUS_CLOCK)) {
+ bus_type == V4L2_MBUS_CSI2_CPHY ||
+ lanes_used || have_clk_lane || flags) {
/* Only D-PHY has a clock lane. */
unsigned int dfl_data_lane_index =
bus_type == V4L2_MBUS_CSI2_DPHY;
@@ -263,7 +261,7 @@ v4l2_fwnode_endpoint_parse_parallel_bus(struct fwnode_handle *fwnode,
struct v4l2_fwnode_endpoint *vep,
enum v4l2_mbus_type bus_type)
{
- struct v4l2_fwnode_bus_parallel *bus = &vep->bus.parallel;
+ struct v4l2_mbus_config_parallel *bus = &vep->bus.parallel;
unsigned int flags = 0;
u32 v;
@@ -369,7 +367,7 @@ v4l2_fwnode_endpoint_parse_csi1_bus(struct fwnode_handle *fwnode,
struct v4l2_fwnode_endpoint *vep,
enum v4l2_mbus_type bus_type)
{
- struct v4l2_fwnode_bus_mipi_csi1 *bus = &vep->bus.mipi_csi1;
+ struct v4l2_mbus_config_mipi_csi1 *bus = &vep->bus.mipi_csi1;
u32 v;
if (!fwnode_property_read_u32(fwnode, "clock-inv", &v)) {
@@ -896,25 +894,8 @@ static int v4l2_fwnode_reference_parse(struct device *dev,
int ret;
for (index = 0;
- !(ret = fwnode_property_get_reference_args(dev_fwnode(dev),
- prop, NULL, 0,
- index, &args));
- index++)
- fwnode_handle_put(args.fwnode);
-
- if (!index)
- return -ENOENT;
-
- /*
- * Note that right now both -ENODATA and -ENOENT may signal
- * out-of-bounds access. Return the error in cases other than that.
- */
- if (ret != -ENOENT && ret != -ENODATA)
- return ret;
-
- for (index = 0;
- !fwnode_property_get_reference_args(dev_fwnode(dev), prop, NULL,
- 0, index, &args);
+ !(ret = fwnode_property_get_reference_args(dev_fwnode(dev), prop,
+ NULL, 0, index, &args));
index++) {
struct v4l2_async_subdev *asd;
@@ -930,7 +911,12 @@ static int v4l2_fwnode_reference_parse(struct device *dev,
}
}
- return 0;
+ /* -ENOENT here means successful parsing */
+ if (ret != -ENOENT)
+ return ret;
+
+ /* Return -ENOENT if no references were found */
+ return index ? 0 : -ENOENT;
}
/*
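The fwnode rework above collapses two passes into one loop and then sorts the exit reason: -ENOENT simply means the loop walked off the end of the list, anything else is a hard failure, and an empty list is still reported as -ENOENT. A sketch of the pattern (get_reference() and consume() are hypothetical stand-ins):

	for (index = 0;
	     !(ret = get_reference(dev, prop, index, &args));
	     index++)
		consume(&args);

	if (ret != -ENOENT)
		return ret;		/* real failure mid-walk */

	return index ? 0 : -ENOENT;	/* an empty list is an error here */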
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 9ac557b8e146..96e307fe3aab 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -279,8 +279,8 @@ static void v4l_print_format(const void *arg, bool write_only)
const struct v4l2_vbi_format *vbi;
const struct v4l2_sliced_vbi_format *sliced;
const struct v4l2_window *win;
- const struct v4l2_sdr_format *sdr;
const struct v4l2_meta_format *meta;
+ u32 pixelformat;
u32 planes;
unsigned i;
@@ -299,8 +299,9 @@ static void v4l_print_format(const void *arg, bool write_only)
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
mp = &p->fmt.pix_mp;
+ pixelformat = mp->pixelformat;
pr_cont(", width=%u, height=%u, format=%p4cc, field=%s, colorspace=%d, num_planes=%u, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n",
- mp->width, mp->height, &mp->pixelformat,
+ mp->width, mp->height, &pixelformat,
prt_names(mp->field, v4l2_field_names),
mp->colorspace, mp->num_planes, mp->flags,
mp->ycbcr_enc, mp->quantization, mp->xfer_func);
@@ -343,14 +344,15 @@ static void v4l_print_format(const void *arg, bool write_only)
break;
case V4L2_BUF_TYPE_SDR_CAPTURE:
case V4L2_BUF_TYPE_SDR_OUTPUT:
- sdr = &p->fmt.sdr;
- pr_cont(", pixelformat=%p4cc\n", &sdr->pixelformat);
+ pixelformat = p->fmt.sdr.pixelformat;
+ pr_cont(", pixelformat=%p4cc\n", &pixelformat);
break;
case V4L2_BUF_TYPE_META_CAPTURE:
case V4L2_BUF_TYPE_META_OUTPUT:
meta = &p->fmt.meta;
+ pixelformat = meta->dataformat;
pr_cont(", dataformat=%p4cc, buffersize=%u\n",
- &meta->dataformat, meta->buffersize);
+ &pixelformat, meta->buffersize);
break;
}
}
@@ -1388,6 +1390,8 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_META_FMT_VIVID: descr = "Vivid Metadata"; break;
case V4L2_META_FMT_RK_ISP1_PARAMS: descr = "Rockchip ISP1 3A Parameters"; break;
case V4L2_META_FMT_RK_ISP1_STAT_3A: descr = "Rockchip ISP1 3A Statistics"; break;
+ case V4L2_PIX_FMT_NV12M_8L128: descr = "NV12M (8x128 Linear)"; break;
+ case V4L2_PIX_FMT_NV12M_10BE_8L128: descr = "10-bit NV12M (8x128 Linear, BE)"; break;
default:
/* Compressed formats */
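The pixelformat hunks earlier in this file share one motivation: %p4cc dereferences its argument as a u32, and some of these format structs (v4l2_sdr_format, for instance, is declared packed) can hand out unaligned member addresses. Copying into a local first guarantees an aligned, plainly addressable object. A sketch:

	u32 pixelformat = p->fmt.sdr.pixelformat;	/* aligned local copy */

	pr_cont(", pixelformat=%p4cc\n", &pixelformat);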
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index e2654b422334..675e22895ebe 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -585,19 +585,14 @@ int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
-int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
- struct v4l2_buffer *buf)
+static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,
+ struct v4l2_buffer *buf)
{
- struct vb2_queue *vq;
- int ret = 0;
- unsigned int i;
-
- vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- ret = vb2_querybuf(vq, buf);
-
/* Adjust MMAP memory offsets for the CAPTURE queue */
if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) {
if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
+ unsigned int i;
+
for (i = 0; i < buf->length; ++i)
buf->m.planes[i].m.mem_offset
+= DST_QUEUE_OFF_BASE;
@@ -605,8 +600,23 @@ int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
buf->m.offset += DST_QUEUE_OFF_BASE;
}
}
+}
- return ret;
+int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ ret = vb2_querybuf(vq, buf);
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
@@ -763,6 +773,9 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
if (ret)
return ret;
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
/*
* If the capture queue is streaming, but streaming hasn't started
* on the device, but was asked to stop, mark the previously queued
@@ -784,9 +797,17 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
struct vb2_queue *vq;
+ int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
+ ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
@@ -795,9 +816,17 @@ int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
{
struct video_device *vdev = video_devdata(file);
struct vb2_queue *vq;
+ int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
+ ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
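The m2m refactor above centralizes the CAPTURE-queue trick: MMAP offsets are biased by DST_QUEUE_OFF_BASE so that a single video fd can expose both queues and mmap() can tell them apart, and the bias must only be applied after the underlying vb2 call has succeeded. A sketch of the call shape now shared by querybuf, qbuf, dqbuf and prepare_buf:

	ret = vb2_dqbuf(vq, buf, nonblock);
	if (ret)
		return ret;	/* never adjust offsets of a failed op */

	v4l2_m2m_adjust_mem_offset(vq, buf);
	return 0;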
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 5d27a27cc2f2..30eb50407db5 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -318,13 +318,6 @@ static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
sd->ops->pad->get_mbus_config(sd, pad, config);
}
-static int call_set_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
- struct v4l2_mbus_config *config)
-{
- return check_pad(sd, pad) ? :
- sd->ops->pad->get_mbus_config(sd, pad, config);
-}
-
static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
.get_fmt = call_get_fmt,
.set_fmt = call_set_fmt,
@@ -338,7 +331,6 @@ static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
.dv_timings_cap = call_dv_timings_cap,
.enum_dv_timings = call_enum_dv_timings,
.get_mbus_config = call_get_mbus_config,
- .set_mbus_config = call_set_mbus_config,
};
static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = {
diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
index f43ba69fbb3e..14412002775d 100644
--- a/drivers/memory/brcmstb_dpfe.c
+++ b/drivers/memory/brcmstb_dpfe.c
@@ -424,7 +424,7 @@ static void __finalize_command(struct brcmstb_dpfe_priv *priv)
/*
* It depends on the API version which MBOX register we have to write to
- * to signal we are done.
+ * signal we are done.
*/
release_mbox = (priv->dpfe_api->version < 2)
? REG_TO_HOST_MBOX : REG_TO_DCPU_MBOX;
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index 762d0c0f0716..ecc78d6f89ed 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -1025,7 +1025,7 @@ static struct emif_data *__init_or_module get_device_details(
temp = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);
- if (!emif || !pd || !dev_info) {
+ if (!emif || !temp || !dev_info) {
dev_err(dev, "%s:%d: allocation error\n", __func__, __LINE__);
goto error;
}
@@ -1117,7 +1117,7 @@ static int __init_or_module emif_probe(struct platform_device *pdev)
{
struct emif_data *emif;
struct resource *res;
- int irq;
+ int irq, ret;
if (pdev->dev.of_node)
emif = of_get_memory_device_details(pdev->dev.of_node, &pdev->dev);
@@ -1147,7 +1147,9 @@ static int __init_or_module emif_probe(struct platform_device *pdev)
emif_onetime_settings(emif);
emif_debugfs_init(emif);
disable_and_clear_all_interrupts(emif);
- setup_interrupts(emif, irq);
+ ret = setup_interrupts(emif, irq);
+ if (ret)
+ goto error;
/* One-time actions taken on probing the first device */
if (!emif1) {
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index 75a8c38df939..2f6939da21cd 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -88,6 +88,7 @@ static int fsl_ifc_ctrl_remove(struct platform_device *dev)
{
struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(&dev->dev);
+ of_platform_depopulate(&dev->dev);
free_irq(ctrl->nand_irq, ctrl);
free_irq(ctrl->irq, ctrl);
@@ -285,8 +286,16 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
}
}
+ /* Legacy DTs may still use the "simple-bus" compatible */
+ ret = of_platform_populate(dev->dev.of_node, NULL, NULL,
+ &dev->dev);
+ if (ret)
+ goto err_free_nandirq;
+
return 0;
+err_free_nandirq:
+ free_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_ctrl_dev);
err_free_irq:
free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev);
err_unmap_nandirq:
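The fsl_ifc hunks above follow the standard probe/remove unwind discipline: each new acquisition gets a goto label that releases everything obtained after the failing step, in reverse order, and remove() depopulates child devices before freeing the IRQs they may still depend on. A minimal sketch (names illustrative):

	static int demo_probe(struct platform_device *pdev)
	{
		int ret;

		ret = request_irq(irq, demo_isr, 0, "demo", ctrl);
		if (ret)
			return ret;

		ret = of_platform_populate(pdev->dev.of_node, NULL, NULL,
					   &pdev->dev);
		if (ret)
			goto err_free_irq;	/* undo in reverse order */

		return 0;

	err_free_irq:
		free_irq(irq, ctrl);
		return ret;
	}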
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index e201e5976f34..86a3d34f418e 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -8,6 +8,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -32,6 +33,10 @@
#define SMI_DUMMY 0x444
/* SMI LARB */
+#define SMI_LARB_SLP_CON 0xc
+#define SLP_PROT_EN BIT(0)
+#define SLP_PROT_RDY BIT(16)
+
#define SMI_LARB_CMD_THRT_CON 0x24
#define SMI_LARB_THRT_RD_NU_LMT_MSK GENMASK(7, 4)
#define SMI_LARB_THRT_RD_NU_LMT (5 << 4)
@@ -81,6 +86,7 @@
#define MTK_SMI_FLAG_THRT_UPDATE BIT(0)
#define MTK_SMI_FLAG_SW_FLAG BIT(1)
+#define MTK_SMI_FLAG_SLEEP_CTL BIT(2)
#define MTK_SMI_CAPS(flags, _x) (!!((flags) & (_x)))
struct mtk_smi_reg_pair {
@@ -94,8 +100,6 @@ enum mtk_smi_type {
MTK_SMI_GEN2_SUB_COMM, /* gen2 smi sub common */
};
-#define MTK_SMI_CLK_NR_MAX 4
-
/* larbs: Require apb/smi clocks while gals is optional. */
static const char * const mtk_smi_larb_clks[] = {"apb", "smi", "gals"};
#define MTK_SMI_LARB_REQ_CLK_NR 2
@@ -106,6 +110,7 @@ static const char * const mtk_smi_larb_clks[] = {"apb", "smi", "gals"};
* sub common: Require apb/smi/gals0 clocks in has_gals case. Otherwise, only apb/smi are required.
*/
static const char * const mtk_smi_common_clks[] = {"apb", "smi", "gals0", "gals1"};
+#define MTK_SMI_CLK_NR_MAX ARRAY_SIZE(mtk_smi_common_clks)
#define MTK_SMI_COM_REQ_CLK_NR 2
#define MTK_SMI_COM_GALS_REQ_CLK_NR MTK_SMI_CLK_NR_MAX
#define MTK_SMI_SUB_COM_GALS_REQ_CLK_NR 3
@@ -149,20 +154,6 @@ struct mtk_smi_larb { /* larb: local arbiter */
unsigned char *bank;
};
-int mtk_smi_larb_get(struct device *larbdev)
-{
- int ret = pm_runtime_resume_and_get(larbdev);
-
- return (ret < 0) ? ret : 0;
-}
-EXPORT_SYMBOL_GPL(mtk_smi_larb_get);
-
-void mtk_smi_larb_put(struct device *larbdev)
-{
- pm_runtime_put_sync(larbdev);
-}
-EXPORT_SYMBOL_GPL(mtk_smi_larb_put);
-
static int
mtk_smi_larb_bind(struct device *dev, struct device *master, void *data)
{
@@ -349,13 +340,19 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8183 = {
/* IPU0 | IPU1 | CCU */
};
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8186 = {
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+ .flags_general = MTK_SMI_FLAG_SLEEP_CTL,
+};
+
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8192 = {
.config_port = mtk_smi_larb_config_port_gen2_general,
};
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8195 = {
.config_port = mtk_smi_larb_config_port_gen2_general,
- .flags_general = MTK_SMI_FLAG_THRT_UPDATE | MTK_SMI_FLAG_SW_FLAG,
+ .flags_general = MTK_SMI_FLAG_THRT_UPDATE | MTK_SMI_FLAG_SW_FLAG |
+ MTK_SMI_FLAG_SLEEP_CTL,
.ostd = mtk_smi_larb_mt8195_ostd,
};
@@ -366,11 +363,32 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = {
{.compatible = "mediatek,mt8167-smi-larb", .data = &mtk_smi_larb_mt8167},
{.compatible = "mediatek,mt8173-smi-larb", .data = &mtk_smi_larb_mt8173},
{.compatible = "mediatek,mt8183-smi-larb", .data = &mtk_smi_larb_mt8183},
+ {.compatible = "mediatek,mt8186-smi-larb", .data = &mtk_smi_larb_mt8186},
{.compatible = "mediatek,mt8192-smi-larb", .data = &mtk_smi_larb_mt8192},
{.compatible = "mediatek,mt8195-smi-larb", .data = &mtk_smi_larb_mt8195},
{}
};
+static int mtk_smi_larb_sleep_ctrl_enable(struct mtk_smi_larb *larb)
+{
+ int ret;
+ u32 tmp;
+
+ writel_relaxed(SLP_PROT_EN, larb->base + SMI_LARB_SLP_CON);
+ ret = readl_poll_timeout_atomic(larb->base + SMI_LARB_SLP_CON,
+ tmp, !!(tmp & SLP_PROT_RDY), 10, 1000);
+ if (ret) {
+ /* TODO: Reset this larb if it fails here. */
+ dev_err(larb->smi.dev, "sleep ctrl is not ready (0x%x).\n", tmp);
+ }
+ return ret;
+}
+
+static void mtk_smi_larb_sleep_ctrl_disable(struct mtk_smi_larb *larb)
+{
+ writel_relaxed(0, larb->base + SMI_LARB_SLP_CON);
+}
+
static int mtk_smi_device_link_common(struct device *dev, struct device **com_dev)
{
struct platform_device *smi_com_pdev;
@@ -480,9 +498,12 @@ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
int ret;
ret = clk_bulk_prepare_enable(larb->smi.clk_num, larb->smi.clks);
- if (ret < 0)
+ if (ret)
return ret;
+ if (MTK_SMI_CAPS(larb->larb_gen->flags_general, MTK_SMI_FLAG_SLEEP_CTL))
+ mtk_smi_larb_sleep_ctrl_disable(larb);
+
/* Configure the basic setting for this larb */
larb_gen->config_port(dev);
@@ -492,6 +513,13 @@ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
static int __maybe_unused mtk_smi_larb_suspend(struct device *dev)
{
struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+ int ret;
+
+ if (MTK_SMI_CAPS(larb->larb_gen->flags_general, MTK_SMI_FLAG_SLEEP_CTL)) {
+ ret = mtk_smi_larb_sleep_ctrl_enable(larb);
+ if (ret)
+ return ret;
+ }
clk_bulk_disable_unprepare(larb->smi.clk_num, larb->smi.clks);
return 0;
@@ -544,6 +572,12 @@ static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = {
F_MMU1_LARB(7),
};
+static const struct mtk_smi_common_plat mtk_smi_common_mt8186 = {
+ .type = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(4) | F_MMU1_LARB(7),
+};
+
static const struct mtk_smi_common_plat mtk_smi_common_mt8192 = {
.type = MTK_SMI_GEN2,
.has_gals = true,
@@ -578,6 +612,7 @@ static const struct of_device_id mtk_smi_common_of_ids[] = {
{.compatible = "mediatek,mt8167-smi-common", .data = &mtk_smi_common_gen2},
{.compatible = "mediatek,mt8173-smi-common", .data = &mtk_smi_common_gen2},
{.compatible = "mediatek,mt8183-smi-common", .data = &mtk_smi_common_mt8183},
+ {.compatible = "mediatek,mt8186-smi-common", .data = &mtk_smi_common_mt8186},
{.compatible = "mediatek,mt8192-smi-common", .data = &mtk_smi_common_mt8192},
{.compatible = "mediatek,mt8195-smi-common-vdo", .data = &mtk_smi_common_mt8195_vdo},
{.compatible = "mediatek,mt8195-smi-common-vpp", .data = &mtk_smi_common_mt8195_vpp},
diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c
index b94408954d85..dbdf87bc0b78 100644
--- a/drivers/memory/of_memory.c
+++ b/drivers/memory/of_memory.c
@@ -212,8 +212,10 @@ static int of_lpddr3_do_get_timings(struct device_node *np,
{
int ret;
- /* The 'reg' param required since DT has changed, used as 'max-freq' */
- ret = of_property_read_u32(np, "reg", &tim->max_freq);
+ ret = of_property_read_u32(np, "max-freq", &tim->max_freq);
+ if (ret)
+ /* Deprecated way of passing max-freq as 'reg' */
+ ret = of_property_read_u32(np, "reg", &tim->max_freq);
ret |= of_property_read_u32(np, "min-freq", &tim->min_freq);
ret |= of_property_read_u32(np, "tRFC", &tim->tRFC);
ret |= of_property_read_u32(np, "tRRD", &tim->tRRD);
@@ -316,14 +318,21 @@ const struct lpddr2_info
struct property *prop;
const char *cp;
int err;
-
- err = of_property_read_u32(np, "revision-id1", &info.revision_id1);
- if (err)
- info.revision_id1 = -ENOENT;
-
- err = of_property_read_u32(np, "revision-id2", &info.revision_id2);
- if (err)
- info.revision_id2 = -ENOENT;
+ u32 revision_id[2];
+
+ err = of_property_read_u32_array(np, "revision-id", revision_id, 2);
+ if (!err) {
+ info.revision_id1 = revision_id[0];
+ info.revision_id2 = revision_id[1];
+ } else {
+ err = of_property_read_u32(np, "revision-id1", &info.revision_id1);
+ if (err)
+ info.revision_id1 = -ENOENT;
+
+ err = of_property_read_u32(np, "revision-id2", &info.revision_id2);
+ if (err)
+ info.revision_id2 = -ENOENT;
+ }
err = of_property_read_u32(np, "io-width", &info.io_width);
if (err)
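The of_memory changes above implement the usual DT compatibility ladder: try the current binding first and only fall back to the deprecated one when it is absent. A sketch of the same shape:

	u32 revision_id[2];

	/* Prefer the combined "revision-id" array; fall back to the two
	 * deprecated scalar properties for older device trees. */
	if (of_property_read_u32_array(np, "revision-id", revision_id, 2)) {
		of_property_read_u32(np, "revision-id1", &revision_id[0]);
		of_property_read_u32(np, "revision-id2", &revision_id[1]);
	}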
diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig
index 7951764b4efe..3fe83d7c2bf8 100644
--- a/drivers/memory/tegra/Kconfig
+++ b/drivers/memory/tegra/Kconfig
@@ -28,6 +28,7 @@ config TEGRA30_EMC
default y
depends on ARCH_TEGRA_3x_SOC || COMPILE_TEST
select PM_OPP
+ select DDR
help
This driver is for the External Memory Controller (EMC) found on
Tegra30 chips. The EMC controls the external DRAM on the board.
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index 497b6edbf3ca..25ba3c5e4ad6 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -540,7 +540,7 @@ static int emc_read_lpddr_mode_register(struct tegra_emc *emc,
unsigned int register_addr,
unsigned int *register_data)
{
- u32 memory_dev = emem_dev + 1;
+ u32 memory_dev = emem_dev ? 1 : 2;
u32 val, mr_mask = 0xff;
int err;
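The one-line fix above changes how the target chip is encoded in the mode-register-read request. Judging from the matching tegra30 code added later in this series, DEV_SELECTN appears to be an active-low two-bit chip-select, so device 0 is addressed as 0b10 (2) and device 1 as 0b01 (1), not as emem_dev + 1. A hedged sketch of the assumed mapping:

#include <linux/types.h>

/*
 * Assumed encoding: DEV_SELECTN is a 2-bit active-low chip-select,
 * i.e. the selected device's bit is driven low.
 */
static inline u32 mrr_dev_select(unsigned int emem_dev)
{
	return emem_dev ? 1 : 2;	/* dev1 -> 0b01, dev0 -> 0b10 */
}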
diff --git a/drivers/memory/tegra/tegra210-emc-core.c b/drivers/memory/tegra/tegra210-emc-core.c
index 13584f9317a4..cbe1a7723514 100644
--- a/drivers/memory/tegra/tegra210-emc-core.c
+++ b/drivers/memory/tegra/tegra210-emc-core.c
@@ -711,7 +711,7 @@ static int tegra210_emc_cd_set_state(struct thermal_cooling_device *cd,
return 0;
}
-static struct thermal_cooling_device_ops tegra210_emc_cd_ops = {
+static const struct thermal_cooling_device_ops tegra210_emc_cd_ops = {
.get_max_state = tegra210_emc_cd_max_state,
.get_cur_state = tegra210_emc_cd_get_state,
.set_cur_state = tegra210_emc_cd_set_state,
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index 80f98d717e13..9ba2a9e5316b 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -9,6 +9,7 @@
* Copyright (C) 2019 GRATE-DRIVER project
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
@@ -31,11 +32,15 @@
#include <soc/tegra/common.h>
#include <soc/tegra/fuse.h>
+#include "../jedec_ddr.h"
+#include "../of_memory.h"
+
#include "mc.h"
#define EMC_INTSTATUS 0x000
#define EMC_INTMASK 0x004
#define EMC_DBG 0x008
+#define EMC_ADR_CFG 0x010
#define EMC_CFG 0x00c
#define EMC_REFCTRL 0x020
#define EMC_TIMING_CONTROL 0x028
@@ -81,6 +86,7 @@
#define EMC_EMRS 0x0d0
#define EMC_SELF_REF 0x0e0
#define EMC_MRW 0x0e8
+#define EMC_MRR 0x0ec
#define EMC_XM2DQSPADCTRL3 0x0f8
#define EMC_FBIO_SPARE 0x100
#define EMC_FBIO_CFG5 0x104
@@ -208,6 +214,13 @@
#define EMC_REFRESH_OVERFLOW_INT BIT(3)
#define EMC_CLKCHANGE_COMPLETE_INT BIT(4)
+#define EMC_MRR_DIVLD_INT BIT(5)
+
+#define EMC_MRR_DEV_SELECTN GENMASK(31, 30)
+#define EMC_MRR_MRR_MA GENMASK(23, 16)
+#define EMC_MRR_MRR_DATA GENMASK(15, 0)
+
+#define EMC_ADR_CFG_EMEM_NUMDEV BIT(0)
enum emc_dram_type {
DRAM_TYPE_DDR3,
@@ -378,6 +391,8 @@ struct tegra_emc {
/* protect shared rate-change code path */
struct mutex rate_lock;
+
+ bool mrr_error;
};
static int emc_seq_update_timing(struct tegra_emc *emc)
@@ -1008,12 +1023,18 @@ static int emc_load_timings_from_dt(struct tegra_emc *emc,
return 0;
}
-static struct device_node *emc_find_node_by_ram_code(struct device *dev)
+static struct device_node *emc_find_node_by_ram_code(struct tegra_emc *emc)
{
+ struct device *dev = emc->dev;
struct device_node *np;
u32 value, ram_code;
int err;
+ if (emc->mrr_error) {
+ dev_warn(dev, "memory timings skipped due to MRR error\n");
+ return NULL;
+ }
+
if (of_get_child_count(dev->of_node) == 0) {
dev_info_once(dev, "device-tree doesn't have memory timings\n");
return NULL;
@@ -1035,11 +1056,73 @@ static struct device_node *emc_find_node_by_ram_code(struct device *dev)
return NULL;
}
+static int emc_read_lpddr_mode_register(struct tegra_emc *emc,
+ unsigned int emem_dev,
+ unsigned int register_addr,
+ unsigned int *register_data)
+{
+ u32 memory_dev = emem_dev ? 1 : 2;
+ u32 val, mr_mask = 0xff;
+ int err;
+
+ /* clear data-valid interrupt status */
+ writel_relaxed(EMC_MRR_DIVLD_INT, emc->regs + EMC_INTSTATUS);
+
+ /* issue mode register read request */
+ val = FIELD_PREP(EMC_MRR_DEV_SELECTN, memory_dev);
+ val |= FIELD_PREP(EMC_MRR_MRR_MA, register_addr);
+
+ writel_relaxed(val, emc->regs + EMC_MRR);
+
+ /* wait for the LPDDR2 data-valid interrupt */
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, val,
+ val & EMC_MRR_DIVLD_INT,
+ 1, 100);
+ if (err) {
+ dev_err(emc->dev, "mode register %u read failed: %d\n",
+ register_addr, err);
+ emc->mrr_error = true;
+ return err;
+ }
+
+ /* read out mode register data */
+ val = readl_relaxed(emc->regs + EMC_MRR);
+ *register_data = FIELD_GET(EMC_MRR_MRR_DATA, val) & mr_mask;
+
+ return 0;
+}
+
+static void emc_read_lpddr_sdram_info(struct tegra_emc *emc,
+ unsigned int emem_dev)
+{
+ union lpddr2_basic_config4 basic_conf4;
+ unsigned int manufacturer_id;
+ unsigned int revision_id1;
+ unsigned int revision_id2;
+
+ /* these registers are standard for all LPDDR JEDEC memory chips */
+ emc_read_lpddr_mode_register(emc, emem_dev, 5, &manufacturer_id);
+ emc_read_lpddr_mode_register(emc, emem_dev, 6, &revision_id1);
+ emc_read_lpddr_mode_register(emc, emem_dev, 7, &revision_id2);
+ emc_read_lpddr_mode_register(emc, emem_dev, 8, &basic_conf4.value);
+
+ dev_info(emc->dev, "SDRAM[dev%u]: manufacturer: 0x%x (%s) rev1: 0x%x rev2: 0x%x prefetch: S%u density: %uMbit iowidth: %ubit\n",
+ emem_dev, manufacturer_id,
+ lpddr2_jedec_manufacturer(manufacturer_id),
+ revision_id1, revision_id2,
+ 4 >> basic_conf4.arch_type,
+ 64 << basic_conf4.density,
+ 32 >> basic_conf4.io_width);
+}
+
static int emc_setup_hw(struct tegra_emc *emc)
{
+ u32 fbio_cfg5, emc_cfg, emc_dbg, emc_adr_cfg;
u32 intmask = EMC_REFRESH_OVERFLOW_INT;
- u32 fbio_cfg5, emc_cfg, emc_dbg;
+ static bool print_sdram_info_once;
enum emc_dram_type dram_type;
+ const char *dram_type_str;
+ unsigned int emem_numdev;
fbio_cfg5 = readl_relaxed(emc->regs + EMC_FBIO_CFG5);
dram_type = fbio_cfg5 & EMC_FBIO_CFG5_DRAM_TYPE_MASK;
@@ -1076,6 +1159,34 @@ static int emc_setup_hw(struct tegra_emc *emc)
emc_dbg &= ~EMC_DBG_FORCE_UPDATE;
writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
+ switch (dram_type) {
+ case DRAM_TYPE_DDR1:
+ dram_type_str = "DDR1";
+ break;
+ case DRAM_TYPE_LPDDR2:
+ dram_type_str = "LPDDR2";
+ break;
+ case DRAM_TYPE_DDR2:
+ dram_type_str = "DDR2";
+ break;
+ case DRAM_TYPE_DDR3:
+ dram_type_str = "DDR3";
+ break;
+ }
+
+ emc_adr_cfg = readl_relaxed(emc->regs + EMC_ADR_CFG);
+ emem_numdev = FIELD_GET(EMC_ADR_CFG_EMEM_NUMDEV, emc_adr_cfg) + 1;
+
+ dev_info_once(emc->dev, "%u %s %s attached\n", emem_numdev,
+ dram_type_str, emem_numdev == 2 ? "devices" : "device");
+
+ if (dram_type == DRAM_TYPE_LPDDR2 && !print_sdram_info_once) {
+ while (emem_numdev--)
+ emc_read_lpddr_sdram_info(emc, emem_numdev);
+
+ print_sdram_info_once = true;
+ }
+
return 0;
}
@@ -1538,14 +1649,6 @@ static int tegra_emc_probe(struct platform_device *pdev)
emc->clk_nb.notifier_call = emc_clk_change_notify;
emc->dev = &pdev->dev;
- np = emc_find_node_by_ram_code(&pdev->dev);
- if (np) {
- err = emc_load_timings_from_dt(emc, np);
- of_node_put(np);
- if (err)
- return err;
- }
-
emc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(emc->regs))
return PTR_ERR(emc->regs);
@@ -1554,6 +1657,14 @@ static int tegra_emc_probe(struct platform_device *pdev)
if (err)
return err;
+ np = emc_find_node_by_ram_code(emc);
+ if (np) {
+ err = emc_load_timings_from_dt(emc, np);
+ of_node_put(np);
+ if (err)
+ return err;
+ }
+
err = platform_get_irq(pdev, 0);
if (err < 0)
return err;
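Besides the new functionality, the tegra30 MRR path is a compact example of the <linux/bitfield.h> idiom used throughout: declare fields with GENMASK(), compose register writes with FIELD_PREP(), and extract read data with FIELD_GET(), avoiding open-coded shift-and-mask arithmetic. A self-contained sketch with hypothetical field names:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define REG_DEV		GENMASK(31, 30)	/* hypothetical register layout */
#define REG_ADDR	GENMASK(23, 16)
#define REG_DATA	GENMASK(15, 0)

static u32 pack_request(u32 dev, u32 addr)
{
	/* FIELD_PREP() shifts each value into its field's position */
	return FIELD_PREP(REG_DEV, dev) | FIELD_PREP(REG_ADDR, addr);
}

static u16 unpack_data(u32 reg)
{
	/* FIELD_GET() masks the field and shifts it back down */
	return FIELD_GET(REG_DATA, reg);
}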
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 0cda6c6baefc..3993bdd4b519 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1943,22 +1943,6 @@ static void msb_io_work(struct work_struct *work)
static DEFINE_IDR(msb_disk_idr); /*set of used disk numbers */
static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
-static int msb_bd_open(struct block_device *bdev, fmode_t mode)
-{
- struct gendisk *disk = bdev->bd_disk;
- struct msb_data *msb = disk->private_data;
-
- dbg_verbose("block device open");
-
- mutex_lock(&msb_disk_lock);
-
- if (msb && msb->card)
- msb->usage_count++;
-
- mutex_unlock(&msb_disk_lock);
- return 0;
-}
-
static void msb_data_clear(struct msb_data *msb)
{
kfree(msb->boot_page);
@@ -1968,33 +1952,6 @@ static void msb_data_clear(struct msb_data *msb)
msb->card = NULL;
}
-static int msb_disk_release(struct gendisk *disk)
-{
- struct msb_data *msb = disk->private_data;
-
- dbg_verbose("block device release");
- mutex_lock(&msb_disk_lock);
-
- if (msb) {
- if (msb->usage_count)
- msb->usage_count--;
-
- if (!msb->usage_count) {
- disk->private_data = NULL;
- idr_remove(&msb_disk_idr, msb->disk_id);
- put_disk(disk);
- kfree(msb);
- }
- }
- mutex_unlock(&msb_disk_lock);
- return 0;
-}
-
-static void msb_bd_release(struct gendisk *disk, fmode_t mode)
-{
- msb_disk_release(disk);
-}
-
static int msb_bd_getgeo(struct block_device *bdev,
struct hd_geometry *geo)
{
@@ -2003,6 +1960,17 @@ static int msb_bd_getgeo(struct block_device *bdev,
return 0;
}
+static void msb_bd_free_disk(struct gendisk *disk)
+{
+ struct msb_data *msb = disk->private_data;
+
+ mutex_lock(&msb_disk_lock);
+ idr_remove(&msb_disk_idr, msb->disk_id);
+ mutex_unlock(&msb_disk_lock);
+
+ kfree(msb);
+}
+
static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -2096,10 +2064,9 @@ static void msb_start(struct memstick_dev *card)
}
static const struct block_device_operations msb_bdops = {
- .open = msb_bd_open,
- .release = msb_bd_release,
- .getgeo = msb_bd_getgeo,
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .getgeo = msb_bd_getgeo,
+ .free_disk = msb_bd_free_disk,
};
static const struct blk_mq_ops msb_mq_ops = {
@@ -2147,7 +2114,6 @@ static int msb_init_disk(struct memstick_dev *card)
set_capacity(msb->disk, capacity);
dbg("Set total disk size to %lu sectors", capacity);
- msb->usage_count = 1;
msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
INIT_WORK(&msb->io_work, msb_io_work);
sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
@@ -2229,7 +2195,7 @@ static void msb_remove(struct memstick_dev *card)
msb_data_clear(msb);
mutex_unlock(&msb_disk_lock);
- msb_disk_release(msb->disk);
+ put_disk(msb->disk);
memstick_set_drvdata(card, NULL);
}
diff --git a/drivers/memstick/core/ms_block.h b/drivers/memstick/core/ms_block.h
index 122e1a8a8bd5..7058f9aefeb9 100644
--- a/drivers/memstick/core/ms_block.h
+++ b/drivers/memstick/core/ms_block.h
@@ -143,7 +143,6 @@ struct ms_boot_page {
} __packed;
struct msb_data {
- unsigned int usage_count;
struct memstick_dev *card;
struct gendisk *disk;
struct request_queue *queue;
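The ms_block conversion replaces the hand-rolled usage_count with the block layer's own reference counting: a driver that implements ->free_disk() is called back exactly once, when the last reference to the gendisk is dropped, so the open()/release() bookkeeping and the manual idr/kfree dance become unnecessary. A minimal sketch of the resulting lifecycle (my_data is a stand-in for the driver state):

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/slab.h>

struct my_data {
	int disk_id;	/* stand-in for driver-private state */
};

/* Called by the block layer when the final gendisk reference is dropped. */
static void my_free_disk(struct gendisk *disk)
{
	struct my_data *data = disk->private_data;

	/* release IDs and memory here; no usage counting needed */
	kfree(data);
}

static const struct block_device_operations my_bdops = {
	.owner		= THIS_MODULE,
	.free_disk	= my_free_disk,
};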
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index c0450397b673..725ba74ded30 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -133,7 +133,6 @@ struct mspro_devinfo {
struct mspro_block_data {
struct memstick_dev *card;
- unsigned int usage_count;
unsigned int caps;
struct gendisk *disk;
struct request_queue *queue;
@@ -178,53 +177,16 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error);
/*** Block device ***/
-static int mspro_block_bd_open(struct block_device *bdev, fmode_t mode)
-{
- struct gendisk *disk = bdev->bd_disk;
- struct mspro_block_data *msb = disk->private_data;
- int rc = -ENXIO;
-
- mutex_lock(&mspro_block_disk_lock);
-
- if (msb && msb->card) {
- msb->usage_count++;
- if ((mode & FMODE_WRITE) && msb->read_only)
- rc = -EROFS;
- else
- rc = 0;
- }
-
- mutex_unlock(&mspro_block_disk_lock);
-
- return rc;
-}
-
-
-static void mspro_block_disk_release(struct gendisk *disk)
+static void mspro_block_bd_free_disk(struct gendisk *disk)
{
struct mspro_block_data *msb = disk->private_data;
int disk_id = MINOR(disk_devt(disk)) >> MSPRO_BLOCK_PART_SHIFT;
mutex_lock(&mspro_block_disk_lock);
-
- if (msb) {
- if (msb->usage_count)
- msb->usage_count--;
-
- if (!msb->usage_count) {
- kfree(msb);
- disk->private_data = NULL;
- idr_remove(&mspro_block_disk_idr, disk_id);
- put_disk(disk);
- }
- }
-
+ idr_remove(&mspro_block_disk_idr, disk_id);
mutex_unlock(&mspro_block_disk_lock);
-}
-static void mspro_block_bd_release(struct gendisk *disk, fmode_t mode)
-{
- mspro_block_disk_release(disk);
+ kfree(msb);
}
static int mspro_block_bd_getgeo(struct block_device *bdev,
@@ -240,10 +202,9 @@ static int mspro_block_bd_getgeo(struct block_device *bdev,
}
static const struct block_device_operations ms_block_bdops = {
- .open = mspro_block_bd_open,
- .release = mspro_block_bd_release,
- .getgeo = mspro_block_bd_getgeo,
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .getgeo = mspro_block_bd_getgeo,
+ .free_disk = mspro_block_bd_free_disk,
};
/*** Information ***/
@@ -1226,7 +1187,6 @@ static int mspro_block_init_disk(struct memstick_dev *card)
msb->disk->first_minor = disk_id << MSPRO_BLOCK_PART_SHIFT;
msb->disk->minors = 1 << MSPRO_BLOCK_PART_SHIFT;
msb->disk->fops = &ms_block_bdops;
- msb->usage_count = 1;
msb->disk->private_data = msb;
sprintf(msb->disk->disk_name, "mspblk%d", disk_id);
@@ -1239,6 +1199,9 @@ static int mspro_block_init_disk(struct memstick_dev *card)
set_capacity(msb->disk, capacity);
dev_dbg(&card->dev, "capacity set %ld\n", capacity);
+ if (msb->read_only)
+ set_disk_ro(msb->disk, true);
+
rc = device_add_disk(&card->dev, msb->disk, NULL);
if (rc)
goto out_cleanup_disk;
@@ -1341,7 +1304,7 @@ static void mspro_block_remove(struct memstick_dev *card)
mspro_block_data_clear(msb);
mutex_unlock(&mspro_block_disk_lock);
- mspro_block_disk_release(msb->disk);
+ put_disk(msb->disk);
memstick_set_drvdata(card, NULL);
}
diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
index 9fe06dda3782..03620c8efe34 100644
--- a/drivers/mfd/arizona-spi.c
+++ b/drivers/mfd/arizona-spi.c
@@ -206,13 +206,11 @@ static int arizona_spi_probe(struct spi_device *spi)
return arizona_dev_init(arizona);
}
-static int arizona_spi_remove(struct spi_device *spi)
+static void arizona_spi_remove(struct spi_device *spi)
{
struct arizona *arizona = spi_get_drvdata(spi);
arizona_dev_exit(arizona);
-
- return 0;
}
static const struct spi_device_id arizona_spi_ids[] = {
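arizona-spi is the first of a long run of drivers below converted for the SPI core change that made ->remove() return void; the core could not do anything useful with the status anyway, so drivers now simply clean up. A minimal sketch of a converted driver (names hypothetical):

#include <linux/module.h>
#include <linux/spi/spi.h>

static int my_probe(struct spi_device *spi)
{
	return 0;	/* allocate and register state here */
}

static void my_remove(struct spi_device *spi)
{
	/* tear down state; no status can be returned anymore */
}

static struct spi_driver my_driver = {
	.driver = { .name = "my-spi-dev" },
	.probe	= my_probe,
	.remove	= my_remove,
};
module_spi_driver(my_driver);
MODULE_LICENSE("GPL");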
diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c
index 5faf3766a5e2..b79a57b45c1e 100644
--- a/drivers/mfd/da9052-spi.c
+++ b/drivers/mfd/da9052-spi.c
@@ -55,12 +55,11 @@ static int da9052_spi_probe(struct spi_device *spi)
return da9052_device_init(da9052, id->driver_data);
}
-static int da9052_spi_remove(struct spi_device *spi)
+static void da9052_spi_remove(struct spi_device *spi)
{
struct da9052 *da9052 = spi_get_drvdata(spi);
da9052_device_exit(da9052);
- return 0;
}
static const struct spi_device_id da9052_spi_id[] = {
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 70fa18b04ad2..3d5ce18aa9ae 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -193,13 +193,11 @@ static void pcap_isr_work(struct work_struct *work)
ezx_pcap_write(pcap, PCAP_REG_MSR, isr | msr);
ezx_pcap_write(pcap, PCAP_REG_ISR, isr);
- local_irq_disable();
service = isr & ~msr;
for (irq = pcap->irq_base; service; service >>= 1, irq++) {
if (service & 1)
- generic_handle_irq(irq);
+ generic_handle_irq_safe(irq);
}
- local_irq_enable();
ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
} while (gpio_get_value(pdata->gpio));
}
@@ -392,7 +390,7 @@ static int pcap_add_subdev(struct pcap_chip *pcap,
return ret;
}
-static int ezx_pcap_remove(struct spi_device *spi)
+static void ezx_pcap_remove(struct spi_device *spi)
{
struct pcap_chip *pcap = spi_get_drvdata(spi);
unsigned long flags;
@@ -412,8 +410,6 @@ static int ezx_pcap_remove(struct spi_device *spi)
irq_set_chip_and_handler(i, NULL, NULL);
destroy_workqueue(pcap->workqueue);
-
- return 0;
}
static int ezx_pcap_probe(struct spi_device *spi)
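The ezx-pcap hunk above replaces an explicit local_irq_disable()/generic_handle_irq()/local_irq_enable() bracket with generic_handle_irq_safe(), which performs the raw irqsave internally and can therefore be called from hard-irq, threaded-irq, or process context alike; the same substitution appears in hi6421v600 below. A sketch of a demultiplexing handler in that style:

#include <linux/interrupt.h>
#include <linux/irq.h>

/* Hypothetical demux: fan a summary status word out to per-bit child irqs. */
static void demux_pending(unsigned long pending, unsigned int irq_base)
{
	unsigned int irq;

	for (irq = irq_base; pending; pending >>= 1, irq++)
		if (pending & 1)
			generic_handle_irq_safe(irq);	/* safe in any context */
}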
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index f10e53187f67..9ffab9aafd81 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -63,6 +63,8 @@
#define SPIBASE_BYT 0x54
#define SPIBASE_BYT_SZ 512
#define SPIBASE_BYT_EN BIT(1)
+#define BYT_BCR 0xfc
+#define BYT_BCR_WPD BIT(0)
#define SPIBASE_LPT 0x3800
#define SPIBASE_LPT_SZ 512
@@ -1084,12 +1086,57 @@ wdt_done:
return ret;
}
+static bool lpc_ich_byt_set_writeable(void __iomem *base, void *data)
+{
+ u32 val;
+
+ val = readl(base + BYT_BCR);
+ if (!(val & BYT_BCR_WPD)) {
+ val |= BYT_BCR_WPD;
+ writel(val, base + BYT_BCR);
+ val = readl(base + BYT_BCR);
+ }
+
+ return val & BYT_BCR_WPD;
+}
+
+static bool lpc_ich_lpt_set_writeable(void __iomem *base, void *data)
+{
+ struct pci_dev *pdev = data;
+ u32 bcr;
+
+ pci_read_config_dword(pdev, BCR, &bcr);
+ if (!(bcr & BCR_WPD)) {
+ bcr |= BCR_WPD;
+ pci_write_config_dword(pdev, BCR, bcr);
+ pci_read_config_dword(pdev, BCR, &bcr);
+ }
+
+ return bcr & BCR_WPD;
+}
+
+static bool lpc_ich_bxt_set_writeable(void __iomem *base, void *data)
+{
+ unsigned int spi = PCI_DEVFN(13, 2);
+ struct pci_bus *bus = data;
+ u32 bcr;
+
+ pci_bus_read_config_dword(bus, spi, BCR, &bcr);
+ if (!(bcr & BCR_WPD)) {
+ bcr |= BCR_WPD;
+ pci_bus_write_config_dword(bus, spi, BCR, bcr);
+ pci_bus_read_config_dword(bus, spi, BCR, &bcr);
+ }
+
+ return bcr & BCR_WPD;
+}
+
static int lpc_ich_init_spi(struct pci_dev *dev)
{
struct lpc_ich_priv *priv = pci_get_drvdata(dev);
struct resource *res = &intel_spi_res[0];
struct intel_spi_boardinfo *info;
- u32 spi_base, rcba, bcr;
+ u32 spi_base, rcba;
info = devm_kzalloc(&dev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -1103,6 +1150,8 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
if (spi_base & SPIBASE_BYT_EN) {
res->start = spi_base & ~(SPIBASE_BYT_SZ - 1);
res->end = res->start + SPIBASE_BYT_SZ - 1;
+
+ info->set_writeable = lpc_ich_byt_set_writeable;
}
break;
@@ -1113,8 +1162,8 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
res->start = spi_base + SPIBASE_LPT;
res->end = res->start + SPIBASE_LPT_SZ - 1;
- pci_read_config_dword(dev, BCR, &bcr);
- info->writeable = !!(bcr & BCR_WPD);
+ info->set_writeable = lpc_ich_lpt_set_writeable;
+ info->data = dev;
}
break;
@@ -1135,8 +1184,8 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
res->start = spi_base & 0xfffffff0;
res->end = res->start + SPIBASE_APL_SZ - 1;
- pci_bus_read_config_dword(bus, spi, BCR, &bcr);
- info->writeable = !!(bcr & BCR_WPD);
+ info->set_writeable = lpc_ich_bxt_set_writeable;
+ info->data = bus;
}
pci_bus_write_config_byte(bus, p2sb, 0xe1, 0x1);
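Rather than latching a writeable snapshot at probe time, lpc_ich now passes the SPI controller driver a ->set_writeable() callback plus an opaque cookie, so write protection can be re-checked, and the WPD bit set, at the moment a flash write is actually attempted. A minimal sketch of the callback pattern (the struct here is illustrative, not intel-spi's real boardinfo):

#include <linux/io.h>
#include <linux/types.h>

struct spi_flash_info_sketch {
	/* try to make the flash writable; true on success */
	bool (*set_writeable)(void __iomem *base, void *data);
	void *data;	/* opaque cookie handed back to the callback */
};

static bool flash_try_write(struct spi_flash_info_sketch *info,
			    void __iomem *base)
{
	/* re-evaluate write protection at write time, not probe time */
	return info->set_writeable && info->set_writeable(base, info->data);
}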
diff --git a/drivers/mfd/madera-spi.c b/drivers/mfd/madera-spi.c
index e860f5ff0933..da84eb50e53a 100644
--- a/drivers/mfd/madera-spi.c
+++ b/drivers/mfd/madera-spi.c
@@ -112,13 +112,11 @@ static int madera_spi_probe(struct spi_device *spi)
return madera_dev_init(madera);
}
-static int madera_spi_remove(struct spi_device *spi)
+static void madera_spi_remove(struct spi_device *spi)
{
struct madera *madera = spi_get_drvdata(spi);
madera_dev_exit(madera);
-
- return 0;
}
static const struct spi_device_id madera_spi_ids[] = {
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
index 4d8913d647e6..f803527e5819 100644
--- a/drivers/mfd/mc13xxx-spi.c
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -166,10 +166,9 @@ static int mc13xxx_spi_probe(struct spi_device *spi)
return mc13xxx_common_init(&spi->dev);
}
-static int mc13xxx_spi_remove(struct spi_device *spi)
+static void mc13xxx_spi_remove(struct spi_device *spi)
{
mc13xxx_common_exit(&spi->dev);
- return 0;
}
static struct spi_driver mc13xxx_spi_driver = {
diff --git a/drivers/mfd/rsmu_spi.c b/drivers/mfd/rsmu_spi.c
index fec2b4ec477c..d2f3d8f1e05a 100644
--- a/drivers/mfd/rsmu_spi.c
+++ b/drivers/mfd/rsmu_spi.c
@@ -220,13 +220,11 @@ static int rsmu_spi_probe(struct spi_device *client)
return rsmu_core_init(rsmu);
}
-static int rsmu_spi_remove(struct spi_device *client)
+static void rsmu_spi_remove(struct spi_device *client)
{
struct rsmu_ddata *rsmu = spi_get_drvdata(client);
rsmu_core_exit(rsmu);
-
- return 0;
}
static const struct spi_device_id rsmu_spi_id[] = {
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index 6c5915016be5..ad8055a0e286 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -102,13 +102,11 @@ stmpe_spi_probe(struct spi_device *spi)
return stmpe_probe(&spi_ci, id->driver_data);
}
-static int stmpe_spi_remove(struct spi_device *spi)
+static void stmpe_spi_remove(struct spi_device *spi)
{
struct stmpe *stmpe = spi_get_drvdata(spi);
stmpe_remove(stmpe);
-
- return 0;
}
static const struct of_device_id stmpe_spi_of_match[] = {
diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
index d701926aa46e..bba38fbc781d 100644
--- a/drivers/mfd/tps65912-spi.c
+++ b/drivers/mfd/tps65912-spi.c
@@ -50,13 +50,11 @@ static int tps65912_spi_probe(struct spi_device *spi)
return tps65912_device_init(tps);
}
-static int tps65912_spi_remove(struct spi_device *spi)
+static void tps65912_spi_remove(struct spi_device *spi)
{
struct tps65912 *tps = spi_get_drvdata(spi);
tps65912_device_exit(tps);
-
- return 0;
}
static const struct spi_device_id tps65912_spi_id_table[] = {
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
index a9e75d80ad36..263055bda48b 100644
--- a/drivers/misc/ad525x_dpot-spi.c
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -90,10 +90,9 @@ static int ad_dpot_spi_probe(struct spi_device *spi)
spi_get_device_id(spi)->name);
}
-static int ad_dpot_spi_remove(struct spi_device *spi)
+static void ad_dpot_spi_remove(struct spi_device *spi)
{
ad_dpot_remove(&spi->dev);
- return 0;
}
static const struct spi_device_id ad_dpot_spi_id[] = {
diff --git a/drivers/misc/bcm-vk/bcm_vk.h b/drivers/misc/bcm-vk/bcm_vk.h
index a1338f375589..25d51222eedf 100644
--- a/drivers/misc/bcm-vk/bcm_vk.h
+++ b/drivers/misc/bcm-vk/bcm_vk.h
@@ -311,7 +311,7 @@ struct bcm_vk_peer_log {
u32 wr_idx;
u32 buf_size;
u32 mask;
- char data[0];
+ char data[];
};
/* max buf size allowed */
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 1f15399e5cb4..b630625b3024 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -555,14 +555,12 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
return 0;
}
-static int eeprom_93xx46_remove(struct spi_device *spi)
+static void eeprom_93xx46_remove(struct spi_device *spi)
{
struct eeprom_93xx46_dev *edev = spi_get_drvdata(spi);
if (!(edev->pdata->flags & EE_READONLY))
device_remove_file(&spi->dev, &dev_attr_erase);
-
- return 0;
}
static struct spi_driver eeprom_93xx46_driver = {
diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h
index 737c39f33f05..f9c4acc9bf5a 100644
--- a/drivers/misc/habanalabs/include/common/cpucp_if.h
+++ b/drivers/misc/habanalabs/include/common/cpucp_if.h
@@ -540,19 +540,19 @@ struct cpucp_packet {
struct cpucp_unmask_irq_arr_packet {
struct cpucp_packet cpucp_pkt;
__le32 length;
- __le32 irqs[0];
+ __le32 irqs[];
};
struct cpucp_nic_status_packet {
struct cpucp_packet cpucp_pkt;
__le32 length;
- __le32 data[0];
+ __le32 data[];
};
struct cpucp_array_data_packet {
struct cpucp_packet cpucp_pkt;
__le32 length;
- __le32 data[0];
+ __le32 data[];
};
enum cpucp_packet_rc {
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
index 6e097ace2e96..66fc083a7c6a 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
@@ -54,7 +54,7 @@ struct gaudi_packet {
/* The rest of the packet data follows. Use the corresponding
* packet_XXX struct to dereference the data, based on packet type
*/
- u8 contents[0];
+ u8 contents[];
};
struct packet_nop {
@@ -75,7 +75,7 @@ struct packet_wreg32 {
struct packet_wreg_bulk {
__le32 size64;
__le32 ctl;
- __le64 values[0]; /* data starts here */
+ __le64 values[]; /* data starts here */
};
#define GAUDI_PKT_LONG_CTL_OP_SHIFT 20
diff --git a/drivers/misc/habanalabs/include/goya/goya_packets.h b/drivers/misc/habanalabs/include/goya/goya_packets.h
index ef54bad20509..50ce5175b63a 100644
--- a/drivers/misc/habanalabs/include/goya/goya_packets.h
+++ b/drivers/misc/habanalabs/include/goya/goya_packets.h
@@ -62,7 +62,7 @@ struct goya_packet {
/* The rest of the packet data follows. Use the corresponding
* packet_XXX struct to dereference the data, based on packet type
*/
- u8 contents[0];
+ u8 contents[];
};
struct packet_nop {
@@ -86,7 +86,7 @@ struct packet_wreg32 {
struct packet_wreg_bulk {
__le32 size64;
__le32 ctl;
- __le64 values[0]; /* data starts here */
+ __le64 values[]; /* data starts here */
};
struct packet_msg_long {
diff --git a/drivers/misc/hi6421v600-irq.c b/drivers/misc/hi6421v600-irq.c
index 1c763796cf1f..caa3de37698b 100644
--- a/drivers/misc/hi6421v600-irq.c
+++ b/drivers/misc/hi6421v600-irq.c
@@ -117,8 +117,8 @@ static irqreturn_t hi6421v600_irq_handler(int irq, void *__priv)
* If both powerkey down and up IRQs are received,
* handle them at the right order
*/
- generic_handle_irq(priv->irqs[POWERKEY_DOWN]);
- generic_handle_irq(priv->irqs[POWERKEY_UP]);
+ generic_handle_irq_safe(priv->irqs[POWERKEY_DOWN]);
+ generic_handle_irq_safe(priv->irqs[POWERKEY_UP]);
pending &= ~HISI_IRQ_POWERKEY_UP_DOWN;
}
@@ -126,7 +126,7 @@ static irqreturn_t hi6421v600_irq_handler(int irq, void *__priv)
continue;
for_each_set_bit(offset, &pending, BITS_PER_BYTE) {
- generic_handle_irq(priv->irqs[offset + i * BITS_PER_BYTE]);
+ generic_handle_irq_safe(priv->irqs[offset + i * BITS_PER_BYTE]);
}
}
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c
index 98828030b5a4..bac4df2e5231 100644
--- a/drivers/misc/lattice-ecp3-config.c
+++ b/drivers/misc/lattice-ecp3-config.c
@@ -211,13 +211,11 @@ static int lattice_ecp3_probe(struct spi_device *spi)
return 0;
}
-static int lattice_ecp3_remove(struct spi_device *spi)
+static void lattice_ecp3_remove(struct spi_device *spi)
{
struct fpga_data *data = spi_get_drvdata(spi);
wait_for_completion(&data->fw_loaded);
-
- return 0;
}
static const struct spi_device_id lattice_ecp3_id[] = {
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
index 9e40dfb60742..203a108b8883 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_spi.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
@@ -96,15 +96,13 @@ static int lis302dl_spi_probe(struct spi_device *spi)
return lis3lv02d_init_device(&lis3_dev);
}
-static int lis302dl_spi_remove(struct spi_device *spi)
+static void lis302dl_spi_remove(struct spi_device *spi)
{
struct lis3lv02d *lis3 = spi_get_drvdata(spi);
lis3lv02d_joystick_disable(lis3);
lis3lv02d_poweroff(lis3);
lis3lv02d_remove_fs(&lis3_dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index dab7b92db790..50644f83e78c 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -247,7 +247,7 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
xpnet_device->stats.rx_packets++;
xpnet_device->stats.rx_bytes += skb->len + ETH_HLEN;
- netif_rx_ni(skb);
+ netif_rx(skb);
xpc_received(partid, channel, (void *)msg);
}
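netif_rx_ni() could be retired because netif_rx() now detects on its own whether it runs in process context and wraps the queueing in local_bh_disable()/enable() as needed, so callers no longer have to pick a variant. A sketch of a receive tail under that assumption:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical receive completion: hand a filled skb to the stack. */
static void rx_complete(struct net_device *ndev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, ndev);
	netif_rx(skb);		/* now callable from any context */
}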
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 8d718aa56d33..4e67c1403cc9 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1908,7 +1908,7 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
cb_data.card = card;
cb_data.status = 0;
- err = __mmc_poll_for_busy(card->host, MMC_BLK_TIMEOUT_MS,
+ err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS,
&mmc_blk_busy_cb, &cb_data);
/*
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 096ae624be9a..58a60afa650b 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -15,6 +15,7 @@
#include <linux/stat.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
+#include <linux/sysfs.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -34,13 +35,13 @@ static ssize_t type_show(struct device *dev,
switch (card->type) {
case MMC_TYPE_MMC:
- return sprintf(buf, "MMC\n");
+ return sysfs_emit(buf, "MMC\n");
case MMC_TYPE_SD:
- return sprintf(buf, "SD\n");
+ return sysfs_emit(buf, "SD\n");
case MMC_TYPE_SDIO:
- return sprintf(buf, "SDIO\n");
+ return sysfs_emit(buf, "SDIO\n");
case MMC_TYPE_SD_COMBO:
- return sprintf(buf, "SDcombo\n");
+ return sysfs_emit(buf, "SDcombo\n");
default:
return -EFAULT;
}
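This and the following mmc hunks convert sysfs show() callbacks from sprintf() to sysfs_emit(), which encodes the documented sysfs contract: the buffer is a full page, and sysfs_emit() both bounds the output to PAGE_SIZE and sanity-checks that buf is the page-aligned start. A minimal attribute sketch:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	/* bounded, page-aware replacement for sprintf() in sysfs show() */
	return sysfs_emit(buf, "%s\n", "MMC");
}
static DEVICE_ATTR_RO(type);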
diff --git a/drivers/mmc/core/bus.h b/drivers/mmc/core/bus.h
index 8105852c4b62..3996b191b68d 100644
--- a/drivers/mmc/core/bus.h
+++ b/drivers/mmc/core/bus.h
@@ -9,6 +9,7 @@
#define _MMC_CORE_BUS_H
#include <linux/device.h>
+#include <linux/sysfs.h>
struct mmc_host;
struct mmc_card;
@@ -17,7 +18,7 @@ struct mmc_card;
static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct mmc_card *card = mmc_dev_to_card(dev); \
- return sprintf(buf, fmt, args); \
+ return sysfs_emit(buf, fmt, args); \
} \
static DEVICE_ATTR(name, S_IRUGO, mmc_##name##_show, NULL)
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index cf140f4ec864..2ed2b4d5e5a5 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -588,6 +588,25 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
EXPORT_SYMBOL(mmc_alloc_host);
+static int mmc_validate_host_caps(struct mmc_host *host)
+{
+ struct device *dev = host->parent;
+ u32 caps = host->caps, caps2 = host->caps2;
+
+ if (caps & MMC_CAP_SDIO_IRQ && !host->ops->enable_sdio_irq) {
+ dev_warn(dev, "missing ->enable_sdio_irq() ops\n");
+ return -EINVAL;
+ }
+
+ if (caps2 & (MMC_CAP2_HS400_ES | MMC_CAP2_HS400) &&
+ !(caps & MMC_CAP_8_BIT_DATA)) {
+ dev_warn(dev, "drop HS400 support since no 8-bit bus\n");
+ host->caps2 = caps2 & ~MMC_CAP2_HS400_ES & ~MMC_CAP2_HS400;
+ }
+
+ return 0;
+}
+
/**
* mmc_add_host - initialise host hardware
* @host: mmc host
@@ -600,8 +619,9 @@ int mmc_add_host(struct mmc_host *host)
{
int err;
- WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
- !host->ops->enable_sdio_irq);
+ err = mmc_validate_host_caps(host);
+ if (err)
+ return err;
err = device_add(&host->class_dev);
if (err)
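mmc_validate_host_caps() moves capability checking from a WARN_ON to registration time: claiming SDIO-IRQ support without an ->enable_sdio_irq() op now fails mmc_add_host(), while claiming HS400 without an 8-bit bus is quietly downgraded (which is also why the -ENOTSUPP check disappears from mmc_select_hs400es() below). A sketch of the validate-then-register split with hypothetical capability bits:

#include <linux/bits.h>
#include <linux/errno.h>

#define CAP_SDIO_IRQ	BIT(0)	/* hypothetical capability bits */
#define CAP_HS400	BIT(1)
#define CAP_8_BIT	BIT(2)

static int validate_caps(u32 *caps, bool have_irq_op)
{
	if ((*caps & CAP_SDIO_IRQ) && !have_irq_op)
		return -EINVAL;		/* unusable combination: reject */
	if ((*caps & CAP_HS400) && !(*caps & CAP_8_BIT))
		*caps &= ~CAP_HS400;	/* merely optimistic: downgrade */
	return 0;
}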
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index bbbbcaf70a59..e7ea45386c22 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_runtime.h>
+#include <linux/sysfs.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
@@ -812,12 +813,11 @@ static ssize_t mmc_fwrev_show(struct device *dev,
{
struct mmc_card *card = mmc_dev_to_card(dev);
- if (card->ext_csd.rev < 7) {
- return sprintf(buf, "0x%x\n", card->cid.fwrev);
- } else {
- return sprintf(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
- card->ext_csd.fwrev);
- }
+ if (card->ext_csd.rev < 7)
+ return sysfs_emit(buf, "0x%x\n", card->cid.fwrev);
+ else
+ return sysfs_emit(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
+ card->ext_csd.fwrev);
}
static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);
@@ -830,10 +830,10 @@ static ssize_t mmc_dsr_show(struct device *dev,
struct mmc_host *host = card->host;
if (card->csd.dsr_imp && host->dsr_req)
- return sprintf(buf, "0x%x\n", host->dsr);
+ return sysfs_emit(buf, "0x%x\n", host->dsr);
else
/* return default DSR value */
- return sprintf(buf, "0x%x\n", 0x404);
+ return sysfs_emit(buf, "0x%x\n", 0x404);
}
static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
@@ -1355,11 +1355,6 @@ static int mmc_select_hs400es(struct mmc_card *card)
int err = -EINVAL;
u8 val;
- if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
- err = -ENOTSUPP;
- goto out_err;
- }
-
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
@@ -1523,13 +1518,23 @@ static int mmc_select_timing(struct mmc_card *card)
if (!mmc_can_ext_csd(card))
goto bus_speed;
- if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES)
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES) {
err = mmc_select_hs400es(card);
- else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
+ goto out;
+ }
+
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200) {
err = mmc_select_hs200(card);
- else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
+ if (err == -EBADMSG)
+ card->mmc_avail_type &= ~EXT_CSD_CARD_TYPE_HS200;
+ else
+ goto out;
+ }
+
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
err = mmc_select_hs(card);
+out:
if (err && err != -EBADMSG)
return err;
@@ -1962,7 +1967,7 @@ static int mmc_sleep(struct mmc_host *host)
goto out_release;
}
- err = __mmc_poll_for_busy(host, timeout_ms, &mmc_sleep_busy_cb, host);
+ err = __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_sleep_busy_cb, host);
out_release:
mmc_retune_release(host);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index d63d1c735335..180d7e9d3400 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -21,6 +21,8 @@
#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */
#define MMC_SANITIZE_TIMEOUT_MS (240 * 1000) /* 240s */
+#define MMC_OP_COND_PERIOD_US (1 * 1000) /* 1ms */
+#define MMC_OP_COND_TIMEOUT_MS 1000 /* 1s */
static const u8 tuning_blk_pattern_4bit[] = {
0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
@@ -232,7 +234,9 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
- err = __mmc_poll_for_busy(host, 1000, &__mmc_send_op_cond_cb, &cb_data);
+ err = __mmc_poll_for_busy(host, MMC_OP_COND_PERIOD_US,
+ MMC_OP_COND_TIMEOUT_MS,
+ &__mmc_send_op_cond_cb, &cb_data);
if (err)
return err;
@@ -495,13 +499,14 @@ static int mmc_busy_cb(void *cb_data, bool *busy)
return 0;
}
-int __mmc_poll_for_busy(struct mmc_host *host, unsigned int timeout_ms,
+int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
+ unsigned int timeout_ms,
int (*busy_cb)(void *cb_data, bool *busy),
void *cb_data)
{
int err;
unsigned long timeout;
- unsigned int udelay = 32, udelay_max = 32768;
+ unsigned int udelay = period_us ? period_us : 32, udelay_max = 32768;
bool expired = false;
bool busy = false;
@@ -546,7 +551,7 @@ int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
cb_data.retry_crc_err = retry_crc_err;
cb_data.busy_cmd = busy_cmd;
- return __mmc_poll_for_busy(host, timeout_ms, &mmc_busy_cb, &cb_data);
+ return __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_busy_cb, &cb_data);
}
EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 9c813b851d0b..09ffbc00908b 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -41,7 +41,8 @@ int mmc_can_ext_csd(struct mmc_card *card);
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
unsigned int timeout_ms);
-int __mmc_poll_for_busy(struct mmc_host *host, unsigned int timeout_ms,
+int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
+ unsigned int timeout_ms,
int (*busy_cb)(void *cb_data, bool *busy),
void *cb_data);
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
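The new period_us parameter seeds the polling cadence of __mmc_poll_for_busy(): the loop historically started at 32us and backed off exponentially toward udelay_max, which is needlessly aggressive for CMD1/OCR polling, so mmc_send_op_cond() now requests a 1ms initial period while every other caller passes 0 to keep the old behaviour. A minimal sketch of such a backoff loop (the busy predicate is a stand-in):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int poll_for_busy_sketch(unsigned int period_us,
				unsigned int timeout_ms,
				bool (*still_busy)(void *), void *ctx)
{
	unsigned int delay_us = period_us ? period_us : 32;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;

	while (still_busy(ctx)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(delay_us, delay_us * 2);
		if (delay_us < 32768)
			delay_us *= 2;		/* exponential backoff */
	}
	return 0;
}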
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index bd87012c220c..68df6b2f49cc 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -13,6 +13,7 @@
#include <linux/stat.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
+#include <linux/sysfs.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
@@ -708,18 +709,16 @@ MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
MMC_DEV_ATTR(rca, "0x%04x\n", card->rca);
-static ssize_t mmc_dsr_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t mmc_dsr_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- struct mmc_card *card = mmc_dev_to_card(dev);
- struct mmc_host *host = card->host;
-
- if (card->csd.dsr_imp && host->dsr_req)
- return sprintf(buf, "0x%x\n", host->dsr);
- else
- /* return default DSR value */
- return sprintf(buf, "0x%x\n", 0x404);
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ struct mmc_host *host = card->host;
+
+ if (card->csd.dsr_imp && host->dsr_req)
+ return sysfs_emit(buf, "0x%x\n", host->dsr);
+ /* return default DSR value */
+ return sysfs_emit(buf, "0x%x\n", 0x404);
}
static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
@@ -735,9 +734,9 @@ static ssize_t info##num##_show(struct device *dev, struct device_attribute *att
\
if (num > card->num_info) \
return -ENODATA; \
- if (!card->info[num-1][0]) \
+ if (!card->info[num - 1][0]) \
return 0; \
- return sprintf(buf, "%s\n", card->info[num-1]); \
+ return sysfs_emit(buf, "%s\n", card->info[num - 1]); \
} \
static DEVICE_ATTR_RO(info##num)
@@ -1672,7 +1671,7 @@ static int sd_poweroff_notify(struct mmc_card *card)
cb_data.card = card;
cb_data.reg_buf = reg_buf;
- err = __mmc_poll_for_busy(card->host, SD_POWEROFF_NOTIFY_TIMEOUT_MS,
+ err = __mmc_poll_for_busy(card->host, 0, SD_POWEROFF_NOTIFY_TIMEOUT_MS,
&sd_busy_poweroff_notify_cb, &cb_data);
out:
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 41164748723d..25799accf8a0 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -7,6 +7,7 @@
#include <linux/err.h>
#include <linux/pm_runtime.h>
+#include <linux/sysfs.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
@@ -40,9 +41,9 @@ static ssize_t info##num##_show(struct device *dev, struct device_attribute *att
\
if (num > card->num_info) \
return -ENODATA; \
- if (!card->info[num-1][0]) \
+ if (!card->info[num - 1][0]) \
return 0; \
- return sprintf(buf, "%s\n", card->info[num-1]); \
+ return sysfs_emit(buf, "%s\n", card->info[num - 1]); \
} \
static DEVICE_ATTR_RO(info##num)
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index fda03b35c14a..c6268c38c69e 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -14,6 +14,7 @@
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/acpi.h>
+#include <linux/sysfs.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -35,7 +36,7 @@ field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
struct sdio_func *func; \
\
func = dev_to_sdio_func (dev); \
- return sprintf(buf, format_string, args); \
+ return sysfs_emit(buf, format_string, args); \
} \
static DEVICE_ATTR_RO(field)
@@ -52,9 +53,9 @@ static ssize_t info##num##_show(struct device *dev, struct device_attribute *att
\
if (num > func->num_info) \
return -ENODATA; \
- if (!func->info[num-1][0]) \
+ if (!func->info[num - 1][0]) \
return 0; \
- return sprintf(buf, "%s\n", func->info[num-1]); \
+ return sysfs_emit(buf, "%s\n", func->info[num - 1]); \
} \
static DEVICE_ATTR_RO(info##num)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 52b0b27a6839..af6c3c329076 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -1094,3 +1094,16 @@ config MMC_OWL
config MMC_SDHCI_EXTERNAL_DMA
bool
+
+config MMC_LITEX
+ tristate "LiteX MMC Host Controller support"
+ depends on ((PPC_MICROWATT || LITEX) && OF && HAVE_CLK) || COMPILE_TEST
+ select REGULATOR
+ select REGULATOR_FIXED_VOLTAGE
+ help
+ This selects support for the MMC Host Controller found in LiteX SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called litex_mmc.
+
+ If unsure, say N.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index ea36d379bd3c..4e4ceb32c4b4 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -101,6 +101,7 @@ obj-$(CONFIG_MMC_CQHCI) += cqhci.o
cqhci-y += cqhci-core.o
cqhci-$(CONFIG_MMC_CRYPTO) += cqhci-crypto.o
obj-$(CONFIG_MMC_HSQ) += mmc_hsq.o
+obj-$(CONFIG_MMC_LITEX) += litex_mmc.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 2a757c88f9d2..7138dfa065bf 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1189,7 +1189,6 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
static int davinci_mmcsd_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct mmc_davinci_host *host = NULL;
struct mmc_host *mmc = NULL;
struct resource *r, *mem = NULL;
@@ -1235,9 +1234,8 @@ static int davinci_mmcsd_probe(struct platform_device *pdev)
host->mmc_input_clk = clk_get_rate(host->clk);
- match = of_match_device(davinci_mmc_dt_ids, &pdev->dev);
- if (match) {
- pdev->id_entry = match->data;
+ pdev->id_entry = of_device_get_match_data(&pdev->dev);
+ if (pdev->id_entry) {
ret = mmc_of_parse(mmc);
if (ret) {
dev_err_probe(&pdev->dev, ret,
@@ -1375,8 +1373,12 @@ static int davinci_mmcsd_suspend(struct device *dev)
static int davinci_mmcsd_resume(struct device *dev)
{
struct mmc_davinci_host *host = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_enable(host->clk);
+ if (ret)
+ return ret;
- clk_enable(host->clk);
mmc_davinci_reset_ctrl(host, 0);
return 0;
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index 95d0ec0f5f3a..f825487aa739 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -15,7 +15,9 @@
#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"
-#define RK3288_CLKGEN_DIV 2
+#define RK3288_CLKGEN_DIV 2
+
+static const unsigned int freqs[] = { 100000, 200000, 300000, 400000 };
struct dw_mci_rockchip_priv_data {
struct clk *drv_clk;
@@ -51,7 +53,7 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
ret = clk_set_rate(host->ciu_clk, cclkin);
if (ret)
- dev_warn(host->dev, "failed to set rate %uHz\n", ios->clock);
+ dev_warn(host->dev, "failed to set rate %uHz err: %d\n", cclkin, ret);
bus_hz = clk_get_rate(host->ciu_clk) / RK3288_CLKGEN_DIV;
if (bus_hz != host->bus_hz) {
@@ -290,13 +292,30 @@ static int dw_mci_rk3288_parse_dt(struct dw_mci *host)
static int dw_mci_rockchip_init(struct dw_mci *host)
{
+ int ret, i;
+
/* It is slot 8 on Rockchip SoCs */
host->sdio_id0 = 8;
- if (of_device_is_compatible(host->dev->of_node,
- "rockchip,rk3288-dw-mshc"))
+ if (of_device_is_compatible(host->dev->of_node, "rockchip,rk3288-dw-mshc")) {
host->bus_hz /= RK3288_CLKGEN_DIV;
+ /*
+ * The clock driver will fail to set any rate below the lowest source
+ * clock divided by the internal clock divider. Probe for the lowest
+ * available source rate and set the minimum frequency to that rate
+ * divided by the divider.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(freqs); i++) {
+ ret = clk_round_rate(host->ciu_clk, freqs[i] * RK3288_CLKGEN_DIV);
+ if (ret > 0) {
+ host->minimum_speed = ret / RK3288_CLKGEN_DIV;
+ break;
+ }
+ }
+ if (ret < 0)
+ dev_warn(host->dev, "no valid minimum freq: %d\n", ret);
+ }
+
return 0;
}
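The probing loop above relies on clk_round_rate(), which returns the closest rate the clock tree can actually deliver or a negative errno, so walking a small table of candidate frequencies from the bottom up yields the lowest usable rate; note that clk_round_rate() returns long, so storing the result in an int is safe here only because the rates involved are small. A sketch of the idiom:

#include <linux/clk.h>
#include <linux/errno.h>

/* Illustrative: find the lowest candidate rate the clock tree supports. */
static long lowest_supported_rate(struct clk *clk, const unsigned int *freqs,
				  int n, unsigned int div)
{
	long rate = -EINVAL;
	int i;

	for (i = 0; i < n; i++) {
		rate = clk_round_rate(clk, (unsigned long)freqs[i] * div);
		if (rate > 0)
			return rate / div;	/* post-divider minimum */
	}
	return rate;	/* negative errno from the last attempt */
}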
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 42bf8a2287ba..06dc56cbada8 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2898,7 +2898,12 @@ static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
if (host->pdata->caps2)
mmc->caps2 = host->pdata->caps2;
- mmc->f_min = DW_MCI_FREQ_MIN;
+ /* if the host has set a minimum_speed, respect it */
+ if (host->minimum_speed)
+ mmc->f_min = host->minimum_speed;
+ else
+ mmc->f_min = DW_MCI_FREQ_MIN;
+
if (!mmc->f_max)
mmc->f_max = DW_MCI_FREQ_MAX;
@@ -3057,8 +3062,7 @@ static void dw_mci_init_dma(struct dw_mci *host)
dev_info(host->dev, "Using internal DMA controller.\n");
} else {
/* TRANS_MODE_EDMAC: check dma bindings again */
- if ((device_property_read_string_array(dev, "dma-names",
- NULL, 0) < 0) ||
+ if ((device_property_string_array_count(dev, "dma-names") < 0) ||
!device_property_present(dev, "dmas")) {
goto no_dma;
}
@@ -3568,7 +3572,7 @@ int dw_mci_runtime_resume(struct device *dev)
mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
- if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
+ if (host->slot && host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);
/* Force setup bus to guarantee available clock output */
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 7f1e38621d13..4ed81f94f7ca 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -99,6 +99,7 @@ struct dw_mci_dma_slave {
* @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
* rate and timeout calculations.
* @current_speed: Configured rate of the controller.
+ * @minimum_speed: Stored minimum rate of the controller.
* @fifoth_val: The value of FIFOTH register.
* @verid: Denote Version ID.
* @dev: Device associated with the MMC controller.
@@ -201,6 +202,7 @@ struct dw_mci {
u32 bus_hz;
u32 current_speed;
+ u32 minimum_speed;
u32 fifoth_val;
u16 verid;
struct device *dev;
diff --git a/drivers/mmc/host/litex_mmc.c b/drivers/mmc/host/litex_mmc.c
new file mode 100644
index 000000000000..6ba0d63b8c07
--- /dev/null
+++ b/drivers/mmc/host/litex_mmc.c
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LiteX LiteSDCard driver
+ *
+ * Copyright (C) 2019-2020 Antmicro <contact@antmicro.com>
+ * Copyright (C) 2019-2020 Kamil Rakoczy <krakoczy@antmicro.com>
+ * Copyright (C) 2019-2020 Maciej Dudek <mdudek@internships.antmicro.com>
+ * Copyright (C) 2020 Paul Mackerras <paulus@ozlabs.org>
+ * Copyright (C) 2020-2022 Gabriel Somlo <gsomlo@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/litex.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+
+#define LITEX_PHY_CARDDETECT 0x00
+#define LITEX_PHY_CLOCKERDIV 0x04
+#define LITEX_PHY_INITIALIZE 0x08
+#define LITEX_PHY_WRITESTATUS 0x0C
+#define LITEX_CORE_CMDARG 0x00
+#define LITEX_CORE_CMDCMD 0x04
+#define LITEX_CORE_CMDSND 0x08
+#define LITEX_CORE_CMDRSP 0x0C
+#define LITEX_CORE_CMDEVT 0x1C
+#define LITEX_CORE_DATEVT 0x20
+#define LITEX_CORE_BLKLEN 0x24
+#define LITEX_CORE_BLKCNT 0x28
+#define LITEX_BLK2MEM_BASE 0x00
+#define LITEX_BLK2MEM_LEN 0x08
+#define LITEX_BLK2MEM_ENA 0x0C
+#define LITEX_BLK2MEM_DONE 0x10
+#define LITEX_BLK2MEM_LOOP 0x14
+#define LITEX_MEM2BLK_BASE 0x00
+#define LITEX_MEM2BLK_LEN 0x08
+#define LITEX_MEM2BLK_ENA 0x0C
+#define LITEX_MEM2BLK_DONE 0x10
+#define LITEX_MEM2BLK_LOOP 0x14
+#define LITEX_MEM2BLK 0x18
+#define LITEX_IRQ_STATUS 0x00
+#define LITEX_IRQ_PENDING 0x04
+#define LITEX_IRQ_ENABLE 0x08
+
+#define SD_CTL_DATA_XFER_NONE 0
+#define SD_CTL_DATA_XFER_READ 1
+#define SD_CTL_DATA_XFER_WRITE 2
+
+#define SD_CTL_RESP_NONE 0
+#define SD_CTL_RESP_SHORT 1
+#define SD_CTL_RESP_LONG 2
+#define SD_CTL_RESP_SHORT_BUSY 3
+
+#define SD_BIT_DONE BIT(0)
+#define SD_BIT_WR_ERR BIT(1)
+#define SD_BIT_TIMEOUT BIT(2)
+#define SD_BIT_CRC_ERR BIT(3)
+
+#define SD_SLEEP_US 5
+#define SD_TIMEOUT_US 20000
+
+#define SDIRQ_CARD_DETECT 1
+#define SDIRQ_SD_TO_MEM_DONE 2
+#define SDIRQ_MEM_TO_SD_DONE 4
+#define SDIRQ_CMD_DONE 8
+
+struct litex_mmc_host {
+ struct mmc_host *mmc;
+
+ void __iomem *sdphy;
+ void __iomem *sdcore;
+ void __iomem *sdreader;
+ void __iomem *sdwriter;
+ void __iomem *sdirq;
+
+ void *buffer;
+ size_t buf_size;
+ dma_addr_t dma;
+
+ struct completion cmd_done;
+ int irq;
+
+ unsigned int ref_clk;
+ unsigned int sd_clk;
+
+ u32 resp[4];
+ u16 rca;
+
+ bool is_bus_width_set;
+ bool app_cmd;
+};
+
+static int litex_mmc_sdcard_wait_done(void __iomem *reg, struct device *dev)
+{
+ u8 evt;
+ int ret;
+
+ ret = readx_poll_timeout(litex_read8, reg, evt, evt & SD_BIT_DONE,
+ SD_SLEEP_US, SD_TIMEOUT_US);
+ if (ret)
+ return ret;
+ if (evt == SD_BIT_DONE)
+ return 0;
+ if (evt & SD_BIT_WR_ERR)
+ return -EIO;
+ if (evt & SD_BIT_TIMEOUT)
+ return -ETIMEDOUT;
+ if (evt & SD_BIT_CRC_ERR)
+ return -EILSEQ;
+ dev_err(dev, "%s: unknown error (evt=%x)\n", __func__, evt);
+ return -EINVAL;
+}
+
+static int litex_mmc_send_cmd(struct litex_mmc_host *host,
+ u8 cmd, u32 arg, u8 response_len, u8 transfer)
+{
+ struct device *dev = mmc_dev(host->mmc);
+ void __iomem *reg;
+ int ret;
+ u8 evt;
+
+ litex_write32(host->sdcore + LITEX_CORE_CMDARG, arg);
+ litex_write32(host->sdcore + LITEX_CORE_CMDCMD,
+ cmd << 8 | transfer << 5 | response_len);
+ litex_write8(host->sdcore + LITEX_CORE_CMDSND, 1);
+
+ /*
+ * Wait for an interrupt when one is available and either data is to
+ * be transferred or the card can report busy via DAT0.
+ */
+ if (host->irq > 0 &&
+ (transfer != SD_CTL_DATA_XFER_NONE ||
+ response_len == SD_CTL_RESP_SHORT_BUSY)) {
+ reinit_completion(&host->cmd_done);
+ litex_write32(host->sdirq + LITEX_IRQ_ENABLE,
+ SDIRQ_CMD_DONE | SDIRQ_CARD_DETECT);
+ wait_for_completion(&host->cmd_done);
+ }
+
+ ret = litex_mmc_sdcard_wait_done(host->sdcore + LITEX_CORE_CMDEVT, dev);
+ if (ret) {
+ dev_err(dev, "Command (cmd %d) error, status %d\n", cmd, ret);
+ return ret;
+ }
+
+ if (response_len != SD_CTL_RESP_NONE) {
+ /*
+ * NOTE: this matches the semantics of litex_read32()
+ * regardless of underlying arch endianness!
+ */
+ memcpy_fromio(host->resp,
+ host->sdcore + LITEX_CORE_CMDRSP, 0x10);
+ }
+
+ if (!host->app_cmd && cmd == SD_SEND_RELATIVE_ADDR)
+ host->rca = (host->resp[3] >> 16);
+
+ host->app_cmd = (cmd == MMC_APP_CMD);
+
+ if (transfer == SD_CTL_DATA_XFER_NONE)
+ return ret; /* OK from prior litex_mmc_sdcard_wait_done() */
+
+ ret = litex_mmc_sdcard_wait_done(host->sdcore + LITEX_CORE_DATEVT, dev);
+ if (ret) {
+ dev_err(dev, "Data xfer (cmd %d) error, status %d\n", cmd, ret);
+ return ret;
+ }
+
+ /* Wait for completion of (read or write) DMA transfer */
+ reg = (transfer == SD_CTL_DATA_XFER_READ) ?
+ host->sdreader + LITEX_BLK2MEM_DONE :
+ host->sdwriter + LITEX_MEM2BLK_DONE;
+ ret = readx_poll_timeout(litex_read8, reg, evt, evt & SD_BIT_DONE,
+ SD_SLEEP_US, SD_TIMEOUT_US);
+ if (ret)
+ dev_err(dev, "DMA timeout (cmd %d)\n", cmd);
+
+ return ret;
+}
+
+static int litex_mmc_send_app_cmd(struct litex_mmc_host *host)
+{
+ return litex_mmc_send_cmd(host, MMC_APP_CMD, host->rca << 16,
+ SD_CTL_RESP_SHORT, SD_CTL_DATA_XFER_NONE);
+}
+
+static int litex_mmc_send_set_bus_w_cmd(struct litex_mmc_host *host, u32 width)
+{
+ return litex_mmc_send_cmd(host, SD_APP_SET_BUS_WIDTH, width,
+ SD_CTL_RESP_SHORT, SD_CTL_DATA_XFER_NONE);
+}
+
+static int litex_mmc_set_bus_width(struct litex_mmc_host *host)
+{
+ bool app_cmd_sent;
+ int ret;
+
+ if (host->is_bus_width_set)
+ return 0;
+
+ /* Ensure 'app_cmd' precedes 'app_set_bus_width_cmd' */
+ app_cmd_sent = host->app_cmd; /* was preceding command app_cmd? */
+ if (!app_cmd_sent) {
+ ret = litex_mmc_send_app_cmd(host);
+ if (ret)
+ return ret;
+ }
+
+ /* LiteSDCard only supports 4-bit bus width */
+ ret = litex_mmc_send_set_bus_w_cmd(host, MMC_BUS_WIDTH_4);
+ if (ret)
+ return ret;
+
+ /* Re-send 'app_cmd' if necessary */
+ if (app_cmd_sent) {
+ ret = litex_mmc_send_app_cmd(host);
+ if (ret)
+ return ret;
+ }
+
+ host->is_bus_width_set = true;
+
+ return 0;
+}
+
+static int litex_mmc_get_cd(struct mmc_host *mmc)
+{
+ struct litex_mmc_host *host = mmc_priv(mmc);
+ int ret;
+
+ if (!mmc_card_is_removable(mmc))
+ return 1;
+
+ ret = !litex_read8(host->sdphy + LITEX_PHY_CARDDETECT);
+ if (ret)
+ return ret;
+
+ /* Ensure bus width will be set (again) upon card (re)insertion */
+ host->is_bus_width_set = false;
+
+ return 0;
+}
+
+static irqreturn_t litex_mmc_interrupt(int irq, void *arg)
+{
+ struct mmc_host *mmc = arg;
+ struct litex_mmc_host *host = mmc_priv(mmc);
+ u32 pending = litex_read32(host->sdirq + LITEX_IRQ_PENDING);
+ irqreturn_t ret = IRQ_NONE;
+
+ /* Check for card change interrupt */
+ if (pending & SDIRQ_CARD_DETECT) {
+ litex_write32(host->sdirq + LITEX_IRQ_PENDING,
+ SDIRQ_CARD_DETECT);
+ mmc_detect_change(mmc, msecs_to_jiffies(10));
+ ret = IRQ_HANDLED;
+ }
+
+ /* Check for command completed */
+ if (pending & SDIRQ_CMD_DONE) {
+ /* Disable it so it doesn't keep interrupting */
+ litex_write32(host->sdirq + LITEX_IRQ_ENABLE,
+ SDIRQ_CARD_DETECT);
+ complete(&host->cmd_done);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static u32 litex_mmc_response_len(struct mmc_command *cmd)
+{
+ if (cmd->flags & MMC_RSP_136)
+ return SD_CTL_RESP_LONG;
+ if (!(cmd->flags & MMC_RSP_PRESENT))
+ return SD_CTL_RESP_NONE;
+ if (cmd->flags & MMC_RSP_BUSY)
+ return SD_CTL_RESP_SHORT_BUSY;
+ return SD_CTL_RESP_SHORT;
+}
+
+static void litex_mmc_do_dma(struct litex_mmc_host *host, struct mmc_data *data,
+ unsigned int *len, bool *direct, u8 *transfer)
+{
+ struct device *dev = mmc_dev(host->mmc);
+ dma_addr_t dma;
+ int sg_count;
+
+ /*
+ * Try to DMA directly to/from the data buffer.
+ * We can do that if the buffer can be mapped for DMA
+ * in one contiguous chunk.
+ */
+ dma = host->dma;
+ *len = data->blksz * data->blocks;
+ sg_count = dma_map_sg(dev, data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ if (sg_count == 1) {
+ dma = sg_dma_address(data->sg);
+ *len = sg_dma_len(data->sg);
+ *direct = true;
+ } else if (*len > host->buf_size)
+ *len = host->buf_size;
+
+ if (data->flags & MMC_DATA_READ) {
+ litex_write8(host->sdreader + LITEX_BLK2MEM_ENA, 0);
+ litex_write64(host->sdreader + LITEX_BLK2MEM_BASE, dma);
+ litex_write32(host->sdreader + LITEX_BLK2MEM_LEN, *len);
+ litex_write8(host->sdreader + LITEX_BLK2MEM_ENA, 1);
+ *transfer = SD_CTL_DATA_XFER_READ;
+ } else if (data->flags & MMC_DATA_WRITE) {
+ if (!*direct)
+ sg_copy_to_buffer(data->sg, data->sg_len,
+ host->buffer, *len);
+ litex_write8(host->sdwriter + LITEX_MEM2BLK_ENA, 0);
+ litex_write64(host->sdwriter + LITEX_MEM2BLK_BASE, dma);
+ litex_write32(host->sdwriter + LITEX_MEM2BLK_LEN, *len);
+ litex_write8(host->sdwriter + LITEX_MEM2BLK_ENA, 1);
+ *transfer = SD_CTL_DATA_XFER_WRITE;
+ } else {
+ dev_warn(dev, "Data present w/o read or write flag.\n");
+ /* Continue: set cmd status, mark req done */
+ }
+
+ litex_write16(host->sdcore + LITEX_CORE_BLKLEN, data->blksz);
+ litex_write32(host->sdcore + LITEX_CORE_BLKCNT, data->blocks);
+}
+
+static void litex_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct litex_mmc_host *host = mmc_priv(mmc);
+ struct device *dev = mmc_dev(mmc);
+ struct mmc_command *cmd = mrq->cmd;
+ struct mmc_command *sbc = mrq->sbc;
+ struct mmc_data *data = mrq->data;
+ struct mmc_command *stop = mrq->stop;
+ unsigned int retries = cmd->retries;
+ unsigned int len = 0;
+ bool direct = false;
+ u32 response_len = litex_mmc_response_len(cmd);
+ u8 transfer = SD_CTL_DATA_XFER_NONE;
+
+ /* First check that the card is still there */
+ if (!litex_mmc_get_cd(mmc)) {
+ cmd->error = -ENOMEDIUM;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ /* Send set-block-count command if needed */
+ if (sbc) {
+ sbc->error = litex_mmc_send_cmd(host, sbc->opcode, sbc->arg,
+ litex_mmc_response_len(sbc),
+ SD_CTL_DATA_XFER_NONE);
+ if (sbc->error) {
+ host->is_bus_width_set = false;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+ }
+
+ if (data) {
+ /*
+ * LiteSDCard only supports 4-bit bus width; therefore, we MUST
+ * inject a SET_BUS_WIDTH (acmd6) before the very first data
+ * transfer, earlier than when the mmc subsystem would normally
+ * get around to it!
+ */
+ cmd->error = litex_mmc_set_bus_width(host);
+ if (cmd->error) {
+ dev_err(dev, "Can't set bus width!\n");
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ litex_mmc_do_dma(host, data, &len, &direct, &transfer);
+ }
+
+ do {
+ cmd->error = litex_mmc_send_cmd(host, cmd->opcode, cmd->arg,
+ response_len, transfer);
+ } while (cmd->error && retries-- > 0);
+
+ if (cmd->error) {
+ /* Card may be gone; don't assume bus width is still set */
+ host->is_bus_width_set = false;
+ }
+
+ if (response_len == SD_CTL_RESP_SHORT) {
+ /* Pull short response fields from appropriate host registers */
+ cmd->resp[0] = host->resp[3];
+ cmd->resp[1] = host->resp[2] & 0xFF;
+ } else if (response_len == SD_CTL_RESP_LONG) {
+ cmd->resp[0] = host->resp[0];
+ cmd->resp[1] = host->resp[1];
+ cmd->resp[2] = host->resp[2];
+ cmd->resp[3] = host->resp[3];
+ }
+
+ /* Send stop-transmission command if required */
+ if (stop && (cmd->error || !sbc)) {
+ stop->error = litex_mmc_send_cmd(host, stop->opcode, stop->arg,
+ litex_mmc_response_len(stop),
+ SD_CTL_DATA_XFER_NONE);
+ if (stop->error)
+ host->is_bus_width_set = false;
+ }
+
+ if (data) {
+ dma_unmap_sg(dev, data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ }
+
+ if (!cmd->error && transfer != SD_CTL_DATA_XFER_NONE) {
+ data->bytes_xfered = min(len, mmc->max_req_size);
+ if (transfer == SD_CTL_DATA_XFER_READ && !direct) {
+ sg_copy_from_buffer(data->sg, sg_nents(data->sg),
+ host->buffer, data->bytes_xfered);
+ }
+ }
+
+ mmc_request_done(mmc, mrq);
+}
+
+static void litex_mmc_setclk(struct litex_mmc_host *host, unsigned int freq)
+{
+ struct device *dev = mmc_dev(host->mmc);
+ u32 div;
+
+ div = freq ? host->ref_clk / freq : 256U;
+ div = roundup_pow_of_two(div);
+ div = clamp(div, 2U, 256U);
+ dev_dbg(dev, "sd_clk_freq=%d: set to %d via div=%d\n",
+ freq, host->ref_clk / div, div);
+ litex_write16(host->sdphy + LITEX_PHY_CLOCKERDIV, div);
+ host->sd_clk = freq;
+}
+
+static void litex_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct litex_mmc_host *host = mmc_priv(mmc);
+
+ /*
+ * NOTE: Ignore any ios->bus_width updates; they occur right after
+ * the mmc core sends its own acmd6 bus-width change notification,
+ * which is redundant since we snoop on the command flow and inject
+ * an early acmd6 before the first data transfer command is sent!
+ */
+
+ /* Update sd_clk */
+ if (ios->clock != host->sd_clk)
+ litex_mmc_setclk(host, ios->clock);
+}
+
+static const struct mmc_host_ops litex_mmc_ops = {
+ .get_cd = litex_mmc_get_cd,
+ .request = litex_mmc_request,
+ .set_ios = litex_mmc_set_ios,
+};
+
+static int litex_mmc_irq_init(struct platform_device *pdev,
+ struct litex_mmc_host *host)
+{
+ struct device *dev = mmc_dev(host->mmc);
+ int ret;
+
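+ /* The IRQ is optional: -ENXIO (no interrupt resource) simply means we poll */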
+ ret = platform_get_irq_optional(pdev, 0);
+ if (ret < 0 && ret != -ENXIO)
+ return ret;
+ if (ret > 0) {
+ host->irq = ret;
+ } else {
+ dev_warn(dev, "Failed to get IRQ, using polling\n");
+ goto use_polling;
+ }
+
+ host->sdirq = devm_platform_ioremap_resource_byname(pdev, "irq");
+ if (IS_ERR(host->sdirq))
+ return PTR_ERR(host->sdirq);
+
+ ret = devm_request_irq(dev, host->irq, litex_mmc_interrupt, 0,
+ "litex-mmc", host->mmc);
+ if (ret < 0) {
+ dev_warn(dev, "IRQ request error %d, using polling\n", ret);
+ goto use_polling;
+ }
+
+ /* Clear & enable card-change interrupts */
+ litex_write32(host->sdirq + LITEX_IRQ_PENDING, SDIRQ_CARD_DETECT);
+ litex_write32(host->sdirq + LITEX_IRQ_ENABLE, SDIRQ_CARD_DETECT);
+
+ return 0;
+
+use_polling:
+ host->mmc->caps |= MMC_CAP_NEEDS_POLL;
+ return 0;
+}
+
+static void litex_mmc_free_host_wrapper(void *mmc)
+{
+ mmc_free_host(mmc);
+}
+
+static int litex_mmc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct litex_mmc_host *host;
+ struct mmc_host *mmc;
+ struct clk *clk;
+ int ret;
+
+ /*
+ * NOTE: mmc_alloc_host() defaults to max_[req,seg]_size=PAGE_SIZE,
+ * max_blk_size=512, and max_blk_count accordingly set to
+ * PAGE_SIZE/512 (8 for 4K pages).
+ * If we ever need to modify max_blk_count, we must also
+ * re-calculate `max_[req,seg]_size = max_blk_size * max_blk_count;`
+ */
+ mmc = mmc_alloc_host(sizeof(struct litex_mmc_host), dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ ret = devm_add_action_or_reset(dev, litex_mmc_free_host_wrapper, mmc);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Can't register mmc_free_host action\n");
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+
+ /* Initialize clock source */
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "can't get clock\n");
+ host->ref_clk = clk_get_rate(clk);
+ host->sd_clk = 0;
+
+ /*
+ * LiteSDCard only supports 4-bit bus width; therefore, we MUST inject
+ * a SET_BUS_WIDTH (acmd6) before the very first data transfer, earlier
+ * than when the mmc subsystem would normally get around to it!
+ */
+ host->is_bus_width_set = false;
+ host->app_cmd = false;
+
+ /* LiteSDCard can support 64-bit DMA addressing */
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
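+ /* Bounce buffer used whenever a request's sg list can't be mapped for direct DMA */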
+ host->buf_size = mmc->max_req_size * 2;
+ host->buffer = dmam_alloc_coherent(dev, host->buf_size,
+ &host->dma, GFP_KERNEL);
+ if (!host->buffer)
+ return -ENOMEM;
+
+ host->sdphy = devm_platform_ioremap_resource_byname(pdev, "phy");
+ if (IS_ERR(host->sdphy))
+ return PTR_ERR(host->sdphy);
+
+ host->sdcore = devm_platform_ioremap_resource_byname(pdev, "core");
+ if (IS_ERR(host->sdcore))
+ return PTR_ERR(host->sdcore);
+
+ host->sdreader = devm_platform_ioremap_resource_byname(pdev, "reader");
+ if (IS_ERR(host->sdreader))
+ return PTR_ERR(host->sdreader);
+
+ host->sdwriter = devm_platform_ioremap_resource_byname(pdev, "writer");
+ if (IS_ERR(host->sdwriter))
+ return PTR_ERR(host->sdwriter);
+
+ /* Ensure DMA bus masters are disabled */
+ litex_write8(host->sdreader + LITEX_BLK2MEM_ENA, 0);
+ litex_write8(host->sdwriter + LITEX_MEM2BLK_ENA, 0);
+
+ init_completion(&host->cmd_done);
+ ret = litex_mmc_irq_init(pdev, host);
+ if (ret)
+ return ret;
+
+ mmc->ops = &litex_mmc_ops;
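+ /* If the headers can't be pulled, err on the side of treating the packet as ND (i.e. don't tx-balance it) */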
+
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret || mmc->ocr_avail == 0) {
+ dev_warn(dev, "can't get voltage, defaulting to 3.3V\n");
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ }
+
+ /*
+ * Set default sd_clk frequency range based on empirical observations
+ * of LiteSDCard gateware behavior on typical SDCard media
+ */
+ mmc->f_min = 12.5e6;
+ mmc->f_max = 50e6;
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ return ret;
+
+ /* Force 4-bit bus_width (only width supported by hardware) */
+ mmc->caps &= ~MMC_CAP_8_BIT_DATA;
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+ /* Set default capabilities */
+ mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
+ MMC_CAP_DRIVER_TYPE_D |
+ MMC_CAP_CMD23;
+ mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT |
+ MMC_CAP2_NO_SDIO |
+ MMC_CAP2_NO_MMC;
+
+ platform_set_drvdata(pdev, host);
+
+ ret = mmc_add_host(mmc);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "LiteX MMC controller initialized.\n");
+ return 0;
+}
+
+static int litex_mmc_remove(struct platform_device *pdev)
+{
+ struct litex_mmc_host *host = platform_get_drvdata(pdev);
+
+ mmc_remove_host(host->mmc);
+ return 0;
+}
+
+static const struct of_device_id litex_match[] = {
+ { .compatible = "litex,mmc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, litex_match);
+
+static struct platform_driver litex_mmc_driver = {
+ .probe = litex_mmc_probe,
+ .remove = litex_mmc_remove,
+ .driver = {
+ .name = "litex-mmc",
+ .of_match_table = litex_match,
+ },
+};
+module_platform_driver(litex_mmc_driver);
+
+MODULE_DESCRIPTION("LiteX SDCard driver");
+MODULE_AUTHOR("Antmicro <contact@antmicro.com>");
+MODULE_AUTHOR("Kamil Rakoczy <krakoczy@antmicro.com>");
+MODULE_AUTHOR("Maciej Dudek <mdudek@internships.antmicro.com>");
+MODULE_AUTHOR("Paul Mackerras <paulus@ozlabs.org>");
+MODULE_AUTHOR("Gabriel Somlo <gsomlo@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 8f36536cb1b6..58ab9d90bc8b 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -173,6 +173,8 @@ struct meson_host {
int irq;
bool vqmmc_enabled;
+ bool needs_pre_post_req;
+
};
#define CMD_CFG_LENGTH_MASK GENMASK(8, 0)
@@ -663,6 +665,8 @@ static void meson_mmc_request_done(struct mmc_host *mmc,
struct meson_host *host = mmc_priv(mmc);
host->cmd = NULL;
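+ /* Unmap the data buffers only now that the request has truly completed */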
+ if (host->needs_pre_post_req)
+ meson_mmc_post_req(mmc, mrq, 0);
mmc_request_done(host->mmc, mrq);
}
@@ -880,7 +884,7 @@ static int meson_mmc_validate_dram_access(struct mmc_host *mmc, struct mmc_data
static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct meson_host *host = mmc_priv(mmc);
- bool needs_pre_post_req = mrq->data &&
+ host->needs_pre_post_req = mrq->data &&
!(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);
/*
@@ -896,22 +900,19 @@ static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
}
}
- if (needs_pre_post_req) {
+ if (host->needs_pre_post_req) {
meson_mmc_get_transfer_mode(mmc, mrq);
if (!meson_mmc_desc_chain_mode(mrq->data))
- needs_pre_post_req = false;
+ host->needs_pre_post_req = false;
}
- if (needs_pre_post_req)
+ if (host->needs_pre_post_req)
meson_mmc_pre_req(mmc, mrq);
/* Stop execution */
writel(0, host->regs + SD_EMMC_START);
meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd);
-
- if (needs_pre_post_req)
- meson_mmc_post_req(mmc, mrq, 0);
}
static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index a576181e9db0..106dd204b1a7 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1489,7 +1489,7 @@ nomem:
}
-static int mmc_spi_remove(struct spi_device *spi)
+static void mmc_spi_remove(struct spi_device *spi)
{
struct mmc_host *mmc = dev_get_drvdata(&spi->dev);
struct mmc_spi_host *host = mmc_priv(mmc);
@@ -1507,7 +1507,6 @@ static int mmc_spi_remove(struct spi_device *spi)
spi->max_speed_hz = mmc->f_max;
mmc_spi_put_pdata(spi);
mmc_free_host(mmc);
- return 0;
}
static const struct spi_device_id mmc_spi_dev_ids[] = {
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 65037e1d7723..e61b0b98065a 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -1911,8 +1911,8 @@ static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay)
final_phase = (start_final + len_final / 3) % PAD_DELAY_MAX;
else
final_phase = (start_final + len_final / 2) % PAD_DELAY_MAX;
- dev_info(host->dev, "phase: [map:%x] [maxlen:%d] [final:%d]\n",
- delay, len_final, final_phase);
+ dev_dbg(host->dev, "phase: [map:%x] [maxlen:%d] [final:%d]\n",
+ delay, len_final, final_phase);
delay_phase.maxlen = len_final;
delay_phase.start = start_final;
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 9d2c600fd4ce..1685df00863b 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -212,7 +212,7 @@ static const struct soc_device_attribute sdhi_quirks_match[] = {
{ .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap },
{ .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 },
- { /* Sentinel. */ },
+ { /* Sentinel. */ }
};
static const struct renesas_sdhi_of_data_with_quirks of_r8a7795_compatible = {
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 58cfaffa3c2d..219029224727 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -38,10 +38,7 @@ struct realtek_pci_sdmmc {
bool double_clk;
bool eject;
bool initial_mode;
- int power_state;
-#define SDMMC_POWER_ON 1
-#define SDMMC_POWER_OFF 0
-
+ int prev_power_state;
int sg_count;
s32 cookie;
int cookie_sg_count;
@@ -905,7 +902,7 @@ static int sd_set_bus_width(struct realtek_pci_sdmmc *host,
return err;
}
-static int sd_power_on(struct realtek_pci_sdmmc *host)
+static int sd_power_on(struct realtek_pci_sdmmc *host, unsigned char power_mode)
{
struct rtsx_pcr *pcr = host->pcr;
struct mmc_host *mmc = host->mmc;
@@ -913,9 +910,14 @@ static int sd_power_on(struct realtek_pci_sdmmc *host)
u32 val;
u8 test_mode;
- if (host->power_state == SDMMC_POWER_ON)
+ if (host->prev_power_state == MMC_POWER_ON)
return 0;
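+ /* Rails were already brought up during MMC_POWER_UP; just stop the initialization clock toggling */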
+ if (host->prev_power_state == MMC_POWER_UP) {
+ rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, 0);
+ goto finish;
+ }
+
msleep(100);
rtsx_pci_init_cmd(pcr);
@@ -936,10 +938,15 @@ static int sd_power_on(struct realtek_pci_sdmmc *host)
if (err < 0)
return err;
+ mdelay(1);
+
err = rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
if (err < 0)
return err;
+ /* send at least 74 clocks */
+ rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, SD_CLK_TOGGLE_EN);
+
if (PCI_PID(pcr) == PID_5261) {
/*
* If test mode is set switch to SD Express mandatorily,
@@ -964,7 +971,8 @@ static int sd_power_on(struct realtek_pci_sdmmc *host)
}
}
- host->power_state = SDMMC_POWER_ON;
+finish:
+ host->prev_power_state = power_mode;
return 0;
}
@@ -973,7 +981,7 @@ static int sd_power_off(struct realtek_pci_sdmmc *host)
struct rtsx_pcr *pcr = host->pcr;
int err;
- host->power_state = SDMMC_POWER_OFF;
+ host->prev_power_state = MMC_POWER_OFF;
rtsx_pci_init_cmd(pcr);
@@ -999,7 +1007,7 @@ static int sd_set_power_mode(struct realtek_pci_sdmmc *host,
if (power_mode == MMC_POWER_OFF)
err = sd_power_off(host);
else
- err = sd_power_on(host);
+ err = sd_power_on(host, power_mode);
return err;
}
@@ -1482,10 +1490,11 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
host = mmc_priv(mmc);
host->pcr = pcr;
+ mmc->ios.power_delay_ms = 5;
host->mmc = mmc;
host->pdev = pdev;
host->cookie = -1;
- host->power_state = SDMMC_POWER_OFF;
+ host->prev_power_state = MMC_POWER_OFF;
INIT_WORK(&host->work, sd_request);
platform_set_drvdata(pdev, host);
pcr->slots[RTSX_SD_CARD].p_dev = pdev;
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index d1a1c548c515..10fb4cb2c731 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -308,17 +308,15 @@ static const struct dev_pm_ops sdhci_at91_dev_pm_ops = {
static int sdhci_at91_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
const struct sdhci_at91_soc_data *soc_data;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_at91_priv *priv;
int ret;
- match = of_match_device(sdhci_at91_dt_match, &pdev->dev);
- if (!match)
+ soc_data = of_device_get_match_data(&pdev->dev);
+ if (!soc_data)
return -EINVAL;
- soc_data = match->data;
host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*priv));
if (IS_ERR(host))
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 0f3658b36513..d9dc41143bb3 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -934,7 +934,7 @@ static struct soc_device_attribute soc_tuning_erratum_type1[] = {
{ .family = "QorIQ T1040", },
{ .family = "QorIQ T2080", },
{ .family = "QorIQ LS1021A", },
- { },
+ { /* sentinel */ }
};
static struct soc_device_attribute soc_tuning_erratum_type2[] = {
@@ -944,7 +944,7 @@ static struct soc_device_attribute soc_tuning_erratum_type2[] = {
{ .family = "QorIQ LS1080A", },
{ .family = "QorIQ LS2080A", },
{ .family = "QorIQ LA1575A", },
- { },
+ { /* sentinel */ }
};
static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
@@ -1316,21 +1316,21 @@ static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
static struct soc_device_attribute soc_incorrect_hostver[] = {
{ .family = "QorIQ T4240", .revision = "1.0", },
{ .family = "QorIQ T4240", .revision = "2.0", },
- { },
+ { /* sentinel */ }
};
static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
{ .family = "QorIQ LX2160A", .revision = "1.0", },
{ .family = "QorIQ LX2160A", .revision = "2.0", },
{ .family = "QorIQ LS1028A", .revision = "1.0", },
- { },
+ { /* sentinel */ }
};
static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
{ .family = "QorIQ LX2160A", .revision = "1.0", },
{ .family = "QorIQ LX2160A", .revision = "2.0", },
{ .family = "QorIQ LS1028A", .revision = "1.0", },
- { },
+ { /* sentinel */ }
};
static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 97035d77c18c..d09728c37d03 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -13,6 +13,7 @@
#include <linux/mmc/mmc.h>
#include <linux/delay.h>
#include <linux/of.h>
+#include <linux/iopoll.h>
#include "sdhci.h"
#include "sdhci-pci.h"
#include "cqhci.h"
@@ -63,6 +64,7 @@
#define GLI_9750_MISC_RX_INV_OFF 0x0
#define GLI_9750_MISC_RX_INV_VALUE GLI_9750_MISC_RX_INV_OFF
#define GLI_9750_MISC_TX1_DLY_VALUE 0x5
+#define SDHCI_GLI_9750_MISC_SSC_OFF BIT(26)
#define SDHCI_GLI_9750_TUNING_CONTROL 0x540
#define SDHCI_GLI_9750_TUNING_CONTROL_EN BIT(4)
@@ -137,6 +139,9 @@
#define PCI_GLI_9755_SerDes 0x70
#define PCI_GLI_9755_SCP_DIS BIT(19)
+#define PCI_GLI_9755_MISC 0x78
+#define PCI_GLI_9755_MISC_SSC_OFF BIT(26)
+
#define GLI_MAX_TUNING_LOOP 40
/* Genesys Logic chipset */
@@ -371,6 +376,19 @@ static void gl9750_set_pll(struct sdhci_host *host, u8 dir, u16 ldiv, u8 pdiv)
mdelay(1);
}
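+/* Read back whether SSC is currently enabled so PLL reprogramming preserves the existing setting */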
+static bool gl9750_ssc_enable(struct sdhci_host *host)
+{
+ u32 misc;
+ u8 off;
+
+ gl9750_wt_on(host);
+ misc = sdhci_readl(host, SDHCI_GLI_9750_MISC);
+ off = FIELD_GET(SDHCI_GLI_9750_MISC_SSC_OFF, misc);
+ gl9750_wt_off(host);
+
+ return !off;
+}
+
static void gl9750_set_ssc(struct sdhci_host *host, u8 enable, u8 step, u16 ppm)
{
u32 pll;
@@ -392,11 +410,31 @@ static void gl9750_set_ssc(struct sdhci_host *host, u8 enable, u8 step, u16 ppm)
static void gl9750_set_ssc_pll_205mhz(struct sdhci_host *host)
{
- /* set pll to 205MHz and enable ssc */
- gl9750_set_ssc(host, 0x1, 0x1F, 0xFFE7);
+ bool enable = gl9750_ssc_enable(host);
+
+ /* set pll to 205MHz and ssc */
+ gl9750_set_ssc(host, enable, 0xF, 0x5A1D);
gl9750_set_pll(host, 0x1, 0x246, 0x0);
}
+static void gl9750_set_ssc_pll_100mhz(struct sdhci_host *host)
+{
+ bool enable = gl9750_ssc_enable(host);
+
+ /* set pll to 100MHz and ssc */
+ gl9750_set_ssc(host, enable, 0xE, 0x51EC);
+ gl9750_set_pll(host, 0x1, 0x244, 0x1);
+}
+
+static void gl9750_set_ssc_pll_50mhz(struct sdhci_host *host)
+{
+ bool enable = gl9750_ssc_enable(host);
+
+ /* set pll to 50MHz and ssc */
+ gl9750_set_ssc(host, enable, 0xE, 0x51EC);
+ gl9750_set_pll(host, 0x1, 0x244, 0x3);
+}
+
static void sdhci_gl9750_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct mmc_ios *ios = &host->mmc->ios;
@@ -414,6 +452,10 @@ static void sdhci_gl9750_set_clock(struct sdhci_host *host, unsigned int clock)
if (clock == 200000000 && ios->timing == MMC_TIMING_UHS_SDR104) {
host->mmc->actual_clock = 205000000;
gl9750_set_ssc_pll_205mhz(host);
+ } else if (clock == 100000000) {
+ gl9750_set_ssc_pll_100mhz(host);
+ } else if (clock == 50000000) {
+ gl9750_set_ssc_pll_50mhz(host);
}
sdhci_enable_clk(host, clk);
@@ -514,6 +556,19 @@ static void gl9755_set_pll(struct pci_dev *pdev, u8 dir, u16 ldiv, u8 pdiv)
mdelay(1);
}
+static bool gl9755_ssc_enable(struct pci_dev *pdev)
+{
+ u32 misc;
+ u8 off;
+
+ gl9755_wt_on(pdev);
+ pci_read_config_dword(pdev, PCI_GLI_9755_MISC, &misc);
+ off = FIELD_GET(PCI_GLI_9755_MISC_SSC_OFF, misc);
+ gl9755_wt_off(pdev);
+
+ return !off;
+}
+
static void gl9755_set_ssc(struct pci_dev *pdev, u8 enable, u8 step, u16 ppm)
{
u32 pll;
@@ -535,11 +590,31 @@ static void gl9755_set_ssc(struct pci_dev *pdev, u8 enable, u8 step, u16 ppm)
static void gl9755_set_ssc_pll_205mhz(struct pci_dev *pdev)
{
- /* set pll to 205MHz and enable ssc */
- gl9755_set_ssc(pdev, 0x1, 0x1F, 0xFFE7);
+ bool enable = gl9755_ssc_enable(pdev);
+
+ /* set pll to 205MHz and ssc */
+ gl9755_set_ssc(pdev, enable, 0xF, 0x5A1D);
gl9755_set_pll(pdev, 0x1, 0x246, 0x0);
}
+static void gl9755_set_ssc_pll_100mhz(struct pci_dev *pdev)
+{
+ bool enable = gl9755_ssc_enable(pdev);
+
+ /* set pll to 100MHz and ssc */
+ gl9755_set_ssc(pdev, enable, 0xE, 0x51EC);
+ gl9755_set_pll(pdev, 0x1, 0x244, 0x1);
+}
+
+static void gl9755_set_ssc_pll_50mhz(struct pci_dev *pdev)
+{
+ bool enable = gl9755_ssc_enable(pdev);
+
+ /* set pll to 50MHz and ssc */
+ gl9755_set_ssc(pdev, enable, 0xE, 0x51EC);
+ gl9755_set_pll(pdev, 0x1, 0x244, 0x3);
+}
+
static void sdhci_gl9755_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
@@ -560,6 +635,10 @@ static void sdhci_gl9755_set_clock(struct sdhci_host *host, unsigned int clock)
if (clock == 200000000 && ios->timing == MMC_TIMING_UHS_SDR104) {
host->mmc->actual_clock = 205000000;
gl9755_set_ssc_pll_205mhz(pdev);
+ } else if (clock == 100000000) {
+ gl9755_set_ssc_pll_100mhz(pdev);
+ } else if (clock == 50000000) {
+ gl9755_set_ssc_pll_50mhz(pdev);
}
sdhci_enable_clk(host, clk);
@@ -873,6 +952,47 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
}
+#ifdef CONFIG_PM
+static int gl9763e_runtime_suspend(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+ struct sdhci_host *host = slot->host;
+ u16 clock;
+
+ clock = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clock &= ~(SDHCI_CLOCK_PLL_EN | SDHCI_CLOCK_CARD_EN);
+ sdhci_writew(host, clock, SDHCI_CLOCK_CONTROL);
+
+ return 0;
+}
+
+static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+ struct sdhci_host *host = slot->host;
+ u16 clock;
+
+ clock = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+
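+ /* Bring the PLL back first and wait for it to lock before re-enabling the card clock */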
+ clock |= SDHCI_CLOCK_PLL_EN;
+ clock &= ~SDHCI_CLOCK_INT_STABLE;
+ sdhci_writew(host, clock, SDHCI_CLOCK_CONTROL);
+
+ /* Wait max 150 ms */
+ if (read_poll_timeout(sdhci_readw, clock, (clock & SDHCI_CLOCK_INT_STABLE),
+ 1000, 150000, false, host, SDHCI_CLOCK_CONTROL)) {
+ pr_err("%s: PLL clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+ }
+
+ clock |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clock, SDHCI_CLOCK_CONTROL);
+
+ return 0;
+}
+#endif
+
static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot)
{
struct pci_dev *pdev = slot->chip->pdev;
@@ -983,5 +1103,10 @@ const struct sdhci_pci_fixes sdhci_gl9763e = {
.resume = sdhci_cqhci_gli_resume,
.suspend = sdhci_cqhci_gli_suspend,
#endif
+#ifdef CONFIG_PM
+ .runtime_suspend = gl9763e_runtime_suspend,
+ .runtime_resume = gl9763e_runtime_resume,
+ .allow_runtime_pm = true,
+#endif
.add_host = gl9763e_add_host,
};
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 35ebba067e87..2d2d8260c681 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -1618,7 +1618,6 @@ cleanup:
static int sdhci_tegra_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
const struct sdhci_tegra_soc_data *soc_data;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
@@ -1626,10 +1625,9 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
struct clk *clk;
int rc;
- match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
- if (!match)
+ soc_data = of_device_get_match_data(&pdev->dev);
+ if (!soc_data)
return -EINVAL;
- soc_data = match->data;
host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
if (IS_ERR(host))
@@ -1673,6 +1671,9 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
/* HW busy detection is supported, but R1B responses are required. */
host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
+ /* GPIO CD can be set as a wakeup source */
+ host->mmc->caps |= MMC_CAP_CD_WAKE;
+
tegra_sdhci_parse_dt(host);
tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
@@ -1840,7 +1841,7 @@ static int sdhci_tegra_suspend(struct device *dev)
return ret;
}
- return 0;
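+ /* Arm the card-detect GPIO as a wake-up source for the duration of suspend */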
+ return mmc_gpio_set_cd_wake(host->mmc, true);
}
static int sdhci_tegra_resume(struct device *dev)
@@ -1848,6 +1849,10 @@ static int sdhci_tegra_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
int ret;
+ ret = mmc_gpio_set_cd_wake(host->mmc, false);
+ if (ret)
+ return ret;
+
ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index f654afbe8e83..e54fe24d47e7 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -514,26 +514,6 @@ static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = {
.flags = IOMUX_PRESENT,
};
-static const struct sdhci_pltfm_data sdhci_am64_8bit_pdata = {
- .ops = &sdhci_j721e_8bit_ops,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
-};
-
-static const struct sdhci_am654_driver_data sdhci_am64_8bit_drvdata = {
- .pdata = &sdhci_am64_8bit_pdata,
- .flags = DLL_PRESENT | DLL_CALIB,
-};
-
-static const struct sdhci_pltfm_data sdhci_am64_4bit_pdata = {
- .ops = &sdhci_j721e_4bit_ops,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
-};
-
-static const struct sdhci_am654_driver_data sdhci_am64_4bit_drvdata = {
- .pdata = &sdhci_am64_4bit_pdata,
- .flags = IOMUX_PRESENT,
-};
-
static const struct soc_device_attribute sdhci_am654_devices[] = {
{ .family = "AM65X",
.revision = "SR1.0",
@@ -759,11 +739,15 @@ static const struct of_device_id sdhci_am654_of_match[] = {
},
{
.compatible = "ti,am64-sdhci-8bit",
- .data = &sdhci_am64_8bit_drvdata,
+ .data = &sdhci_j721e_8bit_drvdata,
},
{
.compatible = "ti,am64-sdhci-4bit",
- .data = &sdhci_am64_4bit_drvdata,
+ .data = &sdhci_j721e_4bit_drvdata,
+ },
+ {
+ .compatible = "ti,am62-sdhci",
+ .data = &sdhci_j721e_4bit_drvdata,
},
{ /* sentinel */ }
};
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 104dcd702870..5f9ebf045b1c 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -521,8 +521,7 @@ static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
}
dev_dbg(dev, "clk %u/%u (%u, 0x%x)\n",
- (best_freq / (1 << (clkdiv + 1))), clk,
- best_freq, clkdiv);
+ (best_freq >> (clkdiv + 1)), clk, best_freq, clkdiv);
clk_set_rate(host->clk, best_freq);
clkdiv = clkdiv << 16;
@@ -1012,8 +1011,8 @@ static void sh_mmcif_clk_setup(struct sh_mmcif_host *host)
*/
host->clkdiv_map = 0x3ff;
- host->mmc->f_max = f_max / (1 << ffs(host->clkdiv_map));
- host->mmc->f_min = f_min / (1 << fls(host->clkdiv_map));
+ host->mmc->f_max = f_max >> ffs(host->clkdiv_map);
+ host->mmc->f_min = f_min >> fls(host->clkdiv_map);
} else {
unsigned int clk = clk_get_rate(host->clk);
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 2702736a1c57..c62afd212692 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1167,6 +1167,14 @@ static const struct sunxi_mmc_cfg sun9i_a80_cfg = {
.can_calibrate = false,
};
+static const struct sunxi_mmc_cfg sun20i_d1_cfg = {
+ .idma_des_size_bits = 13,
+ .idma_des_shift = 2,
+ .can_calibrate = true,
+ .mask_data0 = true,
+ .needs_new_timings = true,
+};
+
static const struct sunxi_mmc_cfg sun50i_a64_cfg = {
.idma_des_size_bits = 16,
.clk_delays = NULL,
@@ -1205,6 +1213,7 @@ static const struct of_device_id sunxi_mmc_of_match[] = {
{ .compatible = "allwinner,sun7i-a20-mmc", .data = &sun7i_a20_cfg },
{ .compatible = "allwinner,sun8i-a83t-emmc", .data = &sun8i_a83t_emmc_cfg },
{ .compatible = "allwinner,sun9i-a80-mmc", .data = &sun9i_a80_cfg },
+ { .compatible = "allwinner,sun20i-d1-mmc", .data = &sun20i_d1_cfg },
{ .compatible = "allwinner,sun50i-a64-mmc", .data = &sun50i_a64_cfg },
{ .compatible = "allwinner,sun50i-a64-emmc", .data = &sun50i_a64_emmc_cfg },
{ .compatible = "allwinner,sun50i-a100-mmc", .data = &sun50i_a100_cfg },
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index f936aad945ce..e754bb3f5c32 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -186,10 +186,6 @@ struct tmio_mmc_host {
void (*fixup_request)(struct tmio_mmc_host *host, struct mmc_request *mrq);
unsigned int (*get_timeout_cycles)(struct tmio_mmc_host *host);
- void (*prepare_hs400_tuning)(struct tmio_mmc_host *host);
- void (*hs400_downgrade)(struct tmio_mmc_host *host);
- void (*hs400_complete)(struct tmio_mmc_host *host);
-
const struct tmio_mmc_dma_ops *dma_ops;
};
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index cf10949fb0ac..163ac9df8cca 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -751,19 +751,16 @@ static int wmt_mci_probe(struct platform_device *pdev)
struct mmc_host *mmc;
struct wmt_mci_priv *priv;
struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *of_id =
- of_match_device(wmt_mci_dt_ids, &pdev->dev);
const struct wmt_mci_caps *wmt_caps;
int ret;
int regular_irq, dma_irq;
- if (!of_id || !of_id->data) {
+ wmt_caps = of_device_get_match_data(&pdev->dev);
+ if (!wmt_caps) {
dev_err(&pdev->dev, "Controller capabilities data missing\n");
return -EFAULT;
}
- wmt_caps = of_id->data;
-
if (!np) {
dev_err(&pdev->dev, "Missing SDMMC description in devicetree\n");
return -EFAULT;
diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c
index a8b31bddf14b..008df9d8898d 100644
--- a/drivers/mtd/devices/mchp23k256.c
+++ b/drivers/mtd/devices/mchp23k256.c
@@ -209,13 +209,11 @@ static int mchp23k256_probe(struct spi_device *spi)
return 0;
}
-static int mchp23k256_remove(struct spi_device *spi)
+static void mchp23k256_remove(struct spi_device *spi)
{
struct mchp23k256_flash *flash = spi_get_drvdata(spi);
WARN_ON(mtd_device_unregister(&flash->mtd));
-
- return 0;
}
static const struct of_device_id mchp23k256_of_table[] = {
diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c
index 231a10790196..a3fd426df74b 100644
--- a/drivers/mtd/devices/mchp48l640.c
+++ b/drivers/mtd/devices/mchp48l640.c
@@ -341,13 +341,11 @@ static int mchp48l640_probe(struct spi_device *spi)
return 0;
}
-static int mchp48l640_remove(struct spi_device *spi)
+static void mchp48l640_remove(struct spi_device *spi)
{
struct mchp48l640_flash *flash = spi_get_drvdata(spi);
WARN_ON(mtd_device_unregister(&flash->mtd));
-
- return 0;
}
static const struct of_device_id mchp48l640_of_table[] = {
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 734878abaa23..134e27328597 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -916,7 +916,7 @@ static int dataflash_probe(struct spi_device *spi)
return status;
}
-static int dataflash_remove(struct spi_device *spi)
+static void dataflash_remove(struct spi_device *spi)
{
struct dataflash *flash = spi_get_drvdata(spi);
@@ -925,8 +925,6 @@ static int dataflash_remove(struct spi_device *spi)
WARN_ON(mtd_device_unregister(&flash->mtd));
kfree(flash);
-
- return 0;
}
static struct spi_driver dataflash_driver = {
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 7f124c1bfa40..8813994ce9f4 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -398,13 +398,11 @@ static int sst25l_probe(struct spi_device *spi)
return 0;
}
-static int sst25l_remove(struct spi_device *spi)
+static void sst25l_remove(struct spi_device *spi)
{
struct sst25l_flash *flash = spi_get_drvdata(spi);
WARN_ON(mtd_device_unregister(&flash->mtd));
-
- return 0;
}
static struct spi_driver sst25l_driver = {
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index e86b04bc1d6b..dc7f1532a37f 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -19,7 +19,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index d986ab4e4c35..820e5dc3bc9b 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -42,8 +42,7 @@ config MTD_NAND_OMAP2
tristate "OMAP2, OMAP3, OMAP4 and Keystone NAND controller"
depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
depends on HAS_IOMEM
- select MEMORY
- select OMAP_GPMC
+ depends on OMAP_GPMC
help
Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
and Keystone platforms.
diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c
index 5612ee628425..52ce5162538a 100644
--- a/drivers/mtd/nand/raw/sharpsl.c
+++ b/drivers/mtd/nand/raw/sharpsl.c
@@ -6,7 +6,6 @@
* Based on Sharp's NAND driver sharp_sl.c
*/
-#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/delay.h>
diff --git a/drivers/mtd/spi-nor/controllers/Kconfig b/drivers/mtd/spi-nor/controllers/Kconfig
index 5c0e0ec2e6d1..50f4f3484d42 100644
--- a/drivers/mtd/spi-nor/controllers/Kconfig
+++ b/drivers/mtd/spi-nor/controllers/Kconfig
@@ -26,39 +26,3 @@ config SPI_NXP_SPIFI
SPIFI is a specialized controller for connecting serial SPI
Flash. Enable this option if you have a device with a SPIFI
controller and want to access the Flash as a mtd device.
-
-config SPI_INTEL_SPI
- tristate
-
-config SPI_INTEL_SPI_PCI
- tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)"
- depends on X86 && PCI
- select SPI_INTEL_SPI
- help
- This enables PCI support for the Intel PCH/PCU SPI controller in
- master mode. This controller is present in modern Intel hardware
- and is used to hold BIOS and other persistent settings. Using
- this driver it is possible to upgrade BIOS directly from Linux.
-
- Say N here unless you know what you are doing. Overwriting the
- SPI flash may render the system unbootable.
-
- To compile this driver as a module, choose M here: the module
- will be called intel-spi-pci.
-
-config SPI_INTEL_SPI_PLATFORM
- tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)"
- depends on X86
- select SPI_INTEL_SPI
- help
- This enables platform support for the Intel PCH/PCU SPI
- controller in master mode. This controller is present in modern
- Intel hardware and is used to hold BIOS and other persistent
- settings. Using this driver it is possible to upgrade BIOS
- directly from Linux.
-
- Say N here unless you know what you are doing. Overwriting the
- SPI flash may render the system unbootable.
-
- To compile this driver as a module, choose M here: the module
- will be called intel-spi-platform.
diff --git a/drivers/mtd/spi-nor/controllers/Makefile b/drivers/mtd/spi-nor/controllers/Makefile
index e7abba491d98..6e2a1dc68466 100644
--- a/drivers/mtd/spi-nor/controllers/Makefile
+++ b/drivers/mtd/spi-nor/controllers/Makefile
@@ -2,6 +2,3 @@
obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o
obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
-obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o
-obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o
-obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o
diff --git a/drivers/mtd/spi-nor/controllers/intel-spi.h b/drivers/mtd/spi-nor/controllers/intel-spi.h
deleted file mode 100644
index f2871179fd34..000000000000
--- a/drivers/mtd/spi-nor/controllers/intel-spi.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel PCH/PCU SPI flash driver.
- *
- * Copyright (C) 2016, Intel Corporation
- * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- */
-
-#ifndef INTEL_SPI_H
-#define INTEL_SPI_H
-
-#include <linux/platform_data/x86/intel-spi.h>
-
-struct intel_spi;
-struct resource;
-
-struct intel_spi *intel_spi_probe(struct device *dev,
- struct resource *mem, const struct intel_spi_boardinfo *info);
-int intel_spi_remove(struct intel_spi *ispi);
-
-#endif /* INTEL_SPI_H */
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 50b23e71065f..3f1192d3c52d 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -31,7 +31,7 @@ obj-$(CONFIG_TUN) += tun.o
obj-$(CONFIG_TAP) += tap.o
obj-$(CONFIG_VETH) += veth.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
-obj-$(CONFIG_VXLAN) += vxlan.o
+obj-$(CONFIG_VXLAN) += vxlan/
obj-$(CONFIG_GENEVE) += geneve.o
obj-$(CONFIG_BAREUDP) += bareudp.o
obj-$(CONFIG_GTP) += gtp.o
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index f1a36d7e2151..10455c9b9da0 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -2373,7 +2373,7 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
skb->pkt_type = PACKET_MULTICAST;
skb->ip_summed = CHECKSUM_NONE;
len = skb->len;
- if (netif_rx(skb) == NET_RX_SUCCESS) {
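+ /* We are in the UDP encap receive path (BH context), where __netif_rx() is the appropriate call */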
+ if (__netif_rx(skb) == NET_RX_SUCCESS) {
amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
dev_sw_netstats_rx_add(amt->dev, len);
} else {
@@ -2470,7 +2470,7 @@ report:
skb->pkt_type = PACKET_MULTICAST;
skb->ip_summed = CHECKSUM_NONE;
len = skb->len;
- if (netif_rx(skb) == NET_RX_SUCCESS) {
+ if (__netif_rx(skb) == NET_RX_SUCCESS) {
amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_UPDATE,
true);
dev_sw_netstats_rx_add(amt->dev, len);
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 6382e1937cca..c580acb8b1d3 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -138,6 +138,9 @@ static int com20020pci_probe(struct pci_dev *pdev,
return -ENOMEM;
ci = (struct com20020_pci_card_info *)id->driver_data;
+ if (!ci)
+ return -EINVAL;
+
priv->ci = ci;
mm = &ci->misc_map;
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index ba587e5fc24f..683203f87ae2 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -148,14 +148,14 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
skb_reset_network_header(skb);
skb_reset_mac_header(skb);
- if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
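+ /* IPv6 can be compiled in yet disabled at runtime (ipv6.disable=1); check dynamically */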
+ if (!ipv6_mod_enabled() || family == AF_INET)
err = IP_ECN_decapsulate(oiph, skb);
else
err = IP6_ECN_decapsulate(oiph, skb);
if (unlikely(err)) {
if (log_ecn_error) {
- if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
+ if (!ipv6_mod_enabled() || family == AF_INET)
net_info_ratelimited("non-ECT from %pI4 "
"with TOS=%#x\n",
&((struct iphdr *)oiph)->saddr,
@@ -221,11 +221,12 @@ static struct socket *bareudp_create_sock(struct net *net, __be16 port)
int err;
memset(&udp_conf, 0, sizeof(udp_conf));
-#if IS_ENABLED(CONFIG_IPV6)
- udp_conf.family = AF_INET6;
-#else
- udp_conf.family = AF_INET;
-#endif
+
+ if (ipv6_mod_enabled())
+ udp_conf.family = AF_INET6;
+ else
+ udp_conf.family = AF_INET;
+
udp_conf.local_udp_port = port;
/* Open UDP socket */
err = udp_sock_create(net, &udp_conf, &sock);
@@ -448,7 +449,7 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
}
rcu_read_lock();
- if (IS_ENABLED(CONFIG_IPV6) && info->mode & IP_TUNNEL_INFO_IPV6)
+ if (ipv6_mod_enabled() && info->mode & IP_TUNNEL_INFO_IPV6)
err = bareudp6_xmit_skb(skb, dev, bareudp, info);
else
err = bareudp_xmit_skb(skb, dev, bareudp, info);
@@ -478,7 +479,7 @@ static int bareudp_fill_metadata_dst(struct net_device *dev,
use_cache = ip_tunnel_dst_cache_usable(skb, info);
- if (!IS_ENABLED(CONFIG_IPV6) || ip_tunnel_info_af(info) == AF_INET) {
+ if (!ipv6_mod_enabled() || ip_tunnel_info_af(info) == AF_INET) {
struct rtable *rt;
__be32 saddr;
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 533e476988f2..303c8d32d451 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -19,6 +19,7 @@
#include <linux/in.h>
#include <net/arp.h>
#include <net/ipv6.h>
+#include <net/ndisc.h>
#include <asm/byteorder.h>
#include <net/bonding.h>
#include <net/bond_alb.h>
@@ -1269,6 +1270,27 @@ unwind:
return res;
}
+/* determine if the packet is NA or NS */
+static bool alb_determine_nd(struct sk_buff *skb, struct bonding *bond)
+{
+ struct ipv6hdr *ip6hdr;
+ struct icmp6hdr *hdr;
+
+ if (!pskb_network_may_pull(skb, sizeof(*ip6hdr)))
+ return true;
+
+ ip6hdr = ipv6_hdr(skb);
+ if (ip6hdr->nexthdr != IPPROTO_ICMPV6)
+ return false;
+
+ if (!pskb_network_may_pull(skb, sizeof(*ip6hdr) + sizeof(*hdr)))
+ return true;
+
+ hdr = icmp6_hdr(skb);
+ return hdr->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT ||
+ hdr->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION;
+}
+
/************************ exported alb functions ************************/
int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
@@ -1348,8 +1370,11 @@ struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,
/* Do not TX balance any multicast or broadcast */
if (!is_multicast_ether_addr(eth_data->h_dest)) {
switch (skb->protocol) {
- case htons(ETH_P_IP):
case htons(ETH_P_IPV6):
+ if (alb_determine_nd(skb, bond))
+ break;
+ fallthrough;
+ case htons(ETH_P_IP):
hash_index = bond_xmit_hash(bond, skb);
if (bond->params.tlb_dynamic_lb) {
tx_slave = tlb_choose_channel(bond,
@@ -1432,10 +1457,12 @@ struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
break;
}
- if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) {
+ if (alb_determine_nd(skb, bond)) {
do_tx_balance = false;
break;
}
+
+ /* The IPv6 header is pulled by alb_determine_nd */
/* Additionally, DAD probes should not be tx-balanced as that
* will lead to false positives for duplicate addresses and
* prevent address configuration from working.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index aebeb46e6fa6..15eddca7b4b6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -88,6 +88,7 @@
#if IS_ENABLED(CONFIG_TLS_DEVICE)
#include <net/tls.h>
#endif
+#include <net/ip6_route.h>
#include "bonding_priv.h"
@@ -2793,31 +2794,15 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
return ret;
}
-/* We go to the (large) trouble of VLAN tagging ARP frames because
- * switches in VLAN mode (especially if ports are configured as
- * "native" to a VLAN) might not pass non-tagged frames.
- */
-static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
- __be32 src_ip, struct bond_vlan_tag *tags)
+static bool bond_handle_vlan(struct slave *slave, struct bond_vlan_tag *tags,
+ struct sk_buff *skb)
{
- struct sk_buff *skb;
- struct bond_vlan_tag *outer_tag = tags;
- struct net_device *slave_dev = slave->dev;
struct net_device *bond_dev = slave->bond->dev;
-
- slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
- arp_op, &dest_ip, &src_ip);
-
- skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
- NULL, slave_dev->dev_addr, NULL);
-
- if (!skb) {
- net_err_ratelimited("ARP packet allocation failed\n");
- return;
- }
+ struct net_device *slave_dev = slave->dev;
+ struct bond_vlan_tag *outer_tag = tags;
if (!tags || tags->vlan_proto == VLAN_N_VID)
- goto xmit;
+ return true;
tags++;
@@ -2834,7 +2819,7 @@ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
tags->vlan_id);
if (!skb) {
net_err_ratelimited("failed to insert inner VLAN tag\n");
- return;
+ return false;
}
tags++;
@@ -2847,8 +2832,34 @@ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
outer_tag->vlan_id);
}
-xmit:
- arp_xmit(skb);
+ return true;
+}
+
+/* We go to the (large) trouble of VLAN tagging ARP frames because
+ * switches in VLAN mode (especially if ports are configured as
+ * "native" to a VLAN) might not pass non-tagged frames.
+ */
+static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
+ __be32 src_ip, struct bond_vlan_tag *tags)
+{
+ struct net_device *bond_dev = slave->bond->dev;
+ struct net_device *slave_dev = slave->dev;
+ struct sk_buff *skb;
+
+ slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
+ arp_op, &dest_ip, &src_ip);
+
+ skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
+ NULL, slave_dev->dev_addr, NULL);
+
+ if (!skb) {
+ net_err_ratelimited("ARP packet allocation failed\n");
+ return;
+ }
+
+ if (bond_handle_vlan(slave, tags, skb))
+ arp_xmit(skb);
+ return;
}
/* Validate the device path between the @start_dev and the @end_dev.
@@ -2965,30 +2976,17 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
slave->target_last_arp_rx[i] = jiffies;
}
-int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
- struct slave *slave)
+static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
{
struct arphdr *arp = (struct arphdr *)skb->data;
struct slave *curr_active_slave, *curr_arp_slave;
unsigned char *arp_ptr;
__be32 sip, tip;
- int is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
unsigned int alen;
- if (!slave_do_arp_validate(bond, slave)) {
- if ((slave_do_arp_validate_only(bond) && is_arp) ||
- !slave_do_arp_validate_only(bond))
- slave->last_rx = jiffies;
- return RX_HANDLER_ANOTHER;
- } else if (!is_arp) {
- return RX_HANDLER_ANOTHER;
- }
-
alen = arp_hdr_len(bond->dev);
- slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
- __func__, skb->dev->name);
-
if (alen > skb_headlen(skb)) {
arp = kmalloc(alen, GFP_ATOMIC);
if (!arp)
@@ -3059,6 +3057,216 @@ out_unlock:
return RX_HANDLER_ANOTHER;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr,
+ const struct in6_addr *saddr, struct bond_vlan_tag *tags)
+{
+ struct net_device *bond_dev = slave->bond->dev;
+ struct net_device *slave_dev = slave->dev;
+ struct in6_addr mcaddr;
+ struct sk_buff *skb;
+
+ slave_dbg(bond_dev, slave_dev, "NS on slave: dst %pI6c src %pI6c\n",
+ daddr, saddr);
+
+ skb = ndisc_ns_create(slave_dev, daddr, saddr, 0);
+ if (!skb) {
+ net_err_ratelimited("NS packet allocation failed\n");
+ return;
+ }
+
+ addrconf_addr_solict_mult(daddr, &mcaddr);
+ if (bond_handle_vlan(slave, tags, skb))
+ ndisc_send_skb(skb, &mcaddr, saddr);
+}
+
+static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
+{
+ struct in6_addr *targets = bond->params.ns_targets;
+ struct bond_vlan_tag *tags;
+ struct dst_entry *dst;
+ struct in6_addr saddr;
+ struct flowi6 fl6;
+ int i;
+
+ for (i = 0; i < BOND_MAX_NS_TARGETS && !ipv6_addr_any(&targets[i]); i++) {
+ slave_dbg(bond->dev, slave->dev, "%s: target %pI6c\n",
+ __func__, &targets[i]);
+ tags = NULL;
+
+ /* Find out through which dev should the packet go */
+ memset(&fl6, 0, sizeof(struct flowi6));
+ fl6.daddr = targets[i];
+ fl6.flowi6_oif = bond->dev->ifindex;
+
+ dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
+ if (dst->error) {
+ dst_release(dst);
+ /* there's no route to target - still send an NS
+ * probe to generate traffic; note this only helps
+ * monitoring when arp_validate=0
+ */
+ if (bond->params.arp_validate)
+ pr_warn_once("%s: no route to ns_ip6_target %pI6c and arp_validate is set\n",
+ bond->dev->name,
+ &targets[i]);
+ bond_ns_send(slave, &targets[i], &in6addr_any, tags);
+ continue;
+ }
+
+ /* bond device itself */
+ if (dst->dev == bond->dev)
+ goto found;
+
+ rcu_read_lock();
+ tags = bond_verify_device_path(bond->dev, dst->dev, 0);
+ rcu_read_unlock();
+
+ if (!IS_ERR_OR_NULL(tags))
+ goto found;
+
+ /* Not our device - skip */
+ slave_dbg(bond->dev, slave->dev, "no path to ns_ip6_target %pI6c via dst->dev %s\n",
+ &targets[i], dst->dev ? dst->dev->name : "NULL");
+
+ dst_release(dst);
+ continue;
+
+found:
+ if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr))
+ bond_ns_send(slave, &targets[i], &saddr, tags);
+ dst_release(dst);
+ kfree(tags);
+ }
+}
+
+static int bond_confirm_addr6(struct net_device *dev,
+ struct netdev_nested_priv *priv)
+{
+ struct in6_addr *addr = (struct in6_addr *)priv->data;
+
+ return ipv6_chk_addr(dev_net(dev), addr, dev, 0);
+}
+
+static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr)
+{
+ struct netdev_nested_priv priv = {
+ .data = addr,
+ };
+ int ret = false;
+
+ if (bond_confirm_addr6(bond->dev, &priv))
+ return true;
+
+ rcu_read_lock();
+ if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_confirm_addr6, &priv))
+ ret = true;
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static void bond_validate_ns(struct bonding *bond, struct slave *slave,
+ struct in6_addr *saddr, struct in6_addr *daddr)
+{
+ int i;
+
+ if (ipv6_addr_any(saddr) || !bond_has_this_ip6(bond, daddr)) {
+ slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n",
+ __func__, saddr, daddr);
+ return;
+ }
+
+ i = bond_get_targets_ip6(bond->params.ns_targets, saddr);
+ if (i == -1) {
+ slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c not found in targets\n",
+ __func__, saddr);
+ return;
+ }
+ slave->last_rx = jiffies;
+ slave->target_last_arp_rx[i] = jiffies;
+}
+
+static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
+{
+ struct slave *curr_active_slave, *curr_arp_slave;
+ struct icmp6hdr *hdr = icmp6_hdr(skb);
+ struct in6_addr *saddr, *daddr;
+
+ if (skb->pkt_type == PACKET_OTHERHOST ||
+ skb->pkt_type == PACKET_LOOPBACK ||
+ hdr->icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)
+ goto out;
+
+ saddr = &ipv6_hdr(skb)->saddr;
+ daddr = &ipv6_hdr(skb)->daddr;
+
+ slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n",
+ __func__, slave->dev->name, bond_slave_state(slave),
+ bond->params.arp_validate, slave_do_arp_validate(bond, slave),
+ saddr, daddr);
+
+ curr_active_slave = rcu_dereference(bond->curr_active_slave);
+ curr_arp_slave = rcu_dereference(bond->current_arp_slave);
+
+ /* We 'trust' the received NA enough to validate it under the
+ * same rules used for ARP replies; see bond_arp_rcv().
+ */
+ if (bond_is_active_slave(slave))
+ bond_validate_ns(bond, slave, saddr, daddr);
+ else if (curr_active_slave &&
+ time_after(slave_last_rx(bond, curr_active_slave),
+ curr_active_slave->last_link_up))
+ bond_validate_ns(bond, slave, saddr, daddr);
+ else if (curr_arp_slave &&
+ bond_time_in_interval(bond,
+ dev_trans_start(curr_arp_slave->dev), 1))
+ bond_validate_ns(bond, slave, saddr, daddr);
+
+out:
+ return RX_HANDLER_ANOTHER;
+}
+#endif
+
+int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ bool is_ipv6 = skb->protocol == __cpu_to_be16(ETH_P_IPV6);
+#endif
+ bool is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
+
+ slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
+ __func__, skb->dev->name);
+
+ /* Use arp validate logic for both ARP and NS */
+ if (!slave_do_arp_validate(bond, slave)) {
+ if ((slave_do_arp_validate_only(bond) && is_arp) ||
+#if IS_ENABLED(CONFIG_IPV6)
+ (slave_do_arp_validate_only(bond) && is_ipv6) ||
+#endif
+ !slave_do_arp_validate_only(bond))
+ slave->last_rx = jiffies;
+ return RX_HANDLER_ANOTHER;
+ } else if (is_arp) {
+ return bond_arp_rcv(skb, bond, slave);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (is_ipv6) {
+ return bond_na_rcv(skb, bond, slave);
+#endif
+ } else {
+ return RX_HANDLER_ANOTHER;
+ }
+}
+
+static void bond_send_validate(struct bonding *bond, struct slave *slave)
+{
+ bond_arp_send_all(bond, slave);
+#if IS_ENABLED(CONFIG_IPV6)
+ bond_ns_send_all(bond, slave);
+#endif
+}
+
/* function to verify if we're in the arp_interval timeslice, returns true if
* (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
* arp_interval/2) . the arp_interval/2 is needed for really fast networks.
@@ -3154,7 +3362,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* to be unstable during low/no traffic periods
*/
if (bond_slave_is_up(slave))
- bond_arp_send_all(bond, slave);
+ bond_send_validate(bond, slave);
}
rcu_read_unlock();
@@ -3368,7 +3576,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
curr_active_slave->dev->name);
if (curr_active_slave) {
- bond_arp_send_all(bond, curr_active_slave);
+ bond_send_validate(bond, curr_active_slave);
return should_notify_rtnl;
}
@@ -3420,7 +3628,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
BOND_SLAVE_NOTIFY_LATER);
bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
- bond_arp_send_all(bond, new_slave);
+ bond_send_validate(bond, new_slave);
new_slave->last_link_up = jiffies;
rcu_assign_pointer(bond->current_arp_slave, new_slave);
@@ -3956,7 +4164,7 @@ static int bond_open(struct net_device *bond_dev)
if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
queue_delayed_work(bond->wq, &bond->arp_work, 0);
- bond->recv_probe = bond_arp_rcv;
+ bond->recv_probe = bond_rcv_validate;
}
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -4912,7 +5120,7 @@ static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
if (xmit_suc)
return NETDEV_TX_OK;
- atomic_long_inc(&bond_dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(bond_dev);
return NET_XMIT_DROP;
}
@@ -5937,6 +6145,7 @@ static int bond_check_params(struct bond_params *params)
strscpy_pad(params->primary, primary, sizeof(params->primary));
memcpy(params->arp_targets, arp_target, sizeof(arp_target));
+ memset(params->ns_targets, 0, sizeof(struct in6_addr) * BOND_MAX_NS_TARGETS);
return 0;
}
@@ -6047,27 +6256,38 @@ static int __net_init bond_net_init(struct net *net)
return 0;
}
-static void __net_exit bond_net_exit(struct net *net)
+static void __net_exit bond_net_exit_batch(struct list_head *net_list)
{
- struct bond_net *bn = net_generic(net, bond_net_id);
- struct bonding *bond, *tmp_bond;
+ struct bond_net *bn;
+ struct net *net;
LIST_HEAD(list);
- bond_destroy_sysfs(bn);
+ list_for_each_entry(net, net_list, exit_list) {
+ bn = net_generic(net, bond_net_id);
+ bond_destroy_sysfs(bn);
+ }
/* Kill off any bonds created after unregistering bond rtnl ops */
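+ /* A single rtnl_lock section now covers unregistration for every namespace in the batch */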
rtnl_lock();
- list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
- unregister_netdevice_queue(bond->dev, &list);
+ list_for_each_entry(net, net_list, exit_list) {
+ struct bonding *bond, *tmp_bond;
+
+ bn = net_generic(net, bond_net_id);
+ list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
+ unregister_netdevice_queue(bond->dev, &list);
+ }
unregister_netdevice_many(&list);
rtnl_unlock();
- bond_destroy_proc_dir(bn);
+ list_for_each_entry(net, net_list, exit_list) {
+ bn = net_generic(net, bond_net_id);
+ bond_destroy_proc_dir(bn);
+ }
}
static struct pernet_operations bond_net_ops = {
.init = bond_net_init,
- .exit = bond_net_exit,
+ .exit_batch = bond_net_exit_batch,
.id = &bond_net_id,
.size = sizeof(struct bond_net),
};
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 1007bf6d385d..f427fa1737c7 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -14,6 +14,7 @@
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <net/bonding.h>
+#include <net/ipv6.h>
static size_t bond_get_slave_size(const struct net_device *bond_dev,
const struct net_device *slave_dev)
@@ -111,6 +112,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 },
[IFLA_BOND_PEER_NOTIF_DELAY] = { .type = NLA_U32 },
[IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
+ [IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED },
};
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
@@ -272,6 +274,40 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
if (err)
return err;
}
+#if IS_ENABLED(CONFIG_IPV6)
+ if (data[IFLA_BOND_NS_IP6_TARGET]) {
+ struct nlattr *attr;
+ int i = 0, rem;
+
+ bond_option_ns_ip6_targets_clear(bond);
+ nla_for_each_nested(attr, data[IFLA_BOND_NS_IP6_TARGET], rem) {
+ struct in6_addr addr6;
+
+ if (nla_len(attr) < sizeof(addr6)) {
+ NL_SET_ERR_MSG(extack, "Invalid IPv6 address");
+ return -EINVAL;
+ }
+
+ addr6 = nla_get_in6_addr(attr);
+
+ if (ipv6_addr_type(&addr6) & IPV6_ADDR_LINKLOCAL) {
+ NL_SET_ERR_MSG(extack, "Invalid IPv6 target: link-local addresses not allowed");
+ return -EINVAL;
+ }
+
+ bond_opt_initextra(&newval, &addr6, sizeof(addr6));
+ err = __bond_opt_set(bond, BOND_OPT_NS_TARGETS,
+ &newval);
+ if (err)
+ break;
+ i++;
+ }
+ if (i == 0 && bond->params.arp_interval)
+ netdev_warn(bond->dev, "Removing last ns target with arp_interval on\n");
+ if (err)
+ return err;
+ }
+#endif
if (data[IFLA_BOND_ARP_VALIDATE]) {
int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
@@ -526,6 +562,9 @@ static size_t bond_get_size(const struct net_device *bond_dev)
nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_PEER_NOTIF_DELAY */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_MISSED_MAX */
+ /* IFLA_BOND_NS_IP6_TARGET */
+ nla_total_size(sizeof(struct nlattr)) +
+ nla_total_size(sizeof(struct in6_addr)) * BOND_MAX_NS_TARGETS +
0;
}
@@ -603,6 +642,26 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.arp_all_targets))
goto nla_put_failure;
+#if IS_ENABLED(CONFIG_IPV6)
+ targets = nla_nest_start(skb, IFLA_BOND_NS_IP6_TARGET);
+ if (!targets)
+ goto nla_put_failure;
+
+ targets_added = 0;
+ for (i = 0; i < BOND_MAX_NS_TARGETS; i++) {
+ if (!ipv6_addr_any(&bond->params.ns_targets[i])) {
+ if (nla_put_in6_addr(skb, i, &bond->params.ns_targets[i]))
+ goto nla_put_failure;
+ targets_added = 1;
+ }
+ }
+
+ if (targets_added)
+ nla_nest_end(skb, targets);
+ else
+ nla_nest_cancel(skb, targets);
+#endif
+
primary = rtnl_dereference(bond->primary_slave);
if (primary &&
nla_put_u32(skb, IFLA_BOND_PRIMARY, primary->dev->ifindex))
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 2e8484a91a0e..64f7db2627ce 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -34,6 +34,10 @@ static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target);
static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target);
static int bond_option_arp_ip_targets_set(struct bonding *bond,
const struct bond_opt_value *newval);
+#if IS_ENABLED(CONFIG_IPV6)
+static int bond_option_ns_ip6_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+#endif
static int bond_option_arp_validate_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_arp_all_targets_set(struct bonding *bond,
@@ -295,6 +299,15 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.flags = BOND_OPTFLAG_RAWVAL,
.set = bond_option_arp_ip_targets_set
},
+#if IS_ENABLED(CONFIG_IPV6)
+ [BOND_OPT_NS_TARGETS] = {
+ .id = BOND_OPT_NS_TARGETS,
+ .name = "ns_ip6_target",
+ .desc = "NS targets in ffff:ffff::ffff:ffff form",
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .set = bond_option_ns_ip6_targets_set
+ },
+#endif
[BOND_OPT_DOWNDELAY] = {
.id = BOND_OPT_DOWNDELAY,
.name = "downdelay",
@@ -1052,7 +1065,7 @@ static int bond_option_arp_interval_set(struct bonding *bond,
cancel_delayed_work_sync(&bond->arp_work);
} else {
/* arp_validate can be set only in active-backup mode */
- bond->recv_probe = bond_arp_rcv;
+ bond->recv_probe = bond_rcv_validate;
cancel_delayed_work_sync(&bond->mii_work);
queue_delayed_work(bond->wq, &bond->arp_work, 0);
}
@@ -1184,6 +1197,65 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
return ret;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot,
+ struct in6_addr *target,
+ unsigned long last_rx)
+{
+ struct in6_addr *targets = bond->params.ns_targets;
+ struct list_head *iter;
+ struct slave *slave;
+
+ if (slot >= 0 && slot < BOND_MAX_NS_TARGETS) {
+ bond_for_each_slave(bond, slave, iter)
+ slave->target_last_arp_rx[slot] = last_rx;
+ targets[slot] = *target;
+ }
+}
+
+void bond_option_ns_ip6_targets_clear(struct bonding *bond)
+{
+ struct in6_addr addr_any = in6addr_any;
+ int i;
+
+ for (i = 0; i < BOND_MAX_NS_TARGETS; i++)
+ _bond_options_ns_ip6_target_set(bond, i, &addr_any, 0);
+}
+
+static int bond_option_ns_ip6_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ struct in6_addr *target = (struct in6_addr *)newval->extra;
+ struct in6_addr *targets = bond->params.ns_targets;
+ struct in6_addr addr_any = in6addr_any;
+ int index;
+
+ if (!bond_is_ip6_target_ok(target)) {
+ netdev_err(bond->dev, "invalid NS target %pI6c specified for addition\n",
+ target);
+ return -EINVAL;
+ }
+
+ if (bond_get_targets_ip6(targets, target) != -1) { /* dup */
+ netdev_err(bond->dev, "NS target %pI6c is already present\n",
+ target);
+ return -EINVAL;
+ }
+
+ index = bond_get_targets_ip6(targets, &addr_any); /* first free slot */
+ if (index == -1) {
+ netdev_err(bond->dev, "NS target table is full!\n");
+ return -EINVAL;
+ }
+
+ netdev_dbg(bond->dev, "Adding NS target %pI6c\n", target);
+
+ _bond_options_ns_ip6_target_set(bond, index, target, jiffies);
+
+ return 0;
+}
+#endif
+
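The set path above leans on bond_get_targets_ip6() for both the duplicate check and the first-free-slot lookup, with in6addr_any as the empty-slot sentinel. That helper is not part of this hunk; judging from the call sites it is presumably a linear scan along these lines (sketch, name and placement assumed):

    static int bond_get_targets_ip6(struct in6_addr *targets, struct in6_addr *ip)
    {
            int i;

            for (i = 0; i < BOND_MAX_NS_TARGETS; i++)
                    if (ipv6_addr_equal(&targets[i], ip))
                            return i;       /* slot holding this address */

            return -1;                      /* no match / table full */
    }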
static int bond_option_arp_validate_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 46b150e6289e..cfe37be42be4 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -307,7 +307,6 @@ void __net_init bond_create_proc_dir(struct bond_net *bn)
}
/* Destroy the bonding directory under /proc/net, if empty.
- * Caller must hold rtnl_lock.
*/
void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
{
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 6a6cdd0bb258..69b0a3751dff 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -15,14 +15,8 @@ struct slave_attribute {
ssize_t (*show)(struct slave *, char *);
};
-#define SLAVE_ATTR(_name, _mode, _show) \
-const struct slave_attribute slave_attr_##_name = { \
- .attr = {.name = __stringify(_name), \
- .mode = _mode }, \
- .show = _show, \
-};
#define SLAVE_ATTR_RO(_name) \
- SLAVE_ATTR(_name, 0444, _name##_show)
+const struct slave_attribute slave_attr_##_name = __ATTR_RO(_name)
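__ATTR_RO() from include/linux/sysfs.h expands to the same initializer the removed SLAVE_ATTR() produced, so this is a pure deduplication:

    #define __ATTR_RO(_name) {                                              \
            .attr   = { .name = __stringify(_name), .mode = 0444 },        \
            .show   = _name##_show,                                        \
    }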
static ssize_t state_show(struct slave *slave, char *buf)
{
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 2a7af611d43a..688075859ae4 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -196,7 +196,7 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data,
skb_reset_mac_header(skb);
debugfs_rx(ser, data, count);
/* Push received packet up the stack. */
- ret = netif_rx_ni(skb);
+ ret = netif_rx(skb);
if (!ret) {
ser->dev->stats.rx_packets++;
ser->dev->stats.rx_bytes += count;
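This conversion, and the other netif_rx_ni() removals below, depend on netif_rx() having been reworked in the same development cycle to be callable from any context; it now wraps the core receive path in a bottom-half critical section itself, roughly:

    int netif_rx(struct sk_buff *skb)
    {
            int ret;

            local_bh_disable();     /* previously the caller's job, via netif_rx_ni() */
            ret = __netif_rx(skb);
            local_bh_enable();      /* may also run any raised softirqs */

            return ret;
    }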
diff --git a/drivers/net/can/c_can/c_can_ethtool.c b/drivers/net/can/c_can/c_can_ethtool.c
index 6655146294fc..8a826a6813bd 100644
--- a/drivers/net/can/c_can/c_can_ethtool.c
+++ b/drivers/net/can/c_can/c_can_ethtool.c
@@ -11,14 +11,6 @@
#include "c_can.h"
-static void c_can_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *info)
-{
- struct c_can_priv *priv = netdev_priv(netdev);
- strscpy(info->driver, "c_can", sizeof(info->driver));
- strscpy(info->bus_info, dev_name(priv->device), sizeof(info->bus_info));
-}
-
static void c_can_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
@@ -33,7 +25,6 @@ static void c_can_get_ringparam(struct net_device *netdev,
}
static const struct ethtool_ops c_can_ethtool_ops = {
- .get_drvinfo = c_can_get_drvinfo,
.get_ringparam = c_can_get_ringparam,
};
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
index d5fca3bfaf9a..2103bcca9012 100644
--- a/drivers/net/can/dev/bittiming.c
+++ b/drivers/net/can/dev/bittiming.c
@@ -24,7 +24,7 @@
*/
static int
can_update_sample_point(const struct can_bittiming_const *btc,
- unsigned int sample_point_nominal, unsigned int tseg,
+ const unsigned int sample_point_nominal, const unsigned int tseg,
unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
unsigned int *sample_point_error_ptr)
{
@@ -63,7 +63,7 @@ can_update_sample_point(const struct can_bittiming_const *btc,
return best_sample_point;
}
-int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
+int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc)
{
struct can_priv *priv = netdev_priv(dev);
@@ -208,10 +208,10 @@ void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
* prescaler value brp. You can find more information in the header
* file linux/can/netlink.h.
*/
-static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
+static int can_fixup_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc)
{
- struct can_priv *priv = netdev_priv(dev);
+ const struct can_priv *priv = netdev_priv(dev);
unsigned int tseg1, alltseg;
u64 brp64;
@@ -244,25 +244,21 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
/* Checks the validity of predefined bitrate settings */
static int
-can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
+can_validate_bitrate(const struct net_device *dev, const struct can_bittiming *bt,
const u32 *bitrate_const,
const unsigned int bitrate_const_cnt)
{
- struct can_priv *priv = netdev_priv(dev);
unsigned int i;
for (i = 0; i < bitrate_const_cnt; i++) {
if (bt->bitrate == bitrate_const[i])
- break;
+ return 0;
}
- if (i >= priv->bitrate_const_cnt)
- return -EINVAL;
-
- return 0;
+ return -EINVAL;
}
-int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
+int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc,
const u32 *bitrate_const,
const unsigned int bitrate_const_cnt)
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index c192f25f9695..e7ab45f1c43b 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -154,7 +154,7 @@ static void can_restart(struct net_device *dev)
cf->can_id |= CAN_ERR_RESTARTED;
- netif_rx_ni(skb);
+ netif_rx(skb);
restart:
netdev_dbg(dev, "restarted\n");
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index 04687b15b250..41645a24384c 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -388,7 +388,7 @@ out_power:
return ret;
}
-static int tcan4x5x_can_remove(struct spi_device *spi)
+static void tcan4x5x_can_remove(struct spi_device *spi)
{
struct tcan4x5x_priv *priv = spi_get_drvdata(spi);
@@ -397,8 +397,6 @@ static int tcan4x5x_can_remove(struct spi_device *spi)
tcan4x5x_power_enable(priv->power, 0);
m_can_class_free_dev(priv->cdev.net);
-
- return 0;
}
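This remove callback, like the hi311x and mcp251x ones below, follows the SPI core change that made the remove hook return void; the returned value could not meaningfully stop removal anyway. The prototype in include/linux/spi/spi.h is now roughly:

    struct spi_driver {
            /* ... */
            void (*remove)(struct spi_device *spi); /* was: int (*remove)(...) */
            /* ... */
    };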
static const struct of_device_id tcan4x5x_of_match[] = {
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index b7dc1c32875f..1e121e04208c 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -44,6 +44,7 @@
enum rcanfd_chip_id {
RENESAS_RCAR_GEN3 = 0,
RENESAS_RZG2L,
+ RENESAS_R8A779A0,
};
/* Global register bits */
@@ -79,6 +80,7 @@ enum rcanfd_chip_id {
#define RCANFD_GSTS_GNOPM (BIT(0) | BIT(1) | BIT(2) | BIT(3))
/* RSCFDnCFDGERFL / RSCFDnGERFL */
+#define RCANFD_GERFL_EEF0_7 GENMASK(23, 16)
#define RCANFD_GERFL_EEF1 BIT(17)
#define RCANFD_GERFL_EEF0 BIT(16)
#define RCANFD_GERFL_CMPOF BIT(3) /* CAN FD only */
@@ -86,20 +88,26 @@ enum rcanfd_chip_id {
#define RCANFD_GERFL_MES BIT(1)
#define RCANFD_GERFL_DEF BIT(0)
-#define RCANFD_GERFL_ERR(gpriv, x) ((x) & (RCANFD_GERFL_EEF1 |\
- RCANFD_GERFL_EEF0 | RCANFD_GERFL_MES |\
- (gpriv->fdmode ?\
- RCANFD_GERFL_CMPOF : 0)))
+#define RCANFD_GERFL_ERR(gpriv, x) \
+ ((x) & (reg_v3u(gpriv, RCANFD_GERFL_EEF0_7, \
+ RCANFD_GERFL_EEF0 | RCANFD_GERFL_EEF1) | \
+ RCANFD_GERFL_MES | \
+ ((gpriv)->fdmode ? RCANFD_GERFL_CMPOF : 0)))
/* AFL Rx rules registers */
/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
-#define RCANFD_GAFLCFG_SETRNC(n, x) (((x) & 0xff) << (24 - n * 8))
-#define RCANFD_GAFLCFG_GETRNC(n, x) (((x) >> (24 - n * 8)) & 0xff)
+#define RCANFD_GAFLCFG_SETRNC(gpriv, n, x) \
+ (((x) & reg_v3u(gpriv, 0x1ff, 0xff)) << \
+ (reg_v3u(gpriv, 16, 24) - (n) * reg_v3u(gpriv, 16, 8)))
+
+#define RCANFD_GAFLCFG_GETRNC(gpriv, n, x) \
+ (((x) >> (reg_v3u(gpriv, 16, 24) - (n) * reg_v3u(gpriv, 16, 8))) & \
+ reg_v3u(gpriv, 0x1ff, 0xff))
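A worked example of the new rule-count packing, derived from the macros above:

    /* RCANFD_GAFLCFG_SETRNC(gpriv, n, x), illustrated:
     *
     *   non-V3U: (x & 0xff)  << (24 - n * 8)    8-bit fields at bits 31:24,
     *                                           23:16, 15:8 and 7:0
     *   V3U:     (x & 0x1ff) << (16 - n * 16)   9-bit fields at bits 24:16
     *                                           and 8:0
     *
     * i.e. V3U fits two counts per register, which is why the
     * RCANFD_GAFLCFG(ch) lookup below advances one register per channel pair.
     */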
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR_AFLDAE BIT(8)
-#define RCANFD_GAFLECTR_AFLPN(x) ((x) & 0x1f)
+#define RCANFD_GAFLECTR_AFLPN(gpriv, x) ((x) & reg_v3u(gpriv, 0x7f, 0x1f))
/* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */
#define RCANFD_GAFLID_GAFLLB BIT(29)
@@ -116,9 +124,15 @@ enum rcanfd_chip_id {
#define RCANFD_CFG_BRP(x) (((x) & 0x3ff) << 0)
/* RSCFDnCFDCmNCFG - CAN FD only */
-#define RCANFD_NCFG_NTSEG2(x) (((x) & 0x1f) << 24)
-#define RCANFD_NCFG_NTSEG1(x) (((x) & 0x7f) << 16)
-#define RCANFD_NCFG_NSJW(x) (((x) & 0x1f) << 11)
+#define RCANFD_NCFG_NTSEG2(gpriv, x) \
+ (((x) & reg_v3u(gpriv, 0x7f, 0x1f)) << reg_v3u(gpriv, 25, 24))
+
+#define RCANFD_NCFG_NTSEG1(gpriv, x) \
+ (((x) & reg_v3u(gpriv, 0xff, 0x7f)) << reg_v3u(gpriv, 17, 16))
+
+#define RCANFD_NCFG_NSJW(gpriv, x) \
+ (((x) & reg_v3u(gpriv, 0x7f, 0x1f)) << reg_v3u(gpriv, 10, 11))
+
#define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0)
/* RSCFDnCFDCmCTR / RSCFDnCmCTR */
@@ -180,11 +194,18 @@ enum rcanfd_chip_id {
/* RSCFDnCFDCmDCFG */
#define RCANFD_DCFG_DSJW(x) (((x) & 0x7) << 24)
-#define RCANFD_DCFG_DTSEG2(x) (((x) & 0x7) << 20)
-#define RCANFD_DCFG_DTSEG1(x) (((x) & 0xf) << 16)
+
+#define RCANFD_DCFG_DTSEG2(gpriv, x) \
+ (((x) & reg_v3u(gpriv, 0x0f, 0x7)) << reg_v3u(gpriv, 16, 20))
+
+#define RCANFD_DCFG_DTSEG1(gpriv, x) \
+ (((x) & reg_v3u(gpriv, 0x1f, 0xf)) << reg_v3u(gpriv, 8, 16))
+
#define RCANFD_DCFG_DBRP(x) (((x) & 0xff) << 0)
/* RSCFDnCFDCmFDCFG */
+#define RCANFD_FDCFG_CLOE BIT(30)
+#define RCANFD_FDCFG_FDOE BIT(28)
#define RCANFD_FDCFG_TDCE BIT(9)
#define RCANFD_FDCFG_TDCOC BIT(8)
#define RCANFD_FDCFG_TDCO(x) (((x) & 0x7f) >> 16)
@@ -219,10 +240,10 @@ enum rcanfd_chip_id {
/* Common FIFO bits */
/* RSCFDnCFDCFCCk */
-#define RCANFD_CFCC_CFTML(x) (((x) & 0xf) << 20)
-#define RCANFD_CFCC_CFM(x) (((x) & 0x3) << 16)
+#define RCANFD_CFCC_CFTML(gpriv, x) (((x) & 0xf) << reg_v3u(gpriv, 16, 20))
+#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << reg_v3u(gpriv, 8, 16))
#define RCANFD_CFCC_CFIM BIT(12)
-#define RCANFD_CFCC_CFDC(x) (((x) & 0x7) << 8)
+#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << reg_v3u(gpriv, 21, 8))
#define RCANFD_CFCC_CFPLS(x) (((x) & 0x7) << 4)
#define RCANFD_CFCC_CFTXIE BIT(2)
#define RCANFD_CFCC_CFE BIT(0)
@@ -282,33 +303,31 @@ enum rcanfd_chip_id {
#define RCANFD_GTSC (0x0094)
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR (0x0098)
-/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
-#define RCANFD_GAFLCFG0 (0x009c)
-/* RSCFDnCFDGAFLCFG1 / RSCFDnGAFLCFG1 */
-#define RCANFD_GAFLCFG1 (0x00a0)
+/* RSCFDnCFDGAFLCFG / RSCFDnGAFLCFG */
+#define RCANFD_GAFLCFG(ch) (0x009c + (0x04 * ((ch) / 2)))
/* RSCFDnCFDRMNB / RSCFDnRMNB */
#define RCANFD_RMNB (0x00a4)
/* RSCFDnCFDRMND / RSCFDnRMND */
#define RCANFD_RMND(y) (0x00a8 + (0x04 * (y)))
/* RSCFDnCFDRFCCx / RSCFDnRFCCx */
-#define RCANFD_RFCC(x) (0x00b8 + (0x04 * (x)))
+#define RCANFD_RFCC(gpriv, x) (reg_v3u(gpriv, 0x00c0, 0x00b8) + (0x04 * (x)))
/* RSCFDnCFDRFSTSx / RSCFDnRFSTSx */
-#define RCANFD_RFSTS(x) (0x00d8 + (0x04 * (x)))
+#define RCANFD_RFSTS(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x20)
/* RSCFDnCFDRFPCTRx / RSCFDnRFPCTRx */
-#define RCANFD_RFPCTR(x) (0x00f8 + (0x04 * (x)))
+#define RCANFD_RFPCTR(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x40)
/* Common FIFO Control registers */
/* RSCFDnCFDCFCCx / RSCFDnCFCCx */
-#define RCANFD_CFCC(ch, idx) (0x0118 + (0x0c * (ch)) + \
- (0x04 * (idx)))
+#define RCANFD_CFCC(gpriv, ch, idx) \
+ (reg_v3u(gpriv, 0x0120, 0x0118) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFSTSx / RSCFDnCFSTSx */
-#define RCANFD_CFSTS(ch, idx) (0x0178 + (0x0c * (ch)) + \
- (0x04 * (idx)))
+#define RCANFD_CFSTS(gpriv, ch, idx) \
+ (reg_v3u(gpriv, 0x01e0, 0x0178) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFPCTRx / RSCFDnCFPCTRx */
-#define RCANFD_CFPCTR(ch, idx) (0x01d8 + (0x0c * (ch)) + \
- (0x04 * (idx)))
+#define RCANFD_CFPCTR(gpriv, ch, idx) \
+ (reg_v3u(gpriv, 0x0240, 0x01d8) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDFESTS / RSCFDnFESTS */
#define RCANFD_FESTS (0x0238)
@@ -387,22 +406,23 @@ enum rcanfd_chip_id {
#define RCANFD_C_RMDF1(q) (0x060c + (0x10 * (q)))
/* RSCFDnRFXXx -> RCANFD_C_RFXX(x) */
-#define RCANFD_C_RFOFFSET (0x0e00)
-#define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x)))
-#define RCANFD_C_RFPTR(x) (RCANFD_C_RFOFFSET + 0x04 + \
- (0x10 * (x)))
-#define RCANFD_C_RFDF(x, df) (RCANFD_C_RFOFFSET + 0x08 + \
- (0x10 * (x)) + (0x04 * (df)))
+#define RCANFD_C_RFOFFSET (0x0e00)
+#define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x)))
+#define RCANFD_C_RFPTR(x) (RCANFD_C_RFOFFSET + 0x04 + (0x10 * (x)))
+#define RCANFD_C_RFDF(x, df) \
+ (RCANFD_C_RFOFFSET + 0x08 + (0x10 * (x)) + (0x04 * (df)))
/* RSCFDnCFXXk -> RCANFD_C_CFXX(ch, k) */
#define RCANFD_C_CFOFFSET (0x0e80)
-#define RCANFD_C_CFID(ch, idx) (RCANFD_C_CFOFFSET + (0x30 * (ch)) + \
- (0x10 * (idx)))
-#define RCANFD_C_CFPTR(ch, idx) (RCANFD_C_CFOFFSET + 0x04 + \
- (0x30 * (ch)) + (0x10 * (idx)))
-#define RCANFD_C_CFDF(ch, idx, df) (RCANFD_C_CFOFFSET + 0x08 + \
- (0x30 * (ch)) + (0x10 * (idx)) + \
- (0x04 * (df)))
+
+#define RCANFD_C_CFID(ch, idx) \
+ (RCANFD_C_CFOFFSET + (0x30 * (ch)) + (0x10 * (idx)))
+
+#define RCANFD_C_CFPTR(ch, idx) \
+ (RCANFD_C_CFOFFSET + 0x04 + (0x30 * (ch)) + (0x10 * (idx)))
+
+#define RCANFD_C_CFDF(ch, idx, df) \
+ (RCANFD_C_CFOFFSET + 0x08 + (0x30 * (ch)) + (0x10 * (idx)) + (0x04 * (df)))
/* RSCFDnTMXXp -> RCANFD_C_TMXX(p) */
#define RCANFD_C_TMID(p) (0x1000 + (0x10 * (p)))
@@ -415,6 +435,12 @@ enum rcanfd_chip_id {
/* RSCFDnRPGACCr */
#define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r)))
+/* R-Car V3U Classical and CAN FD mode specific register map */
+#define RCANFD_V3U_CFDCFG (0x1314)
+#define RCANFD_V3U_DCFG(m) (0x1400 + (0x20 * (m)))
+
+#define RCANFD_V3U_GAFL_OFFSET (0x1800)
+
/* CAN FD mode specific register map */
/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */
@@ -434,26 +460,28 @@ enum rcanfd_chip_id {
#define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q)))
/* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */
-#define RCANFD_F_RFOFFSET (0x3000)
-#define RCANFD_F_RFID(x) (RCANFD_F_RFOFFSET + (0x80 * (x)))
-#define RCANFD_F_RFPTR(x) (RCANFD_F_RFOFFSET + 0x04 + \
- (0x80 * (x)))
-#define RCANFD_F_RFFDSTS(x) (RCANFD_F_RFOFFSET + 0x08 + \
- (0x80 * (x)))
-#define RCANFD_F_RFDF(x, df) (RCANFD_F_RFOFFSET + 0x0c + \
- (0x80 * (x)) + (0x04 * (df)))
+#define RCANFD_F_RFOFFSET(gpriv) reg_v3u(gpriv, 0x6000, 0x3000)
+#define RCANFD_F_RFID(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + (0x80 * (x)))
+#define RCANFD_F_RFPTR(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x04 + (0x80 * (x)))
+#define RCANFD_F_RFFDSTS(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x08 + (0x80 * (x)))
+#define RCANFD_F_RFDF(gpriv, x, df) \
+ (RCANFD_F_RFOFFSET(gpriv) + 0x0c + (0x80 * (x)) + (0x04 * (df)))
/* RSCFDnCFDCFXXk -> RCANFD_F_CFXX(ch, k) */
-#define RCANFD_F_CFOFFSET (0x3400)
-#define RCANFD_F_CFID(ch, idx) (RCANFD_F_CFOFFSET + (0x180 * (ch)) + \
- (0x80 * (idx)))
-#define RCANFD_F_CFPTR(ch, idx) (RCANFD_F_CFOFFSET + 0x04 + \
- (0x180 * (ch)) + (0x80 * (idx)))
-#define RCANFD_F_CFFDCSTS(ch, idx) (RCANFD_F_CFOFFSET + 0x08 + \
- (0x180 * (ch)) + (0x80 * (idx)))
-#define RCANFD_F_CFDF(ch, idx, df) (RCANFD_F_CFOFFSET + 0x0c + \
- (0x180 * (ch)) + (0x80 * (idx)) + \
- (0x04 * (df)))
+#define RCANFD_F_CFOFFSET(gpriv) reg_v3u(gpriv, 0x6400, 0x3400)
+
+#define RCANFD_F_CFID(gpriv, ch, idx) \
+ (RCANFD_F_CFOFFSET(gpriv) + (0x180 * (ch)) + (0x80 * (idx)))
+
+#define RCANFD_F_CFPTR(gpriv, ch, idx) \
+ (RCANFD_F_CFOFFSET(gpriv) + 0x04 + (0x180 * (ch)) + (0x80 * (idx)))
+
+#define RCANFD_F_CFFDCSTS(gpriv, ch, idx) \
+ (RCANFD_F_CFOFFSET(gpriv) + 0x08 + (0x180 * (ch)) + (0x80 * (idx)))
+
+#define RCANFD_F_CFDF(gpriv, ch, idx, df) \
+ (RCANFD_F_CFOFFSET(gpriv) + 0x0c + (0x180 * (ch)) + (0x80 * (idx)) + \
+ (0x04 * (df)))
/* RSCFDnCFDTMXXp -> RCANFD_F_TMXX(p) */
#define RCANFD_F_TMID(p) (0x4000 + (0x20 * (p)))
@@ -470,7 +498,7 @@ enum rcanfd_chip_id {
#define RCANFD_FIFO_DEPTH 8 /* Tx FIFO depth */
#define RCANFD_NAPI_WEIGHT 8 /* Rx poll quota */
-#define RCANFD_NUM_CHANNELS 2 /* Two channels max */
+#define RCANFD_NUM_CHANNELS 8 /* Eight channels max */
#define RCANFD_CHANNELS_MASK BIT((RCANFD_NUM_CHANNELS) - 1)
#define RCANFD_GAFL_PAGENUM(entry) ((entry) / 16)
@@ -521,6 +549,7 @@ struct rcar_canfd_global {
struct reset_control *rstc1;
struct reset_control *rstc2;
enum rcanfd_chip_id chip_id;
+ u32 max_channels;
};
/* CAN FD mode nominal rate constants */
@@ -563,6 +592,17 @@ static const struct can_bittiming_const rcar_canfd_bittiming_const = {
};
/* Helper functions */
+static inline bool is_v3u(struct rcar_canfd_global *gpriv)
+{
+ return gpriv->chip_id == RENESAS_R8A779A0;
+}
+
+static inline u32 reg_v3u(struct rcar_canfd_global *gpriv,
+ u32 v3u, u32 not_v3u)
+{
+ return is_v3u(gpriv) ? v3u : not_v3u;
+}
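With these helpers the register map is parameterized per chip rather than forked; for example the Rx FIFO macros above now resolve as:

    /* RCANFD_RFCC(gpriv, x):
     *   V3U:    0x00c0 + 0x04 * (x)
     *   others: 0x00b8 + 0x04 * (x)
     * with RFSTS and RFPCTR derived from it at fixed offsets +0x20 and +0x40.
     */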
+
static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg)
{
u32 data = readl(reg);
@@ -628,6 +668,25 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
can_free_echo_skb(ndev, i, NULL);
}
+static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
+{
+ if (is_v3u(gpriv)) {
+ if (gpriv->fdmode)
+ rcar_canfd_set_bit(gpriv->base, RCANFD_V3U_CFDCFG,
+ RCANFD_FDCFG_FDOE);
+ else
+ rcar_canfd_set_bit(gpriv->base, RCANFD_V3U_CFDCFG,
+ RCANFD_FDCFG_CLOE);
+ } else {
+ if (gpriv->fdmode)
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
+ RCANFD_GRMCFG_RCMC);
+ else
+ rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
+ RCANFD_GRMCFG_RCMC);
+ }
+}
+
static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
{
u32 sts, ch;
@@ -660,15 +719,10 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0);
/* Set the controller into appropriate mode */
- if (gpriv->fdmode)
- rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
- RCANFD_GRMCFG_RCMC);
- else
- rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
- RCANFD_GRMCFG_RCMC);
+ rcar_canfd_set_mode(gpriv);
/* Transition all Channels to reset mode */
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) {
rcar_canfd_clear_bit(gpriv->base,
RCANFD_CCTR(ch), RCANFD_CCTR_CSLPR);
@@ -709,7 +763,7 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
rcar_canfd_set_bit(gpriv->base, RCANFD_GCFG, cfg);
/* Channel configuration settings */
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) {
rcar_canfd_set_bit(gpriv->base, RCANFD_CCTR(ch),
RCANFD_CCTR_ERRD);
rcar_canfd_update_bit(gpriv->base, RCANFD_CCTR(ch),
@@ -729,20 +783,22 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
start = 0; /* Channel 0 always starts from 0th rule */
} else {
/* Get number of Channel 0 rules and adjust */
- cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG0);
- start = RCANFD_GAFLCFG_GETRNC(0, cfg);
+ cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG(ch));
+ start = RCANFD_GAFLCFG_GETRNC(gpriv, 0, cfg);
}
/* Enable write access to entry */
page = RCANFD_GAFL_PAGENUM(start);
rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR,
- (RCANFD_GAFLECTR_AFLPN(page) |
+ (RCANFD_GAFLECTR_AFLPN(gpriv, page) |
RCANFD_GAFLECTR_AFLDAE));
/* Write number of rules for channel */
- rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG0,
- RCANFD_GAFLCFG_SETRNC(ch, num_rules));
- if (gpriv->fdmode)
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(ch),
+ RCANFD_GAFLCFG_SETRNC(gpriv, ch, num_rules));
+ if (is_v3u(gpriv))
+ offset = RCANFD_V3U_GAFL_OFFSET;
+ else if (gpriv->fdmode)
offset = RCANFD_F_GAFL_OFFSET;
else
offset = RCANFD_C_GAFL_OFFSET;
@@ -754,8 +810,8 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
/* Any data length accepted */
rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0);
/* Place the msg in corresponding Rx FIFO entry */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLP1(offset, start),
- RCANFD_GAFLP1_GAFLFDP(ridx));
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, start),
+ RCANFD_GAFLP1_GAFLFDP(ridx));
/* Disable write access to page */
rcar_canfd_clear_bit(gpriv->base,
@@ -779,7 +835,7 @@ static void rcar_canfd_configure_rx(struct rcar_canfd_global *gpriv, u32 ch)
cfg = (RCANFD_RFCC_RFIM | RCANFD_RFCC_RFDC(rfdc) |
RCANFD_RFCC_RFPLS(rfpls) | RCANFD_RFCC_RFIE);
- rcar_canfd_write(gpriv->base, RCANFD_RFCC(ridx), cfg);
+ rcar_canfd_write(gpriv->base, RCANFD_RFCC(gpriv, ridx), cfg);
}
static void rcar_canfd_configure_tx(struct rcar_canfd_global *gpriv, u32 ch)
@@ -801,15 +857,15 @@ static void rcar_canfd_configure_tx(struct rcar_canfd_global *gpriv, u32 ch)
else
cfpls = 0; /* b000 - Max 8 bytes payload */
- cfg = (RCANFD_CFCC_CFTML(cftml) | RCANFD_CFCC_CFM(cfm) |
- RCANFD_CFCC_CFIM | RCANFD_CFCC_CFDC(cfdc) |
+ cfg = (RCANFD_CFCC_CFTML(gpriv, cftml) | RCANFD_CFCC_CFM(gpriv, cfm) |
+ RCANFD_CFCC_CFIM | RCANFD_CFCC_CFDC(gpriv, cfdc) |
RCANFD_CFCC_CFPLS(cfpls) | RCANFD_CFCC_CFTXIE);
- rcar_canfd_write(gpriv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX), cfg);
+ rcar_canfd_write(gpriv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX), cfg);
if (gpriv->fdmode)
/* Clear FD mode specific control/status register */
rcar_canfd_write(gpriv->base,
- RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), 0);
+ RCANFD_F_CFFDCSTS(gpriv, ch, RCANFD_CFFIFO_IDX), 0);
}
static void rcar_canfd_enable_global_interrupts(struct rcar_canfd_global *gpriv)
@@ -890,20 +946,20 @@ static void rcar_canfd_global_error(struct net_device *ndev)
}
if (gerfl & RCANFD_GERFL_MES) {
sts = rcar_canfd_read(priv->base,
- RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX));
if (sts & RCANFD_CFSTS_CFMLT) {
netdev_dbg(ndev, "Tx Message Lost flag\n");
stats->tx_dropped++;
rcar_canfd_write(priv->base,
- RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX),
+ RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX),
sts & ~RCANFD_CFSTS_CFMLT);
}
- sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx));
if (sts & RCANFD_RFSTS_RFMLT) {
netdev_dbg(ndev, "Rx Message Lost flag\n");
stats->rx_dropped++;
- rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx),
+ rcar_canfd_write(priv->base, RCANFD_RFSTS(gpriv, ridx),
sts & ~RCANFD_RFSTS_RFMLT);
}
}
@@ -1038,6 +1094,7 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl,
static void rcar_canfd_tx_done(struct net_device *ndev)
{
struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
struct net_device_stats *stats = &ndev->stats;
u32 sts;
unsigned long flags;
@@ -1053,7 +1110,7 @@ static void rcar_canfd_tx_done(struct net_device *ndev)
spin_lock_irqsave(&priv->tx_lock, flags);
priv->tx_tail++;
sts = rcar_canfd_read(priv->base,
- RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX));
unsent = RCANFD_CFSTS_CFMC(sts);
/* Wake producer only when there is room */
@@ -1069,7 +1126,7 @@ static void rcar_canfd_tx_done(struct net_device *ndev)
} while (1);
/* Clear interrupt */
- rcar_canfd_write(priv->base, RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX),
+ rcar_canfd_write(priv->base, RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX),
sts & ~RCANFD_CFSTS_CFTXIF);
can_led_event(ndev, CAN_LED_EVENT_TX);
}
@@ -1091,7 +1148,7 @@ static irqreturn_t rcar_canfd_global_err_interrupt(int irq, void *dev_id)
struct rcar_canfd_global *gpriv = dev_id;
u32 ch;
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels)
rcar_canfd_handle_global_err(gpriv, ch);
return IRQ_HANDLED;
@@ -1104,12 +1161,12 @@ static void rcar_canfd_handle_global_receive(struct rcar_canfd_global *gpriv, u3
u32 sts;
/* Handle Rx interrupts */
- sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx));
if (likely(sts & RCANFD_RFSTS_RFIF)) {
if (napi_schedule_prep(&priv->napi)) {
/* Disable Rx FIFO interrupts */
rcar_canfd_clear_bit(priv->base,
- RCANFD_RFCC(ridx),
+ RCANFD_RFCC(gpriv, ridx),
RCANFD_RFCC_RFIE);
__napi_schedule(&priv->napi);
}
@@ -1121,7 +1178,7 @@ static irqreturn_t rcar_canfd_global_receive_fifo_interrupt(int irq, void *dev_i
struct rcar_canfd_global *gpriv = dev_id;
u32 ch;
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels)
rcar_canfd_handle_global_receive(gpriv, ch);
return IRQ_HANDLED;
@@ -1135,7 +1192,7 @@ static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id)
/* Global error interrupts still indicate a condition specific
* to a channel. RxFIFO interrupt is a global interrupt.
*/
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) {
rcar_canfd_handle_global_err(gpriv, ch);
rcar_canfd_handle_global_receive(gpriv, ch);
}
@@ -1181,7 +1238,7 @@ static void rcar_canfd_handle_channel_tx(struct rcar_canfd_global *gpriv, u32 ch
/* Handle Tx interrupts */
sts = rcar_canfd_read(priv->base,
- RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX));
if (likely(sts & RCANFD_CFSTS_CFTXIF))
rcar_canfd_tx_done(ndev);
}
@@ -1191,7 +1248,7 @@ static irqreturn_t rcar_canfd_channel_tx_interrupt(int irq, void *dev_id)
struct rcar_canfd_global *gpriv = dev_id;
u32 ch;
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels)
rcar_canfd_handle_channel_tx(gpriv, ch);
return IRQ_HANDLED;
@@ -1223,7 +1280,7 @@ static irqreturn_t rcar_canfd_channel_err_interrupt(int irq, void *dev_id)
struct rcar_canfd_global *gpriv = dev_id;
u32 ch;
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels)
rcar_canfd_handle_channel_err(gpriv, ch);
return IRQ_HANDLED;
@@ -1235,7 +1292,7 @@ static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
u32 ch;
/* Common FIFO is a per channel resource */
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) {
rcar_canfd_handle_channel_err(gpriv, ch);
rcar_canfd_handle_channel_tx(gpriv, ch);
}
@@ -1246,6 +1303,7 @@ static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
static void rcar_canfd_set_bittiming(struct net_device *dev)
{
struct rcar_canfd_channel *priv = netdev_priv(dev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
const struct can_bittiming *bt = &priv->can.bittiming;
const struct can_bittiming *dbt = &priv->can.data_bittiming;
u16 brp, sjw, tseg1, tseg2;
@@ -1260,8 +1318,8 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
/* CAN FD only mode */
- cfg = (RCANFD_NCFG_NTSEG1(tseg1) | RCANFD_NCFG_NBRP(brp) |
- RCANFD_NCFG_NSJW(sjw) | RCANFD_NCFG_NTSEG2(tseg2));
+ cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) | RCANFD_NCFG_NBRP(brp) |
+ RCANFD_NCFG_NSJW(gpriv, sjw) | RCANFD_NCFG_NTSEG2(gpriv, tseg2));
rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
netdev_dbg(priv->ndev, "nrate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
@@ -1273,16 +1331,25 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
tseg2 = dbt->phase_seg2 - 1;
- cfg = (RCANFD_DCFG_DTSEG1(tseg1) | RCANFD_DCFG_DBRP(brp) |
- RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(tseg2));
+ cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) |
+ RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg);
netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
brp, sjw, tseg1, tseg2);
} else {
/* Classical CAN only mode */
- cfg = (RCANFD_CFG_TSEG1(tseg1) | RCANFD_CFG_BRP(brp) |
- RCANFD_CFG_SJW(sjw) | RCANFD_CFG_TSEG2(tseg2));
+ if (is_v3u(gpriv)) {
+ cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) |
+ RCANFD_NCFG_NBRP(brp) |
+ RCANFD_NCFG_NSJW(gpriv, sjw) |
+ RCANFD_NCFG_NTSEG2(gpriv, tseg2));
+ } else {
+ cfg = (RCANFD_CFG_TSEG1(tseg1) |
+ RCANFD_CFG_BRP(brp) |
+ RCANFD_CFG_SJW(sjw) |
+ RCANFD_CFG_TSEG2(tseg2));
+ }
rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
netdev_dbg(priv->ndev,
@@ -1294,6 +1361,7 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
static int rcar_canfd_start(struct net_device *ndev)
{
struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
int err = -EOPNOTSUPP;
u32 sts, ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
@@ -1315,9 +1383,9 @@ static int rcar_canfd_start(struct net_device *ndev)
}
/* Enable Common & Rx FIFO */
- rcar_canfd_set_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX),
+ rcar_canfd_set_bit(priv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX),
RCANFD_CFCC_CFE);
- rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE);
+ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFE);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
return 0;
@@ -1365,6 +1433,7 @@ out_clock:
static void rcar_canfd_stop(struct net_device *ndev)
{
struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
int err;
u32 sts, ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
@@ -1382,9 +1451,9 @@ static void rcar_canfd_stop(struct net_device *ndev)
rcar_canfd_disable_channel_interrupts(priv);
/* Disable Common & Rx FIFO */
- rcar_canfd_clear_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX),
+ rcar_canfd_clear_bit(priv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX),
RCANFD_CFCC_CFE);
- rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE);
+ rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFE);
/* Set the state as STOPPED */
priv->can.state = CAN_STATE_STOPPED;
@@ -1408,6 +1477,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
u32 sts = 0, id, dlc;
unsigned long flags;
@@ -1428,11 +1498,11 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
dlc = RCANFD_CFPTR_CFDLC(can_fd_len2dlc(cf->len));
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_v3u(gpriv)) {
rcar_canfd_write(priv->base,
- RCANFD_F_CFID(ch, RCANFD_CFFIFO_IDX), id);
+ RCANFD_F_CFID(gpriv, ch, RCANFD_CFFIFO_IDX), id);
rcar_canfd_write(priv->base,
- RCANFD_F_CFPTR(ch, RCANFD_CFFIFO_IDX), dlc);
+ RCANFD_F_CFPTR(gpriv, ch, RCANFD_CFFIFO_IDX), dlc);
if (can_is_canfd_skb(skb)) {
/* CAN FD frame format */
@@ -1445,10 +1515,10 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
}
rcar_canfd_write(priv->base,
- RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), sts);
+ RCANFD_F_CFFDCSTS(gpriv, ch, RCANFD_CFFIFO_IDX), sts);
rcar_canfd_put_data(priv, cf,
- RCANFD_F_CFDF(ch, RCANFD_CFFIFO_IDX, 0));
+ RCANFD_F_CFDF(gpriv, ch, RCANFD_CFFIFO_IDX, 0));
} else {
rcar_canfd_write(priv->base,
RCANFD_C_CFID(ch, RCANFD_CFFIFO_IDX), id);
@@ -1471,7 +1541,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
* pointer for the Common FIFO
*/
rcar_canfd_write(priv->base,
- RCANFD_CFPCTR(ch, RCANFD_CFFIFO_IDX), 0xff);
+ RCANFD_CFPCTR(gpriv, ch, RCANFD_CFFIFO_IDX), 0xff);
spin_unlock_irqrestore(&priv->tx_lock, flags);
return NETDEV_TX_OK;
@@ -1480,18 +1550,21 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
{
struct net_device_stats *stats = &priv->ndev->stats;
+ struct rcar_canfd_global *gpriv = priv->gpriv;
struct canfd_frame *cf;
struct sk_buff *skb;
u32 sts = 0, id, dlc;
u32 ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
- id = rcar_canfd_read(priv->base, RCANFD_F_RFID(ridx));
- dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(ridx));
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_v3u(gpriv)) {
+ id = rcar_canfd_read(priv->base, RCANFD_F_RFID(gpriv, ridx));
+ dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(gpriv, ridx));
- sts = rcar_canfd_read(priv->base, RCANFD_F_RFFDSTS(ridx));
- if (sts & RCANFD_RFFDSTS_RFFDF)
+ sts = rcar_canfd_read(priv->base, RCANFD_F_RFFDSTS(gpriv, ridx));
+
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
+ sts & RCANFD_RFFDSTS_RFFDF)
skb = alloc_canfd_skb(priv->ndev, &cf);
else
skb = alloc_can_skb(priv->ndev,
@@ -1529,12 +1602,14 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
if (sts & RCANFD_RFFDSTS_RFBRS)
cf->flags |= CANFD_BRS;
- rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(ridx, 0));
+ rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0));
}
} else {
cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc));
if (id & RCANFD_RFID_RFRTR)
cf->can_id |= CAN_RTR_FLAG;
+ else if (is_v3u(gpriv))
+ rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0));
else
rcar_canfd_get_data(priv, cf, RCANFD_C_RFDF(ridx, 0));
}
@@ -1542,7 +1617,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
/* Write 0xff to RFPC to increment the CPU-side
* pointer of the Rx FIFO
*/
- rcar_canfd_write(priv->base, RCANFD_RFPCTR(ridx), 0xff);
+ rcar_canfd_write(priv->base, RCANFD_RFPCTR(gpriv, ridx), 0xff);
can_led_event(priv->ndev, CAN_LED_EVENT_RX);
@@ -1556,13 +1631,14 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
{
struct rcar_canfd_channel *priv =
container_of(napi, struct rcar_canfd_channel, napi);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
int num_pkts;
u32 sts;
u32 ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
for (num_pkts = 0; num_pkts < quota; num_pkts++) {
- sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx));
/* Check FIFO empty condition */
if (sts & RCANFD_RFSTS_RFEMP)
break;
@@ -1571,7 +1647,7 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
/* Clear interrupt bit */
if (sts & RCANFD_RFSTS_RFIF)
- rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx),
+ rcar_canfd_write(priv->base, RCANFD_RFSTS(gpriv, ridx),
sts & ~RCANFD_RFSTS_RFIF);
}
@@ -1579,7 +1655,7 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
if (num_pkts < quota) {
if (napi_complete_done(napi, num_pkts)) {
/* Enable Rx FIFO interrupts */
- rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
+ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(gpriv, ridx),
RCANFD_RFCC_RFIE);
}
}
@@ -1715,15 +1791,15 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll,
RCANFD_NAPI_WEIGHT);
+ spin_lock_init(&priv->tx_lock);
+ devm_can_led_init(ndev);
+ gpriv->ch[priv->channel] = priv;
err = register_candev(ndev);
if (err) {
dev_err(&pdev->dev,
"register_candev() failed, error %d\n", err);
goto fail_candev;
}
- spin_lock_init(&priv->tx_lock);
- devm_can_led_init(ndev);
- gpriv->ch[priv->channel] = priv;
dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel);
return 0;
@@ -1756,21 +1832,24 @@ static int rcar_canfd_probe(struct platform_device *pdev)
int g_err_irq, g_recc_irq;
bool fdmode = true; /* CAN FD only mode - default */
enum rcanfd_chip_id chip_id;
+ int max_channels;
+ char name[9] = "channelX";
+ int i;
chip_id = (uintptr_t)of_device_get_match_data(&pdev->dev);
+ max_channels = chip_id == RENESAS_R8A779A0 ? 8 : 2;
if (of_property_read_bool(pdev->dev.of_node, "renesas,no-can-fd"))
fdmode = false; /* Classical CAN only mode */
- of_child = of_get_child_by_name(pdev->dev.of_node, "channel0");
- if (of_child && of_device_is_available(of_child))
- channels_mask |= BIT(0); /* Channel 0 */
-
- of_child = of_get_child_by_name(pdev->dev.of_node, "channel1");
- if (of_child && of_device_is_available(of_child))
- channels_mask |= BIT(1); /* Channel 1 */
+ for (i = 0; i < max_channels; ++i) {
+ name[7] = '0' + i;
+ of_child = of_get_child_by_name(pdev->dev.of_node, name);
+ if (of_child && of_device_is_available(of_child))
+ channels_mask |= BIT(i);
+ }
- if (chip_id == RENESAS_RCAR_GEN3) {
+ if (chip_id != RENESAS_RZG2L) {
ch_irq = platform_get_irq_byname_optional(pdev, "ch_int");
if (ch_irq < 0) {
/* For backward compatibility get irq by index */
@@ -1806,6 +1885,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
gpriv->channels_mask = channels_mask;
gpriv->fdmode = fdmode;
gpriv->chip_id = chip_id;
+ gpriv->max_channels = max_channels;
if (gpriv->chip_id == RENESAS_RZG2L) {
gpriv->rstc1 = devm_reset_control_get_exclusive(&pdev->dev, "rstp_n");
@@ -1847,7 +1927,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
}
fcan_freq = clk_get_rate(gpriv->can_clk);
- if (gpriv->fcan == RCANFD_CANFDCLK && gpriv->chip_id == RENESAS_RCAR_GEN3)
+ if (gpriv->fcan == RCANFD_CANFDCLK && gpriv->chip_id != RENESAS_RZG2L)
/* CANFD clock is further divided by (1/2) within the IP */
fcan_freq /= 2;
@@ -1859,7 +1939,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
gpriv->base = addr;
/* Request IRQ that's common for both channels */
- if (gpriv->chip_id == RENESAS_RCAR_GEN3) {
+ if (gpriv->chip_id != RENESAS_RZG2L) {
err = devm_request_irq(&pdev->dev, ch_irq,
rcar_canfd_channel_interrupt, 0,
"canfd.ch_int", gpriv);
@@ -1925,7 +2005,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
rcar_canfd_configure_controller(gpriv);
/* Configure per channel attributes */
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, max_channels) {
/* Configure Channel's Rx fifo */
rcar_canfd_configure_rx(gpriv, ch);
@@ -1951,7 +2031,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
goto fail_mode;
}
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, max_channels) {
err = rcar_canfd_channel_probe(gpriv, ch, fcan_freq);
if (err)
goto fail_channel;
@@ -1963,7 +2043,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
return 0;
fail_channel:
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ for_each_set_bit(ch, &gpriv->channels_mask, max_channels)
rcar_canfd_channel_remove(gpriv, ch);
fail_mode:
rcar_canfd_disable_global_interrupts(gpriv);
@@ -1984,7 +2064,7 @@ static int rcar_canfd_remove(struct platform_device *pdev)
rcar_canfd_reset_controller(gpriv);
rcar_canfd_disable_global_interrupts(gpriv);
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) {
rcar_canfd_disable_channel_interrupts(gpriv->ch[ch]);
rcar_canfd_channel_remove(gpriv, ch);
}
@@ -2014,6 +2094,7 @@ static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
static const __maybe_unused struct of_device_id rcar_canfd_of_table[] = {
{ .compatible = "renesas,rcar-gen3-canfd", .data = (void *)RENESAS_RCAR_GEN3 },
{ .compatible = "renesas,rzg2l-canfd", .data = (void *)RENESAS_RZG2L },
+ { .compatible = "renesas,r8a779a0-canfd", .data = (void *)RENESAS_R8A779A0 },
{ }
};
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 27783fbf011f..ec294d0c5722 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -221,7 +221,7 @@ static void slc_bump(struct slcan *sl)
if (!(cf.can_id & CAN_RTR_FLAG))
sl->dev->stats.rx_bytes += cf.len;
- netif_rx_ni(skb);
+ netif_rx(skb);
}
/* parse tty input stream */
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index d74e895bddf7..8d27ac66ca7f 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -392,13 +392,10 @@ static int softing_netdev_open(struct net_device *ndev)
static int softing_netdev_stop(struct net_device *ndev)
{
- int ret;
-
netif_stop_queue(ndev);
/* softing cycle does close_candev() */
- ret = softing_startstop(ndev, 0);
- return ret;
+ return softing_startstop(ndev, 0);
}
static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode)
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index cfcc14fe3e42..a5b2952b8d0f 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -356,7 +356,7 @@ static void hi3110_hw_rx(struct spi_device *spi)
can_led_event(priv->net, CAN_LED_EVENT_RX);
- netif_rx_ni(skb);
+ netif_rx(skb);
}
static void hi3110_hw_sleep(struct spi_device *spi)
@@ -677,7 +677,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
tx_state = txerr >= rxerr ? new_state : 0;
rx_state = txerr <= rxerr ? new_state : 0;
can_change_state(net, cf, tx_state, rx_state);
- netif_rx_ni(skb);
+ netif_rx(skb);
if (new_state == CAN_STATE_BUS_OFF) {
can_bus_off(net);
@@ -718,7 +718,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
cf->data[6] = hi3110_read(spi, HI3110_READ_TEC);
cf->data[7] = hi3110_read(spi, HI3110_READ_REC);
netdev_dbg(priv->net, "Bus Error\n");
- netif_rx_ni(skb);
+ netif_rx(skb);
}
}
@@ -948,7 +948,7 @@ static int hi3110_can_probe(struct spi_device *spi)
return dev_err_probe(dev, ret, "Probe failed\n");
}
-static int hi3110_can_remove(struct spi_device *spi)
+static void hi3110_can_remove(struct spi_device *spi)
{
struct hi3110_priv *priv = spi_get_drvdata(spi);
struct net_device *net = priv->net;
@@ -960,8 +960,6 @@ static int hi3110_can_remove(struct spi_device *spi)
clk_disable_unprepare(priv->clk);
free_candev(net);
-
- return 0;
}
static int __maybe_unused hi3110_can_suspend(struct device *dev)
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 025e07cb7439..fc747bff5eeb 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -740,7 +740,7 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
can_led_event(priv->net, CAN_LED_EVENT_RX);
- netif_rx_ni(skb);
+ netif_rx(skb);
}
static void mcp251x_hw_sleep(struct spi_device *spi)
@@ -987,7 +987,7 @@ static void mcp251x_error_skb(struct net_device *net, int can_id, int data1)
if (skb) {
frame->can_id |= can_id;
frame->data[1] = data1;
- netif_rx_ni(skb);
+ netif_rx(skb);
} else {
netdev_err(net, "cannot allocate error skb\n");
}
@@ -1427,7 +1427,7 @@ out_free:
return ret;
}
-static int mcp251x_can_remove(struct spi_device *spi)
+static void mcp251x_can_remove(struct spi_device *spi)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
struct net_device *net = priv->net;
@@ -1442,8 +1442,6 @@ static int mcp251x_can_remove(struct spi_device *spi)
clk_disable_unprepare(priv->clk);
free_candev(net);
-
- return 0;
}
static int __maybe_unused mcp251x_can_suspend(struct device *dev)
diff --git a/drivers/net/can/spi/mcp251xfd/Makefile b/drivers/net/can/spi/mcp251xfd/Makefile
index a83d685d64e0..94d7de954294 100644
--- a/drivers/net/can/spi/mcp251xfd/Makefile
+++ b/drivers/net/can/spi/mcp251xfd/Makefile
@@ -6,6 +6,8 @@ mcp251xfd-objs :=
mcp251xfd-objs += mcp251xfd-chip-fifo.o
mcp251xfd-objs += mcp251xfd-core.o
mcp251xfd-objs += mcp251xfd-crc16.o
+mcp251xfd-objs += mcp251xfd-ethtool.o
+mcp251xfd-objs += mcp251xfd-ram.o
mcp251xfd-objs += mcp251xfd-regmap.o
mcp251xfd-objs += mcp251xfd-ring.o
mcp251xfd-objs += mcp251xfd-rx.o
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c
index 2f9a623d381d..0d96097a2547 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c
@@ -78,7 +78,7 @@ int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
if (err)
return err;
- /* FIFO 1 - TX */
+ /* TX FIFO */
val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
tx_ring->obj_num - 1) |
MCP251XFD_REG_FIFOCON_TXEN |
@@ -99,7 +99,7 @@ int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED);
err = regmap_write(priv->map_reg,
- MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO),
+ MCP251XFD_REG_FIFOCON(priv->tx->fifo_nr),
val);
if (err)
return err;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index b5986df6eca0..325024be7b04 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -112,6 +112,22 @@ static const char *mcp251xfd_get_mode_str(const u8 mode)
return "<unknown>";
}
+static const char *
+mcp251xfd_get_osc_str(const u32 osc, const u32 osc_reference)
+{
+ switch (~osc & osc_reference &
+ (MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY)) {
+ case MCP251XFD_REG_OSC_PLLRDY:
+ return "PLL";
+ case MCP251XFD_REG_OSC_OSCRDY:
+ return "Oscillator";
+ case MCP251XFD_REG_OSC_PLLRDY | MCP251XFD_REG_OSC_OSCRDY:
+ return "Oscillator/PLL";
+ }
+
+ return "<unknown>";
+}
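The mask expression picks out exactly the expected-ready bits that are still clear: ~osc keeps the bits not yet set, and ANDing with osc_reference keeps only those that were being waited for. For example:

    /* osc_reference = OSCRDY | PLLRDY, osc reads back with only OSCRDY set:
     *
     *   ~osc & osc_reference & (OSCRDY | PLLRDY) == PLLRDY
     *
     * so "PLL" is reported as the part that never became ready.
     */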
+
static inline int mcp251xfd_vdd_enable(const struct mcp251xfd_priv *priv)
{
if (!priv->reg_vdd)
@@ -178,6 +194,11 @@ static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv)
return 0;
}
+static inline bool mcp251xfd_reg_invalid(u32 reg)
+{
+ return reg == 0x0 || reg == 0xffffffff;
+}
+
static inline int
mcp251xfd_chip_get_mode(const struct mcp251xfd_priv *priv, u8 *mode)
{
@@ -197,34 +218,55 @@ static int
__mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
const u8 mode_req, bool nowait)
{
- u32 con, con_reqop;
+ u32 con = 0, con_reqop, osc = 0;
+ u8 mode;
int err;
con_reqop = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, mode_req);
err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CON,
MCP251XFD_REG_CON_REQOP_MASK, con_reqop);
- if (err)
+ if (err == -EBADMSG) {
+ netdev_err(priv->ndev,
+ "Failed to set Requested Operation Mode.\n");
+
+ return -ENODEV;
+ } else if (err) {
return err;
+ }
if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait)
return 0;
err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con,
+ !mcp251xfd_reg_invalid(con) &&
FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK,
con) == mode_req,
MCP251XFD_POLL_SLEEP_US,
MCP251XFD_POLL_TIMEOUT_US);
- if (err) {
- u8 mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con);
+ if (err != -ETIMEDOUT && err != -EBADMSG)
+ return err;
+
+ /* Ignore return value.
+ * Print below error messages, even if this fails.
+ */
+ regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
+ if (mcp251xfd_reg_invalid(con)) {
netdev_err(priv->ndev,
- "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u).\n",
- mcp251xfd_get_mode_str(mode_req), mode_req,
- mcp251xfd_get_mode_str(mode), mode);
- return err;
+ "Failed to read CAN Control Register (con=0x%08x, osc=0x%08x).\n",
+ con, osc);
+
+ return -ENODEV;
}
- return 0;
+ mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con);
+ netdev_err(priv->ndev,
+ "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u) (con=0x%08x, osc=0x%08x).\n",
+ mcp251xfd_get_mode_str(mode_req), mode_req,
+ mcp251xfd_get_mode_str(mode), mode,
+ con, osc);
+
+ return -ETIMEDOUT;
}
static inline int
@@ -241,27 +283,58 @@ mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv,
return __mcp251xfd_chip_set_mode(priv, mode_req, true);
}
-static inline bool mcp251xfd_osc_invalid(u32 reg)
+static int
+mcp251xfd_chip_wait_for_osc_ready(const struct mcp251xfd_priv *priv,
+ u32 osc_reference, u32 osc_mask)
{
- return reg == 0x0 || reg == 0xffffffff;
+ u32 osc;
+ int err;
+
+ err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc,
+ !mcp251xfd_reg_invalid(osc) &&
+ (osc & osc_mask) == osc_reference,
+ MCP251XFD_OSC_STAB_SLEEP_US,
+ MCP251XFD_OSC_STAB_TIMEOUT_US);
+ if (err != -ETIMEDOUT)
+ return err;
+
+ if (mcp251xfd_reg_invalid(osc)) {
+ netdev_err(priv->ndev,
+ "Failed to read Oscillator Configuration Register (osc=0x%08x).\n",
+ osc);
+ return -ENODEV;
+ }
+
+ netdev_err(priv->ndev,
+ "Timeout waiting for %s ready (osc=0x%08x, osc_reference=0x%08x, osc_mask=0x%08x).\n",
+ mcp251xfd_get_osc_str(osc, osc_reference),
+ osc, osc_reference, osc_mask);
+
+ return -ETIMEDOUT;
}
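The error handling here hinges on regmap_read_poll_timeout()'s contract, which is roughly:

    /* err = regmap_read_poll_timeout(map, reg, val, cond, sleep_us, timeout_us);
     *
     *   0          -> cond became true; val holds the matching read
     *   -ETIMEDOUT -> cond never became true; val holds the last read
     *   other      -> regmap_read() itself failed (e.g. -EBADMSG from the
     *                 CRC transport), val may be stale
     *
     * hence only -ETIMEDOUT falls through to the detailed diagnostics above.
     */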
-static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
+static int mcp251xfd_chip_wake(const struct mcp251xfd_priv *priv)
{
u32 osc, osc_reference, osc_mask;
int err;
- /* Set Power On Defaults for "Clock Output Divisor" and remove
- * "Oscillator Disable" bit.
+ /* For normal sleep on MCP2517FD and MCP2518FD, clearing
+ * "Oscillator Disable" will wake the chip. For low power mode
+ * on MCP2518FD, asserting the chip select will wake the
+ * chip. Writing to the Oscillator register will wake it in
+ * both cases.
*/
osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
MCP251XFD_REG_OSC_CLKODIV_10);
+
+ /* We cannot check for the PLL ready bit (either set or
+ * unset), as the PLL might be enabled. This can happen if the
+ * system reboots, while the mcp251xfd stays powered.
+ */
osc_reference = MCP251XFD_REG_OSC_OSCRDY;
- osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;
+ osc_mask = MCP251XFD_REG_OSC_OSCRDY;
- /* Note:
- *
- * If the controller is in Sleep Mode the following write only
+ /* If the controller is in Sleep Mode the following write only
* removes the "Oscillator Disable" bit and powers it up. All
* other bits are unaffected.
*/
@@ -269,24 +342,31 @@ static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
if (err)
return err;
- /* Wait for "Oscillator Ready" bit */
- err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc,
- (osc & osc_mask) == osc_reference,
- MCP251XFD_OSC_STAB_SLEEP_US,
- MCP251XFD_OSC_STAB_TIMEOUT_US);
- if (mcp251xfd_osc_invalid(osc)) {
- netdev_err(priv->ndev,
- "Failed to detect %s (osc=0x%08x).\n",
- mcp251xfd_get_model_str(priv), osc);
- return -ENODEV;
- } else if (err == -ETIMEDOUT) {
- netdev_err(priv->ndev,
- "Timeout waiting for Oscillator Ready (osc=0x%08x, osc_reference=0x%08x)\n",
- osc, osc_reference);
- return -ETIMEDOUT;
+ /* Sometimes the PLL is stuck enabled, the controller never
+ * sets the OSC Ready bit, and we get an -ETIMEDOUT. Our
+ * caller takes care of retrying.
+ */
+ return mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask);
+}
+
+static inline int mcp251xfd_chip_sleep(const struct mcp251xfd_priv *priv)
+{
+ if (priv->pll_enable) {
+ u32 osc;
+ int err;
+
+ /* Turn off PLL */
+ osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
+ MCP251XFD_REG_OSC_CLKODIV_10);
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
+ if (err)
+ netdev_err(priv->ndev,
+ "Failed to disable PLL.\n");
+
+ priv->spi->max_speed_hz = priv->spi_max_speed_hz_slow;
}
- return err;
+ return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
}
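Dropping spi->max_speed_hz to the slow limit before sleeping matters because the driver caps the SPI clock at roughly 85% of SYSCLK/2; illustrative numbers (assumed, for a 40 MHz PLL clock versus a 4 MHz oscillator):

    /* PLL on:  SYSCLK = 40 MHz -> max SCLK ~ 0.85 * 40 MHz / 2 = 17.0 MHz
     * PLL off: SYSCLK =  4 MHz -> max SCLK ~ 0.85 *  4 MHz / 2 =  1.7 MHz
     */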
static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
@@ -294,10 +374,10 @@ static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
const __be16 cmd = mcp251xfd_cmd_reset();
int err;
- /* The Set Mode and SPI Reset command only seems to works if
- * the controller is not in Sleep Mode.
+ /* The Set Mode and SPI Reset command only works if the
+ * controller is not in Sleep Mode.
*/
- err = mcp251xfd_chip_clock_enable(priv);
+ err = mcp251xfd_chip_wake(priv);
if (err)
return err;
@@ -311,34 +391,29 @@ static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv)
{
- u32 osc, osc_reference;
+ u32 osc_reference, osc_mask;
u8 mode;
int err;
- err = mcp251xfd_chip_get_mode(priv, &mode);
- if (err)
- return err;
-
- if (mode != MCP251XFD_REG_CON_MODE_CONFIG) {
- netdev_info(priv->ndev,
- "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
- mcp251xfd_get_mode_str(mode), mode);
- return -ETIMEDOUT;
- }
-
+ /* Check for reset defaults of OSC reg.
+ * This will take care of stabilization period.
+ */
osc_reference = MCP251XFD_REG_OSC_OSCRDY |
FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
MCP251XFD_REG_OSC_CLKODIV_10);
+ osc_mask = osc_reference | MCP251XFD_REG_OSC_PLLRDY;
+ err = mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask);
+ if (err)
+ return err;
- /* check reset defaults of OSC reg */
- err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
+ err = mcp251xfd_chip_get_mode(priv, &mode);
if (err)
return err;
- if (osc != osc_reference) {
+ if (mode != MCP251XFD_REG_CON_MODE_CONFIG) {
netdev_info(priv->ndev,
- "Controller failed to reset. osc=0x%08x, reference value=0x%08x.\n",
- osc, osc_reference);
+ "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
+ mcp251xfd_get_mode_str(mode), mode);
return -ETIMEDOUT;
}
@@ -374,7 +449,7 @@ static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv)
static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
{
- u32 osc;
+ u32 osc, osc_reference, osc_mask;
int err;
/* Activate Low Power Mode on Oscillator Disable. This only
@@ -384,10 +459,29 @@ static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
osc = MCP251XFD_REG_OSC_LPMEN |
FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
MCP251XFD_REG_OSC_CLKODIV_10);
+ osc_reference = MCP251XFD_REG_OSC_OSCRDY;
+ osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;
+
+ if (priv->pll_enable) {
+ osc |= MCP251XFD_REG_OSC_PLLEN;
+ osc_reference |= MCP251XFD_REG_OSC_PLLRDY;
+ }
+
err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
if (err)
return err;
+ err = mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask);
+ if (err)
+ return err;
+
+ priv->spi->max_speed_hz = priv->spi_max_speed_hz_fast;
+
+ return 0;
+}
+
+static int mcp251xfd_chip_timestamp_init(const struct mcp251xfd_priv *priv)
+{
/* Set Time Base Counter Prescaler to 1.
*
* This means an overflow of the 32 bit Time Base Counter
@@ -628,14 +722,14 @@ static int mcp251xfd_chip_interrupts_disable(const struct mcp251xfd_priv *priv)
return regmap_write(priv->map_reg, MCP251XFD_REG_CRC, 0);
}
-static int mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
- const enum can_state state)
+static void mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
+ const enum can_state state)
{
priv->can.state = state;
mcp251xfd_chip_interrupts_disable(priv);
mcp251xfd_chip_rx_int_disable(priv);
- return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
+ mcp251xfd_chip_sleep(priv);
}
static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
@@ -650,6 +744,10 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
if (err)
goto out_chip_stop;
+ err = mcp251xfd_chip_timestamp_init(priv);
+ if (err)
+ goto out_chip_stop;
+
err = mcp251xfd_set_bittiming(priv);
if (err)
goto out_chip_stop;
@@ -662,7 +760,9 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
if (err)
goto out_chip_stop;
- mcp251xfd_ring_init(priv);
+ err = mcp251xfd_ring_init(priv);
+ if (err)
+ goto out_chip_stop;
err = mcp251xfd_chip_fifo_init(priv);
if (err)
@@ -1284,6 +1384,20 @@ static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
return 0;
}
+static int mcp251xfd_read_regs_status(struct mcp251xfd_priv *priv)
+{
+ const int val_bytes = regmap_get_val_bytes(priv->map_reg);
+ size_t len;
+
+ if (priv->rx_ring_num == 1)
+ len = sizeof(priv->regs_status.intf);
+ else
+ len = sizeof(priv->regs_status);
+
+ return regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
+ &priv->regs_status, len / val_bytes);
+}
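regmap_bulk_read() takes a count of registers, so this helper fetches either just the INT register or the whole status window starting at MCP251XFD_REG_INT. That relies on regs_status starting with intf followed by rxif, along these lines (sketch, field layout assumed):

    struct mcp251xfd_regs_status {
            u32 intf;       /* MCP251XFD_REG_INT, always read */
            u32 rxif;       /* RX pending flags, read only with >1 RX ring;
                             * otherwise synthesized in the IRQ handler below */
    };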
+
#define mcp251xfd_handle(priv, irq, ...) \
({ \
struct mcp251xfd_priv *_priv = (priv); \
@@ -1300,7 +1414,6 @@ static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
{
struct mcp251xfd_priv *priv = dev_id;
- const int val_bytes = regmap_get_val_bytes(priv->map_reg);
irqreturn_t handled = IRQ_NONE;
int err;
@@ -1312,21 +1425,28 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
if (!rx_pending)
break;
+ /* Assume the 1st RX-FIFO is pending; if other FIFOs
+ * are pending too, the main IRQ handler will take
+ * care of them.
+ */
+ priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr);
err = mcp251xfd_handle(priv, rxif);
if (err)
goto out_fail;
handled = IRQ_HANDLED;
- } while (1);
+
+		/* We don't know which RX-FIFO is pending, so only
+		 * handle the 1st RX-FIFO here. Leave the loop if we
+		 * have more than 1 RX-FIFO to avoid starvation.
+		 */
+ } while (priv->rx_ring_num == 1);
do {
u32 intf_pending, intf_pending_clearable;
bool set_normal_mode = false;
- err = regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
- &priv->regs_status,
- sizeof(priv->regs_status) /
- val_bytes);
+ err = mcp251xfd_read_regs_status(priv);
if (err)
goto out_fail;
@@ -1478,6 +1598,7 @@ static int mcp251xfd_open(struct net_device *ndev)
goto out_transceiver_disable;
mcp251xfd_timestamp_init(priv);
+ clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
can_rx_offload_enable(&priv->offload);
err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
@@ -1498,6 +1619,7 @@ static int mcp251xfd_open(struct net_device *ndev)
free_irq(spi->irq, priv);
out_can_rx_offload_disable:
can_rx_offload_disable(&priv->offload);
+ set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
mcp251xfd_timestamp_stop(priv);
out_transceiver_disable:
mcp251xfd_transceiver_disable(priv);
@@ -1517,6 +1639,8 @@ static int mcp251xfd_stop(struct net_device *ndev)
struct mcp251xfd_priv *priv = netdev_priv(ndev);
netif_stop_queue(ndev);
+ set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
+ hrtimer_cancel(&priv->rx_irq_timer);
mcp251xfd_chip_interrupts_disable(priv);
free_irq(ndev->irq, priv);
can_rx_offload_disable(&priv->offload);
@@ -1621,8 +1745,9 @@ static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv)
}
static int
-mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
- u32 *dev_id, u32 *effective_speed_hz)
+mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id,
+ u32 *effective_speed_hz_slow,
+ u32 *effective_speed_hz_fast)
{
struct mcp251xfd_map_buf_nocrc *buf_rx;
struct mcp251xfd_map_buf_nocrc *buf_tx;
@@ -1641,16 +1766,20 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
xfer[0].tx_buf = buf_tx;
xfer[0].len = sizeof(buf_tx->cmd);
+ xfer[0].speed_hz = priv->spi_max_speed_hz_slow;
xfer[1].rx_buf = buf_rx->data;
xfer[1].len = sizeof(dev_id);
+ xfer[1].speed_hz = priv->spi_max_speed_hz_fast;
mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID);
+
err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer));
if (err)
goto out_kfree_buf_tx;
*dev_id = be32_to_cpup((__be32 *)buf_rx->data);
- *effective_speed_hz = xfer->effective_speed_hz;
+ *effective_speed_hz_slow = xfer[0].effective_speed_hz;
+ *effective_speed_hz_fast = xfer[1].effective_speed_hz;
out_kfree_buf_tx:
kfree(buf_tx);
@@ -1666,34 +1795,45 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
static int
mcp251xfd_register_done(const struct mcp251xfd_priv *priv)
{
- u32 dev_id, effective_speed_hz;
+ u32 dev_id, effective_speed_hz_slow, effective_speed_hz_fast;
+ unsigned long clk_rate;
int err;
err = mcp251xfd_register_get_dev_id(priv, &dev_id,
- &effective_speed_hz);
+ &effective_speed_hz_slow,
+ &effective_speed_hz_fast);
if (err)
return err;
+ clk_rate = clk_get_rate(priv->clk);
+
netdev_info(priv->ndev,
- "%s rev%lu.%lu (%cRX_INT %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD c:%u.%02uMHz m:%u.%02uMHz r:%u.%02uMHz e:%u.%02uMHz) successfully initialized.\n",
+ "%s rev%lu.%lu (%cRX_INT %cPLL %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD o:%lu.%02luMHz c:%u.%02uMHz m:%u.%02uMHz rs:%u.%02uMHz es:%u.%02uMHz rf:%u.%02uMHz ef:%u.%02uMHz) successfully initialized.\n",
mcp251xfd_get_model_str(priv),
FIELD_GET(MCP251XFD_REG_DEVID_ID_MASK, dev_id),
FIELD_GET(MCP251XFD_REG_DEVID_REV_MASK, dev_id),
priv->rx_int ? '+' : '-',
+ priv->pll_enable ? '+' : '-',
MCP251XFD_QUIRK_ACTIVE(MAB_NO_WARN),
MCP251XFD_QUIRK_ACTIVE(CRC_REG),
MCP251XFD_QUIRK_ACTIVE(CRC_RX),
MCP251XFD_QUIRK_ACTIVE(CRC_TX),
MCP251XFD_QUIRK_ACTIVE(ECC),
MCP251XFD_QUIRK_ACTIVE(HALF_DUPLEX),
+ clk_rate / 1000000,
+ clk_rate % 1000000 / 1000 / 10,
priv->can.clock.freq / 1000000,
priv->can.clock.freq % 1000000 / 1000 / 10,
priv->spi_max_speed_hz_orig / 1000000,
priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10,
- priv->spi->max_speed_hz / 1000000,
- priv->spi->max_speed_hz % 1000000 / 1000 / 10,
- effective_speed_hz / 1000000,
- effective_speed_hz % 1000000 / 1000 / 10);
+ priv->spi_max_speed_hz_slow / 1000000,
+ priv->spi_max_speed_hz_slow % 1000000 / 1000 / 10,
+ effective_speed_hz_slow / 1000000,
+ effective_speed_hz_slow % 1000000 / 1000 / 10,
+ priv->spi_max_speed_hz_fast / 1000000,
+ priv->spi_max_speed_hz_fast % 1000000 / 1000 / 10,
+ effective_speed_hz_fast / 1000000,
+ effective_speed_hz_fast % 1000000 / 1000 / 10);
return 0;
}
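The "%u.%02uMHz" pairs in the message above are pure integer math: whole MHz followed by two fractional digits. A quick standalone check of the formula:

#include <stdio.h>

static void print_mhz(const char *tag, unsigned int hz)
{
	/* e.g. 8500000 Hz: 8500000 % 1000000 = 500000,
	 * / 1000 = 500, / 10 = 50 -> "8.50MHz"
	 */
	printf("%s%u.%02uMHz\n", tag, hz / 1000000, hz % 1000000 / 1000 / 10);
}

int main(void)
{
	print_mhz("c:", 40000000);	/* 40.00MHz */
	print_mhz("rs:", 8500000);	/* 8.50MHz */
	print_mhz("ef:", 17000000);	/* 17.00MHz */
	return 0;
}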
@@ -1719,19 +1859,27 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
if (err == -ENODEV)
goto out_runtime_disable;
if (err)
- goto out_chip_set_mode_sleep;
+ goto out_chip_sleep;
+
+ err = mcp251xfd_chip_clock_init(priv);
+ if (err == -ENODEV)
+ goto out_runtime_disable;
+ if (err)
+ goto out_chip_sleep;
err = mcp251xfd_register_chip_detect(priv);
if (err)
- goto out_chip_set_mode_sleep;
+ goto out_chip_sleep;
err = mcp251xfd_register_check_rx_int(priv);
if (err)
- goto out_chip_set_mode_sleep;
+ goto out_chip_sleep;
+
+ mcp251xfd_ethtool_init(priv);
err = register_candev(ndev);
if (err)
- goto out_chip_set_mode_sleep;
+ goto out_chip_sleep;
err = mcp251xfd_register_done(priv);
if (err)
@@ -1741,7 +1889,7 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
* disable the clocks and vdd. If CONFIG_PM is not enabled,
* the clocks and vdd will stay powered.
*/
- err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
+ err = mcp251xfd_chip_sleep(priv);
if (err)
goto out_unregister_candev;
@@ -1751,8 +1899,8 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
out_unregister_candev:
unregister_candev(ndev);
- out_chip_set_mode_sleep:
- mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
+ out_chip_sleep:
+ mcp251xfd_chip_sleep(priv);
out_runtime_disable:
pm_runtime_disable(ndev->dev.parent);
out_runtime_put_noidle:
@@ -1768,10 +1916,10 @@ static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv)
unregister_candev(ndev);
- pm_runtime_get_sync(ndev->dev.parent);
- pm_runtime_put_noidle(ndev->dev.parent);
- mcp251xfd_clks_and_vdd_disable(priv);
- pm_runtime_disable(ndev->dev.parent);
+ if (pm_runtime_enabled(ndev->dev.parent))
+ pm_runtime_disable(ndev->dev.parent);
+ else
+ mcp251xfd_clks_and_vdd_disable(priv);
}
static const struct of_device_id mcp251xfd_of_match[] = {
@@ -1814,6 +1962,7 @@ static int mcp251xfd_probe(struct spi_device *spi)
struct gpio_desc *rx_int;
struct regulator *reg_vdd, *reg_xceiver;
struct clk *clk;
+ bool pll_enable = false;
u32 freq = 0;
int err;
@@ -1864,12 +2013,8 @@ static int mcp251xfd_probe(struct spi_device *spi)
return -ERANGE;
}
- if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER) {
- dev_err(&spi->dev,
- "Oscillator frequency (%u Hz) is too low and PLL is not supported.\n",
- freq);
- return -ERANGE;
- }
+ if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER)
+ pll_enable = true;
ndev = alloc_candev(sizeof(struct mcp251xfd_priv),
MCP251XFD_TX_OBJ_NUM_MAX);
@@ -1885,6 +2030,8 @@ static int mcp251xfd_probe(struct spi_device *spi)
priv = netdev_priv(ndev);
spi_set_drvdata(spi, priv);
priv->can.clock.freq = freq;
+ if (pll_enable)
+ priv->can.clock.freq *= MCP251XFD_OSC_PLL_MULTIPLIER;
priv->can.do_set_mode = mcp251xfd_set_mode;
priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
priv->can.bittiming_const = &mcp251xfd_bittiming_const;
@@ -1893,10 +2040,12 @@ static int mcp251xfd_probe(struct spi_device *spi)
CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
CAN_CTRLMODE_CC_LEN8_DLC;
+ set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
priv->ndev = ndev;
priv->spi = spi;
priv->rx_int = rx_int;
priv->clk = clk;
+ priv->pll_enable = pll_enable;
priv->reg_vdd = reg_vdd;
priv->reg_xceiver = reg_xceiver;
@@ -1934,7 +2083,16 @@ static int mcp251xfd_probe(struct spi_device *spi)
*
*/
priv->spi_max_speed_hz_orig = spi->max_speed_hz;
- spi->max_speed_hz = min(spi->max_speed_hz, freq / 2 / 1000 * 850);
+ priv->spi_max_speed_hz_slow = min(spi->max_speed_hz,
+ freq / 2 / 1000 * 850);
+ if (priv->pll_enable)
+ priv->spi_max_speed_hz_fast = min(spi->max_speed_hz,
+ freq *
+ MCP251XFD_OSC_PLL_MULTIPLIER /
+ 2 / 1000 * 850);
+ else
+ priv->spi_max_speed_hz_fast = priv->spi_max_speed_hz_slow;
+ spi->max_speed_hz = priv->spi_max_speed_hz_slow;
spi->bits_per_word = 8;
spi->rt = true;
err = spi_setup(spi);
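Both SPI limits computed above follow the same rule: SCLK at most 85% of SYSCLOCK/2, with the slow limit based on the raw oscillator and the fast limit on the PLL output. A standalone recalculation, assuming MCP251XFD_OSC_PLL_MULTIPLIER is 10 (per the MCP251xFD data sheet):

#include <stdio.h>

#define PLL_MULTIPLIER 10U	/* assumed value of MCP251XFD_OSC_PLL_MULTIPLIER */

static unsigned int max_spi_hz(unsigned int sysclock_hz)
{
	/* 85% of SYSCLOCK/2, in the same integer math as the driver */
	return sysclock_hz / 2 / 1000 * 850;
}

int main(void)
{
	unsigned int freq = 4000000;	/* 4 MHz oscillator, PLL case */

	printf("slow: %u Hz\n", max_spi_hz(freq));			/* 1700000 */
	printf("fast: %u Hz\n", max_spi_hz(freq * PLL_MULTIPLIER));	/* 17000000 */
	return 0;
}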
@@ -1951,8 +2109,11 @@ static int mcp251xfd_probe(struct spi_device *spi)
goto out_free_candev;
err = mcp251xfd_register(priv);
- if (err)
+ if (err) {
+ dev_err_probe(&spi->dev, err, "Failed to detect %s.\n",
+ mcp251xfd_get_model_str(priv));
goto out_can_rx_offload_del;
+ }
return 0;
@@ -1966,7 +2127,7 @@ static int mcp251xfd_probe(struct spi_device *spi)
return err;
}
-static int mcp251xfd_remove(struct spi_device *spi)
+static void mcp251xfd_remove(struct spi_device *spi)
{
struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
struct net_device *ndev = priv->ndev;
@@ -1975,8 +2136,6 @@ static int mcp251xfd_remove(struct spi_device *spi)
mcp251xfd_unregister(priv);
spi->max_speed_hz = priv->spi_max_speed_hz_orig;
free_candev(ndev);
-
- return 0;
}
static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device)
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
index ffae8fdd3af0..c991b30bc9f0 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
@@ -207,10 +207,10 @@ static void mcp251xfd_dump_tx_ring(const struct mcp251xfd_priv *priv,
.val = tx->base,
}, {
.key = MCP251XFD_DUMP_OBJECT_RING_KEY_NR,
- .val = 0,
+ .val = tx->nr,
}, {
.key = MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR,
- .val = MCP251XFD_TX_FIFO,
+ .val = tx->fifo_nr,
}, {
.key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM,
.val = tx->obj_num,
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
new file mode 100644
index 000000000000..6c7a57f16cc6
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2021, 2022 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <linux/ethtool.h>
+
+#include "mcp251xfd.h"
+#include "mcp251xfd-ram.h"
+
+static void
+mcp251xfd_ring_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ const struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+ struct can_ram_layout layout;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
+ ring->rx_max_pending = layout.max_rx;
+ ring->tx_max_pending = layout.max_tx;
+
+ ring->rx_pending = priv->rx_obj_num;
+ ring->tx_pending = priv->tx->obj_num;
+}
+
+static int
+mcp251xfd_ring_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+ struct can_ram_layout layout;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, ring, NULL, fd_mode);
+ if ((layout.cur_rx != priv->rx_obj_num ||
+ layout.cur_tx != priv->tx->obj_num) &&
+ netif_running(ndev))
+ return -EBUSY;
+
+ priv->rx_obj_num = layout.cur_rx;
+ priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
+ priv->tx->obj_num = layout.cur_tx;
+
+ return 0;
+}
+
+static int mcp251xfd_ring_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ u32 rx_max_frames, tx_max_frames;
+
+ /* The ethtool doc says:
+ * To disable coalescing, set usecs = 0 and max_frames = 1.
+ */
+ if (priv->rx_obj_num_coalesce_irq == 0)
+ rx_max_frames = 1;
+ else
+ rx_max_frames = priv->rx_obj_num_coalesce_irq;
+
+ ec->rx_max_coalesced_frames_irq = rx_max_frames;
+ ec->rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq;
+
+ if (priv->tx_obj_num_coalesce_irq == 0)
+ tx_max_frames = 1;
+ else
+ tx_max_frames = priv->tx_obj_num_coalesce_irq;
+
+ ec->tx_max_coalesced_frames_irq = tx_max_frames;
+ ec->tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq;
+
+ return 0;
+}
+
+static int mcp251xfd_ring_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+ const struct ethtool_ringparam ring = {
+ .rx_pending = priv->rx_obj_num,
+ .tx_pending = priv->tx->obj_num,
+ };
+ struct can_ram_layout layout;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, ec, fd_mode);
+
+ if ((layout.rx_coalesce != priv->rx_obj_num_coalesce_irq ||
+ ec->rx_coalesce_usecs_irq != priv->rx_coalesce_usecs_irq ||
+ layout.tx_coalesce != priv->tx_obj_num_coalesce_irq ||
+ ec->tx_coalesce_usecs_irq != priv->tx_coalesce_usecs_irq) &&
+ netif_running(ndev))
+ return -EBUSY;
+
+ priv->rx_obj_num = layout.cur_rx;
+ priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
+ priv->rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
+
+ priv->tx->obj_num = layout.cur_tx;
+ priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
+ priv->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
+
+ return 0;
+}
+
+static const struct ethtool_ops mcp251xfd_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_TX_USECS_IRQ |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
+ .get_ringparam = mcp251xfd_ring_get_ringparam,
+ .set_ringparam = mcp251xfd_ring_set_ringparam,
+ .get_coalesce = mcp251xfd_ring_get_coalesce,
+ .set_coalesce = mcp251xfd_ring_set_coalesce,
+};
+
+void mcp251xfd_ethtool_init(struct mcp251xfd_priv *priv)
+{
+ struct can_ram_layout layout;
+
+ priv->ndev->ethtool_ops = &mcp251xfd_ethtool_ops;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, false);
+ priv->rx_obj_num = layout.default_rx;
+ priv->tx->obj_num = layout.default_tx;
+
+ priv->rx_obj_num_coalesce_irq = 0;
+ priv->tx_obj_num_coalesce_irq = 0;
+ priv->rx_coalesce_usecs_irq = 0;
+ priv->tx_coalesce_usecs_irq = 0;
+}
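The get_coalesce callback above maps the driver's internal "0 = disabled" convention onto ethtool's "usecs = 0, max_frames = 1" convention. A condensed model of that mapping:

#include <stdio.h>

/* internal value 0 means coalescing off; ethtool reports that
 * as max_frames = 1 (with usecs = 0), per the ethtool docs
 */
static unsigned int to_ethtool_max_frames(unsigned int obj_num_coalesce)
{
	return obj_num_coalesce ? obj_num_coalesce : 1;
}

int main(void)
{
	printf("off    -> max_frames %u\n", to_ethtool_max_frames(0));
	printf("8 objs -> max_frames %u\n", to_ethtool_max_frames(8));
	return 0;
}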
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
new file mode 100644
index 000000000000..9e8e82cdba46
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2021, 2022 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include "mcp251xfd-ram.h"
+
+static inline u8 can_ram_clamp(const struct can_ram_config *config,
+ const struct can_ram_obj_config *obj,
+ u8 val)
+{
+ u8 max;
+
+ max = min_t(u8, obj->max, obj->fifo_num * config->fifo_depth);
+ return clamp(val, obj->min, max);
+}
+
+static u8
+can_ram_rounddown_pow_of_two(const struct can_ram_config *config,
+ const struct can_ram_obj_config *obj,
+ const u8 coalesce, u8 val)
+{
+ u8 fifo_num = obj->fifo_num;
+ u8 ret = 0, i;
+
+ val = can_ram_clamp(config, obj, val);
+
+ if (coalesce) {
+	/* Use the 1st FIFO for coalescing, if requested.
+	 *
+	 * Either use the complete FIFO (and the FIFO Full IRQ)
+	 * for coalescing, or only half of the FIFO (and the
+	 * FIFO Half Full IRQ) and use the remaining half for
+	 * normal objects.
+	 */
+ ret = min_t(u8, coalesce * 2, config->fifo_depth);
+ val -= ret;
+ fifo_num--;
+ }
+
+ for (i = 0; i < fifo_num && val; i++) {
+ u8 n;
+
+ n = min_t(u8, rounddown_pow_of_two(val),
+ config->fifo_depth);
+
+ /* skip small FIFOs */
+ if (n < obj->fifo_depth_min)
+ return ret;
+
+ ret += n;
+ val -= n;
+ }
+
+ return ret;
+}
+
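The loop above carves the requested object count into per-FIFO chunks, each a power of two capped at the FIFO depth, and stops once a chunk would fall below the minimum depth. A self-contained model of that distribution (clamping and the coalescing branch omitted):

#include <stdio.h>

/* largest power of two <= val, for val >= 1 */
static unsigned int rounddown_pow_of_two(unsigned int val)
{
	unsigned int ret = 1;

	while (ret * 2 <= val)
		ret *= 2;
	return ret;
}

int main(void)
{
	unsigned int fifo_depth = 32, fifo_num = 3, depth_min = 4;
	unsigned int val = 42, ret = 0;

	for (unsigned int i = 0; i < fifo_num && val; i++) {
		unsigned int n = rounddown_pow_of_two(val);

		if (n > fifo_depth)
			n = fifo_depth;
		if (n < depth_min)	/* skip small FIFOs */
			break;
		ret += n;
		val -= n;
	}
	printf("usable objects: %u\n", ret);	/* 42 -> 32 + 8 = 40 */
	return 0;
}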
+void can_ram_get_layout(struct can_ram_layout *layout,
+ const struct can_ram_config *config,
+ const struct ethtool_ringparam *ring,
+ const struct ethtool_coalesce *ec,
+ const bool fd_mode)
+{
+ u8 num_rx, num_tx;
+ u16 ram_free;
+
+ /* default CAN */
+
+ num_tx = config->tx.def[fd_mode];
+ num_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx);
+
+ ram_free = config->size;
+ ram_free -= config->tx.size[fd_mode] * num_tx;
+
+ num_rx = ram_free / config->rx.size[fd_mode];
+
+ layout->default_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx);
+ layout->default_tx = num_tx;
+
+ /* MAX CAN */
+
+ ram_free = config->size;
+ ram_free -= config->tx.size[fd_mode] * config->tx.min;
+ num_rx = ram_free / config->rx.size[fd_mode];
+
+ ram_free = config->size;
+ ram_free -= config->rx.size[fd_mode] * config->rx.min;
+ num_tx = ram_free / config->tx.size[fd_mode];
+
+ layout->max_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx);
+ layout->max_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx);
+
+ /* cur CAN */
+
+ if (ring) {
+ u8 num_rx_coalesce = 0, num_tx_coalesce = 0;
+
+ num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, ring->rx_pending);
+
+ /* The ethtool doc says:
+ * To disable coalescing, set usecs = 0 and max_frames = 1.
+ */
+ if (ec && !(ec->rx_coalesce_usecs_irq == 0 &&
+ ec->rx_max_coalesced_frames_irq == 1)) {
+ u8 max;
+
+			/* use at most half of the available objects for coalescing */
+ max = min_t(u8, num_rx / 2, config->fifo_depth);
+ num_rx_coalesce = clamp(ec->rx_max_coalesced_frames_irq,
+ (u32)config->rx.fifo_depth_coalesce_min,
+ (u32)max);
+ num_rx_coalesce = rounddown_pow_of_two(num_rx_coalesce);
+
+ num_rx = can_ram_rounddown_pow_of_two(config, &config->rx,
+ num_rx_coalesce, num_rx);
+ }
+
+ ram_free = config->size - config->rx.size[fd_mode] * num_rx;
+ num_tx = ram_free / config->tx.size[fd_mode];
+ num_tx = min_t(u8, ring->tx_pending, num_tx);
+ num_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx);
+
+ /* The ethtool doc says:
+ * To disable coalescing, set usecs = 0 and max_frames = 1.
+ */
+ if (ec && !(ec->tx_coalesce_usecs_irq == 0 &&
+ ec->tx_max_coalesced_frames_irq == 1)) {
+ u8 max;
+
+			/* use at most half of the available objects for coalescing */
+ max = min_t(u8, num_tx / 2, config->fifo_depth);
+ num_tx_coalesce = clamp(ec->tx_max_coalesced_frames_irq,
+ (u32)config->tx.fifo_depth_coalesce_min,
+ (u32)max);
+ num_tx_coalesce = rounddown_pow_of_two(num_tx_coalesce);
+
+ num_tx = can_ram_rounddown_pow_of_two(config, &config->tx,
+ num_tx_coalesce, num_tx);
+ }
+
+ layout->cur_rx = num_rx;
+ layout->cur_tx = num_tx;
+ layout->rx_coalesce = num_rx_coalesce;
+ layout->tx_coalesce = num_tx_coalesce;
+ } else {
+ layout->cur_rx = layout->default_rx;
+ layout->cur_tx = layout->default_tx;
+ layout->rx_coalesce = 0;
+ layout->tx_coalesce = 0;
+ }
+}
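The coalescing branches above clamp the requested frame count between the minimum coalescing depth and half of the currently available objects, then round down to a power of two so the FIFO Half Full / Full IRQs line up. Worked standalone:

#include <stdio.h>

static unsigned int clamp_u(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

static unsigned int rounddown_pow_of_two(unsigned int val)
{
	unsigned int ret = 1;

	while (ret * 2 <= val)
		ret *= 2;
	return ret;
}

int main(void)
{
	unsigned int num_rx = 32, fifo_depth = 32;
	unsigned int coalesce_min = 8, requested = 10;
	/* use at most half of the available objects for coalescing */
	unsigned int max = num_rx / 2 < fifo_depth ? num_rx / 2 : fifo_depth;
	unsigned int n;

	n = rounddown_pow_of_two(clamp_u(requested, coalesce_min, max));
	printf("coalesce after %u frames\n", n);	/* 10 -> 8 */
	return 0;
}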
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h
new file mode 100644
index 000000000000..7558c1510cbf
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+ *
+ * Copyright (c) 2021, 2022 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
+ */
+
+#ifndef _MCP251XFD_RAM_H
+#define _MCP251XFD_RAM_H
+
+#include <linux/ethtool.h>
+
+#define CAN_RAM_NUM_MAX (-1)
+
+enum can_ram_mode {
+ CAN_RAM_MODE_CAN,
+ CAN_RAM_MODE_CANFD,
+ __CAN_RAM_MODE_MAX
+};
+
+struct can_ram_obj_config {
+ u8 size[__CAN_RAM_MODE_MAX];
+
+ u8 def[__CAN_RAM_MODE_MAX];
+ u8 min;
+ u8 max;
+
+ u8 fifo_num;
+ u8 fifo_depth_min;
+ u8 fifo_depth_coalesce_min;
+};
+
+struct can_ram_config {
+ const struct can_ram_obj_config rx;
+ const struct can_ram_obj_config tx;
+
+ u16 size;
+ u8 fifo_depth;
+};
+
+struct can_ram_layout {
+ u8 default_rx;
+ u8 default_tx;
+
+ u8 max_rx;
+ u8 max_tx;
+
+ u8 cur_rx;
+ u8 cur_tx;
+
+ u8 rx_coalesce;
+ u8 tx_coalesce;
+};
+
+void can_ram_get_layout(struct can_ram_layout *layout,
+ const struct can_ram_config *config,
+ const struct ethtool_ringparam *ring,
+ const struct ethtool_coalesce *ec,
+ const bool fd_mode);
+
+#endif
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
index 7b120c716228..217510c12af5 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
@@ -2,8 +2,8 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2019, 2020 Pengutronix,
-// Marc Kleine-Budde <kernel@pengutronix.de>
+// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
//
#include "mcp251xfd.h"
@@ -47,22 +47,32 @@ mcp251xfd_regmap_nocrc_gather_write(void *context,
return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
}
-static inline bool mcp251xfd_update_bits_read_reg(unsigned int reg)
+static inline bool
+mcp251xfd_update_bits_read_reg(const struct mcp251xfd_priv *priv,
+ unsigned int reg)
{
+ struct mcp251xfd_rx_ring *ring;
+ int n;
+
switch (reg) {
case MCP251XFD_REG_INT:
case MCP251XFD_REG_TEFCON:
- case MCP251XFD_REG_FIFOCON(MCP251XFD_RX_FIFO(0)):
case MCP251XFD_REG_FLTCON(0):
case MCP251XFD_REG_ECCSTAT:
case MCP251XFD_REG_CRC:
return false;
case MCP251XFD_REG_CON:
- case MCP251XFD_REG_FIFOSTA(MCP251XFD_RX_FIFO(0)):
case MCP251XFD_REG_OSC:
case MCP251XFD_REG_ECCCON:
return true;
default:
+ mcp251xfd_for_each_rx_ring(priv, ring, n) {
+ if (reg == MCP251XFD_REG_FIFOCON(ring->fifo_nr))
+ return false;
+ if (reg == MCP251XFD_REG_FIFOSTA(ring->fifo_nr))
+ return true;
+ }
+
WARN(1, "Status of reg 0x%04x unknown.\n", reg);
}
@@ -92,7 +102,7 @@ mcp251xfd_regmap_nocrc_update_bits(void *context, unsigned int reg,
last_byte = mcp251xfd_last_byte_set(mask);
len = last_byte - first_byte + 1;
- if (mcp251xfd_update_bits_read_reg(reg)) {
+ if (mcp251xfd_update_bits_read_reg(priv, reg)) {
struct spi_transfer xfer[2] = { };
struct spi_message msg;
@@ -368,7 +378,7 @@ mcp251xfd_regmap_crc_read(void *context,
* to the caller. It will take care of both cases.
*
*/
- if (reg == MCP251XFD_REG_OSC) {
+ if (reg == MCP251XFD_REG_OSC && val_len == sizeof(__le32)) {
err = 0;
goto out;
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
index 92f9e9b01289..bf3f0f150199 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
@@ -15,6 +15,7 @@
#include <asm/unaligned.h>
#include "mcp251xfd.h"
+#include "mcp251xfd-ram.h"
static inline u8
mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
@@ -53,6 +54,72 @@ mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
}
static void
+mcp251xfd_ring_init_tef(struct mcp251xfd_priv *priv, u16 *base)
+{
+ struct mcp251xfd_tef_ring *tef_ring;
+ struct spi_transfer *xfer;
+ u32 val;
+ u16 addr;
+ u8 len;
+ int i;
+
+ /* TEF */
+ tef_ring = priv->tef;
+ tef_ring->head = 0;
+ tef_ring->tail = 0;
+
+ /* TEF- and TX-FIFO have same number of objects */
+ *base = mcp251xfd_get_tef_obj_addr(priv->tx->obj_num);
+
+ /* FIFO IRQ enable */
+ addr = MCP251XFD_REG_TEFCON;
+ val = MCP251XFD_REG_TEFCON_TEFOVIE | MCP251XFD_REG_TEFCON_TEFNEIE;
+
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->irq_enable_buf,
+ addr, val, val);
+ tef_ring->irq_enable_xfer.tx_buf = &tef_ring->irq_enable_buf;
+ tef_ring->irq_enable_xfer.len = len;
+ spi_message_init_with_transfers(&tef_ring->irq_enable_msg,
+ &tef_ring->irq_enable_xfer, 1);
+
+ /* FIFO increment TEF tail pointer */
+ addr = MCP251XFD_REG_TEFCON;
+ val = MCP251XFD_REG_TEFCON_UINC;
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
+ addr, val, val);
+
+ for (i = 0; i < ARRAY_SIZE(tef_ring->uinc_xfer); i++) {
+ xfer = &tef_ring->uinc_xfer[i];
+ xfer->tx_buf = &tef_ring->uinc_buf;
+ xfer->len = len;
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = 0;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ }
+
+ /* "cs_change == 1" on the last transfer results in an active
+ * chip select after the complete SPI message. This causes the
+ * controller to interpret the next register access as
+ * data. Set "cs_change" of the last transfer to "0" to
+ * properly deactivate the chip select at the end of the
+ * message.
+ */
+ xfer->cs_change = 0;
+
+ if (priv->tx_coalesce_usecs_irq || priv->tx_obj_num_coalesce_irq) {
+ val = MCP251XFD_REG_TEFCON_UINC |
+ MCP251XFD_REG_TEFCON_TEFOVIE |
+ MCP251XFD_REG_TEFCON_TEFHIE;
+
+ len = mcp251xfd_cmd_prepare_write_reg(priv,
+ &tef_ring->uinc_irq_disable_buf,
+ addr, val, val);
+ xfer->tx_buf = &tef_ring->uinc_irq_disable_buf;
+ xfer->len = len;
+ }
+}
+
+static void
mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
const struct mcp251xfd_tx_ring *ring,
struct mcp251xfd_tx_obj *tx_obj,
@@ -88,84 +155,68 @@ mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
ARRAY_SIZE(tx_obj->xfer));
}
-void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
+static void
+mcp251xfd_ring_init_tx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
{
- struct mcp251xfd_tef_ring *tef_ring;
struct mcp251xfd_tx_ring *tx_ring;
- struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL;
struct mcp251xfd_tx_obj *tx_obj;
- struct spi_transfer *xfer;
u32 val;
u16 addr;
u8 len;
- int i, j;
-
- netdev_reset_queue(priv->ndev);
-
- /* TEF */
- tef_ring = priv->tef;
- tef_ring->head = 0;
- tef_ring->tail = 0;
-
- /* FIFO increment TEF tail pointer */
- addr = MCP251XFD_REG_TEFCON;
- val = MCP251XFD_REG_TEFCON_UINC;
- len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
- addr, val, val);
-
- for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) {
- xfer = &tef_ring->uinc_xfer[j];
- xfer->tx_buf = &tef_ring->uinc_buf;
- xfer->len = len;
- xfer->cs_change = 1;
- xfer->cs_change_delay.value = 0;
- xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
- }
-
- /* "cs_change == 1" on the last transfer results in an active
- * chip select after the complete SPI message. This causes the
- * controller to interpret the next register access as
- * data. Set "cs_change" of the last transfer to "0" to
- * properly deactivate the chip select at the end of the
- * message.
- */
- xfer->cs_change = 0;
+ int i;
- /* TX */
tx_ring = priv->tx;
tx_ring->head = 0;
tx_ring->tail = 0;
- tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num);
+ tx_ring->base = *base;
+ tx_ring->nr = 0;
+ tx_ring->fifo_nr = *fifo_nr;
+
+ *base = mcp251xfd_get_tx_obj_addr(tx_ring, tx_ring->obj_num);
+ *fifo_nr += 1;
/* FIFO request to send */
- addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO);
+ addr = MCP251XFD_REG_FIFOCON(tx_ring->fifo_nr);
val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
addr, val, val);
mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);
+}
+
+static void
+mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
+{
+ struct mcp251xfd_rx_ring *rx_ring;
+ struct spi_transfer *xfer;
+ u32 val;
+ u16 addr;
+ u8 len;
+ int i, j;
- /* RX */
mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
rx_ring->head = 0;
rx_ring->tail = 0;
+ rx_ring->base = *base;
rx_ring->nr = i;
- rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i);
+ rx_ring->fifo_nr = *fifo_nr;
- if (!prev_rx_ring)
- rx_ring->base =
- mcp251xfd_get_tx_obj_addr(tx_ring,
- tx_ring->obj_num);
- else
- rx_ring->base = prev_rx_ring->base +
- prev_rx_ring->obj_size *
- prev_rx_ring->obj_num;
+ *base = mcp251xfd_get_rx_obj_addr(rx_ring, rx_ring->obj_num);
+ *fifo_nr += 1;
- prev_rx_ring = rx_ring;
+ /* FIFO IRQ enable */
+ addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
+ val = MCP251XFD_REG_FIFOCON_RXOVIE |
+ MCP251XFD_REG_FIFOCON_TFNRFNIE;
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->irq_enable_buf,
+ addr, val, val);
+ rx_ring->irq_enable_xfer.tx_buf = &rx_ring->irq_enable_buf;
+ rx_ring->irq_enable_xfer.len = len;
+ spi_message_init_with_transfers(&rx_ring->irq_enable_msg,
+ &rx_ring->irq_enable_xfer, 1);
/* FIFO increment RX tail pointer */
- addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
val = MCP251XFD_REG_FIFOCON_UINC;
len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
addr, val, val);
@@ -187,9 +238,149 @@ void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
* the chip select at the end of the message.
*/
xfer->cs_change = 0;
+
+		/* Use the 1st RX-FIFO for IRQ coalescing. If enabled
+		 * (rx_coalesce_usecs_irq or rx_max_coalesced_frames_irq
+		 * is activated), use the last transfer to disable:
+		 *
+		 * - TFNRFNIE (Receive FIFO Not Empty Interrupt)
+		 *
+		 * and enable:
+		 *
+		 * - TFHRFHIE (Receive FIFO Half Full Interrupt)
+		 * - or -
+		 * - TFERFFIE (Receive FIFO Full Interrupt)
+		 *
+		 * depending on rx_max_coalesced_frames_irq.
+		 *
+		 * The RXOVIE (Overflow Interrupt) is always enabled.
+		 */
+ if (rx_ring->nr == 0 && (priv->rx_coalesce_usecs_irq ||
+ priv->rx_obj_num_coalesce_irq)) {
+ val = MCP251XFD_REG_FIFOCON_UINC |
+ MCP251XFD_REG_FIFOCON_RXOVIE;
+
+ if (priv->rx_obj_num_coalesce_irq == rx_ring->obj_num)
+ val |= MCP251XFD_REG_FIFOCON_TFERFFIE;
+ else if (priv->rx_obj_num_coalesce_irq)
+ val |= MCP251XFD_REG_FIFOCON_TFHRFHIE;
+
+ len = mcp251xfd_cmd_prepare_write_reg(priv,
+ &rx_ring->uinc_irq_disable_buf,
+ addr, val, val);
+ xfer->tx_buf = &rx_ring->uinc_irq_disable_buf;
+ xfer->len = len;
+ }
}
}
+int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
+{
+ const struct mcp251xfd_rx_ring *rx_ring;
+ u16 base = 0, ram_used;
+ u8 fifo_nr = 1;
+ int i;
+
+ netdev_reset_queue(priv->ndev);
+
+ mcp251xfd_ring_init_tef(priv, &base);
+ mcp251xfd_ring_init_rx(priv, &base, &fifo_nr);
+ mcp251xfd_ring_init_tx(priv, &base, &fifo_nr);
+
+ /* mcp251xfd_handle_rxif() will iterate over all RX rings.
+ * Rings with their corresponding bit set in
+ * priv->regs_status.rxif are read out.
+ *
+ * If the chip is configured for only 1 RX-FIFO, and if there
+ * is an RX interrupt pending (RXIF in INT register is set),
+ * it must be the 1st RX-FIFO.
+ *
+ * We mark the RXIF of the 1st FIFO as pending here, so that
+ * we can skip the read of the RXIF register in
+ * mcp251xfd_read_regs_status() for the 1 RX-FIFO only case.
+ *
+ * If we use more than 1 RX-FIFO, this value gets overwritten
+ * in mcp251xfd_read_regs_status(), so set it unconditionally
+ * here.
+ */
+ priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr);
+
+ if (priv->tx_obj_num_coalesce_irq) {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: TEF: 0x%03x: %2d*%zu bytes = %4zu bytes (coalesce)\n",
+ mcp251xfd_get_tef_obj_addr(0),
+ priv->tx_obj_num_coalesce_irq,
+ sizeof(struct mcp251xfd_hw_tef_obj),
+ priv->tx_obj_num_coalesce_irq *
+ sizeof(struct mcp251xfd_hw_tef_obj));
+
+ netdev_dbg(priv->ndev,
+ " 0x%03x: %2d*%zu bytes = %4zu bytes\n",
+ mcp251xfd_get_tef_obj_addr(priv->tx_obj_num_coalesce_irq),
+ priv->tx->obj_num - priv->tx_obj_num_coalesce_irq,
+ sizeof(struct mcp251xfd_hw_tef_obj),
+ (priv->tx->obj_num - priv->tx_obj_num_coalesce_irq) *
+ sizeof(struct mcp251xfd_hw_tef_obj));
+ } else {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: TEF: 0x%03x: %2d*%zu bytes = %4zu bytes\n",
+ mcp251xfd_get_tef_obj_addr(0),
+ priv->tx->obj_num, sizeof(struct mcp251xfd_hw_tef_obj),
+ priv->tx->obj_num * sizeof(struct mcp251xfd_hw_tef_obj));
+ }
+
+ mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
+ if (rx_ring->nr == 0 && priv->rx_obj_num_coalesce_irq) {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes (coalesce)\n",
+ rx_ring->nr, rx_ring->fifo_nr,
+ mcp251xfd_get_rx_obj_addr(rx_ring, 0),
+ priv->rx_obj_num_coalesce_irq, rx_ring->obj_size,
+ priv->rx_obj_num_coalesce_irq * rx_ring->obj_size);
+
+ if (priv->rx_obj_num_coalesce_irq == MCP251XFD_FIFO_DEPTH)
+ continue;
+
+ netdev_dbg(priv->ndev,
+ " 0x%03x: %2u*%u bytes = %4u bytes\n",
+ mcp251xfd_get_rx_obj_addr(rx_ring,
+ priv->rx_obj_num_coalesce_irq),
+ rx_ring->obj_num - priv->rx_obj_num_coalesce_irq,
+ rx_ring->obj_size,
+ (rx_ring->obj_num - priv->rx_obj_num_coalesce_irq) *
+ rx_ring->obj_size);
+ } else {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
+ rx_ring->nr, rx_ring->fifo_nr,
+ mcp251xfd_get_rx_obj_addr(rx_ring, 0),
+ rx_ring->obj_num, rx_ring->obj_size,
+ rx_ring->obj_num * rx_ring->obj_size);
+ }
+ }
+
+ netdev_dbg(priv->ndev,
+ "FIFO setup: TX: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
+ priv->tx->fifo_nr,
+ mcp251xfd_get_tx_obj_addr(priv->tx, 0),
+ priv->tx->obj_num, priv->tx->obj_size,
+ priv->tx->obj_num * priv->tx->obj_size);
+
+ netdev_dbg(priv->ndev,
+ "FIFO setup: free: %4d bytes\n",
+ MCP251XFD_RAM_SIZE - (base - MCP251XFD_RAM_START));
+
+ ram_used = base - MCP251XFD_RAM_START;
+ if (ram_used > MCP251XFD_RAM_SIZE) {
+ netdev_err(priv->ndev,
+ "Error during ring configuration, using more RAM (%u bytes) than available (%u bytes).\n",
+ ram_used, MCP251XFD_RAM_SIZE);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
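Since the TEF, RX and TX rings are laid out back to back, base - MCP251XFD_RAM_START is the total RAM consumed, and the -ENOMEM check above guards against over-allocation. A back-of-the-envelope layout check (object sizes derived from the CAN-FD structs, 2 KiB RAM assumed per the data sheet):

#include <stdio.h>

int main(void)
{
	/* assumed sizes: TEF obj 12 bytes, CAN-FD TX obj 72, RX obj 76 */
	unsigned int tef = 4 * 12;	/* TEF depth == TX depth */
	unsigned int tx = 4 * 72;
	unsigned int rx = 16 * 76;
	unsigned int ram_size = 2048;	/* assumed MCP251XFD_RAM_SIZE */
	unsigned int used = tef + tx + rx;

	printf("used %u of %u bytes, free %d\n",
	       used, ram_size, (int)(ram_size - used));
	return 0;
}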
void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
{
int i;
@@ -200,40 +391,103 @@ void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
}
}
+static enum hrtimer_restart mcp251xfd_rx_irq_timer(struct hrtimer *t)
+{
+ struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
+ rx_irq_timer);
+ struct mcp251xfd_rx_ring *ring = priv->rx[0];
+
+ if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
+ return HRTIMER_NORESTART;
+
+ spi_async(priv->spi, &ring->irq_enable_msg);
+
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart mcp251xfd_tx_irq_timer(struct hrtimer *t)
+{
+ struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
+ tx_irq_timer);
+ struct mcp251xfd_tef_ring *ring = priv->tef;
+
+ if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
+ return HRTIMER_NORESTART;
+
+ spi_async(priv->spi, &ring->irq_enable_msg);
+
+ return HRTIMER_NORESTART;
+}
+
+const struct can_ram_config mcp251xfd_ram_config = {
+ .rx = {
+ .size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_rx_obj_can),
+ .size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_rx_obj_canfd),
+ .min = MCP251XFD_RX_OBJ_NUM_MIN,
+ .max = MCP251XFD_RX_OBJ_NUM_MAX,
+ .def[CAN_RAM_MODE_CAN] = CAN_RAM_NUM_MAX,
+ .def[CAN_RAM_MODE_CANFD] = CAN_RAM_NUM_MAX,
+ .fifo_num = MCP251XFD_FIFO_RX_NUM,
+ .fifo_depth_min = MCP251XFD_RX_FIFO_DEPTH_MIN,
+ .fifo_depth_coalesce_min = MCP251XFD_RX_FIFO_DEPTH_COALESCE_MIN,
+ },
+ .tx = {
+ .size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_tef_obj) +
+ sizeof(struct mcp251xfd_hw_tx_obj_can),
+ .size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_tef_obj) +
+ sizeof(struct mcp251xfd_hw_tx_obj_canfd),
+ .min = MCP251XFD_TX_OBJ_NUM_MIN,
+ .max = MCP251XFD_TX_OBJ_NUM_MAX,
+ .def[CAN_RAM_MODE_CAN] = MCP251XFD_TX_OBJ_NUM_CAN_DEFAULT,
+ .def[CAN_RAM_MODE_CANFD] = MCP251XFD_TX_OBJ_NUM_CANFD_DEFAULT,
+ .fifo_num = MCP251XFD_FIFO_TX_NUM,
+ .fifo_depth_min = MCP251XFD_TX_FIFO_DEPTH_MIN,
+ .fifo_depth_coalesce_min = MCP251XFD_TX_FIFO_DEPTH_COALESCE_MIN,
+ },
+ .size = MCP251XFD_RAM_SIZE,
+ .fifo_depth = MCP251XFD_FIFO_DEPTH,
+};
+
int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
{
- struct mcp251xfd_tx_ring *tx_ring;
+ const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
struct mcp251xfd_rx_ring *rx_ring;
- int tef_obj_size, tx_obj_size, rx_obj_size;
- int tx_obj_num;
- int ram_free, i;
+ u8 tx_obj_size, rx_obj_size;
+ u8 rem, i;
- tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj);
- if (mcp251xfd_is_fd_mode(priv)) {
- tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD;
+ /* switching from CAN-2.0 to CAN-FD mode or vice versa */
+ if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) {
+ struct can_ram_layout layout;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
+ priv->rx_obj_num = layout.default_rx;
+ tx_ring->obj_num = layout.default_tx;
+ }
+
+ if (fd_mode) {
tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
+ set_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
} else {
- tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN;
tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
+ clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
}
- tx_ring = priv->tx;
- tx_ring->obj_num = tx_obj_num;
tx_ring->obj_size = tx_obj_size;
- ram_free = MCP251XFD_RAM_SIZE - tx_obj_num *
- (tef_obj_size + tx_obj_size);
-
- for (i = 0;
- i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size;
- i++) {
- int rx_obj_num;
+ rem = priv->rx_obj_num;
+ for (i = 0; i < ARRAY_SIZE(priv->rx) && rem; i++) {
+ u8 rx_obj_num;
- rx_obj_num = ram_free / rx_obj_size;
- rx_obj_num = min(1 << (fls(rx_obj_num) - 1),
- MCP251XFD_RX_OBJ_NUM_MAX);
+ if (i == 0 && priv->rx_obj_num_coalesce_irq)
+ rx_obj_num = min_t(u8, priv->rx_obj_num_coalesce_irq * 2,
+ MCP251XFD_FIFO_DEPTH);
+ else
+ rx_obj_num = min_t(u8, rounddown_pow_of_two(rem),
+ MCP251XFD_FIFO_DEPTH);
+ rem -= rx_obj_num;
rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
GFP_KERNEL);
@@ -241,29 +495,18 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
mcp251xfd_ring_free(priv);
return -ENOMEM;
}
+
rx_ring->obj_num = rx_obj_num;
rx_ring->obj_size = rx_obj_size;
priv->rx[i] = rx_ring;
-
- ram_free -= rx_ring->obj_num * rx_ring->obj_size;
}
priv->rx_ring_num = i;
- netdev_dbg(priv->ndev,
- "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n",
- tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num,
- tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num);
+ hrtimer_init(&priv->rx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ priv->rx_irq_timer.function = mcp251xfd_rx_irq_timer;
- mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
- netdev_dbg(priv->ndev,
- "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n",
- i, rx_ring->obj_num, rx_ring->obj_size,
- rx_ring->obj_size * rx_ring->obj_num);
- }
-
- netdev_dbg(priv->ndev,
- "FIFO setup: free: %d bytes\n",
- ram_free);
+ hrtimer_init(&priv->tx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ priv->tx_irq_timer.function = mcp251xfd_tx_irq_timer;
return 0;
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
index 63f2526464b3..d09f7fbf2ba7 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
@@ -19,7 +19,7 @@
static inline int
mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
const struct mcp251xfd_rx_ring *ring,
- u8 *rx_head)
+ u8 *rx_head, bool *fifo_empty)
{
u32 fifo_sta;
int err;
@@ -30,6 +30,7 @@ mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
return err;
*rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+ *fifo_empty = !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
return 0;
}
@@ -84,10 +85,12 @@ mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
{
u32 new_head;
u8 chip_rx_head;
+ bool fifo_empty;
int err;
- err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
- if (err)
+ err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head,
+ &fifo_empty);
+ if (err || fifo_empty)
return err;
/* chip_rx_head is the next RX-Object filled by the HW.
@@ -251,10 +254,23 @@ int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
int err, n;
mcp251xfd_for_each_rx_ring(priv, ring, n) {
+		/* - if RX IRQ coalescing is active, always handle ring 0
+		 * - otherwise only handle rings whose RX IRQ is pending
+		 */
+ if ((ring->nr > 0 || !priv->rx_obj_num_coalesce_irq) &&
+ !(priv->regs_status.rxif & BIT(ring->fifo_nr)))
+ continue;
+
err = mcp251xfd_handle_rxif_ring(priv, ring);
if (err)
return err;
}
+ if (priv->rx_coalesce_usecs_irq)
+ hrtimer_start(&priv->rx_irq_timer,
+ ns_to_ktime(priv->rx_coalesce_usecs_irq *
+ NSEC_PER_USEC),
+ HRTIMER_MODE_REL);
+
return 0;
}
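This hunk is the time-based half of RX coalescing: the FIFO-not-empty IRQ stays off after the handler runs, and the hrtimer re-enables it rx_coalesce_usecs_irq later (unless the interface went down first). The conversion is plain microseconds to nanoseconds:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

int main(void)
{
	uint32_t rx_coalesce_usecs_irq = 500;	/* example setting */
	uint64_t ns = rx_coalesce_usecs_irq * NSEC_PER_USEC;

	/* the driver passes ns_to_ktime(ns) to hrtimer_start() */
	printf("re-enable RX IRQ after %llu ns\n", (unsigned long long)ns);
	return 0;
}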
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
index 406166005b99..237617b0c125 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
@@ -256,5 +256,11 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
netif_wake_queue(priv->ndev);
}
+ if (priv->tx_coalesce_usecs_irq)
+ hrtimer_start(&priv->tx_irq_timer,
+ ns_to_ktime(priv->tx_coalesce_usecs_irq *
+ NSEC_PER_USEC),
+ HRTIMER_MODE_REL);
+
return 0;
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index f551c900803e..9cb6b5ad8dda 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -2,8 +2,8 @@
*
* mcp251xfd - Microchip MCP251xFD Family CAN controller driver
*
- * Copyright (c) 2019 Pengutronix,
- * Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2019, 2020, 2021 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
* Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
*/
@@ -367,25 +367,6 @@
#define MCP251XFD_REG_DEVID_ID_MASK GENMASK(7, 4)
#define MCP251XFD_REG_DEVID_REV_MASK GENMASK(3, 0)
-/* number of TX FIFO objects, depending on CAN mode
- *
- * FIFO setup: tef: 8*12 bytes = 96 bytes, tx: 8*16 bytes = 128 bytes
- * FIFO setup: tef: 4*12 bytes = 48 bytes, tx: 4*72 bytes = 288 bytes
- */
-#define MCP251XFD_RX_OBJ_NUM_MAX 32
-#define MCP251XFD_TX_OBJ_NUM_CAN 8
-#define MCP251XFD_TX_OBJ_NUM_CANFD 4
-
-#if MCP251XFD_TX_OBJ_NUM_CAN > MCP251XFD_TX_OBJ_NUM_CANFD
-#define MCP251XFD_TX_OBJ_NUM_MAX MCP251XFD_TX_OBJ_NUM_CAN
-#else
-#define MCP251XFD_TX_OBJ_NUM_MAX MCP251XFD_TX_OBJ_NUM_CANFD
-#endif
-
-#define MCP251XFD_NAPI_WEIGHT 32
-#define MCP251XFD_TX_FIFO 1
-#define MCP251XFD_RX_FIFO(x) (MCP251XFD_TX_FIFO + 1 + (x))
-
/* SPI commands */
#define MCP251XFD_SPI_INSTRUCTION_RESET 0x0000
#define MCP251XFD_SPI_INSTRUCTION_WRITE 0x2000
@@ -406,12 +387,38 @@ static_assert(MCP251XFD_TIMESTAMP_WORK_DELAY_SEC <
#define MCP251XFD_OSC_STAB_TIMEOUT_US (10 * MCP251XFD_OSC_STAB_SLEEP_US)
#define MCP251XFD_POLL_SLEEP_US (10)
#define MCP251XFD_POLL_TIMEOUT_US (USEC_PER_MSEC)
+
+/* Misc */
+#define MCP251XFD_NAPI_WEIGHT 32
#define MCP251XFD_SOFTRESET_RETRIES_MAX 3
#define MCP251XFD_READ_CRC_RETRIES_MAX 3
#define MCP251XFD_ECC_CNT_MAX 2
#define MCP251XFD_SANITIZE_SPI 1
#define MCP251XFD_SANITIZE_CAN 1
+/* FIFO and Ring */
+#define MCP251XFD_FIFO_TEF_NUM 1U
+#define MCP251XFD_FIFO_RX_NUM 3U
+#define MCP251XFD_FIFO_TX_NUM 1U
+
+#define MCP251XFD_FIFO_DEPTH 32U
+
+#define MCP251XFD_RX_OBJ_NUM_MIN 16U
+#define MCP251XFD_RX_OBJ_NUM_MAX (MCP251XFD_FIFO_RX_NUM * MCP251XFD_FIFO_DEPTH)
+#define MCP251XFD_RX_FIFO_DEPTH_MIN 4U
+#define MCP251XFD_RX_FIFO_DEPTH_COALESCE_MIN 8U
+
+#define MCP251XFD_TX_OBJ_NUM_MIN 2U
+#define MCP251XFD_TX_OBJ_NUM_MAX 16U
+#define MCP251XFD_TX_OBJ_NUM_CAN_DEFAULT 8U
+#define MCP251XFD_TX_OBJ_NUM_CANFD_DEFAULT 4U
+#define MCP251XFD_TX_FIFO_DEPTH_MIN 2U
+#define MCP251XFD_TX_FIFO_DEPTH_COALESCE_MIN 2U
+
+static_assert(MCP251XFD_FIFO_TEF_NUM == 1U);
+static_assert(MCP251XFD_FIFO_TEF_NUM == MCP251XFD_FIFO_TX_NUM);
+static_assert(MCP251XFD_FIFO_RX_NUM <= 4U);
+
/* Silence TX MAB overflow warnings */
#define MCP251XFD_QUIRK_MAB_NO_WARN BIT(0)
/* Use CRC to access registers */
@@ -512,7 +519,12 @@ struct mcp251xfd_tef_ring {
/* u8 obj_num equals tx_ring->obj_num */
/* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */
+ union mcp251xfd_write_reg_buf irq_enable_buf;
+ struct spi_transfer irq_enable_xfer;
+ struct spi_message irq_enable_msg;
+
union mcp251xfd_write_reg_buf uinc_buf;
+ union mcp251xfd_write_reg_buf uinc_irq_disable_buf;
struct spi_transfer uinc_xfer[MCP251XFD_TX_OBJ_NUM_MAX];
};
@@ -521,6 +533,8 @@ struct mcp251xfd_tx_ring {
unsigned int tail;
u16 base;
+ u8 nr;
+ u8 fifo_nr;
u8 obj_num;
u8 obj_size;
@@ -538,8 +552,13 @@ struct mcp251xfd_rx_ring {
u8 obj_num;
u8 obj_size;
+ union mcp251xfd_write_reg_buf irq_enable_buf;
+ struct spi_transfer irq_enable_xfer;
+ struct spi_message irq_enable_msg;
+
union mcp251xfd_write_reg_buf uinc_buf;
- struct spi_transfer uinc_xfer[MCP251XFD_RX_OBJ_NUM_MAX];
+ union mcp251xfd_write_reg_buf uinc_irq_disable_buf;
+ struct spi_transfer uinc_xfer[MCP251XFD_FIFO_DEPTH];
struct mcp251xfd_hw_rx_obj_canfd obj[];
};
@@ -561,6 +580,7 @@ struct mcp251xfd_ecc {
struct mcp251xfd_regs_status {
u32 intf;
+ u32 rxif;
};
enum mcp251xfd_model {
@@ -574,6 +594,13 @@ struct mcp251xfd_devtype_data {
u32 quirks;
};
+enum mcp251xfd_flags {
+ MCP251XFD_FLAGS_DOWN,
+ MCP251XFD_FLAGS_FD_MODE,
+
+ __MCP251XFD_FLAGS_SIZE__
+};
+
struct mcp251xfd_priv {
struct can_priv can;
struct can_rx_offload offload;
@@ -592,12 +619,24 @@ struct mcp251xfd_priv {
struct spi_device *spi;
u32 spi_max_speed_hz_orig;
+ u32 spi_max_speed_hz_fast;
+ u32 spi_max_speed_hz_slow;
+
+ struct mcp251xfd_tef_ring tef[MCP251XFD_FIFO_TEF_NUM];
+ struct mcp251xfd_rx_ring *rx[MCP251XFD_FIFO_RX_NUM];
+ struct mcp251xfd_tx_ring tx[MCP251XFD_FIFO_TX_NUM];
- struct mcp251xfd_tef_ring tef[1];
- struct mcp251xfd_tx_ring tx[1];
- struct mcp251xfd_rx_ring *rx[1];
+ DECLARE_BITMAP(flags, __MCP251XFD_FLAGS_SIZE__);
u8 rx_ring_num;
+ u8 rx_obj_num;
+ u8 rx_obj_num_coalesce_irq;
+ u8 tx_obj_num_coalesce_irq;
+
+ u32 rx_coalesce_usecs_irq;
+ u32 tx_coalesce_usecs_irq;
+ struct hrtimer rx_irq_timer;
+ struct hrtimer tx_irq_timer;
struct mcp251xfd_ecc ecc;
struct mcp251xfd_regs_status regs_status;
@@ -608,6 +647,7 @@ struct mcp251xfd_priv {
struct gpio_desc *rx_int;
struct clk *clk;
+ bool pll_enable;
struct regulator *reg_vdd;
struct regulator *reg_xceiver;
@@ -776,7 +816,7 @@ mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
int err;
err = regmap_read(priv->map_reg,
- MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO),
+ MCP251XFD_REG_FIFOSTA(priv->tx->fifo_nr),
&fifo_sta);
if (err)
return err;
@@ -878,8 +918,10 @@ int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv);
u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size,
const void *data, size_t data_size);
u16 mcp251xfd_crc16_compute(const void *data, size_t data_size);
+void mcp251xfd_ethtool_init(struct mcp251xfd_priv *priv);
int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv);
-void mcp251xfd_ring_init(struct mcp251xfd_priv *priv);
+extern const struct can_ram_config mcp251xfd_ram_config;
+int mcp251xfd_ring_init(struct mcp251xfd_priv *priv);
void mcp251xfd_ring_free(struct mcp251xfd_priv *priv);
int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv);
int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv);
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index 2ed2370a3166..2d73ebbf3836 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -1787,7 +1787,7 @@ static int es58x_open(struct net_device *netdev)
struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev;
int ret;
- if (atomic_inc_return(&es58x_dev->opened_channel_cnt) == 1) {
+ if (!es58x_dev->opened_channel_cnt) {
ret = es58x_alloc_rx_urbs(es58x_dev);
if (ret)
return ret;
@@ -1805,12 +1805,13 @@ static int es58x_open(struct net_device *netdev)
if (ret)
goto free_urbs;
+ es58x_dev->opened_channel_cnt++;
netif_start_queue(netdev);
return ret;
free_urbs:
- if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt))
+ if (!es58x_dev->opened_channel_cnt)
es58x_free_urbs(es58x_dev);
netdev_err(netdev, "%s: Could not open the network device: %pe\n",
__func__, ERR_PTR(ret));
@@ -1845,7 +1846,8 @@ static int es58x_stop(struct net_device *netdev)
es58x_flush_pending_tx_msg(netdev);
- if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt))
+ es58x_dev->opened_channel_cnt--;
+ if (!es58x_dev->opened_channel_cnt)
es58x_free_urbs(es58x_dev);
return 0;
@@ -2215,7 +2217,6 @@ static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf,
init_usb_anchor(&es58x_dev->tx_urbs_idle);
init_usb_anchor(&es58x_dev->tx_urbs_busy);
atomic_set(&es58x_dev->tx_urbs_idle_cnt, 0);
- atomic_set(&es58x_dev->opened_channel_cnt, 0);
usb_set_intfdata(intf, es58x_dev);
es58x_dev->rx_pipe = usb_rcvbulkpipe(es58x_dev->udev,
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h
index 826a15871573..e5033cb5e695 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.h
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.h
@@ -373,8 +373,6 @@ struct es58x_operators {
* queue wake/stop logic should prevent this URB from getting
* empty. Please refer to es58x_get_tx_urb() for more details.
* @tx_urbs_idle_cnt: number of urbs in @tx_urbs_idle.
- * @opened_channel_cnt: number of channels opened (c.f. es58x_open()
- * and es58x_stop()).
* @ktime_req_ns: kernel timestamp when es58x_set_realtime_diff_ns()
* was called.
* @realtime_diff_ns: difference in nanoseconds between the clocks of
@@ -384,6 +382,10 @@ struct es58x_operators {
* in RX branches.
* @rx_max_packet_size: Maximum length of bulk-in URB.
* @num_can_ch: Number of CAN channel (i.e. number of elements of @netdev).
+ * @opened_channel_cnt: number of channels opened. Free of race
+ * conditions because its two users (net_device_ops:ndo_open()
+ * and net_device_ops:ndo_close()) guarantee that the network
+ *	stack's big kernel lock (a.k.a. rtnl_mutex) is held.
* @rx_cmd_buf_len: Length of @rx_cmd_buf.
* @rx_cmd_buf: The device might split the URB commands in an
* arbitrary amount of pieces. This buffer is used to concatenate
@@ -406,7 +408,6 @@ struct es58x_device {
struct usb_anchor tx_urbs_busy;
struct usb_anchor tx_urbs_idle;
atomic_t tx_urbs_idle_cnt;
- atomic_t opened_channel_cnt;
u64 ktime_req_ns;
s64 realtime_diff_ns;
@@ -415,6 +416,7 @@ struct es58x_device {
u16 rx_max_packet_size;
u8 num_can_ch;
+ u8 opened_channel_cnt;
u16 rx_cmd_buf_len;
union es58x_urb_cmd rx_cmd_buf;
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c
index ec87126e1a7d..c97ffa71fd75 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c
@@ -69,7 +69,8 @@ static int es58x_fd_echo_msg(struct net_device *netdev,
int i, num_element;
u32 rcv_packet_idx;
- const u32 mask = GENMASK(31, sizeof(echo_msg->packet_idx) * 8);
+ const u32 mask = GENMASK(BITS_PER_TYPE(mask) - 1,
+ BITS_PER_TYPE(echo_msg->packet_idx));
num_element = es58x_msg_num_element(es58x_dev->dev,
es58x_fd_urb_cmd->echo_msg,
@@ -172,12 +173,11 @@ static int es58x_fd_rx_event_msg(struct net_device *netdev,
const struct es58x_fd_rx_event_msg *rx_event_msg;
int ret;
+ rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg;
ret = es58x_check_msg_len(es58x_dev->dev, *rx_event_msg, msg_len);
if (ret)
return ret;
- rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg;
-
return es58x_rx_err_msg(netdev, rx_event_msg->error_code,
rx_event_msg->event_code,
get_unaligned_le64(&rx_event_msg->timestamp));
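The rewritten mask is equivalent to the old GENMASK(31, sizeof(echo_msg->packet_idx) * 8), but derives both bounds from the types, so it survives a change of either type. Expanded by hand with a simplified 32-bit GENMASK (u32 mask and u8 packet_idx assumed):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_TYPE(t) (sizeof(t) * 8)
/* 32-bit-only sketch of the kernel's GENMASK() */
#define GENMASK(h, l) \
	(((~0u) >> (31 - (h))) & ~((1u << (l)) - 1))

int main(void)
{
	uint32_t mask;
	uint8_t packet_idx;

	/* GENMASK(31, 8): keep the bits above the 8 bit packet_idx */
	mask = GENMASK(BITS_PER_TYPE(mask) - 1, BITS_PER_TYPE(packet_idx));
	(void)packet_idx;
	printf("mask = 0x%08x\n", mask);	/* 0xffffff00 */
	return 0;
}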
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index b487e3fe770a..67408e316062 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -9,11 +9,12 @@
* Many thanks to all socketcan devs!
*/
+#include <linux/bitfield.h>
#include <linux/ethtool.h>
#include <linux/init.h>
-#include <linux/signal.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/signal.h>
#include <linux/usb.h>
#include <linux/can.h>
@@ -21,14 +22,20 @@
#include <linux/can/error.h>
/* Device specific constants */
-#define USB_GSUSB_1_VENDOR_ID 0x1d50
-#define USB_GSUSB_1_PRODUCT_ID 0x606f
+#define USB_GSUSB_1_VENDOR_ID 0x1d50
+#define USB_GSUSB_1_PRODUCT_ID 0x606f
-#define USB_CANDLELIGHT_VENDOR_ID 0x1209
+#define USB_CANDLELIGHT_VENDOR_ID 0x1209
#define USB_CANDLELIGHT_PRODUCT_ID 0x2323
-#define GSUSB_ENDPOINT_IN 1
-#define GSUSB_ENDPOINT_OUT 2
+#define USB_CES_CANEXT_FD_VENDOR_ID 0x1cd2
+#define USB_CES_CANEXT_FD_PRODUCT_ID 0x606f
+
+#define USB_ABE_CANDEBUGGER_FD_VENDOR_ID 0x16d0
+#define USB_ABE_CANDEBUGGER_FD_PRODUCT_ID 0x10b8
+
+#define GSUSB_ENDPOINT_IN 1
+#define GSUSB_ENDPOINT_OUT 2
/* Device specific constants */
enum gs_usb_breq {
@@ -40,6 +47,11 @@ enum gs_usb_breq {
GS_USB_BREQ_DEVICE_CONFIG,
GS_USB_BREQ_TIMESTAMP,
GS_USB_BREQ_IDENTIFY,
+ GS_USB_BREQ_GET_USER_ID,
+ GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING = GS_USB_BREQ_GET_USER_ID,
+ GS_USB_BREQ_SET_USER_ID,
+ GS_USB_BREQ_DATA_BITTIMING,
+ GS_USB_BREQ_BT_CONST_EXT,
};
enum gs_can_mode {
@@ -87,11 +99,18 @@ struct gs_device_config {
__le32 hw_version;
} __packed;
-#define GS_CAN_MODE_NORMAL 0
-#define GS_CAN_MODE_LISTEN_ONLY BIT(0)
-#define GS_CAN_MODE_LOOP_BACK BIT(1)
-#define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2)
-#define GS_CAN_MODE_ONE_SHOT BIT(3)
+#define GS_CAN_MODE_NORMAL 0
+#define GS_CAN_MODE_LISTEN_ONLY BIT(0)
+#define GS_CAN_MODE_LOOP_BACK BIT(1)
+#define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2)
+#define GS_CAN_MODE_ONE_SHOT BIT(3)
+#define GS_CAN_MODE_HW_TIMESTAMP BIT(4)
+/* GS_CAN_FEATURE_IDENTIFY BIT(5) */
+/* GS_CAN_FEATURE_USER_ID BIT(6) */
+#define GS_CAN_MODE_PAD_PKTS_TO_MAX_PKT_SIZE BIT(7)
+#define GS_CAN_MODE_FD BIT(8)
+/* GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9) */
+/* GS_CAN_FEATURE_BT_CONST_EXT BIT(10) */
struct gs_device_mode {
__le32 mode;
@@ -116,12 +135,25 @@ struct gs_identify_mode {
__le32 mode;
} __packed;
-#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0)
-#define GS_CAN_FEATURE_LOOP_BACK BIT(1)
-#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2)
-#define GS_CAN_FEATURE_ONE_SHOT BIT(3)
-#define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4)
-#define GS_CAN_FEATURE_IDENTIFY BIT(5)
+#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0)
+#define GS_CAN_FEATURE_LOOP_BACK BIT(1)
+#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2)
+#define GS_CAN_FEATURE_ONE_SHOT BIT(3)
+#define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4)
+#define GS_CAN_FEATURE_IDENTIFY BIT(5)
+#define GS_CAN_FEATURE_USER_ID BIT(6)
+#define GS_CAN_FEATURE_PAD_PKTS_TO_MAX_PKT_SIZE BIT(7)
+#define GS_CAN_FEATURE_FD BIT(8)
+#define GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9)
+#define GS_CAN_FEATURE_BT_CONST_EXT BIT(10)
+#define GS_CAN_FEATURE_MASK GENMASK(10, 0)
+
+/* internal quirks - keep in GS_CAN_FEATURE space for now */
+
+/* CANtact Pro original firmware:
+ * BREQ DATA_BITTIMING overlaps with GET_USER_ID
+ */
+#define GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO BIT(31)
struct gs_device_bt_const {
__le32 feature;
@@ -136,7 +168,50 @@ struct gs_device_bt_const {
__le32 brp_inc;
} __packed;
-#define GS_CAN_FLAG_OVERFLOW 1
+struct gs_device_bt_const_extended {
+ __le32 feature;
+ __le32 fclk_can;
+ __le32 tseg1_min;
+ __le32 tseg1_max;
+ __le32 tseg2_min;
+ __le32 tseg2_max;
+ __le32 sjw_max;
+ __le32 brp_min;
+ __le32 brp_max;
+ __le32 brp_inc;
+
+ __le32 dtseg1_min;
+ __le32 dtseg1_max;
+ __le32 dtseg2_min;
+ __le32 dtseg2_max;
+ __le32 dsjw_max;
+ __le32 dbrp_min;
+ __le32 dbrp_max;
+ __le32 dbrp_inc;
+} __packed;
+
+#define GS_CAN_FLAG_OVERFLOW BIT(0)
+#define GS_CAN_FLAG_FD BIT(1)
+#define GS_CAN_FLAG_BRS BIT(2)
+#define GS_CAN_FLAG_ESI BIT(3)
+
+struct classic_can {
+ u8 data[8];
+} __packed;
+
+struct classic_can_quirk {
+ u8 data[8];
+ u8 quirk;
+} __packed;
+
+struct canfd {
+ u8 data[64];
+} __packed;
+
+struct canfd_quirk {
+ u8 data[64];
+ u8 quirk;
+} __packed;
struct gs_host_frame {
u32 echo_id;
@@ -147,7 +222,12 @@ struct gs_host_frame {
u8 flags;
u8 reserved;
- u8 data[8];
+ union {
+ DECLARE_FLEX_ARRAY(struct classic_can, classic_can);
+ DECLARE_FLEX_ARRAY(struct classic_can_quirk, classic_can_quirk);
+ DECLARE_FLEX_ARRAY(struct canfd, canfd);
+ DECLARE_FLEX_ARRAY(struct canfd_quirk, canfd_quirk);
+ };
} __packed;
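The union lets one gs_host_frame layout serve classic CAN (8 data bytes), CAN FD (64), and the quirk variants with one trailing pad byte; the driver then picks hf_size_tx/hf_size_rx from the negotiated features. Sizes checked in a standalone mock-up (the kernel's DECLARE_FLEX_ARRAY replaced by plain sizeof arithmetic):

#include <stdint.h>
#include <stdio.h>

struct gs_host_frame_head {	/* fixed 12-byte header before the union */
	uint32_t echo_id;
	uint32_t can_id;
	uint8_t can_dlc;
	uint8_t channel;
	uint8_t flags;
	uint8_t reserved;
} __attribute__((packed));

int main(void)
{
	size_t head = sizeof(struct gs_host_frame_head);

	printf("classic: %zu, classic+quirk: %zu\n", head + 8, head + 9);
	printf("canfd:   %zu, canfd+quirk:   %zu\n", head + 64, head + 65);
	return 0;
}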
/* The GS USB devices make use of the same flags and masks as in
* linux/can.h and linux/can/error.h, and no additional mapping is necessary.
@@ -158,9 +238,9 @@ struct gs_host_frame {
/* Only launch a max of GS_MAX_RX_URBS usb requests at a time. */
#define GS_MAX_RX_URBS 30
/* Maximum number of interfaces the driver supports per device.
- * Current hardware only supports 2 interfaces. The future may vary.
+ * Current hardware only supports 3 interfaces. The future may vary.
*/
-#define GS_MAX_INTF 2
+#define GS_MAX_INTF 3
struct gs_tx_context {
struct gs_can *dev;
@@ -176,9 +256,12 @@ struct gs_can {
struct usb_device *udev;
struct usb_interface *iface;
- struct can_bittiming_const bt_const;
+ struct can_bittiming_const bt_const, data_bt_const;
unsigned int channel; /* channel number */
+ u32 feature;
+ unsigned int hf_size_tx;
+
/* This lock prevents a race condition between xmit and receive. */
spinlock_t tx_ctx_lock;
struct gs_tx_context tx_context[GS_MAX_TX_URBS];
@@ -191,8 +274,9 @@ struct gs_can {
struct gs_usb {
struct gs_can *canch[GS_MAX_INTF];
struct usb_anchor rx_submitted;
- atomic_t active_channels;
struct usb_device *udev;
+ unsigned int hf_size_rx;
+ u8 active_channels;
};
/* 'allocate' a tx context.
@@ -258,11 +342,7 @@ static int gs_cmd_reset(struct gs_can *gsdev)
usb_sndctrlpipe(interface_to_usbdev(intf), 0),
GS_USB_BREQ_MODE,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- gsdev->channel,
- 0,
- dm,
- sizeof(*dm),
- 1000);
+ gsdev->channel, 0, dm, sizeof(*dm), 1000);
kfree(dm);
@@ -304,6 +384,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
struct gs_host_frame *hf = urb->transfer_buffer;
struct gs_tx_context *txc;
struct can_frame *cf;
+ struct canfd_frame *cfd;
struct sk_buff *skb;
BUG_ON(!usbcan);
@@ -332,18 +413,33 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
return;
if (hf->echo_id == -1) { /* normal rx */
- skb = alloc_can_skb(dev->netdev, &cf);
- if (!skb)
- return;
+ if (hf->flags & GS_CAN_FLAG_FD) {
+ skb = alloc_canfd_skb(dev->netdev, &cfd);
+ if (!skb)
+ return;
+
+ cfd->can_id = le32_to_cpu(hf->can_id);
+ cfd->len = can_fd_dlc2len(hf->can_dlc);
+ if (hf->flags & GS_CAN_FLAG_BRS)
+ cfd->flags |= CANFD_BRS;
+ if (hf->flags & GS_CAN_FLAG_ESI)
+ cfd->flags |= CANFD_ESI;
+
+ memcpy(cfd->data, hf->canfd->data, cfd->len);
+ } else {
+ skb = alloc_can_skb(dev->netdev, &cf);
+ if (!skb)
+ return;
- cf->can_id = le32_to_cpu(hf->can_id);
+ cf->can_id = le32_to_cpu(hf->can_id);
+ can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode);
- can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode);
- memcpy(cf->data, hf->data, 8);
+ memcpy(cf->data, hf->classic_can->data, 8);
- /* ERROR frames tell us information about the controller */
- if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
- gs_update_state(dev, cf);
+ /* ERROR frames tell us information about the controller */
+ if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
+ gs_update_state(dev, cf);
+ }
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += hf->can_dlc;
@@ -392,14 +488,10 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
}
resubmit_urb:
- usb_fill_bulk_urb(urb,
- usbcan->udev,
+ usb_fill_bulk_urb(urb, usbcan->udev,
usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
- hf,
- sizeof(struct gs_host_frame),
- gs_usb_receive_bulk_callback,
- usbcan
- );
+ hf, dev->parent->hf_size_rx,
+ gs_usb_receive_bulk_callback, usbcan);
rc = usb_submit_urb(urb, GFP_ATOMIC);
@@ -436,11 +528,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
usb_sndctrlpipe(interface_to_usbdev(intf), 0),
GS_USB_BREQ_BITTIMING,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- dev->channel,
- 0,
- dbt,
- sizeof(*dbt),
- 1000);
+ dev->channel, 0, dbt, sizeof(*dbt), 1000);
kfree(dbt);
@@ -451,6 +539,44 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
return (rc > 0) ? 0 : rc;
}
+static int gs_usb_set_data_bittiming(struct net_device *netdev)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct can_bittiming *bt = &dev->can.data_bittiming;
+ struct usb_interface *intf = dev->iface;
+ struct gs_device_bittiming *dbt;
+ u8 request = GS_USB_BREQ_DATA_BITTIMING;
+ int rc;
+
+ dbt = kmalloc(sizeof(*dbt), GFP_KERNEL);
+ if (!dbt)
+ return -ENOMEM;
+
+ dbt->prop_seg = cpu_to_le32(bt->prop_seg);
+ dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1);
+ dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2);
+ dbt->sjw = cpu_to_le32(bt->sjw);
+ dbt->brp = cpu_to_le32(bt->brp);
+
+ if (dev->feature & GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO)
+ request = GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING;
+
+ /* request bit timings */
+ rc = usb_control_msg(interface_to_usbdev(intf),
+ usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+ request,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, dbt, sizeof(*dbt), 1000);
+
+ kfree(dbt);
+
+ if (rc < 0)
+ dev_err(netdev->dev.parent,
+ "Couldn't set data bittimings (err=%d)", rc);
+
+ return (rc > 0) ? 0 : rc;
+}
+
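The request substitution above relies on the bRequest collision described in the quirk comment near the top of this patch. A sketch of the presumed enum aliasing (the actual definition sits in the gs_usb_breq enum earlier in this patch, outside this excerpt):

	/* CANtact Pro original firmware answers data-bittiming requests on
	 * the value candleLight assigned to GET_USER_ID.
	 */
	GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING = GS_USB_BREQ_GET_USER_ID,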
static void gs_usb_xmit_callback(struct urb *urb)
{
struct gs_tx_context *txc = urb->context;
@@ -460,10 +586,8 @@ static void gs_usb_xmit_callback(struct urb *urb)
if (urb->status)
netdev_info(netdev, "usb xmit fail %u\n", txc->echo_id);
- usb_free_coherent(urb->dev,
- urb->transfer_buffer_length,
- urb->transfer_buffer,
- urb->transfer_dma);
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
}
static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
@@ -474,6 +598,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
struct urb *urb;
struct gs_host_frame *hf;
struct can_frame *cf;
+ struct canfd_frame *cfd;
int rc;
unsigned int idx;
struct gs_tx_context *txc;
@@ -491,7 +616,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
if (!urb)
goto nomem_urb;
- hf = usb_alloc_coherent(dev->udev, sizeof(*hf), GFP_ATOMIC,
+ hf = usb_alloc_coherent(dev->udev, dev->hf_size_tx, GFP_ATOMIC,
&urb->transfer_dma);
if (!hf) {
netdev_err(netdev, "No memory left for USB buffer\n");
@@ -510,19 +635,31 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
hf->flags = 0;
hf->reserved = 0;
- cf = (struct can_frame *)skb->data;
+ if (can_is_canfd_skb(skb)) {
+ cfd = (struct canfd_frame *)skb->data;
- hf->can_id = cpu_to_le32(cf->can_id);
- hf->can_dlc = can_get_cc_dlc(cf, dev->can.ctrlmode);
+ hf->can_id = cpu_to_le32(cfd->can_id);
+ hf->can_dlc = can_fd_len2dlc(cfd->len);
+ hf->flags |= GS_CAN_FLAG_FD;
+ if (cfd->flags & CANFD_BRS)
+ hf->flags |= GS_CAN_FLAG_BRS;
+ if (cfd->flags & CANFD_ESI)
+ hf->flags |= GS_CAN_FLAG_ESI;
- memcpy(hf->data, cf->data, cf->len);
+ memcpy(hf->canfd->data, cfd->data, cfd->len);
+ } else {
+ cf = (struct can_frame *)skb->data;
+
+ hf->can_id = cpu_to_le32(cf->can_id);
+ hf->can_dlc = can_get_cc_dlc(cf, dev->can.ctrlmode);
+
+ memcpy(hf->classic_can->data, cf->data, cf->len);
+ }
usb_fill_bulk_urb(urb, dev->udev,
usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT),
- hf,
- sizeof(*hf),
- gs_usb_xmit_callback,
- txc);
+ hf, dev->hf_size_tx,
+ gs_usb_xmit_callback, txc);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &dev->tx_submitted);
@@ -539,10 +676,8 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
gs_free_tx_context(txc);
usb_unanchor_urb(urb);
- usb_free_coherent(dev->udev,
- sizeof(*hf),
- hf,
- urb->transfer_dma);
+ usb_free_coherent(dev->udev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
if (rc == -ENODEV) {
netif_device_detach(netdev);
@@ -562,10 +697,8 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
badidx:
- usb_free_coherent(dev->udev,
- sizeof(*hf),
- hf,
- urb->transfer_dma);
+ usb_free_coherent(dev->udev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
nomem_hf:
usb_free_urb(urb);
@@ -582,6 +715,7 @@ static int gs_can_open(struct net_device *netdev)
struct gs_usb *parent = dev->parent;
int rc, i;
struct gs_device_mode *dm;
+ struct gs_host_frame *hf;
u32 ctrlmode;
u32 flags = 0;
@@ -589,7 +723,22 @@ static int gs_can_open(struct net_device *netdev)
if (rc)
return rc;
- if (atomic_add_return(1, &parent->active_channels) == 1) {
+ ctrlmode = dev->can.ctrlmode;
+ if (ctrlmode & CAN_CTRLMODE_FD) {
+ flags |= GS_CAN_MODE_FD;
+
+ if (dev->feature & GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX)
+ dev->hf_size_tx = struct_size(hf, canfd_quirk, 1);
+ else
+ dev->hf_size_tx = struct_size(hf, canfd, 1);
+ } else {
+ if (dev->feature & GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX)
+ dev->hf_size_tx = struct_size(hf, classic_can_quirk, 1);
+ else
+ dev->hf_size_tx = struct_size(hf, classic_can, 1);
+ }
+
+ if (!parent->active_channels) {
for (i = 0; i < GS_MAX_RX_URBS; i++) {
struct urb *urb;
u8 *buf;
@@ -601,7 +750,7 @@ static int gs_can_open(struct net_device *netdev)
/* alloc rx buffer */
buf = usb_alloc_coherent(dev->udev,
- sizeof(struct gs_host_frame),
+ dev->parent->hf_size_rx,
GFP_KERNEL,
&urb->transfer_dma);
if (!buf) {
@@ -617,9 +766,8 @@ static int gs_can_open(struct net_device *netdev)
usb_rcvbulkpipe(dev->udev,
GSUSB_ENDPOINT_IN),
buf,
- sizeof(struct gs_host_frame),
- gs_usb_receive_bulk_callback,
- parent);
+ dev->parent->hf_size_rx,
+ gs_usb_receive_bulk_callback, parent);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &parent->rx_submitted);
@@ -630,8 +778,7 @@ static int gs_can_open(struct net_device *netdev)
netif_device_detach(dev->netdev);
netdev_err(netdev,
- "usb_submit failed (err=%d)\n",
- rc);
+ "usb_submit failed (err=%d)\n", rc);
usb_unanchor_urb(urb);
usb_free_urb(urb);
@@ -650,8 +797,6 @@ static int gs_can_open(struct net_device *netdev)
return -ENOMEM;
/* flags */
- ctrlmode = dev->can.ctrlmode;
-
if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
flags |= GS_CAN_MODE_LOOP_BACK;
else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
@@ -672,13 +817,8 @@ static int gs_can_open(struct net_device *netdev)
rc = usb_control_msg(interface_to_usbdev(dev->iface),
usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
GS_USB_BREQ_MODE,
- USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_INTERFACE,
- dev->channel,
- 0,
- dm,
- sizeof(*dm),
- 1000);
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, dm, sizeof(*dm), 1000);
if (rc < 0) {
netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
@@ -690,6 +830,7 @@ static int gs_can_open(struct net_device *netdev)
dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ parent->active_channels++;
if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
netif_start_queue(netdev);
@@ -705,7 +846,8 @@ static int gs_can_close(struct net_device *netdev)
netif_stop_queue(netdev);
/* Stop polling */
- if (atomic_dec_and_test(&parent->active_channels))
+ parent->active_channels--;
+ if (!parent->active_channels)
usb_kill_anchored_urbs(&parent->rx_submitted);
/* Stop sending URBs */
@@ -753,16 +895,10 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
rc = usb_control_msg(interface_to_usbdev(dev->iface),
- usb_sndctrlpipe(interface_to_usbdev(dev->iface),
- 0),
+ usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
GS_USB_BREQ_IDENTIFY,
- USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_INTERFACE,
- dev->channel,
- 0,
- imode,
- sizeof(*imode),
- 100);
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, imode, sizeof(*imode), 100);
kfree(imode);
@@ -801,6 +937,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
struct net_device *netdev;
int rc;
struct gs_device_bt_const *bt_const;
+ struct gs_device_bt_const_extended *bt_const_extended;
u32 feature;
bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
@@ -812,11 +949,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
GS_USB_BREQ_BT_CONST,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- channel,
- 0,
- bt_const,
- sizeof(*bt_const),
- 1000);
+ channel, 0, bt_const, sizeof(*bt_const), 1000);
if (rc < 0) {
dev_err(&intf->dev,
@@ -873,6 +1006,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
dev->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC;
feature = le32_to_cpu(bt_const->feature);
+ dev->feature = FIELD_GET(GS_CAN_FEATURE_MASK, feature);
if (feature & GS_CAN_FEATURE_LISTEN_ONLY)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
@@ -885,7 +1019,37 @@ static struct gs_can *gs_make_candev(unsigned int channel,
if (feature & GS_CAN_FEATURE_ONE_SHOT)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
- SET_NETDEV_DEV(netdev, &intf->dev);
+ if (feature & GS_CAN_FEATURE_FD) {
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+ /* The data bit timing will be overwritten if
+ * GS_CAN_FEATURE_BT_CONST_EXT is set.
+ */
+ dev->can.data_bittiming_const = &dev->bt_const;
+ dev->can.do_set_data_bittiming = gs_usb_set_data_bittiming;
+ }
+
+ /* The CANtact Pro from LinkLayer Labs is based on the
+ * LPC54616 µC, which is affected by the NXP LPC USB transfer
+ * erratum. However, the current firmware (version 2) doesn't
+ * set the GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX bit. Set the
+ * feature GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX to work around
+ * this issue.
+ *
+ * For the GS_USB_BREQ_DATA_BITTIMING USB control message the
+ * CANtact Pro firmware uses a request value, which is already
+ * used by the candleLight firmware for a different purpose
+ * (GS_USB_BREQ_GET_USER_ID). Set the feature
+ * GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO to workaround this
+ * issue.
+ */
+ if (dev->udev->descriptor.idVendor == cpu_to_le16(USB_GSUSB_1_VENDOR_ID) &&
+ dev->udev->descriptor.idProduct == cpu_to_le16(USB_GSUSB_1_PRODUCT_ID) &&
+ dev->udev->manufacturer && dev->udev->product &&
+ !strcmp(dev->udev->manufacturer, "LinkLayer Labs") &&
+ !strcmp(dev->udev->product, "CANtact Pro") &&
+ (le32_to_cpu(dconf->sw_version) <= 2))
+ dev->feature |= GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX |
+ GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO;
if (le32_to_cpu(dconf->sw_version) > 1)
if (feature & GS_CAN_FEATURE_IDENTIFY)
@@ -893,6 +1057,45 @@ static struct gs_can *gs_make_candev(unsigned int channel,
kfree(bt_const);
+ /* fetch extended bit timing constants if device has feature
+ * GS_CAN_FEATURE_FD and GS_CAN_FEATURE_BT_CONST_EXT
+ */
+ if (feature & GS_CAN_FEATURE_FD &&
+ feature & GS_CAN_FEATURE_BT_CONST_EXT) {
+ bt_const_extended = kmalloc(sizeof(*bt_const_extended), GFP_KERNEL);
+ if (!bt_const_extended)
+ return ERR_PTR(-ENOMEM);
+
+ rc = usb_control_msg(interface_to_usbdev(intf),
+ usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
+ GS_USB_BREQ_BT_CONST_EXT,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ channel, 0, bt_const_extended,
+ sizeof(*bt_const_extended),
+ 1000);
+ if (rc < 0) {
+ dev_err(&intf->dev,
+ "Couldn't get extended bit timing const for channel (err=%d)\n",
+ rc);
+ kfree(bt_const_extended);
+ return ERR_PTR(rc);
+ }
+
+ strcpy(dev->data_bt_const.name, "gs_usb");
+ dev->data_bt_const.tseg1_min = le32_to_cpu(bt_const_extended->dtseg1_min);
+ dev->data_bt_const.tseg1_max = le32_to_cpu(bt_const_extended->dtseg1_max);
+ dev->data_bt_const.tseg2_min = le32_to_cpu(bt_const_extended->dtseg2_min);
+ dev->data_bt_const.tseg2_max = le32_to_cpu(bt_const_extended->dtseg2_max);
+ dev->data_bt_const.sjw_max = le32_to_cpu(bt_const_extended->dsjw_max);
+ dev->data_bt_const.brp_min = le32_to_cpu(bt_const_extended->dbrp_min);
+ dev->data_bt_const.brp_max = le32_to_cpu(bt_const_extended->dbrp_max);
+ dev->data_bt_const.brp_inc = le32_to_cpu(bt_const_extended->dbrp_inc);
+
+ dev->can.data_bittiming_const = &dev->data_bt_const;
+ }
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
+
rc = register_candev(dev->netdev);
if (rc) {
free_candev(dev->netdev);
@@ -913,6 +1116,8 @@ static void gs_destroy_candev(struct gs_can *dev)
static int gs_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct gs_host_frame *hf;
struct gs_usb *dev;
int rc = -ENOMEM;
unsigned int icount, i;
@@ -926,21 +1131,16 @@ static int gs_usb_probe(struct usb_interface *intf,
hconf->byte_order = cpu_to_le32(0x0000beef);
/* send host config */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+ rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
GS_USB_BREQ_HOST_FORMAT,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- 1,
- intf->cur_altsetting->desc.bInterfaceNumber,
- hconf,
- sizeof(*hconf),
- 1000);
+ 1, intf->cur_altsetting->desc.bInterfaceNumber,
+ hconf, sizeof(*hconf), 1000);
kfree(hconf);
if (rc < 0) {
- dev_err(&intf->dev, "Couldn't send data format (err=%d)\n",
- rc);
+ dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", rc);
return rc;
}
@@ -949,15 +1149,11 @@ static int gs_usb_probe(struct usb_interface *intf,
return -ENOMEM;
/* read device config */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
+ rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
GS_USB_BREQ_DEVICE_CONFIG,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- 1,
- intf->cur_altsetting->desc.bInterfaceNumber,
- dconf,
- sizeof(*dconf),
- 1000);
+ 1, intf->cur_altsetting->desc.bInterfaceNumber,
+ dconf, sizeof(*dconf), 1000);
if (rc < 0) {
dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n",
rc);
@@ -983,11 +1179,13 @@ static int gs_usb_probe(struct usb_interface *intf,
}
init_usb_anchor(&dev->rx_submitted);
-
- atomic_set(&dev->active_channels, 0);
+ /* Default to classic CAN; switch to CAN-FD if at least one of
+ * our channels supports CAN-FD.
+ */
+ dev->hf_size_rx = struct_size(hf, classic_can, 1);
usb_set_intfdata(intf, dev);
- dev->udev = interface_to_usbdev(intf);
+ dev->udev = udev;
for (i = 0; i < icount; i++) {
dev->canch[i] = gs_make_candev(i, intf, dconf);
@@ -1006,6 +1204,9 @@ static int gs_usb_probe(struct usb_interface *intf,
return rc;
}
dev->canch[i]->parent = dev;
+
+ if (dev->canch[i]->can.ctrlmode_supported & CAN_CTRLMODE_FD)
+ dev->hf_size_rx = struct_size(hf, canfd, 1);
}
kfree(dconf);
@@ -1015,8 +1216,9 @@ static int gs_usb_probe(struct usb_interface *intf,
static void gs_usb_disconnect(struct usb_interface *intf)
{
- unsigned i;
struct gs_usb *dev = usb_get_intfdata(intf);
+ unsigned int i;
+
usb_set_intfdata(intf, NULL);
if (!dev) {
@@ -1037,16 +1239,20 @@ static const struct usb_device_id gs_usb_table[] = {
USB_GSUSB_1_PRODUCT_ID, 0) },
{ USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID,
USB_CANDLELIGHT_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_CES_CANEXT_FD_VENDOR_ID,
+ USB_CES_CANEXT_FD_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_ABE_CANDEBUGGER_FD_VENDOR_ID,
+ USB_ABE_CANDEBUGGER_FD_PRODUCT_ID, 0) },
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, gs_usb_table);
static struct usb_driver gs_usb_driver = {
- .name = "gs_usb",
- .probe = gs_usb_probe,
+ .name = "gs_usb",
+ .probe = gs_usb_probe,
.disconnect = gs_usb_disconnect,
- .id_table = gs_usb_table,
+ .id_table = gs_usb_table,
};
module_usb_driver(gs_usb_driver);
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index c4b4d3d0a387..e67658b53d02 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -205,12 +205,10 @@ MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
int kvaser_usb_send_cmd(const struct kvaser_usb *dev, void *cmd, int len)
{
- int actual_len; /* Not used */
-
return usb_bulk_msg(dev->udev,
usb_sndbulkpipe(dev->udev,
dev->bulk_out->bEndpointAddress),
- cmd, len, &actual_len, KVASER_USB_TIMEOUT);
+ cmd, len, NULL, KVASER_USB_TIMEOUT);
}
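Dropping the write-only local is safe because usb_bulk_msg() treats its length pointer as optional; for reference, the signature from include/linux/usb.h:

	int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
			 void *data, int len, int *actual_length, int timeout);

	/* actual_length may be NULL when the caller does not care how many
	 * bytes were actually transferred.
	 */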
int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index c7c41d1fd038..5ae0d7c017cc 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -1392,7 +1392,7 @@ static int ucan_probe(struct usb_interface *intf,
* Stage 3 for the final driver initialisation.
*/
- /* Prepare Memory for control transferes */
+ /* Prepare Memory for control transfers */
ctl_msg_buffer = devm_kzalloc(&udev->dev,
sizeof(union ucan_ctl_payload),
GFP_KERNEL);
@@ -1526,7 +1526,7 @@ static int ucan_probe(struct usb_interface *intf,
ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0,
sizeof(union ucan_ctl_payload));
if (ret > 0) {
- /* copy string while ensuring zero terminiation */
+ /* copy string while ensuring zero termination */
strncpy(firmware_str, up->ctl_msg_buffer->raw,
sizeof(union ucan_ctl_payload));
firmware_str[sizeof(union ucan_ctl_payload)] = '\0';
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index c42f18845b02..a15619d883ec 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -80,7 +80,7 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
skb->dev = dev;
skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_rx_ni(skb);
+ netif_rx(skb);
}
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 47ccc15a3486..577a80300514 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -33,28 +33,33 @@ struct vxcan_priv {
struct net_device __rcu *peer;
};
-static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev)
{
struct vxcan_priv *priv = netdev_priv(dev);
struct net_device *peer;
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ struct canfd_frame *cfd = (struct canfd_frame *)oskb->data;
struct net_device_stats *peerstats, *srcstats = &dev->stats;
+ struct sk_buff *skb;
u8 len;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dropped_invalid_skb(dev, oskb))
return NETDEV_TX_OK;
rcu_read_lock();
peer = rcu_dereference(priv->peer);
if (unlikely(!peer)) {
- kfree_skb(skb);
+ kfree_skb(oskb);
dev->stats.tx_dropped++;
goto out_unlock;
}
- skb = can_create_echo_skb(skb);
- if (!skb)
+ skb = skb_clone(oskb, GFP_ATOMIC);
+ if (skb) {
+ consume_skb(oskb);
+ } else {
+ kfree_skb(oskb);
goto out_unlock;
+ }
/* reset CAN GW hop counter */
skb->csum_start = 0;
@@ -63,7 +68,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
skb->ip_summed = CHECKSUM_UNNECESSARY;
len = cfd->can_id & CAN_RTR_FLAG ? 0 : cfd->len;
- if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
+ if (netif_rx(skb) == NET_RX_SUCCESS) {
srcstats->tx_packets++;
srcstats->tx_bytes += len;
peerstats = &peer->stats;
@@ -148,7 +153,7 @@ static void vxcan_setup(struct net_device *dev)
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 0;
- dev->flags = (IFF_NOARP|IFF_ECHO);
+ dev->flags = IFF_NOARP;
dev->netdev_ops = &vxcan_netdev_ops;
dev->needs_free_netdev = true;
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 1674b561c9a2..e562c5ab1149 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -1215,10 +1215,11 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
}
if (work_done < quota) {
- napi_complete_done(napi, work_done);
- ier = priv->read_reg(priv, XCAN_IER_OFFSET);
- ier |= xcan_rx_int_mask(priv);
- priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+ if (napi_complete_done(napi, work_done)) {
+ ier = priv->read_reg(priv, XCAN_IER_OFFSET);
+ ier |= xcan_rx_int_mask(priv);
+ priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+ }
}
return work_done;
}
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 0029d279616f..37a3dabdce31 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -68,17 +68,7 @@ config NET_DSA_QCA8K
This enables support for the Qualcomm Atheros QCA8K Ethernet
switch chips.
-config NET_DSA_REALTEK_SMI
- tristate "Realtek SMI Ethernet switch family support"
- select NET_DSA_TAG_RTL4_A
- select NET_DSA_TAG_RTL8_4
- select FIXED_PHY
- select IRQ_DOMAIN
- select REALTEK_PHY
- select REGMAP
- help
- This enables support for the Realtek SMI-based switch
- chips, currently only RTL8366RB.
+source "drivers/net/dsa/realtek/Kconfig"
config NET_DSA_SMSC_LAN9303
tristate
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 8da1569a34e6..e73838c12256 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -9,8 +9,6 @@ obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
-obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
-realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o rtl8365mb.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
@@ -23,5 +21,6 @@ obj-y += microchip/
obj-y += mv88e6xxx/
obj-y += ocelot/
obj-y += qca/
+obj-y += realtek/
obj-y += sja1105/
obj-y += xrs700x/
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 3867f3d4545f..77501f9c5915 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1309,46 +1309,50 @@ void b53_port_event(struct dsa_switch *ds, int port)
}
EXPORT_SYMBOL(b53_port_event);
-void b53_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
struct b53_device *dev = ds->priv;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- if (dev->ops->serdes_phylink_validate)
- dev->ops->serdes_phylink_validate(dev, port, mask, state);
+ /* Internal ports need GMII for PHYLIB */
+ __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces);
+
+ /* These switches appear to support MII and RevMII too, but beyond
+ * this, the code gives very few clues. FIXME: We probably need more
+ * interface modes here.
+ *
+ * According to b53_srab_mux_init(), ports 3..5 can support:
+ * SGMII, MII, GMII, RGMII or INTERNAL depending on the MUX setting.
+ * However, the interface mode read from the MUX configuration is
+ * not passed back to DSA, so phylink uses NA.
+ * DT can specify RGMII for ports 0, 1.
+ * For MDIO, port 8 can be RGMII_TXID.
+ */
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces);
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
- /* With the exclusion of 5325/5365, MII, Reverse MII and 802.3z, we
- * support Gigabit, including Half duplex.
+ /* 5325/5365 are not capable of gigabit speeds, everything else is.
+ * Note: the original code also excluded Gigabit for MII, RevMII
+ * and 802.3z modes. MII and RevMII are not able to work above 100M,
+ * so will be excluded by the generic validator implementation.
+ * However, the exclusion of Gigabit for 802.3z just seems wrong.
*/
- if (state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII &&
- !phy_interface_mode_is_8023z(state->interface) &&
- !(is5325(dev) || is5365(dev))) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- }
+ if (!(is5325(dev) || is5365(dev)))
+ config->mac_capabilities |= MAC_1000;
- if (!phy_interface_mode_is_8023z(state->interface)) {
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- }
+ /* Get the implementation specific capabilities */
+ if (dev->ops->phylink_get_caps)
+ dev->ops->phylink_get_caps(dev, port, config);
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- phylink_helper_basex_speed(state);
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
+ */
+ config->legacy_pre_march2020 = false;
}
-EXPORT_SYMBOL(b53_phylink_validate);
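For orientation, the general shape of a phylink_get_caps implementation: the driver only declares which interface modes and MAC capabilities it has, and phylink's generic validator derives the ethtool link modes from that. A generic sketch, not b53-specific:

	static void example_phylink_get_caps(struct dsa_switch *ds, int port,
					     struct phylink_config *config)
	{
		__set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);

		config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
					   MAC_10 | MAC_100 | MAC_1000;
	}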
int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
struct phylink_link_state *state)
@@ -1704,7 +1708,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
}
int b53_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
@@ -1724,7 +1729,8 @@ int b53_fdb_add(struct dsa_switch *ds, int port,
EXPORT_SYMBOL(b53_fdb_add);
int b53_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
@@ -1825,7 +1831,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
EXPORT_SYMBOL(b53_fdb_dump);
int b53_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
@@ -1845,7 +1852,8 @@ int b53_mdb_add(struct dsa_switch *ds, int port,
EXPORT_SYMBOL(b53_mdb_add);
int b53_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
@@ -1861,7 +1869,7 @@ int b53_mdb_del(struct dsa_switch *ds, int port,
EXPORT_SYMBOL(b53_mdb_del);
int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload, struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
@@ -2102,7 +2110,8 @@ out:
EXPORT_SYMBOL(b53_get_tag_protocol);
int b53_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
+ struct dsa_mall_mirror_tc_entry *mirror, bool ingress,
+ struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
u16 reg, loc;
@@ -2186,7 +2195,7 @@ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
{
int ret;
- ret = phy_init_eee(phy, 0);
+ ret = phy_init_eee(phy, false);
if (ret)
return 0;
@@ -2259,7 +2268,7 @@ static const struct dsa_switch_ops b53_switch_ops = {
.phy_read = b53_phy_read16,
.phy_write = b53_phy_write16,
.adjust_link = b53_adjust_link,
- .phylink_validate = b53_phylink_validate,
+ .phylink_get_caps = b53_phylink_get_caps,
.phylink_mac_link_state = b53_phylink_mac_link_state,
.phylink_mac_config = b53_phylink_mac_config,
.phylink_mac_an_restart = b53_phylink_mac_an_restart,
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index b41dc8ac2ca8..3085b6cc7d40 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -46,6 +46,8 @@ struct b53_io_ops {
int (*phy_write16)(struct b53_device *dev, int addr, int reg, u16 value);
int (*irq_enable)(struct b53_device *dev, int port);
void (*irq_disable)(struct b53_device *dev, int port);
+ void (*phylink_get_caps)(struct b53_device *dev, int port,
+ struct phylink_config *config);
u8 (*serdes_map_lane)(struct b53_device *dev, int port);
int (*serdes_link_state)(struct b53_device *dev, int port,
struct phylink_link_state *state);
@@ -56,9 +58,6 @@ struct b53_io_ops {
void (*serdes_link_set)(struct b53_device *dev, int port,
unsigned int mode, phy_interface_t interface,
bool link_up);
- void (*serdes_phylink_validate)(struct b53_device *dev, int port,
- unsigned long *supported,
- struct phylink_link_state *state);
};
#define B53_INVALID_LANE 0xff
@@ -325,7 +324,7 @@ void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
int b53_get_sset_count(struct dsa_switch *ds, int port, int sset);
void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data);
int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
- bool *tx_fwd_offload);
+ bool *tx_fwd_offload, struct netlink_ext_ack *extack);
void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge);
void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
void b53_br_fast_age(struct dsa_switch *ds, int port);
@@ -337,9 +336,6 @@ int b53_br_flags(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack);
int b53_setup_devlink_resources(struct dsa_switch *ds);
void b53_port_event(struct dsa_switch *ds, int port);
-void b53_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state);
int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
struct phylink_link_state *state);
void b53_phylink_mac_config(struct dsa_switch *ds, int port,
@@ -363,17 +359,22 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
int b53_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
int b53_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid);
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
int b53_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid);
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
int b53_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data);
int b53_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
int b53_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
int b53_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
+ struct dsa_mall_mirror_tc_entry *mirror, bool ingress,
+ struct netlink_ext_ack *extack);
enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mprot);
void b53_mirror_del(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c
index 5ae3d9783b68..555e5b372321 100644
--- a/drivers/net/dsa/b53/b53_serdes.c
+++ b/drivers/net/dsa/b53/b53_serdes.c
@@ -158,9 +158,8 @@ void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
}
EXPORT_SYMBOL(b53_serdes_link_set);
-void b53_serdes_phylink_validate(struct b53_device *dev, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+void b53_serdes_phylink_get_caps(struct b53_device *dev, int port,
+ struct phylink_config *config)
{
u8 lane = b53_serdes_map_lane(dev, port);
@@ -169,16 +168,24 @@ void b53_serdes_phylink_validate(struct b53_device *dev, int port,
switch (lane) {
case 0:
- phylink_set(supported, 2500baseX_Full);
+ /* It appears lane 0 supports 2500base-X and 1000base-X */
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_2500FD;
fallthrough;
case 1:
- phylink_set(supported, 1000baseX_Full);
+ /* It appears lane 1 only supports 1000base-X and SGMII */
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_1000FD;
break;
default:
break;
}
}
-EXPORT_SYMBOL(b53_serdes_phylink_validate);
+EXPORT_SYMBOL(b53_serdes_phylink_get_caps);
int b53_serdes_init(struct b53_device *dev, int port)
{
diff --git a/drivers/net/dsa/b53/b53_serdes.h b/drivers/net/dsa/b53/b53_serdes.h
index 55d280fe38e4..f47d5caa7557 100644
--- a/drivers/net/dsa/b53/b53_serdes.h
+++ b/drivers/net/dsa/b53/b53_serdes.h
@@ -115,9 +115,8 @@ void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode,
void b53_serdes_an_restart(struct b53_device *dev, int port);
void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
phy_interface_t interface, bool link_up);
-void b53_serdes_phylink_validate(struct b53_device *dev, int port,
- unsigned long *supported,
- struct phylink_link_state *state);
+void b53_serdes_phylink_get_caps(struct b53_device *dev, int port,
+ struct phylink_config *config);
#if IS_ENABLED(CONFIG_B53_SERDES)
int b53_serdes_init(struct b53_device *dev, int port);
#else
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
index 2b88f03e5252..0e54b2a0c211 100644
--- a/drivers/net/dsa/b53/b53_spi.c
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -314,7 +314,7 @@ static int b53_spi_probe(struct spi_device *spi)
return 0;
}
-static int b53_spi_remove(struct spi_device *spi)
+static void b53_spi_remove(struct spi_device *spi)
{
struct b53_device *dev = spi_get_drvdata(spi);
@@ -322,8 +322,6 @@ static int b53_spi_remove(struct spi_device *spi)
b53_switch_remove(dev);
spi_set_drvdata(spi, NULL);
-
- return 0;
}
static void b53_spi_shutdown(struct spi_device *spi)
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 4591bb1c05d2..c51b716657db 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -443,6 +443,39 @@ static void b53_srab_irq_disable(struct b53_device *dev, int port)
}
}
+static void b53_srab_phylink_get_caps(struct b53_device *dev, int port,
+ struct phylink_config *config)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ struct b53_srab_port_priv *p = &priv->port_intrs[port];
+
+ switch (p->mode) {
+ case PHY_INTERFACE_MODE_SGMII:
+#if IS_ENABLED(CONFIG_B53_SERDES)
+ /* If p->mode indicates SGMII mode, that essentially means we
+ * are using a serdes. Ask the serdes for the capabilities.
+ */
+ b53_serdes_phylink_get_caps(dev, port, config);
+#endif
+ break;
+
+ case PHY_INTERFACE_MODE_NA:
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ /* If we support RGMII, support all RGMII modes, since
+ * that dictates the PHY delay settings.
+ */
+ phy_interface_set_rgmii(config->supported_interfaces);
+ break;
+
+ default:
+ /* Some other mode (e.g. MII, GMII etc) */
+ __set_bit(p->mode, config->supported_interfaces);
+ break;
+ }
+}
+
static const struct b53_io_ops b53_srab_ops = {
.read8 = b53_srab_read8,
.read16 = b53_srab_read16,
@@ -456,13 +489,13 @@ static const struct b53_io_ops b53_srab_ops = {
.write64 = b53_srab_write64,
.irq_enable = b53_srab_irq_enable,
.irq_disable = b53_srab_irq_disable,
+ .phylink_get_caps = b53_srab_phylink_get_caps,
#if IS_ENABLED(CONFIG_B53_SERDES)
.serdes_map_lane = b53_srab_serdes_map_lane,
.serdes_link_state = b53_serdes_link_state,
.serdes_config = b53_serdes_config,
.serdes_an_restart = b53_serdes_an_restart,
.serdes_link_set = b53_serdes_link_set,
- .serdes_phylink_validate = b53_serdes_phylink_validate,
#endif
};
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 6afb5db8244c..cf82b1fa9725 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -712,49 +712,25 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
PHY_BRCM_IDDQ_SUSPEND;
}
-static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void bcm_sf2_sw_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
+ unsigned long *interfaces = config->supported_interfaces;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL &&
- state->interface != PHY_INTERFACE_MODE_MOCA) {
- linkmode_zero(supported);
- if (port != core_readl(priv, CORE_IMP0_PRT_ID))
- dev_err(ds->dev,
- "Unsupported interface: %d for port %d\n",
- state->interface, port);
- return;
- }
-
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- /* With the exclusion of MII and Reverse MII, we support Gigabit,
- * including Half duplex
- */
- if (state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
+ if (priv->int_phy_mask & BIT(port)) {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces);
+ } else if (priv->moca_port == port) {
+ __set_bit(PHY_INTERFACE_MODE_MOCA, interfaces);
+ } else {
+ __set_bit(PHY_INTERFACE_MODE_MII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVMII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII, interfaces);
+ phy_interface_set_rgmii(interfaces);
}
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
}
static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
@@ -1221,7 +1197,7 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.get_sset_count = bcm_sf2_sw_get_sset_count,
.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
.get_phy_flags = bcm_sf2_sw_get_phy_flags,
- .phylink_validate = bcm_sf2_sw_validate,
+ .phylink_get_caps = bcm_sf2_sw_get_caps,
.phylink_mac_config = bcm_sf2_sw_mac_config,
.phylink_mac_link_down = bcm_sf2_sw_mac_link_down,
.phylink_mac_link_up = bcm_sf2_sw_mac_link_up,
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index 33daaf10c488..263e41191c29 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -168,7 +168,8 @@ static int dsa_loop_phy_write(struct dsa_switch *ds, int port,
static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n",
__func__, port, bridge.dev->name);
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 726f267cb228..ac1f3b3a7040 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -675,7 +675,8 @@ static int hellcreek_bridge_flags(struct dsa_switch *ds, int port,
static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct hellcreek *hellcreek = ds->priv;
@@ -827,7 +828,8 @@ static int hellcreek_fdb_get(struct hellcreek *hellcreek,
}
static int hellcreek_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct hellcreek_fdb_entry entry = { 0 };
struct hellcreek *hellcreek = ds->priv;
@@ -872,7 +874,8 @@ out:
}
static int hellcreek_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct hellcreek_fdb_entry entry = { 0 };
struct hellcreek *hellcreek = ds->priv;
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
index b3bc948d6145..ffd06cf8c44f 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
@@ -331,7 +331,7 @@ static void hellcreek_get_rxts(struct hellcreek *hellcreek,
shwt = skb_hwtstamps(skb);
memset(shwt, 0, sizeof(*shwt));
shwt->hwtstamp = ns_to_ktime(ns);
- netif_rx_ni(skb);
+ netif_rx(skb);
}
}
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index 3969d89fa4db..e03ff1f267bb 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1111,7 +1111,8 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port)
static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct lan9303 *chip = ds->priv;
@@ -1188,7 +1189,8 @@ static void lan9303_port_fast_age(struct dsa_switch *ds, int port)
}
static int lan9303_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
@@ -1200,8 +1202,8 @@ static int lan9303_port_fdb_add(struct dsa_switch *ds, int port,
}
static int lan9303_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
-
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
@@ -1245,7 +1247,8 @@ static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
}
static int lan9303_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
int err;
@@ -1260,7 +1263,8 @@ static int lan9303_port_mdb_add(struct dsa_switch *ds, int port,
}
static int lan9303_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 8a7a8093a156..a416240d001b 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -213,6 +213,7 @@
#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_2_LCHKL BIT(2) /* Frame Length Check Long Enable */
#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Length */
/* Ethernet Switch Fetch DMA Port Control Register */
@@ -239,6 +240,15 @@
#define XRX200_GPHY_FW_ALIGN (16 * 1024)
+/* Maximum packet size supported by the switch. In theory this should be 10240,
+ * but long packets currently cause lock-ups with an MTU of over 2526. Medium
+ * packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP
+ * over 2526), hence an MTU value of 2400 seems safe. This issue only affects
+ * packet reception. This is probably caused by the PPA engine, which is on the
+ * RX part of the device. Packet transmission works properly up to 10240.
+ */
+#define GSWIP_MAX_PACKET_LENGTH 2400
+
struct gswip_hw_info {
int max_ports;
int cpu_port;
@@ -863,10 +873,6 @@ static int gswip_setup(struct dsa_switch *ds)
gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
GSWIP_PCE_PCTRL_0p(cpu_port));
- gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
- GSWIP_MAC_CTRL_2p(cpu_port));
- gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN,
- GSWIP_MAC_FLEN);
gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
GSWIP_BM_QUEUE_GCTRL);
@@ -883,6 +889,8 @@ static int gswip_setup(struct dsa_switch *ds)
return err;
}
+ ds->mtu_enforcement_ingress = true;
+
gswip_port_enable(ds, cpu_port, NULL);
ds->configure_vlan_while_not_filtering = false;
@@ -1152,7 +1160,8 @@ static int gswip_vlan_remove(struct gswip_priv *priv,
static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct net_device *br = bridge.dev;
struct gswip_priv *priv = ds->priv;
@@ -1389,13 +1398,15 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port,
}
static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
return gswip_port_fdb(ds, port, addr, vid, true);
}
static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
return gswip_port_fdb(ds, port, addr, vid, false);
}
@@ -1446,6 +1457,39 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
return 0;
}
+static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ /* Includes 8 bytes for special header. */
+ return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
+
+static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct gswip_priv *priv = ds->priv;
+ int cpu_port = priv->hw_info->cpu_port;
+
+ /* The CPU port always carries the maximum MTU of all user ports, so use
+ * it to set the switch frame size, including the 8-byte special header.
+ */
+ if (port == cpu_port) {
+ new_mtu += 8;
+ gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN,
+ GSWIP_MAC_FLEN);
+ }
+
+ /* Enable MLEN for ports with non-standard MTUs, including the special
+ * header on the CPU port added above.
+ */
+ if (new_mtu != ETH_DATA_LEN)
+ gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
+ GSWIP_MAC_CTRL_2p(port));
+ else
+ gswip_switch_mask(priv, GSWIP_MAC_CTRL_2_MLEN, 0,
+ GSWIP_MAC_CTRL_2p(port));
+
+ return 0;
+}
+
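A worked example of the arithmetic above, using the kernel's VLAN_ETH_HLEN (18) and ETH_FCS_LEN (4), for the default MTU of 1500 on the CPU port:

	int mtu = 1500;						/* user MTU */
	int flen = VLAN_ETH_HLEN + (mtu + 8) + ETH_FCS_LEN;	/* = 1530 */

	/* GSWIP_MAC_CTRL_2_MLEN is also set, because the effective MTU of
	 * 1508 (including the special header) differs from ETH_DATA_LEN.
	 */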
static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
@@ -1791,6 +1835,8 @@ static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
.port_fdb_add = gswip_port_fdb_add,
.port_fdb_del = gswip_port_fdb_del,
.port_fdb_dump = gswip_port_fdb_dump,
+ .port_change_mtu = gswip_port_change_mtu,
+ .port_max_mtu = gswip_port_max_mtu,
.phylink_get_caps = gswip_xrx200_phylink_get_caps,
.phylink_mac_config = gswip_phylink_mac_config,
.phylink_mac_link_down = gswip_phylink_mac_link_down,
@@ -1815,6 +1861,8 @@ static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
.port_fdb_add = gswip_port_fdb_add,
.port_fdb_del = gswip_port_fdb_del,
.port_fdb_dump = gswip_port_fdb_dump,
+ .port_change_mtu = gswip_port_change_mtu,
+ .port_max_mtu = gswip_port_max_mtu,
.phylink_get_caps = gswip_xrx300_phylink_get_caps,
.phylink_mac_config = gswip_phylink_mac_config,
.phylink_mac_link_down = gswip_phylink_mac_link_down,
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
index 9d611895d3cf..03da369675c6 100644
--- a/drivers/net/dsa/microchip/ksz8.h
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -16,6 +16,7 @@ enum ksz_regs {
REG_IND_DATA_HI,
REG_IND_DATA_LO,
REG_IND_MIB_CHECK,
+ REG_IND_BYTE,
P_FORCE_CTRL,
P_LINK_STATUS,
P_LOCAL_CTRL,
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 991b9c6b6ce7..b2752978cb09 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -33,6 +33,7 @@ static const u8 ksz8795_regs[] = {
[REG_IND_DATA_HI] = 0x71,
[REG_IND_DATA_LO] = 0x75,
[REG_IND_MIB_CHECK] = 0x74,
+ [REG_IND_BYTE] = 0xA0,
[P_FORCE_CTRL] = 0x0C,
[P_LINK_STATUS] = 0x0E,
[P_LOCAL_CTRL] = 0x07,
@@ -222,6 +223,25 @@ static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
bits, set ? bits : 0);
}
+static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
+{
+ struct ksz8 *ksz8 = dev->priv;
+ const u8 *regs = ksz8->regs;
+ u16 ctrl_addr;
+ int ret = 0;
+
+ mutex_lock(&dev->alu_mutex);
+
+ ctrl_addr = IND_ACC_TABLE(table) | addr;
+ ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
+ if (!ret)
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
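The indirect access is a two-step sequence: stage the data byte in REG_IND_BYTE, then write the control word, which selects the table and triggers the transfer. The EEE erratum workaround below uses it exactly once:

	/* Clear the high byte of EEE global register 2 to stop the EEE
	 * next-page exchange (see ksz8_handle_global_errata() below).
	 */
	ret = ksz8_ind_write8(dev, TABLE_EEE, REG_IND_EEE_GLOB2_HI, 0);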
static int ksz8_reset_switch(struct ksz_device *dev)
{
if (ksz_is_ksz88x3(dev)) {
@@ -1213,7 +1233,7 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
static int ksz8_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+ bool ingress, struct netlink_ext_ack *extack)
{
struct ksz_device *dev = ds->priv;
@@ -1391,6 +1411,23 @@ static void ksz8_config_cpu_port(struct dsa_switch *ds)
}
}
+static int ksz8_handle_global_errata(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret = 0;
+
+ /* KSZ87xx Errata DS80000687C.
+ * Module 2: Link drops with some EEE link partners.
+ * An issue with the EEE next page exchange between the
+ * KSZ879x/KSZ877x/KSZ876x and some EEE link partners may result in
+ * the link dropping.
+ */
+ if (dev->ksz87xx_eee_link_erratum)
+ ret = ksz8_ind_write8(dev, TABLE_EEE, REG_IND_EEE_GLOB2_HI, 0);
+
+ return ret;
+}
+
static int ksz8_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
@@ -1458,30 +1495,25 @@ static int ksz8_setup(struct dsa_switch *ds)
ds->configure_vlan_while_not_filtering = false;
- return 0;
+ return ksz8_handle_global_errata(ds);
}
-static void ksz8_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void ksz8_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct ksz_device *dev = ds->priv;
if (port == dev->cpu_port) {
- if (state->interface != PHY_INTERFACE_MODE_RMII &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_NA)
- goto unsupported;
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ config->supported_interfaces);
} else {
- if (state->interface != PHY_INTERFACE_MODE_INTERNAL &&
- state->interface != PHY_INTERFACE_MODE_NA)
- goto unsupported;
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
}
- /* Allow all the expected bits */
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
+ config->mac_capabilities = MAC_10 | MAC_100;
/* Silicon Errata Sheet (DS80000830A):
* "Port 1 does not respond to received flow control PAUSE frames"
@@ -1489,27 +1521,11 @@ static void ksz8_validate(struct dsa_switch *ds, int port,
* switches.
*/
if (!ksz_is_ksz88x3(dev) || port)
- phylink_set(mask, Pause);
+ config->mac_capabilities |= MAC_SYM_PAUSE;
/* Asym pause is not supported on KSZ8863 and KSZ8873 */
if (!ksz_is_ksz88x3(dev))
- phylink_set(mask, Asym_Pause);
-
- /* 10M and 100M are only supported */
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- return;
-
-unsupported:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported interface: %s, port: %d\n",
- phy_modes(state->interface), port);
+ config->mac_capabilities |= MAC_ASYM_PAUSE;
}
static const struct dsa_switch_ops ksz8_switch_ops = {
@@ -1518,7 +1534,7 @@ static const struct dsa_switch_ops ksz8_switch_ops = {
.setup = ksz8_setup,
.phy_read = ksz_phy_read16,
.phy_write = ksz_phy_write16,
- .phylink_validate = ksz8_validate,
+ .phylink_get_caps = ksz8_get_caps,
.phylink_mac_link_down = ksz_mac_link_down,
.port_enable = ksz_enable_port,
.get_strings = ksz8_get_strings,
@@ -1596,6 +1612,7 @@ struct ksz_chip_data {
int num_statics;
int cpu_ports;
int port_cnt;
+ bool ksz87xx_eee_link_erratum;
};
static const struct ksz_chip_data ksz8_switch_chips[] = {
@@ -1607,6 +1624,7 @@ static const struct ksz_chip_data ksz8_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
+ .ksz87xx_eee_link_erratum = true,
},
{
/*
@@ -1630,6 +1648,7 @@ static const struct ksz_chip_data ksz8_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 4, /* total cpu and user ports */
+ .ksz87xx_eee_link_erratum = true,
},
{
.chip_id = 0x8765,
@@ -1639,6 +1658,7 @@ static const struct ksz_chip_data ksz8_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
+ .ksz87xx_eee_link_erratum = true,
},
{
.chip_id = 0x8830,
@@ -1673,6 +1693,8 @@ static int ksz8_switch_init(struct ksz_device *dev)
dev->host_mask = chip->cpu_ports;
dev->port_mask = (BIT(dev->phy_port_cnt) - 1) |
chip->cpu_ports;
+ dev->ksz87xx_eee_link_erratum =
+ chip->ksz87xx_eee_link_erratum;
break;
}
}
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index 6b40bc25f7ff..d74defcd86b4 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -812,6 +812,10 @@
#define IND_ACC_TABLE(table) ((table) << 8)
+/* EEE (indirect) register offsets */
+#define REG_IND_EEE_GLOB2_LO 0x34
+#define REG_IND_EEE_GLOB2_HI 0x35
+
/* Driver set switch broadcast storm protection at 10% rate. */
#define BROADCAST_STORM_PROT_RATE 10
diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c
index 866767b70d65..5f8d94aee774 100644
--- a/drivers/net/dsa/microchip/ksz8795_spi.c
+++ b/drivers/net/dsa/microchip/ksz8795_spi.c
@@ -87,7 +87,7 @@ static int ksz8795_spi_probe(struct spi_device *spi)
return 0;
}
-static int ksz8795_spi_remove(struct spi_device *spi)
+static void ksz8795_spi_remove(struct spi_device *spi)
{
struct ksz_device *dev = spi_get_drvdata(spi);
@@ -95,8 +95,6 @@ static int ksz8795_spi_remove(struct spi_device *spi)
ksz_switch_remove(dev);
spi_set_drvdata(spi, NULL);
-
- return 0;
}
static void ksz8795_spi_shutdown(struct spi_device *spi)
@@ -124,12 +122,23 @@ static const struct of_device_id ksz8795_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, ksz8795_dt_ids);
+static const struct spi_device_id ksz8795_spi_ids[] = {
+ { "ksz8765" },
+ { "ksz8794" },
+ { "ksz8795" },
+ { "ksz8863" },
+ { "ksz8873" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, ksz8795_spi_ids);
+
static struct spi_driver ksz8795_spi_driver = {
.driver = {
.name = "ksz8795-switch",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(ksz8795_dt_ids),
},
+ .id_table = ksz8795_spi_ids,
.probe = ksz8795_spi_probe,
.remove = ksz8795_spi_remove,
.shutdown = ksz8795_spi_shutdown,
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 353b5f981740..8222c8a6c5ec 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -11,6 +11,7 @@
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <net/dsa.h>
#include <net/switchdev.h>
@@ -64,6 +65,100 @@ static const struct {
{ 0x83, "tx_discards" },
};
+struct ksz9477_stats_raw {
+ u64 rx_hi;
+ u64 rx_undersize;
+ u64 rx_fragments;
+ u64 rx_oversize;
+ u64 rx_jabbers;
+ u64 rx_symbol_err;
+ u64 rx_crc_err;
+ u64 rx_align_err;
+ u64 rx_mac_ctrl;
+ u64 rx_pause;
+ u64 rx_bcast;
+ u64 rx_mcast;
+ u64 rx_ucast;
+ u64 rx_64_or_less;
+ u64 rx_65_127;
+ u64 rx_128_255;
+ u64 rx_256_511;
+ u64 rx_512_1023;
+ u64 rx_1024_1522;
+ u64 rx_1523_2000;
+ u64 rx_2001;
+ u64 tx_hi;
+ u64 tx_late_col;
+ u64 tx_pause;
+ u64 tx_bcast;
+ u64 tx_mcast;
+ u64 tx_ucast;
+ u64 tx_deferred;
+ u64 tx_total_col;
+ u64 tx_exc_col;
+ u64 tx_single_col;
+ u64 tx_mult_col;
+ u64 rx_total;
+ u64 tx_total;
+ u64 rx_discards;
+ u64 tx_discards;
+};
+
+static void ksz9477_r_mib_stats64(struct ksz_device *dev, int port)
+{
+ struct rtnl_link_stats64 *stats;
+ struct ksz9477_stats_raw *raw;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+ stats = &mib->stats64;
+ raw = (struct ksz9477_stats_raw *)mib->counters;
+
+ spin_lock(&mib->stats64_lock);
+
+ stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast;
+ stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast;
+
+ /* HW counters are counting bytes + FCS which is not acceptable
+ * for rtnl_link_stats64 interface
+ */
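+ /* Worked example with hypothetical numbers: 100 received unicast
+ * frames of 64 bytes on the wire give rx_total = 6400 and
+ * rx_packets = 100, so rx_bytes is reported as
+ * 6400 - 100 * ETH_FCS_LEN = 6000.
+ */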
+ stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN;
+ stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN;
+
+ stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments +
+ raw->rx_oversize;
+
+ stats->rx_crc_errors = raw->rx_crc_err;
+ stats->rx_frame_errors = raw->rx_align_err;
+ stats->rx_dropped = raw->rx_discards;
+ stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
+ stats->rx_frame_errors + stats->rx_dropped;
+
+ stats->tx_window_errors = raw->tx_late_col;
+ stats->tx_fifo_errors = raw->tx_discards;
+ stats->tx_aborted_errors = raw->tx_exc_col;
+ stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors +
+ stats->tx_aborted_errors;
+
+ stats->multicast = raw->rx_mcast;
+ stats->collisions = raw->tx_total_col;
+
+ spin_unlock(&mib->stats64_lock);
+}
+
+static void ksz9477_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+
+ spin_lock(&mib->stats64_lock);
+ memcpy(s, &mib->stats64, sizeof(*s));
+ spin_unlock(&mib->stats64_lock);
+}
+
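The two new functions split the stats64 work into a slow path and a fast
path: the periodic MIB worker converts the raw counters into a cached
rtnl_link_stats64 under a spinlock, while the .get_stats64 callback only
copies that snapshot and so never has to touch the SPI/I2C-attached
registers. A minimal sketch of the pattern, with illustrative names:

	#include <linux/if_link.h>
	#include <linux/spinlock.h>
	#include <linux/string.h>

	struct stats_cache {
		spinlock_t lock;		/* protects snap */
		struct rtnl_link_stats64 snap;	/* last converted counters */
	};

	/* Slow path: called from the periodic worker after a counter read. */
	static void stats_cache_update(struct stats_cache *c,
				       const struct rtnl_link_stats64 *fresh)
	{
		spin_lock(&c->lock);
		c->snap = *fresh;
		spin_unlock(&c->lock);
	}

	/* Fast path: the .get_stats64 callback only copies the snapshot. */
	static void stats_cache_read(struct stats_cache *c,
				     struct rtnl_link_stats64 *out)
	{
		spin_lock(&c->lock);
		memcpy(out, &c->snap, sizeof(*out));
		spin_unlock(&c->lock);
	}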
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
@@ -88,6 +183,29 @@ static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
bits, set ? bits : 0);
}
+static int ksz9477_change_mtu(struct dsa_switch *ds, int port, int mtu)
+{
+ struct ksz_device *dev = ds->priv;
+ u16 frame_size, max_frame = 0;
+ int i;
+
+ frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ /* Cache the per-port MTU setting */
+ dev->ports[port].max_frame = frame_size;
+
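+ /* REG_SW_MTU__2 is a switch-global register, so program it with the
+ * largest per-port request. For the default MTU of 1500 this is
+ * 1500 + VLAN_ETH_HLEN (18) + ETH_FCS_LEN (4) = 1522.
+ */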
+ for (i = 0; i < dev->port_cnt; i++)
+ max_frame = max(max_frame, dev->ports[i].max_frame);
+
+ return regmap_update_bits(dev->regmap[1], REG_SW_MTU__2,
+ REG_SW_MTU_MASK, max_frame);
+}
+
+static int ksz9477_max_mtu(struct dsa_switch *ds, int port)
+{
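+	/* With KSZ9477_MAX_FRAME_SIZE == 9000 this reports 9000 - 18 - 4 = 8978 */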
+ return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
+
static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
{
unsigned int val;
@@ -222,9 +340,12 @@ static int ksz9477_reset_switch(struct ksz_device *dev)
(BROADCAST_STORM_VALUE *
BROADCAST_STORM_PROT_RATE) / 100);
- if (dev->synclko_125)
- ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1,
- SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ);
+ data8 = SW_ENABLE_REFCLKO;
+ if (dev->synclko_disable)
+ data8 = 0;
+ else if (dev->synclko_125)
+ data8 = SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ;
+ ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1, data8);
return 0;
}
@@ -543,7 +664,8 @@ static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
}
static int ksz9477_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
u32 alu_table[4];
@@ -600,7 +722,8 @@ exit:
}
static int ksz9477_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
u32 alu_table[4];
@@ -742,7 +865,8 @@ exit:
}
static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
u32 static_table[4];
@@ -817,7 +941,8 @@ exit:
}
static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
u32 static_table[4];
@@ -893,7 +1018,7 @@ exit:
static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+ bool ingress, struct netlink_ext_ack *extack)
{
struct ksz_device *dev = ds->priv;
@@ -1315,8 +1440,14 @@ static int ksz9477_setup(struct dsa_switch *ds)
/* Do not work correctly with tail tagging. */
ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false);
- /* accept packet up to 2000bytes */
- ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_LEGAL_PACKET_DISABLE, true);
+ /* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);
+
+ /* Now we can configure default MTU value */
+ ret = regmap_update_bits(dev->regmap[1], REG_SW_MTU__2, REG_SW_MTU_MASK,
+ VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
+ if (ret)
+ return ret;
ksz9477_config_cpu_port(ds);
@@ -1362,6 +1493,9 @@ static const struct dsa_switch_ops ksz9477_switch_ops = {
.port_mdb_del = ksz9477_port_mdb_del,
.port_mirror_add = ksz9477_port_mirror_add,
.port_mirror_del = ksz9477_port_mirror_del,
+ .get_stats64 = ksz9477_get_stats64,
+ .port_change_mtu = ksz9477_change_mtu,
+ .port_max_mtu = ksz9477_max_mtu,
};
static u32 ksz9477_get_port_addr(int port, int offset)
@@ -1521,6 +1655,7 @@ static int ksz9477_switch_init(struct ksz_device *dev)
if (!dev->ports)
return -ENOMEM;
for (i = 0; i < dev->port_cnt; i++) {
+ spin_lock_init(&dev->ports[i].mib.stats64_lock);
mutex_init(&dev->ports[i].mib.cnt_mutex);
dev->ports[i].mib.counters =
devm_kzalloc(dev->dev,
@@ -1549,6 +1684,7 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.port_setup = ksz9477_port_setup,
.r_mib_cnt = ksz9477_r_mib_cnt,
.r_mib_pkt = ksz9477_r_mib_pkt,
+ .r_mib_stat64 = ksz9477_r_mib_stats64,
.freeze_mib = ksz9477_freeze_mib,
.port_init_cnt = ksz9477_port_init_cnt,
.shutdown = ksz9477_reset_switch,
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index f3afb8b8c4cc..cbc0b20e7e1b 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -92,6 +92,7 @@ static const struct of_device_id ksz9477_dt_ids[] = {
{ .compatible = "microchip,ksz9893" },
{ .compatible = "microchip,ksz9563" },
{ .compatible = "microchip,ksz9567" },
+ { .compatible = "microchip,ksz8563" },
{},
};
MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index 16939f29faa5..0bd58467181f 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -176,6 +176,7 @@
#define REG_SW_MAC_ADDR_5 0x0307
#define REG_SW_MTU__2 0x0308
+#define REG_SW_MTU_MASK GENMASK(13, 0)
#define REG_SW_ISP_TPID__2 0x030A
@@ -1662,4 +1663,6 @@
/* 148,800 frames * 67 ms / 100 */
#define BROADCAST_STORM_VALUE 9969
+#define KSZ9477_MAX_FRAME_SIZE 9000
+
#endif /* KSZ9477_REGS_H */
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
index e3cb0e6c9f6f..87ca464dad32 100644
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -65,7 +65,7 @@ static int ksz9477_spi_probe(struct spi_device *spi)
return 0;
}
-static int ksz9477_spi_remove(struct spi_device *spi)
+static void ksz9477_spi_remove(struct spi_device *spi)
{
struct ksz_device *dev = spi_get_drvdata(spi);
@@ -73,8 +73,6 @@ static int ksz9477_spi_remove(struct spi_device *spi)
ksz_switch_remove(dev);
spi_set_drvdata(spi, NULL);
-
- return 0;
}
static void ksz9477_spi_shutdown(struct spi_device *spi)
@@ -98,12 +96,24 @@ static const struct of_device_id ksz9477_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
+static const struct spi_device_id ksz9477_spi_ids[] = {
+ { "ksz9477" },
+ { "ksz9897" },
+ { "ksz9893" },
+ { "ksz9563" },
+ { "ksz8563" },
+ { "ksz9567" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, ksz9477_spi_ids);
+
static struct spi_driver ksz9477_spi_driver = {
.driver = {
.name = "ksz9477-switch",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(ksz9477_dt_ids),
},
+ .id_table = ksz9477_spi_ids,
.probe = ksz9477_spi_probe,
.remove = ksz9477_spi_remove,
.shutdown = ksz9477_spi_shutdown,
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 243f8ad6d06e..8014b18d9391 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -130,6 +130,10 @@ static void ksz_mib_read_work(struct work_struct *work)
}
port_r_cnt(dev, i);
p->read = false;
+
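+ /* The stats64 conversion hook is optional; skip chips that do not
+ * provide one.
+ */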
+ if (dev->dev_ops->r_mib_stat64)
+ dev->dev_ops->r_mib_stat64(dev, i);
+
mutex_unlock(&mib->cnt_mutex);
}
@@ -213,7 +217,8 @@ EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);
int ksz_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
/* port_stp_state_set() will be called after to put the port in
* appropriate state so there is no need to do anything.
@@ -272,7 +277,8 @@ int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);
int ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
struct alu_struct alu;
@@ -317,7 +323,8 @@ int ksz_port_mdb_add(struct dsa_switch *ds, int port,
EXPORT_SYMBOL_GPL(ksz_port_mdb_add);
int ksz_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
struct alu_struct alu;
@@ -454,6 +461,12 @@ int ksz_switch_register(struct ksz_device *dev,
}
dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
"microchip,synclko-125");
+ dev->synclko_disable = of_property_read_bool(dev->dev->of_node,
+ "microchip,synclko-disable");
+ if (dev->synclko_125 && dev->synclko_disable) {
+ dev_err(dev->dev, "inconsistent synclko settings\n");
+ return -EINVAL;
+ }
}
ret = dsa_register_switch(dev->ds);
@@ -463,7 +476,7 @@ int ksz_switch_register(struct ksz_device *dev,
}
- /* Read MIB counters every 30 seconds to avoid overflow. */
- dev->mib_read_interval = msecs_to_jiffies(30000);
+ /* Read MIB counters every 5 seconds to avoid overflow and to keep
+ * the cached stats64 snapshot reasonably fresh.
+ */
+ dev->mib_read_interval = msecs_to_jiffies(5000);
/* Start the MIB timer. */
schedule_delayed_work(&dev->mib_read, 0);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index df8ae59c8525..485d4a948c38 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -22,6 +22,8 @@ struct ksz_port_mib {
struct mutex cnt_mutex; /* structure access */
u8 cnt_ptr;
u64 *counters;
+ struct rtnl_link_stats64 stats64;
+ spinlock_t stats64_lock;	/* protects stats64 */
};
struct ksz_port {
@@ -39,6 +41,7 @@ struct ksz_port {
struct ksz_port_mib mib;
phy_interface_t interface;
+ u16 max_frame;	/* requested frame size, cached by the MTU callback */
};
struct ksz_device {
@@ -74,7 +77,9 @@ struct ksz_device {
phy_interface_t compat_interface;
u32 regs_size;
bool phy_errata_9477;
+ bool ksz87xx_eee_link_erratum;
bool synclko_125;
+ bool synclko_disable;
struct vlan_table *vlan_cache;
@@ -127,6 +132,7 @@ struct ksz_dev_ops {
u64 *cnt);
void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt);
+ void (*r_mib_stat64)(struct ksz_device *dev, int port);
void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
void (*port_init_cnt)(struct ksz_device *dev, int port);
int (*shutdown)(struct ksz_device *dev);
@@ -155,16 +161,19 @@ void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
int ksz_sset_count(struct dsa_switch *ds, int port, int sset);
void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf);
int ksz_port_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge, bool *tx_fwd_offload);
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack);
void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge);
void ksz_port_fast_age(struct dsa_switch *ds, int port);
int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
void *data);
int ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
int ksz_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
/* Common register access functions */
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index ff3c267d0f26..19f0035d4410 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1186,7 +1186,8 @@ mt7530_port_bridge_flags(struct dsa_switch *ds, int port,
static int
mt7530_port_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge, bool *tx_fwd_offload)
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
u32 port_bitmap = BIT(MT7530_CPU_PORT);
@@ -1349,7 +1350,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
static int
mt7530_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
int ret;
@@ -1365,7 +1367,8 @@ mt7530_port_fdb_add(struct dsa_switch *ds, int port,
static int
mt7530_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
int ret;
@@ -1416,7 +1419,8 @@ err:
static int
mt7530_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
const u8 *addr = mdb->addr;
@@ -1442,7 +1446,8 @@ mt7530_port_mdb_add(struct dsa_switch *ds, int port,
static int
mt7530_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
const u8 *addr = mdb->addr;
@@ -1709,7 +1714,7 @@ static int mt753x_mirror_port_set(unsigned int id, u32 val)
static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+ bool ingress, struct netlink_ext_ack *extack)
{
struct mt7530_priv *priv = ds->priv;
int monitor_port;
@@ -2846,7 +2851,7 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
mcr |= PMCR_RX_FC_EN;
}
- if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, 0) >= 0) {
+ if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) {
switch (speed) {
case SPEED_1000:
mcr |= PMCR_FORCE_EEE1G;
@@ -2936,7 +2941,7 @@ mt753x_phylink_validate(struct dsa_switch *ds, int port,
phylink_set_port_modes(mask);
- if (state->interface != PHY_INTERFACE_MODE_TRGMII ||
+ if (state->interface != PHY_INTERFACE_MODE_TRGMII &&
!phy_interface_mode_is_8023z(state->interface)) {
phylink_set(mask, 10baseT_Half);
phylink_set(mask, 10baseT_Full);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index ab1676553714..64f4fdd02902 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -86,12 +86,16 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val)
int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
u16 mask, u16 val)
{
+ const unsigned long timeout = jiffies + msecs_to_jiffies(50);
u16 data;
int err;
int i;
- /* There's no bus specific operation to wait for a mask */
- for (i = 0; i < 16; i++) {
+ /* There's no bus specific operation to wait for a mask. Even
+ * if the initial poll takes longer than 50ms, always do at
+ * least one more attempt.
+ */
+ for (i = 0; time_before(jiffies, timeout) || (i < 2); i++) {
err = mv88e6xxx_read(chip, addr, reg, &data);
if (err)
return err;
@@ -99,7 +103,10 @@ int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
if ((data & mask) == val)
return 0;
- usleep_range(1000, 2000);
+ if (i < 2)
+ cpu_relax();
+ else
+ usleep_range(1000, 2000);
}
dev_err(chip->dev, "Timeout while waiting for switch\n");
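The rewritten loop bounds the poll by wall-clock time instead of a fixed
iteration count, busy-waits for the first two attempts, and then backs off to
sleeping. A minimal sketch of the same pattern, assuming a hypothetical
read_status() callback in place of mv88e6xxx_read():

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>
	#include <asm/processor.h>

	static int poll_masked(u16 (*read_status)(void *ctx), void *ctx,
			       u16 mask, u16 val)
	{
		const unsigned long timeout = jiffies + msecs_to_jiffies(50);
		int i;

		/* Guarantee at least two reads even if the deadline has
		 * already passed, e.g. because this thread was scheduled
		 * out for the whole budget.
		 */
		for (i = 0; time_before(jiffies, timeout) || i < 2; i++) {
			if ((read_status(ctx) & mask) == val)
				return 0;

			if (i < 2)
				cpu_relax();	/* fast first attempts */
			else
				usleep_range(1000, 2000);
		}

		return -ETIMEDOUT;
	}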
@@ -563,133 +570,268 @@ static int mv88e6xxx_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
return 0;
}
-static void mv88e6065_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static const u8 mv88e6185_phy_interface_modes[] = {
+ [MV88E6185_PORT_STS_CMODE_GMII_FD] = PHY_INTERFACE_MODE_GMII,
+ [MV88E6185_PORT_STS_CMODE_MII_100_FD_PS] = PHY_INTERFACE_MODE_MII,
+ [MV88E6185_PORT_STS_CMODE_MII_100] = PHY_INTERFACE_MODE_MII,
+ [MV88E6185_PORT_STS_CMODE_MII_10] = PHY_INTERFACE_MODE_MII,
+ [MV88E6185_PORT_STS_CMODE_SERDES] = PHY_INTERFACE_MODE_1000BASEX,
+ [MV88E6185_PORT_STS_CMODE_1000BASE_X] = PHY_INTERFACE_MODE_1000BASEX,
+ [MV88E6185_PORT_STS_CMODE_PHY] = PHY_INTERFACE_MODE_SGMII,
+};
+
+static void mv88e6095_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- if (!phy_interface_mode_is_8023z(state->interface)) {
- /* 10M and 100M are only supported in non-802.3z mode */
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
+ u8 cmode = chip->ports[port].cmode;
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
+
+ if (mv88e6xxx_phy_is_internal(chip->ds, port)) {
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+ } else {
+ if (cmode < ARRAY_SIZE(mv88e6185_phy_interface_modes) &&
+ mv88e6185_phy_interface_modes[cmode])
+ __set_bit(mv88e6185_phy_interface_modes[cmode],
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_1000FD;
}
}
-static void mv88e6185_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6185_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- /* FIXME: if the port is in 1000Base-X mode, then it only supports
- * 1000M FD speeds. In this case, CMODE will indicate 5.
+ u8 cmode = chip->ports[port].cmode;
+
+ if (cmode < ARRAY_SIZE(mv88e6185_phy_interface_modes) &&
+ mv88e6185_phy_interface_modes[cmode])
+ __set_bit(mv88e6185_phy_interface_modes[cmode],
+ config->supported_interfaces);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+}
+
+static const u8 mv88e6xxx_phy_interface_modes[] = {
+ [MV88E6XXX_PORT_STS_CMODE_MII_PHY] = PHY_INTERFACE_MODE_MII,
+ [MV88E6XXX_PORT_STS_CMODE_MII] = PHY_INTERFACE_MODE_MII,
+ [MV88E6XXX_PORT_STS_CMODE_GMII] = PHY_INTERFACE_MODE_GMII,
+ [MV88E6XXX_PORT_STS_CMODE_RMII_PHY] = PHY_INTERFACE_MODE_RMII,
+ [MV88E6XXX_PORT_STS_CMODE_RMII] = PHY_INTERFACE_MODE_RMII,
+ [MV88E6XXX_PORT_STS_CMODE_100BASEX] = PHY_INTERFACE_MODE_100BASEX,
+ [MV88E6XXX_PORT_STS_CMODE_1000BASEX] = PHY_INTERFACE_MODE_1000BASEX,
+ [MV88E6XXX_PORT_STS_CMODE_SGMII] = PHY_INTERFACE_MODE_SGMII,
+ /* Higher interface modes are not needed here, since ports that
+ * support them have a writable C_Mode field, and their supported
+ * interfaces are filled in by the corresponding .phylink_get_caps()
+ * implementation below.
+ */
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+};
- mv88e6065_phylink_validate(chip, port, mask, state);
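+/* A zero entry in the table above means "no fixed mapping": unlisted
+ * designated-initializer slots stay zeroed, and PHY_INTERFACE_MODE_NA
+ * is conveniently zero as well, so the lookup below can simply test
+ * the value for truth.
+ */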
+static void mv88e6xxx_translate_cmode(u8 cmode, unsigned long *supported)
+{
+ if (cmode < ARRAY_SIZE(mv88e6xxx_phy_interface_modes) &&
+ mv88e6xxx_phy_interface_modes[cmode])
+ __set_bit(mv88e6xxx_phy_interface_modes[cmode], supported);
+ else if (cmode == MV88E6XXX_PORT_STS_CMODE_RGMII)
+ phy_interface_set_rgmii(supported);
}
-static void mv88e6341_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- if (port >= 5)
- phylink_set(mask, 2500baseX_Full);
+ unsigned long *supported = config->supported_interfaces;
- /* No ethtool bits for 200Mbps */
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
- mv88e6065_phylink_validate(chip, port, mask, state);
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
}
-static void mv88e6352_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip)
{
- /* No ethtool bits for 200Mbps */
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+ u16 reg, val;
+ int err;
- mv88e6065_phylink_validate(chip, port, mask, state);
+ err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, &reg);
+ if (err)
+ return err;
+
+ /* If PHY_DETECT is zero, then we are not in auto-media mode */
+ if (!(reg & MV88E6XXX_PORT_STS_PHY_DETECT))
+ return 0xf;
+
+ val = reg & ~MV88E6XXX_PORT_STS_PHY_DETECT;
+ err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, val);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, &val);
+ if (err)
+ return err;
+
+ /* Restore PHY_DETECT value */
+ err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, reg);
+ if (err)
+ return err;
+
+ return val & MV88E6XXX_PORT_STS_CMODE_MASK;
}
-static void mv88e6390_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6352_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- if (port >= 9) {
- phylink_set(mask, 2500baseX_Full);
- phylink_set(mask, 2500baseT_Full);
+ unsigned long *supported = config->supported_interfaces;
+ int err, cmode;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* Port 4 supports automedia if the serdes is associated with it. */
+ if (port == 4) {
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err < 0)
+ dev_err(chip->dev, "p%d: failed to read scratch\n",
+ port);
+ if (err <= 0)
+ goto unlock;
+
+ cmode = mv88e6352_get_port4_serdes_cmode(chip);
+ if (cmode < 0)
+ dev_err(chip->dev, "p%d: failed to read serdes cmode\n",
+ port);
+ else
+ mv88e6xxx_translate_cmode(cmode, supported);
+unlock:
+ mv88e6xxx_reg_unlock(chip);
}
+}
+
+static void mv88e6341_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
/* No ethtool bits for 200Mbps */
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
- mv88e6065_phylink_validate(chip, port, mask, state);
+ /* The C_Mode field is programmable on port 5 */
+ if (port == 5) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
+
+ config->mac_capabilities |= MAC_2500FD;
+ }
}
-static void mv88e6390x_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6390_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- if (port >= 9) {
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseKR_Full);
+ unsigned long *supported = config->supported_interfaces;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ /* No ethtool bits for 200Mbps */
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* The C_Mode field is programmable on ports 9 and 10 */
+ if (port == 9 || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
+
+ config->mac_capabilities |= MAC_2500FD;
}
+}
- mv88e6390_phylink_validate(chip, port, mask, state);
+static void mv88e6390x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+
+ mv88e6390_phylink_get_caps(chip, port, config);
+
+ /* For the 6x90X, ports 2-7 can be in automedia mode.
+ * (Note that the 6x90 supports neither RXAUI nor XAUI.)
+ *
+ * Port 2 can also support 1000BASE-X in automedia mode if port 9 is
+ * configured for 1000BASE-X, SGMII or 2500BASE-X.
+ * Ports 3-4 can also support 1000BASE-X in automedia mode if port 9 is
+ * configured for RXAUI, 1000BASE-X, SGMII or 2500BASE-X.
+ *
+ * Port 5 can also support 1000BASE-X in automedia mode if port 10 is
+ * configured for 1000BASE-X, SGMII or 2500BASE-X.
+ * Ports 6-7 can also support 1000BASE-X in automedia mode if port 10 is
+ * configured for RXAUI, 1000BASE-X, SGMII or 2500BASE-X.
+ *
+ * For now, be permissive (as the old code was) and allow 1000BASE-X
+ * on ports 2..7.
+ */
+ if (port >= 2 && port <= 7)
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+
+ /* The C_Mode field can also be programmed for 10G speeds */
+ if (port == 9 || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_XAUI, supported);
+ __set_bit(PHY_INTERFACE_MODE_RXAUI, supported);
+
+ config->mac_capabilities |= MAC_10000FD;
+ }
}
-static void mv88e6393x_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6393x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
+ unsigned long *supported = config->supported_interfaces;
bool is_6191x =
chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6191X;
- if (((port == 0 || port == 9) && !is_6191x) || port == 10) {
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseKR_Full);
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseER_Full);
- phylink_set(mask, 5000baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- phylink_set(mask, 2500baseT_Full);
- }
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* The C_Mode field can be programmed for ports 0, 9 and 10 */
+ if (port == 0 || port == 9 || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+ /* 6191X supports >1G modes only on port 10 */
+ if (!is_6191x || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
+ __set_bit(PHY_INTERFACE_MODE_5GBASER, supported);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, supported);
+ /* FIXME: USXGMII is not supported yet */
+ /* __set_bit(PHY_INTERFACE_MODE_USXGMII, supported); */
- mv88e6065_phylink_validate(chip, port, mask, state);
+ config->mac_capabilities |= MAC_2500FD | MAC_5000FD |
+ MAC_10000FD;
+ }
+ }
}
-static void mv88e6xxx_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct mv88e6xxx_chip *chip = ds->priv;
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set_port_modes(mask);
-
- if (chip->info->ops->phylink_validate)
- chip->info->ops->phylink_validate(chip, port, mask, state);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ chip->info->ops->phylink_get_caps(chip, port, config);
- /* We can only operate at 2500BaseX or 1000BaseX. If requested
- * to advertise both, only report advertising at 2500BaseX.
- */
- phylink_helper_basex_speed(state);
+ /* Internal ports need GMII for PHYLIB */
+ if (mv88e6xxx_phy_is_internal(ds, port))
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
}
static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
@@ -1283,8 +1425,15 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
pvlan = 0;
- /* Frames from user ports can egress any local DSA links and CPU ports,
- * as well as any local member of their bridge group.
+ /* Frames from standalone user ports can only egress on the
+ * upstream port.
+ */
+ if (!dsa_port_bridge_dev_get(dp))
+ return BIT(dsa_switch_upstream_port(ds));
+
+ /* Frames from bridged user ports can egress any local DSA
+ * links and CPU ports, as well as any local member of their
+ * bridge group.
*/
dsa_switch_for_each_port(other_dp, ds)
if (other_dp->type == DSA_PORT_TYPE_CPU ||
@@ -1476,15 +1625,16 @@ static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
ds = dsa_switch_find(dst->index, dev);
dp = ds ? dsa_to_port(ds, port) : NULL;
- if (dp && dp->lag_dev) {
+ if (dp && dp->lag) {
/* As the PVT is used to limit flooding of
* FORWARD frames, which use the LAG ID as the
* source port, we must translate dev/port to
* the special "LAG device" in the PVT, using
- * the LAG ID as the port number.
+ * the LAG ID (one-based) as the port number
+ * (zero-based).
*/
dev = MV88E6XXX_G2_PVT_ADDR_DEV_TRUNK;
- port = dsa_lag_id(dst, dp->lag_dev);
+ port = dsa_port_lag_id_get(dp) - 1;
}
}
@@ -1517,24 +1667,31 @@ static int mv88e6xxx_pvt_setup(struct mv88e6xxx_chip *chip)
return 0;
}
-static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
+static int mv88e6xxx_port_fast_age_fid(struct mv88e6xxx_chip *chip, int port,
+ u16 fid)
{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- if (dsa_to_port(ds, port)->lag_dev)
+ if (dsa_to_port(chip->ds, port)->lag)
/* Hardware is incapable of fast-aging a LAG through a
* regular ATU move operation. Until we have something
* more fancy in place this is a no-op.
*/
- return;
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_g1_atu_remove(chip, fid, port, false);
+}
+
+static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_g1_atu_remove(chip, 0, port, false);
+ err = mv88e6xxx_port_fast_age_fid(chip, port, 0);
mv88e6xxx_reg_unlock(chip);
if (err)
- dev_err(ds->dev, "p%d: failed to flush ATU\n", port);
+ dev_err(chip->ds->dev, "p%d: failed to flush ATU: %d\n",
+ port, err);
}
static int mv88e6xxx_vtu_setup(struct mv88e6xxx_chip *chip)
@@ -1616,21 +1773,11 @@ static int mv88e6xxx_fid_map_vlan(struct mv88e6xxx_chip *chip,
int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap)
{
- int i, err;
- u16 fid;
-
bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
- /* Set every FID bit used by the (un)bridged ports */
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- err = mv88e6xxx_port_get_fid(chip, i, &fid);
- if (err)
- return err;
-
- set_bit(fid, fid_bitmap);
- }
-
- /* Set every FID bit used by the VLAN entries */
+ /* Every FID has an associated VID, so walking the VTU
+ * will discover the full set of FIDs in use.
+ */
return mv88e6xxx_vtu_walk(chip, mv88e6xxx_fid_map_vlan, fid_bitmap);
}
@@ -1643,10 +1790,7 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
if (err)
return err;
- /* The reset value 0x000 is used to indicate that multiple address
- * databases are not needed. Return the next positive available.
- */
- *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
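+ /* FID 0 is now allocatable; the first allocation made during setup
+ * is for MV88E6XXX_VID_STANDALONE, which therefore lands on the
+ * lowest free FID (see the VID_STANDALONE join in
+ * mv88e6xxx_setup_port()).
+ */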
+ *fid = find_first_zero_bit(fid_bitmap, MV88E6XXX_N_FID);
if (unlikely(*fid >= mv88e6xxx_num_databases(chip)))
return -ENOSPC;
@@ -1654,6 +1798,187 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
return mv88e6xxx_g1_atu_flush(chip, *fid, true);
}
+static int mv88e6xxx_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ if (!chip->info->ops->stu_loadpurge)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->stu_loadpurge(chip, entry);
+}
+
+static int mv88e6xxx_stu_setup(struct mv88e6xxx_chip *chip)
+{
+ struct mv88e6xxx_stu_entry stu = {
+ .valid = true,
+ .sid = 0
+ };
+
+ if (!mv88e6xxx_has_stu(chip))
+ return 0;
+
+ /* Make sure that SID 0 is always valid. This is used by VTU
+ * entries that do not make use of the STU, e.g. when creating
+ * a VLAN upper on a port that is also part of a VLAN
+ * filtering bridge.
+ */
+ return mv88e6xxx_stu_loadpurge(chip, &stu);
+}
+
+static int mv88e6xxx_sid_get(struct mv88e6xxx_chip *chip, u8 *sid)
+{
+ DECLARE_BITMAP(busy, MV88E6XXX_N_SID) = { 0 };
+ struct mv88e6xxx_mst *mst;
+
+ __set_bit(0, busy);
+
+ list_for_each_entry(mst, &chip->msts, node)
+ __set_bit(mst->stu.sid, busy);
+
+ *sid = find_first_zero_bit(busy, MV88E6XXX_N_SID);
+
+ return (*sid >= mv88e6xxx_max_sid(chip)) ? -ENOSPC : 0;
+}
+
+static int mv88e6xxx_mst_put(struct mv88e6xxx_chip *chip, u8 sid)
+{
+ struct mv88e6xxx_mst *mst, *tmp;
+ int err;
+
+ if (!sid)
+ return 0;
+
+ list_for_each_entry_safe(mst, tmp, &chip->msts, node) {
+ if (mst->stu.sid != sid)
+ continue;
+
+ if (!refcount_dec_and_test(&mst->refcnt))
+ return 0;
+
+ mst->stu.valid = false;
+ err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
+ if (err) {
+ refcount_set(&mst->refcnt, 1);
+ return err;
+ }
+
+ list_del(&mst->node);
+ kfree(mst);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int mv88e6xxx_mst_get(struct mv88e6xxx_chip *chip, struct net_device *br,
+ u16 msti, u8 *sid)
+{
+ struct mv88e6xxx_mst *mst;
+ int err, i;
+
+ if (!mv88e6xxx_has_stu(chip)) {
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ if (!msti) {
+ *sid = 0;
+ return 0;
+ }
+
+ list_for_each_entry(mst, &chip->msts, node) {
+ if (mst->br == br && mst->msti == msti) {
+ refcount_inc(&mst->refcnt);
+ *sid = mst->stu.sid;
+ return 0;
+ }
+ }
+
+ err = mv88e6xxx_sid_get(chip, sid);
+ if (err)
+ goto err;
+
+ mst = kzalloc(sizeof(*mst), GFP_KERNEL);
+ if (!mst) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ INIT_LIST_HEAD(&mst->node);
+ refcount_set(&mst->refcnt, 1);
+ mst->br = br;
+ mst->msti = msti;
+ mst->stu.valid = true;
+ mst->stu.sid = *sid;
+
+ /* The bridge starts out all ports in the disabled state. But
+ * a STU state of disabled means to go by the port-global
+ * state. So we set all user port's initial state to blocking,
+ * to match the bridge's behavior.
+ */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+ mst->stu.state[i] = dsa_is_user_port(chip->ds, i) ?
+ MV88E6XXX_PORT_CTL0_STATE_BLOCKING :
+ MV88E6XXX_PORT_CTL0_STATE_DISABLED;
+
+ err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
+ if (err)
+ goto err_free;
+
+ list_add_tail(&mst->node, &chip->msts);
+ return 0;
+
+err_free:
+ kfree(mst);
+err:
+ return err;
+}
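mv88e6xxx_mst_get() and mv88e6xxx_mst_put() implement a classic refcounted
lookup-or-create cache, here keyed on (bridge, msti). A condensed sketch of
that life cycle, with illustrative types and names:

	#include <linux/list.h>
	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct entry {
		struct list_head node;
		refcount_t refcnt;
		int key;
	};

	/* Lookup-or-create: a hit only bumps the refcount. */
	static struct entry *entry_get(struct list_head *pool, int key)
	{
		struct entry *e;

		list_for_each_entry(e, pool, node)
			if (e->key == key) {
				refcount_inc(&e->refcnt);
				return e;
			}

		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			return NULL;

		e->key = key;
		refcount_set(&e->refcnt, 1);
		list_add_tail(&e->node, pool);
		return e;
	}

	/* Drop one reference; the last put unlinks and frees the entry. */
	static void entry_put(struct entry *e)
	{
		if (refcount_dec_and_test(&e->refcnt)) {
			list_del(&e->node);
			kfree(e);
		}
	}

The driver's put additionally invalidates the STU entry in hardware before
freeing, and restores the refcount to 1 if that write fails.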
+
+static int mv88e6xxx_port_mst_state_set(struct dsa_switch *ds, int port,
+ const struct switchdev_mst_state *st)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_mst *mst;
+ u8 state;
+ int err;
+
+ if (!mv88e6xxx_has_stu(chip))
+ return -EOPNOTSUPP;
+
+ switch (st->state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
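+		/* The hardware has no distinct listening state, and an STU
+		 * state of "disabled" means "use the port-global state"
+		 * (see mv88e6xxx_mst_get() above), so all three bridge
+		 * states map to blocking here.
+		 */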
+ state = MV88E6XXX_PORT_CTL0_STATE_BLOCKING;
+ break;
+ case BR_STATE_LEARNING:
+ state = MV88E6XXX_PORT_CTL0_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ state = MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ list_for_each_entry(mst, &chip->msts, node) {
+ if (mst->br == dsa_port_bridge_dev_get(dp) &&
+ mst->msti == st->msti) {
+ if (mst->stu.state[port] == state)
+ return 0;
+
+ mst->stu.state[port] = state;
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+ }
+ }
+
+ return -ENOENT;
+}
+
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
u16 vid)
{
@@ -2138,6 +2463,9 @@ static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
if (!vlan.valid) {
memset(&vlan, 0, sizeof(vlan));
+ if (vid == MV88E6XXX_VID_STANDALONE)
+ vlan.policy = true;
+
err = mv88e6xxx_atu_new(chip, &vlan.fid);
if (err)
return err;
@@ -2270,6 +2598,12 @@ static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip,
if (err)
return err;
+ if (!vlan.valid) {
+ err = mv88e6xxx_mst_put(chip, vlan.sid);
+ if (err)
+ return err;
+ }
+
return mv88e6xxx_g1_atu_remove(chip, vlan.fid, port, false);
}
@@ -2315,8 +2649,75 @@ unlock:
return err;
}
+static int mv88e6xxx_port_vlan_fast_age(struct dsa_switch *ds, int port, u16 vid)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_vtu_entry vlan;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_vtu_get(chip, vid, &vlan);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_port_fast_age_fid(chip, port, vlan.fid);
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_vlan_msti_set(struct dsa_switch *ds,
+ struct dsa_bridge bridge,
+ const struct switchdev_vlan_msti *msti)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_vtu_entry vlan;
+ u8 old_sid, new_sid;
+ int err;
+
+ if (!mv88e6xxx_has_stu(chip))
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_vtu_get(chip, msti->vid, &vlan);
+ if (err)
+ goto unlock;
+
+ if (!vlan.valid) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ old_sid = vlan.sid;
+
+ err = mv88e6xxx_mst_get(chip, bridge.dev, msti->msti, &new_sid);
+ if (err)
+ goto unlock;
+
+ if (new_sid != old_sid) {
+ vlan.sid = new_sid;
+
+ err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
+ if (err) {
+ mv88e6xxx_mst_put(chip, new_sid);
+ goto unlock;
+ }
+ }
+
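+ /* The VTU entry now references new_sid (if it changed), so release
+ * this VLAN's claim on the old SID.
+ */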
+ err = mv88e6xxx_mst_put(chip, old_sid);
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+}
+
static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -2330,7 +2731,8 @@ static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -2476,7 +2878,8 @@ static int mv88e6xxx_map_virtual_bridge_to_pvt(struct dsa_switch *ds,
static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -2487,6 +2890,10 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
if (err)
goto unlock;
+ err = mv88e6xxx_port_set_map_da(chip, port, true);
+ if (err)
+ goto unlock;
+
err = mv88e6xxx_port_commit_pvid(chip, port);
if (err)
goto unlock;
@@ -2521,6 +2928,12 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
mv88e6xxx_port_vlan_map(chip, port))
dev_err(ds->dev, "failed to remap in-chip Port VLAN\n");
+ err = mv88e6xxx_port_set_map_da(chip, port, false);
+ if (err)
+ dev_err(ds->dev,
+ "port %d failed to restore map-DA: %pe\n",
+ port, ERR_PTR(err));
+
err = mv88e6xxx_port_commit_pvid(chip, port);
if (err)
dev_err(ds->dev,
@@ -2532,7 +2945,8 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds,
int tree_index, int sw_index,
- int port, struct dsa_bridge bridge)
+ int port, struct dsa_bridge bridge,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -2864,7 +3278,10 @@ static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port)
static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
{
+ struct device_node *phy_handle = NULL;
struct dsa_switch *ds = chip->ds;
+ struct dsa_port *dp;
+ int tx_amp;
int err;
u16 reg;
@@ -2918,12 +3335,13 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return err;
/* Port Control 2: don't force a good FCS, set the MTU size to
- * 10222 bytes, disable 802.1q tags checking, don't discard tagged or
- * untagged frames on this port, do a destination address lookup on all
- * received packets as usual, disable ARP mirroring and don't send a
- * copy of all transmitted/received frames on this port to the CPU.
+ * 10222 bytes, disable 802.1q tags checking, don't discard
+ * tagged or untagged frames on this port, skip destination
+ * address lookup on user ports, disable ARP mirroring and don't
+ * send a copy of all transmitted/received frames on this port
+ * to the CPU.
*/
- err = mv88e6xxx_port_set_map_da(chip, port);
+ err = mv88e6xxx_port_set_map_da(chip, port, !dsa_is_user_port(ds, port));
if (err)
return err;
@@ -2931,8 +3349,44 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
+ /* On chips that support it, set all downstream DSA ports'
+ * VLAN policy to TRAP. In combination with loading
+ * MV88E6XXX_VID_STANDALONE as a policy entry in the VTU, this
+ * provides a better isolation barrier between standalone
+ * ports, as the ATU is bypassed on any intermediate switches
+ * between the incoming port and the CPU.
+ */
+ if (dsa_is_downstream_port(ds, port) &&
+ chip->info->ops->port_set_policy) {
+ err = chip->info->ops->port_set_policy(chip, port,
+ MV88E6XXX_POLICY_MAPPING_VTU,
+ MV88E6XXX_POLICY_ACTION_TRAP);
+ if (err)
+ return err;
+ }
+
+ /* User ports start out in standalone mode and 802.1Q is
+ * therefore disabled. On DSA ports, all valid VIDs are always
+ * loaded in the VTU - therefore, enable 802.1Q in order to take
+ * advantage of VLAN policy on chips that supports it.
+ */
err = mv88e6xxx_port_set_8021q_mode(chip, port,
- MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED);
+ dsa_is_user_port(ds, port) ?
+ MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED :
+ MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE);
+ if (err)
+ return err;
+
+ /* Bind MV88E6XXX_VID_STANDALONE to MV88E6XXX_FID_STANDALONE by
+ * virtue of the fact that mv88e6xxx_atu_new() will pick it as
+ * the first free FID. This will be used as the private PVID for
+ * unbridged ports. Shared (DSA and CPU) ports must also be
+ * members of this VID, in order to trap all frames assigned to
+ * it to the CPU.
+ */
+ err = mv88e6xxx_port_vlan_join(chip, port, MV88E6XXX_VID_STANDALONE,
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED,
+ false);
if (err)
return err;
@@ -2945,7 +3399,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
* relying on their port default FID.
*/
err = mv88e6xxx_port_vlan_join(chip, port, MV88E6XXX_VID_BRIDGED,
- MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNTAGGED,
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED,
false);
if (err)
return err;
@@ -3018,6 +3472,23 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return err;
}
+ if (chip->info->ops->serdes_set_tx_amplitude) {
+ dp = dsa_to_port(ds, port);
+ if (dp)
+ phy_handle = of_parse_phandle(dp->dn, "phy-handle", 0);
+
+ if (phy_handle && !of_property_read_u32(phy_handle,
+ "tx-p2p-microvolt",
+ &tx_amp))
+ err = chip->info->ops->serdes_set_tx_amplitude(chip,
+ port, tx_amp);
+ if (phy_handle) {
+ of_node_put(phy_handle);
+ if (err)
+ return err;
+ }
+ }
+
/* Port based VLAN map: give each port the same default address
* database, and allow bidirectional communication between the
* CPU and DSA port(s), and the other ports.
@@ -3216,6 +3687,13 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
if (err)
goto unlock;
+ /* Must be called after mv88e6xxx_vtu_setup (which flushes the
+ * VTU, thereby also flushing the STU).
+ */
+ err = mv88e6xxx_stu_setup(chip);
+ if (err)
+ goto unlock;
+
/* Setup Switch Port Registers */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
if (dsa_is_unused_port(ds, i))
@@ -3589,7 +4067,9 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3623,7 +4103,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6095_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3639,6 +4119,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.port_sync_link = mv88e6185_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -3669,7 +4150,9 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6095_phylink_get_caps,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3706,7 +4189,9 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3747,7 +4232,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6141_ops = {
@@ -3795,6 +4280,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6341_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -3811,7 +4298,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
- .phylink_validate = mv88e6341_phylink_validate,
+ .phylink_get_caps = mv88e6341_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6161_ops = {
@@ -3851,9 +4338,11 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6165_avb_ops,
.ptp_ops = &mv88e6165_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3887,9 +4376,11 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6165_avb_ops,
.ptp_ops = &mv88e6165_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6171_ops = {
@@ -3931,7 +4422,9 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6172_ops = {
@@ -3977,6 +4470,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
.serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
.serdes_pcs_config = mv88e6352_serdes_pcs_config,
@@ -3986,7 +4481,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
- .phylink_validate = mv88e6352_phylink_validate,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6175_ops = {
@@ -4028,7 +4523,9 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6176_ops = {
@@ -4074,6 +4571,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
.serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
.serdes_pcs_config = mv88e6352_serdes_pcs_config,
@@ -4085,8 +4584,9 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.serdes_irq_status = mv88e6352_serdes_irq_status,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
+ .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.gpio_ops = &mv88e6352_gpio_ops,
- .phylink_validate = mv88e6352_phylink_validate,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6185_ops = {
@@ -4125,7 +4625,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -4172,6 +4672,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4187,7 +4689,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
- .phylink_validate = mv88e6390_phylink_validate,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6190x_ops = {
@@ -4233,6 +4735,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390x_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4248,7 +4752,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
- .phylink_validate = mv88e6390x_phylink_validate,
+ .phylink_get_caps = mv88e6390x_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6191_ops = {
@@ -4292,6 +4796,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4308,7 +4814,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.serdes_get_regs = mv88e6390_serdes_get_regs,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6390_phylink_validate,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6240_ops = {
@@ -4354,6 +4860,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
.serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
.serdes_pcs_config = mv88e6352_serdes_pcs_config,
@@ -4365,10 +4873,11 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.serdes_irq_status = mv88e6352_serdes_irq_status,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
+ .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6352_phylink_validate,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6250_ops = {
@@ -4408,7 +4917,7 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6250_ptp_ops,
- .phylink_validate = mv88e6065_phylink_validate,
+ .phylink_get_caps = mv88e6250_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6290_ops = {
@@ -4453,6 +4962,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4470,7 +4981,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6390_phylink_validate,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6320_ops = {
@@ -4514,7 +5025,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6321_ops = {
@@ -4556,7 +5067,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6341_ops = {
@@ -4604,6 +5115,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6341_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4622,7 +5135,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
- .phylink_validate = mv88e6341_phylink_validate,
+ .phylink_get_caps = mv88e6341_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6350_ops = {
@@ -4664,7 +5177,9 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6351_ops = {
@@ -4706,9 +5221,11 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6352_ops = {
@@ -4754,6 +5271,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
.serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
.serdes_pcs_config = mv88e6352_serdes_pcs_config,
@@ -4771,7 +5290,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.serdes_get_stats = mv88e6352_serdes_get_stats,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
- .phylink_validate = mv88e6352_phylink_validate,
+ .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6390_ops = {
@@ -4818,6 +5338,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4836,7 +5358,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
- .phylink_validate = mv88e6390_phylink_validate,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6390x_ops = {
@@ -4883,6 +5405,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390x_serdes_get_lane,
.serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
@@ -4900,7 +5424,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6390x_phylink_validate,
+ .phylink_get_caps = mv88e6390x_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6393x_ops = {
@@ -4951,6 +5475,8 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6393x_serdes_power,
.serdes_get_lane = mv88e6393x_serdes_get_lane,
.serdes_pcs_get_state = mv88e6393x_serdes_pcs_get_state,
@@ -4964,7 +5490,7 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6393x_phylink_validate,
+ .phylink_get_caps = mv88e6393x_phylink_get_caps,
};
static const struct mv88e6xxx_info mv88e6xxx_table[] = {
@@ -4977,6 +5503,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 10,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5019,6 +5546,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11,
.num_internal_phys = 8,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5042,6 +5570,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 3,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5086,6 +5615,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 11,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x10,
.global1_addr = 0x1b,
@@ -5109,6 +5639,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 6,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5133,6 +5664,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 6,
.num_internal_phys = 0,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5156,6 +5688,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5180,6 +5713,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5203,6 +5737,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5227,6 +5762,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5272,6 +5808,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5295,6 +5832,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5317,6 +5855,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5339,6 +5878,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5361,6 +5901,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5411,6 +5952,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5456,6 +5998,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5529,6 +6072,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 6,
.num_gpio = 11,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x10,
.global1_addr = 0x1b,
@@ -5553,6 +6097,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5576,6 +6121,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5600,6 +6146,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5624,6 +6171,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5648,6 +6196,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5671,6 +6220,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5739,6 +6289,7 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
mutex_init(&chip->reg_lock);
INIT_LIST_HEAD(&chip->mdios);
idr_init(&chip->policies);
+ INIT_LIST_HEAD(&chip->msts);
return chip;
}
@@ -5791,7 +6342,8 @@ static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -5805,7 +6357,8 @@ static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -5819,7 +6372,8 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
static int mv88e6xxx_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+ bool ingress,
+ struct netlink_ext_ack *extack)
{
enum mv88e6xxx_egress_direction direction = ingress ?
MV88E6XXX_EGRESS_DIR_INGRESS :
@@ -5893,7 +6447,7 @@ static int mv88e6xxx_port_pre_bridge_flags(struct dsa_switch *ds, int port,
const struct mv88e6xxx_ops *ops;
if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
- BR_BCAST_FLOOD))
+ BR_BCAST_FLOOD | BR_PORT_LOCKED))
return -EINVAL;
ops = chip->info->ops;
@@ -5951,6 +6505,13 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port,
goto out;
}
+ if (flags.mask & BR_PORT_LOCKED) {
+ bool locked = !!(flags.val & BR_PORT_LOCKED);
+
+ err = mv88e6xxx_port_set_lock(chip, port, locked);
+ if (err)
+ goto out;
+ }
out:
mv88e6xxx_reg_unlock(chip);
@@ -5958,21 +6519,20 @@ out:
}
static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
- struct net_device *lag,
+ struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct dsa_port *dp;
- int id, members = 0;
+ int members = 0;
if (!mv88e6xxx_has_lag(chip))
return false;
- id = dsa_lag_id(ds->dst, lag);
- if (id < 0 || id >= ds->num_lag_ids)
+ if (!lag.id)
return false;
- dsa_lag_foreach_port(dp, ds->dst, lag)
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
/* Includes the port joining the LAG */
members++;
@@ -5992,20 +6552,21 @@ static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
return true;
}
-static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct net_device *lag)
+static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct dsa_port *dp;
u16 map = 0;
int id;
- id = dsa_lag_id(ds->dst, lag);
+ /* DSA LAG IDs are one-based, hardware is zero-based */
+ id = lag.id - 1;
/* Build the map of all ports to distribute flows destined for
* this LAG. This can be either a local user port, or a DSA
* port if the LAG port is on a remote chip.
*/
- dsa_lag_foreach_port(dp, ds->dst, lag)
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
map |= BIT(dsa_towards_port(ds, dp->ds->index, dp->index));
return mv88e6xxx_g2_trunk_mapping_write(chip, id, map);
@@ -6050,8 +6611,8 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
unsigned int id, num_tx;
- struct net_device *lag;
struct dsa_port *dp;
+ struct dsa_lag *lag;
int i, err, nth;
u16 mask[8];
u16 ivec;
@@ -6060,8 +6621,8 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
ivec = BIT(mv88e6xxx_num_ports(chip)) - 1;
/* Disable all masks for ports that _are_ members of a LAG. */
- list_for_each_entry(dp, &ds->dst->ports, list) {
- if (!dp->lag_dev || dp->ds != ds)
+ dsa_switch_for_each_port(dp, ds) {
+ if (!dp->lag)
continue;
ivec &= ~BIT(dp->index);
@@ -6074,7 +6635,7 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
* are in the Tx set.
*/
dsa_lags_foreach_id(id, ds->dst) {
- lag = dsa_lag_dev(ds->dst, id);
+ lag = dsa_lag_by_id(ds->dst, id);
if (!lag)
continue;
@@ -6110,7 +6671,7 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
}
static int mv88e6xxx_lag_sync_masks_map(struct dsa_switch *ds,
- struct net_device *lag)
+ struct dsa_lag lag)
{
int err;
@@ -6134,7 +6695,7 @@ static int mv88e6xxx_port_lag_change(struct dsa_switch *ds, int port)
}
static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
- struct net_device *lag,
+ struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct mv88e6xxx_chip *chip = ds->priv;
@@ -6143,7 +6704,8 @@ static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
if (!mv88e6xxx_lag_can_offload(ds, lag, info))
return -EOPNOTSUPP;
- id = dsa_lag_id(ds->dst, lag);
+ /* DSA LAG IDs are one-based */
+ id = lag.id - 1;
mv88e6xxx_reg_lock(chip);
@@ -6166,7 +6728,7 @@ err_unlock:
}
static int mv88e6xxx_port_lag_leave(struct dsa_switch *ds, int port,
- struct net_device *lag)
+ struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err_sync, err_trunk;
@@ -6191,7 +6753,7 @@ static int mv88e6xxx_crosschip_lag_change(struct dsa_switch *ds, int sw_index,
}
static int mv88e6xxx_crosschip_lag_join(struct dsa_switch *ds, int sw_index,
- int port, struct net_device *lag,
+ int port, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct mv88e6xxx_chip *chip = ds->priv;
@@ -6214,7 +6776,7 @@ unlock:
}
static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index,
- int port, struct net_device *lag)
+ int port, struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err_sync, err_pvt;
@@ -6233,7 +6795,7 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.teardown = mv88e6xxx_teardown,
.port_setup = mv88e6xxx_port_setup,
.port_teardown = mv88e6xxx_port_teardown,
- .phylink_validate = mv88e6xxx_validate,
+ .phylink_get_caps = mv88e6xxx_get_caps,
.phylink_mac_link_state = mv88e6xxx_serdes_pcs_get_state,
.phylink_mac_config = mv88e6xxx_mac_config,
.phylink_mac_an_restart = mv88e6xxx_serdes_pcs_an_restart,
@@ -6261,10 +6823,13 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_pre_bridge_flags = mv88e6xxx_port_pre_bridge_flags,
.port_bridge_flags = mv88e6xxx_port_bridge_flags,
.port_stp_state_set = mv88e6xxx_port_stp_state_set,
+ .port_mst_state_set = mv88e6xxx_port_mst_state_set,
.port_fast_age = mv88e6xxx_port_fast_age,
+ .port_vlan_fast_age = mv88e6xxx_port_vlan_fast_age,
.port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
.port_vlan_add = mv88e6xxx_port_vlan_add,
.port_vlan_del = mv88e6xxx_port_vlan_del,
+ .vlan_msti_set = mv88e6xxx_vlan_msti_set,
.port_fdb_add = mv88e6xxx_port_fdb_add,
.port_fdb_del = mv88e6xxx_port_fdb_del,
.port_fdb_dump = mv88e6xxx_port_fdb_dump,
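Taken together, the three new ops give the bridge a complete MST offload
path: .vlan_msti_set binds a VLAN to a bridge MSTI (presumably backed by
the chip->msts SID cache added above), .port_mst_state_set programs the
per-port forwarding state for that instance into the STU, and
.port_vlan_fast_age lets the bridge flush learned entries for a single
VLAN when a topology change requires it.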
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 8271b8aa7b71..5e03cfe50156 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -20,6 +20,7 @@
#define EDSA_HLEN 8
#define MV88E6XXX_N_FID 4096
+#define MV88E6XXX_N_SID 64
#define MV88E6XXX_FID_STANDALONE 0
#define MV88E6XXX_FID_BRIDGED 1
@@ -130,6 +131,7 @@ struct mv88e6xxx_info {
unsigned int num_internal_phys;
unsigned int num_gpio;
unsigned int max_vid;
+ unsigned int max_sid;
unsigned int port_base_addr;
unsigned int phy_base_addr;
unsigned int global1_addr;
@@ -179,7 +181,14 @@ struct mv88e6xxx_vtu_entry {
u16 fid;
u8 sid;
bool valid;
+ bool policy;
u8 member[DSA_MAX_PORTS];
+ u8 state[DSA_MAX_PORTS]; /* Older silicon has no STU */
+};
+
+struct mv88e6xxx_stu_entry {
+ u8 sid;
+ bool valid;
u8 state[DSA_MAX_PORTS];
};
@@ -278,6 +287,7 @@ enum mv88e6xxx_region_id {
MV88E6XXX_REGION_GLOBAL2,
MV88E6XXX_REGION_ATU,
MV88E6XXX_REGION_VTU,
+ MV88E6XXX_REGION_STU,
MV88E6XXX_REGION_PVT,
_MV88E6XXX_REGION_MAX,
@@ -287,6 +297,16 @@ struct mv88e6xxx_region_priv {
enum mv88e6xxx_region_id id;
};
+struct mv88e6xxx_mst {
+ struct list_head node;
+
+ refcount_t refcnt;
+ struct net_device *br;
+ u16 msti;
+
+ struct mv88e6xxx_stu_entry stu;
+};
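Each bridge MSTI is cached once per chip and shared by reference
counting. A minimal lookup sketch, assuming a hypothetical helper name
(the real allocation and SID-assignment logic is not part of this hunk):

static struct mv88e6xxx_mst *mv88e6xxx_mst_find(struct mv88e6xxx_chip *chip,
						struct net_device *br,
						u16 msti)
{
	struct mv88e6xxx_mst *mst;

	/* chip->msts is the list initialised in mv88e6xxx_alloc_chip() */
	list_for_each_entry(mst, &chip->msts, node)
		if (mst->br == br && mst->msti == msti)
			return mst;

	return NULL;
}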
+
struct mv88e6xxx_chip {
const struct mv88e6xxx_info *info;
@@ -387,11 +407,15 @@ struct mv88e6xxx_chip {
/* devlink regions */
struct devlink_region *regions[_MV88E6XXX_REGION_MAX];
+
+ /* Bridge MST to SID mappings */
+ struct list_head msts;
};
struct mv88e6xxx_bus_ops {
int (*read)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
+ int (*init)(struct mv88e6xxx_chip *chip);
};
struct mv88e6xxx_mdio_bus {
@@ -586,6 +610,10 @@ struct mv88e6xxx_ops {
void (*serdes_get_regs)(struct mv88e6xxx_chip *chip, int port,
void *_p);
+ /* SERDES SGMII/Fiber Output Amplitude */
+ int (*serdes_set_tx_amplitude)(struct mv88e6xxx_chip *chip, int port,
+ int val);
+
/* Address Translation Unit operations */
int (*atu_get_hash)(struct mv88e6xxx_chip *chip, u8 *hash);
int (*atu_set_hash)(struct mv88e6xxx_chip *chip, u8 hash);
@@ -596,6 +624,12 @@ struct mv88e6xxx_ops {
int (*vtu_loadpurge)(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
+ /* Spanning Tree Unit operations */
+ int (*stu_getnext)(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+ int (*stu_loadpurge)(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+
/* GPIO operations */
const struct mv88e6xxx_gpio_ops *gpio_ops;
@@ -609,9 +643,8 @@ struct mv88e6xxx_ops {
const struct mv88e6xxx_ptp_ops *ptp_ops;
/* Phylink */
- void (*phylink_validate)(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state);
+ void (*phylink_get_caps)(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config);
/* Max Frame Size */
int (*set_max_frame_size)(struct mv88e6xxx_chip *chip, int mtu);
@@ -695,6 +728,13 @@ struct mv88e6xxx_hw_stat {
int type;
};
+static inline bool mv88e6xxx_has_stu(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->max_sid > 0 &&
+ chip->info->ops->stu_loadpurge &&
+ chip->info->ops->stu_getnext;
+}
+
static inline bool mv88e6xxx_has_pvt(struct mv88e6xxx_chip *chip)
{
return chip->info->pvt;
@@ -725,6 +765,11 @@ static inline unsigned int mv88e6xxx_max_vid(struct mv88e6xxx_chip *chip)
return chip->info->max_vid;
}
+static inline unsigned int mv88e6xxx_max_sid(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->max_sid;
+}
+
static inline u16 mv88e6xxx_port_mask(struct mv88e6xxx_chip *chip)
{
return GENMASK((s32)mv88e6xxx_num_ports(chip) - 1, 0);
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
index 381068395c63..1266eabee086 100644
--- a/drivers/net/dsa/mv88e6xxx/devlink.c
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -503,6 +503,85 @@ static int mv88e6xxx_region_vtu_snapshot(struct devlink *dl,
return 0;
}
+/**
+ * struct mv88e6xxx_devlink_stu_entry - Devlink STU entry
+ * @sid: Global1/3: SID, the spanning tree instance identifier.
+ * @vid: Global1/6: Valid bit.
+ * @data: Global1/7-9: Membership data and priority override.
+ * @resvd: Reserved. In case we forgot something.
+ *
+ * The STU entry format varies between chipset generations. Peridot
+ * and Amethyst packs the STU data into Global1/7-8. Older silicon
+ * spreads the information across all three VTU data registers -
+ * inheriting the layout of even older hardware that had no STU at
+ * all. Since this is a low-level debug interface, copy all data
+ * verbatim and defer parsing to the consumer.
+ */
+struct mv88e6xxx_devlink_stu_entry {
+ u16 sid;
+ u16 vid;
+ u16 data[3];
+ u16 resvd;
+};
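A consumer of the snapshot must know the generation-specific layout. For
the pre-Peridot format, the per-port state can be unpacked as below (a
sketch; the nibble layout mirrors mv88e6185_g1_vtu_data_read() in
global1_vtu.c further down):

/* Hypothetical consumer-side decode: each data register packs four
 * ports, 4 bits per port, with the 2-bit PortState in bits [3:2] of
 * each nibble.
 */
static u8 stu_entry_port_state(const struct mv88e6xxx_devlink_stu_entry *entry,
			       int port)
{
	unsigned int shift = (port % 4) * 4 + 2;

	return (entry->data[port / 4] >> shift) & 0x3;
}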
+
+static int mv88e6xxx_region_stu_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct mv88e6xxx_devlink_stu_entry *table, *entry;
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_stu_entry stu;
+ int err;
+
+ table = kcalloc(mv88e6xxx_max_sid(chip) + 1,
+ sizeof(struct mv88e6xxx_devlink_stu_entry),
+ GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ entry = table;
+ stu.sid = mv88e6xxx_max_sid(chip);
+ stu.valid = false;
+
+ mv88e6xxx_reg_lock(chip);
+
+ do {
+ err = mv88e6xxx_g1_stu_getnext(chip, &stu);
+ if (err)
+ break;
+
+ if (!stu.valid)
+ break;
+
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_SID,
+ &entry->sid);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_VID,
+ &entry->vid);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA1,
+ &entry->data[0]);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA2,
+ &entry->data[1]);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA3,
+ &entry->data[2]);
+ if (err)
+ break;
+
+ entry++;
+ } while (stu.sid < mv88e6xxx_max_sid(chip));
+
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err) {
+ kfree(table);
+ return err;
+ }
+
+ *data = (u8 *)table;
+ return 0;
+}
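Once the region is registered, userspace can sample it with the usual
devlink flow, e.g. "devlink region new <dev>/stu snapshot id 1" followed
by "devlink region dump <dev>/stu snapshot 1" (illustrative; the exact
device handle depends on the bus).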
+
static int mv88e6xxx_region_pvt_snapshot(struct devlink *dl,
const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack,
@@ -605,6 +684,12 @@ static struct devlink_region_ops mv88e6xxx_region_vtu_ops = {
.destructor = kfree,
};
+static struct devlink_region_ops mv88e6xxx_region_stu_ops = {
+ .name = "stu",
+ .snapshot = mv88e6xxx_region_stu_snapshot,
+ .destructor = kfree,
+};
+
static struct devlink_region_ops mv88e6xxx_region_pvt_ops = {
.name = "pvt",
.snapshot = mv88e6xxx_region_pvt_snapshot,
@@ -640,6 +725,11 @@ static struct mv88e6xxx_region mv88e6xxx_regions[] = {
.ops = &mv88e6xxx_region_vtu_ops
/* calculated at runtime */
},
+ [MV88E6XXX_REGION_STU] = {
+ .ops = &mv88e6xxx_region_stu_ops,
+ .cond = mv88e6xxx_has_stu,
+ /* calculated at runtime */
+ },
[MV88E6XXX_REGION_PVT] = {
.ops = &mv88e6xxx_region_pvt_ops,
.size = MV88E6XXX_MAX_PVT_ENTRIES * sizeof(u16),
@@ -706,6 +796,10 @@ int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds)
size = (mv88e6xxx_max_vid(chip) + 1) *
sizeof(struct mv88e6xxx_devlink_vtu_entry);
break;
+ case MV88E6XXX_REGION_STU:
+ size = (mv88e6xxx_max_sid(chip) + 1) *
+ sizeof(struct mv88e6xxx_devlink_stu_entry);
+ break;
}
region = dsa_devlink_region_create(ds, ops, 1, size);
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 4f3dbb015f77..65958b2a0d3a 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -46,6 +46,7 @@
/* Offset 0x02: VTU FID Register */
#define MV88E6352_G1_VTU_FID 0x02
+#define MV88E6352_G1_VTU_FID_VID_POLICY 0x1000
#define MV88E6352_G1_VTU_FID_MASK 0x0fff
/* Offset 0x03: VTU SID Register */
@@ -347,6 +348,16 @@ int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6352_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6352_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6390_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6390_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid);
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index ae12c981923e..38e18f5811bf 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -27,7 +27,7 @@ static int mv88e6xxx_g1_vtu_fid_read(struct mv88e6xxx_chip *chip,
return err;
entry->fid = val & MV88E6352_G1_VTU_FID_MASK;
-
+ entry->policy = !!(val & MV88E6352_G1_VTU_FID_VID_POLICY);
return 0;
}
@@ -36,13 +36,15 @@ static int mv88e6xxx_g1_vtu_fid_write(struct mv88e6xxx_chip *chip,
{
u16 val = entry->fid & MV88E6352_G1_VTU_FID_MASK;
+ if (entry->policy)
+ val |= MV88E6352_G1_VTU_FID_VID_POLICY;
+
return mv88e6xxx_g1_write(chip, MV88E6352_G1_VTU_FID, val);
}
/* Offset 0x03: VTU SID Register */
-static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip, u8 *sid)
{
u16 val;
int err;
@@ -51,15 +53,14 @@ static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip,
if (err)
return err;
- entry->sid = val & MV88E6352_G1_VTU_SID_MASK;
+ *sid = val & MV88E6352_G1_VTU_SID_MASK;
return 0;
}
-static int mv88e6xxx_g1_vtu_sid_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+static int mv88e6xxx_g1_vtu_sid_write(struct mv88e6xxx_chip *chip, u8 sid)
{
- u16 val = entry->sid & MV88E6352_G1_VTU_SID_MASK;
+ u16 val = sid & MV88E6352_G1_VTU_SID_MASK;
return mv88e6xxx_g1_write(chip, MV88E6352_G1_VTU_SID, val);
}
@@ -88,7 +89,7 @@ static int mv88e6xxx_g1_vtu_op(struct mv88e6xxx_chip *chip, u16 op)
/* Offset 0x06: VTU VID Register */
static int mv88e6xxx_g1_vtu_vid_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+ bool *valid, u16 *vid)
{
u16 val;
int err;
@@ -97,25 +98,28 @@ static int mv88e6xxx_g1_vtu_vid_read(struct mv88e6xxx_chip *chip,
if (err)
return err;
- entry->vid = val & 0xfff;
+ if (vid) {
+ *vid = val & 0xfff;
- if (val & MV88E6390_G1_VTU_VID_PAGE)
- entry->vid |= 0x1000;
+ if (val & MV88E6390_G1_VTU_VID_PAGE)
+ *vid |= 0x1000;
+ }
- entry->valid = !!(val & MV88E6XXX_G1_VTU_VID_VALID);
+ if (valid)
+ *valid = !!(val & MV88E6XXX_G1_VTU_VID_VALID);
return 0;
}
static int mv88e6xxx_g1_vtu_vid_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+ bool valid, u16 vid)
{
- u16 val = entry->vid & 0xfff;
+ u16 val = vid & 0xfff;
- if (entry->vid & 0x1000)
+ if (vid & 0x1000)
val |= MV88E6390_G1_VTU_VID_PAGE;
- if (entry->valid)
+ if (valid)
val |= MV88E6XXX_G1_VTU_VID_VALID;
return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_VTU_VID, val);
@@ -144,7 +148,7 @@ static int mv88e6185_g1_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
}
static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+ u8 *member, u8 *state)
{
u16 regs[3];
int err;
@@ -157,36 +161,20 @@ static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
/* Extract MemberTag data */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
unsigned int member_offset = (i % 4) * 4;
+ unsigned int state_offset = member_offset + 2;
- entry->member[i] = (regs[i / 4] >> member_offset) & 0x3;
- }
-
- return 0;
-}
-
-static int mv88e6185_g1_stu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
-{
- u16 regs[3];
- int err;
- int i;
-
- err = mv88e6185_g1_vtu_stu_data_read(chip, regs);
- if (err)
- return err;
-
- /* Extract PortState data */
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- unsigned int state_offset = (i % 4) * 4 + 2;
+ if (member)
+ member[i] = (regs[i / 4] >> member_offset) & 0x3;
- entry->state[i] = (regs[i / 4] >> state_offset) & 0x3;
+ if (state)
+ state[i] = (regs[i / 4] >> state_offset) & 0x3;
}
return 0;
}
static int mv88e6185_g1_vtu_data_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+ u8 *member, u8 *state)
{
u16 regs[3] = { 0 };
int i;
@@ -196,8 +184,11 @@ static int mv88e6185_g1_vtu_data_write(struct mv88e6xxx_chip *chip,
unsigned int member_offset = (i % 4) * 4;
unsigned int state_offset = member_offset + 2;
- regs[i / 4] |= (entry->member[i] & 0x3) << member_offset;
- regs[i / 4] |= (entry->state[i] & 0x3) << state_offset;
+ if (member)
+ regs[i / 4] |= (member[i] & 0x3) << member_offset;
+
+ if (state)
+ regs[i / 4] |= (state[i] & 0x3) << state_offset;
}
/* Write all 3 VTU/STU Data registers */
@@ -265,48 +256,6 @@ static int mv88e6390_g1_vtu_data_write(struct mv88e6xxx_chip *chip, u8 *data)
/* VLAN Translation Unit Operations */
-static int mv88e6xxx_g1_vtu_stu_getnext(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
-{
- int err;
-
- err = mv88e6xxx_g1_vtu_sid_write(chip, entry);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_GET_NEXT);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_vtu_sid_read(chip, entry);
- if (err)
- return err;
-
- return mv88e6xxx_g1_vtu_vid_read(chip, entry);
-}
-
-static int mv88e6xxx_g1_vtu_stu_get(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *vtu)
-{
- struct mv88e6xxx_vtu_entry stu;
- int err;
-
- err = mv88e6xxx_g1_vtu_sid_read(chip, vtu);
- if (err)
- return err;
-
- stu.sid = vtu->sid - 1;
-
- err = mv88e6xxx_g1_vtu_stu_getnext(chip, &stu);
- if (err)
- return err;
-
- if (stu.sid != vtu->sid || !stu.valid)
- return -EINVAL;
-
- return 0;
-}
-
int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
@@ -324,7 +273,7 @@ int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
* write the VID only once, when the entry is given as invalid.
*/
if (!entry->valid) {
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, false, entry->vid);
if (err)
return err;
}
@@ -333,7 +282,7 @@ int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
if (err)
return err;
- return mv88e6xxx_g1_vtu_vid_read(chip, entry);
+ return mv88e6xxx_g1_vtu_vid_read(chip, &entry->valid, &entry->vid);
}
int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
@@ -347,11 +296,7 @@ int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return err;
if (entry->valid) {
- err = mv88e6185_g1_vtu_data_read(chip, entry);
- if (err)
- return err;
-
- err = mv88e6185_g1_stu_data_read(chip, entry);
+ err = mv88e6185_g1_vtu_data_read(chip, entry->member, entry->state);
if (err)
return err;
@@ -381,7 +326,7 @@ int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return err;
if (entry->valid) {
- err = mv88e6185_g1_vtu_data_read(chip, entry);
+ err = mv88e6185_g1_vtu_data_read(chip, entry->member, NULL);
if (err)
return err;
@@ -389,12 +334,7 @@ int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
if (err)
return err;
- /* Fetch VLAN PortState data from the STU */
- err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
- if (err)
- return err;
-
- err = mv88e6185_g1_stu_data_read(chip, entry);
+ err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
if (err)
return err;
}
@@ -417,16 +357,11 @@ int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
if (err)
return err;
- /* Fetch VLAN PortState data from the STU */
- err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
- if (err)
- return err;
-
- err = mv88e6390_g1_vtu_data_read(chip, entry->state);
+ err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
if (err)
return err;
- err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
+ err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
if (err)
return err;
}
@@ -444,12 +379,12 @@ int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
if (err)
return err;
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
if (err)
return err;
if (entry->valid) {
- err = mv88e6185_g1_vtu_data_write(chip, entry);
+ err = mv88e6185_g1_vtu_data_write(chip, entry->member, entry->state);
if (err)
return err;
@@ -476,27 +411,21 @@ int mv88e6352_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
if (err)
return err;
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
if (err)
return err;
if (entry->valid) {
- /* Write MemberTag and PortState data */
- err = mv88e6185_g1_vtu_data_write(chip, entry);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_vtu_sid_write(chip, entry);
+ /* Write MemberTag data */
+ err = mv88e6185_g1_vtu_data_write(chip, entry->member, NULL);
if (err)
return err;
- /* Load STU entry */
- err = mv88e6xxx_g1_vtu_op(chip,
- MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
+ err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
if (err)
return err;
- err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
if (err)
return err;
}
@@ -514,41 +443,113 @@ int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
if (err)
return err;
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
if (err)
return err;
if (entry->valid) {
- /* Write PortState data */
- err = mv88e6390_g1_vtu_data_write(chip, entry->state);
+ /* Write MemberTag data */
+ err = mv88e6390_g1_vtu_data_write(chip, entry->member);
if (err)
return err;
- err = mv88e6xxx_g1_vtu_sid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
if (err)
return err;
- /* Load STU entry */
- err = mv88e6xxx_g1_vtu_op(chip,
- MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
if (err)
return err;
+ }
- /* Write MemberTag data */
- err = mv88e6390_g1_vtu_data_write(chip, entry->member);
+ /* Load/Purge VTU entry */
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE);
+}
+
+int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_FLUSH_ALL);
+}
+
+/* Spanning Tree Unit Operations */
+
+int mv88e6xxx_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ /* To get the next higher active SID, the STU GetNext operation can be
+ * started again without setting the SID register, since it already
+ * contains the last SID.
+ *
+ * To save a few hardware accesses and abstract this to the caller,
+ * write the SID only once, when the entry is given as invalid.
+ */
+ if (!entry->valid) {
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
if (err)
return err;
+ }
- err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_GET_NEXT);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_vid_read(chip, &entry->valid, NULL);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
if (err)
return err;
}
- /* Load/Purge VTU entry */
- return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE);
+ return 0;
}
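The wrap-around convention above implies the following iteration
pattern; this sketch (caller holds the register lock) is essentially
what the devlink STU snapshot in devlink.c does:

	struct mv88e6xxx_stu_entry entry = {
		.sid = mv88e6xxx_max_sid(chip),
		.valid = false,
	};
	int err;

	do {
		err = mv88e6xxx_g1_stu_getnext(chip, &entry);
		if (err)
			return err;
		if (!entry.valid)
			break;
		/* entry.sid now holds the next active SID */
	} while (entry.sid < mv88e6xxx_max_sid(chip));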
-int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
+int mv88e6352_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_stu_getnext(chip, entry);
+ if (err)
+ return err;
+
+ if (!entry->valid)
+ return 0;
+
+ return mv88e6185_g1_vtu_data_read(chip, NULL, entry->state);
+}
+
+int mv88e6390_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_stu_getnext(chip, entry);
+ if (err)
+ return err;
+
+ if (!entry->valid)
+ return 0;
+
+ return mv88e6390_g1_vtu_data_read(chip, entry->state);
+}
+
+int mv88e6352_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
{
int err;
@@ -556,16 +557,59 @@ int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
if (err)
return err;
- return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_FLUSH_ALL);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, 0);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6185_g1_vtu_data_write(chip, NULL, entry->state);
+ if (err)
+ return err;
+ }
+
+ /* Load/Purge STU entry */
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
}
+int mv88e6390_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, 0);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6390_g1_vtu_data_write(chip, entry->state);
+ if (err)
+ return err;
+ }
+
+ /* Load/Purge STU entry */
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
+}
+
+/* VTU Violation Management */
+
static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
{
struct mv88e6xxx_chip *chip = dev_id;
- struct mv88e6xxx_vtu_entry entry;
+ u16 val, vid;
int spid;
int err;
- u16 val;
mv88e6xxx_reg_lock(chip);
@@ -577,7 +621,7 @@ static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
if (err)
goto out;
- err = mv88e6xxx_g1_vtu_vid_read(chip, &entry);
+ err = mv88e6xxx_g1_vtu_vid_read(chip, NULL, &vid);
if (err)
goto out;
@@ -585,13 +629,13 @@ static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
if (val & MV88E6XXX_G1_VTU_OP_MEMBER_VIOLATION) {
dev_err_ratelimited(chip->dev, "VTU member violation for vid %d, source port %d\n",
- entry.vid, spid);
+ vid, spid);
chip->ports[spid].vtu_member_violation++;
}
if (val & MV88E6XXX_G1_VTU_OP_MISS_VIOLATION) {
dev_dbg_ratelimited(chip->dev, "VTU miss violation for vid %d, source port %d\n",
- entry.vid, spid);
+ vid, spid);
chip->ports[spid].vtu_miss_violation++;
}
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index f3e27573a386..807aeaad9830 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -299,6 +299,8 @@
#define MV88E6352_G2_SCRATCH_CONFIG_DATA1_NO_CPU BIT(2)
#define MV88E6352_G2_SCRATCH_CONFIG_DATA2 0x72
#define MV88E6352_G2_SCRATCH_CONFIG_DATA2_P0_MODE_MASK 0x3
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA3 0x73
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA3_S_SEL BIT(1)
#define MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO 0
#define MV88E6352_G2_SCRATCH_GPIO_PCTL_TRIG 1
@@ -370,6 +372,7 @@ extern const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops;
int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
bool external);
+int mv88e6352_g2_scratch_port_has_serdes(struct mv88e6xxx_chip *chip, int port);
int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin);
int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats);
diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
index eda710062933..a9d6e40321a2 100644
--- a/drivers/net/dsa/mv88e6xxx/global2_scratch.c
+++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
@@ -289,3 +289,31 @@ int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g2_scratch_write(chip, misc_cfg, val);
}
+
+/**
+ * mv88e6352_g2_scratch_port_has_serdes - indicate if a port can have a serdes
+ * @chip: chip private data
+ * @port: port number to check for serdes
+ *
+ * Indicates whether the port may have a serdes attached according to the
+ * pin strapping. Returns negative error number, 0 if the port is not
+ * configured to have a serdes, and 1 if the port is configured to have a
+ * serdes attached.
+ */
+int mv88e6352_g2_scratch_port_has_serdes(struct mv88e6xxx_chip *chip, int port)
+{
+ u8 config3, p;
+ int err;
+
+ err = mv88e6xxx_g2_scratch_read(chip, MV88E6352_G2_SCRATCH_CONFIG_DATA3,
+ &config3);
+ if (err)
+ return err;
+
+ if (config3 & MV88E6352_G2_SCRATCH_CONFIG_DATA3_S_SEL)
+ p = 5;
+ else
+ p = 4;
+
+ return port == p;
+}
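Note the tri-state contract: the serdes helpers below collapse the
error and no-serdes cases into a single "if (err <= 0) return err;"
check.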
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index 389f8a6ec0ab..331b4ca089ff 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -301,7 +301,7 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
shwt->hwtstamp = ns_to_ktime(ns);
status &= ~MV88E6XXX_PTP_TS_VALID;
}
- netif_rx_ni(skb);
+ netif_rx(skb);
}
}
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index ab41619a809b..795b3128768f 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -550,6 +550,9 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
mode = PHY_INTERFACE_MODE_1000BASEX;
switch (mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ cmode = MV88E6XXX_PORT_STS_CMODE_RMII;
+ break;
case PHY_INTERFACE_MODE_1000BASEX:
cmode = MV88E6XXX_PORT_STS_CMODE_1000BASEX;
break;
@@ -610,6 +613,8 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
chip->ports[port].cmode = cmode;
lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane == -ENODEV)
+ return 0;
if (lane < 0)
return lane;
@@ -1234,6 +1239,35 @@ int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port,
return err;
}
+int mv88e6xxx_port_set_lock(struct mv88e6xxx_chip *chip, int port,
+ bool locked)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_CTL0_SA_FILT_MASK;
+ if (locked)
+ reg |= MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_LOCK;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_ASSOC_VECTOR_LOCKED_PORT;
+ if (locked)
+ reg |= MV88E6XXX_PORT_ASSOC_VECTOR_LOCKED_PORT;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, reg);
+}
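With this plumbed into port_bridge_flags above, a locked port can be
requested from userspace via iproute2, e.g. "bridge link set dev <port>
locked on"; judging by the register names, the switch then drops frames
whose source address misses the ATU (drop-on-lock) and flags the port as
locked in its association vector so addresses are no longer learned
automatically.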
+
int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
u16 mode)
{
@@ -1278,7 +1312,7 @@ int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, new);
}
-int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port)
+int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port, bool map)
{
u16 reg;
int err;
@@ -1287,7 +1321,10 @@ int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
- reg |= MV88E6XXX_PORT_CTL2_MAP_DA;
+ if (map)
+ reg |= MV88E6XXX_PORT_CTL2_MAP_DA;
+ else
+ reg &= ~MV88E6XXX_PORT_CTL2_MAP_DA;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
}
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 03382b66f800..e0a705d82019 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -42,6 +42,11 @@
#define MV88E6XXX_PORT_STS_TX_PAUSED 0x0020
#define MV88E6XXX_PORT_STS_FLOW_CTL 0x0010
#define MV88E6XXX_PORT_STS_CMODE_MASK 0x000f
+#define MV88E6XXX_PORT_STS_CMODE_MII_PHY 0x0001
+#define MV88E6XXX_PORT_STS_CMODE_MII 0x0002
+#define MV88E6XXX_PORT_STS_CMODE_GMII 0x0003
+#define MV88E6XXX_PORT_STS_CMODE_RMII_PHY 0x0004
+#define MV88E6XXX_PORT_STS_CMODE_RMII 0x0005
#define MV88E6XXX_PORT_STS_CMODE_RGMII 0x0007
#define MV88E6XXX_PORT_STS_CMODE_100BASEX 0x0008
#define MV88E6XXX_PORT_STS_CMODE_1000BASEX 0x0009
@@ -142,7 +147,11 @@
/* Offset 0x04: Port Control Register */
#define MV88E6XXX_PORT_CTL0 0x04
#define MV88E6XXX_PORT_CTL0_USE_CORE_TAG 0x8000
-#define MV88E6XXX_PORT_CTL0_DROP_ON_LOCK 0x4000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_MASK 0xc000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DISABLED 0x0000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_LOCK 0x4000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_UNLOCK 0x8000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_CPU 0xc000
#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_MASK 0x3000
#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNMODIFIED 0x0000
#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNTAGGED 0x1000
@@ -365,6 +374,9 @@ int mv88e6xxx_port_set_fid(struct mv88e6xxx_chip *chip, int port, u16 fid);
int mv88e6xxx_port_get_pvid(struct mv88e6xxx_chip *chip, int port, u16 *pvid);
int mv88e6xxx_port_set_pvid(struct mv88e6xxx_chip *chip, int port, u16 pvid);
+int mv88e6xxx_port_set_lock(struct mv88e6xxx_chip *chip, int port,
+ bool locked);
+
int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
u16 mode);
int mv88e6095_port_tag_remap(struct mv88e6xxx_chip *chip, int port);
@@ -425,7 +437,7 @@ int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
bool drop_untagged);
-int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
+int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port, bool map);
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int upstream_port);
int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port,
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 2b05ead515cd..7b37d45bc9fb 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -272,14 +272,6 @@ int mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
return lane;
}
-static bool mv88e6352_port_has_serdes(struct mv88e6xxx_chip *chip, int port)
-{
- if (mv88e6xxx_serdes_get_lane(chip, port) >= 0)
- return true;
-
- return false;
-}
-
struct mv88e6352_serdes_hw_stat {
char string[ETH_GSTRING_LEN];
int sizeof_stat;
@@ -293,20 +285,24 @@ static struct mv88e6352_serdes_hw_stat mv88e6352_serdes_hw_stats[] = {
int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
{
- if (mv88e6352_port_has_serdes(chip, port))
- return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
+ int err;
- return 0;
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
+
+ return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
}
int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
int port, uint8_t *data)
{
struct mv88e6352_serdes_hw_stat *stat;
- int i;
+ int err, i;
- if (!mv88e6352_port_has_serdes(chip, port))
- return 0;
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_hw_stats); i++) {
stat = &mv88e6352_serdes_hw_stats[i];
@@ -348,11 +344,12 @@ int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
{
struct mv88e6xxx_port *mv88e6xxx_port = &chip->ports[port];
struct mv88e6352_serdes_hw_stat *stat;
+ int i, err;
u64 value;
- int i;
- if (!mv88e6352_port_has_serdes(chip, port))
- return 0;
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
BUILD_BUG_ON(ARRAY_SIZE(mv88e6352_serdes_hw_stats) >
ARRAY_SIZE(mv88e6xxx_port->serdes_stats));
@@ -419,8 +416,13 @@ unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
int mv88e6352_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port)
{
- if (!mv88e6352_port_has_serdes(chip, port))
- return 0;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ mv88e6xxx_reg_unlock(chip);
+ if (err <= 0)
+ return err;
return 32 * sizeof(u16);
}
@@ -432,7 +434,8 @@ void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
int err;
int i;
- if (!mv88e6352_port_has_serdes(chip, port))
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
return;
for (i = 0 ; i < 32; i++) {
@@ -1310,6 +1313,44 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
}
}
+static const int mv88e6352_serdes_p2p_to_reg[] = {
+ /* Index of value in microvolts corresponds to the register value */
+ 14000, 112000, 210000, 308000, 406000, 504000, 602000, 700000,
+};
+
+int mv88e6352_serdes_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port,
+ int val)
+{
+ bool found = false;
+ u16 ctrl, reg;
+ int err;
+ int i;
+
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_p2p_to_reg); ++i) {
+ if (mv88e6352_serdes_p2p_to_reg[i] == val) {
+ reg = i;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ err = mv88e6352_serdes_read(chip, MV88E6352_SERDES_SPEC_CTRL2, &ctrl);
+ if (err)
+ return err;
+
+ ctrl &= ~MV88E6352_SERDES_OUT_AMP_MASK;
+ ctrl |= reg;
+
+ return mv88e6352_serdes_write(chip, MV88E6352_SERDES_SPEC_CTRL2, ctrl);
+}
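The requested amplitude is matched exactly against the table, never
rounded. A hypothetical caller (register lock held; in practice the
value would presumably arrive via a firmware property such as
tx-p2p-microvolt, which is an assumption, since the wiring is outside
this diff):

	/* Request a 112 mV peak-to-peak output amplitude on port 5;
	 * only the eight values in mv88e6352_serdes_p2p_to_reg[] are
	 * accepted, anything else yields -EINVAL.
	 */
	err = mv88e6352_serdes_set_tx_amplitude(chip, 5, 112000);
	if (err < 0)
		dev_warn(chip->dev, "failed to set tx amplitude: %d\n", err);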
+
static int mv88e6393x_serdes_power_lane(struct mv88e6xxx_chip *chip, int lane,
bool on)
{
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index 8dd8ed225b45..29bb4e91e2f6 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -27,6 +27,8 @@
#define MV88E6352_SERDES_INT_FIBRE_ENERGY BIT(4)
#define MV88E6352_SERDES_INT_STATUS 0x13
+#define MV88E6352_SERDES_SPEC_CTRL2 0x1a
+#define MV88E6352_SERDES_OUT_AMP_MASK 0x0007
#define MV88E6341_PORT5_LANE 0x15
@@ -176,6 +178,9 @@ void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
int mv88e6390_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port);
void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
+int mv88e6352_serdes_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port,
+ int val);
+
/* Return the (first) SERDES lane address a port is using, -errno otherwise. */
static inline int mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip,
int port)
diff --git a/drivers/net/dsa/mv88e6xxx/smi.c b/drivers/net/dsa/mv88e6xxx/smi.c
index 282fe08db050..a990271b7482 100644
--- a/drivers/net/dsa/mv88e6xxx/smi.c
+++ b/drivers/net/dsa/mv88e6xxx/smi.c
@@ -55,11 +55,15 @@ static int mv88e6xxx_smi_direct_write(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_smi_direct_wait(struct mv88e6xxx_chip *chip,
int dev, int reg, int bit, int val)
{
+ const unsigned long timeout = jiffies + msecs_to_jiffies(50);
u16 data;
int err;
int i;
- for (i = 0; i < 16; i++) {
+ /* Even if the initial poll takes longer than 50ms, always do
+ * at least one more attempt.
+ */
+ for (i = 0; time_before(jiffies, timeout) || (i < 2); i++) {
err = mv88e6xxx_smi_direct_read(chip, dev, reg, &data);
if (err)
return err;
@@ -67,7 +71,10 @@ static int mv88e6xxx_smi_direct_wait(struct mv88e6xxx_chip *chip,
if (!!(data & BIT(bit)) == !!val)
return 0;
- usleep_range(1000, 2000);
+ if (i < 2)
+ cpu_relax();
+ else
+ usleep_range(1000, 2000);
}
return -ETIMEDOUT;
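The first two polls deliberately spin with cpu_relax() so that
operations completing within a few register accesses never pay the
1-2 ms sleep; later iterations back off with usleep_range() until the
50 ms deadline expires.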
@@ -104,11 +111,6 @@ static int mv88e6xxx_smi_indirect_read(struct mv88e6xxx_chip *chip,
{
int err;
- err = mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
- MV88E6XXX_SMI_CMD, 15, 0);
- if (err)
- return err;
-
err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
MV88E6XXX_SMI_CMD,
MV88E6XXX_SMI_CMD_BUSY |
@@ -132,11 +134,6 @@ static int mv88e6xxx_smi_indirect_write(struct mv88e6xxx_chip *chip,
{
int err;
- err = mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
- MV88E6XXX_SMI_CMD, 15, 0);
- if (err)
- return err;
-
err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
MV88E6XXX_SMI_DATA, data);
if (err)
@@ -155,9 +152,20 @@ static int mv88e6xxx_smi_indirect_write(struct mv88e6xxx_chip *chip,
MV88E6XXX_SMI_CMD, 15, 0);
}
+static int mv88e6xxx_smi_indirect_init(struct mv88e6xxx_chip *chip)
+{
+ /* Ensure that the chip starts out in the ready state. As both
+ * reads and writes always ensure this on return, they can
+ * safely depend on the chip not being busy on entry.
+ */
+ return mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD, 15, 0);
+}
+
static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_indirect_ops = {
.read = mv88e6xxx_smi_indirect_read,
.write = mv88e6xxx_smi_indirect_write,
+ .init = mv88e6xxx_smi_indirect_init,
};
int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
@@ -175,5 +183,8 @@ int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
chip->bus = bus;
chip->sw_addr = sw_addr;
+ if (chip->smi_ops->init)
+ return chip->smi_ops->init(chip);
+
return 0;
}
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 9957772201d5..413b0006e9a2 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -25,21 +25,151 @@
#include <net/dsa.h>
#include "felix.h"
-static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
- bool pvid, bool untagged)
+/* Translate the DSA database API into the ocelot switch library API,
+ * which uses VID 0 for all ports that aren't part of a bridge,
+ * and expects the bridge_dev to be NULL in that case.
+ */
+static struct net_device *felix_classify_db(struct dsa_db db)
+{
+ switch (db.type) {
+ case DSA_DB_PORT:
+ case DSA_DB_LAG:
+ return NULL;
+ case DSA_DB_BRIDGE:
+ return db.bridge.dev;
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+}
+
+/* We are called before felix_npi_port_init(), so ocelot->npi is -1. */
+static int felix_migrate_fdbs_to_npi_port(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct ocelot *ocelot = ds->priv;
+ int cpu = ocelot->num_phys_ports;
+ int err;
+
+ err = ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev);
+ if (err)
+ return err;
+
+ return ocelot_fdb_add(ocelot, cpu, addr, vid, bridge_dev);
+}
+
+static int felix_migrate_mdbs_to_npi_port(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct switchdev_obj_port_mdb mdb;
+ struct ocelot *ocelot = ds->priv;
+ int cpu = ocelot->num_phys_ports;
+ int err;
+
+ memset(&mdb, 0, sizeof(mdb));
+ ether_addr_copy(mdb.addr, addr);
+ mdb.vid = vid;
+
+ err = ocelot_port_mdb_del(ocelot, port, &mdb, bridge_dev);
+ if (err)
+ return err;
+
+ return ocelot_port_mdb_add(ocelot, cpu, &mdb, bridge_dev);
+}
+
+static void felix_migrate_pgid_bit(struct dsa_switch *ds, int from, int to,
+ int pgid)
+{
+ struct ocelot *ocelot = ds->priv;
+ bool on;
+ u32 val;
+
+ val = ocelot_read_rix(ocelot, ANA_PGID_PGID, pgid);
+ on = !!(val & BIT(from));
+ val &= ~BIT(from);
+ if (on)
+ val |= BIT(to);
+ else
+ val &= ~BIT(to);
+
+ ocelot_write_rix(ocelot, val, ANA_PGID_PGID, pgid);
+}
+
+static void felix_migrate_flood_to_npi_port(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ felix_migrate_pgid_bit(ds, port, ocelot->num_phys_ports, PGID_UC);
+ felix_migrate_pgid_bit(ds, port, ocelot->num_phys_ports, PGID_MC);
+ felix_migrate_pgid_bit(ds, port, ocelot->num_phys_ports, PGID_BC);
+}
+
+static void
+felix_migrate_flood_to_tag_8021q_port(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_UC);
+ felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_MC);
+ felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_BC);
+}
+
+/* ocelot->npi was already set to -1 by felix_npi_port_deinit, so
+ * ocelot_fdb_add() will not redirect FDB entries towards the
+ * CPU port module here, which is what we want.
+ */
+static int
+felix_migrate_fdbs_to_tag_8021q_port(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct ocelot *ocelot = ds->priv;
+ int cpu = ocelot->num_phys_ports;
+ int err;
+
+ err = ocelot_fdb_del(ocelot, cpu, addr, vid, bridge_dev);
+ if (err)
+ return err;
+
+ return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev);
+}
+
+static int
+felix_migrate_mdbs_to_tag_8021q_port(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct switchdev_obj_port_mdb mdb;
+ struct ocelot *ocelot = ds->priv;
+ int cpu = ocelot->num_phys_ports;
+ int err;
+
+ memset(&mdb, 0, sizeof(mdb));
+ ether_addr_copy(mdb.addr, addr);
+ mdb.vid = vid;
+
+ err = ocelot_port_mdb_del(ocelot, cpu, &mdb, bridge_dev);
+ if (err)
+ return err;
+
+ return ocelot_port_mdb_add(ocelot, port, &mdb, bridge_dev);
+}
+
+/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that
+ * the tagger can perform RX source port identification.
+ */
+static int felix_tag_8021q_vlan_add_rx(struct felix *felix, int port, u16 vid)
{
struct ocelot_vcap_filter *outer_tagging_rule;
struct ocelot *ocelot = &felix->ocelot;
struct dsa_switch *ds = felix->ds;
int key_length, upstream, err;
- /* We don't need to install the rxvlan into the other ports' filtering
- * tables, because we're just pushing the rxvlan when sending towards
- * the CPU
- */
- if (!pvid)
- return 0;
-
key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;
upstream = dsa_upstream_port(ds, port);
@@ -50,7 +180,7 @@ static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
outer_tagging_rule->prio = 1;
- outer_tagging_rule->id.cookie = port;
+ outer_tagging_rule->id.cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port);
outer_tagging_rule->id.tc_offload = false;
outer_tagging_rule->block_id = VCAP_ES0;
outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -71,21 +201,32 @@ static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
return err;
}
-static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
- bool pvid, bool untagged)
+static int felix_tag_8021q_vlan_del_rx(struct felix *felix, int port, u16 vid)
+{
+ struct ocelot_vcap_filter *outer_tagging_rule;
+ struct ocelot_vcap_block *block_vcap_es0;
+ struct ocelot *ocelot = &felix->ocelot;
+
+ block_vcap_es0 = &ocelot->block[VCAP_ES0];
+
+ outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
+ port, false);
+ if (!outer_tagging_rule)
+ return -ENOENT;
+
+ return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
+}
+
+/* Set up VCAP IS1 rules for stripping the tag_8021q VLAN on TX and VCAP IS2
+ * rules for steering those tagged packets towards the correct destination port
+ */
+static int felix_tag_8021q_vlan_add_tx(struct felix *felix, int port, u16 vid)
{
struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
struct ocelot *ocelot = &felix->ocelot;
struct dsa_switch *ds = felix->ds;
int upstream, err;
- /* tag_8021q.c assumes we are implementing this via port VLAN
- * membership, which we aren't. So we don't need to add any VCAP filter
- * for the CPU port.
- */
- if (ocelot->ports[port]->is_dsa_8021q_cpu)
- return 0;
-
untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
if (!untagging_rule)
return -ENOMEM;
@@ -103,7 +244,7 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
untagging_rule->vlan.vid.value = vid;
untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
untagging_rule->prio = 1;
- untagging_rule->id.cookie = port;
+ untagging_rule->id.cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
untagging_rule->id.tc_offload = false;
untagging_rule->block_id = VCAP_IS1;
untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -124,7 +265,7 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
redirect_rule->ingress_port_mask = BIT(upstream);
redirect_rule->pag = port;
redirect_rule->prio = 1;
- redirect_rule->id.cookie = port;
+ redirect_rule->id.cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
redirect_rule->id.tc_offload = false;
redirect_rule->block_id = VCAP_IS2;
redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -142,49 +283,7 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
return 0;
}
-static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
- u16 flags)
-{
- bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
- struct ocelot *ocelot = ds->priv;
-
- if (vid_is_dsa_8021q_rxvlan(vid))
- return felix_tag_8021q_rxvlan_add(ocelot_to_felix(ocelot),
- port, vid, pvid, untagged);
-
- if (vid_is_dsa_8021q_txvlan(vid))
- return felix_tag_8021q_txvlan_add(ocelot_to_felix(ocelot),
- port, vid, pvid, untagged);
-
- return 0;
-}
-
-static int felix_tag_8021q_rxvlan_del(struct felix *felix, int port, u16 vid)
-{
- struct ocelot_vcap_filter *outer_tagging_rule;
- struct ocelot_vcap_block *block_vcap_es0;
- struct ocelot *ocelot = &felix->ocelot;
-
- block_vcap_es0 = &ocelot->block[VCAP_ES0];
-
- outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
- port, false);
- /* In rxvlan_add, we had the "if (!pvid) return 0" logic to avoid
- * installing outer tagging ES0 rules where they weren't needed.
- * But in rxvlan_del, the API doesn't give us the "flags" anymore,
- * so that forces us to be slightly sloppy here, and just assume that
- * if we didn't find an outer_tagging_rule it means that there was
- * none in the first place, i.e. rxvlan_del is called on a non-pvid
- * port. This is most probably true though.
- */
- if (!outer_tagging_rule)
- return 0;
-
- return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
-}
-
-static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
+static int felix_tag_8021q_vlan_del_tx(struct felix *felix, int port, u16 vid)
{
struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
struct ocelot_vcap_block *block_vcap_is1;
@@ -192,16 +291,13 @@ static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
struct ocelot *ocelot = &felix->ocelot;
int err;
- if (ocelot->ports[port]->is_dsa_8021q_cpu)
- return 0;
-
block_vcap_is1 = &ocelot->block[VCAP_IS1];
block_vcap_is2 = &ocelot->block[VCAP_IS2];
untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
port, false);
if (!untagging_rule)
- return 0;
+ return -ENOENT;
err = ocelot_vcap_filter_del(ocelot, untagging_rule);
if (err)
@@ -210,22 +306,54 @@ static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
port, false);
if (!redirect_rule)
- return 0;
+ return -ENOENT;
return ocelot_vcap_filter_del(ocelot, redirect_rule);
}
+static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags)
+{
+ struct ocelot *ocelot = ds->priv;
+ int err;
+
+ /* tag_8021q.c assumes we are implementing this via port VLAN
+ * membership, which we aren't. So we don't need to add any VCAP filter
+ * for the CPU port.
+ */
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
+ err = felix_tag_8021q_vlan_add_rx(ocelot_to_felix(ocelot), port, vid);
+ if (err)
+ return err;
+
+ err = felix_tag_8021q_vlan_add_tx(ocelot_to_felix(ocelot), port, vid);
+ if (err) {
+ felix_tag_8021q_vlan_del_rx(ocelot_to_felix(ocelot), port, vid);
+ return err;
+ }
+
+ return 0;
+}
+
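The add path above installs the RX half first and unwinds it if the TX half fails, so no half-programmed VLAN survives an error. A stubbed sketch of that pattern (add_rx/add_tx/del_rx are illustrative names, and -12 simulates -ENOMEM):

#include <stdio.h>

static int add_rx(int port)  { (void)port; return 0; }
static void del_rx(int port) { (void)port; }
static int add_tx(int port)  { (void)port; return -12; }

static int vlan_add(int port)
{
	int err = add_rx(port);

	if (err)
		return err;

	err = add_tx(port);
	if (err) {
		del_rx(port);	/* unwind the RX half */
		return err;
	}
	return 0;
}

int main(void)
{
	printf("vlan_add: %d\n", vlan_add(0));	/* -12, nothing leaked */
	return 0;
}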
static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
struct ocelot *ocelot = ds->priv;
+ int err;
- if (vid_is_dsa_8021q_rxvlan(vid))
- return felix_tag_8021q_rxvlan_del(ocelot_to_felix(ocelot),
- port, vid);
+ if (!dsa_is_user_port(ds, port))
+ return 0;
- if (vid_is_dsa_8021q_txvlan(vid))
- return felix_tag_8021q_txvlan_del(ocelot_to_felix(ocelot),
- port, vid);
+ err = felix_tag_8021q_vlan_del_rx(ocelot_to_felix(ocelot), port, vid);
+ if (err)
+ return err;
+
+ err = felix_tag_8021q_vlan_del_tx(ocelot_to_felix(ocelot), port, vid);
+ if (err) {
+ felix_tag_8021q_vlan_add_rx(ocelot_to_felix(ocelot), port, vid);
+ return err;
+ }
return 0;
}
@@ -241,8 +369,7 @@ static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port)
{
mutex_lock(&ocelot->fwd_domain_lock);
- ocelot->ports[port]->is_dsa_8021q_cpu = true;
- ocelot->npi = -1;
+ ocelot_port_set_dsa_8021q_cpu(ocelot, port);
/* Overwrite PGID_CPU with the non-tagging port */
ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU);
@@ -256,7 +383,7 @@ static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
{
mutex_lock(&ocelot->fwd_domain_lock);
- ocelot->ports[port]->is_dsa_8021q_cpu = false;
+ ocelot_port_unset_dsa_8021q_cpu(ocelot, port);
/* Restore PGID_CPU */
ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID,
@@ -267,148 +394,81 @@ static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
mutex_unlock(&ocelot->fwd_domain_lock);
}
-/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module.
- * If the quirk_no_xtr_irq is in place, then also copy those PTP frames to the
- * tag_8021q CPU port.
+/* On switches with no extraction IRQ wired, trapped packets need to be
+ * replicated over Ethernet as well, otherwise we'd get no notification of
+ * their arrival when using the ocelot-8021q tagging protocol.
*/
-static int felix_setup_mmio_filtering(struct felix *felix)
+static int felix_update_trapping_destinations(struct dsa_switch *ds,
+ bool using_tag_8021q)
{
- unsigned long user_ports = dsa_user_ports(felix->ds);
- struct ocelot_vcap_filter *redirect_rule;
- struct ocelot_vcap_filter *tagging_rule;
- struct ocelot *ocelot = &felix->ocelot;
- struct dsa_switch *ds = felix->ds;
- int cpu = -1, port, ret;
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct ocelot_vcap_filter *trap;
+ enum ocelot_mask_mode mask_mode;
+ unsigned long port_mask;
+ struct dsa_port *dp;
+ bool cpu_copy_ena;
+ int cpu = -1, err;
- tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
- if (!tagging_rule)
- return -ENOMEM;
+ if (!felix->info->quirk_no_xtr_irq)
+ return 0;
- redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
- if (!redirect_rule) {
- kfree(tagging_rule);
- return -ENOMEM;
+ /* Figure out the current CPU port */
+ dsa_switch_for_each_cpu_port(dp, ds) {
+ cpu = dp->index;
+ break;
}
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_cpu_port(ds, port)) {
- cpu = port;
- break;
- }
- }
+ /* We are sure that "cpu" was found, otherwise
+ * dsa_tree_setup_default_cpu() would have failed earlier.
+ */
- if (cpu < 0) {
- kfree(tagging_rule);
- kfree(redirect_rule);
- return -EINVAL;
- }
+ /* Make sure all traps are set up for that destination */
+ list_for_each_entry(trap, &ocelot->traps, trap_list) {
+ /* Figure out the current trapping destination */
+ if (using_tag_8021q) {
+ /* Redirect to the tag_8021q CPU port. If timestamps
+ * are necessary, also copy trapped packets to the CPU
+ * port module.
+ */
+ mask_mode = OCELOT_MASK_MODE_REDIRECT;
+ port_mask = BIT(cpu);
+ cpu_copy_ena = !!trap->take_ts;
+ } else {
+ /* Trap packets only to the CPU port module, which is
+ * redirected to the NPI port (the DSA CPU port)
+ */
+ mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+ port_mask = 0;
+ cpu_copy_ena = true;
+ }
- tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
- *(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
- *(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
- tagging_rule->ingress_port_mask = user_ports;
- tagging_rule->prio = 1;
- tagging_rule->id.cookie = ocelot->num_phys_ports;
- tagging_rule->id.tc_offload = false;
- tagging_rule->block_id = VCAP_IS1;
- tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
- tagging_rule->lookup = 0;
- tagging_rule->action.pag_override_mask = 0xff;
- tagging_rule->action.pag_val = ocelot->num_phys_ports;
-
- ret = ocelot_vcap_filter_add(ocelot, tagging_rule, NULL);
- if (ret) {
- kfree(tagging_rule);
- kfree(redirect_rule);
- return ret;
- }
+ if (trap->action.mask_mode == mask_mode &&
+ trap->action.port_mask == port_mask &&
+ trap->action.cpu_copy_ena == cpu_copy_ena)
+ continue;
- redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
- redirect_rule->ingress_port_mask = user_ports;
- redirect_rule->pag = ocelot->num_phys_ports;
- redirect_rule->prio = 1;
- redirect_rule->id.cookie = ocelot->num_phys_ports;
- redirect_rule->id.tc_offload = false;
- redirect_rule->block_id = VCAP_IS2;
- redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
- redirect_rule->lookup = 0;
- redirect_rule->action.cpu_copy_ena = true;
- if (felix->info->quirk_no_xtr_irq) {
- /* Redirect to the tag_8021q CPU but also copy PTP packets to
- * the CPU port module
- */
- redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
- redirect_rule->action.port_mask = BIT(cpu);
- } else {
- /* Trap PTP packets only to the CPU port module (which is
- * redirected to the NPI port)
- */
- redirect_rule->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
- redirect_rule->action.port_mask = 0;
- }
+ trap->action.mask_mode = mask_mode;
+ trap->action.port_mask = port_mask;
+ trap->action.cpu_copy_ena = cpu_copy_ena;
- ret = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
- if (ret) {
- ocelot_vcap_filter_del(ocelot, tagging_rule);
- kfree(redirect_rule);
- return ret;
+ err = ocelot_vcap_filter_replace(ocelot, trap);
+ if (err)
+ return err;
}
- /* The ownership of the CPU port module's queues might have just been
- * transferred to the tag_8021q tagger from the NPI-based tagger.
- * So there might still be all sorts of crap in the queues. On the
- * other hand, the MMIO-based matching of PTP frames is very brittle,
- * so we need to be careful that there are no extra frames to be
- * dequeued over MMIO, since we would never know to discard them.
- */
- ocelot_drain_cpu_queue(ocelot, 0);
-
return 0;
}
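A compact sketch of the per-trap action selection performed in the loop above; struct trap_action and pick_action() are illustrative names, not driver API. With tag_8021q, traps are redirected to the CPU port over Ethernet (copied to the CPU module only when a timestamp is needed); with NPI, they go to the CPU module alone.

#include <stdbool.h>
#include <stdio.h>

struct trap_action {
	bool redirect;		/* REDIRECT vs PERMIT_DENY mask mode */
	unsigned int port_mask;
	bool cpu_copy_ena;
};

static struct trap_action pick_action(bool using_tag_8021q, int cpu,
				      bool take_ts)
{
	struct trap_action a;

	a.redirect = using_tag_8021q;
	a.port_mask = using_tag_8021q ? 1u << cpu : 0;
	a.cpu_copy_ena = using_tag_8021q ? take_ts : true;
	return a;
}

int main(void)
{
	struct trap_action a = pick_action(true, 4, false);

	printf("redirect=%d mask=0x%x copy=%d\n",
	       a.redirect, a.port_mask, a.cpu_copy_ena);
	return 0;
}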
-static int felix_teardown_mmio_filtering(struct felix *felix)
-{
- struct ocelot_vcap_filter *tagging_rule, *redirect_rule;
- struct ocelot_vcap_block *block_vcap_is1;
- struct ocelot_vcap_block *block_vcap_is2;
- struct ocelot *ocelot = &felix->ocelot;
- int err;
-
- block_vcap_is1 = &ocelot->block[VCAP_IS1];
- block_vcap_is2 = &ocelot->block[VCAP_IS2];
-
- tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
- ocelot->num_phys_ports,
- false);
- if (!tagging_rule)
- return -ENOENT;
-
- err = ocelot_vcap_filter_del(ocelot, tagging_rule);
- if (err)
- return err;
-
- redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
- ocelot->num_phys_ports,
- false);
- if (!redirect_rule)
- return -ENOENT;
-
- return ocelot_vcap_filter_del(ocelot, redirect_rule);
-}
-
static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
{
struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
- unsigned long cpu_flood;
- int port, err;
+ struct dsa_port *dp;
+ int err;
felix_8021q_cpu_port_init(ocelot, cpu);
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
+ dsa_switch_for_each_available_port(dp, ds) {
/* This overwrites ocelot_init():
* Do not forward BPDU frames to the CPU port module,
* for 2 reasons:
@@ -421,28 +481,43 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
- ANA_PORT_CPU_FWD_BPDU_CFG, port);
+ ANA_PORT_CPU_FWD_BPDU_CFG, dp->index);
}
- /* In tag_8021q mode, the CPU port module is unused, except for PTP
- * frames. So we want to disable flooding of any kind to the CPU port
- * module, since packets going there will end in a black hole.
- */
- cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
- ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
- ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
- ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC);
-
err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
if (err)
return err;
- err = felix_setup_mmio_filtering(felix);
+ err = dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_tag_8021q_port);
if (err)
goto out_tag_8021q_unregister;
+ err = dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_tag_8021q_port);
+ if (err)
+ goto out_migrate_fdbs;
+
+ felix_migrate_flood_to_tag_8021q_port(ds, cpu);
+
+ err = felix_update_trapping_destinations(ds, true);
+ if (err)
+ goto out_migrate_flood;
+
+ /* The ownership of the CPU port module's queues might have just been
+ * transferred to the tag_8021q tagger from the NPI-based tagger.
+ * So there might still be all sorts of crap in the queues. On the
+ * other hand, the MMIO-based matching of PTP frames is very brittle,
+ * so we need to be careful that there are no extra frames to be
+ * dequeued over MMIO, since we would never know to discard them.
+ */
+ ocelot_drain_cpu_queue(ocelot, 0);
+
return 0;
+out_migrate_flood:
+ felix_migrate_flood_to_npi_port(ds, cpu);
+ dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_npi_port);
+out_migrate_fdbs:
+ dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_npi_port);
out_tag_8021q_unregister:
dsa_tag_8021q_unregister(ds);
return err;
@@ -451,27 +526,24 @@ out_tag_8021q_unregister:
static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
{
struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
- int err, port;
+ struct dsa_port *dp;
+ int err;
- err = felix_teardown_mmio_filtering(felix);
+ err = felix_update_trapping_destinations(ds, false);
if (err)
dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d",
err);
dsa_tag_8021q_unregister(ds);
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
+ dsa_switch_for_each_available_port(dp, ds) {
/* Restore the logic from ocelot_init:
* do not forward BPDU frames to the front ports.
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
ANA_PORT_CPU_FWD_BPDU_CFG,
- port);
+ dp->index);
}
felix_8021q_cpu_port_deinit(ocelot, cpu);
@@ -523,27 +595,26 @@ static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu)
{
struct ocelot *ocelot = ds->priv;
- unsigned long cpu_flood;
+ int err;
- felix_npi_port_init(ocelot, cpu);
+ err = dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_npi_port);
+ if (err)
+ return err;
- /* Include the CPU port module (and indirectly, the NPI port)
- * in the forwarding mask for unknown unicast - the hardware
- * default value for ANA_FLOODING_FLD_UNICAST excludes
- * BIT(ocelot->num_phys_ports), and so does ocelot_init,
- * since Ocelot relies on whitelisting MAC addresses towards
- * PGID_CPU.
- * We do this because DSA does not yet perform RX filtering,
- * and the NPI port does not perform source address learning,
- * so traffic sent to Linux is effectively unknown from the
- * switch's perspective.
- */
- cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
- ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_UC);
- ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_MC);
- ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_BC);
+ err = dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_npi_port);
+ if (err)
+ goto out_migrate_fdbs;
+
+ felix_migrate_flood_to_npi_port(ds, cpu);
+
+ felix_npi_port_init(ocelot, cpu);
return 0;
+
+out_migrate_fdbs:
+ dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_tag_8021q_port);
+
+ return err;
}
static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu)
@@ -659,35 +730,97 @@ static int felix_fdb_dump(struct dsa_switch *ds, int port,
}
static int felix_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
- return ocelot_fdb_add(ocelot, port, addr, vid);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+ dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
+ return 0;
+
+ return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev);
}
static int felix_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
- return ocelot_fdb_del(ocelot, port, addr, vid);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+ dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
+ return 0;
+
+ return ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev);
+}
+
+static int felix_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct ocelot *ocelot = ds->priv;
+
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ return ocelot_lag_fdb_add(ocelot, lag.dev, addr, vid, bridge_dev);
+}
+
+static int felix_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct ocelot *ocelot = ds->priv;
+
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ return ocelot_lag_fdb_del(ocelot, lag.dev, addr, vid, bridge_dev);
}
static int felix_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
- return ocelot_port_mdb_add(ocelot, port, mdb);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+ dsa_mdb_present_in_other_db(ds, port, mdb, db))
+ return 0;
+
+ return ocelot_port_mdb_add(ocelot, port, mdb, bridge_dev);
}
static int felix_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
- return ocelot_port_mdb_del(ocelot, port, mdb);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+ dsa_mdb_present_in_other_db(ds, port, mdb, db))
+ return 0;
+
+ return ocelot_port_mdb_del(ocelot, port, mdb, bridge_dev);
}
static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
@@ -719,13 +852,13 @@ static int felix_bridge_flags(struct dsa_switch *ds, int port,
}
static int felix_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge, bool *tx_fwd_offload)
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_bridge_join(ocelot, port, bridge.dev);
-
- return 0;
+ return ocelot_port_bridge_join(ocelot, port, bridge.dev, bridge.num,
+ extack);
}
static void felix_bridge_leave(struct dsa_switch *ds, int port,
@@ -737,20 +870,20 @@ static void felix_bridge_leave(struct dsa_switch *ds, int port,
}
static int felix_lag_join(struct dsa_switch *ds, int port,
- struct net_device *bond,
+ struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct ocelot *ocelot = ds->priv;
- return ocelot_port_lag_join(ocelot, port, bond, info);
+ return ocelot_port_lag_join(ocelot, port, lag.dev, info);
}
static int felix_lag_leave(struct dsa_switch *ds, int port,
- struct net_device *bond)
+ struct dsa_lag lag)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_lag_leave(ocelot, port, bond);
+ ocelot_port_lag_leave(ocelot, port, lag.dev);
return 0;
}
@@ -822,6 +955,21 @@ static int felix_vlan_del(struct dsa_switch *ds, int port,
return ocelot_vlan_del(ocelot, port, vlan->vid);
}
+static void felix_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct ocelot *ocelot = ds->priv;
+
+	/* This driver does not make use of the speed, duplex, pause or
+	 * advertisement fields in its mac_config, so it is safe to mark
+	 * it as non-legacy.
+ */
+ config->legacy_pre_march2020 = false;
+
+ __set_bit(ocelot->ports[port]->phy_mode,
+ config->supported_interfaces);
+}
+
static void felix_phylink_validate(struct dsa_switch *ds, int port,
unsigned long *supported,
struct phylink_link_state *state)
@@ -833,16 +981,18 @@ static void felix_phylink_validate(struct dsa_switch *ds, int port,
felix->info->phylink_validate(ocelot, port, supported, state);
}
-static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
- unsigned int link_an_mode,
- const struct phylink_link_state *state)
+static struct phylink_pcs *felix_phylink_mac_select_pcs(struct dsa_switch *ds,
+ int port,
+ phy_interface_t iface)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- struct dsa_port *dp = dsa_to_port(ds, port);
+ struct phylink_pcs *pcs = NULL;
if (felix->pcs && felix->pcs[port])
- phylink_set_pcs(dp->pl, felix->pcs[port]);
+ pcs = felix->pcs[port];
+
+ return pcs;
}
static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
@@ -924,11 +1074,28 @@ static int felix_get_ts_info(struct dsa_switch *ds, int port,
return ocelot_get_ts_info(ocelot, port, info);
}
+static const u32 felix_phy_match_table[PHY_INTERFACE_MODE_MAX] = {
+ [PHY_INTERFACE_MODE_INTERNAL] = OCELOT_PORT_MODE_INTERNAL,
+ [PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII,
+ [PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII,
+ [PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII,
+ [PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX,
+};
+
+static int felix_validate_phy_mode(struct felix *felix, int port,
+ phy_interface_t phy_mode)
+{
+ u32 modes = felix->info->port_modes[port];
+
+ if (felix_phy_match_table[phy_mode] & modes)
+ return 0;
+ return -EOPNOTSUPP;
+}
+
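A user-space sketch of the table-driven check that replaces the per-switch prevalidate_phy_mode() callbacks; the three-port table and MODE_* masks are invented for the example.

#include <stdio.h>

#define MODE_INTERNAL	(1u << 0)
#define MODE_SGMII	(1u << 1)
#define MODE_QSGMII	(1u << 2)

static const unsigned int port_modes[3] = {
	MODE_SGMII | MODE_QSGMII,
	MODE_SGMII | MODE_QSGMII,
	MODE_INTERNAL,			/* internal to-CPU port */
};

static int validate(int port, unsigned int requested)
{
	return (port_modes[port] & requested) ? 0 : -95; /* -EOPNOTSUPP */
}

int main(void)
{
	printf("%d %d\n", validate(0, MODE_SGMII), validate(2, MODE_SGMII));
	return 0;	/* prints "0 -95" */
}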
static int felix_parse_ports_node(struct felix *felix,
struct device_node *ports_node,
phy_interface_t *port_phy_modes)
{
- struct ocelot *ocelot = &felix->ocelot;
struct device *dev = felix->ocelot.dev;
struct device_node *child;
@@ -955,7 +1122,7 @@ static int felix_parse_ports_node(struct felix *felix,
return -ENODEV;
}
- err = felix->info->prevalidate_phy_mode(ocelot, port, phy_mode);
+ err = felix_validate_phy_mode(felix, port, phy_mode);
if (err < 0) {
dev_err(dev, "Unsupported PHY mode %s on port %d\n",
phy_modes(phy_mode), port);
@@ -1192,7 +1359,9 @@ static int felix_setup(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- int port, err;
+ unsigned long cpu_flood;
+ struct dsa_port *dp;
+ int err;
err = felix_init_structs(felix, ds->num_ports);
if (err)
@@ -1211,45 +1380,45 @@ static int felix_setup(struct dsa_switch *ds)
}
}
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
- ocelot_init_port(ocelot, port);
+ dsa_switch_for_each_available_port(dp, ds) {
+ ocelot_init_port(ocelot, dp->index);
/* Set the default QoS Classification based on PCP and DEI
* bits of vlan tag.
*/
- felix_port_qos_map_init(ocelot, port);
+ felix_port_qos_map_init(ocelot, dp->index);
}
err = ocelot_devlink_sb_register(ocelot);
if (err)
goto out_deinit_ports;
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_cpu_port(ds, port))
- continue;
-
+ dsa_switch_for_each_cpu_port(dp, ds) {
/* The initial tag protocol is NPI which always returns 0, so
* there's no real point in checking for errors.
*/
- felix_set_tag_protocol(ds, port, felix->tag_proto);
+ felix_set_tag_protocol(ds, dp->index, felix->tag_proto);
+
+ /* Start off with flooding disabled towards the NPI port
+ * (actually CPU port module).
+ */
+ cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
+ ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
+ ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
+
break;
}
ds->mtu_enforcement_ingress = true;
ds->assisted_learning_on_cpu_port = true;
+ ds->fdb_isolation = true;
+ ds->max_num_bridges = ds->num_ports;
return 0;
out_deinit_ports:
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
- ocelot_deinit_port(ocelot, port);
- }
+ dsa_switch_for_each_available_port(dp, ds)
+ ocelot_deinit_port(ocelot, dp->index);
ocelot_deinit_timestamp(ocelot);
ocelot_deinit(ocelot);
@@ -1265,22 +1434,15 @@ static void felix_teardown(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- int port;
-
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_cpu_port(ds, port))
- continue;
+ struct dsa_port *dp;
- felix_del_tag_protocol(ds, port, felix->tag_proto);
+ dsa_switch_for_each_cpu_port(dp, ds) {
+ felix_del_tag_protocol(ds, dp->index, felix->tag_proto);
break;
}
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
- ocelot_deinit_port(ocelot, port);
- }
+ dsa_switch_for_each_available_port(dp, ds)
+ ocelot_deinit_port(ocelot, dp->index);
ocelot_devlink_sb_unregister(ocelot);
ocelot_deinit_timestamp(ocelot);
@@ -1302,14 +1464,23 @@ static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
struct ifreq *ifr)
{
struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ bool using_tag_8021q;
+ int err;
- return ocelot_hwstamp_set(ocelot, port, ifr);
+ err = ocelot_hwstamp_set(ocelot, port, ifr);
+ if (err)
+ return err;
+
+ using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
+
+ return felix_update_trapping_destinations(ds, using_tag_8021q);
}
-static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
+static bool felix_check_xtr_pkt(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
- int err, grp = 0;
+ int err = 0, grp = 0;
if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q)
return false;
@@ -1317,9 +1488,6 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
if (!felix->info->quirk_no_xtr_irq)
return false;
- if (ptp_type == PTP_CLASS_NONE)
- return false;
-
while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
struct sk_buff *skb;
unsigned int type;
@@ -1349,8 +1517,12 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
}
out:
- if (err < 0)
+ if (err < 0) {
+ dev_err_ratelimited(ocelot->dev,
+ "Error during packet extraction: %pe\n",
+ ERR_PTR(err));
ocelot_drain_cpu_queue(ocelot, 0);
+ }
return true;
}
@@ -1370,7 +1542,7 @@ static bool felix_rxtstamp(struct dsa_switch *ds, int port,
* MMIO in the CPU port module, and inject that into the stack from
* ocelot_xtr_poll().
*/
- if (felix_check_xtr_pkt(ocelot, type)) {
+ if (felix_check_xtr_pkt(ocelot)) {
kfree_skb(skb);
return true;
}
@@ -1430,8 +1602,17 @@ static int felix_cls_flower_add(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress)
{
struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ bool using_tag_8021q;
+ int err;
+
+ err = ocelot_cls_flower_replace(ocelot, port, cls, ingress);
+ if (err)
+ return err;
+
+ using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
- return ocelot_cls_flower_replace(ocelot, port, cls, ingress);
+ return felix_update_trapping_destinations(ds, using_tag_8021q);
}
static int felix_cls_flower_del(struct dsa_switch *ds, int port,
@@ -1469,6 +1650,24 @@ static void felix_port_policer_del(struct dsa_switch *ds, int port)
ocelot_port_policer_del(ocelot, port);
}
+static int felix_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_mirror_add(ocelot, port, mirror->to_local_port,
+ ingress, extack);
+}
+
+static void felix_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_mirror_del(ocelot, port, mirror->ingress);
+}
+
static int felix_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type,
void *type_data)
@@ -1618,6 +1817,44 @@ felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
return ocelot_mrp_del_ring_role(ocelot, port, mrp);
}
+static int felix_port_get_default_prio(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_get_default_prio(ocelot, port);
+}
+
+static int felix_port_set_default_prio(struct dsa_switch *ds, int port,
+ u8 prio)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_set_default_prio(ocelot, port, prio);
+}
+
+static int felix_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_get_dscp_prio(ocelot, port, dscp);
+}
+
+static int felix_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
+ u8 prio)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_add_dscp_prio(ocelot, port, dscp, prio);
+}
+
+static int felix_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
+ u8 prio)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_del_dscp_prio(ocelot, port, dscp, prio);
+}
+
const struct dsa_switch_ops felix_switch_ops = {
.get_tag_protocol = felix_get_tag_protocol,
.change_tag_protocol = felix_change_tag_protocol,
@@ -1629,14 +1866,17 @@ const struct dsa_switch_ops felix_switch_ops = {
.get_ethtool_stats = felix_get_ethtool_stats,
.get_sset_count = felix_get_sset_count,
.get_ts_info = felix_get_ts_info,
+ .phylink_get_caps = felix_phylink_get_caps,
.phylink_validate = felix_phylink_validate,
- .phylink_mac_config = felix_phylink_mac_config,
+ .phylink_mac_select_pcs = felix_phylink_mac_select_pcs,
.phylink_mac_link_down = felix_phylink_mac_link_down,
.phylink_mac_link_up = felix_phylink_mac_link_up,
.port_fast_age = felix_port_fast_age,
.port_fdb_dump = felix_fdb_dump,
.port_fdb_add = felix_fdb_add,
.port_fdb_del = felix_fdb_del,
+ .lag_fdb_add = felix_lag_fdb_add,
+ .lag_fdb_del = felix_lag_fdb_del,
.port_mdb_add = felix_mdb_add,
.port_mdb_del = felix_mdb_del,
.port_pre_bridge_flags = felix_pre_bridge_flags,
@@ -1658,6 +1898,8 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_max_mtu = felix_get_max_mtu,
.port_policer_add = felix_port_policer_add,
.port_policer_del = felix_port_policer_del,
+ .port_mirror_add = felix_port_mirror_add,
+ .port_mirror_del = felix_port_mirror_del,
.cls_flower_add = felix_cls_flower_add,
.cls_flower_del = felix_cls_flower_del,
.cls_flower_stats = felix_cls_flower_stats,
@@ -1678,6 +1920,11 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_mrp_del_ring_role = felix_mrp_del_ring_role,
.tag_8021q_vlan_add = felix_tag_8021q_vlan_add,
.tag_8021q_vlan_del = felix_tag_8021q_vlan_del,
+ .port_get_default_prio = felix_port_get_default_prio,
+ .port_set_default_prio = felix_port_set_default_prio,
+ .port_get_dscp_prio = felix_port_get_dscp_prio,
+ .port_add_dscp_prio = felix_port_add_dscp_prio,
+ .port_del_dscp_prio = felix_port_del_dscp_prio,
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 9395ac119d33..f083b06fdfe9 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -7,6 +7,12 @@
#define ocelot_to_felix(o) container_of((o), struct felix, ocelot)
#define FELIX_MAC_QUIRKS OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION
+#define OCELOT_PORT_MODE_INTERNAL BIT(0)
+#define OCELOT_PORT_MODE_SGMII BIT(1)
+#define OCELOT_PORT_MODE_QSGMII BIT(2)
+#define OCELOT_PORT_MODE_2500BASEX BIT(3)
+#define OCELOT_PORT_MODE_USXGMII BIT(4)
+
/* Platform-specific information */
struct felix_info {
const struct resource *target_io_res;
@@ -15,6 +21,7 @@ struct felix_info {
const struct reg_field *regfields;
const u32 *const *map;
const struct ocelot_ops *ops;
+ const u32 *port_modes;
int num_mact_rows;
const struct ocelot_stat_layout *stats_layout;
unsigned int num_stats;
@@ -44,8 +51,6 @@ struct felix_info {
void (*phylink_validate)(struct ocelot *ocelot, int port,
unsigned long *supported,
struct phylink_link_state *state);
- int (*prevalidate_phy_mode)(struct ocelot *ocelot, int port,
- phy_interface_t phy_mode);
int (*port_setup_tc)(struct dsa_switch *ds, int port,
enum tc_setup_type type, void *type_data);
void (*port_sched_speed_set)(struct ocelot *ocelot, int port,
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 33f0ceae381d..62d52e0874e9 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -18,12 +18,28 @@
#include <linux/pci.h>
#include "felix.h"
+#define VSC9959_NUM_PORTS 6
+
#define VSC9959_TAS_GCL_ENTRY_MAX 63
#define VSC9959_VCAP_POLICER_BASE 63
#define VSC9959_VCAP_POLICER_MAX 383
#define VSC9959_SWITCH_PCI_BAR 4
#define VSC9959_IMDIO_PCI_BAR 0
+#define VSC9959_PORT_MODE_SERDES (OCELOT_PORT_MODE_SGMII | \
+ OCELOT_PORT_MODE_QSGMII | \
+ OCELOT_PORT_MODE_2500BASEX | \
+ OCELOT_PORT_MODE_USXGMII)
+
+static const u32 vsc9959_port_modes[VSC9959_NUM_PORTS] = {
+ VSC9959_PORT_MODE_SERDES,
+ VSC9959_PORT_MODE_SERDES,
+ VSC9959_PORT_MODE_SERDES,
+ VSC9959_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+};
+
static const u32 vsc9959_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x0089a0),
REG(ANA_VLANMASK, 0x0089a4),
@@ -944,15 +960,8 @@ static void vsc9959_phylink_validate(struct ocelot *ocelot, int port,
unsigned long *supported,
struct phylink_link_state *state)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != ocelot_port->phy_mode) {
- linkmode_zero(supported);
- return;
- }
-
phylink_set_port_modes(mask);
phylink_set(mask, Autoneg);
phylink_set(mask, Pause);
@@ -975,27 +984,6 @@ static void vsc9959_phylink_validate(struct ocelot *ocelot, int port,
linkmode_and(state->advertising, state->advertising, mask);
}
-static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port,
- phy_interface_t phy_mode)
-{
- switch (phy_mode) {
- case PHY_INTERFACE_MODE_INTERNAL:
- if (port != 4 && port != 5)
- return -ENOTSUPP;
- return 0;
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_USXGMII:
- case PHY_INTERFACE_MODE_2500BASEX:
- /* Not supported on internal to-CPU ports */
- if (port == 4 || port == 5)
- return -ENOTSUPP;
- return 0;
- default:
- return -ENOTSUPP;
- }
-}
-
/* Watermark encode
* Bit 8: Unit; 0:1, 1:16
* Bit 7-0: Value to be multiplied with unit
@@ -2231,14 +2219,14 @@ static const struct felix_info felix_info_vsc9959 = {
.vcap_pol_base2 = 0,
.vcap_pol_max2 = 0,
.num_mact_rows = 2048,
- .num_ports = 6,
+ .num_ports = VSC9959_NUM_PORTS,
.num_tx_queues = OCELOT_NUM_TC,
.quirk_no_xtr_irq = true,
.ptp_caps = &vsc9959_ptp_caps,
.mdio_bus_alloc = vsc9959_mdio_bus_alloc,
.mdio_bus_free = vsc9959_mdio_bus_free,
.phylink_validate = vsc9959_phylink_validate,
- .prevalidate_phy_mode = vsc9959_prevalidate_phy_mode,
+ .port_modes = vsc9959_port_modes,
.port_setup_tc = vsc9959_port_setup_tc,
.port_sched_speed_set = vsc9959_sched_speed_set,
.init_regmap = ocelot_regmap_init,
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index f2f1608a476c..68ef8f111bbe 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -14,11 +14,29 @@
#include <linux/iopoll.h>
#include "felix.h"
+#define VSC9953_NUM_PORTS 10
+
#define VSC9953_VCAP_POLICER_BASE 11
#define VSC9953_VCAP_POLICER_MAX 31
#define VSC9953_VCAP_POLICER_BASE2 120
#define VSC9953_VCAP_POLICER_MAX2 161
+#define VSC9953_PORT_MODE_SERDES (OCELOT_PORT_MODE_SGMII | \
+ OCELOT_PORT_MODE_QSGMII)
+
+static const u32 vsc9953_port_modes[VSC9953_NUM_PORTS] = {
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+};
+
static const u32 vsc9953_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x00b500),
REG(ANA_VLANMASK, 0x00b504),
@@ -917,15 +935,8 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port,
unsigned long *supported,
struct phylink_link_state *state)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != ocelot_port->phy_mode) {
- linkmode_zero(supported);
- return;
- }
-
phylink_set_port_modes(mask);
phylink_set(mask, Autoneg);
phylink_set(mask, Pause);
@@ -945,25 +956,6 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port,
linkmode_and(state->advertising, state->advertising, mask);
}
-static int vsc9953_prevalidate_phy_mode(struct ocelot *ocelot, int port,
- phy_interface_t phy_mode)
-{
- switch (phy_mode) {
- case PHY_INTERFACE_MODE_INTERNAL:
- if (port != 8 && port != 9)
- return -ENOTSUPP;
- return 0;
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- /* Not supported on internal to-CPU ports */
- if (port == 8 || port == 9)
- return -ENOTSUPP;
- return 0;
- default:
- return -ENOTSUPP;
- }
-}
-
/* Watermark encode
* Bit 9: Unit; 0:1, 1:16
* Bit 8-0: Value to be multiplied with unit
@@ -1101,12 +1093,12 @@ static const struct felix_info seville_info_vsc9953 = {
.vcap_pol_base2 = VSC9953_VCAP_POLICER_BASE2,
.vcap_pol_max2 = VSC9953_VCAP_POLICER_MAX2,
.num_mact_rows = 2048,
- .num_ports = 10,
+ .num_ports = VSC9953_NUM_PORTS,
.num_tx_queues = OCELOT_NUM_TC,
.mdio_bus_alloc = vsc9953_mdio_bus_alloc,
.mdio_bus_free = vsc9953_mdio_bus_free,
.phylink_validate = vsc9953_phylink_validate,
- .prevalidate_phy_mode = vsc9953_prevalidate_phy_mode,
+ .port_modes = vsc9953_port_modes,
.init_regmap = ocelot_regmap_init,
};
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index c39de2a4c1fe..e5098cfe44bc 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -499,52 +499,27 @@ static enum dsa_tag_protocol ar9331_sw_get_tag_protocol(struct dsa_switch *ds,
return DSA_TAG_PROTO_AR9331;
}
-static void ar9331_sw_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void ar9331_sw_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
switch (port) {
case 0:
- if (state->interface != PHY_INTERFACE_MODE_GMII)
- goto unsupported;
-
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_1000;
break;
case 1:
case 2:
case 3:
case 4:
case 5:
- if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
break;
- default:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported port: %i\n", port);
- return;
}
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- return;
-
-unsupported:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported interface: %d, port: %d\n",
- state->interface, port);
}
static void ar9331_sw_phylink_mac_config(struct dsa_switch *ds, int port,
@@ -697,7 +672,7 @@ static const struct dsa_switch_ops ar9331_sw_ops = {
.get_tag_protocol = ar9331_sw_get_tag_protocol,
.setup = ar9331_sw_setup,
.port_disable = ar9331_sw_port_disable,
- .phylink_validate = ar9331_sw_phylink_validate,
+ .phylink_get_caps = ar9331_sw_phylink_get_caps,
.phylink_mac_config = ar9331_sw_phylink_mac_config,
.phylink_mac_link_down = ar9331_sw_phylink_mac_link_down,
.phylink_mac_link_up = ar9331_sw_phylink_mac_link_up,
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 039694518788..d3ed0a7f8077 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -20,6 +20,7 @@
#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
#include <linux/etherdevice.h>
+#include <linux/dsa/tag_qca.h>
#include "qca8k.h"
@@ -74,12 +75,6 @@ static const struct qca8k_mib_desc ar8327_mib[] = {
MIB_DESC(1, 0xac, "TXUnicast"),
};
-/* The 32bit switch registers are accessed indirectly. To achieve this we need
- * to set the page of the register. Track the last page that was set to reduce
- * mdio writes
- */
-static u16 qca8k_current_page = 0xffff;
-
static void
qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
{
@@ -94,6 +89,44 @@ qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
}
static int
+qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
+{
+ u16 *cached_lo = &priv->mdio_cache.lo;
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ if (lo == *cached_lo)
+ return 0;
+
+ ret = bus->write(bus, phy_id, regnum, lo);
+ if (ret < 0)
+ dev_err_ratelimited(&bus->dev,
+ "failed to write qca8k 32bit lo register\n");
+
+ *cached_lo = lo;
+ return 0;
+}
+
+static int
+qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
+{
+ u16 *cached_hi = &priv->mdio_cache.hi;
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ if (hi == *cached_hi)
+ return 0;
+
+ ret = bus->write(bus, phy_id, regnum, hi);
+ if (ret < 0)
+ dev_err_ratelimited(&bus->dev,
+ "failed to write qca8k 32bit hi register\n");
+
+ *cached_hi = hi;
+ return 0;
+}
+
+static int
qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
{
int ret;
@@ -116,7 +149,7 @@ qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
}
static void
-qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
+qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
{
u16 lo, hi;
int ret;
@@ -124,20 +157,19 @@ qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
lo = val & 0xffff;
hi = (u16)(val >> 16);
- ret = bus->write(bus, phy_id, regnum, lo);
+ ret = qca8k_set_lo(priv, phy_id, regnum, lo);
if (ret >= 0)
- ret = bus->write(bus, phy_id, regnum + 1, hi);
- if (ret < 0)
- dev_err_ratelimited(&bus->dev,
- "failed to write qca8k 32bit register\n");
+ ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
}
static int
-qca8k_set_page(struct mii_bus *bus, u16 page)
+qca8k_set_page(struct qca8k_priv *priv, u16 page)
{
+ u16 *cached_page = &priv->mdio_cache.page;
+ struct mii_bus *bus = priv->bus;
int ret;
- if (page == qca8k_current_page)
+ if (page == *cached_page)
return 0;
ret = bus->write(bus, 0x18, 0, page);
@@ -147,7 +179,7 @@ qca8k_set_page(struct mii_bus *bus, u16 page)
return ret;
}
- qca8k_current_page = page;
+ *cached_page = page;
usleep_range(1000, 2000);
return 0;
}
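A sketch of the caching idea behind qca8k_set_page(): skip the slow MDIO write when the requested page is already selected. mdio_write_page() is a hypothetical stand-in for bus->write().

#include <stdint.h>
#include <stdio.h>

static uint16_t current_page = 0xffff;	/* "no page set yet" */
static int writes;

static void mdio_write_page(uint16_t page)
{
	writes++;
	current_page = page;
}

static void set_page(uint16_t page)
{
	if (page == current_page)
		return;			/* cached - no bus traffic */
	mdio_write_page(page);
}

int main(void)
{
	set_page(3);
	set_page(3);
	set_page(7);
	printf("bus writes: %d\n", writes);	/* prints 2 */
	return 0;
}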
@@ -170,6 +202,252 @@ qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
return regmap_update_bits(priv->regmap, reg, mask, write_val);
}
+static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
+{
+ struct qca8k_mgmt_eth_data *mgmt_eth_data;
+ struct qca8k_priv *priv = ds->priv;
+ struct qca_mgmt_ethhdr *mgmt_ethhdr;
+ u8 len, cmd;
+
+ mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
+ mgmt_eth_data = &priv->mgmt_eth_data;
+
+ cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command);
+ len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);
+
+	/* Make sure the seq matches the requested packet */
+ if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
+ mgmt_eth_data->ack = true;
+
+ if (cmd == MDIO_READ) {
+ mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data;
+
+		/* Get the remaining 12 bytes of data.
+ * The read/write function will extract the requested data.
+ */
+ if (len > QCA_HDR_MGMT_DATA1_LEN)
+ memcpy(mgmt_eth_data->data + 1, skb->data,
+ QCA_HDR_MGMT_DATA2_LEN);
+ }
+
+ complete(&mgmt_eth_data->rw_done);
+}
+
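A minimal model of the sequence-number check in the handler above: only a reply whose seq matches the outstanding request may complete it, so a stale reply is ignored. All names here are illustrative.

#include <stdbool.h>
#include <stdio.h>

static unsigned int tx_seq;
static bool ack;

static void send_request(void)
{
	tx_seq++;	/* new seq for every request */
	ack = false;
}

static void on_response(unsigned int rx_seq)
{
	if (rx_seq == tx_seq)
		ack = true;	/* complete(&rw_done) in the driver */
}

int main(void)
{
	send_request();
	on_response(tx_seq - 1);	/* stale reply: ignored */
	printf("ack after stale: %d\n", ack);
	on_response(tx_seq);
	printf("ack after match: %d\n", ack);
	return 0;
}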
+static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
+ int priority, unsigned int len)
+{
+ struct qca_mgmt_ethhdr *mgmt_ethhdr;
+ unsigned int real_len;
+ struct sk_buff *skb;
+ u32 *data2;
+ u16 hdr;
+
+ skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
+ if (!skb)
+ return NULL;
+
+	/* Max value for the len reg is 15 (0xf), but the switch actually
+	 * returns 16 bytes. For some reason the steps are:
+	 * 0: nothing
+	 * 1-4: first 4 bytes
+	 * 5-6: first 12 bytes
+	 * 7-15: all 16 bytes
+	 */
+ if (len == 16)
+ real_len = 15;
+ else
+ real_len = len;
+
+ skb_reset_mac_header(skb);
+ skb_set_network_header(skb, skb->len);
+
+ mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
+
+ hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
+ hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
+ hdr |= QCA_HDR_XMIT_FROM_CPU;
+ hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
+ hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
+
+ mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
+ mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
+ mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
+ mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
+ QCA_HDR_MGMT_CHECK_CODE_VAL);
+
+ if (cmd == MDIO_WRITE)
+ mgmt_ethhdr->mdio_data = *val;
+
+ mgmt_ethhdr->hdr = htons(hdr);
+
+ data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
+ if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN)
+ memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN);
+
+ return skb;
+}
+
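Since the len field is only 4 bits wide, a full 16-byte transfer has to be encoded as 15, which is what the clamp above implements. A trivial sketch:

#include <stdio.h>

static int encode_len(int len)
{
	return (len == 16) ? 15 : len;	/* 4-bit field, 16 -> 0xf */
}

int main(void)
{
	printf("%d %d %d\n", encode_len(4), encode_len(12), encode_len(16));
	return 0;	/* prints "4 12 15" */
}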
+static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
+{
+ struct qca_mgmt_ethhdr *mgmt_ethhdr;
+
+ mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
+ mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
+}
+
+static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
+ struct sk_buff *skb;
+ bool ack;
+ int ret;
+
+ skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
+ QCA8K_ETHERNET_MDIO_PRIORITY, len);
+ if (!skb)
+ return -ENOMEM;
+
+ mutex_lock(&mgmt_eth_data->mutex);
+
+	/* Check if mgmt_master is operational */
+ if (!priv->mgmt_master) {
+ kfree_skb(skb);
+ mutex_unlock(&mgmt_eth_data->mutex);
+ return -EINVAL;
+ }
+
+ skb->dev = priv->mgmt_master;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the mdio pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+
+ *val = mgmt_eth_data->data[0];
+ if (len > QCA_HDR_MGMT_DATA1_LEN)
+ memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
+
+ ack = mgmt_eth_data->ack;
+
+ mutex_unlock(&mgmt_eth_data->mutex);
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ if (!ack)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
+ struct sk_buff *skb;
+ bool ack;
+ int ret;
+
+ skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
+ QCA8K_ETHERNET_MDIO_PRIORITY, len);
+ if (!skb)
+ return -ENOMEM;
+
+ mutex_lock(&mgmt_eth_data->mutex);
+
+	/* Check if mgmt_master is operational */
+ if (!priv->mgmt_master) {
+ kfree_skb(skb);
+ mutex_unlock(&mgmt_eth_data->mutex);
+ return -EINVAL;
+ }
+
+ skb->dev = priv->mgmt_master;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the mdio pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+
+ ack = mgmt_eth_data->ack;
+
+ mutex_unlock(&mgmt_eth_data->mutex);
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ if (!ack)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
+{
+ u32 val = 0;
+ int ret;
+
+ ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ val &= ~mask;
+ val |= write_val;
+
+ return qca8k_write_eth(priv, reg, &val, sizeof(val));
+}
+
+static int
+qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ int i, count = len / sizeof(u32), ret;
+
+ if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len))
+ return 0;
+
+ for (i = 0; i < count; i++) {
+ ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ int i, count = len / sizeof(u32), ret;
+ u32 tmp;
+
+ if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len))
+ return 0;
+
+ for (i = 0; i < count; i++) {
+ tmp = val[i];
+
+ ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
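The bulk helpers above try the Ethernet management channel first and fall back to per-word regmap access when no management master is bound. A stubbed user-space sketch of that fallback (read_eth() and read_mdio() here are fakes, not the driver functions):

#include <stdio.h>

static int have_master;

static int read_eth(unsigned int reg, unsigned int *val)
{
	(void)reg;
	if (!have_master)
		return -1;
	*val = 0xaa;		/* pretend data via Ethernet */
	return 0;
}

static int read_mdio(unsigned int reg, unsigned int *val)
{
	(void)reg;
	*val = 0xbb;		/* pretend data via MDIO */
	return 0;
}

static int bulk_read(unsigned int reg, unsigned int *val)
{
	if (have_master && !read_eth(reg, val))
		return 0;
	return read_mdio(reg, val);	/* fallback path */
}

int main(void)
{
	unsigned int v;

	bulk_read(0, &v);
	printf("no master: 0x%x\n", v);
	have_master = 1;
	bulk_read(0, &v);
	printf("master:    0x%x\n", v);
	return 0;
}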
static int
qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
{
@@ -178,11 +456,14 @@ qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
u16 r1, r2, page;
int ret;
+ if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
+ return 0;
+
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
- ret = qca8k_set_page(bus, page);
+ ret = qca8k_set_page(priv, page);
if (ret < 0)
goto exit;
@@ -201,15 +482,18 @@ qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
u16 r1, r2, page;
int ret;
+ if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
+ return 0;
+
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
- ret = qca8k_set_page(bus, page);
+ ret = qca8k_set_page(priv, page);
if (ret < 0)
goto exit;
- qca8k_mii_write32(bus, 0x10 | r2, r1, val);
+ qca8k_mii_write32(priv, 0x10 | r2, r1, val);
exit:
mutex_unlock(&bus->mdio_lock);
@@ -225,11 +509,14 @@ qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_
u32 val;
int ret;
+ if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
+ return 0;
+
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
- ret = qca8k_set_page(bus, page);
+ ret = qca8k_set_page(priv, page);
if (ret < 0)
goto exit;
@@ -239,7 +526,7 @@ qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_
val &= ~mask;
val |= write_val;
- qca8k_mii_write32(bus, 0x10 | r2, r1, val);
+ qca8k_mii_write32(priv, 0x10 | r2, r1, val);
exit:
mutex_unlock(&bus->mdio_lock);
@@ -296,17 +583,13 @@ qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
static int
qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
{
- u32 reg[4], val;
- int i, ret;
+ u32 reg[3];
+ int ret;
/* load the ARL table into an array */
- for (i = 0; i < 4; i++) {
- ret = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4), &val);
- if (ret < 0)
- return ret;
-
- reg[i] = val;
- }
+ ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
+ if (ret)
+ return ret;
/* vid - 83:72 */
fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
@@ -330,7 +613,6 @@ qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
u8 aging)
{
u32 reg[3] = { 0 };
- int i;
/* vid - 83:72 */
reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
@@ -347,8 +629,7 @@ qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
/* load the array into the ARL table */
- for (i = 0; i < 3; i++)
- qca8k_write(priv, QCA8K_REG_ATU_DATA0 + (i * 4), reg[i]);
+ qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
}
static int
@@ -632,7 +913,10 @@ qca8k_mib_init(struct qca8k_priv *priv)
int ret;
mutex_lock(&priv->reg_mutex);
- ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
+ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
+ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
+ QCA8K_MIB_BUSY);
if (ret)
goto exit;
@@ -666,6 +950,199 @@ qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
}
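+
+/* One poll step of the Ethernet-based MDIO master busy wait: retransmit a
+ * copy of the prepared read_skb with a fresh sequence number and report the
+ * first returned data word. read_poll_timeout() in qca8k_phy_eth_command()
+ * below calls this until QCA8K_MDIO_MASTER_BUSY clears.
+ */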
+static int
+qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
+ struct sk_buff *read_skb, u32 *val)
+{
+ struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
+ bool ack;
+ int ret;
+
+ /* skb_copy() may fail under memory pressure */
+ if (!skb)
+ return -ENOMEM;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the copy pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ ack = mgmt_eth_data->ack;
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ if (!ack)
+ return -EINVAL;
+
+ *val = mgmt_eth_data->data[0];
+
+ return 0;
+}
+
+static int
+qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
+ int regnum, u16 data)
+{
+ struct sk_buff *write_skb, *clear_skb, *read_skb;
+ struct qca8k_mgmt_eth_data *mgmt_eth_data;
+ u32 write_val, clear_val = 0, val;
+ struct net_device *mgmt_master;
+ int ret, ret1;
+ bool ack;
+
+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+ return -EINVAL;
+
+ mgmt_eth_data = &priv->mgmt_eth_data;
+
+ write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+ QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+ QCA8K_MDIO_MASTER_REG_ADDR(regnum);
+
+ if (read) {
+ write_val |= QCA8K_MDIO_MASTER_READ;
+ } else {
+ write_val |= QCA8K_MDIO_MASTER_WRITE;
+ write_val |= QCA8K_MDIO_MASTER_DATA(data);
+ }
+
+ /* Prealloc all the needed skb before the lock */
+ write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
+ if (!write_skb)
+ return -ENOMEM;
+
+ clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
+ if (!clear_skb) {
+ ret = -ENOMEM;
+ goto err_clear_skb;
+ }
+
+ read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
+ if (!read_skb) {
+ ret = -ENOMEM;
+ goto err_read_skb;
+ }
+
+ /* Actually start the request:
+ * 1. Send mdio master packet
+ * 2. Busy Wait for mdio master command
+ * 3. Get the data if we are reading
+ * 4. Reset the mdio master (even with error)
+ */
+ mutex_lock(&mgmt_eth_data->mutex);
+
+ /* Check if mgmt_master is operational */
+ mgmt_master = priv->mgmt_master;
+ if (!mgmt_master) {
+ mutex_unlock(&mgmt_eth_data->mutex);
+ ret = -EINVAL;
+ goto err_mgmt_master;
+ }
+
+ read_skb->dev = mgmt_master;
+ clear_skb->dev = mgmt_master;
+ write_skb->dev = mgmt_master;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the write pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(write_skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ ack = mgmt_eth_data->ack;
+
+ if (ret <= 0) {
+ ret = -ETIMEDOUT;
+ kfree_skb(read_skb);
+ goto exit;
+ }
+
+ if (!ack) {
+ ret = -EINVAL;
+ kfree_skb(read_skb);
+ goto exit;
+ }
+
+ ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
+ !(val & QCA8K_MDIO_MASTER_BUSY), 0,
+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
+ mgmt_eth_data, read_skb, &val);
+
+ if (ret < 0 && ret1 < 0) {
+ ret = ret1;
+ goto exit;
+ }
+
+ if (read) {
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the read pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(read_skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ ack = mgmt_eth_data->ack;
+
+ if (ret <= 0) {
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+
+ if (!ack) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
+ } else {
+ kfree_skb(read_skb);
+ }
+exit:
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the clear pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(clear_skb);
+
+ wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ mutex_unlock(&mgmt_eth_data->mutex);
+
+ return ret;
+
+ /* Error handling before lock */
+err_mgmt_master:
+ kfree_skb(read_skb);
+err_read_skb:
+ kfree_skb(clear_skb);
+err_clear_skb:
+ kfree_skb(write_skb);
+
+ return ret;
+}
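+
+/* Caller-side sketch (illustrative; MII_BMSR stands in for any register):
+ * a negative return selects the legacy MDIO fallback, mirroring
+ * qca8k_internal_mdio_read() below:
+ *
+ *     int val = qca8k_phy_eth_command(priv, true, phy, MII_BMSR, 0);
+ *     if (val < 0)
+ *             val = qca8k_mdio_read(priv, phy, MII_BMSR);
+ */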
+
static u32
qca8k_port_to_phy(int port)
{
@@ -704,8 +1181,9 @@ qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
}
static int
-qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
+qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
{
+ struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
@@ -722,18 +1200,18 @@ qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
- ret = qca8k_set_page(bus, page);
+ ret = qca8k_set_page(priv, page);
if (ret)
goto exit;
- qca8k_mii_write32(bus, 0x10 | r2, r1, val);
+ qca8k_mii_write32(priv, 0x10 | r2, r1, val);
ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_BUSY);
exit:
/* even if the busy_wait times out, try to clear the MASTER_EN */
- qca8k_mii_write32(bus, 0x10 | r2, r1, 0);
+ qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
mutex_unlock(&bus->mdio_lock);
@@ -741,8 +1219,9 @@ exit:
}
static int
-qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
+qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
{
+ struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
@@ -758,11 +1237,11 @@ qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
- ret = qca8k_set_page(bus, page);
+ ret = qca8k_set_page(priv, page);
if (ret)
goto exit;
- qca8k_mii_write32(bus, 0x10 | r2, r1, val);
+ qca8k_mii_write32(priv, 0x10 | r2, r1, val);
ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_BUSY);
@@ -773,7 +1252,7 @@ qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
exit:
/* even if the busy_wait times out, try to clear the MASTER_EN */
- qca8k_mii_write32(bus, 0x10 | r2, r1, 0);
+ qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
mutex_unlock(&bus->mdio_lock);
@@ -787,24 +1266,35 @@ static int
qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
{
struct qca8k_priv *priv = slave_bus->priv;
- struct mii_bus *bus = priv->bus;
+ int ret;
- return qca8k_mdio_write(bus, phy, regnum, data);
+ /* Use mdio Ethernet when available, fall back to the legacy one on error */
+ ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
+ if (!ret)
+ return 0;
+
+ return qca8k_mdio_write(priv, phy, regnum, data);
}
static int
qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
{
struct qca8k_priv *priv = slave_bus->priv;
- struct mii_bus *bus = priv->bus;
+ int ret;
+
+ /* Use mdio Ethernet when available, fall back to the legacy one on error */
+ ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
+ if (ret >= 0)
+ return ret;
- return qca8k_mdio_read(bus, phy, regnum);
+ return qca8k_mdio_read(priv, phy, regnum);
}
static int
qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
{
struct qca8k_priv *priv = ds->priv;
+ int ret;
/* Check if the legacy mapping should be used and the
* port is not correctly mapped to the right PHY in the
@@ -813,7 +1303,12 @@ qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
if (priv->legacy_phy_port_mapping)
port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
- return qca8k_mdio_write(priv->bus, port, regnum, data);
+ /* Use mdio Ethernet when available, fall back to the legacy one on error */
+ ret = qca8k_phy_eth_command(priv, false, port, regnum, data);
+ if (!ret)
+ return ret;
+
+ return qca8k_mdio_write(priv, port, regnum, data);
}
static int
@@ -829,7 +1324,12 @@ qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
if (priv->legacy_phy_port_mapping)
port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
- ret = qca8k_mdio_read(priv->bus, port, regnum);
+ /* Use mdio Ethernet when available, fall back to the legacy one on error */
+ ret = qca8k_phy_eth_command(priv, true, port, regnum, 0);
+ if (ret >= 0)
+ return ret;
+
+ ret = qca8k_mdio_read(priv, port, regnum);
if (ret < 0)
return 0xffff;
@@ -1132,220 +1632,6 @@ qca8k_parse_port_config(struct qca8k_priv *priv)
return 0;
}
-static int
-qca8k_setup(struct dsa_switch *ds)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int cpu_port, ret, i;
- u32 mask;
-
- cpu_port = qca8k_find_cpu_port(ds);
- if (cpu_port < 0) {
- dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
- return cpu_port;
- }
-
- /* Parse CPU port config to be later used in phy_link mac_config */
- ret = qca8k_parse_port_config(priv);
- if (ret)
- return ret;
-
- ret = qca8k_setup_mdio_bus(priv);
- if (ret)
- return ret;
-
- ret = qca8k_setup_of_pws_reg(priv);
- if (ret)
- return ret;
-
- ret = qca8k_setup_mac_pwr_sel(priv);
- if (ret)
- return ret;
-
- /* Make sure MAC06 is disabled */
- ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
- QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
- if (ret) {
- dev_err(priv->dev, "failed disabling MAC06 exchange");
- return ret;
- }
-
- /* Enable CPU Port */
- ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
- QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
- if (ret) {
- dev_err(priv->dev, "failed enabling CPU port");
- return ret;
- }
-
- /* Enable MIB counters */
- ret = qca8k_mib_init(priv);
- if (ret)
- dev_warn(priv->dev, "mib init failed");
-
- /* Initial setup of all ports */
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- /* Disable forwarding by default on all ports */
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_MEMBER, 0);
- if (ret)
- return ret;
-
- /* Enable QCA header mode on all cpu ports */
- if (dsa_is_cpu_port(ds, i)) {
- ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
- FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
- FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
- if (ret) {
- dev_err(priv->dev, "failed enabling QCA header mode");
- return ret;
- }
- }
-
- /* Disable MAC by default on all user ports */
- if (dsa_is_user_port(ds, i))
- qca8k_port_set_status(priv, i, 0);
- }
-
- /* Forward all unknown frames to CPU port for Linux processing
- * Notice that in multi-cpu config only one port should be set
- * for igmp, unknown, multicast and broadcast packet
- */
- ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
- FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
- FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
- FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
- FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
- if (ret)
- return ret;
-
- /* Setup connection between CPU port & user ports
- * Configure specific switch configuration for ports
- */
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- /* CPU port gets connected to all user ports of the switch */
- if (dsa_is_cpu_port(ds, i)) {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
- if (ret)
- return ret;
- }
-
- /* Individual user ports get connected to CPU port only */
- if (dsa_is_user_port(ds, i)) {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_MEMBER,
- BIT(cpu_port));
- if (ret)
- return ret;
-
- /* Enable ARP Auto-learning by default */
- ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_LEARN);
- if (ret)
- return ret;
-
- /* For port based vlans to work we need to set the
- * default egress vid
- */
- ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
- QCA8K_EGREES_VLAN_PORT_MASK(i),
- QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
- if (ret)
- return ret;
-
- ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
- QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
- QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
- if (ret)
- return ret;
- }
-
- /* The port 5 of the qca8337 have some problem in flood condition. The
- * original legacy driver had some specific buffer and priority settings
- * for the different port suggested by the QCA switch team. Add this
- * missing settings to improve switch stability under load condition.
- * This problem is limited to qca8337 and other qca8k switch are not affected.
- */
- if (priv->switch_id == QCA8K_ID_QCA8337) {
- switch (i) {
- /* The 2 CPU port and port 5 requires some different
- * priority than any other ports.
- */
- case 0:
- case 5:
- case 6:
- mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
- QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
- break;
- default:
- mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
- QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
- }
- qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
-
- mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
- QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_WRED_EN;
- qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
- QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
- QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_WRED_EN,
- mask);
- }
-
- /* Set initial MTU for every port.
- * We have only have a general MTU setting. So track
- * every port and set the max across all port.
- * Set per port MTU to 1500 as the MTU change function
- * will add the overhead and if its set to 1518 then it
- * will apply the overhead again and we will end up with
- * MTU of 1536 instead of 1518
- */
- priv->port_mtu[i] = ETH_DATA_LEN;
- }
-
- /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
- if (priv->switch_id == QCA8K_ID_QCA8327) {
- mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
- QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
- qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
- QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
- QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
- mask);
- }
-
- /* Setup our port MTUs to match power on defaults */
- ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
- if (ret)
- dev_warn(priv->dev, "failed setting MTU settings");
-
- /* Flush the FDB table */
- qca8k_fdb_flush(priv);
-
- /* We don't have interrupts for link changes, so we need to poll */
- ds->pcs_poll = true;
-
- /* Set min a max ageing value supported */
- ds->ageing_time_min = 7000;
- ds->ageing_time_max = 458745000;
-
- /* Set max number of LAGs supported */
- ds->num_lag_ids = QCA8K_NUM_LAGS;
-
- return 0;
-}
-
static void
qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
u32 reg)
@@ -1387,13 +1673,41 @@ qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_inde
cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
}
+static struct phylink_pcs *
+qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ struct qca8k_priv *priv = ds->priv;
+ struct phylink_pcs *pcs = NULL;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ switch (port) {
+ case 0:
+ pcs = &priv->pcs_port_0.pcs;
+ break;
+
+ case 6:
+ pcs = &priv->pcs_port_6.pcs;
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return pcs;
+}
+
static void
qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
const struct phylink_link_state *state)
{
struct qca8k_priv *priv = ds->priv;
- int cpu_port_index, ret;
- u32 reg, val;
+ int cpu_port_index;
+ u32 reg;
switch (port) {
case 0: /* 1st CPU port */
@@ -1459,70 +1773,6 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
case PHY_INTERFACE_MODE_1000BASEX:
/* Enable SGMII on the port */
qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
-
- /* Enable/disable SerDes auto-negotiation as necessary */
- ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
- if (ret)
- return;
- if (phylink_autoneg_inband(mode))
- val &= ~QCA8K_PWS_SERDES_AEN_DIS;
- else
- val |= QCA8K_PWS_SERDES_AEN_DIS;
- qca8k_write(priv, QCA8K_REG_PWS, val);
-
- /* Configure the SGMII parameters */
- ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
- if (ret)
- return;
-
- val |= QCA8K_SGMII_EN_SD;
-
- if (priv->ports_config.sgmii_enable_pll)
- val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
- QCA8K_SGMII_EN_TX;
-
- if (dsa_is_cpu_port(ds, port)) {
- /* CPU port, we're talking to the CPU MAC, be a PHY */
- val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
- val |= QCA8K_SGMII_MODE_CTRL_PHY;
- } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
- val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
- val |= QCA8K_SGMII_MODE_CTRL_MAC;
- } else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) {
- val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
- val |= QCA8K_SGMII_MODE_CTRL_BASEX;
- }
-
- qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
-
- /* From original code is reported port instability as SGMII also
- * require delay set. Apply advised values here or take them from DT.
- */
- if (state->interface == PHY_INTERFACE_MODE_SGMII)
- qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
-
- /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
- * falling edge is set writing in the PORT0 PAD reg
- */
- if (priv->switch_id == QCA8K_ID_QCA8327 ||
- priv->switch_id == QCA8K_ID_QCA8337)
- reg = QCA8K_REG_PORT0_PAD_CTRL;
-
- val = 0;
-
- /* SGMII Clock phase configuration */
- if (priv->ports_config.sgmii_rx_clk_falling_edge)
- val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
-
- if (priv->ports_config.sgmii_tx_clk_falling_edge)
- val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
-
- if (val)
- ret = qca8k_rmw(priv, reg,
- QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
- QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
- val);
-
break;
default:
dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
@@ -1531,109 +1781,41 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
}
}
-static void
-qca8k_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
switch (port) {
case 0: /* 1st CPU port */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
- state->interface != PHY_INTERFACE_MODE_SGMII)
- goto unsupported;
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
break;
+
case 1:
case 2:
case 3:
case 4:
case 5:
/* Internal PHY */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
- break;
- case 6: /* 2nd CPU port / external PHY */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- state->interface != PHY_INTERFACE_MODE_1000BASEX)
- goto unsupported;
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
break;
- default:
-unsupported:
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
-
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
- phylink_set(mask, 1000baseX_Full);
-
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
-static int
-qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
-{
- struct qca8k_priv *priv = ds->priv;
- u32 reg;
- int ret;
-
- ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
- if (ret < 0)
- return ret;
-
- state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
- state->an_complete = state->link;
- state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
- state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
- DUPLEX_HALF;
-
- switch (reg & QCA8K_PORT_STATUS_SPEED) {
- case QCA8K_PORT_STATUS_SPEED_10:
- state->speed = SPEED_10;
- break;
- case QCA8K_PORT_STATUS_SPEED_100:
- state->speed = SPEED_100;
- break;
- case QCA8K_PORT_STATUS_SPEED_1000:
- state->speed = SPEED_1000;
- break;
- default:
- state->speed = SPEED_UNKNOWN;
+ case 6: /* 2nd CPU port / external PHY */
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
break;
}
- state->pause = MLO_PAUSE_NONE;
- if (reg & QCA8K_PORT_STATUS_RXFLOW)
- state->pause |= MLO_PAUSE_RX;
- if (reg & QCA8K_PORT_STATUS_TXFLOW)
- state->pause |= MLO_PAUSE_TX;
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
- return 1;
+ config->legacy_pre_march2020 = false;
}
static void
@@ -1686,6 +1868,164 @@ qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
}
+static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct qca8k_pcs, pcs);
+}
+
+static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
+ int port = pcs_to_qca8k_pcs(pcs)->port;
+ u32 reg;
+ int ret;
+
+ ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
+ if (ret < 0) {
+ state->link = false;
+ return;
+ }
+
+ state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
+ state->an_complete = state->link;
+ state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
+ state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
+ DUPLEX_HALF;
+
+ switch (reg & QCA8K_PORT_STATUS_SPEED) {
+ case QCA8K_PORT_STATUS_SPEED_10:
+ state->speed = SPEED_10;
+ break;
+ case QCA8K_PORT_STATUS_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case QCA8K_PORT_STATUS_SPEED_1000:
+ state->speed = SPEED_1000;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ if (reg & QCA8K_PORT_STATUS_RXFLOW)
+ state->pause |= MLO_PAUSE_RX;
+ if (reg & QCA8K_PORT_STATUS_TXFLOW)
+ state->pause |= MLO_PAUSE_TX;
+}
+
+static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
+ int cpu_port_index, ret, port;
+ u32 reg, val;
+
+ port = pcs_to_qca8k_pcs(pcs)->port;
+ switch (port) {
+ case 0:
+ reg = QCA8K_REG_PORT0_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT0;
+ break;
+
+ case 6:
+ reg = QCA8K_REG_PORT6_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT6;
+ break;
+
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ /* Enable/disable SerDes auto-negotiation as necessary */
+ ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
+ if (ret)
+ return ret;
+ if (phylink_autoneg_inband(mode))
+ val &= ~QCA8K_PWS_SERDES_AEN_DIS;
+ else
+ val |= QCA8K_PWS_SERDES_AEN_DIS;
+ qca8k_write(priv, QCA8K_REG_PWS, val);
+
+ /* Configure the SGMII parameters */
+ ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
+ if (ret)
+ return ret;
+
+ val |= QCA8K_SGMII_EN_SD;
+
+ if (priv->ports_config.sgmii_enable_pll)
+ val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
+ QCA8K_SGMII_EN_TX;
+
+ if (dsa_is_cpu_port(priv->ds, port)) {
+ /* CPU port, we're talking to the CPU MAC, be a PHY */
+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+ val |= QCA8K_SGMII_MODE_CTRL_PHY;
+ } else if (interface == PHY_INTERFACE_MODE_SGMII) {
+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+ val |= QCA8K_SGMII_MODE_CTRL_MAC;
+ } else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+ val |= QCA8K_SGMII_MODE_CTRL_BASEX;
+ }
+
+ qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
+
+ /* The original code reports port instability, as SGMII also
+ * requires the delay to be set. Apply the advised values here
+ * or take them from DT.
+ */
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+
+ /* For qca8327/qca8328/qca8334/qca8338 the sgmii setting is unique and
+ * the falling edge is set by writing to the PORT0 PAD reg
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8327 ||
+ priv->switch_id == QCA8K_ID_QCA8337)
+ reg = QCA8K_REG_PORT0_PAD_CTRL;
+
+ val = 0;
+
+ /* SGMII Clock phase configuration */
+ if (priv->ports_config.sgmii_rx_clk_falling_edge)
+ val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
+
+ if (priv->ports_config.sgmii_tx_clk_falling_edge)
+ val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
+
+ if (val)
+ ret = qca8k_rmw(priv, reg,
+ QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
+ QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
+ val);
+
+ return 0;
+}
+
+static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
+{
+}
+
+static const struct phylink_pcs_ops qca8k_pcs_ops = {
+ .pcs_get_state = qca8k_pcs_get_state,
+ .pcs_config = qca8k_pcs_config,
+ .pcs_an_restart = qca8k_pcs_an_restart,
+};
+
+static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
+ int port)
+{
+ qpcs->pcs.ops = &qca8k_pcs_ops;
+
+ /* We don't have interrupts for link changes, so we need to poll */
+ qpcs->pcs.poll = true;
+ qpcs->priv = priv;
+ qpcs->port = port;
+}
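+
+/* qca8k_setup() below registers one instance per SGMII-capable port:
+ *
+ *     qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
+ *     qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);
+ *
+ * after which phylink selects the right PCS via
+ * qca8k_phylink_mac_select_pcs().
+ */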
+
static void
qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
{
@@ -1703,6 +2043,97 @@ qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
ETH_GSTRING_LEN);
}
+static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
+{
+ const struct qca8k_match_data *match_data;
+ struct qca8k_mib_eth_data *mib_eth_data;
+ struct qca8k_priv *priv = ds->priv;
+ const struct qca8k_mib_desc *mib;
+ struct mib_ethhdr *mib_ethhdr;
+ int i, mib_len, offset = 0;
+ u64 *data;
+ u8 port;
+
+ mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
+ mib_eth_data = &priv->mib_eth_data;
+
+ /* The switch autocasts MIB counters for every port. Ignore packets from
+ * other ports and parse only the requested one.
+ */
+ port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
+ if (port != mib_eth_data->req_port)
+ goto exit;
+
+ match_data = device_get_match_data(priv->dev);
+ data = mib_eth_data->data;
+
+ for (i = 0; i < match_data->mib_count; i++) {
+ mib = &ar8327_mib[i];
+
+ /* The first 3 mib counters are present in the skb head */
+ if (i < 3) {
+ data[i] = mib_ethhdr->data[i];
+ continue;
+ }
+
+ mib_len = sizeof(uint32_t);
+
+ /* Some mib counters are 64 bit wide */
+ if (mib->size == 2)
+ mib_len = sizeof(uint64_t);
+
+ /* Copy the mib value from the packet to the data array */
+ memcpy(data + i, skb->data + offset, mib_len);
+
+ /* Set the offset for the next mib */
+ offset += mib_len;
+ }
+
+exit:
+ /* Complete on receiving all the mib packets */
+ if (refcount_dec_and_test(&mib_eth_data->port_parsed))
+ complete(&mib_eth_data->rw_done);
+}
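+
+/* Note on the completion scheme: the switch autocasts one MIB packet per
+ * port, so qca8k_get_ethtool_stats_eth() below arms port_parsed with
+ * QCA8K_NUM_PORTS and rw_done only completes once the handler above has
+ * decremented it for every port.
+ */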
+
+static int
+qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct qca8k_mib_eth_data *mib_eth_data;
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ mib_eth_data = &priv->mib_eth_data;
+
+ mutex_lock(&mib_eth_data->mutex);
+
+ reinit_completion(&mib_eth_data->rw_done);
+
+ mib_eth_data->req_port = dp->index;
+ mib_eth_data->data = data;
+ refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
+
+ mutex_lock(&priv->reg_mutex);
+
+ /* Send mib autocast request */
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
+ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
+ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
+ QCA8K_MIB_BUSY);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ if (ret)
+ goto exit;
+
+ ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
+
+exit:
+ mutex_unlock(&mib_eth_data->mutex);
+
+ return ret;
+}
+
static void
qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
@@ -1714,6 +2145,10 @@ qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
u32 hi = 0;
int ret;
+ if (priv->mgmt_master &&
+ qca8k_get_ethtool_stats_eth(ds, port, data) > 0)
+ return;
+
match_data = of_device_get_match_data(priv->dev);
for (i = 0; i < match_data->mib_count; i++) {
@@ -1812,7 +2247,8 @@ qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
static int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
int port_mask, cpu_port;
@@ -1963,7 +2399,8 @@ qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
static int
qca8k_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
u16 port_mask = BIT(port);
@@ -1973,7 +2410,8 @@ qca8k_port_fdb_add(struct dsa_switch *ds, int port,
static int
qca8k_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
u16 port_mask = BIT(port);
@@ -2010,7 +2448,8 @@ qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
static int
qca8k_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct qca8k_priv *priv = ds->priv;
const u8 *addr = mdb->addr;
@@ -2021,7 +2460,8 @@ qca8k_port_mdb_add(struct dsa_switch *ds, int port,
static int
qca8k_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct qca8k_priv *priv = ds->priv;
const u8 *addr = mdb->addr;
@@ -2033,7 +2473,7 @@ qca8k_port_mdb_del(struct dsa_switch *ds, int port,
static int
qca8k_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+ bool ingress, struct netlink_ext_ack *extack)
{
struct qca8k_priv *priv = ds->priv;
int monitor_port, ret;
@@ -2212,18 +2652,16 @@ qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
}
static bool
-qca8k_lag_can_offload(struct dsa_switch *ds,
- struct net_device *lag,
+qca8k_lag_can_offload(struct dsa_switch *ds, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct dsa_port *dp;
- int id, members = 0;
+ int members = 0;
- id = dsa_lag_id(ds->dst, lag);
- if (id < 0 || id >= ds->num_lag_ids)
+ if (!lag.id)
return false;
- dsa_lag_foreach_port(dp, ds->dst, lag)
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
/* Includes the port joining the LAG */
members++;
@@ -2241,16 +2679,14 @@ qca8k_lag_can_offload(struct dsa_switch *ds,
}
static int
-qca8k_lag_setup_hash(struct dsa_switch *ds,
- struct net_device *lag,
+qca8k_lag_setup_hash(struct dsa_switch *ds, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
+ struct net_device *lag_dev = lag.dev;
struct qca8k_priv *priv = ds->priv;
bool unique_lag = true;
+ unsigned int i;
u32 hash = 0;
- int i, id;
-
- id = dsa_lag_id(ds->dst, lag);
switch (info->hash_type) {
case NETDEV_LAG_HASH_L23:
@@ -2267,7 +2703,7 @@ qca8k_lag_setup_hash(struct dsa_switch *ds,
/* Check if we are the unique configured LAG */
dsa_lags_foreach_id(i, ds->dst)
- if (i != id && dsa_lag_dev(ds->dst, i)) {
+ if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
unique_lag = false;
break;
}
@@ -2282,7 +2718,7 @@ qca8k_lag_setup_hash(struct dsa_switch *ds,
if (unique_lag) {
priv->lag_hash_mode = hash;
} else if (priv->lag_hash_mode != hash) {
- netdev_err(lag, "Error: Mismatched Hash Mode across different lag is not supported\n");
+ netdev_err(lag_dev, "Error: Mismatched Hash Mode across different lag is not supported\n");
return -EOPNOTSUPP;
}
@@ -2292,13 +2728,14 @@ qca8k_lag_setup_hash(struct dsa_switch *ds,
static int
qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
- struct net_device *lag, bool delete)
+ struct dsa_lag lag, bool delete)
{
struct qca8k_priv *priv = ds->priv;
int ret, id, i;
u32 val;
- id = dsa_lag_id(ds->dst, lag);
+ /* DSA LAG IDs are one-based, hardware is zero-based */
+ id = lag.id - 1;
/* Read current port member */
ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
@@ -2360,8 +2797,7 @@ qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
}
static int
-qca8k_port_lag_join(struct dsa_switch *ds, int port,
- struct net_device *lag,
+qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
int ret;
@@ -2378,11 +2814,265 @@ qca8k_port_lag_join(struct dsa_switch *ds, int port,
static int
qca8k_port_lag_leave(struct dsa_switch *ds, int port,
- struct net_device *lag)
+ struct dsa_lag lag)
{
return qca8k_lag_refresh_portmap(ds, port, lag, true);
}
+static void
+qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
+ bool operational)
+{
+ struct dsa_port *dp = master->dsa_ptr;
+ struct qca8k_priv *priv = ds->priv;
+
+ /* Ethernet MIB/MDIO is only supported for CPU port 0 */
+ if (dp->index != 0)
+ return;
+
+ mutex_lock(&priv->mgmt_eth_data.mutex);
+ mutex_lock(&priv->mib_eth_data.mutex);
+
+ priv->mgmt_master = operational ? (struct net_device *)master : NULL;
+
+ mutex_unlock(&priv->mib_eth_data.mutex);
+ mutex_unlock(&priv->mgmt_eth_data.mutex);
+}
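+
+/* Holding both mutexes above while flipping mgmt_master guarantees that no
+ * mgmt or mib transaction is in flight when the master changes, so pending
+ * users finish against the old master before it is dropped.
+ */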
+
+static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
+{
+ struct qca_tagger_data *tagger_data;
+
+ switch (proto) {
+ case DSA_TAG_PROTO_QCA:
+ tagger_data = ds->tagger_data;
+
+ tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
+ tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+qca8k_setup(struct dsa_switch *ds)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ int cpu_port, ret, i;
+ u32 mask;
+
+ cpu_port = qca8k_find_cpu_port(ds);
+ if (cpu_port < 0) {
+ dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
+ return cpu_port;
+ }
+
+ /* Parse CPU port config to be later used in phylink mac_config */
+ ret = qca8k_parse_port_config(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_mdio_bus(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_of_pws_reg(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_mac_pwr_sel(priv);
+ if (ret)
+ return ret;
+
+ qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
+ qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);
+
+ /* Make sure MAC06 is disabled */
+ ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
+ QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
+ if (ret) {
+ dev_err(priv->dev, "failed disabling MAC06 exchange");
+ return ret;
+ }
+
+ /* Enable CPU Port */
+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
+ if (ret) {
+ dev_err(priv->dev, "failed enabling CPU port");
+ return ret;
+ }
+
+ /* Enable MIB counters */
+ ret = qca8k_mib_init(priv);
+ if (ret)
+ dev_warn(priv->dev, "mib init failed");
+
+ /* Initial setup of all ports */
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ /* Disable forwarding by default on all ports */
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER, 0);
+ if (ret)
+ return ret;
+
+ /* Enable QCA header mode on all cpu ports */
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
+ if (ret) {
+ dev_err(priv->dev, "failed enabling QCA header mode");
+ return ret;
+ }
+ }
+
+ /* Disable MAC by default on all user ports */
+ if (dsa_is_user_port(ds, i))
+ qca8k_port_set_status(priv, i, 0);
+ }
+
+ /* Forward all unknown frames to CPU port for Linux processing
+ * Notice that in multi-cpu config only one port should be set
+ * for igmp, unknown, multicast and broadcast packets
+ */
+ ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
+ if (ret)
+ return ret;
+
+ /* Setup connection between CPU port & user ports
+ * Configure specific switch configuration for ports
+ */
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ /* CPU port gets connected to all user ports of the switch */
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
+ if (ret)
+ return ret;
+ }
+
+ /* Individual user ports get connected to CPU port only */
+ if (dsa_is_user_port(ds, i)) {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER,
+ BIT(cpu_port));
+ if (ret)
+ return ret;
+
+ /* Enable ARP Auto-learning by default */
+ ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_LEARN);
+ if (ret)
+ return ret;
+
+ /* For port based vlans to work we need to set the
+ * default egress vid
+ */
+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
+ QCA8K_EGREES_VLAN_PORT_MASK(i),
+ QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
+ if (ret)
+ return ret;
+
+ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
+ QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
+ QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
+ if (ret)
+ return ret;
+ }
+
+ /* Port 5 of the qca8337 has some problems under flood conditions. The
+ * original legacy driver had specific buffer and priority settings for
+ * the different ports, suggested by the QCA switch team. Add these
+ * missing settings to improve switch stability under load.
+ * This problem is limited to the qca8337; other qca8k switches are not affected.
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8337) {
+ switch (i) {
+ /* The 2 CPU ports and port 5 require different
+ * priorities than the other ports.
+ */
+ case 0:
+ case 5:
+ case 6:
+ mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
+ QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
+ break;
+ default:
+ mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
+ QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
+ }
+ qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
+
+ mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
+ QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_WRED_EN;
+ qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
+ QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
+ QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_WRED_EN,
+ mask);
+ }
+
+ /* Set the initial MTU for every port.
+ * We only have a general MTU setting, so track
+ * every port and set the max across all ports.
+ * Set the per-port MTU to 1500, as the MTU change function
+ * will add the overhead; if it were set to 1518, the
+ * overhead would be applied again and we would end up
+ * with an MTU of 1536 instead of 1518.
+ */
+ priv->port_mtu[i] = ETH_DATA_LEN;
+ }
+
+ /* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */
+ if (priv->switch_id == QCA8K_ID_QCA8327) {
+ mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
+ qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
+ QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
+ mask);
+ }
+
+ /* Setup our port MTUs to match power on defaults */
+ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
+ if (ret)
+ dev_warn(priv->dev, "failed setting MTU settings");
+
+ /* Flush the FDB table */
+ qca8k_fdb_flush(priv);
+
+ /* Set the min and max ageing values supported */
+ ds->ageing_time_min = 7000;
+ ds->ageing_time_max = 458745000;
+
+ /* Set max number of LAGs supported */
+ ds->num_lag_ids = QCA8K_NUM_LAGS;
+
+ return 0;
+}
+
static const struct dsa_switch_ops qca8k_switch_ops = {
.get_tag_protocol = qca8k_get_tag_protocol,
.setup = qca8k_setup,
@@ -2410,14 +3100,16 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.port_vlan_filtering = qca8k_port_vlan_filtering,
.port_vlan_add = qca8k_port_vlan_add,
.port_vlan_del = qca8k_port_vlan_del,
- .phylink_validate = qca8k_phylink_validate,
- .phylink_mac_link_state = qca8k_phylink_mac_link_state,
+ .phylink_get_caps = qca8k_phylink_get_caps,
+ .phylink_mac_select_pcs = qca8k_phylink_mac_select_pcs,
.phylink_mac_config = qca8k_phylink_mac_config,
.phylink_mac_link_down = qca8k_phylink_mac_link_down,
.phylink_mac_link_up = qca8k_phylink_mac_link_up,
.get_phy_flags = qca8k_get_phy_flags,
.port_lag_join = qca8k_port_lag_join,
.port_lag_leave = qca8k_port_lag_leave,
+ .master_state_change = qca8k_master_change,
+ .connect_tag_protocol = qca8k_connect_tag_protocol,
};
static int qca8k_read_switch_id(struct qca8k_priv *priv)
@@ -2488,6 +3180,10 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
return PTR_ERR(priv->regmap);
}
+ priv->mdio_cache.page = 0xffff;
+ priv->mdio_cache.lo = 0xffff;
+ priv->mdio_cache.hi = 0xffff;
+
/* Check the detected switch id */
ret = qca8k_read_switch_id(priv);
if (ret)
@@ -2497,6 +3193,12 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
if (!priv->ds)
return -ENOMEM;
+ mutex_init(&priv->mgmt_eth_data.mutex);
+ init_completion(&priv->mgmt_eth_data.rw_done);
+
+ mutex_init(&priv->mib_eth_data.mutex);
+ init_completion(&priv->mib_eth_data.rw_done);
+
priv->ds->dev = &mdiodev->dev;
priv->ds->num_ports = QCA8K_NUM_PORTS;
priv->ds->priv = priv;
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index ab4a417b25a9..f375627174c8 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -11,6 +11,11 @@
#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/gpio.h>
+#include <linux/dsa/tag_qca.h>
+
+#define QCA8K_ETHERNET_MDIO_PRIORITY 7
+#define QCA8K_ETHERNET_PHY_PRIORITY 6
+#define QCA8K_ETHERNET_TIMEOUT 100
#define QCA8K_NUM_PORTS 7
#define QCA8K_NUM_CPU_PORTS 2
@@ -63,7 +68,7 @@
#define QCA8K_REG_MODULE_EN 0x030
#define QCA8K_MODULE_EN_MIB BIT(0)
#define QCA8K_REG_MIB 0x034
-#define QCA8K_MIB_FLUSH BIT(24)
+#define QCA8K_MIB_FUNC GENMASK(26, 24)
#define QCA8K_MIB_CPU_KEEP BIT(20)
#define QCA8K_MIB_BUSY BIT(17)
#define QCA8K_MDIO_MASTER_CTRL 0x3c
@@ -313,6 +318,12 @@ enum qca8k_vlan_cmd {
QCA8K_VLAN_READ = 6,
};
+enum qca8k_mid_cmd {
+ QCA8K_MIB_FLUSH = 1,
+ QCA8K_MIB_FLUSH_PORT = 2,
+ QCA8K_MIB_CAST = 3,
+};
+
struct ar8xxx_port_status {
int enabled;
};
@@ -328,6 +339,22 @@ enum {
QCA8K_CPU_PORT6,
};
+struct qca8k_mgmt_eth_data {
+ struct completion rw_done;
+ struct mutex mutex; /* Enforce one mdio read/write at a time */
+ bool ack;
+ u32 seq;
+ u32 data[4];
+};
+
+struct qca8k_mib_eth_data {
+ struct completion rw_done;
+ struct mutex mutex; /* Process one command at a time */
+ refcount_t port_parsed; /* Counter to track parsed ports */
+ u8 req_port;
+ u64 *data; /* pointer to ethtool data */
+};
+
struct qca8k_ports_config {
bool sgmii_rx_clk_falling_edge;
bool sgmii_tx_clk_falling_edge;
@@ -336,6 +363,25 @@ struct qca8k_ports_config {
u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
};
+struct qca8k_mdio_cache {
+/* The 32bit switch registers are accessed indirectly. To achieve this we need
+ * to set the page of the register. Track the last page that was set to reduce
+ * mdio writes
+ */
+ u16 page;
+/* lo and hi can also be cached and from Documentation we can skip one
+ * extra mdio write if lo or hi is didn't change.
+ */
+ u16 lo;
+ u16 hi;
+};
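+
+/* Sketch of how the cache is consulted (hypothetical shape of the qca8k.c
+ * helper, shown only to illustrate the skipped write):
+ *
+ *     static int qca8k_set_page(struct qca8k_priv *priv, u16 page)
+ *     {
+ *             if (page == priv->mdio_cache.page)
+ *                     return 0;       // cached: skip the mdio write
+ *             ...
+ *     }
+ */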
+
+struct qca8k_pcs {
+ struct phylink_pcs pcs;
+ struct qca8k_priv *priv;
+ int port;
+};
+
struct qca8k_priv {
u8 switch_id;
u8 switch_revision;
@@ -353,6 +399,12 @@ struct qca8k_priv {
struct dsa_switch_ops ops;
struct gpio_desc *reset_gpio;
unsigned int port_mtu[QCA8K_NUM_PORTS];
+ struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */
+ struct qca8k_mgmt_eth_data mgmt_eth_data;
+ struct qca8k_mib_eth_data mib_eth_data;
+ struct qca8k_mdio_cache mdio_cache;
+ struct qca8k_pcs pcs_port_0;
+ struct qca8k_pcs pcs_port_6;
};
struct qca8k_mib_desc {
diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c
deleted file mode 100644
index aae46ada8d83..000000000000
--- a/drivers/net/dsa/realtek-smi-core.c
+++ /dev/null
@@ -1,523 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/* Realtek Simple Management Interface (SMI) driver
- * It can be discussed how "simple" this interface is.
- *
- * The SMI protocol piggy-backs the MDIO MDC and MDIO signals levels
- * but the protocol is not MDIO at all. Instead it is a Realtek
- * pecularity that need to bit-bang the lines in a special way to
- * communicate with the switch.
- *
- * ASICs we intend to support with this driver:
- *
- * RTL8366 - The original version, apparently
- * RTL8369 - Similar enough to have the same datsheet as RTL8366
- * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
- * different register layout from the other two
- * RTL8366S - Is this "RTL8366 super"?
- * RTL8367 - Has an OpenWRT driver as well
- * RTL8368S - Seems to be an alternative name for RTL8366RB
- * RTL8370 - Also uses SMI
- *
- * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
- * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
- * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
- * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
- * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/spinlock.h>
-#include <linux/skbuff.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_mdio.h>
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/bitops.h>
-#include <linux/if_bridge.h>
-
-#include "realtek-smi-core.h"
-
-#define REALTEK_SMI_ACK_RETRY_COUNT 5
-#define REALTEK_SMI_HW_STOP_DELAY 25 /* msecs */
-#define REALTEK_SMI_HW_START_DELAY 100 /* msecs */
-
-static inline void realtek_smi_clk_delay(struct realtek_smi *smi)
-{
- ndelay(smi->clk_delay);
-}
-
-static void realtek_smi_start(struct realtek_smi *smi)
-{
- /* Set GPIO pins to output mode, with initial state:
- * SCK = 0, SDA = 1
- */
- gpiod_direction_output(smi->mdc, 0);
- gpiod_direction_output(smi->mdio, 1);
- realtek_smi_clk_delay(smi);
-
- /* CLK 1: 0 -> 1, 1 -> 0 */
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- realtek_smi_clk_delay(smi);
-
- /* CLK 2: */
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdio, 0);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdio, 1);
-}
-
-static void realtek_smi_stop(struct realtek_smi *smi)
-{
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdio, 0);
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdio, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 1);
-
- /* Add a click */
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 1);
-
- /* Set GPIO pins to input mode */
- gpiod_direction_input(smi->mdio);
- gpiod_direction_input(smi->mdc);
-}
-
-static void realtek_smi_write_bits(struct realtek_smi *smi, u32 data, u32 len)
-{
- for (; len > 0; len--) {
- realtek_smi_clk_delay(smi);
-
- /* Prepare data */
- gpiod_set_value(smi->mdio, !!(data & (1 << (len - 1))));
- realtek_smi_clk_delay(smi);
-
- /* Clocking */
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- }
-}
-
-static void realtek_smi_read_bits(struct realtek_smi *smi, u32 len, u32 *data)
-{
- gpiod_direction_input(smi->mdio);
-
- for (*data = 0; len > 0; len--) {
- u32 u;
-
- realtek_smi_clk_delay(smi);
-
- /* Clocking */
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- u = !!gpiod_get_value(smi->mdio);
- gpiod_set_value(smi->mdc, 0);
-
- *data |= (u << (len - 1));
- }
-
- gpiod_direction_output(smi->mdio, 0);
-}
-
-static int realtek_smi_wait_for_ack(struct realtek_smi *smi)
-{
- int retry_cnt;
-
- retry_cnt = 0;
- do {
- u32 ack;
-
- realtek_smi_read_bits(smi, 1, &ack);
- if (ack == 0)
- break;
-
- if (++retry_cnt > REALTEK_SMI_ACK_RETRY_COUNT) {
- dev_err(smi->dev, "ACK timeout\n");
- return -ETIMEDOUT;
- }
- } while (1);
-
- return 0;
-}
-
-static int realtek_smi_write_byte(struct realtek_smi *smi, u8 data)
-{
- realtek_smi_write_bits(smi, data, 8);
- return realtek_smi_wait_for_ack(smi);
-}
-
-static int realtek_smi_write_byte_noack(struct realtek_smi *smi, u8 data)
-{
- realtek_smi_write_bits(smi, data, 8);
- return 0;
-}
-
-static int realtek_smi_read_byte0(struct realtek_smi *smi, u8 *data)
-{
- u32 t;
-
- /* Read data */
- realtek_smi_read_bits(smi, 8, &t);
- *data = (t & 0xff);
-
- /* Send an ACK */
- realtek_smi_write_bits(smi, 0x00, 1);
-
- return 0;
-}
-
-static int realtek_smi_read_byte1(struct realtek_smi *smi, u8 *data)
-{
- u32 t;
-
- /* Read data */
- realtek_smi_read_bits(smi, 8, &t);
- *data = (t & 0xff);
-
- /* Send an ACK */
- realtek_smi_write_bits(smi, 0x01, 1);
-
- return 0;
-}
-
-static int realtek_smi_read_reg(struct realtek_smi *smi, u32 addr, u32 *data)
-{
- unsigned long flags;
- u8 lo = 0;
- u8 hi = 0;
- int ret;
-
- spin_lock_irqsave(&smi->lock, flags);
-
- realtek_smi_start(smi);
-
- /* Send READ command */
- ret = realtek_smi_write_byte(smi, smi->cmd_read);
- if (ret)
- goto out;
-
- /* Set ADDR[7:0] */
- ret = realtek_smi_write_byte(smi, addr & 0xff);
- if (ret)
- goto out;
-
- /* Set ADDR[15:8] */
- ret = realtek_smi_write_byte(smi, addr >> 8);
- if (ret)
- goto out;
-
- /* Read DATA[7:0] */
- realtek_smi_read_byte0(smi, &lo);
- /* Read DATA[15:8] */
- realtek_smi_read_byte1(smi, &hi);
-
- *data = ((u32)lo) | (((u32)hi) << 8);
-
- ret = 0;
-
- out:
- realtek_smi_stop(smi);
- spin_unlock_irqrestore(&smi->lock, flags);
-
- return ret;
-}
-
-static int realtek_smi_write_reg(struct realtek_smi *smi,
- u32 addr, u32 data, bool ack)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&smi->lock, flags);
-
- realtek_smi_start(smi);
-
- /* Send WRITE command */
- ret = realtek_smi_write_byte(smi, smi->cmd_write);
- if (ret)
- goto out;
-
- /* Set ADDR[7:0] */
- ret = realtek_smi_write_byte(smi, addr & 0xff);
- if (ret)
- goto out;
-
- /* Set ADDR[15:8] */
- ret = realtek_smi_write_byte(smi, addr >> 8);
- if (ret)
- goto out;
-
- /* Write DATA[7:0] */
- ret = realtek_smi_write_byte(smi, data & 0xff);
- if (ret)
- goto out;
-
- /* Write DATA[15:8] */
- if (ack)
- ret = realtek_smi_write_byte(smi, data >> 8);
- else
- ret = realtek_smi_write_byte_noack(smi, data >> 8);
- if (ret)
- goto out;
-
- ret = 0;
-
- out:
- realtek_smi_stop(smi);
- spin_unlock_irqrestore(&smi->lock, flags);
-
- return ret;
-}
-
-/* There is one single case when we need to use this accessor and that
- * is when issueing soft reset. Since the device reset as soon as we write
- * that bit, no ACK will come back for natural reasons.
- */
-int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
- u32 data)
-{
- return realtek_smi_write_reg(smi, addr, data, false);
-}
-EXPORT_SYMBOL_GPL(realtek_smi_write_reg_noack);
-
-/* Regmap accessors */
-
-static int realtek_smi_write(void *ctx, u32 reg, u32 val)
-{
- struct realtek_smi *smi = ctx;
-
- return realtek_smi_write_reg(smi, reg, val, true);
-}
-
-static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
-{
- struct realtek_smi *smi = ctx;
-
- return realtek_smi_read_reg(smi, reg, val);
-}
-
-static const struct regmap_config realtek_smi_mdio_regmap_config = {
- .reg_bits = 10, /* A4..A0 R4..R0 */
- .val_bits = 16,
- .reg_stride = 1,
- /* PHY regs are at 0x8000 */
- .max_register = 0xffff,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
- .reg_read = realtek_smi_read,
- .reg_write = realtek_smi_write,
- .cache_type = REGCACHE_NONE,
-};
-
-static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
-{
- struct realtek_smi *smi = bus->priv;
-
- return smi->ops->phy_read(smi, addr, regnum);
-}
-
-static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum,
- u16 val)
-{
- struct realtek_smi *smi = bus->priv;
-
- return smi->ops->phy_write(smi, addr, regnum, val);
-}
-
-int realtek_smi_setup_mdio(struct realtek_smi *smi)
-{
- struct device_node *mdio_np;
- int ret;
-
- mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio");
- if (!mdio_np) {
- dev_err(smi->dev, "no MDIO bus node\n");
- return -ENODEV;
- }
-
- smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
- if (!smi->slave_mii_bus) {
- ret = -ENOMEM;
- goto err_put_node;
- }
- smi->slave_mii_bus->priv = smi;
- smi->slave_mii_bus->name = "SMI slave MII";
- smi->slave_mii_bus->read = realtek_smi_mdio_read;
- smi->slave_mii_bus->write = realtek_smi_mdio_write;
- snprintf(smi->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
- smi->ds->index);
- smi->slave_mii_bus->dev.of_node = mdio_np;
- smi->slave_mii_bus->parent = smi->dev;
- smi->ds->slave_mii_bus = smi->slave_mii_bus;
-
- ret = devm_of_mdiobus_register(smi->dev, smi->slave_mii_bus, mdio_np);
- if (ret) {
- dev_err(smi->dev, "unable to register MDIO bus %s\n",
- smi->slave_mii_bus->id);
- goto err_put_node;
- }
-
- return 0;
-
-err_put_node:
- of_node_put(mdio_np);
-
- return ret;
-}
-
-static int realtek_smi_probe(struct platform_device *pdev)
-{
- const struct realtek_smi_variant *var;
- struct device *dev = &pdev->dev;
- struct realtek_smi *smi;
- struct device_node *np;
- int ret;
-
- var = of_device_get_match_data(dev);
- np = dev->of_node;
-
- smi = devm_kzalloc(dev, sizeof(*smi) + var->chip_data_sz, GFP_KERNEL);
- if (!smi)
- return -ENOMEM;
- smi->chip_data = (void *)smi + sizeof(*smi);
- smi->map = devm_regmap_init(dev, NULL, smi,
- &realtek_smi_mdio_regmap_config);
- if (IS_ERR(smi->map)) {
- ret = PTR_ERR(smi->map);
- dev_err(dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- /* Link forward and backward */
- smi->dev = dev;
- smi->clk_delay = var->clk_delay;
- smi->cmd_read = var->cmd_read;
- smi->cmd_write = var->cmd_write;
- smi->ops = var->ops;
-
- dev_set_drvdata(dev, smi);
- spin_lock_init(&smi->lock);
-
- /* TODO: if power is software controlled, set up any regulators here */
-
- /* Assert then deassert RESET */
- smi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(smi->reset)) {
- dev_err(dev, "failed to get RESET GPIO\n");
- return PTR_ERR(smi->reset);
- }
- msleep(REALTEK_SMI_HW_STOP_DELAY);
- gpiod_set_value(smi->reset, 0);
- msleep(REALTEK_SMI_HW_START_DELAY);
- dev_info(dev, "deasserted RESET\n");
-
- /* Fetch MDIO pins */
- smi->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
- if (IS_ERR(smi->mdc))
- return PTR_ERR(smi->mdc);
- smi->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
- if (IS_ERR(smi->mdio))
- return PTR_ERR(smi->mdio);
-
- smi->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
-
- ret = smi->ops->detect(smi);
- if (ret) {
- dev_err(dev, "unable to detect switch\n");
- return ret;
- }
-
- smi->ds = devm_kzalloc(dev, sizeof(*smi->ds), GFP_KERNEL);
- if (!smi->ds)
- return -ENOMEM;
-
- smi->ds->dev = dev;
- smi->ds->num_ports = smi->num_ports;
- smi->ds->priv = smi;
-
- smi->ds->ops = var->ds_ops;
- ret = dsa_register_switch(smi->ds);
- if (ret) {
- dev_err_probe(dev, ret, "unable to register switch\n");
- return ret;
- }
- return 0;
-}
-
-static int realtek_smi_remove(struct platform_device *pdev)
-{
- struct realtek_smi *smi = platform_get_drvdata(pdev);
-
- if (!smi)
- return 0;
-
- dsa_unregister_switch(smi->ds);
- if (smi->slave_mii_bus)
- of_node_put(smi->slave_mii_bus->dev.of_node);
- gpiod_set_value(smi->reset, 1);
-
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-static void realtek_smi_shutdown(struct platform_device *pdev)
-{
- struct realtek_smi *smi = platform_get_drvdata(pdev);
-
- if (!smi)
- return;
-
- dsa_switch_shutdown(smi->ds);
-
- platform_set_drvdata(pdev, NULL);
-}
-
-static const struct of_device_id realtek_smi_of_match[] = {
- {
- .compatible = "realtek,rtl8366rb",
- .data = &rtl8366rb_variant,
- },
- {
- /* FIXME: add support for RTL8366S and more */
- .compatible = "realtek,rtl8366s",
- .data = NULL,
- },
- {
- .compatible = "realtek,rtl8365mb",
- .data = &rtl8365mb_variant,
- },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
-
-static struct platform_driver realtek_smi_driver = {
- .driver = {
- .name = "realtek-smi",
- .of_match_table = of_match_ptr(realtek_smi_of_match),
- },
- .probe = realtek_smi_probe,
- .remove = realtek_smi_remove,
- .shutdown = realtek_smi_shutdown,
-};
-module_platform_driver(realtek_smi_driver);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
new file mode 100644
index 000000000000..b7427a8292b2
--- /dev/null
+++ b/drivers/net/dsa/realtek/Kconfig
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig NET_DSA_REALTEK
+ tristate "Realtek Ethernet switch family support"
+ depends on NET_DSA
+ select FIXED_PHY
+ select IRQ_DOMAIN
+ select REALTEK_PHY
+ select REGMAP
+ help
+ Select to enable support for Realtek Ethernet switch chips.
+
+config NET_DSA_REALTEK_MDIO
+ tristate "Realtek MDIO connected switch driver"
+ depends on NET_DSA_REALTEK
+ help
+ Select to enable support for registering switches configured
+ through MDIO.
+
+config NET_DSA_REALTEK_SMI
+ tristate "Realtek SMI connected switch driver"
+ depends on NET_DSA_REALTEK
+ help
+ Select to enable support for registering switches connected
+ through SMI.
+
+config NET_DSA_REALTEK_RTL8365MB
+ tristate "Realtek RTL8365MB switch subdriver"
+ depends on NET_DSA_REALTEK
+ depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
+ select NET_DSA_TAG_RTL8_4
+ help
+ Select to enable support for Realtek RTL8365MB-VC and RTL8367S.
+
+config NET_DSA_REALTEK_RTL8366RB
+ tristate "Realtek RTL8366RB switch subdriver"
+ depends on NET_DSA_REALTEK
+ depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
+ select NET_DSA_TAG_RTL4_A
+ help
+	  Select to enable support for Realtek RTL8366RB.
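
For orientation, a minimal sketch of a configuration fragment that selects the SMI flavour together with the RTL8366RB subdriver; this is illustrative only (not part of the patch) and assumes the NET_DSA prerequisites are already enabled:

	CONFIG_NET_DSA_REALTEK=m
	CONFIG_NET_DSA_REALTEK_SMI=m
	CONFIG_NET_DSA_REALTEK_RTL8366RB=m
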
diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile
new file mode 100644
index 000000000000..0aab57252a7c
--- /dev/null
+++ b/drivers/net/dsa/realtek/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_NET_DSA_REALTEK_MDIO) += realtek-mdio.o
+obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
+obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o
+rtl8366-objs := rtl8366-core.o rtl8366rb.o
+obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o
diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
new file mode 100644
index 000000000000..31e1f100e48e
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-mdio.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Realtek MDIO interface driver
+ *
+ * ASICs we intend to support with this driver:
+ *
+ * RTL8366 - The original version, apparently
+ * RTL8369 - Similar enough to have the same datasheet as RTL8366
+ * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
+ * different register layout from the other two
+ * RTL8366S - Is this "RTL8366 super"?
+ * RTL8367 - Has an OpenWRT driver as well
+ * RTL8368S - Seems to be an alternative name for RTL8366RB
+ * RTL8370 - Also uses SMI
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include "realtek.h"
+
+/* Read/write via mdiobus */
+#define REALTEK_MDIO_CTRL0_REG 31
+#define REALTEK_MDIO_START_REG 29
+#define REALTEK_MDIO_CTRL1_REG 21
+#define REALTEK_MDIO_ADDRESS_REG 23
+#define REALTEK_MDIO_DATA_WRITE_REG 24
+#define REALTEK_MDIO_DATA_READ_REG 25
+
+#define REALTEK_MDIO_START_OP 0xFFFF
+#define REALTEK_MDIO_ADDR_OP 0x000E
+#define REALTEK_MDIO_READ_OP 0x0001
+#define REALTEK_MDIO_WRITE_OP 0x0003
+
+static int realtek_mdio_write(void *ctx, u32 reg, u32 val)
+{
+ struct realtek_priv *priv = ctx;
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ mutex_lock(&bus->mdio_lock);
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL0_REG, REALTEK_MDIO_ADDR_OP);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_ADDRESS_REG, reg);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_DATA_WRITE_REG, val);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL1_REG, REALTEK_MDIO_WRITE_OP);
+
+out_unlock:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int realtek_mdio_read(void *ctx, u32 reg, u32 *val)
+{
+ struct realtek_priv *priv = ctx;
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ mutex_lock(&bus->mdio_lock);
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL0_REG, REALTEK_MDIO_ADDR_OP);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_ADDRESS_REG, reg);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL1_REG, REALTEK_MDIO_READ_OP);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->read(bus, priv->mdio_addr, REALTEK_MDIO_DATA_READ_REG);
+ if (ret >= 0) {
+ *val = ret;
+ ret = 0;
+ }
+
+out_unlock:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static void realtek_mdio_lock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_lock(&priv->map_lock);
+}
+
+static void realtek_mdio_unlock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_unlock(&priv->map_lock);
+}
+
+static const struct regmap_config realtek_mdio_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_mdio_read,
+ .reg_write = realtek_mdio_write,
+ .cache_type = REGCACHE_NONE,
+ .lock = realtek_mdio_lock,
+ .unlock = realtek_mdio_unlock,
+};
+
+static const struct regmap_config realtek_mdio_nolock_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_mdio_read,
+ .reg_write = realtek_mdio_write,
+ .cache_type = REGCACHE_NONE,
+ .disable_locking = true,
+};
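/* A note on the two maps above: the first config funnels every access
 * through priv->map_lock via the lock/unlock callbacks, while the
 * lock-less config disables regmap's internal locking entirely. A
 * subdriver can then take priv->map_lock once around a multi-register
 * sequence and use priv->map_nolock inside it, keeping the whole
 * sequence atomic with respect to other regmap users.
 */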
+
+static int realtek_mdio_probe(struct mdio_device *mdiodev)
+{
+ struct realtek_priv *priv;
+ struct device *dev = &mdiodev->dev;
+ const struct realtek_variant *var;
+ struct regmap_config rc;
+ struct device_node *np;
+ int ret;
+
+ var = of_device_get_match_data(dev);
+ if (!var)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->map_lock);
+
+ rc = realtek_mdio_regmap_config;
+ rc.lock_arg = priv;
+ priv->map = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map)) {
+ ret = PTR_ERR(priv->map);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ rc = realtek_mdio_nolock_regmap_config;
+ priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map_nolock)) {
+ ret = PTR_ERR(priv->map_nolock);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ priv->mdio_addr = mdiodev->addr;
+ priv->bus = mdiodev->bus;
+ priv->dev = &mdiodev->dev;
+ priv->chip_data = (void *)priv + sizeof(*priv);
+
+ priv->clk_delay = var->clk_delay;
+ priv->cmd_read = var->cmd_read;
+ priv->cmd_write = var->cmd_write;
+ priv->ops = var->ops;
+
+ priv->write_reg_noack = realtek_mdio_write;
+
+ np = dev->of_node;
+
+ dev_set_drvdata(dev, priv);
+
+ /* TODO: if power is software controlled, set up any regulators here */
+ priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
+
+ priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return PTR_ERR(priv->reset);
+ }
+
+ if (priv->reset) {
+ gpiod_set_value(priv->reset, 1);
+ dev_dbg(dev, "asserted RESET\n");
+ msleep(REALTEK_HW_STOP_DELAY);
+ gpiod_set_value(priv->reset, 0);
+ msleep(REALTEK_HW_START_DELAY);
+ dev_dbg(dev, "deasserted RESET\n");
+ }
+
+ ret = priv->ops->detect(priv);
+ if (ret) {
+ dev_err(dev, "unable to detect switch\n");
+ return ret;
+ }
+
+ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->ds->dev = dev;
+ priv->ds->num_ports = priv->num_ports;
+ priv->ds->priv = priv;
+ priv->ds->ops = var->ds_ops_mdio;
+
+ ret = dsa_register_switch(priv->ds);
+ if (ret) {
+ dev_err(priv->dev, "unable to register switch ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void realtek_mdio_remove(struct mdio_device *mdiodev)
+{
+ struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_unregister_switch(priv->ds);
+
+ /* leave the device reset asserted */
+ if (priv->reset)
+ gpiod_set_value(priv->reset, 1);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void realtek_mdio_shutdown(struct mdio_device *mdiodev)
+{
+ struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static const struct of_device_id realtek_mdio_of_match[] = {
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB)
+ { .compatible = "realtek,rtl8366rb", .data = &rtl8366rb_variant, },
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
+ { .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, },
+ { .compatible = "realtek,rtl8367s", .data = &rtl8365mb_variant, },
+#endif
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, realtek_mdio_of_match);
+
+static struct mdio_driver realtek_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "realtek-mdio",
+ .of_match_table = of_match_ptr(realtek_mdio_of_match),
+ },
+ .probe = realtek_mdio_probe,
+ .remove = realtek_mdio_remove,
+ .shutdown = realtek_mdio_shutdown,
+};
+
+mdio_module_driver(realtek_mdio_driver);
+
+MODULE_AUTHOR("Luiz Angelo Daros de Luca <luizluca@gmail.com>");
+MODULE_DESCRIPTION("Driver for Realtek ethernet switch connected via MDIO interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
new file mode 100644
index 000000000000..2243d3da55b2
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -0,0 +1,581 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Realtek Simple Management Interface (SMI) driver
+ * It can be discussed how "simple" this interface is.
+ *
+ * The SMI protocol piggy-backs on the MDIO MDC and MDIO signal levels,
+ * but the protocol is not MDIO at all. Instead it is a Realtek
+ * peculiarity that needs to bit-bang the lines in a special way to
+ * communicate with the switch.
+ *
+ * ASICs we intend to support with this driver:
+ *
+ * RTL8366 - The original version, apparently
+ * RTL8369 - Similar enough to have the same datasheet as RTL8366
+ * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
+ * different register layout from the other two
+ * RTL8366S - Is this "RTL8366 super"?
+ * RTL8367 - Has an OpenWRT driver as well
+ * RTL8368S - Seems to be an alternative name for RTL8366RB
+ * RTL8370 - Also uses SMI
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+#include <linux/if_bridge.h>
+
+#include "realtek.h"
+
+#define REALTEK_SMI_ACK_RETRY_COUNT 5
+
+static inline void realtek_smi_clk_delay(struct realtek_priv *priv)
+{
+ ndelay(priv->clk_delay);
+}
+
+static void realtek_smi_start(struct realtek_priv *priv)
+{
+ /* Set GPIO pins to output mode, with initial state:
+ * SCK = 0, SDA = 1
+ */
+ gpiod_direction_output(priv->mdc, 0);
+ gpiod_direction_output(priv->mdio, 1);
+ realtek_smi_clk_delay(priv);
+
+ /* CLK 1: 0 -> 1, 1 -> 0 */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+
+ /* CLK 2: */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 1);
+}
+
+static void realtek_smi_stop(struct realtek_priv *priv)
+{
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 0);
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 1);
+
+ /* Add a click */
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 1);
+
+ /* Set GPIO pins to input mode */
+ gpiod_direction_input(priv->mdio);
+ gpiod_direction_input(priv->mdc);
+}
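/* Taken together, start/stop frame each SMI transaction much like I2C:
 * MDIO falling while MDC is high marks START, MDIO rising while MDC is
 * high marks STOP, and data bits only change while MDC is low.
 */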
+
+static void realtek_smi_write_bits(struct realtek_priv *priv, u32 data, u32 len)
+{
+ for (; len > 0; len--) {
+ realtek_smi_clk_delay(priv);
+
+ /* Prepare data */
+ gpiod_set_value(priv->mdio, !!(data & (1 << (len - 1))));
+ realtek_smi_clk_delay(priv);
+
+ /* Clocking */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ }
+}
+
+static void realtek_smi_read_bits(struct realtek_priv *priv, u32 len, u32 *data)
+{
+ gpiod_direction_input(priv->mdio);
+
+ for (*data = 0; len > 0; len--) {
+ u32 u;
+
+ realtek_smi_clk_delay(priv);
+
+ /* Clocking */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ u = !!gpiod_get_value(priv->mdio);
+ gpiod_set_value(priv->mdc, 0);
+
+ *data |= (u << (len - 1));
+ }
+
+ gpiod_direction_output(priv->mdio, 0);
+}
+
+static int realtek_smi_wait_for_ack(struct realtek_priv *priv)
+{
+ int retry_cnt;
+
+ retry_cnt = 0;
+ do {
+ u32 ack;
+
+ realtek_smi_read_bits(priv, 1, &ack);
+ if (ack == 0)
+ break;
+
+ if (++retry_cnt > REALTEK_SMI_ACK_RETRY_COUNT) {
+ dev_err(priv->dev, "ACK timeout\n");
+ return -ETIMEDOUT;
+ }
+ } while (1);
+
+ return 0;
+}
+
+static int realtek_smi_write_byte(struct realtek_priv *priv, u8 data)
+{
+ realtek_smi_write_bits(priv, data, 8);
+ return realtek_smi_wait_for_ack(priv);
+}
+
+static int realtek_smi_write_byte_noack(struct realtek_priv *priv, u8 data)
+{
+ realtek_smi_write_bits(priv, data, 8);
+ return 0;
+}
+
+static int realtek_smi_read_byte0(struct realtek_priv *priv, u8 *data)
+{
+ u32 t;
+
+ /* Read data */
+ realtek_smi_read_bits(priv, 8, &t);
+ *data = (t & 0xff);
+
+ /* Send an ACK */
+ realtek_smi_write_bits(priv, 0x00, 1);
+
+ return 0;
+}
+
+static int realtek_smi_read_byte1(struct realtek_priv *priv, u8 *data)
+{
+ u32 t;
+
+ /* Read data */
+ realtek_smi_read_bits(priv, 8, &t);
+ *data = (t & 0xff);
+
+ /* Send an ACK */
+ realtek_smi_write_bits(priv, 0x01, 1);
+
+ return 0;
+}
+
+static int realtek_smi_read_reg(struct realtek_priv *priv, u32 addr, u32 *data)
+{
+ unsigned long flags;
+ u8 lo = 0;
+ u8 hi = 0;
+ int ret;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ realtek_smi_start(priv);
+
+ /* Send READ command */
+ ret = realtek_smi_write_byte(priv, priv->cmd_read);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[7:0] */
+ ret = realtek_smi_write_byte(priv, addr & 0xff);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[15:8] */
+ ret = realtek_smi_write_byte(priv, addr >> 8);
+ if (ret)
+ goto out;
+
+ /* Read DATA[7:0] */
+ realtek_smi_read_byte0(priv, &lo);
+ /* Read DATA[15:8] */
+ realtek_smi_read_byte1(priv, &hi);
+
+ *data = ((u32)lo) | (((u32)hi) << 8);
+
+ ret = 0;
+
+ out:
+ realtek_smi_stop(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
+
+static int realtek_smi_write_reg(struct realtek_priv *priv,
+ u32 addr, u32 data, bool ack)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ realtek_smi_start(priv);
+
+ /* Send WRITE command */
+ ret = realtek_smi_write_byte(priv, priv->cmd_write);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[7:0] */
+ ret = realtek_smi_write_byte(priv, addr & 0xff);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[15:8] */
+ ret = realtek_smi_write_byte(priv, addr >> 8);
+ if (ret)
+ goto out;
+
+ /* Write DATA[7:0] */
+ ret = realtek_smi_write_byte(priv, data & 0xff);
+ if (ret)
+ goto out;
+
+ /* Write DATA[15:8] */
+ if (ack)
+ ret = realtek_smi_write_byte(priv, data >> 8);
+ else
+ ret = realtek_smi_write_byte_noack(priv, data >> 8);
+ if (ret)
+ goto out;
+
+ ret = 0;
+
+ out:
+ realtek_smi_stop(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
+
+/* There is one single case where we need to use this accessor and that
+ * is when issuing a soft reset. Since the device resets as soon as we write
+ * that bit, no ACK will come back for natural reasons.
+ */
+static int realtek_smi_write_reg_noack(void *ctx, u32 reg, u32 val)
+{
+ return realtek_smi_write_reg(ctx, reg, val, false);
+}
+
+/* Regmap accessors */
+
+static int realtek_smi_write(void *ctx, u32 reg, u32 val)
+{
+ struct realtek_priv *priv = ctx;
+
+ return realtek_smi_write_reg(priv, reg, val, true);
+}
+
+static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
+{
+ struct realtek_priv *priv = ctx;
+
+ return realtek_smi_read_reg(priv, reg, val);
+}
+
+static void realtek_smi_lock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_lock(&priv->map_lock);
+}
+
+static void realtek_smi_unlock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_unlock(&priv->map_lock);
+}
+
+static const struct regmap_config realtek_smi_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_smi_read,
+ .reg_write = realtek_smi_write,
+ .cache_type = REGCACHE_NONE,
+ .lock = realtek_smi_lock,
+ .unlock = realtek_smi_unlock,
+};
+
+static const struct regmap_config realtek_smi_nolock_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_smi_read,
+ .reg_write = realtek_smi_write,
+ .cache_type = REGCACHE_NONE,
+ .disable_locking = true,
+};
+
+static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct realtek_priv *priv = bus->priv;
+
+ return priv->ops->phy_read(priv, addr, regnum);
+}
+
+static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct realtek_priv *priv = bus->priv;
+
+ return priv->ops->phy_write(priv, addr, regnum, val);
+}
+
+static int realtek_smi_setup_mdio(struct dsa_switch *ds)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct device_node *mdio_np;
+ int ret;
+
+ mdio_np = of_get_compatible_child(priv->dev->of_node, "realtek,smi-mdio");
+ if (!mdio_np) {
+ dev_err(priv->dev, "no MDIO bus node\n");
+ return -ENODEV;
+ }
+
+ priv->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
+ if (!priv->slave_mii_bus) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
+ priv->slave_mii_bus->priv = priv;
+ priv->slave_mii_bus->name = "SMI slave MII";
+ priv->slave_mii_bus->read = realtek_smi_mdio_read;
+ priv->slave_mii_bus->write = realtek_smi_mdio_write;
+ snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
+ ds->index);
+ priv->slave_mii_bus->dev.of_node = mdio_np;
+ priv->slave_mii_bus->parent = priv->dev;
+ ds->slave_mii_bus = priv->slave_mii_bus;
+
+ ret = devm_of_mdiobus_register(priv->dev, priv->slave_mii_bus, mdio_np);
+ if (ret) {
+ dev_err(priv->dev, "unable to register MDIO bus %s\n",
+ priv->slave_mii_bus->id);
+ goto err_put_node;
+ }
+
+ return 0;
+
+err_put_node:
+ of_node_put(mdio_np);
+
+ return ret;
+}
+
+static int realtek_smi_probe(struct platform_device *pdev)
+{
+ const struct realtek_variant *var;
+ struct device *dev = &pdev->dev;
+ struct realtek_priv *priv;
+ struct regmap_config rc;
+ struct device_node *np;
+ int ret;
+
+ var = of_device_get_match_data(dev);
+ np = dev->of_node;
+
+ priv = devm_kzalloc(dev, sizeof(*priv) + var->chip_data_sz, GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->chip_data = (void *)priv + sizeof(*priv);
+
+ mutex_init(&priv->map_lock);
+
+ rc = realtek_smi_regmap_config;
+ rc.lock_arg = priv;
+ priv->map = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map)) {
+ ret = PTR_ERR(priv->map);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ rc = realtek_smi_nolock_regmap_config;
+ priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map_nolock)) {
+ ret = PTR_ERR(priv->map_nolock);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Link forward and backward */
+ priv->dev = dev;
+ priv->clk_delay = var->clk_delay;
+ priv->cmd_read = var->cmd_read;
+ priv->cmd_write = var->cmd_write;
+ priv->ops = var->ops;
+
+ priv->setup_interface = realtek_smi_setup_mdio;
+ priv->write_reg_noack = realtek_smi_write_reg_noack;
+
+ dev_set_drvdata(dev, priv);
+ spin_lock_init(&priv->lock);
+
+ /* TODO: if power is software controlled, set up any regulators here */
+
+ priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return PTR_ERR(priv->reset);
+ }
+ if (priv->reset) {
+ gpiod_set_value(priv->reset, 1);
+ dev_dbg(dev, "asserted RESET\n");
+ msleep(REALTEK_HW_STOP_DELAY);
+ gpiod_set_value(priv->reset, 0);
+ msleep(REALTEK_HW_START_DELAY);
+ dev_dbg(dev, "deasserted RESET\n");
+ }
+
+ /* Fetch MDIO pins */
+ priv->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->mdc))
+ return PTR_ERR(priv->mdc);
+ priv->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->mdio))
+ return PTR_ERR(priv->mdio);
+
+ priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
+
+ ret = priv->ops->detect(priv);
+ if (ret) {
+ dev_err(dev, "unable to detect switch\n");
+ return ret;
+ }
+
+ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->ds->dev = dev;
+ priv->ds->num_ports = priv->num_ports;
+ priv->ds->priv = priv;
+
+ priv->ds->ops = var->ds_ops_smi;
+ ret = dsa_register_switch(priv->ds);
+ if (ret) {
+ dev_err_probe(dev, ret, "unable to register switch\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int realtek_smi_remove(struct platform_device *pdev)
+{
+ struct realtek_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return 0;
+
+ dsa_unregister_switch(priv->ds);
+ if (priv->slave_mii_bus)
+ of_node_put(priv->slave_mii_bus->dev.of_node);
+
+ /* leave the device reset asserted */
+ if (priv->reset)
+ gpiod_set_value(priv->reset, 1);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static void realtek_smi_shutdown(struct platform_device *pdev)
+{
+ struct realtek_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct of_device_id realtek_smi_of_match[] = {
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB)
+ {
+ .compatible = "realtek,rtl8366rb",
+ .data = &rtl8366rb_variant,
+ },
+#endif
+ {
+ /* FIXME: add support for RTL8366S and more */
+ .compatible = "realtek,rtl8366s",
+ .data = NULL,
+ },
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
+ {
+ .compatible = "realtek,rtl8365mb",
+ .data = &rtl8365mb_variant,
+ },
+ {
+ .compatible = "realtek,rtl8367s",
+ .data = &rtl8365mb_variant,
+ },
+#endif
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
+
+static struct platform_driver realtek_smi_driver = {
+ .driver = {
+ .name = "realtek-smi",
+ .of_match_table = of_match_ptr(realtek_smi_of_match),
+ },
+ .probe = realtek_smi_probe,
+ .remove = realtek_smi_remove,
+ .shutdown = realtek_smi_shutdown,
+};
+module_platform_driver(realtek_smi_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Driver for Realtek ethernet switch connected via SMI interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek/realtek.h
index 5bfa53e2480a..4fa7c6ba874a 100644
--- a/drivers/net/dsa/realtek-smi-core.h
+++ b/drivers/net/dsa/realtek/realtek.h
@@ -5,15 +5,18 @@
* Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
*/
-#ifndef _REALTEK_SMI_H
-#define _REALTEK_SMI_H
+#ifndef _REALTEK_H
+#define _REALTEK_H
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>
-struct realtek_smi_ops;
+#define REALTEK_HW_STOP_DELAY 25 /* msecs */
+#define REALTEK_HW_START_DELAY 100 /* msecs */
+
+struct realtek_ops;
struct dentry;
struct inode;
struct file;
@@ -25,7 +28,7 @@ struct rtl8366_mib_counter {
const char *name;
};
-/**
+/*
* struct rtl8366_vlan_mc - Virtual LAN member configuration
*/
struct rtl8366_vlan_mc {
@@ -43,13 +46,17 @@ struct rtl8366_vlan_4k {
u8 fid;
};
-struct realtek_smi {
+struct realtek_priv {
struct device *dev;
struct gpio_desc *reset;
struct gpio_desc *mdc;
struct gpio_desc *mdio;
struct regmap *map;
+ struct regmap *map_nolock;
+ struct mutex map_lock;
struct mii_bus *slave_mii_bus;
+ struct mii_bus *bus;
+ int mdio_addr;
unsigned int clk_delay;
u8 cmd_read;
@@ -65,7 +72,9 @@ struct realtek_smi {
unsigned int num_mib_counters;
struct rtl8366_mib_counter *mib_counters;
- const struct realtek_smi_ops *ops;
+ const struct realtek_ops *ops;
+ int (*setup_interface)(struct dsa_switch *ds);
+ int (*write_reg_noack)(void *ctx, u32 addr, u32 data);
int vlan_enabled;
int vlan4k_enabled;
@@ -74,61 +83,57 @@ struct realtek_smi {
void *chip_data; /* Per-chip extra variant data */
};
-/**
- * struct realtek_smi_ops - vtable for the per-SMI-chiptype operations
+/*
+ * struct realtek_ops - vtable for the per-SMI-chiptype operations
* @detect: detects the chiptype
*/
-struct realtek_smi_ops {
- int (*detect)(struct realtek_smi *smi);
- int (*reset_chip)(struct realtek_smi *smi);
- int (*setup)(struct realtek_smi *smi);
- void (*cleanup)(struct realtek_smi *smi);
- int (*get_mib_counter)(struct realtek_smi *smi,
+struct realtek_ops {
+ int (*detect)(struct realtek_priv *priv);
+ int (*reset_chip)(struct realtek_priv *priv);
+ int (*setup)(struct realtek_priv *priv);
+ void (*cleanup)(struct realtek_priv *priv);
+ int (*get_mib_counter)(struct realtek_priv *priv,
int port,
struct rtl8366_mib_counter *mib,
u64 *mibvalue);
- int (*get_vlan_mc)(struct realtek_smi *smi, u32 index,
+ int (*get_vlan_mc)(struct realtek_priv *priv, u32 index,
struct rtl8366_vlan_mc *vlanmc);
- int (*set_vlan_mc)(struct realtek_smi *smi, u32 index,
+ int (*set_vlan_mc)(struct realtek_priv *priv, u32 index,
const struct rtl8366_vlan_mc *vlanmc);
- int (*get_vlan_4k)(struct realtek_smi *smi, u32 vid,
+ int (*get_vlan_4k)(struct realtek_priv *priv, u32 vid,
struct rtl8366_vlan_4k *vlan4k);
- int (*set_vlan_4k)(struct realtek_smi *smi,
+ int (*set_vlan_4k)(struct realtek_priv *priv,
const struct rtl8366_vlan_4k *vlan4k);
- int (*get_mc_index)(struct realtek_smi *smi, int port, int *val);
- int (*set_mc_index)(struct realtek_smi *smi, int port, int index);
- bool (*is_vlan_valid)(struct realtek_smi *smi, unsigned int vlan);
- int (*enable_vlan)(struct realtek_smi *smi, bool enable);
- int (*enable_vlan4k)(struct realtek_smi *smi, bool enable);
- int (*enable_port)(struct realtek_smi *smi, int port, bool enable);
- int (*phy_read)(struct realtek_smi *smi, int phy, int regnum);
- int (*phy_write)(struct realtek_smi *smi, int phy, int regnum,
+ int (*get_mc_index)(struct realtek_priv *priv, int port, int *val);
+ int (*set_mc_index)(struct realtek_priv *priv, int port, int index);
+ bool (*is_vlan_valid)(struct realtek_priv *priv, unsigned int vlan);
+ int (*enable_vlan)(struct realtek_priv *priv, bool enable);
+ int (*enable_vlan4k)(struct realtek_priv *priv, bool enable);
+ int (*enable_port)(struct realtek_priv *priv, int port, bool enable);
+ int (*phy_read)(struct realtek_priv *priv, int phy, int regnum);
+ int (*phy_write)(struct realtek_priv *priv, int phy, int regnum,
u16 val);
};
-struct realtek_smi_variant {
- const struct dsa_switch_ops *ds_ops;
- const struct realtek_smi_ops *ops;
+struct realtek_variant {
+ const struct dsa_switch_ops *ds_ops_smi;
+ const struct dsa_switch_ops *ds_ops_mdio;
+ const struct realtek_ops *ops;
unsigned int clk_delay;
u8 cmd_read;
u8 cmd_write;
size_t chip_data_sz;
};
-/* SMI core calls */
-int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
- u32 data);
-int realtek_smi_setup_mdio(struct realtek_smi *smi);
-
/* RTL8366 library helpers */
-int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
-int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+int rtl8366_mc_is_used(struct realtek_priv *priv, int mc_index, int *used);
+int rtl8366_set_vlan(struct realtek_priv *priv, int vid, u32 member,
u32 untag, u32 fid);
-int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
+int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
unsigned int vid);
-int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
-int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
-int rtl8366_reset_vlan(struct realtek_smi *smi);
+int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable);
+int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable);
+int rtl8366_reset_vlan(struct realtek_priv *priv);
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack);
@@ -139,7 +144,7 @@ void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset);
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
-extern const struct realtek_smi_variant rtl8366rb_variant;
-extern const struct realtek_smi_variant rtl8365mb_variant;
+extern const struct realtek_variant rtl8366rb_variant;
+extern const struct realtek_variant rtl8365mb_variant;
-#endif /* _REALTEK_SMI_H */
+#endif /* _REALTEK_H */
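
The realtek_variant structure is what ties the two interface drivers to a subdriver; each subdriver now exports one DSA ops table per interface. A sketch of the shape (all names and values are illustrative, not copied from a real subdriver):

	static const struct realtek_ops example_ops = {
		.detect = example_detect,	/* hypothetical helpers */
		.phy_read = example_phy_read,
		.phy_write = example_phy_write,
	};

	const struct realtek_variant example_variant = {
		.ds_ops_smi = &example_ds_ops_smi,
		.ds_ops_mdio = &example_ds_ops_mdio,
		.ops = &example_ops,
		.clk_delay = 10,	/* ns between bit-bang edges */
		.cmd_read = 0xa9,
		.cmd_write = 0xa8,
		.chip_data_sz = sizeof(struct example_chip_data),
	};
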
diff --git a/drivers/net/dsa/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index 3b729544798b..3d70e8a77ecf 100644
--- a/drivers/net/dsa/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -99,18 +99,28 @@
#include <linux/regmap.h>
#include <linux/if_bridge.h>
-#include "realtek-smi-core.h"
+#include "realtek.h"
/* Chip-specific data and limits */
-#define RTL8365MB_CHIP_ID_8365MB_VC 0x6367
-#define RTL8365MB_CPU_PORT_NUM_8365MB_VC 6
-#define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC 2112
+#define RTL8365MB_CHIP_ID_8365MB_VC 0x6367
+#define RTL8365MB_CHIP_VER_8365MB_VC 0x0040
+
+#define RTL8365MB_CHIP_ID_8367S 0x6367
+#define RTL8365MB_CHIP_VER_8367S 0x00A0
+
+#define RTL8365MB_CHIP_ID_8367RB 0x6367
+#define RTL8365MB_CHIP_VER_8367RB 0x0020
/* Family-specific data and limits */
-#define RTL8365MB_PHYADDRMAX 7
-#define RTL8365MB_NUM_PHYREGS 32
-#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
-#define RTL8365MB_MAX_NUM_PORTS (RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1)
+#define RTL8365MB_PHYADDRMAX 7
+#define RTL8365MB_NUM_PHYREGS 32
+#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
+/* RTL8370MB and RTL8310SR, possibly supportable by this driver, have 10 ports */
+#define RTL8365MB_MAX_NUM_PORTS 10
+#define RTL8365MB_LEARN_LIMIT_MAX 2112
+
+/* Valid for all variants with 6 ports or fewer */
+static const int rtl8365mb_extint_port_map[] = { -1, -1, -1, -1, -1, -1, 1, 2, -1, -1 };
/* Chip identification registers */
#define RTL8365MB_CHIP_ID_REG 0x1300
@@ -191,7 +201,7 @@
/* The PHY OCP addresses of PHY registers 0~31 start here */
#define RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE 0xA400
-/* EXT port interface mode values - used in DIGITAL_INTERFACE_SELECT */
+/* EXT interface port mode values - used in DIGITAL_INTERFACE_SELECT */
#define RTL8365MB_EXT_PORT_MODE_DISABLE 0
#define RTL8365MB_EXT_PORT_MODE_RGMII 1
#define RTL8365MB_EXT_PORT_MODE_MII_MAC 2
@@ -207,39 +217,56 @@
#define RTL8365MB_EXT_PORT_MODE_1000X 12
#define RTL8365MB_EXT_PORT_MODE_100FX 13
-/* EXT port interface mode configuration registers 0~1 */
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extport) \
- (RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 + \
- ((_extport) >> 1) * (0x13C3 - 0x1305))
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extport) \
- (0xF << (((_extport) % 2)))
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extport) \
- (((_extport) % 2) * 4)
-
-/* EXT port RGMII TX/RX delay configuration registers 1~2 */
-#define RTL8365MB_EXT_RGMXF_REG1 0x1307
-#define RTL8365MB_EXT_RGMXF_REG2 0x13C5
-#define RTL8365MB_EXT_RGMXF_REG(_extport) \
- (RTL8365MB_EXT_RGMXF_REG1 + \
- (((_extport) >> 1) * (0x13C5 - 0x1307)))
+/* Realtek documentation and drivers use the logical numbers EXT_PORT0=16,
+ * EXT_PORT1=17 and EXT_PORT2=18 to refer to switch ports. Such a logical
+ * number is internally converted to either a physical port number (0..9)
+ * or an external interface id (0..2), depending on which function is
+ * called. The external interface id is calculated as
+ * (ext_id = logic_port - 15), while the logical-to-physical map depends
+ * on the chip id/version.
+ *
+ * EXT_PORT0, mentioned in datasheets and in the rtl8367c driver, is used
+ * in this driver as ext_id == 1. EXT_PORT2, mentioned in the Realtek
+ * rtl8367c driver for 10-port switches, would have an ext_id of 3 (out of
+ * range for most extint macros), and ext_id 0 does not seem to be used
+ * for this family either.
+ */
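/* An illustrative helper (not part of the patch) spelling out the
 * mapping just described:
 */
static inline int example_logic_port_to_ext_id(int logic_port)
{
	return logic_port - 15;	/* EXT_PORT0 (16) -> ext_id 1 */
}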
+
+/* EXT interface mode configuration registers 0~1 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305 /* EXT1 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3 /* EXT2 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extint) \
+ ((_extint) == 1 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 : \
+ (_extint) == 2 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 : \
+ 0x0)
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extint) \
+ (0xF << (((_extint) % 2)))
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extint) \
+ (((_extint) % 2) * 4)
+
+/* EXT interface RGMII TX/RX delay configuration registers 0~2 */
+#define RTL8365MB_EXT_RGMXF_REG0 0x1306 /* EXT0 */
+#define RTL8365MB_EXT_RGMXF_REG1 0x1307 /* EXT1 */
+#define RTL8365MB_EXT_RGMXF_REG2 0x13C5 /* EXT2 */
+#define RTL8365MB_EXT_RGMXF_REG(_extint) \
+ ((_extint) == 0 ? RTL8365MB_EXT_RGMXF_REG0 : \
+ (_extint) == 1 ? RTL8365MB_EXT_RGMXF_REG1 : \
+ (_extint) == 2 ? RTL8365MB_EXT_RGMXF_REG2 : \
+ 0x0)
#define RTL8365MB_EXT_RGMXF_RXDELAY_MASK 0x0007
#define RTL8365MB_EXT_RGMXF_TXDELAY_MASK 0x0008
-/* External port speed values - used in DIGITAL_INTERFACE_FORCE */
+/* External interface port speed values - used in DIGITAL_INTERFACE_FORCE */
#define RTL8365MB_PORT_SPEED_10M 0
#define RTL8365MB_PORT_SPEED_100M 1
#define RTL8365MB_PORT_SPEED_1000M 2
-/* EXT port force configuration registers 0~2 */
-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 0x1310
-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 0x1311
-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 0x13C4
-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(_extport) \
- (RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 + \
- ((_extport) & 0x1) + \
- ((((_extport) >> 1) & 0x1) * (0x13C4 - 0x1310)))
+/* EXT interface force configuration registers 0~2 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 0x1310 /* EXT0 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 0x1311 /* EXT1 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 0x13C4 /* EXT2 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(_extint) \
+ ((_extint) == 0 ? RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 : \
+ (_extint) == 1 ? RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 : \
+ (_extint) == 2 ? RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 : \
+ 0x0)
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK 0x1000
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_NWAY_MASK 0x0080
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK 0x0040
@@ -516,7 +543,7 @@ struct rtl8365mb_cpu {
/**
* struct rtl8365mb_port - private per-port data
- * @smi: pointer to parent realtek_smi data
+ * @priv: pointer to parent realtek_priv data
* @index: DSA port index, same as dsa_port::index
* @stats: link statistics populated by rtl8365mb_stats_poll, ready for atomic
* access via rtl8365mb_get_stats64
@@ -524,7 +551,7 @@ struct rtl8365mb_cpu {
* @mib_work: delayed work for polling MIB counters
*/
struct rtl8365mb_port {
- struct realtek_smi *smi;
+ struct realtek_priv *priv;
unsigned int index;
struct rtnl_link_stats64 stats;
spinlock_t stats_lock;
@@ -533,7 +560,7 @@ struct rtl8365mb_port {
/**
* struct rtl8365mb - private chip-specific driver data
- * @smi: pointer to parent realtek_smi data
+ * @priv: pointer to parent realtek_priv data
* @irq: registered IRQ or zero
* @chip_id: chip identifier
* @chip_ver: chip silicon revision
@@ -548,7 +575,7 @@ struct rtl8365mb_port {
* Private data for this driver.
*/
struct rtl8365mb {
- struct realtek_smi *smi;
+ struct realtek_priv *priv;
int irq;
u32 chip_id;
u32 chip_ver;
@@ -561,16 +588,16 @@ struct rtl8365mb {
size_t jam_size;
};
-static int rtl8365mb_phy_poll_busy(struct realtek_smi *smi)
+static int rtl8365mb_phy_poll_busy(struct realtek_priv *priv)
{
u32 val;
- return regmap_read_poll_timeout(smi->map,
+ return regmap_read_poll_timeout(priv->map_nolock,
RTL8365MB_INDIRECT_ACCESS_STATUS_REG,
val, !val, 10, 100);
}
-static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
+static int rtl8365mb_phy_ocp_prepare(struct realtek_priv *priv, int phy,
u32 ocp_addr)
{
u32 val;
@@ -579,7 +606,7 @@ static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
/* Set OCP prefix */
val = FIELD_GET(RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK, ocp_addr);
ret = regmap_update_bits(
- smi->map, RTL8365MB_GPHY_OCP_MSB_0_REG,
+ priv->map_nolock, RTL8365MB_GPHY_OCP_MSB_0_REG,
RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK,
FIELD_PREP(RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK, val));
if (ret)
@@ -592,89 +619,101 @@ static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
ocp_addr >> 1);
val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK,
ocp_addr >> 6);
- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG,
- val);
+ ret = regmap_write(priv->map_nolock,
+ RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG, val);
if (ret)
return ret;
return 0;
}
-static int rtl8365mb_phy_ocp_read(struct realtek_smi *smi, int phy,
+static int rtl8365mb_phy_ocp_read(struct realtek_priv *priv, int phy,
u32 ocp_addr, u16 *data)
{
u32 val;
int ret;
- ret = rtl8365mb_phy_poll_busy(smi);
+ mutex_lock(&priv->map_lock);
+
+ ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
- return ret;
+ goto out;
- ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
+ ret = rtl8365mb_phy_ocp_prepare(priv, phy, ocp_addr);
if (ret)
- return ret;
+ goto out;
/* Execute read operation */
val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ);
- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
+ ret = regmap_write(priv->map_nolock, RTL8365MB_INDIRECT_ACCESS_CTRL_REG,
+ val);
if (ret)
- return ret;
+ goto out;
- ret = rtl8365mb_phy_poll_busy(smi);
+ ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
- return ret;
+ goto out;
/* Get PHY register data */
- ret = regmap_read(smi->map, RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG,
- &val);
+ ret = regmap_read(priv->map_nolock,
+ RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG, &val);
if (ret)
- return ret;
+ goto out;
*data = val & 0xFFFF;
- return 0;
+out:
+ mutex_unlock(&priv->map_lock);
+
+ return ret;
}
-static int rtl8365mb_phy_ocp_write(struct realtek_smi *smi, int phy,
+static int rtl8365mb_phy_ocp_write(struct realtek_priv *priv, int phy,
u32 ocp_addr, u16 data)
{
u32 val;
int ret;
- ret = rtl8365mb_phy_poll_busy(smi);
+ mutex_lock(&priv->map_lock);
+
+ ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
- return ret;
+ goto out;
- ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
+ ret = rtl8365mb_phy_ocp_prepare(priv, phy, ocp_addr);
if (ret)
- return ret;
+ goto out;
/* Set PHY register data */
- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG,
- data);
+ ret = regmap_write(priv->map_nolock,
+ RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG, data);
if (ret)
- return ret;
+ goto out;
/* Execute write operation */
val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE);
- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
+ ret = regmap_write(priv->map_nolock, RTL8365MB_INDIRECT_ACCESS_CTRL_REG,
+ val);
if (ret)
- return ret;
+ goto out;
- ret = rtl8365mb_phy_poll_busy(smi);
+ ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
- return ret;
+ goto out;
+
+out:
+ mutex_unlock(&priv->map_lock);
return 0;
}
-static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum)
+static int rtl8365mb_phy_read(struct realtek_priv *priv, int phy, int regnum)
{
u32 ocp_addr;
u16 val;
@@ -688,21 +727,21 @@ static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum)
ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
- ret = rtl8365mb_phy_ocp_read(smi, phy, ocp_addr, &val);
+ ret = rtl8365mb_phy_ocp_read(priv, phy, ocp_addr, &val);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to read PHY%d reg %02x @ %04x, ret %d\n", phy,
regnum, ocp_addr, ret);
return ret;
}
- dev_dbg(smi->dev, "read PHY%d register 0x%02x @ %04x, val <- %04x\n",
+ dev_dbg(priv->dev, "read PHY%d register 0x%02x @ %04x, val <- %04x\n",
phy, regnum, ocp_addr, val);
return val;
}
-static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum,
+static int rtl8365mb_phy_write(struct realtek_priv *priv, int phy, int regnum,
u16 val)
{
u32 ocp_addr;
@@ -716,46 +755,67 @@ static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum,
ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
- ret = rtl8365mb_phy_ocp_write(smi, phy, ocp_addr, val);
+ ret = rtl8365mb_phy_ocp_write(priv, phy, ocp_addr, val);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to write PHY%d reg %02x @ %04x, ret %d\n", phy,
regnum, ocp_addr, ret);
return ret;
}
- dev_dbg(smi->dev, "write PHY%d register 0x%02x @ %04x, val -> %04x\n",
+ dev_dbg(priv->dev, "write PHY%d register 0x%02x @ %04x, val -> %04x\n",
phy, regnum, ocp_addr, val);
return 0;
}
+static int rtl8365mb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum)
+{
+ return rtl8365mb_phy_read(ds->priv, phy, regnum);
+}
+
+static int rtl8365mb_dsa_phy_write(struct dsa_switch *ds, int phy, int regnum,
+ u16 val)
+{
+ return rtl8365mb_phy_write(ds->priv, phy, regnum, val);
+}
+
static enum dsa_tag_protocol
rtl8365mb_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_cpu *cpu;
+ struct rtl8365mb *mb;
+
+ mb = priv->chip_data;
+ cpu = &mb->cpu;
+
+ if (cpu->position == RTL8365MB_CPU_POS_BEFORE_CRC)
+ return DSA_TAG_PROTO_RTL8_4T;
+
return DSA_TAG_PROTO_RTL8_4;
}
-static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
+static int rtl8365mb_ext_config_rgmii(struct realtek_priv *priv, int port,
phy_interface_t interface)
{
struct device_node *dn;
struct dsa_port *dp;
int tx_delay = 0;
int rx_delay = 0;
- int ext_port;
+ int ext_int;
u32 val;
int ret;
- if (port == smi->cpu_port) {
- ext_port = 1;
- } else {
- dev_err(smi->dev, "only one EXT port is currently supported\n");
+ ext_int = rtl8365mb_extint_port_map[port];
+
+ if (ext_int <= 0) {
+ dev_err(priv->dev, "Port %d is not an external interface port\n", port);
return -EINVAL;
}
- dp = dsa_to_port(smi->ds, port);
+ dp = dsa_to_port(priv->ds, port);
dn = dp->dn;
/* Set the RGMII TX/RX delay
@@ -786,8 +846,8 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
if (val == 0 || val == 2)
tx_delay = val / 2;
else
- dev_warn(smi->dev,
- "EXT port TX delay must be 0 or 2 ns\n");
+ dev_warn(priv->dev,
+ "EXT interface TX delay must be 0 or 2 ns\n");
}
if (!of_property_read_u32(dn, "rx-internal-delay-ps", &val)) {
@@ -796,12 +856,12 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
if (val <= 7)
rx_delay = val;
else
- dev_warn(smi->dev,
- "EXT port RX delay must be 0 to 2.1 ns\n");
+ dev_warn(priv->dev,
+ "EXT interface RX delay must be 0 to 2.1 ns\n");
}
ret = regmap_update_bits(
- smi->map, RTL8365MB_EXT_RGMXF_REG(ext_port),
+ priv->map, RTL8365MB_EXT_RGMXF_REG(ext_int),
RTL8365MB_EXT_RGMXF_TXDELAY_MASK |
RTL8365MB_EXT_RGMXF_RXDELAY_MASK,
FIELD_PREP(RTL8365MB_EXT_RGMXF_TXDELAY_MASK, tx_delay) |
@@ -810,18 +870,18 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
return ret;
ret = regmap_update_bits(
- smi->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(ext_port),
- RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(ext_port),
+ priv->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(ext_int),
+ RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(ext_int),
RTL8365MB_EXT_PORT_MODE_RGMII
<< RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(
- ext_port));
+ ext_int));
if (ret)
return ret;
return 0;
}
-static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
+static int rtl8365mb_ext_config_forcemode(struct realtek_priv *priv, int port,
bool link, int speed, int duplex,
bool tx_pause, bool rx_pause)
{
@@ -830,14 +890,14 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
u32 r_duplex;
u32 r_speed;
u32 r_link;
- int ext_port;
+ int ext_int;
int val;
int ret;
- if (port == smi->cpu_port) {
- ext_port = 1;
- } else {
- dev_err(smi->dev, "only one EXT port is currently supported\n");
+ ext_int = rtl8365mb_extint_port_map[port];
+
+ if (ext_int <= 0) {
+ dev_err(priv->dev, "Port %d is not an external interface port\n", port);
return -EINVAL;
}
@@ -854,7 +914,7 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
} else if (speed == SPEED_10) {
r_speed = RTL8365MB_PORT_SPEED_10M;
} else {
- dev_err(smi->dev, "unsupported port speed %s\n",
+ dev_err(priv->dev, "unsupported port speed %s\n",
phy_speed_to_str(speed));
return -EINVAL;
}
@@ -864,7 +924,7 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
} else if (duplex == DUPLEX_HALF) {
r_duplex = 0;
} else {
- dev_err(smi->dev, "unsupported duplex %s\n",
+ dev_err(priv->dev, "unsupported duplex %s\n",
phy_duplex_to_str(duplex));
return -EINVAL;
}
@@ -886,8 +946,8 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK,
r_duplex) |
FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK, r_speed);
- ret = regmap_write(smi->map,
- RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(ext_port),
+ ret = regmap_write(priv->map,
+ RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(ext_int),
val);
if (ret)
return ret;
@@ -898,13 +958,17 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port,
phy_interface_t interface)
{
- if (dsa_is_user_port(ds, port) &&
+ int ext_int;
+
+ ext_int = rtl8365mb_extint_port_map[port];
+
+ if (ext_int < 0 &&
(interface == PHY_INTERFACE_MODE_NA ||
interface == PHY_INTERFACE_MODE_INTERNAL ||
interface == PHY_INTERFACE_MODE_GMII))
/* Internal PHY */
return true;
- else if (dsa_is_cpu_port(ds, port) &&
+ else if ((ext_int >= 1) &&
phy_interface_mode_is_rgmii(interface))
/* Extension MAC */
return true;
@@ -912,65 +976,43 @@ static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port,
return false;
}
-static void rtl8365mb_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- struct realtek_smi *smi = ds->priv;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0 };
-
- /* include/linux/phylink.h says:
- * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
- * expects the MAC driver to return all supported link modes.
- */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- !rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
- dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
- phy_modes(state->interface), port);
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
-
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ if (dsa_is_user_port(ds, port))
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ else if (dsa_is_cpu_port(ds, port))
+ phy_interface_set_rgmii(config->supported_interfaces);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
}
static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
- dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
+ dev_err(priv->dev, "phy mode %s is unsupported on port %d\n",
phy_modes(state->interface), port);
return;
}
if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"port %d supports only conventional PHY or fixed-link\n",
port);
return;
}
if (phy_interface_mode_is_rgmii(state->interface)) {
- ret = rtl8365mb_ext_config_rgmii(smi, port, state->interface);
+ ret = rtl8365mb_ext_config_rgmii(priv, port, state->interface);
if (ret)
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to configure RGMII mode on port %d: %d\n",
port, ret);
return;
@@ -985,20 +1027,20 @@ static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
int ret;
- mb = smi->chip_data;
+ mb = priv->chip_data;
p = &mb->ports[port];
cancel_delayed_work_sync(&p->mib_work);
if (phy_interface_mode_is_rgmii(interface)) {
- ret = rtl8365mb_ext_config_forcemode(smi, port, false, 0, 0,
+ ret = rtl8365mb_ext_config_forcemode(priv, port, false, 0, 0,
false, false);
if (ret)
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to reset forced mode on port %d: %d\n",
port, ret);
@@ -1013,21 +1055,21 @@ static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port,
int duplex, bool tx_pause,
bool rx_pause)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
int ret;
- mb = smi->chip_data;
+ mb = priv->chip_data;
p = &mb->ports[port];
schedule_delayed_work(&p->mib_work, 0);
if (phy_interface_mode_is_rgmii(interface)) {
- ret = rtl8365mb_ext_config_forcemode(smi, port, true, speed,
+ ret = rtl8365mb_ext_config_forcemode(priv, port, true, speed,
duplex, tx_pause,
rx_pause);
if (ret)
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to force mode on port %d: %d\n", port,
ret);
@@ -1038,7 +1080,7 @@ static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port,
static void rtl8365mb_port_stp_state_set(struct dsa_switch *ds, int port,
u8 state)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
enum rtl8365mb_stp_state val;
int msti = 0;
@@ -1057,36 +1099,36 @@ static void rtl8365mb_port_stp_state_set(struct dsa_switch *ds, int port,
val = RTL8365MB_STP_STATE_FORWARDING;
break;
default:
- dev_err(smi->dev, "invalid STP state: %u\n", state);
+ dev_err(priv->dev, "invalid STP state: %u\n", state);
return;
}
- regmap_update_bits(smi->map, RTL8365MB_MSTI_CTRL_REG(msti, port),
+ regmap_update_bits(priv->map, RTL8365MB_MSTI_CTRL_REG(msti, port),
RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(port),
val << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(port));
}
-static int rtl8365mb_port_set_learning(struct realtek_smi *smi, int port,
+static int rtl8365mb_port_set_learning(struct realtek_priv *priv, int port,
bool enable)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
/* Enable/disable learning by limiting the number of L2 addresses the
* port can learn. Realtek documentation states that a limit of zero
* disables learning. When enabling learning, set it to the chip's
* maximum.
*/
- return regmap_write(smi->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port),
+ return regmap_write(priv->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port),
enable ? mb->learn_limit_max : 0);
}
-static int rtl8365mb_port_set_isolation(struct realtek_smi *smi, int port,
+static int rtl8365mb_port_set_isolation(struct realtek_priv *priv, int port,
u32 mask)
{
- return regmap_write(smi->map, RTL8365MB_PORT_ISOLATION_REG(port), mask);
+ return regmap_write(priv->map, RTL8365MB_PORT_ISOLATION_REG(port), mask);
}
-static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
+static int rtl8365mb_mib_counter_read(struct realtek_priv *priv, int port,
u32 offset, u32 length, u64 *mibvalue)
{
u64 tmpvalue = 0;
@@ -1098,13 +1140,13 @@ static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
* and then poll the control register before reading the value from some
* counter registers.
*/
- ret = regmap_write(smi->map, RTL8365MB_MIB_ADDRESS_REG,
+ ret = regmap_write(priv->map, RTL8365MB_MIB_ADDRESS_REG,
RTL8365MB_MIB_ADDRESS(port, offset));
if (ret)
return ret;
/* Poll for completion */
- ret = regmap_read_poll_timeout(smi->map, RTL8365MB_MIB_CTRL0_REG, val,
+ ret = regmap_read_poll_timeout(priv->map, RTL8365MB_MIB_CTRL0_REG, val,
!(val & RTL8365MB_MIB_CTRL0_BUSY_MASK),
10, 100);
if (ret)
@@ -1126,7 +1168,7 @@ static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
/* Read the MIB counter 16 bits at a time */
for (i = 0; i < length; i++) {
- ret = regmap_read(smi->map,
+ ret = regmap_read(priv->map,
RTL8365MB_MIB_COUNTER_REG(offset - i), &val);
if (ret)
return ret;
@@ -1142,21 +1184,21 @@ static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
static void rtl8365mb_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb *mb;
int ret;
int i;
- mb = smi->chip_data;
+ mb = priv->chip_data;
mutex_lock(&mb->mib_lock);
for (i = 0; i < RTL8365MB_MIB_END; i++) {
struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
- ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
+ ret = rtl8365mb_mib_counter_read(priv, port, mib->offset,
mib->length, &data[i]);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to read port %d counters: %d\n", port,
ret);
break;
@@ -1190,15 +1232,15 @@ static int rtl8365mb_get_sset_count(struct dsa_switch *ds, int port, int sset)
static void rtl8365mb_get_phy_stats(struct dsa_switch *ds, int port,
struct ethtool_eth_phy_stats *phy_stats)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_mib_counter *mib;
struct rtl8365mb *mb;
- mb = smi->chip_data;
+ mb = priv->chip_data;
mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3StatsSymbolErrors];
mutex_lock(&mb->mib_lock);
- rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
+ rtl8365mb_mib_counter_read(priv, port, mib->offset, mib->length,
&phy_stats->SymbolErrorDuringCarrier);
mutex_unlock(&mb->mib_lock);
}
@@ -1226,12 +1268,12 @@ static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
[RTL8365MB_MIB_dot3StatsExcessiveCollisions] = 1,
};
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb *mb;
int ret;
int i;
- mb = smi->chip_data;
+ mb = priv->chip_data;
mutex_lock(&mb->mib_lock);
for (i = 0; i < RTL8365MB_MIB_END; i++) {
@@ -1241,7 +1283,7 @@ static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
if (!cnt[i])
continue;
- ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
+ ret = rtl8365mb_mib_counter_read(priv, port, mib->offset,
mib->length, &cnt[i]);
if (ret)
break;
@@ -1291,20 +1333,20 @@ static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
static void rtl8365mb_get_ctrl_stats(struct dsa_switch *ds, int port,
struct ethtool_eth_ctrl_stats *ctrl_stats)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_mib_counter *mib;
struct rtl8365mb *mb;
- mb = smi->chip_data;
+ mb = priv->chip_data;
mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3ControlInUnknownOpcodes];
mutex_lock(&mb->mib_lock);
- rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
+ rtl8365mb_mib_counter_read(priv, port, mib->offset, mib->length,
&ctrl_stats->UnsupportedOpcodesReceived);
mutex_unlock(&mb->mib_lock);
}
-static void rtl8365mb_stats_update(struct realtek_smi *smi, int port)
+static void rtl8365mb_stats_update(struct realtek_priv *priv, int port)
{
u64 cnt[RTL8365MB_MIB_END] = {
[RTL8365MB_MIB_ifOutOctets] = 1,
@@ -1323,7 +1365,7 @@ static void rtl8365mb_stats_update(struct realtek_smi *smi, int port)
[RTL8365MB_MIB_dot3StatsFCSErrors] = 1,
[RTL8365MB_MIB_dot3StatsLateCollisions] = 1,
};
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
struct rtnl_link_stats64 *stats;
int ret;
int i;
@@ -1338,7 +1380,7 @@ static void rtl8365mb_stats_update(struct realtek_smi *smi, int port)
if (!cnt[i])
continue;
- ret = rtl8365mb_mib_counter_read(smi, port, c->offset,
+ ret = rtl8365mb_mib_counter_read(priv, port, c->offset,
c->length, &cnt[i]);
if (ret)
break;
@@ -1388,9 +1430,9 @@ static void rtl8365mb_stats_poll(struct work_struct *work)
struct rtl8365mb_port *p = container_of(to_delayed_work(work),
struct rtl8365mb_port,
mib_work);
- struct realtek_smi *smi = p->smi;
+ struct realtek_priv *priv = p->priv;
- rtl8365mb_stats_update(smi, p->index);
+ rtl8365mb_stats_update(priv, p->index);
schedule_delayed_work(&p->mib_work, RTL8365MB_STATS_INTERVAL_JIFFIES);
}
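
stats_poll re-arms itself, so each port's counters refresh on a fixed cadence without a dedicated kthread. The self-rearming delayed-work pattern in isolation; the 3-second interval here is hypothetical, the driver uses RTL8365MB_STATS_INTERVAL_JIFFIES:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct poller {
        struct delayed_work work;
};

static void poll_fn(struct work_struct *work)
{
        struct poller *p = container_of(to_delayed_work(work),
                                        struct poller, work);

        /* ... sample the hardware counters for this instance ... */

        /* Re-arm: the work keeps itself alive until cancelled */
        schedule_delayed_work(&p->work, msecs_to_jiffies(3000));
}

Teardown must use cancel_delayed_work_sync(), as stats_teardown below does, so an in-flight poll cannot re-arm after the backing structure goes away.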
@@ -1398,11 +1440,11 @@ static void rtl8365mb_stats_poll(struct work_struct *work)
static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
struct rtnl_link_stats64 *s)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
- mb = smi->chip_data;
+ mb = priv->chip_data;
p = &mb->ports[port];
spin_lock(&p->stats_lock);
@@ -1410,9 +1452,9 @@ static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
spin_unlock(&p->stats_lock);
}
-static void rtl8365mb_stats_setup(struct realtek_smi *smi)
+static void rtl8365mb_stats_setup(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
int i;
/* Per-chip global mutex to protect MIB counter access, since doing
@@ -1420,10 +1462,10 @@ static void rtl8365mb_stats_setup(struct realtek_smi *smi)
*/
mutex_init(&mb->mib_lock);
- for (i = 0; i < smi->num_ports; i++) {
+ for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(smi->ds, i))
+ if (dsa_is_unused_port(priv->ds, i))
continue;
/* Per-port spinlock to protect the stats64 data */
@@ -1436,45 +1478,45 @@ static void rtl8365mb_stats_setup(struct realtek_smi *smi)
}
}
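
Note the two lock types set up here: a chip-wide mutex for the MIB access sequence, which sleeps on the management bus, and a per-port spinlock that guards only the cached stats64 snapshot. A sketch of how the update path takes them:

/* Sketch of the two-level locking around the cached counters */
static void stats_update_sketch(struct rtl8365mb *mb,
                                struct rtl8365mb_port *p)
{
        mutex_lock(&mb->mib_lock);      /* serializes sleeping bus access */
        /* ... read the counters over SMI/MDIO ... */
        mutex_unlock(&mb->mib_lock);

        spin_lock(&p->stats_lock);      /* guards only the in-memory copy */
        /* ... fold the fresh values into p->stats ... */
        spin_unlock(&p->stats_lock);
}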
-static void rtl8365mb_stats_teardown(struct realtek_smi *smi)
+static void rtl8365mb_stats_teardown(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
int i;
- for (i = 0; i < smi->num_ports; i++) {
+ for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(smi->ds, i))
+ if (dsa_is_unused_port(priv->ds, i))
continue;
cancel_delayed_work_sync(&p->mib_work);
}
}
-static int rtl8365mb_get_and_clear_status_reg(struct realtek_smi *smi, u32 reg,
+static int rtl8365mb_get_and_clear_status_reg(struct realtek_priv *priv, u32 reg,
u32 *val)
{
int ret;
- ret = regmap_read(smi->map, reg, val);
+ ret = regmap_read(priv->map, reg, val);
if (ret)
return ret;
- return regmap_write(smi->map, reg, *val);
+ return regmap_write(priv->map, reg, *val);
}
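
Reading the status register and writing the same value back is the usual idiom for write-1-to-clear hardware: only the bits actually observed get acknowledged, so an event that fires between the read and the write stays pending and re-raises the interrupt. Sketched:

#include <linux/regmap.h>

/* Sketch: ack with the value read, never with ~0, on W1C registers */
static int w1c_get_and_clear(struct regmap *map, u32 reg, u32 *val)
{
        int ret = regmap_read(map, reg, val);

        return ret ? ret : regmap_write(map, reg, *val);
}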
static irqreturn_t rtl8365mb_irq(int irq, void *data)
{
- struct realtek_smi *smi = data;
+ struct realtek_priv *priv = data;
unsigned long line_changes = 0;
struct rtl8365mb *mb;
u32 stat;
int line;
int ret;
- mb = smi->chip_data;
+ mb = priv->chip_data;
- ret = rtl8365mb_get_and_clear_status_reg(smi, RTL8365MB_INTR_STATUS_REG,
+ ret = rtl8365mb_get_and_clear_status_reg(priv, RTL8365MB_INTR_STATUS_REG,
&stat);
if (ret)
goto out_error;
@@ -1485,14 +1527,14 @@ static irqreturn_t rtl8365mb_irq(int irq, void *data)
u32 val;
ret = rtl8365mb_get_and_clear_status_reg(
- smi, RTL8365MB_PORT_LINKUP_IND_REG, &val);
+ priv, RTL8365MB_PORT_LINKUP_IND_REG, &val);
if (ret)
goto out_error;
linkup_ind = FIELD_GET(RTL8365MB_PORT_LINKUP_IND_MASK, val);
ret = rtl8365mb_get_and_clear_status_reg(
- smi, RTL8365MB_PORT_LINKDOWN_IND_REG, &val);
+ priv, RTL8365MB_PORT_LINKDOWN_IND_REG, &val);
if (ret)
goto out_error;
@@ -1504,8 +1546,8 @@ static irqreturn_t rtl8365mb_irq(int irq, void *data)
if (!line_changes)
goto out_none;
- for_each_set_bit(line, &line_changes, smi->num_ports) {
- int child_irq = irq_find_mapping(smi->irqdomain, line);
+ for_each_set_bit(line, &line_changes, priv->num_ports) {
+ int child_irq = irq_find_mapping(priv->irqdomain, line);
handle_nested_irq(child_irq);
}
@@ -1513,7 +1555,7 @@ static irqreturn_t rtl8365mb_irq(int irq, void *data)
return IRQ_HANDLED;
out_error:
- dev_err(smi->dev, "failed to read interrupt status: %d\n", ret);
+ dev_err(priv->dev, "failed to read interrupt status: %d\n", ret);
out_none:
return IRQ_NONE;
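
Because every register access goes over a slow management bus, the parent interrupt is requested as a threaded IRQF_ONESHOT handler, and the per-port children are dispatched with handle_nested_irq(), the variant that may sleep. The demux loop in isolation:

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/bitops.h>

/* Sketch: fan a bitmap of changed lines out to nested per-port IRQs.
 * domain and nports stand in for priv->irqdomain and priv->num_ports.
 */
static void demux_lines(struct irq_domain *domain, unsigned long changes,
                        int nports)
{
        int line;

        for_each_set_bit(line, &changes, nports)
                handle_nested_irq(irq_find_mapping(domain, line));
}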
@@ -1548,27 +1590,27 @@ static const struct irq_domain_ops rtl8365mb_irqdomain_ops = {
.xlate = irq_domain_xlate_onecell,
};
-static int rtl8365mb_set_irq_enable(struct realtek_smi *smi, bool enable)
+static int rtl8365mb_set_irq_enable(struct realtek_priv *priv, bool enable)
{
- return regmap_update_bits(smi->map, RTL8365MB_INTR_CTRL_REG,
+ return regmap_update_bits(priv->map, RTL8365MB_INTR_CTRL_REG,
RTL8365MB_INTR_LINK_CHANGE_MASK,
FIELD_PREP(RTL8365MB_INTR_LINK_CHANGE_MASK,
enable ? 1 : 0));
}
-static int rtl8365mb_irq_enable(struct realtek_smi *smi)
+static int rtl8365mb_irq_enable(struct realtek_priv *priv)
{
- return rtl8365mb_set_irq_enable(smi, true);
+ return rtl8365mb_set_irq_enable(priv, true);
}
-static int rtl8365mb_irq_disable(struct realtek_smi *smi)
+static int rtl8365mb_irq_disable(struct realtek_priv *priv)
{
- return rtl8365mb_set_irq_enable(smi, false);
+ return rtl8365mb_set_irq_enable(priv, false);
}
-static int rtl8365mb_irq_setup(struct realtek_smi *smi)
+static int rtl8365mb_irq_setup(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
struct device_node *intc;
u32 irq_trig;
int virq;
@@ -1577,9 +1619,9 @@ static int rtl8365mb_irq_setup(struct realtek_smi *smi)
int ret;
int i;
- intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
+ intc = of_get_child_by_name(priv->dev->of_node, "interrupt-controller");
if (!intc) {
- dev_err(smi->dev, "missing child interrupt-controller node\n");
+ dev_err(priv->dev, "missing child interrupt-controller node\n");
return -EINVAL;
}
@@ -1587,24 +1629,24 @@ static int rtl8365mb_irq_setup(struct realtek_smi *smi)
irq = of_irq_get(intc, 0);
if (irq <= 0) {
if (irq != -EPROBE_DEFER)
- dev_err(smi->dev, "failed to get parent irq: %d\n",
+ dev_err(priv->dev, "failed to get parent irq: %d\n",
irq);
ret = irq ? irq : -EINVAL;
goto out_put_node;
}
- smi->irqdomain = irq_domain_add_linear(intc, smi->num_ports,
- &rtl8365mb_irqdomain_ops, smi);
- if (!smi->irqdomain) {
- dev_err(smi->dev, "failed to add irq domain\n");
+ priv->irqdomain = irq_domain_add_linear(intc, priv->num_ports,
+ &rtl8365mb_irqdomain_ops, priv);
+ if (!priv->irqdomain) {
+ dev_err(priv->dev, "failed to add irq domain\n");
ret = -ENOMEM;
goto out_put_node;
}
- for (i = 0; i < smi->num_ports; i++) {
- virq = irq_create_mapping(smi->irqdomain, i);
+ for (i = 0; i < priv->num_ports; i++) {
+ virq = irq_create_mapping(priv->irqdomain, i);
if (!virq) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to create irq domain mapping\n");
ret = -EINVAL;
goto out_remove_irqdomain;
@@ -1625,40 +1667,40 @@ static int rtl8365mb_irq_setup(struct realtek_smi *smi)
val = RTL8365MB_INTR_POLARITY_LOW;
break;
default:
- dev_err(smi->dev, "unsupported irq trigger type %u\n",
+ dev_err(priv->dev, "unsupported irq trigger type %u\n",
irq_trig);
ret = -EINVAL;
goto out_remove_irqdomain;
}
- ret = regmap_update_bits(smi->map, RTL8365MB_INTR_POLARITY_REG,
+ ret = regmap_update_bits(priv->map, RTL8365MB_INTR_POLARITY_REG,
RTL8365MB_INTR_POLARITY_MASK,
FIELD_PREP(RTL8365MB_INTR_POLARITY_MASK, val));
if (ret)
goto out_remove_irqdomain;
/* Disable the interrupt in case the chip has it enabled on reset */
- ret = rtl8365mb_irq_disable(smi);
+ ret = rtl8365mb_irq_disable(priv);
if (ret)
goto out_remove_irqdomain;
/* Clear the interrupt status register */
- ret = regmap_write(smi->map, RTL8365MB_INTR_STATUS_REG,
+ ret = regmap_write(priv->map, RTL8365MB_INTR_STATUS_REG,
RTL8365MB_INTR_ALL_MASK);
if (ret)
goto out_remove_irqdomain;
ret = request_threaded_irq(irq, NULL, rtl8365mb_irq, IRQF_ONESHOT,
- "rtl8365mb", smi);
+ "rtl8365mb", priv);
if (ret) {
- dev_err(smi->dev, "failed to request irq: %d\n", ret);
+ dev_err(priv->dev, "failed to request irq: %d\n", ret);
goto out_remove_irqdomain;
}
/* Store the irq so that we know to free it during teardown */
mb->irq = irq;
- ret = rtl8365mb_irq_enable(smi);
+ ret = rtl8365mb_irq_enable(priv);
if (ret)
goto out_free_irq;
@@ -1667,17 +1709,17 @@ static int rtl8365mb_irq_setup(struct realtek_smi *smi)
return 0;
out_free_irq:
- free_irq(mb->irq, smi);
+ free_irq(mb->irq, priv);
mb->irq = 0;
out_remove_irqdomain:
- for (i = 0; i < smi->num_ports; i++) {
- virq = irq_find_mapping(smi->irqdomain, i);
+ for (i = 0; i < priv->num_ports; i++) {
+ virq = irq_find_mapping(priv->irqdomain, i);
irq_dispose_mapping(virq);
}
- irq_domain_remove(smi->irqdomain);
- smi->irqdomain = NULL;
+ irq_domain_remove(priv->irqdomain);
+ priv->irqdomain = NULL;
out_put_node:
of_node_put(intc);
@@ -1685,36 +1727,36 @@ out_put_node:
return ret;
}
-static void rtl8365mb_irq_teardown(struct realtek_smi *smi)
+static void rtl8365mb_irq_teardown(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
int virq;
int i;
if (mb->irq) {
- free_irq(mb->irq, smi);
+ free_irq(mb->irq, priv);
mb->irq = 0;
}
- if (smi->irqdomain) {
- for (i = 0; i < smi->num_ports; i++) {
- virq = irq_find_mapping(smi->irqdomain, i);
+ if (priv->irqdomain) {
+ for (i = 0; i < priv->num_ports; i++) {
+ virq = irq_find_mapping(priv->irqdomain, i);
irq_dispose_mapping(virq);
}
- irq_domain_remove(smi->irqdomain);
- smi->irqdomain = NULL;
+ irq_domain_remove(priv->irqdomain);
+ priv->irqdomain = NULL;
}
}
-static int rtl8365mb_cpu_config(struct realtek_smi *smi)
+static int rtl8365mb_cpu_config(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
struct rtl8365mb_cpu *cpu = &mb->cpu;
u32 val;
int ret;
- ret = regmap_update_bits(smi->map, RTL8365MB_CPU_PORT_MASK_REG,
+ ret = regmap_update_bits(priv->map, RTL8365MB_CPU_PORT_MASK_REG,
RTL8365MB_CPU_PORT_MASK_MASK,
FIELD_PREP(RTL8365MB_CPU_PORT_MASK_MASK,
cpu->mask));
@@ -1726,26 +1768,57 @@ static int rtl8365mb_cpu_config(struct realtek_smi *smi)
FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_POSITION_MASK, cpu->position) |
FIELD_PREP(RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK, cpu->rx_length) |
FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK, cpu->format) |
- FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_MASK, cpu->trap_port) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_MASK, cpu->trap_port & 0x7) |
FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK,
- cpu->trap_port >> 3);
- ret = regmap_write(smi->map, RTL8365MB_CPU_CTRL_REG, val);
+ cpu->trap_port >> 3 & 0x1);
+ ret = regmap_write(priv->map, RTL8365MB_CPU_CTRL_REG, val);
if (ret)
return ret;
return 0;
}
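
The new masking on the trap port is needed because the index can now exceed three bits: the low three bits go into the original field and the fourth bit into the extension field. A worked sketch of the split, with hypothetical mask positions (the real definitions live in the driver header):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define TRAP_PORT_MASK          GENMASK(5, 3)   /* low 3 bits of the index */
#define TRAP_PORT_EXT_MASK      BIT(11)         /* 4th bit of the index */

static u32 pack_trap_port(u32 trap_port)
{
        /* e.g. trap_port = 9 (0b1001): low field gets 0b001, ext gets 1 */
        return FIELD_PREP(TRAP_PORT_MASK, trap_port & 0x7) |
               FIELD_PREP(TRAP_PORT_EXT_MASK, (trap_port >> 3) & 0x1);
}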
-static int rtl8365mb_switch_init(struct realtek_smi *smi)
+static int rtl8365mb_change_tag_protocol(struct dsa_switch *ds, int cpu_index,
+ enum dsa_tag_protocol proto)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_cpu *cpu;
+ struct rtl8365mb *mb;
+
+ mb = priv->chip_data;
+ cpu = &mb->cpu;
+
+ switch (proto) {
+ case DSA_TAG_PROTO_RTL8_4:
+ cpu->format = RTL8365MB_CPU_FORMAT_8BYTES;
+ cpu->position = RTL8365MB_CPU_POS_AFTER_SA;
+ break;
+ case DSA_TAG_PROTO_RTL8_4T:
+ cpu->format = RTL8365MB_CPU_FORMAT_8BYTES;
+ cpu->position = RTL8365MB_CPU_POS_BEFORE_CRC;
+ break;
+ /* The switch also supports a 4-byte format, similar to rtl4a but with
+ * the same 0x04 8-bit version and probably 8-bit port source/dest.
+ * There is no public documentation for it; it is not supported yet and
+ * probably never will be.
+ */
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ return rtl8365mb_cpu_config(priv);
+}
+
+static int rtl8365mb_switch_init(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
int ret;
int i;
/* Do any chip-specific init jam before getting to the common stuff */
if (mb->jam_table) {
for (i = 0; i < mb->jam_size; i++) {
- ret = regmap_write(smi->map, mb->jam_table[i].reg,
+ ret = regmap_write(priv->map, mb->jam_table[i].reg,
mb->jam_table[i].val);
if (ret)
return ret;
@@ -1754,7 +1827,7 @@ static int rtl8365mb_switch_init(struct realtek_smi *smi)
/* Common init jam */
for (i = 0; i < ARRAY_SIZE(rtl8365mb_init_jam_common); i++) {
- ret = regmap_write(smi->map, rtl8365mb_init_jam_common[i].reg,
+ ret = regmap_write(priv->map, rtl8365mb_init_jam_common[i].reg,
rtl8365mb_init_jam_common[i].val);
if (ret)
return ret;
@@ -1763,75 +1836,80 @@ static int rtl8365mb_switch_init(struct realtek_smi *smi)
return 0;
}
-static int rtl8365mb_reset_chip(struct realtek_smi *smi)
+static int rtl8365mb_reset_chip(struct realtek_priv *priv)
{
u32 val;
- realtek_smi_write_reg_noack(smi, RTL8365MB_CHIP_RESET_REG,
- FIELD_PREP(RTL8365MB_CHIP_RESET_HW_MASK,
- 1));
+ priv->write_reg_noack(priv, RTL8365MB_CHIP_RESET_REG,
+ FIELD_PREP(RTL8365MB_CHIP_RESET_HW_MASK, 1));
/* Realtek documentation says the chip needs 1 second to reset. Sleep
* for 100 ms before accessing any registers to prevent ACK timeouts.
*/
msleep(100);
- return regmap_read_poll_timeout(smi->map, RTL8365MB_CHIP_RESET_REG, val,
+ return regmap_read_poll_timeout(priv->map, RTL8365MB_CHIP_RESET_REG, val,
!(val & RTL8365MB_CHIP_RESET_HW_MASK),
20000, 1e6);
}
static int rtl8365mb_setup(struct dsa_switch *ds)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_cpu *cpu;
+ struct dsa_port *cpu_dp;
struct rtl8365mb *mb;
int ret;
int i;
- mb = smi->chip_data;
+ mb = priv->chip_data;
+ cpu = &mb->cpu;
- ret = rtl8365mb_reset_chip(smi);
+ ret = rtl8365mb_reset_chip(priv);
if (ret) {
- dev_err(smi->dev, "failed to reset chip: %d\n", ret);
+ dev_err(priv->dev, "failed to reset chip: %d\n", ret);
goto out_error;
}
/* Configure switch to vendor-defined initial state */
- ret = rtl8365mb_switch_init(smi);
+ ret = rtl8365mb_switch_init(priv);
if (ret) {
- dev_err(smi->dev, "failed to initialize switch: %d\n", ret);
+ dev_err(priv->dev, "failed to initialize switch: %d\n", ret);
goto out_error;
}
/* Set up cascading IRQs */
- ret = rtl8365mb_irq_setup(smi);
+ ret = rtl8365mb_irq_setup(priv);
if (ret == -EPROBE_DEFER)
return ret;
else if (ret)
- dev_info(smi->dev, "no interrupt support\n");
+ dev_info(priv->dev, "no interrupt support\n");
/* Configure CPU tagging */
- ret = rtl8365mb_cpu_config(smi);
+ dsa_switch_for_each_cpu_port(cpu_dp, priv->ds) {
+ cpu->mask |= BIT(cpu_dp->index);
+
+ if (cpu->trap_port == RTL8365MB_MAX_NUM_PORTS)
+ cpu->trap_port = cpu_dp->index;
+ }
+ cpu->enable = cpu->mask > 0;
+ ret = rtl8365mb_cpu_config(priv);
if (ret)
goto out_teardown_irq;
/* Configure ports */
- for (i = 0; i < smi->num_ports; i++) {
+ for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(smi->ds, i))
+ if (dsa_is_unused_port(priv->ds, i))
continue;
- /* Set up per-port private data */
- p->smi = smi;
- p->index = i;
-
/* Forward only to the CPU */
- ret = rtl8365mb_port_set_isolation(smi, i, BIT(smi->cpu_port));
+ ret = rtl8365mb_port_set_isolation(priv, i, cpu->mask);
if (ret)
goto out_teardown_irq;
/* Disable learning */
- ret = rtl8365mb_port_set_learning(smi, i, false);
+ ret = rtl8365mb_port_set_learning(priv, i, false);
if (ret)
goto out_teardown_irq;
@@ -1839,29 +1917,35 @@ static int rtl8365mb_setup(struct dsa_switch *ds)
* ports will still forward frames to the CPU despite being
* administratively down by default.
*/
- rtl8365mb_port_stp_state_set(smi->ds, i, BR_STATE_DISABLED);
+ rtl8365mb_port_stp_state_set(priv->ds, i, BR_STATE_DISABLED);
+
+ /* Set up per-port private data */
+ p->priv = priv;
+ p->index = i;
}
/* Set maximum packet length to 1536 bytes */
- ret = regmap_update_bits(smi->map, RTL8365MB_CFG0_MAX_LEN_REG,
+ ret = regmap_update_bits(priv->map, RTL8365MB_CFG0_MAX_LEN_REG,
RTL8365MB_CFG0_MAX_LEN_MASK,
FIELD_PREP(RTL8365MB_CFG0_MAX_LEN_MASK, 1536));
if (ret)
goto out_teardown_irq;
- ret = realtek_smi_setup_mdio(smi);
- if (ret) {
- dev_err(smi->dev, "could not set up MDIO bus\n");
- goto out_teardown_irq;
+ if (priv->setup_interface) {
+ ret = priv->setup_interface(ds);
+ if (ret) {
+ dev_err(priv->dev, "could not set up MDIO bus\n");
+ goto out_teardown_irq;
+ }
}
/* Start statistics counter polling */
- rtl8365mb_stats_setup(smi);
+ rtl8365mb_stats_setup(priv);
return 0;
out_teardown_irq:
- rtl8365mb_irq_teardown(smi);
+ rtl8365mb_irq_teardown(priv);
out_error:
return ret;
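
The setup_interface hook replaces the direct realtek_smi_setup_mdio() call and is the seam that lets one switch driver serve both management buses. A hedged sketch of the indirection; that the MDIO backend leaves the hook NULL and relies on the phy_read/phy_write ops instead is an assumption based on the ds_ops_mdio table below:

struct dsa_switch;      /* from <net/dsa.h> */

/* Sketch: interface-specific MDIO setup is optional and bus-provided */
static int setup_if_present(struct dsa_switch *ds,
                            int (*setup_interface)(struct dsa_switch *ds))
{
        return setup_interface ? setup_interface(ds) : 0;
}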
@@ -1869,10 +1953,10 @@ out_error:
static void rtl8365mb_teardown(struct dsa_switch *ds)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
- rtl8365mb_stats_teardown(smi);
- rtl8365mb_irq_teardown(smi);
+ rtl8365mb_stats_teardown(priv);
+ rtl8365mb_irq_teardown(priv);
}
static int rtl8365mb_get_chip_id_and_ver(struct regmap *map, u32 *id, u32 *ver)
@@ -1902,40 +1986,55 @@ static int rtl8365mb_get_chip_id_and_ver(struct regmap *map, u32 *id, u32 *ver)
return 0;
}
-static int rtl8365mb_detect(struct realtek_smi *smi)
+static int rtl8365mb_detect(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
u32 chip_id;
u32 chip_ver;
int ret;
- ret = rtl8365mb_get_chip_id_and_ver(smi->map, &chip_id, &chip_ver);
+ ret = rtl8365mb_get_chip_id_and_ver(priv->map, &chip_id, &chip_ver);
if (ret) {
- dev_err(smi->dev, "failed to read chip id and version: %d\n",
+ dev_err(priv->dev, "failed to read chip id and version: %d\n",
ret);
return ret;
}
switch (chip_id) {
case RTL8365MB_CHIP_ID_8365MB_VC:
- dev_info(smi->dev,
- "found an RTL8365MB-VC switch (ver=0x%04x)\n",
- chip_ver);
+ switch (chip_ver) {
+ case RTL8365MB_CHIP_VER_8365MB_VC:
+ dev_info(priv->dev,
+ "found an RTL8365MB-VC switch (ver=0x%04x)\n",
+ chip_ver);
+ break;
+ case RTL8365MB_CHIP_VER_8367RB:
+ dev_info(priv->dev,
+ "found an RTL8367RB-VB switch (ver=0x%04x)\n",
+ chip_ver);
+ break;
+ case RTL8365MB_CHIP_VER_8367S:
+ dev_info(priv->dev,
+ "found an RTL8367S switch (ver=0x%04x)\n",
+ chip_ver);
+ break;
+ default:
+ dev_err(priv->dev, "unrecognized switch version (ver=0x%04x)",
+ chip_ver);
+ return -ENODEV;
+ }
- smi->cpu_port = RTL8365MB_CPU_PORT_NUM_8365MB_VC;
- smi->num_ports = smi->cpu_port + 1;
+ priv->num_ports = RTL8365MB_MAX_NUM_PORTS;
- mb->smi = smi;
+ mb->priv = priv;
mb->chip_id = chip_id;
mb->chip_ver = chip_ver;
- mb->port_mask = BIT(smi->num_ports) - 1;
- mb->learn_limit_max = RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC;
+ mb->port_mask = GENMASK(priv->num_ports - 1, 0);
+ mb->learn_limit_max = RTL8365MB_LEARN_LIMIT_MAX;
mb->jam_table = rtl8365mb_init_jam_8365mb_vc;
mb->jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc);
- mb->cpu.enable = 1;
- mb->cpu.mask = BIT(smi->cpu_port);
- mb->cpu.trap_port = smi->cpu_port;
+ mb->cpu.trap_port = RTL8365MB_MAX_NUM_PORTS;
mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL;
mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA;
mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES;
@@ -1943,7 +2042,7 @@ static int rtl8365mb_detect(struct realtek_smi *smi)
break;
default:
- dev_err(smi->dev,
+ dev_err(priv->dev,
"found an unknown Realtek switch (id=0x%04x, ver=0x%04x)\n",
chip_id, chip_ver);
return -ENODEV;
@@ -1952,14 +2051,36 @@ static int rtl8365mb_detect(struct realtek_smi *smi)
return 0;
}
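
With the port count now fixed at RTL8365MB_MAX_NUM_PORTS, the port mask moves from BIT(n) - 1 to the equivalent GENMASK(n - 1, 0). The two forms agree for any n:

#include <linux/bits.h>

/* GENMASK(n - 1, 0) == BIT(n) - 1; e.g. 11 ports gives 0x7ff either way */
static unsigned long port_mask(int num_ports)
{
        return GENMASK(num_ports - 1, 0);
}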
-static const struct dsa_switch_ops rtl8365mb_switch_ops = {
+static const struct dsa_switch_ops rtl8365mb_switch_ops_smi = {
+ .get_tag_protocol = rtl8365mb_get_tag_protocol,
+ .change_tag_protocol = rtl8365mb_change_tag_protocol,
+ .setup = rtl8365mb_setup,
+ .teardown = rtl8365mb_teardown,
+ .phylink_get_caps = rtl8365mb_phylink_get_caps,
+ .phylink_mac_config = rtl8365mb_phylink_mac_config,
+ .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
+ .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
+ .port_stp_state_set = rtl8365mb_port_stp_state_set,
+ .get_strings = rtl8365mb_get_strings,
+ .get_ethtool_stats = rtl8365mb_get_ethtool_stats,
+ .get_sset_count = rtl8365mb_get_sset_count,
+ .get_eth_phy_stats = rtl8365mb_get_phy_stats,
+ .get_eth_mac_stats = rtl8365mb_get_mac_stats,
+ .get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats,
+ .get_stats64 = rtl8365mb_get_stats64,
+};
+
+static const struct dsa_switch_ops rtl8365mb_switch_ops_mdio = {
.get_tag_protocol = rtl8365mb_get_tag_protocol,
+ .change_tag_protocol = rtl8365mb_change_tag_protocol,
.setup = rtl8365mb_setup,
.teardown = rtl8365mb_teardown,
- .phylink_validate = rtl8365mb_phylink_validate,
+ .phylink_get_caps = rtl8365mb_phylink_get_caps,
.phylink_mac_config = rtl8365mb_phylink_mac_config,
.phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
.phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
+ .phy_read = rtl8365mb_dsa_phy_read,
+ .phy_write = rtl8365mb_dsa_phy_write,
.port_stp_state_set = rtl8365mb_port_stp_state_set,
.get_strings = rtl8365mb_get_strings,
.get_ethtool_stats = rtl8365mb_get_ethtool_stats,
@@ -1970,18 +2091,23 @@ static const struct dsa_switch_ops rtl8365mb_switch_ops = {
.get_stats64 = rtl8365mb_get_stats64,
};
-static const struct realtek_smi_ops rtl8365mb_smi_ops = {
+static const struct realtek_ops rtl8365mb_ops = {
.detect = rtl8365mb_detect,
.phy_read = rtl8365mb_phy_read,
.phy_write = rtl8365mb_phy_write,
};
-const struct realtek_smi_variant rtl8365mb_variant = {
- .ds_ops = &rtl8365mb_switch_ops,
- .ops = &rtl8365mb_smi_ops,
+const struct realtek_variant rtl8365mb_variant = {
+ .ds_ops_smi = &rtl8365mb_switch_ops_smi,
+ .ds_ops_mdio = &rtl8365mb_switch_ops_mdio,
+ .ops = &rtl8365mb_ops,
.clk_delay = 10,
.cmd_read = 0xb9,
.cmd_write = 0xb8,
.chip_data_sz = sizeof(struct rtl8365mb),
};
EXPORT_SYMBOL_GPL(rtl8365mb_variant);
+
+MODULE_AUTHOR("Alvin Å ipraga <alsi@bang-olufsen.dk>");
+MODULE_DESCRIPTION("Driver for RTL8365MB-VC ethernet switch");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/realtek/rtl8366-core.c
index bdb8d8d34880..dc5f75be3017 100644
--- a/drivers/net/dsa/rtl8366.c
+++ b/drivers/net/dsa/realtek/rtl8366-core.c
@@ -11,18 +11,18 @@
#include <linux/if_bridge.h>
#include <net/dsa.h>
-#include "realtek-smi-core.h"
+#include "realtek.h"
-int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
+int rtl8366_mc_is_used(struct realtek_priv *priv, int mc_index, int *used)
{
int ret;
int i;
*used = 0;
- for (i = 0; i < smi->num_ports; i++) {
+ for (i = 0; i < priv->num_ports; i++) {
int index = 0;
- ret = smi->ops->get_mc_index(smi, i, &index);
+ ret = priv->ops->get_mc_index(priv, i, &index);
if (ret)
return ret;
@@ -38,13 +38,13 @@ EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
/**
* rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
- * @smi: the Realtek SMI device instance
+ * @priv: the Realtek SMI device instance
* @vid: the VLAN ID to look up or allocate
* @vlanmc: the pointer will be assigned to a pointer to a valid member config
* if successful
* @return: index of a new member config or negative error number
*/
-static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
+static int rtl8366_obtain_mc(struct realtek_priv *priv, int vid,
struct rtl8366_vlan_mc *vlanmc)
{
struct rtl8366_vlan_4k vlan4k;
@@ -52,10 +52,10 @@ static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
int i;
/* Try to find an existing member config entry for this VID */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
+ for (i = 0; i < priv->num_vlan_mc; i++) {
+ ret = priv->ops->get_vlan_mc(priv, i, vlanmc);
if (ret) {
- dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "error searching for VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
@@ -65,19 +65,19 @@ static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
}
/* We have no MC entry for this VID, try to find an empty one */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
+ for (i = 0; i < priv->num_vlan_mc; i++) {
+ ret = priv->ops->get_vlan_mc(priv, i, vlanmc);
if (ret) {
- dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "error searching for VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
if (vlanmc->vid == 0 && vlanmc->member == 0) {
/* Update the entry from the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ ret = priv->ops->get_vlan_4k(priv, vid, &vlan4k);
if (ret) {
- dev_err(smi->dev, "error looking for 4K VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "error looking for 4K VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
@@ -86,30 +86,30 @@ static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
vlanmc->member = vlan4k.member;
vlanmc->untag = vlan4k.untag;
vlanmc->fid = vlan4k.fid;
- ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
+ ret = priv->ops->set_vlan_mc(priv, i, vlanmc);
if (ret) {
- dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "unable to set/update VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
- dev_dbg(smi->dev, "created new MC at index %d for VID %d\n",
+ dev_dbg(priv->dev, "created new MC at index %d for VID %d\n",
i, vid);
return i;
}
}
/* MC table is full, try to find an unused entry and replace it */
- for (i = 0; i < smi->num_vlan_mc; i++) {
+ for (i = 0; i < priv->num_vlan_mc; i++) {
int used;
- ret = rtl8366_mc_is_used(smi, i, &used);
+ ret = rtl8366_mc_is_used(priv, i, &used);
if (ret)
return ret;
if (!used) {
/* Update the entry from the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ ret = priv->ops->get_vlan_4k(priv, vid, &vlan4k);
if (ret)
return ret;
@@ -117,23 +117,23 @@ static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
vlanmc->member = vlan4k.member;
vlanmc->untag = vlan4k.untag;
vlanmc->fid = vlan4k.fid;
- ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
+ ret = priv->ops->set_vlan_mc(priv, i, vlanmc);
if (ret) {
- dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "unable to set/update VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
- dev_dbg(smi->dev, "recycled MC at index %i for VID %d\n",
+ dev_dbg(priv->dev, "recycled MC at index %i for VID %d\n",
i, vid);
return i;
}
}
- dev_err(smi->dev, "all VLAN member configurations are in use\n");
+ dev_err(priv->dev, "all VLAN member configurations are in use\n");
return -ENOSPC;
}
-int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+int rtl8366_set_vlan(struct realtek_priv *priv, int vid, u32 member,
u32 untag, u32 fid)
{
struct rtl8366_vlan_mc vlanmc;
@@ -141,31 +141,31 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
int mc;
int ret;
- if (!smi->ops->is_vlan_valid(smi, vid))
+ if (!priv->ops->is_vlan_valid(priv, vid))
return -EINVAL;
- dev_dbg(smi->dev,
+ dev_dbg(priv->dev,
"setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
vid, member, untag);
/* Update the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ ret = priv->ops->get_vlan_4k(priv, vid, &vlan4k);
if (ret)
return ret;
vlan4k.member |= member;
vlan4k.untag |= untag;
vlan4k.fid = fid;
- ret = smi->ops->set_vlan_4k(smi, &vlan4k);
+ ret = priv->ops->set_vlan_4k(priv, &vlan4k);
if (ret)
return ret;
- dev_dbg(smi->dev,
+ dev_dbg(priv->dev,
"resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
vid, vlan4k.member, vlan4k.untag);
/* Find or allocate a member config for this VID */
- ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
+ ret = rtl8366_obtain_mc(priv, vid, &vlanmc);
if (ret < 0)
return ret;
mc = ret;
@@ -176,12 +176,12 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
vlanmc.fid = fid;
/* Commit updates to the MC entry */
- ret = smi->ops->set_vlan_mc(smi, mc, &vlanmc);
+ ret = priv->ops->set_vlan_mc(priv, mc, &vlanmc);
if (ret)
- dev_err(smi->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
+ dev_err(priv->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
mc, vid);
else
- dev_dbg(smi->dev,
+ dev_dbg(priv->dev,
"resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
vid, vlanmc.member, vlanmc.untag);
@@ -189,37 +189,37 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
}
EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
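
Since member and untag are port bitmaps OR-ed into the 4K entry, repeated calls accumulate membership rather than replace it. A usage sketch; the VID and port numbers are arbitrary, and untag must be a subset of member:

#include <linux/bits.h>

/* Sketch: VID 10, ports 0 and 1 tagged, port 2 untagged, default FID */
static int example_add_vid10(struct realtek_priv *priv)
{
        u32 member = BIT(0) | BIT(1) | BIT(2); /* all three are members */
        u32 untag  = BIT(2);                   /* port 2 egresses untagged */

        return rtl8366_set_vlan(priv, 10, member, untag, 0);
}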
-int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
+int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
unsigned int vid)
{
struct rtl8366_vlan_mc vlanmc;
int mc;
int ret;
- if (!smi->ops->is_vlan_valid(smi, vid))
+ if (!priv->ops->is_vlan_valid(priv, vid))
return -EINVAL;
/* Find or allocate a member config for this VID */
- ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
+ ret = rtl8366_obtain_mc(priv, vid, &vlanmc);
if (ret < 0)
return ret;
mc = ret;
- ret = smi->ops->set_mc_index(smi, port, mc);
+ ret = priv->ops->set_mc_index(priv, port, mc);
if (ret) {
- dev_err(smi->dev, "set PVID: failed to set MC index %d for port %d\n",
+ dev_err(priv->dev, "set PVID: failed to set MC index %d for port %d\n",
mc, port);
return ret;
}
- dev_dbg(smi->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
+ dev_dbg(priv->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
port, vid, mc);
return 0;
}
EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
-int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable)
+int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
int ret;
@@ -229,52 +229,52 @@ int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable)
*/
if (enable) {
/* Make sure VLAN is ON */
- ret = smi->ops->enable_vlan(smi, true);
+ ret = priv->ops->enable_vlan(priv, true);
if (ret)
return ret;
- smi->vlan_enabled = true;
+ priv->vlan_enabled = true;
}
- ret = smi->ops->enable_vlan4k(smi, enable);
+ ret = priv->ops->enable_vlan4k(priv, enable);
if (ret)
return ret;
- smi->vlan4k_enabled = enable;
+ priv->vlan4k_enabled = enable;
return 0;
}
EXPORT_SYMBOL_GPL(rtl8366_enable_vlan4k);
-int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable)
+int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable)
{
int ret;
- ret = smi->ops->enable_vlan(smi, enable);
+ ret = priv->ops->enable_vlan(priv, enable);
if (ret)
return ret;
- smi->vlan_enabled = enable;
+ priv->vlan_enabled = enable;
/* If we turn VLAN off, make sure that we turn off
* 4k VLAN as well, if that happened to be on.
*/
if (!enable) {
- smi->vlan4k_enabled = false;
- ret = smi->ops->enable_vlan4k(smi, false);
+ priv->vlan4k_enabled = false;
+ ret = priv->ops->enable_vlan4k(priv, false);
}
return ret;
}
EXPORT_SYMBOL_GPL(rtl8366_enable_vlan);
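
Taken together, the two enable helpers keep a simple one-way invariant between the flags:

/* Invariant kept by rtl8366_enable_vlan() and rtl8366_enable_vlan4k():
 *
 *   vlan4k_enabled == true  implies  vlan_enabled == true
 *
 * enable_vlan4k(true) first forces VLAN on, and enable_vlan(false)
 * also forces 4K VLAN off, so no call sequence can leave the 4K table
 * enabled while VLAN as a whole is disabled.
 */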
-int rtl8366_reset_vlan(struct realtek_smi *smi)
+int rtl8366_reset_vlan(struct realtek_priv *priv)
{
struct rtl8366_vlan_mc vlanmc;
int ret;
int i;
- rtl8366_enable_vlan(smi, false);
- rtl8366_enable_vlan4k(smi, false);
+ rtl8366_enable_vlan(priv, false);
+ rtl8366_enable_vlan4k(priv, false);
/* Clear the 16 VLAN member configurations */
vlanmc.vid = 0;
@@ -282,8 +282,8 @@ int rtl8366_reset_vlan(struct realtek_smi *smi)
vlanmc.member = 0;
vlanmc.untag = 0;
vlanmc.fid = 0;
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ for (i = 0; i < priv->num_vlan_mc; i++) {
+ ret = priv->ops->set_vlan_mc(priv, i, &vlanmc);
if (ret)
return ret;
}
@@ -298,12 +298,12 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
{
bool untagged = !!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
bool pvid = !!(vlan->flags & BRIDGE_VLAN_INFO_PVID);
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
u32 member = 0;
u32 untag = 0;
int ret;
- if (!smi->ops->is_vlan_valid(smi, vlan->vid)) {
+ if (!priv->ops->is_vlan_valid(priv, vlan->vid)) {
NL_SET_ERR_MSG_MOD(extack, "VLAN ID not valid");
return -EINVAL;
}
@@ -312,13 +312,13 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
* FIXME: what's with this 4k business?
* Just rtl8366_enable_vlan() seems inconclusive.
*/
- ret = rtl8366_enable_vlan4k(smi, true);
+ ret = rtl8366_enable_vlan4k(priv, true);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to enable VLAN 4K");
return ret;
}
- dev_dbg(smi->dev, "add VLAN %d on port %d, %s, %s\n",
+ dev_dbg(priv->dev, "add VLAN %d on port %d, %s, %s\n",
vlan->vid, port, untagged ? "untagged" : "tagged",
pvid ? "PVID" : "no PVID");
@@ -327,18 +327,18 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
if (untagged)
untag |= BIT(port);
- ret = rtl8366_set_vlan(smi, vlan->vid, member, untag, 0);
+ ret = rtl8366_set_vlan(priv, vlan->vid, member, untag, 0);
if (ret) {
- dev_err(smi->dev, "failed to set up VLAN %04x", vlan->vid);
+ dev_err(priv->dev, "failed to set up VLAN %04x", vlan->vid);
return ret;
}
if (!pvid)
return 0;
- ret = rtl8366_set_pvid(smi, port, vlan->vid);
+ ret = rtl8366_set_pvid(priv, port, vlan->vid);
if (ret) {
- dev_err(smi->dev, "failed to set PVID on port %d to VLAN %04x",
+ dev_err(priv->dev, "failed to set PVID on port %d to VLAN %04x",
port, vlan->vid);
return ret;
}
@@ -350,15 +350,15 @@ EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
int rtl8366_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret, i;
- dev_dbg(smi->dev, "del VLAN %d on port %d\n", vlan->vid, port);
+ dev_dbg(priv->dev, "del VLAN %d on port %d\n", vlan->vid, port);
- for (i = 0; i < smi->num_vlan_mc; i++) {
+ for (i = 0; i < priv->num_vlan_mc; i++) {
struct rtl8366_vlan_mc vlanmc;
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+ ret = priv->ops->get_vlan_mc(priv, i, &vlanmc);
if (ret)
return ret;
@@ -376,9 +376,9 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
vlanmc.priority = 0;
vlanmc.fid = 0;
}
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ ret = priv->ops->set_vlan_mc(priv, i, &vlanmc);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to remove VLAN %04x\n",
vlan->vid);
return ret;
@@ -394,15 +394,15 @@ EXPORT_SYMBOL_GPL(rtl8366_vlan_del);
void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8366_mib_counter *mib;
int i;
- if (port >= smi->num_ports)
+ if (port >= priv->num_ports)
return;
- for (i = 0; i < smi->num_mib_counters; i++) {
- mib = &smi->mib_counters[i];
+ for (i = 0; i < priv->num_mib_counters; i++) {
+ mib = &priv->mib_counters[i];
strncpy(data + i * ETH_GSTRING_LEN,
mib->name, ETH_GSTRING_LEN);
}
@@ -411,35 +411,35 @@ EXPORT_SYMBOL_GPL(rtl8366_get_strings);
int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
/* We only support SS_STATS */
if (sset != ETH_SS_STATS)
return 0;
- if (port >= smi->num_ports)
+ if (port >= priv->num_ports)
return -EINVAL;
- return smi->num_mib_counters;
+ return priv->num_mib_counters;
}
EXPORT_SYMBOL_GPL(rtl8366_get_sset_count);
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int i;
int ret;
- if (port >= smi->num_ports)
+ if (port >= priv->num_ports)
return;
- for (i = 0; i < smi->num_mib_counters; i++) {
+ for (i = 0; i < priv->num_mib_counters; i++) {
struct rtl8366_mib_counter *mib;
u64 mibvalue = 0;
- mib = &smi->mib_counters[i];
- ret = smi->ops->get_mib_counter(smi, port, mib, &mibvalue);
+ mib = &priv->mib_counters[i];
+ ret = priv->ops->get_mib_counter(priv, port, mib, &mibvalue);
if (ret) {
- dev_err(smi->dev, "error reading MIB counter %s\n",
+ dev_err(priv->dev, "error reading MIB counter %s\n",
mib->name);
}
data[i] = mibvalue;
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index ecc19bd5115f..1a3406b9e64c 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -21,7 +21,7 @@
#include <linux/of_irq.h>
#include <linux/regmap.h>
-#include "realtek-smi-core.h"
+#include "realtek.h"
#define RTL8366RB_PORT_NUM_CPU 5
#define RTL8366RB_NUM_PORTS 6
@@ -396,7 +396,7 @@ static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
{ 0, 70, 2, "IfOutBroadcastPkts" },
};
-static int rtl8366rb_get_mib_counter(struct realtek_smi *smi,
+static int rtl8366rb_get_mib_counter(struct realtek_priv *priv,
int port,
struct rtl8366_mib_counter *mib,
u64 *mibvalue)
@@ -412,12 +412,12 @@ static int rtl8366rb_get_mib_counter(struct realtek_smi *smi,
/* Write the access counter address first;
 * the ASIC will then prepare the 64-bit counter to be retrieved
 */
- ret = regmap_write(smi->map, addr, 0); /* Write whatever */
+ ret = regmap_write(priv->map, addr, 0); /* Write whatever */
if (ret)
return ret;
/* Read MIB control register */
- ret = regmap_read(smi->map, RTL8366RB_MIB_CTRL_REG, &val);
+ ret = regmap_read(priv->map, RTL8366RB_MIB_CTRL_REG, &val);
if (ret)
return -EIO;
@@ -430,7 +430,7 @@ static int rtl8366rb_get_mib_counter(struct realtek_smi *smi,
/* Read each individual MIB 16 bits at the time */
*mibvalue = 0;
for (i = mib->length; i > 0; i--) {
- ret = regmap_read(smi->map, addr + (i - 1), &val);
+ ret = regmap_read(priv->map, addr + (i - 1), &val);
if (ret)
return ret;
*mibvalue = (*mibvalue << 16) | (val & 0xFFFF);
@@ -455,38 +455,38 @@ static u32 rtl8366rb_get_irqmask(struct irq_data *d)
static void rtl8366rb_mask_irq(struct irq_data *d)
{
- struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
+ struct realtek_priv *priv = irq_data_get_irq_chip_data(d);
int ret;
- ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_MASK_REG,
rtl8366rb_get_irqmask(d), 0);
if (ret)
- dev_err(smi->dev, "could not mask IRQ\n");
+ dev_err(priv->dev, "could not mask IRQ\n");
}
static void rtl8366rb_unmask_irq(struct irq_data *d)
{
- struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
+ struct realtek_priv *priv = irq_data_get_irq_chip_data(d);
int ret;
- ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_MASK_REG,
rtl8366rb_get_irqmask(d),
rtl8366rb_get_irqmask(d));
if (ret)
- dev_err(smi->dev, "could not unmask IRQ\n");
+ dev_err(priv->dev, "could not unmask IRQ\n");
}
static irqreturn_t rtl8366rb_irq(int irq, void *data)
{
- struct realtek_smi *smi = data;
+ struct realtek_priv *priv = data;
u32 stat;
int ret;
/* This clears the IRQ status register */
- ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
+ ret = regmap_read(priv->map, RTL8366RB_INTERRUPT_STATUS_REG,
&stat);
if (ret) {
- dev_err(smi->dev, "can't read interrupt status\n");
+ dev_err(priv->dev, "can't read interrupt status\n");
return IRQ_NONE;
}
stat &= RTL8366RB_INTERRUPT_VALID;
@@ -502,7 +502,7 @@ static irqreturn_t rtl8366rb_irq(int irq, void *data)
*/
if (line < 12 && line > 5)
line -= 5;
- child_irq = irq_find_mapping(smi->irqdomain, line);
+ child_irq = irq_find_mapping(priv->irqdomain, line);
handle_nested_irq(child_irq);
}
return IRQ_HANDLED;
@@ -538,7 +538,7 @@ static const struct irq_domain_ops rtl8366rb_irqdomain_ops = {
.xlate = irq_domain_xlate_onecell,
};
-static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
+static int rtl8366rb_setup_cascaded_irq(struct realtek_priv *priv)
{
struct device_node *intc;
unsigned long irq_trig;
@@ -547,24 +547,24 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
u32 val;
int i;
- intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
+ intc = of_get_child_by_name(priv->dev->of_node, "interrupt-controller");
if (!intc) {
- dev_err(smi->dev, "missing child interrupt-controller node\n");
+ dev_err(priv->dev, "missing child interrupt-controller node\n");
return -EINVAL;
}
/* RTL8366RB IRQs cascade off this one */
irq = of_irq_get(intc, 0);
if (irq <= 0) {
- dev_err(smi->dev, "failed to get parent IRQ\n");
+ dev_err(priv->dev, "failed to get parent IRQ\n");
ret = irq ? irq : -EINVAL;
goto out_put_node;
}
/* This clears the IRQ status register */
- ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
+ ret = regmap_read(priv->map, RTL8366RB_INTERRUPT_STATUS_REG,
&val);
if (ret) {
- dev_err(smi->dev, "can't read interrupt status\n");
+ dev_err(priv->dev, "can't read interrupt status\n");
goto out_put_node;
}
@@ -573,48 +573,48 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
switch (irq_trig) {
case IRQF_TRIGGER_RISING:
case IRQF_TRIGGER_HIGH:
- dev_info(smi->dev, "active high/rising IRQ\n");
+ dev_info(priv->dev, "active high/rising IRQ\n");
val = 0;
break;
case IRQF_TRIGGER_FALLING:
case IRQF_TRIGGER_LOW:
- dev_info(smi->dev, "active low/falling IRQ\n");
+ dev_info(priv->dev, "active low/falling IRQ\n");
val = RTL8366RB_INTERRUPT_POLARITY;
break;
}
- ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_CONTROL_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_CONTROL_REG,
RTL8366RB_INTERRUPT_POLARITY,
val);
if (ret) {
- dev_err(smi->dev, "could not configure IRQ polarity\n");
+ dev_err(priv->dev, "could not configure IRQ polarity\n");
goto out_put_node;
}
- ret = devm_request_threaded_irq(smi->dev, irq, NULL,
+ ret = devm_request_threaded_irq(priv->dev, irq, NULL,
rtl8366rb_irq, IRQF_ONESHOT,
- "RTL8366RB", smi);
+ "RTL8366RB", priv);
if (ret) {
- dev_err(smi->dev, "unable to request irq: %d\n", ret);
+ dev_err(priv->dev, "unable to request irq: %d\n", ret);
goto out_put_node;
}
- smi->irqdomain = irq_domain_add_linear(intc,
- RTL8366RB_NUM_INTERRUPT,
- &rtl8366rb_irqdomain_ops,
- smi);
- if (!smi->irqdomain) {
- dev_err(smi->dev, "failed to create IRQ domain\n");
+ priv->irqdomain = irq_domain_add_linear(intc,
+ RTL8366RB_NUM_INTERRUPT,
+ &rtl8366rb_irqdomain_ops,
+ priv);
+ if (!priv->irqdomain) {
+ dev_err(priv->dev, "failed to create IRQ domain\n");
ret = -EINVAL;
goto out_put_node;
}
- for (i = 0; i < smi->num_ports; i++)
- irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq);
+ for (i = 0; i < priv->num_ports; i++)
+ irq_set_parent(irq_create_mapping(priv->irqdomain, i), irq);
out_put_node:
of_node_put(intc);
return ret;
}
-static int rtl8366rb_set_addr(struct realtek_smi *smi)
+static int rtl8366rb_set_addr(struct realtek_priv *priv)
{
u8 addr[ETH_ALEN];
u16 val;
@@ -622,18 +622,18 @@ static int rtl8366rb_set_addr(struct realtek_smi *smi)
eth_random_addr(addr);
- dev_info(smi->dev, "set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ dev_info(priv->dev, "set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
val = addr[0] << 8 | addr[1];
- ret = regmap_write(smi->map, RTL8366RB_SMAR0, val);
+ ret = regmap_write(priv->map, RTL8366RB_SMAR0, val);
if (ret)
return ret;
val = addr[2] << 8 | addr[3];
- ret = regmap_write(smi->map, RTL8366RB_SMAR1, val);
+ ret = regmap_write(priv->map, RTL8366RB_SMAR1, val);
if (ret)
return ret;
val = addr[4] << 8 | addr[5];
- ret = regmap_write(smi->map, RTL8366RB_SMAR2, val);
+ ret = regmap_write(priv->map, RTL8366RB_SMAR2, val);
if (ret)
return ret;
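
The random MAC is packed big-endian, two bytes per register, across SMAR0..SMAR2. The same packing in one helper, as a sketch:

#include <linux/if_ether.h>

/* Sketch: pack a 6-byte MAC into three big-endian 16-bit register values */
static void mac_to_regs(const u8 addr[ETH_ALEN], u16 regs[3])
{
        regs[0] = addr[0] << 8 | addr[1];       /* SMAR0 */
        regs[1] = addr[2] << 8 | addr[3];       /* SMAR1 */
        regs[2] = addr[4] << 8 | addr[5];       /* SMAR2 */
}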
@@ -765,7 +765,7 @@ static const struct rtl8366rb_jam_tbl_entry rtl8366rb_green_jam[] = {
/* Function that jams the tables in the proper registers */
static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
- int jam_size, struct realtek_smi *smi,
+ int jam_size, struct realtek_priv *priv,
bool write_dbg)
{
u32 val;
@@ -774,24 +774,24 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
for (i = 0; i < jam_size; i++) {
if ((jam_table[i].reg & 0xBE00) == 0xBE00) {
- ret = regmap_read(smi->map,
+ ret = regmap_read(priv->map,
RTL8366RB_PHY_ACCESS_BUSY_REG,
&val);
if (ret)
return ret;
if (!(val & RTL8366RB_PHY_INT_BUSY)) {
- ret = regmap_write(smi->map,
- RTL8366RB_PHY_ACCESS_CTRL_REG,
- RTL8366RB_PHY_CTRL_WRITE);
+ ret = regmap_write(priv->map,
+ RTL8366RB_PHY_ACCESS_CTRL_REG,
+ RTL8366RB_PHY_CTRL_WRITE);
if (ret)
return ret;
}
}
if (write_dbg)
- dev_dbg(smi->dev, "jam %04x into register %04x\n",
+ dev_dbg(priv->dev, "jam %04x into register %04x\n",
jam_table[i].val,
jam_table[i].reg);
- ret = regmap_write(smi->map,
+ ret = regmap_write(priv->map,
jam_table[i].reg,
jam_table[i].val);
if (ret)
@@ -802,7 +802,7 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
static int rtl8366rb_setup(struct dsa_switch *ds)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
const struct rtl8366rb_jam_tbl_entry *jam_table;
struct rtl8366rb *rb;
u32 chip_ver = 0;
@@ -812,11 +812,11 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
int ret;
int i;
- rb = smi->chip_data;
+ rb = priv->chip_data;
- ret = regmap_read(smi->map, RTL8366RB_CHIP_ID_REG, &chip_id);
+ ret = regmap_read(priv->map, RTL8366RB_CHIP_ID_REG, &chip_id);
if (ret) {
- dev_err(smi->dev, "unable to read chip id\n");
+ dev_err(priv->dev, "unable to read chip id\n");
return ret;
}
@@ -824,18 +824,18 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
case RTL8366RB_CHIP_ID_8366:
break;
default:
- dev_err(smi->dev, "unknown chip id (%04x)\n", chip_id);
+ dev_err(priv->dev, "unknown chip id (%04x)\n", chip_id);
return -ENODEV;
}
- ret = regmap_read(smi->map, RTL8366RB_CHIP_VERSION_CTRL_REG,
+ ret = regmap_read(priv->map, RTL8366RB_CHIP_VERSION_CTRL_REG,
&chip_ver);
if (ret) {
- dev_err(smi->dev, "unable to read chip version\n");
+ dev_err(priv->dev, "unable to read chip version\n");
return ret;
}
- dev_info(smi->dev, "RTL%04x ver %u chip found\n",
+ dev_info(priv->dev, "RTL%04x ver %u chip found\n",
chip_id, chip_ver & RTL8366RB_CHIP_VERSION_MASK);
/* Do the init dance using the right jam table */
@@ -872,20 +872,20 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
jam_size = ARRAY_SIZE(rtl8366rb_init_jam_dgn3500);
}
- ret = rtl8366rb_jam_table(jam_table, jam_size, smi, true);
+ ret = rtl8366rb_jam_table(jam_table, jam_size, priv, true);
if (ret)
return ret;
/* Isolate all user ports so they can only send packets to themselves and the CPU port */
for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
- ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(i),
+ ret = regmap_write(priv->map, RTL8366RB_PORT_ISO(i),
RTL8366RB_PORT_ISO_PORTS(BIT(RTL8366RB_PORT_NUM_CPU)) |
RTL8366RB_PORT_ISO_EN);
if (ret)
return ret;
}
/* CPU port can send packets to all ports */
- ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU),
+ ret = regmap_write(priv->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU),
RTL8366RB_PORT_ISO_PORTS(dsa_user_ports(ds)) |
RTL8366RB_PORT_ISO_EN);
if (ret)
@@ -893,26 +893,26 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
/* Set up the "green ethernet" feature */
ret = rtl8366rb_jam_table(rtl8366rb_green_jam,
- ARRAY_SIZE(rtl8366rb_green_jam), smi, false);
+ ARRAY_SIZE(rtl8366rb_green_jam), priv, false);
if (ret)
return ret;
- ret = regmap_write(smi->map,
+ ret = regmap_write(priv->map,
RTL8366RB_GREEN_FEATURE_REG,
(chip_ver == 1) ? 0x0007 : 0x0003);
if (ret)
return ret;
/* Vendor driver sets 0x240 in registers 0xc and 0xd (undocumented) */
- ret = regmap_write(smi->map, 0x0c, 0x240);
+ ret = regmap_write(priv->map, 0x0c, 0x240);
if (ret)
return ret;
- ret = regmap_write(smi->map, 0x0d, 0x240);
+ ret = regmap_write(priv->map, 0x0d, 0x240);
if (ret)
return ret;
/* Set some random MAC address */
- ret = rtl8366rb_set_addr(smi);
+ ret = rtl8366rb_set_addr(priv);
if (ret)
return ret;
@@ -921,21 +921,21 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
* If you set RTL8368RB_CPU_NO_TAG (bit 15) in this register
* the custom tag is turned off.
*/
- ret = regmap_update_bits(smi->map, RTL8368RB_CPU_CTRL_REG,
+ ret = regmap_update_bits(priv->map, RTL8368RB_CPU_CTRL_REG,
0xFFFF,
- BIT(smi->cpu_port));
+ BIT(priv->cpu_port));
if (ret)
return ret;
/* Make sure we default-enable the fixed CPU port */
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR,
- BIT(smi->cpu_port),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR,
+ BIT(priv->cpu_port),
0);
if (ret)
return ret;
/* Set maximum packet length to 1536 bytes */
- ret = regmap_update_bits(smi->map, RTL8366RB_SGCR,
+ ret = regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_MAX_LENGTH_MASK,
RTL8366RB_SGCR_MAX_LENGTH_1536);
if (ret)
@@ -945,13 +945,13 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
rb->max_mtu[i] = 1532;
/* Disable learning for all ports */
- ret = regmap_write(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
+ ret = regmap_write(priv->map, RTL8366RB_PORT_LEARNDIS_CTRL,
RTL8366RB_PORT_ALL);
if (ret)
return ret;
/* Enable auto ageing for all ports */
- ret = regmap_write(smi->map, RTL8366RB_SECURITY_CTRL, 0);
+ ret = regmap_write(priv->map, RTL8366RB_SECURITY_CTRL, 0);
if (ret)
return ret;
@@ -962,30 +962,30 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
* connected to something exotic such as fiber, then this might
* be worth experimenting with.
*/
- ret = regmap_update_bits(smi->map, RTL8366RB_PMC0,
+ ret = regmap_update_bits(priv->map, RTL8366RB_PMC0,
RTL8366RB_PMC0_P4_IOMODE_MASK,
0 << RTL8366RB_PMC0_P4_IOMODE_SHIFT);
if (ret)
return ret;
/* Accept all packets by default, we enable filtering on-demand */
- ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
+ ret = regmap_write(priv->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
0);
if (ret)
return ret;
- ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
+ ret = regmap_write(priv->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
0);
if (ret)
return ret;
/* Don't drop packets whose DA has not been learned */
- ret = regmap_update_bits(smi->map, RTL8366RB_SSCR2,
+ ret = regmap_update_bits(priv->map, RTL8366RB_SSCR2,
RTL8366RB_SSCR2_DROP_UNKNOWN_DA, 0);
if (ret)
return ret;
/* Set blinking, TODO: make this configurable */
- ret = regmap_update_bits(smi->map, RTL8366RB_LED_BLINKRATE_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_LED_BLINKRATE_REG,
RTL8366RB_LED_BLINKRATE_MASK,
RTL8366RB_LED_BLINKRATE_56MS);
if (ret)
@@ -996,15 +996,15 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
* behaviour (no individual config) but we can set up each
* LED separately.
*/
- if (smi->leds_disabled) {
+ if (priv->leds_disabled) {
/* Turn everything off */
- regmap_update_bits(smi->map,
+ regmap_update_bits(priv->map,
RTL8366RB_LED_0_1_CTRL_REG,
0x0FFF, 0);
- regmap_update_bits(smi->map,
+ regmap_update_bits(priv->map,
RTL8366RB_LED_2_3_CTRL_REG,
0x0FFF, 0);
- regmap_update_bits(smi->map,
+ regmap_update_bits(priv->map,
RTL8366RB_INTERRUPT_CONTROL_REG,
RTL8366RB_P4_RGMII_LED,
0);
@@ -1014,7 +1014,7 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
val = RTL8366RB_LED_FORCE;
}
for (i = 0; i < 4; i++) {
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_LED_CTRL_REG,
0xf << (i * 4),
val << (i * 4));
@@ -1022,18 +1022,20 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
return ret;
}
- ret = rtl8366_reset_vlan(smi);
+ ret = rtl8366_reset_vlan(priv);
if (ret)
return ret;
- ret = rtl8366rb_setup_cascaded_irq(smi);
+ ret = rtl8366rb_setup_cascaded_irq(priv);
if (ret)
- dev_info(smi->dev, "no interrupt support\n");
+ dev_info(priv->dev, "no interrupt support\n");
- ret = realtek_smi_setup_mdio(smi);
- if (ret) {
- dev_info(smi->dev, "could not set up MDIO bus\n");
- return -ENODEV;
+ if (priv->setup_interface) {
+ ret = priv->setup_interface(ds);
+ if (ret) {
+ dev_err(priv->dev, "could not set up MDIO bus\n");
+ return -ENODEV;
+ }
}
return 0;
@@ -1052,35 +1054,35 @@ rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface, struct phy_device *phydev,
int speed, int duplex, bool tx_pause, bool rx_pause)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
- if (port != smi->cpu_port)
+ if (port != priv->cpu_port)
return;
- dev_dbg(smi->dev, "MAC link up on CPU port (%d)\n", port);
+ dev_dbg(priv->dev, "MAC link up on CPU port (%d)\n", port);
/* Force the fixed CPU port into 1Gbit mode, no autonegotiation */
- ret = regmap_update_bits(smi->map, RTL8366RB_MAC_FORCE_CTRL_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_MAC_FORCE_CTRL_REG,
BIT(port), BIT(port));
if (ret) {
- dev_err(smi->dev, "failed to force 1Gbit on CPU port\n");
+ dev_err(priv->dev, "failed to force 1Gbit on CPU port\n");
return;
}
- ret = regmap_update_bits(smi->map, RTL8366RB_PAACR2,
+ ret = regmap_update_bits(priv->map, RTL8366RB_PAACR2,
0xFF00U,
RTL8366RB_PAACR_CPU_PORT << 8);
if (ret) {
- dev_err(smi->dev, "failed to set PAACR on CPU port\n");
+ dev_err(priv->dev, "failed to set PAACR on CPU port\n");
return;
}
/* Enable the CPU port */
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
0);
if (ret) {
- dev_err(smi->dev, "failed to enable the CPU port\n");
+ dev_err(priv->dev, "failed to enable the CPU port\n");
return;
}
}
@@ -1089,107 +1091,108 @@ static void
rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
- if (port != smi->cpu_port)
+ if (port != priv->cpu_port)
return;
- dev_dbg(smi->dev, "MAC link down on CPU port (%d)\n", port);
+ dev_dbg(priv->dev, "MAC link down on CPU port (%d)\n", port);
/* Disable the CPU port */
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
BIT(port));
if (ret) {
- dev_err(smi->dev, "failed to disable the CPU port\n");
+ dev_err(priv->dev, "failed to disable the CPU port\n");
return;
}
}
-static void rb8366rb_set_port_led(struct realtek_smi *smi,
+static void rb8366rb_set_port_led(struct realtek_priv *priv,
int port, bool enable)
{
u16 val = enable ? 0x3f : 0;
int ret;
- if (smi->leds_disabled)
+ if (priv->leds_disabled)
return;
switch (port) {
case 0:
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_LED_0_1_CTRL_REG,
0x3F, val);
break;
case 1:
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_LED_0_1_CTRL_REG,
0x3F << RTL8366RB_LED_1_OFFSET,
val << RTL8366RB_LED_1_OFFSET);
break;
case 2:
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_LED_2_3_CTRL_REG,
0x3F, val);
break;
case 3:
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_LED_2_3_CTRL_REG,
0x3F << RTL8366RB_LED_3_OFFSET,
val << RTL8366RB_LED_3_OFFSET);
break;
case 4:
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_INTERRUPT_CONTROL_REG,
RTL8366RB_P4_RGMII_LED,
enable ? RTL8366RB_P4_RGMII_LED : 0);
break;
default:
- dev_err(smi->dev, "no LED for port %d\n", port);
+ dev_err(priv->dev, "no LED for port %d\n", port);
return;
}
if (ret)
- dev_err(smi->dev, "error updating LED on port %d\n", port);
+ dev_err(priv->dev, "error updating LED on port %d\n", port);
}
static int
rtl8366rb_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
- dev_dbg(smi->dev, "enable port %d\n", port);
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ dev_dbg(priv->dev, "enable port %d\n", port);
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
0);
if (ret)
return ret;
- rb8366rb_set_port_led(smi, port, true);
+ rb8366rb_set_port_led(priv, port, true);
return 0;
}
static void
rtl8366rb_port_disable(struct dsa_switch *ds, int port)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
- dev_dbg(smi->dev, "disable port %d\n", port);
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ dev_dbg(priv->dev, "disable port %d\n", port);
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
BIT(port));
if (ret)
return;
- rb8366rb_set_port_led(smi, port, false);
+ rb8366rb_set_port_led(priv, port, false);
}
static int
rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
unsigned int port_bitmap = 0;
int ret, i;
@@ -1202,17 +1205,17 @@ rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Join this port to each other port on the bridge */
- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(i),
RTL8366RB_PORT_ISO_PORTS(BIT(port)),
RTL8366RB_PORT_ISO_PORTS(BIT(port)));
if (ret)
- dev_err(smi->dev, "failed to join port %d\n", port);
+ dev_err(priv->dev, "failed to join port %d\n", port);
port_bitmap |= BIT(i);
}
/* Set the bits for the ports we can access */
- return regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
+ return regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(port),
RTL8366RB_PORT_ISO_PORTS(port_bitmap),
RTL8366RB_PORT_ISO_PORTS(port_bitmap));
}
@@ -1221,7 +1224,7 @@ static void
rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
unsigned int port_bitmap = 0;
int ret, i;
@@ -1234,28 +1237,30 @@ rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port,
if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Remove this port from any other port on the bridge */
- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(i),
RTL8366RB_PORT_ISO_PORTS(BIT(port)), 0);
if (ret)
- dev_err(smi->dev, "failed to leave port %d\n", port);
+ dev_err(priv->dev, "failed to leave port %d\n", port);
port_bitmap |= BIT(i);
}
/* Clear the bits for the ports we can not access, leave ourselves */
- regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
+ regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(port),
RTL8366RB_PORT_ISO_PORTS(port_bitmap), 0);
}
/**
* rtl8366rb_drop_untagged() - make the switch drop untagged and C-tagged frames
- * @smi: SMI state container
+ * @priv: the driver state container
* @port: the port to drop untagged and C-tagged frames on
* @drop: whether to drop or pass untagged and C-tagged frames
+ *
+ * Return: zero for success, a negative number on error.
*/
-static int rtl8366rb_drop_untagged(struct realtek_smi *smi, int port, bool drop)
+static int rtl8366rb_drop_untagged(struct realtek_priv *priv, int port, bool drop)
{
- return regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
+ return regmap_update_bits(priv->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port),
drop ? RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) : 0);
}
@@ -1264,17 +1269,17 @@ static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering,
struct netlink_ext_ack *extack)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8366rb *rb;
int ret;
- rb = smi->chip_data;
+ rb = priv->chip_data;
- dev_dbg(smi->dev, "port %d: %s VLAN filtering\n", port,
+ dev_dbg(priv->dev, "port %d: %s VLAN filtering\n", port,
vlan_filtering ? "enable" : "disable");
/* If the port is not in the member set, the frame will be dropped */
- ret = regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
BIT(port), vlan_filtering ? BIT(port) : 0);
if (ret)
return ret;
@@ -1284,9 +1289,9 @@ static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
* filtering on a port, we need to accept any frames.
*/
if (vlan_filtering)
- ret = rtl8366rb_drop_untagged(smi, port, !rb->pvid_enabled[port]);
+ ret = rtl8366rb_drop_untagged(priv, port, !rb->pvid_enabled[port]);
else
- ret = rtl8366rb_drop_untagged(smi, port, false);
+ ret = rtl8366rb_drop_untagged(priv, port, false);
return ret;
}
@@ -1308,11 +1313,11 @@ rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
if (flags.mask & BR_LEARNING) {
- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_LEARNDIS_CTRL,
BIT(port),
(flags.val & BR_LEARNING) ? 0 : BIT(port));
if (ret)
@@ -1325,7 +1330,7 @@ rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port,
static void
rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
u32 val;
int i;
@@ -1344,13 +1349,13 @@ rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
val = RTL8366RB_STP_STATE_FORWARDING;
break;
default:
- dev_err(smi->dev, "unknown bridge state requested\n");
+ dev_err(priv->dev, "unknown bridge state requested\n");
return;
}
/* Set the same status for the port on all the FIDs */
for (i = 0; i < RTL8366RB_NUM_FIDS; i++) {
- regmap_update_bits(smi->map, RTL8366RB_STP_STATE_BASE + i,
+ regmap_update_bits(priv->map, RTL8366RB_STP_STATE_BASE + i,
RTL8366RB_STP_STATE_MASK(port),
RTL8366RB_STP_STATE(port, val));
}
@@ -1359,26 +1364,26 @@ rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
static void
rtl8366rb_port_fast_age(struct dsa_switch *ds, int port)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
/* This will age out any learned L2 entries */
- regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
+ regmap_update_bits(priv->map, RTL8366RB_SECURITY_CTRL,
BIT(port), BIT(port));
/* Restore the normal state of things */
- regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
+ regmap_update_bits(priv->map, RTL8366RB_SECURITY_CTRL,
BIT(port), 0);
}
static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8366rb *rb;
unsigned int max_mtu;
u32 len;
int i;
/* Cache the per-port MTU setting */
- rb = smi->chip_data;
+ rb = priv->chip_data;
rb->max_mtu[port] = new_mtu;
/* Roof out the MTU for the entire switch to the greatest
@@ -1406,7 +1411,7 @@ static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
else
len = RTL8366RB_SGCR_MAX_LENGTH_16000;
- return regmap_update_bits(smi->map, RTL8366RB_SGCR,
+ return regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_MAX_LENGTH_MASK,
len);
}
@@ -1419,7 +1424,7 @@ static int rtl8366rb_max_mtu(struct dsa_switch *ds, int port)
return 15996;
}
-static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
+static int rtl8366rb_get_vlan_4k(struct realtek_priv *priv, u32 vid,
struct rtl8366_vlan_4k *vlan4k)
{
u32 data[3];
@@ -1432,19 +1437,19 @@ static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
return -EINVAL;
/* write VID */
- ret = regmap_write(smi->map, RTL8366RB_VLAN_TABLE_WRITE_BASE,
+ ret = regmap_write(priv->map, RTL8366RB_VLAN_TABLE_WRITE_BASE,
vid & RTL8366RB_VLAN_VID_MASK);
if (ret)
return ret;
/* write table access control word */
- ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
+ ret = regmap_write(priv->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
RTL8366RB_TABLE_VLAN_READ_CTRL);
if (ret)
return ret;
for (i = 0; i < 3; i++) {
- ret = regmap_read(smi->map,
+ ret = regmap_read(priv->map,
RTL8366RB_VLAN_TABLE_READ_BASE + i,
&data[i]);
if (ret)
@@ -1460,7 +1465,7 @@ static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
return 0;
}
-static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi,
+static int rtl8366rb_set_vlan_4k(struct realtek_priv *priv,
const struct rtl8366_vlan_4k *vlan4k)
{
u32 data[3];
@@ -1480,7 +1485,7 @@ static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi,
data[2] = vlan4k->fid & RTL8366RB_VLAN_FID_MASK;
for (i = 0; i < 3; i++) {
- ret = regmap_write(smi->map,
+ ret = regmap_write(priv->map,
RTL8366RB_VLAN_TABLE_WRITE_BASE + i,
data[i]);
if (ret)
@@ -1488,13 +1493,13 @@ static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi,
}
/* write table access control word */
- ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
+ ret = regmap_write(priv->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
RTL8366RB_TABLE_VLAN_WRITE_CTRL);
return ret;
}
-static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index,
+static int rtl8366rb_get_vlan_mc(struct realtek_priv *priv, u32 index,
struct rtl8366_vlan_mc *vlanmc)
{
u32 data[3];
@@ -1507,7 +1512,7 @@ static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index,
return -EINVAL;
for (i = 0; i < 3; i++) {
- ret = regmap_read(smi->map,
+ ret = regmap_read(priv->map,
RTL8366RB_VLAN_MC_BASE(index) + i,
&data[i]);
if (ret)
@@ -1525,7 +1530,7 @@ static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index,
return 0;
}
-static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index,
+static int rtl8366rb_set_vlan_mc(struct realtek_priv *priv, u32 index,
const struct rtl8366_vlan_mc *vlanmc)
{
u32 data[3];
@@ -1549,7 +1554,7 @@ static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index,
data[2] = vlanmc->fid & RTL8366RB_VLAN_FID_MASK;
for (i = 0; i < 3; i++) {
- ret = regmap_write(smi->map,
+ ret = regmap_write(priv->map,
RTL8366RB_VLAN_MC_BASE(index) + i,
data[i]);
if (ret)
@@ -1559,15 +1564,15 @@ static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index,
return 0;
}
-static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)
+static int rtl8366rb_get_mc_index(struct realtek_priv *priv, int port, int *val)
{
u32 data;
int ret;
- if (port >= smi->num_ports)
+ if (port >= priv->num_ports)
return -EINVAL;
- ret = regmap_read(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ ret = regmap_read(priv->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
&data);
if (ret)
return ret;
@@ -1578,22 +1583,22 @@ static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)
return 0;
}
-static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)
+static int rtl8366rb_set_mc_index(struct realtek_priv *priv, int port, int index)
{
struct rtl8366rb *rb;
bool pvid_enabled;
int ret;
- rb = smi->chip_data;
+ rb = priv->chip_data;
pvid_enabled = !!index;
- if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS)
+ if (port >= priv->num_ports || index >= RTL8366RB_NUM_VLANS)
return -EINVAL;
- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
- RTL8366RB_PORT_VLAN_CTRL_MASK <<
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ RTL8366RB_PORT_VLAN_CTRL_MASK <<
RTL8366RB_PORT_VLAN_CTRL_SHIFT(port),
- (index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
+ (index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
RTL8366RB_PORT_VLAN_CTRL_SHIFT(port));
if (ret)
return ret;
@@ -1604,17 +1609,17 @@ static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)
* not drop any untagged or C-tagged frames. Make sure to update the
* filtering setting.
*/
- if (dsa_port_is_vlan_filtering(dsa_to_port(smi->ds, port)))
- ret = rtl8366rb_drop_untagged(smi, port, !pvid_enabled);
+ if (dsa_port_is_vlan_filtering(dsa_to_port(priv->ds, port)))
+ ret = rtl8366rb_drop_untagged(priv, port, !pvid_enabled);
return ret;
}
-static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
+static bool rtl8366rb_is_vlan_valid(struct realtek_priv *priv, unsigned int vlan)
{
unsigned int max = RTL8366RB_NUM_VLANS - 1;
- if (smi->vlan4k_enabled)
+ if (priv->vlan4k_enabled)
max = RTL8366RB_NUM_VIDS - 1;
if (vlan > max)
@@ -1623,23 +1628,23 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
return true;
}
-static int rtl8366rb_enable_vlan(struct realtek_smi *smi, bool enable)
+static int rtl8366rb_enable_vlan(struct realtek_priv *priv, bool enable)
{
- dev_dbg(smi->dev, "%s VLAN\n", enable ? "enable" : "disable");
- return regmap_update_bits(smi->map,
+ dev_dbg(priv->dev, "%s VLAN\n", enable ? "enable" : "disable");
+ return regmap_update_bits(priv->map,
RTL8366RB_SGCR, RTL8366RB_SGCR_EN_VLAN,
enable ? RTL8366RB_SGCR_EN_VLAN : 0);
}
-static int rtl8366rb_enable_vlan4k(struct realtek_smi *smi, bool enable)
+static int rtl8366rb_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
- dev_dbg(smi->dev, "%s VLAN 4k\n", enable ? "enable" : "disable");
- return regmap_update_bits(smi->map, RTL8366RB_SGCR,
+ dev_dbg(priv->dev, "%s VLAN 4k\n", enable ? "enable" : "disable");
+ return regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_EN_VLAN_4KTB,
enable ? RTL8366RB_SGCR_EN_VLAN_4KTB : 0);
}
-static int rtl8366rb_phy_read(struct realtek_smi *smi, int phy, int regnum)
+static int rtl8366rb_phy_read(struct realtek_priv *priv, int phy, int regnum)
{
u32 val;
u32 reg;
@@ -1648,32 +1653,32 @@ static int rtl8366rb_phy_read(struct realtek_smi *smi, int phy, int regnum)
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
- ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
+ ret = regmap_write(priv->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_READ);
if (ret)
return ret;
reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
- ret = regmap_write(smi->map, reg, 0);
+ ret = regmap_write(priv->map, reg, 0);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to write PHY%d reg %04x @ %04x, ret %d\n",
phy, regnum, reg, ret);
return ret;
}
- ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_DATA_REG, &val);
+ ret = regmap_read(priv->map, RTL8366RB_PHY_ACCESS_DATA_REG, &val);
if (ret)
return ret;
- dev_dbg(smi->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n",
+ dev_dbg(priv->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n",
phy, regnum, reg, val);
return val;
}
-static int rtl8366rb_phy_write(struct realtek_smi *smi, int phy, int regnum,
+static int rtl8366rb_phy_write(struct realtek_priv *priv, int phy, int regnum,
u16 val)
{
u32 reg;
@@ -1682,34 +1687,45 @@ static int rtl8366rb_phy_write(struct realtek_smi *smi, int phy, int regnum,
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
- ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
+ ret = regmap_write(priv->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_WRITE);
if (ret)
return ret;
reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
- dev_dbg(smi->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n",
+ dev_dbg(priv->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n",
phy, regnum, reg, val);
- ret = regmap_write(smi->map, reg, val);
+ ret = regmap_write(priv->map, reg, val);
if (ret)
return ret;
return 0;
}
-static int rtl8366rb_reset_chip(struct realtek_smi *smi)
+static int rtl8366rb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum)
+{
+ return rtl8366rb_phy_read(ds->priv, phy, regnum);
+}
+
+static int rtl8366rb_dsa_phy_write(struct dsa_switch *ds, int phy, int regnum,
+ u16 val)
+{
+ return rtl8366rb_phy_write(ds->priv, phy, regnum, val);
+}
+
+static int rtl8366rb_reset_chip(struct realtek_priv *priv)
{
int timeout = 10;
u32 val;
int ret;
- realtek_smi_write_reg_noack(smi, RTL8366RB_RESET_CTRL_REG,
- RTL8366RB_CHIP_CTRL_RESET_HW);
+ priv->write_reg_noack(priv, RTL8366RB_RESET_CTRL_REG,
+ RTL8366RB_CHIP_CTRL_RESET_HW);
do {
usleep_range(20000, 25000);
- ret = regmap_read(smi->map, RTL8366RB_RESET_CTRL_REG, &val);
+ ret = regmap_read(priv->map, RTL8366RB_RESET_CTRL_REG, &val);
if (ret)
return ret;
@@ -1718,21 +1734,21 @@ static int rtl8366rb_reset_chip(struct realtek_smi *smi)
} while (--timeout);
if (!timeout) {
- dev_err(smi->dev, "timeout waiting for the switch to reset\n");
+ dev_err(priv->dev, "timeout waiting for the switch to reset\n");
return -EIO;
}
return 0;
}
-static int rtl8366rb_detect(struct realtek_smi *smi)
+static int rtl8366rb_detect(struct realtek_priv *priv)
{
- struct device *dev = smi->dev;
+ struct device *dev = priv->dev;
int ret;
u32 val;
/* Detect device */
- ret = regmap_read(smi->map, 0x5c, &val);
+ ret = regmap_read(priv->map, 0x5c, &val);
if (ret) {
dev_err(dev, "can't get chip ID (%d)\n", ret);
return ret;
@@ -1745,11 +1761,11 @@ static int rtl8366rb_detect(struct realtek_smi *smi)
return -ENODEV;
case 0x5937:
dev_info(dev, "found an RTL8366RB switch\n");
- smi->cpu_port = RTL8366RB_PORT_NUM_CPU;
- smi->num_ports = RTL8366RB_NUM_PORTS;
- smi->num_vlan_mc = RTL8366RB_NUM_VLANS;
- smi->mib_counters = rtl8366rb_mib_counters;
- smi->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters);
+ priv->cpu_port = RTL8366RB_PORT_NUM_CPU;
+ priv->num_ports = RTL8366RB_NUM_PORTS;
+ priv->num_vlan_mc = RTL8366RB_NUM_VLANS;
+ priv->mib_counters = rtl8366rb_mib_counters;
+ priv->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters);
break;
default:
dev_info(dev, "found an Unknown Realtek switch (id=0x%04x)\n",
@@ -1757,14 +1773,14 @@ static int rtl8366rb_detect(struct realtek_smi *smi)
break;
}
- ret = rtl8366rb_reset_chip(smi);
+ ret = rtl8366rb_reset_chip(priv);
if (ret)
return ret;
return 0;
}
-static const struct dsa_switch_ops rtl8366rb_switch_ops = {
+static const struct dsa_switch_ops rtl8366rb_switch_ops_smi = {
.get_tag_protocol = rtl8366_get_tag_protocol,
.setup = rtl8366rb_setup,
.phylink_mac_link_up = rtl8366rb_mac_link_up,
@@ -1787,7 +1803,32 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.port_max_mtu = rtl8366rb_max_mtu,
};
-static const struct realtek_smi_ops rtl8366rb_smi_ops = {
+static const struct dsa_switch_ops rtl8366rb_switch_ops_mdio = {
+ .get_tag_protocol = rtl8366_get_tag_protocol,
+ .setup = rtl8366rb_setup,
+ .phy_read = rtl8366rb_dsa_phy_read,
+ .phy_write = rtl8366rb_dsa_phy_write,
+ .phylink_mac_link_up = rtl8366rb_mac_link_up,
+ .phylink_mac_link_down = rtl8366rb_mac_link_down,
+ .get_strings = rtl8366_get_strings,
+ .get_ethtool_stats = rtl8366_get_ethtool_stats,
+ .get_sset_count = rtl8366_get_sset_count,
+ .port_bridge_join = rtl8366rb_port_bridge_join,
+ .port_bridge_leave = rtl8366rb_port_bridge_leave,
+ .port_vlan_filtering = rtl8366rb_vlan_filtering,
+ .port_vlan_add = rtl8366_vlan_add,
+ .port_vlan_del = rtl8366_vlan_del,
+ .port_enable = rtl8366rb_port_enable,
+ .port_disable = rtl8366rb_port_disable,
+ .port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags,
+ .port_bridge_flags = rtl8366rb_port_bridge_flags,
+ .port_stp_state_set = rtl8366rb_port_stp_state_set,
+ .port_fast_age = rtl8366rb_port_fast_age,
+ .port_change_mtu = rtl8366rb_change_mtu,
+ .port_max_mtu = rtl8366rb_max_mtu,
+};
+
+static const struct realtek_ops rtl8366rb_ops = {
.detect = rtl8366rb_detect,
.get_vlan_mc = rtl8366rb_get_vlan_mc,
.set_vlan_mc = rtl8366rb_set_vlan_mc,
@@ -1803,12 +1844,17 @@ static const struct realtek_smi_ops rtl8366rb_smi_ops = {
.phy_write = rtl8366rb_phy_write,
};
-const struct realtek_smi_variant rtl8366rb_variant = {
- .ds_ops = &rtl8366rb_switch_ops,
- .ops = &rtl8366rb_smi_ops,
+const struct realtek_variant rtl8366rb_variant = {
+ .ds_ops_smi = &rtl8366rb_switch_ops_smi,
+ .ds_ops_mdio = &rtl8366rb_switch_ops_mdio,
+ .ops = &rtl8366rb_ops,
.clk_delay = 10,
.cmd_read = 0xa9,
.cmd_write = 0xa8,
.chip_data_sz = sizeof(struct rtl8366rb),
};
EXPORT_SYMBOL_GPL(rtl8366rb_variant);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Driver for RTL8366RB ethernet switch");
+MODULE_LICENSE("GPL");
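
The rtl8366rb variant now publishes two DSA ops tables: the MDIO-managed
flavour additionally wires up .phy_read/.phy_write through the
rtl8366rb_dsa_phy_*() wrappers, while the SMI flavour instead registers its
own slave MDIO bus via priv->setup_interface() (see the setup hunk above).
A minimal sketch of how the bus-specific probe code is expected to pick
between them (realtek_dsa_init() and the is_mdio flag are hypothetical, not
part of this diff):

	static int realtek_dsa_init(struct realtek_priv *priv,
				    const struct realtek_variant *var,
				    bool is_mdio)
	{
		priv->ds->priv = priv;
		/* MDIO-managed switches let DSA poll the PHYs directly */
		priv->ds->ops = is_mdio ? var->ds_ops_mdio : var->ds_ops_smi;

		return dsa_register_switch(priv->ds);
	}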
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
index 7dcdd784aea4..fad5afe3819c 100644
--- a/drivers/net/dsa/sja1105/sja1105_flower.c
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -300,6 +300,46 @@ static int sja1105_flower_parse_key(struct sja1105_private *priv,
return -EOPNOTSUPP;
}
+static int sja1105_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress)
{
@@ -321,12 +361,9 @@ int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
flow_action_for_each(i, act, &rule->action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
- if (act->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- rc = -EOPNOTSUPP;
+ rc = sja1105_policer_validate(&rule->action, act, extack);
+ if (rc)
goto out;
- }
rc = sja1105_flower_policer(priv, port, extack, cookie,
&key,
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index b513713be610..b33841c6507a 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -393,10 +393,8 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
.start_dynspc = 0,
/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
.poly = 0x97,
- /* This selects between Independent VLAN Learning (IVL) and
- * Shared VLAN Learning (SVL)
- */
- .shared_learn = true,
+ /* Always use Independent VLAN Learning (IVL) */
+ .shared_learn = false,
/* Don't discard management traffic based on ENFPORT -
* we don't perform SMAC port enforcement anyway, so
* what we are setting here doesn't matter.
@@ -1358,37 +1356,16 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
return sja1105_clocking_setup_port(priv, port);
}
-/* The SJA1105 MAC programming model is through the static config (the xMII
- * Mode table cannot be dynamically reconfigured), and we have to program
- * that early (earlier than PHYLINK calls us, anyway).
- * So just error out in case the connected PHY attempts to change the initial
- * system interface MII protocol from what is defined in the DT, at least for
- * now.
- */
-static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
- phy_interface_t interface)
-{
- return priv->phy_mode[port] != interface;
-}
-
-static void sja1105_mac_config(struct dsa_switch *ds, int port,
- unsigned int mode,
- const struct phylink_link_state *state)
+static struct phylink_pcs *
+sja1105_mac_select_pcs(struct dsa_switch *ds, int port, phy_interface_t iface)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
- struct dw_xpcs *xpcs;
-
- if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
- dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
- phy_modes(state->interface));
- return;
- }
-
- xpcs = priv->xpcs[port];
+ struct dw_xpcs *xpcs = priv->xpcs[port];
if (xpcs)
- phylink_set_pcs(dp->pl, &xpcs->pcs);
+ return &xpcs->pcs;
+
+ return NULL;
}
static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
@@ -1412,48 +1389,53 @@ static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
sja1105_inhibit_tx(priv, BIT(port), false);
}
-static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void sja1105_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- /* Construct a new mask which exhaustively contains all link features
- * supported by the MAC, and then apply that (logical AND) to what will
- * be sent to the PHY for "marketing".
- */
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct sja1105_private *priv = ds->priv;
struct sja1105_xmii_params_entry *mii;
+ phy_interface_t phy_mode;
- mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
-
- /* include/linux/phylink.h says:
- * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
- * expects the MAC driver to return all supported link modes.
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
*/
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- sja1105_phy_mode_mismatch(priv, port, state->interface)) {
- linkmode_zero(supported);
- return;
+ config->legacy_pre_march2020 = false;
+
+ phy_mode = priv->phy_mode[port];
+ if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
+ /* Changing the PHY mode on SERDES ports is possible and makes
+ * sense, because that is done through the XPCS. We allow
+ * changes between SGMII and 2500base-X.
+ */
+ if (priv->info->supports_sgmii[port])
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+
+ if (priv->info->supports_2500basex[port])
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+ } else {
+ /* The SJA1105 MAC programming model is through the static
+ * config (the xMII Mode table cannot be dynamically
+ * reconfigured), and we have to program that early.
+ */
+ __set_bit(phy_mode, config->supported_interfaces);
}
/* The MAC does not support pause frames, and also doesn't
* support half-duplex traffic modes.
*/
- phylink_set(mask, Autoneg);
- phylink_set(mask, MII);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 100baseT1_Full);
+ config->mac_capabilities = MAC_10FD | MAC_100FD;
+
+ mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
mii->xmii_mode[port] == XMII_MODE_SGMII)
- phylink_set(mask, 1000baseT_Full);
- if (priv->info->supports_2500basex[port]) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
+ config->mac_capabilities |= MAC_1000FD;
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ if (priv->info->supports_2500basex[port])
+ config->mac_capabilities |= MAC_2500FD;
}
static int
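
sja1105_phylink_get_caps() replaces the old validate callback: the driver
declares its MAC capabilities and the PHY interface modes each port
supports, and phylink derives the ethtool link-mode masks from that on its
own. For a fixed RGMII-only MAC the whole implementation collapses to
something like the following (foo_* is a hypothetical driver, mirroring
the xrs700x conversion further down):

	static void foo_phylink_get_caps(struct dsa_switch *ds, int port,
					 struct phylink_config *config)
	{
		/* interface modes this port's MAC can drive */
		phy_interface_set_rgmii(config->supported_interfaces);

		/* full duplex only, at 10/100/1000 */
		config->mac_capabilities = MAC_10FD | MAC_100FD | MAC_1000FD;
	}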
@@ -1819,25 +1801,52 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
}
static int sja1105_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
return priv->info->fdb_add_cmd(ds, port, addr, vid);
}
static int sja1105_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
return priv->info->fdb_del_cmd(ds, port, addr, vid);
}
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
struct device *dev = ds->dev;
int i;
@@ -1874,7 +1883,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* We need to hide the dsa_8021q VLANs from the user. */
- if (!dsa_port_is_vlan_filtering(dp))
+ if (vid_is_dsa_8021q(l2_lookup.vlanid))
l2_lookup.vlanid = 0;
rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
if (rc)
@@ -1885,7 +1894,15 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
static void sja1105_fast_age(struct dsa_switch *ds, int port)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
+ struct dsa_db db = {
+ .type = DSA_DB_BRIDGE,
+ .bridge = {
+ .dev = dsa_port_bridge_dev_get(dp),
+ .num = dsa_port_bridge_num_get(dp),
+ },
+ };
int i;
for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
@@ -1913,7 +1930,7 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
- rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid);
+ rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
if (rc) {
dev_err(ds->dev,
"Failed to delete FDB entry %pM vid %lld: %pe\n",
@@ -1924,15 +1941,17 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
}
static int sja1105_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
- return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
+ return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid, db);
}
static int sja1105_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
- return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
+ return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid, db);
}
/* Common function for unicast and broadcast flood configuration.
@@ -2075,7 +2094,8 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
static int sja1105_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
int rc;
@@ -2083,7 +2103,7 @@ static int sja1105_bridge_join(struct dsa_switch *ds, int port,
if (rc)
return rc;
- rc = dsa_tag_8021q_bridge_tx_fwd_offload(ds, port, bridge);
+ rc = dsa_tag_8021q_bridge_join(ds, port, bridge);
if (rc) {
sja1105_bridge_member(ds, port, bridge, false);
return rc;
@@ -2097,7 +2117,7 @@ static int sja1105_bridge_join(struct dsa_switch *ds, int port,
static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
- dsa_tag_8021q_bridge_tx_fwd_unoffload(ds, port, bridge);
+ dsa_tag_8021q_bridge_leave(ds, port, bridge);
sja1105_bridge_member(ds, port, bridge, false);
}
@@ -2357,7 +2377,6 @@ sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
struct netlink_ext_ack *extack)
{
- struct sja1105_l2_lookup_params_entry *l2_lookup_params;
struct sja1105_general_params_entry *general_params;
struct sja1105_private *priv = ds->priv;
struct sja1105_table *table;
@@ -2395,28 +2414,6 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
general_params->incl_srcpt1 = enabled;
general_params->incl_srcpt0 = enabled;
- /* VLAN filtering => independent VLAN learning.
- * No VLAN filtering (or best effort) => shared VLAN learning.
- *
- * In shared VLAN learning mode, untagged traffic still gets
- * pvid-tagged, and the FDB table gets populated with entries
- * containing the "real" (pvid or from VLAN tag) VLAN ID.
- * However the switch performs a masked L2 lookup in the FDB,
- * effectively only looking up a frame's DMAC (and not VID) for the
- * forwarding decision.
- *
- * This is extremely convenient for us, because in modes with
- * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
- * each front panel port. This is good for identification but breaks
- * learning badly - the VID of the learnt FDB entry is unique, aka
- * no frames coming from any other port are going to have it. So
- * for forwarding purposes, this is as though learning was broken
- * (all frames get flooded).
- */
- table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
- l2_lookup_params = table->entries;
- l2_lookup_params->shared_learn = !enabled;
-
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
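
Together with the fdb_add()/fdb_del() changes above, dropping shared VLAN
learning means the FDB is always keyed by VID, and the tag_8021q VIDs keep
standalone ports and bridges in disjoint databases. An illustrative
scenario (the concrete VID values are invented, but drawn from the
3072-4095 dsa_8021q range reserved further down):

	/* Port swp2 standalone: entries keyed by its per-port VID, say
	 * 3074. After joining bridge 1 (per-bridge VID, say 3088), the
	 * same MAC address is learned again under 3088 -- with IVL the
	 * two entries coexist instead of shadowing each other.
	 */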
@@ -2525,7 +2522,7 @@ static int sja1105_bridge_vlan_add(struct dsa_switch *ds, int port,
*/
if (vid_is_dsa_8021q(vlan->vid)) {
NL_SET_ERR_MSG_MOD(extack,
- "Range 1024-3071 reserved for dsa_8021q operation");
+ "Range 3072-4095 reserved for dsa_8021q operation");
return -EBUSY;
}
@@ -2850,7 +2847,7 @@ static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
static int sja1105_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+ bool ingress, struct netlink_ext_ack *extack)
{
return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
ingress, true);
@@ -3102,6 +3099,7 @@ static int sja1105_setup(struct dsa_switch *ds)
*/
ds->vlan_filtering_is_global = true;
ds->untag_bridge_pvid = true;
+ ds->fdb_isolation = true;
/* tag_8021q has 3 bits for the VBID, and the value 0 is reserved */
ds->max_num_bridges = 7;
@@ -3152,8 +3150,8 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.set_ageing_time = sja1105_set_ageing_time,
.port_change_mtu = sja1105_change_mtu,
.port_max_mtu = sja1105_get_max_mtu,
- .phylink_validate = sja1105_phylink_validate,
- .phylink_mac_config = sja1105_mac_config,
+ .phylink_get_caps = sja1105_phylink_get_caps,
+ .phylink_mac_select_pcs = sja1105_mac_select_pcs,
.phylink_mac_link_up = sja1105_mac_link_up,
.phylink_mac_link_down = sja1105_mac_link_down,
.get_strings = sja1105_get_strings,
@@ -3346,18 +3344,16 @@ static int sja1105_probe(struct spi_device *spi)
return dsa_register_switch(priv->ds);
}
-static int sja1105_remove(struct spi_device *spi)
+static void sja1105_remove(struct spi_device *spi)
{
struct sja1105_private *priv = spi_get_drvdata(spi);
if (!priv)
- return 0;
+ return;
dsa_unregister_switch(priv->ds);
spi_set_drvdata(spi, NULL);
-
- return 0;
}
static void sja1105_shutdown(struct spi_device *spi)
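
sja1105_remove() is converted as part of the tree-wide change that makes
spi_driver::remove return void (the SPI core mostly ignored the returned
value anyway); the same conversion appears below for vsc73xx and ax88796c.
The resulting pattern, with foo_* standing in for any driver:

	static void foo_spi_remove(struct spi_device *spi)
	{
		struct foo_priv *priv = spi_get_drvdata(spi);

		/* nothing to do if probe never attached driver data */
		if (!priv)
			return;

		foo_teardown(priv);
		spi_set_drvdata(spi, NULL);
	}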
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index be3068a935af..30fb2cc40164 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -399,7 +399,7 @@ static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
- netif_rx_ni(skb);
+ netif_rx(skb);
}
if (ptp_data->extts_enabled)
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c
index f5dca6a9b0f9..b7e95d60a6e4 100644
--- a/drivers/net/dsa/sja1105/sja1105_vl.c
+++ b/drivers/net/dsa/sja1105/sja1105_vl.c
@@ -296,6 +296,19 @@ static bool sja1105_vl_key_lower(struct sja1105_vl_lookup_entry *a,
return false;
}
+/* FIXME: this should change when the bridge upper of the port changes. */
+static u16 sja1105_port_get_tag_8021q_vid(struct dsa_port *dp)
+{
+ unsigned long bridge_num;
+
+ if (!dp->bridge)
+ return dsa_tag_8021q_standalone_vid(dp);
+
+ bridge_num = dsa_port_bridge_num_get(dp);
+
+ return dsa_tag_8021q_bridge_vid(bridge_num);
+}
+
static int sja1105_init_virtual_links(struct sja1105_private *priv,
struct netlink_ext_ack *extack)
{
@@ -394,8 +407,9 @@ static int sja1105_init_virtual_links(struct sja1105_private *priv,
vl_lookup[k].vlanid = rule->key.vl.vid;
vl_lookup[k].vlanprior = rule->key.vl.pcp;
} else {
+ /* FIXME */
struct dsa_port *dp = dsa_to_port(priv->ds, port);
- u16 vid = dsa_tag_8021q_rx_vid(dp);
+ u16 vid = sja1105_port_get_tag_8021q_vid(dp);
vl_lookup[k].vlanid = vid;
vl_lookup[k].vlanprior = 0;
diff --git a/drivers/net/dsa/vitesse-vsc73xx-spi.c b/drivers/net/dsa/vitesse-vsc73xx-spi.c
index 645398901e05..3110895358d8 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-spi.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-spi.c
@@ -159,18 +159,16 @@ static int vsc73xx_spi_probe(struct spi_device *spi)
return vsc73xx_probe(&vsc_spi->vsc);
}
-static int vsc73xx_spi_remove(struct spi_device *spi)
+static void vsc73xx_spi_remove(struct spi_device *spi)
{
struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi);
if (!vsc_spi)
- return 0;
+ return;
vsc73xx_remove(&vsc_spi->vsc);
spi_set_drvdata(spi, NULL);
-
- return 0;
}
static void vsc73xx_spi_shutdown(struct spi_device *spi)
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index 0730352cdd57..3887ed33c5fe 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -442,34 +442,27 @@ static void xrs700x_teardown(struct dsa_switch *ds)
cancel_delayed_work_sync(&priv->mib_work);
}
-static void xrs700x_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void xrs700x_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
switch (port) {
case 0:
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ config->mac_capabilities = MAC_10FD | MAC_100FD;
break;
+
case 1:
case 2:
case 3:
- phylink_set(mask, 1000baseT_Full);
+ phy_interface_set_rgmii(config->supported_interfaces);
+ config->mac_capabilities = MAC_10FD | MAC_100FD | MAC_1000FD;
break;
+
default:
- linkmode_zero(supported);
dev_err(ds->dev, "Unsupported port: %i\n", port);
- return;
+ break;
}
-
- phylink_set_port_modes(mask);
-
- /* The switch only supports full duplex. */
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
}
static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
@@ -541,7 +534,8 @@ static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
}
static int xrs700x_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge, bool *tx_fwd_offload)
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
return xrs700x_bridge_common(ds, port, bridge, true);
}
@@ -703,7 +697,7 @@ static const struct dsa_switch_ops xrs700x_ops = {
.setup = xrs700x_setup,
.teardown = xrs700x_teardown,
.port_stp_state_set = xrs700x_port_stp_state_set,
- .phylink_validate = xrs700x_phylink_validate,
+ .phylink_get_caps = xrs700x_phylink_get_caps,
.phylink_mac_link_up = xrs700x_mac_link_up,
.get_strings = xrs700x_get_strings,
.get_sset_count = xrs700x_get_sset_count,
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 8aec5d9fbfef..ad57209007e1 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -138,11 +138,6 @@ MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
module_param(rx_copybreak, int, 0);
module_param(use_mmio, int, 0);
-#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
-#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
-#undef NETIF_F_TSO
-#endif
-
#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
#error TX ring too small!
#endif
@@ -2261,9 +2256,28 @@ out:
return mode;
}
+#if MAX_SKB_FRAGS > 32
+
+#include <net/vxlan.h>
+
+static netdev_features_t typhoon_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ if (skb_shinfo(skb)->nr_frags > 32 && skb_is_gso(skb))
+ features &= ~NETIF_F_GSO_MASK;
+
+ features = vlan_features_check(skb, features);
+ return vxlan_features_check(skb, features);
+}
+#endif
+
static const struct net_device_ops typhoon_netdev_ops = {
.ndo_open = typhoon_open,
.ndo_stop = typhoon_close,
+#if MAX_SKB_FRAGS > 32
+ .ndo_features_check = typhoon_features_check,
+#endif
.ndo_start_xmit = typhoon_start_tx,
.ndo_set_rx_mode = typhoon_set_rx_mode,
.ndo_tx_timeout = typhoon_tx_timeout,
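
The compile-time #warning/#undef NETIF_F_TSO escape hatch is replaced by a
per-packet .ndo_features_check hook, so TSO stays enabled for the common
case and only oversized scatter-gather lists fall back:

	/* Example: a GSO skb arrives carrying 40 page fragments.
	 * typhoon_features_check() clears NETIF_F_GSO_MASK for that skb
	 * only, the core segments it in software, and typhoon_start_tx()
	 * never sees a packet exceeding the 32-entry SG list. Non-GSO
	 * traffic is unaffected.
	 */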
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index e320cccba61a..21047ae1bc3d 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -405,15 +405,13 @@ static int mcf8390_init(struct net_device *dev)
static int mcf8390_probe(struct platform_device *pdev)
{
struct net_device *dev;
- struct resource *mem, *irq;
+ struct resource *mem;
resource_size_t msize;
- int ret;
+ int ret, irq;
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (irq == NULL) {
- dev_err(&pdev->dev, "no IRQ specified?\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
return -ENXIO;
- }
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (mem == NULL) {
@@ -433,7 +431,7 @@ static int mcf8390_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
platform_set_drvdata(pdev, dev);
- dev->irq = irq->start;
+ dev->irq = irq;
dev->base_addr = mem->start;
ret = mcf8390_init(dev);
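
This and the bcm63xx_enet hunk below share the same recipe:
platform_get_irq() already logs the failure and can return -EPROBE_DEFER,
so callers only need the negative check. A sketch of the usual pattern (a
common variant propagates the returned code instead of flattening it to
-ENXIO/-ENODEV as these two conversions do):

	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* preserves -EPROBE_DEFER */

	dev->irq = irq;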
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index db3ec4768159..bd4cb9d7c35d 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -78,6 +78,7 @@ source "drivers/net/ethernet/ezchip/Kconfig"
source "drivers/net/ethernet/faraday/Kconfig"
source "drivers/net/ethernet/freescale/Kconfig"
source "drivers/net/ethernet/fujitsu/Kconfig"
+source "drivers/net/ethernet/fungible/Kconfig"
source "drivers/net/ethernet/google/Kconfig"
source "drivers/net/ethernet/hisilicon/Kconfig"
source "drivers/net/ethernet/huawei/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 8a87c1083d1d..8ef43e0c33c0 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/
obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
+obj-$(CONFIG_NET_VENDOR_FUNGIBLE) += fungible/
obj-$(CONFIG_NET_VENDOR_GOOGLE) += google/
obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/
obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 537e6a85e18d..fbf4588994ac 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2413,11 +2413,13 @@ static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
kfree(tx_ring->tcb_ring);
}
+#define MAX_TX_DESC_PER_PKT 24
+
/* nic_send_packet - NIC specific send handler for version B silicon. */
static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
{
u32 i;
- struct tx_desc desc[24];
+ struct tx_desc desc[MAX_TX_DESC_PER_PKT];
u32 frag = 0;
u32 thiscopy, remainder;
struct sk_buff *skb = tcb->skb;
@@ -2432,9 +2434,6 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
* more than 5 fragments.
*/
- /* nr_frags should be no more than 18. */
- BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23);
-
memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
for (i = 0; i < nr_frags; i++) {
@@ -3762,6 +3761,13 @@ static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
struct et131x_adapter *adapter = netdev_priv(netdev);
struct tx_ring *tx_ring = &adapter->tx_ring;
+ /* This driver does not support TSO, so it is very unlikely
+ * that this condition is true.
+ */
+ if (unlikely(skb_shinfo(skb)->nr_frags > MAX_TX_DESC_PER_PKT - 2)) {
+ if (skb_linearize(skb))
+ goto drop_err;
+ }
/* stop the queue if it's getting full */
if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
netif_stop_queue(netdev);
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index db97170da8c7..7f247ccbe6ba 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -513,7 +513,7 @@ static int sgdma_txbusy(struct altera_tse_private *priv)
{
int delay = 0;
- /* if DMA is busy, wait for current transactino to finish */
+ /* if DMA is busy, wait for current transaction to finish */
while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
& SGDMA_STSREG_BUSY) && (delay++ < 100))
udelay(1);
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 993b2fb42961..a3816264c35c 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -72,7 +72,7 @@ MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");
*/
#define ALTERA_RXDMABUFFER_SIZE 2048
-/* Allow network stack to resume queueing packets after we've
+/* Allow network stack to resume queuing packets after we've
* finished transmitting at least 1/4 of the packets in the queue.
*/
#define TSE_TX_THRESH(x) (x->tx_ring_size / 4)
@@ -390,7 +390,7 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
"RCV pktstatus %08X pktlength %08X\n",
pktstatus, pktlength);
- /* DMA trasfer from TSE starts with 2 aditional bytes for
+ /* DMA transfer from TSE starts with 2 additional bytes for
* IP payload alignment. Status returned by get_rx_status()
* contains DMA transfer length. Packet is 2 bytes shorter.
*/
@@ -1044,7 +1044,7 @@ static void altera_tse_set_mcfilterall(struct net_device *dev)
csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
}
-/* Set or clear the multicast filter for this adaptor
+/* Set or clear the multicast filter for this adapter
*/
static void tse_set_rx_mode_hashfilter(struct net_device *dev)
{
@@ -1064,7 +1064,7 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev)
spin_unlock(&priv->mac_cfg_lock);
}
-/* Set or clear the multicast filter for this adaptor
+/* Set or clear the multicast filter for this adapter
*/
static void tse_set_rx_mode(struct net_device *dev)
{
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 53080fd143dc..07444aead3fd 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1400,10 +1400,9 @@ static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag)
struct sk_buff *skb;
if (!first_frag)
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_copybreak);
+ skb = napi_alloc_skb(rx_ring->napi, rx_ring->rx_copybreak);
else
- skb = build_skb(first_frag, ENA_PAGE_SIZE);
+ skb = napi_build_skb(first_frag, ENA_PAGE_SIZE);
if (unlikely(!skb)) {
ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index ff2d099aab21..53dc8d5fede8 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -696,6 +696,12 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
buf_pool->rx_skb[skb_index] = NULL;
datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
+
+ /* strip off CRC as HW isn't doing this */
+ nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
+ if (!nv)
+ datalen -= 4;
+
skb_put(skb, datalen);
prefetch(skb->data - NET_IP_ALIGN);
skb->protocol = eth_type_trans(skb, ndev);
@@ -717,12 +723,8 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
}
}
- nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
- if (!nv) {
- /* strip off CRC as HW isn't doing this */
- datalen -= 4;
+ if (!nv)
goto skip_jumbo;
- }
slots = page_pool->slots - 1;
head = page_pool->head;
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 9acf589b1178..87f40c2ba904 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -132,6 +132,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
{
struct arc_emac_mdio_bus_data *data = &priv->bus_data;
struct device_node *np = priv->dev->of_node;
+ const char *name = "Synopsys MII Bus";
struct mii_bus *bus;
int error;
@@ -142,7 +143,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
priv->bus = bus;
bus->priv = priv;
bus->parent = priv->dev;
- bus->name = "Synopsys MII Bus";
+ bus->name = name;
bus->read = &arc_mdio_read;
bus->write = &arc_mdio_write;
bus->reset = &arc_mdio_reset;
@@ -167,7 +168,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
if (error) {
mdiobus_free(bus);
return dev_err_probe(priv->dev, error,
- "cannot register MDIO bus %s\n", bus->name);
+ "cannot register MDIO bus %s\n", name);
}
return 0;
diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c
index e7a9f9863258..6ba5b024a7be 100644
--- a/drivers/net/ethernet/asix/ax88796c_main.c
+++ b/drivers/net/ethernet/asix/ax88796c_main.c
@@ -433,7 +433,7 @@ ax88796c_skb_return(struct ax88796c_device *ax_local,
netif_info(ax_local, rx_status, ndev, "< rx, len %zu, type 0x%x\n",
skb->len + sizeof(struct ethhdr), skb->protocol);
- status = netif_rx_ni(skb);
+ status = netif_rx(skb);
if (status != NET_RX_SUCCESS && net_ratelimit())
netif_info(ax_local, rx_err, ndev,
"netif_rx status %d\n", status);
@@ -1102,7 +1102,7 @@ err:
return ret;
}
-static int ax88796c_remove(struct spi_device *spi)
+static void ax88796c_remove(struct spi_device *spi)
{
struct ax88796c_device *ax_local = dev_get_drvdata(&spi->dev);
struct net_device *ndev = ax_local->ndev;
@@ -1112,8 +1112,6 @@ static int ax88796c_remove(struct spi_device *spi)
netif_info(ax_local, probe, ndev, "removing network device %s %s\n",
dev_driver_string(&spi->dev),
dev_name(&spi->dev));
-
- return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 4ad3fc72e74e..a89b93cb4e26 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1181,8 +1181,11 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
alx->hw.mtu = mtu;
alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
netdev_update_features(netdev);
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
+ mutex_lock(&alx->mtx);
alx_reinit(alx);
+ mutex_unlock(&alx->mtx);
+ }
return 0;
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index f50604f3e541..49459397993e 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1051,7 +1051,7 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
* each ring/block may need up to 8 bytes for alignment, hence the
* additional bytes tacked onto the end.
*/
- ring_header->size = size =
+ ring_header->size =
sizeof(struct atl1c_tpd_desc) * tpd_ring->count * tqc +
sizeof(struct atl1c_rx_free_desc) * rfd_ring->count * rqc +
sizeof(struct atl1c_recv_ret_status) * rfd_ring->count * rqc +
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index b04e423c446a..c1b97e8c55ef 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1716,17 +1716,17 @@ static int bcm_enet_probe(struct platform_device *pdev)
struct bcm_enet_priv *priv;
struct net_device *dev;
struct bcm63xx_enet_platform_data *pd;
- struct resource *res_irq, *res_irq_rx, *res_irq_tx;
+ int irq, irq_rx, irq_tx;
struct mii_bus *bus;
int i, ret;
if (!bcm_enet_shared_base[0])
return -EPROBE_DEFER;
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
- res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
- if (!res_irq || !res_irq_rx || !res_irq_tx)
+ irq = platform_get_irq(pdev, 0);
+ irq_rx = platform_get_irq(pdev, 1);
+ irq_tx = platform_get_irq(pdev, 2);
+ if (irq < 0 || irq_rx < 0 || irq_tx < 0)
return -ENODEV;
dev = alloc_etherdev(sizeof(*priv));
@@ -1748,9 +1748,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
goto out;
}
- dev->irq = priv->irq = res_irq->start;
- priv->irq_rx = res_irq_rx->start;
- priv->irq_tx = res_irq_tx->start;
+ dev->irq = priv->irq = irq;
+ priv->irq_rx = irq_rx;
+ priv->irq_tx = irq_tx;
priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
if (IS_ERR(priv->mac_clk)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index e20aafeb4ca9..b97ed9b5f685 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8216,7 +8216,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask);
if (rc) {
dev_err(&pdev->dev,
- "pci_set_consistent_dma_mask failed, aborting\n");
+ "dma_set_coherent_mask failed, aborting\n");
goto err_out_unmap;
}
} else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index a19dd6797070..dd5945c4bfec 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1271,7 +1271,7 @@ struct bnx2x_fw_stats_data {
struct per_port_stats port;
struct per_pf_stats pf;
struct fcoe_statistics_params fcoe;
- struct per_queue_stats queue_stats[1];
+ struct per_queue_stats queue_stats[];
};
/* Public slow path states */
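
The one-element queue_stats[1] array becomes a C99 flexible array member,
the tree-wide idiom for trailing variable-length data. Allocation sizing
then typically goes through struct_size() (a hedged sketch; num_queues is
hypothetical and this call is not part of the hunk):

	struct bnx2x_fw_stats_data *data;

	data = kzalloc(struct_size(data, queue_stats, num_queues),
		       GFP_KERNEL);
	if (!data)
		return -ENOMEM;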
@@ -2533,6 +2533,4 @@ void bnx2x_register_phc(struct bnx2x *bp);
* Meant for implicit re-load flows.
*/
int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
-int bnx2x_init_firmware(struct bnx2x *bp);
-void bnx2x_release_firmware(struct bnx2x *bp);
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8d36ebbf08e1..5729a5ab059d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2364,24 +2364,30 @@ int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
/* is another pf loaded on this engine? */
if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
- /* build my FW version dword */
- u32 my_fw = (bp->fw_major) + (bp->fw_minor << 8) +
- (bp->fw_rev << 16) + (bp->fw_eng << 24);
+ u8 loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng;
+ u32 loaded_fw;
/* read loaded FW from chip */
- u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+ loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
- DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
- loaded_fw, my_fw);
+ loaded_fw_major = loaded_fw & 0xff;
+ loaded_fw_minor = (loaded_fw >> 8) & 0xff;
+ loaded_fw_rev = (loaded_fw >> 16) & 0xff;
+ loaded_fw_eng = (loaded_fw >> 24) & 0xff;
+
+ DP(BNX2X_MSG_SP, "loaded fw 0x%x major 0x%x minor 0x%x rev 0x%x eng 0x%x\n",
+ loaded_fw, loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng);
/* abort nic load if version mismatch */
- if (my_fw != loaded_fw) {
+ if (loaded_fw_major != BCM_5710_FW_MAJOR_VERSION ||
+ loaded_fw_minor != BCM_5710_FW_MINOR_VERSION ||
+ loaded_fw_eng != BCM_5710_FW_ENGINEERING_VERSION ||
+ loaded_fw_rev < BCM_5710_FW_REVISION_VERSION_V15) {
if (print_err)
- BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
- loaded_fw, my_fw);
+ BNX2X_ERR("loaded FW incompatible. Aborting\n");
else
- BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
- loaded_fw, my_fw);
+ BNX2X_DEV_INFO("loaded FW incompatible, possibly due to MF UNDI\n");
+
return -EBUSY;
}
}
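The rewrite stops demanding an exact match against the driver's own packed version word and instead accepts any already-loaded firmware with the same major/minor/engineering numbers and a revision of at least V15. The word read from XSEM_REG_PRAM packs one byte per field, low byte first; a toy round-trip with made-up numbers:

    u32 fw = 7 | (13 << 8) | (1 << 16) | (0 << 24);  /* 7.13.1.0 */
    u8 major = fw & 0xff;          /* 7  */
    u8 minor = (fw >> 8) & 0xff;   /* 13 */
    u8 rev   = (fw >> 16) & 0xff;  /* 1  */
    u8 eng   = (fw >> 24) & 0xff;  /* 0  */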
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 4e85e7dbc2be..7071604f9984 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6178,7 +6178,8 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
return -EINVAL;
}
- ret = scnprintf(str, *len, "%hx.%hx", num >> 16, num);
+ ret = scnprintf(str, *len, "%x.%x", (num >> 16) & 0xFFFF,
+ num & 0xFFFF);
*len -= ret;
return 0;
}
@@ -6193,7 +6194,8 @@ static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
return -EINVAL;
}
- ret = scnprintf(str, *len, "%hhx.%hhx.%hhx", num >> 16, num >> 8, num);
+ ret = scnprintf(str, *len, "%x.%x.%x", (num >> 16) & 0xFF,
+ (num >> 8) & 0xFF, num & 0xFF);
*len -= ret;
return 0;
}
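Both hunks replace the %hx/%hhx length modifiers, deprecated in kernel printf because varargs promote sub-int arguments to int anyway, with explicit masking that expresses the same truncation. For example, with a made-up num = 0x00a1b2c3 the three-part format yields "a1.b2.c3":

    u32 num = 0x00a1b2c3;  /* hypothetical version word */
    char buf[16];

    scnprintf(buf, sizeof(buf), "%x.%x.%x",
              (num >> 16) & 0xFF, (num >> 8) & 0xFF, num & 0xFF);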
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index eedb48d945ed..c19b072f3a23 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12319,15 +12319,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bnx2x_read_fwinfo(bp);
- if (IS_PF(bp)) {
- rc = bnx2x_init_firmware(bp);
-
- if (rc) {
- bnx2x_free_mem_bp(bp);
- return rc;
- }
- }
-
func = BP_FUNC(bp);
/* need to reset chip if undi was active */
@@ -12340,7 +12331,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
rc = bnx2x_prev_unload(bp);
if (rc) {
- bnx2x_release_firmware(bp);
bnx2x_free_mem_bp(bp);
return rc;
}
@@ -13409,7 +13399,7 @@ do { \
(u8 *)bp->arr, len); \
} while (0)
-int bnx2x_init_firmware(struct bnx2x *bp)
+static int bnx2x_init_firmware(struct bnx2x *bp)
{
const char *fw_file_name, *fw_file_name_v15;
struct bnx2x_fw_file_hdr *fw_hdr;
@@ -13509,7 +13499,7 @@ request_firmware_exit:
return rc;
}
-void bnx2x_release_firmware(struct bnx2x *bp)
+static void bnx2x_release_firmware(struct bnx2x *bp)
{
kfree(bp->init_ops_offsets);
kfree(bp->init_ops);
@@ -14026,7 +14016,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
return 0;
init_one_freemem:
- bnx2x_release_firmware(bp);
bnx2x_free_mem_bp(bp);
init_one_exit:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b1c98d1408b8..1c28495875cf 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -233,6 +233,7 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
+ ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
};
static struct workqueue_struct *bnxt_pf_wq;
@@ -369,7 +370,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
i = skb_get_queue_mapping(skb);
if (unlikely(i >= bp->tx_nr_rings)) {
dev_kfree_skb_any(skb);
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
return NETDEV_TX_OK;
}
@@ -645,7 +646,7 @@ tx_kick_pending:
if (txr->kick_pending)
bnxt_txr_db_kick(bp, txr, txr->tx_prod);
txr->tx_buf_ring[txr->tx_prod].skb = NULL;
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
return NETDEV_TX_OK;
}
@@ -2079,6 +2080,16 @@ static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
(BNXT_EVENT_RING_TYPE(data2) == \
ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
+#define BNXT_EVENT_PHC_EVENT_TYPE(data1) \
+ (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
+ ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
+
+#define BNXT_EVENT_PHC_RTC_UPDATE(data1) \
+ (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
+ ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
+
+#define BNXT_PHC_BITS 48
+
static int bnxt_async_event_process(struct bnxt *bp,
struct hwrm_async_event_cmpl *cmpl)
{
@@ -2258,6 +2269,24 @@ static int bnxt_async_event_process(struct bnxt *bp,
bnxt_event_error_report(bp, data1, data2);
goto async_event_process_exit;
}
+ case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
+ switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
+ case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
+ if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u64 ns;
+
+ spin_lock_bh(&ptp->ptp_lock);
+ bnxt_ptp_update_current_time(bp);
+ ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
+ BNXT_PHC_BITS) | ptp->current_time);
+ bnxt_ptp_rtc_timecounter_init(ptp, ns);
+ spin_unlock_bh(&ptp->ptp_lock);
+ }
+ break;
+ }
+ goto async_event_process_exit;
+ }
case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
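The new PHC_RTC_UPDATE case receives only the upper 16 bits of the new 64-bit wall time in event_data1; the handler re-reads the free-running 48-bit hardware counter and splices the two halves together. A sketch of the arithmetic with illustrative values (BNXT_PHC_BITS is 48, and current_time here stands in for ptp->current_time):

    u64 msb = 0x17f3;                                 /* from event_data1 */
    u64 low = current_time & GENMASK_ULL(47, 0);      /* 48-bit counter */
    u64 ns  = (msb << BNXT_PHC_BITS) | low;           /* full 64-bit ns */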
@@ -7416,6 +7445,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
struct hwrm_port_mac_ptp_qcfg_output *resp;
struct hwrm_port_mac_ptp_qcfg_input *req;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ bool phc_cfg;
u8 flags;
int rc;
@@ -7458,7 +7488,8 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
rc = -ENODEV;
goto exit;
}
- rc = bnxt_ptp_init(bp);
+ phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
+ rc = bnxt_ptp_init(bp, phc_cfg);
if (rc)
netdev_warn(bp->dev, "PTP initialization failed.\n");
exit:
@@ -7516,6 +7547,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
@@ -9267,7 +9300,7 @@ void bnxt_tx_enable(struct bnxt *bp)
/* Make sure napi polls see @dev_state change */
synchronize_net();
netif_tx_wake_all_queues(bp->dev);
- if (bp->link_info.link_up)
+ if (BNXT_LINK_IS_UP(bp))
netif_carrier_on(bp->dev);
}
@@ -9297,7 +9330,7 @@ static char *bnxt_report_fec(struct bnxt_link_info *link_info)
void bnxt_report_link(struct bnxt *bp)
{
- if (bp->link_info.link_up) {
+ if (BNXT_LINK_IS_UP(bp)) {
const char *signal = "";
const char *flow_ctrl;
const char *duplex;
@@ -9383,7 +9416,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
if (rc)
goto hwrm_phy_qcaps_exit;
- bp->phy_flags = resp->flags;
+ bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
struct ethtool_eee *eee = &bp->eee;
u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
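phy_flags is widened from u8 to u32 (see the bnxt.h hunk below) so that the 8-bit flags and the new 16-bit flags2 from the PORT_PHY_QCAPS response share one field, with flags2 shifted up by 8. The BNXT_PHY_FL_NO_PAUSE/NO_PFC definitions apply the same shift, so existing bitmask tests keep working unchanged; a sketch:

    /* bits 7:0 = resp->flags, bits 23:8 = resp->flags2 << 8 */
    u32 phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);

    if (phy_flags & (PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED << 8))
        ;  /* pause is not supported on this PHY */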
@@ -9433,7 +9466,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
struct bnxt_link_info *link_info = &bp->link_info;
struct hwrm_port_phy_qcfg_output *resp;
struct hwrm_port_phy_qcfg_input *req;
- u8 link_up = link_info->link_up;
+ u8 link_state = link_info->link_state;
bool support_changed = false;
int rc;
@@ -9534,14 +9567,14 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
/* TODO: need to add more logic to report VF link */
if (chng_link_state) {
if (link_info->phy_link_status == BNXT_LINK_LINK)
- link_info->link_up = 1;
+ link_info->link_state = BNXT_LINK_STATE_UP;
else
- link_info->link_up = 0;
- if (link_up != link_info->link_up)
+ link_info->link_state = BNXT_LINK_STATE_DOWN;
+ if (link_state != link_info->link_state)
bnxt_report_link(bp);
} else {
- /* alwasy link down if not require to update link state */
- link_info->link_up = 0;
+ /* always link down if not required to update link state */
+ link_info->link_state = BNXT_LINK_STATE_DOWN;
}
hwrm_req_drop(bp, req);
@@ -9741,7 +9774,18 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
return rc;
req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
- return hwrm_req_send(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
+ mutex_lock(&bp->link_lock);
+ /* The device is not obliged to bring the link down in certain
+ * scenarios, even when forced. Setting the state to unknown is
+ * consistent with driver startup and forces the link state to be
+ * reported during a subsequent open, based on PORT_PHY_QCFG.
+ */
+ bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
+ mutex_unlock(&bp->link_lock);
+ }
+ return rc;
}
static int bnxt_fw_reset_via_optee(struct bnxt *bp)
@@ -10172,7 +10216,7 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
/* The last close may have shutdown the link, so need to call
* PHY_CFG to bring it back up.
*/
- if (!bp->link_info.link_up)
+ if (!BNXT_LINK_IS_UP(bp))
update_link = true;
if (!bnxt_eee_config_ok(bp))
@@ -10307,6 +10351,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
+ bnxt_ptp_init_rtc(bp, true);
return 0;
open_err_irq:
@@ -11403,7 +11448,7 @@ static void bnxt_timer(struct timer_list *t)
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
bnxt_fw_health_check(bp);
- if (bp->link_info.link_up && bp->stats_coal_ticks) {
+ if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) {
set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
bnxt_queue_sp_work(bp);
}
@@ -12104,11 +12149,6 @@ int bnxt_fw_init_one(struct bnxt *bp)
if (rc)
return rc;
- /* In case fw capabilities have changed, destroy the unneeded
- * reporters and create newly capable ones.
- */
- bnxt_dl_fw_reporters_destroy(bp, false);
- bnxt_dl_fw_reporters_create(bp);
bnxt_fw_init_one_p3(bp);
return 0;
}
@@ -12937,7 +12977,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
cancel_delayed_work_sync(&bp->fw_reset_task);
bp->sp_event = 0;
- bnxt_dl_fw_reporters_destroy(bp, true);
+ bnxt_dl_fw_reporters_destroy(bp);
bnxt_dl_unregister(bp);
bnxt_shutdown_tc(bp);
@@ -13430,7 +13470,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_BNXT_SRIOV
init_waitqueue_head(&bp->sriov_cfg_wait);
- mutex_init(&bp->sriov_lock);
#endif
if (BNXT_SUPPORTS_TPA(bp)) {
bp->gro_func = bnxt_gro_func_5730x;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 666fc1e7a7d2..61aa3e8c5952 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1175,7 +1175,11 @@ struct bnxt_link_info {
#define BNXT_PHY_STATE_ENABLED 0
#define BNXT_PHY_STATE_DISABLED 1
- u8 link_up;
+ u8 link_state;
+#define BNXT_LINK_STATE_UNKNOWN 0
+#define BNXT_LINK_STATE_DOWN 1
+#define BNXT_LINK_STATE_UP 2
+#define BNXT_LINK_IS_UP(bp) ((bp)->link_info.link_state == BNXT_LINK_STATE_UP)
u8 duplex;
#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF
#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
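Replacing the boolean link_up with a three-state link_state lets the driver mark the state unknown after a forced link shutdown, guaranteeing that the next link query reports whatever it finds. A sketch of why a bool cannot express this, with a hypothetical "up" result from the query:

    u8 prev = link_info->link_state;    /* may be BNXT_LINK_STATE_UNKNOWN */

    link_info->link_state = up ? BNXT_LINK_STATE_UP : BNXT_LINK_STATE_DOWN;
    if (prev != link_info->link_state)
        bnxt_report_link(bp);           /* always fires after UNKNOWN */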
@@ -1958,6 +1962,7 @@ struct bnxt {
#define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000
#define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000
#define BNXT_FW_CAP_HOT_RESET 0x00200000
+ #define BNXT_FW_CAP_PTP_RTC 0x00400000
#define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000
#define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000
#define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000
@@ -2067,12 +2072,6 @@ struct bnxt {
wait_queue_head_t sriov_cfg_wait;
bool sriov_cfg;
#define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000)
-
- /* lock to protect VF-rep creation/cleanup via
- * multiple paths such as ->sriov_configure() and
- * devlink ->eswitch_mode_set()
- */
- struct mutex sriov_lock;
#endif
#if BITS_PER_LONG == 32
@@ -2099,8 +2098,8 @@ struct bnxt {
u32 lpi_tmr_lo;
u32 lpi_tmr_hi;
- /* copied from flags in hwrm_port_phy_qcaps_output */
- u8 phy_flags;
+ /* copied from flags and flags2 in hwrm_port_phy_qcaps_output */
+ u32 phy_flags;
#define BNXT_PHY_FL_EEE_CAP PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED
#define BNXT_PHY_FL_EXT_LPBK PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED
#define BNXT_PHY_FL_AN_PHY_LPBK PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED
@@ -2109,6 +2108,8 @@ struct bnxt {
#define BNXT_PHY_FL_NO_PHY_LPBK PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED
#define BNXT_PHY_FL_FW_MANAGED_LKDN PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN
#define BNXT_PHY_FL_NO_FCS PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS
+#define BNXT_PHY_FL_NO_PAUSE (PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED << 8)
+#define BNXT_PHY_FL_NO_PFC (PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED << 8)
u8 num_tests;
struct bnxt_test_info *test_info;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 217ff597cdf2..caab3d626a2a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -627,7 +627,8 @@ static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
int rc;
if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
- !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
+ !(bp->dcbx_cap & DCB_CAP_DCBX_HOST) ||
+ (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
return -EINVAL;
if (!my_pfc) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index f6e21fac0e69..0c17f90d44a2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -241,37 +241,37 @@ static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
.recover = bnxt_fw_recover,
};
-void bnxt_dl_fw_reporters_create(struct bnxt *bp)
+static struct devlink_health_reporter *
+__bnxt_dl_reporter_create(struct bnxt *bp,
+ const struct devlink_health_reporter_ops *ops)
{
- struct bnxt_fw_health *health = bp->fw_health;
-
- if (!health || health->fw_reporter)
- return;
+ struct devlink_health_reporter *reporter;
- health->fw_reporter =
- devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops,
- 0, bp);
- if (IS_ERR(health->fw_reporter)) {
- netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
- PTR_ERR(health->fw_reporter));
- health->fw_reporter = NULL;
- bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+ reporter = devlink_health_reporter_create(bp->dl, ops, 0, bp);
+ if (IS_ERR(reporter)) {
+ netdev_warn(bp->dev, "Failed to create %s health reporter, rc = %ld\n",
+ ops->name, PTR_ERR(reporter));
+ return NULL;
}
+
+ return reporter;
}
-void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
+void bnxt_dl_fw_reporters_create(struct bnxt *bp)
{
- struct bnxt_fw_health *health = bp->fw_health;
+ struct bnxt_fw_health *fw_health = bp->fw_health;
- if (!health)
- return;
+ if (fw_health && !fw_health->fw_reporter)
+ fw_health->fw_reporter = __bnxt_dl_reporter_create(bp, &bnxt_dl_fw_reporter_ops);
+}
- if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all)
- return;
+void bnxt_dl_fw_reporters_destroy(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
- if (health->fw_reporter) {
- devlink_health_reporter_destroy(health->fw_reporter);
- health->fw_reporter = NULL;
+ if (fw_health && fw_health->fw_reporter) {
+ devlink_health_reporter_destroy(fw_health->fw_reporter);
+ fw_health->fw_reporter = NULL;
}
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index a715458abc30..b8105065367b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -75,7 +75,7 @@ void bnxt_devlink_health_fw_report(struct bnxt *bp);
void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy);
void bnxt_dl_health_fw_recovery_done(struct bnxt *bp);
void bnxt_dl_fw_reporters_create(struct bnxt *bp);
-void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all);
+void bnxt_dl_fw_reporters_destroy(struct bnxt *bp);
int bnxt_dl_register(struct bnxt *bp);
void bnxt_dl_unregister(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8aaa2335f848..22e965e18fbc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -11,6 +11,7 @@
#include <linux/ctype.h>
#include <linux/stringify.h>
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/linkmode.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
@@ -803,9 +804,11 @@ static void bnxt_get_ringparam(struct net_device *dev,
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
+ kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
} else {
ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
ering->rx_jumbo_max_pending = 0;
+ kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
@@ -1659,15 +1662,19 @@ static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
struct ethtool_link_ksettings *lk_ksettings)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
u16 fw_speeds = link_info->support_speeds;
BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
fw_speeds = link_info->support_pam4_speeds;
BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, supported);
- ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
- ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
- Asym_Pause);
+ if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) {
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ Pause);
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ Asym_Pause);
+ }
if (link_info->support_auto_speeds ||
link_info->support_pam4_auto_speeds)
@@ -1898,7 +1905,8 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
/* any change to autoneg will cause link change, therefore the
* driver should put back the original pause setting in autoneg
*/
- set_pause = true;
+ if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
+ set_pause = true;
} else {
u8 phy_type = link_info->phy_type;
@@ -2090,7 +2098,7 @@ static int bnxt_set_pauseparam(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
- if (!BNXT_PHY_CFG_ABLE(bp))
+ if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
return -EOPNOTSUPP;
mutex_lock(&bp->link_lock);
@@ -2101,9 +2109,7 @@ static int bnxt_set_pauseparam(struct net_device *dev,
}
link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
- if (bp->hwrm_spec_code >= 0x10201)
- link_info->req_flow_ctrl =
- PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
+ link_info->req_flow_ctrl = 0;
} else {
/* when transition from auto pause to force pause,
* force a link change
@@ -2132,7 +2138,7 @@ static u32 bnxt_get_link(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
/* TODO: handle MF, VF, driver close case */
- return bp->link_info.link_up;
+ return BNXT_LINK_IS_UP(bp);
}
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
@@ -2509,6 +2515,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
u8 *kmem = NULL;
u32 modify_len;
u32 item_len;
+ u8 cmd_err;
u16 index;
int rc;
@@ -2592,6 +2599,8 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
}
rc = hwrm_req_send_silent(bp, install);
+ if (!rc)
+ break;
if (defrag_attempted) {
/* We have tried to defragment already in the previous
@@ -2600,15 +2609,24 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
break;
}
- if (rc && ((struct hwrm_err_output *)resp)->cmd_err ==
- NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
+ cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;
+
+ switch (cmd_err) {
+ case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK:
+ netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure Anti-rollback detected\n");
+ rc = -EALREADY;
+ break;
+ case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
install->flags =
cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
rc = hwrm_req_send_silent(bp, install);
+ if (!rc)
+ break;
+
+ cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;
- if (rc && ((struct hwrm_err_output *)resp)->cmd_err ==
- NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
+ if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
/* FW has cleared NVM area, driver will create
* UPDATE directory and try the flash again
*/
@@ -2618,11 +2636,13 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
BNX_DIR_TYPE_UPDATE,
BNX_DIR_ORDINAL_FIRST,
0, 0, item_len, NULL, 0);
- } else if (rc) {
- netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
+ if (!rc)
+ break;
}
- } else if (rc) {
- netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
+ fallthrough;
+ default:
+ netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x cmd_err :%x\n",
+ rc, cmd_err);
}
} while (defrag_attempted && !rc);
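The rewrite flattens the nested error handling into a switch on cmd_err: a successful send now breaks out early, FRAG_ERR retries with defragmentation, and every unhandled code falls through to a single error print via the kernel's fallthrough pseudo-keyword (which expands to __attribute__((fallthrough)) where the compiler supports it). The minimal shape of that pattern:

    switch (cmd_err) {
    case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
        /* retry with defrag; break on success, otherwise leave rc set */
        fallthrough;
    default:
        netdev_err(dev, "install failed rc: %x cmd_err: %x\n", rc, cmd_err);
    }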
@@ -3324,7 +3344,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
return rc;
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
- if (bp->link_info.link_up)
+ if (BNXT_LINK_IS_UP(bp))
fw_speed = bp->link_info.link_speed;
else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index ea86c54247c7..b7100edbd6dd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -369,6 +369,12 @@ struct cmd_nums {
#define HWRM_FUNC_PTP_EXT_CFG 0x1a0UL
#define HWRM_FUNC_PTP_EXT_QCFG 0x1a1UL
#define HWRM_FUNC_KEY_CTX_ALLOC 0x1a2UL
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2 0x1a3UL
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2 0x1a4UL
+ #define HWRM_FUNC_DBR_PACING_CFG 0x1a5UL
+ #define HWRM_FUNC_DBR_PACING_QCFG 0x1a6UL
+ #define HWRM_FUNC_DBR_PACING_BROADCAST_EVENT 0x1a7UL
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2 0x1a8UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -390,6 +396,9 @@ struct cmd_nums {
#define HWRM_MFG_PRVSN_IMPORT_CERT 0x212UL
#define HWRM_MFG_PRVSN_GET_STATE 0x213UL
#define HWRM_MFG_GET_NVM_MEASUREMENT 0x214UL
+ #define HWRM_MFG_PSOC_QSTATUS 0x215UL
+ #define HWRM_MFG_SELFTEST_QLIST 0x216UL
+ #define HWRM_MFG_SELFTEST_EXEC 0x217UL
#define HWRM_TF 0x2bcUL
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
@@ -532,8 +541,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 63
-#define HWRM_VERSION_STR "1.10.2.63"
+#define HWRM_VERSION_RSVD 73
+#define HWRM_VERSION_STR "1.10.2.73"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -757,10 +766,11 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
#define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
#define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_MASTER 0x43UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL
#define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL
#define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x46UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x47UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1112,34 +1122,37 @@ struct hwrm_async_event_cmpl_echo_request {
__le32 event_data1;
};
-/* hwrm_async_event_cmpl_phc_master (size:128b/16B) */
-struct hwrm_async_event_cmpl_phc_master {
+/* hwrm_async_event_cmpl_phc_update (size:128b/16B) */
+struct hwrm_async_event_cmpl_phc_update {
__le16 type;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_LAST ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_HWRM_ASYNC_EVENT
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT
__le16 event_id;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_ID_PHC_MASTER 0x43UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_ID_PHC_MASTER
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE 0x43UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE
__le32 event_data2;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_MASTER_FID_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_SEC_FID_MASK 0xffff0000UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_SEC_FID_SFT 16
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_SFT 16
u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_V 0x1UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_MASK 0xfUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_FAILOVER
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK 0xfUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE 0x4UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT 4
};
/* hwrm_async_event_cmpl_pps_timestamp (size:128b/16B) */
@@ -1330,6 +1343,30 @@ struct hwrm_async_event_cmpl_error_report_nvm {
#define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE
};
+/* hwrm_async_event_cmpl_error_report_doorbell_drop_threshold (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_doorbell_drop_threshold {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
+};
+
/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
__le16 req_type;
@@ -1589,6 +1626,10 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_REQUIRED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED 0x20000000UL
u8 max_schqs;
u8 mpc_chnls_cap;
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
@@ -2455,7 +2496,7 @@ struct hwrm_func_backing_store_qcaps_output {
__le16 rkc_entry_size;
__le32 tkc_max_entries;
__le32 rkc_max_entries;
- u8 rsvd[7];
+ u8 rsvd1[7];
u8 valid;
};
@@ -3164,7 +3205,7 @@ struct hwrm_func_ptp_pin_cfg_output {
u8 valid;
};
-/* hwrm_func_ptp_cfg_input (size:320b/40B) */
+/* hwrm_func_ptp_cfg_input (size:384b/48B) */
struct hwrm_func_ptp_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3178,6 +3219,7 @@ struct hwrm_func_ptp_cfg_input {
#define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD 0x8UL
#define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP 0x10UL
#define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE 0x20UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME 0x40UL
u8 ptp_pps_event;
#define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_INTERNAL 0x1UL
#define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_EXTERNAL 0x2UL
@@ -3204,6 +3246,7 @@ struct hwrm_func_ptp_cfg_input {
__le32 ptp_freq_adj_ext_up;
__le32 ptp_freq_adj_ext_phase_lower;
__le32 ptp_freq_adj_ext_phase_upper;
+ __le64 ptp_set_time;
};
/* hwrm_func_ptp_cfg_output (size:128b/16B) */
@@ -3243,6 +3286,308 @@ struct hwrm_func_ptp_ts_query_output {
u8 valid;
};
+/* hwrm_func_ptp_ext_cfg_input (size:256b/32B) */
+struct hwrm_func_ptp_ext_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 enables;
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_MASTER_FID 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_FID 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_MODE 0x4UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_FAILOVER_TIMER 0x8UL
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ u8 phc_sec_mode;
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_SWITCH 0x0UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_ALL 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_LAST FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY
+ u8 unused_0;
+ __le32 failover_timer;
+ u8 unused_1[4];
+};
+
+/* hwrm_func_ptp_ext_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_ext_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_ext_qcfg_input (size:192b/24B) */
+struct hwrm_func_ptp_ext_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_func_ptp_ext_qcfg_output (size:256b/32B) */
+struct hwrm_func_ptp_ext_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ __le16 phc_active_fid0;
+ __le16 phc_active_fid1;
+ __le32 last_failover_event;
+ __le16 from_fid;
+ __le16 to_fid;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_cfg_v2_input (size:448b/56B) */
+struct hwrm_func_backing_store_cfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ __le64 page_dir;
+ __le32 num_entries;
+ __le16 entry_size;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+};
+
+/* hwrm_func_backing_store_cfg_v2_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 rsvd0[7];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_qcfg_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ u8 rsvd[4];
+};
+
+/* hwrm_func_backing_store_qcfg_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ __le64 page_dir;
+ __le32 num_entries;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ u8 rsvd[2];
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ u8 rsvd2[7];
+ u8 valid;
+};
+
+/* qpc_split_entries (size:128b/16B) */
+struct qpc_split_entries {
+ __le32 qp_num_l2_entries;
+ __le32 qp_num_qp1_entries;
+ __le32 rsvd[2];
+};
+
+/* srq_split_entries (size:128b/16B) */
+struct srq_split_entries {
+ __le32 srq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* cq_split_entries (size:128b/16B) */
+struct cq_split_entries {
+ __le32 cq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* vnic_split_entries (size:128b/16B) */
+struct vnic_split_entries {
+ __le32 vnic_num_vnic_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* mrav_split_entries (size:128b/16B) */
+struct mrav_split_entries {
+ __le32 mrav_num_av_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcaps_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
+ u8 rsvd[6];
+};
+
+/* hwrm_func_backing_store_qcaps_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcaps_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
+ __le16 entry_size;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
+ __le32 instance_bit_map;
+ u8 ctx_init_value;
+ u8 ctx_init_offset;
+ u8 entry_multiple;
+ u8 rsvd;
+ __le32 max_num_entries;
+ __le32 min_num_entries;
+ __le16 next_valid_type;
+ u8 subtype_valid_cnt;
+ u8 rsvd2;
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ u8 rsvd3[3];
+ u8 valid;
+};
+
/* hwrm_func_drv_if_change_input (size:192b/24B) */
struct hwrm_func_drv_if_change_input {
__le16 req_type;
@@ -3741,7 +4086,7 @@ struct hwrm_port_phy_qcfg_output {
u8 valid;
};
-/* hwrm_port_mac_cfg_input (size:384b/48B) */
+/* hwrm_port_mac_cfg_input (size:448b/56B) */
struct hwrm_port_mac_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3807,7 +4152,8 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
u8 unused_0[3];
__le32 ptp_freq_adj_ppb;
- __le32 ptp_adj_phase;
+ u8 unused_1[4];
+ __le64 ptp_adj_phase;
};
/* hwrm_port_mac_cfg_output (size:128b/16B) */
@@ -3850,6 +4196,7 @@ struct hwrm_port_mac_ptp_qcfg_output {
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK 0x10UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED 0x20UL
u8 unused_0[3];
__le32 rx_ts_reg_off_lower;
__le32 rx_ts_reg_off_upper;
@@ -4339,7 +4686,8 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_4
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_12 0xcUL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_12
__le16 supported_speeds_force_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
@@ -4399,7 +4747,7 @@ struct hwrm_port_phy_qcaps_output {
__le16 flags2;
#define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
#define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
- u8 unused_0[1];
+ u8 internal_port_cnt;
u8 valid;
};
@@ -6221,12 +6569,13 @@ struct hwrm_vnic_rss_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 hash_type;
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
__le16 vnic_id;
u8 ring_table_pair_index;
u8 hash_mode_flags;
@@ -7898,6 +8247,7 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
u8 valid;
};
+/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_query_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -8909,6 +9259,50 @@ struct hwrm_dbg_qcfg_output {
u8 valid;
};
+/* hwrm_dbg_crashdump_medium_cfg_input (size:320b/40B) */
+struct hwrm_dbg_crashdump_medium_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 output_dest_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR 0x1UL
+ __le16 pg_size_lvl;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_MASK 0x3UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_SFT 0
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_0 0x0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_1 0x1UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2 0x2UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_MASK 0x1cUL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_SFT 2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K (0x0UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K (0x1UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K (0x2UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_2M (0x3UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8M (0x4UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G (0x5UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_MASK 0xffe0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_SFT 5
+ __le32 size;
+ __le32 coredump_component_disable_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_NVRAM 0x1UL
+ __le32 unused_0;
+ __le64 pbl;
+};
+
+/* hwrm_dbg_crashdump_medium_cfg_output (size:128b/16B) */
+struct hwrm_dbg_crashdump_medium_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_1[7];
+ u8 valid;
+};
+
/* coredump_segment_record (size:128b/16B) */
struct coredump_segment_record {
__le16 component_id;
@@ -9372,8 +9766,35 @@ struct hwrm_nvm_install_update_output {
__le16 resp_len;
__le64 installed_items;
u8 result;
- #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_FAILURE 0xffUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_MALLOC_FAILURE 0xfdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER 0xfbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER 0xf3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE 0xf2UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER 0xecUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE 0xebUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM 0xeaUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH 0xe9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST 0xe8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER 0xe7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM 0xe6UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM 0xe5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH 0xe4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE 0xe1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV 0xceUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID 0xcdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR 0xccUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID 0xcbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM 0xc5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM 0xc4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM 0xc3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR 0xb9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR 0xb8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR 0xb7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND 0xb0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED 0xa7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED
u8 problem_item;
#define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
#define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 48520967746f..a0b321a19361 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -19,6 +19,20 @@
#include "bnxt_hwrm.h"
#include "bnxt_ptp.h"
+static int bnxt_ptp_cfg_settime(struct bnxt *bp, u64 time)
+{
+ struct hwrm_func_ptp_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG);
+ if (rc)
+ return rc;
+
+ req->enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME);
+ req->ptp_set_time = cpu_to_le64(time);
+ return hwrm_req_send(bp, req);
+}
+
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off)
{
unsigned int ptp_class;
@@ -48,6 +62,9 @@ static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info,
ptp_info);
u64 ns = timespec64_to_ns(ts);
+ if (ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
+ return bnxt_ptp_cfg_settime(ptp->bp, ns);
+
spin_lock_bh(&ptp->ptp_lock);
timecounter_init(&ptp->tc, &ptp->cc, ns);
spin_unlock_bh(&ptp->ptp_lock);
@@ -131,11 +148,47 @@ static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info,
return 0;
}
+/* Caller holds ptp_lock */
+void bnxt_ptp_update_current_time(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ bnxt_refclk_read(ptp->bp, NULL, &ptp->current_time);
+ WRITE_ONCE(ptp->old_time, ptp->current_time);
+}
+
+static int bnxt_ptp_adjphc(struct bnxt_ptp_cfg *ptp, s64 delta)
+{
+ struct hwrm_port_mac_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(ptp->bp, req, HWRM_PORT_MAC_CFG);
+ if (rc)
+ return rc;
+
+ req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE);
+ req->ptp_adj_phase = cpu_to_le64(delta);
+
+ rc = hwrm_req_send(ptp->bp, req);
+ if (rc) {
+ netdev_err(ptp->bp->dev, "ptp adjphc failed. rc = %x\n", rc);
+ } else {
+ spin_lock_bh(&ptp->ptp_lock);
+ bnxt_ptp_update_current_time(ptp->bp);
+ spin_unlock_bh(&ptp->ptp_lock);
+ }
+
+ return rc;
+}
+
static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
+ if (ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
+ return bnxt_ptp_adjphc(ptp, delta);
+
spin_lock_bh(&ptp->ptp_lock);
timecounter_adjtime(&ptp->tc, delta);
spin_unlock_bh(&ptp->ptp_lock);
@@ -714,7 +767,70 @@ static bool bnxt_pps_config_ok(struct bnxt *bp)
return !(bp->fw_cap & BNXT_FW_CAP_PTP_PPS) == !ptp->ptp_info.pin_config;
}
-int bnxt_ptp_init(struct bnxt *bp)
+static void bnxt_ptp_timecounter_init(struct bnxt *bp, bool init_tc)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp->ptp_clock) {
+ memset(&ptp->cc, 0, sizeof(ptp->cc));
+ ptp->cc.read = bnxt_cc_read;
+ ptp->cc.mask = CYCLECOUNTER_MASK(48);
+ ptp->cc.shift = 0;
+ ptp->cc.mult = 1;
+ ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
+ }
+ if (init_tc)
+ timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+}
+
+/* Caller holds ptp_lock */
+void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns)
+{
+ timecounter_init(&ptp->tc, &ptp->cc, ns);
+ /* For RTC, cycle_last must be in sync with the timecounter value. */
+ ptp->tc.cycle_last = ns & ptp->cc.mask;
+}
+
+int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
+{
+ struct timespec64 tsp;
+ u64 ns;
+ int rc;
+
+ if (!bp->ptp_cfg || !(bp->fw_cap & BNXT_FW_CAP_PTP_RTC))
+ return -ENODEV;
+
+ if (!phc_cfg) {
+ ktime_get_real_ts64(&tsp);
+ ns = timespec64_to_ns(&tsp);
+ rc = bnxt_ptp_cfg_settime(bp, ns);
+ if (rc)
+ return rc;
+ } else {
+ rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME, &ns);
+ if (rc)
+ return rc;
+ }
+ spin_lock_bh(&bp->ptp_cfg->ptp_lock);
+ bnxt_ptp_rtc_timecounter_init(bp->ptp_cfg, ns);
+ spin_unlock_bh(&bp->ptp_cfg->ptp_lock);
+
+ return 0;
+}
+
+static void bnxt_ptp_free(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (ptp->ptp_clock) {
+ ptp_clock_unregister(ptp->ptp_clock);
+ ptp->ptp_clock = NULL;
+ kfree(ptp->ptp_info.pin_config);
+ ptp->ptp_info.pin_config = NULL;
+ }
+}
+
+int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
int rc;
@@ -726,26 +842,23 @@ int bnxt_ptp_init(struct bnxt *bp)
if (rc)
return rc;
+ if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
+ bnxt_ptp_timecounter_init(bp, false);
+ rc = bnxt_ptp_init_rtc(bp, phc_cfg);
+ if (rc)
+ goto out;
+ }
+
if (ptp->ptp_clock && bnxt_pps_config_ok(bp))
return 0;
- if (ptp->ptp_clock) {
- ptp_clock_unregister(ptp->ptp_clock);
- ptp->ptp_clock = NULL;
- kfree(ptp->ptp_info.pin_config);
- ptp->ptp_info.pin_config = NULL;
- }
+ bnxt_ptp_free(bp);
+
atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
spin_lock_init(&ptp->ptp_lock);
- memset(&ptp->cc, 0, sizeof(ptp->cc));
- ptp->cc.read = bnxt_cc_read;
- ptp->cc.mask = CYCLECOUNTER_MASK(48);
- ptp->cc.shift = 0;
- ptp->cc.mult = 1;
-
- ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
- timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+ if (!(bp->fw_cap & BNXT_FW_CAP_PTP_RTC))
+ bnxt_ptp_timecounter_init(bp, true);
ptp->ptp_info = bnxt_ptp_caps;
if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) {
@@ -757,8 +870,8 @@ int bnxt_ptp_init(struct bnxt *bp)
int err = PTR_ERR(ptp->ptp_clock);
ptp->ptp_clock = NULL;
- bnxt_unmap_ptp_regs(bp);
- return err;
+ rc = err;
+ goto out;
}
if (bp->flags & BNXT_FLAG_CHIP_P5) {
spin_lock_bh(&ptp->ptp_lock);
@@ -768,6 +881,11 @@ int bnxt_ptp_init(struct bnxt *bp)
ptp_schedule_worker(ptp->ptp_clock, 0);
}
return 0;
+
+out:
+ bnxt_ptp_free(bp);
+ bnxt_unmap_ptp_regs(bp);
+ return rc;
}
void bnxt_ptp_clear(struct bnxt *bp)
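In RTC mode the cyclecounter is an identity transform (mult 1, shift 0) over a 48-bit counter that is itself the low half of the time, so after seeding the timecounter with ns, bnxt_ptp_rtc_timecounter_init() pins tc.cycle_last to ns & mask rather than the fresh counter read that timecounter_init() takes; otherwise the first read would measure its delta from a slightly different origin than the seeded nsec. A rough sketch of the read path under these assumptions:

    /* rough shape of timecounter_read() with mult == 1, shift == 0 */
    u64 now   = cc->read(cc);                       /* 48-bit counter */
    u64 delta = (now - tc->cycle_last) & cc->mask;  /* cycles == ns here */
    tc->nsec += delta;
    tc->cycle_last = now;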
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index 7c528e1f8713..373baf45884b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -131,12 +131,15 @@ do { \
#endif
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
+void bnxt_ptp_update_current_time(struct bnxt *bp);
void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
-int bnxt_ptp_init(struct bnxt *bp);
+void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns);
+int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg);
+int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg);
void bnxt_ptp_clear(struct bnxt *bp);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 1d177fed44a6..ddf2f3963abe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -846,7 +846,7 @@ void bnxt_sriov_disable(struct bnxt *bp)
return;
/* synchronize VF and VF-rep create and destroy */
- mutex_lock(&bp->sriov_lock);
+ devl_lock(bp->dl);
bnxt_vf_reps_destroy(bp);
if (pci_vfs_assigned(bp->pdev)) {
@@ -859,7 +859,7 @@ void bnxt_sriov_disable(struct bnxt *bp)
/* Free the HW resources reserved for various VF's */
bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
}
- mutex_unlock(&bp->sriov_lock);
+ devl_unlock(bp->dl);
bnxt_free_vf_resources(bp);
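With the private sriov_lock gone (see the bnxt.h and bnxt.c hunks above), VF-rep creation and teardown are serialized by the devlink instance lock instead. The devlink core already holds that lock around ->eswitch_mode_set(), which is why the callback in the next file loses its own locking entirely; paths outside devlink callbacks, like this one, take it explicitly:

    devl_lock(bp->dl);
    bnxt_vf_reps_destroy(bp);
    devl_unlock(bp->dl);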
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 8eb28e088582..eb4803b11c0e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -559,44 +559,34 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
- int rc = 0;
- mutex_lock(&bp->sriov_lock);
if (bp->eswitch_mode == mode) {
netdev_info(bp->dev, "already in %s eswitch mode\n",
mode == DEVLINK_ESWITCH_MODE_LEGACY ?
"legacy" : "switchdev");
- rc = -EINVAL;
- goto done;
+ return -EINVAL;
}
switch (mode) {
case DEVLINK_ESWITCH_MODE_LEGACY:
bnxt_vf_reps_destroy(bp);
- break;
+ return 0;
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
if (bp->hwrm_spec_code < 0x10803) {
netdev_warn(bp->dev, "FW does not support SRIOV E-Switch SWITCHDEV mode\n");
- rc = -ENOTSUPP;
- goto done;
+ return -ENOTSUPP;
}
if (pci_num_vf(bp->pdev) == 0) {
netdev_info(bp->dev, "Enable VFs before setting switchdev mode\n");
- rc = -EPERM;
- goto done;
+ return -EPERM;
}
- rc = bnxt_vf_reps_create(bp);
- break;
+ return bnxt_vf_reps_create(bp);
default:
- rc = -EINVAL;
- goto done;
+ return -EINVAL;
}
-done:
- mutex_unlock(&bp->sriov_lock);
- return rc;
}
#endif
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 87f1056e29ff..2dd79af9411b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -76,7 +76,7 @@ static inline void bcmgenet_writel(u32 value, void __iomem *offset)
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
__raw_writel(value, offset);
else
- writel_relaxed(value, offset);
+ writel(value, offset);
}
static inline u32 bcmgenet_readl(void __iomem *offset)
@@ -84,7 +84,7 @@ static inline u32 bcmgenet_readl(void __iomem *offset)
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
return __raw_readl(offset);
else
- return readl_relaxed(offset);
+ return readl(offset);
}
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
@@ -1368,7 +1368,7 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
if (!p->eee_enabled) {
bcmgenet_eee_enable_set(dev, false);
} else {
- ret = phy_init_eee(dev->phydev, 0);
+ ret = phy_init_eee(dev->phydev, false);
if (ret) {
netif_err(priv, hw, dev, "EEE initialization failed\n");
return ret;
@@ -2287,8 +2287,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
dma_length_status = status->length_status;
if (dev->features & NETIF_F_RXCSUM) {
rx_csum = (__force __be16)(status->rx_csum & 0xffff);
- skb->csum = (__force __wsum)ntohs(rx_csum);
- skb->ip_summed = CHECKSUM_COMPLETE;
+ if (rx_csum) {
+ skb->csum = (__force __wsum)ntohs(rx_csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
}
/* DMA flags and length are still valid no matter how
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index e31a5a397f11..f55d9d9c01a8 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -40,6 +40,13 @@
void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+
+ if (!device_can_wakeup(kdev)) {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ return;
+ }
wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
wol->wolopts = priv->wolopts;
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 9ddbee7de72b..f0a7d8396a4a 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -12,6 +12,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
+#include <linux/phy/phy.h>
#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP)
#define MACB_EXT_DESC
@@ -1291,6 +1292,9 @@ struct macb {
u32 wol;
struct macb_ptp_info *ptp_info; /* macb-ptp interface */
+
+ struct phy *sgmii_phy; /* for ZynqMP SGMII mode */
+
#ifdef MACB_EXT_DESC
uint8_t hw_dma_cap;
#endif
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 98498a76ae16..800d5ced5800 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -34,7 +34,9 @@
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include "macb.h"
/* This structure is only used for MACB on SiFive FU540 devices */
@@ -1573,7 +1575,14 @@ static int macb_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete_done(napi, work_done);
- /* Packets received while interrupts were disabled */
+ /* RSR bits only seem to propagate to raise interrupts when
+ * interrupts are enabled at the time, so if bits are already
+ * set due to packets received while interrupts were disabled,
+ * they will not cause another interrupt to be generated when
+ * interrupts are re-enabled.
+ * Check for this case here. This has been seen to happen
+ * around 30% of the time under heavy network load.
+ */
status = macb_readl(bp, RSR);
if (status) {
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -1581,6 +1590,22 @@ static int macb_poll(struct napi_struct *napi, int budget)
napi_reschedule(napi);
} else {
queue_writel(queue, IER, bp->rx_intr_mask);
+
+ /* In rare cases, packets could have been received in
+ * the window between the check above and re-enabling
+ * interrupts. Therefore, a double-check is required
+ * to avoid losing a wakeup. This can potentially race
+ * with the interrupt handler doing the same actions
+ * if an interrupt is raised just after enabling them,
+ * but this should be harmless.
+ */
+ status = macb_readl(bp, RSR);
+ if (unlikely(status)) {
+ queue_writel(queue, IDR, bp->rx_intr_mask);
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, MACB_BIT(RCOMP));
+ napi_schedule(napi);
+ }
}
}
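
The two comment blocks in the hunk above describe a classic lost-wakeup avoidance sequence: re-enable the interrupt source, then re-read the status register and, if an event slipped into the window, mask again and reschedule NAPI. A compilable toy model of the same sequence (names such as rsr and irq_enabled are illustrative stand-ins, not MACB symbols):

#include <stdatomic.h>
#include <stdbool.h>

/* Toy model of the macb_poll() re-check; "rsr" stands in for the RSR
 * register and "irq_enabled" for the IER/IDR mask state.
 */
static _Atomic unsigned int rsr;
static _Atomic bool irq_enabled;

static bool napi_may_sleep(void)
{
	atomic_store(&irq_enabled, true);          /* queue_writel(IER, ...) */
	if (atomic_load(&rsr)) {                   /* event landed in the gap */
		atomic_store(&irq_enabled, false); /* queue_writel(IDR, ...) */
		return false;                      /* i.e. napi_schedule()   */
	}
	return true;
}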
@@ -2739,10 +2764,14 @@ static int macb_open(struct net_device *dev)
macb_init_hw(bp);
- err = macb_phylink_connect(bp);
+ err = phy_power_on(bp->sgmii_phy);
if (err)
goto reset_hw;
+ err = macb_phylink_connect(bp);
+ if (err)
+ goto phy_off;
+
netif_tx_start_all_queues(dev);
if (bp->ptp_info)
@@ -2750,6 +2779,9 @@ static int macb_open(struct net_device *dev)
return 0;
+phy_off:
+ phy_power_off(bp->sgmii_phy);
+
reset_hw:
macb_reset_hw(bp);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
@@ -2775,6 +2807,8 @@ static int macb_close(struct net_device *dev)
phylink_stop(bp->phylink);
phylink_disconnect_phy(bp->phylink);
+ phy_power_off(bp->sgmii_phy);
+
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
netif_carrier_off(dev);
@@ -4544,13 +4578,55 @@ static const struct macb_config np4_config = {
.usrio = &macb_default_usrio,
};
+static int zynqmp_init(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct macb *bp = netdev_priv(dev);
+ int ret;
+
+ if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ /* Ensure PS-GTR PHY device used in SGMII mode is ready */
+ bp->sgmii_phy = devm_phy_get(&pdev->dev, "sgmii-phy");
+
+ if (IS_ERR(bp->sgmii_phy)) {
+ ret = PTR_ERR(bp->sgmii_phy);
+ dev_err_probe(&pdev->dev, ret,
+ "failed to get PS-GTR PHY\n");
+ return ret;
+ }
+
+ ret = phy_init(bp->sgmii_phy);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init PS-GTR PHY: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ /* Fully reset GEM controller at hardware level using zynqmp-reset driver,
+ * if mapped in device tree.
+ */
+ ret = device_reset_optional(&pdev->dev);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "failed to reset controller");
+ phy_exit(bp->sgmii_phy);
+ return ret;
+ }
+
+ ret = macb_init(pdev);
+ if (ret)
+ phy_exit(bp->sgmii_phy);
+
+ return ret;
+}
+
static const struct macb_config zynqmp_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
MACB_CAPS_JUMBO |
MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
- .init = macb_init,
+ .init = zynqmp_init,
.jumbo_max_len = 10240,
.usrio = &macb_default_usrio,
};
@@ -4767,7 +4843,7 @@ static int macb_probe(struct platform_device *pdev)
err = macb_mii_init(bp);
if (err)
- goto err_out_free_netdev;
+ goto err_out_phy_exit;
netif_carrier_off(dev);
@@ -4792,6 +4868,9 @@ err_out_unregister_mdio:
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
+err_out_phy_exit:
+ phy_exit(bp->sgmii_phy);
+
err_out_free_netdev:
free_netdev(dev);
@@ -4813,6 +4892,7 @@ static int macb_remove(struct platform_device *pdev)
if (dev) {
bp = netdev_priv(dev);
+ phy_exit(bp->sgmii_phy);
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 574a32f23f96..2f6484dc186a 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1409,7 +1409,8 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
struct device *dev = &bgx->pdev->dev;
struct acpi_device *adev;
- if (acpi_bus_get_device(handle, &adev))
+ adev = acpi_fetch_acpi_dev(handle);
+ if (!adev)
goto out;
acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 63521312cb90..174b1e156669 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3349,6 +3349,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
if (!adapter->registered_device_map) {
dev_err(&pdev->dev, "could not register any net devices\n");
+ err = -ENODEV;
goto out_free_dev;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index da41eee2f25c..a06003bfa04b 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -3613,6 +3613,8 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
adapter->params.pci.vpd_cap_addr =
pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
+ if (!adapter->params.pci.vpd_cap_addr)
+ return -ENODEV;
ret = get_vpd_params(adapter, &adapter->params.vpd);
if (ret < 0)
return ret;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
index 28fd2de9e4cf..1672d3afe5be 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
@@ -8,6 +8,46 @@
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"
+static int cxgb4_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int cxgb4_matchall_egress_validate(struct net_device *dev,
struct tc_cls_matchall_offload *cls)
{
@@ -48,11 +88,10 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
flow_action_for_each(i, entry, actions) {
switch (entry->id) {
case FLOW_ACTION_POLICE:
- if (entry->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
+ ret = cxgb4_policer_validate(actions, entry, extack);
+ if (ret)
+ return ret;
+
/* Convert bytes per second to bits per second */
if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
NL_SET_ERR_MSG_MOD(extack,
@@ -150,11 +189,11 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
flow_action_for_each(i, entry, &cls->rule->action)
if (entry->id == FLOW_ACTION_POLICE)
break;
- if (entry->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
+
+ ret = cxgb4_policer_validate(&cls->rule->action, entry, extack);
+ if (ret)
+ return ret;
+
/* Convert from bytes per second to Kbps */
p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
p.u.params.channel = pi->tx_chan;
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index c78b99a497df..8014eb33937c 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2363,11 +2363,13 @@ static void gemini_port_save_mac_addr(struct gemini_ethernet_port *port)
static int gemini_ethernet_port_probe(struct platform_device *pdev)
{
char *port_names[2] = { "ethernet0", "ethernet1" };
+ struct device_node *np = pdev->dev.of_node;
struct gemini_ethernet_port *port;
struct device *dev = &pdev->dev;
struct gemini_ethernet *geth;
struct net_device *netdev;
struct device *parent;
+ u8 mac[ETH_ALEN];
unsigned int id;
int irq;
int ret;
@@ -2473,6 +2475,12 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
netif_napi_add(netdev, &port->napi, gmac_napi_poll,
DEFAULT_NAPI_WEIGHT);
+ ret = of_get_mac_address(np, mac);
+ if (!ret) {
+ dev_info(dev, "Setting macaddr from DT %pM\n", mac);
+ memcpy(port->mac_addr, mac, ETH_ALEN);
+ }
+
if (is_valid_ether_addr((void *)port->mac_addr)) {
eth_hw_addr_set(netdev, (u8 *)port->mac_addr);
} else {
diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig
index 7af86b6d4150..02e0caff98e3 100644
--- a/drivers/net/ethernet/davicom/Kconfig
+++ b/drivers/net/ethernet/davicom/Kconfig
@@ -3,6 +3,19 @@
# Davicom device configuration
#
+config NET_VENDOR_DAVICOM
+ bool "Davicom devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Davicom devices. If you say Y, you will be asked
+ for your specific card in the following selections.
+
+if NET_VENDOR_DAVICOM
+
config DM9000
tristate "DM9000 support"
depends on ARM || MIPS || COLDFIRE || NIOS2 || COMPILE_TEST
@@ -22,3 +35,21 @@ config DM9000_FORCE_SIMPLE_PHY_POLL
bit to determine if the link is up or down instead of the more
costly MII PHY reads. Note, this will not work if the chip is
operating with an external PHY.
+
+config DM9051
+ tristate "DM9051 SPI support"
+ depends on SPI
+ select CRC32
+ select MDIO
+ select PHYLIB
+ select REGMAP_SPI
+ help
+ Support for DM9051 SPI chipset.
+
+ To compile this driver as a module, choose M here. The module
+ will be called dm9051.
+
+ The host's SPI master must access the DM9051 using SPI mode 0
+ on the bus.
+
+endif # NET_VENDOR_DAVICOM
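
Since the help text pins the host controller to SPI mode 0, a board without device tree support would register the chip roughly like this (a sketch; bus number, chip select and clock rate are board-specific placeholders, not values taken from the driver):

#include <linux/spi/spi.h>

/* Sketch only: bus_num, chip_select and max_speed_hz are placeholders. */
static struct spi_board_info dm9051_board_info __initdata = {
	.modalias	= "dm9051",
	.mode		= SPI_MODE_0,	/* the mode the help text requires */
	.max_speed_hz	= 15600000,
	.bus_num	= 0,
	.chip_select	= 0,
};

On device-tree systems the same information comes from a node matching the "davicom,dm9051" compatible declared at the bottom of dm9051.c.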
diff --git a/drivers/net/ethernet/davicom/Makefile b/drivers/net/ethernet/davicom/Makefile
index 173c87d21076..225f85bc1f53 100644
--- a/drivers/net/ethernet/davicom/Makefile
+++ b/drivers/net/ethernet/davicom/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_DM9000) += dm9000.o
+obj-$(CONFIG_DM9051) += dm9051.o
diff --git a/drivers/net/ethernet/davicom/dm9051.c b/drivers/net/ethernet/davicom/dm9051.c
new file mode 100644
index 000000000000..a523ddda7609
--- /dev/null
+++ b/drivers/net/ethernet/davicom/dm9051.c
@@ -0,0 +1,1260 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Davicom Semiconductor, Inc.
+ * Davicom DM9051 SPI Fast Ethernet Linux driver
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+#include "dm9051.h"
+
+#define DRVNAME_9051 "dm9051"
+
+/**
+ * struct rx_ctl_mach - rx activities record
+ * @status_err_counter: rx status error counter
+ * @large_err_counter: rx get large packet length error counter
+ * @rx_err_counter: receive packet error counter
+ * @tx_err_counter: transmit packet error counter
+ * @fifo_rst_counter: reset operation counter
+ *
+ * To keep track of the driver operation statistics
+ */
+struct rx_ctl_mach {
+ u16 status_err_counter;
+ u16 large_err_counter;
+ u16 rx_err_counter;
+ u16 tx_err_counter;
+ u16 fifo_rst_counter;
+};
+
+/**
+ * struct dm9051_rxctrl - dm9051 driver rx control
+ * @hash_table: Multicast hash-table data
+ * @rcr_all: KS_RXCR1 register setting
+ *
+ * The settings needed to control the receive filtering,
+ * such as the multicast hash filter and the receive register settings
+ */
+struct dm9051_rxctrl {
+ u16 hash_table[4];
+ u8 rcr_all;
+};
+
+/**
+ * struct dm9051_rxhdr - rx packet data header
+ * @headbyte: lead byte equal to 0x01 notifies a valid packet
+ * @status: status bits for the received packet
+ * @rxlen: packet length
+ *
+ * An Rx packet, as entered into the FIFO memory, starts with these
+ * four bytes forming the Rx header, followed by the Ethernet
+ * packet data, and ends with an appended 4-byte CRC.
+ * Both the Rx header and the CRC are used only for checking and are
+ * finally dropped by this driver
+ */
+struct dm9051_rxhdr {
+ u8 headbyte;
+ u8 status;
+ __le16 rxlen;
+};
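
To make the layout concrete, here is how the header is decoded further down in dm9051_loop_rx() (a sketch; judging by the later skb_put(skb, rxlen - 4), rxlen appears to cover the payload plus the trailing CRC):

/* Sketch of the decode step performed in dm9051_loop_rx() */
struct dm9051_rxhdr hdr;	/* filled from the FIFO via DM_SPI_MRCMD */
int rxlen;

rxlen = le16_to_cpu(hdr.rxlen);
if ((hdr.status & RSR_ERR_BITS) || rxlen > DM9051_PKT_MAX)
	/* bad frame: the driver resets and restarts the controller */ ;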
+
+/**
+ * struct board_info - maintain the saved data
+ * @spidev: spi device structure
+ * @ndev: net device structure
+ * @mdiobus: mii bus structure
+ * @phydev: phy device structure
+ * @txq: tx queue structure
+ * @regmap_dm: regmap for register read/write
+ * @regmap_dmbulk: extra regmap for bulk read/write
+ * @rxctrl_work: Work queue for updating RX mode and multicast lists
+ * @tx_work: Work queue for tx packets
+ * @pause: ethtool pause parameter structure
+ * @spi_lockm: between threads lock structure
+ * @reg_mutex: regmap access lock structure
+ * @bc: rx control statistics structure
+ * @rxhdr: rx header structure
+ * @rctl: rx control setting structure
+ * @msg_enable: message level value
+ * @imr_all: to store operating imr value for register DM9051_IMR
+ * @lcr_all: to store the operating LMCR value for register DM9051_LMCR
+ *
+ * The saved data variables, kept up to date for later retrieval and use
+ */
+struct board_info {
+ u32 msg_enable;
+ struct spi_device *spidev;
+ struct net_device *ndev;
+ struct mii_bus *mdiobus;
+ struct phy_device *phydev;
+ struct sk_buff_head txq;
+ struct regmap *regmap_dm;
+ struct regmap *regmap_dmbulk;
+ struct work_struct rxctrl_work;
+ struct work_struct tx_work;
+ struct ethtool_pauseparam pause;
+ struct mutex spi_lockm;
+ struct mutex reg_mutex;
+ struct rx_ctl_mach bc;
+ struct dm9051_rxhdr rxhdr;
+ struct dm9051_rxctrl rctl;
+ u8 imr_all;
+ u8 lcr_all;
+};
+
+static int dm9051_set_reg(struct board_info *db, unsigned int reg, unsigned int val)
+{
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, reg, val);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d set reg %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_update_bits(struct board_info *db, unsigned int reg, unsigned int mask,
+ unsigned int val)
+{
+ int ret;
+
+ ret = regmap_update_bits(db->regmap_dm, reg, mask, val);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d update bits reg %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+/* skb buffer exhausted, just discard the received data
+ */
+static int dm9051_dumpblk(struct board_info *db, u8 reg, size_t count)
+{
+ struct net_device *ndev = db->ndev;
+ unsigned int rb;
+ int ret;
+
+ /* no skb buffer available;
+ * both reg and &rb must be non-incrementing,
+ * so read one byte at a time via regmap_read
+ */
+ do {
+ ret = regmap_read(db->regmap_dm, reg, &rb);
+ if (ret < 0) {
+ netif_err(db, drv, ndev, "%s: error %d dumping read reg %02x\n",
+ __func__, ret, reg);
+ break;
+ }
+ } while (--count);
+
+ return ret;
+}
+
+static int dm9051_set_regs(struct board_info *db, unsigned int reg, const void *val,
+ size_t val_count)
+{
+ int ret;
+
+ ret = regmap_bulk_write(db->regmap_dmbulk, reg, val, val_count);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d bulk writing regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_get_regs(struct board_info *db, unsigned int reg, void *val,
+ size_t val_count)
+{
+ int ret;
+
+ ret = regmap_bulk_read(db->regmap_dmbulk, reg, val, val_count);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d bulk reading regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_write_mem(struct board_info *db, unsigned int reg, const void *buff,
+ size_t len)
+{
+ int ret;
+
+ ret = regmap_noinc_write(db->regmap_dm, reg, buff, len);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d noinc writing regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_read_mem(struct board_info *db, unsigned int reg, void *buff,
+ size_t len)
+{
+ int ret;
+
+ ret = regmap_noinc_read(db->regmap_dm, reg, buff, len);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d noinc reading regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+/* Waiting for tx-end rather than tx-req
+ * turns out to be faster
+ */
+static int dm9051_nsr_poll(struct board_info *db)
+{
+ unsigned int mval;
+ int ret;
+
+ ret = regmap_read_poll_timeout(db->regmap_dm, DM9051_NSR, mval,
+ mval & (NSR_TX2END | NSR_TX1END), 1, 20);
+ if (ret == -ETIMEDOUT)
+ netdev_err(db->ndev, "timeout in checking for tx end\n");
+ return ret;
+}
+
+static int dm9051_epcr_poll(struct board_info *db)
+{
+ unsigned int mval;
+ int ret;
+
+ ret = regmap_read_poll_timeout(db->regmap_dm, DM9051_EPCR, mval,
+ !(mval & EPCR_ERRE), 100, 10000);
+ if (ret == -ETIMEDOUT)
+ netdev_err(db->ndev, "timeout while eeprom/phy is processing\n");
+ return ret;
+}
+
+static int dm9051_irq_flag(struct board_info *db)
+{
+ struct spi_device *spi = db->spidev;
+ int irq_type = irq_get_trigger_type(spi->irq);
+
+ if (irq_type)
+ return irq_type;
+
+ return IRQF_TRIGGER_LOW;
+}
+
+static unsigned int dm9051_intcr_value(struct board_info *db)
+{
+ return (dm9051_irq_flag(db) == IRQF_TRIGGER_LOW) ?
+ INTCR_POL_LOW : INTCR_POL_HIGH;
+}
+
+static int dm9051_set_fcr(struct board_info *db)
+{
+ u8 fcr = 0;
+
+ if (db->pause.rx_pause)
+ fcr |= FCR_BKPM | FCR_FLCE;
+ if (db->pause.tx_pause)
+ fcr |= FCR_TXPEN;
+
+ return dm9051_set_reg(db, DM9051_FCR, fcr);
+}
+
+static int dm9051_set_recv(struct board_info *db)
+{
+ int ret;
+
+ ret = dm9051_set_regs(db, DM9051_MAR, db->rctl.hash_table, sizeof(db->rctl.hash_table));
+ if (ret)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_RCR, db->rctl.rcr_all); /* enable rx */
+}
+
+static int dm9051_core_reset(struct board_info *db)
+{
+ int ret;
+
+ db->bc.fifo_rst_counter++;
+
+ ret = regmap_write(db->regmap_dm, DM9051_NCR, NCR_RST); /* NCR reset */
+ if (ret)
+ return ret;
+ ret = regmap_write(db->regmap_dm, DM9051_MBNDRY, MBNDRY_BYTE); /* MemBound */
+ if (ret)
+ return ret;
+ ret = regmap_write(db->regmap_dm, DM9051_PPCR, PPCR_PAUSE_COUNT); /* Pause Count */
+ if (ret)
+ return ret;
+ ret = regmap_write(db->regmap_dm, DM9051_LMCR, db->lcr_all); /* LEDMode1 */
+ if (ret)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_INTCR, dm9051_intcr_value(db));
+}
+
+static int dm9051_update_fcr(struct board_info *db)
+{
+ u8 fcr = 0;
+
+ if (db->pause.rx_pause)
+ fcr |= FCR_BKPM | FCR_FLCE;
+ if (db->pause.tx_pause)
+ fcr |= FCR_TXPEN;
+
+ return dm9051_update_bits(db, DM9051_FCR, FCR_RXTX_BITS, fcr);
+}
+
+static int dm9051_disable_interrupt(struct board_info *db)
+{
+ return dm9051_set_reg(db, DM9051_IMR, IMR_PAR); /* disable int */
+}
+
+static int dm9051_enable_interrupt(struct board_info *db)
+{
+ return dm9051_set_reg(db, DM9051_IMR, db->imr_all); /* enable int */
+}
+
+static int dm9051_stop_mrcmd(struct board_info *db)
+{
+ return dm9051_set_reg(db, DM9051_ISR, ISR_STOP_MRCMD); /* to stop mrcmd */
+}
+
+static int dm9051_clear_interrupt(struct board_info *db)
+{
+ return dm9051_update_bits(db, DM9051_ISR, ISR_CLR_INT, ISR_CLR_INT);
+}
+
+static int dm9051_eeprom_read(struct board_info *db, int offset, u8 *to)
+{
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, offset);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_ERPRR);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+ if (ret)
+ return ret;
+
+ return regmap_bulk_read(db->regmap_dmbulk, DM9051_EPDRL, to, 2);
+}
+
+static int dm9051_eeprom_write(struct board_info *db, int offset, u8 *data)
+{
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, offset);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_write(db->regmap_dmbulk, DM9051_EPDRL, data, 2);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_WEP | EPCR_ERPRW);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ return regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+}
+
+static int dm9051_phyread(void *context, unsigned int reg, unsigned int *val)
+{
+ struct board_info *db = context;
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, DM9051_PHY | reg);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_ERPRR | EPCR_EPOS);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+ if (ret)
+ return ret;
+
+ /* val is 4 bytes of data; clear it to zero since the following
+ * regmap_bulk_read only fills the lower 2 bytes
+ */
+ *val = 0;
+ return regmap_bulk_read(db->regmap_dmbulk, DM9051_EPDRL, val, 2);
+}
+
+static int dm9051_phywrite(void *context, unsigned int reg, unsigned int val)
+{
+ struct board_info *db = context;
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, DM9051_PHY | reg);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_write(db->regmap_dmbulk, DM9051_EPDRL, &val, 2);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_EPOS | EPCR_ERPRW);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ return regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+}
+
+static int dm9051_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct board_info *db = bus->priv;
+ unsigned int val = 0xffff;
+ int ret;
+
+ if (addr == DM9051_PHY_ADDR) {
+ ret = dm9051_phyread(db, regnum, &val);
+ if (ret)
+ return ret;
+ }
+
+ return val;
+}
+
+static int dm9051_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ struct board_info *db = bus->priv;
+
+ if (addr == DM9051_PHY_ADDR)
+ return dm9051_phywrite(db, regnum, val);
+
+ return -ENODEV;
+}
+
+static void dm9051_reg_lock_mutex(void *dbcontext)
+{
+ struct board_info *db = dbcontext;
+
+ mutex_lock(&db->reg_mutex);
+}
+
+static void dm9051_reg_unlock_mutex(void *dbcontext)
+{
+ struct board_info *db = dbcontext;
+
+ mutex_unlock(&db->reg_mutex);
+}
+
+static struct regmap_config regconfigdm = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ .reg_stride = 1,
+ .cache_type = REGCACHE_NONE,
+ .read_flag_mask = 0,
+ .write_flag_mask = DM_SPI_WR,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .lock = dm9051_reg_lock_mutex,
+ .unlock = dm9051_reg_unlock_mutex,
+};
+
+static struct regmap_config regconfigdmbulk = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ .reg_stride = 1,
+ .cache_type = REGCACHE_NONE,
+ .read_flag_mask = 0,
+ .write_flag_mask = DM_SPI_WR,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .lock = dm9051_reg_lock_mutex,
+ .unlock = dm9051_reg_unlock_mutex,
+ .use_single_read = true,
+ .use_single_write = true,
+};
+
+static int dm9051_map_init(struct spi_device *spi, struct board_info *db)
+{
+ /* create two regmap instances,
+ * splitting read/write and bulk_read/bulk_write into individual regmaps
+ * to resolve a regmap execution conflict
+ */
+ regconfigdm.lock_arg = db;
+ db->regmap_dm = devm_regmap_init_spi(db->spidev, &regconfigdm);
+ if (IS_ERR(db->regmap_dm))
+ return PTR_ERR(db->regmap_dm);
+
+ regconfigdmbulk.lock_arg = db;
+ db->regmap_dmbulk = devm_regmap_init_spi(db->spidev, &regconfigdmbulk);
+ if (IS_ERR(db->regmap_dmbulk))
+ return PTR_ERR(db->regmap_dmbulk);
+
+ return 0;
+}
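
In other words, the two maps split the traffic by access pattern: single-register operations go through the first instance and multi-byte transfers through the second (a sketch using calls that appear elsewhere in this driver; addr is a 6-byte buffer):

/* Sketch: one byte through regmap_dm, several bytes through
 * regmap_dmbulk (which is configured with use_single_read/write).
 */
regmap_write(db->regmap_dm, DM9051_NCR, NCR_RST);		 /* 1 byte  */
regmap_bulk_read(db->regmap_dmbulk, DM9051_PAR, addr, ETH_ALEN); /* 6 bytes */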
+
+static int dm9051_map_chipid(struct board_info *db)
+{
+ struct device *dev = &db->spidev->dev;
+ unsigned short wid;
+ u8 buff[6];
+ int ret;
+
+ ret = dm9051_get_regs(db, DM9051_VIDL, buff, sizeof(buff));
+ if (ret < 0)
+ return ret;
+
+ wid = get_unaligned_le16(buff + 2);
+ if (wid != DM9051_ID) {
+ dev_err(dev, "chipid error as %04x !\n", wid);
+ return -ENODEV;
+ }
+
+ dev_info(dev, "chip %04x found\n", wid);
+ return 0;
+}
+
+/* Read the DM9051_PAR registers, which hold the MAC address loaded from EEPROM at power-on
+ */
+static int dm9051_map_etherdev_par(struct net_device *ndev, struct board_info *db)
+{
+ u8 addr[ETH_ALEN];
+ int ret;
+
+ ret = dm9051_get_regs(db, DM9051_PAR, addr, sizeof(addr));
+ if (ret < 0)
+ return ret;
+
+ if (!is_valid_ether_addr(addr)) {
+ eth_hw_addr_random(ndev);
+
+ ret = dm9051_set_regs(db, DM9051_PAR, ndev->dev_addr, sizeof(ndev->dev_addr));
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(&db->spidev->dev, "Use random MAC address\n");
+ return 0;
+ }
+
+ eth_hw_addr_set(ndev, addr);
+ return 0;
+}
+
+/* ethtool-ops
+ */
+static void dm9051_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, DRVNAME_9051, sizeof(info->driver));
+}
+
+static void dm9051_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ db->msg_enable = value;
+}
+
+static u32 dm9051_get_msglevel(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ return db->msg_enable;
+}
+
+static int dm9051_get_eeprom_len(struct net_device *dev)
+{
+ return 128;
+}
+
+static int dm9051_get_eeprom(struct net_device *ndev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int offset = ee->offset;
+ int len = ee->len;
+ int i, ret;
+
+ if ((len | offset) & 1)
+ return -EINVAL;
+
+ ee->magic = DM_EEPROM_MAGIC;
+
+ for (i = 0; i < len; i += 2) {
+ ret = dm9051_eeprom_read(db, (offset + i) / 2, data + i);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int dm9051_set_eeprom(struct net_device *ndev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int offset = ee->offset;
+ int len = ee->len;
+ int i, ret;
+
+ if ((len | offset) & 1)
+ return -EINVAL;
+
+ if (ee->magic != DM_EEPROM_MAGIC)
+ return -EINVAL;
+
+ for (i = 0; i < len; i += 2) {
+ ret = dm9051_eeprom_write(db, (offset + i) / 2, data + i);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static void dm9051_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ *pause = db->pause;
+}
+
+static int dm9051_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ db->pause = *pause;
+
+ if (pause->autoneg == AUTONEG_DISABLE)
+ return dm9051_update_fcr(db);
+
+ phy_set_sym_pause(db->phydev, pause->rx_pause, pause->tx_pause,
+ pause->autoneg);
+ phy_start_aneg(db->phydev);
+ return 0;
+}
+
+static const struct ethtool_ops dm9051_ethtool_ops = {
+ .get_drvinfo = dm9051_get_drvinfo,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_msglevel = dm9051_get_msglevel,
+ .set_msglevel = dm9051_set_msglevel,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = dm9051_get_eeprom_len,
+ .get_eeprom = dm9051_get_eeprom,
+ .set_eeprom = dm9051_set_eeprom,
+ .get_pauseparam = dm9051_get_pauseparam,
+ .set_pauseparam = dm9051_set_pauseparam,
+};
+
+static int dm9051_all_start(struct board_info *db)
+{
+ int ret;
+
+ /* GPR power on of the internal phy
+ */
+ ret = dm9051_set_reg(db, DM9051_GPR, 0);
+ if (ret)
+ return ret;
+
+ /* dm9051 chip registers cannot be accessed within 1 ms
+ * after GPR power-on, so a 1 ms delay is essential
+ */
+ msleep(1);
+
+ ret = dm9051_core_reset(db);
+ if (ret)
+ return ret;
+
+ return dm9051_enable_interrupt(db);
+}
+
+static int dm9051_all_stop(struct board_info *db)
+{
+ int ret;
+
+ /* GPR power off of the internal phy;
+ * the internal phy can still be accessed after this GPR power-off control
+ */
+ ret = dm9051_set_reg(db, DM9051_GPR, GPR_PHY_OFF);
+ if (ret)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_RCR, RCR_RX_DISABLE);
+}
+
+/* fifo reset when an rx error is found
+ */
+static int dm9051_all_restart(struct board_info *db)
+{
+ struct net_device *ndev = db->ndev;
+ int ret;
+
+ ret = dm9051_core_reset(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_enable_interrupt(db);
+ if (ret)
+ return ret;
+
+ netdev_dbg(ndev, " rxstatus_Er & rxlen_Er %d, RST_c %d\n",
+ db->bc.status_err_counter + db->bc.large_err_counter,
+ db->bc.fifo_rst_counter);
+
+ ret = dm9051_set_recv(db);
+ if (ret)
+ return ret;
+
+ return dm9051_set_fcr(db);
+}
+
+/* read packets from the fifo memory
+ * return value,
+ * > 0 - number of packets read; the caller can repeat the rx operation
+ * 0 - no error; the caller must stop further rx operations
+ * -EBUSY - read data error; the caller escapes from the rx operation
+ */
+static int dm9051_loop_rx(struct board_info *db)
+{
+ struct net_device *ndev = db->ndev;
+ unsigned int rxbyte;
+ int ret, rxlen;
+ struct sk_buff *skb;
+ u8 *rdptr;
+ int scanrr = 0;
+
+ do {
+ ret = dm9051_read_mem(db, DM_SPI_MRCMDX, &rxbyte, 2);
+ if (ret)
+ return ret;
+
+ if ((rxbyte & GENMASK(7, 0)) != DM9051_PKT_RDY)
+ break; /* exhaust-empty */
+
+ ret = dm9051_read_mem(db, DM_SPI_MRCMD, &db->rxhdr, DM_RXHDR_SIZE);
+ if (ret)
+ return ret;
+
+ ret = dm9051_stop_mrcmd(db);
+ if (ret)
+ return ret;
+
+ rxlen = le16_to_cpu(db->rxhdr.rxlen);
+ if (db->rxhdr.status & RSR_ERR_BITS || rxlen > DM9051_PKT_MAX) {
+ netdev_dbg(ndev, "rxhdr-byte (%02x)\n",
+ db->rxhdr.headbyte);
+
+ if (db->rxhdr.status & RSR_ERR_BITS) {
+ db->bc.status_err_counter++;
+ netdev_dbg(ndev, "check rxstatus-error (%02x)\n",
+ db->rxhdr.status);
+ } else {
+ db->bc.large_err_counter++;
+ netdev_dbg(ndev, "check rxlen large-error (%d > %d)\n",
+ rxlen, DM9051_PKT_MAX);
+ }
+ return dm9051_all_restart(db);
+ }
+
+ skb = dev_alloc_skb(rxlen);
+ if (!skb) {
+ ret = dm9051_dumpblk(db, DM_SPI_MRCMD, rxlen);
+ if (ret)
+ return ret;
+ return scanrr;
+ }
+
+ rdptr = skb_put(skb, rxlen - 4);
+ ret = dm9051_read_mem(db, DM_SPI_MRCMD, rdptr, rxlen);
+ if (ret) {
+ db->bc.rx_err_counter++;
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ret = dm9051_stop_mrcmd(db);
+ if (ret)
+ return ret;
+
+ skb->protocol = eth_type_trans(skb, db->ndev);
+ if (db->ndev->features & NETIF_F_RXCSUM)
+ skb_checksum_none_assert(skb);
+ netif_rx(skb);
+ db->ndev->stats.rx_bytes += rxlen;
+ db->ndev->stats.rx_packets++;
+ scanrr++;
+ } while (!ret);
+
+ return scanrr;
+}
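
A sketch of how a caller honours the return-value contract documented above (this mirrors the loop in dm9051_rx_threaded_irq() further down):

/* Sketch: repeat while packets were read, stop on 0, bail out on error */
do {
	ret = dm9051_loop_rx(db);
	if (ret < 0)
		goto out_unlock;	/* read error: abandon rx */
} while (ret > 0);			/* 0 means the fifo is empty */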
+
+/* transmit a packet,
+ * return value,
+ * 0 - success
+ * -ETIMEDOUT - timeout error
+ */
+static int dm9051_single_tx(struct board_info *db, u8 *buff, unsigned int len)
+{
+ int ret;
+
+ ret = dm9051_nsr_poll(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_write_mem(db, DM_SPI_MWCMD, buff, len);
+ if (ret)
+ return ret;
+
+ ret = dm9051_set_regs(db, DM9051_TXPLL, &len, 2);
+ if (ret < 0)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_TCR, TCR_TXREQ);
+}
+
+static int dm9051_loop_tx(struct board_info *db)
+{
+ struct net_device *ndev = db->ndev;
+ int ntx = 0;
+ int ret;
+
+ while (!skb_queue_empty(&db->txq)) {
+ struct sk_buff *skb;
+ unsigned int len;
+
+ skb = skb_dequeue(&db->txq);
+ if (skb) {
+ ntx++;
+ ret = dm9051_single_tx(db, skb->data, skb->len);
+ len = skb->len;
+ dev_kfree_skb(skb);
+ if (ret < 0) {
+ db->bc.tx_err_counter++;
+ return 0;
+ }
+ ndev->stats.tx_bytes += len;
+ ndev->stats.tx_packets++;
+ }
+
+ if (netif_queue_stopped(ndev) &&
+ (skb_queue_len(&db->txq) < DM9051_TX_QUE_LO_WATER))
+ netif_wake_queue(ndev);
+ }
+
+ return ntx;
+}
+
+static irqreturn_t dm9051_rx_threaded_irq(int irq, void *pw)
+{
+ struct board_info *db = pw;
+ int result, result_tx;
+
+ mutex_lock(&db->spi_lockm);
+
+ result = dm9051_disable_interrupt(db);
+ if (result)
+ goto out_unlock;
+
+ result = dm9051_clear_interrupt(db);
+ if (result)
+ goto out_unlock;
+
+ do {
+ result = dm9051_loop_rx(db); /* threaded irq rx */
+ if (result < 0)
+ goto out_unlock;
+ result_tx = dm9051_loop_tx(db); /* more tx better performance */
+ if (result_tx < 0)
+ goto out_unlock;
+ } while (result > 0);
+
+ dm9051_enable_interrupt(db);
+
+ /* Exit with the mutex unlocked on an rx or tx error
+ */
+out_unlock:
+ mutex_unlock(&db->spi_lockm);
+
+ return IRQ_HANDLED;
+}
+
+static void dm9051_tx_delay(struct work_struct *work)
+{
+ struct board_info *db = container_of(work, struct board_info, tx_work);
+ int result;
+
+ mutex_lock(&db->spi_lockm);
+
+ result = dm9051_loop_tx(db);
+ if (result < 0)
+ netdev_err(db->ndev, "transmit packet error\n");
+
+ mutex_unlock(&db->spi_lockm);
+}
+
+static void dm9051_rxctl_delay(struct work_struct *work)
+{
+ struct board_info *db = container_of(work, struct board_info, rxctrl_work);
+ struct net_device *ndev = db->ndev;
+ int result;
+
+ mutex_lock(&db->spi_lockm);
+
+ result = dm9051_set_regs(db, DM9051_PAR, ndev->dev_addr, sizeof(ndev->dev_addr));
+ if (result < 0)
+ goto out_unlock;
+
+ dm9051_set_recv(db);
+
+ /* Unlock the mutex and return from this function if a regmap call fails
+ */
+out_unlock:
+ mutex_unlock(&db->spi_lockm);
+}
+
+/* Open network device
+ * Called when the network device is marked active, such as a user executing
+ * 'ifconfig up' on the device
+ */
+static int dm9051_open(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ struct spi_device *spi = db->spidev;
+ int ret;
+
+ db->imr_all = IMR_PAR | IMR_PRM;
+ db->lcr_all = LMCR_MODE1;
+ db->rctl.rcr_all = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
+ memset(db->rctl.hash_table, 0, sizeof(db->rctl.hash_table));
+
+ ndev->irq = spi->irq; /* by dts */
+ ret = request_threaded_irq(spi->irq, NULL, dm9051_rx_threaded_irq,
+ dm9051_irq_flag(db) | IRQF_ONESHOT,
+ ndev->name, db);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to get irq\n");
+ return ret;
+ }
+
+ phy_support_sym_pause(db->phydev);
+ phy_start(db->phydev);
+
+ /* flow control parameters init */
+ db->pause.rx_pause = true;
+ db->pause.tx_pause = true;
+ db->pause.autoneg = AUTONEG_DISABLE;
+
+ if (db->phydev->autoneg)
+ db->pause.autoneg = AUTONEG_ENABLE;
+
+ ret = dm9051_all_start(db);
+ if (ret) {
+ phy_stop(db->phydev);
+ free_irq(spi->irq, db);
+ return ret;
+ }
+
+ netif_wake_queue(ndev);
+
+ return 0;
+}
+
+/* Close network device
+ * Called to close down a network device which has been active. Cancel any
+ * work, shutdown the RX and TX process and then place the chip into a low
+ * power state while it is not being used
+ */
+static int dm9051_stop(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int ret;
+
+ ret = dm9051_all_stop(db);
+ if (ret)
+ return ret;
+
+ flush_work(&db->tx_work);
+ flush_work(&db->rxctrl_work);
+
+ phy_stop(db->phydev);
+
+ free_irq(db->spidev->irq, db);
+
+ netif_stop_queue(ndev);
+
+ skb_queue_purge(&db->txq);
+
+ return 0;
+}
+
+/* event: queue the packet and conditionally schedule the tx work
+ */
+static netdev_tx_t dm9051_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ skb_queue_tail(&db->txq, skb);
+ if (skb_queue_len(&db->txq) > DM9051_TX_QUE_HI_WATER)
+ netif_stop_queue(ndev); /* enforce limit queue size */
+
+ schedule_work(&db->tx_work);
+
+ return NETDEV_TX_OK;
+}
+
+/* event: schedule the rx-mode update work
+ */
+static void dm9051_set_rx_mode(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ struct dm9051_rxctrl rxctrl;
+ struct netdev_hw_addr *ha;
+ u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
+ u32 hash_val;
+
+ memset(&rxctrl, 0, sizeof(rxctrl));
+
+ /* rx control */
+ if (ndev->flags & IFF_PROMISC) {
+ rcr |= RCR_PRMSC;
+ netdev_dbg(ndev, "set_multicast rcr |= RCR_PRMSC, rcr= %02x\n", rcr);
+ }
+
+ if (ndev->flags & IFF_ALLMULTI) {
+ rcr |= RCR_ALL;
+ netdev_dbg(ndev, "set_multicast rcr |= RCR_ALLMULTI, rcr= %02x\n", rcr);
+ }
+
+ rxctrl.rcr_all = rcr;
+
+ /* broadcast address */
+ rxctrl.hash_table[0] = 0;
+ rxctrl.hash_table[1] = 0;
+ rxctrl.hash_table[2] = 0;
+ rxctrl.hash_table[3] = 0x8000;
+
+ /* hash each multicast address into the 64-bit hash table */
+ netdev_for_each_mc_addr(ha, ndev) {
+ hash_val = ether_crc_le(ETH_ALEN, ha->addr) & GENMASK(5, 0);
+ rxctrl.hash_table[hash_val / 16] |= BIT(0) << (hash_val % 16);
+ }
+
+ /* schedule work to do the actual set of the data if needed */
+
+ if (memcmp(&db->rctl, &rxctrl, sizeof(rxctrl))) {
+ memcpy(&db->rctl, &rxctrl, sizeof(rxctrl));
+ schedule_work(&db->rxctrl_work);
+ }
+}
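
As a worked example of the hash placement above: the 6-bit CRC index selects word idx / 16 and bit idx % 16 of the four 16-bit hash_table words, so index 63 sets bit 15 of hash_table[3], the same slot the function hardcodes for broadcast (0x8000):

/* Worked form of the expression used in the loop above:
 * BIT(0) << (idx % 16) is simply BIT(idx % 16).
 */
u32 idx = ether_crc_le(ETH_ALEN, ha->addr) & GENMASK(5, 0); /* 0..63 */

rxctrl.hash_table[idx / 16] |= BIT(idx % 16);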
+
+/* event: write into the mac registers and eeprom directly
+ */
+static int dm9051_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(ndev, p);
+ if (ret < 0)
+ return ret;
+
+ eth_commit_mac_addr_change(ndev, p);
+ return dm9051_set_regs(db, DM9051_PAR, ndev->dev_addr, sizeof(ndev->dev_addr));
+}
+
+static const struct net_device_ops dm9051_netdev_ops = {
+ .ndo_open = dm9051_open,
+ .ndo_stop = dm9051_stop,
+ .ndo_start_xmit = dm9051_start_xmit,
+ .ndo_set_rx_mode = dm9051_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = dm9051_set_mac_address,
+};
+
+static void dm9051_operation_clear(struct board_info *db)
+{
+ db->bc.status_err_counter = 0;
+ db->bc.large_err_counter = 0;
+ db->bc.rx_err_counter = 0;
+ db->bc.tx_err_counter = 0;
+ db->bc.fifo_rst_counter = 0;
+}
+
+static int dm9051_mdio_register(struct board_info *db)
+{
+ struct spi_device *spi = db->spidev;
+ int ret;
+
+ db->mdiobus = devm_mdiobus_alloc(&spi->dev);
+ if (!db->mdiobus)
+ return -ENOMEM;
+
+ db->mdiobus->priv = db;
+ db->mdiobus->read = dm9051_mdio_read;
+ db->mdiobus->write = dm9051_mdio_write;
+ db->mdiobus->name = "dm9051-mdiobus";
+ db->mdiobus->phy_mask = (u32)~BIT(1);
+ db->mdiobus->parent = &spi->dev;
+ snprintf(db->mdiobus->id, MII_BUS_ID_SIZE,
+ "dm9051-%s.%u", dev_name(&spi->dev), spi->chip_select);
+
+ ret = devm_mdiobus_register(&spi->dev, db->mdiobus);
+ if (ret)
+ dev_err(&spi->dev, "Could not register MDIO bus\n");
+
+ return ret;
+}
+
+static void dm9051_handle_link_change(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ phy_print_status(db->phydev);
+
+ /* only write pause settings to the mac; since the mac and phy are
+ * integrated together, link state, speed and duplex are already in sync
+ */
+ if (db->phydev->link) {
+ if (db->phydev->pause) {
+ db->pause.rx_pause = true;
+ db->pause.tx_pause = true;
+ }
+ dm9051_update_fcr(db);
+ }
+}
+
+/* connect the phy in poll mode
+ */
+static int dm9051_phy_connect(struct board_info *db)
+{
+ char phy_id[MII_BUS_ID_SIZE + 3];
+
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
+ db->mdiobus->id, DM9051_PHY_ADDR);
+
+ db->phydev = phy_connect(db->ndev, phy_id, dm9051_handle_link_change,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(db->phydev))
+ return PTR_ERR_OR_ZERO(db->phydev);
+ return 0;
+}
+
+static int dm9051_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct net_device *ndev;
+ struct board_info *db;
+ int ret;
+
+ ndev = devm_alloc_etherdev(dev, sizeof(struct board_info));
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, dev);
+ dev_set_drvdata(dev, ndev);
+
+ db = netdev_priv(ndev);
+
+ db->msg_enable = 0;
+ db->spidev = spi;
+ db->ndev = ndev;
+
+ ndev->netdev_ops = &dm9051_netdev_ops;
+ ndev->ethtool_ops = &dm9051_ethtool_ops;
+
+ mutex_init(&db->spi_lockm);
+ mutex_init(&db->reg_mutex);
+
+ INIT_WORK(&db->rxctrl_work, dm9051_rxctl_delay);
+ INIT_WORK(&db->tx_work, dm9051_tx_delay);
+
+ ret = dm9051_map_init(spi, db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_map_chipid(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_map_etherdev_par(ndev, db);
+ if (ret < 0)
+ return ret;
+
+ ret = dm9051_mdio_register(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_phy_connect(db);
+ if (ret)
+ return ret;
+
+ dm9051_operation_clear(db);
+ skb_queue_head_init(&db->txq);
+
+ ret = devm_register_netdev(dev, ndev);
+ if (ret) {
+ phy_disconnect(db->phydev);
+ return dev_err_probe(dev, ret, "device register failed");
+ }
+
+ return 0;
+}
+
+static void dm9051_drv_remove(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct board_info *db = to_dm9051_board(ndev);
+
+ phy_disconnect(db->phydev);
+}
+
+static const struct of_device_id dm9051_match_table[] = {
+ { .compatible = "davicom,dm9051" },
+ {}
+};
+
+static const struct spi_device_id dm9051_id_table[] = {
+ { "dm9051", 0 },
+ {}
+};
+
+static struct spi_driver dm9051_driver = {
+ .driver = {
+ .name = DRVNAME_9051,
+ .of_match_table = dm9051_match_table,
+ },
+ .probe = dm9051_probe,
+ .remove = dm9051_drv_remove,
+ .id_table = dm9051_id_table,
+};
+module_spi_driver(dm9051_driver);
+
+MODULE_AUTHOR("Joseph CHANG <joseph_chang@davicom.com.tw>");
+MODULE_DESCRIPTION("Davicom DM9051 network SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/davicom/dm9051.h b/drivers/net/ethernet/davicom/dm9051.h
new file mode 100644
index 000000000000..fef3120edd7c
--- /dev/null
+++ b/drivers/net/ethernet/davicom/dm9051.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Davicom Semiconductor, Inc.
+ * Davicom DM9051 SPI Fast Ethernet Linux driver
+ */
+
+#ifndef _DM9051_H_
+#define _DM9051_H_
+
+#include <linux/bits.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+
+#define DM9051_ID 0x9051
+
+#define DM9051_NCR 0x00
+#define DM9051_NSR 0x01
+#define DM9051_TCR 0x02
+#define DM9051_RCR 0x05
+#define DM9051_BPTR 0x08
+#define DM9051_FCR 0x0A
+#define DM9051_EPCR 0x0B
+#define DM9051_EPAR 0x0C
+#define DM9051_EPDRL 0x0D
+#define DM9051_EPDRH 0x0E
+#define DM9051_PAR 0x10
+#define DM9051_MAR 0x16
+#define DM9051_GPCR 0x1E
+#define DM9051_GPR 0x1F
+
+#define DM9051_VIDL 0x28
+#define DM9051_VIDH 0x29
+#define DM9051_PIDL 0x2A
+#define DM9051_PIDH 0x2B
+#define DM9051_SMCR 0x2F
+#define DM9051_ATCR 0x30
+#define DM9051_SPIBCR 0x38
+#define DM9051_INTCR 0x39
+#define DM9051_PPCR 0x3D
+
+#define DM9051_MPCR 0x55
+#define DM9051_LMCR 0x57
+#define DM9051_MBNDRY 0x5E
+
+#define DM9051_MRRL 0x74
+#define DM9051_MRRH 0x75
+#define DM9051_MWRL 0x7A
+#define DM9051_MWRH 0x7B
+#define DM9051_TXPLL 0x7C
+#define DM9051_TXPLH 0x7D
+#define DM9051_ISR 0x7E
+#define DM9051_IMR 0x7F
+
+#define DM_SPI_MRCMDX 0x70
+#define DM_SPI_MRCMD 0x72
+#define DM_SPI_MWCMD 0x78
+
+#define DM_SPI_WR 0x80
+
+/* dm9051 Ethernet controller register bits
+ */
+/* 0x00 */
+#define NCR_WAKEEN BIT(6)
+#define NCR_FDX BIT(3)
+#define NCR_RST BIT(0)
+/* 0x01 */
+#define NSR_SPEED BIT(7)
+#define NSR_LINKST BIT(6)
+#define NSR_WAKEST BIT(5)
+#define NSR_TX2END BIT(3)
+#define NSR_TX1END BIT(2)
+/* 0x02 */
+#define TCR_DIS_JABBER_TIMER BIT(6) /* for Jabber Packet support */
+#define TCR_TXREQ BIT(0)
+/* 0x05 */
+#define RCR_DIS_WATCHDOG_TIMER BIT(6) /* for Jabber Packet support */
+#define RCR_DIS_LONG BIT(5)
+#define RCR_DIS_CRC BIT(4)
+#define RCR_ALL BIT(3)
+#define RCR_PRMSC BIT(1)
+#define RCR_RXEN BIT(0)
+#define RCR_RX_DISABLE (RCR_DIS_LONG | RCR_DIS_CRC)
+/* 0x06 */
+#define RSR_RF BIT(7)
+#define RSR_MF BIT(6)
+#define RSR_LCS BIT(5)
+#define RSR_RWTO BIT(4)
+#define RSR_PLE BIT(3)
+#define RSR_AE BIT(2)
+#define RSR_CE BIT(1)
+#define RSR_FOE BIT(0)
+#define RSR_ERR_BITS (RSR_RF | RSR_LCS | RSR_RWTO | RSR_PLE | \
+ RSR_AE | RSR_CE | RSR_FOE)
+/* 0x0A */
+#define FCR_TXPEN BIT(5)
+#define FCR_BKPM BIT(3)
+#define FCR_FLCE BIT(0)
+#define FCR_RXTX_BITS (FCR_TXPEN | FCR_BKPM | FCR_FLCE)
+/* 0x0B */
+#define EPCR_WEP BIT(4)
+#define EPCR_EPOS BIT(3)
+#define EPCR_ERPRR BIT(2)
+#define EPCR_ERPRW BIT(1)
+#define EPCR_ERRE BIT(0)
+/* 0x1E */
+#define GPCR_GEP_CNTL BIT(0)
+/* 0x1F */
+#define GPR_PHY_OFF BIT(0)
+/* 0x30 */
+#define ATCR_AUTO_TX BIT(7)
+/* 0x39 */
+#define INTCR_POL_LOW (1 << 0)
+#define INTCR_POL_HIGH (0 << 0)
+/* 0x3D */
+/* Pause Packet Control Register - default = 1 */
+#define PPCR_PAUSE_COUNT 0x08
+/* 0x55 */
+#define MPCR_RSTTX BIT(1)
+#define MPCR_RSTRX BIT(0)
+/* 0x57 */
+/* LEDMode Control Register - LEDMode1 */
+/* Value 0x81 : bit[7] = 1, bit[2] = 0, bit[1:0] = 01b */
+#define LMCR_NEWMOD BIT(7)
+#define LMCR_TYPED1 BIT(1)
+#define LMCR_TYPED0 BIT(0)
+#define LMCR_MODE1 (LMCR_NEWMOD | LMCR_TYPED0)
+/* 0x5E */
+#define MBNDRY_BYTE BIT(7)
+/* 0xFE */
+#define ISR_MBS BIT(7)
+#define ISR_LNKCHG BIT(5)
+#define ISR_ROOS BIT(3)
+#define ISR_ROS BIT(2)
+#define ISR_PTS BIT(1)
+#define ISR_PRS BIT(0)
+#define ISR_CLR_INT (ISR_LNKCHG | ISR_ROOS | ISR_ROS | \
+ ISR_PTS | ISR_PRS)
+#define ISR_STOP_MRCMD (ISR_MBS)
+/* 0xFF */
+#define IMR_PAR BIT(7)
+#define IMR_LNKCHGI BIT(5)
+#define IMR_PTM BIT(1)
+#define IMR_PRM BIT(0)
+
+/* Const
+ */
+#define DM9051_PHY_ADDR 1 /* PHY id */
+#define DM9051_PHY 0x40 /* PHY address 0x01 */
+#define DM9051_PKT_RDY 0x01 /* Packet ready to receive */
+#define DM9051_PKT_MAX 1536 /* Received packet max size */
+#define DM9051_TX_QUE_HI_WATER 50
+#define DM9051_TX_QUE_LO_WATER 25
+#define DM_EEPROM_MAGIC 0x9051
+
+#define DM_RXHDR_SIZE sizeof(struct dm9051_rxhdr)
+
+static inline struct board_info *to_dm9051_board(struct net_device *ndev)
+{
+ return netdev_priv(ndev);
+}
+
+#endif /* _DM9051_H_ */
diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c
index 3fb39e32e1b4..653bde48ef44 100644
--- a/drivers/net/ethernet/dec/tulip/pnic.c
+++ b/drivers/net/ethernet/dec/tulip/pnic.c
@@ -21,7 +21,7 @@ void pnic_do_nway(struct net_device *dev)
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
u32 phy_reg = ioread32(ioaddr + 0xB8);
- u32 new_csr6 = tp->csr6 & ~0x40C40200;
+ u32 new_csr6;
if (phy_reg & 0x78000000) { /* Ignore baseT4 */
if (phy_reg & 0x20000000) dev->if_port = 5;
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index c710dc17be90..8dd7bf9014ec 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -340,7 +340,7 @@ enum wake_event_bits {
struct netdev_desc {
__le32 next_desc;
__le32 status;
- struct desc_frag { __le32 addr, length; } frag[1];
+ struct desc_frag { __le32 addr, length; } frag;
};
/* Bits in netdev_desc.status */
@@ -980,8 +980,8 @@ static void tx_timeout(struct net_device *dev, unsigned int txqueue)
le32_to_cpu(np->tx_ring[i].next_desc),
le32_to_cpu(np->tx_ring[i].status),
(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
- le32_to_cpu(np->tx_ring[i].frag[0].addr),
- le32_to_cpu(np->tx_ring[i].frag[0].length));
+ le32_to_cpu(np->tx_ring[i].frag.addr),
+ le32_to_cpu(np->tx_ring[i].frag.length));
}
printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
ioread32(np->base + TxListPtr),
@@ -1027,7 +1027,7 @@ static void init_ring(struct net_device *dev)
np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
np->rx_ring[i].status = 0;
- np->rx_ring[i].frag[0].length = 0;
+ np->rx_ring[i].frag.length = 0;
np->rx_skbuff[i] = NULL;
}
@@ -1039,16 +1039,16 @@ static void init_ring(struct net_device *dev)
if (skb == NULL)
break;
skb_reserve(skb, 2); /* 16 byte align the IP header. */
- np->rx_ring[i].frag[0].addr = cpu_to_le32(
+ np->rx_ring[i].frag.addr = cpu_to_le32(
dma_map_single(&np->pci_dev->dev, skb->data,
np->rx_buf_sz, DMA_FROM_DEVICE));
if (dma_mapping_error(&np->pci_dev->dev,
- np->rx_ring[i].frag[0].addr)) {
+ np->rx_ring[i].frag.addr)) {
dev_kfree_skb(skb);
np->rx_skbuff[i] = NULL;
break;
}
- np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
+ np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag);
}
np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -1097,12 +1097,12 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
txdesc->next_desc = 0;
txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
- txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
+ txdesc->frag.addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
skb->data, skb->len, DMA_TO_DEVICE));
if (dma_mapping_error(&np->pci_dev->dev,
- txdesc->frag[0].addr))
+ txdesc->frag.addr))
goto drop_frame;
- txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
+ txdesc->frag.length = cpu_to_le32 (skb->len | LastFrag);
/* Increment cur_tx before tasklet_schedule() */
np->cur_tx++;
@@ -1151,7 +1151,7 @@ reset_tx (struct net_device *dev)
skb = np->tx_skbuff[i];
if (skb) {
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[i].frag[0].addr),
+ le32_to_cpu(np->tx_ring[i].frag.addr),
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
np->tx_skbuff[i] = NULL;
@@ -1271,12 +1271,12 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
skb = np->tx_skbuff[entry];
/* Free the original skb. */
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[entry].frag[0].addr),
+ le32_to_cpu(np->tx_ring[entry].frag.addr),
skb->len, DMA_TO_DEVICE);
dev_consume_skb_irq(np->tx_skbuff[entry]);
np->tx_skbuff[entry] = NULL;
- np->tx_ring[entry].frag[0].addr = 0;
- np->tx_ring[entry].frag[0].length = 0;
+ np->tx_ring[entry].frag.addr = 0;
+ np->tx_ring[entry].frag.length = 0;
}
spin_unlock(&np->lock);
} else {
@@ -1290,12 +1290,12 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
skb = np->tx_skbuff[entry];
/* Free the original skb. */
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[entry].frag[0].addr),
+ le32_to_cpu(np->tx_ring[entry].frag.addr),
skb->len, DMA_TO_DEVICE);
dev_consume_skb_irq(np->tx_skbuff[entry]);
np->tx_skbuff[entry] = NULL;
- np->tx_ring[entry].frag[0].addr = 0;
- np->tx_ring[entry].frag[0].length = 0;
+ np->tx_ring[entry].frag.addr = 0;
+ np->tx_ring[entry].frag.length = 0;
}
spin_unlock(&np->lock);
}
@@ -1372,16 +1372,16 @@ static void rx_poll(struct tasklet_struct *t)
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
dma_sync_single_for_cpu(&np->pci_dev->dev,
- le32_to_cpu(desc->frag[0].addr),
+ le32_to_cpu(desc->frag.addr),
np->rx_buf_sz, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
dma_sync_single_for_device(&np->pci_dev->dev,
- le32_to_cpu(desc->frag[0].addr),
+ le32_to_cpu(desc->frag.addr),
np->rx_buf_sz, DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
} else {
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(desc->frag[0].addr),
+ le32_to_cpu(desc->frag.addr),
np->rx_buf_sz, DMA_FROM_DEVICE);
skb_put(skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
@@ -1427,18 +1427,18 @@ static void refill_rx (struct net_device *dev)
if (skb == NULL)
break; /* Better luck next round. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
- np->rx_ring[entry].frag[0].addr = cpu_to_le32(
+ np->rx_ring[entry].frag.addr = cpu_to_le32(
dma_map_single(&np->pci_dev->dev, skb->data,
np->rx_buf_sz, DMA_FROM_DEVICE));
if (dma_mapping_error(&np->pci_dev->dev,
- np->rx_ring[entry].frag[0].addr)) {
+ np->rx_ring[entry].frag.addr)) {
dev_kfree_skb_irq(skb);
np->rx_skbuff[entry] = NULL;
break;
}
}
/* Perhaps we need not reset this field. */
- np->rx_ring[entry].frag[0].length =
+ np->rx_ring[entry].frag.length =
cpu_to_le32(np->rx_buf_sz | LastFrag);
np->rx_ring[entry].status = 0;
cnt++;
@@ -1870,14 +1870,14 @@ static int netdev_close(struct net_device *dev)
(int)(np->tx_ring_dma));
for (i = 0; i < TX_RING_SIZE; i++)
printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
- i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
- np->tx_ring[i].frag[0].length);
+ i, np->tx_ring[i].status, np->tx_ring[i].frag.addr,
+ np->tx_ring[i].frag.length);
printk(KERN_DEBUG " Rx ring %8.8x:\n",
(int)(np->rx_ring_dma));
for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
- i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
- np->rx_ring[i].frag[0].length);
+ i, np->rx_ring[i].status, np->rx_ring[i].frag.addr,
+ np->rx_ring[i].frag.length);
}
}
#endif /* __i386__ debugging only */
@@ -1892,19 +1892,19 @@ static int netdev_close(struct net_device *dev)
skb = np->rx_skbuff[i];
if (skb) {
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->rx_ring[i].frag[0].addr),
+ le32_to_cpu(np->rx_ring[i].frag.addr),
np->rx_buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
np->rx_skbuff[i] = NULL;
}
- np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
+ np->rx_ring[i].frag.addr = cpu_to_le32(0xBADF00D0); /* poison */
}
for (i = 0; i < TX_RING_SIZE; i++) {
np->tx_ring[i].next_desc = 0;
skb = np->tx_skbuff[i];
if (skb) {
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[i].frag[0].addr),
+ le32_to_cpu(np->tx_ring[i].frag.addr),
skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
np->tx_skbuff[i] = NULL;
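
The sundance hunks above are a mechanical frag[0] -> frag conversion: the descriptor's one-element fragment array becomes a single embedded object. A minimal sketch of the layout being assumed (illustrative, not the full driver header):

#include <linux/types.h>

/* One-element array replaced by a single object, which is what makes
 * every frag[0] access rewrite mechanically to frag. */
struct desc_frag {
	__le32 addr;
	__le32 length;
};

struct netdev_desc {
	__le32 next_desc;
	__le32 status;
	struct desc_frag frag;	/* was: struct desc_frag frag[1]; */
};
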
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 323340826dab..69dbf950d451 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -608,7 +608,6 @@ static s32 nps_enet_probe(struct platform_device *pdev)
/* Get IRQ number */
priv->irq = platform_get_irq(pdev, 0);
if (priv->irq < 0) {
- dev_err(dev, "failed to retrieve <irq Rx-Tx> value from device tree\n");
err = -ENODEV;
goto out_netdev;
}
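
The dev_err() removal above relies on platform_get_irq() already logging its own message on failure, so the caller only needs the error path. A minimal hedged sketch of the resulting idiom:

#include <linux/platform_device.h>

/* Sketch: platform_get_irq() logs on failure, so a duplicate dev_err()
 * in the caller is redundant; error handling alone suffices. */
static int example_get_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* the probe above maps this to -ENODEV */
	return irq;
}
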
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
index 3d1e9a302148..c699bd6bcbb9 100644
--- a/drivers/net/ethernet/faraday/Kconfig
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_FARADAY
bool "Faraday devices"
default y
- depends on ARM || NDS32 || COMPILE_TEST
+ depends on ARM || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -19,24 +19,22 @@ if NET_VENDOR_FARADAY
config FTMAC100
tristate "Faraday FTMAC100 10/100 Ethernet support"
- depends on ARM || NDS32 || COMPILE_TEST
+ depends on ARM || COMPILE_TEST
depends on !64BIT || BROKEN
select MII
help
This driver supports the FTMAC100 10/100 Ethernet controller
- from Faraday. It is used on Faraday A320, Andes AG101 and some
- other ARM/NDS32 SoC's.
+ from Faraday. It is used on Faraday A320 and some other ARM SoCs.
config FTGMAC100
tristate "Faraday FTGMAC100 Gigabit Ethernet support"
- depends on ARM || NDS32 || COMPILE_TEST
+ depends on ARM || COMPILE_TEST
depends on !64BIT || BROKEN
select PHYLIB
select MDIO_ASPEED if MACH_ASPEED_G6
select CRC32
help
This driver supports the FTGMAC100 Gigabit Ethernet controller
- from Faraday. It is used on Faraday A369, Andes AG102 and some
- other ARM/NDS32 SoC's.
+ from Faraday. It is used on Faraday A369 and some other ARM SoCs.
endif # NET_VENDOR_FARADAY
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 0f90d2d5bb60..4b047255d928 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -18,6 +18,7 @@
#include <linux/ptp_classify.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
+#include <net/tso.h>
#include "dpaa2-eth.h"
@@ -34,6 +35,75 @@ MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
struct ptp_qoriq *dpaa2_ptp;
EXPORT_SYMBOL(dpaa2_ptp);
+static void dpaa2_eth_detect_features(struct dpaa2_eth_priv *priv)
+{
+ priv->features = 0;
+
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_PTP_ONESTEP_VER_MAJOR,
+ DPNI_PTP_ONESTEP_VER_MINOR) >= 0)
+ priv->features |= DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT;
+}
+
+static void dpaa2_update_ptp_onestep_indirect(struct dpaa2_eth_priv *priv,
+ u32 offset, u8 udp)
+{
+ struct dpni_single_step_cfg cfg;
+
+ cfg.en = 1;
+ cfg.ch_update = udp;
+ cfg.offset = offset;
+ cfg.peer_delay = 0;
+
+ if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token, &cfg))
+ WARN_ONCE(1, "Failed to set single step register");
+}
+
+static void dpaa2_update_ptp_onestep_direct(struct dpaa2_eth_priv *priv,
+ u32 offset, u8 udp)
+{
+ u32 val = 0;
+
+ val = DPAA2_PTP_SINGLE_STEP_ENABLE |
+ DPAA2_PTP_SINGLE_CORRECTION_OFF(offset);
+
+ if (udp)
+ val |= DPAA2_PTP_SINGLE_STEP_CH;
+
+ if (priv->onestep_reg_base)
+ writel(val, priv->onestep_reg_base);
+}
+
+static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_single_step_cfg ptp_cfg;
+
+ priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_indirect;
+
+ if (!(priv->features & DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT))
+ return;
+
+ if (dpni_get_single_step_cfg(priv->mc_io, 0,
+ priv->mc_token, &ptp_cfg)) {
+ dev_err(dev, "dpni_get_single_step_cfg cannot retrieve onestep reg, falling back to indirect update\n");
+ return;
+ }
+
+ if (!ptp_cfg.ptp_onestep_reg_base) {
+ dev_err(dev, "1588 onestep reg not available, falling back to indirect update\n");
+ return;
+ }
+
+ priv->onestep_reg_base = ioremap(ptp_cfg.ptp_onestep_reg_base,
+ sizeof(u32));
+ if (!priv->onestep_reg_base) {
+ dev_err(dev, "1588 onestep reg cannot be mapped, falling back to indirect update\n");
+ return;
+ }
+
+ priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct;
+}
+
static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
dma_addr_t iova_addr)
{
@@ -695,7 +765,6 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
struct sk_buff *skb)
{
struct ptp_tstamp origin_timestamp;
- struct dpni_single_step_cfg cfg;
u8 msgtype, twostep, udp;
struct dpaa2_faead *faead;
struct dpaa2_fas *fas;
@@ -749,17 +818,48 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
htonl(origin_timestamp.sec_lsb);
*(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);
- cfg.en = 1;
- cfg.ch_update = udp;
- cfg.offset = offset1;
- cfg.peer_delay = 0;
+ if (priv->ptp_correction_off == offset1)
+ return;
+
+ priv->dpaa2_set_onestep_params_cb(priv, offset1, udp);
+ priv->ptp_correction_off = offset1;
- if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token,
- &cfg))
- WARN_ONCE(1, "Failed to set single step register");
}
}
+static void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_sgt_cache *sgt_cache;
+ void *sgt_buf = NULL;
+ int sgt_buf_size;
+
+ sgt_cache = this_cpu_ptr(priv->sgt_cache);
+ sgt_buf_size = priv->tx_data_offset +
+ DPAA2_ETH_SG_ENTRIES_MAX * sizeof(struct dpaa2_sg_entry);
+
+ if (sgt_cache->count == 0)
+ sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
+ else
+ sgt_buf = sgt_cache->buf[--sgt_cache->count];
+ if (!sgt_buf)
+ return NULL;
+
+ memset(sgt_buf, 0, sgt_buf_size);
+
+ return sgt_buf;
+}
+
+static void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
+{
+ struct dpaa2_eth_sgt_cache *sgt_cache;
+
+ sgt_cache = this_cpu_ptr(priv->sgt_cache);
+ if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
+ skb_free_frag(sgt_buf);
+ else
+ sgt_cache->buf[sgt_cache->count++] = sgt_buf;
+}
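
dpaa2_eth_sgt_get()/dpaa2_eth_sgt_recycle() above centralize what used to be two open-coded allocation sites into one lock-free per-CPU free list. A hedged sketch of the pattern (CACHE_DEPTH and the names here are illustrative; callers run with bottom halves disabled, so each CPU only touches its own cache):

#include <linux/percpu.h>
#include <linux/skbuff.h>

#define CACHE_DEPTH	16

struct buf_cache {
	u16 count;
	void *buf[CACHE_DEPTH];
};

static void *cache_get(struct buf_cache __percpu *pcpu, unsigned int size)
{
	struct buf_cache *c = this_cpu_ptr(pcpu);

	if (c->count)
		return c->buf[--c->count];
	/* Miss: fall back to the page-fragment allocator */
	return napi_alloc_frag_align(size, SMP_CACHE_BYTES);
}

static void cache_put(struct buf_cache __percpu *pcpu, void *buf)
{
	struct buf_cache *c = this_cpu_ptr(pcpu);

	if (c->count < CACHE_DEPTH)
		c->buf[c->count++] = buf;
	else
		skb_free_frag(buf);	/* cache full, release the fragment */
}
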
+
/* Create a frame descriptor based on a fragmented skb */
static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
struct sk_buff *skb,
@@ -805,12 +905,11 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
/* Prepare the HW SGT structure */
sgt_buf_size = priv->tx_data_offset +
sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
- sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
+ sgt_buf = dpaa2_eth_sgt_get(priv);
if (unlikely(!sgt_buf)) {
err = -ENOMEM;
goto sgt_buf_alloc_failed;
}
- memset(sgt_buf, 0, sgt_buf_size);
sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
@@ -846,6 +945,7 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
err = -ENOMEM;
goto dma_map_single_failed;
}
+ memset(fd, 0, sizeof(struct dpaa2_fd));
dpaa2_fd_set_offset(fd, priv->tx_data_offset);
dpaa2_fd_set_format(fd, dpaa2_fd_sg);
dpaa2_fd_set_addr(fd, addr);
@@ -855,7 +955,7 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
return 0;
dma_map_single_failed:
- skb_free_frag(sgt_buf);
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
sgt_buf_alloc_failed:
dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
dma_map_sg_failed:
@@ -875,7 +975,6 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
void **swa_addr)
{
struct device *dev = priv->net_dev->dev.parent;
- struct dpaa2_eth_sgt_cache *sgt_cache;
struct dpaa2_sg_entry *sgt;
struct dpaa2_eth_swa *swa;
dma_addr_t addr, sgt_addr;
@@ -884,18 +983,10 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
int err;
/* Prepare the HW SGT structure */
- sgt_cache = this_cpu_ptr(priv->sgt_cache);
sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
-
- if (sgt_cache->count == 0)
- sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN,
- GFP_ATOMIC);
- else
- sgt_buf = sgt_cache->buf[--sgt_cache->count];
+ sgt_buf = dpaa2_eth_sgt_get(priv);
if (unlikely(!sgt_buf))
return -ENOMEM;
-
- sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
@@ -923,6 +1014,7 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
goto sgt_map_failed;
}
+ memset(fd, 0, sizeof(struct dpaa2_fd));
dpaa2_fd_set_offset(fd, priv->tx_data_offset);
dpaa2_fd_set_format(fd, dpaa2_fd_sg);
dpaa2_fd_set_addr(fd, sgt_addr);
@@ -934,10 +1026,7 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
sgt_map_failed:
dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
data_map_failed:
- if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
- kfree(sgt_buf);
- else
- sgt_cache->buf[sgt_cache->count++] = sgt_buf;
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
return err;
}
@@ -978,6 +1067,7 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
if (unlikely(dma_mapping_error(dev, addr)))
return -ENOMEM;
+ memset(fd, 0, sizeof(struct dpaa2_fd));
dpaa2_fd_set_addr(fd, addr);
dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
dpaa2_fd_set_len(fd, skb->len);
@@ -1005,9 +1095,9 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_swa *swa;
u8 fd_format = dpaa2_fd_get_format(fd);
u32 fd_len = dpaa2_fd_get_len(fd);
-
- struct dpaa2_eth_sgt_cache *sgt_cache;
struct dpaa2_sg_entry *sgt;
+ int should_free_skb = 1;
+ int i;
fd_addr = dpaa2_fd_get_addr(fd);
buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
@@ -1039,6 +1129,28 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
/* Unmap the SGT buffer */
dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
DMA_BIDIRECTIONAL);
+ } else if (swa->type == DPAA2_ETH_SWA_SW_TSO) {
+ skb = swa->tso.skb;
+
+ sgt = (struct dpaa2_sg_entry *)(buffer_start +
+ priv->tx_data_offset);
+
+ /* Unmap and free the header */
+ dma_unmap_single(dev, dpaa2_sg_get_addr(sgt), TSO_HEADER_SIZE,
+ DMA_TO_DEVICE);
+ kfree(dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt)));
+
+ /* Unmap the other SG entries for the data */
+ for (i = 1; i < swa->tso.num_sg; i++)
+ dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
+ dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
+
+ /* Unmap the SGT buffer */
+ dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
+ DMA_BIDIRECTIONAL);
+
+ if (!swa->tso.is_last_fd)
+ should_free_skb = 0;
} else {
skb = swa->single.skb;
@@ -1067,55 +1179,195 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
}
/* Get the timestamp value */
- if (skb->cb[0] == TX_TSTAMP) {
- struct skb_shared_hwtstamps shhwtstamps;
- __le64 *ts = dpaa2_get_ts(buffer_start, true);
- u64 ns;
-
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-
- ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
- shhwtstamps.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(skb, &shhwtstamps);
- } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
- mutex_unlock(&priv->onestep_tstamp_lock);
+ if (swa->type != DPAA2_ETH_SWA_SW_TSO) {
+ if (skb->cb[0] == TX_TSTAMP) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ __le64 *ts = dpaa2_get_ts(buffer_start, true);
+ u64 ns;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+ ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
+ mutex_unlock(&priv->onestep_tstamp_lock);
+ }
}
/* Free SGT buffer allocated on tx */
- if (fd_format != dpaa2_fd_single) {
- sgt_cache = this_cpu_ptr(priv->sgt_cache);
- if (swa->type == DPAA2_ETH_SWA_SG) {
- skb_free_frag(buffer_start);
- } else {
- if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
- kfree(buffer_start);
- else
- sgt_cache->buf[sgt_cache->count++] = buffer_start;
+ if (fd_format != dpaa2_fd_single)
+ dpaa2_eth_sgt_recycle(priv, buffer_start);
+
+ /* Move on with skb release. If we are just confirming multiple FDs
+ * from the same TSO skb then only the last one will need to free the
+ * skb.
+ */
+ if (should_free_skb)
+ napi_consume_skb(skb, in_napi);
+}
+
+static int dpaa2_eth_build_gso_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb, struct dpaa2_fd *fd,
+ int *num_fds, u32 *total_fds_len)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ int hdr_len, total_len, data_left, fd_len;
+ int num_sge, err, i, sgt_buf_size;
+ struct dpaa2_fd *fd_start = fd;
+ struct dpaa2_sg_entry *sgt;
+ struct dpaa2_eth_swa *swa;
+ dma_addr_t sgt_addr, addr;
+ dma_addr_t tso_hdr_dma;
+ unsigned int index = 0;
+ struct tso_t tso;
+ char *tso_hdr;
+ void *sgt_buf;
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ hdr_len = tso_start(skb, &tso);
+ *total_fds_len = 0;
+
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ /* Prepare the HW SGT structure for this frame */
+ sgt_buf = dpaa2_eth_sgt_get(priv);
+ if (unlikely(!sgt_buf)) {
+ netdev_err(priv->net_dev, "dpaa2_eth_sgt_get() failed\n");
+ err = -ENOMEM;
+ goto err_sgt_get;
+ }
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+ /* Determine the data length of this frame */
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+ fd_len = data_left + hdr_len;
+
+ /* Prepare packet headers: MAC + IP + TCP */
+ tso_hdr = kmalloc(TSO_HEADER_SIZE, GFP_ATOMIC);
+ if (!tso_hdr) {
+ err = -ENOMEM;
+ goto err_alloc_tso_hdr;
+ }
+
+ tso_build_hdr(skb, tso_hdr, &tso, data_left, total_len == 0);
+ tso_hdr_dma = dma_map_single(dev, tso_hdr, TSO_HEADER_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, tso_hdr_dma)) {
+ netdev_err(priv->net_dev, "dma_map_single(tso_hdr) failed\n");
+ err = -ENOMEM;
+ goto err_map_tso_hdr;
+ }
+
+ /* Setup the SG entry for the header */
+ dpaa2_sg_set_addr(sgt, tso_hdr_dma);
+ dpaa2_sg_set_len(sgt, hdr_len);
+ dpaa2_sg_set_final(sgt, data_left <= 0);
+
+ /* Compose the SG entries for each fragment of data */
+ num_sge = 1;
+ while (data_left > 0) {
+ int size;
+
+ /* Move to the next SG entry */
+ sgt++;
+ size = min_t(int, tso.size, data_left);
+
+ addr = dma_map_single(dev, tso.data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, addr)) {
+ netdev_err(priv->net_dev, "dma_map_single(tso.data) failed\n");
+ err = -ENOMEM;
+ goto err_map_data;
+ }
+ dpaa2_sg_set_addr(sgt, addr);
+ dpaa2_sg_set_len(sgt, size);
+ dpaa2_sg_set_final(sgt, size == data_left);
+
+ num_sge++;
+
+ /* Build the data for the __next__ fragment */
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+
+ /* Store the skb backpointer in the SGT buffer */
+ sgt_buf_size = priv->tx_data_offset + num_sge * sizeof(struct dpaa2_sg_entry);
+ swa = (struct dpaa2_eth_swa *)sgt_buf;
+ swa->type = DPAA2_ETH_SWA_SW_TSO;
+ swa->tso.skb = skb;
+ swa->tso.num_sg = num_sge;
+ swa->tso.sgt_size = sgt_buf_size;
+ swa->tso.is_last_fd = total_len == 0 ? 1 : 0;
+
+ /* Separately map the SGT buffer */
+ sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, sgt_addr))) {
+ netdev_err(priv->net_dev, "dma_map_single(sgt_buf) failed\n");
+ err = -ENOMEM;
+ goto err_map_sgt;
}
+
+ /* Setup the frame descriptor */
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+ dpaa2_fd_set_addr(fd, sgt_addr);
+ dpaa2_fd_set_len(fd, fd_len);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+ *total_fds_len += fd_len;
+ /* Advance to the next frame descriptor */
+ fd++;
+ index++;
}
- /* Move on with skb release */
- napi_consume_skb(skb, in_napi);
+ *num_fds = index;
+
+ return 0;
+
+err_map_sgt:
+err_map_data:
+ /* Unmap all the data S/G entries for the current FD */
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+ for (i = 1; i < num_sge; i++)
+ dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
+ dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
+
+ /* Unmap the header entry */
+ dma_unmap_single(dev, tso_hdr_dma, TSO_HEADER_SIZE, DMA_TO_DEVICE);
+err_map_tso_hdr:
+ kfree(tso_hdr);
+err_alloc_tso_hdr:
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
+err_sgt_get:
+ /* Free all the other FDs that were already fully created */
+ for (i = 0; i < index; i++)
+ dpaa2_eth_free_tx_fd(priv, NULL, &fd_start[i], false);
+
+ return err;
}
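
dpaa2_eth_build_gso_fd() above drives the generic software-TSO helpers from net/tso.h. Stripped of the DPAA2 SGT/FD bookkeeping and DMA mapping, the segmentation walk reduces to the following minimal sketch (not the driver code itself):

#include <linux/skbuff.h>
#include <net/tso.h>

static void tso_walk_sketch(struct sk_buff *skb)
{
	char hdr[TSO_HEADER_SIZE];
	int hdr_len, total_len, data_left;
	struct tso_t tso;

	hdr_len = tso_start(skb, &tso);	/* parse headers, prime payload */
	total_len = skb->len - hdr_len;

	while (total_len > 0) {
		/* One MSS-sized frame per iteration */
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* Rebuild MAC+IP+TCP headers for this segment */
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);

		while (data_left > 0) {
			int size = min_t(int, tso.size, data_left);

			/* tso.data points at the current payload chunk;
			 * the real driver maps it for DMA here. */
			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}
}
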
static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- struct dpaa2_fd fd;
- struct rtnl_link_stats64 *percpu_stats;
+ int total_enqueued = 0, retries = 0, enqueued;
struct dpaa2_eth_drv_stats *percpu_extras;
+ struct rtnl_link_stats64 *percpu_stats;
+ unsigned int needed_headroom;
+ int num_fds = 1, max_retries;
struct dpaa2_eth_fq *fq;
struct netdev_queue *nq;
+ struct dpaa2_fd *fd;
u16 queue_mapping;
- unsigned int needed_headroom;
- u32 fd_len;
+ void *swa = NULL;
u8 prio = 0;
int err, i;
- void *swa;
+ u32 fd_len;
percpu_stats = this_cpu_ptr(priv->percpu_stats);
percpu_extras = this_cpu_ptr(priv->percpu_extras);
+ fd = (this_cpu_ptr(priv->fd))->array;
needed_headroom = dpaa2_eth_needed_headroom(skb);
@@ -1130,20 +1382,28 @@ static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
}
/* Setup the FD fields */
- memset(&fd, 0, sizeof(fd));
- if (skb_is_nonlinear(skb)) {
- err = dpaa2_eth_build_sg_fd(priv, skb, &fd, &swa);
+ if (skb_is_gso(skb)) {
+ err = dpaa2_eth_build_gso_fd(priv, skb, fd, &num_fds, &fd_len);
+ percpu_extras->tx_sg_frames += num_fds;
+ percpu_extras->tx_sg_bytes += fd_len;
+ percpu_extras->tx_tso_frames += num_fds;
+ percpu_extras->tx_tso_bytes += fd_len;
+ } else if (skb_is_nonlinear(skb)) {
+ err = dpaa2_eth_build_sg_fd(priv, skb, fd, &swa);
percpu_extras->tx_sg_frames++;
percpu_extras->tx_sg_bytes += skb->len;
+ fd_len = dpaa2_fd_get_len(fd);
} else if (skb_headroom(skb) < needed_headroom) {
- err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, &fd, &swa);
+ err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, fd, &swa);
percpu_extras->tx_sg_frames++;
percpu_extras->tx_sg_bytes += skb->len;
percpu_extras->tx_converted_sg_frames++;
percpu_extras->tx_converted_sg_bytes += skb->len;
+ fd_len = dpaa2_fd_get_len(fd);
} else {
- err = dpaa2_eth_build_single_fd(priv, skb, &fd, &swa);
+ err = dpaa2_eth_build_single_fd(priv, skb, fd, &swa);
+ fd_len = dpaa2_fd_get_len(fd);
}
if (unlikely(err)) {
@@ -1151,11 +1411,12 @@ static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
goto err_build_fd;
}
- if (skb->cb[0])
- dpaa2_eth_enable_tx_tstamp(priv, &fd, swa, skb);
+ if (swa && skb->cb[0])
+ dpaa2_eth_enable_tx_tstamp(priv, fd, swa, skb);
/* Tracing point */
- trace_dpaa2_tx_fd(net_dev, &fd);
+ for (i = 0; i < num_fds; i++)
+ trace_dpaa2_tx_fd(net_dev, &fd[i]);
/* TxConf FQ selection relies on queue id from the stack.
* In case of a forwarded frame from another DPNI interface, we choose
@@ -1175,27 +1436,32 @@ static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
queue_mapping %= dpaa2_eth_queue_count(priv);
}
fq = &priv->fq[queue_mapping];
-
- fd_len = dpaa2_fd_get_len(&fd);
nq = netdev_get_tx_queue(net_dev, queue_mapping);
netdev_tx_sent_queue(nq, fd_len);
/* Everything that happens after this enqueues might race with
* the Tx confirmation callback for this frame
*/
- for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
- err = priv->enqueue(priv, fq, &fd, prio, 1, NULL);
- if (err != -EBUSY)
- break;
+ max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
+ while (total_enqueued < num_fds && retries < max_retries) {
+ err = priv->enqueue(priv, fq, &fd[total_enqueued],
+ prio, num_fds - total_enqueued, &enqueued);
+ if (err == -EBUSY) {
+ retries++;
+ continue;
+ }
+
+ total_enqueued += enqueued;
}
- percpu_extras->tx_portal_busy += i;
+ percpu_extras->tx_portal_busy += retries;
+
if (unlikely(err < 0)) {
percpu_stats->tx_errors++;
/* Clean up everything, including freeing the skb */
- dpaa2_eth_free_tx_fd(priv, fq, &fd, false);
+ dpaa2_eth_free_tx_fd(priv, fq, fd, false);
netdev_tx_completed_queue(nq, 1, fd_len);
} else {
- percpu_stats->tx_packets++;
+ percpu_stats->tx_packets += total_enqueued;
percpu_stats->tx_bytes += fd_len;
}
@@ -1523,7 +1789,7 @@ static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
count = sgt_cache->count;
for (i = 0; i < count; i++)
- kfree(sgt_cache->buf[i]);
+ skb_free_frag(sgt_cache->buf[i]);
sgt_cache->count = 0;
}
}
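
Because every SGT buffer now comes from napi_alloc_frag_align() via dpaa2_eth_sgt_get(), the drain above can free uniformly with skb_free_frag() no matter which Tx path (S/G, linear, or TSO) cached the buffer; page fragments must never go back through kfree(). A hedged sketch, reusing the buf_cache shape from the earlier sketch:

#include <linux/percpu.h>
#include <linux/skbuff.h>

static void cache_drain(struct buf_cache __percpu *pcpu)
{
	struct buf_cache *c;
	int cpu, i;

	for_each_possible_cpu(cpu) {
		c = per_cpu_ptr(pcpu, cpu);
		for (i = 0; i < c->count; i++)
			skb_free_frag(c->buf[i]);	/* fragment, not slab */
		c->count = 0;
	}
}
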
@@ -1811,8 +2077,10 @@ static int dpaa2_eth_open(struct net_device *net_dev)
goto enable_err;
}
- if (dpaa2_eth_is_type_phy(priv))
+ if (dpaa2_eth_is_type_phy(priv)) {
+ dpaa2_mac_start(priv->mac);
phylink_start(priv->mac->phylink);
+ }
return 0;
@@ -1887,6 +2155,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
if (dpaa2_eth_is_type_phy(priv)) {
phylink_stop(priv->mac->phylink);
+ dpaa2_mac_stop(priv->mac);
} else {
netif_tx_stop_all_queues(net_dev);
netif_carrier_off(net_dev);
@@ -2207,6 +2476,9 @@ static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
config.rx_filter = HWTSTAMP_FILTER_ALL;
}
+ if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ dpaa2_ptp_onestep_reg_update_method(priv);
+
return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
@@ -4100,6 +4372,8 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
return err;
}
+ dpaa2_eth_detect_features(priv);
+
/* Capabilities listing */
supported |= IFF_LIVE_ADDR_CHANGE;
@@ -4115,7 +4389,8 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
net_dev->features = NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_SG | NETIF_F_HIGHDMA |
- NETIF_F_LLTX | NETIF_F_HW_TC;
+ NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO;
+ net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS;
net_dev->hw_features = net_dev->features;
if (priv->dpni_attrs.vlan_filter_entries)
@@ -4397,6 +4672,13 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
goto err_alloc_sgt_cache;
}
+ priv->fd = alloc_percpu(*priv->fd);
+ if (!priv->fd) {
+ dev_err(dev, "alloc_percpu(fds) failed\n");
+ err = -ENOMEM;
+ goto err_alloc_fds;
+ }
+
err = dpaa2_eth_netdev_init(net_dev);
if (err)
goto err_netdev_init;
@@ -4484,6 +4766,8 @@ err_poll_thread:
err_alloc_rings:
err_csum:
err_netdev_init:
+ free_percpu(priv->fd);
+err_alloc_fds:
free_percpu(priv->sgt_cache);
err_alloc_sgt_cache:
free_percpu(priv->percpu_extras);
@@ -4539,6 +4823,7 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
fsl_mc_free_irqs(ls_dev);
dpaa2_eth_free_rings(priv);
+ free_percpu(priv->fd);
free_percpu(priv->sgt_cache);
free_percpu(priv->percpu_stats);
free_percpu(priv->percpu_extras);
@@ -4547,6 +4832,8 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
dpaa2_eth_free_dpbp(priv);
dpaa2_eth_free_dpio(priv);
dpaa2_eth_free_dpni(priv);
+ if (priv->onestep_reg_base)
+ iounmap(priv->onestep_reg_base);
fsl_mc_portal_free(priv->mc_io);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index e54e70ebdd05..447718483ef4 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -122,6 +122,7 @@ enum dpaa2_eth_swa_type {
DPAA2_ETH_SWA_SINGLE,
DPAA2_ETH_SWA_SG,
DPAA2_ETH_SWA_XDP,
+ DPAA2_ETH_SWA_SW_TSO,
};
/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
@@ -142,6 +143,12 @@ struct dpaa2_eth_swa {
int dma_size;
struct xdp_frame *xdpf;
} xdp;
+ struct {
+ struct sk_buff *skb;
+ int num_sg;
+ int sgt_size;
+ int is_last_fd;
+ } tso;
};
};
@@ -354,6 +361,8 @@ struct dpaa2_eth_drv_stats {
__u64 tx_conf_bytes;
__u64 tx_sg_frames;
__u64 tx_sg_bytes;
+ __u64 tx_tso_frames;
+ __u64 tx_tso_bytes;
__u64 rx_sg_frames;
__u64 rx_sg_bytes;
/* Linear skbs sent as a S/G FD due to insufficient headroom */
@@ -493,8 +502,15 @@ struct dpaa2_eth_trap_data {
struct dpaa2_eth_priv *priv;
};
+#define DPAA2_ETH_SG_ENTRIES_MAX (PAGE_SIZE / sizeof(struct scatterlist))
+
#define DPAA2_ETH_DEFAULT_COPYBREAK 512
+#define DPAA2_ETH_ENQUEUE_MAX_FDS 200
+struct dpaa2_eth_fds {
+ struct dpaa2_fd array[DPAA2_ETH_ENQUEUE_MAX_FDS];
+};
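
The per-CPU FD array bounds how many frames one TSO skb may expand into, and the netdev-init hunk above caps gso_max_segs to the same constant so the array can be indexed without overflow checks. A hedged sketch of how the Tx path picks up its scratch FDs (mirroring the this_cpu_ptr() use in __dpaa2_eth_tx()):

/* ndo_start_xmit runs with bottom halves disabled, so no two senders
 * share a CPU's scratch FD array concurrently. */
static struct dpaa2_fd *tx_scratch_fds(struct dpaa2_eth_priv *priv)
{
	return this_cpu_ptr(priv->fd)->array;	/* up to ENQUEUE_MAX_FDS */
}
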
+
/* Driver private data */
struct dpaa2_eth_priv {
struct net_device *net_dev;
@@ -510,12 +526,15 @@ struct dpaa2_eth_priv {
u8 num_channels;
struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
struct dpaa2_eth_sgt_cache __percpu *sgt_cache;
-
+ unsigned long features;
struct dpni_attr dpni_attrs;
u16 dpni_ver_major;
u16 dpni_ver_minor;
u16 tx_data_offset;
-
+ void __iomem *onestep_reg_base;
+ u8 ptp_correction_off;
+ void (*dpaa2_set_onestep_params_cb)(struct dpaa2_eth_priv *priv,
+ u32 offset, u8 udp);
struct fsl_mc_device *dpbp_dev;
u16 rx_buf_size;
u16 bpid;
@@ -577,6 +596,8 @@ struct dpaa2_eth_priv {
struct devlink_port devlink_port;
u32 rx_copybreak;
+
+ struct dpaa2_eth_fds __percpu *fd;
};
struct dpaa2_eth_devlink_priv {
@@ -655,6 +676,13 @@ enum dpaa2_eth_rx_dist {
#define DPAA2_ETH_DIST_L4DST BIT(8)
#define DPAA2_ETH_DIST_ALL (~0ULL)
+#define DPNI_PTP_ONESTEP_VER_MAJOR 8
+#define DPNI_PTP_ONESTEP_VER_MINOR 2
+#define DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT BIT(0)
+#define DPAA2_PTP_SINGLE_STEP_ENABLE BIT(31)
+#define DPAA2_PTP_SINGLE_STEP_CH BIT(7)
+#define DPAA2_PTP_SINGLE_CORRECTION_OFF(v) ((v) << 8)
+
#define DPNI_PAUSE_VER_MAJOR 7
#define DPNI_PAUSE_VER_MINOR 13
#define dpaa2_eth_has_pause_support(priv) \
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 3fdbf87dccb1..eea7d7a07c00 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -44,6 +44,8 @@ static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
"[drv] tx conf bytes",
"[drv] tx sg frames",
"[drv] tx sg bytes",
+ "[drv] tx tso frames",
+ "[drv] tx tso bytes",
"[drv] rx sg frames",
"[drv] rx sg bytes",
"[drv] tx converted sg frames",
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index 623d113b6581..c48811d3bcd5 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -3,6 +3,7 @@
#include <linux/acpi.h>
#include <linux/pcs-lynx.h>
+#include <linux/phy/phy.h>
#include <linux/property.h>
#include "dpaa2-eth.h"
@@ -11,6 +12,28 @@
#define phylink_to_dpaa2_mac(config) \
container_of((config), struct dpaa2_mac, phylink_config)
+#define DPMAC_PROTOCOL_CHANGE_VER_MAJOR 4
+#define DPMAC_PROTOCOL_CHANGE_VER_MINOR 8
+
+#define DPAA2_MAC_FEATURE_PROTOCOL_CHANGE BIT(0)
+
+static int dpaa2_mac_cmp_ver(struct dpaa2_mac *mac,
+ u16 ver_major, u16 ver_minor)
+{
+ if (mac->ver_major == ver_major)
+ return mac->ver_minor - ver_minor;
+ return mac->ver_major - ver_major;
+}
+
+static void dpaa2_mac_detect_features(struct dpaa2_mac *mac)
+{
+ mac->features = 0;
+
+ if (dpaa2_mac_cmp_ver(mac, DPMAC_PROTOCOL_CHANGE_VER_MAJOR,
+ DPMAC_PROTOCOL_CHANGE_VER_MINOR) >= 0)
+ mac->features |= DPAA2_MAC_FEATURE_PROTOCOL_CHANGE;
+}
+
static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
{
*if_mode = PHY_INTERFACE_MODE_NA;
@@ -38,6 +61,29 @@ static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
return 0;
}
+static enum dpmac_eth_if dpmac_eth_if_mode(phy_interface_t if_mode)
+{
+ switch (if_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return DPMAC_ETH_IF_RGMII;
+ case PHY_INTERFACE_MODE_USXGMII:
+ return DPMAC_ETH_IF_USXGMII;
+ case PHY_INTERFACE_MODE_QSGMII:
+ return DPMAC_ETH_IF_QSGMII;
+ case PHY_INTERFACE_MODE_SGMII:
+ return DPMAC_ETH_IF_SGMII;
+ case PHY_INTERFACE_MODE_10GBASER:
+ return DPMAC_ETH_IF_XFI;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ return DPMAC_ETH_IF_1000BASEX;
+ default:
+ return DPMAC_ETH_IF_MII;
+ }
+}
+
static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev,
u16 dpmac_id)
{
@@ -100,6 +146,14 @@ static int dpaa2_mac_get_if_mode(struct fwnode_handle *dpmac_node,
return err;
}
+static struct phylink_pcs *dpaa2_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+
+ return mac->pcs;
+}
+
static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -117,6 +171,19 @@ static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
if (err)
netdev_err(mac->net_dev, "%s: dpmac_set_link_state() = %d\n",
__func__, err);
+
+ if (!mac->serdes_phy)
+ return;
+
+ /* This happens only if we support changing of protocol at runtime */
+ err = dpmac_set_protocol(mac->mc_io, 0, mac->mc_dev->mc_handle,
+ dpmac_eth_if_mode(state->interface));
+ if (err)
+ netdev_err(mac->net_dev, "dpmac_set_protocol() = %d\n", err);
+
+ err = phy_set_mode_ext(mac->serdes_phy, PHY_MODE_ETHERNET, state->interface);
+ if (err)
+ netdev_err(mac->net_dev, "phy_set_mode_ext() = %d\n", err);
}
static void dpaa2_mac_link_up(struct phylink_config *config,
@@ -172,6 +239,7 @@ static void dpaa2_mac_link_down(struct phylink_config *config,
static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
.validate = phylink_generic_validate,
+ .mac_select_pcs = dpaa2_mac_select_pcs,
.mac_config = dpaa2_mac_config,
.mac_link_up = dpaa2_mac_link_up,
.mac_link_down = dpaa2_mac_link_down,
@@ -226,10 +294,66 @@ static void dpaa2_pcs_destroy(struct dpaa2_mac *mac)
}
}
+static void dpaa2_mac_set_supported_interfaces(struct dpaa2_mac *mac)
+{
+ int intf, err;
+
+ /* We support the current interface mode, and if we have a PCS
+ * similar interface modes that do not require the SerDes lane to be
+ * reconfigured.
+ */
+ __set_bit(mac->if_mode, mac->phylink_config.supported_interfaces);
+ if (mac->pcs) {
+ switch (mac->if_mode) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ mac->phylink_config.supported_interfaces);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (!mac->serdes_phy)
+ return;
+
+ /* In case we have access to the SerDes phy/lane, then ask the SerDes
+ * driver what interfaces are supported based on the current PLL
+ * configuration.
+ */
+ for (intf = 0; intf < PHY_INTERFACE_MODE_MAX; intf++) {
+ if (intf == PHY_INTERFACE_MODE_NA)
+ continue;
+
+ err = phy_validate(mac->serdes_phy, PHY_MODE_ETHERNET, intf, NULL);
+ if (err)
+ continue;
+
+ __set_bit(intf, mac->phylink_config.supported_interfaces);
+ }
+}
+
+void dpaa2_mac_start(struct dpaa2_mac *mac)
+{
+ if (mac->serdes_phy)
+ phy_power_on(mac->serdes_phy);
+}
+
+void dpaa2_mac_stop(struct dpaa2_mac *mac)
+{
+ if (mac->serdes_phy)
+ phy_power_off(mac->serdes_phy);
+}
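
A hedged usage sketch: the consumers patched later in this series (dpaa2-eth and dpaa2-switch open/stop paths) bracket phylink start/stop with these SerDes power hooks, so the lane is only powered while the link is administratively up:

static void example_ifup(struct dpaa2_mac *mac)
{
	dpaa2_mac_start(mac);		/* phy_power_on() when a lane exists */
	phylink_start(mac->phylink);
}

static void example_ifdown(struct dpaa2_mac *mac)
{
	phylink_stop(mac->phylink);
	dpaa2_mac_stop(mac);		/* phy_power_off() */
}
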
+
int dpaa2_mac_connect(struct dpaa2_mac *mac)
{
struct net_device *net_dev = mac->net_dev;
struct fwnode_handle *dpmac_node;
+ struct phy *serdes_phy = NULL;
struct phylink *phylink;
int err;
@@ -246,6 +370,20 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
return -EINVAL;
mac->if_mode = err;
+ if (mac->features & DPAA2_MAC_FEATURE_PROTOCOL_CHANGE &&
+ !phy_interface_mode_is_rgmii(mac->if_mode) &&
+ is_of_node(dpmac_node)) {
+ serdes_phy = of_phy_get(to_of_node(dpmac_node), NULL);
+
+ if (serdes_phy == ERR_PTR(-ENODEV))
+ serdes_phy = NULL;
+ else if (IS_ERR(serdes_phy))
+ return PTR_ERR(serdes_phy);
+ else
+ phy_init(serdes_phy);
+ }
+ mac->serdes_phy = serdes_phy;
+
/* The MAC does not have the capability to add RGMII delays so
* error out if the interface mode requests them and there is no PHY
* to act upon them
@@ -274,25 +412,7 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
MAC_10FD | MAC_100FD | MAC_1000FD | MAC_2500FD | MAC_5000FD |
MAC_10000FD;
- /* We support the current interface mode, and if we have a PCS
- * similar interface modes that do not require the PLLs to be
- * reconfigured.
- */
- __set_bit(mac->if_mode, mac->phylink_config.supported_interfaces);
- if (mac->pcs) {
- switch (mac->if_mode) {
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_SGMII:
- __set_bit(PHY_INTERFACE_MODE_1000BASEX,
- mac->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_SGMII,
- mac->phylink_config.supported_interfaces);
- break;
-
- default:
- break;
- }
- }
+ dpaa2_mac_set_supported_interfaces(mac);
phylink = phylink_create(&mac->phylink_config,
dpmac_node, mac->if_mode,
@@ -303,9 +423,6 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
}
mac->phylink = phylink;
- if (mac->pcs)
- phylink_set_pcs(mac->phylink, mac->pcs);
-
err = phylink_fwnode_phy_connect(mac->phylink, dpmac_node, 0);
if (err) {
netdev_err(net_dev, "phylink_fwnode_phy_connect() = %d\n", err);
@@ -330,6 +447,8 @@ void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
phylink_disconnect_phy(mac->phylink);
phylink_destroy(mac->phylink);
dpaa2_pcs_destroy(mac);
+ of_phy_put(mac->serdes_phy);
+ mac->serdes_phy = NULL;
}
int dpaa2_mac_open(struct dpaa2_mac *mac)
@@ -353,6 +472,14 @@ int dpaa2_mac_open(struct dpaa2_mac *mac)
goto err_close_dpmac;
}
+ err = dpmac_get_api_version(mac->mc_io, 0, &mac->ver_major, &mac->ver_minor);
+ if (err) {
+ netdev_err(net_dev, "dpmac_get_api_version() = %d\n", err);
+ goto err_close_dpmac;
+ }
+
+ dpaa2_mac_detect_features(mac);
+
/* Find the device node representing the MAC device and link the device
* behind the associated netdev to it.
*/
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index 1331a8477fe4..a58cab188a99 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -17,6 +17,8 @@ struct dpaa2_mac {
struct net_device *net_dev;
struct fsl_mc_io *mc_io;
struct dpmac_attr attr;
+ u16 ver_major, ver_minor;
+ unsigned long features;
struct phylink_config phylink_config;
struct phylink *phylink;
@@ -24,6 +26,8 @@ struct dpaa2_mac {
enum dpmac_link_type if_link_type;
struct phylink_pcs *pcs;
struct fwnode_handle *fw_node;
+
+ struct phy *serdes_phy;
};
bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
@@ -43,4 +47,8 @@ void dpaa2_mac_get_strings(u8 *data);
void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data);
+void dpaa2_mac_start(struct dpaa2_mac *mac);
+
+void dpaa2_mac_stop(struct dpaa2_mac *mac);
+
#endif /* DPAA2_MAC_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 9a561072aa4a..e507e9065214 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -703,8 +703,10 @@ static int dpaa2_switch_port_open(struct net_device *netdev)
dpaa2_switch_enable_ctrl_if_napi(ethsw);
- if (dpaa2_switch_port_is_type_phy(port_priv))
+ if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ dpaa2_mac_start(port_priv->mac);
phylink_start(port_priv->mac->phylink);
+ }
return 0;
}
@@ -717,6 +719,7 @@ static int dpaa2_switch_port_stop(struct net_device *netdev)
if (dpaa2_switch_port_is_type_phy(port_priv)) {
phylink_stop(port_priv->mac->phylink);
+ dpaa2_mac_stop(port_priv->mac);
} else {
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
index a24b20f76938..e9ac2ecef3be 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
@@ -19,11 +19,15 @@
#define DPMAC_CMDID_CLOSE DPMAC_CMD(0x800)
#define DPMAC_CMDID_OPEN DPMAC_CMD(0x80c)
+#define DPMAC_CMDID_GET_API_VERSION DPMAC_CMD(0xa0c)
+
#define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004)
#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD_V2(0x0c3)
#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4)
+#define DPMAC_CMDID_SET_PROTOCOL DPMAC_CMD(0x0c7)
+
/* Macros for accessing command fields smaller than 1byte */
#define DPMAC_MASK(field) \
GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \
@@ -70,4 +74,12 @@ struct dpmac_rsp_get_counter {
__le64 counter;
};
+struct dpmac_rsp_get_api_version {
+ __le16 major;
+ __le16 minor;
+};
+
+struct dpmac_cmd_set_protocol {
+ u8 eth_if;
+};
#endif /* _FSL_DPMAC_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.c b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
index d5997b654562..f440a4c3b70c 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
@@ -181,3 +181,57 @@ int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
return 0;
}
+
+/**
+ * dpmac_get_api_version() - Get Data Path MAC version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path mac API
+ * @minor_ver: Minor version of data path mac API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver)
+{
+ struct dpmac_rsp_get_api_version *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpmac_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
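
A hedged sketch of how the version query gates optional features, following the same major/minor comparison dpaa2_mac_detect_features() performs earlier in this patch (the function name here is illustrative):

static bool example_supports_protocol_change(struct fsl_mc_io *mc_io)
{
	u16 major, minor;

	if (dpmac_get_api_version(mc_io, 0, &major, &minor))
		return false;

	/* Feature available from API version 4.8 onwards */
	return major > DPMAC_PROTOCOL_CHANGE_VER_MAJOR ||
	       (major == DPMAC_PROTOCOL_CHANGE_VER_MAJOR &&
		minor >= DPMAC_PROTOCOL_CHANGE_VER_MINOR);
}
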
+
+/**
+ * dpmac_set_protocol() - Reconfigure the DPMAC protocol
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @protocol: New protocol in which the DPMAC is to be reconfigured.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_set_protocol(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_eth_if protocol)
+{
+ struct dpmac_cmd_set_protocol *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_PROTOCOL,
+ cmd_flags, token);
+ cmd_params = (struct dpmac_cmd_set_protocol *)cmd.params;
+ cmd_params->eth_if = protocol;
+
+ return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.h b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
index 8f7ceb731282..17488819ef68 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
@@ -205,4 +205,9 @@ enum dpmac_counter_id {
int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
enum dpmac_counter_id id, u64 *value);
+int dpmac_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver);
+
+int dpmac_set_protocol(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_eth_if protocol);
#endif /* __FSL_DPMAC_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
index 9f80bdfeedec..828f538097af 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -98,7 +98,7 @@
#define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278)
#define DPNI_CMDID_SET_SINGLE_STEP_CFG DPNI_CMD(0x279)
-#define DPNI_CMDID_GET_SINGLE_STEP_CFG DPNI_CMD(0x27a)
+#define DPNI_CMDID_GET_SINGLE_STEP_CFG DPNI_CMD_V2(0x27a)
/* Macros for accessing command fields smaller than 1byte */
#define DPNI_MASK(field) \
@@ -658,12 +658,16 @@ struct dpni_cmd_single_step_cfg {
__le16 flags;
__le16 offset;
__le32 peer_delay;
+ __le32 ptp_onestep_reg_base;
+ __le32 pad0;
};
struct dpni_rsp_single_step_cfg {
__le16 flags;
__le16 offset;
__le32 peer_delay;
+ __le32 ptp_onestep_reg_base;
+ __le32 pad0;
};
struct dpni_cmd_enable_vlan_filter {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index d6afada99fb6..6c3b36f20fb8 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -2136,6 +2136,8 @@ int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
ptp_cfg->ch_update = dpni_get_field(le16_to_cpu(rsp_params->flags),
PTP_CH_UPDATE) ? 1 : 0;
ptp_cfg->peer_delay = le32_to_cpu(rsp_params->peer_delay);
+ ptp_cfg->ptp_onestep_reg_base =
+ le32_to_cpu(rsp_params->ptp_onestep_reg_base);
return err;
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index 7de0562bbf59..6fffd519aa00 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -1074,12 +1074,18 @@ int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
* @peer_delay: For peer-to-peer transparent clocks add this value to the
* correction field in addition to the transient time update.
* The value expresses nanoseconds.
+ * @ptp_onestep_reg_base: 1588 SINGLE_STEP register base address. This address
+ * is used to update the register contents directly.
+ * The user has to create an address mapping for it.
*/
struct dpni_single_step_cfg {
u8 en;
u8 ch_update;
u16 offset;
u32 peer_delay;
+ u32 ptp_onestep_reg_base;
};
int dpni_set_single_step_cfg(struct fsl_mc_io *mc_io,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index fb39e406b7fc..68d806dc3701 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -18,6 +18,8 @@
#define ENETC_MAX_MTU (ENETC_MAC_MAXFRM_SIZE - \
(ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN))
+#define ENETC_CBD_DATA_MEM_ALIGN 64
+
struct enetc_tx_swbd {
union {
struct sk_buff *skb;
@@ -415,6 +417,42 @@ int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
+static inline void *enetc_cbd_alloc_data_mem(struct enetc_si *si,
+ struct enetc_cbd *cbd,
+ int size, dma_addr_t *dma,
+ void **data_align)
+{
+ struct enetc_cbdr *ring = &si->cbd_ring;
+ dma_addr_t dma_align;
+ void *data;
+
+ data = dma_alloc_coherent(ring->dma_dev,
+ size + ENETC_CBD_DATA_MEM_ALIGN,
+ dma, GFP_KERNEL);
+ if (!data) {
+ dev_err(ring->dma_dev, "CBD alloc data memory failed!\n");
+ return NULL;
+ }
+
+ dma_align = ALIGN(*dma, ENETC_CBD_DATA_MEM_ALIGN);
+ *data_align = PTR_ALIGN(data, ENETC_CBD_DATA_MEM_ALIGN);
+
+ cbd->addr[0] = cpu_to_le32(lower_32_bits(dma_align));
+ cbd->addr[1] = cpu_to_le32(upper_32_bits(dma_align));
+ cbd->length = cpu_to_le16(size);
+
+ return data;
+}
+
+static inline void enetc_cbd_free_data_mem(struct enetc_si *si, int size,
+ void *data, dma_addr_t *dma)
+{
+ struct enetc_cbdr *ring = &si->cbd_ring;
+
+ dma_free_coherent(ring->dma_dev, size + ENETC_CBD_DATA_MEM_ALIGN,
+ data, *dma);
+}
+
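
A hedged usage sketch of the new CBD helpers, following the pattern the converted call sites below use: allocate an aligned coherent buffer, let the helper fill cbd->addr[]/length, send the command, then release with the matching helper (the function name is illustrative):

static int example_cbd_xfer(struct enetc_si *si, const void *src, int size)
{
	struct enetc_cbd cbd = {.cmd = 0};
	void *tmp, *tmp_align;
	dma_addr_t dma;
	int err;

	tmp = enetc_cbd_alloc_data_mem(si, &cbd, size, &dma, &tmp_align);
	if (!tmp)
		return -ENOMEM;

	memcpy(tmp_align, src, size);	/* write through the aligned view */
	err = enetc_send_cmd(si, &cbd);

	enetc_cbd_free_data_mem(si, size, tmp, &dma);
	return err;
}
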
#ifdef CONFIG_FSL_ENETC_QOS
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
index 073e56dcca4e..af68dc46a795 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -166,70 +166,55 @@ int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
return enetc_send_cmd(si, &cbd);
}
-#define RFSE_ALIGN 64
/* Set entry in RFS table */
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
int index)
{
struct enetc_cbdr *ring = &si->cbd_ring;
struct enetc_cbd cbd = {.cmd = 0};
- dma_addr_t dma, dma_align;
void *tmp, *tmp_align;
+ dma_addr_t dma;
int err;
/* fill up the "set" descriptor */
cbd.cmd = 0;
cbd.cls = 4;
cbd.index = cpu_to_le16(index);
- cbd.length = cpu_to_le16(sizeof(*rfse));
cbd.opt[3] = cpu_to_le32(0); /* SI */
- tmp = dma_alloc_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN,
- &dma, GFP_KERNEL);
- if (!tmp) {
- dev_err(ring->dma_dev, "DMA mapping of RFS entry failed!\n");
+ tmp = enetc_cbd_alloc_data_mem(si, &cbd, sizeof(*rfse),
+ &dma, &tmp_align);
+ if (!tmp)
return -ENOMEM;
- }
- dma_align = ALIGN(dma, RFSE_ALIGN);
- tmp_align = PTR_ALIGN(tmp, RFSE_ALIGN);
memcpy(tmp_align, rfse, sizeof(*rfse));
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
-
err = enetc_send_cmd(si, &cbd);
if (err)
dev_err(ring->dma_dev, "FS entry add failed (%d)!", err);
- dma_free_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN,
- tmp, dma);
+ enetc_cbd_free_data_mem(si, sizeof(*rfse), tmp, &dma);
return err;
}
-#define RSSE_ALIGN 64
static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
bool read)
{
struct enetc_cbdr *ring = &si->cbd_ring;
struct enetc_cbd cbd = {.cmd = 0};
- dma_addr_t dma, dma_align;
u8 *tmp, *tmp_align;
+ dma_addr_t dma;
int err, i;
- if (count < RSSE_ALIGN)
+ if (count < ENETC_CBD_DATA_MEM_ALIGN)
/* HW only takes in a full 64 entry table */
return -EINVAL;
- tmp = dma_alloc_coherent(ring->dma_dev, count + RSSE_ALIGN,
- &dma, GFP_KERNEL);
- if (!tmp) {
- dev_err(ring->dma_dev, "DMA mapping of RSS table failed!\n");
+ tmp = enetc_cbd_alloc_data_mem(si, &cbd, count,
+ &dma, (void *)&tmp_align);
+ if (!tmp)
return -ENOMEM;
- }
- dma_align = ALIGN(dma, RSSE_ALIGN);
- tmp_align = PTR_ALIGN(tmp, RSSE_ALIGN);
if (!read)
for (i = 0; i < count; i++)
@@ -238,10 +223,6 @@ static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
/* fill up the descriptor */
cbd.cmd = read ? 2 : 1;
cbd.cls = 3;
- cbd.length = cpu_to_le16(count);
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
err = enetc_send_cmd(si, &cbd);
if (err)
@@ -251,7 +232,7 @@ static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
for (i = 0; i < count; i++)
table[i] = tmp_align[i];
- dma_free_coherent(ring->dma_dev, count + RSSE_ALIGN, tmp, dma);
+ enetc_cbd_free_data_mem(si, count, tmp, &dma);
return err;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 1514e6a4a3ff..ce5b677e8c2f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -881,7 +881,7 @@ struct sgcl_data {
u32 bth;
u32 ct;
u32 cte;
- struct sgce sgcl[0];
+ struct sgce sgcl[];
};
#define ENETC_CBDR_FMI_MR BIT(0)
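
With sgcl[] now a true flexible array member, allocation sizes are computed with struct_size(), which also guards the size arithmetic against overflow; the QoS code later in this patch does exactly this. A minimal sketch:

#include <linux/overflow.h>

static size_t sgcl_data_bytes(struct sgcl_data *p, unsigned int n)
{
	/* header plus n trailing struct sgce entries, overflow-checked */
	return struct_size(p, sgcl, n);
}
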
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index 70e6d97b380f..1c8f5cc6dec4 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -147,7 +147,7 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
/* return all Fs if nothing was there */
if (enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_RD_ER) {
dev_dbg(&bus->dev,
- "Error while reading PHY%d reg at %d.%hhu\n",
+ "Error while reading PHY%d reg at %d.%d\n",
phy_id, dev_addr, regnum);
return 0xffff;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index ed16a5ac9ad0..a0c75c717073 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -934,18 +934,21 @@ static void enetc_mdiobus_destroy(struct enetc_pf *pf)
enetc_imdio_remove(pf);
}
+static struct phylink_pcs *
+enetc_pl_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+
+ return pf->pcs;
+}
+
static void enetc_pl_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
struct enetc_pf *pf = phylink_to_enetc_pf(config);
- struct enetc_ndev_priv *priv;
enetc_mac_config(&pf->si->hw, state->interface);
-
- priv = netdev_priv(pf->si->ndev);
- if (pf->pcs)
- phylink_set_pcs(priv->phylink, pf->pcs);
}
static void enetc_force_rgmii_mac(struct enetc_hw *hw, int speed, int duplex)
@@ -1062,6 +1065,7 @@ static void enetc_pl_mac_link_down(struct phylink_config *config,
static const struct phylink_mac_ops enetc_mac_phylink_ops = {
.validate = phylink_generic_validate,
+ .mac_select_pcs = enetc_pl_mac_select_pcs,
.mac_config = enetc_pl_mac_config,
.mac_link_up = enetc_pl_mac_link_up,
.mac_link_down = enetc_pl_mac_link_down,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 3555c12edb45..79afb1d7289b 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -52,10 +52,11 @@ static int enetc_setup_taprio(struct net_device *ndev,
struct enetc_cbd cbd = {.cmd = 0};
struct tgs_gcl_conf *gcl_config;
struct tgs_gcl_data *gcl_data;
- struct gce *gce;
dma_addr_t dma;
+ struct gce *gce;
u16 data_size;
u16 gcl_len;
+ void *tmp;
u32 tge;
int err;
int i;
@@ -82,8 +83,9 @@ static int enetc_setup_taprio(struct net_device *ndev,
gcl_config = &cbd.gcl_conf;
data_size = struct_size(gcl_data, entry, gcl_len);
- gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
- if (!gcl_data)
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&gcl_data);
+ if (!tmp)
return -ENOMEM;
gce = (struct gce *)(gcl_data + 1);
@@ -107,19 +109,8 @@ static int enetc_setup_taprio(struct net_device *ndev,
temp_gce->period = cpu_to_le32(temp_entry->interval);
}
- cbd.length = cpu_to_le16(data_size);
cbd.status_flags = 0;
- dma = dma_map_single(&priv->si->pdev->dev, gcl_data,
- data_size, DMA_TO_DEVICE);
- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- kfree(gcl_data);
- return -ENOMEM;
- }
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
cbd.cls = BDCR_CMD_PORT_GCL;
cbd.status_flags = 0;
@@ -132,8 +123,7 @@ static int enetc_setup_taprio(struct net_device *ndev,
ENETC_QBV_PTGCR_OFFSET,
tge & (~ENETC_QBV_TGE));
- dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE);
- kfree(gcl_data);
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
return err;
}
@@ -450,6 +440,7 @@ static struct actions_fwd enetc_act_fwd[] = {
};
static struct enetc_psfp epsfp = {
+ .dev_bitmap = 0,
.psfp_sfi_bitmap = NULL,
};
@@ -463,8 +454,9 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
struct enetc_cbd cbd = {.cmd = 0};
struct streamid_data *si_data;
struct streamid_conf *si_conf;
- u16 data_size;
dma_addr_t dma;
+ u16 data_size;
+ void *tmp;
int port;
int err;
@@ -485,21 +477,11 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
cbd.status_flags = 0;
data_size = sizeof(struct streamid_data);
- si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
- if (!si_data)
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&si_data);
+ if (!tmp)
return -ENOMEM;
- cbd.length = cpu_to_le16(data_size);
- dma = dma_map_single(&priv->si->pdev->dev, si_data,
- data_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- err = -ENOMEM;
- goto out;
- }
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
eth_broadcast_addr(si_data->dmac);
si_data->vid_vidm_tg = (ENETC_CBDR_SID_VID_MASK
+ ((0x3 << 14) | ENETC_CBDR_SID_VIDM));
@@ -520,11 +502,6 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
goto out;
/* Enable the entry overwrite again in case space was flushed by hardware */
- memset(&cbd, 0, sizeof(cbd));
-
- cbd.index = cpu_to_le16((u16)sid->index);
- cbd.cmd = 0;
- cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
cbd.status_flags = 0;
si_conf->en = 0x80;
@@ -537,11 +514,6 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
memset(si_data, 0, data_size);
- cbd.length = cpu_to_le16(data_size);
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
-
/* VIDM default to be 1.
* VID Match. If set (b1) then the VID must match, otherwise
* any VID is considered a match. VIDM setting is only used
@@ -561,10 +533,7 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
err = enetc_send_cmd(priv->si, &cbd);
out:
- if (!dma_mapping_error(&priv->si->pdev->dev, dma))
- dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_FROM_DEVICE);
-
- kfree(si_data);
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
return err;
}
@@ -635,6 +604,7 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
struct sfi_counter_data *data_buf;
dma_addr_t dma;
u16 data_size;
+ void *tmp;
int err;
cbd.index = cpu_to_le16((u16)index);
@@ -643,21 +613,11 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
cbd.status_flags = 0;
data_size = sizeof(struct sfi_counter_data);
- data_buf = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
- if (!data_buf)
- return -ENOMEM;
-
- dma = dma_map_single(&priv->si->pdev->dev, data_buf,
- data_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- err = -ENOMEM;
- goto exit;
- }
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
- cbd.length = cpu_to_le16(data_size);
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&data_buf);
+ if (!tmp)
+ return -ENOMEM;
err = enetc_send_cmd(priv->si, &cbd);
if (err)
@@ -684,7 +644,8 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
data_buf->flow_meter_dropl;
exit:
- kfree(data_buf);
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
+
return err;
}
@@ -726,6 +687,7 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
dma_addr_t dma;
u16 data_size;
int err, i;
+ void *tmp;
u64 now;
cbd.index = cpu_to_le16(sgi->index);
@@ -772,25 +734,11 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3;
data_size = struct_size(sgcl_data, sgcl, sgi->num_entries);
-
- sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
- if (!sgcl_data)
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&sgcl_data);
+ if (!tmp)
return -ENOMEM;
- cbd.length = cpu_to_le16(data_size);
-
- dma = dma_map_single(&priv->si->pdev->dev,
- sgcl_data, data_size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- kfree(sgcl_data);
- return -ENOMEM;
- }
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
-
sgce = &sgcl_data->sgcl[0];
sgcl_config->agtst = 0x80;
@@ -844,8 +792,7 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
err = enetc_send_cmd(priv->si, &cbd);
exit:
- kfree(sgcl_data);
-
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
return err;
}
@@ -1074,6 +1021,46 @@ static struct actions_fwd *enetc_check_flow_actions(u64 acts,
return NULL;
}
+static int enetc_psfp_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
struct flow_cls_offload *f)
{
@@ -1230,11 +1217,10 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
/* Flow meter and max frame size */
if (entryp) {
- if (entryp->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
- err = -EOPNOTSUPP;
+ err = enetc_psfp_policer_validate(&rule->action, entryp, extack);
+ if (err)
goto free_sfi;
- }
+
if (entryp->police.burst) {
fmi = kzalloc(sizeof(*fmi), GFP_KERNEL);
if (!fmi) {
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 796133de527e..11227f51404c 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2797,7 +2797,7 @@ static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
int ret = 0;
if (enable) {
- ret = phy_init_eee(ndev->phydev, 0);
+ ret = phy_init_eee(ndev->phydev, false);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index af99017a5453..7d49c28215f3 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -101,7 +101,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
u32 val, tempval;
struct timespec64 ts;
u64 ns;
- val = 0;
if (fep->pps_enable == enable)
return 0;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index ff756265d58f..9a2c16d69e2c 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1464,6 +1464,7 @@ static int gfar_get_ts_info(struct net_device *dev,
ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp");
if (ptp_node) {
ptp_dev = of_find_device_by_node(ptp_node);
+ of_node_put(ptp_node);
if (ptp_dev)
ptp = platform_get_drvdata(ptp_dev);
}
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index 266e562bd67a..ec90da1de030 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -14,6 +14,7 @@
#include <linux/acpi.h>
#include <linux/acpi_mdio.h>
+#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mdio.h>
@@ -36,9 +37,10 @@ struct tgec_mdio_controller {
} __packed;
#define MDIO_STAT_ENC BIT(6)
-#define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8)
+#define MDIO_STAT_CLKDIV(x) (((x) & 0x1ff) << 7)
#define MDIO_STAT_BSY BIT(0)
#define MDIO_STAT_RD_ER BIT(1)
+#define MDIO_STAT_PRE_DIS BIT(5)
#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f)
#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5)
#define MDIO_CTL_PRE_DIS BIT(10)
@@ -50,6 +52,8 @@ struct tgec_mdio_controller {
struct mdio_fsl_priv {
struct tgec_mdio_controller __iomem *mdio_base;
+ struct clk *enet_clk;
+ u32 mdc_freq;
bool is_little_endian;
bool has_a009885;
bool has_a011043;
@@ -239,7 +243,7 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
!priv->has_a011043) {
dev_dbg(&bus->dev,
- "Error while reading PHY%d reg at %d.%hhu\n",
+ "Error while reading PHY%d reg at %d.%d\n",
phy_id, dev_addr, regnum);
ret = 0xffff;
} else {
@@ -254,6 +258,50 @@ irq_restore:
return ret;
}
+static int xgmac_mdio_set_mdc_freq(struct mii_bus *bus)
+{
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ struct device *dev = bus->parent;
+ u32 mdio_stat, div;
+
+ if (device_property_read_u32(dev, "clock-frequency", &priv->mdc_freq))
+ return 0;
+
+ priv->enet_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->enet_clk)) {
+ dev_err(dev, "Input clock unknown, not changing MDC frequency");
+ return PTR_ERR(priv->enet_clk);
+ }
+
+ div = ((clk_get_rate(priv->enet_clk) / priv->mdc_freq) - 1) / 2;
+ if (div < 5 || div > 0x1ff) {
+ dev_err(dev, "Requested MDC frequency is out of range, ignoring");
+ return -EINVAL;
+ }
+
+ mdio_stat = xgmac_read32(&regs->mdio_stat, priv->is_little_endian);
+ mdio_stat &= ~MDIO_STAT_CLKDIV(0x1ff);
+ mdio_stat |= MDIO_STAT_CLKDIV(div);
+ xgmac_write32(mdio_stat, &regs->mdio_stat, priv->is_little_endian);
+ return 0;
+}
+
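/*
 * Worked example for the divider computation above (illustrative
 * numbers, not from the patch): with a 500 MHz input clock and a
 * requested "clock-frequency" of 2500000 (2.5 MHz),
 *
 *   div = ((500000000 / 2500000) - 1) / 2 = 99,
 *
 * which falls inside the accepted [5, 0x1ff] window and is written
 * into the 9-bit CLKDIV field that the reworked MDIO_STAT_CLKDIV()
 * macro places at bit 7 of MDIO_STAT.
 */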
+static void xgmac_mdio_set_suppress_preamble(struct mii_bus *bus)
+{
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ struct device *dev = bus->parent;
+ u32 mdio_stat;
+
+ if (!device_property_read_bool(dev, "suppress-preamble"))
+ return;
+
+ mdio_stat = xgmac_read32(&regs->mdio_stat, priv->is_little_endian);
+ mdio_stat |= MDIO_STAT_PRE_DIS;
+ xgmac_write32(mdio_stat, &regs->mdio_stat, priv->is_little_endian);
+}
+
static int xgmac_mdio_probe(struct platform_device *pdev)
{
struct fwnode_handle *fwnode;
@@ -273,7 +321,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
return -EINVAL;
}
- bus = mdiobus_alloc_size(sizeof(struct mdio_fsl_priv));
+ bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(struct mdio_fsl_priv));
if (!bus)
return -ENOMEM;
@@ -284,13 +332,11 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
bus->probe_capabilities = MDIOBUS_C22_C45;
snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res->start);
- /* Set the PHY base address */
priv = bus->priv;
- priv->mdio_base = ioremap(res->start, resource_size(res));
- if (!priv->mdio_base) {
- ret = -ENOMEM;
- goto err_ioremap;
- }
+ priv->mdio_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!priv->mdio_base)
+ return -ENOMEM;
/* For both ACPI and DT cases, endianness of MDIO controller
* needs to be specified using "little-endian" property.
@@ -303,6 +349,12 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
priv->has_a011043 = device_property_read_bool(&pdev->dev,
"fsl,erratum-a011043");
+ xgmac_mdio_set_suppress_preamble(bus);
+
+ ret = xgmac_mdio_set_mdc_freq(bus);
+ if (ret)
+ return ret;
+
fwnode = pdev->dev.fwnode;
if (is_of_node(fwnode))
ret = of_mdiobus_register(bus, to_of_node(fwnode));
@@ -312,32 +364,12 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
ret = -EINVAL;
if (ret) {
dev_err(&pdev->dev, "cannot register MDIO bus\n");
- goto err_registration;
+ return ret;
}
platform_set_drvdata(pdev, bus);
return 0;
-
-err_registration:
- iounmap(priv->mdio_base);
-
-err_ioremap:
- mdiobus_free(bus);
-
- return ret;
-}
-
-static int xgmac_mdio_remove(struct platform_device *pdev)
-{
- struct mii_bus *bus = platform_get_drvdata(pdev);
- struct mdio_fsl_priv *priv = bus->priv;
-
- mdiobus_unregister(bus);
- iounmap(priv->mdio_base);
- mdiobus_free(bus);
-
- return 0;
}
static const struct of_device_id xgmac_mdio_match[] = {
@@ -364,7 +396,6 @@ static struct platform_driver xgmac_mdio_driver = {
.acpi_match_table = xgmac_acpi_match,
},
.probe = xgmac_mdio_probe,
- .remove = xgmac_mdio_remove,
};
module_platform_driver(xgmac_mdio_driver);
diff --git a/drivers/net/ethernet/fungible/Kconfig b/drivers/net/ethernet/fungible/Kconfig
new file mode 100644
index 000000000000..1ecedecc0f6c
--- /dev/null
+++ b/drivers/net/ethernet/fungible/Kconfig
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Fungible network driver configuration
+#
+
+config NET_VENDOR_FUNGIBLE
+ bool "Fungible devices"
+ default y
+ help
+ If you have a Fungible network device, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Fungible cards. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_FUNGIBLE
+
+config FUN_CORE
+ tristate
+ select SBITMAP
+ help
+ A service module offering basic common services to Fungible
+ device drivers.
+
+source "drivers/net/ethernet/fungible/funeth/Kconfig"
+
+endif # NET_VENDOR_FUNGIBLE
diff --git a/drivers/net/ethernet/fungible/Makefile b/drivers/net/ethernet/fungible/Makefile
new file mode 100644
index 000000000000..df759f1585a1
--- /dev/null
+++ b/drivers/net/ethernet/fungible/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+#
+# Makefile for the Fungible network device drivers.
+#
+
+obj-$(CONFIG_FUN_CORE) += funcore/
+obj-$(CONFIG_FUN_ETH) += funeth/
diff --git a/drivers/net/ethernet/fungible/funcore/Makefile b/drivers/net/ethernet/fungible/funcore/Makefile
new file mode 100644
index 000000000000..bc16b264b53e
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+obj-$(CONFIG_FUN_CORE) += funcore.o
+
+funcore-y := fun_dev.o fun_queue.o
diff --git a/drivers/net/ethernet/fungible/funcore/fun_dev.c b/drivers/net/ethernet/fungible/funcore/fun_dev.c
new file mode 100644
index 000000000000..5d7aef73df61
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/fun_dev.c
@@ -0,0 +1,843 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/aer.h>
+#include <linux/bitmap.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/nvme.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+#include <linux/sched/signal.h>
+
+#include "fun_queue.h"
+#include "fun_dev.h"
+
+#define FUN_ADMIN_CMD_TO_MS 3000
+
+enum {
+ AQA_ASQS_SHIFT = 0,
+ AQA_ACQS_SHIFT = 16,
+ AQA_MIN_QUEUE_SIZE = 2,
+ AQA_MAX_QUEUE_SIZE = 4096
+};
+
+/* context for admin commands */
+struct fun_cmd_ctx {
+ fun_admin_callback_t cb; /* callback to invoke on completion */
+ void *cb_data; /* user data provided to callback */
+ int cpu; /* CPU where the cmd's tag was allocated */
+};
+
+/* Context for synchronous admin commands. */
+struct fun_sync_cmd_ctx {
+ struct completion compl;
+ u8 *rsp_buf; /* caller provided response buffer */
+ unsigned int rsp_len; /* response buffer size */
+ u8 rsp_status; /* command response status */
+};
+
+/* Wait for the CSTS.RDY bit to match @enabled. */
+static int fun_wait_ready(struct fun_dev *fdev, bool enabled)
+{
+ unsigned int cap_to = NVME_CAP_TIMEOUT(fdev->cap_reg);
+ u32 bit = enabled ? NVME_CSTS_RDY : 0;
+ unsigned long deadline;
+
+ deadline = ((cap_to + 1) * HZ / 2) + jiffies; /* CAP.TO is in 500 ms units */
+
+ for (;;) {
+ u32 csts = readl(fdev->bar + NVME_REG_CSTS);
+
+ if (csts == ~0) {
+ dev_err(fdev->dev, "CSTS register read %#x\n", csts);
+ return -EIO;
+ }
+
+ if ((csts & NVME_CSTS_RDY) == bit)
+ return 0;
+
+ if (time_is_before_jiffies(deadline))
+ break;
+
+ msleep(100);
+ }
+
+ dev_err(fdev->dev,
+ "Timed out waiting for device to indicate RDY %u; aborting %s\n",
+ enabled, enabled ? "initialization" : "reset");
+ return -ETIMEDOUT;
+}
+
+/* Check CSTS and return an error if it is unreadable or has an
+ * unexpected RDY value.
+ */
+static int fun_check_csts_rdy(struct fun_dev *fdev, unsigned int expected_rdy)
+{
+ u32 csts = readl(fdev->bar + NVME_REG_CSTS);
+ u32 actual_rdy = csts & NVME_CSTS_RDY;
+
+ if (csts == ~0) {
+ dev_err(fdev->dev, "CSTS register read %#x\n", csts);
+ return -EIO;
+ }
+ if (actual_rdy != expected_rdy) {
+ dev_err(fdev->dev, "Unexpected CSTS RDY %u\n", actual_rdy);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Check that CSTS RDY has the expected value. Then write a new value to the CC
+ * register and wait for CSTS RDY to match the new CC ENABLE state.
+ */
+static int fun_update_cc_enable(struct fun_dev *fdev, unsigned int initial_rdy)
+{
+ int rc = fun_check_csts_rdy(fdev, initial_rdy);
+
+ if (rc)
+ return rc;
+ writel(fdev->cc_reg, fdev->bar + NVME_REG_CC);
+ return fun_wait_ready(fdev, !!(fdev->cc_reg & NVME_CC_ENABLE));
+}
+
+static int fun_disable_ctrl(struct fun_dev *fdev)
+{
+ fdev->cc_reg &= ~(NVME_CC_SHN_MASK | NVME_CC_ENABLE);
+ return fun_update_cc_enable(fdev, 1);
+}
+
+static int fun_enable_ctrl(struct fun_dev *fdev, u32 admin_cqesz_log2,
+ u32 admin_sqesz_log2)
+{
+ fdev->cc_reg = (admin_cqesz_log2 << NVME_CC_IOCQES_SHIFT) |
+ (admin_sqesz_log2 << NVME_CC_IOSQES_SHIFT) |
+ ((PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT) |
+ NVME_CC_ENABLE;
+
+ return fun_update_cc_enable(fdev, 0);
+}
+
+static int fun_map_bars(struct fun_dev *fdev, const char *name)
+{
+ struct pci_dev *pdev = to_pci_dev(fdev->dev);
+ int err;
+
+ err = pci_request_mem_regions(pdev, name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Couldn't get PCI memory resources, err %d\n", err);
+ return err;
+ }
+
+ fdev->bar = pci_ioremap_bar(pdev, 0);
+ if (!fdev->bar) {
+ dev_err(&pdev->dev, "Couldn't map BAR 0\n");
+ pci_release_mem_regions(pdev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void fun_unmap_bars(struct fun_dev *fdev)
+{
+ struct pci_dev *pdev = to_pci_dev(fdev->dev);
+
+ if (fdev->bar) {
+ iounmap(fdev->bar);
+ fdev->bar = NULL;
+ pci_release_mem_regions(pdev);
+ }
+}
+
+static int fun_set_dma_masks(struct device *dev)
+{
+ int err;
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (err)
+ dev_err(dev, "DMA mask configuration failed, err %d\n", err);
+ return err;
+}
+
+static irqreturn_t fun_admin_irq(int irq, void *data)
+{
+ struct fun_queue *funq = data;
+
+ return fun_process_cq(funq, 0) ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static void fun_complete_admin_cmd(struct fun_queue *funq, void *data,
+ void *entry, const struct fun_cqe_info *info)
+{
+ const struct fun_admin_rsp_common *rsp_common = entry;
+ struct fun_dev *fdev = funq->fdev;
+ struct fun_cmd_ctx *cmd_ctx;
+ int cpu;
+ u16 cid;
+
+ if (info->sqhd == cpu_to_be16(0xffff)) {
+ dev_dbg(fdev->dev, "adminq event");
+ if (fdev->adminq_cb)
+ fdev->adminq_cb(fdev, entry);
+ return;
+ }
+
+ cid = be16_to_cpu(rsp_common->cid);
+ dev_dbg(fdev->dev, "admin CQE cid %u, op %u, ret %u\n", cid,
+ rsp_common->op, rsp_common->ret);
+
+ cmd_ctx = &fdev->cmd_ctx[cid];
+ if (cmd_ctx->cpu < 0) {
+ dev_err(fdev->dev,
+ "admin CQE with CID=%u, op=%u does not match a pending command\n",
+ cid, rsp_common->op);
+ return;
+ }
+
+ if (cmd_ctx->cb)
+ cmd_ctx->cb(fdev, entry, xchg(&cmd_ctx->cb_data, NULL));
+
+ cpu = cmd_ctx->cpu;
+ cmd_ctx->cpu = -1;
+ sbitmap_queue_clear(&fdev->admin_sbq, cid, cpu);
+}
+
+static int fun_init_cmd_ctx(struct fun_dev *fdev, unsigned int ntags)
+{
+ unsigned int i;
+
+ fdev->cmd_ctx = kvcalloc(ntags, sizeof(*fdev->cmd_ctx), GFP_KERNEL);
+ if (!fdev->cmd_ctx)
+ return -ENOMEM;
+
+ for (i = 0; i < ntags; i++)
+ fdev->cmd_ctx[i].cpu = -1;
+
+ return 0;
+}
+
+/* Allocate and enable an admin queue and assign it the first IRQ vector. */
+static int fun_enable_admin_queue(struct fun_dev *fdev,
+ const struct fun_dev_params *areq)
+{
+ struct fun_queue_alloc_req qreq = {
+ .cqe_size_log2 = areq->cqe_size_log2,
+ .sqe_size_log2 = areq->sqe_size_log2,
+ .cq_depth = areq->cq_depth,
+ .sq_depth = areq->sq_depth,
+ .rq_depth = areq->rq_depth,
+ };
+ unsigned int ntags = areq->sq_depth - 1;
+ struct fun_queue *funq;
+ int rc;
+
+ if (fdev->admin_q)
+ return -EEXIST;
+
+ if (areq->sq_depth < AQA_MIN_QUEUE_SIZE ||
+ areq->sq_depth > AQA_MAX_QUEUE_SIZE ||
+ areq->cq_depth < AQA_MIN_QUEUE_SIZE ||
+ areq->cq_depth > AQA_MAX_QUEUE_SIZE)
+ return -EINVAL;
+
+ fdev->admin_q = fun_alloc_queue(fdev, 0, &qreq);
+ if (!fdev->admin_q)
+ return -ENOMEM;
+
+ rc = fun_init_cmd_ctx(fdev, ntags);
+ if (rc)
+ goto free_q;
+
+ rc = sbitmap_queue_init_node(&fdev->admin_sbq, ntags, -1, false,
+ GFP_KERNEL, dev_to_node(fdev->dev));
+ if (rc)
+ goto free_cmd_ctx;
+
+ funq = fdev->admin_q;
+ funq->cq_vector = 0;
+ rc = fun_request_irq(funq, dev_name(fdev->dev), fun_admin_irq, funq);
+ if (rc)
+ goto free_sbq;
+
+ fun_set_cq_callback(funq, fun_complete_admin_cmd, NULL);
+ fdev->adminq_cb = areq->event_cb;
+
+ writel((funq->sq_depth - 1) << AQA_ASQS_SHIFT |
+ (funq->cq_depth - 1) << AQA_ACQS_SHIFT,
+ fdev->bar + NVME_REG_AQA);
+
+ writeq(funq->sq_dma_addr, fdev->bar + NVME_REG_ASQ);
+ writeq(funq->cq_dma_addr, fdev->bar + NVME_REG_ACQ);
+
+ rc = fun_enable_ctrl(fdev, areq->cqe_size_log2, areq->sqe_size_log2);
+ if (rc)
+ goto free_irq;
+
+ if (areq->rq_depth) {
+ rc = fun_create_rq(funq);
+ if (rc)
+ goto disable_ctrl;
+
+ funq_rq_post(funq);
+ }
+
+ return 0;
+
+disable_ctrl:
+ fun_disable_ctrl(fdev);
+free_irq:
+ fun_free_irq(funq);
+free_sbq:
+ sbitmap_queue_free(&fdev->admin_sbq);
+free_cmd_ctx:
+ kvfree(fdev->cmd_ctx);
+ fdev->cmd_ctx = NULL;
+free_q:
+ fun_free_queue(fdev->admin_q);
+ fdev->admin_q = NULL;
+ return rc;
+}
+
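/*
 * Note on the AQA write in fun_enable_admin_queue() above: as in NVMe,
 * the admin queue attributes register takes zero-based queue sizes,
 * hence the (depth - 1) values packed at AQA_ASQS_SHIFT (bit 0) and
 * AQA_ACQS_SHIFT (bit 16) before the SQ/CQ base addresses are written
 * to the ASQ/ACQ registers.
 */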
+static void fun_disable_admin_queue(struct fun_dev *fdev)
+{
+ struct fun_queue *admq = fdev->admin_q;
+
+ if (!admq)
+ return;
+
+ fun_disable_ctrl(fdev);
+
+ fun_free_irq(admq);
+ __fun_process_cq(admq, 0);
+
+ sbitmap_queue_free(&fdev->admin_sbq);
+
+ kvfree(fdev->cmd_ctx);
+ fdev->cmd_ctx = NULL;
+
+ fun_free_queue(admq);
+ fdev->admin_q = NULL;
+}
+
+/* Return %true if the admin queue has stopped servicing commands, as far as
+ * can be detected through registers. This isn't exhaustive and may provide
+ * false negatives.
+ */
+static bool fun_adminq_stopped(struct fun_dev *fdev)
+{
+ u32 csts = readl(fdev->bar + NVME_REG_CSTS);
+
+ return (csts & (NVME_CSTS_CFS | NVME_CSTS_RDY)) != NVME_CSTS_RDY;
+}
+
+static int fun_wait_for_tag(struct fun_dev *fdev, int *cpup)
+{
+ struct sbitmap_queue *sbq = &fdev->admin_sbq;
+ struct sbq_wait_state *ws = &sbq->ws[0];
+ DEFINE_SBQ_WAIT(wait);
+ int tag;
+
+ for (;;) {
+ sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
+ if (fdev->suppress_cmds) {
+ tag = -ESHUTDOWN;
+ break;
+ }
+ tag = sbitmap_queue_get(sbq, cpup);
+ if (tag >= 0)
+ break;
+ schedule();
+ }
+
+ sbitmap_finish_wait(sbq, ws, &wait);
+ return tag;
+}
+
+/* Submit an asynchronous admin command. Caller is responsible for implementing
+ * any waiting or timeout. Upon command completion the callback @cb is called.
+ */
+int fun_submit_admin_cmd(struct fun_dev *fdev, struct fun_admin_req_common *cmd,
+ fun_admin_callback_t cb, void *cb_data, bool wait_ok)
+{
+ struct fun_queue *funq = fdev->admin_q;
+ unsigned int cmdsize = cmd->len8 * 8;
+ struct fun_cmd_ctx *cmd_ctx;
+ int tag, cpu, rc = 0;
+
+ if (WARN_ON(cmdsize > (1 << funq->sqe_size_log2)))
+ return -EMSGSIZE;
+
+ tag = sbitmap_queue_get(&fdev->admin_sbq, &cpu);
+ if (tag < 0) {
+ if (!wait_ok)
+ return -EAGAIN;
+ tag = fun_wait_for_tag(fdev, &cpu);
+ if (tag < 0)
+ return tag;
+ }
+
+ cmd->cid = cpu_to_be16(tag);
+
+ cmd_ctx = &fdev->cmd_ctx[tag];
+ cmd_ctx->cb = cb;
+ cmd_ctx->cb_data = cb_data;
+
+ spin_lock(&funq->sq_lock);
+
+ if (unlikely(fdev->suppress_cmds)) {
+ rc = -ESHUTDOWN;
+ sbitmap_queue_clear(&fdev->admin_sbq, tag, cpu);
+ } else {
+ cmd_ctx->cpu = cpu;
+ memcpy(fun_sqe_at(funq, funq->sq_tail), cmd, cmdsize);
+
+ dev_dbg(fdev->dev, "admin cmd @ %u: %8ph\n", funq->sq_tail,
+ cmd);
+
+ if (++funq->sq_tail == funq->sq_depth)
+ funq->sq_tail = 0;
+ writel(funq->sq_tail, funq->sq_db);
+ }
+ spin_unlock(&funq->sq_lock);
+ return rc;
+}
+
+/* Abandon a pending admin command by clearing the issuer's callback data.
+ * Failure indicates that the command either has already completed or its
+ * completion is racing with this call.
+ */
+static bool fun_abandon_admin_cmd(struct fun_dev *fd,
+ const struct fun_admin_req_common *cmd,
+ void *cb_data)
+{
+ u16 cid = be16_to_cpu(cmd->cid);
+ struct fun_cmd_ctx *cmd_ctx = &fd->cmd_ctx[cid];
+
+ return cmpxchg(&cmd_ctx->cb_data, cb_data, NULL) == cb_data;
+}
+
+/* Stop submission of new admin commands and wake up any processes waiting for
+ * tags. Already submitted commands are left to complete or time out.
+ */
+static void fun_admin_stop(struct fun_dev *fdev)
+{
+ spin_lock(&fdev->admin_q->sq_lock);
+ fdev->suppress_cmds = true;
+ spin_unlock(&fdev->admin_q->sq_lock);
+ sbitmap_queue_wake_all(&fdev->admin_sbq);
+}
+
+/* The callback for synchronous execution of admin commands. It copies the
+ * command response to the caller's buffer and signals completion.
+ */
+static void fun_admin_cmd_sync_cb(struct fun_dev *fd, void *rsp, void *cb_data)
+{
+ const struct fun_admin_rsp_common *rsp_common = rsp;
+ struct fun_sync_cmd_ctx *ctx = cb_data;
+
+ if (!ctx)
+ return; /* command issuer timed out and left */
+ if (ctx->rsp_buf) {
+ unsigned int rsp_len = rsp_common->len8 * 8;
+
+ if (unlikely(rsp_len > ctx->rsp_len)) {
+ dev_err(fd->dev,
+ "response for op %u is %uB > response buffer %uB\n",
+ rsp_common->op, rsp_len, ctx->rsp_len);
+ rsp_len = ctx->rsp_len;
+ }
+ memcpy(ctx->rsp_buf, rsp, rsp_len);
+ }
+ ctx->rsp_status = rsp_common->ret;
+ complete(&ctx->compl);
+}
+
+/* Submit a synchronous admin command. */
+int fun_submit_admin_sync_cmd(struct fun_dev *fdev,
+ struct fun_admin_req_common *cmd, void *rsp,
+ size_t rspsize, unsigned int timeout)
+{
+ struct fun_sync_cmd_ctx ctx = {
+ .compl = COMPLETION_INITIALIZER_ONSTACK(ctx.compl),
+ .rsp_buf = rsp,
+ .rsp_len = rspsize,
+ };
+ unsigned int cmdlen = cmd->len8 * 8;
+ unsigned long jiffies_left;
+ int ret;
+
+ ret = fun_submit_admin_cmd(fdev, cmd, fun_admin_cmd_sync_cb, &ctx,
+ true);
+ if (ret)
+ return ret;
+
+ if (!timeout)
+ timeout = FUN_ADMIN_CMD_TO_MS;
+
+ jiffies_left = wait_for_completion_timeout(&ctx.compl,
+ msecs_to_jiffies(timeout));
+ if (!jiffies_left) {
+ /* The command timed out. Attempt to cancel it so we can return.
+ * But if the command is in the process of completing we'll
+ * wait for it.
+ */
+ if (fun_abandon_admin_cmd(fdev, cmd, &ctx)) {
+ dev_err(fdev->dev, "admin command timed out: %*ph\n",
+ cmdlen, cmd);
+ fun_admin_stop(fdev);
+ /* see if the timeout was due to a queue failure */
+ if (fun_adminq_stopped(fdev))
+ dev_err(fdev->dev,
+ "device does not accept admin commands\n");
+
+ return -ETIMEDOUT;
+ }
+ wait_for_completion(&ctx.compl);
+ }
+
+ if (ctx.rsp_status) {
+ dev_err(fdev->dev, "admin command failed, err %d: %*ph\n",
+ ctx.rsp_status, cmdlen, cmd);
+ }
+
+ return -ctx.rsp_status;
+}
+EXPORT_SYMBOL_GPL(fun_submit_admin_sync_cmd);
+
+/* Return the number of device resources of the requested type. */
+int fun_get_res_count(struct fun_dev *fdev, enum fun_admin_op res)
+{
+ union {
+ struct fun_admin_res_count_req req;
+ struct fun_admin_res_count_rsp rsp;
+ } cmd;
+ int rc;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(res, sizeof(cmd.req));
+ cmd.req.count = FUN_ADMIN_SIMPLE_SUBOP_INIT(FUN_ADMIN_SUBOP_RES_COUNT,
+ 0, 0);
+
+ rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common, &cmd.rsp,
+ sizeof(cmd), 0);
+ return rc ? rc : be32_to_cpu(cmd.rsp.count.data);
+}
+EXPORT_SYMBOL_GPL(fun_get_res_count);
+
+/* Request that the instance of resource @res with the given id be deleted. */
+int fun_res_destroy(struct fun_dev *fdev, enum fun_admin_op res,
+ unsigned int flags, u32 id)
+{
+ struct fun_admin_generic_destroy_req req = {
+ .common = FUN_ADMIN_REQ_COMMON_INIT2(res, sizeof(req)),
+ .destroy = FUN_ADMIN_SIMPLE_SUBOP_INIT(FUN_ADMIN_SUBOP_DESTROY,
+ flags, id)
+ };
+
+ return fun_submit_admin_sync_cmd(fdev, &req.common, NULL, 0, 0);
+}
+EXPORT_SYMBOL_GPL(fun_res_destroy);
+
+/* Bind two entities of the given types and IDs. */
+int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0,
+ unsigned int id0, enum fun_admin_bind_type type1,
+ unsigned int id1)
+{
+ struct {
+ struct fun_admin_bind_req req;
+ struct fun_admin_bind_entry entry[2];
+ } cmd = {
+ .req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_BIND,
+ sizeof(cmd)),
+ .entry[0] = FUN_ADMIN_BIND_ENTRY_INIT(type0, id0),
+ .entry[1] = FUN_ADMIN_BIND_ENTRY_INIT(type1, id1),
+ };
+
+ return fun_submit_admin_sync_cmd(fdev, &cmd.req.common, NULL, 0, 0);
+}
+EXPORT_SYMBOL_GPL(fun_bind);
+
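/*
 * Usage sketch for fun_bind() (illustrative IDs; the pairing semantics
 * are an inference from the entry types, not stated by the driver):
 * associate submission queue 5 with completion queue 7 in one admin
 * command.
 */
static inline int fun_example_bind_sq_to_cq(struct fun_dev *fdev)
{
	return fun_bind(fdev, FUN_ADMIN_BIND_TYPE_EPSQ, 5,
			FUN_ADMIN_BIND_TYPE_EPCQ, 7);
}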
+static int fun_get_dev_limits(struct fun_dev *fdev)
+{
+ struct pci_dev *pdev = to_pci_dev(fdev->dev);
+ unsigned int cq_count, sq_count, num_dbs;
+ int rc;
+
+ rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPCQ);
+ if (rc < 0)
+ return rc;
+ cq_count = rc;
+
+ rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPSQ);
+ if (rc < 0)
+ return rc;
+ sq_count = rc;
+
+ /* The admin queue consumes 1 CQ and at least 1 SQ. To be usable the
+ * device must provide additional queues.
+ */
+ if (cq_count < 2 || sq_count < 2 + !!fdev->admin_q->rq_depth)
+ return -EINVAL;
+
+ /* Calculate the max QID based on SQ/CQ/doorbell counts.
+ * SQ/CQ doorbells alternate.
+ */
+ num_dbs = (pci_resource_len(pdev, 0) - NVME_REG_DBS) /
+ (fdev->db_stride * 4);
+ fdev->max_qid = min3(cq_count, sq_count, num_dbs / 2) - 1;
+ fdev->kern_end_qid = fdev->max_qid + 1;
+ return 0;
+}
+
+/* Allocate as many MSI-X vectors as the function provides, but at least @min_vecs. */
+static int fun_alloc_irqs(struct pci_dev *pdev, unsigned int min_vecs)
+{
+ int vecs, num_msix = pci_msix_vec_count(pdev);
+
+ if (num_msix < 0)
+ return num_msix;
+ if (min_vecs > num_msix)
+ return -ERANGE;
+
+ vecs = pci_alloc_irq_vectors(pdev, min_vecs, num_msix, PCI_IRQ_MSIX);
+ if (vecs > 0) {
+ dev_info(&pdev->dev,
+ "Allocated %d IRQ vectors of %d requested\n",
+ vecs, num_msix);
+ } else {
+ dev_err(&pdev->dev,
+ "Unable to allocate at least %u IRQ vectors\n",
+ min_vecs);
+ }
+ return vecs;
+}
+
+/* Allocate and initialize the IRQ manager state. */
+static int fun_alloc_irq_mgr(struct fun_dev *fdev)
+{
+ fdev->irq_map = bitmap_zalloc(fdev->num_irqs, GFP_KERNEL);
+ if (!fdev->irq_map)
+ return -ENOMEM;
+
+ spin_lock_init(&fdev->irqmgr_lock);
+ /* mark IRQ 0 allocated, it is used by the admin queue */
+ __set_bit(0, fdev->irq_map);
+ fdev->irqs_avail = fdev->num_irqs - 1;
+ return 0;
+}
+
+/* Reserve @nirqs of the currently available IRQs and return their indices. */
+int fun_reserve_irqs(struct fun_dev *fdev, unsigned int nirqs, u16 *irq_indices)
+{
+ unsigned int b, n = 0;
+ int err = -ENOSPC;
+
+ if (!nirqs)
+ return 0;
+
+ spin_lock(&fdev->irqmgr_lock);
+ if (nirqs > fdev->irqs_avail)
+ goto unlock;
+
+ for_each_clear_bit(b, fdev->irq_map, fdev->num_irqs) {
+ __set_bit(b, fdev->irq_map);
+ irq_indices[n++] = b;
+ if (n >= nirqs)
+ break;
+ }
+
+ WARN_ON(n < nirqs);
+ fdev->irqs_avail -= n;
+ err = n;
+unlock:
+ spin_unlock(&fdev->irqmgr_lock);
+ return err;
+}
+EXPORT_SYMBOL(fun_reserve_irqs);
+
+/* Release @nirqs previously allocated IRQs with the supplied indices. */
+void fun_release_irqs(struct fun_dev *fdev, unsigned int nirqs,
+ u16 *irq_indices)
+{
+ unsigned int i;
+
+ spin_lock(&fdev->irqmgr_lock);
+ for (i = 0; i < nirqs; i++)
+ __clear_bit(irq_indices[i], fdev->irq_map);
+ fdev->irqs_avail += nirqs;
+ spin_unlock(&fdev->irqmgr_lock);
+}
+EXPORT_SYMBOL(fun_release_irqs);
+
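/*
 * Usage sketch (illustrative, hypothetical caller): a client driver
 * needing three vectors reserves their indices up front and gives them
 * back on teardown.  Index 0 is never handed out here because
 * fun_alloc_irq_mgr() pre-marks it for the admin queue.
 */
static inline int fun_example_borrow_irqs(struct fun_dev *fdev)
{
	u16 idx[3];
	int n = fun_reserve_irqs(fdev, 3, idx);

	if (n < 0)
		return n;	/* -ENOSPC when fewer than 3 are free */
	/* ... set up the vectors named by idx[0..2], then: */
	fun_release_irqs(fdev, 3, idx);
	return 0;
}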
+static void fun_serv_handler(struct work_struct *work)
+{
+ struct fun_dev *fd = container_of(work, struct fun_dev, service_task);
+
+ if (test_bit(FUN_SERV_DISABLED, &fd->service_flags))
+ return;
+ if (fd->serv_cb)
+ fd->serv_cb(fd);
+}
+
+void fun_serv_stop(struct fun_dev *fd)
+{
+ set_bit(FUN_SERV_DISABLED, &fd->service_flags);
+ cancel_work_sync(&fd->service_task);
+}
+EXPORT_SYMBOL_GPL(fun_serv_stop);
+
+void fun_serv_restart(struct fun_dev *fd)
+{
+ clear_bit(FUN_SERV_DISABLED, &fd->service_flags);
+ if (fd->service_flags)
+ schedule_work(&fd->service_task);
+}
+EXPORT_SYMBOL_GPL(fun_serv_restart);
+
+void fun_serv_sched(struct fun_dev *fd)
+{
+ if (!test_bit(FUN_SERV_DISABLED, &fd->service_flags))
+ schedule_work(&fd->service_task);
+}
+EXPORT_SYMBOL_GPL(fun_serv_sched);
+
+/* Check and try to get the device into a proper state for initialization,
+ * i.e., CSTS.RDY = CC.EN = 0.
+ */
+static int sanitize_dev(struct fun_dev *fdev)
+{
+ int rc;
+
+ fdev->cap_reg = readq(fdev->bar + NVME_REG_CAP);
+ fdev->cc_reg = readl(fdev->bar + NVME_REG_CC);
+
+ /* First get RDY to agree with the current EN. Give RDY the opportunity
+ * to complete a potential recent EN change.
+ */
+ rc = fun_wait_ready(fdev, fdev->cc_reg & NVME_CC_ENABLE);
+ if (rc)
+ return rc;
+
+ /* Next, reset the device if EN is currently 1. */
+ if (fdev->cc_reg & NVME_CC_ENABLE)
+ rc = fun_disable_ctrl(fdev);
+
+ return rc;
+}
+
+/* Undo the device initialization of fun_dev_enable(). */
+void fun_dev_disable(struct fun_dev *fdev)
+{
+ struct pci_dev *pdev = to_pci_dev(fdev->dev);
+
+ pci_set_drvdata(pdev, NULL);
+
+ if (fdev->fw_handle != FUN_HCI_ID_INVALID) {
+ fun_res_destroy(fdev, FUN_ADMIN_OP_SWUPGRADE, 0,
+ fdev->fw_handle);
+ fdev->fw_handle = FUN_HCI_ID_INVALID;
+ }
+
+ fun_disable_admin_queue(fdev);
+
+ bitmap_free(fdev->irq_map);
+ pci_free_irq_vectors(pdev);
+
+ pci_clear_master(pdev);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+
+ fun_unmap_bars(fdev);
+}
+EXPORT_SYMBOL(fun_dev_disable);
+
+/* Perform basic initialization of a device, including
+ * - PCI config space setup and BAR0 mapping
+ * - interrupt management initialization
+ * - 1 admin queue setup
+ * - determination of some device limits, such as number of queues.
+ */
+int fun_dev_enable(struct fun_dev *fdev, struct pci_dev *pdev,
+ const struct fun_dev_params *areq, const char *name)
+{
+ int rc;
+
+ fdev->dev = &pdev->dev;
+ rc = fun_map_bars(fdev, name);
+ if (rc)
+ return rc;
+
+ rc = fun_set_dma_masks(fdev->dev);
+ if (rc)
+ goto unmap;
+
+ rc = pci_enable_device_mem(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Couldn't enable device, err %d\n", rc);
+ goto unmap;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+
+ rc = sanitize_dev(fdev);
+ if (rc)
+ goto disable_dev;
+
+ fdev->fw_handle = FUN_HCI_ID_INVALID;
+ fdev->q_depth = NVME_CAP_MQES(fdev->cap_reg) + 1;
+ fdev->db_stride = 1 << NVME_CAP_STRIDE(fdev->cap_reg);
+ fdev->dbs = fdev->bar + NVME_REG_DBS;
+
+ INIT_WORK(&fdev->service_task, fun_serv_handler);
+ fdev->service_flags = FUN_SERV_DISABLED;
+ fdev->serv_cb = areq->serv_cb;
+
+ rc = fun_alloc_irqs(pdev, areq->min_msix + 1); /* +1 for admin CQ */
+ if (rc < 0)
+ goto disable_dev;
+ fdev->num_irqs = rc;
+
+ rc = fun_alloc_irq_mgr(fdev);
+ if (rc)
+ goto free_irqs;
+
+ pci_set_master(pdev);
+ rc = fun_enable_admin_queue(fdev, areq);
+ if (rc)
+ goto free_irq_mgr;
+
+ rc = fun_get_dev_limits(fdev);
+ if (rc < 0)
+ goto disable_admin;
+
+ pci_save_state(pdev);
+ pci_set_drvdata(pdev, fdev);
+ pcie_print_link_status(pdev);
+ dev_dbg(fdev->dev, "q_depth %u, db_stride %u, max qid %d kern_end_qid %d\n",
+ fdev->q_depth, fdev->db_stride, fdev->max_qid,
+ fdev->kern_end_qid);
+ return 0;
+
+disable_admin:
+ fun_disable_admin_queue(fdev);
+free_irq_mgr:
+ pci_clear_master(pdev);
+ bitmap_free(fdev->irq_map);
+free_irqs:
+ pci_free_irq_vectors(pdev);
+disable_dev:
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+unmap:
+ fun_unmap_bars(fdev);
+ return rc;
+}
+EXPORT_SYMBOL(fun_dev_enable);
+
+MODULE_AUTHOR("Dimitris Michailidis <dmichail@fungible.com>");
+MODULE_DESCRIPTION("Core services driver for Fungible devices");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/fungible/funcore/fun_dev.h b/drivers/net/ethernet/fungible/funcore/fun_dev.h
new file mode 100644
index 000000000000..9e8c17ce8887
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/fun_dev.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUNDEV_H
+#define _FUNDEV_H
+
+#include <linux/sbitmap.h>
+#include <linux/spinlock_types.h>
+#include <linux/workqueue.h>
+#include "fun_hci.h"
+
+struct pci_dev;
+struct fun_dev;
+struct fun_queue;
+struct fun_cmd_ctx;
+struct fun_queue_alloc_req;
+
+/* doorbell fields */
+enum {
+ FUN_DB_QIDX_S = 0,
+ FUN_DB_INTCOAL_ENTRIES_S = 16,
+ FUN_DB_INTCOAL_ENTRIES_M = 0x7f,
+ FUN_DB_INTCOAL_USEC_S = 23,
+ FUN_DB_INTCOAL_USEC_M = 0x7f,
+ FUN_DB_IRQ_S = 30,
+ FUN_DB_IRQ_F = 1 << FUN_DB_IRQ_S,
+ FUN_DB_IRQ_ARM_S = 31,
+ FUN_DB_IRQ_ARM_F = 1U << FUN_DB_IRQ_ARM_S
+};
+
+/* Callback for asynchronous admin commands.
+ * Invoked on reception of command response.
+ */
+typedef void (*fun_admin_callback_t)(struct fun_dev *fdev, void *rsp,
+ void *cb_data);
+
+/* Callback for events/notifications received by an admin queue. */
+typedef void (*fun_admin_event_cb)(struct fun_dev *fdev, void *cqe);
+
+/* Callback for pending work handled by the service task. */
+typedef void (*fun_serv_cb)(struct fun_dev *fd);
+
+/* service task flags */
+enum {
+ FUN_SERV_DISABLED, /* service task is disabled */
+ FUN_SERV_FIRST_AVAIL
+};
+
+/* Driver state associated with a PCI function. */
+struct fun_dev {
+ struct device *dev;
+
+ void __iomem *bar; /* start of BAR0 mapping */
+ u32 __iomem *dbs; /* start of doorbells in BAR0 mapping */
+
+ /* admin queue */
+ struct fun_queue *admin_q;
+ struct sbitmap_queue admin_sbq;
+ struct fun_cmd_ctx *cmd_ctx;
+ fun_admin_event_cb adminq_cb;
+ bool suppress_cmds; /* if set don't write commands to SQ */
+
+ /* address increment between consecutive doorbells, in 4B units */
+ unsigned int db_stride;
+
+ /* SW versions of device registers */
+ u32 cc_reg; /* CC register */
+ u64 cap_reg; /* CAPability register */
+
+ unsigned int q_depth; /* max queue depth supported by device */
+ unsigned int max_qid; /* = #queues - 1, separately for SQs and CQs */
+ unsigned int kern_end_qid; /* last qid in the kernel range + 1 */
+
+ unsigned int fw_handle;
+
+ /* IRQ manager */
+ unsigned int num_irqs;
+ unsigned int irqs_avail;
+ spinlock_t irqmgr_lock;
+ unsigned long *irq_map;
+
+ /* The service task handles work that needs a process context */
+ struct work_struct service_task;
+ unsigned long service_flags;
+ fun_serv_cb serv_cb;
+};
+
+struct fun_dev_params {
+ u8 cqe_size_log2; /* log2 of admin q CQE size */
+ u8 sqe_size_log2; /* log2 of admin q SQE size */
+
+ /* admin q depths */
+ u16 cq_depth;
+ u16 sq_depth;
+ u16 rq_depth;
+
+ u16 min_msix; /* min vectors needed by requesting driver */
+
+ fun_admin_event_cb event_cb;
+ fun_serv_cb serv_cb;
+};
+
+/* Return the BAR address of a doorbell. */
+static inline u32 __iomem *fun_db_addr(const struct fun_dev *fdev,
+ unsigned int db_index)
+{
+ return &fdev->dbs[db_index * fdev->db_stride];
+}
+
+/* Return the BAR address of an SQ doorbell. SQ and CQ DBs alternate,
+ * SQs have even DB indices.
+ */
+static inline u32 __iomem *fun_sq_db_addr(const struct fun_dev *fdev,
+ unsigned int sqid)
+{
+ return fun_db_addr(fdev, sqid * 2);
+}
+
+static inline u32 __iomem *fun_cq_db_addr(const struct fun_dev *fdev,
+ unsigned int cqid)
+{
+ return fun_db_addr(fdev, cqid * 2 + 1);
+}
+
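/*
 * Layout sketch implied by the helpers above: SQ and CQ doorbells
 * interleave in BAR0, so with db_stride == 1 (i.e. a 4-byte stride)
 * SQ n rings dbs[2n] and CQ n rings dbs[2n + 1].  A producer would
 * publish a new tail with a plain register write (assumes
 * <linux/io.h>):
 *
 *	writel(new_tail, fun_sq_db_addr(fdev, sqid));
 */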
+int fun_get_res_count(struct fun_dev *fdev, enum fun_admin_op res);
+int fun_res_destroy(struct fun_dev *fdev, enum fun_admin_op res,
+ unsigned int flags, u32 id);
+int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0,
+ unsigned int id0, enum fun_admin_bind_type type1,
+ unsigned int id1);
+
+int fun_submit_admin_cmd(struct fun_dev *fdev, struct fun_admin_req_common *cmd,
+ fun_admin_callback_t cb, void *cb_data, bool wait_ok);
+int fun_submit_admin_sync_cmd(struct fun_dev *fdev,
+ struct fun_admin_req_common *cmd, void *rsp,
+ size_t rspsize, unsigned int timeout);
+
+int fun_dev_enable(struct fun_dev *fdev, struct pci_dev *pdev,
+ const struct fun_dev_params *areq, const char *name);
+void fun_dev_disable(struct fun_dev *fdev);
+
+int fun_reserve_irqs(struct fun_dev *fdev, unsigned int nirqs,
+ u16 *irq_indices);
+void fun_release_irqs(struct fun_dev *fdev, unsigned int nirqs,
+ u16 *irq_indices);
+
+void fun_serv_stop(struct fun_dev *fd);
+void fun_serv_restart(struct fun_dev *fd);
+void fun_serv_sched(struct fun_dev *fd);
+
+#endif /* _FUNDEV_H */
diff --git a/drivers/net/ethernet/fungible/funcore/fun_hci.h b/drivers/net/ethernet/fungible/funcore/fun_hci.h
new file mode 100644
index 000000000000..257203e94b68
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/fun_hci.h
@@ -0,0 +1,1202 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef __FUN_HCI_H
+#define __FUN_HCI_H
+
+enum {
+ FUN_HCI_ID_INVALID = 0xffffffff,
+};
+
+enum fun_admin_op {
+ FUN_ADMIN_OP_BIND = 0x1,
+ FUN_ADMIN_OP_EPCQ = 0x11,
+ FUN_ADMIN_OP_EPSQ = 0x12,
+ FUN_ADMIN_OP_PORT = 0x13,
+ FUN_ADMIN_OP_ETH = 0x14,
+ FUN_ADMIN_OP_VI = 0x15,
+ FUN_ADMIN_OP_SWUPGRADE = 0x1f,
+ FUN_ADMIN_OP_RSS = 0x21,
+ FUN_ADMIN_OP_ADI = 0x25,
+ FUN_ADMIN_OP_KTLS = 0x26,
+};
+
+enum {
+ FUN_REQ_COMMON_FLAG_RSP = 0x1,
+ FUN_REQ_COMMON_FLAG_HEAD_WB = 0x2,
+ FUN_REQ_COMMON_FLAG_INT = 0x4,
+ FUN_REQ_COMMON_FLAG_CQE_IN_RQBUF = 0x8,
+};
+
+struct fun_admin_req_common {
+ __u8 op;
+ __u8 len8;
+ __be16 flags;
+ __u8 suboff8;
+ __u8 rsvd0;
+ __be16 cid;
+};
+
+#define FUN_ADMIN_REQ_COMMON_INIT(_op, _len8, _flags, _suboff8, _cid) \
+ (struct fun_admin_req_common) { \
+ .op = (_op), .len8 = (_len8), .flags = cpu_to_be16(_flags), \
+ .suboff8 = (_suboff8), .cid = cpu_to_be16(_cid), \
+ }
+
+#define FUN_ADMIN_REQ_COMMON_INIT2(_op, _len) \
+ (struct fun_admin_req_common) { \
+ .op = (_op), .len8 = (_len) / 8, \
+ }
+
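/*
 * Note on len8: command and response lengths travel in 8-byte units,
 * so INIT2 expects a size that is a multiple of 8.  A 16-byte request,
 * for instance, yields .len8 = 2, and the receive side reconstructs
 * byte counts as len8 * 8 (see fun_admin_cmd_sync_cb() in fun_dev.c).
 */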
+struct fun_admin_rsp_common {
+ __u8 op;
+ __u8 len8;
+ __be16 flags;
+ __u8 suboff8;
+ __u8 ret;
+ __be16 cid;
+};
+
+struct fun_admin_write48_req {
+ __be64 key_to_data;
+};
+
+#define FUN_ADMIN_WRITE48_REQ_KEY_S 56U
+#define FUN_ADMIN_WRITE48_REQ_KEY_M 0xff
+#define FUN_ADMIN_WRITE48_REQ_KEY_P_NOSWAP(x) \
+ (((__u64)x) << FUN_ADMIN_WRITE48_REQ_KEY_S)
+
+#define FUN_ADMIN_WRITE48_REQ_DATA_S 0U
+#define FUN_ADMIN_WRITE48_REQ_DATA_M 0xffffffffffff
+#define FUN_ADMIN_WRITE48_REQ_DATA_P_NOSWAP(x) \
+ (((__u64)x) << FUN_ADMIN_WRITE48_REQ_DATA_S)
+
+#define FUN_ADMIN_WRITE48_REQ_INIT(key, data) \
+ (struct fun_admin_write48_req) { \
+ .key_to_data = cpu_to_be64( \
+ FUN_ADMIN_WRITE48_REQ_KEY_P_NOSWAP(key) | \
+ FUN_ADMIN_WRITE48_REQ_DATA_P_NOSWAP(data)), \
+ }
+
+struct fun_admin_write48_rsp {
+ __be64 key_to_data;
+};
+
+struct fun_admin_read48_req {
+ __be64 key_pack;
+};
+
+#define FUN_ADMIN_READ48_REQ_KEY_S 56U
+#define FUN_ADMIN_READ48_REQ_KEY_M 0xff
+#define FUN_ADMIN_READ48_REQ_KEY_P_NOSWAP(x) \
+ (((__u64)x) << FUN_ADMIN_READ48_REQ_KEY_S)
+
+#define FUN_ADMIN_READ48_REQ_INIT(key) \
+ (struct fun_admin_read48_req) { \
+ .key_pack = \
+ cpu_to_be64(FUN_ADMIN_READ48_REQ_KEY_P_NOSWAP(key)), \
+ }
+
+struct fun_admin_read48_rsp {
+ __be64 key_to_data;
+};
+
+#define FUN_ADMIN_READ48_RSP_KEY_S 56U
+#define FUN_ADMIN_READ48_RSP_KEY_M 0xff
+#define FUN_ADMIN_READ48_RSP_KEY_G(x) \
+ ((be64_to_cpu(x) >> FUN_ADMIN_READ48_RSP_KEY_S) & \
+ FUN_ADMIN_READ48_RSP_KEY_M)
+
+#define FUN_ADMIN_READ48_RSP_RET_S 48U
+#define FUN_ADMIN_READ48_RSP_RET_M 0xff
+#define FUN_ADMIN_READ48_RSP_RET_G(x) \
+ ((be64_to_cpu(x) >> FUN_ADMIN_READ48_RSP_RET_S) & \
+ FUN_ADMIN_READ48_RSP_RET_M)
+
+#define FUN_ADMIN_READ48_RSP_DATA_S 0U
+#define FUN_ADMIN_READ48_RSP_DATA_M 0xffffffffffff
+#define FUN_ADMIN_READ48_RSP_DATA_G(x) \
+ ((be64_to_cpu(x) >> FUN_ADMIN_READ48_RSP_DATA_S) & \
+ FUN_ADMIN_READ48_RSP_DATA_M)
+
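/*
 * Bit layout implied by the READ48 response getters above (one
 * big-endian 64-bit word):
 *
 *   [63:56] key   [55:48] ret   [47:0] data
 *
 * e.g. a raw value of 0x0100000000000bb8 decodes to key 1, ret 0,
 * data 0xbb8 (3000).
 */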
+enum fun_admin_bind_type {
+ FUN_ADMIN_BIND_TYPE_EPCQ = 0x1,
+ FUN_ADMIN_BIND_TYPE_EPSQ = 0x2,
+ FUN_ADMIN_BIND_TYPE_PORT = 0x3,
+ FUN_ADMIN_BIND_TYPE_RSS = 0x4,
+ FUN_ADMIN_BIND_TYPE_VI = 0x5,
+ FUN_ADMIN_BIND_TYPE_ETH = 0x6,
+};
+
+struct fun_admin_bind_entry {
+ __u8 type;
+ __u8 rsvd0[3];
+ __be32 id;
+};
+
+#define FUN_ADMIN_BIND_ENTRY_INIT(_type, _id) \
+ (struct fun_admin_bind_entry) { \
+ .type = (_type), .id = cpu_to_be32(_id), \
+ }
+
+struct fun_admin_bind_req {
+ struct fun_admin_req_common common;
+ struct fun_admin_bind_entry entry[];
+};
+
+struct fun_admin_bind_rsp {
+ struct fun_admin_rsp_common bind_rsp_common;
+};
+
+struct fun_admin_simple_subop {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 data;
+};
+
+#define FUN_ADMIN_SIMPLE_SUBOP_INIT(_subop, _flags, _data) \
+ (struct fun_admin_simple_subop) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .data = cpu_to_be32(_data), \
+ }
+
+enum fun_admin_subop {
+ FUN_ADMIN_SUBOP_CREATE = 0x10,
+ FUN_ADMIN_SUBOP_DESTROY = 0x11,
+ FUN_ADMIN_SUBOP_MODIFY = 0x12,
+ FUN_ADMIN_SUBOP_RES_COUNT = 0x14,
+ FUN_ADMIN_SUBOP_READ = 0x15,
+ FUN_ADMIN_SUBOP_WRITE = 0x16,
+ FUN_ADMIN_SUBOP_NOTIFY = 0x17,
+};
+
+enum {
+ FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR = 0x1,
+};
+
+struct fun_admin_generic_destroy_req {
+ struct fun_admin_req_common common;
+ struct fun_admin_simple_subop destroy;
+};
+
+struct fun_admin_generic_create_rsp {
+ struct fun_admin_rsp_common common;
+
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+};
+
+struct fun_admin_res_count_req {
+ struct fun_admin_req_common common;
+ struct fun_admin_simple_subop count;
+};
+
+struct fun_admin_res_count_rsp {
+ struct fun_admin_rsp_common common;
+ struct fun_admin_simple_subop count;
+};
+
+enum {
+ FUN_ADMIN_EPCQ_CREATE_FLAG_INT_EPCQ = 0x2,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_ENTRY_WR_TPH = 0x4,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_SL_WR_TPH = 0x8,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_RQ = 0x80,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_INT_IQ = 0x100,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_INT_NOARM = 0x200,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_DROP_ON_OVERFLOW = 0x400,
+};
+
+struct fun_admin_epcq_req {
+ struct fun_admin_req_common common;
+ union epcq_req_subop {
+ struct fun_admin_epcq_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 epsqid;
+ __u8 rsvd1;
+ __u8 entry_size_log2;
+ __be16 nentries;
+
+ __be64 address;
+
+ __be16 tailroom; /* per packet tailroom in bytes */
+ __u8 headroom; /* per packet headroom in 2B units */
+ __u8 intcoal_kbytes;
+ __u8 intcoal_holdoff_nentries;
+ __u8 intcoal_holdoff_usecs;
+ __be16 intid;
+
+ __be32 scan_start_id;
+ __be32 scan_end_id;
+
+ __be16 tph_cpuid;
+ __u8 rsvd3[6];
+ } create;
+
+ struct fun_admin_epcq_modify_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be16 headroom; /* headroom in bytes */
+ __u8 rsvd1[6];
+ } modify;
+ } u;
+};
+
+#define FUN_ADMIN_EPCQ_CREATE_REQ_INIT( \
+ _subop, _flags, _id, _epsqid, _entry_size_log2, _nentries, _address, \
+ _tailroom, _headroom, _intcoal_kbytes, _intcoal_holdoff_nentries, \
+ _intcoal_holdoff_usecs, _intid, _scan_start_id, _scan_end_id, \
+ _tph_cpuid) \
+ (struct fun_admin_epcq_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .epsqid = cpu_to_be32(_epsqid), \
+ .entry_size_log2 = _entry_size_log2, \
+ .nentries = cpu_to_be16(_nentries), \
+ .address = cpu_to_be64(_address), \
+ .tailroom = cpu_to_be16(_tailroom), .headroom = _headroom, \
+ .intcoal_kbytes = _intcoal_kbytes, \
+ .intcoal_holdoff_nentries = _intcoal_holdoff_nentries, \
+ .intcoal_holdoff_usecs = _intcoal_holdoff_usecs, \
+ .intid = cpu_to_be16(_intid), \
+ .scan_start_id = cpu_to_be32(_scan_start_id), \
+ .scan_end_id = cpu_to_be32(_scan_end_id), \
+ .tph_cpuid = cpu_to_be16(_tph_cpuid), \
+ }
+
+#define FUN_ADMIN_EPCQ_MODIFY_REQ_INIT(_subop, _flags, _id, _headroom) \
+ (struct fun_admin_epcq_modify_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .headroom = cpu_to_be16(_headroom), \
+ }
+
+enum {
+ FUN_ADMIN_EPSQ_CREATE_FLAG_INT_EPSQ = 0x2,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_ENTRY_RD_TPH = 0x4,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_GL_RD_TPH = 0x8,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS = 0x10,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS_TPH = 0x20,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_EPCQ = 0x40,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_RQ = 0x80,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_INT_IQ = 0x100,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_NO_CMPL = 0x200,
+};
+
+struct fun_admin_epsq_req {
+ struct fun_admin_req_common common;
+
+ union epsq_req_subop {
+ struct fun_admin_epsq_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 epcqid;
+ __u8 rsvd1;
+ __u8 entry_size_log2;
+ __be16 nentries;
+
+ __be64 address; /* DMA address of epsq */
+
+ __u8 rsvd2[3];
+ __u8 intcoal_kbytes;
+ __u8 intcoal_holdoff_nentries;
+ __u8 intcoal_holdoff_usecs;
+ __be16 intid;
+
+ __be32 scan_start_id;
+ __be32 scan_end_id;
+
+ __u8 rsvd3[4];
+ __be16 tph_cpuid;
+ __u8 buf_size_log2; /* log2 of RQ buffer size */
+ __u8 head_wb_size_log2; /* log2 of head write back size */
+
+ __be64 head_wb_address; /* DMA address for head writeback */
+ } create;
+ } u;
+};
+
+#define FUN_ADMIN_EPSQ_CREATE_REQ_INIT( \
+ _subop, _flags, _id, _epcqid, _entry_size_log2, _nentries, _address, \
+ _intcoal_kbytes, _intcoal_holdoff_nentries, _intcoal_holdoff_usecs, \
+ _intid, _scan_start_id, _scan_end_id, _tph_cpuid, _buf_size_log2, \
+ _head_wb_size_log2, _head_wb_address) \
+ (struct fun_admin_epsq_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .epcqid = cpu_to_be32(_epcqid), \
+ .entry_size_log2 = _entry_size_log2, \
+ .nentries = cpu_to_be16(_nentries), \
+ .address = cpu_to_be64(_address), \
+ .intcoal_kbytes = _intcoal_kbytes, \
+ .intcoal_holdoff_nentries = _intcoal_holdoff_nentries, \
+ .intcoal_holdoff_usecs = _intcoal_holdoff_usecs, \
+ .intid = cpu_to_be16(_intid), \
+ .scan_start_id = cpu_to_be32(_scan_start_id), \
+ .scan_end_id = cpu_to_be32(_scan_end_id), \
+ .tph_cpuid = cpu_to_be16(_tph_cpuid), \
+ .buf_size_log2 = _buf_size_log2, \
+ .head_wb_size_log2 = _head_wb_size_log2, \
+ .head_wb_address = cpu_to_be64(_head_wb_address), \
+ }
+
+enum {
+ FUN_PORT_CAP_OFFLOADS = 0x1,
+ FUN_PORT_CAP_STATS = 0x2,
+ FUN_PORT_CAP_LOOPBACK = 0x4,
+ FUN_PORT_CAP_VPORT = 0x8,
+ FUN_PORT_CAP_TX_PAUSE = 0x10,
+ FUN_PORT_CAP_RX_PAUSE = 0x20,
+ FUN_PORT_CAP_AUTONEG = 0x40,
+ FUN_PORT_CAP_RSS = 0x80,
+ FUN_PORT_CAP_VLAN_OFFLOADS = 0x100,
+ FUN_PORT_CAP_ENCAP_OFFLOADS = 0x200,
+ FUN_PORT_CAP_1000_X = 0x1000,
+ FUN_PORT_CAP_10G_R = 0x2000,
+ FUN_PORT_CAP_40G_R4 = 0x4000,
+ FUN_PORT_CAP_25G_R = 0x8000,
+ FUN_PORT_CAP_50G_R2 = 0x10000,
+ FUN_PORT_CAP_50G_R = 0x20000,
+ FUN_PORT_CAP_100G_R4 = 0x40000,
+ FUN_PORT_CAP_100G_R2 = 0x80000,
+ FUN_PORT_CAP_200G_R4 = 0x100000,
+ FUN_PORT_CAP_FEC_NONE = 0x10000000,
+ FUN_PORT_CAP_FEC_FC = 0x20000000,
+ FUN_PORT_CAP_FEC_RS = 0x40000000,
+};
+
+enum fun_port_brkout_mode {
+ FUN_PORT_BRKMODE_NA = 0x0,
+ FUN_PORT_BRKMODE_NONE = 0x1,
+ FUN_PORT_BRKMODE_2X = 0x2,
+ FUN_PORT_BRKMODE_4X = 0x3,
+};
+
+enum {
+ FUN_PORT_SPEED_AUTO = 0x0,
+ FUN_PORT_SPEED_10M = 0x1,
+ FUN_PORT_SPEED_100M = 0x2,
+ FUN_PORT_SPEED_1G = 0x4,
+ FUN_PORT_SPEED_10G = 0x8,
+ FUN_PORT_SPEED_25G = 0x10,
+ FUN_PORT_SPEED_40G = 0x20,
+ FUN_PORT_SPEED_50G = 0x40,
+ FUN_PORT_SPEED_100G = 0x80,
+ FUN_PORT_SPEED_200G = 0x100,
+};
+
+enum fun_port_duplex_mode {
+ FUN_PORT_FULL_DUPLEX = 0x0,
+ FUN_PORT_HALF_DUPLEX = 0x1,
+};
+
+enum {
+ FUN_PORT_FEC_NA = 0x0,
+ FUN_PORT_FEC_OFF = 0x1,
+ FUN_PORT_FEC_RS = 0x2,
+ FUN_PORT_FEC_FC = 0x4,
+ FUN_PORT_FEC_AUTO = 0x8,
+};
+
+enum fun_port_link_status {
+ FUN_PORT_LINK_UP = 0x0,
+ FUN_PORT_LINK_UP_WITH_ERR = 0x1,
+ FUN_PORT_LINK_DOWN = 0x2,
+};
+
+enum fun_port_led_type {
+ FUN_PORT_LED_OFF = 0x0,
+ FUN_PORT_LED_AMBER = 0x1,
+ FUN_PORT_LED_GREEN = 0x2,
+ FUN_PORT_LED_BEACON_ON = 0x3,
+ FUN_PORT_LED_BEACON_OFF = 0x4,
+};
+
+enum {
+ FUN_PORT_FLAG_MAC_DOWN = 0x1,
+ FUN_PORT_FLAG_MAC_UP = 0x2,
+ FUN_PORT_FLAG_NH_DOWN = 0x4,
+ FUN_PORT_FLAG_NH_UP = 0x8,
+};
+
+enum {
+ FUN_PORT_FLAG_ENABLE_NOTIFY = 0x1,
+};
+
+enum fun_port_lane_attr {
+ FUN_PORT_LANE_1 = 0x1,
+ FUN_PORT_LANE_2 = 0x2,
+ FUN_PORT_LANE_4 = 0x4,
+ FUN_PORT_LANE_SPEED_10G = 0x100,
+ FUN_PORT_LANE_SPEED_25G = 0x200,
+ FUN_PORT_LANE_SPEED_50G = 0x400,
+ FUN_PORT_LANE_SPLIT = 0x8000,
+};
+
+enum fun_admin_port_subop {
+ FUN_ADMIN_PORT_SUBOP_INETADDR_EVENT = 0x24,
+};
+
+enum fun_admin_port_key {
+ FUN_ADMIN_PORT_KEY_ILLEGAL = 0x0,
+ FUN_ADMIN_PORT_KEY_MTU = 0x1,
+ FUN_ADMIN_PORT_KEY_FEC = 0x2,
+ FUN_ADMIN_PORT_KEY_SPEED = 0x3,
+ FUN_ADMIN_PORT_KEY_DEBOUNCE = 0x4,
+ FUN_ADMIN_PORT_KEY_DUPLEX = 0x5,
+ FUN_ADMIN_PORT_KEY_MACADDR = 0x6,
+ FUN_ADMIN_PORT_KEY_LINKMODE = 0x7,
+ FUN_ADMIN_PORT_KEY_BREAKOUT = 0x8,
+ FUN_ADMIN_PORT_KEY_ENABLE = 0x9,
+ FUN_ADMIN_PORT_KEY_DISABLE = 0xa,
+ FUN_ADMIN_PORT_KEY_ERR_DISABLE = 0xb,
+ FUN_ADMIN_PORT_KEY_CAPABILITIES = 0xc,
+ FUN_ADMIN_PORT_KEY_LP_CAPABILITIES = 0xd,
+ FUN_ADMIN_PORT_KEY_STATS_DMA_LOW = 0xe,
+ FUN_ADMIN_PORT_KEY_STATS_DMA_HIGH = 0xf,
+ FUN_ADMIN_PORT_KEY_LANE_ATTRS = 0x10,
+ FUN_ADMIN_PORT_KEY_LED = 0x11,
+ FUN_ADMIN_PORT_KEY_ADVERT = 0x12,
+};
+
+struct fun_subop_imm {
+ __u8 subop; /* see enum fun_data_op */
+ __u8 flags;
+ __u8 nsgl;
+ __u8 rsvd0;
+ __be32 len;
+
+ __u8 data[];
+};
+
+enum fun_subop_sgl_flags {
+ FUN_SUBOP_SGL_USE_OFF8 = 0x1,
+ FUN_SUBOP_FLAG_FREE_BUF = 0x2,
+ FUN_SUBOP_FLAG_IS_REFBUF = 0x4,
+ FUN_SUBOP_SGL_FLAG_LOCAL = 0x8,
+};
+
+enum fun_data_op {
+ FUN_DATAOP_INVALID = 0x0,
+ FUN_DATAOP_SL = 0x1, /* scatter */
+ FUN_DATAOP_GL = 0x2, /* gather */
+ FUN_DATAOP_SGL = 0x3, /* scatter-gather */
+ FUN_DATAOP_IMM = 0x4, /* immediate data */
+ FUN_DATAOP_RQBUF = 0x8, /* rq buffer */
+};
+
+struct fun_dataop_gl {
+ __u8 subop;
+ __u8 flags;
+ __be16 sgl_off;
+ __be32 sgl_len;
+
+ __be64 sgl_data;
+};
+
+static inline void fun_dataop_gl_init(struct fun_dataop_gl *s, u8 flags,
+ u16 sgl_off, u32 sgl_len, u64 sgl_data)
+{
+ s->subop = FUN_DATAOP_GL;
+ s->flags = flags;
+ s->sgl_off = cpu_to_be16(sgl_off);
+ s->sgl_len = cpu_to_be32(sgl_len);
+ s->sgl_data = cpu_to_be64(sgl_data);
+}
+
+struct fun_dataop_imm {
+ __u8 subop;
+ __u8 flags;
+ __be16 rsvd0;
+ __be32 sgl_len;
+};
+
+struct fun_subop_sgl {
+ __u8 subop;
+ __u8 flags;
+ __u8 nsgl;
+ __u8 rsvd0;
+ __be32 sgl_len;
+
+ __be64 sgl_data;
+};
+
+#define FUN_SUBOP_SGL_INIT(_subop, _flags, _nsgl, _sgl_len, _sgl_data) \
+ (struct fun_subop_sgl) { \
+ .subop = (_subop), .flags = (_flags), .nsgl = (_nsgl), \
+ .sgl_len = cpu_to_be32(_sgl_len), \
+ .sgl_data = cpu_to_be64(_sgl_data), \
+ }
+
+struct fun_dataop_rqbuf {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 cid;
+ __be32 bufoff;
+};
+
+struct fun_dataop_hdr {
+ __u8 nsgl;
+ __u8 flags;
+ __u8 ngather;
+ __u8 nscatter;
+ __be32 total_len;
+
+ struct fun_dataop_imm imm[];
+};
+
+#define FUN_DATAOP_HDR_INIT(_nsgl, _flags, _ngather, _nscatter, _total_len) \
+ (struct fun_dataop_hdr) { \
+ .nsgl = _nsgl, .flags = _flags, .ngather = _ngather, \
+ .nscatter = _nscatter, .total_len = cpu_to_be32(_total_len), \
+ }
+
+enum fun_port_inetaddr_event_type {
+ FUN_PORT_INETADDR_ADD = 0x1,
+ FUN_PORT_INETADDR_DEL = 0x2,
+};
+
+enum fun_port_inetaddr_addr_family {
+ FUN_PORT_INETADDR_IPV4 = 0x1,
+ FUN_PORT_INETADDR_IPV6 = 0x2,
+};
+
+struct fun_admin_port_req {
+ struct fun_admin_req_common common;
+
+ union port_req_subop {
+ struct fun_admin_port_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+ } create;
+ struct fun_admin_port_write_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id; /* portid */
+
+ struct fun_admin_write48_req write48[];
+ } write;
+ struct fun_admin_port_read_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id; /* portid */
+
+ struct fun_admin_read48_req read48[];
+ } read;
+ struct fun_admin_port_inetaddr_event_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __u8 event_type;
+ __u8 addr_family;
+ __be32 id;
+
+ __u8 addr[];
+ } inetaddr_event;
+ } u;
+};
+
+#define FUN_ADMIN_PORT_CREATE_REQ_INIT(_subop, _flags, _id) \
+ (struct fun_admin_port_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), \
+ }
+
+#define FUN_ADMIN_PORT_WRITE_REQ_INIT(_subop, _flags, _id) \
+ (struct fun_admin_port_write_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), \
+ }
+
+#define FUN_ADMIN_PORT_READ_REQ_INIT(_subop, _flags, _id) \
+ (struct fun_admin_port_read_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), \
+ }
+
+struct fun_admin_port_rsp {
+ struct fun_admin_rsp_common common;
+
+ union port_rsp_subop {
+ struct fun_admin_port_create_rsp {
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id;
+
+ __be16 lport;
+ __u8 rsvd1[6];
+ } create;
+ struct fun_admin_port_write_rsp {
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id; /* portid */
+
+ struct fun_admin_write48_rsp write48[];
+ } write;
+ struct fun_admin_port_read_rsp {
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id; /* portid */
+
+ struct fun_admin_read48_rsp read48[];
+ } read;
+ struct fun_admin_port_inetaddr_event_rsp {
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id; /* portid */
+ } inetaddr_event;
+ } u;
+};
+
+enum fun_xcvr_type {
+ FUN_XCVR_BASET = 0x0,
+ FUN_XCVR_CU = 0x1,
+ FUN_XCVR_SMF = 0x2,
+ FUN_XCVR_MMF = 0x3,
+ FUN_XCVR_AOC = 0x4,
+ FUN_XCVR_SFPP = 0x10, /* SFP+ or later */
+ FUN_XCVR_QSFPP = 0x11, /* QSFP+ or later */
+ FUN_XCVR_QSFPDD = 0x12, /* QSFP-DD */
+};
+
+struct fun_admin_port_notif {
+ struct fun_admin_rsp_common common;
+
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 id;
+ __be32 speed; /* in 10 Mbps units */
+
+ __u8 link_state;
+ __u8 missed_events;
+ __u8 link_down_reason;
+ __u8 xcvr_type;
+ __u8 flow_ctrl;
+ __u8 fec;
+ __u8 active_lanes;
+ __u8 rsvd1;
+
+ __be64 advertising;
+
+ __be64 lp_advertising;
+};
+
+enum fun_eth_rss_const {
+ FUN_ETH_RSS_MAX_KEY_SIZE = 0x28,
+ FUN_ETH_RSS_MAX_INDIR_ENT = 0x40,
+};
+
+enum fun_eth_hash_alg {
+ FUN_ETH_RSS_ALG_INVALID = 0x0,
+ FUN_ETH_RSS_ALG_TOEPLITZ = 0x1,
+ FUN_ETH_RSS_ALG_CRC32 = 0x2,
+};
+
+struct fun_admin_rss_req {
+ struct fun_admin_req_common common;
+
+ union rss_req_subop {
+ struct fun_admin_rss_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 rsvd1;
+ __be32 viid; /* VI flow id */
+
+ __be64 metadata[1];
+
+ __u8 alg;
+ __u8 keylen;
+ __u8 indir_nent;
+ __u8 rsvd2;
+ __be16 key_off;
+ __be16 indir_off;
+
+ struct fun_dataop_hdr dataop;
+ } create;
+ } u;
+};
+
+#define FUN_ADMIN_RSS_CREATE_REQ_INIT(_subop, _flags, _id, _viid, _alg, \
+ _keylen, _indir_nent, _key_off, \
+ _indir_off) \
+ (struct fun_admin_rss_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .viid = cpu_to_be32(_viid), \
+ .alg = _alg, .keylen = _keylen, .indir_nent = _indir_nent, \
+ .key_off = cpu_to_be16(_key_off), \
+ .indir_off = cpu_to_be16(_indir_off), \
+ }
+
+struct fun_admin_vi_req {
+ struct fun_admin_req_common common;
+
+ union vi_req_subop {
+ struct fun_admin_vi_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 rsvd1;
+ __be32 portid; /* port flow id */
+ } create;
+ } u;
+};
+
+#define FUN_ADMIN_VI_CREATE_REQ_INIT(_subop, _flags, _id, _portid) \
+ (struct fun_admin_vi_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .portid = cpu_to_be32(_portid), \
+ }
+
+struct fun_admin_eth_req {
+ struct fun_admin_req_common common;
+
+ union eth_req_subop {
+ struct fun_admin_eth_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 rsvd1;
+ __be32 portid; /* port flow id */
+ } create;
+ } u;
+};
+
+#define FUN_ADMIN_ETH_CREATE_REQ_INIT(_subop, _flags, _id, _portid) \
+ (struct fun_admin_eth_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .portid = cpu_to_be32(_portid), \
+ }
+
+enum {
+ FUN_ADMIN_SWU_UPGRADE_FLAG_INIT = 0x10,
+ FUN_ADMIN_SWU_UPGRADE_FLAG_COMPLETE = 0x20,
+ FUN_ADMIN_SWU_UPGRADE_FLAG_DOWNGRADE = 0x40,
+ FUN_ADMIN_SWU_UPGRADE_FLAG_ACTIVE_IMAGE = 0x80,
+ FUN_ADMIN_SWU_UPGRADE_FLAG_ASYNC = 0x1,
+};
+
+enum fun_admin_swu_subop {
+ FUN_ADMIN_SWU_SUBOP_GET_VERSION = 0x20,
+ FUN_ADMIN_SWU_SUBOP_UPGRADE = 0x21,
+ FUN_ADMIN_SWU_SUBOP_UPGRADE_DATA = 0x22,
+ FUN_ADMIN_SWU_SUBOP_GET_ALL_VERSIONS = 0x23,
+};
+
+struct fun_admin_swu_req {
+ struct fun_admin_req_common common;
+
+ union swu_req_subop {
+ struct fun_admin_swu_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+ } create;
+ struct fun_admin_swu_upgrade_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 fourcc;
+ __be32 rsvd1;
+
+ __be64 image_size; /* upgrade image length */
+ } upgrade;
+ struct fun_admin_swu_upgrade_data_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 offset; /* offset of data in this command */
+ __be32 size; /* total size of data in this command */
+ } upgrade_data;
+ } u;
+
+ struct fun_subop_sgl sgl[]; /* in, out buffers through sgl */
+};
+
+#define FUN_ADMIN_SWU_CREATE_REQ_INIT(_subop, _flags, _id) \
+ (struct fun_admin_swu_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), \
+ }
+
+#define FUN_ADMIN_SWU_UPGRADE_REQ_INIT(_subop, _flags, _id, _fourcc, \
+ _image_size) \
+ (struct fun_admin_swu_upgrade_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .fourcc = cpu_to_be32(_fourcc), \
+ .image_size = cpu_to_be64(_image_size), \
+ }
+
+#define FUN_ADMIN_SWU_UPGRADE_DATA_REQ_INIT(_subop, _flags, _id, _offset, \
+ _size) \
+ (struct fun_admin_swu_upgrade_data_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .offset = cpu_to_be32(_offset), \
+ .size = cpu_to_be32(_size), \
+ }
+
+struct fun_admin_swu_rsp {
+ struct fun_admin_rsp_common common;
+
+ union swu_rsp_subop {
+ struct fun_admin_swu_create_rsp {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+ } create;
+ struct fun_admin_swu_upgrade_rsp {
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id;
+
+ __be32 fourcc;
+ __be32 status;
+
+ __be32 progress;
+ __be32 unused;
+ } upgrade;
+ struct fun_admin_swu_upgrade_data_rsp {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 offset;
+ __be32 size;
+ } upgrade_data;
+ } u;
+};
+
+enum fun_ktls_version {
+ FUN_KTLS_TLSV2 = 0x20,
+ FUN_KTLS_TLSV3 = 0x30,
+};
+
+enum fun_ktls_cipher {
+ FUN_KTLS_CIPHER_AES_GCM_128 = 0x33,
+ FUN_KTLS_CIPHER_AES_GCM_256 = 0x34,
+ FUN_KTLS_CIPHER_AES_CCM_128 = 0x35,
+ FUN_KTLS_CIPHER_CHACHA20_POLY1305 = 0x36,
+};
+
+enum fun_ktls_modify_flags {
+ FUN_KTLS_MODIFY_REMOVE = 0x1,
+};
+
+struct fun_admin_ktls_create_req {
+ struct fun_admin_req_common common;
+
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+};
+
+#define FUN_ADMIN_KTLS_CREATE_REQ_INIT(_subop, _flags, _id) \
+ (struct fun_admin_ktls_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), \
+ }
+
+struct fun_admin_ktls_create_rsp {
+ struct fun_admin_rsp_common common;
+
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id;
+};
+
+struct fun_admin_ktls_modify_req {
+ struct fun_admin_req_common common;
+
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be64 tlsid;
+
+ __be32 tcp_seq;
+ __u8 version;
+ __u8 cipher;
+ __u8 rsvd1[2];
+
+ __u8 record_seq[8];
+
+ __u8 key[32];
+
+ __u8 iv[16];
+
+ __u8 salt[8];
+};
+
+#define FUN_ADMIN_KTLS_MODIFY_REQ_INIT(_subop, _flags, _id, _tlsid, _tcp_seq, \
+ _version, _cipher) \
+ (struct fun_admin_ktls_modify_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .tlsid = cpu_to_be64(_tlsid), \
+ .tcp_seq = cpu_to_be32(_tcp_seq), .version = _version, \
+ .cipher = _cipher, \
+ }
+
+struct fun_admin_ktls_modify_rsp {
+ struct fun_admin_rsp_common common;
+
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id;
+
+ __be64 tlsid;
+};
+
+struct fun_req_common {
+ __u8 op;
+ __u8 len8;
+ __be16 flags;
+ __u8 suboff8;
+ __u8 rsvd0;
+ __be16 cid;
+};
+
+struct fun_rsp_common {
+ __u8 op;
+ __u8 len8;
+ __be16 flags;
+ __u8 suboff8;
+ __u8 ret;
+ __be16 cid;
+};
+
+struct fun_cqe_info {
+ __be16 sqhd;
+ __be16 sqid;
+ __be16 cid;
+ __be16 sf_p;
+};
+
+enum fun_eprq_def {
+ FUN_EPRQ_PKT_ALIGN = 0x80,
+};
+
+struct fun_eprq_rqbuf {
+ __be64 bufaddr;
+};
+
+#define FUN_EPRQ_RQBUF_INIT(_bufaddr) \
+ (struct fun_eprq_rqbuf) { \
+ .bufaddr = cpu_to_be64(_bufaddr), \
+ }
+
+enum fun_eth_op {
+ FUN_ETH_OP_TX = 0x1,
+ FUN_ETH_OP_RX = 0x2,
+};
+
+enum {
+ FUN_ETH_OFFLOAD_EN = 0x8000,
+ FUN_ETH_OUTER_EN = 0x4000,
+ FUN_ETH_INNER_LSO = 0x2000,
+ FUN_ETH_INNER_TSO = 0x1000,
+ FUN_ETH_OUTER_IPV6 = 0x800,
+ FUN_ETH_OUTER_UDP = 0x400,
+ FUN_ETH_INNER_IPV6 = 0x200,
+ FUN_ETH_INNER_UDP = 0x100,
+ FUN_ETH_UPDATE_OUTER_L3_LEN = 0x80,
+ FUN_ETH_UPDATE_OUTER_L3_CKSUM = 0x40,
+ FUN_ETH_UPDATE_OUTER_L4_LEN = 0x20,
+ FUN_ETH_UPDATE_OUTER_L4_CKSUM = 0x10,
+ FUN_ETH_UPDATE_INNER_L3_LEN = 0x8,
+ FUN_ETH_UPDATE_INNER_L3_CKSUM = 0x4,
+ FUN_ETH_UPDATE_INNER_L4_LEN = 0x2,
+ FUN_ETH_UPDATE_INNER_L4_CKSUM = 0x1,
+};
+
+struct fun_eth_offload {
+ __be16 flags; /* combination of above flags */
+ __be16 mss; /* TSO max seg size */
+ __be16 tcp_doff_flags; /* TCP data offset + flags 16b word */
+ __be16 vlan;
+
+ __be16 inner_l3_off; /* Inner L3 header offset */
+ __be16 inner_l4_off; /* Inner L4 header offset */
+ __be16 outer_l3_off; /* Outer L3 header offset */
+ __be16 outer_l4_off; /* Outer L4 header offset */
+};
+
+static inline void fun_eth_offload_init(struct fun_eth_offload *s, u16 flags,
+ u16 mss, __be16 tcp_doff_flags,
+ __be16 vlan, u16 inner_l3_off,
+ u16 inner_l4_off, u16 outer_l3_off,
+ u16 outer_l4_off)
+{
+ s->flags = cpu_to_be16(flags);
+ s->mss = cpu_to_be16(mss);
+ s->tcp_doff_flags = tcp_doff_flags;
+ s->vlan = vlan;
+ s->inner_l3_off = cpu_to_be16(inner_l3_off);
+ s->inner_l4_off = cpu_to_be16(inner_l4_off);
+ s->outer_l3_off = cpu_to_be16(outer_l3_off);
+ s->outer_l4_off = cpu_to_be16(outer_l4_off);
+}
+
+struct fun_eth_tls {
+ __be64 tlsid;
+};
+
+enum {
+ FUN_ETH_TX_TLS = 0x8000,
+};
+
+struct fun_eth_tx_req {
+ __u8 op;
+ __u8 len8;
+ __be16 flags;
+ __u8 suboff8;
+ __u8 repr_idn;
+ __be16 encap_proto;
+
+ struct fun_eth_offload offload;
+
+ struct fun_dataop_hdr dataop;
+};
+
+struct fun_eth_rx_cv {
+ __be16 il4_prot_to_l2_type;
+};
+
+#define FUN_ETH_RX_CV_IL4_PROT_S 13U
+#define FUN_ETH_RX_CV_IL4_PROT_M 0x3
+
+#define FUN_ETH_RX_CV_IL3_PROT_S 11U
+#define FUN_ETH_RX_CV_IL3_PROT_M 0x3
+
+#define FUN_ETH_RX_CV_OL4_PROT_S 8U
+#define FUN_ETH_RX_CV_OL4_PROT_M 0x7
+
+#define FUN_ETH_RX_CV_ENCAP_TYPE_S 6U
+#define FUN_ETH_RX_CV_ENCAP_TYPE_M 0x3
+
+#define FUN_ETH_RX_CV_OL3_PROT_S 4U
+#define FUN_ETH_RX_CV_OL3_PROT_M 0x3
+
+#define FUN_ETH_RX_CV_VLAN_TYPE_S 3U
+#define FUN_ETH_RX_CV_VLAN_TYPE_M 0x1
+
+#define FUN_ETH_RX_CV_L2_TYPE_S 2U
+#define FUN_ETH_RX_CV_L2_TYPE_M 0x1
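+
+/* The _S/_M pairs above are shift/mask extractors for the fields packed into
+ * the 16-bit RX CV word (delivered in the RX CQE's pkt_cv field). A minimal
+ * extraction sketch:
+ *
+ *	u16 cv = be16_to_cpu(cqe->pkt_cv);
+ *	unsigned int il4 = (cv >> FUN_ETH_RX_CV_IL4_PROT_S) &
+ *			   FUN_ETH_RX_CV_IL4_PROT_M;
+ */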
+
+enum fun_rx_cv {
+ FUN_RX_CV_NONE = 0x0,
+ FUN_RX_CV_IP = 0x2,
+ FUN_RX_CV_IP6 = 0x3,
+ FUN_RX_CV_TCP = 0x2,
+ FUN_RX_CV_UDP = 0x3,
+ FUN_RX_CV_VXLAN = 0x2,
+ FUN_RX_CV_MPLS = 0x3,
+};
+
+struct fun_eth_cqe {
+ __u8 op;
+ __u8 len8;
+ __u8 nsgl;
+ __u8 repr_idn;
+ __be32 pkt_len;
+
+ __be64 timestamp;
+
+ __be16 pkt_cv;
+ __be16 rsvd0;
+ __be32 hash;
+
+ __be16 encap_proto;
+ __be16 vlan;
+ __be32 rsvd1;
+
+ __be32 buf_offset;
+ __be16 headroom;
+ __be16 csum;
+};
+
+enum fun_admin_adi_attr {
+ FUN_ADMIN_ADI_ATTR_MACADDR = 0x1,
+ FUN_ADMIN_ADI_ATTR_VLAN = 0x2,
+ FUN_ADMIN_ADI_ATTR_RATE = 0x3,
+};
+
+struct fun_adi_param {
+ union adi_param {
+ struct fun_adi_mac {
+ __be64 addr;
+ } mac;
+ struct fun_adi_vlan {
+ __be32 rsvd;
+ __be16 eth_type;
+ __be16 tci;
+ } vlan;
+ struct fun_adi_rate {
+ __be32 rsvd;
+ __be32 tx_mbps;
+ } rate;
+ } u;
+};
+
+#define FUN_ADI_MAC_INIT(_addr) \
+ (struct fun_adi_mac) { \
+ .addr = cpu_to_be64(_addr), \
+ }
+
+#define FUN_ADI_VLAN_INIT(_eth_type, _tci) \
+ (struct fun_adi_vlan) { \
+ .eth_type = cpu_to_be16(_eth_type), .tci = cpu_to_be16(_tci), \
+ }
+
+#define FUN_ADI_RATE_INIT(_tx_mbps) \
+ (struct fun_adi_rate) { \
+ .tx_mbps = cpu_to_be32(_tx_mbps), \
+ }
+
+struct fun_admin_adi_req {
+ struct fun_admin_req_common common;
+
+ union adi_req_subop {
+ struct fun_admin_adi_write_req {
+ __u8 subop;
+ __u8 attribute;
+ __be16 rsvd;
+ __be32 id;
+
+ struct fun_adi_param param;
+ } write;
+ } u;
+};
+
+#define FUN_ADMIN_ADI_WRITE_REQ_INIT(_subop, _attribute, _id) \
+ (struct fun_admin_adi_write_req) { \
+ .subop = (_subop), .attribute = (_attribute), \
+ .id = cpu_to_be32(_id), \
+ }
+
+#endif /* __FUN_HCI_H */
diff --git a/drivers/net/ethernet/fungible/funcore/fun_queue.c b/drivers/net/ethernet/fungible/funcore/fun_queue.c
new file mode 100644
index 000000000000..8ab9f68434f5
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/fun_queue.c
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/log2.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include "fun_dev.h"
+#include "fun_queue.h"
+
+/* Allocate memory for a queue. This includes the memory for the HW descriptor
+ * ring, an optional 64b HW write-back area, and an optional SW state ring.
+ * Returns the virtual and DMA addresses of the HW ring, the VA of the SW ring,
+ * and the VA of the write-back area.
+ */
+void *fun_alloc_ring_mem(struct device *dma_dev, size_t depth,
+ size_t hw_desc_sz, size_t sw_desc_sz, bool wb,
+ int numa_node, dma_addr_t *dma_addr, void **sw_va,
+ volatile __be64 **wb_va)
+{
+ int dev_node = dev_to_node(dma_dev);
+ size_t dma_sz;
+ void *va;
+
+ if (numa_node == NUMA_NO_NODE)
+ numa_node = dev_node;
+
+ /* Place optional write-back area at end of descriptor ring. */
+ dma_sz = hw_desc_sz * depth;
+ if (wb)
+ dma_sz += sizeof(u64);
+
+ set_dev_node(dma_dev, numa_node);
+ va = dma_alloc_coherent(dma_dev, dma_sz, dma_addr, GFP_KERNEL);
+ set_dev_node(dma_dev, dev_node);
+ if (!va)
+ return NULL;
+
+ if (sw_desc_sz) {
+ *sw_va = kvzalloc_node(sw_desc_sz * depth, GFP_KERNEL,
+ numa_node);
+ if (!*sw_va) {
+ dma_free_coherent(dma_dev, dma_sz, va, *dma_addr);
+ return NULL;
+ }
+ }
+
+ if (wb)
+ *wb_va = va + dma_sz - sizeof(u64);
+ return va;
+}
+EXPORT_SYMBOL_GPL(fun_alloc_ring_mem);
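+
+/* A minimal allocation sketch (values illustrative): a 512-entry ring of
+ * 64B descriptors with head write-back and no SW state ring. sw_va is only
+ * written when sw_desc_sz != 0 and wb_va only when wb is true, so unused
+ * outputs may be NULL:
+ *
+ *	volatile __be64 *wb;
+ *	dma_addr_t dma;
+ *	void *va = fun_alloc_ring_mem(dev, 512, 64, 0, true, NUMA_NO_NODE,
+ *				      &dma, NULL, &wb);
+ */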
+
+void fun_free_ring_mem(struct device *dma_dev, size_t depth, size_t hw_desc_sz,
+ bool wb, void *hw_va, dma_addr_t dma_addr, void *sw_va)
+{
+ if (hw_va) {
+ size_t sz = depth * hw_desc_sz;
+
+ if (wb)
+ sz += sizeof(u64);
+ dma_free_coherent(dma_dev, sz, hw_va, dma_addr);
+ }
+ kvfree(sw_va);
+}
+EXPORT_SYMBOL_GPL(fun_free_ring_mem);
+
+/* Prepare and issue an admin command to create an SQ on the device with the
+ * provided parameters. If the queue ID is auto-allocated by the device, it
+ * is returned in *sqidp.
+ */
+int fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid,
+ u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr,
+ u8 coal_nentries, u8 coal_usec, u32 irq_num,
+ u32 scan_start_id, u32 scan_end_id,
+ u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp)
+{
+ union {
+ struct fun_admin_epsq_req req;
+ struct fun_admin_generic_create_rsp rsp;
+ } cmd;
+ dma_addr_t wb_addr;
+ u32 hw_qid;
+ int rc;
+
+ if (sq_depth > fdev->q_depth)
+ return -EINVAL;
+ if (flags & FUN_ADMIN_EPSQ_CREATE_FLAG_RQ)
+ sqe_size_log2 = ilog2(sizeof(struct fun_eprq_rqbuf));
+
+ wb_addr = dma_addr + (sq_depth << sqe_size_log2);
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPSQ,
+ sizeof(cmd.req));
+ cmd.req.u.create =
+ FUN_ADMIN_EPSQ_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, flags,
+ sqid, cqid, sqe_size_log2,
+ sq_depth - 1, dma_addr, 0,
+ coal_nentries, coal_usec,
+ irq_num, scan_start_id,
+ scan_end_id, 0,
+ rq_buf_size_log2,
+ ilog2(sizeof(u64)), wb_addr);
+
+ rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common,
+ &cmd.rsp, sizeof(cmd.rsp), 0);
+ if (rc)
+ return rc;
+
+ hw_qid = be32_to_cpu(cmd.rsp.id);
+ *dbp = fun_sq_db_addr(fdev, hw_qid);
+ if (flags & FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR)
+ *sqidp = hw_qid;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(fun_sq_create);
+
+/* Prepare and issue an admin command to create a CQ on the device with the
+ * provided parameters. If the queue ID is auto-allocated by the device, it
+ * is returned in *cqidp.
+ */
+int fun_cq_create(struct fun_dev *fdev, u16 flags, u32 cqid, u32 rqid,
+ u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,
+ u16 headroom, u16 tailroom, u8 coal_nentries, u8 coal_usec,
+ u32 irq_num, u32 scan_start_id, u32 scan_end_id, u32 *cqidp,
+ u32 __iomem **dbp)
+{
+ union {
+ struct fun_admin_epcq_req req;
+ struct fun_admin_generic_create_rsp rsp;
+ } cmd;
+ u32 hw_qid;
+ int rc;
+
+ if (cq_depth > fdev->q_depth)
+ return -EINVAL;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPCQ,
+ sizeof(cmd.req));
+ cmd.req.u.create =
+ FUN_ADMIN_EPCQ_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, flags,
+ cqid, rqid, cqe_size_log2,
+ cq_depth - 1, dma_addr, tailroom,
+ headroom / 2, 0, coal_nentries,
+ coal_usec, irq_num,
+ scan_start_id, scan_end_id, 0);
+
+ rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common,
+ &cmd.rsp, sizeof(cmd.rsp), 0);
+ if (rc)
+ return rc;
+
+ hw_qid = be32_to_cpu(cmd.rsp.id);
+ *dbp = fun_cq_db_addr(fdev, hw_qid);
+ if (flags & FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR)
+ *cqidp = hw_qid;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(fun_cq_create);
+
+static bool fun_sq_is_head_wb(const struct fun_queue *funq)
+{
+ return funq->sq_flags & FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS;
+}
+
+static void fun_clean_rq(struct fun_queue *funq)
+{
+ struct fun_dev *fdev = funq->fdev;
+ struct fun_rq_info *rqinfo;
+ unsigned int i;
+
+ for (i = 0; i < funq->rq_depth; i++) {
+ rqinfo = &funq->rq_info[i];
+ if (rqinfo->page) {
+ dma_unmap_page(fdev->dev, rqinfo->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ put_page(rqinfo->page);
+ rqinfo->page = NULL;
+ }
+ }
+}
+
+static int fun_fill_rq(struct fun_queue *funq)
+{
+ struct device *dev = funq->fdev->dev;
+ int i, node = dev_to_node(dev);
+ struct fun_rq_info *rqinfo;
+
+ for (i = 0; i < funq->rq_depth; i++) {
+ rqinfo = &funq->rq_info[i];
+ rqinfo->page = alloc_pages_node(node, GFP_KERNEL, 0);
+ if (unlikely(!rqinfo->page))
+ return -ENOMEM;
+
+ rqinfo->dma = dma_map_page(dev, rqinfo->page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, rqinfo->dma))) {
+ put_page(rqinfo->page);
+ rqinfo->page = NULL;
+ return -ENOMEM;
+ }
+
+ funq->rqes[i] = FUN_EPRQ_RQBUF_INIT(rqinfo->dma);
+ }
+
+ funq->rq_tail = funq->rq_depth - 1;
+ return 0;
+}
+
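+/* Advance the RQ consumer state for a completion landing at buf_offset in
+ * the current buffer. An offset at or below the previous one means the HW
+ * has moved on to the next RQ buffer, so the finished buffer is synced back
+ * to the device and counted for reposting.
+ */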
+static void fun_rq_update_pos(struct fun_queue *funq, int buf_offset)
+{
+ if (buf_offset <= funq->rq_buf_offset) {
+ struct fun_rq_info *rqinfo = &funq->rq_info[funq->rq_buf_idx];
+ struct device *dev = funq->fdev->dev;
+
+ dma_sync_single_for_device(dev, rqinfo->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ funq->num_rqe_to_fill++;
+ if (++funq->rq_buf_idx == funq->rq_depth)
+ funq->rq_buf_idx = 0;
+ }
+ funq->rq_buf_offset = buf_offset;
+}
+
+/* Given a command response with data scattered across >= 1 RQ buffers, return
+ * a pointer to a contiguous buffer containing all the data. If the data is in
+ * one RQ buffer, the start address within that buffer is returned; otherwise a
+ * new buffer is allocated and the data is gathered into it.
+ */
+static void *fun_data_from_rq(struct fun_queue *funq,
+ const struct fun_rsp_common *rsp, bool *need_free)
+{
+ u32 bufoff, total_len, remaining, fragsize, dataoff;
+ struct device *dma_dev = funq->fdev->dev;
+ const struct fun_dataop_rqbuf *databuf;
+ const struct fun_dataop_hdr *dataop;
+ const struct fun_rq_info *rqinfo;
+ void *data;
+
+ dataop = (void *)rsp + rsp->suboff8 * 8;
+ total_len = be32_to_cpu(dataop->total_len);
+
+ if (likely(dataop->nsgl == 1)) {
+ databuf = (struct fun_dataop_rqbuf *)dataop->imm;
+ bufoff = be32_to_cpu(databuf->bufoff);
+ fun_rq_update_pos(funq, bufoff);
+ rqinfo = &funq->rq_info[funq->rq_buf_idx];
+ dma_sync_single_for_cpu(dma_dev, rqinfo->dma + bufoff,
+ total_len, DMA_FROM_DEVICE);
+ *need_free = false;
+ return page_address(rqinfo->page) + bufoff;
+ }
+
+ /* For scattered completions gather the fragments into one buffer. */
+
+ data = kmalloc(total_len, GFP_ATOMIC);
+ /* NULL is OK here. In case of failure we still need to consume the data
+ * for proper buffer accounting but indicate an error in the response.
+ */
+ if (likely(data))
+ *need_free = true;
+
+ dataoff = 0;
+ for (remaining = total_len; remaining; remaining -= fragsize) {
+ fun_rq_update_pos(funq, 0);
+ fragsize = min_t(unsigned int, PAGE_SIZE, remaining);
+ if (data) {
+ rqinfo = &funq->rq_info[funq->rq_buf_idx];
+ dma_sync_single_for_cpu(dma_dev, rqinfo->dma, fragsize,
+ DMA_FROM_DEVICE);
+ memcpy(data + dataoff, page_address(rqinfo->page),
+ fragsize);
+ dataoff += fragsize;
+ }
+ }
+ return data;
+}
+
+unsigned int __fun_process_cq(struct fun_queue *funq, unsigned int max)
+{
+ const struct fun_cqe_info *info;
+ struct fun_rsp_common *rsp;
+ unsigned int new_cqes;
+ u16 sf_p, flags;
+ bool need_free;
+ void *cqe;
+
+ if (!max)
+ max = funq->cq_depth - 1;
+
+ for (new_cqes = 0; new_cqes < max; new_cqes++) {
+ cqe = funq->cqes + (funq->cq_head << funq->cqe_size_log2);
+ info = funq_cqe_info(funq, cqe);
+ sf_p = be16_to_cpu(info->sf_p);
+
+ if ((sf_p & 1) != funq->cq_phase)
+ break;
+
+ /* ensure the phase tag is read before other CQE fields */
+ dma_rmb();
+
+ if (++funq->cq_head == funq->cq_depth) {
+ funq->cq_head = 0;
+ funq->cq_phase = !funq->cq_phase;
+ }
+
+ rsp = cqe;
+ flags = be16_to_cpu(rsp->flags);
+
+ need_free = false;
+ if (unlikely(flags & FUN_REQ_COMMON_FLAG_CQE_IN_RQBUF)) {
+ rsp = fun_data_from_rq(funq, rsp, &need_free);
+ if (!rsp) {
+ rsp = cqe;
+ rsp->len8 = 1;
+ if (rsp->ret == 0)
+ rsp->ret = ENOMEM;
+ }
+ }
+
+ if (funq->cq_cb)
+ funq->cq_cb(funq, funq->cb_data, rsp, info);
+ if (need_free)
+ kfree(rsp);
+ }
+
+ dev_dbg(funq->fdev->dev, "CQ %u, new CQEs %u/%u, head %u, phase %u\n",
+ funq->cqid, new_cqes, max, funq->cq_head, funq->cq_phase);
+ return new_cqes;
+}
+
+unsigned int fun_process_cq(struct fun_queue *funq, unsigned int max)
+{
+ unsigned int processed;
+ u32 db;
+
+ processed = __fun_process_cq(funq, max);
+
+ if (funq->num_rqe_to_fill) {
+ funq->rq_tail = (funq->rq_tail + funq->num_rqe_to_fill) %
+ funq->rq_depth;
+ funq->num_rqe_to_fill = 0;
+ writel(funq->rq_tail, funq->rq_db);
+ }
+
+ db = funq->cq_head | FUN_DB_IRQ_ARM_F;
+ writel(db, funq->cq_db);
+ return processed;
+}
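+
+/* A minimal sketch of driving a queue from its IRQ handler, with the handler
+ * registered through fun_request_irq() and data == funq (names hypothetical).
+ * Passing max == 0 processes up to cq_depth - 1 CQEs and rearms the
+ * interrupt:
+ *
+ *	static irqreturn_t my_irq_handler(int irq, void *data)
+ *	{
+ *		struct fun_queue *funq = data;
+ *
+ *		fun_process_cq(funq, 0);
+ *		return IRQ_HANDLED;
+ *	}
+ */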
+
+static int fun_alloc_sqes(struct fun_queue *funq)
+{
+ funq->sq_cmds = fun_alloc_ring_mem(funq->fdev->dev, funq->sq_depth,
+ 1 << funq->sqe_size_log2, 0,
+ fun_sq_is_head_wb(funq),
+ NUMA_NO_NODE, &funq->sq_dma_addr,
+ NULL, &funq->sq_head);
+ return funq->sq_cmds ? 0 : -ENOMEM;
+}
+
+static int fun_alloc_cqes(struct fun_queue *funq)
+{
+ funq->cqes = fun_alloc_ring_mem(funq->fdev->dev, funq->cq_depth,
+ 1 << funq->cqe_size_log2, 0, false,
+ NUMA_NO_NODE, &funq->cq_dma_addr, NULL,
+ NULL);
+ return funq->cqes ? 0 : -ENOMEM;
+}
+
+static int fun_alloc_rqes(struct fun_queue *funq)
+{
+ funq->rqes = fun_alloc_ring_mem(funq->fdev->dev, funq->rq_depth,
+ sizeof(*funq->rqes),
+ sizeof(*funq->rq_info), false,
+ NUMA_NO_NODE, &funq->rq_dma_addr,
+ (void **)&funq->rq_info, NULL);
+ return funq->rqes ? 0 : -ENOMEM;
+}
+
+/* Free a queue's structures. */
+void fun_free_queue(struct fun_queue *funq)
+{
+ struct device *dev = funq->fdev->dev;
+
+ fun_free_ring_mem(dev, funq->cq_depth, 1 << funq->cqe_size_log2, false,
+ funq->cqes, funq->cq_dma_addr, NULL);
+ fun_free_ring_mem(dev, funq->sq_depth, 1 << funq->sqe_size_log2,
+ fun_sq_is_head_wb(funq), funq->sq_cmds,
+ funq->sq_dma_addr, NULL);
+
+ if (funq->rqes) {
+ fun_clean_rq(funq);
+ fun_free_ring_mem(dev, funq->rq_depth, sizeof(*funq->rqes),
+ false, funq->rqes, funq->rq_dma_addr,
+ funq->rq_info);
+ }
+
+ kfree(funq);
+}
+
+/* Allocate and initialize a funq's structures. */
+struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid,
+ const struct fun_queue_alloc_req *req)
+{
+ struct fun_queue *funq = kzalloc(sizeof(*funq), GFP_KERNEL);
+
+ if (!funq)
+ return NULL;
+
+ funq->fdev = fdev;
+ spin_lock_init(&funq->sq_lock);
+
+ funq->qid = qid;
+
+ /* Initial CQ/SQ/RQ ids */
+ if (req->rq_depth) {
+ funq->cqid = 2 * qid;
+ if (funq->qid) {
+			/* I/O Q: use rqid = cqid, sqid = rqid + 1 */
+ funq->rqid = funq->cqid;
+ funq->sqid = funq->rqid + 1;
+ } else {
+ /* Admin Q: sqid is always 0, use ID 1 for RQ */
+ funq->sqid = 0;
+ funq->rqid = 1;
+ }
+ } else {
+ funq->cqid = qid;
+ funq->sqid = qid;
+ }
+
+ funq->cq_flags = req->cq_flags;
+ funq->sq_flags = req->sq_flags;
+
+ funq->cqe_size_log2 = req->cqe_size_log2;
+ funq->sqe_size_log2 = req->sqe_size_log2;
+
+ funq->cq_depth = req->cq_depth;
+ funq->sq_depth = req->sq_depth;
+
+ funq->cq_intcoal_nentries = req->cq_intcoal_nentries;
+ funq->cq_intcoal_usec = req->cq_intcoal_usec;
+
+ funq->sq_intcoal_nentries = req->sq_intcoal_nentries;
+ funq->sq_intcoal_usec = req->sq_intcoal_usec;
+
+ if (fun_alloc_cqes(funq))
+ goto free_funq;
+
+ funq->cq_phase = 1;
+
+ if (fun_alloc_sqes(funq))
+ goto free_funq;
+
+ if (req->rq_depth) {
+ funq->rq_flags = req->rq_flags | FUN_ADMIN_EPSQ_CREATE_FLAG_RQ;
+ funq->rq_depth = req->rq_depth;
+ funq->rq_buf_offset = -1;
+
+ if (fun_alloc_rqes(funq) || fun_fill_rq(funq))
+ goto free_funq;
+ }
+
+ funq->cq_vector = -1;
+ funq->cqe_info_offset = (1 << funq->cqe_size_log2) - sizeof(struct fun_cqe_info);
+
+	/* SQ/CQ 0 are implicitly created; assign their doorbells now.
+ * Other queues are assigned doorbells at their explicit creation.
+ */
+ if (funq->sqid == 0)
+ funq->sq_db = fun_sq_db_addr(fdev, 0);
+ if (funq->cqid == 0)
+ funq->cq_db = fun_cq_db_addr(fdev, 0);
+
+ return funq;
+
+free_funq:
+ fun_free_queue(funq);
+ return NULL;
+}
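+
+/* Typical lifecycle, pieced together from the helpers in this file (error
+ * handling and device-side teardown elided; the caller is assumed to pick
+ * the IRQ vector):
+ *
+ *	funq = fun_alloc_queue(fdev, qid, &req);
+ *	funq->cq_vector = vector;
+ *	fun_request_irq(funq, name, my_irq_handler, funq);
+ *	fun_create_queue(funq);
+ *	...
+ *	fun_free_irq(funq);
+ *	fun_free_queue(funq);
+ */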
+
+/* Create a funq's CQ on the device. */
+static int fun_create_cq(struct fun_queue *funq)
+{
+ struct fun_dev *fdev = funq->fdev;
+ unsigned int rqid;
+ int rc;
+
+ rqid = funq->cq_flags & FUN_ADMIN_EPCQ_CREATE_FLAG_RQ ?
+ funq->rqid : FUN_HCI_ID_INVALID;
+ rc = fun_cq_create(fdev, funq->cq_flags, funq->cqid, rqid,
+ funq->cqe_size_log2, funq->cq_depth,
+ funq->cq_dma_addr, 0, 0, funq->cq_intcoal_nentries,
+ funq->cq_intcoal_usec, funq->cq_vector, 0, 0,
+ &funq->cqid, &funq->cq_db);
+ if (!rc)
+ dev_dbg(fdev->dev, "created CQ %u\n", funq->cqid);
+
+ return rc;
+}
+
+/* Create a funq's SQ on the device. */
+static int fun_create_sq(struct fun_queue *funq)
+{
+ struct fun_dev *fdev = funq->fdev;
+ int rc;
+
+ rc = fun_sq_create(fdev, funq->sq_flags, funq->sqid, funq->cqid,
+ funq->sqe_size_log2, funq->sq_depth,
+ funq->sq_dma_addr, funq->sq_intcoal_nentries,
+ funq->sq_intcoal_usec, funq->cq_vector, 0, 0,
+ 0, &funq->sqid, &funq->sq_db);
+ if (!rc)
+ dev_dbg(fdev->dev, "created SQ %u\n", funq->sqid);
+
+ return rc;
+}
+
+/* Create a funq's RQ on the device. */
+int fun_create_rq(struct fun_queue *funq)
+{
+ struct fun_dev *fdev = funq->fdev;
+ int rc;
+
+ rc = fun_sq_create(fdev, funq->rq_flags, funq->rqid, funq->cqid, 0,
+ funq->rq_depth, funq->rq_dma_addr, 0, 0,
+ funq->cq_vector, 0, 0, PAGE_SHIFT, &funq->rqid,
+ &funq->rq_db);
+ if (!rc)
+ dev_dbg(fdev->dev, "created RQ %u\n", funq->rqid);
+
+ return rc;
+}
+
+static unsigned int funq_irq(struct fun_queue *funq)
+{
+ return pci_irq_vector(to_pci_dev(funq->fdev->dev), funq->cq_vector);
+}
+
+int fun_request_irq(struct fun_queue *funq, const char *devname,
+ irq_handler_t handler, void *data)
+{
+ int rc;
+
+ if (funq->cq_vector < 0)
+ return -EINVAL;
+
+ funq->irq_handler = handler;
+ funq->irq_data = data;
+
+ snprintf(funq->irqname, sizeof(funq->irqname),
+ funq->qid ? "%s-q[%d]" : "%s-adminq", devname, funq->qid);
+
+ rc = request_irq(funq_irq(funq), handler, 0, funq->irqname, data);
+ if (rc)
+ funq->irq_handler = NULL;
+
+ return rc;
+}
+
+/* Create all component queues of a funq on the device. */
+int fun_create_queue(struct fun_queue *funq)
+{
+ int rc;
+
+ rc = fun_create_cq(funq);
+ if (rc)
+ return rc;
+
+ if (funq->rq_depth) {
+ rc = fun_create_rq(funq);
+ if (rc)
+ goto release_cq;
+ }
+
+ rc = fun_create_sq(funq);
+ if (rc)
+ goto release_rq;
+
+ return 0;
+
+release_rq:
+ fun_destroy_sq(funq->fdev, funq->rqid);
+release_cq:
+ fun_destroy_cq(funq->fdev, funq->cqid);
+ return rc;
+}
+
+void fun_free_irq(struct fun_queue *funq)
+{
+ if (funq->irq_handler) {
+ unsigned int vector = funq_irq(funq);
+
+ free_irq(vector, funq->irq_data);
+ funq->irq_handler = NULL;
+ funq->irq_data = NULL;
+ }
+}
diff --git a/drivers/net/ethernet/fungible/funcore/fun_queue.h b/drivers/net/ethernet/fungible/funcore/fun_queue.h
new file mode 100644
index 000000000000..7fb53d0ae8b0
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/fun_queue.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUN_QUEUE_H
+#define _FUN_QUEUE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+struct device;
+struct fun_dev;
+struct fun_queue;
+struct fun_cqe_info;
+struct fun_rsp_common;
+
+typedef void (*cq_callback_t)(struct fun_queue *funq, void *data, void *msg,
+ const struct fun_cqe_info *info);
+
+struct fun_rq_info {
+ dma_addr_t dma;
+ struct page *page;
+};
+
+/* A queue group consisting of an SQ, a CQ, and an optional RQ. */
+struct fun_queue {
+ struct fun_dev *fdev;
+ spinlock_t sq_lock;
+
+ dma_addr_t cq_dma_addr;
+ dma_addr_t sq_dma_addr;
+ dma_addr_t rq_dma_addr;
+
+ u32 __iomem *cq_db;
+ u32 __iomem *sq_db;
+ u32 __iomem *rq_db;
+
+ void *cqes;
+ void *sq_cmds;
+ struct fun_eprq_rqbuf *rqes;
+ struct fun_rq_info *rq_info;
+
+ u32 cqid;
+ u32 sqid;
+ u32 rqid;
+
+ u32 cq_depth;
+ u32 sq_depth;
+ u32 rq_depth;
+
+ u16 cq_head;
+ u16 sq_tail;
+ u16 rq_tail;
+
+ u8 cqe_size_log2;
+ u8 sqe_size_log2;
+
+ u16 cqe_info_offset;
+
+ u16 rq_buf_idx;
+ int rq_buf_offset;
+ u16 num_rqe_to_fill;
+
+ u8 cq_intcoal_usec;
+ u8 cq_intcoal_nentries;
+ u8 sq_intcoal_usec;
+ u8 sq_intcoal_nentries;
+
+ u16 cq_flags;
+ u16 sq_flags;
+ u16 rq_flags;
+
+ /* SQ head writeback */
+ u16 sq_comp;
+
+ volatile __be64 *sq_head;
+
+ cq_callback_t cq_cb;
+ void *cb_data;
+
+ irq_handler_t irq_handler;
+ void *irq_data;
+ s16 cq_vector;
+ u8 cq_phase;
+
+ /* I/O q index */
+ u16 qid;
+
+ char irqname[24];
+};
+
+static inline void *fun_sqe_at(const struct fun_queue *funq, unsigned int pos)
+{
+ return funq->sq_cmds + (pos << funq->sqe_size_log2);
+}
+
+static inline void funq_sq_post_tail(struct fun_queue *funq, u16 tail)
+{
+ if (++tail == funq->sq_depth)
+ tail = 0;
+ funq->sq_tail = tail;
+ writel(tail, funq->sq_db);
+}
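+
+/* A minimal SQE submission sketch using the helpers above (cmd is a
+ * caller-built SQE; names hypothetical). The caller is assumed to hold
+ * sq_lock and to have checked for ring space:
+ *
+ *	void *sqe = fun_sqe_at(funq, funq->sq_tail);
+ *
+ *	memcpy(sqe, cmd, 1 << funq->sqe_size_log2);
+ *	funq_sq_post_tail(funq, funq->sq_tail);
+ */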
+
+static inline struct fun_cqe_info *funq_cqe_info(const struct fun_queue *funq,
+ void *cqe)
+{
+ return cqe + funq->cqe_info_offset;
+}
+
+static inline void funq_rq_post(struct fun_queue *funq)
+{
+ writel(funq->rq_tail, funq->rq_db);
+}
+
+struct fun_queue_alloc_req {
+ u8 cqe_size_log2;
+ u8 sqe_size_log2;
+
+ u16 cq_flags;
+ u16 sq_flags;
+ u16 rq_flags;
+
+ u32 cq_depth;
+ u32 sq_depth;
+ u32 rq_depth;
+
+ u8 cq_intcoal_usec;
+ u8 cq_intcoal_nentries;
+ u8 sq_intcoal_usec;
+ u8 sq_intcoal_nentries;
+};
+
+int fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid,
+ u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr,
+ u8 coal_nentries, u8 coal_usec, u32 irq_num,
+ u32 scan_start_id, u32 scan_end_id,
+ u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp);
+int fun_cq_create(struct fun_dev *fdev, u16 flags, u32 cqid, u32 rqid,
+ u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,
+ u16 headroom, u16 tailroom, u8 coal_nentries, u8 coal_usec,
+ u32 irq_num, u32 scan_start_id, u32 scan_end_id,
+ u32 *cqidp, u32 __iomem **dbp);
+void *fun_alloc_ring_mem(struct device *dma_dev, size_t depth,
+ size_t hw_desc_sz, size_t sw_desc_size, bool wb,
+ int numa_node, dma_addr_t *dma_addr, void **sw_va,
+ volatile __be64 **wb_va);
+void fun_free_ring_mem(struct device *dma_dev, size_t depth, size_t hw_desc_sz,
+ bool wb, void *hw_va, dma_addr_t dma_addr, void *sw_va);
+
+#define fun_destroy_sq(fdev, sqid) \
+ fun_res_destroy((fdev), FUN_ADMIN_OP_EPSQ, 0, (sqid))
+#define fun_destroy_cq(fdev, cqid) \
+ fun_res_destroy((fdev), FUN_ADMIN_OP_EPCQ, 0, (cqid))
+
+struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid,
+ const struct fun_queue_alloc_req *req);
+void fun_free_queue(struct fun_queue *funq);
+
+static inline void fun_set_cq_callback(struct fun_queue *funq, cq_callback_t cb,
+ void *cb_data)
+{
+ funq->cq_cb = cb;
+ funq->cb_data = cb_data;
+}
+
+int fun_create_rq(struct fun_queue *funq);
+int fun_create_queue(struct fun_queue *funq);
+
+void fun_free_irq(struct fun_queue *funq);
+int fun_request_irq(struct fun_queue *funq, const char *devname,
+ irq_handler_t handler, void *data);
+
+unsigned int __fun_process_cq(struct fun_queue *funq, unsigned int max);
+unsigned int fun_process_cq(struct fun_queue *funq, unsigned int max);
+
+#endif /* _FUN_QUEUE_H */
diff --git a/drivers/net/ethernet/fungible/funeth/Kconfig b/drivers/net/ethernet/fungible/funeth/Kconfig
new file mode 100644
index 000000000000..c72ad9386400
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Fungible Ethernet driver configuration
+#
+
+config FUN_ETH
+ tristate "Fungible Ethernet device driver"
+ depends on PCI && PCI_MSI
+ depends on TLS && TLS_DEVICE || TLS_DEVICE=n
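+	# The line above is the usual kconfig idiom preventing a built-in
+	# driver from referencing modular TLS symbols: either the driver's
+	# tristate is capped at TLS's, or TLS device offload is off entirely.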
+ select NET_DEVLINK
+ select FUN_CORE
+ help
+ This driver supports the Ethernet functionality of Fungible adapters.
+ It works with both physical and virtual functions.
+
+ To compile this driver as a module, choose M here. The module
+ will be called funeth.
diff --git a/drivers/net/ethernet/fungible/funeth/Makefile b/drivers/net/ethernet/fungible/funeth/Makefile
new file mode 100644
index 000000000000..646d69595b4f
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+ccflags-y += -I$(srctree)/$(src)/../funcore -I$(srctree)/$(src)
+
+obj-$(CONFIG_FUN_ETH) += funeth.o
+
+funeth-y := funeth_main.o funeth_rx.o funeth_tx.o funeth_devlink.o \
+ funeth_ethtool.o
+
+funeth-$(CONFIG_TLS_DEVICE) += funeth_ktls.o
diff --git a/drivers/net/ethernet/fungible/funeth/fun_port.h b/drivers/net/ethernet/fungible/funeth/fun_port.h
new file mode 100644
index 000000000000..0f9da44e3786
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/fun_port.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUN_PORT_H
+#define _FUN_PORT_H
+
+enum port_mac_rx_stats {
+ PORT_MAC_RX_etherStatsOctets = 0x0,
+ PORT_MAC_RX_OctetsReceivedOK = 0x1,
+ PORT_MAC_RX_aAlignmentErrors = 0x2,
+ PORT_MAC_RX_aPAUSEMACCtrlFramesReceived = 0x3,
+ PORT_MAC_RX_aFrameTooLongErrors = 0x4,
+ PORT_MAC_RX_aInRangeLengthErrors = 0x5,
+ PORT_MAC_RX_aFramesReceivedOK = 0x6,
+ PORT_MAC_RX_aFrameCheckSequenceErrors = 0x7,
+ PORT_MAC_RX_VLANReceivedOK = 0x8,
+ PORT_MAC_RX_ifInErrors = 0x9,
+ PORT_MAC_RX_ifInUcastPkts = 0xa,
+ PORT_MAC_RX_ifInMulticastPkts = 0xb,
+ PORT_MAC_RX_ifInBroadcastPkts = 0xc,
+ PORT_MAC_RX_etherStatsDropEvents = 0xd,
+ PORT_MAC_RX_etherStatsPkts = 0xe,
+ PORT_MAC_RX_etherStatsUndersizePkts = 0xf,
+ PORT_MAC_RX_etherStatsPkts64Octets = 0x10,
+ PORT_MAC_RX_etherStatsPkts65to127Octets = 0x11,
+ PORT_MAC_RX_etherStatsPkts128to255Octets = 0x12,
+ PORT_MAC_RX_etherStatsPkts256to511Octets = 0x13,
+ PORT_MAC_RX_etherStatsPkts512to1023Octets = 0x14,
+ PORT_MAC_RX_etherStatsPkts1024to1518Octets = 0x15,
+ PORT_MAC_RX_etherStatsPkts1519toMaxOctets = 0x16,
+ PORT_MAC_RX_etherStatsOversizePkts = 0x17,
+ PORT_MAC_RX_etherStatsJabbers = 0x18,
+ PORT_MAC_RX_etherStatsFragments = 0x19,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_0 = 0x1a,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_1 = 0x1b,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_2 = 0x1c,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_3 = 0x1d,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_4 = 0x1e,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_5 = 0x1f,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_6 = 0x20,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_7 = 0x21,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_8 = 0x22,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_9 = 0x23,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_10 = 0x24,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_11 = 0x25,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_12 = 0x26,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_13 = 0x27,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_14 = 0x28,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_15 = 0x29,
+ PORT_MAC_RX_MACControlFramesReceived = 0x2a,
+ PORT_MAC_RX_STATS_MAX = 0x2b,
+};
+
+enum port_mac_tx_stats {
+ PORT_MAC_TX_etherStatsOctets = 0x0,
+ PORT_MAC_TX_OctetsTransmittedOK = 0x1,
+ PORT_MAC_TX_aPAUSEMACCtrlFramesTransmitted = 0x2,
+ PORT_MAC_TX_aFramesTransmittedOK = 0x3,
+ PORT_MAC_TX_VLANTransmittedOK = 0x4,
+ PORT_MAC_TX_ifOutErrors = 0x5,
+ PORT_MAC_TX_ifOutUcastPkts = 0x6,
+ PORT_MAC_TX_ifOutMulticastPkts = 0x7,
+ PORT_MAC_TX_ifOutBroadcastPkts = 0x8,
+ PORT_MAC_TX_etherStatsPkts64Octets = 0x9,
+ PORT_MAC_TX_etherStatsPkts65to127Octets = 0xa,
+ PORT_MAC_TX_etherStatsPkts128to255Octets = 0xb,
+ PORT_MAC_TX_etherStatsPkts256to511Octets = 0xc,
+ PORT_MAC_TX_etherStatsPkts512to1023Octets = 0xd,
+ PORT_MAC_TX_etherStatsPkts1024to1518Octets = 0xe,
+ PORT_MAC_TX_etherStatsPkts1519toMaxOctets = 0xf,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_0 = 0x10,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_1 = 0x11,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_2 = 0x12,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_3 = 0x13,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_4 = 0x14,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_5 = 0x15,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_6 = 0x16,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_7 = 0x17,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_8 = 0x18,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_9 = 0x19,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_10 = 0x1a,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_11 = 0x1b,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_12 = 0x1c,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_13 = 0x1d,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_14 = 0x1e,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_15 = 0x1f,
+ PORT_MAC_TX_MACControlFramesTransmitted = 0x20,
+ PORT_MAC_TX_etherStatsPkts = 0x21,
+ PORT_MAC_TX_STATS_MAX = 0x22,
+};
+
+enum port_mac_fec_stats {
+ PORT_MAC_FEC_Correctable = 0x0,
+ PORT_MAC_FEC_Uncorrectable = 0x1,
+ PORT_MAC_FEC_STATS_MAX = 0x2,
+};
+
+#endif /* _FUN_PORT_H */
diff --git a/drivers/net/ethernet/fungible/funeth/funeth.h b/drivers/net/ethernet/fungible/funeth/funeth.h
new file mode 100644
index 000000000000..1250e10d21db
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUNETH_H
+#define _FUNETH_H
+
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/net_tstamp.h>
+#include <linux/mutex.h>
+#include <linux/seqlock.h>
+#include <linux/xarray.h>
+#include <net/devlink.h>
+#include "fun_dev.h"
+
+#define ADMIN_SQE_SIZE SZ_128
+#define ADMIN_CQE_SIZE SZ_64
+#define ADMIN_RSP_MAX_LEN (ADMIN_CQE_SIZE - sizeof(struct fun_cqe_info))
+
+#define FUN_MAX_MTU 9024
+
+#define SQ_DEPTH 512U
+#define CQ_DEPTH 1024U
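+/* RX buffers are full pages, so the default RQ depth is assumed to scale
+ * down on larger page sizes to keep the amount of posted buffer memory
+ * roughly constant (512 entries with 4KB pages).
+ */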
+#define RQ_DEPTH (512U / (PAGE_SIZE / 4096))
+
+#define CQ_INTCOAL_USEC 10
+#define CQ_INTCOAL_NPKT 16
+#define SQ_INTCOAL_USEC 10
+#define SQ_INTCOAL_NPKT 16
+
+#define INVALID_LPORT 0xffff
+
+#define FUN_PORT_CAP_PAUSE_MASK (FUN_PORT_CAP_TX_PAUSE | FUN_PORT_CAP_RX_PAUSE)
+
+struct fun_vport_info {
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+ __be16 vlan_proto;
+ u8 qos;
+ u8 spoofchk:1;
+ u8 trusted:1;
+ unsigned int max_rate;
+};
+
+/* "subclass" of fun_dev for Ethernet functions */
+struct fun_ethdev {
+ struct fun_dev fdev;
+
+ /* the function's network ports */
+ struct net_device **netdevs;
+ unsigned int num_ports;
+
+ /* configuration for the function's virtual ports */
+ unsigned int num_vports;
+ struct fun_vport_info *vport_info;
+
+ struct mutex state_mutex; /* nests inside RTNL if both taken */
+
+ unsigned int nsqs_per_port;
+};
+
+static inline struct fun_ethdev *to_fun_ethdev(struct fun_dev *p)
+{
+ return container_of(p, struct fun_ethdev, fdev);
+}
+
+struct fun_qset {
+ struct funeth_rxq **rxqs;
+ struct funeth_txq **txqs;
+ struct funeth_txq **xdpqs;
+ unsigned int nrxqs;
+ unsigned int ntxqs;
+ unsigned int nxdpqs;
+ unsigned int rxq_start;
+ unsigned int txq_start;
+ unsigned int xdpq_start;
+ unsigned int cq_depth;
+ unsigned int rq_depth;
+ unsigned int sq_depth;
+ int state;
+};
+
+/* Per netdevice driver state, i.e., netdev_priv. */
+struct funeth_priv {
+ struct fun_dev *fdev;
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+
+ struct funeth_rxq * __rcu *rxqs;
+ struct funeth_txq **txqs;
+ struct funeth_txq * __rcu *xdpqs;
+
+ struct xarray irqs;
+ unsigned int num_tx_irqs;
+ unsigned int num_rx_irqs;
+ unsigned int rx_irq_ofst;
+
+ unsigned int lane_attrs;
+ u16 lport;
+
+ /* link settings */
+ u64 port_caps;
+ u64 advertising;
+ u64 lp_advertising;
+ unsigned int link_speed;
+ u8 xcvr_type;
+ u8 active_fc;
+ u8 active_fec;
+ u8 link_down_reason;
+ seqcount_t link_seq;
+
+ u32 msg_enable;
+
+ unsigned int num_xdpqs;
+
+ /* ethtool, etc. config parameters */
+ unsigned int sq_depth;
+ unsigned int rq_depth;
+ unsigned int cq_depth;
+ unsigned int cq_irq_db;
+ u8 tx_coal_usec;
+ u8 tx_coal_count;
+ u8 rx_coal_usec;
+ u8 rx_coal_count;
+
+ struct hwtstamp_config hwtstamp_cfg;
+
+ /* cumulative queue stats from earlier queue instances */
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_dropped;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+
+ /* RSS */
+ unsigned int rss_hw_id;
+ enum fun_eth_hash_alg hash_algo;
+ u8 rss_key[FUN_ETH_RSS_MAX_KEY_SIZE];
+ unsigned int indir_table_nentries;
+ u32 indir_table[FUN_ETH_RSS_MAX_INDIR_ENT];
+ dma_addr_t rss_dma_addr;
+ void *rss_cfg;
+
+ /* DMA area for port stats */
+ dma_addr_t stats_dma_addr;
+ __be64 *stats;
+
+ struct bpf_prog *xdp_prog;
+
+ struct devlink_port dl_port;
+
+ /* kTLS state */
+ unsigned int ktls_id;
+ atomic64_t tx_tls_add;
+ atomic64_t tx_tls_del;
+ atomic64_t tx_tls_resync;
+};
+
+void fun_set_ethtool_ops(struct net_device *netdev);
+int fun_port_write_cmd(struct funeth_priv *fp, int key, u64 data);
+int fun_port_read_cmd(struct funeth_priv *fp, int key, u64 *data);
+int fun_create_and_bind_tx(struct funeth_priv *fp, u32 sqid);
+int fun_replace_queues(struct net_device *dev, struct fun_qset *newqs,
+ struct netlink_ext_ack *extack);
+int fun_change_num_queues(struct net_device *dev, unsigned int ntx,
+ unsigned int nrx);
+void fun_set_ring_count(struct net_device *netdev, unsigned int ntx,
+ unsigned int nrx);
+int fun_config_rss(struct net_device *dev, int algo, const u8 *key,
+ const u32 *qtable, u8 op);
+
+#endif /* _FUNETH_H */
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_devlink.c b/drivers/net/ethernet/fungible/funeth/funeth_devlink.c
new file mode 100644
index 000000000000..a849b3c6b01f
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_devlink.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include "funeth.h"
+#include "funeth_devlink.h"
+
+static int fun_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static const struct devlink_ops fun_dl_ops = {
+ .info_get = fun_dl_info_get,
+};
+
+struct devlink *fun_devlink_alloc(struct device *dev)
+{
+ return devlink_alloc(&fun_dl_ops, sizeof(struct fun_ethdev), dev);
+}
+
+void fun_devlink_free(struct devlink *devlink)
+{
+ devlink_free(devlink);
+}
+
+void fun_devlink_register(struct devlink *devlink)
+{
+ devlink_register(devlink);
+}
+
+void fun_devlink_unregister(struct devlink *devlink)
+{
+ devlink_unregister(devlink);
+}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_devlink.h b/drivers/net/ethernet/fungible/funeth/funeth_devlink.h
new file mode 100644
index 000000000000..e40464d57ff4
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_devlink.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef __FUNETH_DEVLINK_H
+#define __FUNETH_DEVLINK_H
+
+#include <net/devlink.h>
+
+struct devlink *fun_devlink_alloc(struct device *dev);
+void fun_devlink_free(struct devlink *devlink);
+void fun_devlink_register(struct devlink *devlink);
+void fun_devlink_unregister(struct devlink *devlink);
+
+#endif /* __FUNETH_DEVLINK_H */
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
new file mode 100644
index 000000000000..d081168c95fa
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
@@ -0,0 +1,1162 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/ethtool.h>
+#include <linux/linkmode.h>
+#include <linux/netdevice.h>
+#include <linux/nvme.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include "funeth.h"
+#include "fun_port.h"
+#include "funeth_txrx.h"
+
+/* Min queue depth. The smallest power-of-2 supporting jumbo frames with 4K
+ * pages is 8. Require it for all types of queues, though some could work
+ * with fewer entries.
+ */
+#define FUNETH_MIN_QDEPTH 8
+
+static const char mac_tx_stat_names[][ETH_GSTRING_LEN] = {
+ "mac_tx_octets_total",
+ "mac_tx_frames_total",
+ "mac_tx_vlan_frames_ok",
+ "mac_tx_unicast_frames",
+ "mac_tx_multicast_frames",
+ "mac_tx_broadcast_frames",
+ "mac_tx_errors",
+ "mac_tx_CBFCPAUSE0",
+ "mac_tx_CBFCPAUSE1",
+ "mac_tx_CBFCPAUSE2",
+ "mac_tx_CBFCPAUSE3",
+ "mac_tx_CBFCPAUSE4",
+ "mac_tx_CBFCPAUSE5",
+ "mac_tx_CBFCPAUSE6",
+ "mac_tx_CBFCPAUSE7",
+ "mac_tx_CBFCPAUSE8",
+ "mac_tx_CBFCPAUSE9",
+ "mac_tx_CBFCPAUSE10",
+ "mac_tx_CBFCPAUSE11",
+ "mac_tx_CBFCPAUSE12",
+ "mac_tx_CBFCPAUSE13",
+ "mac_tx_CBFCPAUSE14",
+ "mac_tx_CBFCPAUSE15",
+};
+
+static const char mac_rx_stat_names[][ETH_GSTRING_LEN] = {
+ "mac_rx_octets_total",
+ "mac_rx_frames_total",
+ "mac_rx_VLAN_frames_ok",
+ "mac_rx_unicast_frames",
+ "mac_rx_multicast_frames",
+ "mac_rx_broadcast_frames",
+ "mac_rx_drop_events",
+ "mac_rx_errors",
+ "mac_rx_alignment_errors",
+ "mac_rx_CBFCPAUSE0",
+ "mac_rx_CBFCPAUSE1",
+ "mac_rx_CBFCPAUSE2",
+ "mac_rx_CBFCPAUSE3",
+ "mac_rx_CBFCPAUSE4",
+ "mac_rx_CBFCPAUSE5",
+ "mac_rx_CBFCPAUSE6",
+ "mac_rx_CBFCPAUSE7",
+ "mac_rx_CBFCPAUSE8",
+ "mac_rx_CBFCPAUSE9",
+ "mac_rx_CBFCPAUSE10",
+ "mac_rx_CBFCPAUSE11",
+ "mac_rx_CBFCPAUSE12",
+ "mac_rx_CBFCPAUSE13",
+ "mac_rx_CBFCPAUSE14",
+ "mac_rx_CBFCPAUSE15",
+};
+
+static const char * const txq_stat_names[] = {
+ "tx_pkts",
+ "tx_bytes",
+ "tx_cso",
+ "tx_tso",
+ "tx_encapsulated_tso",
+ "tx_more",
+ "tx_queue_stops",
+ "tx_queue_restarts",
+ "tx_mapping_errors",
+ "tx_tls_encrypted_packets",
+ "tx_tls_encrypted_bytes",
+ "tx_tls_ooo",
+ "tx_tls_drop_no_sync_data",
+};
+
+static const char * const xdpq_stat_names[] = {
+ "tx_xdp_pkts",
+ "tx_xdp_bytes",
+ "tx_xdp_full",
+ "tx_xdp_mapping_errors",
+};
+
+static const char * const rxq_stat_names[] = {
+ "rx_pkts",
+ "rx_bytes",
+ "rx_cso",
+ "gro_pkts",
+ "gro_merged",
+ "rx_xdp_tx",
+ "rx_xdp_redir",
+ "rx_xdp_drops",
+ "rx_buffers",
+ "rx_page_allocs",
+ "rx_drops",
+ "rx_budget_exhausted",
+ "rx_mapping_errors",
+};
+
+static const char * const tls_stat_names[] = {
+ "tx_tls_ctx",
+ "tx_tls_del",
+ "tx_tls_resync",
+};
+
+static void fun_link_modes_to_ethtool(u64 modes,
+ unsigned long *ethtool_modes_map)
+{
+#define ADD_LINK_MODE(mode) \
+ __set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, ethtool_modes_map)
+
+ if (modes & FUN_PORT_CAP_AUTONEG)
+ ADD_LINK_MODE(Autoneg);
+ if (modes & FUN_PORT_CAP_1000_X)
+ ADD_LINK_MODE(1000baseX_Full);
+ if (modes & FUN_PORT_CAP_10G_R) {
+ ADD_LINK_MODE(10000baseCR_Full);
+ ADD_LINK_MODE(10000baseSR_Full);
+ ADD_LINK_MODE(10000baseLR_Full);
+ ADD_LINK_MODE(10000baseER_Full);
+ }
+ if (modes & FUN_PORT_CAP_25G_R) {
+ ADD_LINK_MODE(25000baseCR_Full);
+ ADD_LINK_MODE(25000baseSR_Full);
+ }
+ if (modes & FUN_PORT_CAP_40G_R4) {
+ ADD_LINK_MODE(40000baseCR4_Full);
+ ADD_LINK_MODE(40000baseSR4_Full);
+ ADD_LINK_MODE(40000baseLR4_Full);
+ }
+ if (modes & FUN_PORT_CAP_50G_R2) {
+ ADD_LINK_MODE(50000baseCR2_Full);
+ ADD_LINK_MODE(50000baseSR2_Full);
+ }
+ if (modes & FUN_PORT_CAP_50G_R) {
+ ADD_LINK_MODE(50000baseCR_Full);
+ ADD_LINK_MODE(50000baseSR_Full);
+ ADD_LINK_MODE(50000baseLR_ER_FR_Full);
+ }
+ if (modes & FUN_PORT_CAP_100G_R4) {
+ ADD_LINK_MODE(100000baseCR4_Full);
+ ADD_LINK_MODE(100000baseSR4_Full);
+ ADD_LINK_MODE(100000baseLR4_ER4_Full);
+ }
+ if (modes & FUN_PORT_CAP_100G_R2) {
+ ADD_LINK_MODE(100000baseCR2_Full);
+ ADD_LINK_MODE(100000baseSR2_Full);
+ ADD_LINK_MODE(100000baseLR2_ER2_FR2_Full);
+ }
+ if (modes & FUN_PORT_CAP_FEC_NONE)
+ ADD_LINK_MODE(FEC_NONE);
+ if (modes & FUN_PORT_CAP_FEC_FC)
+ ADD_LINK_MODE(FEC_BASER);
+ if (modes & FUN_PORT_CAP_FEC_RS)
+ ADD_LINK_MODE(FEC_RS);
+ if (modes & FUN_PORT_CAP_RX_PAUSE)
+ ADD_LINK_MODE(Pause);
+
+#undef ADD_LINK_MODE
+}
+
+static void set_asym_pause(u64 advertising, struct ethtool_link_ksettings *ks)
+{
+ bool rx_pause, tx_pause;
+
+ rx_pause = advertising & FUN_PORT_CAP_RX_PAUSE;
+ tx_pause = advertising & FUN_PORT_CAP_TX_PAUSE;
+ if (tx_pause ^ rx_pause)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Asym_Pause);
+}
+
+static unsigned int fun_port_type(unsigned int xcvr)
+{
+ if (!xcvr)
+ return PORT_NONE;
+
+ switch (xcvr & 7) {
+ case FUN_XCVR_BASET:
+ return PORT_TP;
+ case FUN_XCVR_CU:
+ return PORT_DA;
+ default:
+ return PORT_FIBRE;
+ }
+}
+
+static int fun_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ks)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ unsigned int seq, speed, xcvr;
+ u64 lp_advertising;
+ bool link_up;
+
+ ethtool_link_ksettings_zero_link_mode(ks, supported);
+ ethtool_link_ksettings_zero_link_mode(ks, advertising);
+ ethtool_link_ksettings_zero_link_mode(ks, lp_advertising);
+
+	/* Link settings change asynchronously; take a consistent snapshot. */
+ do {
+ seq = read_seqcount_begin(&fp->link_seq);
+ link_up = netif_carrier_ok(netdev);
+ speed = fp->link_speed;
+ xcvr = fp->xcvr_type;
+ lp_advertising = fp->lp_advertising;
+ } while (read_seqcount_retry(&fp->link_seq, seq));
+
+ if (link_up) {
+ ks->base.speed = speed;
+ ks->base.duplex = DUPLEX_FULL;
+ fun_link_modes_to_ethtool(lp_advertising,
+ ks->link_modes.lp_advertising);
+ } else {
+ ks->base.speed = SPEED_UNKNOWN;
+ ks->base.duplex = DUPLEX_UNKNOWN;
+ }
+
+ ks->base.autoneg = (fp->advertising & FUN_PORT_CAP_AUTONEG) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
+ ks->base.port = fun_port_type(xcvr);
+
+ fun_link_modes_to_ethtool(fp->port_caps, ks->link_modes.supported);
+ if (fp->port_caps & (FUN_PORT_CAP_RX_PAUSE | FUN_PORT_CAP_TX_PAUSE))
+ ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause);
+
+ fun_link_modes_to_ethtool(fp->advertising, ks->link_modes.advertising);
+ set_asym_pause(fp->advertising, ks);
+ return 0;
+}
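+
+/* The snapshotted link fields are assumed to be updated elsewhere under
+ * write_seqcount_begin(&fp->link_seq)/write_seqcount_end(), which is what
+ * makes the lockless read-retry loop above safe.
+ */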
+
+static u64 fun_advert_modes(const struct ethtool_link_ksettings *ks)
+{
+ u64 modes = 0;
+
+#define HAS_MODE(mode) \
+ ethtool_link_ksettings_test_link_mode(ks, advertising, mode)
+
+ if (HAS_MODE(1000baseX_Full))
+ modes |= FUN_PORT_CAP_1000_X;
+ if (HAS_MODE(10000baseCR_Full) || HAS_MODE(10000baseSR_Full) ||
+ HAS_MODE(10000baseLR_Full) || HAS_MODE(10000baseER_Full))
+ modes |= FUN_PORT_CAP_10G_R;
+ if (HAS_MODE(25000baseCR_Full) || HAS_MODE(25000baseSR_Full))
+ modes |= FUN_PORT_CAP_25G_R;
+ if (HAS_MODE(40000baseCR4_Full) || HAS_MODE(40000baseSR4_Full) ||
+ HAS_MODE(40000baseLR4_Full))
+ modes |= FUN_PORT_CAP_40G_R4;
+ if (HAS_MODE(50000baseCR2_Full) || HAS_MODE(50000baseSR2_Full))
+ modes |= FUN_PORT_CAP_50G_R2;
+ if (HAS_MODE(50000baseCR_Full) || HAS_MODE(50000baseSR_Full) ||
+ HAS_MODE(50000baseLR_ER_FR_Full))
+ modes |= FUN_PORT_CAP_50G_R;
+ if (HAS_MODE(100000baseCR4_Full) || HAS_MODE(100000baseSR4_Full) ||
+ HAS_MODE(100000baseLR4_ER4_Full))
+ modes |= FUN_PORT_CAP_100G_R4;
+ if (HAS_MODE(100000baseCR2_Full) || HAS_MODE(100000baseSR2_Full) ||
+ HAS_MODE(100000baseLR2_ER2_FR2_Full))
+ modes |= FUN_PORT_CAP_100G_R2;
+
+ return modes;
+#undef HAS_MODE
+}
+
+static u64 fun_speed_to_link_mode(unsigned int speed)
+{
+ switch (speed) {
+ case SPEED_100000:
+ return FUN_PORT_CAP_100G_R4 | FUN_PORT_CAP_100G_R2;
+ case SPEED_50000:
+ return FUN_PORT_CAP_50G_R | FUN_PORT_CAP_50G_R2;
+ case SPEED_40000:
+ return FUN_PORT_CAP_40G_R4;
+ case SPEED_25000:
+ return FUN_PORT_CAP_25G_R;
+ case SPEED_10000:
+ return FUN_PORT_CAP_10G_R;
+ case SPEED_1000:
+ return FUN_PORT_CAP_1000_X;
+ default:
+ return 0;
+ }
+}
+
+static int fun_change_advert(struct funeth_priv *fp, u64 new_advert)
+{
+ int err;
+
+ if (new_advert == fp->advertising)
+ return 0;
+
+ err = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_ADVERT, new_advert);
+ if (!err)
+ fp->advertising = new_advert;
+ return err;
+}
+
+#define FUN_PORT_CAP_FEC_MASK \
+ (FUN_PORT_CAP_FEC_NONE | FUN_PORT_CAP_FEC_FC | FUN_PORT_CAP_FEC_RS)
+
+static int fun_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *ks)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
+ struct funeth_priv *fp = netdev_priv(netdev);
+ u64 new_advert;
+
+ /* eswitch ports don't support mode changes */
+ if (fp->port_caps & FUN_PORT_CAP_VPORT)
+ return -EOPNOTSUPP;
+
+ if (ks->base.duplex == DUPLEX_HALF)
+ return -EINVAL;
+ if (ks->base.autoneg == AUTONEG_ENABLE &&
+ !(fp->port_caps & FUN_PORT_CAP_AUTONEG))
+ return -EINVAL;
+
+ if (ks->base.autoneg == AUTONEG_ENABLE) {
+ if (linkmode_empty(ks->link_modes.advertising))
+ return -EINVAL;
+
+ fun_link_modes_to_ethtool(fp->port_caps, supported);
+ if (!linkmode_subset(ks->link_modes.advertising, supported))
+ return -EINVAL;
+
+ new_advert = fun_advert_modes(ks) | FUN_PORT_CAP_AUTONEG;
+ } else {
+ new_advert = fun_speed_to_link_mode(ks->base.speed);
+ new_advert &= fp->port_caps;
+ if (!new_advert)
+ return -EINVAL;
+ }
+ new_advert |= fp->advertising &
+ (FUN_PORT_CAP_PAUSE_MASK | FUN_PORT_CAP_FEC_MASK);
+
+ return fun_change_advert(fp, new_advert);
+}
+
+static void fun_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ u8 active_pause = fp->active_fc;
+
+ pause->rx_pause = !!(active_pause & FUN_PORT_CAP_RX_PAUSE);
+ pause->tx_pause = !!(active_pause & FUN_PORT_CAP_TX_PAUSE);
+ pause->autoneg = !!(fp->advertising & FUN_PORT_CAP_AUTONEG);
+}
+
+static int fun_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ u64 new_advert;
+
+ if (fp->port_caps & FUN_PORT_CAP_VPORT)
+ return -EOPNOTSUPP;
+ /* Forcing PAUSE settings with AN enabled is unsupported. */
+ if (!pause->autoneg && (fp->advertising & FUN_PORT_CAP_AUTONEG))
+ return -EOPNOTSUPP;
+ if (pause->autoneg && !(fp->advertising & FUN_PORT_CAP_AUTONEG))
+ return -EINVAL;
+ if (pause->tx_pause && !(fp->port_caps & FUN_PORT_CAP_TX_PAUSE))
+ return -EINVAL;
+ if (pause->rx_pause && !(fp->port_caps & FUN_PORT_CAP_RX_PAUSE))
+ return -EINVAL;
+
+ new_advert = fp->advertising & ~FUN_PORT_CAP_PAUSE_MASK;
+ if (pause->tx_pause)
+ new_advert |= FUN_PORT_CAP_TX_PAUSE;
+ if (pause->rx_pause)
+ new_advert |= FUN_PORT_CAP_RX_PAUSE;
+
+ return fun_change_advert(fp, new_advert);
+}
+
+static int fun_restart_an(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->advertising & FUN_PORT_CAP_AUTONEG))
+ return -EOPNOTSUPP;
+
+ return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_ADVERT,
+ FUN_PORT_CAP_AUTONEG);
+}
+
+static int fun_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ unsigned int beacon;
+
+ if (fp->port_caps & FUN_PORT_CAP_VPORT)
+ return -EOPNOTSUPP;
+ if (state != ETHTOOL_ID_ACTIVE && state != ETHTOOL_ID_INACTIVE)
+ return -EOPNOTSUPP;
+
+ beacon = state == ETHTOOL_ID_ACTIVE ? FUN_PORT_LED_BEACON_ON :
+ FUN_PORT_LED_BEACON_OFF;
+ return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_LED, beacon);
+}
+
+static void fun_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(fp->pdev), sizeof(info->bus_info));
+}
+
+static u32 fun_get_msglevel(struct net_device *netdev)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ return fp->msg_enable;
+}
+
+static void fun_set_msglevel(struct net_device *netdev, u32 value)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+
+ fp->msg_enable = value;
+}
+
+/* The device exposes an NVMe-style register block; the dump covers the
+ * registers up to and including NVME_REG_ACQ.
+ */
+static int fun_get_regs_len(struct net_device *dev)
+{
+	return NVME_REG_ACQ + sizeof(u64);
+}
+
+static void fun_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *buf)
+{
+ const struct funeth_priv *fp = netdev_priv(dev);
+ void __iomem *bar = fp->fdev->bar;
+
+ regs->version = 0;
+ *(u64 *)(buf + NVME_REG_CAP) = readq(bar + NVME_REG_CAP);
+ *(u32 *)(buf + NVME_REG_VS) = readl(bar + NVME_REG_VS);
+ *(u32 *)(buf + NVME_REG_INTMS) = readl(bar + NVME_REG_INTMS);
+ *(u32 *)(buf + NVME_REG_INTMC) = readl(bar + NVME_REG_INTMC);
+ *(u32 *)(buf + NVME_REG_CC) = readl(bar + NVME_REG_CC);
+ *(u32 *)(buf + NVME_REG_CSTS) = readl(bar + NVME_REG_CSTS);
+ *(u32 *)(buf + NVME_REG_AQA) = readl(bar + NVME_REG_AQA);
+ *(u64 *)(buf + NVME_REG_ASQ) = readq(bar + NVME_REG_ASQ);
+ *(u64 *)(buf + NVME_REG_ACQ) = readq(bar + NVME_REG_ACQ);
+}
+
+static int fun_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kcoal,
+ struct netlink_ext_ack *ext_ack)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ coal->rx_coalesce_usecs = fp->rx_coal_usec;
+ coal->rx_max_coalesced_frames = fp->rx_coal_count;
+ coal->use_adaptive_rx_coalesce = !fp->cq_irq_db;
+ coal->tx_coalesce_usecs = fp->tx_coal_usec;
+ coal->tx_max_coalesced_frames = fp->tx_coal_count;
+ return 0;
+}
+
+static int fun_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kcoal,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct funeth_rxq **rxqs;
+ unsigned int i, db_val;
+
+ if (coal->rx_coalesce_usecs > FUN_DB_INTCOAL_USEC_M ||
+ coal->rx_max_coalesced_frames > FUN_DB_INTCOAL_ENTRIES_M ||
+ (coal->rx_coalesce_usecs | coal->rx_max_coalesced_frames) == 0 ||
+ coal->tx_coalesce_usecs > FUN_DB_INTCOAL_USEC_M ||
+ coal->tx_max_coalesced_frames > FUN_DB_INTCOAL_ENTRIES_M ||
+ (coal->tx_coalesce_usecs | coal->tx_max_coalesced_frames) == 0)
+ return -EINVAL;
+
+ /* a timer is required if there's any coalescing */
+ if ((coal->rx_max_coalesced_frames > 1 && !coal->rx_coalesce_usecs) ||
+ (coal->tx_max_coalesced_frames > 1 && !coal->tx_coalesce_usecs))
+ return -EINVAL;
+
+ fp->rx_coal_usec = coal->rx_coalesce_usecs;
+ fp->rx_coal_count = coal->rx_max_coalesced_frames;
+ fp->tx_coal_usec = coal->tx_coalesce_usecs;
+ fp->tx_coal_count = coal->tx_max_coalesced_frames;
+
+ db_val = FUN_IRQ_CQ_DB(fp->rx_coal_usec, fp->rx_coal_count);
+ WRITE_ONCE(fp->cq_irq_db, db_val);
+
+ rxqs = rtnl_dereference(fp->rxqs);
+ if (!rxqs)
+ return 0;
+
+ for (i = 0; i < netdev->real_num_rx_queues; i++)
+ WRITE_ONCE(rxqs[i]->irq_db_val, db_val);
+
+ db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec, fp->tx_coal_count);
+ for (i = 0; i < netdev->real_num_tx_queues; i++)
+ WRITE_ONCE(fp->txqs[i]->irq_db_val, db_val);
+
+ return 0;
+}
+
+static void fun_get_channels(struct net_device *netdev,
+ struct ethtool_channels *chan)
+{
+ chan->max_rx = netdev->num_rx_queues;
+ chan->rx_count = netdev->real_num_rx_queues;
+
+ chan->max_tx = netdev->num_tx_queues;
+ chan->tx_count = netdev->real_num_tx_queues;
+}
+
+static int fun_set_channels(struct net_device *netdev,
+ struct ethtool_channels *chan)
+{
+ if (!chan->tx_count || !chan->rx_count)
+ return -EINVAL;
+
+ if (chan->tx_count == netdev->real_num_tx_queues &&
+ chan->rx_count == netdev->real_num_rx_queues)
+ return 0;
+
+ if (netif_running(netdev))
+ return fun_change_num_queues(netdev, chan->tx_count,
+ chan->rx_count);
+
+ fun_set_ring_count(netdev, chan->tx_count, chan->rx_count);
+ return 0;
+}
+
+static void fun_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kring,
+ struct netlink_ext_ack *extack)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ unsigned int max_depth = fp->fdev->q_depth;
+
+	/* We size CQs to be twice the RQ depth, so the max RQ depth is half the
+ * max queue depth.
+ */
+ ring->rx_max_pending = max_depth / 2;
+ ring->tx_max_pending = max_depth;
+
+ ring->rx_pending = fp->rq_depth;
+ ring->tx_pending = fp->sq_depth;
+
+ kring->rx_buf_len = PAGE_SIZE;
+ kring->cqe_size = FUNETH_CQE_SIZE;
+}
+
+static int fun_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kring,
+ struct netlink_ext_ack *extack)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ int rc;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ /* queue depths must be powers-of-2 */
+ if (!is_power_of_2(ring->rx_pending) ||
+ !is_power_of_2(ring->tx_pending))
+ return -EINVAL;
+
+ if (ring->rx_pending < FUNETH_MIN_QDEPTH ||
+ ring->tx_pending < FUNETH_MIN_QDEPTH)
+ return -EINVAL;
+
+ if (fp->sq_depth == ring->tx_pending &&
+ fp->rq_depth == ring->rx_pending)
+ return 0;
+
+ if (netif_running(netdev)) {
+ struct fun_qset req = {
+ .cq_depth = 2 * ring->rx_pending,
+ .rq_depth = ring->rx_pending,
+ .sq_depth = ring->tx_pending
+ };
+
+ rc = fun_replace_queues(netdev, &req, extack);
+ if (rc)
+ return rc;
+ }
+
+ fp->sq_depth = ring->tx_pending;
+ fp->rq_depth = ring->rx_pending;
+ fp->cq_depth = 2 * fp->rq_depth;
+ return 0;
+}
+
+static int fun_get_sset_count(struct net_device *dev, int sset)
+{
+ const struct funeth_priv *fp = netdev_priv(dev);
+ int n;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ n = (dev->real_num_tx_queues + 1) * ARRAY_SIZE(txq_stat_names) +
+ (dev->real_num_rx_queues + 1) * ARRAY_SIZE(rxq_stat_names) +
+ (fp->num_xdpqs + 1) * ARRAY_SIZE(xdpq_stat_names) +
+ ARRAY_SIZE(tls_stat_names);
+ if (fp->port_caps & FUN_PORT_CAP_STATS) {
+ n += ARRAY_SIZE(mac_tx_stat_names) +
+ ARRAY_SIZE(mac_rx_stat_names);
+ }
+ return n;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void fun_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ unsigned int i, j;
+ u8 *p = data;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ if (fp->port_caps & FUN_PORT_CAP_STATS) {
+ memcpy(p, mac_tx_stat_names, sizeof(mac_tx_stat_names));
+ p += sizeof(mac_tx_stat_names);
+ memcpy(p, mac_rx_stat_names, sizeof(mac_rx_stat_names));
+ p += sizeof(mac_rx_stat_names);
+ }
+
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ for (j = 0; j < ARRAY_SIZE(txq_stat_names); j++)
+ ethtool_sprintf(&p, "%s[%u]", txq_stat_names[j],
+ i);
+ }
+ for (j = 0; j < ARRAY_SIZE(txq_stat_names); j++)
+ ethtool_sprintf(&p, txq_stat_names[j]);
+
+ for (i = 0; i < fp->num_xdpqs; i++) {
+ for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++)
+ ethtool_sprintf(&p, "%s[%u]",
+ xdpq_stat_names[j], i);
+ }
+ for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++)
+ ethtool_sprintf(&p, xdpq_stat_names[j]);
+
+ for (i = 0; i < netdev->real_num_rx_queues; i++) {
+ for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++)
+ ethtool_sprintf(&p, "%s[%u]", rxq_stat_names[j],
+ i);
+ }
+ for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++)
+ ethtool_sprintf(&p, rxq_stat_names[j]);
+
+ for (j = 0; j < ARRAY_SIZE(tls_stat_names); j++)
+ ethtool_sprintf(&p, tls_stat_names[j]);
+ break;
+ default:
+ break;
+ }
+}
+
+static u64 *get_mac_stats(const struct funeth_priv *fp, u64 *data)
+{
+#define TX_STAT(s) \
+ *data++ = be64_to_cpu(fp->stats[PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_##s])
+
+ TX_STAT(etherStatsOctets);
+ TX_STAT(etherStatsPkts);
+ TX_STAT(VLANTransmittedOK);
+ TX_STAT(ifOutUcastPkts);
+ TX_STAT(ifOutMulticastPkts);
+ TX_STAT(ifOutBroadcastPkts);
+ TX_STAT(ifOutErrors);
+ TX_STAT(CBFCPAUSEFramesTransmitted_0);
+ TX_STAT(CBFCPAUSEFramesTransmitted_1);
+ TX_STAT(CBFCPAUSEFramesTransmitted_2);
+ TX_STAT(CBFCPAUSEFramesTransmitted_3);
+ TX_STAT(CBFCPAUSEFramesTransmitted_4);
+ TX_STAT(CBFCPAUSEFramesTransmitted_5);
+ TX_STAT(CBFCPAUSEFramesTransmitted_6);
+ TX_STAT(CBFCPAUSEFramesTransmitted_7);
+ TX_STAT(CBFCPAUSEFramesTransmitted_8);
+ TX_STAT(CBFCPAUSEFramesTransmitted_9);
+ TX_STAT(CBFCPAUSEFramesTransmitted_10);
+ TX_STAT(CBFCPAUSEFramesTransmitted_11);
+ TX_STAT(CBFCPAUSEFramesTransmitted_12);
+ TX_STAT(CBFCPAUSEFramesTransmitted_13);
+ TX_STAT(CBFCPAUSEFramesTransmitted_14);
+ TX_STAT(CBFCPAUSEFramesTransmitted_15);
+
+#define RX_STAT(s) *data++ = be64_to_cpu(fp->stats[PORT_MAC_RX_##s])
+
+ RX_STAT(etherStatsOctets);
+ RX_STAT(etherStatsPkts);
+ RX_STAT(VLANReceivedOK);
+ RX_STAT(ifInUcastPkts);
+ RX_STAT(ifInMulticastPkts);
+ RX_STAT(ifInBroadcastPkts);
+ RX_STAT(etherStatsDropEvents);
+ RX_STAT(ifInErrors);
+ RX_STAT(aAlignmentErrors);
+ RX_STAT(CBFCPAUSEFramesReceived_0);
+ RX_STAT(CBFCPAUSEFramesReceived_1);
+ RX_STAT(CBFCPAUSEFramesReceived_2);
+ RX_STAT(CBFCPAUSEFramesReceived_3);
+ RX_STAT(CBFCPAUSEFramesReceived_4);
+ RX_STAT(CBFCPAUSEFramesReceived_5);
+ RX_STAT(CBFCPAUSEFramesReceived_6);
+ RX_STAT(CBFCPAUSEFramesReceived_7);
+ RX_STAT(CBFCPAUSEFramesReceived_8);
+ RX_STAT(CBFCPAUSEFramesReceived_9);
+ RX_STAT(CBFCPAUSEFramesReceived_10);
+ RX_STAT(CBFCPAUSEFramesReceived_11);
+ RX_STAT(CBFCPAUSEFramesReceived_12);
+ RX_STAT(CBFCPAUSEFramesReceived_13);
+ RX_STAT(CBFCPAUSEFramesReceived_14);
+ RX_STAT(CBFCPAUSEFramesReceived_15);
+
+ return data;
+
+#undef TX_STAT
+#undef RX_STAT
+}
+
+static void fun_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ struct funeth_txq_stats txs;
+ struct funeth_rxq_stats rxs;
+ struct funeth_txq **xdpqs;
+ struct funeth_rxq **rxqs;
+ unsigned int i, start;
+ u64 *totals, *tot;
+
+ if (fp->port_caps & FUN_PORT_CAP_STATS)
+ data = get_mac_stats(fp, data);
+
+ rxqs = rtnl_dereference(fp->rxqs);
+ if (!rxqs)
+ return;
+
+#define ADD_STAT(cnt) do { \
+ *data = (cnt); *tot++ += *data++; \
+} while (0)
+
+ /* Tx queues */
+ totals = data + netdev->real_num_tx_queues * ARRAY_SIZE(txq_stat_names);
+
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ tot = totals;
+
+ FUN_QSTAT_READ(fp->txqs[i], start, txs);
+
+ ADD_STAT(txs.tx_pkts);
+ ADD_STAT(txs.tx_bytes);
+ ADD_STAT(txs.tx_cso);
+ ADD_STAT(txs.tx_tso);
+ ADD_STAT(txs.tx_encap_tso);
+ ADD_STAT(txs.tx_more);
+ ADD_STAT(txs.tx_nstops);
+ ADD_STAT(txs.tx_nrestarts);
+ ADD_STAT(txs.tx_map_err);
+ ADD_STAT(txs.tx_tls_pkts);
+ ADD_STAT(txs.tx_tls_bytes);
+ ADD_STAT(txs.tx_tls_fallback);
+ ADD_STAT(txs.tx_tls_drops);
+ }
+ data += ARRAY_SIZE(txq_stat_names);
+
+ /* XDP Tx queues */
+ xdpqs = rtnl_dereference(fp->xdpqs);
+ totals = data + fp->num_xdpqs * ARRAY_SIZE(xdpq_stat_names);
+
+ for (i = 0; i < fp->num_xdpqs; i++) {
+ tot = totals;
+
+ FUN_QSTAT_READ(xdpqs[i], start, txs);
+
+ ADD_STAT(txs.tx_pkts);
+ ADD_STAT(txs.tx_bytes);
+ ADD_STAT(txs.tx_xdp_full);
+ ADD_STAT(txs.tx_map_err);
+ }
+ data += ARRAY_SIZE(xdpq_stat_names);
+
+ /* Rx queues */
+ totals = data + netdev->real_num_rx_queues * ARRAY_SIZE(rxq_stat_names);
+
+ for (i = 0; i < netdev->real_num_rx_queues; i++) {
+ tot = totals;
+
+ FUN_QSTAT_READ(rxqs[i], start, rxs);
+
+ ADD_STAT(rxs.rx_pkts);
+ ADD_STAT(rxs.rx_bytes);
+ ADD_STAT(rxs.rx_cso);
+ ADD_STAT(rxs.gro_pkts);
+ ADD_STAT(rxs.gro_merged);
+ ADD_STAT(rxs.xdp_tx);
+ ADD_STAT(rxs.xdp_redir);
+ ADD_STAT(rxs.xdp_drops);
+ ADD_STAT(rxs.rx_bufs);
+ ADD_STAT(rxs.rx_page_alloc);
+ ADD_STAT(rxs.rx_mem_drops + rxs.xdp_err);
+ ADD_STAT(rxs.rx_budget);
+ ADD_STAT(rxs.rx_map_err);
+ }
+ data += ARRAY_SIZE(rxq_stat_names);
+#undef ADD_STAT
+
+ *data++ = atomic64_read(&fp->tx_tls_add);
+ *data++ = atomic64_read(&fp->tx_tls_del);
+ *data++ = atomic64_read(&fp->tx_tls_resync);
+}
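+
+/* The layout produced above, for each queue type, is N per-queue rows
+ * followed by one row of per-counter totals accumulated through ADD_STAT():
+ *
+ *	txq0 counters | ... | txqN-1 counters | totals
+ *
+ * This matches the names emitted by fun_get_strings() and the "+ 1" per
+ * queue type counted in fun_get_sset_count().
+ */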
+
+#define RX_STAT(fp, s) be64_to_cpu((fp)->stats[PORT_MAC_RX_##s])
+#define TX_STAT(fp, s) \
+ be64_to_cpu((fp)->stats[PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_##s])
+#define FEC_STAT(fp, s) \
+ be64_to_cpu((fp)->stats[PORT_MAC_RX_STATS_MAX + \
+ PORT_MAC_TX_STATS_MAX + PORT_MAC_FEC_##s])
+
+static void fun_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *stats)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return;
+
+ stats->tx_pause_frames = TX_STAT(fp, aPAUSEMACCtrlFramesTransmitted);
+ stats->rx_pause_frames = RX_STAT(fp, aPAUSEMACCtrlFramesReceived);
+}
+
+static void fun_get_802_3_stats(struct net_device *netdev,
+ struct ethtool_eth_mac_stats *stats)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return;
+
+ stats->FramesTransmittedOK = TX_STAT(fp, aFramesTransmittedOK);
+ stats->FramesReceivedOK = RX_STAT(fp, aFramesReceivedOK);
+ stats->FrameCheckSequenceErrors = RX_STAT(fp, aFrameCheckSequenceErrors);
+ stats->OctetsTransmittedOK = TX_STAT(fp, OctetsTransmittedOK);
+ stats->OctetsReceivedOK = RX_STAT(fp, OctetsReceivedOK);
+ stats->InRangeLengthErrors = RX_STAT(fp, aInRangeLengthErrors);
+ stats->FrameTooLongErrors = RX_STAT(fp, aFrameTooLongErrors);
+}
+
+static void fun_get_802_3_ctrl_stats(struct net_device *netdev,
+ struct ethtool_eth_ctrl_stats *stats)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return;
+
+ stats->MACControlFramesTransmitted = TX_STAT(fp, MACControlFramesTransmitted);
+ stats->MACControlFramesReceived = RX_STAT(fp, MACControlFramesReceived);
+}
+
+static void fun_get_rmon_stats(struct net_device *netdev,
+ struct ethtool_rmon_stats *stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ static const struct ethtool_rmon_hist_range rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 32767 },
+ {}
+ };
+
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return;
+
+ stats->undersize_pkts = RX_STAT(fp, etherStatsUndersizePkts);
+ stats->oversize_pkts = RX_STAT(fp, etherStatsOversizePkts);
+ stats->fragments = RX_STAT(fp, etherStatsFragments);
+ stats->jabbers = RX_STAT(fp, etherStatsJabbers);
+
+ stats->hist[0] = RX_STAT(fp, etherStatsPkts64Octets);
+ stats->hist[1] = RX_STAT(fp, etherStatsPkts65to127Octets);
+ stats->hist[2] = RX_STAT(fp, etherStatsPkts128to255Octets);
+ stats->hist[3] = RX_STAT(fp, etherStatsPkts256to511Octets);
+ stats->hist[4] = RX_STAT(fp, etherStatsPkts512to1023Octets);
+ stats->hist[5] = RX_STAT(fp, etherStatsPkts1024to1518Octets);
+ stats->hist[6] = RX_STAT(fp, etherStatsPkts1519toMaxOctets);
+
+ stats->hist_tx[0] = TX_STAT(fp, etherStatsPkts64Octets);
+ stats->hist_tx[1] = TX_STAT(fp, etherStatsPkts65to127Octets);
+ stats->hist_tx[2] = TX_STAT(fp, etherStatsPkts128to255Octets);
+ stats->hist_tx[3] = TX_STAT(fp, etherStatsPkts256to511Octets);
+ stats->hist_tx[4] = TX_STAT(fp, etherStatsPkts512to1023Octets);
+ stats->hist_tx[5] = TX_STAT(fp, etherStatsPkts1024to1518Octets);
+ stats->hist_tx[6] = TX_STAT(fp, etherStatsPkts1519toMaxOctets);
+
+ *ranges = rmon_ranges;
+}
+
+static void fun_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *stats)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return;
+
+ stats->corrected_blocks.total = FEC_STAT(fp, Correctable);
+ stats->uncorrectable_blocks.total = FEC_STAT(fp, Uncorrectable);
+}
+
+#undef RX_STAT
+#undef TX_STAT
+#undef FEC_STAT
+
+static int fun_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = netdev->real_num_rx_queues;
+ return 0;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int fun_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
+{
+ return 0;
+}
+
+static u32 fun_get_rxfh_indir_size(struct net_device *netdev)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ return fp->indir_table_nentries;
+}
+
+static u32 fun_get_rxfh_key_size(struct net_device *netdev)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ return sizeof(fp->rss_key);
+}
+
+static int fun_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!fp->rss_cfg)
+ return -EOPNOTSUPP;
+
+ if (indir)
+ memcpy(indir, fp->indir_table,
+ sizeof(u32) * fp->indir_table_nentries);
+
+ if (key)
+ memcpy(key, fp->rss_key, sizeof(fp->rss_key));
+
+ if (hfunc)
+ *hfunc = fp->hash_algo == FUN_ETH_RSS_ALG_TOEPLITZ ?
+ ETH_RSS_HASH_TOP : ETH_RSS_HASH_CRC32;
+
+ return 0;
+}
+
+static int fun_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ const u32 *rss_indir = indir ? indir : fp->indir_table;
+ const u8 *rss_key = key ? key : fp->rss_key;
+ enum fun_eth_hash_alg algo;
+
+ if (!fp->rss_cfg)
+ return -EOPNOTSUPP;
+
+ if (hfunc == ETH_RSS_HASH_NO_CHANGE)
+ algo = fp->hash_algo;
+ else if (hfunc == ETH_RSS_HASH_CRC32)
+ algo = FUN_ETH_RSS_ALG_CRC32;
+ else if (hfunc == ETH_RSS_HASH_TOP)
+ algo = FUN_ETH_RSS_ALG_TOEPLITZ;
+ else
+ return -EINVAL;
+
+	/* If the port is up, try to reconfigure RSS and keep the new settings
+	 * only if that succeeds. If the port is down, record the settings and
+	 * apply them when the port is next brought up.
+	 */
+ if (netif_running(netdev)) {
+ int rc = fun_config_rss(netdev, algo, rss_key, rss_indir,
+ FUN_ADMIN_SUBOP_MODIFY);
+ if (rc)
+ return rc;
+ }
+
+ fp->hash_algo = algo;
+ if (key)
+ memcpy(fp->rss_key, key, sizeof(fp->rss_key));
+ if (indir)
+ memcpy(fp->indir_table, indir,
+ sizeof(u32) * fp->indir_table_nentries);
+ return 0;
+}
+
+static int fun_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = -1;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+ return 0;
+}
+
+static unsigned int to_ethtool_fec(unsigned int fun_fec)
+{
+ unsigned int fec = 0;
+
+ if (fun_fec == FUN_PORT_FEC_NA)
+ fec |= ETHTOOL_FEC_NONE;
+ if (fun_fec & FUN_PORT_FEC_OFF)
+ fec |= ETHTOOL_FEC_OFF;
+ if (fun_fec & FUN_PORT_FEC_RS)
+ fec |= ETHTOOL_FEC_RS;
+ if (fun_fec & FUN_PORT_FEC_FC)
+ fec |= ETHTOOL_FEC_BASER;
+ if (fun_fec & FUN_PORT_FEC_AUTO)
+ fec |= ETHTOOL_FEC_AUTO;
+ return fec;
+}
+
+static int fun_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ u64 fec_data;
+ int rc;
+
+ rc = fun_port_read_cmd(fp, FUN_ADMIN_PORT_KEY_FEC, &fec_data);
+ if (rc)
+ return rc;
+
+ fec->active_fec = to_ethtool_fec(fec_data & 0xff);
+ fec->fec = to_ethtool_fec(fec_data >> 8);
+ return 0;
+}
+
+static int fun_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ u64 fec_mode;
+
+ switch (fec->fec) {
+ case ETHTOOL_FEC_AUTO:
+ fec_mode = FUN_PORT_FEC_AUTO;
+ break;
+ case ETHTOOL_FEC_OFF:
+ if (!(fp->port_caps & FUN_PORT_CAP_FEC_NONE))
+ return -EINVAL;
+ fec_mode = FUN_PORT_FEC_OFF;
+ break;
+ case ETHTOOL_FEC_BASER:
+ if (!(fp->port_caps & FUN_PORT_CAP_FEC_FC))
+ return -EINVAL;
+ fec_mode = FUN_PORT_FEC_FC;
+ break;
+ case ETHTOOL_FEC_RS:
+ if (!(fp->port_caps & FUN_PORT_CAP_FEC_RS))
+ return -EINVAL;
+ fec_mode = FUN_PORT_FEC_RS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_FEC, fec_mode);
+}
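+
+/* Illustrative ethtool usage reaching the handlers above:
+ *
+ *	ethtool --show-fec eth0
+ *	ethtool --set-fec eth0 encoding rs
+ *
+ * The latter arrives here with fec->fec == ETHTOOL_FEC_RS and is rejected
+ * unless the port advertises FUN_PORT_CAP_FEC_RS.
+ */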
+
+static const struct ethtool_ops fun_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
+ .get_link_ksettings = fun_get_link_ksettings,
+ .set_link_ksettings = fun_set_link_ksettings,
+ .set_phys_id = fun_set_phys_id,
+ .get_drvinfo = fun_get_drvinfo,
+ .get_msglevel = fun_get_msglevel,
+ .set_msglevel = fun_set_msglevel,
+ .get_regs_len = fun_get_regs_len,
+ .get_regs = fun_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = fun_get_coalesce,
+ .set_coalesce = fun_set_coalesce,
+ .get_ts_info = fun_get_ts_info,
+ .get_ringparam = fun_get_ringparam,
+ .set_ringparam = fun_set_ringparam,
+ .get_sset_count = fun_get_sset_count,
+ .get_strings = fun_get_strings,
+ .get_ethtool_stats = fun_get_ethtool_stats,
+ .get_rxnfc = fun_get_rxnfc,
+ .set_rxnfc = fun_set_rxnfc,
+ .get_rxfh_indir_size = fun_get_rxfh_indir_size,
+ .get_rxfh_key_size = fun_get_rxfh_key_size,
+ .get_rxfh = fun_get_rxfh,
+ .set_rxfh = fun_set_rxfh,
+ .get_channels = fun_get_channels,
+ .set_channels = fun_set_channels,
+ .get_fecparam = fun_get_fecparam,
+ .set_fecparam = fun_set_fecparam,
+ .get_pauseparam = fun_get_pauseparam,
+ .set_pauseparam = fun_set_pauseparam,
+ .nway_reset = fun_restart_an,
+ .get_pause_stats = fun_get_pause_stats,
+ .get_fec_stats = fun_get_fec_stats,
+ .get_eth_mac_stats = fun_get_802_3_stats,
+ .get_eth_ctrl_stats = fun_get_802_3_ctrl_stats,
+ .get_rmon_stats = fun_get_rmon_stats,
+};
+
+void fun_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &fun_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ktls.c b/drivers/net/ethernet/fungible/funeth/funeth_ktls.c
new file mode 100644
index 000000000000..f871def70d70
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ktls.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include "funeth.h"
+#include "funeth_ktls.h"
+
+static int fun_admin_ktls_create(struct funeth_priv *fp, unsigned int id)
+{
+ struct fun_admin_ktls_create_req req = {
+ .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
+ sizeof(req)),
+ .subop = FUN_ADMIN_SUBOP_CREATE,
+ .id = cpu_to_be32(id),
+ };
+
+ return fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
+}
+
+static int fun_ktls_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct fun_admin_ktls_modify_req req = {
+ .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
+ sizeof(req)),
+ .subop = FUN_ADMIN_SUBOP_MODIFY,
+ .id = cpu_to_be32(fp->ktls_id),
+ .tcp_seq = cpu_to_be32(start_offload_tcp_sn),
+ };
+ struct fun_admin_ktls_modify_rsp rsp;
+ struct fun_ktls_tx_ctx *tx_ctx;
+ int rc;
+
+ if (direction != TLS_OFFLOAD_CTX_DIR_TX)
+ return -EOPNOTSUPP;
+
+ if (crypto_info->version == TLS_1_2_VERSION)
+ req.version = FUN_KTLS_TLSV2;
+ else
+ return -EOPNOTSUPP;
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *c = (void *)crypto_info;
+
+ req.cipher = FUN_KTLS_CIPHER_AES_GCM_128;
+ memcpy(req.key, c->key, sizeof(c->key));
+ memcpy(req.iv, c->iv, sizeof(c->iv));
+ memcpy(req.salt, c->salt, sizeof(c->salt));
+ memcpy(req.record_seq, c->rec_seq, sizeof(c->rec_seq));
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &req.common, &rsp,
+ sizeof(rsp), 0);
+ memzero_explicit(&req, sizeof(req));
+ if (rc)
+ return rc;
+
+ tx_ctx = tls_driver_ctx(sk, direction);
+ tx_ctx->tlsid = rsp.tlsid;
+ tx_ctx->next_seq = start_offload_tcp_sn;
+ atomic64_inc(&fp->tx_tls_add);
+ return 0;
+}
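+
+/* Illustrative userspace path to the handler above (not part of the driver):
+ * after attaching the "tls" ULP, installing a Tx key attempts device offload
+ * on netdevs advertising NETIF_F_HW_TLS_TX and ends up in ->tls_dev_add().
+ *
+ *	struct tls12_crypto_info_aes_gcm_128 ci = {
+ *		.info.version = TLS_1_2_VERSION,
+ *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
+ *	};
+ *	// fill ci.key/ci.iv/ci.salt/ci.rec_seq from the TLS handshake
+ *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
+ *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
+ */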
+
+static void fun_ktls_del(struct net_device *netdev,
+ struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct fun_admin_ktls_modify_req req;
+ struct fun_ktls_tx_ctx *tx_ctx;
+
+ if (direction != TLS_OFFLOAD_CTX_DIR_TX)
+ return;
+
+ tx_ctx = __tls_driver_ctx(tls_ctx, direction);
+
+ req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
+ offsetof(struct fun_admin_ktls_modify_req, tcp_seq));
+ req.subop = FUN_ADMIN_SUBOP_MODIFY;
+ req.flags = cpu_to_be16(FUN_KTLS_MODIFY_REMOVE);
+ req.id = cpu_to_be32(fp->ktls_id);
+ req.tlsid = tx_ctx->tlsid;
+
+ fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
+ atomic64_inc(&fp->tx_tls_del);
+}
+
+static int fun_ktls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
+ u8 *rcd_sn, enum tls_offload_ctx_dir direction)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct fun_admin_ktls_modify_req req;
+ struct fun_ktls_tx_ctx *tx_ctx;
+ int rc;
+
+ if (direction != TLS_OFFLOAD_CTX_DIR_TX)
+ return -EOPNOTSUPP;
+
+ tx_ctx = tls_driver_ctx(sk, direction);
+
+ req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
+ offsetof(struct fun_admin_ktls_modify_req, key));
+ req.subop = FUN_ADMIN_SUBOP_MODIFY;
+ req.flags = 0;
+ req.id = cpu_to_be32(fp->ktls_id);
+ req.tlsid = tx_ctx->tlsid;
+ req.tcp_seq = cpu_to_be32(seq);
+ req.version = 0;
+ req.cipher = 0;
+ memcpy(req.record_seq, rcd_sn, sizeof(req.record_seq));
+
+ atomic64_inc(&fp->tx_tls_resync);
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
+ if (!rc)
+ tx_ctx->next_seq = seq;
+ return rc;
+}
+
+static const struct tlsdev_ops fun_ktls_ops = {
+ .tls_dev_add = fun_ktls_add,
+ .tls_dev_del = fun_ktls_del,
+ .tls_dev_resync = fun_ktls_resync,
+};
+
+int fun_ktls_init(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ int rc;
+
+ rc = fun_admin_ktls_create(fp, netdev->dev_port);
+ if (rc)
+ return rc;
+
+ fp->ktls_id = netdev->dev_port;
+ netdev->tlsdev_ops = &fun_ktls_ops;
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ netdev->features |= NETIF_F_HW_TLS_TX;
+ return 0;
+}
+
+void fun_ktls_cleanup(struct funeth_priv *fp)
+{
+ if (fp->ktls_id == FUN_HCI_ID_INVALID)
+ return;
+
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_KTLS, 0, fp->ktls_id);
+ fp->ktls_id = FUN_HCI_ID_INVALID;
+}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ktls.h b/drivers/net/ethernet/fungible/funeth/funeth_ktls.h
new file mode 100644
index 000000000000..9d6f2141a959
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ktls.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUN_KTLS_H
+#define _FUN_KTLS_H
+
+#include <net/tls.h>
+
+struct funeth_priv;
+
+struct fun_ktls_tx_ctx {
+ __be64 tlsid;
+ u32 next_seq;
+};
+
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+int fun_ktls_init(struct net_device *netdev);
+void fun_ktls_cleanup(struct funeth_priv *fp);
+
+#else
+
+static inline int fun_ktls_init(struct net_device *netdev)
+{
+	return 0;
+}
+
+static inline void fun_ktls_cleanup(struct funeth_priv *fp)
+{
+}
+#endif
+
+#endif /* _FUN_KTLS_H */
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c
new file mode 100644
index 000000000000..67dd02ed1fa3
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c
@@ -0,0 +1,2091 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/bpf.h>
+#include <linux/crash_dump.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/filter.h>
+#include <linux/idr.h>
+#include <linux/if_vlan.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <linux/inetdevice.h>
+
+#include "funeth.h"
+#include "funeth_devlink.h"
+#include "funeth_ktls.h"
+#include "fun_port.h"
+#include "fun_queue.h"
+#include "funeth_txrx.h"
+
+#define ADMIN_SQ_DEPTH 32
+#define ADMIN_CQ_DEPTH 64
+#define ADMIN_RQ_DEPTH 16
+
+/* Default number of Tx/Rx queues. */
+#define FUN_DFLT_QUEUES 16U
+
+enum {
+ FUN_SERV_RES_CHANGE = FUN_SERV_FIRST_AVAIL,
+ FUN_SERV_DEL_PORTS,
+};
+
+static const struct pci_device_id funeth_id_table[] = {
+ { PCI_VDEVICE(FUNGIBLE, 0x0101) },
+ { PCI_VDEVICE(FUNGIBLE, 0x0181) },
+ { 0, }
+};
+
+/* Issue a port write admin command with @n key/value pairs. */
+static int fun_port_write_cmds(struct funeth_priv *fp, unsigned int n,
+ const int *keys, const u64 *data)
+{
+ unsigned int cmd_size, i;
+ union {
+ struct fun_admin_port_req req;
+ struct fun_admin_port_rsp rsp;
+ u8 v[ADMIN_SQE_SIZE];
+ } cmd;
+
+ cmd_size = offsetof(struct fun_admin_port_req, u.write.write48) +
+ n * sizeof(struct fun_admin_write48_req);
+ if (cmd_size > sizeof(cmd) || cmd_size > ADMIN_RSP_MAX_LEN)
+ return -EINVAL;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
+ cmd_size);
+ cmd.req.u.write =
+ FUN_ADMIN_PORT_WRITE_REQ_INIT(FUN_ADMIN_SUBOP_WRITE, 0,
+ fp->netdev->dev_port);
+ for (i = 0; i < n; i++)
+ cmd.req.u.write.write48[i] =
+ FUN_ADMIN_WRITE48_REQ_INIT(keys[i], data[i]);
+
+ return fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common,
+ &cmd.rsp, cmd_size, 0);
+}
+
+int fun_port_write_cmd(struct funeth_priv *fp, int key, u64 data)
+{
+ return fun_port_write_cmds(fp, 1, &key, &data);
+}
+
+/* Issue a port read admin command with @n key/value pairs. */
+static int fun_port_read_cmds(struct funeth_priv *fp, unsigned int n,
+ const int *keys, u64 *data)
+{
+ const struct fun_admin_read48_rsp *r48rsp;
+ unsigned int cmd_size, i;
+ int rc;
+ union {
+ struct fun_admin_port_req req;
+ struct fun_admin_port_rsp rsp;
+ u8 v[ADMIN_SQE_SIZE];
+ } cmd;
+
+ cmd_size = offsetof(struct fun_admin_port_req, u.read.read48) +
+ n * sizeof(struct fun_admin_read48_req);
+ if (cmd_size > sizeof(cmd) || cmd_size > ADMIN_RSP_MAX_LEN)
+ return -EINVAL;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
+ cmd_size);
+ cmd.req.u.read =
+ FUN_ADMIN_PORT_READ_REQ_INIT(FUN_ADMIN_SUBOP_READ, 0,
+ fp->netdev->dev_port);
+ for (i = 0; i < n; i++)
+ cmd.req.u.read.read48[i] = FUN_ADMIN_READ48_REQ_INIT(keys[i]);
+
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common,
+ &cmd.rsp, cmd_size, 0);
+ if (rc)
+ return rc;
+
+ for (r48rsp = cmd.rsp.u.read.read48, i = 0; i < n; i++, r48rsp++) {
+ data[i] = FUN_ADMIN_READ48_RSP_DATA_G(r48rsp->key_to_data);
+ dev_dbg(fp->fdev->dev,
+ "port_read_rsp lport=%u (key_to_data=0x%llx) key=%d data:%lld retval:%lld",
+ fp->lport, r48rsp->key_to_data, keys[i], data[i],
+ FUN_ADMIN_READ48_RSP_RET_G(r48rsp->key_to_data));
+ }
+ return 0;
+}
+
+int fun_port_read_cmd(struct funeth_priv *fp, int key, u64 *data)
+{
+ return fun_port_read_cmds(fp, 1, &key, data);
+}
+
+static void fun_report_link(struct net_device *netdev)
+{
+ if (netif_carrier_ok(netdev)) {
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ const char *fec = "", *pause = "";
+ int speed = fp->link_speed;
+ char unit = 'M';
+
+ if (fp->link_speed >= SPEED_1000) {
+ speed /= 1000;
+ unit = 'G';
+ }
+
+ if (fp->active_fec & FUN_PORT_FEC_RS)
+ fec = ", RS-FEC";
+ else if (fp->active_fec & FUN_PORT_FEC_FC)
+ fec = ", BASER-FEC";
+
+ if ((fp->active_fc & FUN_PORT_CAP_PAUSE_MASK) == FUN_PORT_CAP_PAUSE_MASK)
+ pause = ", Tx/Rx PAUSE";
+ else if (fp->active_fc & FUN_PORT_CAP_RX_PAUSE)
+ pause = ", Rx PAUSE";
+ else if (fp->active_fc & FUN_PORT_CAP_TX_PAUSE)
+ pause = ", Tx PAUSE";
+
+ netdev_info(netdev, "Link up at %d %cb/s full-duplex%s%s\n",
+ speed, unit, pause, fec);
+ } else {
+ netdev_info(netdev, "Link down\n");
+ }
+}
+
+static int fun_adi_write(struct fun_dev *fdev, enum fun_admin_adi_attr attr,
+ unsigned int adi_id, const struct fun_adi_param *param)
+{
+ struct fun_admin_adi_req req = {
+ .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_ADI,
+ sizeof(req)),
+ .u.write.subop = FUN_ADMIN_SUBOP_WRITE,
+ .u.write.attribute = attr,
+ .u.write.id = cpu_to_be32(adi_id),
+ .u.write.param = *param
+ };
+
+ return fun_submit_admin_sync_cmd(fdev, &req.common, NULL, 0, 0);
+}
+
+/* Configure RSS for the given port. @op determines whether a new RSS context
+ * is to be created or whether an existing one should be reconfigured. The
+ * remaining parameters specify the hashing algorithm, key, and indirection
+ * table.
+ *
+ * This initiates packet delivery to the Rx queues set in the indirection
+ * table.
+ */
+int fun_config_rss(struct net_device *dev, int algo, const u8 *key,
+ const u32 *qtable, u8 op)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int table_len = fp->indir_table_nentries;
+ unsigned int len = FUN_ETH_RSS_MAX_KEY_SIZE + sizeof(u32) * table_len;
+ struct funeth_rxq **rxqs = rtnl_dereference(fp->rxqs);
+ union {
+ struct {
+ struct fun_admin_rss_req req;
+ struct fun_dataop_gl gl;
+ };
+ struct fun_admin_generic_create_rsp rsp;
+ } cmd;
+	__be32 *indir_tab;
+	unsigned int i;
+	u16 flags;
+	int rc;
+
+ if (op != FUN_ADMIN_SUBOP_CREATE && fp->rss_hw_id == FUN_HCI_ID_INVALID)
+ return -EINVAL;
+
+ flags = op == FUN_ADMIN_SUBOP_CREATE ?
+ FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR : 0;
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_RSS,
+ sizeof(cmd));
+ cmd.req.u.create =
+ FUN_ADMIN_RSS_CREATE_REQ_INIT(op, flags, fp->rss_hw_id,
+ dev->dev_port, algo,
+ FUN_ETH_RSS_MAX_KEY_SIZE,
+ table_len, 0,
+ FUN_ETH_RSS_MAX_KEY_SIZE);
+ cmd.req.u.create.dataop = FUN_DATAOP_HDR_INIT(1, 0, 1, 0, len);
+ fun_dataop_gl_init(&cmd.gl, 0, 0, len, fp->rss_dma_addr);
+
+ /* write the key and indirection table into the RSS DMA area */
+ memcpy(fp->rss_cfg, key, FUN_ETH_RSS_MAX_KEY_SIZE);
+ indir_tab = fp->rss_cfg + FUN_ETH_RSS_MAX_KEY_SIZE;
+	for (i = 0; i < table_len; i++)
+		*indir_tab++ = cpu_to_be32(rxqs[*qtable++]->hw_cqid);
+
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common,
+ &cmd.rsp, sizeof(cmd.rsp), 0);
+ if (!rc && op == FUN_ADMIN_SUBOP_CREATE)
+ fp->rss_hw_id = be32_to_cpu(cmd.rsp.id);
+ return rc;
+}
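+
+/* Layout of the RSS DMA area written just above:
+ *
+ *	+--------------------------------+------------------------------+
+ *	| key (FUN_ETH_RSS_MAX_KEY_SIZE) | table_len x __be32 HW CQ ids |
+ *	+--------------------------------+------------------------------+
+ *
+ * Note the table holds hardware CQ ids, not the ethtool queue indices passed
+ * in @qtable; the loop above performs that translation.
+ */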
+
+/* Destroy the HW RSS context associated with the given port. This also stops
+ * all packet delivery to our Rx queues.
+ */
+static void fun_destroy_rss(struct funeth_priv *fp)
+{
+ if (fp->rss_hw_id != FUN_HCI_ID_INVALID) {
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_RSS, 0, fp->rss_hw_id);
+ fp->rss_hw_id = FUN_HCI_ID_INVALID;
+ }
+}
+
+static void fun_irq_aff_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct fun_irq *p = container_of(notify, struct fun_irq, aff_notify);
+
+ cpumask_copy(&p->affinity_mask, mask);
+}
+
+static void fun_irq_aff_release(struct kref __always_unused *ref)
+{
+}
+
+/* Allocate an IRQ structure, assign an MSI-X index and initial affinity to it,
+ * and add it to the IRQ XArray.
+ */
+static struct fun_irq *fun_alloc_qirq(struct funeth_priv *fp, unsigned int idx,
+ int node, unsigned int xa_idx_offset)
+{
+ struct fun_irq *irq;
+ int cpu, res;
+
+ cpu = cpumask_local_spread(idx, node);
+ node = cpu_to_mem(cpu);
+
+ irq = kzalloc_node(sizeof(*irq), GFP_KERNEL, node);
+ if (!irq)
+ return ERR_PTR(-ENOMEM);
+
+ res = fun_reserve_irqs(fp->fdev, 1, &irq->irq_idx);
+ if (res != 1)
+ goto free_irq;
+
+ res = xa_insert(&fp->irqs, idx + xa_idx_offset, irq, GFP_KERNEL);
+ if (res)
+ goto release_irq;
+
+ irq->irq = pci_irq_vector(fp->pdev, irq->irq_idx);
+ cpumask_set_cpu(cpu, &irq->affinity_mask);
+ irq->aff_notify.notify = fun_irq_aff_notify;
+ irq->aff_notify.release = fun_irq_aff_release;
+ irq->state = FUN_IRQ_INIT;
+ return irq;
+
+release_irq:
+ fun_release_irqs(fp->fdev, 1, &irq->irq_idx);
+free_irq:
+ kfree(irq);
+ return ERR_PTR(res);
+}
+
+static void fun_free_qirq(struct funeth_priv *fp, struct fun_irq *irq)
+{
+ netif_napi_del(&irq->napi);
+ fun_release_irqs(fp->fdev, 1, &irq->irq_idx);
+ kfree(irq);
+}
+
+/* Release the IRQs reserved for Tx/Rx queues that aren't being used. */
+static void fun_prune_queue_irqs(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int nreleased = 0;
+ struct fun_irq *irq;
+ unsigned long idx;
+
+ xa_for_each(&fp->irqs, idx, irq) {
+ if (irq->txq || irq->rxq) /* skip those in use */
+ continue;
+
+ xa_erase(&fp->irqs, idx);
+ fun_free_qirq(fp, irq);
+ nreleased++;
+ if (idx < fp->rx_irq_ofst)
+ fp->num_tx_irqs--;
+ else
+ fp->num_rx_irqs--;
+ }
+ netif_info(fp, intr, dev, "Released %u queue IRQs\n", nreleased);
+}
+
+/* Reserve IRQs, one per queue, to accommodate the requested queue numbers @ntx
+ * and @nrx. IRQs are added incrementally to those we already have.
+ * We hold on to allocated IRQs until garbage collection of unused IRQs is
+ * separately requested.
+ */
+static int fun_alloc_queue_irqs(struct net_device *dev, unsigned int ntx,
+ unsigned int nrx)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ int node = dev_to_node(&fp->pdev->dev);
+ struct fun_irq *irq;
+ unsigned int i;
+
+ for (i = fp->num_tx_irqs; i < ntx; i++) {
+ irq = fun_alloc_qirq(fp, i, node, 0);
+ if (IS_ERR(irq))
+ return PTR_ERR(irq);
+
+ fp->num_tx_irqs++;
+ netif_tx_napi_add(dev, &irq->napi, fun_txq_napi_poll,
+ NAPI_POLL_WEIGHT);
+ }
+
+ for (i = fp->num_rx_irqs; i < nrx; i++) {
+ irq = fun_alloc_qirq(fp, i, node, fp->rx_irq_ofst);
+ if (IS_ERR(irq))
+ return PTR_ERR(irq);
+
+ fp->num_rx_irqs++;
+ netif_napi_add(dev, &irq->napi, fun_rxq_napi_poll,
+ NAPI_POLL_WEIGHT);
+ }
+
+ netif_info(fp, intr, dev, "Reserved %u/%u IRQs for Tx/Rx queues\n",
+ ntx, nrx);
+ return 0;
+}
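+
+/* Example of the incremental policy above: growing from 4 to 8 Tx queues
+ * allocates only the 4 missing IRQs; shrinking back to 4 releases nothing
+ * here, as the surplus IRQs keep their vectors until fun_prune_queue_irqs()
+ * collects those with no txq/rxq attached.
+ */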
+
+static void free_txqs(struct funeth_txq **txqs, unsigned int nqs,
+ unsigned int start, int state)
+{
+ unsigned int i;
+
+ for (i = start; i < nqs && txqs[i]; i++)
+ txqs[i] = funeth_txq_free(txqs[i], state);
+}
+
+static int alloc_txqs(struct net_device *dev, struct funeth_txq **txqs,
+ unsigned int nqs, unsigned int depth, unsigned int start,
+ int state)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int i;
+ int err;
+
+ for (i = start; i < nqs; i++) {
+ err = funeth_txq_create(dev, i, depth, xa_load(&fp->irqs, i),
+ state, &txqs[i]);
+ if (err) {
+ free_txqs(txqs, nqs, start, FUN_QSTATE_DESTROYED);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void free_rxqs(struct funeth_rxq **rxqs, unsigned int nqs,
+ unsigned int start, int state)
+{
+ unsigned int i;
+
+ for (i = start; i < nqs && rxqs[i]; i++)
+ rxqs[i] = funeth_rxq_free(rxqs[i], state);
+}
+
+static int alloc_rxqs(struct net_device *dev, struct funeth_rxq **rxqs,
+ unsigned int nqs, unsigned int ncqe, unsigned int nrqe,
+ unsigned int start, int state)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int i;
+ int err;
+
+ for (i = start; i < nqs; i++) {
+ err = funeth_rxq_create(dev, i, ncqe, nrqe,
+ xa_load(&fp->irqs, i + fp->rx_irq_ofst),
+ state, &rxqs[i]);
+ if (err) {
+ free_rxqs(rxqs, nqs, start, FUN_QSTATE_DESTROYED);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void free_xdpqs(struct funeth_txq **xdpqs, unsigned int nqs,
+ unsigned int start, int state)
+{
+ unsigned int i;
+
+ for (i = start; i < nqs && xdpqs[i]; i++)
+ xdpqs[i] = funeth_txq_free(xdpqs[i], state);
+
+ if (state == FUN_QSTATE_DESTROYED)
+ kfree(xdpqs);
+}
+
+static struct funeth_txq **alloc_xdpqs(struct net_device *dev, unsigned int nqs,
+ unsigned int depth, unsigned int start,
+ int state)
+{
+ struct funeth_txq **xdpqs;
+ unsigned int i;
+ int err;
+
+ xdpqs = kcalloc(nqs, sizeof(*xdpqs), GFP_KERNEL);
+ if (!xdpqs)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = start; i < nqs; i++) {
+ err = funeth_txq_create(dev, i, depth, NULL, state, &xdpqs[i]);
+ if (err) {
+ free_xdpqs(xdpqs, nqs, start, FUN_QSTATE_DESTROYED);
+ return ERR_PTR(err);
+ }
+ }
+ return xdpqs;
+}
+
+static void fun_free_rings(struct net_device *netdev, struct fun_qset *qset)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct funeth_txq **xdpqs = qset->xdpqs;
+ struct funeth_rxq **rxqs = qset->rxqs;
+
+ /* qset may not specify any queues to operate on. In that case the
+ * currently installed queues are implied.
+ */
+ if (!rxqs) {
+ rxqs = rtnl_dereference(fp->rxqs);
+ xdpqs = rtnl_dereference(fp->xdpqs);
+ qset->txqs = fp->txqs;
+ qset->nrxqs = netdev->real_num_rx_queues;
+ qset->ntxqs = netdev->real_num_tx_queues;
+ qset->nxdpqs = fp->num_xdpqs;
+ }
+ if (!rxqs)
+ return;
+
+ if (rxqs == rtnl_dereference(fp->rxqs)) {
+ rcu_assign_pointer(fp->rxqs, NULL);
+ rcu_assign_pointer(fp->xdpqs, NULL);
+ synchronize_net();
+ fp->txqs = NULL;
+ }
+
+ free_rxqs(rxqs, qset->nrxqs, qset->rxq_start, qset->state);
+ free_txqs(qset->txqs, qset->ntxqs, qset->txq_start, qset->state);
+ free_xdpqs(xdpqs, qset->nxdpqs, qset->xdpq_start, qset->state);
+ if (qset->state == FUN_QSTATE_DESTROYED)
+ kfree(rxqs);
+
+ /* Tell the caller which queues were operated on. */
+ qset->rxqs = rxqs;
+ qset->xdpqs = xdpqs;
+}
+
+static int fun_alloc_rings(struct net_device *netdev, struct fun_qset *qset)
+{
+ struct funeth_txq **xdpqs = NULL, **txqs;
+ struct funeth_rxq **rxqs;
+ int err;
+
+ err = fun_alloc_queue_irqs(netdev, qset->ntxqs, qset->nrxqs);
+ if (err)
+ return err;
+
+ rxqs = kcalloc(qset->ntxqs + qset->nrxqs, sizeof(*rxqs), GFP_KERNEL);
+ if (!rxqs)
+ return -ENOMEM;
+
+ if (qset->nxdpqs) {
+ xdpqs = alloc_xdpqs(netdev, qset->nxdpqs, qset->sq_depth,
+ qset->xdpq_start, qset->state);
+ if (IS_ERR(xdpqs)) {
+ err = PTR_ERR(xdpqs);
+ goto free_qvec;
+ }
+ }
+
+ txqs = (struct funeth_txq **)&rxqs[qset->nrxqs];
+ err = alloc_txqs(netdev, txqs, qset->ntxqs, qset->sq_depth,
+ qset->txq_start, qset->state);
+ if (err)
+ goto free_xdpqs;
+
+ err = alloc_rxqs(netdev, rxqs, qset->nrxqs, qset->cq_depth,
+ qset->rq_depth, qset->rxq_start, qset->state);
+ if (err)
+ goto free_txqs;
+
+ qset->rxqs = rxqs;
+ qset->txqs = txqs;
+ qset->xdpqs = xdpqs;
+ return 0;
+
+free_txqs:
+ free_txqs(txqs, qset->ntxqs, qset->txq_start, FUN_QSTATE_DESTROYED);
+free_xdpqs:
+ free_xdpqs(xdpqs, qset->nxdpqs, qset->xdpq_start, FUN_QSTATE_DESTROYED);
+free_qvec:
+ kfree(rxqs);
+ return err;
+}
+
+/* Take queues to the next level. Presently this means creating them on the
+ * device.
+ */
+static int fun_advance_ring_state(struct net_device *dev, struct fun_qset *qset)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ int i, err;
+
+ for (i = 0; i < qset->nrxqs; i++) {
+ err = fun_rxq_create_dev(qset->rxqs[i],
+ xa_load(&fp->irqs,
+ i + fp->rx_irq_ofst));
+ if (err)
+ goto out;
+ }
+
+ for (i = 0; i < qset->ntxqs; i++) {
+ err = fun_txq_create_dev(qset->txqs[i], xa_load(&fp->irqs, i));
+ if (err)
+ goto out;
+ }
+
+ for (i = 0; i < qset->nxdpqs; i++) {
+ err = fun_txq_create_dev(qset->xdpqs[i], NULL);
+ if (err)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ fun_free_rings(dev, qset);
+ return err;
+}
+
+static int fun_port_create(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ union {
+ struct fun_admin_port_req req;
+ struct fun_admin_port_rsp rsp;
+ } cmd;
+ int rc;
+
+ if (fp->lport != INVALID_LPORT)
+ return 0;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
+ sizeof(cmd.req));
+ cmd.req.u.create =
+ FUN_ADMIN_PORT_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, 0,
+ netdev->dev_port);
+
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp,
+ sizeof(cmd.rsp), 0);
+
+ if (!rc)
+ fp->lport = be16_to_cpu(cmd.rsp.u.create.lport);
+ return rc;
+}
+
+static int fun_port_destroy(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (fp->lport == INVALID_LPORT)
+ return 0;
+
+ fp->lport = INVALID_LPORT;
+ return fun_res_destroy(fp->fdev, FUN_ADMIN_OP_PORT, 0,
+ netdev->dev_port);
+}
+
+static int fun_eth_create(struct funeth_priv *fp)
+{
+ union {
+ struct fun_admin_eth_req req;
+ struct fun_admin_generic_create_rsp rsp;
+ } cmd;
+ int rc;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_ETH,
+ sizeof(cmd.req));
+ cmd.req.u.create = FUN_ADMIN_ETH_CREATE_REQ_INIT(
+ FUN_ADMIN_SUBOP_CREATE,
+ FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR,
+ 0, fp->netdev->dev_port);
+
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp,
+ sizeof(cmd.rsp), 0);
+ return rc ? rc : be32_to_cpu(cmd.rsp.id);
+}
+
+static int fun_vi_create(struct funeth_priv *fp)
+{
+ struct fun_admin_vi_req req = {
+ .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_VI,
+ sizeof(req)),
+ .u.create = FUN_ADMIN_VI_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE,
+ 0,
+ fp->netdev->dev_port,
+ fp->netdev->dev_port)
+ };
+
+ return fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
+}
+
+/* Helper to create an ETH flow and bind an SQ to it.
+ * Returns the ETH id (>= 0) on success or a negative error.
+ */
+int fun_create_and_bind_tx(struct funeth_priv *fp, u32 sqid)
+{
+ int rc, ethid;
+
+ ethid = fun_eth_create(fp);
+ if (ethid >= 0) {
+ rc = fun_bind(fp->fdev, FUN_ADMIN_BIND_TYPE_EPSQ, sqid,
+ FUN_ADMIN_BIND_TYPE_ETH, ethid);
+ if (rc) {
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, ethid);
+ ethid = rc;
+ }
+ }
+ return ethid;
+}
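+
+/* Illustrative use (hypothetical caller; the real callers live in the Tx
+ * queue setup code, with sqid being the hardware SQ id to bind):
+ *
+ *	int ethid = fun_create_and_bind_tx(fp, sqid);
+ *
+ *	if (ethid < 0)
+ *		return ethid;
+ *
+ * On success the ETH id must eventually be released with
+ * fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, ethid).
+ */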
+
+static irqreturn_t fun_queue_irq_handler(int irq, void *data)
+{
+ struct fun_irq *p = data;
+
+ if (p->rxq) {
+ prefetch(p->rxq->next_cqe_info);
+ p->rxq->irq_cnt++;
+ }
+ napi_schedule_irqoff(&p->napi);
+ return IRQ_HANDLED;
+}
+
+static int fun_enable_irqs(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned long idx, last;
+ unsigned int qidx;
+ struct fun_irq *p;
+ const char *qtype;
+ int err;
+
+ xa_for_each(&fp->irqs, idx, p) {
+ if (p->txq) {
+ qtype = "tx";
+ qidx = p->txq->qidx;
+ } else if (p->rxq) {
+ qtype = "rx";
+ qidx = p->rxq->qidx;
+ } else {
+ continue;
+ }
+
+ if (p->state != FUN_IRQ_INIT)
+ continue;
+
+ snprintf(p->name, sizeof(p->name) - 1, "%s-%s-%u", dev->name,
+ qtype, qidx);
+ err = request_irq(p->irq, fun_queue_irq_handler, 0, p->name, p);
+ if (err) {
+ netdev_err(dev, "Failed to allocate IRQ %u, err %d\n",
+ p->irq, err);
+ goto unroll;
+ }
+ p->state = FUN_IRQ_REQUESTED;
+ }
+
+ xa_for_each(&fp->irqs, idx, p) {
+ if (p->state != FUN_IRQ_REQUESTED)
+ continue;
+ irq_set_affinity_notifier(p->irq, &p->aff_notify);
+ irq_set_affinity_and_hint(p->irq, &p->affinity_mask);
+ napi_enable(&p->napi);
+ p->state = FUN_IRQ_ENABLED;
+ }
+
+ return 0;
+
+unroll:
+ last = idx - 1;
+ xa_for_each_range(&fp->irqs, idx, p, 0, last)
+ if (p->state == FUN_IRQ_REQUESTED) {
+ free_irq(p->irq, p);
+ p->state = FUN_IRQ_INIT;
+ }
+
+ return err;
+}
+
+static void fun_disable_one_irq(struct fun_irq *irq)
+{
+ napi_disable(&irq->napi);
+ irq_set_affinity_notifier(irq->irq, NULL);
+ irq_update_affinity_hint(irq->irq, NULL);
+ free_irq(irq->irq, irq);
+ irq->state = FUN_IRQ_INIT;
+}
+
+static void fun_disable_irqs(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_irq *p;
+ unsigned long idx;
+
+ xa_for_each(&fp->irqs, idx, p)
+ if (p->state == FUN_IRQ_ENABLED)
+ fun_disable_one_irq(p);
+}
+
+static void fun_down(struct net_device *dev, struct fun_qset *qset)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+
+ /* If we don't have queues the data path is already down.
+ * Note netif_running(dev) may be true.
+ */
+ if (!rcu_access_pointer(fp->rxqs))
+ return;
+
+ /* It is also down if the queues aren't on the device. */
+ if (fp->txqs[0]->init_state >= FUN_QSTATE_INIT_FULL) {
+ netif_info(fp, ifdown, dev,
+ "Tearing down data path on device\n");
+ fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_DISABLE, 0);
+
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+
+ fun_destroy_rss(fp);
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_VI, 0, dev->dev_port);
+ fun_disable_irqs(dev);
+ }
+
+ fun_free_rings(dev, qset);
+}
+
+static int fun_up(struct net_device *dev, struct fun_qset *qset)
+{
+ static const int port_keys[] = {
+ FUN_ADMIN_PORT_KEY_STATS_DMA_LOW,
+ FUN_ADMIN_PORT_KEY_STATS_DMA_HIGH,
+ FUN_ADMIN_PORT_KEY_ENABLE
+ };
+
+ struct funeth_priv *fp = netdev_priv(dev);
+ u64 vals[] = {
+ lower_32_bits(fp->stats_dma_addr),
+ upper_32_bits(fp->stats_dma_addr),
+ FUN_PORT_FLAG_ENABLE_NOTIFY
+ };
+ int err;
+
+ netif_info(fp, ifup, dev, "Setting up data path on device\n");
+
+ if (qset->rxqs[0]->init_state < FUN_QSTATE_INIT_FULL) {
+ err = fun_advance_ring_state(dev, qset);
+ if (err)
+ return err;
+ }
+
+ err = fun_vi_create(fp);
+ if (err)
+ goto free_queues;
+
+ fp->txqs = qset->txqs;
+ rcu_assign_pointer(fp->rxqs, qset->rxqs);
+ rcu_assign_pointer(fp->xdpqs, qset->xdpqs);
+
+ err = fun_enable_irqs(dev);
+ if (err)
+ goto destroy_vi;
+
+ if (fp->rss_cfg) {
+ err = fun_config_rss(dev, fp->hash_algo, fp->rss_key,
+ fp->indir_table, FUN_ADMIN_SUBOP_CREATE);
+ } else {
+ /* The non-RSS case has only 1 queue. */
+ err = fun_bind(fp->fdev, FUN_ADMIN_BIND_TYPE_VI, dev->dev_port,
+ FUN_ADMIN_BIND_TYPE_EPCQ,
+ qset->rxqs[0]->hw_cqid);
+ }
+ if (err)
+ goto disable_irqs;
+
+ err = fun_port_write_cmds(fp, 3, port_keys, vals);
+ if (err)
+ goto free_rss;
+
+ netif_tx_start_all_queues(dev);
+ return 0;
+
+free_rss:
+ fun_destroy_rss(fp);
+disable_irqs:
+ fun_disable_irqs(dev);
+destroy_vi:
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_VI, 0, dev->dev_port);
+free_queues:
+ fun_free_rings(dev, qset);
+ return err;
+}
+
+static int funeth_open(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct fun_qset qset = {
+ .nrxqs = netdev->real_num_rx_queues,
+ .ntxqs = netdev->real_num_tx_queues,
+ .nxdpqs = fp->num_xdpqs,
+ .cq_depth = fp->cq_depth,
+ .rq_depth = fp->rq_depth,
+ .sq_depth = fp->sq_depth,
+ .state = FUN_QSTATE_INIT_FULL,
+ };
+ int rc;
+
+ rc = fun_alloc_rings(netdev, &qset);
+ if (rc)
+ return rc;
+
+ rc = fun_up(netdev, &qset);
+ if (rc) {
+ qset.state = FUN_QSTATE_DESTROYED;
+ fun_free_rings(netdev, &qset);
+ }
+
+ return rc;
+}
+
+static int funeth_close(struct net_device *netdev)
+{
+ struct fun_qset qset = { .state = FUN_QSTATE_DESTROYED };
+
+ fun_down(netdev, &qset);
+ return 0;
+}
+
+static void fun_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct funeth_txq **xdpqs;
+ struct funeth_rxq **rxqs;
+ unsigned int i, start;
+
+ stats->tx_packets = fp->tx_packets;
+ stats->tx_bytes = fp->tx_bytes;
+ stats->tx_dropped = fp->tx_dropped;
+
+ stats->rx_packets = fp->rx_packets;
+ stats->rx_bytes = fp->rx_bytes;
+ stats->rx_dropped = fp->rx_dropped;
+
+ rcu_read_lock();
+ rxqs = rcu_dereference(fp->rxqs);
+ if (!rxqs)
+ goto unlock;
+
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ struct funeth_txq_stats txs;
+
+ FUN_QSTAT_READ(fp->txqs[i], start, txs);
+ stats->tx_packets += txs.tx_pkts;
+ stats->tx_bytes += txs.tx_bytes;
+ stats->tx_dropped += txs.tx_map_err;
+ }
+
+ for (i = 0; i < netdev->real_num_rx_queues; i++) {
+ struct funeth_rxq_stats rxs;
+
+ FUN_QSTAT_READ(rxqs[i], start, rxs);
+ stats->rx_packets += rxs.rx_pkts;
+ stats->rx_bytes += rxs.rx_bytes;
+ stats->rx_dropped += rxs.rx_map_err + rxs.rx_mem_drops;
+ }
+
+ xdpqs = rcu_dereference(fp->xdpqs);
+ if (!xdpqs)
+ goto unlock;
+
+ for (i = 0; i < fp->num_xdpqs; i++) {
+ struct funeth_txq_stats txs;
+
+ FUN_QSTAT_READ(xdpqs[i], start, txs);
+ stats->tx_packets += txs.tx_pkts;
+ stats->tx_bytes += txs.tx_bytes;
+ }
+unlock:
+ rcu_read_unlock();
+}
+
+static int fun_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ int rc;
+
+ rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MTU, new_mtu);
+ if (!rc)
+ netdev->mtu = new_mtu;
+ return rc;
+}
+
+static int fun_set_macaddr(struct net_device *netdev, void *addr)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct sockaddr *saddr = addr;
+ int rc;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(netdev->dev_addr, saddr->sa_data))
+ return 0;
+
+ rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MACADDR,
+ ether_addr_to_u64(saddr->sa_data));
+ if (!rc)
+ eth_hw_addr_set(netdev, saddr->sa_data);
+ return rc;
+}
+
+static int fun_get_port_attributes(struct net_device *netdev)
+{
+ static const int keys[] = {
+ FUN_ADMIN_PORT_KEY_MACADDR, FUN_ADMIN_PORT_KEY_CAPABILITIES,
+ FUN_ADMIN_PORT_KEY_ADVERT, FUN_ADMIN_PORT_KEY_MTU
+ };
+ static const int phys_keys[] = {
+ FUN_ADMIN_PORT_KEY_LANE_ATTRS,
+ };
+
+ struct funeth_priv *fp = netdev_priv(netdev);
+ u64 data[ARRAY_SIZE(keys)];
+ u8 mac[ETH_ALEN];
+ int i, rc;
+
+ rc = fun_port_read_cmds(fp, ARRAY_SIZE(keys), keys, data);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < ARRAY_SIZE(keys); i++) {
+ switch (keys[i]) {
+ case FUN_ADMIN_PORT_KEY_MACADDR:
+ u64_to_ether_addr(data[i], mac);
+ if (is_zero_ether_addr(mac)) {
+ eth_hw_addr_random(netdev);
+ } else if (is_valid_ether_addr(mac)) {
+ eth_hw_addr_set(netdev, mac);
+ } else {
+ netdev_err(netdev,
+ "device provided a bad MAC address %pM\n",
+ mac);
+ return -EINVAL;
+ }
+ break;
+
+ case FUN_ADMIN_PORT_KEY_CAPABILITIES:
+ fp->port_caps = data[i];
+ break;
+
+ case FUN_ADMIN_PORT_KEY_ADVERT:
+ fp->advertising = data[i];
+ break;
+
+ case FUN_ADMIN_PORT_KEY_MTU:
+ netdev->mtu = data[i];
+ break;
+ }
+ }
+
+ if (!(fp->port_caps & FUN_PORT_CAP_VPORT)) {
+ rc = fun_port_read_cmds(fp, ARRAY_SIZE(phys_keys), phys_keys,
+ data);
+ if (rc)
+ return rc;
+
+ fp->lane_attrs = data[0];
+ }
+
+ if (netdev->addr_assign_type == NET_ADDR_RANDOM)
+ return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MACADDR,
+ ether_addr_to_u64(netdev->dev_addr));
+ return 0;
+}
+
+static int fun_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ const struct funeth_priv *fp = netdev_priv(dev);
+
+ return copy_to_user(ifr->ifr_data, &fp->hwtstamp_cfg,
+ sizeof(fp->hwtstamp_cfg)) ? -EFAULT : 0;
+}
+
+static int fun_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct hwtstamp_config cfg;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* no TX HW timestamps */
+ cfg.tx_type = HWTSTAMP_TX_OFF;
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ fp->hwtstamp_cfg = cfg;
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
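+
+/* Illustrative userspace request (the PTP filters above are all coerced to
+ * HWTSTAMP_FILTER_ALL, which callers must accept per the SIOCSHWTSTAMP ABI):
+ *
+ *	struct hwtstamp_config cfg = { .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT };
+ *	struct ifreq ifr = { 0 };
+ *
+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *	ifr.ifr_data = (char *)&cfg;
+ *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
+ *
+ * On return, cfg.rx_filter reads back as HWTSTAMP_FILTER_ALL.
+ */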
+
+static int fun_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return fun_hwtstamp_set(dev, ifr);
+ case SIOCGHWTSTAMP:
+ return fun_hwtstamp_get(dev, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Prepare the queues for XDP. */
+static int fun_enter_xdp(struct net_device *dev, struct bpf_prog *prog)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int i, nqs = num_online_cpus();
+ struct funeth_txq **xdpqs;
+ struct funeth_rxq **rxqs;
+ int err;
+
+ xdpqs = alloc_xdpqs(dev, nqs, fp->sq_depth, 0, FUN_QSTATE_INIT_FULL);
+ if (IS_ERR(xdpqs))
+ return PTR_ERR(xdpqs);
+
+ rxqs = rtnl_dereference(fp->rxqs);
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ err = fun_rxq_set_bpf(rxqs[i], prog);
+ if (err)
+ goto out;
+ }
+
+ fp->num_xdpqs = nqs;
+ rcu_assign_pointer(fp->xdpqs, xdpqs);
+ return 0;
+out:
+ while (i--)
+ fun_rxq_set_bpf(rxqs[i], NULL);
+
+ free_xdpqs(xdpqs, nqs, 0, FUN_QSTATE_DESTROYED);
+ return err;
+}
+
+/* Set the queues for non-XDP operation. */
+static void fun_end_xdp(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct funeth_txq **xdpqs;
+ struct funeth_rxq **rxqs;
+ unsigned int i;
+
+ xdpqs = rtnl_dereference(fp->xdpqs);
+ rcu_assign_pointer(fp->xdpqs, NULL);
+ synchronize_net();
+ /* at this point both Rx and Tx XDP processing has ended */
+
+ free_xdpqs(xdpqs, fp->num_xdpqs, 0, FUN_QSTATE_DESTROYED);
+ fp->num_xdpqs = 0;
+
+ rxqs = rtnl_dereference(fp->rxqs);
+ for (i = 0; i < dev->real_num_rx_queues; i++)
+ fun_rxq_set_bpf(rxqs[i], NULL);
+}
+
+#define XDP_MAX_MTU \
+ (PAGE_SIZE - FUN_XDP_HEADROOM - VLAN_ETH_HLEN - FUN_RX_TAILROOM)
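+
+/* Worked example with illustrative values (the real constants come from the
+ * driver and kernel headers): with 4096-byte pages, 192 bytes of XDP
+ * headroom, an 18-byte VLAN Ethernet header, and about 320 bytes of tailroom
+ * for the skb_shared_info, one page accommodates an MTU of roughly
+ * 4096 - 192 - 18 - 320 = 3566 bytes.
+ */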
+
+static int fun_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct bpf_prog *old_prog, *prog = xdp->prog;
+ struct funeth_priv *fp = netdev_priv(dev);
+ int i, err;
+
+ /* XDP uses at most one buffer */
+ if (prog && dev->mtu > XDP_MAX_MTU) {
+ netdev_err(dev, "device MTU %u too large for XDP\n", dev->mtu);
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "Device MTU too large for XDP");
+ return -EINVAL;
+ }
+
+ if (!netif_running(dev)) {
+ fp->num_xdpqs = prog ? num_online_cpus() : 0;
+ } else if (prog && !fp->xdp_prog) {
+ err = fun_enter_xdp(dev, prog);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "Failed to set queues for XDP.");
+ return err;
+ }
+ } else if (!prog && fp->xdp_prog) {
+ fun_end_xdp(dev);
+ } else {
+ struct funeth_rxq **rxqs = rtnl_dereference(fp->rxqs);
+
+ for (i = 0; i < dev->real_num_rx_queues; i++)
+ WRITE_ONCE(rxqs[i]->xdp_prog, prog);
+ }
+
+ dev->max_mtu = prog ? XDP_MAX_MTU : FUN_MAX_MTU;
+ old_prog = xchg(&fp->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+static int fun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return fun_xdp_setup(dev, xdp);
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct devlink_port *fun_get_devlink_port(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+
+ return &fp->dl_port;
+}
+
+static int fun_init_vports(struct fun_ethdev *ed, unsigned int n)
+{
+ if (ed->num_vports)
+ return -EINVAL;
+
+ ed->vport_info = kvcalloc(n, sizeof(*ed->vport_info), GFP_KERNEL);
+ if (!ed->vport_info)
+ return -ENOMEM;
+ ed->num_vports = n;
+ return 0;
+}
+
+static void fun_free_vports(struct fun_ethdev *ed)
+{
+ kvfree(ed->vport_info);
+ ed->vport_info = NULL;
+ ed->num_vports = 0;
+}
+
+static struct fun_vport_info *fun_get_vport(struct fun_ethdev *ed,
+ unsigned int vport)
+{
+ if (!ed->vport_info || vport >= ed->num_vports)
+ return NULL;
+
+ return ed->vport_info + vport;
+}
+
+static int fun_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_adi_param mac_param = {};
+ struct fun_dev *fdev = fp->fdev;
+ struct fun_ethdev *ed = to_fun_ethdev(fdev);
+ struct fun_vport_info *vi;
+ int rc = -EINVAL;
+
+ if (is_multicast_ether_addr(mac))
+ return -EINVAL;
+
+ mutex_lock(&ed->state_mutex);
+ vi = fun_get_vport(ed, vf);
+ if (!vi)
+ goto unlock;
+
+ mac_param.u.mac = FUN_ADI_MAC_INIT(ether_addr_to_u64(mac));
+ rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_MACADDR, vf + 1,
+ &mac_param);
+ if (!rc)
+ ether_addr_copy(vi->mac, mac);
+unlock:
+ mutex_unlock(&ed->state_mutex);
+ return rc;
+}
+
+static int fun_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_adi_param vlan_param = {};
+ struct fun_dev *fdev = fp->fdev;
+ struct fun_ethdev *ed = to_fun_ethdev(fdev);
+ struct fun_vport_info *vi;
+ int rc = -EINVAL;
+
+ if (vlan > 4095 || qos > 7)
+ return -EINVAL;
+ if (vlan_proto && vlan_proto != htons(ETH_P_8021Q) &&
+ vlan_proto != htons(ETH_P_8021AD))
+ return -EINVAL;
+
+ mutex_lock(&ed->state_mutex);
+ vi = fun_get_vport(ed, vf);
+ if (!vi)
+ goto unlock;
+
+ vlan_param.u.vlan = FUN_ADI_VLAN_INIT(be16_to_cpu(vlan_proto),
+ ((u16)qos << VLAN_PRIO_SHIFT) | vlan);
+ rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_VLAN, vf + 1, &vlan_param);
+ if (!rc) {
+ vi->vlan = vlan;
+ vi->qos = qos;
+ vi->vlan_proto = vlan_proto;
+ }
+unlock:
+ mutex_unlock(&ed->state_mutex);
+ return rc;
+}
+
+static int fun_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
+ int max_tx_rate)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_adi_param rate_param = {};
+ struct fun_dev *fdev = fp->fdev;
+ struct fun_ethdev *ed = to_fun_ethdev(fdev);
+ struct fun_vport_info *vi;
+ int rc = -EINVAL;
+
+ if (min_tx_rate)
+ return -EINVAL;
+
+ mutex_lock(&ed->state_mutex);
+ vi = fun_get_vport(ed, vf);
+ if (!vi)
+ goto unlock;
+
+ rate_param.u.rate = FUN_ADI_RATE_INIT(max_tx_rate);
+ rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_RATE, vf + 1, &rate_param);
+ if (!rc)
+ vi->max_rate = max_tx_rate;
+unlock:
+ mutex_unlock(&ed->state_mutex);
+ return rc;
+}
+
+static int fun_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivi)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_ethdev *ed = to_fun_ethdev(fp->fdev);
+ const struct fun_vport_info *vi;
+
+ mutex_lock(&ed->state_mutex);
+ vi = fun_get_vport(ed, vf);
+ if (!vi)
+ goto unlock;
+
+ memset(ivi, 0, sizeof(*ivi));
+ ivi->vf = vf;
+ ether_addr_copy(ivi->mac, vi->mac);
+ ivi->vlan = vi->vlan;
+ ivi->qos = vi->qos;
+ ivi->vlan_proto = vi->vlan_proto;
+ ivi->max_tx_rate = vi->max_rate;
+ ivi->spoofchk = vi->spoofchk;
+unlock:
+ mutex_unlock(&ed->state_mutex);
+ return vi ? 0 : -EINVAL;
+}
+
+static void fun_uninit(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+
+ fun_prune_queue_irqs(dev);
+ xa_destroy(&fp->irqs);
+}
+
+static const struct net_device_ops fun_netdev_ops = {
+ .ndo_open = funeth_open,
+ .ndo_stop = funeth_close,
+ .ndo_start_xmit = fun_start_xmit,
+ .ndo_get_stats64 = fun_get_stats64,
+ .ndo_change_mtu = fun_change_mtu,
+ .ndo_set_mac_address = fun_set_macaddr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_eth_ioctl = fun_ioctl,
+ .ndo_uninit = fun_uninit,
+ .ndo_bpf = fun_xdp,
+ .ndo_xdp_xmit = fun_xdp_xmit_frames,
+ .ndo_set_vf_mac = fun_set_vf_mac,
+ .ndo_set_vf_vlan = fun_set_vf_vlan,
+ .ndo_set_vf_rate = fun_set_vf_rate,
+ .ndo_get_vf_config = fun_get_vf_config,
+ .ndo_get_devlink_port = fun_get_devlink_port,
+};
+
+#define GSO_ENCAP_FLAGS (NETIF_F_GSO_GRE | NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define VLAN_FEAT (NETIF_F_SG | NETIF_F_HW_CSUM | TSO_FLAGS | \
+ GSO_ENCAP_FLAGS | NETIF_F_HIGHDMA)
+
+static void fun_dflt_rss_indir(struct funeth_priv *fp, unsigned int nrx)
+{
+ unsigned int i;
+
+ for (i = 0; i < fp->indir_table_nentries; i++)
+ fp->indir_table[i] = ethtool_rxfh_indir_default(i, nrx);
+}
+
+/* Reset the RSS indirection table to equal distribution across the current
+ * number of Rx queues. Called at init time and whenever the number of Rx
+ * queues changes subsequently. Note that this may also resize the indirection
+ * table.
+ */
+static void fun_reset_rss_indir(struct net_device *dev, unsigned int nrx)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+
+ if (!fp->rss_cfg)
+ return;
+
+ /* Set the table size to the max possible that allows an equal number
+ * of occurrences of each CQ.
+ */
+ fp->indir_table_nentries = rounddown(FUN_ETH_RSS_MAX_INDIR_ENT, nrx);
+ fun_dflt_rss_indir(fp, nrx);
+}
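+
+/* Worked example (maximum size illustrative): with a 64-entry table maximum
+ * and nrx = 6, rounddown(64, 6) = 60 entries, so each of the 6 queues
+ * appears exactly 10 times.
+ */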
+
+/* Update the RSS LUT to contain only queues in [0, nrx). Normally this will
+ * update the LUT to an equal distribution among nrx queues. If @only_if_needed
+ * is set, the LUT is left unchanged if it already references no queues
+ * >= nrx.
+ */
+static int fun_rss_set_qnum(struct net_device *dev, unsigned int nrx,
+ bool only_if_needed)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ u32 old_lut[FUN_ETH_RSS_MAX_INDIR_ENT];
+ unsigned int i, oldsz;
+ int err;
+
+ if (!fp->rss_cfg)
+ return 0;
+
+ if (only_if_needed) {
+ for (i = 0; i < fp->indir_table_nentries; i++)
+ if (fp->indir_table[i] >= nrx)
+ break;
+
+ if (i >= fp->indir_table_nentries)
+ return 0;
+ }
+
+ memcpy(old_lut, fp->indir_table, sizeof(old_lut));
+ oldsz = fp->indir_table_nentries;
+ fun_reset_rss_indir(dev, nrx);
+
+ err = fun_config_rss(dev, fp->hash_algo, fp->rss_key,
+ fp->indir_table, FUN_ADMIN_SUBOP_MODIFY);
+ if (!err)
+ return 0;
+
+ memcpy(fp->indir_table, old_lut, sizeof(old_lut));
+ fp->indir_table_nentries = oldsz;
+ return err;
+}
+
+/* Allocate the DMA area for the RSS configuration commands to the device, and
+ * initialize the hash, hash key, indirection table size and its entries to
+ * their defaults. The indirection table defaults to equal distribution across
+ * the Rx queues.
+ */
+static int fun_init_rss(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ size_t size = sizeof(fp->rss_key) + sizeof(fp->indir_table);
+
+ fp->rss_hw_id = FUN_HCI_ID_INVALID;
+ if (!(fp->port_caps & FUN_PORT_CAP_OFFLOADS))
+ return 0;
+
+ fp->rss_cfg = dma_alloc_coherent(&fp->pdev->dev, size,
+ &fp->rss_dma_addr, GFP_KERNEL);
+ if (!fp->rss_cfg)
+ return -ENOMEM;
+
+ fp->hash_algo = FUN_ETH_RSS_ALG_TOEPLITZ;
+ netdev_rss_key_fill(fp->rss_key, sizeof(fp->rss_key));
+ fun_reset_rss_indir(dev, dev->real_num_rx_queues);
+ return 0;
+}
+
+static void fun_free_rss(struct funeth_priv *fp)
+{
+ if (fp->rss_cfg) {
+ dma_free_coherent(&fp->pdev->dev,
+ sizeof(fp->rss_key) + sizeof(fp->indir_table),
+ fp->rss_cfg, fp->rss_dma_addr);
+ fp->rss_cfg = NULL;
+ }
+}
+
+void fun_set_ring_count(struct net_device *netdev, unsigned int ntx,
+ unsigned int nrx)
+{
+ netif_set_real_num_tx_queues(netdev, ntx);
+ if (nrx != netdev->real_num_rx_queues) {
+ netif_set_real_num_rx_queues(netdev, nrx);
+ fun_reset_rss_indir(netdev, nrx);
+ }
+}
+
+static int fun_init_stats_area(struct funeth_priv *fp)
+{
+ unsigned int nstats;
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return 0;
+
+ nstats = PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_STATS_MAX +
+ PORT_MAC_FEC_STATS_MAX;
+
+ fp->stats = dma_alloc_coherent(&fp->pdev->dev, nstats * sizeof(u64),
+ &fp->stats_dma_addr, GFP_KERNEL);
+ if (!fp->stats)
+ return -ENOMEM;
+ return 0;
+}
+
+static void fun_free_stats_area(struct funeth_priv *fp)
+{
+ unsigned int nstats;
+
+ if (fp->stats) {
+		nstats = PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_STATS_MAX +
+			 PORT_MAC_FEC_STATS_MAX;
+ dma_free_coherent(&fp->pdev->dev, nstats * sizeof(u64),
+ fp->stats, fp->stats_dma_addr);
+ fp->stats = NULL;
+ }
+}
+
+static int fun_dl_port_register(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct devlink *dl = priv_to_devlink(fp->fdev);
+ struct devlink_port_attrs attrs = {};
+ unsigned int idx;
+
+ if (fp->port_caps & FUN_PORT_CAP_VPORT) {
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL;
+ idx = fp->lport;
+ } else {
+ idx = netdev->dev_port;
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.lanes = fp->lane_attrs & 7;
+ if (fp->lane_attrs & FUN_PORT_LANE_SPLIT) {
+ attrs.split = 1;
+ attrs.phys.port_number = fp->lport & ~3;
+ attrs.phys.split_subport_number = fp->lport & 3;
+ } else {
+ attrs.phys.port_number = fp->lport;
+ }
+ }
+
+ devlink_port_attrs_set(&fp->dl_port, &attrs);
+
+ return devlink_port_register(dl, &fp->dl_port, idx);
+}
+
+/* Determine the max Tx/Rx queues for a port. */
+static int fun_max_qs(struct fun_ethdev *ed, unsigned int *ntx,
+ unsigned int *nrx)
+{
+ int neth;
+
+ if (ed->num_ports > 1 || is_kdump_kernel()) {
+ *ntx = 1;
+ *nrx = 1;
+ return 0;
+ }
+
+ neth = fun_get_res_count(&ed->fdev, FUN_ADMIN_OP_ETH);
+ if (neth < 0)
+ return neth;
+
+ /* We determine the max number of queues based on the CPU
+ * cores, device interrupts and queues, RSS size, and device Tx flows.
+ *
+	 * - At least 1 Rx and 1 Tx queue.
+ * - At most 1 Rx/Tx queue per core.
+ * - Each Rx/Tx queue needs 1 SQ.
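+	 * E.g. (illustrative numbers only): with nsqs_per_port = 9, 16 online
+	 * CPUs and neth = 8 this yields min(9 - 1, 16) = 8 Tx and 8 Rx queues.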
+ */
+ *ntx = min(ed->nsqs_per_port - 1, num_online_cpus());
+ *nrx = *ntx;
+ if (*ntx > neth)
+ *ntx = neth;
+ if (*nrx > FUN_ETH_RSS_MAX_INDIR_ENT)
+ *nrx = FUN_ETH_RSS_MAX_INDIR_ENT;
+ return 0;
+}
+
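+/* Pick default Tx/Rx queue counts: cap each side at FUN_DFLT_QUEUES and the
+ * stack-provided maximums, then split the available SQs so the side with the
+ * smaller cap is sized first and the other side gets the remainder.
+ */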
+static void fun_queue_defaults(struct net_device *dev, unsigned int nsqs)
+{
+ unsigned int ntx, nrx;
+
+ ntx = min(dev->num_tx_queues, FUN_DFLT_QUEUES);
+ nrx = min(dev->num_rx_queues, FUN_DFLT_QUEUES);
+ if (ntx <= nrx) {
+ ntx = min(ntx, nsqs / 2);
+ nrx = min(nrx, nsqs - ntx);
+ } else {
+ nrx = min(nrx, nsqs / 2);
+ ntx = min(ntx, nsqs - nrx);
+ }
+
+ netif_set_real_num_tx_queues(dev, ntx);
+ netif_set_real_num_rx_queues(dev, nrx);
+}
+
+/* Replace the existing Rx/Tx/XDP queues with equal number of queues with
+ * different settings, e.g. depth. This is a disruptive replacement that
+ * temporarily shuts down the data path and should be limited to changes that
+ * can't be applied to live queues. The old queues are always discarded.
+ */
+int fun_replace_queues(struct net_device *dev, struct fun_qset *newqs,
+ struct netlink_ext_ack *extack)
+{
+ struct fun_qset oldqs = { .state = FUN_QSTATE_DESTROYED };
+ struct funeth_priv *fp = netdev_priv(dev);
+ int err;
+
+ newqs->nrxqs = dev->real_num_rx_queues;
+ newqs->ntxqs = dev->real_num_tx_queues;
+ newqs->nxdpqs = fp->num_xdpqs;
+ newqs->state = FUN_QSTATE_INIT_SW;
+ err = fun_alloc_rings(dev, newqs);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to allocate memory for new queues, keeping current settings");
+ return err;
+ }
+
+ fun_down(dev, &oldqs);
+
+ err = fun_up(dev, newqs);
+ if (!err)
+ return 0;
+
+	/* The new queues couldn't be installed. We do not retry with the old
+	 * queues as, from the device's perspective, they are identical to the
+	 * new queues and would fail the same way.
+	 */
+ newqs->state = FUN_QSTATE_DESTROYED;
+ fun_free_rings(dev, newqs);
+ NL_SET_ERR_MSG_MOD(extack, "Unable to restore the data path with the new queues.");
+ return err;
+}
+
+/* Change the number of Rx/Tx queues of a device while it is up. This is done
+ * by incrementally adding/removing queues to meet the new requirements while
+ * handling ongoing traffic.
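+ * Shrinking happens before the new queue set is published and growing after,
+ * so the data path never sees a queue index without a live queue behind it.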
+ */
+int fun_change_num_queues(struct net_device *dev, unsigned int ntx,
+ unsigned int nrx)
+{
+ unsigned int keep_tx = min(dev->real_num_tx_queues, ntx);
+ unsigned int keep_rx = min(dev->real_num_rx_queues, nrx);
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_qset oldqs = {
+ .rxqs = rtnl_dereference(fp->rxqs),
+ .txqs = fp->txqs,
+ .nrxqs = dev->real_num_rx_queues,
+ .ntxqs = dev->real_num_tx_queues,
+ .rxq_start = keep_rx,
+ .txq_start = keep_tx,
+ .state = FUN_QSTATE_DESTROYED
+ };
+ struct fun_qset newqs = {
+ .nrxqs = nrx,
+ .ntxqs = ntx,
+ .rxq_start = keep_rx,
+ .txq_start = keep_tx,
+ .cq_depth = fp->cq_depth,
+ .rq_depth = fp->rq_depth,
+ .sq_depth = fp->sq_depth,
+ .state = FUN_QSTATE_INIT_FULL
+ };
+ int i, err;
+
+ err = fun_alloc_rings(dev, &newqs);
+ if (err)
+ goto free_irqs;
+
+ err = fun_enable_irqs(dev); /* of any newly added queues */
+ if (err)
+ goto free_rings;
+
+ /* copy the queues we are keeping to the new set */
+ memcpy(newqs.rxqs, oldqs.rxqs, keep_rx * sizeof(*oldqs.rxqs));
+ memcpy(newqs.txqs, fp->txqs, keep_tx * sizeof(*fp->txqs));
+
+ if (nrx < dev->real_num_rx_queues) {
+ err = fun_rss_set_qnum(dev, nrx, true);
+ if (err)
+ goto disable_tx_irqs;
+
+ for (i = nrx; i < dev->real_num_rx_queues; i++)
+ fun_disable_one_irq(container_of(oldqs.rxqs[i]->napi,
+ struct fun_irq, napi));
+
+ netif_set_real_num_rx_queues(dev, nrx);
+ }
+
+ if (ntx < dev->real_num_tx_queues)
+ netif_set_real_num_tx_queues(dev, ntx);
+
+ rcu_assign_pointer(fp->rxqs, newqs.rxqs);
+ fp->txqs = newqs.txqs;
+ synchronize_net();
+
+ if (ntx > dev->real_num_tx_queues)
+ netif_set_real_num_tx_queues(dev, ntx);
+
+ if (nrx > dev->real_num_rx_queues) {
+ netif_set_real_num_rx_queues(dev, nrx);
+ fun_rss_set_qnum(dev, nrx, false);
+ }
+
+ /* disable interrupts of any excess Tx queues */
+ for (i = keep_tx; i < oldqs.ntxqs; i++)
+ fun_disable_one_irq(oldqs.txqs[i]->irq);
+
+ fun_free_rings(dev, &oldqs);
+ fun_prune_queue_irqs(dev);
+ return 0;
+
+disable_tx_irqs:
+ for (i = oldqs.ntxqs; i < ntx; i++)
+ fun_disable_one_irq(newqs.txqs[i]->irq);
+free_rings:
+ newqs.state = FUN_QSTATE_DESTROYED;
+ fun_free_rings(dev, &newqs);
+free_irqs:
+ fun_prune_queue_irqs(dev);
+ return err;
+}
+
+static int fun_create_netdev(struct fun_ethdev *ed, unsigned int portid)
+{
+ struct fun_dev *fdev = &ed->fdev;
+ struct net_device *netdev;
+ struct funeth_priv *fp;
+ unsigned int ntx, nrx;
+ int rc;
+
+ rc = fun_max_qs(ed, &ntx, &nrx);
+ if (rc)
+ return rc;
+
+ netdev = alloc_etherdev_mqs(sizeof(*fp), ntx, nrx);
+ if (!netdev) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ netdev->dev_port = portid;
+ fun_queue_defaults(netdev, ed->nsqs_per_port);
+
+ fp = netdev_priv(netdev);
+ fp->fdev = fdev;
+ fp->pdev = to_pci_dev(fdev->dev);
+ fp->netdev = netdev;
+ xa_init(&fp->irqs);
+ fp->rx_irq_ofst = ntx;
+ seqcount_init(&fp->link_seq);
+
+ fp->lport = INVALID_LPORT;
+ rc = fun_port_create(netdev);
+ if (rc)
+ goto free_netdev;
+
+ /* bind port to admin CQ for async events */
+ rc = fun_bind(fdev, FUN_ADMIN_BIND_TYPE_PORT, portid,
+ FUN_ADMIN_BIND_TYPE_EPCQ, 0);
+ if (rc)
+ goto destroy_port;
+
+ rc = fun_get_port_attributes(netdev);
+ if (rc)
+ goto destroy_port;
+
+ rc = fun_init_rss(netdev);
+ if (rc)
+ goto destroy_port;
+
+ rc = fun_init_stats_area(fp);
+ if (rc)
+ goto free_rss;
+
+ SET_NETDEV_DEV(netdev, fdev->dev);
+ netdev->netdev_ops = &fun_netdev_ops;
+
+ netdev->hw_features = NETIF_F_SG | NETIF_F_RXHASH | NETIF_F_RXCSUM;
+ if (fp->port_caps & FUN_PORT_CAP_OFFLOADS)
+ netdev->hw_features |= NETIF_F_HW_CSUM | TSO_FLAGS;
+ if (fp->port_caps & FUN_PORT_CAP_ENCAP_OFFLOADS)
+ netdev->hw_features |= GSO_ENCAP_FLAGS;
+
+ netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA;
+ netdev->vlan_features = netdev->features & VLAN_FEAT;
+ netdev->mpls_features = netdev->vlan_features;
+ netdev->hw_enc_features = netdev->hw_features;
+
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = FUN_MAX_MTU;
+
+ fun_set_ethtool_ops(netdev);
+
+ /* configurable parameters */
+ fp->sq_depth = min(SQ_DEPTH, fdev->q_depth);
+ fp->cq_depth = min(CQ_DEPTH, fdev->q_depth);
+ fp->rq_depth = min_t(unsigned int, RQ_DEPTH, fdev->q_depth);
+ fp->rx_coal_usec = CQ_INTCOAL_USEC;
+ fp->rx_coal_count = CQ_INTCOAL_NPKT;
+ fp->tx_coal_usec = SQ_INTCOAL_USEC;
+ fp->tx_coal_count = SQ_INTCOAL_NPKT;
+ fp->cq_irq_db = FUN_IRQ_CQ_DB(fp->rx_coal_usec, fp->rx_coal_count);
+
+ rc = fun_dl_port_register(netdev);
+ if (rc)
+ goto free_stats;
+
+ fp->ktls_id = FUN_HCI_ID_INVALID;
+ fun_ktls_init(netdev); /* optional, failure OK */
+
+ netif_carrier_off(netdev);
+ ed->netdevs[portid] = netdev;
+ rc = register_netdev(netdev);
+ if (rc)
+ goto unreg_devlink;
+
+ if (fp->dl_port.devlink)
+ devlink_port_type_eth_set(&fp->dl_port, netdev);
+
+ return 0;
+
+unreg_devlink:
+ ed->netdevs[portid] = NULL;
+ fun_ktls_cleanup(fp);
+ if (fp->dl_port.devlink)
+ devlink_port_unregister(&fp->dl_port);
+free_stats:
+ fun_free_stats_area(fp);
+free_rss:
+ fun_free_rss(fp);
+destroy_port:
+ fun_port_destroy(netdev);
+free_netdev:
+ free_netdev(netdev);
+done:
+ dev_err(fdev->dev, "couldn't allocate port %u, error %d", portid, rc);
+ return rc;
+}
+
+static void fun_destroy_netdev(struct net_device *netdev)
+{
+ struct funeth_priv *fp;
+
+ fp = netdev_priv(netdev);
+ if (fp->dl_port.devlink) {
+ devlink_port_type_clear(&fp->dl_port);
+ devlink_port_unregister(&fp->dl_port);
+ }
+ unregister_netdev(netdev);
+ fun_ktls_cleanup(fp);
+ fun_free_stats_area(fp);
+ fun_free_rss(fp);
+ fun_port_destroy(netdev);
+ free_netdev(netdev);
+}
+
+static int fun_create_ports(struct fun_ethdev *ed, unsigned int nports)
+{
+ struct fun_dev *fd = &ed->fdev;
+ int i, rc;
+
+ /* The admin queue takes 1 IRQ and 2 SQs. */
+ ed->nsqs_per_port = min(fd->num_irqs - 1,
+ fd->kern_end_qid - 2) / nports;
+ if (ed->nsqs_per_port < 2) {
+ dev_err(fd->dev, "Too few SQs for %u ports", nports);
+ return -EINVAL;
+ }
+
+ ed->netdevs = kcalloc(nports, sizeof(*ed->netdevs), GFP_KERNEL);
+ if (!ed->netdevs)
+ return -ENOMEM;
+
+ ed->num_ports = nports;
+ for (i = 0; i < nports; i++) {
+ rc = fun_create_netdev(ed, i);
+ if (rc)
+ goto free_netdevs;
+ }
+
+ return 0;
+
+free_netdevs:
+ while (i)
+ fun_destroy_netdev(ed->netdevs[--i]);
+ kfree(ed->netdevs);
+ ed->netdevs = NULL;
+ ed->num_ports = 0;
+ return rc;
+}
+
+static void fun_destroy_ports(struct fun_ethdev *ed)
+{
+ unsigned int i;
+
+ for (i = 0; i < ed->num_ports; i++)
+ fun_destroy_netdev(ed->netdevs[i]);
+
+ kfree(ed->netdevs);
+ ed->netdevs = NULL;
+ ed->num_ports = 0;
+}
+
+static void fun_update_link_state(const struct fun_ethdev *ed,
+ const struct fun_admin_port_notif *notif)
+{
+ unsigned int port_idx = be16_to_cpu(notif->id);
+ struct net_device *netdev;
+ struct funeth_priv *fp;
+
+ if (port_idx >= ed->num_ports)
+ return;
+
+ netdev = ed->netdevs[port_idx];
+ fp = netdev_priv(netdev);
+
+ write_seqcount_begin(&fp->link_seq);
+	fp->link_speed = be32_to_cpu(notif->speed) * 10;  /* units of 10 Mbps -> Mbps */
+ fp->active_fc = notif->flow_ctrl;
+ fp->active_fec = notif->fec;
+ fp->xcvr_type = notif->xcvr_type;
+ fp->link_down_reason = notif->link_down_reason;
+ fp->lp_advertising = be64_to_cpu(notif->lp_advertising);
+
+ if ((notif->link_state | notif->missed_events) & FUN_PORT_FLAG_MAC_DOWN)
+ netif_carrier_off(netdev);
+ if (notif->link_state & FUN_PORT_FLAG_MAC_UP)
+ netif_carrier_on(netdev);
+
+ write_seqcount_end(&fp->link_seq);
+ fun_report_link(netdev);
+}
+
+/* handler for async events delivered through the admin CQ */
+static void fun_event_cb(struct fun_dev *fdev, void *entry)
+{
+ u8 op = ((struct fun_admin_rsp_common *)entry)->op;
+
+ if (op == FUN_ADMIN_OP_PORT) {
+ const struct fun_admin_port_notif *rsp = entry;
+
+ if (rsp->subop == FUN_ADMIN_SUBOP_NOTIFY) {
+ fun_update_link_state(to_fun_ethdev(fdev), rsp);
+ } else if (rsp->subop == FUN_ADMIN_SUBOP_RES_COUNT) {
+ const struct fun_admin_res_count_rsp *r = entry;
+
+ if (r->count.data)
+ set_bit(FUN_SERV_RES_CHANGE, &fdev->service_flags);
+ else
+ set_bit(FUN_SERV_DEL_PORTS, &fdev->service_flags);
+ fun_serv_sched(fdev);
+ } else {
+ dev_info(fdev->dev, "adminq event unexpected op %u subop %u",
+ op, rsp->subop);
+ }
+ } else {
+ dev_info(fdev->dev, "adminq event unexpected op %u", op);
+ }
+}
+
+/* handler for pending work managed by the service task */
+static void fun_service_cb(struct fun_dev *fdev)
+{
+ struct fun_ethdev *ed = to_fun_ethdev(fdev);
+ int rc;
+
+ if (test_and_clear_bit(FUN_SERV_DEL_PORTS, &fdev->service_flags))
+ fun_destroy_ports(ed);
+
+ if (!test_and_clear_bit(FUN_SERV_RES_CHANGE, &fdev->service_flags))
+ return;
+
+ rc = fun_get_res_count(fdev, FUN_ADMIN_OP_PORT);
+ if (rc < 0 || rc == ed->num_ports)
+ return;
+
+ if (ed->num_ports)
+ fun_destroy_ports(ed);
+ if (rc)
+ fun_create_ports(ed, rc);
+}
+
+static int funeth_sriov_configure(struct pci_dev *pdev, int nvfs)
+{
+ struct fun_dev *fdev = pci_get_drvdata(pdev);
+ struct fun_ethdev *ed = to_fun_ethdev(fdev);
+ int rc;
+
+ if (nvfs == 0) {
+ if (pci_vfs_assigned(pdev)) {
+ dev_warn(&pdev->dev,
+ "Cannot disable SR-IOV while VFs are assigned\n");
+ return -EPERM;
+ }
+
+ mutex_lock(&ed->state_mutex);
+ fun_free_vports(ed);
+ mutex_unlock(&ed->state_mutex);
+ pci_disable_sriov(pdev);
+ return 0;
+ }
+
+ rc = pci_enable_sriov(pdev, nvfs);
+ if (rc)
+ return rc;
+
+ mutex_lock(&ed->state_mutex);
+ rc = fun_init_vports(ed, nvfs);
+ mutex_unlock(&ed->state_mutex);
+ if (rc) {
+ pci_disable_sriov(pdev);
+ return rc;
+ }
+
+ return nvfs;
+}
+
+static int funeth_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct fun_dev_params aqreq = {
+ .cqe_size_log2 = ilog2(ADMIN_CQE_SIZE),
+ .sqe_size_log2 = ilog2(ADMIN_SQE_SIZE),
+ .cq_depth = ADMIN_CQ_DEPTH,
+ .sq_depth = ADMIN_SQ_DEPTH,
+ .rq_depth = ADMIN_RQ_DEPTH,
+ .min_msix = 2, /* 1 Rx + 1 Tx */
+ .event_cb = fun_event_cb,
+ .serv_cb = fun_service_cb,
+ };
+ struct devlink *devlink;
+ struct fun_ethdev *ed;
+ struct fun_dev *fdev;
+ int rc;
+
+ devlink = fun_devlink_alloc(&pdev->dev);
+ if (!devlink) {
+ dev_err(&pdev->dev, "devlink alloc failed\n");
+ return -ENOMEM;
+ }
+
+ ed = devlink_priv(devlink);
+ mutex_init(&ed->state_mutex);
+
+ fdev = &ed->fdev;
+ rc = fun_dev_enable(fdev, pdev, &aqreq, KBUILD_MODNAME);
+ if (rc)
+ goto free_devlink;
+
+ rc = fun_get_res_count(fdev, FUN_ADMIN_OP_PORT);
+ if (rc > 0)
+ rc = fun_create_ports(ed, rc);
+ if (rc < 0)
+ goto disable_dev;
+
+ fun_serv_restart(fdev);
+ fun_devlink_register(devlink);
+ return 0;
+
+disable_dev:
+ fun_dev_disable(fdev);
+free_devlink:
+ mutex_destroy(&ed->state_mutex);
+ fun_devlink_free(devlink);
+ return rc;
+}
+
+static void funeth_remove(struct pci_dev *pdev)
+{
+ struct fun_dev *fdev = pci_get_drvdata(pdev);
+ struct devlink *devlink;
+ struct fun_ethdev *ed;
+
+ ed = to_fun_ethdev(fdev);
+ devlink = priv_to_devlink(ed);
+ fun_devlink_unregister(devlink);
+
+#ifdef CONFIG_PCI_IOV
+ funeth_sriov_configure(pdev, 0);
+#endif
+
+ fun_serv_stop(fdev);
+ fun_destroy_ports(ed);
+ fun_dev_disable(fdev);
+ mutex_destroy(&ed->state_mutex);
+
+ fun_devlink_free(devlink);
+}
+
+static struct pci_driver funeth_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = funeth_id_table,
+ .probe = funeth_probe,
+ .remove = funeth_remove,
+ .shutdown = funeth_remove,
+ .sriov_configure = funeth_sriov_configure,
+};
+
+module_pci_driver(funeth_driver);
+
+MODULE_AUTHOR("Dimitris Michailidis <dmichail@fungible.com>");
+MODULE_DESCRIPTION("Fungible Ethernet Network Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DEVICE_TABLE(pci, funeth_id_table);
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_rx.c b/drivers/net/ethernet/fungible/funeth/funeth_rx.c
new file mode 100644
index 000000000000..0f6a549b9f67
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_rx.c
@@ -0,0 +1,826 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/bpf_trace.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/filter.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include "funeth_txrx.h"
+#include "funeth.h"
+#include "fun_queue.h"
+
+#define CREATE_TRACE_POINTS
+#include "funeth_trace.h"
+
+/* Given the device's max supported MTU and pages of at least 4KB a packet can
+ * be scattered into at most 4 buffers.
+ */
+#define RX_MAX_FRAGS 4
+
+/* Per packet headroom in non-XDP mode. Present only for 1-frag packets. */
+#define FUN_RX_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
+
+/* We try to reuse pages for our buffers. To avoid frequent page ref writes we
+ * take EXTRA_PAGE_REFS references at once and then hand them out one per packet
+ * occupying the buffer.
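+ * When the stash for a page drops below MIN_PAGE_REFS, refresh_refs() tops it
+ * up with another EXTRA_PAGE_REFS batch.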
+ */
+#define EXTRA_PAGE_REFS 1000000
+#define MIN_PAGE_REFS 1000
+
+enum {
+ FUN_XDP_FLUSH_REDIR = 1,
+ FUN_XDP_FLUSH_TX = 2,
+};
+
+/* See if a page is running low on refs we are holding and if so take more. */
+static void refresh_refs(struct funeth_rxbuf *buf)
+{
+ if (unlikely(buf->pg_refs < MIN_PAGE_REFS)) {
+ buf->pg_refs += EXTRA_PAGE_REFS;
+ page_ref_add(buf->page, EXTRA_PAGE_REFS);
+ }
+}
+
+/* Offer a buffer to the Rx buffer cache. The cache will hold the buffer if its
+ * page is worth retaining and there's room for it. Otherwise the page is
+ * unmapped and our references released.
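+ * Pages from a remote NUMA node (and PF_MEMALLOC pages, whose recorded node
+ * is -1) are never cached.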
+ */
+static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf)
+{
+ struct funeth_rx_cache *c = &q->cache;
+
+ if (c->prod_cnt - c->cons_cnt <= c->mask && buf->node == numa_mem_id()) {
+ c->bufs[c->prod_cnt & c->mask] = *buf;
+ c->prod_cnt++;
+ } else {
+ dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ __page_frag_cache_drain(buf->page, buf->pg_refs);
+ }
+}
+
+/* Get a page from the Rx buffer cache. We only consider the next available
+ * page and return it if we own all its references.
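+ * Owning all references (page_ref_count == pg_refs) means the stack has
+ * dropped every fragment carved from the page, so it is safe to reuse.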
+ */
+static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb)
+{
+ struct funeth_rx_cache *c = &q->cache;
+ struct funeth_rxbuf *buf;
+
+ if (c->prod_cnt == c->cons_cnt)
+ return false; /* empty cache */
+
+ buf = &c->bufs[c->cons_cnt & c->mask];
+ if (page_ref_count(buf->page) == buf->pg_refs) {
+ dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ *rb = *buf;
+ buf->page = NULL;
+ refresh_refs(rb);
+ c->cons_cnt++;
+ return true;
+ }
+
+ /* Page can't be reused. If the cache is full drop this page. */
+ if (c->prod_cnt - c->cons_cnt > c->mask) {
+ dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ __page_frag_cache_drain(buf->page, buf->pg_refs);
+ buf->page = NULL;
+ c->cons_cnt++;
+ }
+ return false;
+}
+
+/* Allocate and DMA-map a page for receive. */
+static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb,
+ int node, gfp_t gfp)
+{
+ struct page *p;
+
+ if (cache_get(q, rb))
+ return 0;
+
+ p = __alloc_pages_node(node, gfp | __GFP_NOWARN, 0);
+ if (unlikely(!p))
+ return -ENOMEM;
+
+ rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(q->dma_dev, rb->dma_addr))) {
+ FUN_QSTAT_INC(q, rx_map_err);
+ __free_page(p);
+ return -ENOMEM;
+ }
+
+ FUN_QSTAT_INC(q, rx_page_alloc);
+
+ rb->page = p;
+ rb->pg_refs = 1;
+ refresh_refs(rb);
+ rb->node = page_is_pfmemalloc(p) ? -1 : page_to_nid(p);
+ return 0;
+}
+
+static void funeth_free_page(struct funeth_rxq *q, struct funeth_rxbuf *rb)
+{
+ if (rb->page) {
+ dma_unmap_page(q->dma_dev, rb->dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ __page_frag_cache_drain(rb->page, rb->pg_refs);
+ rb->page = NULL;
+ }
+}
+
+/* Run the XDP program assigned to an Rx queue.
+ * Return %NULL if the buffer is consumed, or the virtual address of the packet
+ * to turn into an skb.
+ */
+static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
+ int ref_ok, struct funeth_txq *xdp_q)
+{
+ struct bpf_prog *xdp_prog;
+ struct xdp_buff xdp;
+ u32 act;
+
+ /* VA includes the headroom, frag size includes headroom + tailroom */
+ xdp_init_buff(&xdp, ALIGN(skb_frag_size(frags), FUN_EPRQ_PKT_ALIGN),
+ &q->xdp_rxq);
+ xdp_prepare_buff(&xdp, buf_va, FUN_XDP_HEADROOM, skb_frag_size(frags) -
+ (FUN_RX_TAILROOM + FUN_XDP_HEADROOM), false);
+
+ xdp_prog = READ_ONCE(q->xdp_prog);
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ /* remove headroom, which may not be FUN_XDP_HEADROOM now */
+ skb_frag_size_set(frags, xdp.data_end - xdp.data);
+ skb_frag_off_add(frags, xdp.data - xdp.data_hard_start);
+ goto pass;
+ case XDP_TX:
+ if (unlikely(!ref_ok))
+ goto pass;
+ if (!fun_xdp_tx(xdp_q, xdp.data, xdp.data_end - xdp.data))
+ goto xdp_error;
+ FUN_QSTAT_INC(q, xdp_tx);
+ q->xdp_flush |= FUN_XDP_FLUSH_TX;
+ break;
+ case XDP_REDIRECT:
+ if (unlikely(!ref_ok))
+ goto pass;
+ if (unlikely(xdp_do_redirect(q->netdev, &xdp, xdp_prog)))
+ goto xdp_error;
+ FUN_QSTAT_INC(q, xdp_redir);
+ q->xdp_flush |= FUN_XDP_FLUSH_REDIR;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(q->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(q->netdev, xdp_prog, act);
+xdp_error:
+ q->cur_buf->pg_refs++; /* return frags' page reference */
+ FUN_QSTAT_INC(q, xdp_err);
+ break;
+ case XDP_DROP:
+ q->cur_buf->pg_refs++;
+ FUN_QSTAT_INC(q, xdp_drops);
+ break;
+ }
+ return NULL;
+
+pass:
+ return xdp.data;
+}
+
+/* A CQE contains a fixed completion structure along with optional metadata and
+ * even packet data. Given the start address of a CQE return the start of the
+ * contained fixed structure, which lies at the end.
+ */
+static const void *cqe_to_info(const void *cqe)
+{
+ return cqe + FUNETH_CQE_INFO_OFFSET;
+}
+
+/* The inverse of cqe_to_info(). */
+static const void *info_to_cqe(const void *cqe_info)
+{
+ return cqe_info - FUNETH_CQE_INFO_OFFSET;
+}
+
+/* Return the type of hash provided by the device based on the L3 and L4
+ * protocols it parsed for the packet.
+ */
+static enum pkt_hash_types cqe_to_pkt_hash_type(u16 pkt_parse)
+{
+ static const enum pkt_hash_types htype_map[] = {
+ PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3,
+ PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L4,
+ PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3,
+ PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3
+ };
+ u16 key;
+
+ /* Build the key from the TCP/UDP and IP/IPv6 bits */
+ key = ((pkt_parse >> FUN_ETH_RX_CV_OL4_PROT_S) & 6) |
+ ((pkt_parse >> (FUN_ETH_RX_CV_OL3_PROT_S + 1)) & 1);
+
+ return htype_map[key];
+}
+
+/* Each received packet can be scattered across several Rx buffers or can
+ * share a buffer with previously received packets depending on the buffer
+ * and packet sizes and the room available in the most recently used buffer.
+ *
+ * The rules are:
+ * - If the buffer at the head of an RQ has not been used it gets (part of) the
+ * next incoming packet.
+ * - Otherwise, if the packet fully fits in the buffer's remaining space the
+ * packet is written there.
+ * - Otherwise, the packet goes into the next Rx buffer.
+ *
+ * This function returns the Rx buffer for a packet or fragment thereof of the
+ * given length. If it isn't @buf it either recycles or frees that buffer
+ * before advancing the queue to the next buffer.
+ *
+ * If called repeatedly with the remaining length of a packet it will walk
+ * through all the buffers containing the packet.
+ */
+static struct funeth_rxbuf *
+get_buf(struct funeth_rxq *q, struct funeth_rxbuf *buf, unsigned int len)
+{
+ if (q->buf_offset + len <= PAGE_SIZE || !q->buf_offset)
+ return buf; /* @buf holds (part of) the packet */
+
+ /* The packet occupies part of the next buffer. Move there after
+ * replenishing the current buffer slot either with the spare page or
+ * by reusing the slot's existing page. Note that if a spare page isn't
+ * available and the current packet occupies @buf it is a multi-frag
+ * packet that will be dropped leaving @buf available for reuse.
+ */
+ if ((page_ref_count(buf->page) == buf->pg_refs &&
+ buf->node == numa_mem_id()) || !q->spare_buf.page) {
+ dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ refresh_refs(buf);
+ } else {
+ cache_offer(q, buf);
+ *buf = q->spare_buf;
+ q->spare_buf.page = NULL;
+ q->rqes[q->rq_cons & q->rq_mask] =
+ FUN_EPRQ_RQBUF_INIT(buf->dma_addr);
+ }
+ q->buf_offset = 0;
+ q->rq_cons++;
+ return &q->bufs[q->rq_cons & q->rq_mask];
+}
+
+/* Gather the page fragments making up the first Rx packet on @q. Its total
+ * length @tot_len includes optional head- and tail-rooms.
+ *
+ * Return 0 if the device retains ownership of at least some of the pages.
+ * In this case the caller may only copy the packet.
+ *
+ * A non-zero return value gives the caller permission to use references to the
+ * pages, e.g., attach them to skbs. Additionally, if the value is <0 at least
+ * one of the pages is PF_MEMALLOC.
+ *
+ * Regardless of outcome the caller is granted a reference to each of the pages.
+ */
+static int fun_gather_pkt(struct funeth_rxq *q, unsigned int tot_len,
+ skb_frag_t *frags)
+{
+ struct funeth_rxbuf *buf = q->cur_buf;
+ unsigned int frag_len;
+ int ref_ok = 1;
+
+ for (;;) {
+ buf = get_buf(q, buf, tot_len);
+
+ /* We always keep the RQ full of buffers so before we can give
+ * one of our pages to the stack we require that we can obtain
+ * a replacement page. If we can't the packet will either be
+ * copied or dropped so we can retain ownership of the page and
+ * reuse it.
+ */
+ if (!q->spare_buf.page &&
+ funeth_alloc_page(q, &q->spare_buf, numa_mem_id(),
+ GFP_ATOMIC | __GFP_MEMALLOC))
+ ref_ok = 0;
+
+ frag_len = min_t(unsigned int, tot_len,
+ PAGE_SIZE - q->buf_offset);
+ dma_sync_single_for_cpu(q->dma_dev,
+ buf->dma_addr + q->buf_offset,
+ frag_len, DMA_FROM_DEVICE);
+ buf->pg_refs--;
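+		/* buf->node is -1 for PF_MEMALLOC pages, turning ref_ok
+		 * negative to flag that condition to the caller.
+		 */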
+ if (ref_ok)
+ ref_ok |= buf->node;
+
+ __skb_frag_set_page(frags, buf->page);
+ skb_frag_off_set(frags, q->buf_offset);
+ skb_frag_size_set(frags++, frag_len);
+
+ tot_len -= frag_len;
+ if (!tot_len)
+ break;
+
+ q->buf_offset = PAGE_SIZE;
+ }
+ q->buf_offset = ALIGN(q->buf_offset + frag_len, FUN_EPRQ_PKT_ALIGN);
+ q->cur_buf = buf;
+ return ref_ok;
+}
+
+static bool rx_hwtstamp_enabled(const struct net_device *dev)
+{
+ const struct funeth_priv *d = netdev_priv(dev);
+
+ return d->hwtstamp_cfg.rx_filter == HWTSTAMP_FILTER_ALL;
+}
+
+/* Advance the CQ pointers and phase tag to the next CQE. */
+static void advance_cq(struct funeth_rxq *q)
+{
+ if (unlikely(q->cq_head == q->cq_mask)) {
+ q->cq_head = 0;
+ q->phase ^= 1;
+ q->next_cqe_info = cqe_to_info(q->cqes);
+ } else {
+ q->cq_head++;
+ q->next_cqe_info += FUNETH_CQE_SIZE;
+ }
+ prefetch(q->next_cqe_info);
+}
+
+/* Process the packet represented by the head CQE of @q. Gather the packet's
+ * fragments, run it through the optional XDP program, and if needed construct
+ * an skb and pass it to the stack.
+ */
+static void fun_handle_cqe_pkt(struct funeth_rxq *q, struct funeth_txq *xdp_q)
+{
+ const struct fun_eth_cqe *rxreq = info_to_cqe(q->next_cqe_info);
+ unsigned int i, tot_len, pkt_len = be32_to_cpu(rxreq->pkt_len);
+ struct net_device *ndev = q->netdev;
+ skb_frag_t frags[RX_MAX_FRAGS];
+ struct skb_shared_info *si;
+ unsigned int headroom;
+ gro_result_t gro_res;
+ struct sk_buff *skb;
+ int ref_ok;
+ void *va;
+ u16 cv;
+
+ u64_stats_update_begin(&q->syncp);
+ q->stats.rx_pkts++;
+ q->stats.rx_bytes += pkt_len;
+ u64_stats_update_end(&q->syncp);
+
+ advance_cq(q);
+
+ /* account for head- and tail-room, present only for 1-buffer packets */
+ tot_len = pkt_len;
+ headroom = be16_to_cpu(rxreq->headroom);
+ if (likely(headroom))
+ tot_len += FUN_RX_TAILROOM + headroom;
+
+ ref_ok = fun_gather_pkt(q, tot_len, frags);
+ va = skb_frag_address(frags);
+ if (xdp_q && headroom == FUN_XDP_HEADROOM) {
+ va = fun_run_xdp(q, frags, va, ref_ok, xdp_q);
+ if (!va)
+ return;
+ headroom = 0; /* XDP_PASS trims it */
+ }
+ if (unlikely(!ref_ok))
+ goto no_mem;
+
+ if (likely(headroom)) {
+ /* headroom is either FUN_RX_HEADROOM or FUN_XDP_HEADROOM */
+ prefetch(va + headroom);
+ skb = napi_build_skb(va, ALIGN(tot_len, FUN_EPRQ_PKT_ALIGN));
+ if (unlikely(!skb))
+ goto no_mem;
+
+ skb_reserve(skb, headroom);
+ __skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ } else {
+ prefetch(va);
+ skb = napi_get_frags(q->napi);
+ if (unlikely(!skb))
+ goto no_mem;
+
+ if (ref_ok < 0)
+ skb->pfmemalloc = 1;
+
+ si = skb_shinfo(skb);
+ si->nr_frags = rxreq->nsgl;
+ for (i = 0; i < si->nr_frags; i++)
+ si->frags[i] = frags[i];
+
+ skb->len = pkt_len;
+ skb->data_len = pkt_len;
+ skb->truesize += round_up(pkt_len, FUN_EPRQ_PKT_ALIGN);
+ }
+
+ skb_record_rx_queue(skb, q->qidx);
+ cv = be16_to_cpu(rxreq->pkt_cv);
+ if (likely((q->netdev->features & NETIF_F_RXHASH) && rxreq->hash))
+ skb_set_hash(skb, be32_to_cpu(rxreq->hash),
+ cqe_to_pkt_hash_type(cv));
+ if (likely((q->netdev->features & NETIF_F_RXCSUM) && rxreq->csum)) {
+ FUN_QSTAT_INC(q, rx_cso);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = be16_to_cpu(rxreq->csum) - 1;
+ }
+ if (unlikely(rx_hwtstamp_enabled(q->netdev)))
+ skb_hwtstamps(skb)->hwtstamp = be64_to_cpu(rxreq->timestamp);
+
+ trace_funeth_rx(q, rxreq->nsgl, pkt_len, skb->hash, cv);
+
+ gro_res = skb->data_len ? napi_gro_frags(q->napi) :
+ napi_gro_receive(q->napi, skb);
+ if (gro_res == GRO_MERGED || gro_res == GRO_MERGED_FREE)
+ FUN_QSTAT_INC(q, gro_merged);
+ else if (gro_res == GRO_HELD)
+ FUN_QSTAT_INC(q, gro_pkts);
+ return;
+
+no_mem:
+ FUN_QSTAT_INC(q, rx_mem_drops);
+
+ /* Release the references we've been granted for the frag pages.
+ * We return the ref of the last frag and free the rest.
+ */
+ q->cur_buf->pg_refs++;
+ for (i = 0; i < rxreq->nsgl - 1; i++)
+ __free_page(skb_frag_page(frags + i));
+}
+
+/* Return 0 if the phase tag of the CQE at the CQ's head matches expectations
+ * indicating the CQE is new.
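+ * The queue's phase bit flips each time the CQ wraps (see advance_cq()), so
+ * stale CQEs from the previous pass carry the opposite tag.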
+ */
+static u16 cqe_phase_mismatch(const struct fun_cqe_info *ci, u16 phase)
+{
+ u16 sf_p = be16_to_cpu(ci->sf_p);
+
+ return (sf_p & 1) ^ phase;
+}
+
+/* Walk through a CQ identifying and processing fresh CQEs up to the given
+ * budget. Return the remaining budget.
+ */
+static int fun_process_cqes(struct funeth_rxq *q, int budget)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+ struct funeth_txq **xdpqs, *xdp_q = NULL;
+
+ xdpqs = rcu_dereference_bh(fp->xdpqs);
+ if (xdpqs)
+ xdp_q = xdpqs[smp_processor_id()];
+
+ while (budget && !cqe_phase_mismatch(q->next_cqe_info, q->phase)) {
+ /* access other descriptor fields after the phase check */
+ dma_rmb();
+
+ fun_handle_cqe_pkt(q, xdp_q);
+ budget--;
+ }
+
+ if (unlikely(q->xdp_flush)) {
+ if (q->xdp_flush & FUN_XDP_FLUSH_TX)
+ fun_txq_wr_db(xdp_q);
+ if (q->xdp_flush & FUN_XDP_FLUSH_REDIR)
+ xdp_do_flush();
+ q->xdp_flush = 0;
+ }
+
+ return budget;
+}
+
+/* NAPI handler for Rx queues. Calls the CQE processing loop and writes RQ/CQ
+ * doorbells as needed.
+ */
+int fun_rxq_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct fun_irq *irq = container_of(napi, struct fun_irq, napi);
+ struct funeth_rxq *q = irq->rxq;
+ int work_done = budget - fun_process_cqes(q, budget);
+ u32 cq_db_val = q->cq_head;
+
+ if (unlikely(work_done >= budget))
+ FUN_QSTAT_INC(q, rx_budget);
+ else if (napi_complete_done(napi, work_done))
+ cq_db_val |= q->irq_db_val;
+
+ /* check whether to post new Rx buffers */
+ if (q->rq_cons - q->rq_cons_db >= q->rq_db_thres) {
+ u64_stats_update_begin(&q->syncp);
+ q->stats.rx_bufs += q->rq_cons - q->rq_cons_db;
+ u64_stats_update_end(&q->syncp);
+ q->rq_cons_db = q->rq_cons;
+ writel((q->rq_cons - 1) & q->rq_mask, q->rq_db);
+ }
+
+ writel(cq_db_val, q->cq_db);
+ return work_done;
+}
+
+/* Free the Rx buffers of an Rx queue. */
+static void fun_rxq_free_bufs(struct funeth_rxq *q)
+{
+ struct funeth_rxbuf *b = q->bufs;
+ unsigned int i;
+
+ for (i = 0; i <= q->rq_mask; i++, b++)
+ funeth_free_page(q, b);
+
+ funeth_free_page(q, &q->spare_buf);
+ q->cur_buf = NULL;
+}
+
+/* Initially provision an Rx queue with Rx buffers. */
+static int fun_rxq_alloc_bufs(struct funeth_rxq *q, int node)
+{
+ struct funeth_rxbuf *b = q->bufs;
+ unsigned int i;
+
+ for (i = 0; i <= q->rq_mask; i++, b++) {
+ if (funeth_alloc_page(q, b, node, GFP_KERNEL)) {
+ fun_rxq_free_bufs(q);
+ return -ENOMEM;
+ }
+ q->rqes[i] = FUN_EPRQ_RQBUF_INIT(b->dma_addr);
+ }
+ q->cur_buf = q->bufs;
+ return 0;
+}
+
+/* Initialize a used-buffer cache of the given depth. */
+static int fun_rxq_init_cache(struct funeth_rx_cache *c, unsigned int depth,
+ int node)
+{
+ c->mask = depth - 1;
+ c->bufs = kvzalloc_node(depth * sizeof(*c->bufs), GFP_KERNEL, node);
+ return c->bufs ? 0 : -ENOMEM;
+}
+
+/* Deallocate an Rx queue's used-buffer cache and its contents. */
+static void fun_rxq_free_cache(struct funeth_rxq *q)
+{
+ struct funeth_rxbuf *b = q->cache.bufs;
+ unsigned int i;
+
+ for (i = 0; i <= q->cache.mask; i++, b++)
+ funeth_free_page(q, b);
+
+ kvfree(q->cache.bufs);
+ q->cache.bufs = NULL;
+}
+
+int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+ struct fun_admin_epcq_req cmd;
+ u16 headroom;
+ int err;
+
+ headroom = prog ? FUN_XDP_HEADROOM : FUN_RX_HEADROOM;
+ if (headroom != q->headroom) {
+ cmd.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPCQ,
+ sizeof(cmd));
+ cmd.u.modify =
+ FUN_ADMIN_EPCQ_MODIFY_REQ_INIT(FUN_ADMIN_SUBOP_MODIFY,
+ 0, q->hw_cqid, headroom);
+ err = fun_submit_admin_sync_cmd(fp->fdev, &cmd.common, NULL, 0,
+ 0);
+ if (err)
+ return err;
+ q->headroom = headroom;
+ }
+
+ WRITE_ONCE(q->xdp_prog, prog);
+ return 0;
+}
+
+/* Create an Rx queue, allocating the host memory it needs. */
+static struct funeth_rxq *fun_rxq_create_sw(struct net_device *dev,
+ unsigned int qidx,
+ unsigned int ncqe,
+ unsigned int nrqe,
+ struct fun_irq *irq)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct funeth_rxq *q;
+ int err = -ENOMEM;
+ int numa_node;
+
+ numa_node = fun_irq_node(irq);
+ q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node);
+ if (!q)
+ goto err;
+
+ q->qidx = qidx;
+ q->netdev = dev;
+ q->cq_mask = ncqe - 1;
+ q->rq_mask = nrqe - 1;
+ q->numa_node = numa_node;
+ q->rq_db_thres = nrqe / 4;
+ u64_stats_init(&q->syncp);
+ q->dma_dev = &fp->pdev->dev;
+
+ q->rqes = fun_alloc_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes),
+ sizeof(*q->bufs), false, numa_node,
+ &q->rq_dma_addr, (void **)&q->bufs, NULL);
+ if (!q->rqes)
+ goto free_q;
+
+ q->cqes = fun_alloc_ring_mem(q->dma_dev, ncqe, FUNETH_CQE_SIZE, 0,
+ false, numa_node, &q->cq_dma_addr, NULL,
+ NULL);
+ if (!q->cqes)
+ goto free_rqes;
+
+ err = fun_rxq_init_cache(&q->cache, nrqe, numa_node);
+ if (err)
+ goto free_cqes;
+
+ err = fun_rxq_alloc_bufs(q, numa_node);
+ if (err)
+ goto free_cache;
+
+ q->stats.rx_bufs = q->rq_mask;
+ q->init_state = FUN_QSTATE_INIT_SW;
+ return q;
+
+free_cache:
+ fun_rxq_free_cache(q);
+free_cqes:
+ dma_free_coherent(q->dma_dev, ncqe * FUNETH_CQE_SIZE, q->cqes,
+ q->cq_dma_addr);
+free_rqes:
+ fun_free_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes), false, q->rqes,
+ q->rq_dma_addr, q->bufs);
+free_q:
+ kfree(q);
+err:
+ netdev_err(dev, "Unable to allocate memory for Rx queue %u\n", qidx);
+ return ERR_PTR(err);
+}
+
+static void fun_rxq_free_sw(struct funeth_rxq *q)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+
+ fun_rxq_free_cache(q);
+ fun_rxq_free_bufs(q);
+ fun_free_ring_mem(q->dma_dev, q->rq_mask + 1, sizeof(*q->rqes), false,
+ q->rqes, q->rq_dma_addr, q->bufs);
+ dma_free_coherent(q->dma_dev, (q->cq_mask + 1) * FUNETH_CQE_SIZE,
+ q->cqes, q->cq_dma_addr);
+
+	/* Before freeing the queue, fold its key counters into the port totals. */
+ fp->rx_packets += q->stats.rx_pkts;
+ fp->rx_bytes += q->stats.rx_bytes;
+ fp->rx_dropped += q->stats.rx_map_err + q->stats.rx_mem_drops;
+
+ kfree(q);
+}
+
+/* Create an Rx queue's resources on the device. */
+int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+ unsigned int ncqe = q->cq_mask + 1;
+ unsigned int nrqe = q->rq_mask + 1;
+ int err;
+
+ err = xdp_rxq_info_reg(&q->xdp_rxq, q->netdev, q->qidx,
+ irq->napi.napi_id);
+ if (err)
+ goto out;
+
+ err = xdp_rxq_info_reg_mem_model(&q->xdp_rxq, MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err)
+ goto xdp_unreg;
+
+ q->phase = 1;
+ q->irq_cnt = 0;
+ q->cq_head = 0;
+ q->rq_cons = 0;
+ q->rq_cons_db = 0;
+ q->buf_offset = 0;
+ q->napi = &irq->napi;
+ q->irq_db_val = fp->cq_irq_db;
+ q->next_cqe_info = cqe_to_info(q->cqes);
+
+ q->xdp_prog = fp->xdp_prog;
+ q->headroom = fp->xdp_prog ? FUN_XDP_HEADROOM : FUN_RX_HEADROOM;
+
+ err = fun_sq_create(fp->fdev, FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR |
+ FUN_ADMIN_EPSQ_CREATE_FLAG_RQ, 0,
+ FUN_HCI_ID_INVALID, 0, nrqe, q->rq_dma_addr, 0, 0,
+ 0, 0, fp->fdev->kern_end_qid, PAGE_SHIFT,
+ &q->hw_sqid, &q->rq_db);
+ if (err)
+ goto xdp_unreg;
+
+ err = fun_cq_create(fp->fdev, FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR |
+ FUN_ADMIN_EPCQ_CREATE_FLAG_RQ, 0,
+ q->hw_sqid, ilog2(FUNETH_CQE_SIZE), ncqe,
+ q->cq_dma_addr, q->headroom, FUN_RX_TAILROOM, 0, 0,
+ irq->irq_idx, 0, fp->fdev->kern_end_qid,
+ &q->hw_cqid, &q->cq_db);
+ if (err)
+ goto free_rq;
+
+ irq->rxq = q;
+ writel(q->rq_mask, q->rq_db);
+ q->init_state = FUN_QSTATE_INIT_FULL;
+
+ netif_info(fp, ifup, q->netdev,
+ "Rx queue %u, depth %u/%u, HW qid %u/%u, IRQ idx %u, node %d, headroom %u\n",
+ q->qidx, ncqe, nrqe, q->hw_cqid, q->hw_sqid, irq->irq_idx,
+ q->numa_node, q->headroom);
+ return 0;
+
+free_rq:
+ fun_destroy_sq(fp->fdev, q->hw_sqid);
+xdp_unreg:
+ xdp_rxq_info_unreg(&q->xdp_rxq);
+out:
+ netdev_err(q->netdev,
+ "Failed to create Rx queue %u on device, error %d\n",
+ q->qidx, err);
+ return err;
+}
+
+static void fun_rxq_free_dev(struct funeth_rxq *q)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+ struct fun_irq *irq;
+
+ if (q->init_state < FUN_QSTATE_INIT_FULL)
+ return;
+
+ irq = container_of(q->napi, struct fun_irq, napi);
+ netif_info(fp, ifdown, q->netdev,
+ "Freeing Rx queue %u (id %u/%u), IRQ %u\n",
+ q->qidx, q->hw_cqid, q->hw_sqid, irq->irq_idx);
+
+ irq->rxq = NULL;
+ xdp_rxq_info_unreg(&q->xdp_rxq);
+ fun_destroy_sq(fp->fdev, q->hw_sqid);
+ fun_destroy_cq(fp->fdev, q->hw_cqid);
+ q->init_state = FUN_QSTATE_INIT_SW;
+}
+
+/* Create or advance an Rx queue, allocating all the host and device resources
+ * needed to reach the target state.
+ */
+int funeth_rxq_create(struct net_device *dev, unsigned int qidx,
+ unsigned int ncqe, unsigned int nrqe, struct fun_irq *irq,
+ int state, struct funeth_rxq **qp)
+{
+ struct funeth_rxq *q = *qp;
+ int err;
+
+ if (!q) {
+ q = fun_rxq_create_sw(dev, qidx, ncqe, nrqe, irq);
+ if (IS_ERR(q))
+ return PTR_ERR(q);
+ }
+
+ if (q->init_state >= state)
+ goto out;
+
+ err = fun_rxq_create_dev(q, irq);
+ if (err) {
+ if (!*qp)
+ fun_rxq_free_sw(q);
+ return err;
+ }
+
+out:
+ *qp = q;
+ return 0;
+}
+
+/* Free Rx queue resources until it reaches the target state. */
+struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state)
+{
+ if (state < FUN_QSTATE_INIT_FULL)
+ fun_rxq_free_dev(q);
+
+ if (state == FUN_QSTATE_DESTROYED) {
+ fun_rxq_free_sw(q);
+ q = NULL;
+ }
+
+ return q;
+}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_trace.h b/drivers/net/ethernet/fungible/funeth/funeth_trace.h
new file mode 100644
index 000000000000..9e58dfec19d5
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_trace.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM funeth
+
+#if !defined(_TRACE_FUNETH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FUNETH_H
+
+#include <linux/tracepoint.h>
+
+#include "funeth_txrx.h"
+
+TRACE_EVENT(funeth_tx,
+
+ TP_PROTO(const struct funeth_txq *txq,
+ u32 len,
+ u32 sqe_idx,
+ u32 ngle),
+
+ TP_ARGS(txq, len, sqe_idx, ngle),
+
+ TP_STRUCT__entry(
+ __field(u32, qidx)
+ __field(u32, len)
+ __field(u32, sqe_idx)
+ __field(u32, ngle)
+ __string(devname, txq->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->qidx = txq->qidx;
+ __entry->len = len;
+ __entry->sqe_idx = sqe_idx;
+ __entry->ngle = ngle;
+ __assign_str(devname, txq->netdev->name);
+ ),
+
+ TP_printk("%s: Txq %u, SQE idx %u, len %u, num GLEs %u",
+ __get_str(devname), __entry->qidx, __entry->sqe_idx,
+ __entry->len, __entry->ngle)
+);
+
+TRACE_EVENT(funeth_tx_free,
+
+ TP_PROTO(const struct funeth_txq *txq,
+ u32 sqe_idx,
+ u32 num_sqes,
+ u32 hw_head),
+
+ TP_ARGS(txq, sqe_idx, num_sqes, hw_head),
+
+ TP_STRUCT__entry(
+ __field(u32, qidx)
+ __field(u32, sqe_idx)
+ __field(u32, num_sqes)
+ __field(u32, hw_head)
+ __string(devname, txq->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->qidx = txq->qidx;
+ __entry->sqe_idx = sqe_idx;
+ __entry->num_sqes = num_sqes;
+ __entry->hw_head = hw_head;
+ __assign_str(devname, txq->netdev->name);
+ ),
+
+ TP_printk("%s: Txq %u, SQE idx %u, SQEs %u, HW head %u",
+ __get_str(devname), __entry->qidx, __entry->sqe_idx,
+ __entry->num_sqes, __entry->hw_head)
+);
+
+TRACE_EVENT(funeth_rx,
+
+ TP_PROTO(const struct funeth_rxq *rxq,
+ u32 num_rqes,
+ u32 pkt_len,
+ u32 hash,
+ u32 cls_vec),
+
+ TP_ARGS(rxq, num_rqes, pkt_len, hash, cls_vec),
+
+ TP_STRUCT__entry(
+ __field(u32, qidx)
+ __field(u32, cq_head)
+ __field(u32, num_rqes)
+ __field(u32, len)
+ __field(u32, hash)
+ __field(u32, cls_vec)
+ __string(devname, rxq->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->qidx = rxq->qidx;
+ __entry->cq_head = rxq->cq_head;
+ __entry->num_rqes = num_rqes;
+ __entry->len = pkt_len;
+ __entry->hash = hash;
+ __entry->cls_vec = cls_vec;
+ __assign_str(devname, rxq->netdev->name);
+ ),
+
+ TP_printk("%s: Rxq %u, CQ head %u, RQEs %u, len %u, hash %u, CV %#x",
+ __get_str(devname), __entry->qidx, __entry->cq_head,
+ __entry->num_rqes, __entry->len, __entry->hash,
+ __entry->cls_vec)
+);
+
+#endif /* _TRACE_FUNETH_H */
+
+/* Below must be outside protection. */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE funeth_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_tx.c b/drivers/net/ethernet/fungible/funeth/funeth_tx.c
new file mode 100644
index 000000000000..ff6e29237253
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_tx.c
@@ -0,0 +1,763 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/dma-mapping.h>
+#include <linux/ip.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <uapi/linux/udp.h>
+#include "funeth.h"
+#include "funeth_ktls.h"
+#include "funeth_txrx.h"
+#include "funeth_trace.h"
+#include "fun_queue.h"
+
+#define FUN_XDP_CLEAN_THRES 32
+#define FUN_XDP_CLEAN_BATCH 16
+
+/* DMA-map a packet and return the (length, DMA_address) pairs for its
+ * segments. If a mapping error occurs -ENOMEM is returned.
+ */
+static int map_skb(const struct sk_buff *skb, struct device *dev,
+ dma_addr_t *addr, unsigned int *len)
+{
+ const struct skb_shared_info *si;
+ const skb_frag_t *fp, *end;
+
+ *len = skb_headlen(skb);
+ *addr = dma_map_single(dev, skb->data, *len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, *addr))
+ return -ENOMEM;
+
+ si = skb_shinfo(skb);
+ end = &si->frags[si->nr_frags];
+
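+	/* Slot 0 holds the linear part; the pre-increments below fill slots
+	 * 1..nr_frags with the page fragments.
+	 */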
+ for (fp = si->frags; fp < end; fp++) {
+ *++len = skb_frag_size(fp);
+ *++addr = skb_frag_dma_map(dev, fp, 0, *len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, *addr))
+ goto unwind;
+ }
+ return 0;
+
+unwind:
+ while (fp-- > si->frags)
+ dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
+
+ dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
+ return -ENOMEM;
+}
+
+/* Return the address just past the end of a Tx queue's descriptor ring.
+ * It exploits the fact that the HW writeback area is just after the end
+ * of the descriptor ring.
+ */
+static void *txq_end(const struct funeth_txq *q)
+{
+ return (void *)q->hw_wb;
+}
+
+/* Return the amount of space within a Tx ring from the given address to the
+ * end.
+ */
+static unsigned int txq_to_end(const struct funeth_txq *q, void *p)
+{
+ return txq_end(q) - p;
+}
+
+/* Return the number of Tx descriptors occupied by a Tx request. */
+static unsigned int tx_req_ndesc(const struct fun_eth_tx_req *req)
+{
+ return DIV_ROUND_UP(req->len8, FUNETH_SQE_SIZE / 8);
+}
+
+static __be16 tcp_hdr_doff_flags(const struct tcphdr *th)
+{
+ return *(__be16 *)&tcp_flag_word(th);
+}
+
+static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
+ unsigned int *tls_len)
+{
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+ const struct fun_ktls_tx_ctx *tls_ctx;
+ u32 datalen, seq;
+
+ datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ if (!datalen)
+ return skb;
+
+ if (likely(!tls_offload_tx_resync_pending(skb->sk))) {
+ seq = ntohl(tcp_hdr(skb)->seq);
+ tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
+
+ if (likely(tls_ctx->next_seq == seq)) {
+ *tls_len = datalen;
+ return skb;
+ }
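+		/* Request a resync only if seq is ahead of our record state;
+		 * for a retransmit (seq behind next_seq) the unsigned
+		 * subtraction wraps and no resync is requested.
+		 */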
+ if (seq - tls_ctx->next_seq < U32_MAX / 4) {
+ tls_offload_tx_resync_request(skb->sk, seq,
+ tls_ctx->next_seq);
+ }
+ }
+
+ FUN_QSTAT_INC(q, tx_tls_fallback);
+ skb = tls_encrypt_skb(skb);
+ if (!skb)
+ FUN_QSTAT_INC(q, tx_tls_drops);
+
+ return skb;
+#else
+ return NULL;
+#endif
+}
+
+/* Write as many descriptors as needed for the supplied skb starting at the
+ * current producer location. The caller has made certain enough descriptors
+ * are available.
+ *
+ * Returns the number of descriptors written, 0 on error.
+ */
+static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
+ unsigned int tls_len)
+{
+ unsigned int extra_bytes = 0, extra_pkts = 0;
+ unsigned int idx = q->prod_cnt & q->mask;
+ const struct skb_shared_info *shinfo;
+ unsigned int lens[MAX_SKB_FRAGS + 1];
+ dma_addr_t addrs[MAX_SKB_FRAGS + 1];
+ struct fun_eth_tx_req *req;
+ struct fun_dataop_gl *gle;
+ const struct tcphdr *th;
+ unsigned int ngle, i;
+ u16 flags;
+
+ if (unlikely(map_skb(skb, q->dma_dev, addrs, lens))) {
+ FUN_QSTAT_INC(q, tx_map_err);
+ return 0;
+ }
+
+ req = fun_tx_desc_addr(q, idx);
+ req->op = FUN_ETH_OP_TX;
+ req->len8 = 0;
+ req->flags = 0;
+ req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
+ req->repr_idn = 0;
+ req->encap_proto = 0;
+
+ shinfo = skb_shinfo(skb);
+ if (likely(shinfo->gso_size)) {
+ if (skb->encapsulation) {
+ u16 ol4_ofst;
+
+ flags = FUN_ETH_OUTER_EN | FUN_ETH_INNER_LSO |
+ FUN_ETH_UPDATE_INNER_L4_CKSUM |
+ FUN_ETH_UPDATE_OUTER_L3_LEN;
+ if (shinfo->gso_type & (SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_UDP_TUNNEL_CSUM)) {
+ flags |= FUN_ETH_UPDATE_OUTER_L4_LEN |
+ FUN_ETH_OUTER_UDP;
+ if (shinfo->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
+ flags |= FUN_ETH_UPDATE_OUTER_L4_CKSUM;
+ ol4_ofst = skb_transport_offset(skb);
+ } else {
+ ol4_ofst = skb_inner_network_offset(skb);
+ }
+
+ if (ip_hdr(skb)->version == 4)
+ flags |= FUN_ETH_UPDATE_OUTER_L3_CKSUM;
+ else
+ flags |= FUN_ETH_OUTER_IPV6;
+
+ if (skb->inner_network_header) {
+ if (inner_ip_hdr(skb)->version == 4)
+ flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM |
+ FUN_ETH_UPDATE_INNER_L3_LEN;
+ else
+ flags |= FUN_ETH_INNER_IPV6 |
+ FUN_ETH_UPDATE_INNER_L3_LEN;
+ }
+ th = inner_tcp_hdr(skb);
+ fun_eth_offload_init(&req->offload, flags,
+ shinfo->gso_size,
+ tcp_hdr_doff_flags(th), 0,
+ skb_inner_network_offset(skb),
+ skb_inner_transport_offset(skb),
+ skb_network_offset(skb), ol4_ofst);
+ FUN_QSTAT_INC(q, tx_encap_tso);
+ } else {
+ /* HW considers one set of headers as inner */
+ flags = FUN_ETH_INNER_LSO |
+ FUN_ETH_UPDATE_INNER_L4_CKSUM |
+ FUN_ETH_UPDATE_INNER_L3_LEN;
+ if (shinfo->gso_type & SKB_GSO_TCPV6)
+ flags |= FUN_ETH_INNER_IPV6;
+ else
+ flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
+ th = tcp_hdr(skb);
+ fun_eth_offload_init(&req->offload, flags,
+ shinfo->gso_size,
+ tcp_hdr_doff_flags(th), 0,
+ skb_network_offset(skb),
+ skb_transport_offset(skb), 0, 0);
+ FUN_QSTAT_INC(q, tx_tso);
+ }
+
+ u64_stats_update_begin(&q->syncp);
+ q->stats.tx_cso += shinfo->gso_segs;
+ u64_stats_update_end(&q->syncp);
+
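+	/* Each extra TSO segment re-emits the headers through the inner TCP
+	 * header; account for those bytes in the Tx byte count.
+	 */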
+ extra_pkts = shinfo->gso_segs - 1;
+ extra_bytes = (be16_to_cpu(req->offload.inner_l4_off) +
+ __tcp_hdrlen(th)) * extra_pkts;
+ } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ flags = FUN_ETH_UPDATE_INNER_L4_CKSUM;
+ if (skb->csum_offset == offsetof(struct udphdr, check))
+ flags |= FUN_ETH_INNER_UDP;
+ fun_eth_offload_init(&req->offload, flags, 0, 0, 0, 0,
+ skb_checksum_start_offset(skb), 0, 0);
+ FUN_QSTAT_INC(q, tx_cso);
+ } else {
+ fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
+ }
+
+ ngle = shinfo->nr_frags + 1;
+ req->len8 = (sizeof(*req) + ngle * sizeof(*gle)) / 8;
+ req->dataop = FUN_DATAOP_HDR_INIT(ngle, 0, ngle, 0, skb->len);
+
+ for (i = 0, gle = (struct fun_dataop_gl *)req->dataop.imm;
+ i < ngle && txq_to_end(q, gle); i++, gle++)
+ fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
+
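+	/* The gather list may hit the end of the ring; write any remaining
+	 * entries from the start of the descriptor area.
+	 */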
+ if (txq_to_end(q, gle) == 0) {
+ gle = (struct fun_dataop_gl *)q->desc;
+ for ( ; i < ngle; i++, gle++)
+ fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
+ }
+
+ if (IS_ENABLED(CONFIG_TLS_DEVICE) && unlikely(tls_len)) {
+ struct fun_eth_tls *tls = (struct fun_eth_tls *)gle;
+ struct fun_ktls_tx_ctx *tls_ctx;
+
+ req->len8 += FUNETH_TLS_SZ / 8;
+ req->flags = cpu_to_be16(FUN_ETH_TX_TLS);
+
+ tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
+ tls->tlsid = tls_ctx->tlsid;
+ tls_ctx->next_seq += tls_len;
+
+ u64_stats_update_begin(&q->syncp);
+ q->stats.tx_tls_bytes += tls_len;
+ q->stats.tx_tls_pkts += 1 + extra_pkts;
+ u64_stats_update_end(&q->syncp);
+ }
+
+ u64_stats_update_begin(&q->syncp);
+ q->stats.tx_bytes += skb->len + extra_bytes;
+ q->stats.tx_pkts += 1 + extra_pkts;
+ u64_stats_update_end(&q->syncp);
+
+ q->info[idx].skb = skb;
+
+ trace_funeth_tx(q, skb->len, idx, req->dataop.ngather);
+ return tx_req_ndesc(req);
+}
+
+/* Return the number of available descriptors of a Tx queue.
+ * HW assumes head==tail means the ring is empty so we need to keep one
+ * descriptor unused.
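+ * E.g., a 512-entry ring (mask = 511) with 100 descriptors outstanding has
+ * 411 available.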
+ */
+static unsigned int fun_txq_avail(const struct funeth_txq *q)
+{
+ return q->mask - q->prod_cnt + q->cons_cnt;
+}
+
+/* Stop a queue if it can't handle another worst-case packet. */
+static void fun_tx_check_stop(struct funeth_txq *q)
+{
+ if (likely(fun_txq_avail(q) >= FUNETH_MAX_PKT_DESC))
+ return;
+
+ netif_tx_stop_queue(q->ndq);
+
+ /* NAPI reclaim is freeing packets in parallel with us and we may race.
+ * We have stopped the queue but check again after synchronizing with
+ * reclaim.
+ */
+ smp_mb();
+ if (likely(fun_txq_avail(q) < FUNETH_MAX_PKT_DESC))
+ FUN_QSTAT_INC(q, tx_nstops);
+ else
+ netif_tx_start_queue(q->ndq);
+}
+
+/* Return true if a queue has enough space to restart. Current condition is
+ * that the queue must be >= 1/4 empty.
+ */
+static bool fun_txq_may_restart(struct funeth_txq *q)
+{
+ return fun_txq_avail(q) >= q->mask / 4;
+}
+
+netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ unsigned int qid = skb_get_queue_mapping(skb);
+ struct funeth_txq *q = fp->txqs[qid];
+ unsigned int tls_len = 0;
+ unsigned int ndesc;
+
+ if (IS_ENABLED(CONFIG_TLS_DEVICE) && skb->sk &&
+ tls_is_sk_tx_device_offloaded(skb->sk)) {
+ skb = fun_tls_tx(skb, q, &tls_len);
+ if (unlikely(!skb))
+ goto dropped;
+ }
+
+ ndesc = write_pkt_desc(skb, q, tls_len);
+ if (unlikely(!ndesc)) {
+ dev_kfree_skb_any(skb);
+ goto dropped;
+ }
+
+ q->prod_cnt += ndesc;
+ fun_tx_check_stop(q);
+
+ skb_tx_timestamp(skb);
+
+ if (__netdev_tx_sent_queue(q->ndq, skb->len, netdev_xmit_more()))
+ fun_txq_wr_db(q);
+ else
+ FUN_QSTAT_INC(q, tx_more);
+
+ return NETDEV_TX_OK;
+
+dropped:
+	/* A dropped packet may be the last one in an xmit_more train;
+	 * ring the doorbell just in case.
+	 */
+ if (!netdev_xmit_more())
+ fun_txq_wr_db(q);
+ return NETDEV_TX_OK;
+}
+
+/* Return a Tx queue's HW head index written back to host memory. */
+static u16 txq_hw_head(const struct funeth_txq *q)
+{
+ return (u16)be64_to_cpu(*q->hw_wb);
+}
+
+/* Unmap the Tx packet starting at the given descriptor index and
+ * return the number of Tx descriptors it occupied.
+ */
+static unsigned int unmap_skb(const struct funeth_txq *q, unsigned int idx)
+{
+ const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx);
+ unsigned int ngle = req->dataop.ngather;
+ struct fun_dataop_gl *gle;
+
+ if (ngle) {
+ gle = (struct fun_dataop_gl *)req->dataop.imm;
+ dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data),
+ be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE);
+
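+		/* Unmap the remaining fragments, following any wrap of the
+		 * gather list back to the ring start (mirrors the layout
+		 * written by write_pkt_desc()).
+		 */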
+ for (gle++; --ngle && txq_to_end(q, gle); gle++)
+ dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
+ be32_to_cpu(gle->sgl_len),
+ DMA_TO_DEVICE);
+
+ for (gle = (struct fun_dataop_gl *)q->desc; ngle; ngle--, gle++)
+ dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
+ be32_to_cpu(gle->sgl_len),
+ DMA_TO_DEVICE);
+ }
+
+ return tx_req_ndesc(req);
+}
+
+/* Reclaim completed Tx descriptors and free their packets. Restart a stopped
+ * queue if we freed enough descriptors.
+ *
+ * Return true if we exhausted the budget while there is more work to be done.
+ */
+static bool fun_txq_reclaim(struct funeth_txq *q, int budget)
+{
+ unsigned int npkts = 0, nbytes = 0, ndesc = 0;
+ unsigned int head, limit, reclaim_idx;
+
+ /* budget may be 0, e.g., netpoll */
+ limit = budget ? budget : UINT_MAX;
+
+ for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
+ head != reclaim_idx && npkts < limit; head = txq_hw_head(q)) {
+ /* The HW head is continually updated, ensure we don't read
+ * descriptor state before the head tells us to reclaim it.
+ * On the enqueue side the doorbell is an implicit write
+ * barrier.
+ */
+ rmb();
+
+ do {
+ unsigned int pkt_desc = unmap_skb(q, reclaim_idx);
+ struct sk_buff *skb = q->info[reclaim_idx].skb;
+
+ trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);
+
+ nbytes += skb->len;
+ napi_consume_skb(skb, budget);
+ ndesc += pkt_desc;
+ reclaim_idx = (reclaim_idx + pkt_desc) & q->mask;
+ npkts++;
+ } while (reclaim_idx != head && npkts < limit);
+ }
+
+ q->cons_cnt += ndesc;
+ netdev_tx_completed_queue(q->ndq, npkts, nbytes);
+ smp_mb(); /* pairs with the one in fun_tx_check_stop() */
+
+ if (unlikely(netif_tx_queue_stopped(q->ndq) &&
+ fun_txq_may_restart(q))) {
+ netif_tx_wake_queue(q->ndq);
+ FUN_QSTAT_INC(q, tx_nrestarts);
+ }
+
+ return reclaim_idx != head;
+}
+
+/* The NAPI handler for Tx queues. */
+int fun_txq_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct fun_irq *irq = container_of(napi, struct fun_irq, napi);
+ struct funeth_txq *q = irq->txq;
+ unsigned int db_val;
+
+ if (fun_txq_reclaim(q, budget))
+ return budget; /* exhausted budget */
+
+ napi_complete(napi); /* exhausted pending work */
+ db_val = READ_ONCE(q->irq_db_val) | (q->cons_cnt & q->mask);
+ writel(db_val, q->db);
+ return 0;
+}
+
+static void fun_xdp_unmap(const struct funeth_txq *q, unsigned int idx)
+{
+ const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx);
+ const struct fun_dataop_gl *gle;
+
+ gle = (const struct fun_dataop_gl *)req->dataop.imm;
+ dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data),
+ be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE);
+}
+
+/* Reclaim up to @budget completed Tx descriptors from a TX XDP queue. */
+static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
+{
+ unsigned int npkts = 0, head, reclaim_idx;
+
+ for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
+ head != reclaim_idx && npkts < budget; head = txq_hw_head(q)) {
+ /* The HW head is continually updated, ensure we don't read
+ * descriptor state before the head tells us to reclaim it.
+ * On the enqueue side the doorbell is an implicit write
+ * barrier.
+ */
+ rmb();
+
+ do {
+ fun_xdp_unmap(q, reclaim_idx);
+ page_frag_free(q->info[reclaim_idx].vaddr);
+
+ trace_funeth_tx_free(q, reclaim_idx, 1, head);
+
+ reclaim_idx = (reclaim_idx + 1) & q->mask;
+ npkts++;
+ } while (reclaim_idx != head && npkts < budget);
+ }
+
+ q->cons_cnt += npkts;
+ return npkts;
+}
+
+bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
+{
+ struct fun_eth_tx_req *req;
+ struct fun_dataop_gl *gle;
+ unsigned int idx;
+ dma_addr_t dma;
+
+ if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES)
+ fun_xdpq_clean(q, FUN_XDP_CLEAN_BATCH);
+
+ if (unlikely(!fun_txq_avail(q))) {
+ FUN_QSTAT_INC(q, tx_xdp_full);
+ return false;
+ }
+
+ dma = dma_map_single(q->dma_dev, data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(q->dma_dev, dma))) {
+ FUN_QSTAT_INC(q, tx_map_err);
+ return false;
+ }
+
+ idx = q->prod_cnt & q->mask;
+ req = fun_tx_desc_addr(q, idx);
+ req->op = FUN_ETH_OP_TX;
+ req->len8 = (sizeof(*req) + sizeof(*gle)) / 8;
+ req->flags = 0;
+ req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
+ req->repr_idn = 0;
+ req->encap_proto = 0;
+ fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
+ req->dataop = FUN_DATAOP_HDR_INIT(1, 0, 1, 0, len);
+
+ gle = (struct fun_dataop_gl *)req->dataop.imm;
+ fun_dataop_gl_init(gle, 0, 0, len, dma);
+
+ q->info[idx].vaddr = data;
+
+ u64_stats_update_begin(&q->syncp);
+ q->stats.tx_bytes += len;
+ q->stats.tx_pkts++;
+ u64_stats_update_end(&q->syncp);
+
+ trace_funeth_tx(q, len, idx, 1);
+ q->prod_cnt++;
+
+ return true;
+}
+
+int fun_xdp_xmit_frames(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct funeth_txq *q, **xdpqs;
+ int i, q_idx;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ xdpqs = rcu_dereference_bh(fp->xdpqs);
+ if (unlikely(!xdpqs))
+ return -ENETDOWN;
+
+ q_idx = smp_processor_id();
+ if (unlikely(q_idx >= fp->num_xdpqs))
+ return -ENXIO;
+
+ for (q = xdpqs[q_idx], i = 0; i < n; i++) {
+ const struct xdp_frame *xdpf = frames[i];
+
+ if (!fun_xdp_tx(q, xdpf->data, xdpf->len))
+ break;
+ }
+
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ fun_txq_wr_db(q);
+ return i;
+}
+
+/* Purge a Tx queue of any queued packets. Should be called once HW access
+ * to the packets has been revoked, e.g., after the queue has been disabled.
+ */
+static void fun_txq_purge(struct funeth_txq *q)
+{
+ while (q->cons_cnt != q->prod_cnt) {
+ unsigned int idx = q->cons_cnt & q->mask;
+
+ q->cons_cnt += unmap_skb(q, idx);
+ dev_kfree_skb_any(q->info[idx].skb);
+ }
+ netdev_tx_reset_queue(q->ndq);
+}
+
+static void fun_xdpq_purge(struct funeth_txq *q)
+{
+ while (q->cons_cnt != q->prod_cnt) {
+ unsigned int idx = q->cons_cnt & q->mask;
+
+ fun_xdp_unmap(q, idx);
+ page_frag_free(q->info[idx].vaddr);
+ q->cons_cnt++;
+ }
+}
+
+/* Create a Tx queue, allocating all the host resources needed. */
+static struct funeth_txq *fun_txq_create_sw(struct net_device *dev,
+ unsigned int qidx,
+ unsigned int ndesc,
+ struct fun_irq *irq)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct funeth_txq *q;
+ int numa_node;
+
+ if (irq)
+ numa_node = fun_irq_node(irq); /* skb Tx queue */
+ else
+ numa_node = cpu_to_node(qidx); /* XDP Tx queue */
+
+ q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node);
+ if (!q)
+ goto err;
+
+ q->dma_dev = &fp->pdev->dev;
+ q->desc = fun_alloc_ring_mem(q->dma_dev, ndesc, FUNETH_SQE_SIZE,
+ sizeof(*q->info), true, numa_node,
+ &q->dma_addr, (void **)&q->info,
+ &q->hw_wb);
+ if (!q->desc)
+ goto free_q;
+
+ q->netdev = dev;
+ q->mask = ndesc - 1;
+ q->qidx = qidx;
+ q->numa_node = numa_node;
+ u64_stats_init(&q->syncp);
+ q->init_state = FUN_QSTATE_INIT_SW;
+ return q;
+
+free_q:
+ kfree(q);
+err:
+ netdev_err(dev, "Can't allocate memory for %s queue %u\n",
+ irq ? "Tx" : "XDP", qidx);
+ return NULL;
+}
+
+static void fun_txq_free_sw(struct funeth_txq *q)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+
+ fun_free_ring_mem(q->dma_dev, q->mask + 1, FUNETH_SQE_SIZE, true,
+ q->desc, q->dma_addr, q->info);
+
+ fp->tx_packets += q->stats.tx_pkts;
+ fp->tx_bytes += q->stats.tx_bytes;
+ fp->tx_dropped += q->stats.tx_map_err;
+
+ kfree(q);
+}
+
+/* Allocate the device portion of a Tx queue. */
+int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+ unsigned int irq_idx, ndesc = q->mask + 1;
+ int err;
+
+ q->irq = irq;
+ *q->hw_wb = 0;
+ q->prod_cnt = 0;
+ q->cons_cnt = 0;
+ irq_idx = irq ? irq->irq_idx : 0;
+
+ err = fun_sq_create(fp->fdev,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS |
+ FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR, 0,
+ FUN_HCI_ID_INVALID, ilog2(FUNETH_SQE_SIZE), ndesc,
+ q->dma_addr, fp->tx_coal_count, fp->tx_coal_usec,
+ irq_idx, 0, fp->fdev->kern_end_qid, 0,
+ &q->hw_qid, &q->db);
+ if (err)
+ goto out;
+
+ err = fun_create_and_bind_tx(fp, q->hw_qid);
+ if (err < 0)
+ goto free_devq;
+ q->ethid = err;
+
+ if (irq) {
+ irq->txq = q;
+ q->ndq = netdev_get_tx_queue(q->netdev, q->qidx);
+ q->irq_db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec,
+ fp->tx_coal_count);
+ writel(q->irq_db_val, q->db);
+ }
+
+ q->init_state = FUN_QSTATE_INIT_FULL;
+ netif_info(fp, ifup, q->netdev,
+ "%s queue %u, depth %u, HW qid %u, IRQ idx %u, eth id %u, node %d\n",
+ irq ? "Tx" : "XDP", q->qidx, ndesc, q->hw_qid, irq_idx,
+ q->ethid, q->numa_node);
+ return 0;
+
+free_devq:
+ fun_destroy_sq(fp->fdev, q->hw_qid);
+out:
+ netdev_err(q->netdev,
+ "Failed to create %s queue %u on device, error %d\n",
+ irq ? "Tx" : "XDP", q->qidx, err);
+ return err;
+}
+
+static void fun_txq_free_dev(struct funeth_txq *q)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+
+ if (q->init_state < FUN_QSTATE_INIT_FULL)
+ return;
+
+ netif_info(fp, ifdown, q->netdev,
+ "Freeing %s queue %u (id %u), IRQ %u, ethid %u\n",
+ q->irq ? "Tx" : "XDP", q->qidx, q->hw_qid,
+ q->irq ? q->irq->irq_idx : 0, q->ethid);
+
+ fun_destroy_sq(fp->fdev, q->hw_qid);
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, q->ethid);
+
+ if (q->irq) {
+ q->irq->txq = NULL;
+ fun_txq_purge(q);
+ } else {
+ fun_xdpq_purge(q);
+ }
+
+ q->init_state = FUN_QSTATE_INIT_SW;
+}
+
+/* Create or advance a Tx queue, allocating all the host and device resources
+ * needed to reach the target state.
+ */
+int funeth_txq_create(struct net_device *dev, unsigned int qidx,
+ unsigned int ndesc, struct fun_irq *irq, int state,
+ struct funeth_txq **qp)
+{
+ struct funeth_txq *q = *qp;
+ int err;
+
+ if (!q)
+ q = fun_txq_create_sw(dev, qidx, ndesc, irq);
+ if (!q)
+ return -ENOMEM;
+
+ if (q->init_state >= state)
+ goto out;
+
+ err = fun_txq_create_dev(q, irq);
+ if (err) {
+ if (!*qp)
+ fun_txq_free_sw(q);
+ return err;
+ }
+
+out:
+ *qp = q;
+ return 0;
+}
+
+/* Free Tx queue resources until it reaches the target state.
+ * The queue must be already disconnected from the stack.
+ */
+struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state)
+{
+ if (state < FUN_QSTATE_INIT_FULL)
+ fun_txq_free_dev(q);
+
+ if (state == FUN_QSTATE_DESTROYED) {
+ fun_txq_free_sw(q);
+ q = NULL;
+ }
+
+ return q;
+}
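
funeth_txq_create()/funeth_txq_free() walk a queue between the DESTROYED, INIT_SW and INIT_FULL states, performing only the missing or excess steps, so callers can request any target state idempotently. A generic sketch of that shape (hypothetical names, not the driver's API):

enum qstate { Q_DESTROYED, Q_INIT_SW, Q_INIT_FULL };

struct queue { enum qstate state; };

/* advance toward 'target', performing only the steps not yet done */
static int q_create(struct queue *q, enum qstate target)
{
	if (q->state < Q_INIT_SW && target >= Q_INIT_SW) {
		/* allocate host memory, rings, stats ... */
		q->state = Q_INIT_SW;
	}
	if (q->state < Q_INIT_FULL && target >= Q_INIT_FULL) {
		/* create the device-side queue ... */
		q->state = Q_INIT_FULL;
	}
	return 0;
}

/* retreat toward 'target', undoing only the steps beyond it */
static void q_free(struct queue *q, enum qstate target)
{
	if (q->state == Q_INIT_FULL && target < Q_INIT_FULL) {
		/* destroy the device-side queue ... */
		q->state = Q_INIT_SW;
	}
	if (q->state == Q_INIT_SW && target < Q_INIT_SW) {
		/* free host memory ... */
		q->state = Q_DESTROYED;
	}
}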
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
new file mode 100644
index 000000000000..04c9f91b7489
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
@@ -0,0 +1,264 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUNETH_TXRX_H
+#define _FUNETH_TXRX_H
+
+#include <linux/netdevice.h>
+#include <linux/u64_stats_sync.h>
+
+/* Tx descriptor size */
+#define FUNETH_SQE_SIZE 64U
+
+/* Size of device headers per Tx packet */
+#define FUNETH_FUNOS_HDR_SZ (sizeof(struct fun_eth_tx_req))
+
+/* Number of gather list entries per Tx descriptor */
+#define FUNETH_GLE_PER_DESC (FUNETH_SQE_SIZE / sizeof(struct fun_dataop_gl))
+
+/* Max gather list size in bytes for an sk_buff. */
+#define FUNETH_MAX_GL_SZ ((MAX_SKB_FRAGS + 1) * sizeof(struct fun_dataop_gl))
+
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+# define FUNETH_TLS_SZ sizeof(struct fun_eth_tls)
+#else
+# define FUNETH_TLS_SZ 0
+#endif
+
+/* Max number of Tx descriptors for an sk_buff using a gather list. */
+#define FUNETH_MAX_GL_DESC \
+ DIV_ROUND_UP((FUNETH_FUNOS_HDR_SZ + FUNETH_MAX_GL_SZ + FUNETH_TLS_SZ), \
+ FUNETH_SQE_SIZE)
+
+/* Max number of Tx descriptors for any packet. */
+#define FUNETH_MAX_PKT_DESC FUNETH_MAX_GL_DESC
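
To make the arithmetic concrete: assuming sizeof(struct fun_eth_tx_req) is 32 bytes, a fun_dataop_gl entry is 16 bytes, MAX_SKB_FRAGS is 17 and TLS is disabled (all assumed figures for illustration, not taken from the headers), the worst case works out to

	FUNETH_MAX_GL_DESC = DIV_ROUND_UP(32 + (17 + 1) * 16 + 0, 64)
	                   = DIV_ROUND_UP(320, 64)
	                   = 5 descriptors per packet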
+
+/* Rx CQ descriptor size. */
+#define FUNETH_CQE_SIZE 64U
+
+/* Offset of cqe_info within a CQE. */
+#define FUNETH_CQE_INFO_OFFSET (FUNETH_CQE_SIZE - sizeof(struct fun_cqe_info))
+
+/* Construct the IRQ portion of a CQ doorbell. The resulting value arms the
+ * interrupt with the supplied time delay and packet count moderation settings.
+ */
+#define FUN_IRQ_CQ_DB(usec, pkts) \
+ (FUN_DB_IRQ_ARM_F | ((usec) << FUN_DB_INTCOAL_USEC_S) | \
+ ((pkts) << FUN_DB_INTCOAL_ENTRIES_S))
+
+/* As above for SQ doorbells. */
+#define FUN_IRQ_SQ_DB(usec, pkts) \
+ (FUN_DB_IRQ_ARM_F | \
+ ((usec) << FUN_DB_INTCOAL_USEC_S) | \
+ ((pkts) << FUN_DB_INTCOAL_ENTRIES_S))
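
Both macros simply OR the arm flag with the shifted moderation fields. A self-contained sketch of that bit-packing with made-up shift values (the real FUN_DB_* constants are defined elsewhere in the driver and may differ):

#include <assert.h>
#include <stdint.h>

/* illustrative field layout only -- not the device's real encoding */
#define DB_IRQ_ARM_F          (1u << 31)
#define DB_INTCOAL_USEC_S     8
#define DB_INTCOAL_ENTRIES_S  0

static inline uint32_t irq_db(uint32_t usec, uint32_t pkts)
{
	return DB_IRQ_ARM_F | (usec << DB_INTCOAL_USEC_S) |
	       (pkts << DB_INTCOAL_ENTRIES_S);
}

int main(void)
{
	uint32_t v = irq_db(5, 16);	/* 5 us / 16 packet moderation */

	assert(v & DB_IRQ_ARM_F);
	assert(((v >> DB_INTCOAL_USEC_S) & 0xff) == 5);
	assert((v & 0xff) == 16);
	return 0;
}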
+
+/* Per packet tailroom. Present only for 1-frag packets. */
+#define FUN_RX_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+
+/* Per packet headroom for XDP. Preferred over XDP_PACKET_HEADROOM to
+ * accommodate two packets per buffer for 4K pages and 1500B MTUs.
+ */
+#define FUN_XDP_HEADROOM 192
+
+/* Initialization state of a queue. */
+enum {
+ FUN_QSTATE_DESTROYED, /* what queue? */
+ FUN_QSTATE_INIT_SW, /* exists in SW, not on the device */
+ FUN_QSTATE_INIT_FULL, /* exists both in SW and on device */
+};
+
+/* Initialization state of an interrupt. */
+enum {
+ FUN_IRQ_INIT, /* initialized and in the XArray but inactive */
+ FUN_IRQ_REQUESTED, /* request_irq() done */
+ FUN_IRQ_ENABLED, /* processing enabled */
+ FUN_IRQ_DISABLED, /* processing disabled */
+};
+
+struct bpf_prog;
+
+struct funeth_txq_stats { /* per Tx queue SW counters */
+ u64 tx_pkts; /* # of Tx packets */
+ u64 tx_bytes; /* total bytes of Tx packets */
+ u64 tx_cso; /* # of packets with checksum offload */
+ u64 tx_tso; /* # of non-encapsulated TSO super-packets */
+ u64 tx_encap_tso; /* # of encapsulated TSO super-packets */
+ u64 tx_more; /* # of DBs elided due to xmit_more */
+ u64 tx_nstops; /* # of times the queue has stopped */
+ u64 tx_nrestarts; /* # of times the queue has restarted */
+ u64 tx_map_err; /* # of packets dropped due to DMA mapping errors */
+ u64 tx_xdp_full; /* # of XDP packets that could not be enqueued */
+ u64 tx_tls_pkts; /* # of Tx TLS packets offloaded to HW */
+ u64 tx_tls_bytes; /* Tx bytes of HW-handled TLS payload */
+ u64 tx_tls_fallback; /* attempted Tx TLS offloads punted to SW */
+ u64 tx_tls_drops; /* attempted Tx TLS offloads dropped */
+};
+
+struct funeth_tx_info { /* per Tx descriptor state */
+ union {
+ struct sk_buff *skb; /* associated packet */
+ void *vaddr; /* start address for XDP */
+ };
+};
+
+struct funeth_txq {
+ /* RO cacheline of frequently accessed data */
+ u32 mask; /* queue depth - 1 */
+ u32 hw_qid; /* device ID of the queue */
+ void *desc; /* base address of descriptor ring */
+ struct funeth_tx_info *info;
+ struct device *dma_dev; /* device for DMA mappings */
+ volatile __be64 *hw_wb; /* HW write-back location */
+ u32 __iomem *db; /* SQ doorbell register address */
+ struct netdev_queue *ndq;
+ dma_addr_t dma_addr; /* DMA address of descriptor ring */
+ /* producer R/W cacheline */
+ u16 qidx; /* queue index within net_device */
+ u16 ethid;
+ u32 prod_cnt; /* producer counter */
+ struct funeth_txq_stats stats;
+ /* shared R/W cacheline, primarily accessed by consumer */
+ u32 irq_db_val; /* value written to IRQ doorbell */
+ u32 cons_cnt; /* consumer (cleanup) counter */
+ struct net_device *netdev;
+ struct fun_irq *irq;
+ int numa_node;
+ u8 init_state; /* queue initialization state */
+ struct u64_stats_sync syncp;
+};
+
+struct funeth_rxq_stats { /* per Rx queue SW counters */
+ u64 rx_pkts; /* # of received packets, including SW drops */
+ u64 rx_bytes; /* total size of received packets */
+ u64 rx_cso; /* # of packets with checksum offload */
+ u64 rx_bufs; /* total # of Rx buffers provided to device */
+ u64 gro_pkts; /* # of GRO superpackets */
+ u64 gro_merged; /* # of pkts merged into existing GRO superpackets */
+ u64 rx_page_alloc; /* # of page allocations for Rx buffers */
+ u64 rx_budget; /* NAPI iterations that exhausted their budget */
+ u64 rx_mem_drops; /* # of packets dropped due to memory shortage */
+ u64 rx_map_err; /* # of page DMA mapping errors */
+ u64 xdp_drops; /* XDP_DROPped packets */
+ u64 xdp_tx; /* successful XDP transmits */
+ u64 xdp_redir; /* successful XDP redirects */
+ u64 xdp_err; /* packets dropped due to XDP errors */
+};
+
+struct funeth_rxbuf { /* per Rx buffer state */
+ struct page *page; /* associated page */
+ dma_addr_t dma_addr; /* DMA address of page start */
+ int pg_refs; /* page refs held by driver */
+ int node; /* page node, or -1 if it is PF_MEMALLOC */
+};
+
+struct funeth_rx_cache { /* cache of DMA-mapped previously used buffers */
+ struct funeth_rxbuf *bufs; /* base of Rx buffer state ring */
+ unsigned int prod_cnt; /* producer counter */
+ unsigned int cons_cnt; /* consumer counter */
+ unsigned int mask; /* depth - 1 */
+};
+
+/* An Rx queue consists of a CQ and an SQ used to provide Rx buffers. */
+struct funeth_rxq {
+ struct net_device *netdev;
+ struct napi_struct *napi;
+ struct device *dma_dev; /* device for DMA mappings */
+ void *cqes; /* base of CQ descriptor ring */
+ const void *next_cqe_info; /* fun_cqe_info of next CQE */
+ u32 __iomem *cq_db; /* CQ doorbell register address */
+ unsigned int cq_head; /* CQ head index */
+ unsigned int cq_mask; /* CQ depth - 1 */
+ u16 phase; /* CQ phase tag */
+ u16 qidx; /* queue index within net_device */
+ unsigned int irq_db_val; /* IRQ info for CQ doorbell */
+ struct fun_eprq_rqbuf *rqes; /* base of RQ descriptor ring */
+ struct funeth_rxbuf *bufs; /* base of Rx buffer state ring */
+ struct funeth_rxbuf *cur_buf; /* currently active buffer */
+ u32 __iomem *rq_db; /* RQ doorbell register address */
+ unsigned int rq_cons; /* RQ consumer counter */
+ unsigned int rq_mask; /* RQ depth - 1 */
+ unsigned int buf_offset; /* offset of next pkt in head buffer */
+ u8 xdp_flush; /* XDP flush types needed at NAPI end */
+ u8 init_state; /* queue initialization state */
+ u16 headroom; /* per packet headroom */
+ unsigned int rq_cons_db; /* value of rq_cons at last RQ db */
+ unsigned int rq_db_thres; /* # of new buffers needed to write RQ db */
+ struct funeth_rxbuf spare_buf; /* spare for next buffer replacement */
+ struct funeth_rx_cache cache; /* used buffer cache */
+ struct bpf_prog *xdp_prog; /* optional XDP BPF program */
+ struct funeth_rxq_stats stats;
+ dma_addr_t cq_dma_addr; /* DMA address of CQE ring */
+ dma_addr_t rq_dma_addr; /* DMA address of RQE ring */
+ u16 irq_cnt;
+ u32 hw_cqid; /* device ID of the queue's CQ */
+ u32 hw_sqid; /* device ID of the queue's SQ */
+ int numa_node;
+ struct u64_stats_sync syncp;
+ struct xdp_rxq_info xdp_rxq;
+};
+
+#define FUN_QSTAT_INC(q, counter) \
+ do { \
+ u64_stats_update_begin(&(q)->syncp); \
+ (q)->stats.counter++; \
+ u64_stats_update_end(&(q)->syncp); \
+ } while (0)
+
+#define FUN_QSTAT_READ(q, seq, stats_copy) \
+ do { \
+ seq = u64_stats_fetch_begin(&(q)->syncp); \
+ stats_copy = (q)->stats; \
+ } while (u64_stats_fetch_retry(&(q)->syncp, (seq)))
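
FUN_QSTAT_READ is the u64_stats seqcount pattern: the reader retries until it observes the same even sequence number before and after copying the counters, proving the snapshot was not torn by a concurrent writer. A simplified userspace sketch with C11 atomics (it glosses over the formal data-race rules that the kernel primitives handle; a single writer per queue is assumed, as in the driver):

#include <stdatomic.h>
#include <stdint.h>

struct stats { uint64_t pkts, bytes; };

struct synced_stats {
	atomic_uint seq;	/* odd while an update is in flight */
	struct stats s;
};

/* writer: bump seq to odd, update, bump back to even */
static void stats_add(struct synced_stats *ss, uint64_t len)
{
	atomic_fetch_add(&ss->seq, 1);
	ss->s.pkts++;
	ss->s.bytes += len;
	atomic_fetch_add(&ss->seq, 1);
}

/* reader: retry while seq is odd or changed under us */
static struct stats stats_read(struct synced_stats *ss)
{
	struct stats copy;
	unsigned int seq;

	do {
		seq = atomic_load(&ss->seq);
		copy = ss->s;
	} while ((seq & 1) || seq != atomic_load(&ss->seq));
	return copy;
}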
+
+#define FUN_INT_NAME_LEN (IFNAMSIZ + 16)
+
+struct fun_irq {
+ struct napi_struct napi;
+ struct funeth_txq *txq;
+ struct funeth_rxq *rxq;
+ u8 state;
+ u16 irq_idx; /* index of MSI-X interrupt */
+ int irq; /* Linux IRQ vector */
+ cpumask_t affinity_mask; /* IRQ affinity */
+ struct irq_affinity_notify aff_notify;
+ char name[FUN_INT_NAME_LEN];
+} ____cacheline_internodealigned_in_smp;
+
+/* Return the start address of the idx-th Tx descriptor. */
+static inline void *fun_tx_desc_addr(const struct funeth_txq *q,
+ unsigned int idx)
+{
+ return q->desc + idx * FUNETH_SQE_SIZE;
+}
+
+static inline void fun_txq_wr_db(const struct funeth_txq *q)
+{
+ unsigned int tail = q->prod_cnt & q->mask;
+
+ writel(tail, q->db);
+}
+
+static inline int fun_irq_node(const struct fun_irq *p)
+{
+ return cpu_to_mem(cpumask_first(&p->affinity_mask));
+}
+
+int fun_rxq_napi_poll(struct napi_struct *napi, int budget);
+int fun_txq_napi_poll(struct napi_struct *napi, int budget);
+netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len);
+int fun_xdp_xmit_frames(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags);
+
+int funeth_txq_create(struct net_device *dev, unsigned int qidx,
+ unsigned int ndesc, struct fun_irq *irq, int state,
+ struct funeth_txq **qp);
+int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq);
+struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state);
+int funeth_rxq_create(struct net_device *dev, unsigned int qidx,
+ unsigned int ncqe, unsigned int nrqe, struct fun_irq *irq,
+ int state, struct funeth_rxq **qp);
+int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq);
+struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state);
+int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog);
+
+#endif /* _FUNETH_TXRX_H */
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 54e51c8221b8..6cafee55efc3 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -857,8 +857,7 @@ static int gve_alloc_qpls(struct gve_priv *priv)
int i, j;
int err;
- /* Raw addressing means no QPLs */
- if (priv->queue_format == GVE_GQI_RDA_FORMAT)
+ if (num_qpls == 0)
return 0;
priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL);
@@ -901,8 +900,7 @@ static void gve_free_qpls(struct gve_priv *priv)
int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
int i;
- /* Raw addressing means no QPLs */
- if (priv->queue_format == GVE_GQI_RDA_FORMAT)
+ if (num_qpls == 0)
return;
kvfree(priv->qpl_cfg.qpl_id_map);
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index e4e98aa7745f..021bbf308d68 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -439,7 +439,7 @@ static bool gve_rx_ctx_init(struct gve_rx_ctx *ctx, struct gve_rx_ring *rx)
if (frag_size > rx->packet_buffer_size) {
packet_size_error = true;
netdev_warn(priv->dev,
- "RX fragment error: packet_buffer_size=%d, frag_size=%d, droping packet.",
+ "RX fragment error: packet_buffer_size=%d, frag_size=%d, dropping packet.",
rx->packet_buffer_size, be16_to_cpu(desc->len));
}
page_info = &rx->data.page_info[idx];
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index d7a27c244d48..54faf0f2d1d8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -887,8 +887,8 @@ static void hns_get_ethtool_stats(struct net_device *netdev,
p[21] = net_stats->rx_compressed;
p[22] = net_stats->tx_compressed;
- p[23] = netdev->rx_dropped.counter;
- p[24] = netdev->tx_dropped.counter;
+ p[23] = 0; /* was netdev->rx_dropped.counter */
+ p[24] = 0; /* was netdev->tx_dropped.counter */
p[25] = priv->tx_timeout_count;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 9298fbecb31a..6f18c9a03231 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -167,6 +167,7 @@ struct hnae3_handle;
struct hnae3_queue {
void __iomem *io_base;
+ void __iomem *mem_base;
struct hnae3_ae_algo *ae_algo;
struct hnae3_handle *handle;
int tqp_index; /* index in a handle */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index babc5d7a3b52..0b8a73c40b12 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2028,9 +2028,73 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
return bd_num;
}
+static void hns3_tx_push_bd(struct hns3_enet_ring *ring, int num)
+{
+#define HNS3_BYTES_PER_64BIT 8
+
+ struct hns3_desc desc[HNS3_MAX_PUSH_BD_NUM] = {};
+ int offset = 0;
+
+ /* make sure everything is visible to device before
+ * executing tx push or updating doorbell
+ */
+ dma_wmb();
+
+ do {
+ int idx = (ring->next_to_use - num + ring->desc_num) %
+ ring->desc_num;
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_push++;
+ u64_stats_update_end(&ring->syncp);
+ memcpy(&desc[offset], &ring->desc[idx],
+ sizeof(struct hns3_desc));
+ offset++;
+ } while (--num);
+
+ __iowrite64_copy(ring->tqp->mem_base, desc,
+ (sizeof(struct hns3_desc) * HNS3_MAX_PUSH_BD_NUM) /
+ HNS3_BYTES_PER_64BIT);
+
+ io_stop_wc();
+}
+
+static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring)
+{
+#define HNS3_MEM_DOORBELL_OFFSET 64
+
+ __le64 bd_num = cpu_to_le64((u64)ring->pending_buf);
+
+ /* make sure everything is visible to device before
+ * executing tx push or updating doorbell
+ */
+ dma_wmb();
+
+ __iowrite64_copy(ring->tqp->mem_base + HNS3_MEM_DOORBELL_OFFSET,
+ &bd_num, 1);
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_mem_doorbell += ring->pending_buf;
+ u64_stats_update_end(&ring->syncp);
+
+ io_stop_wc();
+}
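
hns3_tx_push_bd() and hns3_tx_mem_doorbell() rely on __iowrite64_copy() to move data into the device-memory window as aligned 64-bit stores, which lets a write-combining mapping emit the descriptors as one burst; io_stop_wc() then flushes the WC buffer promptly. A rough portable stand-in for the copy (plain volatile stores in place of the kernel's MMIO accessors, for illustration only):

#include <stddef.h>
#include <stdint.h>

/* stand-in for __iowrite64_copy(): 'count' aligned 64-bit word stores
 * into a (simulated) write-combining device window
 */
static void iowrite64_copy_sketch(volatile uint64_t *to,
				  const uint64_t *from, size_t count)
{
	while (count--)
		*to++ = *from++;
}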
+
static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
bool doorbell)
{
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ /* when tx push is enabled, packets whose BD count is no more than
+ * HNS3_MAX_PUSH_BD_NUM can be pushed directly.
+ */
+ if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
+ !ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
+ hns3_tx_push_bd(ring, num);
+ WRITE_ONCE(ring->last_to_use, ring->next_to_use);
+ return;
+ }
+
ring->pending_buf += num;
if (!doorbell) {
@@ -2038,11 +2102,12 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
return;
}
- if (!ring->pending_buf)
- return;
+ if (ring->tqp->mem_base)
+ hns3_tx_mem_doorbell(ring);
+ else
+ writel(ring->pending_buf,
+ ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
- writel(ring->pending_buf,
- ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
ring->pending_buf = 0;
WRITE_ONCE(ring->last_to_use, ring->next_to_use);
}
@@ -2732,6 +2797,9 @@ static void hns3_dump_queue_stats(struct net_device *ndev,
"seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n",
tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more,
tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
+
+ netdev_info(ndev, "tx_push: %llu, tx_mem_doorbell: %llu\n",
+ tx_ring->stats.tx_push, tx_ring->stats.tx_mem_doorbell);
}
static void hns3_dump_queue_reg(struct net_device *ndev,
@@ -5094,6 +5162,9 @@ static void hns3_state_init(struct hnae3_handle *handle)
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+ if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
+ set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index a05a0c7423ce..4a3253692dcc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -7,6 +7,7 @@
#include <linux/dim.h>
#include <linux/if_vlan.h>
#include <net/page_pool.h>
+#include <asm/barrier.h>
#include "hnae3.h"
@@ -25,9 +26,12 @@ enum hns3_nic_state {
HNS3_NIC_STATE2_RESET_REQUESTED,
HNS3_NIC_STATE_HW_TX_CSUM_ENABLE,
HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE,
+ HNS3_NIC_STATE_TX_PUSH_ENABLE,
HNS3_NIC_STATE_MAX
};
+#define HNS3_MAX_PUSH_BD_NUM 2
+
#define HNS3_RING_RX_RING_BASEADDR_L_REG 0x00000
#define HNS3_RING_RX_RING_BASEADDR_H_REG 0x00004
#define HNS3_RING_RX_RING_BD_NUM_REG 0x00008
@@ -410,6 +414,8 @@ struct ring_stats {
u64 tx_pkts;
u64 tx_bytes;
u64 tx_more;
+ u64 tx_push;
+ u64 tx_mem_doorbell;
u64 restart_queue;
u64 tx_busy;
u64 tx_copy;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index c06c39ece80d..6469238ae090 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -23,6 +23,8 @@ static const struct hns3_stats hns3_txq_stats[] = {
HNS3_TQP_STAT("packets", tx_pkts),
HNS3_TQP_STAT("bytes", tx_bytes),
HNS3_TQP_STAT("more", tx_more),
+ HNS3_TQP_STAT("push", tx_push),
+ HNS3_TQP_STAT("mem_doorbell", tx_mem_doorbell),
HNS3_TQP_STAT("wake", restart_queue),
HNS3_TQP_STAT("busy", tx_busy),
HNS3_TQP_STAT("copy", tx_copy),
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 24f7afacae02..78d0498bdabc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -1643,6 +1643,7 @@ static int hclge_config_gro(struct hclge_dev *hdev)
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_comm_tqp *tqp;
int i;
@@ -1676,6 +1677,14 @@ static int hclge_alloc_tqps(struct hclge_dev *hdev)
(i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
HCLGE_TQP_REG_SIZE;
+ /* when the device supports tx push and has device memory,
+ * the queue can run in push mode or doorbell mode on the
+ * device memory.
+ */
+ if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
+ tqp->q.mem_base = hdev->hw.hw.mem_base +
+ HCLGE_TQP_MEM_OFFSET(hdev, i);
+
tqp++;
}
@@ -11008,8 +11017,6 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
static int hclge_dev_mem_map(struct hclge_dev *hdev)
{
-#define HCLGE_MEM_BAR 4
-
struct pci_dev *pdev = hdev->pdev;
struct hclge_hw *hw = &hdev->hw;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index adfb26e79262..fc92ae385e30 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -169,6 +169,14 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_VECTOR0_ALL_MSIX_ERR_B 6U
#define HCLGE_TRIGGER_IMP_RESET_B 7U
+#define HCLGE_TQP_MEM_SIZE 0x10000
+#define HCLGE_MEM_BAR 4
+/* in BAR4, the first half is for RoCE and the second half is for NIC */
+#define HCLGE_NIC_MEM_OFFSET(hdev) \
+ (pci_resource_len((hdev)->pdev, HCLGE_MEM_BAR) >> 1)
+#define HCLGE_TQP_MEM_OFFSET(hdev, i) \
+ (HCLGE_NIC_MEM_OFFSET(hdev) + HCLGE_TQP_MEM_SIZE * (i))
+
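
As a worked example (BAR size assumed for illustration): with a 32 MiB BAR4, HCLGE_NIC_MEM_OFFSET is 32 MiB >> 1 = 16 MiB = 0x1000000, so queue 0's 64 KiB window starts at 0x1000000 and queue i's at 0x1000000 + i * 0x10000.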
#define HCLGE_MAC_DEFAULT_FRAME \
(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME 64
@@ -1060,11 +1068,6 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue)
return tqp->index;
}
-static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
-{
- return !!hdev->reset_pending;
-}
-
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 089f4444b7e3..1f87a8a3fe32 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1225,7 +1225,7 @@ static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
if (ret == -EOPNOTSUPP) {
dev_warn(&hdev->pdev->dev,
- "fw %08x does't support ets tc weight cmd\n",
+ "fw %08x doesn't support ets tc weight cmd\n",
hdev->fw_version);
ret = 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 21442a9bb996..93389bec8d89 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -321,6 +321,7 @@ static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_comm_tqp *tqp;
int i;
@@ -354,6 +355,14 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
(i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
HCLGEVF_TQP_REG_SIZE;
+ /* when the device supports tx push and has device memory,
+ * the queue can run in push mode or doorbell mode on the
+ * device memory.
+ */
+ if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
+ tqp->q.mem_base = hdev->hw.hw.mem_base +
+ HCLGEVF_TQP_MEM_OFFSET(hdev, i);
+
tqp++;
}
@@ -2546,8 +2555,6 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client,
static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
{
-#define HCLGEVF_MEM_BAR 4
-
struct pci_dev *pdev = hdev->pdev;
struct hclgevf_hw *hw = &hdev->hw;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 502ca1ce1a90..4b00fd44f118 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -96,6 +96,14 @@
#define HCLGEVF_RSS_IND_TBL_SIZE 512
+#define HCLGEVF_TQP_MEM_SIZE 0x10000
+#define HCLGEVF_MEM_BAR 4
+/* in BAR4, the first half is for RoCE and the second half is for NIC */
+#define HCLGEVF_NIC_MEM_OFFSET(hdev) \
+ (pci_resource_len((hdev)->pdev, HCLGEVF_MEM_BAR) >> 1)
+#define HCLGEVF_TQP_MEM_OFFSET(hdev, i) \
+ (HCLGEVF_NIC_MEM_OFFSET(hdev) + HCLGEVF_TQP_MEM_SIZE * (i))
+
#define HCLGEVF_MAC_MAX_FRAME 9728
#define HCLGEVF_STATS_TIMER_INTERVAL 36U
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.h b/drivers/net/ethernet/i825xx/sun3_82586.h
index 79aef681ac85..d82eca563266 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.h
+++ b/drivers/net/ethernet/i825xx/sun3_82586.h
@@ -150,7 +150,7 @@ struct rfd_struct
#define RFD_ERR_RNR 0x02 /* status: receiver out of resources */
#define RFD_ERR_OVR 0x01 /* DMA Overrun! */
-#define RFD_ERR_FTS 0x0080 /* Frame to short */
+#define RFD_ERR_FTS 0x0080 /* Frame too short */
#define RFD_ERR_NEOP 0x0040 /* No EOP flag (for bitstuffing only) */
#define RFD_ERR_TRUN 0x0020 /* (82596 only/SF mode) indicates truncated frame */
#define RFD_MATCHADD 0x0002 /* status: Destinationaddress !matches IA (only 82596) */
@@ -250,7 +250,7 @@ struct mcsetup_cmd_struct
unsigned short cmd_cmd;
unsigned short cmd_link;
unsigned short mc_cnt; /* number of bytes in the MC-List */
- unsigned char mc_list[0][6]; /* pointer to 6 bytes entries */
+ unsigned char mc_list[][6]; /* pointer to 6 bytes entries */
};
/*
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index dee05a353dbd..77683909ca3d 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -60,6 +60,7 @@
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
+#include <asm/xive.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
@@ -1429,6 +1430,15 @@ static int __ibmvnic_open(struct net_device *netdev)
return rc;
}
+ adapter->tx_queues_active = true;
+
+ /* Since the queues were stopped until now, nobody should be in
+ * ibmvnic_complete_tx() or ibmvnic_xmit(), so the synchronize_rcu()
+ * may not be needed; keep it for consistency with the path that
+ * sets ->tx_queues_active = false.
+ */
+ synchronize_rcu();
+
netif_tx_start_all_queues(netdev);
if (prev_state == VNIC_CLOSED) {
@@ -1603,6 +1613,14 @@ static void ibmvnic_cleanup(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
/* ensure that transmissions are stopped if called by do_reset */
+
+ adapter->tx_queues_active = false;
+
+ /* Ensure complete_tx() and ibmvnic_xmit() see ->tx_queues_active
+ * update so they don't restart a queue after we stop it below.
+ */
+ synchronize_rcu();
+
if (test_bit(0, &adapter->resetting))
netif_tx_disable(netdev);
else
@@ -1842,14 +1860,21 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
tx_buff->skb = NULL;
adapter->netdev->stats.tx_dropped++;
}
+
ind_bufp->index = 0;
+
if (atomic_sub_return(entries, &tx_scrq->used) <=
(adapter->req_tx_entries_per_subcrq / 2) &&
- __netif_subqueue_stopped(adapter->netdev, queue_num) &&
- !test_bit(0, &adapter->resetting)) {
- netif_wake_subqueue(adapter->netdev, queue_num);
- netdev_dbg(adapter->netdev, "Started queue %d\n",
- queue_num);
+ __netif_subqueue_stopped(adapter->netdev, queue_num)) {
+ rcu_read_lock();
+
+ if (adapter->tx_queues_active) {
+ netif_wake_subqueue(adapter->netdev, queue_num);
+ netdev_dbg(adapter->netdev, "Started queue %d\n",
+ queue_num);
+ }
+
+ rcu_read_unlock();
}
}
@@ -1904,11 +1929,12 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
int index = 0;
u8 proto = 0;
- tx_scrq = adapter->tx_scrq[queue_num];
- txq = netdev_get_tx_queue(netdev, queue_num);
- ind_bufp = &tx_scrq->ind_buf;
-
- if (test_bit(0, &adapter->resetting)) {
+ /* If a reset is in progress, drop the packet since
+ * the scrqs may get torn down. Otherwise use RCU to
+ * ensure the reset waits for us to complete.
+ */
+ rcu_read_lock();
+ if (!adapter->tx_queues_active) {
dev_kfree_skb_any(skb);
tx_send_failed++;
@@ -1917,6 +1943,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
goto out;
}
+ tx_scrq = adapter->tx_scrq[queue_num];
+ txq = netdev_get_tx_queue(netdev, queue_num);
+ ind_bufp = &tx_scrq->ind_buf;
+
if (ibmvnic_xmit_workarounds(skb, netdev)) {
tx_dropped++;
tx_send_failed++;
@@ -1924,6 +1954,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
ibmvnic_tx_scrq_flush(adapter, tx_scrq);
goto out;
}
+
if (skb_is_gso(skb))
tx_pool = &adapter->tso_pool[queue_num];
else
@@ -2078,6 +2109,7 @@ tx_err:
netif_carrier_off(netdev);
}
out:
+ rcu_read_unlock();
netdev->stats.tx_dropped += tx_dropped;
netdev->stats.tx_bytes += tx_bytes;
netdev->stats.tx_packets += tx_packets;
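
The tx_queues_active handshake above is the classic flag-plus-grace-period pattern: the reset path clears the flag and then waits out every reader that might still have seen the old value. A standalone sketch with userspace RCU (liburcu), assuming its default memb flavor exposed via <urcu.h>; names are illustrative:

#include <urcu.h>		/* liburcu; threads must first call rcu_register_thread() */
#include <stdbool.h>

static bool tx_active;		/* stands in for ->tx_queues_active */

/* xmit/clean side: check the flag inside a read-side section */
static bool try_xmit(void)
{
	bool ok;

	rcu_read_lock();
	ok = tx_active;
	if (ok) {
		/* safe to touch queues: reset waits for this section */
	}
	rcu_read_unlock();
	return ok;
}

/* reset side: flip the flag, then wait for all in-flight readers */
static void stop_tx(void)
{
	tx_active = false;
	synchronize_rcu();	/* no reader can still act on 'true' */
}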
@@ -2213,6 +2245,19 @@ static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
}
/*
+ * Initialize the init_done completion and return code values. We
+ * can get a transport event just after registering the CRQ and the
+ * tasklet will use this to communicate the transport event. To ensure
+ * we don't miss the notification/error, initialize these _before_
+ * registering the CRQ.
+ */
+static inline void reinit_init_done(struct ibmvnic_adapter *adapter)
+{
+ reinit_completion(&adapter->init_done);
+ adapter->init_done_rc = 0;
+}
+
+/*
* do_reset returns zero if we are able to keep processing reset events, or
* non-zero if we hit a fatal error and must halt.
*/
@@ -2318,6 +2363,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
+ reinit_init_done(adapter);
+
if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
rc = init_crq_queue(adapter);
} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
@@ -2461,7 +2508,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
- reinit_completion(&adapter->init_done);
+ reinit_init_done(adapter);
+
rc = init_crq_queue(adapter);
if (rc) {
netdev_err(adapter->netdev,
@@ -2602,23 +2650,82 @@ out:
static void __ibmvnic_reset(struct work_struct *work)
{
struct ibmvnic_adapter *adapter;
- bool saved_state = false;
+ unsigned int timeout = 5000;
struct ibmvnic_rwi *tmprwi;
+ bool saved_state = false;
struct ibmvnic_rwi *rwi;
unsigned long flags;
- u32 reset_state;
+ struct device *dev;
+ bool need_reset;
int num_fails = 0;
+ u32 reset_state;
int rc = 0;
adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
+ dev = &adapter->vdev->dev;
- if (test_and_set_bit_lock(0, &adapter->resetting)) {
+ /* Wait for ibmvnic_probe() to complete. If probe is taking too long
+ * or if another reset is in progress, defer work for now. If probe
+ * eventually fails it will flush and terminate our work.
+ *
+ * Three possibilities here:
+ * 1. Adapter being removed - just return
+ * 2. Timed out on probe or another reset in progress - delay the work
+ * 3. Completed probe - perform any resets in queue
+ */
+ if (adapter->state == VNIC_PROBING &&
+ !wait_for_completion_timeout(&adapter->probe_done, timeout)) {
+ dev_err(dev, "Reset thread timed out on probe");
queue_delayed_work(system_long_wq,
&adapter->ibmvnic_delayed_reset,
IBMVNIC_RESET_DELAY);
return;
}
+ /* adapter is done with probe (i.e state is never VNIC_PROBING now) */
+ if (adapter->state == VNIC_REMOVING)
+ return;
+
+ /* ->rwi_list is stable now (no one else is removing entries) */
+
+ /* ibmvnic_probe() may have purged the reset queue after we were
+ * scheduled to process a reset, so there may be no resets to process.
+ * Before setting the ->resetting bit though, we have to make sure
+ * that there is in fact a reset to process. Otherwise we may race
+ * with ibmvnic_open() and end up leaving the vnic down:
+ *
+ * __ibmvnic_reset() ibmvnic_open()
+ * ----------------- --------------
+ *
+ * set ->resetting bit
+ * find ->resetting bit is set
+ * set ->state to IBMVNIC_OPEN (i.e
+ * assume reset will open device)
+ * return
+ * find reset queue empty
+ * return
+ *
+ * Neither performed vnic login/open and vnic stays down
+ *
+ * If we hold the lock and conditionally set the bit, either we
+ * or ibmvnic_open() will complete the open.
+ */
+ need_reset = false;
+ spin_lock(&adapter->rwi_lock);
+ if (!list_empty(&adapter->rwi_list)) {
+ if (test_and_set_bit_lock(0, &adapter->resetting)) {
+ queue_delayed_work(system_long_wq,
+ &adapter->ibmvnic_delayed_reset,
+ IBMVNIC_RESET_DELAY);
+ } else {
+ need_reset = true;
+ }
+ }
+ spin_unlock(&adapter->rwi_lock);
+
+ if (!need_reset)
+ return;
+
rwi = get_next_rwi(adapter);
while (rwi) {
spin_lock_irqsave(&adapter->state_lock, flags);
@@ -2735,12 +2842,23 @@ static void __ibmvnic_delayed_reset(struct work_struct *work)
__ibmvnic_reset(&adapter->ibmvnic_reset);
}
+static void flush_reset_queue(struct ibmvnic_adapter *adapter)
+{
+ struct list_head *entry, *tmp_entry;
+
+ if (!list_empty(&adapter->rwi_list)) {
+ list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
+ list_del(entry);
+ kfree(list_entry(entry, struct ibmvnic_rwi, list));
+ }
+ }
+}
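
flush_reset_queue() uses the list_for_each_safe() idiom: the successor is saved before the current entry is unlinked and freed, so deletion during iteration is safe. The same shape on a plain singly linked list:

#include <stdlib.h>

struct node { struct node *next; };

/* free every node: grab the successor before the current entry dies */
static void flush_list(struct node **head)
{
	struct node *entry = *head, *tmp;

	while (entry) {
		tmp = entry->next;	/* save next first */
		free(entry);		/* now 'entry' may go away */
		entry = tmp;
	}
	*head = NULL;
}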
+
static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
enum ibmvnic_reset_reason reason)
{
- struct list_head *entry, *tmp_entry;
- struct ibmvnic_rwi *rwi, *tmp;
struct net_device *netdev = adapter->netdev;
+ struct ibmvnic_rwi *rwi, *tmp;
unsigned long flags;
int ret;
@@ -2759,13 +2877,6 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
goto err;
}
- if (adapter->state == VNIC_PROBING) {
- netdev_warn(netdev, "Adapter reset during probe\n");
- adapter->init_done_rc = -EAGAIN;
- ret = EAGAIN;
- goto err;
- }
-
list_for_each_entry(tmp, &adapter->rwi_list, list) {
if (tmp->reset_reason == reason) {
netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
@@ -2783,10 +2894,9 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
/* if we just received a transport event,
* flush reset queue and process this reset
*/
- if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
- list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
- list_del(entry);
- }
+ if (adapter->force_reset_recovery)
+ flush_reset_queue(adapter);
+
rwi->reset_reason = reason;
list_add_tail(&rwi->list, &adapter->rwi_list);
netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
@@ -3562,6 +3672,30 @@ static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
return rc;
}
+/* We cannot use the IRQ chip EOI handler because that has the
+ * unintended effect of changing the interrupt priority.
+ */
+static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq)
+{
+ u64 val = 0xff000000 | scrq->hw_irq;
+ unsigned long rc;
+
+ rc = plpar_hcall_norets(H_EOI, val);
+ if (rc)
+ dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", val, rc);
+}
+
+/* Due to a firmware bug, the hypervisor can send an interrupt to a
+ * transmit or receive queue just prior to a partition migration.
+ * Force an EOI after migration.
+ */
+static void ibmvnic_clear_pending_interrupt(struct device *dev,
+ struct ibmvnic_sub_crq_queue *scrq)
+{
+ if (!xive_enabled())
+ ibmvnic_xics_eoi(dev, scrq);
+}
+
static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq)
{
@@ -3575,15 +3709,7 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
if (test_bit(0, &adapter->resetting) &&
adapter->reset_reason == VNIC_RESET_MOBILITY) {
- u64 val = (0xff000000) | scrq->hw_irq;
-
- rc = plpar_hcall_norets(H_EOI, val);
- /* H_EOI would fail with rc = H_FUNCTION when running
- * in XIVE mode which is expected, but not an error.
- */
- if (rc && (rc != H_FUNCTION))
- dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
- val, rc);
+ ibmvnic_clear_pending_interrupt(dev, scrq);
}
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
@@ -3654,9 +3780,15 @@ restart_loop:
(adapter->req_tx_entries_per_subcrq / 2) &&
__netif_subqueue_stopped(adapter->netdev,
scrq->pool_index)) {
- netif_wake_subqueue(adapter->netdev, scrq->pool_index);
- netdev_dbg(adapter->netdev, "Started queue %d\n",
- scrq->pool_index);
+ rcu_read_lock();
+ if (adapter->tx_queues_active) {
+ netif_wake_subqueue(adapter->netdev,
+ scrq->pool_index);
+ netdev_dbg(adapter->netdev,
+ "Started queue %d\n",
+ scrq->pool_index);
+ }
+ rcu_read_unlock();
}
}
@@ -5321,9 +5453,9 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
}
if (!completion_done(&adapter->init_done)) {
- complete(&adapter->init_done);
if (!adapter->init_done_rc)
adapter->init_done_rc = -EAGAIN;
+ complete(&adapter->init_done);
}
break;
@@ -5346,6 +5478,13 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
adapter->fw_done_rc = -EIO;
complete(&adapter->fw_done);
}
+
+ /* if we got here during crq-init, retry crq-init */
+ if (!completion_done(&adapter->init_done)) {
+ adapter->init_done_rc = -EAGAIN;
+ complete(&adapter->init_done);
+ }
+
if (!completion_done(&adapter->stats_done))
complete(&adapter->stats_done);
if (test_bit(0, &adapter->resetting))
@@ -5662,10 +5801,6 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
adapter->from_passive_init = false;
- if (reset)
- reinit_completion(&adapter->init_done);
-
- adapter->init_done_rc = 0;
rc = ibmvnic_send_crq_init(adapter);
if (rc) {
dev_err(dev, "Send crq init failed with error %d\n", rc);
@@ -5679,12 +5814,14 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
if (adapter->init_done_rc) {
release_crq_queue(adapter);
+ dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc);
return adapter->init_done_rc;
}
if (adapter->from_passive_init) {
adapter->state = VNIC_OPEN;
adapter->from_passive_init = false;
+ dev_err(dev, "CRQ-init failed, passive-init\n");
return -EINVAL;
}
@@ -5724,6 +5861,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
unsigned char *mac_addr_p;
+ unsigned long flags;
bool init_success;
int rc;
@@ -5768,6 +5906,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
spin_lock_init(&adapter->rwi_lock);
spin_lock_init(&adapter->state_lock);
mutex_init(&adapter->fw_lock);
+ init_completion(&adapter->probe_done);
init_completion(&adapter->init_done);
init_completion(&adapter->fw_done);
init_completion(&adapter->reset_done);
@@ -5778,6 +5917,33 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
init_success = false;
do {
+ reinit_init_done(adapter);
+
+ /* clear any failovers we got in the previous pass
+ * since we are reinitializing the CRQ
+ */
+ adapter->failover_pending = false;
+
+ /* If we had already initialized CRQ, we may have one or
+ * more resets queued already. Discard those and release
+ * the CRQ before initializing the CRQ again.
+ */
+ release_crq_queue(adapter);
+
+ /* Since we are still in PROBING state, __ibmvnic_reset()
+ * will not access the ->rwi_list and since we released CRQ,
+ * we won't get _new_ transport events. But there may be an
+ * ongoing ibmvnic_reset() call. So serialize access to
+ * rwi_list. If we win the race, ibmvnic_reset() could add
+ * a reset after we purged but that's ok - we just may end
+ * up with an extra reset (i.e., similar to having two or more
+ * resets in the queue at once).
+ */
+ spin_lock_irqsave(&adapter->rwi_lock, flags);
+ flush_reset_queue(adapter);
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
+
rc = init_crq_queue(adapter);
if (rc) {
dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
@@ -5809,12 +5975,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
goto ibmvnic_dev_file_err;
netif_carrier_off(netdev);
- rc = register_netdev(netdev);
- if (rc) {
- dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
- goto ibmvnic_register_fail;
- }
- dev_info(&dev->dev, "ibmvnic registered\n");
if (init_success) {
adapter->state = VNIC_PROBED;
@@ -5827,6 +5987,16 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->wait_for_reset = false;
adapter->last_reset_time = jiffies;
+
+ rc = register_netdev(netdev);
+ if (rc) {
+ dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
+ goto ibmvnic_register_fail;
+ }
+ dev_info(&dev->dev, "ibmvnic registered\n");
+
+ complete(&adapter->probe_done);
+
return 0;
ibmvnic_register_fail:
@@ -5841,6 +6011,17 @@ ibmvnic_stats_fail:
ibmvnic_init_fail:
release_sub_crqs(adapter, 1);
release_crq_queue(adapter);
+
+ /* cleanup worker thread after releasing CRQ so we don't get
+ * transport events (i.e., new work items for the worker thread).
+ */
+ adapter->state = VNIC_REMOVING;
+ complete(&adapter->probe_done);
+ flush_work(&adapter->ibmvnic_reset);
+ flush_delayed_work(&adapter->ibmvnic_delayed_reset);
+
+ flush_reset_queue(adapter);
+
mutex_destroy(&adapter->fw_lock);
free_netdev(netdev);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 4a7a56ff74ce..8f5cefb932dd 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -930,6 +930,7 @@ struct ibmvnic_adapter {
struct ibmvnic_tx_pool *tx_pool;
struct ibmvnic_tx_pool *tso_pool;
+ struct completion probe_done;
struct completion init_done;
int init_done_rc;
@@ -1005,11 +1006,14 @@ struct ibmvnic_adapter {
struct work_struct ibmvnic_reset;
struct delayed_work ibmvnic_delayed_reset;
unsigned long resetting;
- bool napi_enabled, from_passive_init;
- bool login_pending;
/* last device reset time */
unsigned long last_reset_time;
+ bool napi_enabled;
+ bool from_passive_init;
+ bool login_pending;
+ /* protected by rcu */
+ bool tx_queues_active;
bool failover_pending;
bool force_reset_recovery;
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index bcf680e83811..13382df2f2ef 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -630,6 +630,7 @@ struct e1000_phy_info {
bool disable_polarity_correction;
bool is_mdix;
bool polarity_correction;
+ bool reset_disable;
bool speed_downgraded;
bool autoneg_wait_to_complete;
};
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index c908c84b86d2..d60e2016d03c 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -2050,6 +2050,10 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
bool blocked = false;
int i = 0;
+ /* Check the PHY (LCD) reset flag */
+ if (hw->phy.reset_disable)
+ return true;
+
while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
(i++ < 30))
usleep_range(10000, 11000);
@@ -4136,9 +4140,9 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
return ret_val;
if (!(data & valid_csum_mask)) {
- e_dbg("NVM Checksum Invalid\n");
+ e_dbg("NVM Checksum valid bit not set\n");
- if (hw->mac.type < e1000_pch_cnp) {
+ if (hw->mac.type < e1000_pch_tgp) {
data |= valid_csum_mask;
ret_val = e1000_write_nvm(hw, word, 1, &data);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 2504b11c3169..638a3ddd7ada 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -271,6 +271,7 @@
#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
#define I217_MEMPWR PHY_REG(772, 26)
#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
+#define I217_MEMPWR_MOEM 0x1000
/* Receive Address Initial CRC Calculation */
#define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4))
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a42aeb555f34..fa06f68c8c80 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6987,8 +6987,21 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 phy_data;
int rc;
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
+ hw->mac.type >= e1000_pch_adp) {
+ /* Mask OEM Bits / Gig Disable / Restart AN (772_26[12] = 1) */
+ e1e_rphy(hw, I217_MEMPWR, &phy_data);
+ phy_data |= I217_MEMPWR_MOEM;
+ e1e_wphy(hw, I217_MEMPWR, phy_data);
+
+ /* Disable LCD reset */
+ hw->phy.reset_disable = true;
+ }
+
e1000e_flush_lpic(pdev);
e1000e_pm_freeze(dev);
@@ -7010,6 +7023,8 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 phy_data;
int rc;
/* Introduce S0ix implementation */
@@ -7020,6 +7035,17 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
if (rc)
return rc;
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
+ hw->mac.type >= e1000_pch_adp) {
+ /* Unmask OEM Bits / Gig Disable / Restart AN 772_26[12] = 0 */
+ e1e_rphy(hw, I217_MEMPWR, &phy_data);
+ phy_data &= ~I217_MEMPWR_MOEM;
+ e1e_wphy(hw, I217_MEMPWR, phy_data);
+
+ /* Enable LCD reset */
+ hw->phy.reset_disable = false;
+ }
+
return e1000e_pm_thaw(dev);
}
@@ -7388,9 +7414,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
resource_size_t flash_start, flash_len;
static int cards_found;
u16 aspm_disable_flag = 0;
- int bars, i, err, pci_using_dac;
u16 eeprom_data = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
+ int bars, i, err;
s32 ret_val = 0;
if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
@@ -7404,17 +7430,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
bars = pci_select_bars(pdev, IORESOURCE_MEM);
@@ -7550,10 +7570,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_UNICAST_FLT;
- if (pci_using_dac) {
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
- }
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
/* MTU range: 68 - max_hw_frame_size */
netdev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 0f0efee5fc8e..fd07c3679bb1 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -146,11 +146,11 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
break;
}
if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Read did not complete\n");
+ e_dbg("MDI Read PHY Reg Address %d did not complete\n", offset);
return -E1000_ERR_PHY;
}
if (mdic & E1000_MDIC_ERROR) {
- e_dbg("MDI Error\n");
+ e_dbg("MDI Read PHY Reg Address %d Error\n", offset);
return -E1000_ERR_PHY;
}
if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
@@ -210,11 +210,11 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
break;
}
if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Write did not complete\n");
+ e_dbg("MDI Write PHY Reg Address %d did not complete\n", offset);
return -E1000_ERR_PHY;
}
if (mdic & E1000_MDIC_ERROR) {
- e_dbg("MDI Error\n");
+ e_dbg("MDI Write PHY Red Address %d Error\n", offset);
return -E1000_ERR_PHY;
}
if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 80c5cecaf2b5..55c6bce5da61 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -854,6 +854,10 @@ struct i40e_vsi {
u64 tx_force_wb;
u64 rx_buf_failed;
u64 rx_page_failed;
+ u64 rx_page_reuse;
+ u64 rx_page_alloc;
+ u64 rx_page_waive;
+ u64 rx_page_busy;
/* These are containers of ring pointers, allocated at run-time */
struct i40e_ring **rx_rings;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 7abef88801fb..42439f725aa4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -769,7 +769,7 @@ static bool i40e_asq_done(struct i40e_hw *hw)
}
/**
- * i40e_asq_send_command_atomic - send command to Admin Queue
+ * i40e_asq_send_command_atomic_exec - send command to Admin Queue
* @hw: pointer to the hw struct
* @desc: prefilled descriptor describing the command (non DMA mem)
* @buff: buffer to use for indirect commands
@@ -780,11 +780,13 @@ static bool i40e_asq_done(struct i40e_hw *hw)
* This is the main send command driver routine for the Admin Queue send
* queue. It runs the queue, cleans the queue, etc
**/
-i40e_status
-i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */ u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details,
- bool is_atomic_context)
+static i40e_status
+i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context)
{
i40e_status status = 0;
struct i40e_dma_mem *dma_buff = NULL;
@@ -794,8 +796,6 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
u16 retval = 0;
u32 val = 0;
- mutex_lock(&hw->aq.asq_mutex);
-
if (hw->aq.asq.count == 0) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: Admin queue not initialized.\n");
@@ -969,6 +969,36 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
}
asq_send_command_error:
+ return status;
+}
+
+/**
+ * i40e_asq_send_command_atomic - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ * @is_atomic_context: is the function called in an atomic context?
+ *
+ * Acquires the lock and calls the main send command execution
+ * routine.
+ **/
+i40e_status
+i40e_asq_send_command_atomic(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context)
+{
+ i40e_status status;
+
+ mutex_lock(&hw->aq.asq_mutex);
+ status = i40e_asq_send_command_atomic_exec(hw, desc, buff, buff_size,
+ cmd_details,
+ is_atomic_context);
+
mutex_unlock(&hw->aq.asq_mutex);
return status;
}
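
The refactor above moves the mutex out of the execution body into thin wrappers, so variants like _v2 can capture extra state (asq_last_status) while still under the lock, without retaking it. The generic shape, sketched with pthreads (hypothetical names):

#include <pthread.h>

static pthread_mutex_t asq_mutex = PTHREAD_MUTEX_INITIALIZER;

/* lock-free body: caller must hold asq_mutex */
static int send_cmd_exec(const void *desc)
{
	(void)desc;
	/* ... queue-touching work ... */
	return 0;
}

/* public entry point: take the lock once, at the outermost boundary */
static int send_cmd(const void *desc)
{
	int status;

	pthread_mutex_lock(&asq_mutex);
	status = send_cmd_exec(desc);
	/* a _v2-style wrapper would read shared status here, still locked */
	pthread_mutex_unlock(&asq_mutex);
	return status;
}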
@@ -983,6 +1013,52 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
}
/**
+ * i40e_asq_send_command_atomic_v2 - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ * @is_atomic_context: is the function called in an atomic context?
+ * @aq_status: pointer to Admin Queue status return value
+ *
+ * Acquires the lock and calls the main send command execution
+ * routine. Returns the last Admin Queue status in aq_status
+ * to avoid race conditions in access to hw->aq.asq_last_status.
+ **/
+i40e_status
+i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context,
+ enum i40e_admin_queue_err *aq_status)
+{
+ i40e_status status;
+
+ mutex_lock(&hw->aq.asq_mutex);
+ status = i40e_asq_send_command_atomic_exec(hw, desc, buff,
+ buff_size,
+ cmd_details,
+ is_atomic_context);
+ if (aq_status)
+ *aq_status = hw->aq.asq_last_status;
+ mutex_unlock(&hw->aq.asq_mutex);
+ return status;
+}
+
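+/**
+ * i40e_asq_send_command_v2 - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ * @aq_status: pointer to Admin Queue status return value
+ *
+ * Wrapper around i40e_asq_send_command_atomic_v2() that also
+ * returns the last Admin Queue status in aq_status.
+ **/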
+i40e_status
+i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status)
+{
+ return i40e_asq_send_command_atomic_v2(hw, desc, buff, buff_size,
+ cmd_details, true, aq_status);
+}
+
+/**
* i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
* @desc: pointer to the temp descriptor (non DMA mem)
* @opcode: the opcode can be used to decide which flags to turn off or on
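
The adminq refactor above has two moving parts: the function body migrates into a static i40e_asq_send_command_atomic_exec() that runs with asq_mutex already held, and the exported entry points shrink to lock, delegate, unlock. The _v2 wrapper additionally snapshots hw->aq.asq_last_status into a caller-owned variable before dropping the mutex, so the return code and the AQ status always describe the same command. A minimal userspace model of the pattern; the pthread mutex, the names, and the status values are illustrative stand-ins, not driver code:

```c
#include <pthread.h>

struct hw {
	pthread_mutex_t asq_mutex;
	int asq_last_status;		/* models hw->aq.asq_last_status */
};

/* Lock-free body, the counterpart of the new *_exec() helper;
 * must only run with asq_mutex held. */
static int send_command_exec(struct hw *hw)
{
	hw->asq_last_status = 0;	/* stand-in for I40E_AQ_RC_OK */
	return 0;
}

/* Plain entry point: lock, delegate, unlock. If the caller reads
 * hw->asq_last_status afterwards, another thread may already have
 * overwritten it. */
int send_command(struct hw *hw)
{
	int status;

	pthread_mutex_lock(&hw->asq_mutex);
	status = send_command_exec(hw);
	pthread_mutex_unlock(&hw->asq_mutex);
	return status;
}

/* _v2 entry point: snapshot the shared status into a caller-owned
 * variable while the mutex is still held, so the (return code,
 * AQ status) pair always belongs to this command. */
int send_command_v2(struct hw *hw, int *aq_status)
{
	int status;

	pthread_mutex_lock(&hw->asq_mutex);
	status = send_command_exec(hw);
	if (aq_status)
		*aq_status = hw->asq_last_status;
	pthread_mutex_unlock(&hw->asq_mutex);
	return status;
}

int main(void)
{
	struct hw hw = { .asq_mutex = PTHREAD_MUTEX_INITIALIZER };
	int aq_status;
	int status;

	status = send_command(&hw);
	status |= send_command_v2(&hw, &aq_status);
	return status;
}
```

The payoff of the _exec split is that every locking variant shares one body; adding another reporting scheme later means another thin wrapper, not another copy of the queue-handling logic.
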
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 9ddeb015eb7e..6aefffd83615 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1899,8 +1899,9 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
- sizeof(vsi_ctx->info), cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info),
+ cmd_details, true);
if (status)
goto aq_add_vsi_exit;
@@ -2287,8 +2288,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
- sizeof(vsi_ctx->info), cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info),
+ cmd_details, true);
vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
@@ -2632,33 +2634,28 @@ get_veb_exit:
}
/**
- * i40e_aq_add_macvlan
- * @hw: pointer to the hw struct
- * @seid: VSI for the mac address
+ * i40e_prepare_add_macvlan
* @mv_list: list of macvlans to be added
+ * @desc: pointer to AQ descriptor structure
* @count: length of the list
- * @cmd_details: pointer to command details structure or NULL
+ * @seid: VSI for the mac address
*
- * Add MAC/VLAN addresses to the HW filtering
+ * Internal helper function that prepares the add macvlan request
+ * and returns the buffer size.
**/
-i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_add_macvlan_element_data *mv_list,
- u16 count, struct i40e_asq_cmd_details *cmd_details)
+static u16
+i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
+ struct i40e_aq_desc *desc, u16 count, u16 seid)
{
- struct i40e_aq_desc desc;
struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc.params.raw;
- i40e_status status;
+ (struct i40e_aqc_macvlan *)&desc->params.raw;
u16 buf_size;
int i;
- if (count == 0 || !mv_list || !hw)
- return I40E_ERR_PARAM;
-
buf_size = count * sizeof(*mv_list);
/* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
+ i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan);
cmd->num_addresses = cpu_to_le16(count);
cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
cmd->seid[1] = 0;
@@ -2669,14 +2666,71 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
mv_list[i].flags |=
cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
- status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
- cmd_details);
+ return buf_size;
+}
- return status;
+/**
+ * i40e_aq_add_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add MAC/VLAN addresses to the HW filtering
+ **/
+i40e_status
+i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
+
+ return i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
+ cmd_details, true);
+}
+
+/**
+ * i40e_aq_add_macvlan_v2
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ * @aq_status: pointer to Admin Queue status return value
+ *
+ * Add MAC/VLAN addresses to the HW filtering.
+ * The _v2 version returns the last Admin Queue status in aq_status
+ * to avoid race conditions in access to hw->aq.asq_last_status.
+ * It also calls _v2 versions of asq_send_command functions to
+ * get the aq_status on the stack.
+ **/
+i40e_status
+i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status)
+{
+ struct i40e_aq_desc desc;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
+
+ return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
+ cmd_details, true, aq_status);
}
/**
@@ -2715,13 +2769,59 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
if (buf_size > I40E_AQ_LARGE_BUF)
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
- status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
- cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
+ cmd_details, true);
return status;
}
/**
+ * i40e_aq_remove_macvlan_v2
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ * @aq_status: pointer to Admin Queue status return value
+ *
+ * Remove MAC/VLAN addresses from the HW filtering.
+ * The _v2 version returns the last Admin Queue status in aq_status
+ * to avoid race conditions in access to hw->aq.asq_last_status.
+ * It also calls _v2 versions of asq_send_command functions to
+ * get the aq_status on the stack.
+ **/
+i40e_status
+i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status)
+{
+ struct i40e_aqc_macvlan *cmd;
+ struct i40e_aq_desc desc;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(*mv_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
+ cmd = (struct i40e_aqc_macvlan *)&desc.params.raw;
+ cmd->num_addresses = cpu_to_le16(count);
+ cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
+ cmd_details, true, aq_status);
+}
+
+/**
* i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
* @hw: pointer to the hw struct
* @opcode: AQ opcode for add or delete mirror rule
@@ -3868,7 +3968,8 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
cmd->seid = cpu_to_le16(seid);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
+ cmd_details, true);
return status;
}
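
i40e_prepare_add_macvlan() applies the same split to descriptor setup: a pure helper fills the descriptor and reports the indirect-buffer size, and the two public senders differ only in whether an aq_status out-parameter is plumbed through. A compilable sketch of that split under invented types, with the locked send stubbed out:

```c
#include <stdint.h>

struct aq_desc { uint16_t opcode, num_addresses, seid; };
struct mv_elem { uint8_t mac[6]; uint16_t vlan; };

/* Pure helper in the spirit of i40e_prepare_add_macvlan(): fill the
 * descriptor from the arguments and report the indirect-buffer size.
 * No shared state is touched, so every sender variant can reuse it. */
static uint16_t prepare_add_macvlan(struct aq_desc *desc,
				    uint16_t count, uint16_t seid)
{
	desc->opcode = 0x0250;		/* invented opcode value */
	desc->num_addresses = count;
	desc->seid = seid;
	return (uint16_t)(count * sizeof(struct mv_elem));
}

/* Stub for the locked Admin Queue send. */
static int asq_send(struct aq_desc *desc, void *buf, uint16_t buf_size,
		    int *aq_status)
{
	(void)desc; (void)buf; (void)buf_size;
	if (aq_status)
		*aq_status = 0;
	return 0;
}

int aq_add_macvlan(struct mv_elem *list, uint16_t count, uint16_t seid)
{
	struct aq_desc desc;
	uint16_t buf_size;

	if (!count || !list)
		return -1;
	buf_size = prepare_add_macvlan(&desc, count, seid);
	return asq_send(&desc, list, buf_size, NULL);
}

/* Only difference in the _v2 flavor: the status out-parameter. */
int aq_add_macvlan_v2(struct mv_elem *list, uint16_t count, uint16_t seid,
		      int *aq_status)
{
	struct aq_desc desc;
	uint16_t buf_size;

	if (!count || !list)
		return -1;
	buf_size = prepare_add_macvlan(&desc, count, seid);
	return asq_send(&desc, list, buf_size, aq_status);
}

int main(void)
{
	struct mv_elem list[2] = { 0 };
	int aq_status;

	return aq_add_macvlan_v2(list, 2, 5, &aq_status);
}
```
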
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 1e57cc8c47d7..be7c6f34d45c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -275,9 +275,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rx_ring->rx_stats.alloc_page_failed,
rx_ring->rx_stats.alloc_buff_failed);
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_stats: realloc_count = %lld, page_reuse_count = %lld\n",
+ " rx_rings[%i]: rx_stats: realloc_count = 0, page_reuse_count = %lld\n",
i,
- rx_ring->rx_stats.realloc_count,
rx_ring->rx_stats.page_reuse_count);
dev_info(&pf->pdev->dev,
" rx_rings[%i]: size = %i\n",
@@ -742,10 +741,8 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
vsi = pf->vsi[vf->lan_vsi_idx];
dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
- dev_info(&pf->pdev->dev, " num MDD=%lld, invalid msg=%lld, valid msg=%lld\n",
- vf->num_mdd_events,
- vf->num_invalid_msgs,
- vf->num_valid_msgs);
+ dev_info(&pf->pdev->dev, " num MDD=%lld\n",
+ vf->num_mdd_events);
} else {
dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 091f36adbbe1..e48499624d22 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -295,6 +295,10 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
I40E_VSI_STAT("tx_busy", tx_busy),
I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
+ I40E_VSI_STAT("rx_cache_reuse", rx_page_reuse),
+ I40E_VSI_STAT("rx_cache_alloc", rx_page_alloc),
+ I40E_VSI_STAT("rx_cache_waive", rx_page_waive),
+ I40E_VSI_STAT("rx_cache_busy", rx_page_busy),
};
/* These PF_STATs might look like duplicates of some NETDEV_STATs,
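
The four new strings feed the driver's table-driven ethtool stats: each I40E_VSI_STAT() entry pairs a user-visible name with a member offset, and the dump path walks the table generically. A self-contained model of that mechanism; the struct layout, macro, and names here are illustrative, not the driver's:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct vsi_stats {
	uint64_t rx_page_reuse;
	uint64_t rx_page_alloc;
	uint64_t rx_page_waive;
	uint64_t rx_page_busy;
};

/* name/offset pair, in the spirit of the I40E_VSI_STAT() entries */
struct stat_desc {
	const char *name;
	size_t offset;
};

#define VSI_STAT(n, f) { .name = (n), .offset = offsetof(struct vsi_stats, f) }

static const struct stat_desc vsi_stats_table[] = {
	VSI_STAT("rx_cache_reuse", rx_page_reuse),
	VSI_STAT("rx_cache_alloc", rx_page_alloc),
	VSI_STAT("rx_cache_waive", rx_page_waive),
	VSI_STAT("rx_cache_busy",  rx_page_busy),
};

int main(void)
{
	struct vsi_stats s = { .rx_page_reuse = 7, .rx_page_alloc = 9 };
	size_t i;

	/* the dump loop never names a field; the table drives it */
	for (i = 0; i < sizeof(vsi_stats_table) / sizeof(vsi_stats_table[0]); i++) {
		const struct stat_desc *d = &vsi_stats_table[i];
		uint64_t v = *(const uint64_t *)((const char *)&s + d->offset);

		printf("%-16s %llu\n", d->name, (unsigned long long)v);
	}
	return 0;
}
```
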
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 31b03fe78d3b..6778df2177a1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -773,6 +773,7 @@ void i40e_update_veb_stats(struct i40e_veb *veb)
**/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
+ u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy;
struct i40e_pf *pf = vsi->back;
struct rtnl_link_stats64 *ons;
struct rtnl_link_stats64 *ns; /* netdev stats */
@@ -780,7 +781,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
struct i40e_eth_stats *es; /* device's eth stats */
u64 tx_restart, tx_busy;
struct i40e_ring *p;
- u64 rx_page, rx_buf;
u64 bytes, packets;
unsigned int start;
u64 tx_linearize;
@@ -806,6 +806,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
rx_page = 0;
rx_buf = 0;
+ rx_reuse = 0;
+ rx_alloc = 0;
+ rx_waive = 0;
+ rx_busy = 0;
rcu_read_lock();
for (q = 0; q < vsi->num_queue_pairs; q++) {
/* locate Tx ring */
@@ -839,6 +843,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rx_p += packets;
rx_buf += p->rx_stats.alloc_buff_failed;
rx_page += p->rx_stats.alloc_page_failed;
+ rx_reuse += p->rx_stats.page_reuse_count;
+ rx_alloc += p->rx_stats.page_alloc_count;
+ rx_waive += p->rx_stats.page_waive_count;
+ rx_busy += p->rx_stats.page_busy_count;
if (i40e_enabled_xdp_vsi(vsi)) {
/* locate XDP ring */
@@ -866,6 +874,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
vsi->tx_force_wb = tx_force_wb;
vsi->rx_page_failed = rx_page;
vsi->rx_buf_failed = rx_buf;
+ vsi->rx_page_reuse = rx_reuse;
+ vsi->rx_page_alloc = rx_alloc;
+ vsi->rx_page_waive = rx_waive;
+ vsi->rx_page_busy = rx_busy;
ns->rx_packets = rx_p;
ns->rx_bytes = rx_b;
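
The aggregation is a single pass over the queue pairs, folding each per-ring counter into a VSI-wide total that later lands in the ethtool output. A stripped-down model of the walk; the real loop also runs under rcu_read_lock() with the u64_stats_fetch_begin()/u64_stats_fetch_retry() sequence around each ring read to avoid torn 64-bit loads on 32-bit hosts, which this sketch omits:

```c
#include <stdint.h>
#include <stdio.h>

struct rx_queue_stats {
	uint64_t page_reuse_count;
	uint64_t page_alloc_count;
	uint64_t page_waive_count;
	uint64_t page_busy_count;
};

struct ring {
	struct rx_queue_stats rx_stats;
};

struct vsi_totals {
	uint64_t rx_page_reuse, rx_page_alloc, rx_page_waive, rx_page_busy;
};

/* Fold the per-ring counters into the VSI-wide totals, as the new
 * lines in i40e_update_vsi_stats() do for the four page counters. */
static void update_vsi_stats(struct vsi_totals *t,
			     const struct ring *rings, int nrings)
{
	int q;

	t->rx_page_reuse = t->rx_page_alloc = 0;
	t->rx_page_waive = t->rx_page_busy = 0;
	for (q = 0; q < nrings; q++) {
		t->rx_page_reuse += rings[q].rx_stats.page_reuse_count;
		t->rx_page_alloc += rings[q].rx_stats.page_alloc_count;
		t->rx_page_waive += rings[q].rx_stats.page_waive_count;
		t->rx_page_busy  += rings[q].rx_stats.page_busy_count;
	}
}

int main(void)
{
	struct ring rings[2] = {
		{ .rx_stats = { .page_reuse_count = 3 } },
		{ .rx_stats = { .page_busy_count = 1 } },
	};
	struct vsi_totals t;

	update_vsi_stats(&t, rings, 2);
	printf("reuse=%llu busy=%llu\n",
	       (unsigned long long)t.rx_page_reuse,
	       (unsigned long long)t.rx_page_busy);
	return 0;
}
```
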
@@ -2143,19 +2155,19 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
int num_del, int *retval)
{
struct i40e_hw *hw = &vsi->back->hw;
+ enum i40e_admin_queue_err aq_status;
i40e_status aq_ret;
- int aq_err;
- aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
- aq_err = hw->aq.asq_last_status;
+ aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
+ &aq_status);
/* Explicitly ignore and do not report when firmware returns ENOENT */
- if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
+ if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
*retval = -EIO;
dev_info(&vsi->back->pdev->dev,
"ignoring delete macvlan error on %s, err %s, aq_err %s\n",
vsi_name, i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw, aq_err));
+ i40e_aq_str(hw, aq_status));
}
}
@@ -2178,10 +2190,10 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
int num_add)
{
struct i40e_hw *hw = &vsi->back->hw;
- int aq_err, fcnt;
+ enum i40e_admin_queue_err aq_status;
+ int fcnt;
- i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
- aq_err = hw->aq.asq_last_status;
+ i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status);
fcnt = i40e_update_filter_state(num_add, list, add_head);
if (fcnt != num_add) {
@@ -2189,17 +2201,19 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, promiscuous mode forced on\n",
- i40e_aq_str(hw, aq_err), vsi_name);
+ i40e_aq_str(hw, aq_status), vsi_name);
} else if (vsi->type == I40E_VSI_SRIOV ||
vsi->type == I40E_VSI_VMDQ1 ||
vsi->type == I40E_VSI_VMDQ2) {
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
- i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
+ i40e_aq_str(hw, aq_status), vsi_name,
+ vsi_name);
} else {
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
- i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
+ i40e_aq_str(hw, aq_status), vsi_name,
+ vsi->type);
}
}
}
@@ -12712,7 +12726,8 @@ static int i40e_set_features(struct net_device *netdev,
else
i40e_vlan_stripping_disable(vsi);
- if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
+ if (!(features & NETIF_F_HW_TC) &&
+ (netdev->features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
dev_err(&pf->pdev->dev,
"Offloaded tc filters active, can't turn hw_tc_offload off");
return -EINVAL;
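
The hw_tc change converts an absolute test (the requested feature set has the bit clear) into a transition test (the bit was set and the request clears it), so a netdev that never had NETIF_F_HW_TC enabled, as after the init-time masking in the next hunk, cannot fail unrelated feature updates. A compilable model of the corrected predicate, with an invented flag and structure:

```c
#include <stdint.h>
#include <stdio.h>

#define F_HW_TC (1u << 0)	/* invented stand-in for NETIF_F_HW_TC */

/* Reject turning hw-tc off only on a genuine 1 -> 0 transition while
 * offloaded filters exist; a request that merely leaves an already
 * cleared bit at zero must succeed. */
static int set_features(uint32_t *cur_features, uint32_t requested,
			int num_cloud_filters)
{
	if (!(requested & F_HW_TC) && (*cur_features & F_HW_TC) &&
	    num_cloud_filters) {
		fprintf(stderr,
			"offloaded tc filters active, can't disable hw-tc\n");
		return -1;
	}
	*cur_features = requested;
	return 0;
}

int main(void)
{
	uint32_t features = 0;	/* hw-tc starts cleared, as after the patch */

	/* before the fix this would have failed although nothing changed */
	return set_features(&features, 0, /*num_cloud_filters=*/3);
}
```
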
@@ -13468,6 +13483,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+ netdev->features &= ~NETIF_F_HW_TC;
+
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
ether_addr_copy(mac_addr, hw->mac.perm_addr);
@@ -15331,12 +15348,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* set up for high or low dma */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "DMA configuration failed: 0x%x\n", err);
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
}
/* set up pci connections */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index fe6dca846028..3a38bf8bcde7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -682,10 +682,11 @@ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
__le16 le_sum;
ret_code = i40e_calc_nvm_checksum(hw, &checksum);
- le_sum = cpu_to_le16(checksum);
- if (!ret_code)
+ if (!ret_code) {
+ le_sum = cpu_to_le16(checksum);
ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
1, &le_sum, true);
+ }
return ret_code;
}
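
The reordering matters because checksum is only defined when i40e_calc_nvm_checksum() succeeds; the old code byte-swapped it before checking ret_code, consuming a possibly-uninitialized value on the error path. A standalone model of the corrected flow, with the NVM helpers stubbed and the cpu_to_le16() conversion elided:

```c
#include <stdint.h>
#include <stdio.h>

static int calc_nvm_checksum(uint16_t *checksum)
{
	*checksum = 0xBABA;	/* pretend sum over the shadow RAM */
	return 0;		/* 0 = success */
}

static int write_nvm_word(uint16_t sum)
{
	printf("writing checksum 0x%04x\n", (unsigned int)sum);
	return 0;
}

/* Mirror of the fixed i40e_update_nvm_checksum(): derive and write
 * the checksum only after the calculation reported success, so the
 * error path never consumes the uninitialized value. */
int update_nvm_checksum(void)
{
	uint16_t checksum;
	int ret = calc_nvm_checksum(&checksum);

	if (!ret)
		ret = write_nvm_word(checksum);
	return ret;
}

int main(void)
{
	return update_nvm_checksum();
}
```
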
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 9241b6005ad3..ebdcde6f1aeb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -27,10 +27,25 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
i40e_status
+i40e_asq_send_command_v2(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status);
+i40e_status
i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context);
+i40e_status
+i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context,
+ enum i40e_admin_queue_err *aq_status);
/* debug function for adminq */
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
@@ -150,9 +165,19 @@ i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status
+i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status);
i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status
+i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status);
i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
struct i40e_asq_cmd_details *cmd_details,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 66cc79500c10..0eae5858f2fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -830,8 +830,6 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
i40e_clean_tx_ring(tx_ring);
kfree(tx_ring->tx_bi);
tx_ring->tx_bi = NULL;
- kfree(tx_ring->xsk_descs);
- tx_ring->xsk_descs = NULL;
if (tx_ring->desc) {
dma_free_coherent(tx_ring->dev, tx_ring->size,
@@ -1382,8 +1380,6 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
new_buff->page_offset = old_buff->page_offset;
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
- rx_ring->rx_stats.page_reuse_count++;
-
/* clear contents of buffer_info */
old_buff->page = NULL;
}
@@ -1433,13 +1429,6 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
if (!tx_ring->tx_bi)
goto err;
- if (ring_is_xdp(tx_ring)) {
- tx_ring->xsk_descs = kcalloc(I40E_MAX_NUM_DESCRIPTORS, sizeof(*tx_ring->xsk_descs),
- GFP_KERNEL);
- if (!tx_ring->xsk_descs)
- goto err;
- }
-
u64_stats_init(&tx_ring->syncp);
/* round up to nearest 4K */
@@ -1463,8 +1452,6 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
return 0;
err:
- kfree(tx_ring->xsk_descs);
- tx_ring->xsk_descs = NULL;
kfree(tx_ring->tx_bi);
tx_ring->tx_bi = NULL;
return -ENOMEM;
@@ -1675,6 +1662,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
return false;
}
+ rx_ring->rx_stats.page_alloc_count++;
+
/* map page for use */
dma = dma_map_page_attrs(rx_ring->dev, page, 0,
i40e_rx_pg_size(rx_ring),
@@ -1982,32 +1971,43 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
/**
* i40e_can_reuse_rx_page - Determine if page can be reused for another Rx
* @rx_buffer: buffer containing the page
+ * @rx_stats: rx stats structure for the rx ring
* @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
*
* If page is reusable, we have a green light for calling i40e_reuse_rx_page,
* which will assign the current buffer to the buffer that next_to_alloc is
* pointing to; otherwise, the DMA mapping needs to be destroyed and
- * page freed
+ * page freed.
+ *
+ * rx_stats will be updated to indicate whether the page was waived
+ * or busy if it could not be reused.
*/
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
+ struct i40e_rx_queue_stats *rx_stats,
int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
/* Is any reuse possible? */
- if (!dev_page_is_reusable(page))
+ if (!dev_page_is_reusable(page)) {
+ rx_stats->page_waive_count++;
return false;
+ }
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
+ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) {
+ rx_stats->page_busy_count++;
return false;
+ }
#else
#define I40E_LAST_OFFSET \
(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
- if (rx_buffer->page_offset > I40E_LAST_OFFSET)
+ if (rx_buffer->page_offset > I40E_LAST_OFFSET) {
+ rx_stats->page_busy_count++;
return false;
+ }
#endif
/* If we have drained the page fragment pool we need to update
@@ -2237,7 +2237,7 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
int rx_buffer_pgcnt)
{
- if (i40e_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
+ if (i40e_can_reuse_rx_page(rx_buffer, &rx_ring->rx_stats, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer);
} else {
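
Passing the ring stats into i40e_can_reuse_rx_page() lets each failure path record why recycling failed: "waive" when the page is not reusable at all (for example, it sits on a remote NUMA node), "busy" when other references still pin it. A simplified, compilable model of the decision; field names are invented and the PAGE_SIZE >= 8192 offset check is left out:

```c
#include <stdbool.h>
#include <stdint.h>

struct rx_stats {
	uint64_t page_waive_count;	/* page not reusable at all */
	uint64_t page_busy_count;	/* page still referenced elsewhere */
};

struct rx_buffer {
	int pagecnt_bias;
	int page_refcount;
	bool page_reusable;		/* models dev_page_is_reusable() */
};

/* Decide whether the half page can go back on the ring, recording in
 * the per-ring stats why reuse failed when it does. */
static bool can_reuse_rx_page(const struct rx_buffer *buf,
			      struct rx_stats *stats)
{
	if (!buf->page_reusable) {
		stats->page_waive_count++;	/* feeds rx_cache_waive */
		return false;
	}
	if (buf->page_refcount - buf->pagecnt_bias > 1) {
		stats->page_busy_count++;	/* feeds rx_cache_busy */
		return false;
	}
	return true;
}

int main(void)
{
	struct rx_stats stats = { 0 };
	struct rx_buffer buf = {
		.pagecnt_bias = 1, .page_refcount = 2, .page_reusable = true,
	};

	return can_reuse_rx_page(&buf, &stats) ? 0 : 1;
}
```
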
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index bfc2845c99d1..c471c2da313c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -298,7 +298,9 @@ struct i40e_rx_queue_stats {
u64 alloc_page_failed;
u64 alloc_buff_failed;
u64 page_reuse_count;
- u64 realloc_count;
+ u64 page_alloc_count;
+ u64 page_waive_count;
+ u64 page_busy_count;
};
enum i40e_ring_state_t {
@@ -390,7 +392,6 @@ struct i40e_ring {
u16 rx_offset;
struct xdp_rxq_info xdp_rxq;
struct xsk_buff_pool *xsk_pool;
- struct xdp_desc *xsk_descs; /* For storing descriptors in the AF_XDP ZC path */
} ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct i40e_ring *ring)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index dfdb6e786461..2606e8f0f19b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1917,19 +1917,17 @@ sriov_configure_out:
/***********************virtual channel routines******************/
/**
- * i40e_vc_send_msg_to_vf_ex
+ * i40e_vc_send_msg_to_vf
* @vf: pointer to the VF info
* @v_opcode: virtual channel opcode
* @v_retval: virtual channel return value
* @msg: pointer to the msg buffer
* @msglen: msg length
- * @is_quiet: true for not printing unsuccessful return values, false otherwise
*
* send msg to VF
**/
-static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
- u32 v_retval, u8 *msg, u16 msglen,
- bool is_quiet)
+static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
+ u32 v_retval, u8 *msg, u16 msglen)
{
struct i40e_pf *pf;
struct i40e_hw *hw;
@@ -1944,25 +1942,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
hw = &pf->hw;
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
- /* single place to detect unsuccessful return values */
- if (v_retval && !is_quiet) {
- vf->num_invalid_msgs++;
- dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
- vf->vf_id, v_opcode, v_retval);
- if (vf->num_invalid_msgs >
- I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
- dev_err(&pf->pdev->dev,
- "Number of invalid messages exceeded for VF %d\n",
- vf->vf_id);
- dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
- set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
- }
- } else {
- vf->num_valid_msgs++;
- /* reset the invalid counter, if a valid message is received. */
- vf->num_invalid_msgs = 0;
- }
-
aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret) {
@@ -1976,23 +1955,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
}
/**
- * i40e_vc_send_msg_to_vf
- * @vf: pointer to the VF info
- * @v_opcode: virtual channel opcode
- * @v_retval: virtual channel return value
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * send msg to VF
- **/
-static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
- u32 v_retval, u8 *msg, u16 msglen)
-{
- return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval,
- msg, msglen, false);
-}
-
-/**
* i40e_vc_send_resp_to_vf
* @vf: pointer to the VF info
* @opcode: operation code
@@ -2822,7 +2784,6 @@ error_param:
* i40e_check_vf_permission
* @vf: pointer to the VF info
* @al: MAC address list from virtchnl
- * @is_quiet: set true for printing msg without opcode info, false otherwise
*
* Check that the given list of MAC addresses is allowed. Will return -EPERM
* if any address in the list is not valid. Checks the following conditions:
@@ -2837,8 +2798,7 @@ error_param:
* addresses might not be accurate.
**/
static inline int i40e_check_vf_permission(struct i40e_vf *vf,
- struct virtchnl_ether_addr_list *al,
- bool *is_quiet)
+ struct virtchnl_ether_addr_list *al)
{
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
@@ -2846,7 +2806,6 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
int mac2add_cnt = 0;
int i;
- *is_quiet = false;
for (i = 0; i < al->num_elements; i++) {
struct i40e_mac_filter *f;
u8 *addr = al->list[i].addr;
@@ -2870,7 +2829,6 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
!ether_addr_equal(addr, vf->default_lan_addr.addr)) {
dev_err(&pf->pdev->dev,
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
- *is_quiet = true;
return -EPERM;
}
@@ -2921,7 +2879,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_ether_addr_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- bool is_quiet = false;
i40e_status ret = 0;
int i;
@@ -2938,7 +2895,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
*/
spin_lock_bh(&vsi->mac_filter_hash_lock);
- ret = i40e_check_vf_permission(vf, al, &is_quiet);
+ ret = i40e_check_vf_permission(vf, al);
if (ret) {
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
@@ -2976,8 +2933,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
error_param:
/* send the response to the VF */
- return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
- ret, NULL, 0, is_quiet);
+ return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+ ret, NULL, 0);
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 03c42fd0fea1..a554d0a0b09b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -10,8 +10,6 @@
#define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
-#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10
-
#define I40E_VLAN_PRIORITY_SHIFT 13
#define I40E_VLAN_MASK 0xFFF
#define I40E_PRIORITY_MASK 0xE000
@@ -92,9 +90,6 @@ struct i40e_vf {
u8 num_queue_pairs; /* num of qps assigned to VF vsis */
u8 num_req_queues; /* num of requested qps */
u64 num_mdd_events; /* num of mdd events detected */
- /* num of continuous malformed or invalid msgs detected */
- u64 num_invalid_msgs;
- u64 num_valid_msgs; /* num of valid msgs detected */
unsigned long vf_caps; /* vf's adv. capabilities */
unsigned long vf_states; /* vf's runtime states */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 945b1bb9c6f4..c1d25b0b0ca2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -218,7 +218,6 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
ntu += nb_buffs;
if (ntu == rx_ring->count) {
rx_desc = I40E_RX_DESC(rx_ring, 0);
- xdp = i40e_rx_bi(rx_ring, 0);
ntu = 0;
}
@@ -241,21 +240,25 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
struct xdp_buff *xdp)
{
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
- unsigned int datasize = xdp->data_end - xdp->data;
struct sk_buff *skb;
+ net_prefetch(xdp->data_meta);
+
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- xdp->data_end - xdp->data_hard_start,
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
goto out;
- skb_reserve(skb, xdp->data - xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), xdp->data, datasize);
- if (metasize)
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
out:
xsk_buff_free(xdp);
@@ -324,11 +327,11 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
u16 next_to_clean = rx_ring->next_to_clean;
u16 count_mask = rx_ring->count - 1;
unsigned int xdp_res, xdp_xmit = 0;
bool failure = false;
+ u16 cleaned_count;
while (likely(total_rx_packets < (unsigned int)budget)) {
union i40e_rx_desc *rx_desc;
@@ -467,11 +470,11 @@ static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
**/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
- struct xdp_desc *descs = xdp_ring->xsk_descs;
+ struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
u32 nb_pkts, nb_processed = 0;
unsigned int total_bytes = 0;
- nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
+ nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
if (!nb_pkts)
return true;
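
The reworked zero-copy receive path copies one contiguous span from data_meta through data_end, so metadata an XDP program prepends to the frame survives into the skb, then pulls the head past the metadata so skb->data points at the packet (skb_metadata_set() plus __skb_pull() in the driver). A plain-C model with byte buffers standing in for xdp_buff and skb; the ALIGN() on the copy length is not modeled:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct xdp_view {
	uint8_t *data_meta;	/* start of XDP metadata */
	uint8_t *data;		/* start of the packet */
	uint8_t *data_end;	/* end of the packet */
};

struct fake_skb {
	uint8_t buf[2048];
	uint8_t *head;		/* models skb->data */
	size_t len;
};

/* One copy spanning metadata plus packet, then pull the head past the
 * metadata: the shape of the reworked i40e_construct_skb_zc(). */
static void construct_skb(struct fake_skb *skb, const struct xdp_view *x)
{
	size_t totalsize = (size_t)(x->data_end - x->data_meta);
	size_t metasize = (size_t)(x->data - x->data_meta);

	memcpy(skb->buf, x->data_meta, totalsize);
	skb->head = skb->buf + metasize;	/* __skb_pull(skb, metasize) */
	skb->len = totalsize - metasize;
}

int main(void)
{
	uint8_t frame[] = "METApacket-bytes";
	struct xdp_view x = {
		.data_meta = frame,
		.data = frame + 4,			/* 4 metadata bytes */
		.data_end = frame + sizeof(frame) - 1,	/* drop the NUL */
	};
	struct fake_skb skb;

	construct_skb(&skb, &x);
	printf("%zu payload bytes, first = '%c'\n", skb.len, skb.head[0]);
	return 0;
}
```
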
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 59806d1f7e79..49aed3e506a6 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -44,6 +44,9 @@
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
#define PFX "iavf: "
+int iavf_status_to_errno(enum iavf_status status);
+int virtchnl_status_to_errno(enum virtchnl_status_code v_status);
+
/* VSI state flags shared with common code */
enum iavf_vsi_state_t {
__IAVF_VSI_DOWN,
@@ -188,7 +191,7 @@ enum iavf_state_t {
__IAVF_REMOVE, /* driver is being unloaded */
__IAVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
__IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
- __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS,
+ __IAVF_INIT_EXTENDED_CAPS, /* process extended caps which require aq msg exchange */
__IAVF_INIT_CONFIG_ADAPTER,
__IAVF_INIT_SW, /* got resources, setting up structs */
__IAVF_INIT_FAILED, /* init failed, restarting procedure */
@@ -201,6 +204,10 @@ enum iavf_state_t {
__IAVF_RUNNING, /* opened, working */
};
+enum iavf_critical_section_t {
+ __IAVF_IN_REMOVE_TASK, /* device being removed */
+};
+
#define IAVF_CLOUD_FIELD_OMAC 0x01
#define IAVF_CLOUD_FIELD_IMAC 0x02
#define IAVF_CLOUD_FIELD_IVLAN 0x04
@@ -246,7 +253,6 @@ struct iavf_adapter {
struct list_head mac_filter_list;
struct mutex crit_lock;
struct mutex client_lock;
- struct mutex remove_lock;
/* Lock to protect accesses to MAC and VLAN lists */
spinlock_t mac_vlan_list_lock;
char misc_vector_name[IFNAMSIZ + 9];
@@ -284,6 +290,8 @@ struct iavf_adapter {
#define IAVF_FLAG_LEGACY_RX BIT(15)
#define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16)
#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
+#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18)
+#define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20)
/* duplicates for common code */
#define IAVF_FLAG_DCB_ENABLED 0
/* flags for admin queue service task */
@@ -329,6 +337,21 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION BIT_ULL(37)
#define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION BIT_ULL(38)
+ /* flags for processing extended capability messages during
+ * __IAVF_INIT_EXTENDED_CAPS. Each capability exchange requires
+ * both a SEND and a RECV step, which must be processed in sequence.
+ *
+ * During the __IAVF_INIT_EXTENDED_CAPS state, the driver will
+ * process one flag at a time during each state loop.
+ */
+ u64 extended_caps;
+#define IAVF_EXTENDED_CAP_SEND_VLAN_V2 BIT_ULL(0)
+#define IAVF_EXTENDED_CAP_RECV_VLAN_V2 BIT_ULL(1)
+
+#define IAVF_EXTENDED_CAPS \
+ (IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \
+ IAVF_EXTENDED_CAP_RECV_VLAN_V2)
+
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
@@ -510,7 +533,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter);
void iavf_del_vlans(struct iavf_adapter *adapter);
void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags);
void iavf_request_stats(struct iavf_adapter *adapter);
-void iavf_request_reset(struct iavf_adapter *adapter);
+int iavf_request_reset(struct iavf_adapter *adapter);
void iavf_get_hena(struct iavf_adapter *adapter);
void iavf_set_hena(struct iavf_adapter *adapter);
void iavf_set_rss_key(struct iavf_adapter *adapter);
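
The SEND/RECV flag pairs define a small state machine: each pass through __IAVF_INIT_EXTENDED_CAPS handles exactly one pending bit, clears it, and yields back to the watchdog, and the state is left once the mask reaches zero. Supporting another negotiated capability then means one more bit pair and one more branch. A compilable model of the loop, with names abbreviated from the IAVF_EXTENDED_CAP_* flags:

```c
#include <stdint.h>
#include <stdio.h>

#define CAP_SEND_VLAN_V2 (1ull << 0)
#define CAP_RECV_VLAN_V2 (1ull << 1)
#define ALL_CAPS (CAP_SEND_VLAN_V2 | CAP_RECV_VLAN_V2)

/* One pass of the __IAVF_INIT_EXTENDED_CAPS loop: handle exactly one
 * pending bit, clear it, and return to the scheduler. */
static void process_extended_caps(uint64_t *caps)
{
	if (*caps & CAP_SEND_VLAN_V2) {
		puts("send VLAN_V2 capability request");
		*caps &= ~CAP_SEND_VLAN_V2;
	} else if (*caps & CAP_RECV_VLAN_V2) {
		puts("receive VLAN_V2 capability reply");
		*caps &= ~CAP_RECV_VLAN_V2;
	}
}

int main(void)
{
	uint64_t caps = ALL_CAPS;

	while (caps)	/* the driver re-enters via its watchdog instead */
		process_extended_caps(&caps);
	puts("-> __IAVF_INIT_CONFIG_ADAPTER");
	return 0;
}
```
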
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index e9cc7f6ddc46..34e46a23894f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -131,8 +131,8 @@ const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
return "IAVF_ERR_INVALID_MAC_ADDR";
case IAVF_ERR_DEVICE_NOT_SUPPORTED:
return "IAVF_ERR_DEVICE_NOT_SUPPORTED";
- case IAVF_ERR_MASTER_REQUESTS_PENDING:
- return "IAVF_ERR_MASTER_REQUESTS_PENDING";
+ case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
+ return "IAVF_ERR_PRIMARY_REQUESTS_PENDING";
case IAVF_ERR_INVALID_LINK_SETTINGS:
return "IAVF_ERR_INVALID_LINK_SETTINGS";
case IAVF_ERR_AUTONEG_NOT_COMPLETE:
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 8125b9120615..190590d32faf 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -51,6 +51,113 @@ MODULE_LICENSE("GPL v2");
static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;
+int iavf_status_to_errno(enum iavf_status status)
+{
+ switch (status) {
+ case IAVF_SUCCESS:
+ return 0;
+ case IAVF_ERR_PARAM:
+ case IAVF_ERR_MAC_TYPE:
+ case IAVF_ERR_INVALID_MAC_ADDR:
+ case IAVF_ERR_INVALID_LINK_SETTINGS:
+ case IAVF_ERR_INVALID_PD_ID:
+ case IAVF_ERR_INVALID_QP_ID:
+ case IAVF_ERR_INVALID_CQ_ID:
+ case IAVF_ERR_INVALID_CEQ_ID:
+ case IAVF_ERR_INVALID_AEQ_ID:
+ case IAVF_ERR_INVALID_SIZE:
+ case IAVF_ERR_INVALID_ARP_INDEX:
+ case IAVF_ERR_INVALID_FPM_FUNC_ID:
+ case IAVF_ERR_QP_INVALID_MSG_SIZE:
+ case IAVF_ERR_INVALID_FRAG_COUNT:
+ case IAVF_ERR_INVALID_ALIGNMENT:
+ case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
+ case IAVF_ERR_INVALID_IMM_DATA_SIZE:
+ case IAVF_ERR_INVALID_VF_ID:
+ case IAVF_ERR_INVALID_HMCFN_ID:
+ case IAVF_ERR_INVALID_PBLE_INDEX:
+ case IAVF_ERR_INVALID_SD_INDEX:
+ case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
+ case IAVF_ERR_INVALID_SD_TYPE:
+ case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
+ case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
+ case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
+ return -EINVAL;
+ case IAVF_ERR_NVM:
+ case IAVF_ERR_NVM_CHECKSUM:
+ case IAVF_ERR_PHY:
+ case IAVF_ERR_CONFIG:
+ case IAVF_ERR_UNKNOWN_PHY:
+ case IAVF_ERR_LINK_SETUP:
+ case IAVF_ERR_ADAPTER_STOPPED:
+ case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
+ case IAVF_ERR_AUTONEG_NOT_COMPLETE:
+ case IAVF_ERR_RESET_FAILED:
+ case IAVF_ERR_BAD_PTR:
+ case IAVF_ERR_SWFW_SYNC:
+ case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
+ case IAVF_ERR_QUEUE_EMPTY:
+ case IAVF_ERR_FLUSHED_QUEUE:
+ case IAVF_ERR_OPCODE_MISMATCH:
+ case IAVF_ERR_CQP_COMPL_ERROR:
+ case IAVF_ERR_BACKING_PAGE_ERROR:
+ case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
+ case IAVF_ERR_MEMCPY_FAILED:
+ case IAVF_ERR_SRQ_ENABLED:
+ case IAVF_ERR_ADMIN_QUEUE_ERROR:
+ case IAVF_ERR_ADMIN_QUEUE_FULL:
+ case IAVF_ERR_BAD_IWARP_CQE:
+ case IAVF_ERR_NVM_BLANK_MODE:
+ case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
+ case IAVF_ERR_DIAG_TEST_FAILED:
+ case IAVF_ERR_FIRMWARE_API_VERSION:
+ case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return -EIO;
+ case IAVF_ERR_DEVICE_NOT_SUPPORTED:
+ return -ENODEV;
+ case IAVF_ERR_NO_AVAILABLE_VSI:
+ case IAVF_ERR_RING_FULL:
+ return -ENOSPC;
+ case IAVF_ERR_NO_MEMORY:
+ return -ENOMEM;
+ case IAVF_ERR_TIMEOUT:
+ case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
+ return -ETIMEDOUT;
+ case IAVF_ERR_NOT_IMPLEMENTED:
+ case IAVF_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
+ return -EALREADY;
+ case IAVF_ERR_NOT_READY:
+ return -EBUSY;
+ case IAVF_ERR_BUF_TOO_SHORT:
+ return -EMSGSIZE;
+ }
+
+ return -EIO;
+}
+
+int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
+{
+ switch (v_status) {
+ case VIRTCHNL_STATUS_SUCCESS:
+ return 0;
+ case VIRTCHNL_STATUS_ERR_PARAM:
+ case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
+ return -EINVAL;
+ case VIRTCHNL_STATUS_ERR_NO_MEMORY:
+ return -ENOMEM;
+ case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
+ case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
+ case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
+ return -EIO;
+ case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ }
+
+ return -EIO;
+}
+
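
These mappers exist so the translation from firmware status codes to negative errnos happens exactly once, at the boundary where driver-internal results propagate into generic kernel code, rather than letting positive status values leak upward. A toy model of the same shape, using three invented status values in place of the full enum:

```c
#include <errno.h>
#include <stdio.h>

enum fw_status { FW_OK = 0, FW_ERR_PARAM = 1, FW_ERR_TIMEOUT = 2 };

/* Same shape as iavf_status_to_errno(): a total mapping from the
 * firmware status space onto negative errnos, with -EIO as the
 * catch-all so no positive status ever escapes upward. */
static int status_to_errno(enum fw_status s)
{
	switch (s) {
	case FW_OK:
		return 0;
	case FW_ERR_PARAM:
		return -EINVAL;
	case FW_ERR_TIMEOUT:
		return -ETIMEDOUT;
	}
	return -EIO;
}

static enum fw_status fw_call(void)
{
	return FW_ERR_TIMEOUT;
}

int main(void)
{
	/* translate once, at the boundary between driver-internal
	 * status codes and generic error handling */
	int err = status_to_errno(fw_call());

	printf("err = %d\n", err);
	return err ? 1 : 0;
}
```
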
/**
* iavf_pdev_to_adapter - go from pci_dev to adapter
* @pdev: pci_dev pointer
@@ -302,8 +409,9 @@ static irqreturn_t iavf_msix_aq(int irq, void *data)
rd32(hw, IAVF_VFINT_ICR01);
rd32(hw, IAVF_VFINT_ICR0_ENA1);
- /* schedule work on the private workqueue */
- queue_work(iavf_wq, &adapter->adminq_task);
+ if (adapter->state != __IAVF_REMOVE)
+ /* schedule work on the private workqueue */
+ queue_work(iavf_wq, &adapter->adminq_task);
return IRQ_HANDLED;
}
@@ -876,6 +984,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true;
f->is_new_mac = true;
+ f->is_primary = false;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
} else {
f->remove = false;
@@ -909,17 +1018,22 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
f = iavf_find_filter(adapter, hw->mac.addr);
if (f) {
f->remove = true;
+ f->is_primary = true;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
}
f = iavf_add_filter(adapter, addr->sa_data);
-
- spin_unlock_bh(&adapter->mac_vlan_list_lock);
-
if (f) {
+ f->is_primary = true;
ether_addr_copy(hw->mac.addr, addr->sa_data);
}
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+ /* schedule the watchdog task to immediately process the request */
+ if (f)
+ queue_work(iavf_wq, &adapter->watchdog_task.work);
+
return (f == NULL) ? -ENOMEM : 0;
}
@@ -1136,8 +1250,7 @@ void iavf_down(struct iavf_adapter *adapter)
rss->state = IAVF_ADV_RSS_DEL_REQUEST;
spin_unlock_bh(&adapter->adv_rss_lock);
- if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
- adapter->state != __IAVF_RESETTING) {
+ if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
/* cancel any current operation */
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
/* Schedule operations to close down the HW. Don't wait
@@ -1421,7 +1534,7 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter)
struct iavf_aqc_get_set_rss_key_data *rss_key =
(struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
struct iavf_hw *hw = &adapter->hw;
- int ret = 0;
+ enum iavf_status status;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -1430,24 +1543,25 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter)
return -EBUSY;
}
- ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
- if (ret) {
+ status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
+ if (status) {
dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
- iavf_stat_str(hw, ret),
+ iavf_stat_str(hw, status),
iavf_aq_str(hw, hw->aq.asq_last_status));
- return ret;
+ return iavf_status_to_errno(status);
}
- ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
- adapter->rss_lut, adapter->rss_lut_size);
- if (ret) {
+ status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
+ adapter->rss_lut, adapter->rss_lut_size);
+ if (status) {
dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
- iavf_stat_str(hw, ret),
+ iavf_stat_str(hw, status),
iavf_aq_str(hw, hw->aq.asq_last_status));
+ return iavf_status_to_errno(status);
}
- return ret;
+ return 0;
}
@@ -1517,7 +1631,6 @@ static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
static int iavf_init_rss(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
- int ret;
if (!RSS_PF(adapter)) {
/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
@@ -1533,9 +1646,8 @@ static int iavf_init_rss(struct iavf_adapter *adapter)
iavf_fill_rss_lut(adapter);
netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
- ret = iavf_config_rss(adapter);
- return ret;
+ return iavf_config_rss(adapter);
}
/**
@@ -2003,23 +2115,24 @@ static void iavf_startup(struct iavf_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct iavf_hw *hw = &adapter->hw;
- int err;
+ enum iavf_status status;
+ int ret;
WARN_ON(adapter->state != __IAVF_STARTUP);
/* driver loaded, probe complete */
adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
- err = iavf_set_mac_type(hw);
- if (err) {
- dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err);
+ status = iavf_set_mac_type(hw);
+ if (status) {
+ dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status);
goto err;
}
- err = iavf_check_reset_complete(hw);
- if (err) {
+ ret = iavf_check_reset_complete(hw);
+ if (ret) {
dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
- err);
+ ret);
goto err;
}
hw->aq.num_arq_entries = IAVF_AQ_LEN;
@@ -2027,14 +2140,15 @@ static void iavf_startup(struct iavf_adapter *adapter)
hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
- err = iavf_init_adminq(hw);
- if (err) {
- dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err);
+ status = iavf_init_adminq(hw);
+ if (status) {
+ dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
+ status);
goto err;
}
- err = iavf_send_api_ver(adapter);
- if (err) {
- dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
+ ret = iavf_send_api_ver(adapter);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret);
iavf_shutdown_adminq(hw);
goto err;
}
@@ -2070,7 +2184,7 @@ static void iavf_init_version_check(struct iavf_adapter *adapter)
/* aq msg sent, awaiting reply */
err = iavf_verify_api_ver(adapter);
if (err) {
- if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
+ if (err == -EALREADY)
err = iavf_send_api_ver(adapter);
else
dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
@@ -2120,7 +2234,7 @@ int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
"Requested %d queues, but PF only gave us %d.\n",
num_req_queues,
adapter->vsi_res->num_queue_pairs);
- adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
iavf_schedule_reset(adapter);
@@ -2171,11 +2285,11 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
}
}
err = iavf_get_vf_config(adapter);
- if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
+ if (err == -EALREADY) {
err = iavf_send_vf_config_msg(adapter);
goto err_alloc;
- } else if (err == IAVF_ERR_PARAM) {
- /* We only get ERR_PARAM if the device is in a very bad
+ } else if (err == -EINVAL) {
+ /* We only get -EINVAL if the device is in a very bad
* state or if we've been disabled for previous bad
* behavior. Either way, we're done now.
*/
@@ -2189,26 +2303,18 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
}
err = iavf_parse_vf_resource_msg(adapter);
- if (err)
- goto err_alloc;
-
- err = iavf_send_vf_offload_vlan_v2_msg(adapter);
- if (err == -EOPNOTSUPP) {
- /* underlying PF doesn't support VIRTCHNL_VF_OFFLOAD_VLAN_V2, so
- * go directly to finishing initialization
- */
- iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
- return;
- } else if (err) {
- dev_err(&pdev->dev, "Unable to send offload vlan v2 request (%d)\n",
+ if (err) {
+ dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n",
err);
goto err_alloc;
}
-
- /* underlying PF supports VIRTCHNL_VF_OFFLOAD_VLAN_V2, so update the
- * state accordingly
+ /* Some features require additional messages to negotiate extended
+ * capabilities. These are processed in sequence by the
+ * __IAVF_INIT_EXTENDED_CAPS driver state.
*/
- iavf_change_state(adapter, __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS);
+ adapter->extended_caps = IAVF_EXTENDED_CAPS;
+
+ iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS);
return;
err_alloc:
@@ -2219,35 +2325,93 @@ err:
}
/**
- * iavf_init_get_offload_vlan_v2_caps - part of driver startup
+ * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps
* @adapter: board private structure
*
- * Function processes __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS driver state if the
- * VF negotiates VIRTCHNL_VF_OFFLOAD_VLAN_V2. If VIRTCHNL_VF_OFFLOAD_VLAN_V2 is
- * not negotiated, then this state will never be entered.
+ * Function processes send of the extended VLAN V2 capability message to the
+ * PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent,
+ * e.g. due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2.
+ */
+static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter)
+{
+ int ret;
+
+ WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2));
+
+ ret = iavf_send_vf_offload_vlan_v2_msg(adapter);
+ if (ret == -EOPNOTSUPP) {
+ /* PF does not support VIRTCHNL_VF_OFFLOAD_VLAN_V2. In this case,
+ * we did not send the capability exchange message and do not
+ * expect a response.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
+ }
+
+ /* We sent the message, so move on to the next step */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
+}
+
+/**
+ * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps
+ * @adapter: board private structure
+ *
+ * Function processes receipt of the extended VLAN V2 capability message from
+ * the PF.
**/
-static void iavf_init_get_offload_vlan_v2_caps(struct iavf_adapter *adapter)
+static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter)
{
int ret;
- WARN_ON(adapter->state != __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS);
+ WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2));
memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));
ret = iavf_get_vf_vlan_v2_caps(adapter);
- if (ret) {
- if (ret == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
- iavf_send_vf_offload_vlan_v2_msg(adapter);
+ if (ret)
goto err;
- }
- iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
+ /* We've processed receipt of the VLAN V2 caps message */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
return;
err:
+ /* We didn't receive a reply. Make sure we try sending again when
+ * __IAVF_INIT_FAILED attempts to recover.
+ */
+ adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2;
iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
/**
+ * iavf_init_process_extended_caps - Part of driver startup
+ * @adapter: board private structure
+ *
+ * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state
+ * handles negotiating capabilities for features which require an additional
+ * message.
+ *
+ * Once all extended capability exchanges are finished, the driver will
+ * transition into __IAVF_INIT_CONFIG_ADAPTER.
+ */
+static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
+{
+ WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS);
+
+ /* Process capability exchange for VLAN V2 */
+ if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
+ iavf_init_send_offload_vlan_v2_caps(adapter);
+ return;
+ } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
+ iavf_init_recv_offload_vlan_v2_caps(adapter);
+ return;
+ }
+
+ /* When we reach here, no further extended capability exchanges are
+ * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
+ */
+ iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
+}
+
+/**
* iavf_init_config_adapter - last part of driver startup
* @adapter: board private structure
*
@@ -2374,17 +2538,22 @@ static void iavf_watchdog_task(struct work_struct *work)
struct iavf_hw *hw = &adapter->hw;
u32 reg_val;
- if (!mutex_trylock(&adapter->crit_lock))
+ if (!mutex_trylock(&adapter->crit_lock)) {
+ if (adapter->state == __IAVF_REMOVE)
+ return;
+
goto restart_watchdog;
+ }
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
iavf_change_state(adapter, __IAVF_COMM_FAILED);
- if (adapter->flags & IAVF_FLAG_RESET_NEEDED &&
- adapter->state != __IAVF_RESETTING) {
- iavf_change_state(adapter, __IAVF_RESETTING);
+ if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ mutex_unlock(&adapter->crit_lock);
+ queue_work(iavf_wq, &adapter->reset_task);
+ return;
}
switch (adapter->state) {
@@ -2406,8 +2575,8 @@ static void iavf_watchdog_task(struct work_struct *work)
queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
- case __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS:
- iavf_init_get_offload_vlan_v2_caps(adapter);
+ case __IAVF_INIT_EXTENDED_CAPS:
+ iavf_init_process_extended_caps(adapter);
mutex_unlock(&adapter->crit_lock);
queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
@@ -2419,6 +2588,15 @@ static void iavf_watchdog_task(struct work_struct *work)
msecs_to_jiffies(1));
return;
case __IAVF_INIT_FAILED:
+ if (test_bit(__IAVF_IN_REMOVE_TASK,
+ &adapter->crit_section)) {
+ /* Do not update the state and do not reschedule the
+ * watchdog task; iavf_remove should handle this state,
+ * as it can otherwise loop forever
+ */
+ mutex_unlock(&adapter->crit_lock);
+ return;
+ }
if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
dev_err(&adapter->pdev->dev,
"Failed to communicate with PF; waiting before retry\n");
@@ -2435,6 +2613,17 @@ static void iavf_watchdog_task(struct work_struct *work)
queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
return;
case __IAVF_COMM_FAILED:
+ if (test_bit(__IAVF_IN_REMOVE_TASK,
+ &adapter->crit_section)) {
+ /* Set state to __IAVF_INIT_FAILED and perform remove
+ * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
+ * doesn't bring the state back to __IAVF_COMM_FAILED.
+ */
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+ adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
+ mutex_unlock(&adapter->crit_lock);
+ return;
+ }
reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
@@ -2507,7 +2696,8 @@ static void iavf_watchdog_task(struct work_struct *work)
schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
mutex_unlock(&adapter->crit_lock);
restart_watchdog:
- queue_work(iavf_wq, &adapter->adminq_task);
+ if (adapter->state >= __IAVF_DOWN)
+ queue_work(iavf_wq, &adapter->adminq_task);
if (adapter->aq_required)
queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(20));
@@ -2515,6 +2705,13 @@ restart_watchdog:
queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
}
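
A pattern worth noting across the watchdog, adminq, and reset tasks in this series: work items no longer block on crit_lock, since the remove path may hold it for a long time. On contention they either give up for good (the device is being removed) or requeue themselves and retry. A compilable model of that handler skeleton, with a pthread mutex and stub helpers standing in for the kernel primitives:

```c
#include <pthread.h>
#include <stdio.h>

enum state { RUNNING, REMOVING };

struct adapter {
	pthread_mutex_t crit_lock;
	enum state state;
};

static void requeue(void)
{
	puts("requeue work item");
}

/* Work-handler skeleton: never block on crit_lock; on contention,
 * bail out for good if the device is going away, otherwise requeue
 * and try again later. */
static void work_handler(struct adapter *ad)
{
	if (pthread_mutex_trylock(&ad->crit_lock) != 0) {
		if (ad->state == REMOVING)
			return;		/* remove path wins; don't respawn */
		requeue();
		return;
	}

	puts("do the real work under crit_lock");
	pthread_mutex_unlock(&ad->crit_lock);
}

int main(void)
{
	struct adapter ad = {
		.crit_lock = PTHREAD_MUTEX_INITIALIZER,
		.state = RUNNING,
	};

	work_handler(&ad);
	return 0;
}
```
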
+/**
+ * iavf_disable_vf - disable VF
+ * @adapter: board private structure
+ *
+ * Set the communication-failed flag and free all resources.
+ * NOTE: This function is expected to be called with crit_lock held.
+ **/
static void iavf_disable_vf(struct iavf_adapter *adapter)
{
struct iavf_mac_filter *f, *ftmp;
@@ -2569,7 +2766,6 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
iavf_shutdown_adminq(&adapter->hw);
adapter->netdev->flags &= ~IFF_UP;
- mutex_unlock(&adapter->crit_lock);
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
@@ -2594,6 +2790,7 @@ static void iavf_reset_task(struct work_struct *work)
struct iavf_hw *hw = &adapter->hw;
struct iavf_mac_filter *f, *ftmp;
struct iavf_cloud_filter *cf;
+ enum iavf_status status;
u32 reg_val;
int i = 0, err;
bool running;
@@ -2601,13 +2798,13 @@ static void iavf_reset_task(struct work_struct *work)
/* When device is being removed it doesn't make sense to run the reset
* task, just return in such a case.
*/
- if (mutex_is_locked(&adapter->remove_lock))
- return;
+ if (!mutex_trylock(&adapter->crit_lock)) {
+ if (adapter->state != __IAVF_REMOVE)
+ queue_work(iavf_wq, &adapter->reset_task);
- if (iavf_lock_timeout(&adapter->crit_lock, 200)) {
- schedule_work(&adapter->reset_task);
return;
}
+
while (!mutex_trylock(&adapter->client_lock))
usleep_range(500, 1000);
if (CLIENT_ENABLED(adapter)) {
@@ -2662,6 +2859,7 @@ static void iavf_reset_task(struct work_struct *work)
reg_val);
iavf_disable_vf(adapter);
mutex_unlock(&adapter->client_lock);
+ mutex_unlock(&adapter->crit_lock);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@@ -2670,8 +2868,7 @@ continue_reset:
* ndo_open() returning, so we can't assume it means all our open
* tasks have finished, since we're not holding the rtnl_lock here.
*/
- running = ((adapter->state == __IAVF_RUNNING) ||
- (adapter->state == __IAVF_RESETTING));
+ running = adapter->state == __IAVF_RUNNING;
if (running) {
netdev->flags &= ~IFF_UP;
@@ -2695,13 +2892,16 @@ continue_reset:
/* kill and reinit the admin queue */
iavf_shutdown_adminq(hw);
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
- err = iavf_init_adminq(hw);
- if (err)
+ status = iavf_init_adminq(hw);
+ if (status) {
dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
- err);
+ status);
+ goto reset_err;
+ }
adapter->aq_required = 0;
- if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
+ if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
+ (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
err = iavf_reinit_interrupt_scheme(adapter);
if (err)
goto reset_err;
@@ -2773,12 +2973,13 @@ continue_reset:
if (err)
goto reset_err;
- if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
+ if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
+ (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
err = iavf_request_traffic_irqs(adapter, netdev->name);
if (err)
goto reset_err;
- adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED;
}
iavf_configure(adapter);
@@ -2793,6 +2994,9 @@ continue_reset:
iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
}
+
+ adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
+
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
@@ -2826,13 +3030,19 @@ static void iavf_adminq_task(struct work_struct *work)
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
goto out;
+ if (!mutex_trylock(&adapter->crit_lock)) {
+ if (adapter->state == __IAVF_REMOVE)
+ return;
+
+ queue_work(iavf_wq, &adapter->adminq_task);
+ goto out;
+ }
+
event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf)
goto out;
- if (iavf_lock_timeout(&adapter->crit_lock, 200))
- goto freedom;
do {
ret = iavf_clean_arq_element(hw, &event, &pending);
v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
@@ -2848,6 +3058,24 @@ static void iavf_adminq_task(struct work_struct *work)
} while (pending);
mutex_unlock(&adapter->crit_lock);
+ if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) {
+ if (adapter->netdev_registered ||
+ !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
+ struct net_device *netdev = adapter->netdev;
+
+ rtnl_lock();
+ netdev_update_features(netdev);
+ rtnl_unlock();
+ /* Request VLAN offload settings */
+ if (VLAN_V2_ALLOWED(adapter))
+ iavf_set_vlan_offload_features(adapter, 0,
+ netdev->features);
+
+ iavf_set_queue_vlan_tag_loc(adapter);
+ }
+
+ adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
+ }
if ((adapter->flags &
(IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
adapter->state == __IAVF_RESETTING)
@@ -3800,11 +4028,12 @@ static int iavf_close(struct net_device *netdev)
struct iavf_adapter *adapter = netdev_priv(netdev);
int status;
- if (adapter->state <= __IAVF_DOWN_PENDING)
- return 0;
+ mutex_lock(&adapter->crit_lock);
- while (!mutex_trylock(&adapter->crit_lock))
- usleep_range(500, 1000);
+ if (adapter->state <= __IAVF_DOWN_PENDING) {
+ mutex_unlock(&adapter->crit_lock);
+ return 0;
+ }
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
if (CLIENT_ENABLED(adapter))
@@ -3853,8 +4082,11 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
iavf_notify_client_l2_params(&adapter->vsi);
adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
}
- adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- queue_work(iavf_wq, &adapter->reset_task);
+
+ if (netif_running(netdev)) {
+ adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+ queue_work(iavf_wq, &adapter->reset_task);
+ }
return 0;
}
@@ -4368,12 +4600,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "DMA configuration failed: 0x%x\n", err);
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
}
err = pci_request_regions(pdev, iavf_driver_name);
@@ -4431,7 +4660,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
mutex_init(&adapter->crit_lock);
mutex_init(&adapter->client_lock);
- mutex_init(&adapter->remove_lock);
mutex_init(&hw->aq.asq_mutex);
mutex_init(&hw->aq.arq_mutex);
@@ -4547,7 +4775,6 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
static void iavf_remove(struct pci_dev *pdev)
{
struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
- enum iavf_state_t prev_state = adapter->last_state;
struct net_device *netdev = adapter->netdev;
struct iavf_fdir_fltr *fdir, *fdirtmp;
struct iavf_vlan_filter *vlf, *vlftmp;
@@ -4556,14 +4783,37 @@ static void iavf_remove(struct pci_dev *pdev)
struct iavf_cloud_filter *cf, *cftmp;
struct iavf_hw *hw = &adapter->hw;
int err;
- /* Indicate we are in remove and not to run reset_task */
- mutex_lock(&adapter->remove_lock);
- cancel_work_sync(&adapter->reset_task);
+
+ /* When reboot/shutdown is in progress there is no need to do
+ * anything, as the adapter is already in the __IAVF_REMOVE state
+ * set during the iavf_shutdown() callback.
+ */
+ if (adapter->state == __IAVF_REMOVE)
+ return;
+
+ set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
+ /* Wait until port initialization is complete.
+ * There are flows where register/unregister netdev may race.
+ */
+ while (1) {
+ mutex_lock(&adapter->crit_lock);
+ if (adapter->state == __IAVF_RUNNING ||
+ adapter->state == __IAVF_DOWN ||
+ adapter->state == __IAVF_INIT_FAILED) {
+ mutex_unlock(&adapter->crit_lock);
+ break;
+ }
+
+ mutex_unlock(&adapter->crit_lock);
+ usleep_range(500, 1000);
+ }
cancel_delayed_work_sync(&adapter->watchdog_task);
- cancel_delayed_work_sync(&adapter->client_task);
+
if (adapter->netdev_registered) {
- unregister_netdev(netdev);
+ rtnl_lock();
+ unregister_netdevice(netdev);
adapter->netdev_registered = false;
+ rtnl_unlock();
}
if (CLIENT_ALLOWED(adapter)) {
err = iavf_lan_del_device(adapter);
@@ -4572,6 +4822,10 @@ static void iavf_remove(struct pci_dev *pdev)
err);
}
+ mutex_lock(&adapter->crit_lock);
+ dev_info(&adapter->pdev->dev, "Remove device\n");
+ iavf_change_state(adapter, __IAVF_REMOVE);
+
iavf_request_reset(adapter);
msleep(50);
/* If the FW isn't responding, kick it once, but only once. */
@@ -4579,37 +4833,24 @@ static void iavf_remove(struct pci_dev *pdev)
iavf_request_reset(adapter);
msleep(50);
}
- if (iavf_lock_timeout(&adapter->crit_lock, 5000))
- dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
- dev_info(&adapter->pdev->dev, "Removing device\n");
+ iavf_misc_irq_disable(adapter);
/* Shut down all the garbage mashers on the detention level */
- iavf_change_state(adapter, __IAVF_REMOVE);
+ cancel_work_sync(&adapter->reset_task);
+ cancel_delayed_work_sync(&adapter->watchdog_task);
+ cancel_work_sync(&adapter->adminq_task);
+ cancel_delayed_work_sync(&adapter->client_task);
+
adapter->aq_required = 0;
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
iavf_free_all_tx_resources(adapter);
iavf_free_all_rx_resources(adapter);
- iavf_misc_irq_disable(adapter);
iavf_free_misc_irq(adapter);
- /* In case we enter iavf_remove from erroneous state, free traffic irqs
- * here, so as to not cause a kernel crash, when calling
- * iavf_reset_interrupt_capability.
- */
- if ((adapter->last_state == __IAVF_RESETTING &&
- prev_state != __IAVF_DOWN) ||
- (adapter->last_state == __IAVF_RUNNING &&
- !(netdev->flags & IFF_UP)))
- iavf_free_traffic_irqs(adapter);
-
iavf_reset_interrupt_capability(adapter);
iavf_free_q_vectors(adapter);
- cancel_delayed_work_sync(&adapter->watchdog_task);
-
- cancel_work_sync(&adapter->adminq_task);
-
iavf_free_rss(adapter);
if (hw->aq.asq.count)
@@ -4621,8 +4862,6 @@ static void iavf_remove(struct pci_dev *pdev)
mutex_destroy(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
mutex_destroy(&adapter->crit_lock);
- mutex_unlock(&adapter->remove_lock);
- mutex_destroy(&adapter->remove_lock);
iounmap(hw->hw_addr);
pci_release_regions(pdev);
@@ -4692,8 +4931,6 @@ static struct pci_driver iavf_driver = {
**/
static int __init iavf_init_module(void)
{
- int ret;
-
pr_info("iavf: %s\n", iavf_driver_string);
pr_info("%s\n", iavf_copyright);
@@ -4704,8 +4941,7 @@ static int __init iavf_init_module(void)
pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
return -ENOMEM;
}
- ret = pci_register_driver(&iavf_driver);
- return ret;
+ return pci_register_driver(&iavf_driver);
}
module_init(iavf_init_module);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_status.h b/drivers/net/ethernet/intel/iavf/iavf_status.h
index 46e3d1f6b604..2ea5c7c339bc 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_status.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_status.h
@@ -18,7 +18,7 @@ enum iavf_status {
IAVF_ERR_ADAPTER_STOPPED = -9,
IAVF_ERR_INVALID_MAC_ADDR = -10,
IAVF_ERR_DEVICE_NOT_SUPPORTED = -11,
- IAVF_ERR_MASTER_REQUESTS_PENDING = -12,
+ IAVF_ERR_PRIMARY_REQUESTS_PENDING = -12,
IAVF_ERR_INVALID_LINK_SETTINGS = -13,
IAVF_ERR_AUTONEG_NOT_COMPLETE = -14,
IAVF_ERR_RESET_FAILED = -15,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 8cbe7ad1347c..978f651c6b09 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -374,29 +374,60 @@ static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
return &q_vector->rx == rc;
}
-static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector)
+#define IAVF_AIM_MULTIPLIER_100G 2560
+#define IAVF_AIM_MULTIPLIER_50G 1280
+#define IAVF_AIM_MULTIPLIER_40G 1024
+#define IAVF_AIM_MULTIPLIER_20G 512
+#define IAVF_AIM_MULTIPLIER_10G 256
+#define IAVF_AIM_MULTIPLIER_1G 32
+
+static unsigned int iavf_mbps_itr_multiplier(u32 speed_mbps)
{
- unsigned int divisor;
+ switch (speed_mbps) {
+ case SPEED_100000:
+ return IAVF_AIM_MULTIPLIER_100G;
+ case SPEED_50000:
+ return IAVF_AIM_MULTIPLIER_50G;
+ case SPEED_40000:
+ return IAVF_AIM_MULTIPLIER_40G;
+ case SPEED_25000:
+ case SPEED_20000:
+ return IAVF_AIM_MULTIPLIER_20G;
+ case SPEED_10000:
+ default:
+ return IAVF_AIM_MULTIPLIER_10G;
+ case SPEED_1000:
+ case SPEED_100:
+ return IAVF_AIM_MULTIPLIER_1G;
+ }
+}
- switch (q_vector->adapter->link_speed) {
+static unsigned int
+iavf_virtchnl_itr_multiplier(enum virtchnl_link_speed speed_virtchnl)
+{
+ switch (speed_virtchnl) {
case VIRTCHNL_LINK_SPEED_40GB:
- divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024;
- break;
+ return IAVF_AIM_MULTIPLIER_40G;
case VIRTCHNL_LINK_SPEED_25GB:
case VIRTCHNL_LINK_SPEED_20GB:
- divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512;
- break;
- default:
+ return IAVF_AIM_MULTIPLIER_20G;
case VIRTCHNL_LINK_SPEED_10GB:
- divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256;
- break;
+ default:
+ return IAVF_AIM_MULTIPLIER_10G;
case VIRTCHNL_LINK_SPEED_1GB:
case VIRTCHNL_LINK_SPEED_100MB:
- divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32;
- break;
+ return IAVF_AIM_MULTIPLIER_1G;
}
+}
- return divisor;
+static unsigned int iavf_itr_divisor(struct iavf_adapter *adapter)
+{
+ if (ADV_LINK_SUPPORT(adapter))
+ return IAVF_ITR_ADAPTIVE_MIN_INC *
+ iavf_mbps_itr_multiplier(adapter->link_speed_mbps);
+ else
+ return IAVF_ITR_ADAPTIVE_MIN_INC *
+ iavf_virtchnl_itr_multiplier(adapter->link_speed);
}
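For context, these helpers feed the adaptive ITR bump computed in the adjust_by_size hunk just below, where the divisor turns an average wire size into an interrupt-rate increment. A self-contained sketch of that arithmetic — assuming IAVF_ITR_ADAPTIVE_MIN_INC is 0x0002 as defined in iavf_txrx.h — for a 10G link:

/* Illustrative userspace sketch of the adaptive ITR bump; the
 * multiplier comes from the table added above.
 */
#include <stdio.h>

#define IAVF_ITR_ADAPTIVE_MIN_INC 0x0002	/* assumed, per iavf_txrx.h */
#define IAVF_AIM_MULTIPLIER_10G 256
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int avg_wire_size = 1514;	/* estimated bytes per packet */
	unsigned int divisor = IAVF_ITR_ADAPTIVE_MIN_INC * IAVF_AIM_MULTIPLIER_10G;

	/* at 10G: divisor = 512, so a 1514B average adds 3 * 2 = 6 to the ITR */
	printf("bump=%u\n",
	       DIV_ROUND_UP(avg_wire_size, divisor) * IAVF_ITR_ADAPTIVE_MIN_INC);
	return 0;
}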
/**
@@ -586,8 +617,9 @@ adjust_by_size:
* Use addition as we have already recorded the new latency flag
* for the ITR value.
*/
- itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector)) *
- IAVF_ITR_ADAPTIVE_MIN_INC;
+ itr += DIV_ROUND_UP(avg_wire_size,
+ iavf_itr_divisor(q_vector->adapter)) *
+ IAVF_ITR_ADAPTIVE_MIN_INC;
if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
itr &= IAVF_ITR_ADAPTIVE_LATENCY;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 5ee1d118fd30..782450d5c12f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -22,17 +22,17 @@ static int iavf_send_pf_msg(struct iavf_adapter *adapter,
enum virtchnl_ops op, u8 *msg, u16 len)
{
struct iavf_hw *hw = &adapter->hw;
- enum iavf_status err;
+ enum iavf_status status;
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
return 0; /* nothing to see here, move along */
- err = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
- if (err)
- dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
- op, iavf_stat_str(hw, err),
+ status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
+ if (status)
+ dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n",
+ op, iavf_stat_str(hw, status),
iavf_aq_str(hw, hw->aq.asq_last_status));
- return err;
+ return iavf_status_to_errno(status);
}
/**
@@ -55,6 +55,41 @@ int iavf_send_api_ver(struct iavf_adapter *adapter)
}
/**
+ * iavf_poll_virtchnl_msg
+ * @hw: HW configuration structure
+ * @event: event to populate on success
+ * @op_to_poll: requested virtchnl op to poll for
+ *
+ * Poll for a virtchnl message matching the requested op. Returns 0 once
+ * a message with the matching opcode is found in the queue, or a negative
+ * error code if no matching message arrives or some other failure occurs.
+ */
+static int
+iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
+ enum virtchnl_ops op_to_poll)
+{
+ enum virtchnl_ops received_op;
+ enum iavf_status status;
+ u32 v_retval;
+
+ while (1) {
+ /* When the AQ is empty, iavf_clean_arq_element will return
+ * nonzero and this loop will terminate.
+ */
+ status = iavf_clean_arq_element(hw, event, NULL);
+ if (status != IAVF_SUCCESS)
+ return iavf_status_to_errno(status);
+ received_op =
+ (enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);
+ if (op_to_poll == received_op)
+ break;
+ }
+
+ v_retval = le32_to_cpu(event->desc.cookie_low);
+ return virtchnl_status_to_errno((enum virtchnl_status_code)v_retval);
+}
+
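Every caller converted below follows the same shape: allocate an event buffer sized for the expected payload, poll for the matching opcode, copy the payload out on success, and free the buffer. A condensed sketch of that pattern, lifted from the iavf_get_vf_vlan_v2_caps() conversion further down (no new API is introduced here):

/* Sketch of the common caller pattern for iavf_poll_virtchnl_msg(). */
static int example_fetch_vlan_v2_caps(struct iavf_adapter *adapter)
{
	struct iavf_arq_event_info event;
	u16 len = sizeof(struct virtchnl_vlan_caps);
	int err;

	event.buf_len = len;
	event.msg_buf = kzalloc(len, GFP_KERNEL);	/* reply lands here */
	if (!event.msg_buf)
		return -ENOMEM;

	err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
				     VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS);
	if (!err)	/* payload is only valid when the poll succeeded */
		memcpy(&adapter->vlan_v2_caps, event.msg_buf,
		       min(event.msg_len, len));

	kfree(event.msg_buf);
	return err;
}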
+/**
* iavf_verify_api_ver
* @adapter: adapter structure
*
@@ -65,55 +100,28 @@ int iavf_send_api_ver(struct iavf_adapter *adapter)
**/
int iavf_verify_api_ver(struct iavf_adapter *adapter)
{
- struct virtchnl_version_info *pf_vvi;
- struct iavf_hw *hw = &adapter->hw;
struct iavf_arq_event_info event;
- enum virtchnl_ops op;
- enum iavf_status err;
+ int err;
event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
- event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
- if (!event.msg_buf) {
- err = -ENOMEM;
- goto out;
- }
-
- while (1) {
- err = iavf_clean_arq_element(hw, &event, NULL);
- /* When the AQ is empty, iavf_clean_arq_element will return
- * nonzero and this loop will terminate.
- */
- if (err)
- goto out_alloc;
- op =
- (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
- if (op == VIRTCHNL_OP_VERSION)
- break;
- }
+ event.msg_buf = kzalloc(IAVF_MAX_AQ_BUF_SIZE, GFP_KERNEL);
+ if (!event.msg_buf)
+ return -ENOMEM;
+ err = iavf_poll_virtchnl_msg(&adapter->hw, &event, VIRTCHNL_OP_VERSION);
+ if (!err) {
+ struct virtchnl_version_info *pf_vvi =
+ (struct virtchnl_version_info *)event.msg_buf;
+ adapter->pf_version = *pf_vvi;
- err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
- if (err)
- goto out_alloc;
-
- if (op != VIRTCHNL_OP_VERSION) {
- dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
- op);
- err = -EIO;
- goto out_alloc;
+ if (pf_vvi->major > VIRTCHNL_VERSION_MAJOR ||
+ (pf_vvi->major == VIRTCHNL_VERSION_MAJOR &&
+ pf_vvi->minor > VIRTCHNL_VERSION_MINOR))
+ err = -EIO;
}
- pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
- adapter->pf_version = *pf_vvi;
-
- if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
- ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
- (pf_vvi->minor > VIRTCHNL_VERSION_MINOR)))
- err = -EIO;
-
-out_alloc:
kfree(event.msg_buf);
-out:
+
return err;
}
@@ -208,33 +216,17 @@ int iavf_get_vf_config(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
struct iavf_arq_event_info event;
- enum virtchnl_ops op;
- enum iavf_status err;
u16 len;
+ int err;
- len = sizeof(struct virtchnl_vf_resource) +
+ len = sizeof(struct virtchnl_vf_resource) +
IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
event.buf_len = len;
- event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
- if (!event.msg_buf) {
- err = -ENOMEM;
- goto out;
- }
+ event.msg_buf = kzalloc(len, GFP_KERNEL);
+ if (!event.msg_buf)
+ return -ENOMEM;
- while (1) {
- /* When the AQ is empty, iavf_clean_arq_element will return
- * nonzero and this loop will terminate.
- */
- err = iavf_clean_arq_element(hw, &event, NULL);
- if (err)
- goto out_alloc;
- op =
- (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
- if (op == VIRTCHNL_OP_GET_VF_RESOURCES)
- break;
- }
-
- err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
+ err = iavf_poll_virtchnl_msg(hw, &event, VIRTCHNL_OP_GET_VF_RESOURCES);
memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
/* some PFs send more queues than we should have so validate that
@@ -243,48 +235,32 @@ int iavf_get_vf_config(struct iavf_adapter *adapter)
if (!err)
iavf_validate_num_queues(adapter);
iavf_vf_parse_hw_config(hw, adapter->vf_res);
-out_alloc:
+
kfree(event.msg_buf);
-out:
+
return err;
}
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
{
- struct iavf_hw *hw = &adapter->hw;
struct iavf_arq_event_info event;
- enum virtchnl_ops op;
- enum iavf_status err;
+ int err;
u16 len;
- len = sizeof(struct virtchnl_vlan_caps);
+ len = sizeof(struct virtchnl_vlan_caps);
event.buf_len = len;
- event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
- if (!event.msg_buf) {
- err = -ENOMEM;
- goto out;
- }
-
- while (1) {
- /* When the AQ is empty, iavf_clean_arq_element will return
- * nonzero and this loop will terminate.
- */
- err = iavf_clean_arq_element(hw, &event, NULL);
- if (err)
- goto out_alloc;
- op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
- if (op == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
- break;
- }
+ event.msg_buf = kzalloc(len, GFP_KERNEL);
+ if (!event.msg_buf)
+ return -ENOMEM;
- err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
- if (err)
- goto out_alloc;
+ err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
+ VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS);
+ if (!err)
+ memcpy(&adapter->vlan_v2_caps, event.msg_buf,
+ min(event.msg_len, len));
- memcpy(&adapter->vlan_v2_caps, event.msg_buf, min(event.msg_len, len));
-out_alloc:
kfree(event.msg_buf);
-out:
+
return err;
}
@@ -454,6 +430,20 @@ void iavf_map_queues(struct iavf_adapter *adapter)
}
/**
+ * iavf_set_mac_addr_type - Set the correct request type from the filter type
+ * @virtchnl_ether_addr: pointer to requested list element
+ * @filter: pointer to requested filter
+ **/
+static void
+iavf_set_mac_addr_type(struct virtchnl_ether_addr *virtchnl_ether_addr,
+ const struct iavf_mac_filter *filter)
+{
+ virtchnl_ether_addr->type = filter->is_primary ?
+ VIRTCHNL_ETHER_ADDR_PRIMARY :
+ VIRTCHNL_ETHER_ADDR_EXTRA;
+}
+
+/**
* iavf_add_ether_addrs
* @adapter: adapter structure
*
@@ -508,6 +498,7 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter)
list_for_each_entry(f, &adapter->mac_filter_list, list) {
if (f->add) {
ether_addr_copy(veal->list[i].addr, f->macaddr);
+ iavf_set_mac_addr_type(&veal->list[i], f);
i++;
f->add = false;
if (i == count)
@@ -577,6 +568,7 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
if (f->remove) {
ether_addr_copy(veal->list[i].addr, f->macaddr);
+ iavf_set_mac_addr_type(&veal->list[i], f);
i++;
list_del(&f->list);
kfree(f);
@@ -1827,11 +1819,29 @@ void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
*
* Request that the PF reset this VF. No response is expected.
**/
-void iavf_request_reset(struct iavf_adapter *adapter)
+int iavf_request_reset(struct iavf_adapter *adapter)
{
+ int err;
/* Don't check CURRENT_OP - this is always higher priority */
- iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
+ err = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ return err;
+}
+
+/**
+ * iavf_netdev_features_vlan_strip_set - update vlan strip status
+ * @netdev: ptr to netdev being adjusted
+ * @enable: enable or disable vlan strip
+ *
+ * Helper function to change vlan strip status in netdev->features.
+ */
+static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev,
+ const bool enable)
+{
+ if (enable)
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+ else
+ netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
}
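The rejected-request paths right below and the completion cases added near the end of iavf_virtchnl_completion() both lean on this helper to keep ethtool's view of NETIF_F_HW_VLAN_CTAG_RX in sync with the PF. A hypothetical condensation of those four call sites into one rule:

/* Hypothetical sketch: on success (v_retval == 0) the requested strip
 * state took effect; on failure the effective state is the opposite of
 * what was requested, so the feature bit is reverted.
 */
static void example_sync_vlan_strip(struct net_device *netdev,
				    bool requested_enable, u32 v_retval)
{
	bool effective = v_retval ? !requested_enable : requested_enable;

	iavf_netdev_features_vlan_strip_set(netdev, effective);
}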
/**
@@ -2057,8 +2067,18 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
+ /* Vlan stripping could not be enabled by ethtool.
+ * Disable it in netdev->features.
+ */
+ iavf_netdev_features_vlan_strip_set(netdev, false);
+ break;
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
+ /* Vlan stripping could not be disabled by ethtool.
+ * Enable it in netdev->features.
+ */
+ iavf_netdev_features_vlan_strip_set(netdev, true);
break;
default:
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
@@ -2146,29 +2166,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
sizeof(adapter->vlan_v2_caps)));
iavf_process_config(adapter);
-
- /* unlock crit_lock before acquiring rtnl_lock as other
- * processes holding rtnl_lock could be waiting for the same
- * crit_lock
- */
- mutex_unlock(&adapter->crit_lock);
- /* VLAN capabilities can change during VFR, so make sure to
- * update the netdev features with the new capabilities
- */
- rtnl_lock();
- netdev_update_features(netdev);
- rtnl_unlock();
- if (iavf_lock_timeout(&adapter->crit_lock, 10000))
- dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n",
- __FUNCTION__);
-
- /* Request VLAN offload settings */
- if (VLAN_V2_ALLOWED(adapter))
- iavf_set_vlan_offload_features(adapter, 0,
- netdev->features);
-
- iavf_set_queue_vlan_tag_loc(adapter);
-
+ adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
}
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
@@ -2334,6 +2332,20 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
spin_unlock_bh(&adapter->adv_rss_lock);
}
break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ /* PF enabled vlan strip on this VF.
+ * Update netdev->features if needed to be in sync with ethtool.
+ */
+ if (!v_retval)
+ iavf_netdev_features_vlan_strip_set(netdev, true);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ /* PF disabled vlan strip on this VF.
+ * Update netdev->features if needed to be in sync with ethtool.
+ */
+ if (!v_retval)
+ iavf_netdev_features_vlan_strip_set(netdev, false);
+ break;
default:
if (adapter->current_op && (v_opcode != adapter->current_op))
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index c36faa7d1471..9183d480b70b 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -18,8 +18,12 @@ ice-y := ice_main.o \
ice_txrx_lib.o \
ice_txrx.o \
ice_fltr.o \
+ ice_pf_vsi_vlan_ops.o \
+ ice_vsi_vlan_ops.o \
+ ice_vsi_vlan_lib.o \
ice_fdir.o \
ice_ethtool_fdir.o \
+ ice_vlan_mode.o \
ice_flex_pipe.o \
ice_flow.o \
ice_idc.o \
@@ -29,9 +33,16 @@ ice-y := ice_main.o \
ice_ethtool.o \
ice_repr.o \
ice_tc_lib.o
-ice-$(CONFIG_PCI_IOV) += ice_virtchnl_allowlist.o
-ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o
+ice-$(CONFIG_PCI_IOV) += \
+ ice_sriov.o \
+ ice_virtchnl.o \
+ ice_virtchnl_allowlist.o \
+ ice_virtchnl_fdir.o \
+ ice_vf_mbx.o \
+ ice_vf_vsi_vlan_ops.o \
+ ice_vf_lib.o
ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o
+ice-$(CONFIG_TTY) += ice_gnss.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 473b1f6be9de..b0b27bfcd7a2 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -51,9 +51,7 @@
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>
-#if IS_ENABLED(CONFIG_DCB)
-#include <scsi/iscsi_proto.h>
-#endif /* CONFIG_DCB */
+#include <net/gtp.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
@@ -63,8 +61,8 @@
#include "ice_flow.h"
#include "ice_sched.h"
#include "ice_idc_int.h"
-#include "ice_virtchnl_pf.h"
#include "ice_sriov.h"
+#include "ice_vf_mbx.h"
#include "ice_ptp.h"
#include "ice_fdir.h"
#include "ice_xsk.h"
@@ -72,6 +70,8 @@
#include "ice_repr.h"
#include "ice_eswitch.h"
#include "ice_lag.h"
+#include "ice_vsi_vlan_ops.h"
+#include "ice_gnss.h"
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
@@ -107,7 +107,6 @@
/* All VF control VSIs share the same IRQ, so assign a unique ID for them */
#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_RDMA_VEC_ID - 1)
#define ICE_INVAL_Q_INDEX 0xffff
-#define ICE_INVAL_VFID 256
#define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */
@@ -183,6 +182,7 @@
enum ice_feature {
ICE_F_DSCP,
ICE_F_SMA_CTRL,
+ ICE_F_GNSS,
ICE_F_MAX
};
@@ -290,6 +290,7 @@ enum ice_pf_state {
ICE_LINK_DEFAULT_OVERRIDE_PENDING,
ICE_PHY_INIT_COMPLETE,
ICE_FD_VF_FLUSH_CTX, /* set at FD Rx IRQ or timeout */
+ ICE_AUX_ERR_PENDING,
ICE_STATE_NBITS /* must be last */
};
@@ -330,7 +331,7 @@ struct ice_vsi {
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
- s16 vf_id; /* VF ID for SR-IOV VSIs */
+ struct ice_vf *vf; /* VF associated with this VSI */
u16 ethtype; /* Ethernet protocol for pause frame */
u16 num_gfltr;
@@ -367,6 +368,8 @@ struct ice_vsi {
u8 irqs_ready:1;
u8 current_isup:1; /* Sync 'link up' logging */
u8 stat_offsets_loaded:1;
+ struct ice_vsi_vlan_ops inner_vlan_ops;
+ struct ice_vsi_vlan_ops outer_vlan_ops;
u16 num_vlan;
/* queue information */
@@ -467,7 +470,6 @@ enum ice_pf_flags {
ICE_FLAG_FD_ENA,
ICE_FLAG_PTP_SUPPORTED, /* PTP is supported by NVM */
ICE_FLAG_PTP, /* PTP is enabled by software */
- ICE_FLAG_AUX_ENA,
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_TC_MQPRIO, /* support for Multi queue TC */
ICE_FLAG_CLS_FLOWER,
@@ -481,8 +483,11 @@ enum ice_pf_flags {
ICE_FLAG_LEGACY_RX,
ICE_FLAG_VF_TRUE_PROMISC_ENA,
ICE_FLAG_MDD_AUTO_RESET_VF,
+ ICE_FLAG_VF_VLAN_PRUNING,
ICE_FLAG_LINK_LENIENT_MODE_ENA,
ICE_FLAG_PLUG_AUX_DEV,
+ ICE_FLAG_MTU_CHANGED,
+ ICE_FLAG_GNSS, /* GNSS successfully initialized */
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -523,15 +528,7 @@ struct ice_pf {
struct ice_vsi **vsi; /* VSIs created by the driver */
struct ice_sw *first_sw; /* first switch created by firmware */
u16 eswitch_mode; /* current mode of eswitch */
- /* Virtchnl/SR-IOV config info */
- struct ice_vf *vf;
- u16 num_alloc_vfs; /* actual number of VFs allocated */
- u16 num_vfs_supported; /* num VFs supported for this PF */
- u16 num_qps_per_vf;
- u16 num_msix_per_vf;
- /* used to ratelimit the MDD event logging */
- unsigned long last_printed_mdd_jiffies;
- DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT);
+ struct ice_vfs vfs;
DECLARE_BITMAP(features, ICE_F_MAX);
DECLARE_BITMAP(state, ICE_STATE_NBITS);
DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
@@ -546,6 +543,9 @@ struct ice_pf {
struct mutex tc_mutex; /* lock to protect TC changes */
u32 msg_enable;
struct ice_ptp ptp;
+ struct tty_driver *ice_gnss_tty_driver;
+ struct tty_port gnss_tty_port;
+ struct gnss_serial *gnss_serial;
u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */
u16 rdma_base_vector;
@@ -558,6 +558,7 @@ struct ice_pf {
wait_queue_head_t reset_wait_queue;
u32 hw_csum_rx_error;
+ u32 oicr_err_reg;
u16 oicr_idx; /* Other interrupt cause MSIX vector index */
u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
u16 max_pf_txqs; /* Total Tx queues PF wide */
@@ -833,6 +834,9 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf);
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx);
void ice_update_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_pf *pf);
+void
+ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
+ struct ice_q_stats stats, u64 *pkts, u64 *bytes);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_vsi_cfg(struct ice_vsi *vsi);
@@ -886,7 +890,6 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
{
if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- set_bit(ICE_FLAG_AUX_ENA, pf->flags);
set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
}
}
@@ -897,8 +900,16 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
*/
static inline void ice_clear_rdma_cap(struct ice_pf *pf)
{
- ice_unplug_aux_dev(pf);
+ /* We can directly unplug aux device here only if the flag bit
+ * ICE_FLAG_PLUG_AUX_DEV is not set because ice_unplug_aux_dev()
+ * could race with ice_plug_aux_dev() called from
+ * ice_service_task(). In this case we only clear that bit now and
+ * aux device will be unplugged later once ice_plug_aux_dev()
+ * called from ice_service_task() finishes (see ice_service_task()).
+ */
+ if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+ ice_unplug_aux_dev(pf);
+
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
}
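The comment above depends on test_and_clear_bit() being a single atomic read-modify-write, so at most one context ever observes the pending-plug bit as set. An annotated restatement of the idiom (illustrative only, not a proposed change):

static inline void example_clear_rdma_cap(struct ice_pf *pf)
{
	/* Atomic observe-and-clear: returns the old value, so only one
	 * caller can win for a given set_bit() of ICE_FLAG_PLUG_AUX_DEV.
	 */
	if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
		ice_unplug_aux_dev(pf);	/* no plug in flight: unplug now */
	/* else: ice_service_task() owns the plug and performs the
	 * unplug itself after ice_plug_aux_dev() returns.
	 */

	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}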
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index ad1dcfa5ff65..b25e27c4d887 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -226,6 +226,15 @@ struct ice_aqc_get_sw_cfg_resp_elem {
#define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15)
};
+/* Set Port parameters, (direct, 0x0203) */
+struct ice_aqc_set_port_params {
+ __le16 cmd_flags;
+#define ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA BIT(2)
+ __le16 bad_frame_vsi;
+ __le16 swid;
+ u8 reserved[10];
+};
+
/* These resource type defines are used for all switch resource
* commands where a resource type is required, such as:
* Get Resource Allocation command (indirect 0x0204)
@@ -283,6 +292,40 @@ struct ice_aqc_alloc_free_res_elem {
struct ice_aqc_res_elem elem[];
};
+/* Request buffer for Set VLAN Mode AQ command (indirect 0x020C) */
+struct ice_aqc_set_vlan_mode {
+ u8 reserved;
+ u8 l2tag_prio_tagging;
+#define ICE_AQ_VLAN_PRIO_TAG_S 0
+#define ICE_AQ_VLAN_PRIO_TAG_M (0x7 << ICE_AQ_VLAN_PRIO_TAG_S)
+#define ICE_AQ_VLAN_PRIO_TAG_NOT_SUPPORTED 0x0
+#define ICE_AQ_VLAN_PRIO_TAG_STAG 0x1
+#define ICE_AQ_VLAN_PRIO_TAG_OUTER_CTAG 0x2
+#define ICE_AQ_VLAN_PRIO_TAG_OUTER_VLAN 0x3
+#define ICE_AQ_VLAN_PRIO_TAG_INNER_CTAG 0x4
+#define ICE_AQ_VLAN_PRIO_TAG_MAX 0x4
+#define ICE_AQ_VLAN_PRIO_TAG_ERROR 0x7
+ u8 l2tag_reserved[64];
+ u8 rdma_packet;
+#define ICE_AQ_VLAN_RDMA_TAG_S 0
+#define ICE_AQ_VLAN_RDMA_TAG_M (0x3F << ICE_AQ_VLAN_RDMA_TAG_S)
+#define ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING 0x10
+#define ICE_AQ_DVM_VLAN_RDMA_PKT_FLAG_SETTING 0x1A
+ u8 rdma_reserved[2];
+ u8 mng_vlan_prot_id;
+#define ICE_AQ_VLAN_MNG_PROTOCOL_ID_OUTER 0x10
+#define ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER 0x11
+ u8 prot_id_reserved[30];
+};
+
+/* Response buffer for Get VLAN Mode AQ command (indirect 0x020D) */
+struct ice_aqc_get_vlan_mode {
+ u8 vlan_mode;
+#define ICE_AQ_VLAN_MODE_DVM_ENA BIT(0)
+ u8 l2tag_prio_tagging;
+ u8 reserved[98];
+};
+
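Most consumers of the Get VLAN Mode response care about a single bit: whether double VLAN mode (DVM) is active, which is what the ice_is_dvm_ena() check in the Rx-context hunk further down ultimately reflects. A hypothetical parse of the response buffer (the indirect AQ send itself is elided):

/* Hypothetical sketch: "resp" is assumed to hold the buffer returned by
 * a successful Get VLAN Mode (indirect 0x020D) command.
 */
static bool example_resp_is_dvm(const struct ice_aqc_get_vlan_mode *resp)
{
	return !!(resp->vlan_mode & ICE_AQ_VLAN_MODE_DVM_ENA);
}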
/* Add VSI (indirect 0x0210)
* Update VSI (indirect 0x0211)
* Get VSI (indirect 0x0212)
@@ -343,108 +386,113 @@ struct ice_aqc_vsi_props {
#define ICE_AQ_VSI_SW_FLAG_SRC_PRUNE BIT(7)
u8 sw_flags2;
#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S 0
-#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M \
- (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S)
+#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S)
#define ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA BIT(0)
#define ICE_AQ_VSI_SW_FLAG_LAN_ENA BIT(4)
u8 veb_stat_id;
#define ICE_AQ_VSI_SW_VEB_STAT_ID_S 0
-#define ICE_AQ_VSI_SW_VEB_STAT_ID_M (0x1F << ICE_AQ_VSI_SW_VEB_STAT_ID_S)
+#define ICE_AQ_VSI_SW_VEB_STAT_ID_M (0x1F << ICE_AQ_VSI_SW_VEB_STAT_ID_S)
#define ICE_AQ_VSI_SW_VEB_STAT_ID_VALID BIT(5)
/* security section */
u8 sec_flags;
#define ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD BIT(0)
#define ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF BIT(2)
-#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S 4
-#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_M (0xF << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S)
+#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S 4
+#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_M (0xF << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S)
#define ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA BIT(0)
u8 sec_reserved;
/* VLAN section */
- __le16 pvid; /* VLANS include priority bits */
- u8 pvlan_reserved[2];
- u8 vlan_flags;
-#define ICE_AQ_VSI_VLAN_MODE_S 0
-#define ICE_AQ_VSI_VLAN_MODE_M (0x3 << ICE_AQ_VSI_VLAN_MODE_S)
-#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED 0x1
-#define ICE_AQ_VSI_VLAN_MODE_TAGGED 0x2
-#define ICE_AQ_VSI_VLAN_MODE_ALL 0x3
-#define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2)
-#define ICE_AQ_VSI_VLAN_EMOD_S 3
-#define ICE_AQ_VSI_VLAN_EMOD_M (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
-#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_VLAN_EMOD_S)
-#define ICE_AQ_VSI_VLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_VLAN_EMOD_S)
-#define ICE_AQ_VSI_VLAN_EMOD_STR (0x2 << ICE_AQ_VSI_VLAN_EMOD_S)
-#define ICE_AQ_VSI_VLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
- u8 pvlan_reserved2[3];
+ __le16 port_based_inner_vlan; /* VLANS include priority bits */
+ u8 inner_vlan_reserved[2];
+ u8 inner_vlan_flags;
+#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_S 0
+#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_M (0x3 << ICE_AQ_VSI_INNER_VLAN_TX_MODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED 0x1
+#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTTAGGED 0x2
+#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL 0x3
+#define ICE_AQ_VSI_INNER_VLAN_INSERT_PVID BIT(2)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_S 3
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_M (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH (0x0 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_UP (0x1 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR (0x2 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+ u8 inner_vlan_reserved2[3];
/* ingress egress up sections */
__le32 ingress_table; /* bitmap, 3 bits per up */
-#define ICE_AQ_VSI_UP_TABLE_UP0_S 0
-#define ICE_AQ_VSI_UP_TABLE_UP0_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP0_S)
-#define ICE_AQ_VSI_UP_TABLE_UP1_S 3
-#define ICE_AQ_VSI_UP_TABLE_UP1_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP1_S)
-#define ICE_AQ_VSI_UP_TABLE_UP2_S 6
-#define ICE_AQ_VSI_UP_TABLE_UP2_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP2_S)
-#define ICE_AQ_VSI_UP_TABLE_UP3_S 9
-#define ICE_AQ_VSI_UP_TABLE_UP3_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP3_S)
-#define ICE_AQ_VSI_UP_TABLE_UP4_S 12
-#define ICE_AQ_VSI_UP_TABLE_UP4_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP4_S)
-#define ICE_AQ_VSI_UP_TABLE_UP5_S 15
-#define ICE_AQ_VSI_UP_TABLE_UP5_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP5_S)
-#define ICE_AQ_VSI_UP_TABLE_UP6_S 18
-#define ICE_AQ_VSI_UP_TABLE_UP6_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP6_S)
-#define ICE_AQ_VSI_UP_TABLE_UP7_S 21
-#define ICE_AQ_VSI_UP_TABLE_UP7_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP7_S)
+#define ICE_AQ_VSI_UP_TABLE_UP0_S 0
+#define ICE_AQ_VSI_UP_TABLE_UP0_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP0_S)
+#define ICE_AQ_VSI_UP_TABLE_UP1_S 3
+#define ICE_AQ_VSI_UP_TABLE_UP1_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP1_S)
+#define ICE_AQ_VSI_UP_TABLE_UP2_S 6
+#define ICE_AQ_VSI_UP_TABLE_UP2_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP2_S)
+#define ICE_AQ_VSI_UP_TABLE_UP3_S 9
+#define ICE_AQ_VSI_UP_TABLE_UP3_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP3_S)
+#define ICE_AQ_VSI_UP_TABLE_UP4_S 12
+#define ICE_AQ_VSI_UP_TABLE_UP4_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP4_S)
+#define ICE_AQ_VSI_UP_TABLE_UP5_S 15
+#define ICE_AQ_VSI_UP_TABLE_UP5_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP5_S)
+#define ICE_AQ_VSI_UP_TABLE_UP6_S 18
+#define ICE_AQ_VSI_UP_TABLE_UP6_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP6_S)
+#define ICE_AQ_VSI_UP_TABLE_UP7_S 21
+#define ICE_AQ_VSI_UP_TABLE_UP7_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP7_S)
__le32 egress_table; /* same defines as for ingress table */
/* outer tags section */
- __le16 outer_tag;
- u8 outer_tag_flags;
-#define ICE_AQ_VSI_OUTER_TAG_MODE_S 0
-#define ICE_AQ_VSI_OUTER_TAG_MODE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_MODE_S)
-#define ICE_AQ_VSI_OUTER_TAG_NOTHING 0x0
-#define ICE_AQ_VSI_OUTER_TAG_REMOVE 0x1
-#define ICE_AQ_VSI_OUTER_TAG_COPY 0x2
-#define ICE_AQ_VSI_OUTER_TAG_TYPE_S 2
-#define ICE_AQ_VSI_OUTER_TAG_TYPE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_TYPE_S)
-#define ICE_AQ_VSI_OUTER_TAG_NONE 0x0
-#define ICE_AQ_VSI_OUTER_TAG_STAG 0x1
-#define ICE_AQ_VSI_OUTER_TAG_VLAN_8100 0x2
-#define ICE_AQ_VSI_OUTER_TAG_VLAN_9100 0x3
-#define ICE_AQ_VSI_OUTER_TAG_INSERT BIT(4)
-#define ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST BIT(6)
- u8 outer_tag_reserved;
+ __le16 port_based_outer_vlan;
+ u8 outer_vlan_flags;
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_S 0
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_M (0x3 << ICE_AQ_VSI_OUTER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH 0x0
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_UP 0x1
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW 0x2
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING 0x3
+#define ICE_AQ_VSI_OUTER_TAG_TYPE_S 2
+#define ICE_AQ_VSI_OUTER_TAG_TYPE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_TYPE_S)
+#define ICE_AQ_VSI_OUTER_TAG_NONE 0x0
+#define ICE_AQ_VSI_OUTER_TAG_STAG 0x1
+#define ICE_AQ_VSI_OUTER_TAG_VLAN_8100 0x2
+#define ICE_AQ_VSI_OUTER_TAG_VLAN_9100 0x3
+#define ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT BIT(4)
+#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S 5
+#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M (0x3 << ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S)
+#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTUNTAGGED 0x1
+#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTTAGGED 0x2
+#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL 0x3
+#define ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC BIT(7)
+ u8 outer_vlan_reserved;
/* queue mapping section */
__le16 mapping_flags;
-#define ICE_AQ_VSI_Q_MAP_CONTIG 0x0
-#define ICE_AQ_VSI_Q_MAP_NONCONTIG BIT(0)
+#define ICE_AQ_VSI_Q_MAP_CONTIG 0x0
+#define ICE_AQ_VSI_Q_MAP_NONCONTIG BIT(0)
__le16 q_mapping[16];
-#define ICE_AQ_VSI_Q_S 0
-#define ICE_AQ_VSI_Q_M (0x7FF << ICE_AQ_VSI_Q_S)
+#define ICE_AQ_VSI_Q_S 0
+#define ICE_AQ_VSI_Q_M (0x7FF << ICE_AQ_VSI_Q_S)
__le16 tc_mapping[8];
-#define ICE_AQ_VSI_TC_Q_OFFSET_S 0
-#define ICE_AQ_VSI_TC_Q_OFFSET_M (0x7FF << ICE_AQ_VSI_TC_Q_OFFSET_S)
-#define ICE_AQ_VSI_TC_Q_NUM_S 11
-#define ICE_AQ_VSI_TC_Q_NUM_M (0xF << ICE_AQ_VSI_TC_Q_NUM_S)
+#define ICE_AQ_VSI_TC_Q_OFFSET_S 0
+#define ICE_AQ_VSI_TC_Q_OFFSET_M (0x7FF << ICE_AQ_VSI_TC_Q_OFFSET_S)
+#define ICE_AQ_VSI_TC_Q_NUM_S 11
+#define ICE_AQ_VSI_TC_Q_NUM_M (0xF << ICE_AQ_VSI_TC_Q_NUM_S)
/* queueing option section */
u8 q_opt_rss;
-#define ICE_AQ_VSI_Q_OPT_RSS_LUT_S 0
-#define ICE_AQ_VSI_Q_OPT_RSS_LUT_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_LUT_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI 0x0
-#define ICE_AQ_VSI_Q_OPT_RSS_LUT_PF 0x2
-#define ICE_AQ_VSI_Q_OPT_RSS_LUT_GBL 0x3
-#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2
-#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6
-#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_S 0
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_LUT_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI 0x0
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_PF 0x2
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_GBL 0x3
+#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2
+#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
u8 q_opt_tc;
-#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0
-#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S)
-#define ICE_AQ_VSI_Q_OPT_PROF_TC_OVR BIT(7)
+#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0
+#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S)
+#define ICE_AQ_VSI_Q_OPT_PROF_TC_OVR BIT(7)
u8 q_opt_flags;
-#define ICE_AQ_VSI_Q_OPT_PE_FLTR_EN BIT(0)
+#define ICE_AQ_VSI_Q_OPT_PE_FLTR_EN BIT(0)
u8 q_opt_reserved[3];
/* outer up section */
__le32 outer_up_table; /* same structure and defines as ingress tbl */
@@ -452,27 +500,27 @@ struct ice_aqc_vsi_props {
__le16 sect_10_reserved;
/* flow director section */
__le16 fd_options;
-#define ICE_AQ_VSI_FD_ENABLE BIT(0)
-#define ICE_AQ_VSI_FD_TX_AUTO_ENABLE BIT(1)
-#define ICE_AQ_VSI_FD_PROG_ENABLE BIT(3)
+#define ICE_AQ_VSI_FD_ENABLE BIT(0)
+#define ICE_AQ_VSI_FD_TX_AUTO_ENABLE BIT(1)
+#define ICE_AQ_VSI_FD_PROG_ENABLE BIT(3)
__le16 max_fd_fltr_dedicated;
__le16 max_fd_fltr_shared;
__le16 fd_def_q;
-#define ICE_AQ_VSI_FD_DEF_Q_S 0
-#define ICE_AQ_VSI_FD_DEF_Q_M (0x7FF << ICE_AQ_VSI_FD_DEF_Q_S)
-#define ICE_AQ_VSI_FD_DEF_GRP_S 12
-#define ICE_AQ_VSI_FD_DEF_GRP_M (0x7 << ICE_AQ_VSI_FD_DEF_GRP_S)
+#define ICE_AQ_VSI_FD_DEF_Q_S 0
+#define ICE_AQ_VSI_FD_DEF_Q_M (0x7FF << ICE_AQ_VSI_FD_DEF_Q_S)
+#define ICE_AQ_VSI_FD_DEF_GRP_S 12
+#define ICE_AQ_VSI_FD_DEF_GRP_M (0x7 << ICE_AQ_VSI_FD_DEF_GRP_S)
__le16 fd_report_opt;
-#define ICE_AQ_VSI_FD_REPORT_Q_S 0
-#define ICE_AQ_VSI_FD_REPORT_Q_M (0x7FF << ICE_AQ_VSI_FD_REPORT_Q_S)
-#define ICE_AQ_VSI_FD_DEF_PRIORITY_S 12
-#define ICE_AQ_VSI_FD_DEF_PRIORITY_M (0x7 << ICE_AQ_VSI_FD_DEF_PRIORITY_S)
-#define ICE_AQ_VSI_FD_DEF_DROP BIT(15)
+#define ICE_AQ_VSI_FD_REPORT_Q_S 0
+#define ICE_AQ_VSI_FD_REPORT_Q_M (0x7FF << ICE_AQ_VSI_FD_REPORT_Q_S)
+#define ICE_AQ_VSI_FD_DEF_PRIORITY_S 12
+#define ICE_AQ_VSI_FD_DEF_PRIORITY_M (0x7 << ICE_AQ_VSI_FD_DEF_PRIORITY_S)
+#define ICE_AQ_VSI_FD_DEF_DROP BIT(15)
/* PASID section */
__le32 pasid_id;
-#define ICE_AQ_VSI_PASID_ID_S 0
-#define ICE_AQ_VSI_PASID_ID_M (0xFFFFF << ICE_AQ_VSI_PASID_ID_S)
-#define ICE_AQ_VSI_PASID_ID_VALID BIT(31)
+#define ICE_AQ_VSI_PASID_ID_S 0
+#define ICE_AQ_VSI_PASID_ID_M (0xFFFFF << ICE_AQ_VSI_PASID_ID_S)
+#define ICE_AQ_VSI_PASID_ID_VALID BIT(31)
u8 reserved[24];
};
@@ -489,9 +537,13 @@ struct ice_aqc_add_get_recipe {
struct ice_aqc_recipe_content {
u8 rid;
+#define ICE_AQ_RECIPE_ID_S 0
+#define ICE_AQ_RECIPE_ID_M (0x3F << ICE_AQ_RECIPE_ID_S)
#define ICE_AQ_RECIPE_ID_IS_ROOT BIT(7)
#define ICE_AQ_SW_ID_LKUP_IDX 0
u8 lkup_indx[5];
+#define ICE_AQ_RECIPE_LKUP_DATA_S 0
+#define ICE_AQ_RECIPE_LKUP_DATA_M (0x3F << ICE_AQ_RECIPE_LKUP_DATA_S)
#define ICE_AQ_RECIPE_LKUP_IGNORE BIT(7)
#define ICE_AQ_SW_ID_LKUP_MASK 0x00FF
__le16 mask[5];
@@ -502,15 +554,25 @@ struct ice_aqc_recipe_content {
u8 rsvd0[3];
u8 act_ctrl_join_priority;
u8 act_ctrl_fwd_priority;
+#define ICE_AQ_RECIPE_FWD_PRIORITY_S 0
+#define ICE_AQ_RECIPE_FWD_PRIORITY_M (0xF << ICE_AQ_RECIPE_FWD_PRIORITY_S)
u8 act_ctrl;
+#define ICE_AQ_RECIPE_ACT_NEED_PASS_L2 BIT(0)
+#define ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2 BIT(1)
#define ICE_AQ_RECIPE_ACT_INV_ACT BIT(2)
+#define ICE_AQ_RECIPE_ACT_PRUNE_INDX_S 4
+#define ICE_AQ_RECIPE_ACT_PRUNE_INDX_M (0x3 << ICE_AQ_RECIPE_ACT_PRUNE_INDX_S)
u8 rsvd1;
__le32 dflt_act;
+#define ICE_AQ_RECIPE_DFLT_ACT_S 0
+#define ICE_AQ_RECIPE_DFLT_ACT_M (0x7FFFF << ICE_AQ_RECIPE_DFLT_ACT_S)
+#define ICE_AQ_RECIPE_DFLT_ACT_VALID BIT(31)
};
struct ice_aqc_recipe_data_elem {
u8 recipe_indx;
u8 resp_bits;
+#define ICE_AQ_RECIPE_WAS_UPDATED BIT(0)
u8 rsvd0[2];
u8 recipe_bitmap[8];
u8 rsvd1[4];
@@ -1339,6 +1401,24 @@ struct ice_aqc_get_link_topo {
u8 rsvd[9];
};
+/* Read I2C (direct, 0x06E2) */
+struct ice_aqc_i2c {
+ struct ice_aqc_link_topo_addr topo_addr;
+ __le16 i2c_addr;
+ u8 i2c_params;
+#define ICE_AQC_I2C_DATA_SIZE_M GENMASK(3, 0)
+#define ICE_AQC_I2C_USE_REPEATED_START BIT(7)
+
+ u8 rsvd;
+ __le16 i2c_bus_addr;
+ u8 rsvd2[4];
+};
+
+/* Read I2C Response (direct, 0x06E2) */
+struct ice_aqc_read_i2c_resp {
+ u8 i2c_data[16];
+};
+
/* Set Port Identification LED (direct, 0x06E9) */
struct ice_aqc_set_port_id_led {
u8 lport_num;
@@ -1883,7 +1963,7 @@ struct ice_aqc_get_clear_fw_log {
};
/* Download Package (indirect 0x0C40) */
-/* Also used for Update Package (indirect 0x0C42) */
+/* Also used for Update Package (indirect 0x0C41 and 0x0C42) */
struct ice_aqc_download_pkg {
u8 flags;
#define ICE_AQC_DOWNLOAD_PKG_LAST_BUF 0x01
@@ -2009,6 +2089,7 @@ struct ice_aq_desc {
struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
struct ice_aqc_get_sw_cfg get_sw_conf;
+ struct ice_aqc_set_port_params set_port_params;
struct ice_aqc_sw_rules sw_rules;
struct ice_aqc_add_get_recipe add_get_recipe;
struct ice_aqc_recipe_to_profile recipe_to_profile;
@@ -2049,6 +2130,8 @@ struct ice_aq_desc {
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
struct ice_aqc_get_link_topo get_link_topo;
+ struct ice_aqc_i2c read_i2c;
+ struct ice_aqc_read_i2c_resp read_i2c_resp;
} params;
};
@@ -2110,10 +2193,13 @@ enum ice_adminq_opc {
/* internal switch commands */
ice_aqc_opc_get_sw_cfg = 0x0200,
+ ice_aqc_opc_set_port_params = 0x0203,
/* Alloc/Free/Get Resources */
ice_aqc_opc_alloc_res = 0x0208,
ice_aqc_opc_free_res = 0x0209,
+ ice_aqc_opc_set_vlan_mode_parameters = 0x020C,
+ ice_aqc_opc_get_vlan_mode_parameters = 0x020D,
/* VSI commands */
ice_aqc_opc_add_vsi = 0x0210,
@@ -2160,6 +2246,7 @@ enum ice_adminq_opc {
ice_aqc_opc_set_event_mask = 0x0613,
ice_aqc_opc_set_mac_lb = 0x0620,
ice_aqc_opc_get_link_topo = 0x06E0,
+ ice_aqc_opc_read_i2c = 0x06E2,
ice_aqc_opc_set_port_id_led = 0x06E9,
ice_aqc_opc_set_gpio = 0x06EC,
ice_aqc_opc_get_gpio = 0x06ED,
@@ -2204,6 +2291,7 @@ enum ice_adminq_opc {
/* package commands */
ice_aqc_opc_download_pkg = 0x0C40,
+ ice_aqc_opc_upload_section = 0x0C41,
ice_aqc_opc_update_pkg = 0x0C42,
ice_aqc_opc_get_pkg_info_list = 0x0C43,
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.h b/drivers/net/ethernet/intel/ice/ice_arfs.h
index 80ed76f0cace..9669ad9bf7b5 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.h
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.h
@@ -3,6 +3,9 @@
#ifndef _ICE_ARFS_H_
#define _ICE_ARFS_H_
+
+#include "ice_fdir.h"
+
enum ice_arfs_fltr_state {
ICE_ARFS_INACTIVE,
ICE_ARFS_ACTIVE,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 1a5ece3bce79..136d7911adb4 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -5,6 +5,7 @@
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
+#include "ice_sriov.h"
static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
{
@@ -322,7 +323,7 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
break;
case ICE_VSI_VF:
/* Firmware expects vmvf_num to be absolute VF ID */
- tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
+ tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
case ICE_VSI_SWITCHDEV_CTRL:
@@ -418,8 +419,22 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
*/
rlan_ctx.crcstrip = 1;
- /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
- rlan_ctx.l2tsel = 1;
+ /* L2TSEL flag defines the reported L2 Tags in the receive descriptor
+ * and it needs to remain 1 for non-DVM capable configurations to not
+ * break backward compatibility for VF drivers. Setting this field to 0
+ * will cause the single/outer VLAN tag to be stripped to the L2TAG2_2ND
+ * field in the Rx descriptor. Setting it to 1 allows the VLAN tag to
+ * be stripped in L2TAG1 of the Rx descriptor, which is where VFs will
+ * check for the tag
+ */
+ if (ice_is_dvm_ena(hw))
+ if (vsi->type == ICE_VSI_VF &&
+ ice_vf_is_port_vlan_ena(vsi->vf))
+ rlan_ctx.l2tsel = 1;
+ else
+ rlan_ctx.l2tsel = 0;
+ else
+ rlan_ctx.l2tsel = 1;
rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index e2af99a763ed..9619bdb9e49a 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1518,16 +1518,27 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
/* When a package download is in process (i.e. when the firmware's
* Global Configuration Lock resource is held), only the Download
- * Package, Get Version, Get Package Info List and Release Resource
- * (with resource ID set to Global Config Lock) AdminQ commands are
- * allowed; all others must block until the package download completes
- * and the Global Config Lock is released. See also
- * ice_acquire_global_cfg_lock().
+ * Package, Get Version, Get Package Info List, Upload Section,
+ * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
+ * Add Recipe, Set Recipes to Profile Association, Get Recipe, Get
+ * Recipes to Profile Association, and Release Resource (with resource
+ * ID set to Global Config Lock) AdminQ commands are allowed; all others
+ * must block until the package download completes and the Global Config
+ * Lock is released. See also ice_acquire_global_cfg_lock().
*/
switch (le16_to_cpu(desc->opcode)) {
case ice_aqc_opc_download_pkg:
case ice_aqc_opc_get_pkg_info_list:
case ice_aqc_opc_get_ver:
+ case ice_aqc_opc_upload_section:
+ case ice_aqc_opc_update_pkg:
+ case ice_aqc_opc_set_port_params:
+ case ice_aqc_opc_get_vlan_mode_parameters:
+ case ice_aqc_opc_set_vlan_mode_parameters:
+ case ice_aqc_opc_add_recipe:
+ case ice_aqc_opc_recipe_to_profile:
+ case ice_aqc_opc_get_recipe:
+ case ice_aqc_opc_get_recipe_to_profile:
break;
case ice_aqc_opc_release_res:
if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
@@ -2737,6 +2748,34 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
}
/**
+ * ice_aq_set_port_params - set physical port parameters.
+ * @pi: pointer to the port info struct
+ * @double_vlan: if set double VLAN is enabled
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set Physical port parameters (0x0203)
+ */
+int
+ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_port_params *cmd;
+ struct ice_hw *hw = pi->hw;
+ struct ice_aq_desc desc;
+ u16 cmd_flags = 0;
+
+ cmd = &desc.params.set_port_params;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
+ if (double_vlan)
+ cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
+ cmd->cmd_flags = cpu_to_le16(cmd_flags);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
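No call site appears in this hunk, and the command exposes a single knob, so a hedged sketch of enabling 802.1ad double VLAN handling on a port reduces to:

/* Illustrative only: request double VLAN handling for the port; NULL
 * means no extra command details are attached.
 */
static int example_enable_dvm_on_port(struct ice_port_info *pi)
{
	return ice_aq_set_port_params(pi, true, NULL);
}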
+/**
* ice_get_link_speed_based_on_phy_type - returns link speed
* @phy_type_low: lower part of phy_type
* @phy_type_high: higher part of phy_type
@@ -4759,6 +4798,59 @@ ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
}
/**
+ * ice_aq_read_i2c
+ * @hw: pointer to the hw struct
+ * @topo_addr: topology address for a device to communicate with
+ * @bus_addr: 7-bit I2C bus address
+ * @addr: I2C memory address (I2C offset) with up to 16 bits
+ * @params: I2C parameters: bit [7] - Repeated start,
+ * bits [6:5] data offset size,
+ * bit [4] - I2C address type,
+ * bits [3:0] - data size to read (0-16 bytes)
+ * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
+ * @cd: pointer to command details structure or NULL
+ *
+ * Read I2C (0x06E2)
+ */
+int
+ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc = { 0 };
+ struct ice_aqc_i2c *cmd;
+ u8 data_size;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
+ cmd = &desc.params.read_i2c;
+
+ if (!data)
+ return -EINVAL;
+
+ data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);
+
+ cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
+ cmd->topo_addr = topo_addr;
+ cmd->i2c_params = params;
+ cmd->i2c_addr = addr;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (!status) {
+ struct ice_aqc_read_i2c_resp *resp;
+ u8 i;
+
+ resp = &desc.params.read_i2c_resp;
+ for (i = 0; i < data_size; i++) {
+ *data = resp->i2c_data[i];
+ data++;
+ }
+ }
+
+ return status;
+}
+
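The params byte packs the whole transfer description, so a caller composes it with FIELD_PREP against the masks defined earlier. A hypothetical two-byte read (the bus address and offset are made up for the sketch, not taken from a real board layout):

static int example_read_two_bytes(struct ice_hw *hw,
				  struct ice_aqc_link_topo_addr topo_addr,
				  u8 buf[2])
{
	/* bits [3:0]: data size = 2; bit [7]: use repeated start */
	u8 params = FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, 2) |
		    ICE_AQC_I2C_USE_REPEATED_START;

	return ice_aq_read_i2c(hw, topo_addr, 0x50 /* assumed bus addr */,
			       cpu_to_le16(0), params, buf, NULL);
}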
+/**
* ice_aq_set_driver_param - Set driver parameter to share via firmware
* @hw: pointer to the HW struct
* @idx: parameter index to set
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 1c57097ddf0b..872ea7d2332d 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -4,12 +4,14 @@
#ifndef _ICE_COMMON_H_
#define _ICE_COMMON_H_
-#include "ice.h"
+#include <linux/bitfield.h>
+
#include "ice_type.h"
#include "ice_nvm.h"
#include "ice_flex_pipe.h"
-#include "ice_switch.h"
#include <linux/avf/virtchnl.h>
+#include "ice_switch.h"
+#include "ice_fdir.h"
#define ICE_SQ_SEND_DELAY_TIME_MS 10
#define ICE_SQ_SEND_MAX_EXECUTE 3
@@ -85,6 +87,9 @@ int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd);
int
+ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
+ struct ice_sq_cd *cd);
+int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
@@ -205,5 +210,9 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
+int
+ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ struct ice_sq_cd *cd);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h
index d73348f279f7..6abf28a14291 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.h
@@ -5,6 +5,7 @@
#define _ICE_DCB_H_
#include "ice_type.h"
+#include <scsi/iscsi_proto.h>
#define ICE_DCBX_STATUS_NOT_STARTED 0
#define ICE_DCBX_STATUS_IN_PROGRESS 1
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index b94d8daeaa58..add90e75f05c 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -916,7 +916,8 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
return;
/* Insert 802.1p priority into VLAN header */
- if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN) ||
+ if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN ||
+ first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) ||
skb->priority != TC_PRIO_CONTROL) {
first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M;
/* Mask the lower 3 bits to set the 802.1p priority */
@@ -925,7 +926,10 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
/* if this is not already set it means a VLAN 0 + priority needs
* to be offloaded
*/
- first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
+ if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
+ first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
+ else
+ first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 73edc24d81d5..9a84d746a6c4 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -116,9 +116,12 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
struct net_device *uplink_netdev = uplink_vsi->netdev;
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_vsi_vlan_ops *vlan_ops;
bool rule_added = false;
- ice_vsi_manage_vlan_stripping(ctrl_vsi, false);
+ vlan_ops = ice_get_compat_vsi_vlan_ops(ctrl_vsi);
+ if (vlan_ops->dis_stripping(ctrl_vsi))
+ return -ENODEV;
ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
@@ -127,7 +130,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
__dev_mc_unsync(uplink_netdev, NULL);
netif_addr_unlock_bh(uplink_netdev);
- if (ice_vsi_add_vlan(uplink_vsi, 0, ICE_FWD_TO_VSI))
+ if (ice_vsi_add_vlan_zero(uplink_vsi))
goto err_def_rx;
if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) {
@@ -173,10 +176,20 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
int q_id;
ice_for_each_txq(vsi, q_id) {
- struct ice_repr *repr = pf->vf[q_id].repr;
- struct ice_q_vector *q_vector = repr->q_vector;
- struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
- struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
+ struct ice_q_vector *q_vector;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+ struct ice_repr *repr;
+ struct ice_vf *vf;
+
+ vf = ice_get_vf_by_id(pf, q_id);
+ if (WARN_ON(!vf))
+ continue;
+
+ repr = vf->repr;
+ q_vector = repr->q_vector;
+ tx_ring = vsi->tx_rings[q_id];
+ rx_ring = vsi->rx_rings[q_id];
q_vector->vsi = vsi;
q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
@@ -196,6 +209,38 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
rx_ring->q_vector = q_vector;
rx_ring->next = NULL;
rx_ring->netdev = repr->netdev;
+
+ ice_put_vf(vf);
+ }
+}
+
+/**
+ * ice_eswitch_release_reprs - clear PR VSIs configuration
+ * @pf: pointer to PF struct
+ * @ctrl_vsi: pointer to switchdev control VSI
+ */
+static void
+ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
+
+ ice_for_each_vf(pf, bkt, vf) {
+ struct ice_vsi *vsi = vf->repr->src_vsi;
+
+ /* Skip VFs that aren't configured */
+ if (!vf->repr->dst)
+ continue;
+
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ metadata_dst_free(vf->repr->dst);
+ vf->repr->dst = NULL;
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+
+ netif_napi_del(&vf->repr->q_vector->napi);
}
}
@@ -207,11 +252,13 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
{
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
int max_vsi_num = 0;
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
- ice_for_each_vf(pf, i) {
- struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
- struct ice_vf *vf = &pf->vf[i];
+ ice_for_each_vf(pf, bkt, vf) {
+ struct ice_vsi *vsi = vf->repr->src_vsi;
ice_remove_vsi_fltr(&pf->hw, vsi->idx);
vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
@@ -228,14 +275,16 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
vf->hw_lan_addr.addr,
ICE_FWD_TO_VSI);
metadata_dst_free(vf->repr->dst);
+ vf->repr->dst = NULL;
goto err;
}
- if (ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI)) {
+ if (ice_vsi_add_vlan_zero(vsi)) {
ice_fltr_add_mac_and_broadcast(vsi,
vf->hw_lan_addr.addr,
ICE_FWD_TO_VSI);
metadata_dst_free(vf->repr->dst);
+ vf->repr->dst = NULL;
ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
goto err;
}
@@ -249,8 +298,8 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
netif_keep_dst(vf->repr->netdev);
}
- ice_for_each_vf(pf, i) {
- struct ice_repr *repr = pf->vf[i].repr;
+ ice_for_each_vf(pf, bkt, vf) {
+ struct ice_repr *repr = vf->repr;
struct ice_vsi *vsi = repr->src_vsi;
struct metadata_dst *dst;
@@ -263,43 +312,12 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
return 0;
err:
- for (i = i - 1; i >= 0; i--) {
- struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
- struct ice_vf *vf = &pf->vf[i];
-
- ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
- metadata_dst_free(vf->repr->dst);
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
- ICE_FWD_TO_VSI);
- }
+ ice_eswitch_release_reprs(pf, ctrl_vsi);
return -ENODEV;
}
/**
- * ice_eswitch_release_reprs - clear PR VSIs configuration
- * @pf: poiner to PF struct
- * @ctrl_vsi: pointer to switchdev control VSI
- */
-static void
-ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
-{
- int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
- struct ice_vf *vf = &pf->vf[i];
-
- ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
- metadata_dst_free(vf->repr->dst);
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
- ICE_FWD_TO_VSI);
-
- netif_napi_del(&vf->repr->q_vector->napi);
- }
-}
-
-/**
* ice_eswitch_update_repr - reconfigure VF port representor
* @vsi: VF VSI for which port representor is configured
*/
@@ -313,7 +331,7 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
if (!ice_is_switchdev_running(pf))
return;
- vf = &pf->vf[vsi->vf_id];
+ vf = vsi->vf;
repr = vf->repr;
repr->src_vsi = vsi;
repr->dst->u.port_info.port_id = vsi->vsi_num;
@@ -321,7 +339,8 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
if (ret) {
ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI);
- dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id);
+ dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
+ vsi->vf->vf_id);
}
}
@@ -405,7 +424,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID, NULL);
+ return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, NULL, NULL);
}
/**
@@ -414,10 +433,13 @@ ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
*/
static void ice_eswitch_napi_del(struct ice_pf *pf)
{
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
- ice_for_each_vf(pf, i)
- netif_napi_del(&pf->vf[i].repr->q_vector->napi);
+ lockdep_assert_held(&pf->vfs.table_lock);
+
+ ice_for_each_vf(pf, bkt, vf)
+ netif_napi_del(&vf->repr->q_vector->napi);
}
/**
@@ -426,10 +448,13 @@ static void ice_eswitch_napi_del(struct ice_pf *pf)
*/
static void ice_eswitch_napi_enable(struct ice_pf *pf)
{
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
- ice_for_each_vf(pf, i)
- napi_enable(&pf->vf[i].repr->q_vector->napi);
+ ice_for_each_vf(pf, bkt, vf)
+ napi_enable(&vf->repr->q_vector->napi);
}
/**
@@ -438,10 +463,13 @@ static void ice_eswitch_napi_enable(struct ice_pf *pf)
*/
static void ice_eswitch_napi_disable(struct ice_pf *pf)
{
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
- ice_for_each_vf(pf, i)
- napi_disable(&pf->vf[i].repr->q_vector->napi);
+ ice_for_each_vf(pf, bkt, vf)
+ napi_disable(&vf->repr->q_vector->napi);
}
/**
@@ -519,7 +547,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (pf->eswitch_mode == mode)
return 0;
- if (pf->num_alloc_vfs) {
+ if (ice_has_vfs(pf)) {
dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created");
NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created");
return -EOPNOTSUPP;
@@ -610,16 +638,17 @@ int ice_eswitch_configure(struct ice_pf *pf)
*/
static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
{
- struct ice_repr *repr;
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
if (test_bit(ICE_DOWN, pf->state))
return;
- ice_for_each_vf(pf, i) {
- repr = pf->vf[i].repr;
- if (repr)
- ice_repr_start_tx_queues(repr);
+ ice_for_each_vf(pf, bkt, vf) {
+ if (vf->repr)
+ ice_repr_start_tx_queues(vf->repr);
}
}
@@ -629,16 +658,17 @@ static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
*/
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
{
- struct ice_repr *repr;
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
if (test_bit(ICE_DOWN, pf->state))
return;
- ice_for_each_vf(pf, i) {
- repr = pf->vf[i].repr;
- if (repr)
- ice_repr_stop_tx_queues(repr);
+ ice_for_each_vf(pf, bkt, vf) {
+ if (vf->repr)
+ ice_repr_stop_tx_queues(vf->repr);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index e2e3ef7fba7f..24cda7e1f916 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -164,6 +164,7 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("vf-true-promisc-support",
ICE_FLAG_VF_TRUE_PROMISC_ENA),
ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
+ ICE_PRIV_FLAG("vf-vlan-pruning", ICE_FLAG_VF_VLAN_PRUNING),
ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
};
@@ -315,16 +316,20 @@ out:
*/
static bool ice_active_vfs(struct ice_pf *pf)
{
- unsigned int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
+ bool active = false;
+ struct ice_vf *vf;
+ unsigned int bkt;
- if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
- return true;
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ active = true;
+ break;
+ }
}
+ rcu_read_unlock();
- return false;
+ return active;
}
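
The rewritten ice_active_vfs() latches the answer in a local and breaks instead of returning mid-walk, so the read-side lock is released on exactly one path. A minimal sketch of that shape, with a pthread rwlock standing in for the RCU read section and all names illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

#define N 8
static bool active_state[N]; /* stand-in for per-VF state bits */

/* Scan under the read lock; latch the result so there is exactly one
 * unlock path, mirroring the rcu_read_lock()/rcu_read_unlock() pairing.
 */
static bool any_active(void)
{
    bool active = false;
    int i;

    pthread_rwlock_rdlock(&lock);
    for (i = 0; i < N; i++) {
        if (active_state[i]) {
            active = true;
            break;
        }
    }
    pthread_rwlock_unlock(&lock);

    return active;
}

int main(void)
{
    active_state[3] = true;
    printf("any active: %d\n", any_active());
    return 0;
}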
/**
@@ -1295,6 +1300,14 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags);
ret = -EAGAIN;
}
+
+ if (test_bit(ICE_FLAG_VF_VLAN_PRUNING, change_flags) &&
+ ice_has_vfs(pf)) {
+ dev_err(dev, "vf-vlan-pruning: VLAN pruning cannot be changed while VFs are active.\n");
+ /* toggle bit back to previous state */
+ change_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags);
+ ret = -EOPNOTSUPP;
+ }
ethtool_exit:
clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
return ret;
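
The new vf-vlan-pruning check uses the same revert-on-reject pattern as the true-promisc check above it: if the requested flip is not allowed, the bit is toggled back so the stored flags keep matching reality. A tiny bit-flags sketch of the pattern, with invented names:

#include <stdio.h>

#define FLAG_VLAN_PRUNING (1u << 0)

static unsigned int flags;
static int vfs_active = 1; /* illustrative guard condition */

static int set_flags(unsigned int new_flags)
{
    unsigned int changed = flags ^ new_flags;

    flags = new_flags;

    if ((changed & FLAG_VLAN_PRUNING) && vfs_active) {
        /* Reject: toggle the bit back to its previous state. */
        flags ^= FLAG_VLAN_PRUNING;
        return -1;
    }
    return 0;
}

int main(void)
{
    printf("rc=%d flags=%u\n", set_flags(FLAG_VLAN_PRUNING), flags);
    return 0;
}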
@@ -2298,7 +2311,7 @@ ice_set_link_ksettings(struct net_device *netdev,
if (err)
goto done;
- curr_link_speed = pi->phy.link_info.link_speed;
+ curr_link_speed = pi->phy.curr_user_speed_req;
adv_link_speed = ice_ksettings_find_adv_link_speed(ks);
/* If speed didn't get set, set it to what it currently is.
@@ -2803,6 +2816,8 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
/* clone ring and setup updated count */
xdp_rings[i] = *vsi->xdp_rings[i];
xdp_rings[i].count = new_tx_cnt;
+ xdp_rings[i].next_dd = ICE_RING_QUARTER(&xdp_rings[i]) - 1;
+ xdp_rings[i].next_rs = ICE_RING_QUARTER(&xdp_rings[i]) - 1;
xdp_rings[i].desc = NULL;
xdp_rings[i].tx_buf = NULL;
err = ice_setup_tx_ring(&xdp_rings[i]);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 4deb2c9446ec..c73cdab44f70 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -4,10 +4,19 @@
#include "ice_common.h"
#include "ice_flex_pipe.h"
#include "ice_flow.h"
+#include "ice.h"
+
+/* For supporting double VLAN mode, it is necessary to enable or disable certain
+ * boost TCAM entries. The metadata label names that match the following
+ * prefixes will be saved to allow enabling double VLAN mode.
+ */
+#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */
+#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */
/* To support tunneling entries by PF, the package will append the PF number to
* the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
*/
+#define ICE_TNL_PRE "TNL_"
static const struct ice_tunnel_type_scan tnls[] = {
{ TNL_VXLAN, "TNL_VXLAN_PF" },
{ TNL_GENEVE, "TNL_GENEVE_PF" },
@@ -523,6 +532,55 @@ ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
}
/**
+ * ice_add_tunnel_hint
+ * @hw: pointer to the HW structure
+ * @label_name: label text
+ * @val: value of the tunnel port boost entry
+ */
+static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
+{
+ if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
+ u16 i;
+
+ for (i = 0; tnls[i].type != TNL_LAST; i++) {
+ size_t len = strlen(tnls[i].label_prefix);
+
+ /* Look for a matching label start before continuing */
+ if (strncmp(label_name, tnls[i].label_prefix, len))
+ continue;
+
+ /* Make sure this label matches our PF. Note that the PF
+ * character ('0' - '7') will be located where our
+ * prefix string's null terminator is located.
+ */
+ if ((label_name[len] - '0') == hw->pf_id) {
+ hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
+ hw->tnl.tbl[hw->tnl.count].valid = false;
+ hw->tnl.tbl[hw->tnl.count].boost_addr = val;
+ hw->tnl.tbl[hw->tnl.count].port = 0;
+ hw->tnl.count++;
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * ice_add_dvm_hint
+ * @hw: pointer to the HW structure
+ * @val: value of the boost entry
+ * @enable: true if the entry needs to be enabled, false if it needs to be disabled
+ */
+static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable)
+{
+ if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) {
+ hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
+ hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
+ hw->dvm_upd.count++;
+ }
+}
+
+/**
* ice_init_pkg_hints
* @hw: pointer to the HW structure
* @ice_seg: pointer to the segment of the package scan (non-NULL)
@@ -548,32 +606,23 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
&val);
- while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
- for (i = 0; tnls[i].type != TNL_LAST; i++) {
- size_t len = strlen(tnls[i].label_prefix);
+ while (label_name) {
+ if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
+ /* check for a tunnel entry */
+ ice_add_tunnel_hint(hw, label_name, val);
- /* Look for matching label start, before continuing */
- if (strncmp(label_name, tnls[i].label_prefix, len))
- continue;
+ /* check for a dvm mode entry */
+ else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
+ ice_add_dvm_hint(hw, val, true);
- /* Make sure this label matches our PF. Note that the PF
- * character ('0' - '7') will be located where our
- * prefix string's null terminator is located.
- */
- if ((label_name[len] - '0') == hw->pf_id) {
- hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
- hw->tnl.tbl[hw->tnl.count].valid = false;
- hw->tnl.tbl[hw->tnl.count].boost_addr = val;
- hw->tnl.tbl[hw->tnl.count].port = 0;
- hw->tnl.count++;
- break;
- }
- }
+ /* check for a svm mode entry */
+ else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
+ ice_add_dvm_hint(hw, val, false);
label_name = ice_enum_labels(NULL, 0, &state, &val);
}
- /* Cache the appropriate boost TCAM entry pointers */
+ /* Cache the appropriate boost TCAM entry pointers for tunnels */
for (i = 0; i < hw->tnl.count; i++) {
ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
&hw->tnl.tbl[i].boost_entry);
@@ -583,6 +632,11 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
}
}
+
+ /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */
+ for (i = 0; i < hw->dvm_upd.count; i++)
+ ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr,
+ &hw->dvm_upd.tbl[i].boost_entry);
}
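
ice_init_pkg_hints() now classifies each label by prefix and hands it to the matching hint collector. A self-contained sketch of the strncmp() prefix dispatch; the prefixes match the defines above, while the sample labels and handlers are made up for illustration:

#include <stdio.h>
#include <string.h>

#define TNL_PRE "TNL_"
#define DVM_PRE "BOOST_MAC_VLAN_DVM"
#define SVM_PRE "BOOST_MAC_VLAN_SVM"

static void add_tunnel_hint(const char *label) { printf("tunnel: %s\n", label); }
static void add_dvm_hint(const char *label, int enable)
{
    printf("%s: %s\n", enable ? "enable" : "disable", label);
}

/* Dispatch one label by prefix, in the same shape as the rewritten loop. */
static void classify(const char *label)
{
    if (!strncmp(label, TNL_PRE, strlen(TNL_PRE)))
        add_tunnel_hint(label);
    else if (!strncmp(label, DVM_PRE, strlen(DVM_PRE)))
        add_dvm_hint(label, 1);
    else if (!strncmp(label, SVM_PRE, strlen(SVM_PRE)))
        add_dvm_hint(label, 0);
}

int main(void)
{
    const char *labels[] = {
        "TNL_VXLAN_PF0",
        "BOOST_MAC_VLAN_DVM_0",
        "BOOST_MAC_VLAN_SVM_0",
        "OTHER_LABEL",
    };
    size_t i;

    for (i = 0; i < sizeof(labels) / sizeof(labels[0]); i++)
        classify(labels[i]);
    return 0;
}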
/* Key creation */
@@ -874,6 +928,27 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
}
/**
+ * ice_aq_upload_section
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer which will receive the section
+ * @buf_size: the size of the package buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Upload Section (0x0C41)
+ */
+int
+ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+}
+
+/**
* ice_aq_update_pkg
* @hw: pointer to the hardware structure
* @pkg_buf: the package cmd buffer
@@ -957,25 +1032,21 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
}
/**
- * ice_update_pkg
+ * ice_update_pkg_no_lock
* @hw: pointer to the hardware structure
* @bufs: pointer to an array of buffers
* @count: the number of buffers in the array
- *
- * Obtains change lock and updates package.
*/
-static int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+static int
+ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
- u32 offset, info, i;
- int status;
-
- status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
- if (status)
- return status;
+ int status = 0;
+ u32 i;
for (i = 0; i < count; i++) {
struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
bool last = ((i + 1) == count);
+ u32 offset, info;
status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
last, &offset, &info, NULL);
@@ -987,6 +1058,27 @@ static int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
}
}
+ return status;
+}
+
+/**
+ * ice_update_pkg
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains change lock and updates package.
+ */
+static int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ int status;
+
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ice_update_pkg_no_lock(hw, bufs, count);
+
ice_release_change_lock(hw);
return status;
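
Splitting ice_update_pkg() into a locking wrapper around a _no_lock worker lets callers that manage the change lock themselves reuse the body; ice_upd_dvm_boost_entry() later in this patch calls the _no_lock variant directly. A generic sketch of the split, assuming a pthread mutex in place of the firmware change lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t change_lock = PTHREAD_MUTEX_INITIALIZER;

/* Worker: the caller is responsible for holding change_lock. */
static int update_pkg_no_lock(int count)
{
    int i;

    for (i = 0; i < count; i++)
        printf("updating buffer %d of %d\n", i + 1, count);
    return 0;
}

/* Wrapper: acquires the lock, runs the worker, releases the lock. */
static int update_pkg(int count)
{
    int status;

    pthread_mutex_lock(&change_lock);
    status = update_pkg_no_lock(count);
    pthread_mutex_unlock(&change_lock);

    return status;
}

int main(void)
{
    return update_pkg(3);
}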
@@ -1080,6 +1172,13 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
break;
}
+ if (!status) {
+ status = ice_set_vlan_mode(hw);
+ if (status)
+ ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
+ status);
+ }
+
ice_release_global_cfg_lock(hw);
return state;
@@ -1117,6 +1216,7 @@ static enum ice_ddp_state
ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
struct ice_buf_table *ice_buf_tbl;
+ int status;
ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
ice_seg->hdr.seg_format_ver.major,
@@ -1133,8 +1233,12 @@ ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
le32_to_cpu(ice_buf_tbl->buf_count));
- return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
- le32_to_cpu(ice_buf_tbl->buf_count));
+ status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
+ le32_to_cpu(ice_buf_tbl->buf_count));
+
+ ice_post_pkg_dwnld_vlan_mode_cfg(hw);
+
+ return status;
}
/**
@@ -1701,16 +1805,43 @@ static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
return bld;
}
+static bool ice_is_gtp_u_profile(u16 prof_idx)
+{
+ return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID &&
+ prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER) ||
+ prof_idx == ICE_PROFID_IPV4_GTPU_TEID;
+}
+
+static bool ice_is_gtp_c_profile(u16 prof_idx)
+{
+ switch (prof_idx) {
+ case ICE_PROFID_IPV4_GTPC_TEID:
+ case ICE_PROFID_IPV4_GTPC_NO_TEID:
+ case ICE_PROFID_IPV6_GTPC_TEID:
+ case ICE_PROFID_IPV6_GTPC_NO_TEID:
+ return true;
+ default:
+ return false;
+ }
+}
+
/**
* ice_get_sw_prof_type - determine switch profile type
* @hw: pointer to the HW structure
* @fv: pointer to the switch field vector
+ * @prof_idx: profile index to check
*/
static enum ice_prof_type
-ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
+ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv, u32 prof_idx)
{
u16 i;
+ if (ice_is_gtp_c_profile(prof_idx))
+ return ICE_PROF_TUN_GTPC;
+
+ if (ice_is_gtp_u_profile(prof_idx))
+ return ICE_PROF_TUN_GTPU;
+
for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
@@ -1757,7 +1888,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
if (fv) {
/* Determine field vector type */
- prof_type = ice_get_sw_prof_type(hw, fv);
+ prof_type = ice_get_sw_prof_type(hw, fv, offset);
if (req_profs & prof_type)
set_bit((u16)offset, bm);
@@ -1768,20 +1899,19 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
/**
* ice_get_sw_fv_list
* @hw: pointer to the HW structure
- * @prot_ids: field vector to search for with a given protocol ID
- * @ids_cnt: lookup/protocol count
+ * @lkups: list of protocol types
* @bm: bitmap of field vectors to consider
* @fv_list: Head of a list
*
* Finds all the field vector entries from switch block that contain
- * a given protocol ID and returns a list of structures of type
+ * a given protocol ID and offset and returns a list of structures of type
* "ice_sw_fv_list_entry". Every structure in the list has a field vector
* definition and profile ID information
* NOTE: The caller of the function is responsible for freeing the memory
* allocated for every list entry.
*/
int
-ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
+ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
unsigned long *bm, struct list_head *fv_list)
{
struct ice_sw_fv_list_entry *fvl;
@@ -1793,7 +1923,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
memset(&state, 0, sizeof(state));
- if (!ids_cnt || !hw->seg)
+ if (!lkups->n_val_words || !hw->seg)
return -EINVAL;
ice_seg = hw->seg;
@@ -1812,20 +1942,17 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
if (!test_bit((u16)offset, bm))
continue;
- for (i = 0; i < ids_cnt; i++) {
+ for (i = 0; i < lkups->n_val_words; i++) {
int j;
- /* This code assumes that if a switch field vector line
- * has a matching protocol, then this line will contain
- * the entries necessary to represent every field in
- * that protocol header.
- */
for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
- if (fv->ew[j].prot_id == prot_ids[i])
+ if (fv->ew[j].prot_id ==
+ lkups->fv_words[i].prot_id &&
+ fv->ew[j].off == lkups->fv_words[i].off)
break;
if (j >= hw->blk[ICE_BLK_SW].es.fvw)
break;
- if (i + 1 == ids_cnt) {
+ if (i + 1 == lkups->n_val_words) {
fvl = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*fvl), GFP_KERNEL);
if (!fvl)
@@ -1897,7 +2024,7 @@ void ice_init_prof_result_bm(struct ice_hw *hw)
*
* Frees a package buffer
*/
-static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
+void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
{
devm_kfree(ice_hw_to_dev(hw), bld);
}
@@ -1997,6 +2124,43 @@ ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
}
/**
+ * ice_pkg_buf_alloc_single_section
+ * @hw: pointer to the HW structure
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ * @section: returns pointer to the section
+ *
+ * Allocates a package buffer with a single section.
+ * Note: all package contents must be in Little Endian form.
+ */
+struct ice_buf_build *
+ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
+ void **section)
+{
+ struct ice_buf_build *buf;
+
+ if (!section)
+ return NULL;
+
+ buf = ice_pkg_buf_alloc(hw);
+ if (!buf)
+ return NULL;
+
+ if (ice_pkg_buf_reserve_section(buf, 1))
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ *section = ice_pkg_buf_alloc_section(buf, type, size);
+ if (!*section)
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ return buf;
+
+ice_pkg_buf_alloc_single_section_err:
+ ice_pkg_buf_free(hw, buf);
+ return NULL;
+}
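
ice_pkg_buf_alloc_single_section() follows the single-exit cleanup idiom: any failure after the first allocation jumps to one label that undoes it. A minimal standalone version of the idiom, assuming plain malloc() in place of the package-buffer allocators:

#include <stdlib.h>
#include <string.h>

struct buf { char *payload; };

/* Allocate a buffer and one payload section; on any failure, unwind
 * through a single error label so nothing leaks.
 */
static struct buf *alloc_single_section(size_t size, void **section)
{
    struct buf *b;

    if (!section)
        return NULL;

    b = malloc(sizeof(*b));
    if (!b)
        return NULL;

    b->payload = malloc(size);
    if (!b->payload)
        goto err;

    memset(b->payload, 0, size);
    *section = b->payload;
    return b;

err:
    free(b);
    return NULL;
}

int main(void)
{
    void *section;
    struct buf *b = alloc_single_section(64, &section);

    if (!b)
        return 1;
    free(b->payload);
    free(b);
    return 0;
}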
+
+/**
* ice_pkg_buf_get_active_sections
* @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
*
@@ -2023,7 +2187,7 @@ static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
*
* Return a pointer to the buffer's header
*/
-static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
+struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
{
if (!bld)
return NULL;
@@ -2060,6 +2224,89 @@ ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
}
/**
+ * ice_upd_dvm_boost_entry
+ * @hw: pointer to the HW structure
+ * @entry: pointer to double vlan boost entry info
+ */
+static int
+ice_upd_dvm_boost_entry(struct ice_hw *hw, struct ice_dvm_entry *entry)
+{
+ struct ice_boost_tcam_section *sect_rx, *sect_tx;
+ int status = -ENOSPC;
+ struct ice_buf_build *bld;
+ u8 val, dc, nm;
+
+ bld = ice_pkg_buf_alloc(hw);
+ if (!bld)
+ return -ENOMEM;
+
+ /* allocate 2 sections, one for Rx parser, one for Tx parser */
+ if (ice_pkg_buf_reserve_section(bld, 2))
+ goto ice_upd_dvm_boost_entry_err;
+
+ sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+ struct_size(sect_rx, tcam, 1));
+ if (!sect_rx)
+ goto ice_upd_dvm_boost_entry_err;
+ sect_rx->count = cpu_to_le16(1);
+
+ sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+ struct_size(sect_tx, tcam, 1));
+ if (!sect_tx)
+ goto ice_upd_dvm_boost_entry_err;
+ sect_tx->count = cpu_to_le16(1);
+
+ /* copy original boost entry to update package buffer */
+ memcpy(sect_rx->tcam, entry->boost_entry, sizeof(*sect_rx->tcam));
+
+ /* re-write the don't care and never match bits accordingly */
+ if (entry->enable) {
+ /* all bits are don't care */
+ val = 0x00;
+ dc = 0xFF;
+ nm = 0x00;
+ } else {
+ /* disable, one never match bit, the rest are don't care */
+ val = 0x00;
+ dc = 0xF7;
+ nm = 0x08;
+ }
+
+ ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
+ &val, NULL, &dc, &nm, 0, sizeof(u8));
+
+ /* exact copy of entry to Tx section entry */
+ memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
+
+ status = ice_update_pkg_no_lock(hw, ice_pkg_buf(bld), 1);
+
+ice_upd_dvm_boost_entry_err:
+ ice_pkg_buf_free(hw, bld);
+
+ return status;
+}
+
+/**
+ * ice_set_dvm_boost_entries
+ * @hw: pointer to the HW structure
+ *
+ * Enable double VLAN mode by updating the appropriate boost TCAM entries.
+ */
+int ice_set_dvm_boost_entries(struct ice_hw *hw)
+{
+ int status;
+ u16 i;
+
+ for (i = 0; i < hw->dvm_upd.count; i++) {
+ status = ice_upd_dvm_boost_entry(hw, &hw->dvm_upd.tbl[i]);
+ if (status)
+ return status;
+ }
+
+ return 0;
+}
+
+/**
* ice_tunnel_idx_to_entry - convert linear index to the sparse one
* @hw: pointer to the HW structure
* @type: type of tunnel
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 6cbc29bcb02f..9c530c86703e 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -87,8 +87,14 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
void
ice_init_prof_result_bm(struct ice_hw *hw);
int
-ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
+ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
unsigned long *bm, struct list_head *fv_list);
+int
+ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
+u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
+int
+ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, struct ice_sq_cd *cd);
bool
ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
enum ice_tunnel_type type);
@@ -96,6 +102,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti);
int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti);
+int ice_set_dvm_boost_entries(struct ice_hw *hw);
/* Rx parser PTYPE functions */
bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
@@ -119,4 +126,10 @@ void ice_fill_blk_tbls(struct ice_hw *hw);
void ice_clear_hw_tbls(struct ice_hw *hw);
void ice_free_hw_tbls(struct ice_hw *hw);
int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
+struct ice_buf_build *
+ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
+ void **section);
+struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld);
+void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld);
+
#endif /* _ICE_FLEX_PIPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index fc087e0b5292..974d14a83b2e 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -162,6 +162,7 @@ struct ice_meta_sect {
#define ICE_SID_RXPARSER_MARKER_PTYPE 55
#define ICE_SID_RXPARSER_BOOST_TCAM 56
+#define ICE_SID_RXPARSER_METADATA_INIT 58
#define ICE_SID_TXPARSER_BOOST_TCAM 66
#define ICE_SID_XLT0_PE 80
@@ -416,6 +417,8 @@ enum ice_tunnel_type {
TNL_VXLAN = 0,
TNL_GENEVE,
TNL_GRETAP,
+ TNL_GTPC,
+ TNL_GTPU,
__TNL_TYPE_CNT,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
@@ -442,6 +445,19 @@ struct ice_tunnel_table {
u16 valid_count[__TNL_TYPE_CNT];
};
+struct ice_dvm_entry {
+ u16 boost_addr;
+ u16 enable;
+ struct ice_boost_tcam_entry *boost_entry;
+};
+
+#define ICE_DVM_MAX_ENTRIES 48
+
+struct ice_dvm_table {
+ struct ice_dvm_entry tbl[ICE_DVM_MAX_ENTRIES];
+ u16 count;
+};
+
struct ice_pkg_es {
__le16 count;
__le16 offset;
@@ -659,7 +675,35 @@ enum ice_prof_type {
ICE_PROF_NON_TUN = 0x1,
ICE_PROF_TUN_UDP = 0x2,
ICE_PROF_TUN_GRE = 0x4,
- ICE_PROF_TUN_ALL = 0x6,
+ ICE_PROF_TUN_GTPU = 0x8,
+ ICE_PROF_TUN_GTPC = 0x10,
+ ICE_PROF_TUN_ALL = 0x1E,
ICE_PROF_ALL = 0xFF,
};
+
+/* Number of bits/bytes contained in meta init entry. Note, this should be a
+ * multiple of 32 bits.
+ */
+#define ICE_META_INIT_BITS 192
+#define ICE_META_INIT_DW_CNT (ICE_META_INIT_BITS / (sizeof(__le32) * \
+ BITS_PER_BYTE))
+
+/* The meta init Flag field starts at this bit */
+#define ICE_META_FLAGS_ST 123
+
+/* The entry and bit to check for Double VLAN Mode (DVM) support */
+#define ICE_META_VLAN_MODE_ENTRY 0
+#define ICE_META_FLAG_VLAN_MODE 60
+#define ICE_META_VLAN_MODE_BIT (ICE_META_FLAGS_ST + \
+ ICE_META_FLAG_VLAN_MODE)
+
+struct ice_meta_init_entry {
+ __le32 bm[ICE_META_INIT_DW_CNT];
+};
+
+struct ice_meta_init_section {
+ __le16 count;
+ __le16 offset;
+ struct ice_meta_init_entry entry;
+};
#endif /* _ICE_FLEX_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index beed4838dcbe..ef103e47a8dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -3,6 +3,7 @@
#include "ice_common.h"
#include "ice_flow.h"
+#include <net/gre.h>
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 84b6e4464a21..b465d27d9b80 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -4,6 +4,8 @@
#ifndef _ICE_FLOW_H_
#define _ICE_FLOW_H_
+#include "ice_flex_type.h"
+
#define ICE_FLOW_ENTRY_HANDLE_INVAL 0
#define ICE_FLOW_FLD_OFF_INVAL 0xffff
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index c29177c6bb9d..af57eb114966 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -203,21 +203,22 @@ ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list,
* ice_fltr_add_vlan_to_list - add VLAN filter info to existing list
* @vsi: pointer to VSI struct
* @list: list to add filter info to
- * @vlan_id: VLAN ID to add
- * @action: filter action
+ * @vlan: VLAN filter details
*/
static int
ice_fltr_add_vlan_to_list(struct ice_vsi *vsi, struct list_head *list,
- u16 vlan_id, enum ice_sw_fwd_act_type action)
+ struct ice_vlan *vlan)
{
struct ice_fltr_info info = { 0 };
info.flag = ICE_FLTR_TX;
info.src_id = ICE_SRC_ID_VSI;
info.lkup_type = ICE_SW_LKUP_VLAN;
- info.fltr_act = action;
+ info.fltr_act = ICE_FWD_TO_VSI;
info.vsi_handle = vsi->idx;
- info.l_data.vlan.vlan_id = vlan_id;
+ info.l_data.vlan.vlan_id = vlan->vid;
+ info.l_data.vlan.tpid = vlan->tpid;
+ info.l_data.vlan.tpid_valid = true;
return ice_fltr_add_entry_to_list(ice_pf_to_dev(vsi->back), &info,
list);
@@ -310,19 +311,17 @@ ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
/**
* ice_fltr_prepare_vlan - add or remove VLAN filter
* @vsi: pointer to VSI struct
- * @vlan_id: VLAN ID to add
- * @action: action to be performed on filter match
+ * @vlan: VLAN filter details
* @vlan_action: pointer to add or remove VLAN function
*/
static int
-ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id,
- enum ice_sw_fwd_act_type action,
+ice_fltr_prepare_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan,
int (*vlan_action)(struct ice_vsi *, struct list_head *))
{
LIST_HEAD(tmp_list);
int result;
- if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan_id, action))
+ if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan))
return -ENOMEM;
result = vlan_action(vsi, &tmp_list);
@@ -395,27 +394,21 @@ int ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
/**
* ice_fltr_add_vlan - add single VLAN filter
* @vsi: pointer to VSI struct
- * @vlan_id: VLAN ID to add
- * @action: action to be performed on filter match
+ * @vlan: VLAN filter details
*/
-int ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
- return ice_fltr_prepare_vlan(vsi, vlan_id, action,
- ice_fltr_add_vlan_list);
+ return ice_fltr_prepare_vlan(vsi, vlan, ice_fltr_add_vlan_list);
}
/**
* ice_fltr_remove_vlan - remove VLAN filter
* @vsi: pointer to VSI struct
- * @vlan_id: filter VLAN to remove
- * @action: action to remove
+ * @vlan: VLAN filter details
*/
-int ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_remove_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
- return ice_fltr_prepare_vlan(vsi, vlan_id, action,
- ice_fltr_remove_vlan_list);
+ return ice_fltr_prepare_vlan(vsi, vlan, ice_fltr_remove_vlan_list);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h
index 3eb42479175f..0f3dbc308eec 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.h
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.h
@@ -4,6 +4,8 @@
#ifndef _ICE_FLTR_H_
#define _ICE_FLTR_H_
+#include "ice_vlan.h"
+
void ice_fltr_free_list(struct device *dev, struct list_head *h);
int
ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
@@ -32,12 +34,8 @@ ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action);
int ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list);
-int
-ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vid,
- enum ice_sw_fwd_act_type action);
-int
-ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vid,
- enum ice_sw_fwd_act_type action);
+int ice_fltr_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+int ice_fltr_remove_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
int
ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
new file mode 100644
index 000000000000..35579cf4283f
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include <linux/tty_driver.h>
+
+/**
+ * ice_gnss_read - Read data from internal GNSS module
+ * @work: GNSS read work structure
+ *
+ * Read the data from the internal GNSS receiver and push it to the tty
+ * layer for users to read.
+ */
+static void ice_gnss_read(struct kthread_work *work)
+{
+ struct gnss_serial *gnss = container_of(work, struct gnss_serial,
+ read_work.work);
+ struct ice_aqc_link_topo_addr link_topo;
+ u8 i2c_params, bytes_read;
+ struct tty_port *port;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ __be16 data_len_b;
+ char *buf = NULL;
+ u16 i, data_len;
+ int err = 0;
+
+ pf = gnss->back;
+ if (!pf || !gnss->tty || !gnss->tty->port) {
+ err = -EFAULT;
+ goto exit;
+ }
+
+ hw = &pf->hw;
+ port = gnss->tty->port;
+
+ buf = (char *)get_zeroed_page(GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ memset(&link_topo, 0, sizeof(struct ice_aqc_link_topo_addr));
+ link_topo.topo_params.index = ICE_E810T_GNSS_I2C_BUS;
+ link_topo.topo_params.node_type_ctx |=
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE);
+
+ i2c_params = ICE_GNSS_UBX_DATA_LEN_WIDTH |
+ ICE_AQC_I2C_USE_REPEATED_START;
+
+ /* Read the data length in a loop; when it is not 0, the data is ready */
+ for (i = 0; i < ICE_MAX_UBX_READ_TRIES; i++) {
+ err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
+ cpu_to_le16(ICE_GNSS_UBX_DATA_LEN_H),
+ i2c_params, (u8 *)&data_len_b, NULL);
+ if (err)
+ goto exit_buf;
+
+ data_len = be16_to_cpu(data_len_b);
+ if (data_len != 0 && data_len != U16_MAX)
+ break;
+
+ mdelay(10);
+ }
+
+ data_len = min(data_len, (u16)PAGE_SIZE);
+ data_len = tty_buffer_request_room(port, data_len);
+ if (!data_len) {
+ err = -ENOMEM;
+ goto exit_buf;
+ }
+
+ /* Read received data */
+ for (i = 0; i < data_len; i += bytes_read) {
+ u16 bytes_left = data_len - i;
+
+ bytes_read = bytes_left < ICE_MAX_I2C_DATA_SIZE ? bytes_left :
+ ICE_MAX_I2C_DATA_SIZE;
+
+ err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
+ cpu_to_le16(ICE_GNSS_UBX_EMPTY_DATA),
+ bytes_read, &buf[i], NULL);
+ if (err)
+ goto exit_buf;
+ }
+
+ /* Send the data to the tty layer for users to read. This doesn't
+ * actually push the data through unless tty->low_latency is set.
+ */
+ tty_insert_flip_string(port, buf, i);
+ tty_flip_buffer_push(port);
+
+exit_buf:
+ free_page((unsigned long)buf);
+ kthread_queue_delayed_work(gnss->kworker, &gnss->read_work,
+ ICE_GNSS_TIMER_DELAY_TIME);
+exit:
+ if (err)
+ dev_dbg(ice_pf_to_dev(pf), "GNSS failed to read err=%d\n", err);
+}
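
ice_gnss_read() first polls the length register until it reports data, clamps the length to what fits, then drains the bytes in bounded chunks. A self-contained model of that poll-then-chunk loop; the fake device and all sizes here are illustrative stand-ins for the I2C reads:

#include <stdio.h>
#include <string.h>

#define MAX_CHUNK 4  /* stand-in for ICE_MAX_I2C_DATA_SIZE */
#define MAX_TRIES 8  /* stand-in for ICE_MAX_UBX_READ_TRIES */

/* Fake device: reports a length once "ready", then serves bytes. */
static int ticks;
static unsigned int dev_data_len(void) { return ++ticks >= 3 ? 10 : 0; }
static void dev_read(char *dst, unsigned int len) { memset(dst, 'x', len); }

static int gnss_read(char *buf, unsigned int cap)
{
    unsigned int data_len = 0, i, try;

    /* Poll the length register; non-zero means the data is ready. */
    for (try = 0; try < MAX_TRIES; try++) {
        data_len = dev_data_len();
        if (data_len)
            break;
    }
    if (!data_len)
        return -1;

    if (data_len > cap)
        data_len = cap;

    /* Drain in chunks no larger than the transfer limit. */
    for (i = 0; i < data_len; i += MAX_CHUNK) {
        unsigned int left = data_len - i;
        unsigned int chunk = left < MAX_CHUNK ? left : MAX_CHUNK;

        dev_read(buf + i, chunk);
    }
    return (int)data_len;
}

int main(void)
{
    char buf[16];
    printf("read %d bytes\n", gnss_read(buf, sizeof(buf)));
    return 0;
}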
+
+/**
+ * ice_gnss_struct_init - Initialize GNSS structure for the TTY
+ * @pf: Board private structure
+ */
+static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct kthread_worker *kworker;
+ struct gnss_serial *gnss;
+
+ gnss = kzalloc(sizeof(*gnss), GFP_KERNEL);
+ if (!gnss)
+ return NULL;
+
+ mutex_init(&gnss->gnss_mutex);
+ gnss->open_count = 0;
+ gnss->back = pf;
+ pf->gnss_serial = gnss;
+
+ kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
+ /* Allocate a kworker for handling work required for the GNSS TTY
+ * reads.
+ */
+ kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
+ if (IS_ERR(kworker)) {
+ kfree(gnss);
+ return NULL;
+ }
+
+ gnss->kworker = kworker;
+
+ return gnss;
+}
+
+/**
+ * ice_gnss_tty_open - Initialize GNSS structures on TTY device open
+ * @tty: pointer to the tty_struct
+ * @filp: pointer to the file
+ *
+ * This routine is mandatory. If this routine is not filled in, the attempted
+ * open will fail with ENODEV.
+ */
+static int ice_gnss_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct gnss_serial *gnss;
+ struct ice_pf *pf;
+
+ pf = (struct ice_pf *)tty->driver->driver_state;
+ if (!pf)
+ return -EFAULT;
+
+ /* Clear the pointer in case something fails */
+ tty->driver_data = NULL;
+
+ /* Get the serial object associated with this tty pointer */
+ gnss = pf->gnss_serial;
+ if (!gnss) {
+ /* Initialize GNSS struct on the first device open */
+ gnss = ice_gnss_struct_init(pf);
+ if (!gnss)
+ return -ENOMEM;
+ }
+
+ mutex_lock(&gnss->gnss_mutex);
+
+ /* Save our structure within the tty structure */
+ tty->driver_data = gnss;
+ gnss->tty = tty;
+ gnss->open_count++;
+ kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, 0);
+
+ mutex_unlock(&gnss->gnss_mutex);
+
+ return 0;
+}
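
ice_gnss_tty_open() creates the per-device state lazily on the first open and bumps an open count that the close path later decrements. A compact single-threaded sketch of that open/close pairing, with illustrative names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct serial {
    pthread_mutex_t lock;
    int open_count;
};

static struct serial *dev_state; /* created on first open */

static int dev_open(void)
{
    if (!dev_state) {
        dev_state = calloc(1, sizeof(*dev_state));
        if (!dev_state)
            return -1;
        pthread_mutex_init(&dev_state->lock, NULL);
    }

    pthread_mutex_lock(&dev_state->lock);
    dev_state->open_count++;
    pthread_mutex_unlock(&dev_state->lock);
    return 0;
}

static void dev_close(void)
{
    if (!dev_state)
        return;

    pthread_mutex_lock(&dev_state->lock);
    if (dev_state->open_count > 0 && --dev_state->open_count == 0)
        printf("last close: stop background work\n");
    pthread_mutex_unlock(&dev_state->lock);
}

int main(void)
{
    dev_open();
    dev_open();
    dev_close();
    dev_close();
    return 0;
}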
+
+/**
+ * ice_gnss_tty_close - Cleanup GNSS structures on tty device close
+ * @tty: pointer to the tty_struct
+ * @filp: pointer to the file
+ */
+static void ice_gnss_tty_close(struct tty_struct *tty, struct file *filp)
+{
+ struct gnss_serial *gnss = tty->driver_data;
+ struct ice_pf *pf;
+
+ if (!gnss)
+ return;
+
+ pf = (struct ice_pf *)tty->driver->driver_state;
+ if (!pf)
+ return;
+
+ mutex_lock(&gnss->gnss_mutex);
+
+ if (!gnss->open_count) {
+ /* Port was never opened */
+ dev_err(ice_pf_to_dev(pf), "GNSS port not opened\n");
+ goto exit;
+ }
+
+ gnss->open_count--;
+ if (gnss->open_count <= 0) {
+ /* Port is in shutdown state */
+ kthread_cancel_delayed_work_sync(&gnss->read_work);
+ }
+exit:
+ mutex_unlock(&gnss->gnss_mutex);
+}
+
+/**
+ * ice_gnss_tty_write - Dummy TTY write function to avoid kernel panic
+ * @tty: pointer to the tty_struct
+ * @buf: pointer to the user data
+ * @cnt: the number of characters that were able to be sent to the hardware (or
+ * queued to be sent at a later time)
+ */
+static int
+ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int cnt)
+{
+ return 0;
+}
+
+/**
+ * ice_gnss_tty_write_room - Dummy TTY write_room function to avoid kernel panic
+ * @tty: pointer to the tty_struct
+ */
+static unsigned int ice_gnss_tty_write_room(struct tty_struct *tty)
+{
+ return 0;
+}
+
+static const struct tty_operations tty_gps_ops = {
+ .open = ice_gnss_tty_open,
+ .close = ice_gnss_tty_close,
+ .write = ice_gnss_tty_write,
+ .write_room = ice_gnss_tty_write_room,
+};
+
+/**
+ * ice_gnss_create_tty_driver - Create a TTY driver for GNSS
+ * @pf: Board private structure
+ */
+static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ const int ICE_TTYDRV_NAME_MAX = 14;
+ struct tty_driver *tty_driver;
+ char *ttydrv_name;
+ int err;
+
+ tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW);
+ if (IS_ERR(tty_driver)) {
+ dev_err(ice_pf_to_dev(pf), "Failed to allocate memory for GNSS TTY\n");
+ return NULL;
+ }
+
+ ttydrv_name = kzalloc(ICE_TTYDRV_NAME_MAX, GFP_KERNEL);
+ if (!ttydrv_name) {
+ tty_driver_kref_put(tty_driver);
+ return NULL;
+ }
+
+ snprintf(ttydrv_name, ICE_TTYDRV_NAME_MAX, "ttyGNSS_%02x%02x_",
+ (u8)pf->pdev->bus->number, (u8)PCI_SLOT(pf->pdev->devfn));
+
+ /* Initialize the tty driver */
+ tty_driver->owner = THIS_MODULE;
+ tty_driver->driver_name = dev_driver_string(dev);
+ tty_driver->name = (const char *)ttydrv_name;
+ tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ tty_driver->init_termios = tty_std_termios;
+ tty_driver->init_termios.c_iflag &= ~INLCR;
+ tty_driver->init_termios.c_iflag |= IGNCR;
+ tty_driver->init_termios.c_oflag &= ~OPOST;
+ tty_driver->init_termios.c_lflag &= ~ICANON;
+ tty_driver->init_termios.c_cflag &= ~(CSIZE | CBAUD | CBAUDEX);
+ /* baud rate 9600 */
+ tty_termios_encode_baud_rate(&tty_driver->init_termios, 9600, 9600);
+ tty_driver->driver_state = pf;
+ tty_set_operations(tty_driver, &tty_gps_ops);
+
+ pf->gnss_serial = NULL;
+
+ tty_port_init(&pf->gnss_tty_port);
+ tty_port_link_device(&pf->gnss_tty_port, tty_driver, 0);
+
+ err = tty_register_driver(tty_driver);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "Failed to register TTY driver err=%d\n",
+ err);
+
+ tty_port_destroy(&pf->gnss_tty_port);
+ kfree(ttydrv_name);
+ tty_driver_kref_put(pf->ice_gnss_tty_driver);
+
+ return NULL;
+ }
+
+ return tty_driver;
+}
+
+/**
+ * ice_gnss_init - Initialize GNSS TTY support
+ * @pf: Board private structure
+ */
+void ice_gnss_init(struct ice_pf *pf)
+{
+ struct tty_driver *tty_driver;
+
+ tty_driver = ice_gnss_create_tty_driver(pf);
+ if (!tty_driver)
+ return;
+
+ pf->ice_gnss_tty_driver = tty_driver;
+
+ set_bit(ICE_FLAG_GNSS, pf->flags);
+ dev_info(ice_pf_to_dev(pf), "GNSS TTY init successful\n");
+}
+
+/**
+ * ice_gnss_exit - Disable GNSS TTY support
+ * @pf: Board private structure
+ */
+void ice_gnss_exit(struct ice_pf *pf)
+{
+ if (!test_bit(ICE_FLAG_GNSS, pf->flags) || !pf->ice_gnss_tty_driver)
+ return;
+
+ tty_port_destroy(&pf->gnss_tty_port);
+
+ if (pf->gnss_serial) {
+ struct gnss_serial *gnss = pf->gnss_serial;
+
+ kthread_cancel_delayed_work_sync(&gnss->read_work);
+ kfree(gnss);
+ pf->gnss_serial = NULL;
+ }
+
+ tty_unregister_driver(pf->ice_gnss_tty_driver);
+ kfree(pf->ice_gnss_tty_driver->name);
+ tty_driver_kref_put(pf->ice_gnss_tty_driver);
+ pf->ice_gnss_tty_driver = NULL;
+}
+
+/**
+ * ice_gnss_is_gps_present - Check if GPS HW is present
+ * @hw: pointer to HW struct
+ */
+bool ice_gnss_is_gps_present(struct ice_hw *hw)
+{
+ if (!hw->func_caps.ts_func_info.src_tmr_owned)
+ return false;
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+ if (ice_is_e810t(hw)) {
+ int err;
+ u8 data;
+
+ err = ice_read_pca9575_reg_e810t(hw, ICE_PCA9575_P0_IN, &data);
+ if (err || !!(data & ICE_E810T_P0_GNSS_PRSNT_N))
+ return false;
+ } else {
+ return false;
+ }
+#else
+ if (!ice_is_e810t(hw))
+ return false;
+#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+
+ return true;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h
new file mode 100644
index 000000000000..9211adb2372c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_GNSS_H_
+#define _ICE_GNSS_H_
+
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+#define ICE_E810T_GNSS_I2C_BUS 0x2
+#define ICE_GNSS_UBX_I2C_BUS_ADDR 0x42
+/* Data length register is big endian */
+#define ICE_GNSS_UBX_DATA_LEN_H 0xFD
+#define ICE_GNSS_UBX_DATA_LEN_WIDTH 2
+#define ICE_GNSS_UBX_EMPTY_DATA 0xFF
+#define ICE_GNSS_TIMER_DELAY_TIME (HZ / 10) /* 0.1 second per message */
+#define ICE_MAX_I2C_DATA_SIZE FIELD_MAX(ICE_AQC_I2C_DATA_SIZE_M)
+#define ICE_MAX_UBX_READ_TRIES 255
+
+/**
+ * struct gnss_serial - data used to initialize GNSS TTY port
+ * @back: back pointer to PF
+ * @tty: pointer to the tty for this device
+ * @open_count: number of times this port has been opened
+ * @gnss_mutex: mutex used to protect GNSS serial operations
+ * @kworker: kthread worker for handling periodic work
+ * @read_work: delayed work for handling GNSS reads
+ */
+struct gnss_serial {
+ struct ice_pf *back;
+ struct tty_struct *tty;
+ int open_count;
+ struct mutex gnss_mutex; /* protects GNSS serial structure */
+ struct kthread_worker *kworker;
+ struct kthread_delayed_work read_work;
+};
+
+#if IS_ENABLED(CONFIG_TTY)
+void ice_gnss_init(struct ice_pf *pf);
+void ice_gnss_exit(struct ice_pf *pf);
+bool ice_gnss_is_gps_present(struct ice_hw *hw);
+#else
+static inline void ice_gnss_init(struct ice_pf *pf) { }
+static inline void ice_gnss_exit(struct ice_pf *pf) { }
+static inline bool ice_gnss_is_gps_present(struct ice_hw *hw)
+{
+ return false;
+}
+#endif /* IS_ENABLED(CONFIG_TTY) */
+#endif /* _ICE_GNSS_H_ */
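
The header ends with the usual config-gated stub pattern: real prototypes when CONFIG_TTY is set, empty static inlines otherwise, so callers never carry their own #ifdefs. A generic sketch of the pattern; FEATURE_X and the function names are invented for illustration:

/* feature_x.h - config-gated stubs, sketched outside the kernel */
#ifndef FEATURE_X_H
#define FEATURE_X_H

#include <stdbool.h>

#if defined(ENABLE_FEATURE_X)
void feature_x_init(void);
void feature_x_exit(void);
bool feature_x_present(void);
#else
/* The stubs compile away entirely when the feature is configured out,
 * so call sites stay free of #ifdef clutter.
 */
static inline void feature_x_init(void) { }
static inline void feature_x_exit(void) { }
static inline bool feature_x_present(void) { return false; }
#endif /* ENABLE_FEATURE_X */
#endif /* FEATURE_X_H */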
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index fc3580167e7b..25a436d342c2 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -34,6 +34,9 @@ void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
{
struct iidc_auxiliary_drv *iadrv;
+ if (WARN_ON_ONCE(!in_task()))
+ return;
+
if (!pf->adev)
return;
@@ -79,7 +82,7 @@ int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
dev = ice_pf_to_dev(pf);
- if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ if (!ice_is_rdma_ena(pf))
return -EINVAL;
vsi = ice_get_main_vsi(pf);
@@ -227,6 +230,11 @@ void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
+
+ qos->pfc_mode = dcbx_cfg->pfc_mode;
+ if (qos->pfc_mode == IIDC_DSCP_PFC_MODE)
+ for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++)
+ qos->dscp_map[i] = dcbx_cfg->dscp_map[i];
}
EXPORT_SYMBOL_GPL(ice_get_qos_params);
@@ -236,7 +244,7 @@ EXPORT_SYMBOL_GPL(ice_get_qos_params);
*/
static int ice_reserve_rdma_qvector(struct ice_pf *pf)
{
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
+ if (ice_is_rdma_ena(pf)) {
int index;
index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix,
@@ -274,7 +282,7 @@ int ice_plug_aux_dev(struct ice_pf *pf)
/* if this PF doesn't support a technology that requires auxiliary
* devices, then gracefully exit
*/
- if (!ice_is_aux_ena(pf))
+ if (!ice_is_rdma_ena(pf))
return 0;
iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
diff --git a/drivers/net/ethernet/intel/ice/ice_idc_int.h b/drivers/net/ethernet/intel/ice/ice_idc_int.h
index b7796b8aecbd..4b0c86757df9 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc_int.h
+++ b/drivers/net/ethernet/intel/ice/ice_idc_int.h
@@ -5,7 +5,6 @@
#define _ICE_IDC_INT_H_
#include <linux/net/intel/iidc.h>
-#include "ice.h"
struct ice_pf;
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 85a612838a89..b3baf7c3f910 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -424,6 +424,8 @@ enum ice_rx_flex_desc_status_error_0_bits {
enum ice_rx_flex_desc_status_error_1_bits {
/* Note: These are predefined bit offsets */
ICE_RX_FLEX_DESC_STATUS1_NAT_S = 4,
+ /* [10:5] reserved */
+ ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
};
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 53256aca27c7..b897926f817d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -8,6 +8,7 @@
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_devlink.h"
+#include "ice_vsi_vlan_ops.h"
/**
* ice_vsi_type_str - maps VSI type enum to string equivalents
@@ -165,21 +166,19 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
/**
* ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
* @vsi: the VSI being configured
- * @vf_id: ID of the VF being configured
+ * @vf: the VF associated with this VSI, if any
*
* Return 0 on success and a negative value on error
*/
-static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
+static void ice_vsi_set_num_qs(struct ice_vsi *vsi, struct ice_vf *vf)
{
+ enum ice_vsi_type vsi_type = vsi->type;
struct ice_pf *pf = vsi->back;
- struct ice_vf *vf = NULL;
- if (vsi->type == ICE_VSI_VF)
- vsi->vf_id = vf_id;
- else
- vsi->vf_id = ICE_INVAL_VFID;
+ if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
+ return;
- switch (vsi->type) {
+ switch (vsi_type) {
case ICE_VSI_PF:
if (vsi->req_txq) {
vsi->alloc_txq = vsi->req_txq;
@@ -216,22 +215,21 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
/* The number of queues for ctrl VSI is equal to number of VFs.
* Each ring is associated to the corresponding VF_PR netdev.
*/
- vsi->alloc_txq = pf->num_alloc_vfs;
- vsi->alloc_rxq = pf->num_alloc_vfs;
+ vsi->alloc_txq = ice_get_num_vfs(pf);
+ vsi->alloc_rxq = vsi->alloc_txq;
vsi->num_q_vectors = 1;
break;
case ICE_VSI_VF:
- vf = &pf->vf[vsi->vf_id];
if (vf->num_req_qs)
vf->num_vf_qs = vf->num_req_qs;
vsi->alloc_txq = vf->num_vf_qs;
vsi->alloc_rxq = vf->num_vf_qs;
- /* pf->num_msix_per_vf includes (VF miscellaneous vector +
+ /* pf->vfs.num_msix_per includes (VF miscellaneous vector +
* data queue interrupts). Since vsi->num_q_vectors is number
* of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
* original vector count
*/
- vsi->num_q_vectors = pf->num_msix_per_vf - ICE_NONQ_VECS_VF;
+ vsi->num_q_vectors = pf->vfs.num_msix_per - ICE_NONQ_VECS_VF;
break;
case ICE_VSI_CTRL:
vsi->alloc_txq = 1;
@@ -247,7 +245,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
vsi->alloc_rxq = 1;
break;
default:
- dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi->type);
+ dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type);
break;
}
@@ -298,7 +296,7 @@ void ice_vsi_delete(struct ice_vsi *vsi)
return;
if (vsi->type == ICE_VSI_VF)
- ctxt->vf_num = vsi->vf_id;
+ ctxt->vf_num = vsi->vf->vf_id;
ctxt->vsi_num = vsi->vsi_num;
memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
@@ -383,8 +381,7 @@ int ice_vsi_clear(struct ice_vsi *vsi)
pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL)
pf->next_vsi = vsi->idx;
- if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL &&
- vsi->vf_id != ICE_INVAL_VFID)
+ if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL && vsi->vf)
pf->next_vsi = vsi->idx;
ice_vsi_free_arrays(vsi);
@@ -436,13 +433,16 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d
{
struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
struct ice_pf *pf = q_vector->vsi->back;
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
return IRQ_HANDLED;
- ice_for_each_vf(pf, i)
- napi_schedule(&pf->vf[i].repr->q_vector->napi);
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf)
+ napi_schedule(&vf->repr->q_vector->napi);
+ rcu_read_unlock();
return IRQ_HANDLED;
}
@@ -452,17 +452,24 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d
* @pf: board private structure
* @vsi_type: type of VSI
* @ch: ptr to channel
- * @vf_id: ID of the VF being configured
+ * @vf: VF for ICE_VSI_VF and ICE_VSI_CTRL
+ *
+ * The VF pointer is used for ICE_VSI_VF and ICE_VSI_CTRL. For ICE_VSI_CTRL,
+ * it may be NULL in the case there is no association with a VF. For
+ * ICE_VSI_VF the VF pointer *must not* be NULL.
*
* returns a pointer to a VSI on success, NULL on failure.
*/
static struct ice_vsi *
ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
- struct ice_channel *ch, u16 vf_id)
+ struct ice_channel *ch, struct ice_vf *vf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi = NULL;
+ if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
+ return NULL;
+
/* Need to protect the allocation of the VSIs at the PF level */
mutex_lock(&pf->sw_mutex);
@@ -484,9 +491,9 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
set_bit(ICE_VSI_DOWN, vsi->state);
if (vsi_type == ICE_VSI_VF)
- ice_vsi_set_num_qs(vsi, vf_id);
+ ice_vsi_set_num_qs(vsi, vf);
else if (vsi_type != ICE_VSI_CHNL)
- ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
+ ice_vsi_set_num_qs(vsi, NULL);
switch (vsi->type) {
case ICE_VSI_SWITCHDEV_CTRL:
@@ -509,10 +516,16 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
/* Setup ctrl VSI MSIX irq handler */
vsi->irq_handler = ice_msix_clean_ctrl_vsi;
+
+ /* For the PF control VSI this is NULL, for the VF control VSI
+ * this will be the first VF to allocate it.
+ */
+ vsi->vf = vf;
break;
case ICE_VSI_VF:
if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
+ vsi->vf = vf;
break;
case ICE_VSI_CHNL:
if (!ch)
@@ -530,7 +543,7 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
goto unlock_pf;
}
- if (vsi->type == ICE_VSI_CTRL && vf_id == ICE_INVAL_VFID) {
+ if (vsi->type == ICE_VSI_CTRL && !vf) {
/* Use the last VSI slot as the index for PF control VSI */
vsi->idx = pf->num_alloc_vsi - 1;
pf->ctrl_vsi_idx = vsi->idx;
@@ -545,8 +558,8 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
pf->next_vsi);
}
- if (vsi->type == ICE_VSI_CTRL && vf_id != ICE_INVAL_VFID)
- pf->vf[vf_id].ctrl_vsi_idx = vsi->idx;
+ if (vsi->type == ICE_VSI_CTRL && vf)
+ vf->ctrl_vsi_idx = vsi->idx;
goto unlock_pf;
err_rings:
@@ -732,14 +745,14 @@ bool ice_is_safe_mode(struct ice_pf *pf)
}
/**
- * ice_is_aux_ena
+ * ice_is_rdma_ena
* @pf: pointer to the PF struct
*
- * returns true if AUX devices/drivers are supported, false otherwise
+ * returns true if RDMA is currently supported, false otherwise
*/
-bool ice_is_aux_ena(struct ice_pf *pf)
+bool ice_is_rdma_ena(struct ice_pf *pf)
{
- return test_bit(ICE_FLAG_AUX_ENA, pf->flags);
+ return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
/**
@@ -838,11 +851,12 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
/**
* ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
+ * @hw: HW structure used to determine the VLAN mode of the device
* @ctxt: the VSI context being set
*
* This initializes a default VSI context for all sections except the Queues.
*/
-static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
+static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
{
u32 table = 0;
@@ -853,13 +867,27 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
/* Traffic from VSI can be sent to LAN */
ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
- /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
- * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
- * packets untagged/tagged.
+ /* allow all untagged/tagged packets by default on Tx */
+ ctxt->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL &
+ ICE_AQ_VSI_INNER_VLAN_TX_MODE_M) >>
+ ICE_AQ_VSI_INNER_VLAN_TX_MODE_S);
+ /* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which
+ * results in legacy behavior (show VLAN, DEI, and UP) in descriptor.
+ *
+ * DVM - leave inner VLAN in packet by default
*/
- ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
- ICE_AQ_VSI_VLAN_MODE_M) >>
- ICE_AQ_VSI_VLAN_MODE_S);
+ if (ice_is_dvm_ena(hw)) {
+ ctxt->info.inner_vlan_flags |=
+ ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
+ ctxt->info.outer_vlan_flags =
+ (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
+ ctxt->info.outer_vlan_flags |=
+ (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
+ ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
+ ICE_AQ_VSI_OUTER_TAG_TYPE_M;
+ }
/* Have 1:1 UP mapping for both ingress/egress tables */
table |= ICE_UP_TABLE_TRANSLATE(0, 0);
table |= ICE_UP_TABLE_TRANSLATE(1, 1);
@@ -1114,7 +1142,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
case ICE_VSI_VF:
ctxt->flags = ICE_AQ_VSI_TYPE_VF;
/* VF number here is the absolute VF number (0-255) */
- ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
+ ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id;
break;
default:
ret = -ENODEV;
@@ -1136,7 +1164,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
}
- ice_set_dflt_vsi_ctx(ctxt);
+ ice_set_dflt_vsi_ctx(hw, ctxt);
if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
ice_set_fd_vsi_ctx(ctxt, vsi);
/* if the switch is in VEB mode, allow VSI loopback */
@@ -1168,25 +1196,6 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
}
- /* enable/disable MAC and VLAN anti-spoof when spoofchk is on/off
- * respectively
- */
- if (vsi->type == ICE_VSI_VF) {
- ctxt->info.valid_sections |=
- cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
- if (pf->vf[vsi->vf_id].spoofchk) {
- ctxt->info.sec_flags |=
- ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
- (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
- } else {
- ctxt->info.sec_flags &=
- ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
- (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
- }
- }
-
/* Allow control frames out of main VSI */
if (vsi->type == ICE_VSI_PF) {
ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
@@ -1325,6 +1334,36 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
}
/**
+ * ice_get_vf_ctrl_res - Get VF control VSI resource
+ * @pf: pointer to the PF structure
+ * @vsi: the VSI to allocate a resource for
+ *
+ * Look up whether another VF has already allocated the control VSI resource.
+ * If so, re-use this resource so that we share it among all VFs.
+ *
+ * Otherwise, allocate the resource and return it.
+ */
+static int ice_get_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+ int base;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
+ base = pf->vsi[vf->ctrl_vsi_idx]->base_vector;
+ rcu_read_unlock();
+ return base;
+ }
+ }
+ rcu_read_unlock();
+
+ return ice_get_res(pf, pf->irq_tracker, vsi->num_q_vectors,
+ ICE_RES_VF_CTRL_VEC_ID);
+}
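
ice_get_vf_ctrl_res() scans for another VF that already owns the shared control-VSI vector and reuses it, falling back to a fresh allocation only when none exists. A simplified single-threaded sketch, assuming an array scan in place of the RCU-protected walk above:

#include <stdio.h>

#define NO_RES -1
#define NUM_USERS 4

static int user_res[NUM_USERS] = { NO_RES, NO_RES, NO_RES, NO_RES };
static int next_free = 100; /* pretend resource allocator */

static int alloc_res(void) { return next_free++; }

/* Reuse a resource another user already holds; allocate only if none do. */
static int get_shared_res(int self)
{
    int i;

    for (i = 0; i < NUM_USERS; i++)
        if (i != self && user_res[i] != NO_RES)
            return user_res[i];

    return alloc_res();
}

int main(void)
{
    user_res[0] = get_shared_res(0); /* first user allocates */
    user_res[1] = get_shared_res(1); /* later users share it */
    printf("%d %d\n", user_res[0], user_res[1]);
    return 0;
}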
+
+/**
* ice_vsi_setup_vector_base - Set up the base vector for the given VSI
* @vsi: ptr to the VSI
*
@@ -1356,20 +1395,8 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
num_q_vectors = vsi->num_q_vectors;
/* reserve slots from OS requested IRQs */
- if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
- int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI) {
- base = pf->vsi[vf->ctrl_vsi_idx]->base_vector;
- break;
- }
- }
- if (i == pf->num_alloc_vfs)
- base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
- ICE_RES_VF_CTRL_VEC_ID);
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
+ base = ice_get_vf_ctrl_res(pf, vsi);
} else {
base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
vsi->idx);
@@ -1431,6 +1458,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
*/
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
+ bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw);
struct ice_pf *pf = vsi->back;
struct device *dev;
u16 i;
@@ -1452,6 +1480,10 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->tx_tstamps = &pf->ptp.port.tx;
ring->dev = dev;
ring->count = vsi->num_tx_desc;
+ if (dvm_ena)
+ ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
+ else
+ ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1;
WRITE_ONCE(vsi->tx_rings[i], ring);
}
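The flag latched here is consumed later on the Tx hot path: in DVM the outer VLAN tag travels in the L2TAG2 field (typically programmed via a context descriptor), while in SVM the single tag uses L2TAG1 in the data descriptor. A minimal sketch of the consumer side, assuming the flag names above:

    /* sketch: the Tx path picks the descriptor tag slot from the
     * ring flag set once at allocation time
     */
    static bool ice_tx_ring_uses_l2tag2(const struct ice_tx_ring *ring)
    {
            return !!(ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2);
    }
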
@@ -1763,62 +1795,6 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
}
/**
- * ice_vsi_add_vlan - Add VSI membership for given VLAN
- * @vsi: the VSI being configured
- * @vid: VLAN ID to be added
- * @action: filter action to be performed on match
- */
-int
-ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action)
-{
- struct ice_pf *pf = vsi->back;
- struct device *dev;
- int err = 0;
-
- dev = ice_pf_to_dev(pf);
-
- if (!ice_fltr_add_vlan(vsi, vid, action)) {
- vsi->num_vlan++;
- } else {
- err = -ENODEV;
- dev_err(dev, "Failure Adding VLAN %d on VSI %i\n", vid,
- vsi->vsi_num);
- }
-
- return err;
-}
-
-/**
- * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
- * @vsi: the VSI being configured
- * @vid: VLAN ID to be removed
- *
- * Returns 0 on success and negative on failure
- */
-int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
-{
- struct ice_pf *pf = vsi->back;
- struct device *dev;
- int err;
-
- dev = ice_pf_to_dev(pf);
-
- err = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI);
- if (!err) {
- vsi->num_vlan--;
- } else if (err == -ENOENT) {
- dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, error: %d\n",
- vid, vsi->vsi_num, err);
- err = 0;
- } else {
- dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n",
- vid, vsi->vsi_num, err);
- }
-
- return err;
-}
-
-/**
* ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
* @vsi: VSI
*/
@@ -2146,95 +2122,6 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
}
/**
- * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
- * @vsi: the VSI being changed
- */
-int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
-{
- struct ice_hw *hw = &vsi->back->hw;
- struct ice_vsi_ctx *ctxt;
- int ret;
-
- ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
- if (!ctxt)
- return -ENOMEM;
-
- /* Here we are configuring the VSI to let the driver add VLAN tags by
- * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
- * insertion happens in the Tx hot path, in ice_tx_map.
- */
- ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
-
- /* Preserve existing VLAN strip setting */
- ctxt->info.vlan_flags |= (vsi->info.vlan_flags &
- ICE_AQ_VSI_VLAN_EMOD_M);
-
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
-
- ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (ret) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
- goto out;
- }
-
- vsi->info.vlan_flags = ctxt->info.vlan_flags;
-out:
- kfree(ctxt);
- return ret;
-}
-
-/**
- * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
- * @vsi: the VSI being changed
- * @ena: boolean value indicating if this is a enable or disable request
- */
-int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
-{
- struct ice_hw *hw = &vsi->back->hw;
- struct ice_vsi_ctx *ctxt;
- int ret;
-
- /* do not allow modifying VLAN stripping when a port VLAN is configured
- * on this VSI
- */
- if (vsi->info.pvid)
- return 0;
-
- ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
- if (!ctxt)
- return -ENOMEM;
-
- /* Here we are configuring what the VSI should do with the VLAN tag in
- * the Rx packet. We can either leave the tag in the packet or put it in
- * the Rx descriptor.
- */
- if (ena)
- /* Strip VLAN tag from Rx packet and put it in the desc */
- ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
- else
- /* Disable stripping. Leave tag in packet */
- ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
-
- /* Allow all packets untagged/tagged */
- ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
-
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
-
- ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (ret) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %s\n",
- ena, ret, ice_aq_str(hw->adminq.sq_last_status));
- goto out;
- }
-
- vsi->info.vlan_flags = ctxt->info.vlan_flags;
-out:
- kfree(ctxt);
- return ret;
-}
-
-/**
* ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
* @vsi: the VSI whose rings are to be enabled
*
@@ -2327,61 +2214,6 @@ bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA);
}
-/**
- * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
- * @vsi: VSI to enable or disable VLAN pruning on
- * @ena: set to true to enable VLAN pruning and false to disable it
- *
- * returns 0 if VSI is updated, negative otherwise
- */
-int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
-{
- struct ice_vsi_ctx *ctxt;
- struct ice_pf *pf;
- int status;
-
- if (!vsi)
- return -EINVAL;
-
- /* Don't enable VLAN pruning if the netdev is currently in promiscuous
- * mode. VLAN pruning will be enabled when the interface exits
- * promiscuous mode if any VLAN filters are active.
- */
- if (vsi->netdev && vsi->netdev->flags & IFF_PROMISC && ena)
- return 0;
-
- pf = vsi->back;
- ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
- if (!ctxt)
- return -ENOMEM;
-
- ctxt->info = vsi->info;
-
- if (ena)
- ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- else
- ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
-
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
-
- status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
- if (status) {
- netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %s\n",
- ena ? "En" : "Dis", vsi->idx, vsi->vsi_num,
- status, ice_aq_str(pf->hw.adminq.sq_last_status));
- goto err_out;
- }
-
- vsi->info.sw_flags2 = ctxt->info.sw_flags2;
-
- kfree(ctxt);
- return 0;
-
-err_out:
- kfree(ctxt);
- return -EIO;
-}
-
static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
{
if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
@@ -2416,7 +2248,7 @@ ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
}
if (vsi->type == ICE_VSI_VF) {
- struct ice_vf *vf = &vsi->back->vf[vsi->vf_id];
+ struct ice_vf *vf = vsi->vf;
q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector);
} else {
@@ -2601,9 +2433,8 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
* @pf: board private structure
* @pi: pointer to the port_info instance
* @vsi_type: VSI type
- * @vf_id: defines VF ID to which this VSI connects. This field is meant to be
- * used only for ICE_VSI_VF VSI type. For other VSI types, should
- * fill-in ICE_INVAL_VFID as input.
+ * @vf: pointer to VF to which this VSI connects. This field is used primarily
+ * for the ICE_VSI_VF type. Other VSI types should pass NULL.
* @ch: ptr to channel
*
* This allocates the sw VSI structure and its queue resources.
@@ -2613,7 +2444,8 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
*/
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch)
+ enum ice_vsi_type vsi_type, struct ice_vf *vf,
+ struct ice_channel *ch)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = ice_pf_to_dev(pf);
@@ -2621,11 +2453,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
int ret, i;
if (vsi_type == ICE_VSI_CHNL)
- vsi = ice_vsi_alloc(pf, vsi_type, ch, ICE_INVAL_VFID);
+ vsi = ice_vsi_alloc(pf, vsi_type, ch, NULL);
else if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
- vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf_id);
+ vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf);
else
- vsi = ice_vsi_alloc(pf, vsi_type, NULL, ICE_INVAL_VFID);
+ vsi = ice_vsi_alloc(pf, vsi_type, NULL, NULL);
if (!vsi) {
dev_err(dev, "could not allocate VSI\n");
@@ -2637,9 +2469,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (vsi->type == ICE_VSI_PF)
vsi->ethtype = ETH_P_PAUSE;
- if (vsi->type == ICE_VSI_VF || vsi->type == ICE_VSI_CTRL)
- vsi->vf_id = vf_id;
-
ice_alloc_fd_res(vsi);
if (vsi_type != ICE_VSI_CHNL) {
@@ -2661,6 +2490,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_get_qs;
+ ice_vsi_init_vlan_ops(vsi);
+
switch (vsi->type) {
case ICE_VSI_CTRL:
case ICE_VSI_SWITCHDEV_CTRL:
@@ -2681,17 +2512,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_vector_base;
- /* Always add VLAN ID 0 switch rule by default. This is needed
- * in order to allow all untagged and 0 tagged priority traffic
- * if Rx VLAN pruning is enabled. Also there are cases where we
- * don't get the call to add VLAN 0 via ice_vlan_rx_add_vid()
- * so this handles those cases (i.e. adding the PF to a bridge
- * without the 8021q module loaded).
- */
- ret = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
- if (ret)
- goto unroll_clear_rings;
-
ice_vsi_map_rings_to_vectors(vsi);
/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
@@ -3069,6 +2889,37 @@ void ice_napi_del(struct ice_vsi *vsi)
}
/**
+ * ice_free_vf_ctrl_res - Free the VF control VSI resource
+ * @pf: pointer to PF structure
+ * @vsi: the VSI to free resources for
+ *
+ * Check if the VF control VSI resource is still in use. If no VF is using it
+ * any more, release the VSI resource. Otherwise, leave it to be cleaned up
+ * once no other VF uses it.
+ */
+static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
+ rcu_read_unlock();
+ return;
+ }
+ }
+ rcu_read_unlock();
+
+ /* No other VFs left that have control VSI. It is now safe to reclaim
+ * SW interrupts back to the common pool.
+ */
+ ice_free_res(pf->irq_tracker, vsi->base_vector,
+ ICE_RES_VF_CTRL_VEC_ID);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+}
+
+/**
* ice_vsi_release - Delete a VSI and free its resources
* @vsi: the VSI being removed
*
@@ -3111,23 +2962,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
* many interrupts each VF needs. SR-IOV MSIX resources are also
* cleared in the same manner.
*/
- if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
- int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI)
- break;
- }
- if (i == pf->num_alloc_vfs) {
- /* No other VFs left that have control VSI, reclaim SW
- * interrupts back to the common pool
- */
- ice_free_res(pf->irq_tracker, vsi->base_vector,
- ICE_RES_VF_CTRL_VEC_ID);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
- }
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
+ ice_free_vf_ctrl_res(pf, vsi);
} else if (vsi->type != ICE_VSI_VF) {
/* reclaim SW interrupts back to the common pool */
ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
@@ -3311,7 +3147,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_coalesce_stored *coalesce;
int prev_num_q_vectors = 0;
- struct ice_vf *vf = NULL;
enum ice_vsi_type vtype;
struct ice_pf *pf;
int ret, i;
@@ -3321,8 +3156,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
pf = vsi->back;
vtype = vsi->type;
- if (vtype == ICE_VSI_VF)
- vf = &pf->vf[vsi->vf_id];
+ if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf))
+ return -EINVAL;
+
+ ice_vsi_init_vlan_ops(vsi);
coalesce = kcalloc(vsi->num_q_vectors,
sizeof(struct ice_coalesce_stored), GFP_KERNEL);
@@ -3359,9 +3196,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi);
if (vtype == ICE_VSI_VF)
- ice_vsi_set_num_qs(vsi, vf->vf_id);
+ ice_vsi_set_num_qs(vsi, vsi->vf);
else
- ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
+ ice_vsi_set_num_qs(vsi, NULL);
ret = ice_vsi_alloc_arrays(vsi);
if (ret < 0)
@@ -4123,9 +3960,9 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
*/
if (status == -EIO) {
if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
- dev_warn(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
- (ena ? "ON" : "OFF"), status,
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
+ (ena ? "ON" : "OFF"), status,
+ ice_aq_str(hw->adminq.sq_last_status));
} else if (status) {
dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
(ena ? "ON" : "OFF"), status,
@@ -4137,6 +3974,120 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
}
/**
+ * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI
+ * @vsi: VSI used to add VLAN filters
+ *
+ * In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are based
+ * on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x888a8) doesn't
+ * matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
+ * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
+ *
+ * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic
+ * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged
+ * traffic in SVM, since the VLAN TPID isn't part of filtering.
+ *
+ * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
+ * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is
+ * part of filtering.
+ */
+int ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ struct ice_vlan vlan;
+ int err;
+
+ vlan = ICE_VLAN(0, 0, 0);
+ err = vlan_ops->add_vlan(vsi, &vlan);
+ if (err && err != -EEXIST)
+ return err;
+
+ /* in SVM both VLAN 0 filters are identical */
+ if (!ice_is_dvm_ena(&vsi->back->hw))
+ return 0;
+
+ vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
+ err = vlan_ops->add_vlan(vsi, &vlan);
+ if (err && err != -EEXIST)
+ return err;
+
+ return 0;
+}
+
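Spelled out with the ICE_VLAN(tpid, vid, prio) initializer used above, the two filters this function can install are (a sketch for illustration):

    struct ice_vlan untagged = ICE_VLAN(0, 0, 0);            /* no TPID: untagged traffic; also prio-tagged in SVM */
    struct ice_vlan prio_only = ICE_VLAN(ETH_P_8021Q, 0, 0); /* DVM only: 0x8100 + VID 0 priority-tagged traffic */
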
+/**
+ * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI
+ * @vsi: VSI used to delete VLAN filters
+ *
+ * Delete the VLAN 0 filters in the same manner that they were added in
+ * ice_vsi_add_vlan_zero.
+ */
+int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ struct ice_vlan vlan;
+ int err;
+
+ vlan = ICE_VLAN(0, 0, 0);
+ err = vlan_ops->del_vlan(vsi, &vlan);
+ if (err && err != -EEXIST)
+ return err;
+
+ /* in SVM both VLAN 0 filters are identical */
+ if (!ice_is_dvm_ena(&vsi->back->hw))
+ return 0;
+
+ vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
+ err = vlan_ops->del_vlan(vsi, &vlan);
+ if (err && err != -EEXIST)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode
+ * @vsi: VSI used to get the VLAN mode
+ *
+ * If DVM is enabled then 2 VLAN 0 filters are added, else if SVM is enabled
+ * then 1 VLAN 0 filter is added. See ice_vsi_add_vlan_zero for more details.
+ */
+static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi)
+{
+#define ICE_DVM_NUM_ZERO_VLAN_FLTRS 2
+#define ICE_SVM_NUM_ZERO_VLAN_FLTRS 1
+ /* no VLAN 0 filter is created when a port VLAN is active */
+ if (vsi->type == ICE_VSI_VF) {
+ if (WARN_ON(!vsi->vf))
+ return 0;
+
+ if (ice_vf_is_port_vlan_ena(vsi->vf))
+ return 0;
+ }
+
+ if (ice_is_dvm_ena(&vsi->back->hw))
+ return ICE_DVM_NUM_ZERO_VLAN_FLTRS;
+ else
+ return ICE_SVM_NUM_ZERO_VLAN_FLTRS;
+}
+
+/**
+ * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs
+ * @vsi: VSI used to determine if any non-zero VLANs have been added
+ */
+bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi)
+{
+ return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi));
+}
+
+/**
+ * ice_vsi_num_non_zero_vlans - get the number of non-zero VLANs for this VSI
+ * @vsi: VSI used to get the number of non-zero VLANs added
+ */
+u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi)
+{
+ return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi));
+}
+
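A quick worked example of the accounting, assuming DVM and no port VLAN:

    /* vsi->num_vlan == 3: two VLAN 0 filters plus one real VLAN
     *   ice_vsi_num_zero_vlans(vsi)     -> 2
     *   ice_vsi_has_non_zero_vlans(vsi) -> true (3 > 2)
     *   ice_vsi_num_non_zero_vlans(vsi) -> 1
     * In SVM the same VSI would report 3 - 1 = 2 non-zero VLANs.
     */
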
+/**
* ice_is_feature_supported
* @pf: pointer to the struct ice_pf instance
* @f: feature enum to be checked
@@ -4190,8 +4141,11 @@ void ice_init_feature_support(struct ice_pf *pf)
case ICE_DEV_ID_E810C_QSFP:
case ICE_DEV_ID_E810C_SFP:
ice_set_feature_support(pf, ICE_F_DSCP);
- if (ice_is_e810t(&pf->hw))
+ if (ice_is_e810t(&pf->hw)) {
ice_set_feature_support(pf, ICE_F_SMA_CTRL);
+ if (ice_gnss_is_gps_present(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_GNSS);
+ }
break;
default:
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index b2ed189527d6..0095329949d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -5,6 +5,7 @@
#define _ICE_LIB_H_
#include "ice.h"
+#include "ice_vlan.h"
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
@@ -22,15 +23,6 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
-int
-ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action);
-
-int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);
-
-int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi);
-
-int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena);
-
int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi);
int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi);
@@ -45,8 +37,6 @@ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi);
-int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
-
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
int ice_set_link(struct ice_vsi *vsi, bool ena);
@@ -62,7 +52,8 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch);
+ enum ice_vsi_type vsi_type, struct ice_vf *vf,
+ struct ice_channel *ch);
void ice_napi_del(struct ice_vsi *vsi);
@@ -110,7 +101,7 @@ void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf);
-bool ice_is_aux_ena(struct ice_pf *pf);
+bool ice_is_rdma_ena(struct ice_pf *pf);
bool ice_is_dflt_vsi_in_use(struct ice_sw *sw);
bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
@@ -132,7 +123,10 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
-
+int ice_vsi_add_vlan_zero(struct ice_vsi *vsi);
+int ice_vsi_del_vlan_zero(struct ice_vsi *vsi);
+bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi);
+u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi);
bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f);
void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_init_feature_support(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index f3c346e13b7a..b588d7995631 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -21,6 +21,7 @@
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
+#include "ice_vsi_vlan_ops.h"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -47,6 +48,21 @@ static DEFINE_IDA(ice_aux_ida);
DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);
+/**
+ * ice_hw_to_dev - Get device pointer from the hardware structure
+ * @hw: pointer to the device HW structure
+ *
+ * Used to access the device pointer from compilation units which can't easily
+ * include the definition of struct ice_pf without leading to circular header
+ * dependencies.
+ */
+struct device *ice_hw_to_dev(struct ice_hw *hw)
+{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+
+ return &pf->pdev->dev;
+}
+
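Call sites are unchanged by the macro-to-function conversion; a usage sketch (the message and the vsi_num variable are hypothetical):

    /* works from any compilation unit that sees ice_osdep.h, without
     * needing the full definition of struct ice_pf
     */
    dev_warn(ice_hw_to_dev(hw), "VSI %u: example message\n", vsi_num);
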
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
@@ -244,7 +260,7 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
if (vsi->type != ICE_VSI_PF)
return 0;
- if (vsi->num_vlan > 1)
+ if (ice_vsi_has_non_zero_vlans(vsi))
status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
else
status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
@@ -264,7 +280,7 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
if (vsi->type != ICE_VSI_PF)
return 0;
- if (vsi->num_vlan > 1)
+ if (ice_vsi_has_non_zero_vlans(vsi))
status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
else
status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
@@ -279,6 +295,7 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
*/
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
struct device *dev = ice_pf_to_dev(vsi->back);
struct net_device *netdev = vsi->netdev;
bool promisc_forced_on = false;
@@ -352,7 +369,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
/* check for changes in promiscuous modes */
if (changed_flags & IFF_ALLMULTI) {
if (vsi->current_netdev_flags & IFF_ALLMULTI) {
- if (vsi->num_vlan > 1)
+ if (ice_vsi_has_non_zero_vlans(vsi))
promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_MCAST_PROMISC_BITS;
@@ -366,7 +383,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
} else {
/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
- if (vsi->num_vlan > 1)
+ if (ice_vsi_has_non_zero_vlans(vsi))
promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_MCAST_PROMISC_BITS;
@@ -396,7 +413,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
goto out_promisc;
}
err = 0;
- ice_cfg_vlan_pruning(vsi, false);
+ vlan_ops->dis_rx_filtering(vsi);
}
} else {
/* Clear Rx filter to remove traffic from wire */
@@ -409,8 +426,9 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
IFF_PROMISC;
goto out_promisc;
}
- if (vsi->num_vlan > 1)
- ice_cfg_vlan_pruning(vsi, true);
+ if (vsi->current_netdev_flags &
+ NETIF_F_HW_VLAN_CTAG_FILTER)
+ vlan_ops->ena_rx_filtering(vsi);
}
}
}
@@ -502,7 +520,8 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct ice_hw *hw = &pf->hw;
struct ice_vsi *vsi;
- unsigned int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
@@ -517,8 +536,10 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_vc_notify_reset(pf);
/* Disable VFs until reset is completed */
- ice_for_each_vf(pf, i)
- ice_set_vf_state_qs_dis(&pf->vf[i]);
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf)
+ ice_set_vf_state_qs_dis(vf);
+ mutex_unlock(&pf->vfs.table_lock);
if (ice_is_eswitch_mode_switchdev(pf)) {
if (reset_type != ICE_RESET_PFR)
@@ -565,6 +586,9 @@ skip:
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_prepare_for_reset(pf);
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_exit(pf);
+
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
@@ -610,7 +634,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
clear_bit(ICE_PFR_REQ, pf->state);
wake_up(&pf->reset_wait_queue);
- ice_reset_all_vfs(pf, true);
+ ice_reset_all_vfs(pf);
}
}
@@ -661,7 +685,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
clear_bit(ICE_CORER_REQ, pf->state);
clear_bit(ICE_GLOBR_REQ, pf->state);
wake_up(&pf->reset_wait_queue);
- ice_reset_all_vfs(pf, true);
+ ice_reset_all_vfs(pf);
}
return;
@@ -1660,7 +1684,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- unsigned int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
u32 reg;
if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
@@ -1748,47 +1773,46 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
/* Check to see if one of the VFs caused an MDD event, and then
* increment counters and set print pending
*/
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- reg = rd32(hw, VP_MDET_TX_PQM(i));
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
+ reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
if (reg & VP_MDET_TX_PQM_VALID_M) {
- wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
+ wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
vf->mdd_tx_events.count++;
set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
- i);
+ vf->vf_id);
}
- reg = rd32(hw, VP_MDET_TX_TCLAN(i));
+ reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
if (reg & VP_MDET_TX_TCLAN_VALID_M) {
- wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
+ wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
vf->mdd_tx_events.count++;
set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
- i);
+ vf->vf_id);
}
- reg = rd32(hw, VP_MDET_TX_TDPU(i));
+ reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
if (reg & VP_MDET_TX_TDPU_VALID_M) {
- wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
+ wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
vf->mdd_tx_events.count++;
set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
- i);
+ vf->vf_id);
}
- reg = rd32(hw, VP_MDET_RX(i));
+ reg = rd32(hw, VP_MDET_RX(vf->vf_id));
if (reg & VP_MDET_RX_VALID_M) {
- wr32(hw, VP_MDET_RX(i), 0xFFFF);
+ wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
vf->mdd_rx_events.count++;
set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_rx_err(pf))
dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
- i);
+ vf->vf_id);
/* Since the queue is disabled on VF Rx MDD events, the
* PF can be configured to reset the VF through ethtool
@@ -1799,12 +1823,11 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
* reset, so print the event prior to reset.
*/
ice_print_vf_rx_mdd_event(vf);
- mutex_lock(&pf->vf[i].cfg_lock);
- ice_reset_vf(&pf->vf[i], false);
- mutex_unlock(&pf->vf[i].cfg_lock);
+ ice_reset_vf(vf, ICE_VF_RESET_LOCK);
}
}
}
+ mutex_unlock(&pf->vfs.table_lock);
ice_print_vfs_mdd_events(pf);
}
@@ -2255,9 +2278,43 @@ static void ice_service_task(struct work_struct *work)
return;
}
- if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+ if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
+ struct iidc_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (event) {
+ set_bit(IIDC_EVENT_CRIT_ERR, event->type);
+ /* report the entire OICR value to AUX driver */
+ swap(event->reg, pf->oicr_err_reg);
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
+ }
+ }
+
+ if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) {
+ /* Plug aux device per request */
ice_plug_aux_dev(pf);
+ /* Mark plugging as done but check whether unplug was
+ * requested during ice_plug_aux_dev() call
+ * (e.g. from ice_clear_rdma_cap()) and if so then
+ * unplug aux device.
+ */
+ if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+ ice_unplug_aux_dev(pf);
+ }
+
+ if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
+ struct iidc_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (event) {
+ set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
+ }
+ }
+
ice_clean_adminq_subtask(pf);
ice_check_media_subtask(pf);
ice_check_for_hang_subtask(pf);
@@ -2433,7 +2490,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
/* skip this unused q_vector */
continue;
}
- if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf)
err = devm_request_irq(dev, irq_num, vsi->irq_handler,
IRQF_SHARED, q_vector->name,
q_vector);
@@ -2500,10 +2557,10 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
xdp_ring->vsi = vsi;
xdp_ring->netdev = NULL;
- xdp_ring->next_dd = ICE_TX_THRESH - 1;
- xdp_ring->next_rs = ICE_TX_THRESH - 1;
xdp_ring->dev = dev;
xdp_ring->count = vsi->num_tx_desc;
+ xdp_ring->next_dd = ICE_RING_QUARTER(xdp_ring) - 1;
+ xdp_ring->next_rs = ICE_RING_QUARTER(xdp_ring) - 1;
WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
@@ -3020,17 +3077,9 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
if (oicr & ICE_AUX_CRIT_ERR) {
- struct iidc_event *event;
-
+ pf->oicr_err_reg |= oicr;
+ set_bit(ICE_AUX_ERR_PENDING, pf->state);
ena_mask &= ~ICE_AUX_CRIT_ERR;
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (event) {
- set_bit(IIDC_EVENT_CRIT_ERR, event->type);
- /* report the entire OICR value to AUX driver */
- event->reg = oicr;
- ice_send_event_to_aux(pf, event);
- kfree(event);
- }
}
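Together with the ice_service_task() hunk above, this forms a classic top-half/bottom-half split: the hard IRQ only latches state, and the sleepable worker builds and sends the event. A condensed sketch of the consumer, mirroring the code added to ice_service_task():

    /* sketch: runs in process context, so kzalloc(GFP_KERNEL) is fine */
    static void ice_report_latched_oicr(struct ice_pf *pf)
    {
            struct iidc_event *event;

            if (!test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state))
                    return;

            event = kzalloc(sizeof(*event), GFP_KERNEL);
            if (!event)
                    return;

            set_bit(IIDC_EVENT_CRIT_ERR, event->type);
            swap(event->reg, pf->oicr_err_reg); /* hand off causes, zero accumulator */
            ice_send_event_to_aux(pf, event);
            kfree(event);
    }
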
/* Report any remaining unexpected interrupts */
@@ -3235,6 +3284,7 @@ static void ice_set_ops(struct net_device *netdev)
static void ice_set_netdev_features(struct net_device *netdev)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
netdev_features_t csumo_features;
netdev_features_t vlano_features;
netdev_features_t dflt_features;
@@ -3261,6 +3311,10 @@ static void ice_set_netdev_features(struct net_device *netdev)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
+ /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
+ if (is_dvm_ena)
+ vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
+
tso_features = NETIF_F_TSO |
NETIF_F_TSO_ECN |
NETIF_F_TSO6 |
@@ -3292,6 +3346,15 @@ static void ice_set_netdev_features(struct net_device *netdev)
tso_features;
netdev->vlan_features |= dflt_features | csumo_features |
tso_features;
+
+ /* advertise support but don't enable by default since only one type of
+ * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
+ * type turns on, the other has to be turned off. This is enforced by the
+ * ice_fix_features() ndo callback.
+ */
+ if (is_dvm_ena)
+ netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX;
}
/**
@@ -3366,14 +3429,14 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID, NULL);
+ return ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL);
}
static struct ice_vsi *
ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_channel *ch)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, ICE_INVAL_VFID, ch);
+ return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, NULL, ch);
}
/**
@@ -3387,7 +3450,7 @@ ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
static struct ice_vsi *
ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID, NULL);
+ return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, NULL, NULL);
}
/**
@@ -3401,40 +3464,37 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi *
ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID, NULL);
+ return ice_vsi_setup(pf, pi, ICE_VSI_LB, NULL, NULL);
}
/**
* ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
* @netdev: network interface to be adjusted
- * @proto: unused protocol
+ * @proto: VLAN TPID
* @vid: VLAN ID to be added
*
* net_device_ops implementation for adding VLAN IDs
*/
static int
-ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
- u16 vid)
+ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi_vlan_ops *vlan_ops;
struct ice_vsi *vsi = np->vsi;
+ struct ice_vlan vlan;
int ret;
/* VLAN 0 is added by default during load/reset */
if (!vid)
return 0;
- /* Enable VLAN pruning when a VLAN other than 0 is added */
- if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
- ret = ice_cfg_vlan_pruning(vsi, true);
- if (ret)
- return ret;
- }
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
* packets aren't pruned by the device's internal switch on Rx
*/
- ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
+ vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
+ ret = vlan_ops->add_vlan(vsi, &vlan);
if (!ret)
set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
@@ -3444,36 +3504,36 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
/**
* ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
* @netdev: network interface to be adjusted
- * @proto: unused protocol
+ * @proto: VLAN TPID
* @vid: VLAN ID to be removed
*
* net_device_ops implementation for removing VLAN IDs
*/
static int
-ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
- u16 vid)
+ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi_vlan_ops *vlan_ops;
struct ice_vsi *vsi = np->vsi;
+ struct ice_vlan vlan;
int ret;
/* don't allow removal of VLAN 0 */
if (!vid)
return 0;
- /* Make sure ice_vsi_kill_vlan is successful before updating VLAN
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
+ /* Make sure VLAN delete is successful before updating VLAN
* information
*/
- ret = ice_vsi_kill_vlan(vsi, vid);
+ vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
+ ret = vlan_ops->del_vlan(vsi, &vlan);
if (ret)
return ret;
- /* Disable pruning when VLAN 0 is the only VLAN rule */
- if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
- ret = ice_cfg_vlan_pruning(vsi, false);
-
set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
- return ret;
+ return 0;
}
/**
@@ -3542,12 +3602,17 @@ static int ice_tc_indir_block_register(struct ice_vsi *vsi)
static int ice_setup_pf_sw(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
+ bool dvm = ice_is_dvm_ena(&pf->hw);
struct ice_vsi *vsi;
int status;
if (ice_is_reset_in_progress(pf->state))
return -EBUSY;
+ status = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
+ if (status)
+ return -EIO;
+
vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
if (!vsi)
return -ENOMEM;
@@ -3658,6 +3723,7 @@ static void ice_deinit_pf(struct ice_pf *pf)
mutex_destroy(&pf->sw_mutex);
mutex_destroy(&pf->tc_mutex);
mutex_destroy(&pf->avail_q_mutex);
+ mutex_destroy(&pf->vfs.table_lock);
if (pf->avail_txqs) {
bitmap_free(pf->avail_txqs);
@@ -3682,19 +3748,16 @@ static void ice_set_pf_caps(struct ice_pf *pf)
struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
- if (func_caps->common_cap.rdma) {
+ if (func_caps->common_cap.rdma)
set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- set_bit(ICE_FLAG_AUX_ENA, pf->flags);
- }
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
if (func_caps->common_cap.dcb)
set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
if (func_caps->common_cap.sr_iov_1_1) {
set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
- pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
- ICE_MAX_VF_COUNT);
+ pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
+ ICE_MAX_SRIOV_VFS);
}
clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
if (func_caps->common_cap.rss_table_size)
@@ -3760,6 +3823,9 @@ static int ice_init_pf(struct ice_pf *pf)
return -ENOMEM;
}
+ mutex_init(&pf->vfs.table_lock);
+ hash_init(pf->vfs.table);
+
return 0;
}
@@ -3814,7 +3880,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
v_left -= needed;
/* reserve vectors for RDMA auxiliary driver */
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
+ if (ice_is_rdma_ena(pf)) {
needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
if (v_left < needed)
goto no_hw_vecs_left_err;
@@ -3855,7 +3921,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
int v_remain = v_actual - v_other;
int v_rdma = 0, v_min_rdma = 0;
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
+ if (ice_is_rdma_ena(pf)) {
/* Need at least 1 interrupt in addition to
* AEQ MSIX
*/
@@ -3889,7 +3955,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
pf->num_lan_msix);
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ if (ice_is_rdma_ena(pf))
dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
pf->num_rdma_msix);
}
@@ -4069,8 +4135,8 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
/* allow all VLANs on Tx and don't strip on Rx */
- ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
- ICE_AQ_VSI_VLAN_EMOD_NOTHING;
+ ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
+ ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
@@ -4079,7 +4145,7 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
} else {
vsi->info.sec_flags = ctxt->info.sec_flags;
vsi->info.sw_flags2 = ctxt->info.sw_flags2;
- vsi->info.vlan_flags = ctxt->info.vlan_flags;
+ vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
}
kfree(ctxt);
@@ -4464,8 +4530,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
/* set up for high or low DMA */
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (err)
- err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (err) {
dev_err(dev, "DMA configuration failed: 0x%x\n", err);
return err;
@@ -4688,6 +4752,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_init(pf);
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_init(pf);
+
/* Note: Flow director init failure is non-fatal to load */
if (ice_init_fdir(pf))
dev_err(dev, "could not initialize flow director\n");
@@ -4717,7 +4784,7 @@ probe_done:
/* ready to go, so clear down state bit */
clear_bit(ICE_DOWN, pf->state);
- if (ice_is_aux_ena(pf)) {
+ if (ice_is_rdma_ena(pf)) {
pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
if (pf->aux_idx < 0) {
dev_err(dev, "Failed to allocate device ID for AUX driver\n");
@@ -4859,14 +4926,16 @@ static void ice_remove(struct pci_dev *pdev)
ice_devlink_unregister_params(pf);
set_bit(ICE_DOWN, pf->state);
- mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
ice_deinit_lag(pf);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_release(pf);
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_exit(pf);
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
ice_setup_mc_magic_wake(pf);
ice_vsi_release_all(pf);
+ mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
ice_set_wake(pf);
ice_free_irq_msix_misc(pf);
ice_for_each_vsi(pf, i) {
@@ -5575,6 +5644,194 @@ ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
return err;
}
+#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_STAG_RX | \
+ NETIF_F_HW_VLAN_STAG_TX)
+
+#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_STAG_FILTER)
+
+/**
+ * ice_fix_features - fix the netdev features flags based on device limitations
+ * @netdev: ptr to the netdev that flags are being fixed on
+ * @features: features that need to be checked and possibly fixed
+ *
+ * Make sure any fixups are made to features in this callback. This lets the
+ * rest of the driver skip checking for unsupported configurations, since that
+ * is the responsibility of this callback.
+ *
+ * Single VLAN Mode (SVM) Supported Features:
+ * NETIF_F_HW_VLAN_CTAG_FILTER
+ * NETIF_F_HW_VLAN_CTAG_RX
+ * NETIF_F_HW_VLAN_CTAG_TX
+ *
+ * Double VLAN Mode (DVM) Supported Features:
+ * NETIF_F_HW_VLAN_CTAG_FILTER
+ * NETIF_F_HW_VLAN_CTAG_RX
+ * NETIF_F_HW_VLAN_CTAG_TX
+ *
+ * NETIF_F_HW_VLAN_STAG_FILTER
+ * NETIF_F_HW_VLAN_STAG_RX
+ * NETIF_F_HW_VLAN_STAG_TX
+ *
+ * Features that need fixing:
+ * Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
+ * These are mutually exclusive as the VSI context cannot support multiple
+ * VLAN ethertypes simultaneously for stripping and/or insertion. If this
+ * is not done, then default to clearing the requested STAG offload
+ * settings.
+ *
+ * All supported filtering has to be enabled or disabled together. For
+ * example, in DVM, CTAG and STAG filtering have to be enabled and disabled
+ * together. If this is not done, then default to VLAN filtering disabled.
+ * These are mutually exclusive as there is currently no way to
+ * enable/disable VLAN filtering based on VLAN ethertype when using VLAN
+ * prune rules.
+ */
+static netdev_features_t
+ice_fix_features(struct net_device *netdev, netdev_features_t features)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ netdev_features_t supported_vlan_filtering;
+ netdev_features_t requested_vlan_filtering;
+ struct ice_vsi *vsi = np->vsi;
+
+ requested_vlan_filtering = features & NETIF_VLAN_FILTERING_FEATURES;
+
+ /* make sure supported_vlan_filtering works for both SVM and DVM */
+ supported_vlan_filtering = NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (ice_is_dvm_ena(&vsi->back->hw))
+ supported_vlan_filtering |= NETIF_F_HW_VLAN_STAG_FILTER;
+
+ if (requested_vlan_filtering &&
+ requested_vlan_filtering != supported_vlan_filtering) {
+ if (requested_vlan_filtering & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ netdev_warn(netdev, "cannot support requested VLAN filtering settings, enabling all supported VLAN filtering settings\n");
+ features |= supported_vlan_filtering;
+ } else {
+ netdev_warn(netdev, "cannot support requested VLAN filtering settings, clearing all supported VLAN filtering settings\n");
+ features &= ~supported_vlan_filtering;
+ }
+ }
+
+ if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
+ (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
+ netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
+ features &= ~(NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX);
+ }
+
+ return features;
+}
+
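The stack invokes this via netdev_update_features() before ndo_set_features runs, so invalid requests are sanitized rather than rejected. A hypothetical request in DVM, traced through the rules above:

    /* input:  CTAG_RX | CTAG_TX | STAG_RX | CTAG_FILTER
     * step 1: filtering bits != supported set, CTAG_FILTER present
     *         -> STAG_FILTER is added (filtering toggles as one unit)
     * step 2: CTAG and STAG offloads both requested
     *         -> STAG_RX (and STAG_TX) cleared
     * output: CTAG_RX | CTAG_TX | CTAG_FILTER | STAG_FILTER
     */
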
+/**
+ * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
+ * @vsi: PF's VSI
+ * @features: features used to determine VLAN offload settings
+ *
+ * First, determine the vlan_ethertype based on the VLAN offload bits in
+ * features. Then determine if stripping and insertion should be enabled or
+ * disabled. Finally enable or disable VLAN stripping and insertion.
+ */
+static int
+ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
+{
+ bool enable_stripping = true, enable_insertion = true;
+ struct ice_vsi_vlan_ops *vlan_ops;
+ int strip_err = 0, insert_err = 0;
+ u16 vlan_ethertype = 0;
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
+ if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
+ vlan_ethertype = ETH_P_8021AD;
+ else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
+ vlan_ethertype = ETH_P_8021Q;
+
+ if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
+ enable_stripping = false;
+ if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
+ enable_insertion = false;
+
+ if (enable_stripping)
+ strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
+ else
+ strip_err = vlan_ops->dis_stripping(vsi);
+
+ if (enable_insertion)
+ insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
+ else
+ insert_err = vlan_ops->dis_insertion(vsi);
+
+ if (strip_err || insert_err)
+ return -EIO;
+
+ return 0;
+}
+
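A quick map from feature bits to the resulting VSI configuration (sketch; ice_fix_features() guarantees CTAG and STAG offloads never arrive together):

    /* offload bits in 'features'   -> TPID,   strip, insert
     *  STAG_RX | STAG_TX           -> 0x88A8, on,    on
     *  CTAG_RX only                -> 0x8100, on,    off
     *  none                        -> 0,      off,   off
     */
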
+/**
+ * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
+ * @vsi: PF's VSI
+ * @features: features used to determine VLAN filtering settings
+ *
+ * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
+ * features.
+ */
+static int
+ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
+{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ int err = 0;
+
+ /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
+ * if either bit is set
+ */
+ if (features &
+ (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
+ err = vlan_ops->ena_rx_filtering(vsi);
+ else
+ err = vlan_ops->dis_rx_filtering(vsi);
+
+ return err;
+}
+
+/**
+ * ice_set_vlan_features - set VLAN settings based on suggested feature set
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ *
+ * Only update VLAN settings if the requested_vlan_features are different from
+ * the current_vlan_features.
+ */
+static int
+ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
+{
+ netdev_features_t current_vlan_features, requested_vlan_features;
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ int err;
+
+ current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
+ requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
+ if (current_vlan_features ^ requested_vlan_features) {
+ err = ice_set_vlan_offload_features(vsi, features);
+ if (err)
+ return err;
+ }
+
+ current_vlan_features = netdev->features &
+ NETIF_VLAN_FILTERING_FEATURES;
+ requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
+ if (current_vlan_features ^ requested_vlan_features) {
+ err = ice_set_vlan_filtering_features(vsi, features);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
/**
* ice_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
@@ -5609,26 +5866,9 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
netdev->features & NETIF_F_RXHASH)
ice_vsi_manage_rss_lut(vsi, false);
- if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
- !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
- ret = ice_vsi_manage_vlan_stripping(vsi, true);
- else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
- ret = ice_vsi_manage_vlan_stripping(vsi, false);
-
- if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
- !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
- ret = ice_vsi_manage_vlan_insertion(vsi);
- else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
- (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
- ret = ice_vsi_manage_vlan_insertion(vsi);
-
- if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
- !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
- ret = ice_cfg_vlan_pruning(vsi, true);
- else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
- (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
- ret = ice_cfg_vlan_pruning(vsi, false);
+ ret = ice_set_vlan_features(netdev, features);
+ if (ret)
+ return ret;
if ((features & NETIF_F_NTUPLE) &&
!(netdev->features & NETIF_F_NTUPLE)) {
@@ -5652,23 +5892,26 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
else
clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
- return ret;
+ return 0;
}
/**
- * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
+ * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
* @vsi: VSI to setup VLAN properties for
*/
static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
{
- int ret = 0;
+ int err;
- if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
- ret = ice_vsi_manage_vlan_stripping(vsi, true);
- if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
- ret = ice_vsi_manage_vlan_insertion(vsi);
+ err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
+ if (err)
+ return err;
- return ret;
+ err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
+ if (err)
+ return err;
+
+ return ice_vsi_add_vlan_zero(vsi);
}
/**
@@ -5909,9 +6152,9 @@ int ice_up(struct ice_vsi *vsi)
* This function fetches stats from the ring considering the atomic operations
* that needs to be performed to read u64 values in 32 bit machine.
*/
-static void
-ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats stats,
- u64 *pkts, u64 *bytes)
+void
+ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
+ struct ice_q_stats stats, u64 *pkts, u64 *bytes)
{
unsigned int start;
@@ -5941,8 +6184,9 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
u64 pkts = 0, bytes = 0;
ring = READ_ONCE(rings[i]);
- if (ring)
- ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
+ if (!ring)
+ continue;
+ ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
vsi_stats->tx_packets += pkts;
vsi_stats->tx_bytes += bytes;
vsi->tx_restart += ring->tx_stats.restart_q;
@@ -6269,11 +6513,12 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
*/
int ice_down(struct ice_vsi *vsi)
{
- int i, tx_err, rx_err, link_err = 0;
+ int i, tx_err, rx_err, link_err = 0, vlan_err = 0;
WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ vlan_err = ice_vsi_del_vlan_zero(vsi);
if (!ice_is_e810(&vsi->back->hw))
ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
netif_carrier_off(vsi->netdev);
@@ -6315,7 +6560,7 @@ int ice_down(struct ice_vsi *vsi)
ice_for_each_rxq(vsi, i)
ice_clean_rx_ring(vsi->rx_rings[i]);
- if (tx_err || rx_err || link_err) {
+ if (tx_err || rx_err || link_err || vlan_err) {
netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
return -EIO;
@@ -6625,6 +6870,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
+ bool dvm;
int err;
if (test_bit(ICE_DOWN, pf->state))
@@ -6688,6 +6934,12 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_init_ctrlq;
}
+ dvm = ice_is_dvm_ena(hw);
+
+ err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
+ if (err)
+ goto err_init_ctrlq;
+
err = ice_sched_init_port(hw->port_info);
if (err)
goto err_sched_init_port;
@@ -6724,6 +6976,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_reset(pf);
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_init(pf);
+
/* rebuild PF VSI */
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
if (err) {
@@ -6822,7 +7077,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- struct iidc_event *event;
u8 count = 0;
int err = 0;
@@ -6857,14 +7111,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return -EBUSY;
}
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (!event)
- return -ENOMEM;
-
- set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
- ice_send_event_to_aux(pf, event);
- clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
-
netdev->mtu = (unsigned int)new_mtu;
/* if VSI is up, bring it down and then back up */
@@ -6872,21 +7118,18 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
err = ice_down(vsi);
if (err) {
netdev_err(netdev, "change MTU if_down err %d\n", err);
- goto event_after;
+ return err;
}
err = ice_up(vsi);
if (err) {
netdev_err(netdev, "change MTU if_up err %d\n", err);
- goto event_after;
+ return err;
}
}
netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
-event_after:
- set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
- ice_send_event_to_aux(pf, event);
- kfree(event);
+ set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
return err;
}
@@ -8596,6 +8839,7 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_start_xmit = ice_start_xmit,
.ndo_select_queue = ice_select_queue,
.ndo_features_check = ice_features_check,
+ .ndo_fix_features = ice_fix_features,
.ndo_set_rx_mode = ice_set_rx_mode,
.ndo_set_mac_address = ice_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h
index f57c414bc0a9..82bc54fec7f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_osdep.h
+++ b/drivers/net/ethernet/intel/ice/ice_osdep.h
@@ -5,10 +5,18 @@
#define _ICE_OSDEP_H_
#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/pci_ids.h>
#ifndef CONFIG_64BIT
#include <linux/io-64-nonatomic-lo-hi.h>
#endif
+#include <net/udp_tunnel.h>
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
#define rd32(a, reg) readl((a)->hw_addr + (reg))
@@ -24,8 +32,8 @@ struct ice_dma_mem {
size_t size;
};
-#define ice_hw_to_dev(ptr) \
- (&(container_of((ptr), struct ice_pf, hw))->pdev->dev)
+struct ice_hw;
+struct device *ice_hw_to_dev(struct ice_hw *hw);
#ifdef CONFIG_DYNAMIC_DEBUG
#define ice_debug(hw, type, fmt, args...) \
diff --git a/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.c
new file mode 100644
index 000000000000..976a03d3bdd5
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice_vsi_vlan_ops.h"
+#include "ice_vsi_vlan_lib.h"
+#include "ice_vlan_mode.h"
+#include "ice.h"
+#include "ice_pf_vsi_vlan_ops.h"
+
+void ice_pf_vsi_init_vlan_ops(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+
+ if (ice_is_dvm_ena(&vsi->back->hw)) {
+ vlan_ops = &vsi->outer_vlan_ops;
+
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ vlan_ops->ena_stripping = ice_vsi_ena_outer_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_outer_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion;
+ vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering;
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ } else {
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+ vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering;
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ }
+}
+
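Callers never invoke the ice_vsi_* implementations directly; they dispatch through the table populated above, which is what keeps SVM and DVM call sites identical. A usage sketch, assuming ice_get_compat_vsi_vlan_ops() resolves to outer ops in DVM and inner ops in SVM:

    struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
    int err;

    /* same code path in SVM and DVM; the ops table picks inner vs.
     * outer VSI context programming
     */
    err = vlan_ops->ena_stripping(vsi, ETH_P_8021Q);
    if (!err)
            err = vlan_ops->ena_rx_filtering(vsi);
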
diff --git a/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.h
new file mode 100644
index 000000000000..6741ec8c5f6b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_PF_VSI_VLAN_OPS_H_
+#define _ICE_PF_VSI_VLAN_OPS_H_
+
+#include "ice_vsi_vlan_ops.h"
+
+struct ice_vsi;
+
+void ice_pf_vsi_init_vlan_ops(struct ice_vsi *vsi);
+
+#endif /* _ICE_PF_VSI_VLAN_OPS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 695b6dd61dc2..3f64300b0e14 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -29,6 +29,7 @@ enum ice_protocol_type {
ICE_MAC_OFOS = 0,
ICE_MAC_IL,
ICE_ETYPE_OL,
+ ICE_ETYPE_IL,
ICE_VLAN_OFOS,
ICE_IPV4_OFOS,
ICE_IPV4_IL,
@@ -40,6 +41,8 @@ enum ice_protocol_type {
ICE_VXLAN,
ICE_GENEVE,
ICE_NVGRE,
+ ICE_GTP,
+ ICE_GTP_NO_PAY,
ICE_VXLAN_GPE,
ICE_SCTP_IL,
ICE_PROTOCOL_LAST
@@ -51,6 +54,8 @@ enum ice_sw_tunnel_type {
ICE_SW_TUN_VXLAN,
ICE_SW_TUN_GENEVE,
ICE_SW_TUN_NVGRE,
+ ICE_SW_TUN_GTPU,
+ ICE_SW_TUN_GTPC,
ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
};
@@ -92,6 +97,7 @@ enum ice_prot_id {
#define ICE_MAC_OFOS_HW 1
#define ICE_MAC_IL_HW 4
#define ICE_ETYPE_OL_HW 9
+#define ICE_ETYPE_IL_HW 10
#define ICE_VLAN_OF_HW 16
#define ICE_VLAN_OL_HW 17
#define ICE_IPV4_OFOS_HW 32
@@ -180,6 +186,20 @@ struct ice_udp_tnl_hdr {
__be32 vni; /* only use lower 24-bits */
};
+struct ice_udp_gtp_hdr {
+ u8 flags;
+ u8 msg_type;
+ __be16 rsrvd_len;
+ __be32 teid;
+ __be16 rsrvd_seq_nbr;
+ u8 rsrvd_n_pdu_nbr;
+ u8 rsrvd_next_ext;
+ u8 rsvrd_ext_len;
+ u8 pdu_type;
+ u8 qfi;
+ u8 rsvrd;
+};
+
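The first 8 bytes (flags through teid) form the mandatory GTPv1 header; the remaining fields cover the optional sequence-number extension and the PDU session container that carries the QFI. A sketch of filling a GTP-U match template with this layout (the TEID value is hypothetical; untouched fields stay zero):

    struct ice_udp_gtp_hdr gtp = {
            .flags = 0x30,               /* version 1, protocol type GTP */
            .msg_type = 0xff,            /* G-PDU */
            .teid = cpu_to_be32(0x1234), /* hypothetical tunnel endpoint ID */
    };
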
struct ice_nvgre_hdr {
__be16 flags;
__be16 protocol;
@@ -196,6 +216,7 @@ union ice_prot_hdr {
struct ice_sctp_hdr sctp_hdr;
struct ice_udp_tnl_hdr tnl_hdr;
struct ice_nvgre_hdr nvgre_hdr;
+ struct ice_udp_gtp_hdr gtp_hdr;
};
/* This is mapping table entry that maps every word within a given protocol
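Editor's note: summing the fields of ice_udp_gtp_hdr gives 16 bytes: the 8-byte mandatory GTP-U header (flags through teid), the 4 optional bytes (sequence number, N-PDU number, next-extension type), and a 4-byte PDU session container. A compile-time check along these lines (my addition, not part of the patch) would catch accidental padding:

/* 1+1+2+4 mandatory + 2+1+1 optional + 1+1+1+1 extension = 16 bytes;
 * the layout must stay dense since it describes the on-wire header.
 */
static_assert(sizeof(struct ice_udp_gtp_hdr) == 16);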
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 000c39d163a2..a1cd33273ca4 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -3,6 +3,7 @@
#include "ice.h"
#include "ice_lib.h"
+#include "ice_trace.h"
#define E810_OUT_PROP_DELAY_NS 1
@@ -2063,11 +2064,15 @@ static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
struct sk_buff *skb;
int err;
+ ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
+
err = ice_read_phy_tstamp(hw, tx->quad, phy_idx,
&raw_tstamp);
if (err)
continue;
+ ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
+
/* Check if the timestamp is invalid or stale */
if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
raw_tstamp == tx->tstamps[idx].cached_tstamp)
@@ -2093,6 +2098,8 @@ static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
+ ice_trace(tx_tstamp_complete, skb, idx);
+
skb_tstamp_tx(skb, &shhwtstamps);
dev_kfree_skb_any(skb);
}
@@ -2131,6 +2138,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
tx->tstamps[idx].start = jiffies;
tx->tstamps[idx].skb = skb_get(skb);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ ice_trace(tx_tstamp_request, skb, idx);
}
spin_unlock(&tx->lock);
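Editor's note: the new tracepoints bracket the usual deferred TX-timestamp flow: mark the skb SKBTX_IN_PROGRESS and stash a reference at request time, then later read the PHY timestamp, extend it, and complete the skb. A minimal sketch of the completion half (a hypothetical helper, using only the core-kernel calls already visible above):

static void example_complete_tx_tstamp(struct sk_buff *skb, u64 ns)
{
	struct skb_shared_hwtstamps shhwtstamps = { };

	shhwtstamps.hwtstamp = ns_to_ktime(ns);

	/* hand the hardware timestamp to the socket error queue */
	skb_tstamp_tx(skb, &shhwtstamps);

	/* drop the reference taken with skb_get() at request time */
	dev_kfree_skb_any(skb);
}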
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index ec8450f034e6..6dff97d53d81 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -3251,6 +3251,37 @@ int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
}
/**
+ * ice_read_pca9575_reg_e810t
+ * @hw: pointer to the hw struct
+ * @offset: GPIO controller register offset
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Read the register from the GPIO controller
+ */
+int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
+{
+ struct ice_aqc_link_topo_addr link_topo;
+ __le16 addr;
+ u16 handle;
+ int err;
+
+ memset(&link_topo, 0, sizeof(link_topo));
+
+ err = ice_get_pca9575_handle(hw, &handle);
+ if (err)
+ return err;
+
+ link_topo.handle = cpu_to_le16(handle);
+ link_topo.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
+
+ addr = cpu_to_le16((u16)offset);
+
+ return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
+}
+
+/**
* ice_is_pca9575_present
* @hw: pointer to the hw struct
*
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 519e75462e67..1246e4ee4b5d 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -191,6 +191,7 @@ int ice_phy_exit_bypass_e822(struct ice_hw *hw, u8 port);
int ice_ptp_init_phy_e810(struct ice_hw *hw);
int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
+int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data);
bool ice_is_pca9575_present(struct ice_hw *hw);
#define PFTSYN_SEM_BYTES 4
@@ -443,4 +444,10 @@ bool ice_is_pca9575_present(struct ice_hw *hw);
#define ICE_SMA_MAX_BIT_E810T 7
#define ICE_PCA9575_P1_OFFSET 8
+/* E810T PCA9575 IO controller registers */
+#define ICE_PCA9575_P0_IN 0x0
+
+/* E810T PCA9575 IO controller pin control */
+#define ICE_E810T_P0_GNSS_PRSNT_N BIT(4)
+
#endif /* _ICE_PTP_HW_H_ */
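Editor's note: putting the new helper and register defines together, a caller could probe the GNSS-present pin roughly like this (hypothetical usage sketch; the _N suffix suggests the pin is active-low):

static bool example_gnss_module_present(struct ice_hw *hw)
{
	u8 data;

	if (ice_read_pca9575_reg_e810t(hw, ICE_PCA9575_P0_IN, &data))
		return false;

	/* assuming active-low presence, per the _PRSNT_N naming */
	return !(data & ICE_E810T_P0_GNSS_PRSNT_N);
}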
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index dcc310e29300..848f2adea563 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -4,7 +4,7 @@
#include "ice.h"
#include "ice_eswitch.h"
#include "ice_devlink.h"
-#include "ice_virtchnl_pf.h"
+#include "ice_sriov.h"
#include "ice_tc_lib.h"
/**
@@ -142,6 +142,59 @@ ice_repr_get_devlink_port(struct net_device *netdev)
return &repr->vf->devlink_port;
}
+/**
+ * ice_repr_sp_stats64 - get slow path stats for port representor
+ * @dev: network interface device structure
+ * @stats: netlink stats structure
+ *
+ * RX/TX stats are swapped here to be consistent with VF stats. In the slow
+ * path, the port representor receives data when the corresponding VF is
+ * sending it (and vice versa), so TX and RX bytes/packets are effectively
+ * swapped on the port representor.
+ */
+static int
+ice_repr_sp_stats64(const struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ int vf_id = np->repr->vf->vf_id;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+ u64 pkts, bytes;
+
+ tx_ring = np->vsi->tx_rings[vf_id];
+ ice_fetch_u64_stats_per_ring(&tx_ring->syncp, tx_ring->stats,
+ &pkts, &bytes);
+ stats->rx_packets = pkts;
+ stats->rx_bytes = bytes;
+
+ rx_ring = np->vsi->rx_rings[vf_id];
+ ice_fetch_u64_stats_per_ring(&rx_ring->syncp, rx_ring->stats,
+ &pkts, &bytes);
+ stats->tx_packets = pkts;
+ stats->tx_bytes = bytes;
+ stats->tx_dropped = rx_ring->rx_stats.alloc_page_failed +
+ rx_ring->rx_stats.alloc_buf_failed;
+
+ return 0;
+}
+
+static bool
+ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
+{
+ return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
+}
+
+static int
+ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp)
+{
+ if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
+ return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);
+
+ return -EINVAL;
+}
+
static int
ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
struct flow_cls_offload *flower)
@@ -199,6 +252,8 @@ static const struct net_device_ops ice_repr_netdev_ops = {
.ndo_start_xmit = ice_eswitch_port_start_xmit,
.ndo_get_devlink_port = ice_repr_get_devlink_port,
.ndo_setup_tc = ice_repr_setup_tc,
+ .ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
+ .ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};
/**
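Editor's note, to make the stats swap concrete: 1000 packets that a VF pushes into the slow path show up as rx_packets = 1000 on its representor (and the representor's transmissions count as the VF's receives), exactly how a physical switch port facing that VF would report the same traffic. The two new ndo callbacks expose these counters as the netlink "CPU hit" offload stats (IFLA_OFFLOAD_XSTATS_CPU_HIT) and return -EINVAL for any other attribute.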
@@ -284,6 +339,8 @@ static int ice_repr_add(struct ice_vf *vf)
devlink_port_type_eth_set(&vf->devlink_port, repr->netdev);
+ ice_virtchnl_set_repr_ops(vf);
+
return 0;
err_netdev:
@@ -311,6 +368,9 @@ err_alloc_rule:
*/
static void ice_repr_rem(struct ice_vf *vf)
{
+ if (!vf->repr)
+ return;
+
ice_devlink_destroy_vf_port(vf);
kfree(vf->repr->q_vector);
vf->repr->q_vector = NULL;
@@ -323,6 +383,23 @@ static void ice_repr_rem(struct ice_vf *vf)
#endif
kfree(vf->repr);
vf->repr = NULL;
+
+ ice_virtchnl_set_dflt_ops(vf);
+}
+
+/**
+ * ice_repr_rem_from_all_vfs - remove port representor for all VFs
+ * @pf: pointer to PF structure
+ */
+void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
+
+ ice_for_each_vf(pf, bkt, vf)
+ ice_repr_rem(vf);
}
/**
@@ -331,49 +408,27 @@ static void ice_repr_rem(struct ice_vf *vf)
*/
int ice_repr_add_for_all_vfs(struct ice_pf *pf)
{
+ struct ice_vf *vf;
+ unsigned int bkt;
int err;
- int i;
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
+ lockdep_assert_held(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
err = ice_repr_add(vf);
if (err)
goto err;
-
- ice_vc_change_ops_to_repr(&vf->vc_ops);
}
return 0;
err:
- for (i = i - 1; i >= 0; i--) {
- struct ice_vf *vf = &pf->vf[i];
-
- ice_repr_rem(vf);
- ice_vc_set_dflt_vf_ops(&vf->vc_ops);
- }
+ ice_repr_rem_from_all_vfs(pf);
return err;
}
/**
- * ice_repr_rem_from_all_vfs - remove port representor for all VFs
- * @pf: pointer to PF structure
- */
-void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
-{
- int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- ice_repr_rem(vf);
- ice_vc_set_dflt_vf_ops(&vf->vc_ops);
- }
-}
-
-/**
* ice_repr_start_tx_queues - start Tx queues of port representor
* @repr: pointer to repr structure
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index 0c77ff050d15..378a45bfa256 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -5,7 +5,6 @@
#define _ICE_REPR_H_
#include <net/dst_metadata.h>
-#include "ice.h"
struct ice_repr {
struct ice_vsi *src_vsi;
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 52c6bac41bf7..8915a9d39e36 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -1,532 +1,1919 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
-#include "ice_common.h"
-#include "ice_sriov.h"
+#include "ice.h"
+#include "ice_vf_lib_private.h"
+#include "ice_base.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
+#include "ice_dcb_lib.h"
+#include "ice_flow.h"
+#include "ice_eswitch.h"
+#include "ice_virtchnl_allowlist.h"
+#include "ice_flex_pipe.h"
+#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_vlan.h"
/**
- * ice_aq_send_msg_to_vf
- * @hw: pointer to the hardware structure
- * @vfid: VF ID to send msg
- * @v_opcode: opcodes for VF-PF communication
- * @v_retval: return error code
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- * @cd: pointer to command details
+ * ice_free_vf_entries - Free all VF entries from the hash table
+ * @pf: pointer to the PF structure
*
- * Send message to VF driver (0x0802) using mailbox
- * queue and asynchronously sending message via
- * ice_sq_send_cmd() function
+ * Iterate over the VF hash table, removing and releasing all VF entries.
+ * Called during VF teardown or as cleanup during failed VF initialization.
*/
-int
-ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
- u8 *msg, u16 msglen, struct ice_sq_cd *cd)
+static void ice_free_vf_entries(struct ice_pf *pf)
{
- struct ice_aqc_pf_vf_msg *cmd;
- struct ice_aq_desc desc;
+ struct ice_vfs *vfs = &pf->vfs;
+ struct hlist_node *tmp;
+ struct ice_vf *vf;
+ unsigned int bkt;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
+ /* Remove all VFs from the hash table and release their main
+ * reference. Once all references to the VF are dropped, ice_put_vf()
+ * will call ice_release_vf which will remove the VF memory.
+ */
+ lockdep_assert_held(&vfs->table_lock);
- cmd = &desc.params.virt;
- cmd->id = cpu_to_le32(vfid);
+ hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
+ hash_del_rcu(&vf->entry);
+ ice_put_vf(vf);
+ }
+}
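Editor's note: ice_put_vf() and the release path are not shown in this hunk; judging from the kref_init()/kref_get_unless_zero() calls elsewhere in the patch and the .free op in the vf_ops table below, the lifetime pattern presumably looks like this sketch (names hypothetical where not visible in the diff):

static void ice_example_release_vf(struct kref *ref)
{
	struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);

	/* dispatch to the per-flavor free, e.g. ice_sriov_free_vf() */
	vf->vf_ops->free(vf);
}

void ice_example_put_vf(struct ice_vf *vf)
{
	kref_put(&vf->refcnt, ice_example_release_vf);
}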
- desc.cookie_high = cpu_to_le32(v_opcode);
- desc.cookie_low = cpu_to_le32(v_retval);
+/**
+ * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
+ * @vf: invalidate this VF's VSI after freeing it
+ */
+static void ice_vf_vsi_release(struct ice_vf *vf)
+{
+ ice_vsi_release(ice_get_vf_vsi(vf));
+ ice_vf_invalidate_vsi(vf);
+}
- if (msglen)
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+/**
+ * ice_free_vf_res - Free a VF's resources
+ * @vf: pointer to the VF info
+ */
+static void ice_free_vf_res(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ int i, last_vector_idx;
+
+ /* First, disable the VF's configuration API to prevent the OS from
+ * accessing the VF's VSI after it's freed or invalidated.
+ */
+ clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ ice_vf_fdir_exit(vf);
+ /* free VF control VSI */
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+ ice_vf_ctrl_vsi_release(vf);
+
+ /* free VSI and disconnect it from the parent uplink */
+ if (vf->lan_vsi_idx != ICE_NO_VSI) {
+ ice_vf_vsi_release(vf);
+ vf->num_mac = 0;
+ }
- return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
+ last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1;
+
+ /* clear VF MDD event information */
+ memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
+ memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
+
+ /* Disable interrupts so that VF starts in a known state */
+ for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
+ wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
+ ice_flush(&pf->hw);
+ }
+ /* reset some of the state variables keeping track of the resources */
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}
/**
- * ice_conv_link_speed_to_virtchnl
- * @adv_link_support: determines the format of the returned link speed
- * @link_speed: variable containing the link_speed to be converted
+ * ice_dis_vf_mappings
+ * @vf: pointer to the VF structure
+ */
+static void ice_dis_vf_mappings(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int first, last, v;
+ struct ice_hw *hw;
+
+ hw = &pf->hw;
+ vsi = ice_get_vf_vsi(vf);
+
+ dev = ice_pf_to_dev(pf);
+ wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
+ wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
+
+ first = vf->first_vector_idx;
+ last = first + pf->vfs.num_msix_per - 1;
+ for (v = first; v <= last; v++) {
+ u32 reg;
+
+ reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
+ GLINT_VECT2FUNC_IS_PF_M) |
+ ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
+ GLINT_VECT2FUNC_PF_NUM_M));
+ wr32(hw, GLINT_VECT2FUNC(v), reg);
+ }
+
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
+ wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
+ else
+ dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
+
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
+ wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
+ else
+ dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
+}
+
+/**
+ * ice_sriov_free_msix_res - Reset/free any used MSIX resources
+ * @pf: pointer to the PF structure
+ *
+ * Since no MSIX entries are taken from pf->irq_tracker, just clear
+ * pf->sriov_base_vector.
*
- * Convert link speed supported by HW to link speed supported by virtchnl.
- * If adv_link_support is true, then return link speed in Mbps. Else return
- * link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller
- * needs to cast back to an enum virtchnl_link_speed in the case where
- * adv_link_support is false, but when adv_link_support is true the caller can
- * expect the speed in Mbps.
+ * Returns 0 on success, and -EINVAL on error.
*/
-u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
+static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
- u32 speed;
+ struct ice_res_tracker *res;
- if (adv_link_support)
- switch (link_speed) {
- case ICE_AQ_LINK_SPEED_10MB:
- speed = ICE_LINK_SPEED_10MBPS;
- break;
- case ICE_AQ_LINK_SPEED_100MB:
- speed = ICE_LINK_SPEED_100MBPS;
- break;
- case ICE_AQ_LINK_SPEED_1000MB:
- speed = ICE_LINK_SPEED_1000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_2500MB:
- speed = ICE_LINK_SPEED_2500MBPS;
- break;
- case ICE_AQ_LINK_SPEED_5GB:
- speed = ICE_LINK_SPEED_5000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_10GB:
- speed = ICE_LINK_SPEED_10000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_20GB:
- speed = ICE_LINK_SPEED_20000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_25GB:
- speed = ICE_LINK_SPEED_25000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_40GB:
- speed = ICE_LINK_SPEED_40000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_50GB:
- speed = ICE_LINK_SPEED_50000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_100GB:
- speed = ICE_LINK_SPEED_100000MBPS;
- break;
- default:
- speed = ICE_LINK_SPEED_UNKNOWN;
- break;
- }
+ if (!pf)
+ return -EINVAL;
+
+ res = pf->irq_tracker;
+ if (!res)
+ return -EINVAL;
+
+ /* give back irq_tracker resources used */
+ WARN_ON(pf->sriov_base_vector < res->num_entries);
+
+ pf->sriov_base_vector = 0;
+
+ return 0;
+}
+
+/**
+ * ice_free_vfs - Free all VFs
+ * @pf: pointer to the PF structure
+ */
+void ice_free_vfs(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vfs *vfs = &pf->vfs;
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ if (!ice_has_vfs(pf))
+ return;
+
+ while (test_and_set_bit(ICE_VF_DIS, pf->state))
+ usleep_range(1000, 2000);
+
+ /* Disable IOV before freeing resources. This lets any VF drivers
+ * running in the host get themselves cleaned up before we yank
+ * the carpet out from underneath their feet.
+ */
+ if (!pci_vfs_assigned(pf->pdev))
+ pci_disable_sriov(pf->pdev);
else
- /* Virtchnl speeds are not defined for every speed supported in
- * the hardware. To maintain compatibility with older AVF
- * drivers, while reporting the speed the new speed values are
- * resolved to the closest known virtchnl speeds
- */
- switch (link_speed) {
- case ICE_AQ_LINK_SPEED_10MB:
- case ICE_AQ_LINK_SPEED_100MB:
- speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
- break;
- case ICE_AQ_LINK_SPEED_1000MB:
- case ICE_AQ_LINK_SPEED_2500MB:
- case ICE_AQ_LINK_SPEED_5GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
- break;
- case ICE_AQ_LINK_SPEED_10GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
- break;
- case ICE_AQ_LINK_SPEED_20GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
- break;
- case ICE_AQ_LINK_SPEED_25GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
- break;
- case ICE_AQ_LINK_SPEED_40GB:
- case ICE_AQ_LINK_SPEED_50GB:
- case ICE_AQ_LINK_SPEED_100GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
- break;
- default:
- speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
- break;
+ dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
+
+ mutex_lock(&vfs->table_lock);
+
+ ice_eswitch_release(pf);
+
+ ice_for_each_vf(pf, bkt, vf) {
+ mutex_lock(&vf->cfg_lock);
+
+ ice_dis_vf_qs(vf);
+
+ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ /* disable VF qp mappings and set VF disable state */
+ ice_dis_vf_mappings(vf);
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ ice_free_vf_res(vf);
}
- return speed;
+ if (!pci_vfs_assigned(pf->pdev)) {
+ u32 reg_idx, bit_idx;
+
+ reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
+ wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ }
+
+ /* clear malicious info since the VF is getting released */
+ if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
+ ICE_MAX_SRIOV_VFS, vf->vf_id))
+ dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
+ vf->vf_id);
+
+ mutex_unlock(&vf->cfg_lock);
+ }
+
+ if (ice_sriov_free_msix_res(pf))
+ dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
+
+ vfs->num_qps_per = 0;
+ ice_free_vf_entries(pf);
+
+ mutex_unlock(&vfs->table_lock);
+
+ clear_bit(ICE_VF_DIS, pf->state);
+ clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}
-/* The mailbox overflow detection algorithm helps to check if there
- * is a possibility of a malicious VF transmitting too many MBX messages to the
- * PF.
- * 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during
- * driver initialization in ice_init_hw() using ice_mbx_init_snapshot().
- * The struct ice_mbx_snapshot helps to track and traverse a static window of
- * messages within the mailbox queue while looking for a malicious VF.
+/**
+ * ice_vf_vsi_setup - Set up a VF VSI
+ * @vf: VF to setup VSI for
*
- * 2. When the caller starts processing its mailbox queue in response to an
- * interrupt, the structure ice_mbx_snapshot is expected to be cleared before
- * the algorithm can be run for the first time for that interrupt. This can be
- * done via ice_mbx_reset_snapshot().
+ * Returns a pointer to the successfully allocated VSI struct on success,
+ * or NULL on failure.
+ */
+static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
+{
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL);
+
+ if (!vsi) {
+ dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
+ ice_vf_invalidate_vsi(vf);
+ return NULL;
+ }
+
+ vf->lan_vsi_idx = vsi->idx;
+ vf->lan_vsi_num = vsi->vsi_num;
+
+ return vsi;
+}
+
+/**
+ * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
+ * @pf: pointer to PF structure
+ * @vf: pointer to VF that the first MSIX vector index is being calculated for
*
- * 3. For every message read by the caller from the MBX Queue, the caller must
- * call the detection algorithm's entry function ice_mbx_vf_state_handler().
- * Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is
- * filled as it is required to be passed to the algorithm.
+ * This returns the first MSIX vector index in PF space that is used by this VF.
+ * This index is used when accessing PF-relative registers such as
+ * GLINT_VECT2FUNC and GLINT_DYN_CTL.
+ * This will always be the OICR index in the AVF driver, so any functionality
+ * using vf->first_vector_idx for queue configuration will have to increment by
+ * 1 to avoid meddling with the OICR index.
+ */
+static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
+{
+ return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per;
+}
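Editor's note, a worked example with illustrative numbers: with pf->sriov_base_vector = 96 and pf->vfs.num_msix_per = 17, VF 2's first PF-space vector is 96 + 2 * 17 = 130; per the comment above, that slot is the VF's OICR, so its queue vectors start at 131.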
+
+/**
+ * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
+ * @vf: VF to enable MSIX mappings for
*
- * 4. Every time a message is read from the MBX queue, a VFId is received which
- * is passed to the state handler. The boolean output is_malvf of the state
- * handler ice_mbx_vf_state_handler() serves as an indicator to the caller
- * whether this VF is malicious or not.
+ * Some of the registers need to be indexed/configured using hardware global
+ * device values and other registers need 0-based values, which represent PF
+ * based values.
+ */
+static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
+{
+ int device_based_first_msix, device_based_last_msix;
+ int pf_based_first_msix, pf_based_last_msix, v;
+ struct ice_pf *pf = vf->pf;
+ int device_based_vf_id;
+ struct ice_hw *hw;
+ u32 reg;
+
+ hw = &pf->hw;
+ pf_based_first_msix = vf->first_vector_idx;
+ pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1;
+
+ device_based_first_msix = pf_based_first_msix +
+ pf->hw.func_caps.common_cap.msix_vector_first_id;
+ device_based_last_msix =
+ (device_based_first_msix + pf->vfs.num_msix_per) - 1;
+ device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
+ VPINT_ALLOC_FIRST_M) |
+ ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
+ VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
+ wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
+
+ reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
+ & VPINT_ALLOC_PCI_FIRST_M) |
+ ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
+ VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
+ wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
+
+ /* map the interrupts to its functions */
+ for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
+ reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
+ GLINT_VECT2FUNC_VF_NUM_M) |
+ ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
+ GLINT_VECT2FUNC_PF_NUM_M));
+ wr32(hw, GLINT_VECT2FUNC(v), reg);
+ }
+
+ /* Map mailbox interrupt to VF MSI-X vector 0 */
+ wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
+ * @vf: VF to enable the mappings for
+ * @max_txq: max Tx queues allowed on the VF's VSI
+ * @max_rxq: max Rx queues allowed on the VF's VSI
+ */
+static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_hw *hw = &vf->pf->hw;
+ u32 reg;
+
+ /* set regardless of mapping mode */
+ wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
+
+ /* VF Tx queues allocation */
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
+ /* set the VF PF Tx queue range
+ * VFNUMQ value should be set to (number of queues - 1). A value
+ * of 0 means 1 queue and a value of 255 means 256 queues
+ */
+ reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
+ VPLAN_TX_QBASE_VFFIRSTQ_M) |
+ (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
+ VPLAN_TX_QBASE_VFNUMQ_M));
+ wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
+ } else {
+ dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
+ }
+
+ /* set regardless of mapping mode */
+ wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
+
+ /* VF Rx queues allocation */
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
+ /* set the VF PF Rx queue range
+ * VFNUMQ value should be set to (number of queues - 1). A value
+ * of 0 means 1 queue and a value of 255 means 256 queues
+ */
+ reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
+ VPLAN_RX_QBASE_VFFIRSTQ_M) |
+ (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
+ VPLAN_RX_QBASE_VFNUMQ_M));
+ wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
+ } else {
+ dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
+ }
+}
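Editor's note, a worked example with illustrative numbers: a VF whose VSI starts at txq_map[0] = 64 with max_txq = 16 programs VFFIRSTQ = 64 and VFNUMQ = 15, since the field encodes (number of queues - 1).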
+
+/**
+ * ice_ena_vf_mappings - enable VF MSIX and queue mapping
+ * @vf: pointer to the VF structure
+ */
+static void ice_ena_vf_mappings(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ ice_ena_vf_msix_mappings(vf);
+ ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
+}
+
+/**
+ * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
+ * @vf: VF to calculate the register index for
+ * @q_vector: a q_vector associated to the VF
+ */
+int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
+{
+ struct ice_pf *pf;
+
+ if (!vf || !q_vector)
+ return -EINVAL;
+
+ pf = vf->pf;
+
+ /* always add one to account for the OICR being the first MSIX */
+ return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
+ q_vector->v_idx + 1;
+}
+
+/**
+ * ice_get_max_valid_res_idx - Get the max valid resource index
+ * @res: pointer to the resource to find the max valid index for
*
- * 5. When a VF is identified to be malicious, the caller can send a message
- * to the system administrator. The caller can invoke ice_mbx_report_malvf()
- * to help determine if a malicious VF is to be reported or not. This function
- * requires the caller to maintain a global bitmap to track all malicious VFs
- * and pass that to ice_mbx_report_malvf() along with the VFID which was identified
- * to be malicious by ice_mbx_vf_state_handler().
+ * Start from the end of the ice_res_tracker and return right when we find the
+ * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
+ * valid for SR-IOV because it is the only consumer that manipulates
+ * res->end, and it is always called when res->end is set to res->num_entries.
+ */
+static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
+{
+ int i;
+
+ if (!res)
+ return -EINVAL;
+
+ for (i = res->num_entries - 1; i >= 0; i--)
+ if (res->list[i] & ICE_RES_VALID_BIT)
+ return i;
+
+ return 0;
+}
+
+/**
+ * ice_sriov_set_msix_res - Set any used MSIX resources
+ * @pf: pointer to PF structure
+ * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
*
- * 6. The global bitmap maintained by PF can be cleared completely if PF is in
- * reset or the bit corresponding to a VF can be cleared if that VF is in reset.
- * When a VF is shut down and brought back up, we assume that the new VF
- * brought up is not malicious and hence report it if found malicious.
+ * This function allows SR-IOV resources to be taken from the end of the PF's
+ * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
+ * just set the pf->sriov_base_vector and return success.
*
- * 7. The function ice_mbx_reset_snapshot() is called to reset the information
- * in ice_mbx_snapshot for every new mailbox interrupt handled.
+ * If there are not enough resources available, return an error. This should
+ * always be caught by ice_set_per_vf_res().
*
- * 8. The memory allocated for variables in ice_mbx_snapshot is de-allocated
- * when driver is unloaded.
+ * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
+ * in the PF's space available for SR-IOV.
*/
-#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
-/* Using the highest value for an unsigned 16-bit value 0xFFFF to indicate that
- * the max messages check must be ignored in the algorithm
+static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
+{
+ u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
+ int vectors_used = pf->irq_tracker->num_entries;
+ int sriov_base_vector;
+
+ sriov_base_vector = total_vectors - num_msix_needed;
+
+ /* make sure we only grab irq_tracker entries from the list end and
+ * that we have enough available MSIX vectors
+ */
+ if (sriov_base_vector < vectors_used)
+ return -EINVAL;
+
+ pf->sriov_base_vector = sriov_base_vector;
+
+ return 0;
+}
+
+/**
+ * ice_set_per_vf_res - check if vectors and queues are available
+ * @pf: pointer to the PF structure
+ * @num_vfs: the number of SR-IOV VFs being configured
+ *
+ * First, determine HW interrupts from the common pool. If we allocate fewer VFs, we
+ * get more vectors and can enable more queues per VF. Note that this does not
+ * grab any vectors from the SW pool already allocated. Also note that all
+ * vector counts include one for each VF's miscellaneous interrupt vector
+ * (i.e. OICR).
+ *
+ * Minimum VFs - 2 vectors, 1 queue pair
+ * Small VFs - 5 vectors, 4 queue pairs
+ * Medium VFs - 17 vectors, 16 queue pairs
+ *
+ * Second, determine number of queue pairs per VF by starting with a pre-defined
+ * maximum each VF supports. If this is not possible, then we adjust based on
+ * queue pairs available on the device.
+ *
+ * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
+ * by each VF during VF initialization and reset.
*/
-#define ICE_IGNORE_MAX_MSG_CNT 0xFFFF
+static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
+{
+ int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
+ u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
+ int msix_avail_per_vf, msix_avail_for_sriov;
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
+
+ if (!num_vfs)
+ return -EINVAL;
+
+ if (max_valid_res_idx < 0)
+ return -ENOSPC;
+
+ /* determine MSI-X resources per VF */
+ msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
+ pf->irq_tracker->num_entries;
+ msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
+ if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
+ } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
+ } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
+ } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
+ num_msix_per_vf = ICE_MIN_INTR_PER_VF;
+ } else {
+ dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
+ msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
+ num_vfs);
+ return -ENOSPC;
+ }
+
+ num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
+ ICE_MAX_RSS_QS_PER_VF);
+ avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
+ if (!avail_qs)
+ num_txq = 0;
+ else if (num_txq > avail_qs)
+ num_txq = rounddown_pow_of_two(avail_qs);
+
+ num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
+ ICE_MAX_RSS_QS_PER_VF);
+ avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
+ if (!avail_qs)
+ num_rxq = 0;
+ else if (num_rxq > avail_qs)
+ num_rxq = rounddown_pow_of_two(avail_qs);
+
+ if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {
+ dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
+ ICE_MIN_QS_PER_VF, num_vfs);
+ return -ENOSPC;
+ }
+
+ err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
+ if (err) {
+ dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
+ num_vfs, err);
+ return err;
+ }
+
+ /* only allow equal Tx/Rx queue count (i.e. queue pairs) */
+ pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
+ pf->vfs.num_msix_per = num_msix_per_vf;
+ dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
+ num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per);
+
+ return 0;
+}
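Editor's note, a worked example with illustrative numbers: with 320 device MSI-X vectors, 64 of them already in the PF's irq_tracker, and 16 VFs requested, msix_avail_per_vf = (320 - 64) / 16 = 16. That lands in the small tier, so each VF gets 5 vectors; assuming ICE_NONQ_VECS_VF is 1 (the per-VF OICR noted above), each VF can then have at most 4 Tx and 4 Rx queues, subject to the available-queue rounding that follows.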
/**
- * ice_mbx_traverse - Pass through mailbox snapshot
- * @hw: pointer to the HW struct
- * @new_state: new algorithm state
+ * ice_init_vf_vsi_res - initialize/setup VF VSI resources
+ * @vf: VF to initialize/setup the VSI for
*
- * Traversing the mailbox static snapshot without checking
- * for malicious VFs.
+ * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
+ * the VF VSI's broadcast filter. It is only used during initial VF creation.
*/
-static void
-ice_mbx_traverse(struct ice_hw *hw,
- enum ice_mbx_snapshot_state *new_state)
+static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
- struct ice_mbx_snap_buffer_data *snap_buf;
- u32 num_iterations;
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_pf *pf = vf->pf;
+ u8 broadcast[ETH_ALEN];
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int err;
- snap_buf = &hw->mbx_snapshot.mbx_buf;
+ vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
- /* As mailbox buffer is circular, applying a mask
- * on the incremented iteration count.
- */
- num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations);
-
- /* Checking either of the below conditions to exit snapshot traversal:
- * Condition-1: If the number of iterations in the mailbox is equal to
- * the mailbox head which would indicate that we have reached the end
- * of the static snapshot.
- * Condition-2: If the maximum messages serviced in the mailbox for a
- * given interrupt is the highest possible value then there is no need
- * to check if the number of messages processed is equal to it. If not
- * check if the number of messages processed is greater than or equal
- * to the maximum number of mailbox entries serviced in current work item.
+ dev = ice_pf_to_dev(pf);
+ vsi = ice_vf_vsi_setup(vf);
+ if (!vsi)
+ return -ENOMEM;
+
+ err = ice_vsi_add_vlan_zero(vsi);
+ if (err) {
+ dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
+ vf->vf_id);
+ goto release_vsi;
+ }
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ err = vlan_ops->ena_rx_filtering(vsi);
+ if (err) {
+ dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
+ vf->vf_id);
+ goto release_vsi;
+ }
+
+ eth_broadcast_addr(broadcast);
+ err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (err) {
+ dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n",
+ vf->vf_id, err);
+ goto release_vsi;
+ }
+
+ err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
+ if (err) {
+ dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
+ vf->vf_id);
+ goto release_vsi;
+ }
+
+ vf->num_mac = 1;
+
+ return 0;
+
+release_vsi:
+ ice_vf_vsi_release(vf);
+ return err;
+}
+
+/**
+ * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
+ * @pf: PF the VFs are associated with
+ */
+static int ice_start_vfs(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ unsigned int bkt, it_cnt;
+ struct ice_vf *vf;
+ int retval;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
+
+ it_cnt = 0;
+ ice_for_each_vf(pf, bkt, vf) {
+ vf->vf_ops->clear_reset_trigger(vf);
+
+ retval = ice_init_vf_vsi_res(vf);
+ if (retval) {
+ dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
+ vf->vf_id, retval);
+ goto teardown;
+ }
+
+ set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ ice_ena_vf_mappings(vf);
+ wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+ it_cnt++;
+ }
+
+ ice_flush(hw);
+ return 0;
+
+teardown:
+ ice_for_each_vf(pf, bkt, vf) {
+ if (it_cnt == 0)
+ break;
+
+ ice_dis_vf_mappings(vf);
+ ice_vf_vsi_release(vf);
+ it_cnt--;
+ }
+
+ return retval;
+}
+
+/**
+ * ice_sriov_free_vf - Free VF memory after all references are dropped
+ * @vf: pointer to VF to free
+ *
+ * Called by ice_put_vf through ice_release_vf once the last reference to a VF
+ * structure has been dropped.
+ */
+static void ice_sriov_free_vf(struct ice_vf *vf)
+{
+ mutex_destroy(&vf->cfg_lock);
+
+ kfree_rcu(vf, rcu);
+}
+
+/**
+ * ice_sriov_clear_mbx_register - clear an SR-IOV VF's mailbox registers
+ * @vf: the VF to configure
+ */
+static void ice_sriov_clear_mbx_register(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0);
+ wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0);
+}
+
+/**
+ * ice_sriov_trigger_reset_register - trigger VF reset for SRIOV VF
+ * @vf: pointer to VF structure
+ * @is_vflr: true if reset occurred due to VFLR
+ *
+ * Trigger and cleanup after a VF reset for a SR-IOV VF.
+ */
+static void ice_sriov_trigger_reset_register(struct ice_vf *vf, bool is_vflr)
+{
+ struct ice_pf *pf = vf->pf;
+ u32 reg, reg_idx, bit_idx;
+ unsigned int vf_abs_id, i;
+ struct device *dev;
+ struct ice_hw *hw;
+
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+ vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ /* In the case of a VFLR, HW has already reset the VF and we just need
+ * to clean up. Otherwise we must first trigger the reset using the
+ * VFRTRIG register.
*/
- if (num_iterations == snap_buf->head ||
- (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT &&
- ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx))
- *new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+ if (!is_vflr) {
+ reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
+ reg |= VPGEN_VFRTRIG_VFSWR_M;
+ wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
+ }
+
+ /* clear the VFLR bit in GLGEN_VFLRSTAT */
+ reg_idx = (vf_abs_id) / 32;
+ bit_idx = (vf_abs_id) % 32;
+ wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ ice_flush(hw);
+
+ wr32(hw, PF_PCI_CIAA,
+ VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
+ for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
+ reg = rd32(hw, PF_PCI_CIAD);
+ /* no transactions pending so stop polling */
+ if ((reg & VF_TRANS_PENDING_M) == 0)
+ break;
+
+ dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
+ udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
+ }
+}
+
+/**
+ * ice_sriov_poll_reset_status - poll SRIOV VF reset status
+ * @vf: pointer to VF structure
+ *
+ * Returns true when reset is successful, else returns false
+ */
+static bool ice_sriov_poll_reset_status(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ unsigned int i;
+ u32 reg;
+
+ for (i = 0; i < 10; i++) {
+ /* VF reset requires driver to first reset the VF and then
+ * poll the status register to make sure that the reset
+ * completed successfully.
+ */
+ reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id));
+ if (reg & VPGEN_VFRSTAT_VFRD_M)
+ return true;
+
+ /* only sleep if the reset is not done */
+ usleep_range(10, 20);
+ }
+ return false;
+}
+
+/**
+ * ice_sriov_clear_reset_trigger - enable VF to access hardware
+ * @vf: VF to enabled hardware access for
+ */
+static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
+{
+ struct ice_hw *hw = &vf->pf->hw;
+ u32 reg;
+
+ reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
+ reg &= ~VPGEN_VFRTRIG_VFSWR_M;
+ wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
+ ice_flush(hw);
+}
+
+/**
+ * ice_sriov_vsi_rebuild - release and rebuild VF's VSI
+ * @vf: VF to release and setup the VSI for
+ *
+ * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
+ * configuration change, etc.).
+ */
+static int ice_sriov_vsi_rebuild(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ ice_vf_vsi_release(vf);
+ if (!ice_vf_vsi_setup(vf)) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to release and setup the VF%u's VSI\n",
+ vf->vf_id);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
+ * @vf: VF to perform tasks on
+ */
+static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
+{
+ ice_vf_rebuild_host_cfg(vf);
+ ice_vf_set_initialized(vf);
+ ice_ena_vf_mappings(vf);
+ wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+}
+
+static const struct ice_vf_ops ice_sriov_vf_ops = {
+ .reset_type = ICE_VF_RESET,
+ .free = ice_sriov_free_vf,
+ .clear_mbx_register = ice_sriov_clear_mbx_register,
+ .trigger_reset_register = ice_sriov_trigger_reset_register,
+ .poll_reset_status = ice_sriov_poll_reset_status,
+ .clear_reset_trigger = ice_sriov_clear_reset_trigger,
+ .vsi_rebuild = ice_sriov_vsi_rebuild,
+ .post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
+};
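Editor's note: this ops table is what lets a generic reset path stay flavor-agnostic. A sketch of how a caller might drive it (hypothetical; the real ice_reset_vf() lives elsewhere in the patch and does considerably more):

static int ice_example_reset_flow(struct ice_vf *vf, bool is_vflr)
{
	vf->vf_ops->clear_mbx_register(vf);
	vf->vf_ops->trigger_reset_register(vf, is_vflr);

	if (!vf->vf_ops->poll_reset_status(vf))
		return -EBUSY;	/* VPGEN_VFRSTAT never reported done */

	vf->vf_ops->clear_reset_trigger(vf);

	if (vf->vf_ops->vsi_rebuild(vf))
		return -ENOMEM;

	vf->vf_ops->post_vsi_rebuild(vf);
	return 0;
}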
+
+/**
+ * ice_create_vf_entries - Allocate and insert VF entries
+ * @pf: pointer to the PF structure
+ * @num_vfs: the number of VFs to allocate
+ *
+ * Allocate new VF entries and insert them into the hash table. Set some
+ * basic default fields for initializing the new VFs.
+ *
+ * After this function exits, the hash table will have num_vfs entries
+ * inserted.
+ *
+ * Returns 0 on success or an integer error code on failure.
+ */
+static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
+{
+ struct ice_vfs *vfs = &pf->vfs;
+ struct ice_vf *vf;
+ u16 vf_id;
+ int err;
+
+ lockdep_assert_held(&vfs->table_lock);
+
+ for (vf_id = 0; vf_id < num_vfs; vf_id++) {
+ vf = kzalloc(sizeof(*vf), GFP_KERNEL);
+ if (!vf) {
+ err = -ENOMEM;
+ goto err_free_entries;
+ }
+ kref_init(&vf->refcnt);
+
+ vf->pf = pf;
+ vf->vf_id = vf_id;
+
+ /* set SR-IOV VF ops for VFs created during the SR-IOV flow */
+ vf->vf_ops = &ice_sriov_vf_ops;
+
+ vf->vf_sw_id = pf->first_sw;
+ /* assign default capabilities */
+ vf->spoofchk = true;
+ vf->num_vf_qs = pf->vfs.num_qps_per;
+ ice_vc_set_default_allowlist(vf);
+
+ /* ctrl_vsi_idx will be set to a valid value only when VF
+ * creates its first fdir rule.
+ */
+ ice_vf_ctrl_invalidate_vsi(vf);
+ ice_vf_fdir_init(vf);
+
+ ice_virtchnl_set_dflt_ops(vf);
+
+ mutex_init(&vf->cfg_lock);
+
+ hash_add_rcu(vfs->table, &vf->entry, vf_id);
+ }
+
+ return 0;
+
+err_free_entries:
+ ice_free_vf_entries(pf);
+ return err;
}
/**
- * ice_mbx_detect_malvf - Detect malicious VF in snapshot
- * @hw: pointer to the HW struct
- * @vf_id: relative virtual function ID
- * @new_state: new algorithm state
- * @is_malvf: boolean output to indicate if VF is malicious
+ * ice_ena_vfs - enable VFs so they are ready to be used
+ * @pf: pointer to the PF structure
+ * @num_vfs: number of VFs to enable
+ */
+static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ int ret;
+
+ /* Disable global interrupt 0 so we don't try to handle the VFLR. */
+ wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
+ ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
+ set_bit(ICE_OICR_INTR_DIS, pf->state);
+ ice_flush(hw);
+
+ ret = pci_enable_sriov(pf->pdev, num_vfs);
+ if (ret)
+ goto err_unroll_intr;
+
+ mutex_lock(&pf->vfs.table_lock);
+
+ ret = ice_set_per_vf_res(pf, num_vfs);
+ if (ret) {
+ dev_err(dev, "Not enough resources for %d VFs, err %d. Try with fewer number of VFs\n",
+ num_vfs, ret);
+ goto err_unroll_sriov;
+ }
+
+ ret = ice_create_vf_entries(pf, num_vfs);
+ if (ret) {
+ dev_err(dev, "Failed to allocate VF entries for %d VFs\n",
+ num_vfs);
+ goto err_unroll_sriov;
+ }
+
+ ret = ice_start_vfs(pf);
+ if (ret) {
+ dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
+ ret = -EAGAIN;
+ goto err_unroll_vf_entries;
+ }
+
+ clear_bit(ICE_VF_DIS, pf->state);
+
+ ret = ice_eswitch_configure(pf);
+ if (ret) {
+ dev_err(dev, "Failed to configure eswitch, err %d\n", ret);
+ goto err_unroll_sriov;
+ }
+
+ /* rearm global interrupts */
+ if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
+ ice_irq_dynamic_ena(hw, NULL, NULL);
+
+ mutex_unlock(&pf->vfs.table_lock);
+
+ return 0;
+
+err_unroll_vf_entries:
+ ice_free_vf_entries(pf);
+err_unroll_sriov:
+ mutex_unlock(&pf->vfs.table_lock);
+ pci_disable_sriov(pf->pdev);
+err_unroll_intr:
+ /* rearm interrupts here */
+ ice_irq_dynamic_ena(hw, NULL, NULL);
+ clear_bit(ICE_OICR_INTR_DIS, pf->state);
+ return ret;
+}
+
+/**
+ * ice_pci_sriov_ena - Enable or change number of VFs
+ * @pf: pointer to the PF structure
+ * @num_vfs: number of VFs to allocate
*
- * This function tracks the number of asynchronous messages
- * sent per VF and marks the VF as malicious if it exceeds
- * the permissible number of messages to send.
+ * Returns 0 on success and negative on failure
*/
-static int
-ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
- enum ice_mbx_snapshot_state *new_state,
- bool *is_malvf)
+static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
- struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+ int pre_existing_vfs = pci_num_vf(pf->pdev);
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
- if (vf_id >= snap->mbx_vf.vfcntr_len)
- return -EIO;
+ if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+ ice_free_vfs(pf);
+ else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+ return 0;
- /* increment the message count in the VF array */
- snap->mbx_vf.vf_cntr[vf_id]++;
+ if (num_vfs > pf->vfs.num_supported) {
+ dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
+ num_vfs, pf->vfs.num_supported);
+ return -EOPNOTSUPP;
+ }
- if (snap->mbx_vf.vf_cntr[vf_id] >= ICE_ASYNC_VF_MSG_THRESHOLD)
- *is_malvf = true;
+ dev_info(dev, "Enabling %d VFs\n", num_vfs);
+ err = ice_ena_vfs(pf, num_vfs);
+ if (err) {
+ dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
+ return err;
+ }
- /* continue to iterate through the mailbox snapshot */
- ice_mbx_traverse(hw, new_state);
+ set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
+ return 0;
+}
+
+/**
+ * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
+ * @pf: PF to enable SR-IOV on
+ */
+static int ice_check_sriov_allowed(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
+ dev_err(dev, "This device is not capable of SR-IOV\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ice_is_safe_mode(pf)) {
+ dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ice_pf_state_is_nominal(pf)) {
+ dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
+ return -EBUSY;
+ }
return 0;
}
/**
- * ice_mbx_reset_snapshot - Reset mailbox snapshot structure
- * @snap: pointer to mailbox snapshot structure in the ice_hw struct
+ * ice_sriov_configure - Enable or change number of VFs via sysfs
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs to allocate or 0 to free VFs
+ *
+ * This function is called when the user updates the number of VFs in sysfs. On
+ * success, return whatever num_vfs was set to by the caller. Return negative on
+ * failure.
+ */
+int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ err = ice_check_sriov_allowed(pf);
+ if (err)
+ return err;
+
+ if (!num_vfs) {
+ if (!pci_vfs_assigned(pdev)) {
+ ice_mbx_deinit_snapshot(&pf->hw);
+ ice_free_vfs(pf);
+ if (pf->lag)
+ ice_enable_lag(pf->lag);
+ return 0;
+ }
+
+ dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
+ return -EBUSY;
+ }
+
+ err = ice_mbx_init_snapshot(&pf->hw, num_vfs);
+ if (err)
+ return err;
+
+ err = ice_pci_sriov_ena(pf, num_vfs);
+ if (err) {
+ ice_mbx_deinit_snapshot(&pf->hw);
+ return err;
+ }
+
+ if (pf->lag)
+ ice_disable_lag(pf->lag);
+ return num_vfs;
+}
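Editor's note: this callback is reached through the standard PCI SR-IOV sysfs knob, so an administrator drives it with something like `echo 8 > /sys/bus/pci/devices/0000:3b:00.0/sriov_numvfs` (device address illustrative); writing 0 takes the ice_free_vfs() branch above.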
+
+/**
+ * ice_process_vflr_event - Free VF resources via IRQ calls
+ * @pf: pointer to the PF structure
*
- * Reset the mailbox snapshot structure and clear VF counter array.
+ * Called from the VFLR IRQ handler to free up VF resources and state
+ * variables.
*/
-static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
+void ice_process_vflr_event(struct ice_pf *pf)
{
- u32 vfcntr_len;
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
+ unsigned int bkt;
+ u32 reg;
- if (!snap || !snap->mbx_vf.vf_cntr)
+ if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
+ !ice_has_vfs(pf))
return;
- /* Clear VF counters. */
- vfcntr_len = snap->mbx_vf.vfcntr_len;
- if (vfcntr_len)
- memset(snap->mbx_vf.vf_cntr, 0,
- (vfcntr_len * sizeof(*snap->mbx_vf.vf_cntr)));
-
- /* Reset mailbox snapshot for a new capture. */
- memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
- snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
-}
-
-/**
- * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
- * @hw: pointer to the HW struct
- * @mbx_data: pointer to structure containing mailbox data
- * @vf_id: relative virtual function (VF) ID
- * @is_malvf: boolean output to indicate if VF is malicious
- *
- * The function serves as an entry point for the malicious VF
- * detection algorithm by handling the different states and state
- * transitions of the algorithm:
- * New snapshot: This state is entered when creating a new static
- * snapshot. The data from any previous mailbox snapshot is
- * cleared and a new capture of the mailbox head and tail is
- * logged. This will be the new static snapshot to detect
- * asynchronous messages sent by VFs. On capturing the snapshot
- * and depending on whether the number of pending messages in that
- * snapshot exceed the watermark value, the state machine enters
- * traverse or detect states.
- * Traverse: If pending message count is below watermark then iterate
- * through the snapshot without any action on VF.
- * Detect: If pending message count exceeds watermark traverse
- * the static snapshot and look for a malicious VF.
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
+ u32 reg_idx, bit_idx;
+
+ reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
+ /* read GLGEN_VFLRSTAT register to find out the flr VFs */
+ reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
+ if (reg & BIT(bit_idx))
+ /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
+ ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
+ }
+ mutex_unlock(&pf->vfs.table_lock);
+}
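Editor's note, a worked example with illustrative numbers: with vf_base_id = 64 and vf_id = 5, the absolute VF ID is 69, so the driver tests bit 69 % 32 = 5 of GLGEN_VFLRSTAT(69 / 32 = 2).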
+
+/**
+ * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
+ * @pf: PF used to index all VFs
+ * @pfq: queue index relative to the PF's function space
+ *
+ * If no VF is found that owns the pfq then return NULL, otherwise return a
+ * pointer to the VF that owns the pfq.
+ *
+ * If this function returns non-NULL, it acquires a reference count of the VF
+ * structure. The caller is responsible for calling ice_put_vf() to drop this
+ * reference.
+ */
+static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ struct ice_vsi *vsi;
+ u16 rxq_idx;
+
+ vsi = ice_get_vf_vsi(vf);
+
+ ice_for_each_rxq(vsi, rxq_idx)
+ if (vsi->rxq_map[rxq_idx] == pfq) {
+ struct ice_vf *found;
+
+ if (kref_get_unless_zero(&vf->refcnt))
+ found = vf;
+ else
+ found = NULL;
+ rcu_read_unlock();
+ return found;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+/**
+ * ice_globalq_to_pfq - convert from global queue index to PF space queue index
+ * @pf: PF used for conversion
+ * @globalq: global queue index used to convert to PF space queue index
+ */
+static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
+{
+ return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
+}
+
+/**
+ * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
+ * @pf: PF that the LAN overflow event happened on
+ * @event: structure holding the event information for the LAN overflow event
+ *
+ * Determine if the LAN overflow event was caused by a VF queue. If it was not
+ * caused by a VF, do nothing. If a VF caused this LAN overflow event, trigger a
+ * reset on the offending VF.
+ */
+void
+ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ u32 gldcb_rtctq, queue;
+ struct ice_vf *vf;
+
+ gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
+ dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
+
+ /* event returns device global Rx queue number */
+ queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
+ GLDCB_RTCTQ_RXQNUM_S;
+
+ vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
+ if (!vf)
+ return;
+
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
+ ice_put_vf(vf);
+}
+
+/**
+ * ice_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ena: flag to enable or disable feature
+ *
+ * Enable or disable VF spoof checking
+ */
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ struct ice_vsi *vf_vsi;
+ struct device *dev;
+ struct ice_vf *vf;
+ int ret;
+
+ dev = ice_pf_to_dev(pf);
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return -EINVAL;
+
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
+
+ vf_vsi = ice_get_vf_vsi(vf);
+ if (!vf_vsi) {
+ netdev_err(netdev, "VSI %d for VF %d is null\n",
+ vf->lan_vsi_idx, vf->vf_id);
+ ret = -EINVAL;
+ goto out_put_vf;
+ }
+
+ if (vf_vsi->type != ICE_VSI_VF) {
+ netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
+ vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
+ ret = -ENODEV;
+ goto out_put_vf;
+ }
+
+ if (ena == vf->spoofchk) {
+ dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
+ ret = 0;
+ goto out_put_vf;
+ }
+
+ ret = ice_vsi_apply_spoofchk(vf_vsi, ena);
+ if (ret)
+ dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d\n error %d\n",
+ ena ? "ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret);
+ else
+ vf->spoofchk = ena;
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_get_vf_cfg
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ivi: VF configuration structure
+ *
+ * Return the VF configuration.
*/
int
-ice_mbx_vf_state_handler(struct ice_hw *hw,
- struct ice_mbx_data *mbx_data, u16 vf_id,
- bool *is_malvf)
+ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
- struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
- struct ice_mbx_snap_buffer_data *snap_buf;
- struct ice_ctl_q_info *cq = &hw->mailboxq;
- enum ice_mbx_snapshot_state new_state;
- int status = 0;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vf *vf;
+ int ret;
- if (!is_malvf || !mbx_data)
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
return -EINVAL;
- /* When entering the mailbox state machine assume that the VF
- * is not malicious until detected.
- */
- *is_malvf = false;
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
+
+ ivi->vf = vf_id;
+ ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr);
+
+ /* VF configuration for VLAN and applicable QoS */
+ ivi->vlan = ice_vf_get_port_vlan_id(vf);
+ ivi->qos = ice_vf_get_port_vlan_prio(vf);
+ if (ice_vf_is_port_vlan_ena(vf))
+ ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf));
+
+ ivi->trusted = vf->trusted;
+ ivi->spoofchk = vf->spoofchk;
+ if (!vf->link_forced)
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->link_up)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+ ivi->max_tx_rate = vf->max_tx_rate;
+ ivi->min_tx_rate = vf->min_tx_rate;
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
+ * @pf: PF used to reference the switch's rules
+ * @umac: unicast MAC to compare against existing switch rules
+ *
+ * Return true on the first/any match, else return false
+ */
+static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
+{
+ struct ice_sw_recipe *mac_recipe_list =
+ &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
+ struct ice_fltr_mgmt_list_entry *list_itr;
+ struct list_head *rule_head;
+ struct mutex *rule_lock; /* protect MAC filter list access */
+
+ rule_head = &mac_recipe_list->filt_rules;
+ rule_lock = &mac_recipe_list->filt_rule_lock;
- /* Checking if max messages allowed to be processed while servicing current
- * interrupt is not less than the defined AVF message threshold.
- */
- if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
+ mutex_lock(rule_lock);
+ list_for_each_entry(list_itr, rule_head, list_entry) {
+ u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
+
+ if (ether_addr_equal(existing_mac, umac)) {
+ mutex_unlock(rule_lock);
+ return true;
+ }
+ }
+
+ mutex_unlock(rule_lock);
+
+ return false;
+}
+
+/**
+ * ice_set_vf_mac
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @mac: MAC address
+ *
+ * Program the VF's MAC address.
+ */
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vf *vf;
+ int ret;
+
+ if (is_multicast_ether_addr(mac)) {
+ netdev_err(netdev, "%pM not a valid unicast address\n", mac);
return -EINVAL;
+ }
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return -EINVAL;
+
+ /* nothing left to do, unicast MAC already set */
+ if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
+ ether_addr_equal(vf->hw_lan_addr.addr, mac)) {
+ ret = 0;
+ goto out_put_vf;
+ }
+
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
- /* The watermark value should not be lesser than the threshold limit
- * set for the number of asynchronous messages a VF can send to mailbox
- * nor should it be greater than the maximum number of messages in the
- * mailbox serviced in current interrupt.
+ if (ice_unicast_mac_exists(pf, mac)) {
+ netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
+ mac, vf_id, mac);
+ ret = -EINVAL;
+ goto out_put_vf;
+ }
+
+ mutex_lock(&vf->cfg_lock);
+
+ /* VF is notified of its new MAC via the PF's response to the
+ * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
*/
- if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
- mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
+ ether_addr_copy(vf->dev_lan_addr.addr, mac);
+ ether_addr_copy(vf->hw_lan_addr.addr, mac);
+ if (is_zero_ether_addr(mac)) {
+ /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
+ vf->pf_set_mac = false;
+ netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
+ vf->vf_id);
+ } else {
+ /* PF will add MAC rule for the VF */
+ vf->pf_set_mac = true;
+ netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
+ mac, vf_id);
+ }
+
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+ mutex_unlock(&vf->cfg_lock);
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
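+/* Note on the zero-MAC case above: an all-zeroes address does not program a
+ * filter; it clears pf_set_mac and hands MAC ownership back to the VF driver,
+ * which re-registers its own address via VIRTCHNL_OP_ADD_ETH_ADDR after the
+ * notify-reset. From the host this is typically driven with something like
+ * "ip link set <pf> vf <id> mac 00:00:00:00:00:00".
+ */
+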
+/**
+ * ice_set_vf_trust
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @trusted: Boolean value to enable/disable trusted VF
+ *
+ * Enable or disable a given VF as trusted
+ */
+int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vf *vf;
+ int ret;
+
+ if (ice_is_eswitch_mode_switchdev(pf)) {
+ dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
return -EINVAL;
- new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
- snap_buf = &snap->mbx_buf;
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
- switch (snap_buf->state) {
- case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT:
- /* Clear any previously held data in mailbox snapshot structure. */
- ice_mbx_reset_snapshot(snap);
+ /* Check if already trusted */
+ if (trusted == vf->trusted) {
+ ret = 0;
+ goto out_put_vf;
+ }
- /* Collect the pending ARQ count, number of messages processed and
- * the maximum number of messages allowed to be processed from the
- * Mailbox for current interrupt.
- */
- snap_buf->num_pending_arq = mbx_data->num_pending_arq;
- snap_buf->num_msg_proc = mbx_data->num_msg_proc;
- snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx;
+ mutex_lock(&vf->cfg_lock);
- /* Capture a new static snapshot of the mailbox by logging the
- * head and tail of snapshot and set num_iterations to the tail
- * value to mark the start of the iteration through the snapshot.
- */
- snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
- mbx_data->num_pending_arq);
- snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
- snap_buf->num_iterations = snap_buf->tail;
-
- /* Pending ARQ messages returned by ice_clean_rq_elem
- * is the difference between the head and tail of the
- * mailbox queue. Comparing this value against the watermark
- * helps to check if we potentially have malicious VFs.
- */
- if (snap_buf->num_pending_arq >=
- mbx_data->async_watermark_val) {
- new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
- status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
- } else {
- new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
- ice_mbx_traverse(hw, &new_state);
- }
- break;
+ vf->trusted = trusted;
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+ dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
+ vf_id, trusted ? "" : "un");
- case ICE_MAL_VF_DETECT_STATE_TRAVERSE:
- new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
- ice_mbx_traverse(hw, &new_state);
- break;
+ mutex_unlock(&vf->cfg_lock);
- case ICE_MAL_VF_DETECT_STATE_DETECT:
- new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
- status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
- break;
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_set_vf_link_state
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @link_state: required link state
+ *
+ * Set VF's link state, irrespective of physical link state status
+ */
+int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vf *vf;
+ int ret;
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return -EINVAL;
+
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
+ switch (link_state) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ vf->link_forced = false;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ vf->link_forced = true;
+ vf->link_up = true;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ vf->link_forced = true;
+ vf->link_up = false;
+ break;
default:
- new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
- status = -EIO;
+ ret = -EINVAL;
+ goto out_put_vf;
}
- snap_buf->state = new_state;
+ ice_vc_notify_vf_link_state(vf);
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
+ * @pf: PF associated with VFs
+ */
+static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+ int rate = 0;
- return status;
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf)
+ rate += vf->min_tx_rate;
+ rcu_read_unlock();
+
+ return rate;
}
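+/* ice_for_each_vf_rcu() walks the VF hash table under rcu_read_lock() alone,
+ * so the sum is computed without taking the VF table mutex; a VF added or
+ * removed concurrently may be missed, which is acceptable for this advisory
+ * oversubscription check.
+ */
+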
/**
- * ice_mbx_report_malvf - Track and note malicious VF
- * @hw: pointer to the HW struct
- * @all_malvfs: all malicious VFs tracked by PF
- * @bitmap_len: length of bitmap in bits
- * @vf_id: relative virtual function ID of the malicious VF
- * @report_malvf: boolean to indicate if malicious VF must be reported
+ * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
+ * @vf: VF trying to configure min_tx_rate
+ * @min_tx_rate: min Tx rate in Mbps
+ *
+ * Check if the min_tx_rate being passed in will cause oversubscription of the
+ * total min_tx_rate based on the current link speed and all other VFs'
+ * configured min_tx_rate
*
- * This function will update a bitmap that keeps track of the malicious
- * VFs attached to the PF. A malicious VF must be reported only once if
- * discovered between VF resets or loading so the function checks
- * the input vf_id against the bitmap to verify if the VF has been
- * detected in any previous mailbox iterations.
+ * Return true if the passed min_tx_rate would cause oversubscription, else
+ * return false
+ */
+static bool
+ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
+{
+ int link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf));
+ int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);
+
+ /* this VF's previous rate is being overwritten */
+ all_vfs_min_tx_rate -= vf->min_tx_rate;
+
+ if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
+ dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
+ min_tx_rate, vf->vf_id,
+ all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
+ link_speed_mbps);
+ return true;
+ }
+
+ return false;
+}
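+
+/* Worked example: on a 10000 Mbps link with two other VFs already guaranteed
+ * 4000 Mbps each, requesting min_tx_rate = 3000 gives 8000 + 3000 = 11000 and
+ * is rejected with a reported oversubscription of 1000 Mbps; requesting 2000
+ * sums to exactly 10000 and is allowed, since only strictly greater fails.
+ */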
+
+/**
+ * ice_set_vf_bw - set min/max VF bandwidth
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @min_tx_rate: Minimum Tx rate in Mbps
+ * @max_tx_rate: Maximum Tx rate in Mbps
*/
int
-ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
- u16 bitmap_len, u16 vf_id, bool *report_malvf)
+ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate)
{
- if (!all_malvfs || !report_malvf)
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vsi *vsi;
+ struct device *dev;
+ struct ice_vf *vf;
+ int ret;
+
+ dev = ice_pf_to_dev(pf);
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
return -EINVAL;
- *report_malvf = false;
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
+
+ vsi = ice_get_vf_vsi(vf);
+
+ /* a max_tx_rate of zero means no max Tx rate limiting, so only
+ * compare min_tx_rate against max_tx_rate when max_tx_rate is non-zero
+ */
+ if (max_tx_rate && min_tx_rate > max_tx_rate) {
+ dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n",
+ min_tx_rate, max_tx_rate);
+ ret = -EINVAL;
+ goto out_put_vf;
+ }
+
+ if (min_tx_rate && ice_is_dcb_active(pf)) {
+ dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
+ ret = -EOPNOTSUPP;
+ goto out_put_vf;
+ }
+
+ if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) {
+ ret = -EINVAL;
+ goto out_put_vf;
+ }
+
+ if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
+ ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
+ if (ret) {
+ dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
+ vf->vf_id);
+ goto out_put_vf;
+ }
+
+ vf->min_tx_rate = min_tx_rate;
+ }
+
+ if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
+ ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
+ if (ret) {
+ dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
+ vf->vf_id);
+ goto out_put_vf;
+ }
+
+ vf->max_tx_rate = max_tx_rate;
+ }
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_get_vf_stats - populate some stats for the VF
+ * @netdev: the netdev of the PF
+ * @vf_id: the host OS identifier (0-255)
+ * @vf_stats: pointer to the OS memory to be initialized
+ */
+int ice_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_eth_stats *stats;
+ struct ice_vsi *vsi;
+ struct ice_vf *vf;
+ int ret;
- if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
return -EINVAL;
- if (vf_id >= bitmap_len)
- return -EIO;
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
- /* If the vf_id is found in the bitmap set bit and boolean to true */
- if (!test_and_set_bit(vf_id, all_malvfs))
- *report_malvf = true;
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ ret = -EINVAL;
+ goto out_put_vf;
+ }
- return 0;
+ ice_update_eth_stats(vsi);
+ stats = &vsi->eth_stats;
+
+ memset(vf_stats, 0, sizeof(*vf_stats));
+
+ vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
+ stats->rx_multicast;
+ vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
+ stats->tx_multicast;
+ vf_stats->rx_bytes = stats->rx_bytes;
+ vf_stats->tx_bytes = stats->tx_bytes;
+ vf_stats->broadcast = stats->rx_broadcast;
+ vf_stats->multicast = stats->rx_multicast;
+ vf_stats->rx_dropped = stats->rx_discards;
+ vf_stats->tx_dropped = stats->tx_discards;
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported
+ * @hw: hardware structure used to check the VLAN mode
+ * @vlan_proto: VLAN TPID being checked
+ *
+ * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q
+ * and ETH_P_8021AD are supported. If the device is configured in Single VLAN
+ * Mode (SVM), then only ETH_P_8021Q is supported.
+ */
+static bool
+ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto)
+{
+ bool is_supported = false;
+
+ switch (vlan_proto) {
+ case ETH_P_8021Q:
+ is_supported = true;
+ break;
+ case ETH_P_8021AD:
+ if (ice_is_dvm_ena(hw))
+ is_supported = true;
+ break;
+ }
+
+ return is_supported;
}
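+/* In practice this means "ip link set <pf> vf <id> vlan 100 proto 802.1ad"
+ * only succeeds when the device is in DVM; in SVM the request is rejected in
+ * ice_set_vf_port_vlan() below with -EPROTONOSUPPORT.
+ */
+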
/**
- * ice_mbx_clear_malvf - Clear VF bitmap and counter for VF ID
- * @snap: pointer to the mailbox snapshot structure
- * @all_malvfs: all malicious VFs tracked by PF
- * @bitmap_len: length of bitmap in bits
- * @vf_id: relative virtual function ID of the malicious VF
+ * ice_set_vf_port_vlan
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @vlan_id: VLAN ID being set
+ * @qos: priority setting
+ * @vlan_proto: VLAN protocol
*
- * In case of a VF reset, this function can be called to clear
- * the bit corresponding to the VF ID in the bitmap tracking all
- * malicious VFs attached to the PF. The function also clears the
- * VF counter array at the index of the VF ID. This is to ensure
- * that the new VF loaded is not considered malicious before going
- * through the overflow detection algorithm.
+ * program VF Port VLAN ID and/or QoS
*/
int
-ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
- u16 bitmap_len, u16 vf_id)
+ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+ __be16 vlan_proto)
{
- if (!snap || !all_malvfs)
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ u16 local_vlan_proto = ntohs(vlan_proto);
+ struct device *dev;
+ struct ice_vf *vf;
+ int ret;
+
+ dev = ice_pf_to_dev(pf);
+
+ if (vlan_id >= VLAN_N_VID || qos > 7) {
+ dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
+ vf_id, vlan_id, qos);
return -EINVAL;
+ }
- if (bitmap_len < snap->mbx_vf.vfcntr_len)
+ if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) {
+ dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n",
+ local_vlan_proto);
+ return -EPROTONOSUPPORT;
+ }
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
return -EINVAL;
- /* Ensure VF ID value is not larger than bitmap or VF counter length */
- if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len)
- return -EIO;
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
- /* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */
- clear_bit(vf_id, all_malvfs);
+ if (ice_vf_get_port_vlan_prio(vf) == qos &&
+ ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto &&
+ ice_vf_get_port_vlan_id(vf) == vlan_id) {
+ /* duplicate request, so just return success */
+ dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n",
+ vlan_id, qos, local_vlan_proto);
+ ret = 0;
+ goto out_put_vf;
+ }
- /* Clear the VF counter in the mailbox snapshot structure for that VF ID.
- * This is to ensure that if a VF is unloaded and a new one brought back
- * up with the same VF ID for a snapshot currently in traversal or detect
- * state the counter for that VF ID does not increment on top of existing
- * values in the mailbox overflow detection algorithm.
- */
- snap->mbx_vf.vf_cntr[vf_id] = 0;
+ mutex_lock(&vf->cfg_lock);
- return 0;
+ vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos);
+ if (ice_vf_is_port_vlan_ena(vf))
+ dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n",
+ vlan_id, qos, local_vlan_proto, vf_id);
+ else
+ dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
+
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+ mutex_unlock(&vf->cfg_lock);
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
}
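+/* A vlan_id of 0 with qos 0 yields a port VLAN that ice_vf_is_port_vlan_ena()
+ * treats as disabled, hence the "Clearing port VLAN" branch above; as with the
+ * MAC path, the change only takes effect through the notify-reset of the VF.
+ */
+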
/**
- * ice_mbx_init_snapshot - Initialize mailbox snapshot structure
- * @hw: pointer to the hardware structure
- * @vf_count: number of VFs allocated on a PF
- *
- * Clear the mailbox snapshot structure and allocate memory
- * for the VF counter array based on the number of VFs allocated
- * on that PF.
+ * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
+ * @vf: pointer to the VF structure
+ */
+void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
+
+ dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
+ vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
+ vf->dev_lan_addr.addr,
+ test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
+ ? "on" : "off");
+}
+
+/**
+ * ice_print_vfs_mdd_events - print VFs' malicious driver detection events
+ * @pf: pointer to the PF structure
*
- * Assumption: This function will assume ice_get_caps() has already been
- * called to ensure that the vf_count can be compared against the number
- * of VFs supported as defined in the functional capabilities of the device.
+ * Called from ice_handle_mdd_event() to rate limit and print the VFs' MDD events.
*/
-int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
+void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
- struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
+ unsigned int bkt;
- /* Ensure that the number of VFs allocated is non-zero and
- * is not greater than the number of supported VFs defined in
- * the functional capabilities of the PF.
- */
- if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
- return -EINVAL;
+ /* check that there are pending MDD events to print */
+ if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
+ return;
- snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count,
- sizeof(*snap->mbx_vf.vf_cntr),
- GFP_KERNEL);
- if (!snap->mbx_vf.vf_cntr)
- return -ENOMEM;
+ /* VF MDD event logs are rate limited to one second intervals */
+ if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1))
+ return;
- /* Setting the VF counter length to the number of allocated
- * VFs for given PF's functional capabilities.
- */
- snap->mbx_vf.vfcntr_len = vf_count;
+ pf->vfs.last_printed_mdd_jiffies = jiffies;
- /* Clear mbx_buf in the mailbox snaphot structure and setting the
- * mailbox snapshot state to a new capture.
- */
- memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
- snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
+ /* only print Rx MDD event message if there are new events */
+ if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
+ vf->mdd_rx_events.last_printed =
+ vf->mdd_rx_events.count;
+ ice_print_vf_rx_mdd_event(vf);
+ }
- return 0;
+ /* only print Tx MDD event message if there are new events */
+ if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
+ vf->mdd_tx_events.last_printed =
+ vf->mdd_tx_events.count;
+
+ dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
+ vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
+ vf->dev_lan_addr.addr);
+ }
+ }
+ mutex_unlock(&pf->vfs.table_lock);
}
/**
- * ice_mbx_deinit_snapshot - Free mailbox snapshot structure
- * @hw: pointer to the hardware structure
+ * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
+ * @pdev: pointer to a pci_dev structure
*
- * Clear the mailbox snapshot structure and free the VF counter array.
+ * Called when recovering from a PF FLR to restore interrupt capability to
+ * the VFs.
+ */
+void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
+{
+ u16 vf_id;
+ int pos;
+
+ if (!pci_num_vf(pdev))
+ return;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ struct pci_dev *vfdev;
+
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
+ &vf_id);
+ vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
+ while (vfdev) {
+ if (vfdev->is_virtfn && vfdev->physfn == pdev)
+ pci_restore_msi_state(vfdev);
+ vfdev = pci_get_device(pdev->vendor, vf_id,
+ vfdev);
+ }
+ }
+}
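+
+/* The loop above leans on pci_get_device() iterator semantics: each call drops
+ * the reference on the device passed in and returns the next match with a new
+ * reference held, so no explicit pci_dev_put() is needed inside the loop.
+ */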
+
+/**
+ * ice_is_malicious_vf - helper function to detect a malicious VF
+ * @pf: pointer to the PF structure
+ * @event: pointer to the AQ event
+ * @num_msg_proc: the number of messages processed so far
+ * @num_msg_pending: the number of messages pending in the admin queue
*/
-void ice_mbx_deinit_snapshot(struct ice_hw *hw)
+bool
+ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
+ u16 num_msg_proc, u16 num_msg_pending)
{
- struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+ s16 vf_id = le16_to_cpu(event->desc.retval);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_mbx_data mbxdata;
+ bool malvf = false;
+ struct ice_vf *vf;
+ int status;
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return false;
- /* Free VF counter array and reset VF counter length */
- devm_kfree(ice_hw_to_dev(hw), snap->mbx_vf.vf_cntr);
- snap->mbx_vf.vfcntr_len = 0;
+ if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
+ goto out_put_vf;
+
+ mbxdata.num_msg_proc = num_msg_proc;
+ mbxdata.num_pending_arq = num_msg_pending;
+ mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
+#define ICE_MBX_OVERFLOW_WATERMARK 64
+ mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
+
+ /* check to see if we have a malicious VF */
+ status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
+ if (status)
+ goto out_put_vf;
+
+ if (malvf) {
+ bool report_vf = false;
+
+ /* if the VF is malicious and we haven't let the user
+ * know about it, then let them know now
+ */
+ status = ice_mbx_report_malvf(&pf->hw, pf->vfs.malvfs,
+ ICE_MAX_SRIOV_VFS, vf_id,
+ &report_vf);
+ if (status)
+ dev_dbg(dev, "Error reporting malicious VF\n");
+
+ if (report_vf) {
+ struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
+
+ if (pf_vsi)
+ dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
+ &vf->dev_lan_addr.addr[0],
+ pf_vsi->netdev->dev_addr);
+ }
+ }
- /* Clear mbx_buf in the mailbox snaphot structure */
- memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+out_put_vf:
+ ice_put_vf(vf);
+ return malvf;
}
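+/* The hard-coded watermark of 64 sits just above the 63-message asynchronous
+ * threshold enforced by the mailbox state machine, so the detect path engages
+ * only once a VF has more mailbox messages pending than normal operation
+ * should ever queue.
+ */
+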
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index 68686a3fd7e8..955ab810a198 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -3,50 +3,159 @@
#ifndef _ICE_SRIOV_H_
#define _ICE_SRIOV_H_
+#include "ice_virtchnl_fdir.h"
+#include "ice_vf_lib.h"
+#include "ice_virtchnl.h"
-#include "ice_type.h"
-#include "ice_controlq.h"
+/* Static VF transaction/status register def */
+#define VF_DEVICE_STATUS 0xAA
+#define VF_TRANS_PENDING_M 0x20
-/* Defining the mailbox message threshold as 63 asynchronous
- * pending messages. Normal VF functionality does not require
- * sending more than 63 asynchronous pending message.
- */
-#define ICE_ASYNC_VF_MSG_THRESHOLD 63
+/* wait defines for polling PF_PCI_CIAD register status */
+#define ICE_PCI_CIAD_WAIT_COUNT 100
+#define ICE_PCI_CIAD_WAIT_DELAY_US 1
+
+/* VF resource constraints */
+#define ICE_MIN_QS_PER_VF 1
+#define ICE_NONQ_VECS_VF 1
+#define ICE_NUM_VF_MSIX_MED 17
+#define ICE_NUM_VF_MSIX_SMALL 5
+#define ICE_NUM_VF_MSIX_MULTIQ_MIN 3
+#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
+#define ICE_MAX_VF_RESET_TRIES 40
+#define ICE_MAX_VF_RESET_SLEEP_MS 20
#ifdef CONFIG_PCI_IOV
+void ice_process_vflr_event(struct ice_pf *pf);
+int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
int
-ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
- u8 *msg, u16 msglen, struct ice_sq_cd *cd);
+ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
+
+void ice_free_vfs(struct ice_pf *pf);
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
+void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
+bool
+ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
+ u16 num_msg_proc, u16 num_msg_pending);
-u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
int
-ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
- u16 vf_id, bool *is_mal_vf);
+ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+ __be16 vlan_proto);
+
int
-ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
- u16 bitmap_len, u16 vf_id);
-int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count);
-void ice_mbx_deinit_snapshot(struct ice_hw *hw);
+ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate);
+
+int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
+
+int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
+
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
+
+int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
+
int
-ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
- u16 bitmap_len, u16 vf_id, bool *report_malvf);
+ice_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats);
+void
+ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
+void ice_print_vfs_mdd_events(struct ice_pf *pf);
+void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
+bool
+ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
#else /* CONFIG_PCI_IOV */
+static inline void ice_process_vflr_event(struct ice_pf *pf) { }
+static inline void ice_free_vfs(struct ice_pf *pf) { }
+static inline
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { }
+static inline
+void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
+static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
+static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
+static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { }
+
+static inline bool
+ice_is_malicious_vf(struct ice_pf __always_unused *pf,
+ struct ice_rq_event_info __always_unused *event,
+ u16 __always_unused num_msg_proc,
+ u16 __always_unused num_msg_pending)
+{
+ return false;
+}
+
static inline int
-ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
- u16 __always_unused vfid, u32 __always_unused v_opcode,
- u32 __always_unused v_retval, u8 __always_unused *msg,
- u16 __always_unused msglen,
- struct ice_sq_cd __always_unused *cd)
+ice_sriov_configure(struct pci_dev __always_unused *pdev,
+ int __always_unused num_vfs)
{
- return 0;
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_mac(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, u8 __always_unused *mac)
+{
+ return -EOPNOTSUPP;
}
-static inline u32
-ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support,
- u16 __always_unused link_speed)
+static inline int
+ice_get_vf_cfg(struct net_device __always_unused *netdev,
+ int __always_unused vf_id,
+ struct ifla_vf_info __always_unused *ivi)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_trust(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, bool __always_unused trusted)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_port_vlan(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, u16 __always_unused vid,
+ u8 __always_unused qos, __be16 __always_unused v_proto)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_spoofchk(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, bool __always_unused ena)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_link_state(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, int __always_unused link_state)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_bw(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, int __always_unused min_tx_rate,
+ int __always_unused max_tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
+ struct ice_q_vector __always_unused *q_vector)
{
return 0;
}
+static inline int
+ice_get_vf_stats(struct net_device __always_unused *netdev,
+ int __always_unused vf_id,
+ struct ifla_vf_stats __always_unused *vf_stats)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_SRIOV_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 475ec2afa210..25b8f6f726eb 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -41,6 +41,7 @@ static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
{ ICE_IPV4_OFOS, 14 },
{ ICE_NVGRE, 34 },
{ ICE_MAC_IL, 42 },
+ { ICE_ETYPE_IL, 54 },
{ ICE_IPV4_IL, 56 },
{ ICE_TCP_IL, 76 },
{ ICE_PROTOCOL_LAST, 0 },
@@ -65,7 +66,8 @@ static const u8 dummy_gre_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x08, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_IL 54 */
0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
0x00, 0x00, 0x00, 0x00,
@@ -86,6 +88,7 @@ static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
{ ICE_IPV4_OFOS, 14 },
{ ICE_NVGRE, 34 },
{ ICE_MAC_IL, 42 },
+ { ICE_ETYPE_IL, 54 },
{ ICE_IPV4_IL, 56 },
{ ICE_UDP_ILOS, 76 },
{ ICE_PROTOCOL_LAST, 0 },
@@ -110,7 +113,8 @@ static const u8 dummy_gre_udp_packet[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x08, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_IL 54 */
0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
0x00, 0x00, 0x00, 0x00,
@@ -131,6 +135,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
{ ICE_GENEVE, 42 },
{ ICE_VXLAN_GPE, 42 },
{ ICE_MAC_IL, 50 },
+ { ICE_ETYPE_IL, 62 },
{ ICE_IPV4_IL, 64 },
{ ICE_TCP_IL, 84 },
{ ICE_PROTOCOL_LAST, 0 },
@@ -158,7 +163,8 @@ static const u8 dummy_udp_tun_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x08, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_IL 62 */
0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
0x00, 0x01, 0x00, 0x00,
@@ -182,6 +188,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
{ ICE_GENEVE, 42 },
{ ICE_VXLAN_GPE, 42 },
{ ICE_MAC_IL, 50 },
+ { ICE_ETYPE_IL, 62 },
{ ICE_IPV4_IL, 64 },
{ ICE_UDP_ILOS, 84 },
{ ICE_PROTOCOL_LAST, 0 },
@@ -209,7 +216,8 @@ static const u8 dummy_udp_tun_udp_packet[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x08, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_IL 62 */
0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
0x00, 0x01, 0x00, 0x00,
@@ -221,6 +229,224 @@ static const u8 dummy_udp_tun_udp_packet[] = {
0x00, 0x08, 0x00, 0x00,
};
+static const struct ice_dummy_pkt_offsets
+dummy_gre_ipv6_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_NVGRE, 34 },
+ { ICE_MAC_IL, 42 },
+ { ICE_ETYPE_IL, 54 },
+ { ICE_IPV6_IL, 56 },
+ { ICE_TCP_IL, 96 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_gre_ipv6_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x66, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x2F, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xdd, /* ICE_ETYPE_IL 54 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
+ 0x00, 0x08, 0x06, 0x40,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 96 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x02, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+};
+
+static const struct ice_dummy_pkt_offsets
+dummy_gre_ipv6_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_NVGRE, 34 },
+ { ICE_MAC_IL, 42 },
+ { ICE_ETYPE_IL, 54 },
+ { ICE_IPV6_IL, 56 },
+ { ICE_UDP_ILOS, 96 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_gre_ipv6_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x2F, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xdd, /* ICE_ETYPE_IL 54 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
+ 0x00, 0x08, 0x11, 0x40,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 96 */
+ 0x00, 0x08, 0x00, 0x00,
+};
+
+static const struct ice_dummy_pkt_offsets
+dummy_udp_tun_ipv6_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_VXLAN, 42 },
+ { ICE_GENEVE, 42 },
+ { ICE_VXLAN_GPE, 42 },
+ { ICE_MAC_IL, 50 },
+ { ICE_ETYPE_IL, 62 },
+ { ICE_IPV6_IL, 64 },
+ { ICE_TCP_IL, 104 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_udp_tun_ipv6_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x6e, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x40, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
+ 0x00, 0x5a, 0x00, 0x00,
+
+ 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xdd, /* ICE_ETYPE_IL 62 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
+ 0x00, 0x08, 0x06, 0x40,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x02, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+};
+
+static const struct ice_dummy_pkt_offsets
+dummy_udp_tun_ipv6_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_VXLAN, 42 },
+ { ICE_GENEVE, 42 },
+ { ICE_VXLAN_GPE, 42 },
+ { ICE_MAC_IL, 50 },
+ { ICE_ETYPE_IL, 62 },
+ { ICE_IPV6_IL, 64 },
+ { ICE_UDP_ILOS, 104 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_udp_tun_ipv6_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x62, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
+ 0x00, 0x4e, 0x00, 0x00,
+
+ 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xdd, /* ICE_ETYPE_IL 62 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
+ 0x00, 0x08, 0x11, 0x40,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */
+ 0x00, 0x08, 0x00, 0x00,
+};
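+
+/* The length fields in these templates are pre-cooked for the fixed layouts:
+ * in dummy_udp_tun_ipv6_udp_packet, for instance, the inner IPv6 payload
+ * length of 0x0008 covers exactly the 8-byte inner UDP header, and the outer
+ * IPv4 total length of 0x62 (98) spans everything from the outer IP header
+ * through the last inner UDP byte.
+ */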
+
/* offset info for MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
{ ICE_MAC_OFOS, 0 },
@@ -500,6 +726,495 @@ static const u8 dummy_vlan_udp_ipv6_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
+/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
+static const
+struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP, 42 },
+ { ICE_IPV4_IL, 62 },
+ { ICE_TCP_IL, 82 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x58, /* IP 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
+ 0x00, 0x44, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x45, 0x00, 0x00, 0x28, /* IP 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* TCP 82 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
+static const
+struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP, 42 },
+ { ICE_IPV4_IL, 62 },
+ { ICE_UDP_ILOS, 82 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x4c, /* IP 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
+ 0x00, 0x38, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x45, 0x00, 0x00, 0x1c, /* IP 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* UDP 82 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
+static const
+struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP, 42 },
+ { ICE_IPV6_IL, 62 },
+ { ICE_TCP_IL, 102 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x6c, /* IP 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
+ 0x00, 0x58, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
+ 0x00, 0x14, 0x06, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP, 42 },
+ { ICE_IPV6_IL, 62 },
+ { ICE_UDP_ILOS, 102 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x60, /* IP 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
+ 0x00, 0x4c, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
+ 0x00, 0x08, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_OF, 54 },
+ { ICE_GTP, 62 },
+ { ICE_IPV4_IL, 82 },
+ { ICE_TCP_IL, 102 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xdd,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
+ 0x00, 0x44, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
+ 0x00, 0x44, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x45, 0x00, 0x00, 0x28, /* IP 82 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_OF, 54 },
+ { ICE_GTP, 62 },
+ { ICE_IPV4_IL, 82 },
+ { ICE_UDP_ILOS, 102 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xdd,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
+ 0x00, 0x38, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
+ 0x00, 0x38, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x45, 0x00, 0x00, 0x1c, /* IP 82 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_OF, 54 },
+ { ICE_GTP, 62 },
+ { ICE_IPV6_IL, 82 },
+ { ICE_TCP_IL, 122 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xdd,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
+ 0x00, 0x58, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
+ 0x00, 0x58, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
+ 0x00, 0x14, 0x06, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* TCP 122 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_OF, 54 },
+ { ICE_GTP, 62 },
+ { ICE_IPV6_IL, 82 },
+ { ICE_UDP_ILOS, 122 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xdd,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
+ 0x00, 0x4c, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
+ 0x00, 0x4c, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
+ 0x00, 0x08, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* UDP 122 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x00, 0x40, 0x00,
+ 0x40, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
+ 0x00, 0x00, 0x40, 0x00,
+ 0x40, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP_NO_PAY, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_OF, 54 },
+ { ICE_GTP_NO_PAY, 62 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_ipv6_gtp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xdd,
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+ 0x00, 0x6c, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00,
+};
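+
+/* The GTP flag byte distinguishes the templates: 0x34 (version 1, PT=1, E=1)
+ * in the GTP-U packets announces an extension header, and the next-extension
+ * type 0x85 that follows is the PDU session container; dummy_ipv6_gtp_packet
+ * starts its GTP header with 0x30 (E=0), carrying no extension, to suit the
+ * no-payload GTP-C/GTP-U match cases it serves.
+ */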
+
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
(DUMMY_ETH_HDR_LEN * \
@@ -1097,6 +1812,64 @@ ice_aq_get_recipe(struct ice_hw *hw,
}
/**
+ * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
+ * @hw: pointer to the HW struct
+ * @params: parameters used to update the default recipe
+ *
+ * This function only supports updating default recipes and it only supports
+ * updating a single recipe based on the lkup_idx at a time.
+ *
+ * This is done as a read-modify-write operation. First, get the current recipe
+ * contents based on the recipe's ID. Then modify the field vector index and
+ * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
+ * the pre-existing recipe with the modifications.
+ */
+int
+ice_update_recipe_lkup_idx(struct ice_hw *hw,
+ struct ice_update_recipe_lkup_idx_params *params)
+{
+ struct ice_aqc_recipe_data_elem *rcp_list;
+ u16 num_recps = ICE_MAX_NUM_RECIPES;
+ int status;
+
+ rcp_list = kcalloc(num_recps, sizeof(*rcp_list), GFP_KERNEL);
+ if (!rcp_list)
+ return -ENOMEM;
+
+ /* read current recipe list from firmware */
+ rcp_list->recipe_indx = params->rid;
+ status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
+ params->rid, status);
+ goto error_out;
+ }
+
+ /* only modify existing recipe's lkup_idx and mask if valid, while
+ * leaving all other fields the same, then update the recipe firmware
+ */
+ rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
+ if (params->mask_valid)
+ rcp_list->content.mask[params->lkup_idx] =
+ cpu_to_le16(params->mask);
+
+ if (params->ignore_valid)
+ rcp_list->content.lkup_indx[params->lkup_idx] |=
+ ICE_AQ_RECIPE_LKUP_IGNORE;
+
+ status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
+ if (status)
+ ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
+ params->rid, params->lkup_idx, params->fv_idx,
+ params->mask, params->mask_valid ? "true" : "false",
+ status);
+
+error_out:
+ kfree(rcp_list);
+ return status;
+}
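+
+/* A minimal caller sketch (rid, lkup_idx, fv_idx and mask are placeholders the
+ * caller must supply; nothing in this patch mandates these values):
+ *
+ *	struct ice_update_recipe_lkup_idx_params params = {
+ *		.rid = rid,
+ *		.lkup_idx = lkup_idx,
+ *		.fv_idx = fv_idx,
+ *		.mask = mask,
+ *		.mask_valid = true,
+ *		.ignore_valid = false,
+ *	};
+ *	int err = ice_update_recipe_lkup_idx(&pf->hw, &params);
+ *
+ * mask_valid = true also rewrites content.mask[lkup_idx]; ignore_valid = true
+ * would additionally OR ICE_AQ_RECIPE_LKUP_IGNORE into the lookup index.
+ */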
+
+/**
* ice_aq_map_recipe_to_profile - Map recipe to packet profile
* @hw: pointer to the HW struct
* @profile_id: package profile ID to associate the recipe with
@@ -1539,6 +2312,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
u16 vlan_id = ICE_MAX_VLAN_ID + 1;
+ u16 vlan_tpid = ETH_P_8021Q;
void *daddr = NULL;
u16 eth_hdr_sz;
u8 *eth_hdr;
@@ -1611,6 +2385,8 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
break;
case ICE_SW_LKUP_VLAN:
vlan_id = f_info->l_data.vlan.vlan_id;
+ if (f_info->l_data.vlan.tpid_valid)
+ vlan_tpid = f_info->l_data.vlan.tpid;
if (f_info->fltr_act == ICE_FWD_TO_VSI ||
f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
act |= ICE_SINGLE_ACT_PRUNE;
@@ -1653,6 +2429,8 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
if (!(vlan_id > ICE_MAX_VLAN_ID)) {
off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
*off = cpu_to_be16(vlan_id);
+ off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
+ *off = cpu_to_be16(vlan_tpid);
}
/* Create the switch rule with the final dummy Ethernet header */
@@ -3755,6 +4533,7 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
{ ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
{ ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
{ ICE_ETYPE_OL, { 0 } },
+ { ICE_ETYPE_IL, { 0 } },
{ ICE_VLAN_OFOS, { 2, 0 } },
{ ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
{ ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
@@ -3767,13 +4546,16 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
{ ICE_UDP_ILOS, { 0, 2 } },
{ ICE_VXLAN, { 8, 10, 12, 14 } },
{ ICE_GENEVE, { 8, 10, 12, 14 } },
- { ICE_NVGRE, { 0, 2, 4, 6 } },
+ { ICE_NVGRE, { 0, 2, 4, 6 } },
+ { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
+ { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
};
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
{ ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
{ ICE_MAC_IL, ICE_MAC_IL_HW },
{ ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
+ { ICE_ETYPE_IL, ICE_ETYPE_IL_HW },
{ ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
{ ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
{ ICE_IPV4_IL, ICE_IPV4_IL_HW },
@@ -3784,7 +4566,9 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
{ ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
{ ICE_VXLAN, ICE_UDP_OF_HW },
{ ICE_GENEVE, ICE_UDP_OF_HW },
- { ICE_NVGRE, ICE_GRE_OF_HW },
+ { ICE_NVGRE, ICE_GRE_OF_HW },
+ { ICE_GTP, ICE_UDP_OF_HW },
+ { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
};
/**
@@ -3868,6 +4652,23 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
}
/**
+ * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
+ *
+ * As protocol id for outer vlan is different in dvm and svm, if dvm is
+ * supported protocol array record for outer vlan has to be modified to
+ * reflect the value proper for DVM.
+ */
+void ice_change_proto_id_to_dvm(void)
+{
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
+ if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
+ ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
+ ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
+}
+
+/**
* ice_prot_type_to_id - get protocol ID from protocol type
* @type: protocol type
* @id: pointer to variable that will receive the ID
@@ -4427,41 +5228,6 @@ ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
}
/**
- * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
- * @hw: pointer to hardware structure
- * @lkups: lookup elements or match criteria for the advanced recipe, one
- * structure per protocol header
- * @lkups_cnt: number of protocols
- * @bm: bitmap of field vectors to consider
- * @fv_list: pointer to a list that holds the returned field vectors
- */
-static int
-ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
- unsigned long *bm, struct list_head *fv_list)
-{
- u8 *prot_ids;
- int status;
- u16 i;
-
- prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL);
- if (!prot_ids)
- return -ENOMEM;
-
- for (i = 0; i < lkups_cnt; i++)
- if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
- status = -EIO;
- goto free_mem;
- }
-
- /* Find field vectors that include all specified protocol types */
- status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
-
-free_mem:
- kfree(prot_ids);
- return status;
-}
-
-/**
* ice_tun_type_match_word - determine if tun type needs a match mask
* @tun_type: tunnel type
* @mask: mask to be used for the tunnel
@@ -4472,6 +5238,8 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
case ICE_SW_TUN_GENEVE:
case ICE_SW_TUN_VXLAN:
case ICE_SW_TUN_NVGRE:
+ case ICE_SW_TUN_GTPU:
+ case ICE_SW_TUN_GTPC:
*mask = ICE_TUN_FLAG_MASK;
return true;
@@ -4537,6 +5305,12 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
case ICE_SW_TUN_NVGRE:
prof_type = ICE_PROF_TUN_GRE;
break;
+ case ICE_SW_TUN_GTPU:
+ prof_type = ICE_PROF_TUN_GTPU;
+ break;
+ case ICE_SW_TUN_GTPC:
+ prof_type = ICE_PROF_TUN_GTPC;
+ break;
case ICE_SW_TUN_AND_NON_TUN:
default:
prof_type = ICE_PROF_ALL;
@@ -4609,11 +5383,11 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* Get bitmap of field vectors (profiles) that are compatible with the
* rule request; only these will be searched in the subsequent call to
- * ice_get_fv.
+ * ice_get_sw_fv_list.
*/
ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
- status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
+ status = ice_get_sw_fv_list(hw, lkup_exts, fv_bitmap, &rm->fv_list);
if (status)
goto err_unroll;
@@ -4737,34 +5511,126 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
const u8 **pkt, u16 *pkt_len,
const struct ice_dummy_pkt_offsets **offsets)
{
- bool tcp = false, udp = false, ipv6 = false, vlan = false;
+ bool inner_tcp = false, inner_udp = false, outer_ipv6 = false;
+ bool vlan = false, inner_ipv6 = false, gtp_no_pay = false;
u16 i;
for (i = 0; i < lkups_cnt; i++) {
if (lkups[i].type == ICE_UDP_ILOS)
- udp = true;
+ inner_udp = true;
else if (lkups[i].type == ICE_TCP_IL)
- tcp = true;
+ inner_tcp = true;
else if (lkups[i].type == ICE_IPV6_OFOS)
- ipv6 = true;
+ outer_ipv6 = true;
else if (lkups[i].type == ICE_VLAN_OFOS)
vlan = true;
else if (lkups[i].type == ICE_ETYPE_OL &&
lkups[i].h_u.ethertype.ethtype_id ==
cpu_to_be16(ICE_IPV6_ETHER_ID) &&
lkups[i].m_u.ethertype.ethtype_id ==
- cpu_to_be16(0xFFFF))
- ipv6 = true;
+ cpu_to_be16(0xFFFF))
+ outer_ipv6 = true;
+ else if (lkups[i].type == ICE_ETYPE_IL &&
+ lkups[i].h_u.ethertype.ethtype_id ==
+ cpu_to_be16(ICE_IPV6_ETHER_ID) &&
+ lkups[i].m_u.ethertype.ethtype_id ==
+ cpu_to_be16(0xFFFF))
+ inner_ipv6 = true;
+ else if (lkups[i].type == ICE_IPV6_IL)
+ inner_ipv6 = true;
+ else if (lkups[i].type == ICE_GTP_NO_PAY)
+ gtp_no_pay = true;
+ }
+
+ if (tun_type == ICE_SW_TUN_GTPU) {
+ if (outer_ipv6) {
+ if (gtp_no_pay) {
+ *pkt = dummy_ipv6_gtp_packet;
+ *pkt_len = sizeof(dummy_ipv6_gtp_packet);
+ *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
+ } else if (inner_ipv6) {
+ if (inner_udp) {
+ *pkt = dummy_ipv6_gtpu_ipv6_udp_packet;
+ *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_udp_packet);
+ *offsets = dummy_ipv6_gtpu_ipv6_udp_packet_offsets;
+ } else {
+ *pkt = dummy_ipv6_gtpu_ipv6_tcp_packet;
+ *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_tcp_packet);
+ *offsets = dummy_ipv6_gtpu_ipv6_tcp_packet_offsets;
+ }
+ } else {
+ if (inner_udp) {
+ *pkt = dummy_ipv6_gtpu_ipv4_udp_packet;
+ *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_udp_packet);
+ *offsets = dummy_ipv6_gtpu_ipv4_udp_packet_offsets;
+ } else {
+ *pkt = dummy_ipv6_gtpu_ipv4_tcp_packet;
+ *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_tcp_packet);
+ *offsets = dummy_ipv6_gtpu_ipv4_tcp_packet_offsets;
+ }
+ }
+ } else {
+ if (gtp_no_pay) {
+ *pkt = dummy_ipv4_gtpu_ipv4_packet;
+ *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
+ *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
+ } else if (inner_ipv6) {
+ if (inner_udp) {
+ *pkt = dummy_ipv4_gtpu_ipv6_udp_packet;
+ *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_udp_packet);
+ *offsets = dummy_ipv4_gtpu_ipv6_udp_packet_offsets;
+ } else {
+ *pkt = dummy_ipv4_gtpu_ipv6_tcp_packet;
+ *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_tcp_packet);
+ *offsets = dummy_ipv4_gtpu_ipv6_tcp_packet_offsets;
+ }
+ } else {
+ if (inner_udp) {
+ *pkt = dummy_ipv4_gtpu_ipv4_udp_packet;
+ *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_udp_packet);
+ *offsets = dummy_ipv4_gtpu_ipv4_udp_packet_offsets;
+ } else {
+ *pkt = dummy_ipv4_gtpu_ipv4_tcp_packet;
+ *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_tcp_packet);
+ *offsets = dummy_ipv4_gtpu_ipv4_tcp_packet_offsets;
+ }
+ }
+ }
+ return;
+ }
+
+ if (tun_type == ICE_SW_TUN_GTPC) {
+ if (outer_ipv6) {
+ *pkt = dummy_ipv6_gtp_packet;
+ *pkt_len = sizeof(dummy_ipv6_gtp_packet);
+ *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
+ } else {
+ *pkt = dummy_ipv4_gtpu_ipv4_packet;
+ *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
+ *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
+ }
+ return;
}
if (tun_type == ICE_SW_TUN_NVGRE) {
- if (tcp) {
+ if (inner_tcp && inner_ipv6) {
+ *pkt = dummy_gre_ipv6_tcp_packet;
+ *pkt_len = sizeof(dummy_gre_ipv6_tcp_packet);
+ *offsets = dummy_gre_ipv6_tcp_packet_offsets;
+ return;
+ }
+ if (inner_tcp) {
*pkt = dummy_gre_tcp_packet;
*pkt_len = sizeof(dummy_gre_tcp_packet);
*offsets = dummy_gre_tcp_packet_offsets;
return;
}
-
+ if (inner_ipv6) {
+ *pkt = dummy_gre_ipv6_udp_packet;
+ *pkt_len = sizeof(dummy_gre_ipv6_udp_packet);
+ *offsets = dummy_gre_ipv6_udp_packet_offsets;
+ return;
+ }
*pkt = dummy_gre_udp_packet;
*pkt_len = sizeof(dummy_gre_udp_packet);
*offsets = dummy_gre_udp_packet_offsets;
@@ -4773,20 +5639,31 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
if (tun_type == ICE_SW_TUN_VXLAN ||
tun_type == ICE_SW_TUN_GENEVE) {
- if (tcp) {
+ if (inner_tcp && inner_ipv6) {
+ *pkt = dummy_udp_tun_ipv6_tcp_packet;
+ *pkt_len = sizeof(dummy_udp_tun_ipv6_tcp_packet);
+ *offsets = dummy_udp_tun_ipv6_tcp_packet_offsets;
+ return;
+ }
+ if (inner_tcp) {
*pkt = dummy_udp_tun_tcp_packet;
*pkt_len = sizeof(dummy_udp_tun_tcp_packet);
*offsets = dummy_udp_tun_tcp_packet_offsets;
return;
}
-
+ if (inner_ipv6) {
+ *pkt = dummy_udp_tun_ipv6_udp_packet;
+ *pkt_len = sizeof(dummy_udp_tun_ipv6_udp_packet);
+ *offsets = dummy_udp_tun_ipv6_udp_packet_offsets;
+ return;
+ }
*pkt = dummy_udp_tun_udp_packet;
*pkt_len = sizeof(dummy_udp_tun_udp_packet);
*offsets = dummy_udp_tun_udp_packet_offsets;
return;
}
- if (udp && !ipv6) {
+ if (inner_udp && !outer_ipv6) {
if (vlan) {
*pkt = dummy_vlan_udp_packet;
*pkt_len = sizeof(dummy_vlan_udp_packet);
@@ -4797,7 +5674,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
*pkt_len = sizeof(dummy_udp_packet);
*offsets = dummy_udp_packet_offsets;
return;
- } else if (udp && ipv6) {
+ } else if (inner_udp && outer_ipv6) {
if (vlan) {
*pkt = dummy_vlan_udp_ipv6_packet;
*pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
@@ -4808,7 +5685,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
*pkt_len = sizeof(dummy_udp_ipv6_packet);
*offsets = dummy_udp_ipv6_packet_offsets;
return;
- } else if ((tcp && ipv6) || ipv6) {
+ } else if ((inner_tcp && outer_ipv6) || outer_ipv6) {
if (vlan) {
*pkt = dummy_vlan_tcp_ipv6_packet;
*pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
@@ -4885,6 +5762,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
len = sizeof(struct ice_ether_hdr);
break;
case ICE_ETYPE_OL:
+ case ICE_ETYPE_IL:
len = sizeof(struct ice_ethtype_hdr);
break;
case ICE_VLAN_OFOS:
@@ -4913,6 +5791,10 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
case ICE_GENEVE:
len = sizeof(struct ice_udp_tnl_hdr);
break;
+ case ICE_GTP_NO_PAY:
+ case ICE_GTP:
+ len = sizeof(struct ice_udp_gtp_hdr);
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index d8334beaaa8a..ed3d1d03befa 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -14,6 +14,15 @@
#define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF
+/* Switch Profile IDs for Profile related switch rules */
+#define ICE_PROFID_IPV4_GTPC_TEID 41
+#define ICE_PROFID_IPV4_GTPC_NO_TEID 42
+#define ICE_PROFID_IPV4_GTPU_TEID 43
+#define ICE_PROFID_IPV6_GTPC_TEID 44
+#define ICE_PROFID_IPV6_GTPC_NO_TEID 45
+#define ICE_PROFID_IPV6_GTPU_TEID 46
+#define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER 70
+
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
@@ -33,15 +42,6 @@ struct ice_vsi_ctx {
struct ice_q_ctx *rdma_q_ctx[ICE_MAX_TRAFFIC_CLASS];
};
-enum ice_sw_fwd_act_type {
- ICE_FWD_TO_VSI = 0,
- ICE_FWD_TO_VSI_LIST, /* Do not use this when adding filter */
- ICE_FWD_TO_Q,
- ICE_FWD_TO_QGRP,
- ICE_DROP_PACKET,
- ICE_INVAL_ACT
-};
-
/* Switch recipe ID enum values are specific to hardware */
enum ice_sw_lkup_type {
ICE_SW_LKUP_ETHERTYPE = 0,
@@ -86,6 +86,8 @@ struct ice_fltr_info {
} mac_vlan;
struct {
u16 vlan_id;
+ u16 tpid;
+ u8 tpid_valid;
} vlan;
/* Set lkup_type as ICE_SW_LKUP_ETHERTYPE
* if just using ethertype as filter. Set lkup_type as
@@ -125,6 +127,15 @@ struct ice_fltr_info {
u8 lan_en; /* Indicate if packet can be forwarded to the uplink */
};
+struct ice_update_recipe_lkup_idx_params {
+ u16 rid;
+ u16 fv_idx;
+ bool ignore_valid;
+ u16 mask;
+ bool mask_valid;
+ u8 lkup_idx;
+};
+
struct ice_adv_lkup_elem {
enum ice_protocol_type type;
union ice_prot_hdr h_u; /* Header values */
@@ -367,4 +378,8 @@ void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd);
+int
+ice_update_recipe_lkup_idx(struct ice_hw *hw,
+ struct ice_update_recipe_lkup_idx_params *params);
+void ice_change_proto_id_to_dvm(void);
#endif /* _ICE_SWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 65cf32eb4046..3acd9f921c44 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -24,6 +24,12 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
lkups_cnt++;
+ if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
+ lkups_cnt++;
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS)
+ lkups_cnt++;
+
if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
@@ -33,9 +39,7 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
lkups_cnt++;
- /* currently inner etype filter isn't supported */
- if ((flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) &&
- fltr->tunnel_type == TNL_LAST)
+ if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID)
lkups_cnt++;
/* are MAC fields specified? */
@@ -64,6 +68,11 @@ static enum ice_protocol_type ice_proto_type_from_mac(bool inner)
return inner ? ICE_MAC_IL : ICE_MAC_OFOS;
}
+static enum ice_protocol_type ice_proto_type_from_etype(bool inner)
+{
+ return inner ? ICE_ETYPE_IL : ICE_ETYPE_OL;
+}
+
static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner)
{
return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS;
@@ -96,6 +105,11 @@ ice_proto_type_from_tunnel(enum ice_tunnel_type type)
return ICE_GENEVE;
case TNL_GRETAP:
return ICE_NVGRE;
+ case TNL_GTPU:
+ /* NO_PAY profiles will not work with GTP-U */
+ return ICE_GTP;
+ case TNL_GTPC:
+ return ICE_GTP_NO_PAY;
default:
return 0;
}
@@ -111,6 +125,10 @@ ice_sw_type_from_tunnel(enum ice_tunnel_type type)
return ICE_SW_TUN_GENEVE;
case TNL_GRETAP:
return ICE_SW_TUN_NVGRE;
+ case TNL_GTPU:
+ return ICE_SW_TUN_GTPU;
+ case TNL_GTPC:
+ return ICE_SW_TUN_GTPC;
default:
return ICE_NON_TUN;
}
@@ -137,7 +155,15 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
break;
case TNL_GRETAP:
list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id;
- memcpy(&list[i].m_u.nvgre_hdr.tni_flow, "\xff\xff\xff\xff", 4);
+ memcpy(&list[i].m_u.nvgre_hdr.tni_flow,
+ "\xff\xff\xff\xff", 4);
+ i++;
+ break;
+ case TNL_GTPC:
+ case TNL_GTPU:
+ list[i].h_u.gtp_hdr.teid = fltr->tenant_id;
+ memcpy(&list[i].m_u.gtp_hdr.teid,
+ "\xff\xff\xff\xff", 4);
i++;
break;
default:
@@ -145,6 +171,33 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
}
}
+ if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) {
+ list[i].type = ice_proto_type_from_mac(false);
+ ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
+ hdr->l2_key.dst_mac);
+ ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
+ hdr->l2_mask.dst_mac);
+ i++;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS &&
+ (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
+ list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
+
+ if (fltr->gtp_pdu_info_masks.pdu_type) {
+ list[i].h_u.gtp_hdr.pdu_type =
+ fltr->gtp_pdu_info_keys.pdu_type << 4;
+ memcpy(&list[i].m_u.gtp_hdr.pdu_type, "\xf0", 1);
+ }
+
+ if (fltr->gtp_pdu_info_masks.qfi) {
+ list[i].h_u.gtp_hdr.qfi = fltr->gtp_pdu_info_keys.qfi;
+ memcpy(&list[i].m_u.gtp_hdr.qfi, "\x3f", 1);
+ }
+
+ i++;
+ }
+
if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
list[i].type = ice_proto_type_from_ipv4(false);
@@ -224,8 +277,10 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
headers = &tc_fltr->inner_headers;
inner = true;
- } else if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
- list[i].type = ICE_ETYPE_OL;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
+ list[i].type = ice_proto_type_from_etype(inner);
list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
i++;
@@ -344,6 +399,12 @@ static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
if (netif_is_gretap(tunnel_dev) ||
netif_is_ip6gretap(tunnel_dev))
return TNL_GRETAP;
+
+ /* Assume GTP-U by default in case of GTP netdev.
+ * GTP-C may be selected later, based on enc_dst_port.
+ */
+ if (netif_is_gtp(tunnel_dev))
+ return TNL_GTPU;
return TNL_LAST;
}
@@ -743,6 +804,40 @@ ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule)
return NULL;
}
+/**
+ * ice_parse_gtp_type - Sets GTP tunnel type to GTP-U or GTP-C
+ * @match: Flow match structure
+ * @fltr: Pointer to filter structure
+ *
+ * GTP-C/GTP-U is selected based on destination port number (enc_dst_port).
+ * Before calling this function, fltr->tunnel_type should be set to TNL_GTPU,
+ * therefore making GTP-U the default choice (when destination port number is
+ * not specified).
+ */
+static int
+ice_parse_gtp_type(struct flow_match_ports match,
+ struct ice_tc_flower_fltr *fltr)
+{
+ u16 dst_port;
+
+ if (match.key->dst) {
+ dst_port = be16_to_cpu(match.key->dst);
+
+ switch (dst_port) {
+ case 2152:
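+			/* 2152 is the standard GTP-U port; keep the default TNL_GTPU */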
+ break;
+ case 2123:
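+			/* 2123 is the standard GTP-C port */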
+ fltr->tunnel_type = TNL_GTPC;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported GTP port number");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int
ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
struct ice_tc_flower_fltr *fltr)
@@ -798,8 +893,28 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
struct flow_match_ports match;
flow_rule_match_enc_ports(rule, &match);
- if (ice_tc_set_port(match, fltr, headers, true))
- return -EINVAL;
+
+ if (fltr->tunnel_type != TNL_GTPU) {
+ if (ice_tc_set_port(match, fltr, headers, true))
+ return -EINVAL;
+ } else {
+ if (ice_parse_gtp_type(match, fltr))
+ return -EINVAL;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ struct flow_match_enc_opts match;
+
+ flow_rule_match_enc_opts(rule, &match);
+
+ memcpy(&fltr->gtp_pdu_info_keys, &match.key->data[0],
+ sizeof(struct gtp_pdu_session_info));
+
+ memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
+ sizeof(struct gtp_pdu_session_info));
+
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS;
}
return 0;
@@ -837,6 +952,7 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
BIT(FLOW_DISSECTOR_KEY_PORTS))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
@@ -1059,12 +1175,24 @@ ice_handle_tclass_action(struct ice_vsi *vsi,
* this code won't do anything
* 2. For non-tunnel, if user didn't specify MAC address, add implicit
* dest MAC to be lower netdev's active unicast MAC address
+ * 3. For tunnel, the TC filter through the flower classifier currently has
+ * no provision for the user to specify the outer DMAC, hence the driver
+ * implicitly adds the outer dest MAC to be the lower netdev's active
+ * unicast MAC address.
*/
- if (!(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC)) {
- ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
- main_vsi->netdev->dev_addr);
- eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
+ if (fltr->tunnel_type != TNL_LAST &&
+ !(fltr->flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC))
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DST_MAC;
+
+ if (fltr->tunnel_type == TNL_LAST &&
+ !(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC))
fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
+
+ if (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_ENC_DST_MAC)) {
+ ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
+ vsi->netdev->dev_addr);
+ memset(fltr->outer_headers.l2_mask.dst_mac, 0xff, ETH_ALEN);
}
/* validate specified dest MAC address, make sure either it belongs to
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index 319049477959..e25e958f4396 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -22,6 +22,7 @@
#define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT BIT(15)
#define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16)
#define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17)
+#define ICE_TC_FLWR_FIELD_ENC_OPTS BIT(18)
#define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF
@@ -119,6 +120,8 @@ struct ice_tc_flower_fltr {
struct ice_tc_flower_lyr_2_4_hdrs inner_headers;
struct ice_vsi *src_vsi;
__be32 tenant_id;
+ struct gtp_pdu_session_info gtp_pdu_info_keys;
+ struct gtp_pdu_session_info gtp_pdu_info_masks;
u32 flags;
u8 tunnel_type;
struct ice_tc_flower_action action;
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
index cf685247c07a..ae98d5a8ff60 100644
--- a/drivers/net/ethernet/intel/ice/ice_trace.h
+++ b/drivers/net/ethernet/intel/ice/ice_trace.h
@@ -216,6 +216,30 @@ DEFINE_EVENT(ice_xmit_template, name, \
DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring);
DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring_drop);
+DECLARE_EVENT_CLASS(ice_tx_tstamp_template,
+ TP_PROTO(struct sk_buff *skb, int idx),
+
+ TP_ARGS(skb, idx),
+
+ TP_STRUCT__entry(__field(void *, skb)
+ __field(int, idx)),
+
+ TP_fast_assign(__entry->skb = skb;
+ __entry->idx = idx;),
+
+ TP_printk("skb %pK idx %d",
+ __entry->skb, __entry->idx)
+);
+#define DEFINE_TX_TSTAMP_OP_EVENT(name) \
+DEFINE_EVENT(ice_tx_tstamp_template, name, \
+ TP_PROTO(struct sk_buff *skb, int idx), \
+ TP_ARGS(skb, idx))
+
+DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_request);
+DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_req);
+DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_done);
+DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_complete);
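+
+/* Each DEFINE_EVENT above generates a trace_<name>() helper, so a call site
+ * in the Tx timestamp path (hypothetical example, not part of this patch)
+ * would look like:
+ *
+ *	trace_ice_tx_tstamp_request(skb, idx);
+ */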
+
/* End tracepoints */
#endif /* _ICE_TRACE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 3e38695f1c9d..f9bf008471c9 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -173,6 +173,8 @@ tx_skip_free:
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
+ tx_ring->next_dd = ICE_RING_QUARTER(tx_ring) - 1;
+ tx_ring->next_rs = ICE_RING_QUARTER(tx_ring) - 1;
if (!tx_ring->netdev)
return;
@@ -221,8 +223,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
struct ice_tx_buf *tx_buf;
/* get the bql data ready */
- if (!ice_ring_is_xdp(tx_ring))
- netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
+ netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
tx_buf = &tx_ring->tx_buf[i];
tx_desc = ICE_TX_DESC(tx_ring, i);
@@ -311,10 +312,6 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
tx_ring->next_to_clean = i;
ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
-
- if (ice_ring_is_xdp(tx_ring))
- return !!budget;
-
netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);
#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
@@ -983,15 +980,17 @@ static struct sk_buff *
ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct xdp_buff *xdp)
{
+ unsigned int metasize = xdp->data - xdp->data_meta;
unsigned int size = xdp->data_end - xdp->data;
unsigned int headlen;
struct sk_buff *skb;
/* prefetch first cache line of first page */
- net_prefetch(xdp->data);
+ net_prefetch(xdp->data_meta);
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ ICE_RX_HDR_SIZE + metasize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
@@ -1003,8 +1002,13 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
- sizeof(long)));
+ memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
+ ALIGN(headlen + metasize, sizeof(long)));
+
+ if (metasize) {
+ skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
/* if we exhaust the linear part then add what is left as a frag */
size -= headlen;
@@ -1080,7 +1084,7 @@ ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
{
/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
- if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
+ if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
return false;
rx_ring->rx_stats.non_eop_descs++;
@@ -1142,7 +1146,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
* hardware wrote DD then it will be non-zero
*/
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
- if (!ice_test_staterr(rx_desc, stat_err_bits))
+ if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
break;
/* This memory barrier is needed to keep us from reading
@@ -1156,7 +1160,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
struct ice_vsi *ctrl_vsi = rx_ring->vsi;
if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
- ctrl_vsi->vf_id != ICE_INVAL_VFID)
+ ctrl_vsi->vf)
ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
ice_put_rx_buf(rx_ring, NULL, 0);
cleaned_count++;
@@ -1228,14 +1232,13 @@ construct_skb:
continue;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
- if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
+ if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
+ stat_err_bits))) {
dev_kfree_skb_any(skb);
continue;
}
- stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
- if (ice_test_staterr(rx_desc, stat_err_bits))
- vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
+ vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
/* pad the skb if needed, to make a valid ethernet frame */
if (eth_skb_pad(skb)) {
@@ -1460,7 +1463,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
bool wd;
if (tx_ring->xsk_pool)
- wd = ice_clean_tx_irq_zc(tx_ring, budget);
+ wd = ice_xmit_zc(tx_ring, ICE_DESC_UNUSED(tx_ring), budget);
else if (ice_ring_is_xdp(tx_ring))
wd = true;
else
@@ -1513,7 +1516,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
/* Exit the polling mode, but don't re-enable interrupts if stack might
* poll us due to busy-polling
*/
- if (likely(napi_complete_done(napi, work_done))) {
+ if (napi_complete_done(napi, work_done)) {
ice_net_dim(q_vector);
ice_enable_interrupt(q_vector);
} else {
@@ -1917,12 +1920,16 @@ ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
return;
- /* currently, we always assume 802.1Q for VLAN insertion as VLAN
- * insertion for 802.1AD is not supported
+ /* the VLAN ethertype/tpid is determined by VSI configuration and netdev
+ * feature flags, which the driver only allows either 802.1Q or 802.1ad
+ * VLAN offloads exclusively so we only care about the VLAN ID here
*/
if (skb_vlan_tag_present(skb)) {
first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
- first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
+ if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
+ first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
+ else
+ first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
}
ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
@@ -2295,6 +2302,13 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* prepare the VLAN tagging flags for Tx */
ice_tx_prepare_vlan_flags(tx_ring, first);
+ if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
+ offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
+ (ICE_TX_CTX_DESC_IL2TAG2 <<
+ ICE_TXD_CTX_QW1_CMD_S));
+ offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
+ ICE_TX_FLAGS_VLAN_S;
+ }
/* set up TSO offload */
tso = ice_tso(first, &offload);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index b7b3bd4816f0..cead3eb149bd 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -13,7 +13,6 @@
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17
-#define ICE_TX_THRESH 32
/* The size limit for a transmit buffer in a descriptor is (16K - 1).
* In order to align with the read requests we will align the value to
@@ -111,6 +110,8 @@ static inline int ice_skb_pad(void)
(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
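+
+/* One quarter of the ring size; used as the XDP Tx clean/RS threshold in
+ * place of the old fixed ICE_TX_THRESH, so the threshold scales with the
+ * configured ring length.
+ */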
+#define ICE_RING_QUARTER(R) ((R)->count >> 2)
+
#define ICE_TX_FLAGS_TSO BIT(0)
#define ICE_TX_FLAGS_HW_VLAN BIT(1)
#define ICE_TX_FLAGS_SW_VLAN BIT(2)
@@ -122,6 +123,7 @@ static inline int ice_skb_pad(void)
#define ICE_TX_FLAGS_IPV4 BIT(5)
#define ICE_TX_FLAGS_IPV6 BIT(6)
#define ICE_TX_FLAGS_TUNNEL BIT(7)
+#define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN BIT(8)
#define ICE_TX_FLAGS_VLAN_M 0xffff0000
#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000
#define ICE_TX_FLAGS_VLAN_PR_S 29
@@ -321,18 +323,21 @@ struct ice_tx_ring {
u16 count; /* Number of descriptors */
u16 q_index; /* Queue number of ring */
/* stats structs */
- struct ice_q_stats stats;
- struct u64_stats_sync syncp;
struct ice_txq_stats tx_stats;
-
/* CL3 - 3rd cacheline starts here */
+ struct ice_q_stats stats;
+ struct u64_stats_sync syncp;
struct rcu_head rcu; /* to avoid race on free */
DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
struct ice_channel *ch;
struct ice_ptp_tx *tx_tstamps;
spinlock_t tx_lock;
u32 txq_teid; /* Added Tx queue TEID */
+ /* CL4 - 4th cacheline starts here */
+ u16 xdp_tx_active;
#define ICE_TX_FLAGS_RING_XDP BIT(0)
+#define ICE_TX_FLAGS_RING_VLAN_L2TAG1 BIT(1)
+#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
u8 flags;
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_tx;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 0e87b98e0966..7ee38d02d1e5 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -209,9 +209,14 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring,
void
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
- if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (vlan_tag & VLAN_VID_MASK))
+ netdev_features_t features = rx_ring->netdev->features;
+ bool non_zero_vlan = !!(vlan_tag & VLAN_VID_MASK);
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_RX) && non_zero_vlan)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ else if ((features & NETIF_F_HW_VLAN_STAG_RX) && non_zero_vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);
+
napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
@@ -222,6 +227,7 @@ ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
{
unsigned int total_bytes = 0, total_pkts = 0;
+ u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
u16 ntc = xdp_ring->next_to_clean;
struct ice_tx_desc *next_dd_desc;
u16 next_dd = xdp_ring->next_dd;
@@ -233,7 +239,7 @@ static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
return;
- for (i = 0; i < ICE_TX_THRESH; i++) {
+ for (i = 0; i < tx_thresh; i++) {
tx_buf = &xdp_ring->tx_buf[ntc];
total_bytes += tx_buf->bytecount;
@@ -254,9 +260,9 @@ static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
}
next_dd_desc->cmd_type_offset_bsz = 0;
- xdp_ring->next_dd = xdp_ring->next_dd + ICE_TX_THRESH;
+ xdp_ring->next_dd = xdp_ring->next_dd + tx_thresh;
if (xdp_ring->next_dd > xdp_ring->count)
- xdp_ring->next_dd = ICE_TX_THRESH - 1;
+ xdp_ring->next_dd = tx_thresh - 1;
xdp_ring->next_to_clean = ntc;
ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);
}
@@ -269,12 +275,13 @@ static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
*/
int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
{
+ u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
u16 i = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
dma_addr_t dma;
- if (ICE_DESC_UNUSED(xdp_ring) < ICE_TX_THRESH)
+ if (ICE_DESC_UNUSED(xdp_ring) < tx_thresh)
ice_clean_xdp_irq(xdp_ring);
if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
@@ -300,13 +307,14 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0,
size, 0);
+ xdp_ring->xdp_tx_active++;
i++;
if (i == xdp_ring->count) {
i = 0;
tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
tx_desc->cmd_type_offset_bsz |=
cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs = ICE_TX_THRESH - 1;
+ xdp_ring->next_rs = tx_thresh - 1;
}
xdp_ring->next_to_use = i;
@@ -314,7 +322,7 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
tx_desc->cmd_type_offset_bsz |=
cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs += ICE_TX_THRESH;
+ xdp_ring->next_rs += tx_thresh;
}
return ICE_XDP_TX;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 11b6c1601986..c7d2954dc9ea 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -7,7 +7,7 @@
/**
* ice_test_staterr - tests bits in Rx descriptor status and error fields
- * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @status_err_n: Rx descriptor status_error0 or status_error1 bits
* @stat_err_bits: value to mask
*
* This function does some fast chicanery in order to return the
@@ -16,9 +16,9 @@
* at offset zero.
*/
static inline bool
-ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
+ice_test_staterr(__le16 status_err_n, const u16 stat_err_bits)
{
- return !!(rx_desc->wb.status_error0 & cpu_to_le16(stat_err_bits));
+ return !!(status_err_n & cpu_to_le16(stat_err_bits));
}
static inline __le64
@@ -32,6 +32,30 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
}
/**
+ * ice_get_vlan_tag_from_rx_desc - get VLAN from Rx flex descriptor
+ * @rx_desc: Rx 32b flex descriptor with RXDID=2
+ *
+ * The OS and current PF implementation only support stripping a single VLAN tag
+ * at a time, so there should only ever be 0 or 1 tags in the l2tag* fields. If
+ * one is found return the tag, else return 0 to mean no VLAN tag was found.
+ */
+static inline u16
+ice_get_vlan_tag_from_rx_desc(union ice_32b_rx_flex_desc *rx_desc)
+{
+ u16 stat_err_bits;
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
+ if (ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
+ return le16_to_cpu(rx_desc->wb.l2tag1);
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S);
+ if (ice_test_staterr(rx_desc->wb.status_error1, stat_err_bits))
+ return le16_to_cpu(rx_desc->wb.l2tag2_2nd);
+
+ return 0;
+}
+
+/**
* ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register
* @xdp_ring: XDP Tx ring
*
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 546145dd1f02..f2a518a1fd94 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -9,12 +9,14 @@
#define ICE_CHNL_MAX_TC 16
#include "ice_hw_autogen.h"
+#include "ice_devids.h"
#include "ice_osdep.h"
#include "ice_controlq.h"
#include "ice_lan_tx_rx.h"
#include "ice_flex_type.h"
#include "ice_protocol_type.h"
#include "ice_sbq_cmd.h"
+#include "ice_vlan_mode.h"
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -54,6 +56,11 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
#define ICE_DBG_AQ_DESC BIT_ULL(25)
#define ICE_DBG_AQ_DESC_BUF BIT_ULL(26)
#define ICE_DBG_AQ_CMD BIT_ULL(27)
+#define ICE_DBG_AQ (ICE_DBG_AQ_MSG | \
+ ICE_DBG_AQ_DESC | \
+ ICE_DBG_AQ_DESC_BUF | \
+ ICE_DBG_AQ_CMD)
+
#define ICE_DBG_USER BIT_ULL(31)
enum ice_aq_res_ids {
@@ -920,6 +927,9 @@ struct ice_hw {
struct udp_tunnel_nic_shared udp_tunnel_shared;
struct udp_tunnel_nic_info udp_tunnel_nic;
+ /* dvm boost update information */
+ struct ice_dvm_table dvm_upd;
+
/* HW block tables */
struct ice_blk_info blk[ICE_BLK_COUNT];
struct mutex fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */
@@ -943,6 +953,7 @@ struct ice_hw {
struct list_head rss_list_head;
struct ice_mbx_snapshot mbx_snapshot;
DECLARE_BITMAP(hw_ptype, ICE_FLOW_PTYPE_MAX);
+ u8 dvm_ena;
u16 io_expander_handle;
};
@@ -1008,6 +1019,15 @@ struct ice_hw_port_stats {
u64 fd_sb_match;
};
+enum ice_sw_fwd_act_type {
+ ICE_FWD_TO_VSI = 0,
+ ICE_FWD_TO_VSI_LIST, /* Do not use this when adding filter */
+ ICE_FWD_TO_Q,
+ ICE_FWD_TO_QGRP,
+ ICE_DROP_PACKET,
+ ICE_INVAL_ACT
+};
+
struct ice_aq_get_set_rss_lut_params {
u16 vsi_handle; /* software VSI handle */
u16 lut_size; /* size of the LUT buffer */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
new file mode 100644
index 000000000000..6578059d9479
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -0,0 +1,1029 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022, Intel Corporation. */
+
+#include "ice_vf_lib_private.h"
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
+#include "ice_virtchnl_allowlist.h"
+
+/* Public functions which may be accessed by all driver files */
+
+/**
+ * ice_get_vf_by_id - Get pointer to VF by ID
+ * @pf: the PF private structure
+ * @vf_id: the VF ID to locate
+ *
+ * Locate and return a pointer to the VF structure associated with a given ID.
+ * Returns NULL if the ID does not have a valid VF structure associated with
+ * it.
+ *
+ * This function takes a reference to the VF, which must be released by
+ * calling ice_put_vf() once the caller is finished accessing the VF structure
+ * returned.
+ */
+struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
+{
+ struct ice_vf *vf;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
+ if (vf->vf_id == vf_id) {
+ struct ice_vf *found;
+
+ if (kref_get_unless_zero(&vf->refcnt))
+ found = vf;
+ else
+ found = NULL;
+
+ rcu_read_unlock();
+ return found;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+/**
+ * ice_release_vf - Release VF associated with a refcount
+ * @ref: the kref decremented to zero
+ *
+ * Callback function for kref_put to release a VF once its reference count has
+ * hit zero.
+ */
+static void ice_release_vf(struct kref *ref)
+{
+ struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);
+
+ vf->vf_ops->free(vf);
+}
+
+/**
+ * ice_put_vf - Release a reference to a VF
+ * @vf: the VF structure to decrease reference count on
+ *
+ * Decrease the reference count for a VF, and free the entry if it is no
+ * longer in use.
+ *
+ * This must be called after ice_get_vf_by_id() once the reference to the VF
+ * structure is no longer used. Otherwise, the VF structure will never be
+ * freed.
+ */
+void ice_put_vf(struct ice_vf *vf)
+{
+ kref_put(&vf->refcnt, ice_release_vf);
+}
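+
+/* Typical usage (hypothetical sketch, not part of this patch): every
+ * successful ice_get_vf_by_id() must be balanced by ice_put_vf() once the
+ * caller is done with the VF:
+ *
+ *	struct ice_vf *vf = ice_get_vf_by_id(pf, vf_id);
+ *
+ *	if (!vf)
+ *		return;
+ *	... use vf ...
+ *	ice_put_vf(vf);
+ */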
+
+/**
+ * ice_has_vfs - Return true if the PF has any associated VFs
+ * @pf: the PF private structure
+ *
+ * Return whether or not the PF has any allocated VFs.
+ *
+ * Note that this function only reports whether any VFs exist at the point
+ * of calling it. It does not guarantee that no VFs will be added or removed
+ * afterwards.
+ */
+bool ice_has_vfs(struct ice_pf *pf)
+{
+ /* A simple check that the hash table is not empty does not require
+ * the mutex or rcu_read_lock.
+ */
+ return !hash_empty(pf->vfs.table);
+}
+
+/**
+ * ice_get_num_vfs - Get number of allocated VFs
+ * @pf: the PF private structure
+ *
+ * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
+ * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
+ * the output of this function.
+ */
+u16 ice_get_num_vfs(struct ice_pf *pf)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+ u16 num_vfs = 0;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf)
+ num_vfs++;
+ rcu_read_unlock();
+
+ return num_vfs;
+}
+
+/**
+ * ice_get_vf_vsi - get VF's VSI based on the stored index
+ * @vf: VF used to get VSI
+ */
+struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
+{
+ if (vf->lan_vsi_idx == ICE_NO_VSI)
+ return NULL;
+
+ return vf->pf->vsi[vf->lan_vsi_idx];
+}
+
+/**
+ * ice_is_vf_disabled
+ * @vf: pointer to the VF info
+ *
+ * If the PF has been disabled, there is no need to reset the VF until the PF
+ * is active again. Similarly, if the VF has been disabled, this means something
+ * else is resetting the VF, so we shouldn't continue.
+ *
+ * Returns true if the caller should consider the VF as disabled whether
+ * because that single VF is explicitly disabled or because the PF is
+ * currently disabled.
+ */
+bool ice_is_vf_disabled(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ return (test_bit(ICE_VF_DIS, pf->state) ||
+ test_bit(ICE_VF_STATE_DIS, vf->vf_states));
+}
+
+/**
+ * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
+ * @vf: the VF being reset
+ *
+ * The max poll time is ~800 ms, which is about the maximum time it takes
+ * for a VF to be reset and/or a VF driver to be removed.
+ */
+static void ice_wait_on_vf_reset(struct ice_vf *vf)
+{
+ int i;
+
+ for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
+ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
+ break;
+ msleep(ICE_MAX_VF_RESET_SLEEP_MS);
+ }
+}
+
+/**
+ * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
+ * @vf: VF to check if it's ready to be configured/queried
+ *
+ * The purpose of this function is to make sure the VF is not in reset, not
+ * disabled, and initialized so it can be configured and/or queried by a host
+ * administrator.
+ */
+int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+{
+ ice_wait_on_vf_reset(vf);
+
+ if (ice_is_vf_disabled(vf))
+ return -EINVAL;
+
+ if (ice_check_vf_init(vf))
+ return -EBUSY;
+
+ return 0;
+}
+
+/**
+ * ice_trigger_vf_reset - Reset a VF on HW
+ * @vf: pointer to the VF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ * @is_pfr: true if the reset was triggered due to a previous PFR
+ *
+ * Trigger hardware to start a reset for a particular VF. Expects the caller
+ * to wait the proper amount of time to allow hardware to reset the VF before
+ * it cleans up and restores VF functionality.
+ */
+static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
+{
+ /* Inform VF that it is no longer active, as a warning */
+ clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+
+ /* Disable VF's configuration API during reset. The flag is re-enabled
+ * when it's safe again to access VF's VSI.
+ */
+ clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+ /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
+ * needs to clear them in the case of VFR/VFLR. If this is done for
+ * PFR, it can mess up VF resets because the VF driver may already
+ * have started cleanup by the time we get here.
+ */
+ if (!is_pfr)
+ vf->vf_ops->clear_mbx_register(vf);
+
+ vf->vf_ops->trigger_reset_register(vf, is_vflr);
+}
+
+static void ice_vf_clear_counters(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ vf->num_mac = 0;
+ vsi->num_vlan = 0;
+ memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
+ memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
+}
+
+/**
+ * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
+ * @vf: VF to perform pre VSI rebuild tasks
+ *
+ * These tasks are items that don't need to be amortized since they are most
+ * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
+ */
+static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
+{
+ ice_vf_clear_counters(vf);
+ vf->vf_ops->clear_reset_trigger(vf);
+}
+
+/**
+ * ice_vf_rebuild_vsi - rebuild the VF's VSI
+ * @vf: VF to rebuild the VSI for
+ *
+ * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
+ * host, PFR, CORER, etc.).
+ */
+static int ice_vf_rebuild_vsi(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_pf *pf = vf->pf;
+
+ if (ice_vsi_rebuild(vsi, true)) {
+ dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
+ vf->vf_id);
+ return -EIO;
+ }
+ /* vsi->idx will remain the same in this case so don't update
+ * vf->lan_vsi_idx
+ */
+ vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
+ vf->lan_vsi_num = vsi->vsi_num;
+
+ return 0;
+}
+
+/**
+ * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
+ * @pf: PF structure for accessing VF(s)
+ *
+ * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
+ * else return true
+ */
+bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
+{
+ bool is_vf_promisc = false;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ /* found a VF that has promiscuous mode configured */
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
+ is_vf_promisc = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return is_vf_promisc;
+}
+
+/**
+ * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
+ * @vf: the VF to configure
+ * @vsi: the VF's VSI
+ * @promisc_m: the promiscuous mode to enable
+ */
+int
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ int status;
+
+ if (ice_vf_is_port_vlan_ena(vf))
+ status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
+ ice_vf_get_port_vlan_id(vf));
+ else if (ice_vsi_has_non_zero_vlans(vsi))
+ status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
+ else
+ status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
+
+ if (status && status != -EEXIST) {
+ dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
+ vf->vf_id, status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI
+ * @vf: the VF to configure
+ * @vsi: the VF's VSI
+ * @promisc_m: the promiscuous mode to disable
+ */
+int
+ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ int status;
+
+ if (ice_vf_is_port_vlan_ena(vf))
+ status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+ ice_vf_get_port_vlan_id(vf));
+ else if (ice_vsi_has_non_zero_vlans(vsi))
+ status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
+ else
+ status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
+
+ if (status && status != -ENOENT) {
+ dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
+ vf->vf_id, status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_reset_all_vfs - reset all allocated VFs in one go
+ * @pf: pointer to the PF structure
+ *
+ * First, tell the hardware to reset each VF, then do all the waiting in one
+ * chunk, and finally finish restoring each VF after the wait. This is useful
+ * during PF routines which need to reset all VFs; otherwise the resets would
+ * have to be performed in a serialized fashion.
+ */
+void ice_reset_all_vfs(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ /* If we don't have any VFs, then there is nothing to reset */
+ if (!ice_has_vfs(pf))
+ return;
+
+ mutex_lock(&pf->vfs.table_lock);
+
+ /* clear all malicious info if the VFs are getting reset */
+ ice_for_each_vf(pf, bkt, vf)
+ if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
+ ICE_MAX_SRIOV_VFS, vf->vf_id))
+ dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
+ vf->vf_id);
+
+ /* If VFs have been disabled, there is no need to reset */
+ if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
+ mutex_unlock(&pf->vfs.table_lock);
+ return;
+ }
+
+ /* Begin reset on all VFs at once */
+ ice_for_each_vf(pf, bkt, vf)
+ ice_trigger_vf_reset(vf, true, true);
+
+ /* HW requires some time to make sure it can flush the FIFO for a VF
+ * when it resets it. Now that we've triggered all of the VFs, iterate
+ * the table again and wait for each VF to complete.
+ */
+ ice_for_each_vf(pf, bkt, vf) {
+ if (!vf->vf_ops->poll_reset_status(vf)) {
+ /* Display a warning if at least one VF didn't manage
+ * to reset in time, but continue on with the
+ * operation.
+ */
+ dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
+ break;
+ }
+ }
+
+ /* free VF resources to begin resetting the VSI state */
+ ice_for_each_vf(pf, bkt, vf) {
+ mutex_lock(&vf->cfg_lock);
+
+ vf->driver_caps = 0;
+ ice_vc_set_default_allowlist(vf);
+
+ ice_vf_fdir_exit(vf);
+ ice_vf_fdir_init(vf);
+ /* clean VF control VSI when resetting VFs since it should be
+ * setup only when VF creates its first FDIR rule.
+ */
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+ ice_vf_ctrl_invalidate_vsi(vf);
+
+ ice_vf_pre_vsi_rebuild(vf);
+ ice_vf_rebuild_vsi(vf);
+ vf->vf_ops->post_vsi_rebuild(vf);
+
+ mutex_unlock(&vf->cfg_lock);
+ }
+
+ if (ice_is_eswitch_mode_switchdev(pf))
+ if (ice_eswitch_rebuild(pf))
+ dev_warn(dev, "eswitch rebuild failed\n");
+
+ ice_flush(hw);
+ clear_bit(ICE_VF_DIS, pf->state);
+
+ mutex_unlock(&pf->vfs.table_lock);
+}
+
+/**
+ * ice_notify_vf_reset - Notify VF of a reset event
+ * @vf: pointer to the VF structure
+ */
+static void ice_notify_vf_reset(struct ice_vf *vf)
+{
+ struct ice_hw *hw = &vf->pf->hw;
+ struct virtchnl_pf_event pfe;
+
+ /* Bail out if the VF is disabled, or is neither initialized nor active;
+ * otherwise proceed with the notification.
+ */
+ if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
+ test_bit(ICE_VF_STATE_DIS, vf->vf_states))
+ return;
+
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
+ NULL);
+}
+
+/**
+ * ice_reset_vf - Reset a particular VF
+ * @vf: pointer to the VF structure
+ * @flags: flags controlling behavior of the reset
+ *
+ * Flags:
+ * ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
+ * ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
+ * ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
+ *
+ * Returns 0 if the VF is currently in reset, if the resets are disabled, or
+ * if the VF resets successfully. Returns an error code if the VF fails to
+ * rebuild.
+ */
+int ice_reset_vf(struct ice_vf *vf, u32 flags)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ struct ice_hw *hw;
+ u8 promisc_m;
+ int err = 0;
+ bool rsd;
+
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+
+ if (flags & ICE_VF_RESET_NOTIFY)
+ ice_notify_vf_reset(vf);
+
+ if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
+ dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
+ vf->vf_id);
+ return 0;
+ }
+
+ if (ice_is_vf_disabled(vf)) {
+ dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
+ vf->vf_id);
+ return 0;
+ }
+
+ if (flags & ICE_VF_RESET_LOCK)
+ mutex_lock(&vf->cfg_lock);
+ else
+ lockdep_assert_held(&vf->cfg_lock);
+
+ /* Set VF disable bit state here, before triggering reset */
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);
+
+ vsi = ice_get_vf_vsi(vf);
+
+ ice_dis_vf_qs(vf);
+
+ /* Call Disable LAN Tx queue AQ whether or not queues are
+ * enabled. This is needed for successful completion of VFR.
+ */
+ ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
+ NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);
+
+ /* poll VPGEN_VFRSTAT reg to make sure
+ * that reset is complete
+ */
+ rsd = vf->vf_ops->poll_reset_status(vf);
+
+ /* Display a warning if the VF didn't manage to reset in time, but
+ * continue on with the operation anyway.
+ */
+ if (!rsd)
+ dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
+
+ vf->driver_caps = 0;
+ ice_vc_set_default_allowlist(vf);
+
+ /* disable promiscuous modes in case they were enabled
+ * ignore any error if disabling process failed
+ */
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
+ if (ice_vf_is_port_vlan_ena(vf) || vsi->num_vlan)
+ promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_UCAST_PROMISC_BITS;
+
+ if (ice_vf_clear_vsi_promisc(vf, vsi, promisc_m))
+ dev_err(dev, "disabling promiscuous mode failed\n");
+ }
+
+ ice_eswitch_del_vf_mac_rule(vf);
+
+ ice_vf_fdir_exit(vf);
+ ice_vf_fdir_init(vf);
+ /* clean VF control VSI when resetting VF since it should be setup
+ * only when VF creates its first FDIR rule.
+ */
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+ ice_vf_ctrl_vsi_release(vf);
+
+ ice_vf_pre_vsi_rebuild(vf);
+
+ if (vf->vf_ops->vsi_rebuild(vf)) {
+ dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
+ vf->vf_id);
+ err = -EFAULT;
+ goto out_unlock;
+ }
+
+ vf->vf_ops->post_vsi_rebuild(vf);
+ vsi = ice_get_vf_vsi(vf);
+ ice_eswitch_update_repr(vsi);
+ ice_eswitch_replay_vf_mac_rule(vf);
+
+ /* if the VF has been reset allow it to come up again */
+ if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
+ ICE_MAX_SRIOV_VFS, vf->vf_id))
+ dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
+ vf->vf_id);
+
+out_unlock:
+ if (flags & ICE_VF_RESET_LOCK)
+ mutex_unlock(&vf->cfg_lock);
+
+ return err;
+}
+
+/**
+ * ice_set_vf_state_qs_dis - Set VF queues state to disabled
+ * @vf: pointer to the VF structure
+ */
+void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+{
+ /* Clear Rx/Tx enabled queues flag */
+ bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
+ bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+}
+
+/* Private functions only accessed from other virtualization files */
+
+/**
+ * ice_dis_vf_qs - Disable the VF queues
+ * @vf: pointer to the VF structure
+ */
+void ice_dis_vf_qs(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
+ ice_vsi_stop_all_rx_rings(vsi);
+ ice_set_vf_state_qs_dis(vf);
+}
+
+/**
+ * ice_check_vf_init - helper to check if VF init complete
+ * @vf: the pointer to the VF to check
+ */
+int ice_check_vf_init(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
+ vf->vf_id);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/**
+ * ice_vf_get_port_info - Get the VF's port info structure
+ * @vf: VF used to get the port info structure for
+ */
+struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
+{
+ return vf->pf->hw.port_info;
+}
+
+static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
+{
+ struct ice_vsi_ctx *ctx;
+ int err;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->info.sec_flags = vsi->info.sec_flags;
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+
+ if (enable)
+ ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+ else
+ ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+
+ err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
+ enable ? "ON" : "OFF", vsi->vsi_num, err);
+ else
+ vsi->info.sec_flags = ctx->info.sec_flags;
+
+ kfree(ctx);
+
+ return err;
+}
+
+/**
+ * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
+ * @vsi: VSI to enable Tx spoof checking for
+ */
+static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ int err;
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
+ err = vlan_ops->ena_tx_filtering(vsi);
+ if (err)
+ return err;
+
+ return ice_cfg_mac_antispoof(vsi, true);
+}
+
+/**
+ * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
+ * @vsi: VSI to disable Tx spoof checking for
+ */
+static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ int err;
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
+ err = vlan_ops->dis_tx_filtering(vsi);
+ if (err)
+ return err;
+
+ return ice_cfg_mac_antispoof(vsi, false);
+}
+
+/**
+ * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI
+ * @vsi: VSI associated to the VF
+ * @enable: whether to enable or disable the spoof checking
+ */
+int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable)
+{
+ int err;
+
+ if (enable)
+ err = ice_vsi_ena_spoofchk(vsi);
+ else
+ err = ice_vsi_dis_spoofchk(vsi);
+
+ return err;
+}
+
+/**
+ * ice_is_vf_trusted
+ * @vf: pointer to the VF info
+ */
+bool ice_is_vf_trusted(struct ice_vf *vf)
+{
+ return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+}
+
+/**
+ * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
+ * @vf: the VF to check
+ *
+ * Returns true if the VF has no Rx and no Tx queues enabled and returns false
+ * otherwise
+ */
+bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
+{
+ return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
+ !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
+}
+
+/**
+ * ice_is_vf_link_up - check if the VF's link is up
+ * @vf: VF to check if link is up
+ */
+bool ice_is_vf_link_up(struct ice_vf *vf)
+{
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+
+ if (ice_check_vf_init(vf))
+ return false;
+
+ if (ice_vf_has_no_qs_ena(vf))
+ return false;
+ else if (vf->link_forced)
+ return vf->link_up;
+ else
+ return pi->phy.link_info.link_info &
+ ICE_AQ_LINK_UP;
+}
+
+/**
+ * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
+ * @vf: VF to configure trust setting for
+ */
+static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
+{
+ if (vf->trusted)
+ set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+ else
+ clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+}
+
+/**
+ * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
+ * @vf: VF to add MAC filters for
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
+ */
+static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ u8 broadcast[ETH_ALEN];
+ int status;
+
+ if (ice_is_eswitch_mode_switchdev(vf->pf))
+ return 0;
+
+ eth_broadcast_addr(broadcast);
+ status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (status) {
+ dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
+ vf->vf_id, status);
+ return status;
+ }
+
+ vf->num_mac++;
+
+ if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
+ status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ if (status) {
+ dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
+ &vf->hw_lan_addr.addr[0], vf->vf_id,
+ status);
+ return status;
+ }
+ vf->num_mac++;
+
+ ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
+ * @vf: VF to add VLAN filters for
+ * @vsi: Pointer to VSI
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * always re-adds either a VLAN 0 or port VLAN based filter after reset.
+ */
+static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ int err;
+
+ if (ice_vf_is_port_vlan_ena(vf)) {
+ err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
+ if (err) {
+ dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
+ vf->vf_id, err);
+ return err;
+ }
+
+ err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
+ } else {
+ err = ice_vsi_add_vlan_zero(vsi);
+ }
+
+ if (err) {
+ dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
+ ice_vf_is_port_vlan_ena(vf) ?
+ ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
+ return err;
+ }
+
+ err = vlan_ops->ena_rx_filtering(vsi);
+ if (err)
+ dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
+ vf->vf_id, vsi->idx, err);
+
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
+ * @vf: VF to re-apply the configuration for
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * needs to re-apply the host configured Tx rate limiting configuration.
+ */
+static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ int err;
+
+ if (vf->min_tx_rate) {
+ err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
+ if (err) {
+ dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
+ vf->min_tx_rate, vf->vf_id, err);
+ return err;
+ }
+ }
+
+ if (vf->max_tx_rate) {
+ err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
+ if (err) {
+ dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
+ vf->max_tx_rate, vf->vf_id, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
+ * @vsi: Pointer to VSI
+ *
+ * This function moves VSI into corresponding scheduler aggregator node
+ * based on cached value of "aggregator node info" per VSI
+ */
+static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ int status;
+
+ if (!vsi->agg_node)
+ return;
+
+ dev = ice_pf_to_dev(pf);
+ if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
+ dev_dbg(dev,
+ "agg_id %u already has reached max_num_vsis %u\n",
+ vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
+ return;
+ }
+
+ status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
+ vsi->idx, vsi->tc_cfg.ena_tc);
+ if (status)
+ dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
+ vsi->idx, vsi->agg_node->agg_id);
+ else
+ vsi->agg_node->num_vsis++;
+}
+
+/**
+ * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
+ * @vf: VF to rebuild host configuration on
+ */
+void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ ice_vf_set_host_trust_cfg(vf);
+
+ if (ice_vf_rebuild_host_mac_cfg(vf))
+ dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
+ vf->vf_id);
+
+ if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
+ dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
+ vf->vf_id);
+
+ if (ice_vf_rebuild_host_tx_rate_cfg(vf))
+ dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
+ vf->vf_id);
+
+ if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
+ dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
+ vf->vf_id);
+
+ /* rebuild aggregator node config for main VF VSI */
+ ice_vf_rebuild_aggregator_node_cfg(vsi);
+}
+
+/**
+ * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
+ * @vf: VF that control VSI is being invalidated on
+ */
+void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
+{
+ vf->ctrl_vsi_idx = ICE_NO_VSI;
+}
+
+/**
+ * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
+ * @vf: VF that control VSI is being released on
+ */
+void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
+{
+ ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
+ ice_vf_ctrl_invalidate_vsi(vf);
+}
+
+/**
+ * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
+ * @vf: VF to setup control VSI for
+ *
+ * Returns pointer to the successfully allocated VSI struct on success,
+ * otherwise returns NULL on failure.
+ */
+struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
+{
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf, NULL);
+ if (!vsi) {
+ dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
+ ice_vf_ctrl_invalidate_vsi(vf);
+ }
+
+ return vsi;
+}
+
+/**
+ * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
+ * @vf: VF to remove access to VSI for
+ */
+void ice_vf_invalidate_vsi(struct ice_vf *vf)
+{
+ vf->lan_vsi_idx = ICE_NO_VSI;
+ vf->lan_vsi_num = ICE_NO_VSI;
+}
+
+/**
+ * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
+ * @vf: VF to set in initialized state
+ *
+ * After this function the VF will be ready to receive/handle the
+ * VIRTCHNL_OP_GET_VF_RESOURCES message
+ */
+void ice_vf_set_initialized(struct ice_vf *vf)
+{
+ ice_set_vf_state_qs_dis(vf);
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
new file mode 100644
index 000000000000..831b667dc5b2
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_VF_LIB_H_
+#define _ICE_VF_LIB_H_
+
+#include <linux/types.h>
+#include <linux/hashtable.h>
+#include <linux/bitmap.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <net/devlink.h>
+#include <linux/avf/virtchnl.h>
+#include "ice_type.h"
+#include "ice_virtchnl_fdir.h"
+#include "ice_vsi_vlan_ops.h"
+
+#define ICE_MAX_SRIOV_VFS 256
+
+/* VF resource constraints */
+#define ICE_MAX_RSS_QS_PER_VF 16
+
+struct ice_pf;
+struct ice_vf;
+struct ice_virtchnl_ops;
+
+/* VF capabilities */
+enum ice_virtchnl_cap {
+ ICE_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
+};
+
+/* Specific VF states */
+enum ice_vf_states {
+ ICE_VF_STATE_INIT = 0, /* PF is initializing VF */
+ ICE_VF_STATE_ACTIVE, /* VF resources are allocated for use */
+ ICE_VF_STATE_QS_ENA, /* VF queue(s) enabled */
+ ICE_VF_STATE_DIS,
+ ICE_VF_STATE_MC_PROMISC,
+ ICE_VF_STATE_UC_PROMISC,
+ ICE_VF_STATES_NBITS
+};
+
+struct ice_time_mac {
+ unsigned long time_modified;
+ u8 addr[ETH_ALEN];
+};
+
+/* VF MDD events print structure */
+struct ice_mdd_vf_events {
+ u16 count; /* total count of Rx|Tx events */
+ /* count value at the time of the last printed event */
+ u16 last_printed;
+};
+
+/* VF operations */
+struct ice_vf_ops {
+ enum ice_disq_rst_src reset_type;
+ void (*free)(struct ice_vf *vf);
+ void (*clear_mbx_register)(struct ice_vf *vf);
+ void (*trigger_reset_register)(struct ice_vf *vf, bool is_vflr);
+ bool (*poll_reset_status)(struct ice_vf *vf);
+ void (*clear_reset_trigger)(struct ice_vf *vf);
+ int (*vsi_rebuild)(struct ice_vf *vf);
+ void (*post_vsi_rebuild)(struct ice_vf *vf);
+};
+
+/* Virtchnl/SR-IOV config info */
+struct ice_vfs {
+ DECLARE_HASHTABLE(table, 8); /* table of VF entries */
+ struct mutex table_lock; /* Lock for protecting the hash table */
+ u16 num_supported; /* max supported VFs on this PF */
+ u16 num_qps_per; /* number of queue pairs per VF */
+ u16 num_msix_per; /* number of MSI-X vectors per VF */
+ unsigned long last_printed_mdd_jiffies; /* MDD message rate limit */
+ DECLARE_BITMAP(malvfs, ICE_MAX_SRIOV_VFS); /* malicious VF indicator */
+};
+
+/* VF information structure */
+struct ice_vf {
+ struct hlist_node entry;
+ struct rcu_head rcu;
+ struct kref refcnt;
+ struct ice_pf *pf;
+
+ /* Used during virtchnl message handling and NDO ops against the VF
+ * that will trigger a VFR
+ */
+ struct mutex cfg_lock;
+
+ u16 vf_id; /* VF ID in the PF space */
+ u16 lan_vsi_idx; /* index into PF struct */
+ u16 ctrl_vsi_idx;
+ struct ice_vf_fdir fdir;
+ /* first vector index of this VF in the PF space */
+ int first_vector_idx;
+ struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
+ struct virtchnl_version_info vf_ver;
+ u32 driver_caps; /* reported by VF driver */
+ struct virtchnl_ether_addr dev_lan_addr;
+ struct virtchnl_ether_addr hw_lan_addr;
+ struct ice_time_mac legacy_last_added_umac;
+ DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);
+ DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ struct ice_vlan port_vlan_info; /* Port VLAN ID, QoS, and TPID */
+ struct virtchnl_vlan_caps vlan_v2_caps;
+ u8 pf_set_mac:1; /* VF MAC address set by VMM admin */
+ u8 trusted:1;
+ u8 spoofchk:1;
+ u8 link_forced:1;
+ u8 link_up:1; /* only valid if VF link is forced */
+ /* VSI indices - actual VSI pointers are maintained in the PF structure.
+ * When assigned, these will be non-zero, because VSI 0 is always
+ * the main LAN VSI for the PF.
+ */
+ u16 lan_vsi_num; /* ID as used by firmware */
+ unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
+ unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
+ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
+
+ unsigned long vf_caps; /* VF's adv. capabilities */
+ u8 num_req_qs; /* num of queue pairs requested by VF */
+ u16 num_mac;
+ u16 num_vf_qs; /* num of queue configured per VF */
+ struct ice_mdd_vf_events mdd_rx_events;
+ struct ice_mdd_vf_events mdd_tx_events;
+ DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
+
+ struct ice_repr *repr;
+ const struct ice_virtchnl_ops *virtchnl_ops;
+ const struct ice_vf_ops *vf_ops;
+
+ /* devlink port data */
+ struct devlink_port devlink_port;
+};
+
+/* Flags for controlling behavior of ice_reset_vf */
+enum ice_vf_reset_flags {
+ ICE_VF_RESET_VFLR = BIT(0), /* Indicate a VFLR reset */
+ ICE_VF_RESET_NOTIFY = BIT(1), /* Notify VF prior to reset */
+ ICE_VF_RESET_LOCK = BIT(2), /* Acquire the VF cfg_lock */
+};
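+
+/* Illustrative only (hypothetical call sites, not part of this patch): a
+ * host-side configuration change that must notify the VF and take its
+ * cfg_lock would use
+ *
+ *	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
+ *
+ * while a VFLR handler that already holds the lock would pass
+ * ICE_VF_RESET_VFLR alone.
+ */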
+
+static inline u16 ice_vf_get_port_vlan_id(struct ice_vf *vf)
+{
+ return vf->port_vlan_info.vid;
+}
+
+static inline u8 ice_vf_get_port_vlan_prio(struct ice_vf *vf)
+{
+ return vf->port_vlan_info.prio;
+}
+
+static inline bool ice_vf_is_port_vlan_ena(struct ice_vf *vf)
+{
+ return (ice_vf_get_port_vlan_id(vf) || ice_vf_get_port_vlan_prio(vf));
+}
+
+static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf)
+{
+ return vf->port_vlan_info.tpid;
+}
+
+/* VF Hash Table access functions
+ *
+ * These functions provide abstraction for interacting with the VF hash table.
+ * In general, direct access to the hash table should be avoided outside of
+ * these functions where possible.
+ *
+ * The VF entries in the hash table are protected by reference counting to
+ * track lifetime of accesses from the table. The ice_get_vf_by_id() function
+ * obtains a reference to the VF structure which must be dropped by using
+ * ice_put_vf().
+ */
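+
+/* A minimal lookup sketch (illustrative, not part of this patch): take a
+ * reference, use the VF, then drop the reference:
+ *
+ *	struct ice_vf *vf = ice_get_vf_by_id(pf, vf_id);
+ *
+ *	if (!vf)
+ *		return;
+ *	... use vf ...
+ *	ice_put_vf(vf);
+ */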
+
+/**
+ * ice_for_each_vf - Iterate over each VF entry
+ * @pf: pointer to the PF private structure
+ * @bkt: bucket index used for iteration
+ * @vf: pointer to the VF entry currently being processed in the loop.
+ *
+ * The bkt variable is an unsigned integer iterator used to traverse the VF
+ * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
+ * Use vf->vf_id to get the id number if needed.
+ *
+ * The caller is expected to be under the table_lock mutex for the entire
+ * loop. Use this iterator if your loop is long or if it might sleep.
+ */
+#define ice_for_each_vf(pf, bkt, vf) \
+ hash_for_each((pf)->vfs.table, (bkt), (vf), entry)
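+
+/* Illustrative use of the iterator (hypothetical caller, not part of this
+ * patch): the table_lock must be held across the whole walk:
+ *
+ *	struct ice_vf *vf;
+ *	unsigned int bkt;
+ *
+ *	mutex_lock(&pf->vfs.table_lock);
+ *	ice_for_each_vf(pf, bkt, vf)
+ *		dev_dbg(ice_pf_to_dev(pf), "VF %u\n", vf->vf_id);
+ *	mutex_unlock(&pf->vfs.table_lock);
+ */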
+
+/**
+ * ice_for_each_vf_rcu - Iterate over each VF entry protected by RCU
+ * @pf: pointer to the PF private structure
+ * @bkt: bucket index used for iteration
+ * @vf: pointer to the VF entry currently being processed in the loop.
+ *
+ * The bkt variable is an unsigned integer iterator used to traverse the VF
+ * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
+ * Use vf->vf_id to get the id number if needed.
+ *
+ * The caller is expected to be under rcu_read_lock() for the entire loop.
+ * Only use this iterator if your loop is short and you can guarantee it does
+ * not sleep.
+ */
+#define ice_for_each_vf_rcu(pf, bkt, vf) \
+ hash_for_each_rcu((pf)->vfs.table, (bkt), (vf), entry)
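+
+/* Illustrative use of the RCU iterator (hypothetical caller, not part of
+ * this patch): the RCU read lock must be held and the body must not sleep:
+ *
+ *	struct ice_vf *vf;
+ *	unsigned int bkt;
+ *
+ *	rcu_read_lock();
+ *	ice_for_each_vf_rcu(pf, bkt, vf)
+ *		if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ *			active++;
+ *	rcu_read_unlock();
+ */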
+
+#ifdef CONFIG_PCI_IOV
+struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id);
+void ice_put_vf(struct ice_vf *vf);
+bool ice_has_vfs(struct ice_pf *pf);
+u16 ice_get_num_vfs(struct ice_pf *pf);
+struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
+bool ice_is_vf_disabled(struct ice_vf *vf);
+int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
+void ice_set_vf_state_qs_dis(struct ice_vf *vf);
+bool ice_is_any_vf_in_promisc(struct ice_pf *pf);
+int
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
+int
+ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
+int ice_reset_vf(struct ice_vf *vf, u32 flags);
+void ice_reset_all_vfs(struct ice_pf *pf);
+#else /* CONFIG_PCI_IOV */
+static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
+{
+ return NULL;
+}
+
+static inline void ice_put_vf(struct ice_vf *vf)
+{
+}
+
+static inline bool ice_has_vfs(struct ice_pf *pf)
+{
+ return false;
+}
+
+static inline u16 ice_get_num_vfs(struct ice_pf *pf)
+{
+ return 0;
+}
+
+static inline struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
+{
+ return NULL;
+}
+
+static inline bool ice_is_vf_disabled(struct ice_vf *vf)
+{
+ return true;
+}
+
+static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+{
+}
+
+static inline bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
+{
+ return false;
+}
+
+static inline int
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ice_reset_vf(struct ice_vf *vf, u32 flags)
+{
+ return 0;
+}
+
+static inline void ice_reset_all_vfs(struct ice_pf *pf)
+{
+}
+#endif /* !CONFIG_PCI_IOV */
+
+#endif /* _ICE_VF_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
new file mode 100644
index 000000000000..15887e772c76
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_VF_LIB_PRIVATE_H_
+#define _ICE_VF_LIB_PRIVATE_H_
+
+#include "ice_vf_lib.h"
+
+/* This header file is for exposing functions in ice_vf_lib.c to other files
+ * which are also conditionally compiled depending on CONFIG_PCI_IOV.
+ * Functions which may be used by other files should be exposed as part of
+ * ice_vf_lib.h
+ *
+ * Functions in this file are exposed only when CONFIG_PCI_IOV is enabled, and
+ * thus this header must not be included by .c files which may be compiled
+ * with CONFIG_PCI_IOV disabled.
+ *
+ * To avoid this, only include this header file directly within .c files that
+ * are conditionally enabled in the "ice-$(CONFIG_PCI_IOV)" block.
+ */
+
+#ifndef CONFIG_PCI_IOV
+#warning "Only include ice_vf_lib_private.h in CONFIG_PCI_IOV virtualization files"
+#endif
+
+void ice_dis_vf_qs(struct ice_vf *vf);
+int ice_check_vf_init(struct ice_vf *vf);
+struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf);
+int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable);
+bool ice_is_vf_trusted(struct ice_vf *vf);
+bool ice_vf_has_no_qs_ena(struct ice_vf *vf);
+bool ice_is_vf_link_up(struct ice_vf *vf);
+void ice_vf_rebuild_host_cfg(struct ice_vf *vf);
+void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf);
+void ice_vf_ctrl_vsi_release(struct ice_vf *vf);
+struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
+void ice_vf_invalidate_vsi(struct ice_vf *vf);
+void ice_vf_set_initialized(struct ice_vf *vf);
+
+#endif /* _ICE_VF_LIB_PRIVATE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
new file mode 100644
index 000000000000..fc8c93fa4455
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
@@ -0,0 +1,532 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice_vf_mbx.h"
+
+/**
+ * ice_aq_send_msg_to_vf - send a message to a specified VF
+ * @hw: pointer to the hardware structure
+ * @vfid: VF ID to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cd: pointer to command details
+ *
+ * Send a message to the VF driver (0x0802) over the mailbox
+ * queue. The message is sent asynchronously via the
+ * ice_sq_send_cmd() function.
+ */
+int
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ u8 *msg, u16 msglen, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_pf_vf_msg *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
+
+ cmd = &desc.params.virt;
+ cmd->id = cpu_to_le32(vfid);
+
+ desc.cookie_high = cpu_to_le32(v_opcode);
+ desc.cookie_low = cpu_to_le32(v_retval);
+
+ if (msglen)
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
+}
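+
+/* Illustrative only (hypothetical call site, not part of this patch):
+ * sending a zero-length success response back to a VF:
+ *
+ *	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_VERSION,
+ *			      VIRTCHNL_STATUS_SUCCESS, NULL, 0, NULL);
+ */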
+
+/**
+ * ice_conv_link_speed_to_virtchnl - convert link speed to a virtchnl value
+ * @adv_link_support: determines the format of the returned link speed
+ * @link_speed: variable containing the link_speed to be converted
+ *
+ * Convert link speed supported by HW to link speed supported by virtchnl.
+ * If adv_link_support is true, then return link speed in Mbps. Else return
+ * link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller
+ * needs to cast back to an enum virtchnl_link_speed in the case where
+ * adv_link_support is false, but when adv_link_support is true the caller can
+ * expect the speed in Mbps.
+ */
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
+{
+ u32 speed;
+
+ if (adv_link_support)
+ switch (link_speed) {
+ case ICE_AQ_LINK_SPEED_10MB:
+ speed = ICE_LINK_SPEED_10MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_100MB:
+ speed = ICE_LINK_SPEED_100MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ speed = ICE_LINK_SPEED_1000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ speed = ICE_LINK_SPEED_2500MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_5GB:
+ speed = ICE_LINK_SPEED_5000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ speed = ICE_LINK_SPEED_10000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ speed = ICE_LINK_SPEED_20000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ speed = ICE_LINK_SPEED_25000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ speed = ICE_LINK_SPEED_40000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_50GB:
+ speed = ICE_LINK_SPEED_50000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_100GB:
+ speed = ICE_LINK_SPEED_100000MBPS;
+ break;
+ default:
+ speed = ICE_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ else
+ /* Virtchnl speeds are not defined for every speed supported in
+ * the hardware. To maintain compatibility with older AVF
+ * drivers, new speed values are resolved to the closest
+ * known virtchnl speed when the speed is reported
+ */
+ switch (link_speed) {
+ case ICE_AQ_LINK_SPEED_10MB:
+ case ICE_AQ_LINK_SPEED_100MB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ case ICE_AQ_LINK_SPEED_2500MB:
+ case ICE_AQ_LINK_SPEED_5GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ case ICE_AQ_LINK_SPEED_50GB:
+ case ICE_AQ_LINK_SPEED_100GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
+ break;
+ default:
+ speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
+ return speed;
+}
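+
+/* Illustrative only: with adv_link_support the caller gets Mbps directly
+ * (e.g. ICE_AQ_LINK_SPEED_25GB resolves to 25000), while without it the
+ * result must be cast back, e.g. (hypothetical call site):
+ *
+ *	pfe.event_data.link_event.link_speed =
+ *		(enum virtchnl_link_speed)
+ *		ice_conv_link_speed_to_virtchnl(false, link_speed);
+ */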
+
+/* The mailbox overflow detection algorithm helps to detect whether a
+ * malicious VF is transmitting too many MBX messages to the PF.
+ * 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during
+ * driver initialization in ice_init_hw() using ice_mbx_init_snapshot().
+ * The struct ice_mbx_snapshot helps to track and traverse a static window of
+ * messages within the mailbox queue while looking for a malicious VF.
+ *
+ * 2. When the caller starts processing its mailbox queue in response to an
+ * interrupt, the structure ice_mbx_snapshot is expected to be cleared before
+ * the algorithm can be run for the first time for that interrupt. This can be
+ * done via ice_mbx_reset_snapshot().
+ *
+ * 3. For every message read by the caller from the MBX Queue, the caller must
+ * call the detection algorithm's entry function ice_mbx_vf_state_handler().
+ * Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data
+ * must be filled in, as it is passed to the algorithm.
+ *
+ * 4. Every time a message is read from the MBX queue, a VFId is received which
+ * is passed to the state handler. The boolean output is_malvf of the state
+ * handler ice_mbx_vf_state_handler() serves as an indicator to the caller
+ * whether this VF is malicious or not.
+ *
+ * 5. When a VF is identified to be malicious, the caller can send a message
+ * to the system administrator. The caller can invoke ice_mbx_report_malvf()
+ * to help determine if a malicious VF is to be reported or not. This function
+ * requires the caller to maintain a global bitmap to track all malicious VFs
+ * and pass that to ice_mbx_report_malvf() along with the VFID which was identified
+ * to be malicious by ice_mbx_vf_state_handler().
+ *
+ * 6. The global bitmap maintained by PF can be cleared completely if PF is in
+ * reset or the bit corresponding to a VF can be cleared if that VF is in reset.
+ * When a VF is shut down and brought back up, we assume that the new VF
+ * is not malicious and hence report it again if it is later found malicious.
+ *
+ * 7. The function ice_mbx_reset_snapshot() is called to reset the information
+ * in ice_mbx_snapshot for every new mailbox interrupt handled.
+ *
+ * 8. The memory allocated for variables in ice_mbx_snapshot is de-allocated
+ * when driver is unloaded.
+ */
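+/* A minimal caller sketch (illustrative; the locals are hypothetical and the
+ * real wiring lives in the PF's mailbox interrupt path):
+ *
+ *	struct ice_mbx_data mbx_data = { .. head/tail/message counts .. };
+ *	bool is_malvf, report;
+ *
+ *	if (!ice_mbx_vf_state_handler(hw, &mbx_data, vf_id, &is_malvf) &&
+ *	    is_malvf &&
+ *	    !ice_mbx_report_malvf(hw, all_malvfs, bitmap_len, vf_id, &report) &&
+ *	    report)
+ *		dev_warn(dev, "malicious VF %u detected\n", vf_id);
+ */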
+#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
+/* Use the highest unsigned 16-bit value, 0xFFFF, to indicate that the max
+ * messages check must be ignored by the algorithm
+ */
+#define ICE_IGNORE_MAX_MSG_CNT 0xFFFF
+
+/**
+ * ice_mbx_traverse - Pass through mailbox snapshot
+ * @hw: pointer to the HW struct
+ * @new_state: new algorithm state
+ *
+ * Traverse the static mailbox snapshot without checking
+ * for malicious VFs.
+ */
+static void
+ice_mbx_traverse(struct ice_hw *hw,
+ enum ice_mbx_snapshot_state *new_state)
+{
+ struct ice_mbx_snap_buffer_data *snap_buf;
+ u32 num_iterations;
+
+ snap_buf = &hw->mbx_snapshot.mbx_buf;
+
+ /* As mailbox buffer is circular, applying a mask
+ * on the incremented iteration count.
+ */
+ num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations);
+
+ /* Exit snapshot traversal when either of the below conditions holds:
+ * Condition-1: the iteration count equals the mailbox head, which
+ * indicates that we have reached the end of the static snapshot.
+ * Condition-2: unless the maximum number of messages serviced per
+ * interrupt is set to the highest possible value (in which case this
+ * check is skipped), the number of messages processed is greater than
+ * or equal to the maximum number of mailbox entries serviced in the
+ * current work item.
+ */
+ if (num_iterations == snap_buf->head ||
+ (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT &&
+ ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx))
+ *new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+}
+
+/**
+ * ice_mbx_detect_malvf - Detect malicious VF in snapshot
+ * @hw: pointer to the HW struct
+ * @vf_id: relative virtual function ID
+ * @new_state: new algorithm state
+ * @is_malvf: boolean output to indicate if VF is malicious
+ *
+ * This function tracks the number of asynchronous messages
+ * sent per VF and marks the VF as malicious if it exceeds
+ * the permissible number of messages to send.
+ */
+static int
+ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
+ enum ice_mbx_snapshot_state *new_state,
+ bool *is_malvf)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+ if (vf_id >= snap->mbx_vf.vfcntr_len)
+ return -EIO;
+
+ /* increment the message count in the VF array */
+ snap->mbx_vf.vf_cntr[vf_id]++;
+
+ if (snap->mbx_vf.vf_cntr[vf_id] >= ICE_ASYNC_VF_MSG_THRESHOLD)
+ *is_malvf = true;
+
+ /* continue to iterate through the mailbox snapshot */
+ ice_mbx_traverse(hw, new_state);
+
+ return 0;
+}
+
+/**
+ * ice_mbx_reset_snapshot - Reset mailbox snapshot structure
+ * @snap: pointer to mailbox snapshot structure in the ice_hw struct
+ *
+ * Reset the mailbox snapshot structure and clear VF counter array.
+ */
+static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
+{
+ u32 vfcntr_len;
+
+ if (!snap || !snap->mbx_vf.vf_cntr)
+ return;
+
+ /* Clear VF counters. */
+ vfcntr_len = snap->mbx_vf.vfcntr_len;
+ if (vfcntr_len)
+ memset(snap->mbx_vf.vf_cntr, 0,
+ (vfcntr_len * sizeof(*snap->mbx_vf.vf_cntr)));
+
+ /* Reset mailbox snapshot for a new capture. */
+ memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+ snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+}
+
+/**
+ * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
+ * @hw: pointer to the HW struct
+ * @mbx_data: pointer to structure containing mailbox data
+ * @vf_id: relative virtual function (VF) ID
+ * @is_malvf: boolean output to indicate if VF is malicious
+ *
+ * The function serves as an entry point for the malicious VF
+ * detection algorithm by handling the different states and state
+ * transitions of the algorithm:
+ * New snapshot: This state is entered when creating a new static
+ * snapshot. The data from any previous mailbox snapshot is
+ * cleared and a new capture of the mailbox head and tail is
+ * logged. This will be the new static snapshot to detect
+ * asynchronous messages sent by VFs. On capturing the snapshot
+ * and depending on whether the number of pending messages in that
+ * snapshot exceeds the watermark value, the state machine enters
+ * the traverse or detect state.
+ * Traverse: If pending message count is below watermark then iterate
+ * through the snapshot without any action on VF.
+ * Detect: If pending message count exceeds watermark traverse
+ * the static snapshot and look for a malicious VF.
+ */
+int
+ice_mbx_vf_state_handler(struct ice_hw *hw,
+ struct ice_mbx_data *mbx_data, u16 vf_id,
+ bool *is_malvf)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+ struct ice_mbx_snap_buffer_data *snap_buf;
+ struct ice_ctl_q_info *cq = &hw->mailboxq;
+ enum ice_mbx_snapshot_state new_state;
+ int status = 0;
+
+ if (!is_malvf || !mbx_data)
+ return -EINVAL;
+
+ /* When entering the mailbox state machine assume that the VF
+ * is not malicious until detected.
+ */
+ *is_malvf = false;
+
+ /* The maximum number of messages that can be processed while servicing
+ * the current interrupt must be greater than the AVF message threshold.
+ */
+ if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
+ return -EINVAL;
+
+ /* The watermark value should not be less than the threshold limit
+ * set for the number of asynchronous messages a VF can send to mailbox
+ * nor should it be greater than the maximum number of messages in the
+ * mailbox serviced in current interrupt.
+ */
+ if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
+ mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
+ return -EINVAL;
+
+ new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
+ snap_buf = &snap->mbx_buf;
+
+ switch (snap_buf->state) {
+ case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT:
+ /* Clear any previously held data in mailbox snapshot structure. */
+ ice_mbx_reset_snapshot(snap);
+
+ /* Collect the pending ARQ count, number of messages processed and
+ * the maximum number of messages allowed to be processed from the
+ * Mailbox for current interrupt.
+ */
+ snap_buf->num_pending_arq = mbx_data->num_pending_arq;
+ snap_buf->num_msg_proc = mbx_data->num_msg_proc;
+ snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx;
+
+ /* Capture a new static snapshot of the mailbox by logging the
+ * head and tail of snapshot and set num_iterations to the tail
+ * value to mark the start of the iteration through the snapshot.
+ */
+ snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
+ mbx_data->num_pending_arq);
+ snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
+ snap_buf->num_iterations = snap_buf->tail;
+
+ /* The number of pending ARQ messages returned by ice_clean_rq_elem()
+ * is the difference between the head and tail of the
+ * mailbox queue. Comparing this value against the watermark
+ * helps to check if we potentially have malicious VFs.
+ */
+ if (snap_buf->num_pending_arq >=
+ mbx_data->async_watermark_val) {
+ new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
+ status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
+ } else {
+ new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
+ ice_mbx_traverse(hw, &new_state);
+ }
+ break;
+
+ case ICE_MAL_VF_DETECT_STATE_TRAVERSE:
+ new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
+ ice_mbx_traverse(hw, &new_state);
+ break;
+
+ case ICE_MAL_VF_DETECT_STATE_DETECT:
+ new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
+ status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
+ break;
+
+ default:
+ new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
+ status = -EIO;
+ }
+
+ snap_buf->state = new_state;
+
+ return status;
+}
+
+/**
+ * ice_mbx_report_malvf - Track and note malicious VF
+ * @hw: pointer to the HW struct
+ * @all_malvfs: all malicious VFs tracked by PF
+ * @bitmap_len: length of bitmap in bits
+ * @vf_id: relative virtual function ID of the malicious VF
+ * @report_malvf: boolean to indicate if malicious VF must be reported
+ *
+ * This function will update a bitmap that keeps track of the malicious
+ * VFs attached to the PF. A malicious VF must be reported only once between
+ * VF resets or reloads, so the function checks
+ * the input vf_id against the bitmap to verify if the VF has been
+ * detected in any previous mailbox iterations.
+ */
+int
+ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
+ u16 bitmap_len, u16 vf_id, bool *report_malvf)
+{
+ if (!all_malvfs || !report_malvf)
+ return -EINVAL;
+
+ *report_malvf = false;
+
+ if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
+ return -EINVAL;
+
+ if (vf_id >= bitmap_len)
+ return -EIO;
+
+ /* If the VF was not already marked in the bitmap, set its bit and report it */
+ if (!test_and_set_bit(vf_id, all_malvfs))
+ *report_malvf = true;
+
+ return 0;
+}
+
+/**
+ * ice_mbx_clear_malvf - Clear VF bitmap and counter for VF ID
+ * @snap: pointer to the mailbox snapshot structure
+ * @all_malvfs: all malicious VFs tracked by PF
+ * @bitmap_len: length of bitmap in bits
+ * @vf_id: relative virtual function ID of the malicious VF
+ *
+ * In case of a VF reset, this function can be called to clear
+ * the bit corresponding to the VF ID in the bitmap tracking all
+ * malicious VFs attached to the PF. The function also clears the
+ * VF counter array at the index of the VF ID. This is to ensure
+ * that the new VF loaded is not considered malicious before going
+ * through the overflow detection algorithm.
+ */
+int
+ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
+ u16 bitmap_len, u16 vf_id)
+{
+ if (!snap || !all_malvfs)
+ return -EINVAL;
+
+ if (bitmap_len < snap->mbx_vf.vfcntr_len)
+ return -EINVAL;
+
+ /* Ensure VF ID value is not larger than bitmap or VF counter length */
+ if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len)
+ return -EIO;
+
+ /* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */
+ clear_bit(vf_id, all_malvfs);
+
+ /* Clear the VF counter in the mailbox snapshot structure for that VF ID.
+ * This is to ensure that if a VF is unloaded and a new one brought back
+ * up with the same VF ID while a snapshot is in the traversal or detect
+ * state, the counter for that VF ID does not increment on top of existing
+ * values in the mailbox overflow detection algorithm.
+ */
+ snap->mbx_vf.vf_cntr[vf_id] = 0;
+
+ return 0;
+}
+
+/**
+ * ice_mbx_init_snapshot - Initialize mailbox snapshot structure
+ * @hw: pointer to the hardware structure
+ * @vf_count: number of VFs allocated on a PF
+ *
+ * Clear the mailbox snapshot structure and allocate memory
+ * for the VF counter array based on the number of VFs allocated
+ * on that PF.
+ *
+ * Assumption: ice_get_caps() has already been called to ensure that the
+ * vf_count can be compared against the number
+ * of VFs supported as defined in the functional capabilities of the device.
+ */
+int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+ /* Ensure that the number of VFs allocated is non-zero and
+ * is not greater than the number of supported VFs defined in
+ * the functional capabilities of the PF.
+ */
+ if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
+ return -EINVAL;
+
+ snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count,
+ sizeof(*snap->mbx_vf.vf_cntr),
+ GFP_KERNEL);
+ if (!snap->mbx_vf.vf_cntr)
+ return -ENOMEM;
+
+ /* Setting the VF counter length to the number of allocated
+ * VFs for given PF's functional capabilities.
+ */
+ snap->mbx_vf.vfcntr_len = vf_count;
+
+ /* Clear mbx_buf in the mailbox snapshot structure and set the
+ * mailbox snapshot state to a new capture.
+ */
+ memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+ snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+
+ return 0;
+}
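+
+/* Lifecycle sketch (illustrative, hypothetical call site): the init call
+ * pairs with ice_mbx_deinit_snapshot() below:
+ *
+ *	if (ice_mbx_init_snapshot(hw, num_vfs))
+ *		goto err;
+ *	...
+ *	ice_mbx_deinit_snapshot(hw);
+ */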
+
+/**
+ * ice_mbx_deinit_snapshot - Free mailbox snapshot structure
+ * @hw: pointer to the hardware structure
+ *
+ * Clear the mailbox snapshot structure and free the VF counter array.
+ */
+void ice_mbx_deinit_snapshot(struct ice_hw *hw)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+ /* Free VF counter array and reset VF counter length */
+ devm_kfree(ice_hw_to_dev(hw), snap->mbx_vf.vf_cntr);
+ snap->mbx_vf.vfcntr_len = 0;
+
+ /* Clear mbx_buf in the mailbox snapshot structure */
+ memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
new file mode 100644
index 000000000000..582716e6d5f9
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_VF_MBX_H_
+#define _ICE_VF_MBX_H_
+
+#include "ice_type.h"
+#include "ice_controlq.h"
+
+/* Define the mailbox message threshold as 63 asynchronous
+ * pending messages. Normal VF functionality does not require
+ * sending more than 63 asynchronous pending messages.
+ */
+#define ICE_ASYNC_VF_MSG_THRESHOLD 63
+
+#ifdef CONFIG_PCI_IOV
+int
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ u8 *msg, u16 msglen, struct ice_sq_cd *cd);
+
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
+int
+ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
+ u16 vf_id, bool *is_mal_vf);
+int
+ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
+ u16 bitmap_len, u16 vf_id);
+int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count);
+void ice_mbx_deinit_snapshot(struct ice_hw *hw);
+int
+ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
+ u16 bitmap_len, u16 vf_id, bool *report_malvf);
+#else /* CONFIG_PCI_IOV */
+static inline int
+ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
+ u16 __always_unused vfid, u32 __always_unused v_opcode,
+ u32 __always_unused v_retval, u8 __always_unused *msg,
+ u16 __always_unused msglen,
+ struct ice_sq_cd __always_unused *cd)
+{
+ return 0;
+}
+
+static inline u32
+ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support,
+ u16 __always_unused link_speed)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PCI_IOV */
+#endif /* _ICE_VF_MBX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
new file mode 100644
index 000000000000..5ecc0ee9a78e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice_vsi_vlan_ops.h"
+#include "ice_vsi_vlan_lib.h"
+#include "ice_vlan_mode.h"
+#include "ice.h"
+#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_sriov.h"
+
+static int
+noop_vlan_arg(struct ice_vsi __always_unused *vsi,
+ struct ice_vlan __always_unused *vlan)
+{
+ return 0;
+}
+
+static int
+noop_vlan(struct ice_vsi __always_unused *vsi)
+{
+ return 0;
+}
+
+/**
+ * ice_vf_vsi_init_vlan_ops - Initialize default VSI VLAN ops for VF VSI
+ * @vsi: VF's VSI being configured
+ *
+ * If Double VLAN Mode (DVM) is enabled, assume that the VF supports the new
+ * VIRTCHNL_VF_OFFLOAD_VLAN_V2 capability and set up the VLAN ops accordingly.
+ * If SVM is enabled, maintain the same level of VLAN support as before
+ * VIRTCHNL_VF_OFFLOAD_VLAN_V2.
+ */
+void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf = vsi->vf;
+
+ if (WARN_ON(!vf))
+ return;
+
+ if (ice_is_dvm_ena(&pf->hw)) {
+ vlan_ops = &vsi->outer_vlan_ops;
+
+ /* outer VLAN ops regardless of port VLAN config */
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
+ vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
+
+ if (ice_vf_is_port_vlan_ena(vf)) {
+ /* setup outer VLAN ops */
+ vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
+ vlan_ops->ena_rx_filtering =
+ ice_vsi_ena_rx_vlan_filtering;
+
+ /* setup inner VLAN ops */
+ vlan_ops = &vsi->inner_vlan_ops;
+ vlan_ops->add_vlan = noop_vlan_arg;
+ vlan_ops->del_vlan = noop_vlan_arg;
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+ } else {
+ if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
+ vlan_ops->ena_rx_filtering = noop_vlan;
+ else
+ vlan_ops->ena_rx_filtering =
+ ice_vsi_ena_rx_vlan_filtering;
+
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ vlan_ops->ena_stripping = ice_vsi_ena_outer_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_outer_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion;
+
+ /* setup inner VLAN ops */
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+ }
+ } else {
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ /* inner VLAN ops regardless of port VLAN config */
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
+ vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
+
+ if (ice_vf_is_port_vlan_ena(vf)) {
+ vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
+ vlan_ops->ena_rx_filtering =
+ ice_vsi_ena_rx_vlan_filtering;
+ } else {
+ if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
+ vlan_ops->ena_rx_filtering = noop_vlan;
+ else
+ vlan_ops->ena_rx_filtering =
+ ice_vsi_ena_rx_vlan_filtering;
+
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+ }
+ }
+}
+
+/**
+ * ice_vf_vsi_cfg_dvm_legacy_vlan_mode - Config VLAN mode for old VFs in DVM
+ * @vsi: VF's VSI being configured
+ *
+ * This should only be called when Double VLAN Mode (DVM) is enabled, there
+ * is not a port VLAN enabled on this VF, and the VF negotiates
+ * VIRTCHNL_VF_OFFLOAD_VLAN.
+ *
+ * This function sets up the VF VSI's inner and outer ice_vsi_vlan_ops and also
+ * initializes software only VLAN mode (i.e. allow all VLANs). Also, use no-op
+ * implementations for any functions that may be called during the lifetime of
+ * the VF so these methods do nothing and succeed.
+ */
+void ice_vf_vsi_cfg_dvm_legacy_vlan_mode(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_vf *vf = vsi->vf;
+ struct device *dev;
+
+ if (WARN_ON(!vf))
+ return;
+
+ dev = ice_pf_to_dev(vf->pf);
+
+ if (!ice_is_dvm_ena(&vsi->back->hw) || ice_vf_is_port_vlan_ena(vf))
+ return;
+
+ vlan_ops = &vsi->outer_vlan_ops;
+
+ /* Rx VLAN filtering always disabled to allow software offloaded VLANs
+ * for VFs that only support VIRTCHNL_VF_OFFLOAD_VLAN and don't have a
+ * port VLAN configured
+ */
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ /* Don't fail when attempting to enable Rx VLAN filtering */
+ vlan_ops->ena_rx_filtering = noop_vlan;
+
+ /* Tx VLAN filtering always disabled to allow software offloaded VLANs
+ * for VFs that only support VIRTCHNL_VF_OFFLOAD_VLAN and don't have a
+ * port VLAN configured
+ */
+ vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
+ /* Don't fail when attempting to enable Tx VLAN filtering */
+ vlan_ops->ena_tx_filtering = noop_vlan;
+
+ if (vlan_ops->dis_rx_filtering(vsi))
+ dev_dbg(dev, "Failed to disable Rx VLAN filtering for old VF without VIRTCHNL_VF_OFFLOAD_VLAN_V2 support\n");
+ if (vlan_ops->dis_tx_filtering(vsi))
+ dev_dbg(dev, "Failed to disable Tx VLAN filtering for old VF without VIRTHCNL_VF_OFFLOAD_VLAN_V2 support\n");
+
+ /* All outer VLAN offloads must be disabled */
+ vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping;
+ vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion;
+
+ if (vlan_ops->dis_stripping(vsi))
+ dev_dbg(dev, "Failed to disable outer VLAN stripping for old VF without VIRTCHNL_VF_OFFLOAD_VLAN_V2 support\n");
+
+ if (vlan_ops->dis_insertion(vsi))
+ dev_dbg(dev, "Failed to disable outer VLAN insertion for old VF without VIRTCHNL_VF_OFFLOAD_VLAN_V2 support\n");
+
+ /* All inner VLAN offloads must be disabled */
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+
+ if (vlan_ops->dis_stripping(vsi))
+ dev_dbg(dev, "Failed to disable inner VLAN stripping for old VF without VIRTCHNL_VF_OFFLOAD_VLAN_V2 support\n");
+
+ if (vlan_ops->dis_insertion(vsi))
+ dev_dbg(dev, "Failed to disable inner VLAN insertion for old VF without VIRTCHNL_VF_OFFLOAD_VLAN_V2 support\n");
+}
+
+/**
+ * ice_vf_vsi_cfg_svm_legacy_vlan_mode - Config VLAN mode for old VFs in SVM
+ * @vsi: VF's VSI being configured
+ *
+ * This should only be called when Single VLAN Mode (SVM) is enabled, there is
+ * not a port VLAN enabled on this VF, and the VF negotiates
+ * VIRTCHNL_VF_OFFLOAD_VLAN.
+ *
+ * All of the normal SVM VLAN ops are identical for this case. However,
+ * Rx VLAN filtering should be turned off by default in this case.
+ */
+void ice_vf_vsi_cfg_svm_legacy_vlan_mode(struct ice_vsi *vsi)
+{
+ struct ice_vf *vf = vsi->vf;
+
+ if (WARN_ON(!vf))
+ return;
+
+ if (ice_is_dvm_ena(&vsi->back->hw) || ice_vf_is_port_vlan_ena(vf))
+ return;
+
+ if (vsi->inner_vlan_ops.dis_rx_filtering(vsi))
+ dev_dbg(ice_pf_to_dev(vf->pf), "Failed to disable Rx VLAN filtering for old VF with VIRTCHNL_VF_OFFLOAD_VLAN support\n");
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h
new file mode 100644
index 000000000000..875a4e615f39
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_VF_VSI_VLAN_OPS_H_
+#define _ICE_VF_VSI_VLAN_OPS_H_
+
+#include "ice_vsi_vlan_ops.h"
+
+struct ice_vsi;
+
+void ice_vf_vsi_cfg_dvm_legacy_vlan_mode(struct ice_vsi *vsi);
+void ice_vf_vsi_cfg_svm_legacy_vlan_mode(struct ice_vsi *vsi);
+
+#ifdef CONFIG_PCI_IOV
+void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi);
+#else
+static inline void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) { }
+#endif /* CONFIG_PCI_IOV */
+#endif /* _ICE_VF_VSI_VLAN_OPS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 408f78e3eb13..3f1a63815bac 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -1,15 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018, Intel Corporation. */
+/* Copyright (C) 2022, Intel Corporation. */
+#include "ice_virtchnl.h"
+#include "ice_vf_lib_private.h"
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
-#include "ice_dcb_lib.h"
-#include "ice_flow.h"
-#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
+#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_vlan.h"
#include "ice_flex_pipe.h"
+#include "ice_dcb_lib.h"
#define FIELD_SELECTOR(proto_hdr_field) \
BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
@@ -164,45 +166,6 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
};
/**
- * ice_get_vf_vsi - get VF's VSI based on the stored index
- * @vf: VF used to get VSI
- */
-struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
-{
- return vf->pf->vsi[vf->lan_vsi_idx];
-}
-
-/**
- * ice_validate_vf_id - helper to check if VF ID is valid
- * @pf: pointer to the PF structure
- * @vf_id: the ID of the VF to check
- */
-static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
-{
- /* vf_id range is only valid for 0-255, and should always be unsigned */
- if (vf_id >= pf->num_alloc_vfs) {
- dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
- return -EINVAL;
- }
- return 0;
-}
-
-/**
- * ice_check_vf_init - helper to check if VF init complete
- * @pf: pointer to the PF structure
- * @vf: the pointer to the VF to check
- */
-static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
-{
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
- vf->vf_id);
- return -EBUSY;
- }
- return 0;
-}
-
-/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
* @v_opcode: operation code
@@ -215,11 +178,11 @@ ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
struct ice_hw *hw = &pf->hw;
- unsigned int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
+ struct ice_vf *vf;
+ unsigned int bkt;
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
/* Not all vfs are enabled so skip the ones that are not */
if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
@@ -231,6 +194,7 @@ ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
msglen, NULL);
}
+ mutex_unlock(&pf->vfs.table_lock);
}
/**
@@ -259,39 +223,6 @@ ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
}
/**
- * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
- * @vf: the VF to check
- *
- * Returns true if the VF has no Rx and no Tx queues enabled and returns false
- * otherwise
- */
-static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
-{
- return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
- !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
-}
-
-/**
- * ice_is_vf_link_up - check if the VF's link is up
- * @vf: VF to check if link is up
- */
-static bool ice_is_vf_link_up(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
-
- if (ice_check_vf_init(pf, vf))
- return false;
-
- if (ice_vf_has_no_qs_ena(vf))
- return false;
- else if (vf->link_forced)
- return vf->link_up;
- else
- return pf->hw.port_info->phy.link_info.link_info &
- ICE_AQ_LINK_UP;
-}
-
-/**
* ice_vc_notify_vf_link_state - Inform a VF of link status
* @vf: pointer to the VF structure
*
@@ -317,1367 +248,18 @@ void ice_vc_notify_vf_link_state(struct ice_vf *vf)
}
/**
- * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
- * @vf: VF to remove access to VSI for
- */
-static void ice_vf_invalidate_vsi(struct ice_vf *vf)
-{
- vf->lan_vsi_idx = ICE_NO_VSI;
- vf->lan_vsi_num = ICE_NO_VSI;
-}
-
-/**
- * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
- * @vf: invalidate this VF's VSI after freeing it
- */
-static void ice_vf_vsi_release(struct ice_vf *vf)
-{
- ice_vsi_release(ice_get_vf_vsi(vf));
- ice_vf_invalidate_vsi(vf);
-}
-
-/**
- * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
- * @vf: VF that control VSI is being invalidated on
- */
-static void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
-{
- vf->ctrl_vsi_idx = ICE_NO_VSI;
-}
-
-/**
- * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
- * @vf: VF that control VSI is being released on
- */
-static void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
-{
- ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
- ice_vf_ctrl_invalidate_vsi(vf);
-}
-
-/**
- * ice_free_vf_res - Free a VF's resources
- * @vf: pointer to the VF info
- */
-static void ice_free_vf_res(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
- int i, last_vector_idx;
-
- /* First, disable VF's configuration API to prevent OS from
- * accessing the VF's VSI after it's freed or invalidated.
- */
- clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
- ice_vf_fdir_exit(vf);
- /* free VF control VSI */
- if (vf->ctrl_vsi_idx != ICE_NO_VSI)
- ice_vf_ctrl_vsi_release(vf);
-
- /* free VSI and disconnect it from the parent uplink */
- if (vf->lan_vsi_idx != ICE_NO_VSI) {
- ice_vf_vsi_release(vf);
- vf->num_mac = 0;
- }
-
- last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;
-
- /* clear VF MDD event information */
- memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
- memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
-
- /* Disable interrupts so that VF starts in a known state */
- for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
- wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
- ice_flush(&pf->hw);
- }
- /* reset some of the state variables keeping track of the resources */
- clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
- clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
-}
-
-/**
- * ice_dis_vf_mappings
- * @vf: pointer to the VF structure
- */
-static void ice_dis_vf_mappings(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- struct device *dev;
- int first, last, v;
- struct ice_hw *hw;
-
- hw = &pf->hw;
- vsi = ice_get_vf_vsi(vf);
-
- dev = ice_pf_to_dev(pf);
- wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
- wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
-
- first = vf->first_vector_idx;
- last = first + pf->num_msix_per_vf - 1;
- for (v = first; v <= last; v++) {
- u32 reg;
-
- reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
- GLINT_VECT2FUNC_IS_PF_M) |
- ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
- GLINT_VECT2FUNC_PF_NUM_M));
- wr32(hw, GLINT_VECT2FUNC(v), reg);
- }
-
- if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
- wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
- else
- dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
-
- if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
- wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
- else
- dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
-}
-
-/**
- * ice_sriov_free_msix_res - Reset/free any used MSIX resources
- * @pf: pointer to the PF structure
- *
- * Since no MSIX entries are taken from the pf->irq_tracker then just clear
- * the pf->sriov_base_vector.
- *
- * Returns 0 on success, and -EINVAL on error.
- */
-static int ice_sriov_free_msix_res(struct ice_pf *pf)
-{
- struct ice_res_tracker *res;
-
- if (!pf)
- return -EINVAL;
-
- res = pf->irq_tracker;
- if (!res)
- return -EINVAL;
-
- /* give back irq_tracker resources used */
- WARN_ON(pf->sriov_base_vector < res->num_entries);
-
- pf->sriov_base_vector = 0;
-
- return 0;
-}
-
-/**
- * ice_set_vf_state_qs_dis - Set VF queues state to disabled
- * @vf: pointer to the VF structure
- */
-void ice_set_vf_state_qs_dis(struct ice_vf *vf)
-{
- /* Clear Rx/Tx enabled queues flag */
- bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
- bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
- clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-}
-
-/**
- * ice_dis_vf_qs - Disable the VF queues
- * @vf: pointer to the VF structure
- */
-static void ice_dis_vf_qs(struct ice_vf *vf)
-{
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
- ice_vsi_stop_all_rx_rings(vsi);
- ice_set_vf_state_qs_dis(vf);
-}
-
-/**
- * ice_free_vfs - Free all VFs
- * @pf: pointer to the PF structure
- */
-void ice_free_vfs(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- unsigned int tmp, i;
-
- if (!pf->vf)
- return;
-
- ice_eswitch_release(pf);
-
- while (test_and_set_bit(ICE_VF_DIS, pf->state))
- usleep_range(1000, 2000);
-
- /* Disable IOV before freeing resources. This lets any VF drivers
- * running in the host get themselves cleaned up before we yank
- * the carpet out from underneath their feet.
- */
- if (!pci_vfs_assigned(pf->pdev))
- pci_disable_sriov(pf->pdev);
- else
- dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
-
- tmp = pf->num_alloc_vfs;
- pf->num_qps_per_vf = 0;
- pf->num_alloc_vfs = 0;
- for (i = 0; i < tmp; i++) {
- struct ice_vf *vf = &pf->vf[i];
-
- mutex_lock(&vf->cfg_lock);
-
- ice_dis_vf_qs(vf);
-
- if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- /* disable VF qp mappings and set VF disable state */
- ice_dis_vf_mappings(vf);
- set_bit(ICE_VF_STATE_DIS, vf->vf_states);
- ice_free_vf_res(vf);
- }
-
- mutex_unlock(&vf->cfg_lock);
-
- mutex_destroy(&vf->cfg_lock);
- }
-
- if (ice_sriov_free_msix_res(pf))
- dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
-
- devm_kfree(dev, pf->vf);
- pf->vf = NULL;
-
- /* This check is for when the driver is unloaded while VFs are
- * assigned. Setting the number of VFs to 0 through sysfs is caught
- * before this function ever gets called.
- */
- if (!pci_vfs_assigned(pf->pdev)) {
- unsigned int vf_id;
-
- /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
- * work correctly when SR-IOV gets re-enabled.
- */
- for (vf_id = 0; vf_id < tmp; vf_id++) {
- u32 reg_idx, bit_idx;
-
- reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
- bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
- wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
- }
- }
-
- /* clear malicious info if the VFs are getting released */
- for (i = 0; i < tmp; i++)
- if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs,
- ICE_MAX_VF_COUNT, i))
- dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
- i);
-
- clear_bit(ICE_VF_DIS, pf->state);
- clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
-}
-
-/**
- * ice_trigger_vf_reset - Reset a VF on HW
- * @vf: pointer to the VF structure
- * @is_vflr: true if VFLR was issued, false if not
- * @is_pfr: true if the reset was triggered due to a previous PFR
- *
- * Trigger hardware to start a reset for a particular VF. Expects the caller
- * to wait the proper amount of time to allow hardware to reset the VF before
- * it cleans up and restores VF functionality.
- */
-static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
-{
- struct ice_pf *pf = vf->pf;
- u32 reg, reg_idx, bit_idx;
- unsigned int vf_abs_id, i;
- struct device *dev;
- struct ice_hw *hw;
-
- dev = ice_pf_to_dev(pf);
- hw = &pf->hw;
- vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
-
- /* Inform VF that it is no longer active, as a warning */
- clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
-
- /* Disable VF's configuration API during reset. The flag is re-enabled
- * when it's safe again to access VF's VSI.
- */
- clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
-
- /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
- * needs to clear them in the case of VFR/VFLR. If this is done for
- * PFR, it can mess up VF resets because the VF driver may already
- * have started cleanup by the time we get here.
- */
- if (!is_pfr) {
- wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
- wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
- }
-
- /* In the case of a VFLR, the HW has already reset the VF and we
- * just need to clean up, so don't hit the VFRTRIG register.
- */
- if (!is_vflr) {
- /* reset VF using VPGEN_VFRTRIG reg */
- reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
- reg |= VPGEN_VFRTRIG_VFSWR_M;
- wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
- }
- /* clear the VFLR bit in GLGEN_VFLRSTAT */
- reg_idx = (vf_abs_id) / 32;
- bit_idx = (vf_abs_id) % 32;
- wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
- ice_flush(hw);
-
- wr32(hw, PF_PCI_CIAA,
- VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
- for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
- reg = rd32(hw, PF_PCI_CIAD);
- /* no transactions pending so stop polling */
- if ((reg & VF_TRANS_PENDING_M) == 0)
- break;
-
- dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
- udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
- }
-}
-
-/**
- * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
- * @vsi: the VSI to update
- * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
- * @enable: true for enable PVID false for disable
- */
-static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
-{
- struct ice_hw *hw = &vsi->back->hw;
- struct ice_aqc_vsi_props *info;
- struct ice_vsi_ctx *ctxt;
- int ret;
-
- ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
- if (!ctxt)
- return -ENOMEM;
-
- ctxt->info = vsi->info;
- info = &ctxt->info;
- if (enable) {
- info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
- ICE_AQ_VSI_PVLAN_INSERT_PVID |
- ICE_AQ_VSI_VLAN_EMOD_STR;
- info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- } else {
- info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
- ICE_AQ_VSI_VLAN_MODE_ALL;
- info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- }
-
- info->pvid = cpu_to_le16(pvid_info);
- info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
- ICE_AQ_VSI_PROP_SW_VALID);
-
- ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (ret) {
- dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
- goto out;
- }
-
- vsi->info.vlan_flags = info->vlan_flags;
- vsi->info.sw_flags2 = info->sw_flags2;
- vsi->info.pvid = info->pvid;
-out:
- kfree(ctxt);
- return ret;
-}
-
-/**
- * ice_vf_get_port_info - Get the VF's port info structure
- * @vf: VF used to get the port info structure for
- */
-static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
-{
- return vf->pf->hw.port_info;
-}
-
-/**
- * ice_vf_vsi_setup - Set up a VF VSI
- * @vf: VF to setup VSI for
- *
- * Returns a pointer to the allocated VSI struct on success, or NULL on
- * failure.
- */
-static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
-{
- struct ice_port_info *pi = ice_vf_get_port_info(vf);
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
-
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id, NULL);
-
- if (!vsi) {
- dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
- ice_vf_invalidate_vsi(vf);
- return NULL;
- }
-
- vf->lan_vsi_idx = vsi->idx;
- vf->lan_vsi_num = vsi->vsi_num;
-
- return vsi;
-}
-
-/**
- * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
- * @vf: VF to setup control VSI for
- *
- * Returns a pointer to the allocated VSI struct on success, or NULL on
- * failure.
- */
-struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
-{
- struct ice_port_info *pi = ice_vf_get_port_info(vf);
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
-
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id, NULL);
- if (!vsi) {
- dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
- ice_vf_ctrl_invalidate_vsi(vf);
- }
-
- return vsi;
-}
-
-/**
- * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
- * @pf: pointer to PF structure
- * @vf: pointer to VF that the first MSIX vector index is being calculated for
- *
- * This returns the first MSIX vector index in PF space that is used by this VF.
- * This index is used when accessing PF relative registers such as
- * GLINT_VECT2FUNC and GLINT_DYN_CTL.
- * This will always be the OICR index in the AVF driver so any functionality
- * using vf->first_vector_idx for queue configuration will have to increment by
- * 1 to avoid meddling with the OICR index.
- */
-static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
-{
- return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
-}
-
-/**
- * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
- * @vf: VF to re-apply the configuration for
- *
- * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
- * needs to re-apply the host configured Tx rate limiting configuration.
- */
-static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- int err;
-
- if (vf->min_tx_rate) {
- err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
- if (err) {
- dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
- vf->min_tx_rate, vf->vf_id, err);
- return err;
- }
- }
-
- if (vf->max_tx_rate) {
- err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
- if (err) {
- dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
- vf->max_tx_rate, vf->vf_id, err);
- return err;
- }
- }
-
- return 0;
-}
-
-/**
- * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
- * @vf: VF to add MAC filters for
- *
- * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
- * always re-adds either a VLAN 0 or port VLAN based filter after reset.
- */
-static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- u16 vlan_id = 0;
- int err;
-
- if (vf->port_vlan_info) {
- err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
- if (err) {
- dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
- vf->vf_id, err);
- return err;
- }
-
- vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
- }
-
- /* vlan_id will either be 0 or the port VLAN number */
- err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
- if (err) {
- dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
- vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
- err);
- return err;
- }
-
- return 0;
-}
-
-/**
- * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
- * @vf: VF to add MAC filters for
- *
- * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
- * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
- */
-static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- u8 broadcast[ETH_ALEN];
- int status;
-
- if (ice_is_eswitch_mode_switchdev(vf->pf))
- return 0;
-
- eth_broadcast_addr(broadcast);
- status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
- if (status) {
- dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
- vf->vf_id, status);
- return status;
- }
-
- vf->num_mac++;
-
- if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
- status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
- ICE_FWD_TO_VSI);
- if (status) {
- dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
- &vf->hw_lan_addr.addr[0], vf->vf_id,
- status);
- return status;
- }
- vf->num_mac++;
-
- ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
- }
-
- return 0;
-}
-
-/**
- * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
- * @vf: VF to configure trust setting for
- */
-static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
-{
- if (vf->trusted)
- set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
- else
- clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
-}
-
-/**
- * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
- * @vf: VF to enable MSIX mappings for
- *
- * Some of these registers are indexed using hardware-global device values,
- * while others take 0-based values that are relative to the PF (see the
- * sketch after this function).
- */
-static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
-{
- int device_based_first_msix, device_based_last_msix;
- int pf_based_first_msix, pf_based_last_msix, v;
- struct ice_pf *pf = vf->pf;
- int device_based_vf_id;
- struct ice_hw *hw;
- u32 reg;
-
- hw = &pf->hw;
- pf_based_first_msix = vf->first_vector_idx;
- pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;
-
- device_based_first_msix = pf_based_first_msix +
- pf->hw.func_caps.common_cap.msix_vector_first_id;
- device_based_last_msix =
- (device_based_first_msix + pf->num_msix_per_vf) - 1;
- device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
-
- reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
- VPINT_ALLOC_FIRST_M) |
- ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
- VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
- wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
-
- reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
- & VPINT_ALLOC_PCI_FIRST_M) |
- ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
- VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
- wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
-
- /* map the interrupts to its functions */
- for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
- reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
- GLINT_VECT2FUNC_VF_NUM_M) |
- ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
- GLINT_VECT2FUNC_PF_NUM_M));
- wr32(hw, GLINT_VECT2FUNC(v), reg);
- }
-
- /* Map mailbox interrupt to VF MSI-X vector 0 */
- wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
-}
-
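
Aside: a minimal user-space sketch of the two index spaces used above. The capability values (msix_vector_first_id, vf_base_id) and the SR-IOV base are illustrative assumptions, not values read from hardware:

    #include <stdio.h>

    /* Illustrative capability values; the real ones come from func_caps. */
    #define MSIX_VECTOR_FIRST_ID 1   /* first device-global MSI-X index of the PF */
    #define VF_BASE_ID           64  /* device-global ID of the PF's first VF */

    int main(void)
    {
            int sriov_base_vector = 96;  /* assumed PF-relative SR-IOV base */
            int num_msix_per_vf = 17;    /* "medium" VF: OICR + 16 queue vectors */
            int vf_id = 2;

            /* PF-relative indexing, as used for GLINT_VECT2FUNC */
            int pf_first = sriov_base_vector + vf_id * num_msix_per_vf;
            int pf_last = pf_first + num_msix_per_vf - 1;

            /* device-global indexing, as used for VPINT_ALLOC/VPINT_ALLOC_PCI */
            int dev_first = pf_first + MSIX_VECTOR_FIRST_ID;
            int dev_last = dev_first + num_msix_per_vf - 1;

            printf("PF-relative vectors:   %d..%d\n", pf_first, pf_last);
            printf("device-global vectors: %d..%d\n", dev_first, dev_last);
            printf("device-global VF id:   %d\n", vf_id + VF_BASE_ID);
            return 0;
    }
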
-/**
- * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
- * @vf: VF to enable the mappings for
- * @max_txq: max Tx queues allowed on the VF's VSI
- * @max_rxq: max Rx queues allowed on the VF's VSI
- */
-static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- struct ice_hw *hw = &vf->pf->hw;
- u32 reg;
-
- /* set regardless of mapping mode */
- wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
-
- /* VF Tx queues allocation */
- if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
- /* set the VF PF Tx queue range
- * VFNUMQ value should be set to (number of queues - 1). A value
- * of 0 means 1 queue and a value of 255 means 256 queues
- */
- reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
- VPLAN_TX_QBASE_VFFIRSTQ_M) |
- (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
- VPLAN_TX_QBASE_VFNUMQ_M));
- wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
- } else {
- dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
- }
-
- /* set regardless of mapping mode */
- wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
-
- /* VF Rx queues allocation */
- if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
- /* set the VF PF Rx queue range
- * VFNUMQ value should be set to (number of queues - 1). A value
- * of 0 means 1 queue and a value of 255 means 256 queues
- */
- reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
- VPLAN_RX_QBASE_VFFIRSTQ_M) |
- (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
- VPLAN_RX_QBASE_VFNUMQ_M));
- wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
- } else {
- dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
- }
-}
-
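
Aside: the VFNUMQ field encodes the queue count minus one, as the comments above note. A small sketch of the pack step, with assumed shift/mask values standing in for the real VPLAN_TX_QBASE field definitions in the ice register headers:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed shift/mask layout, for illustration only. */
    #define FIRSTQ_S 0
    #define FIRSTQ_M 0x3fffu
    #define NUMQ_S   16
    #define NUMQ_M   (0xffu << NUMQ_S)

    static uint32_t pack_qbase(uint16_t first_queue, uint16_t num_queues)
    {
            /* VFNUMQ is (number of queues - 1): 0 means 1 queue, 255 means 256 */
            return ((first_queue << FIRSTQ_S) & FIRSTQ_M) |
                   (((uint32_t)(num_queues - 1) << NUMQ_S) & NUMQ_M);
    }

    int main(void)
    {
            /* first PF queue 40, 16 queues -> the VFNUMQ field holds 15 */
            printf("0x%08x\n", pack_qbase(40, 16));
            return 0;
    }
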
-/**
- * ice_ena_vf_mappings - enable VF MSIX and queue mapping
- * @vf: pointer to the VF structure
- */
-static void ice_ena_vf_mappings(struct ice_vf *vf)
-{
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- ice_ena_vf_msix_mappings(vf);
- ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
-}
-
-/**
- * ice_determine_res
- * @pf: pointer to the PF structure
- * @avail_res: available resources in the PF structure
- * @max_res: maximum resources that can be given per VF
- * @min_res: minimum resources that can be given per VF
- *
- * Returns a non-zero value if resources (queues/vectors) are available, or
- * zero if the PF cannot accommodate all num_alloc_vfs.
- */
-static int
-ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
-{
- bool checked_min_res = false;
- int res;
-
- /* Start by checking whether the PF can assign the maximum number of
- * resources to all num_alloc_vfs. If yes, return that number per VF.
- * If not, divide by 2 and round up, then check again. Repeat until even
- * the minimum resources are unavailable, in which case return 0.
- */
- res = max_res;
- while ((res >= min_res) && !checked_min_res) {
- int num_all_res;
-
- num_all_res = pf->num_alloc_vfs * res;
- if (num_all_res <= avail_res)
- return res;
-
- if (res == min_res)
- checked_min_res = true;
-
- res = DIV_ROUND_UP(res, 2);
- }
- return 0;
-}
-
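
Aside: a stand-alone copy of the halving search above, for experimentation; the worked case in main() assumes 8 VFs, 100 available queues, and a 16..1 range:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Stand-alone copy of the halving search in ice_determine_res() above. */
    static int determine_res(int num_vfs, int avail, int max_res, int min_res)
    {
            int res = max_res;
            int checked_min = 0;

            while (res >= min_res && !checked_min) {
                    if (num_vfs * res <= avail)
                            return res;
                    if (res == min_res)
                            checked_min = 1;
                    res = DIV_ROUND_UP(res, 2);
            }
            return 0;
    }

    int main(void)
    {
            /* 8 VFs, 100 queues free, 16 max, 1 min: 16*8 = 128 > 100, halve
             * to 8; 8*8 = 64 <= 100, so each VF gets 8 queues
             */
            printf("%d\n", determine_res(8, 100, 16, 1));
            return 0;
    }
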
-/**
- * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
- * @vf: VF to calculate the register index for
- * @q_vector: a q_vector associated to the VF
- */
-int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
-{
- struct ice_pf *pf;
-
- if (!vf || !q_vector)
- return -EINVAL;
-
- pf = vf->pf;
-
- /* always add one to account for the OICR being the first MSIX */
- return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
- q_vector->v_idx + 1;
-}
-
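
Aside: a sketch of the OICR-plus-one rule implemented by the two helpers above, with assumed base and per-VF vector counts:

    #include <stdio.h>

    /* Vector 0 of each VF is the OICR, so queue vector v_idx N lands at
     * first_vector_idx + N + 1; base and sizing values are assumed.
     */
    int main(void)
    {
            int sriov_base_vector = 96; /* assumed */
            int num_msix_per_vf = 5;    /* small VF: OICR + 4 queue vectors */
            int vf_id = 0;
            int first = sriov_base_vector + vf_id * num_msix_per_vf;
            int v_idx;

            printf("OICR -> PF vector %d\n", first);
            for (v_idx = 0; v_idx < num_msix_per_vf - 1; v_idx++)
                    printf("q_vector %d -> PF vector %d\n", v_idx,
                           first + v_idx + 1);
            return 0;
    }
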
-/**
- * ice_get_max_valid_res_idx - Get the max valid resource index
- * @res: pointer to the resource to find the max valid index for
- *
- * Start from the end of the ice_res_tracker and return as soon as we find
- * the first res->list entry with the ICE_RES_VALID_BIT set. This function is
- * only valid for SR-IOV because it is the only consumer that manipulates
- * res->end, and it is always called when res->end is set to res->num_entries.
- */
-static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
-{
- int i;
-
- if (!res)
- return -EINVAL;
-
- for (i = res->num_entries - 1; i >= 0; i--)
- if (res->list[i] & ICE_RES_VALID_BIT)
- return i;
-
- return 0;
-}
-
-/**
- * ice_sriov_set_msix_res - Set any used MSIX resources
- * @pf: pointer to PF structure
- * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
- *
- * This function allows SR-IOV resources to be taken from the end of the PF's
- * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
- * just set the pf->sriov_base_vector and return success.
- *
- * If there are not enough resources available, return an error. This should
- * always be caught by ice_set_per_vf_res().
- *
- * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
- * in the PF's space available for SR-IOV.
- */
-static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
-{
- u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- int vectors_used = pf->irq_tracker->num_entries;
- int sriov_base_vector;
-
- sriov_base_vector = total_vectors - num_msix_needed;
-
- /* make sure we only grab irq_tracker entries from the list end and
- * that we have enough available MSIX vectors
- */
- if (sriov_base_vector < vectors_used)
- return -EINVAL;
-
- pf->sriov_base_vector = sriov_base_vector;
-
- return 0;
-}
-
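
Aside: the carve-from-the-end arithmetic in miniature, with assumed totals:

    #include <stdio.h>

    int main(void)
    {
            int total_vectors = 1024;     /* assumed num_msix_vectors */
            int vectors_used = 128;       /* assumed irq_tracker entries */
            int num_msix_needed = 8 * 17; /* e.g. 8 VFs x 17 vectors */
            int base = total_vectors - num_msix_needed;

            if (base < vectors_used)
                    printf("not enough MSI-X room for SR-IOV\n");
            else
                    printf("sriov_base_vector = %d (vectors %d..%d)\n",
                           base, base, total_vectors - 1);
            return 0;
    }

Allocating from the top keeps the irq_tracker entries at the front of the space untouched, as the kernel-doc above notes.
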
-/**
- * ice_set_per_vf_res - check if vectors and queues are available
- * @pf: pointer to the PF structure
- *
- * First, determine the HW interrupts available from the common pool. If we
- * allocate fewer VFs, we get more vectors and can enable more queues per VF.
- * Note that this does not grab any vectors from the SW pool already
- * allocated. Also note that all vector counts include one for each VF's
- * miscellaneous interrupt vector (i.e. OICR).
- *
- * Minimum VFs - 2 vectors, 1 queue pair
- * Small VFs - 5 vectors, 4 queue pairs
- * Medium VFs - 17 vectors, 16 queue pairs
- *
- * Second, determine number of queue pairs per VF by starting with a pre-defined
- * maximum each VF supports. If this is not possible, then we adjust based on
- * queue pairs available on the device.
- *
- * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
- * by each VF during VF initialization and reset.
- */
-static int ice_set_per_vf_res(struct ice_pf *pf)
-{
- int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
- int msix_avail_per_vf, msix_avail_for_sriov;
- struct device *dev = ice_pf_to_dev(pf);
- u16 num_msix_per_vf, num_txq, num_rxq;
-
- if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
- return -EINVAL;
-
- /* determine MSI-X resources per VF */
- msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
- pf->irq_tracker->num_entries;
- msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
- if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
- num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
- } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
- num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
- } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
- num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
- } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
- num_msix_per_vf = ICE_MIN_INTR_PER_VF;
- } else {
- dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
- msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
- pf->num_alloc_vfs);
- return -EIO;
- }
-
- /* determine queue resources per VF */
- num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
- min_t(u16,
- num_msix_per_vf - ICE_NONQ_VECS_VF,
- ICE_MAX_RSS_QS_PER_VF),
- ICE_MIN_QS_PER_VF);
-
- num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
- min_t(u16,
- num_msix_per_vf - ICE_NONQ_VECS_VF,
- ICE_MAX_RSS_QS_PER_VF),
- ICE_MIN_QS_PER_VF);
-
- if (!num_txq || !num_rxq) {
- dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
- ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
- return -EIO;
- }
-
- if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
- dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
- pf->num_alloc_vfs);
- return -EINVAL;
- }
-
- /* only allow equal Tx/Rx queue count (i.e. queue pairs) */
- pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
- pf->num_msix_per_vf = num_msix_per_vf;
- dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
- pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);
-
- return 0;
-}
-
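
Aside: the MSI-X tier ladder above, reduced to a sketch. The medium (17), small (5) and minimum (2) values match the kernel-doc comment; the MULTIQ_MIN value of 3 is an assumption standing in for ICE_NUM_VF_MSIX_MULTIQ_MIN:

    #include <stdio.h>

    static int pick_msix_per_vf(int msix_avail_for_sriov, int num_vfs)
    {
            int per_vf = msix_avail_for_sriov / num_vfs;

            if (per_vf >= 17)
                    return 17;      /* medium: 16 queue pairs + OICR */
            if (per_vf >= 5)
                    return 5;       /* small: 4 queue pairs + OICR */
            if (per_vf >= 3)
                    return 3;       /* assumed MULTIQ_MIN */
            if (per_vf >= 2)
                    return 2;       /* minimum: 1 queue pair + OICR */
            return 0;               /* cannot support this many VFs */
    }

    int main(void)
    {
            printf("4 VFs  -> %d vectors each\n", pick_msix_per_vf(100, 4));
            printf("40 VFs -> %d vectors each\n", pick_msix_per_vf(100, 40));
            return 0;
    }
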
-/**
- * ice_clear_vf_reset_trigger - enable VF to access hardware
- * @vf: VF to enable hardware access for
- */
-static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
-{
- struct ice_hw *hw = &vf->pf->hw;
- u32 reg;
-
- reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
- reg &= ~VPGEN_VFRTRIG_VFSWR_M;
- wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
- ice_flush(hw);
-}
-
-static int
-ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
-{
- struct ice_hw *hw = &vsi->back->hw;
- int status;
-
- if (vf->port_vlan_info)
- status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
- vf->port_vlan_info & VLAN_VID_MASK);
- else if (vsi->num_vlan > 1)
- status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
- else
- status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
-
- if (status && status != -EEXIST) {
- dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
- vf->vf_id, status);
- return status;
- }
-
- return 0;
-}
-
-static int
-ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
-{
- struct ice_hw *hw = &vsi->back->hw;
- int status;
-
- if (vf->port_vlan_info)
- status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
- vf->port_vlan_info & VLAN_VID_MASK);
- else if (vsi->num_vlan > 1)
- status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
- else
- status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
-
- if (status && status != -ENOENT) {
- dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
- vf->vf_id, status);
- return status;
- }
-
- return 0;
-}
-
-static void ice_vf_clear_counters(struct ice_vf *vf)
-{
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- vf->num_mac = 0;
- vsi->num_vlan = 0;
- memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
- memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
-}
-
-/**
- * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
- * @vf: VF to perform pre VSI rebuild tasks
- *
- * These tasks are items that don't need to be amortized since they are most
- * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
- */
-static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
-{
- ice_vf_clear_counters(vf);
- ice_clear_vf_reset_trigger(vf);
-}
-
-/**
- * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
- * @vsi: Pointer to VSI
- *
- * This function moves VSI into corresponding scheduler aggregator node
- * based on cached value of "aggregator node info" per VSI
- */
-static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- struct device *dev;
- int status;
-
- if (!vsi->agg_node)
- return;
-
- dev = ice_pf_to_dev(pf);
- if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
- dev_dbg(dev,
- "agg_id %u already has reached max_num_vsis %u\n",
- vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
- return;
- }
-
- status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
- vsi->idx, vsi->tc_cfg.ena_tc);
- if (status)
- dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
- vsi->idx, vsi->agg_node->agg_id);
- else
- vsi->agg_node->num_vsis++;
-}
-
-/**
- * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
- * @vf: VF to rebuild host configuration on
- */
-static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- ice_vf_set_host_trust_cfg(vf);
-
- if (ice_vf_rebuild_host_mac_cfg(vf))
- dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
- vf->vf_id);
-
- if (ice_vf_rebuild_host_vlan_cfg(vf))
- dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
- vf->vf_id);
-
- if (ice_vf_rebuild_host_tx_rate_cfg(vf))
- dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
- vf->vf_id);
-
- /* rebuild aggregator node config for main VF VSI */
- ice_vf_rebuild_aggregator_node_cfg(vsi);
-}
-
-/**
- * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI
- * @vf: VF to release and setup the VSI for
- *
- * This is only called when a single VF is being reset (e.g. VFR, VFLR, a host
- * VF configuration change).
- */
-static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
-{
- ice_vf_vsi_release(vf);
- if (!ice_vf_vsi_setup(vf))
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * ice_vf_rebuild_vsi - rebuild the VF's VSI
- * @vf: VF to rebuild the VSI for
- *
- * This is only called when all VF(s) are being reset (e.g. a PCIe reset on
- * the host, PFR, CORER).
- */
-static int ice_vf_rebuild_vsi(struct ice_vf *vf)
-{
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- struct ice_pf *pf = vf->pf;
-
- if (ice_vsi_rebuild(vsi, true)) {
- dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
- vf->vf_id);
- return -EIO;
- }
- /* vsi->idx will remain the same in this case so don't update
- * vf->lan_vsi_idx
- */
- vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
- vf->lan_vsi_num = vsi->vsi_num;
-
- return 0;
-}
-
-/**
- * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
- * @vf: VF to set in initialized state
- *
- * After this function the VF will be ready to receive/handle the
- * VIRTCHNL_OP_GET_VF_RESOURCES message
- */
-static void ice_vf_set_initialized(struct ice_vf *vf)
-{
- ice_set_vf_state_qs_dis(vf);
- clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
- clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
- clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
- set_bit(ICE_VF_STATE_INIT, vf->vf_states);
-}
-
-/**
- * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
- * @vf: VF to perform tasks on
- */
-static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
- struct ice_hw *hw;
-
- hw = &pf->hw;
-
- ice_vf_rebuild_host_cfg(vf);
-
- ice_vf_set_initialized(vf);
- ice_ena_vf_mappings(vf);
- wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
-}
-
-/**
- * ice_reset_all_vfs - reset all allocated VFs in one go
- * @pf: pointer to the PF structure
- * @is_vflr: true if VFLR was issued, false if not
- *
- * First, tell the hardware to reset each VF, then do all the waiting in one
- * chunk, and finally finish restoring each VF after the wait. This is useful
- * during PF routines which need to reset all VFs, as otherwise the PF would
- * have to perform these resets serially.
- *
- * Returns true if any VFs were reset, and false otherwise.
- */
-bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
-{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- struct ice_vf *vf;
- int v, i;
-
- /* If we don't have any VFs, then there is nothing to reset */
- if (!pf->num_alloc_vfs)
- return false;
-
- /* clear all malicious info if the VFs are getting reset */
- ice_for_each_vf(pf, i)
- if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, i))
- dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i);
-
- /* If VFs have been disabled, there is no need to reset */
- if (test_and_set_bit(ICE_VF_DIS, pf->state))
- return false;
-
- /* Begin reset on all VFs at once */
- ice_for_each_vf(pf, v)
- ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
-
- /* HW requires some time to make sure it can flush the FIFO for a VF
- * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
- * sequence to make sure that it has completed. We'll keep track of
- * the VFs using a simple iterator that increments once that VF has
- * finished resetting.
- */
- for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
- /* Check each VF in sequence */
- while (v < pf->num_alloc_vfs) {
- u32 reg;
-
- vf = &pf->vf[v];
- reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
- if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
- /* only delay if the check failed */
- usleep_range(10, 20);
- break;
- }
-
- /* If the current VF has finished resetting, move on
- * to the next VF in sequence.
- */
- v++;
- }
- }
-
- /* Display a warning if at least one VF didn't manage to reset in
- * time, but continue on with the operation.
- */
- if (v < pf->num_alloc_vfs)
- dev_warn(dev, "VF reset check timeout\n");
-
- /* free VF resources to begin resetting the VSI state */
- ice_for_each_vf(pf, v) {
- vf = &pf->vf[v];
-
- mutex_lock(&vf->cfg_lock);
-
- vf->driver_caps = 0;
- ice_vc_set_default_allowlist(vf);
-
- ice_vf_fdir_exit(vf);
- ice_vf_fdir_init(vf);
- /* clean VF control VSI when resetting VFs since it should be
- * setup only when VF creates its first FDIR rule.
- */
- if (vf->ctrl_vsi_idx != ICE_NO_VSI)
- ice_vf_ctrl_invalidate_vsi(vf);
-
- ice_vf_pre_vsi_rebuild(vf);
- ice_vf_rebuild_vsi(vf);
- ice_vf_post_vsi_rebuild(vf);
-
- mutex_unlock(&vf->cfg_lock);
- }
-
- if (ice_is_eswitch_mode_switchdev(pf))
- if (ice_eswitch_rebuild(pf))
- dev_warn(dev, "eswitch rebuild failed\n");
-
- ice_flush(hw);
- clear_bit(ICE_VF_DIS, pf->state);
-
- return true;
-}
-
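
Aside: the trigger-all-then-poll-all pattern above, as a stubbed sketch; the hardware accessors are placeholders, not driver calls:

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_VFS   4
    #define MAX_POLLS 10

    /* Placeholders for the VPGEN_VFRTRIG write and VPGEN_VFRSTAT read. */
    static void trigger_reset(int vf) { (void)vf; }
    static bool reset_done(int vf) { (void)vf; return true; }

    int main(void)
    {
            int i, v;

            /* phase 1: kick off every reset before waiting on any of them */
            for (v = 0; v < NUM_VFS; v++)
                    trigger_reset(v);

            /* phase 2: one shared polling budget; v only advances once the
             * current VF reports done, so all VFs share the same wait window
             */
            for (i = 0, v = 0; i < MAX_POLLS && v < NUM_VFS; i++) {
                    while (v < NUM_VFS) {
                            if (!reset_done(v))
                                    break; /* delay, then burn a poll slot */
                            v++;
                    }
            }

            if (v < NUM_VFS)
                    printf("VF reset check timeout\n");
            return 0;
    }

Triggering everything first amortizes the hardware settling time across all VFs instead of paying it once per VF.
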
-/**
- * ice_is_vf_disabled
- * @vf: pointer to the VF info
- *
- * Returns true if the PF or VF is disabled, false otherwise.
- */
-bool ice_is_vf_disabled(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
-
- /* If the PF has been disabled, there is no need to reset the VF until
- * the PF is active again. Similarly, if the VF has been disabled, that
- * means something else is resetting it, so the caller shouldn't
- * continue.
- */
- return (test_bit(ICE_VF_DIS, pf->state) ||
- test_bit(ICE_VF_STATE_DIS, vf->vf_states));
-}
-
-/**
- * ice_reset_vf - Reset a particular VF
- * @vf: pointer to the VF structure
- * @is_vflr: true if VFLR was issued, false if not
- *
- * Returns true if the VF is currently in reset, resets successfully, or resets
- * are disabled and false otherwise.
- */
-bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
-{
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- struct device *dev;
- struct ice_hw *hw;
- bool rsd = false;
- u8 promisc_m;
- u32 reg;
- int i;
-
- lockdep_assert_held(&vf->cfg_lock);
-
- dev = ice_pf_to_dev(pf);
-
- if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
- dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
- vf->vf_id);
- return true;
- }
-
- if (ice_is_vf_disabled(vf)) {
- dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
- vf->vf_id);
- return true;
- }
-
- /* Set VF disable bit state here, before triggering reset */
- set_bit(ICE_VF_STATE_DIS, vf->vf_states);
- ice_trigger_vf_reset(vf, is_vflr, false);
-
- vsi = ice_get_vf_vsi(vf);
-
- ice_dis_vf_qs(vf);
-
- /* Call Disable LAN Tx queue AQ whether or not queues are
- * enabled. This is needed for successful completion of VFR.
- */
- ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
- NULL, ICE_VF_RESET, vf->vf_id, NULL);
-
- hw = &pf->hw;
- /* poll VPGEN_VFRSTAT reg to make sure
- * that reset is complete
- */
- for (i = 0; i < 10; i++) {
- /* VF reset requires the driver to first reset the VF and then
- * poll the status register to make sure that the reset
- * completed successfully.
- */
- reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
- if (reg & VPGEN_VFRSTAT_VFRD_M) {
- rsd = true;
- break;
- }
-
- /* only sleep if the reset is not done */
- usleep_range(10, 20);
- }
-
- vf->driver_caps = 0;
- ice_vc_set_default_allowlist(vf);
-
- /* Display a warning if the VF didn't manage to reset in time, but
- * continue on with the operation anyway.
- */
- if (!rsd)
- dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
-
- /* disable promiscuous modes in case they were enabled;
- * ignore any error if the disabling process fails
- */
- if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
- test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
- if (vf->port_vlan_info || vsi->num_vlan)
- promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
- else
- promisc_m = ICE_UCAST_PROMISC_BITS;
-
- if (ice_vf_clear_vsi_promisc(vf, vsi, promisc_m))
- dev_err(dev, "disabling promiscuous mode failed\n");
- }
-
- ice_eswitch_del_vf_mac_rule(vf);
-
- ice_vf_fdir_exit(vf);
- ice_vf_fdir_init(vf);
- /* clean VF control VSI when resetting VF since it should be setup
- * only when VF creates its first FDIR rule.
- */
- if (vf->ctrl_vsi_idx != ICE_NO_VSI)
- ice_vf_ctrl_vsi_release(vf);
-
- ice_vf_pre_vsi_rebuild(vf);
-
- if (ice_vf_rebuild_vsi_with_release(vf)) {
- dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
- return false;
- }
-
- ice_vf_post_vsi_rebuild(vf);
- vsi = ice_get_vf_vsi(vf);
- ice_eswitch_update_repr(vsi);
- ice_eswitch_replay_vf_mac_rule(vf);
-
- /* if the VF has been reset, allow it to come up again */
- if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id))
- dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", vf->vf_id);
-
- return true;
-}
-
-/**
* ice_vc_notify_link_state - Inform all VFs on a PF of link status
* @pf: pointer to the PF structure
*/
void ice_vc_notify_link_state(struct ice_pf *pf)
{
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
- ice_for_each_vf(pf, i)
- ice_vc_notify_vf_link_state(&pf->vf[i]);
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf)
+ ice_vc_notify_vf_link_state(vf);
+ mutex_unlock(&pf->vfs.table_lock);
}
/**
@@ -1690,7 +272,7 @@ void ice_vc_notify_reset(struct ice_pf *pf)
{
struct virtchnl_pf_event pfe;
- if (!pf->num_alloc_vfs)
+ if (!ice_has_vfs(pf))
return;
pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
@@ -1700,462 +282,6 @@ void ice_vc_notify_reset(struct ice_pf *pf)
}
/**
- * ice_vc_notify_vf_reset - Notify VF of a reset event
- * @vf: pointer to the VF structure
- */
-static void ice_vc_notify_vf_reset(struct ice_vf *vf)
-{
- struct virtchnl_pf_event pfe;
- struct ice_pf *pf;
-
- if (!vf)
- return;
-
- pf = vf->pf;
- if (ice_validate_vf_id(pf, vf->vf_id))
- return;
-
- /* Bail out if the VF is disabled, or is neither initialized nor
- * active; otherwise proceed with the notification
- */
- if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
- !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
- test_bit(ICE_VF_STATE_DIS, vf->vf_states))
- return;
-
- pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
- pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
- ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
- VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
- NULL);
-}
-
-/**
- * ice_init_vf_vsi_res - initialize/setup VF VSI resources
- * @vf: VF to initialize/setup the VSI for
- *
- * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
- * the VF VSI's broadcast filter. It is only used during initial VF creation.
- */
-static int ice_init_vf_vsi_res(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
- u8 broadcast[ETH_ALEN];
- struct ice_vsi *vsi;
- struct device *dev;
- int err;
-
- vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
-
- dev = ice_pf_to_dev(pf);
- vsi = ice_vf_vsi_setup(vf);
- if (!vsi)
- return -ENOMEM;
-
- err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
- if (err) {
- dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
- vf->vf_id);
- goto release_vsi;
- }
-
- eth_broadcast_addr(broadcast);
- err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
- if (err) {
- dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n",
- vf->vf_id, err);
- goto release_vsi;
- }
-
- vf->num_mac = 1;
-
- return 0;
-
-release_vsi:
- ice_vf_vsi_release(vf);
- return err;
-}
-
-/**
- * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
- * @pf: PF the VFs are associated with
- */
-static int ice_start_vfs(struct ice_pf *pf)
-{
- struct ice_hw *hw = &pf->hw;
- int retval, i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- ice_clear_vf_reset_trigger(vf);
-
- retval = ice_init_vf_vsi_res(vf);
- if (retval) {
- dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
- vf->vf_id, retval);
- goto teardown;
- }
-
- set_bit(ICE_VF_STATE_INIT, vf->vf_states);
- ice_ena_vf_mappings(vf);
- wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
- }
-
- ice_flush(hw);
- return 0;
-
-teardown:
- for (i = i - 1; i >= 0; i--) {
- struct ice_vf *vf = &pf->vf[i];
-
- ice_dis_vf_mappings(vf);
- ice_vf_vsi_release(vf);
- }
-
- return retval;
-}
-
-/**
- * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
- * @pf: PF holding reference to all VFs for default configuration
- */
-static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
-{
- int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- vf->pf = pf;
- vf->vf_id = i;
- vf->vf_sw_id = pf->first_sw;
- /* assign default capabilities */
- set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
- vf->spoofchk = true;
- vf->num_vf_qs = pf->num_qps_per_vf;
- ice_vc_set_default_allowlist(vf);
-
- /* ctrl_vsi_idx will be set to a valid value only when VF
- * creates its first fdir rule.
- */
- ice_vf_ctrl_invalidate_vsi(vf);
- ice_vf_fdir_init(vf);
-
- ice_vc_set_dflt_vf_ops(&vf->vc_ops);
-
- mutex_init(&vf->cfg_lock);
- }
-}
-
-/**
- * ice_alloc_vfs - allocate num_vfs in the PF structure
- * @pf: PF to store the allocated VFs in
- * @num_vfs: number of VFs to allocate
- */
-static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
-{
- struct ice_vf *vfs;
-
- vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
- GFP_KERNEL);
- if (!vfs)
- return -ENOMEM;
-
- pf->vf = vfs;
- pf->num_alloc_vfs = num_vfs;
-
- return 0;
-}
-
-/**
- * ice_ena_vfs - enable VFs so they are ready to be used
- * @pf: pointer to the PF structure
- * @num_vfs: number of VFs to enable
- */
-static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
-{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- int ret;
-
- /* Disable global interrupt 0 so we don't try to handle the VFLR. */
- wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
- ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
- set_bit(ICE_OICR_INTR_DIS, pf->state);
- ice_flush(hw);
-
- ret = pci_enable_sriov(pf->pdev, num_vfs);
- if (ret) {
- pf->num_alloc_vfs = 0;
- goto err_unroll_intr;
- }
-
- ret = ice_alloc_vfs(pf, num_vfs);
- if (ret)
- goto err_pci_disable_sriov;
-
- if (ice_set_per_vf_res(pf)) {
- dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n",
- num_vfs);
- ret = -ENOSPC;
- goto err_unroll_sriov;
- }
-
- ice_set_dflt_settings_vfs(pf);
-
- if (ice_start_vfs(pf)) {
- dev_err(dev, "Failed to start VF(s)\n");
- ret = -EAGAIN;
- goto err_unroll_sriov;
- }
-
- clear_bit(ICE_VF_DIS, pf->state);
-
- ret = ice_eswitch_configure(pf);
- if (ret)
- goto err_unroll_sriov;
-
- /* rearm global interrupts */
- if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
- ice_irq_dynamic_ena(hw, NULL, NULL);
-
- return 0;
-
-err_unroll_sriov:
- devm_kfree(dev, pf->vf);
- pf->vf = NULL;
- pf->num_alloc_vfs = 0;
-err_pci_disable_sriov:
- pci_disable_sriov(pf->pdev);
-err_unroll_intr:
- /* rearm interrupts here */
- ice_irq_dynamic_ena(hw, NULL, NULL);
- clear_bit(ICE_OICR_INTR_DIS, pf->state);
- return ret;
-}
-
-/**
- * ice_pci_sriov_ena - Enable or change number of VFs
- * @pf: pointer to the PF structure
- * @num_vfs: number of VFs to allocate
- *
- * Returns 0 on success and negative on failure
- */
-static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
-{
- int pre_existing_vfs = pci_num_vf(pf->pdev);
- struct device *dev = ice_pf_to_dev(pf);
- int err;
-
- if (pre_existing_vfs && pre_existing_vfs != num_vfs)
- ice_free_vfs(pf);
- else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
- return 0;
-
- if (num_vfs > pf->num_vfs_supported) {
- dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
- num_vfs, pf->num_vfs_supported);
- return -EOPNOTSUPP;
- }
-
- dev_info(dev, "Enabling %d VFs\n", num_vfs);
- err = ice_ena_vfs(pf, num_vfs);
- if (err) {
- dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
- return err;
- }
-
- set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
- return 0;
-}
-
-/**
- * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
- * @pf: PF to enable SR-IOV on
- */
-static int ice_check_sriov_allowed(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
-
- if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
- dev_err(dev, "This device is not capable of SR-IOV\n");
- return -EOPNOTSUPP;
- }
-
- if (ice_is_safe_mode(pf)) {
- dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
- return -EOPNOTSUPP;
- }
-
- if (!ice_pf_state_is_nominal(pf)) {
- dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
- return -EBUSY;
- }
-
- return 0;
-}
-
-/**
- * ice_sriov_configure - Enable or change number of VFs via sysfs
- * @pdev: pointer to a pci_dev structure
- * @num_vfs: number of VFs to allocate or 0 to free VFs
- *
- * This function is called when the user updates the number of VFs in sysfs. On
- * success return whatever num_vfs was set to by the caller. Return negative on
- * failure.
- */
-int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
-{
- struct ice_pf *pf = pci_get_drvdata(pdev);
- struct device *dev = ice_pf_to_dev(pf);
- int err;
-
- err = ice_check_sriov_allowed(pf);
- if (err)
- return err;
-
- if (!num_vfs) {
- if (!pci_vfs_assigned(pdev)) {
- ice_mbx_deinit_snapshot(&pf->hw);
- ice_free_vfs(pf);
- if (pf->lag)
- ice_enable_lag(pf->lag);
- return 0;
- }
-
- dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
- return -EBUSY;
- }
-
- err = ice_mbx_init_snapshot(&pf->hw, num_vfs);
- if (err)
- return err;
-
- err = ice_pci_sriov_ena(pf, num_vfs);
- if (err) {
- ice_mbx_deinit_snapshot(&pf->hw);
- return err;
- }
-
- if (pf->lag)
- ice_disable_lag(pf->lag);
- return num_vfs;
-}
-
-/**
- * ice_process_vflr_event - Free VF resources via IRQ calls
- * @pf: pointer to the PF structure
- *
- * Called from the VFLR IRQ handler to free up VF resources and state
- * variables.
- */
-void ice_process_vflr_event(struct ice_pf *pf)
-{
- struct ice_hw *hw = &pf->hw;
- unsigned int vf_id;
- u32 reg;
-
- if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
- !pf->num_alloc_vfs)
- return;
-
- ice_for_each_vf(pf, vf_id) {
- struct ice_vf *vf = &pf->vf[vf_id];
- u32 reg_idx, bit_idx;
-
- reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
- bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
- /* read GLGEN_VFLRSTAT register to find out the flr VFs */
- reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
- if (reg & BIT(bit_idx)) {
- /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
- mutex_lock(&vf->cfg_lock);
- ice_reset_vf(vf, true);
- mutex_unlock(&vf->cfg_lock);
- }
- }
-}
-
-/**
- * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
- * @vf: pointer to the VF info
- */
-static void ice_vc_reset_vf(struct ice_vf *vf)
-{
- ice_vc_notify_vf_reset(vf);
- ice_reset_vf(vf, false);
-}
-
-/**
- * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
- * @pf: PF used to index all VFs
- * @pfq: queue index relative to the PF's function space
- *
- * If no VF owns the pfq, return NULL; otherwise return a pointer to the
- * owning VF.
- */
-static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
-{
- unsigned int vf_id;
-
- ice_for_each_vf(pf, vf_id) {
- struct ice_vf *vf = &pf->vf[vf_id];
- struct ice_vsi *vsi;
- u16 rxq_idx;
-
- vsi = ice_get_vf_vsi(vf);
-
- ice_for_each_rxq(vsi, rxq_idx)
- if (vsi->rxq_map[rxq_idx] == pfq)
- return vf;
- }
-
- return NULL;
-}
-
-/**
- * ice_globalq_to_pfq - convert from global queue index to PF space queue index
- * @pf: PF used for conversion
- * @globalq: global queue index used to convert to PF space queue index
- */
-static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
-{
- return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
-}
-
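
Aside: a toy model of the global-queue-to-VF lookup performed by the two helpers above; the queue layout and rxq_first_id are invented for illustration:

    #include <stdio.h>

    /* Assumed toy layout: device Rx queues start at 512; VF0 owns PF
     * queues 16..19 and VF1 owns 20..23.
     */
    #define RXQ_FIRST_ID 512

    static int vf_rxq_map[2][4] = {
            { 16, 17, 18, 19 },
            { 20, 21, 22, 23 },
    };

    static int find_vf_for_globalq(unsigned int globalq)
    {
            unsigned int pfq = globalq - RXQ_FIRST_ID;
            int vf, q;

            for (vf = 0; vf < 2; vf++)
                    for (q = 0; q < 4; q++)
                            if (vf_rxq_map[vf][q] == (int)pfq)
                                    return vf;
            return -1;
    }

    int main(void)
    {
            printf("global queue 533 belongs to VF %d\n",
                   find_vf_for_globalq(533));
            return 0;
    }
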
-/**
- * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
- * @pf: PF that the LAN overflow event happened on
- * @event: structure holding the event information for the LAN overflow event
- *
- * Determine if the LAN overflow event was caused by a VF queue. If it was not
- * caused by a VF, do nothing. If a VF caused this LAN overflow event, trigger
- * a reset on the offending VF.
- */
-void
-ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
-{
- u32 gldcb_rtctq, queue;
- struct ice_vf *vf;
-
- gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
- dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
-
- /* event returns device global Rx queue number */
- queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
- GLDCB_RTCTQ_RXQNUM_S;
-
- vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
- if (!vf)
- return;
-
- mutex_lock(&vf->cfg_lock);
- ice_vc_reset_vf(vf);
- mutex_unlock(&vf->cfg_lock);
-}
-
-/**
* ice_vc_send_msg_to_vf - Send message to VF
* @vf: pointer to the VF info
* @v_opcode: virtual channel opcode
@@ -2173,33 +299,9 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
struct ice_pf *pf;
int aq_ret;
- if (!vf)
- return -EINVAL;
-
pf = vf->pf;
- if (ice_validate_vf_id(pf, vf->vf_id))
- return -EINVAL;
-
dev = ice_pf_to_dev(pf);
- /* single place to detect unsuccessful return values */
- if (v_retval) {
- vf->num_inval_msgs++;
- dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
- v_opcode, v_retval);
- if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
- dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
- vf->vf_id);
- dev_err(dev, "Use PF Control I/F to enable the VF\n");
- set_bit(ICE_VF_STATE_DIS, vf->vf_states);
- return -EIO;
- }
- } else {
- vf->num_valid_msgs++;
- /* reset the invalid counter, if a valid message is received. */
- vf->num_inval_msgs = 0;
- }
-
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
@@ -2251,7 +353,7 @@ static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
max_frame_size = pi->phy.link_info.max_frame_size;
- if (vf->port_vlan_info)
+ if (ice_vf_is_port_vlan_ena(vf))
max_frame_size -= VLAN_HLEN;
return max_frame_size;
@@ -2268,12 +370,12 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vf_resource *vfres = NULL;
- struct ice_pf *pf = vf->pf;
+ struct ice_hw *hw = &vf->pf->hw;
struct ice_vsi *vsi;
int len = 0;
int ret;
- if (ice_check_vf_init(pf, vf)) {
+ if (ice_check_vf_init(vf)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto err;
}
@@ -2300,8 +402,33 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
goto err;
}
- if (!vsi->info.pvid)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+ /* VLAN offloads based on current device configuration */
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN_V2;
+ } else if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) {
+ /* allow VF to negotiate VIRTCHNL_VF_OFFLOAD_VLAN explicitly for
+ * these two conditions, which amounts to guest VLAN filtering
+ * and offloads being based on the inner VLAN or the
+ * inner/single VLAN respectively, and don't allow VF to
+ * negotiate VIRTCHNL_VF_OFFLOAD_VLAN in any other cases
+ */
+ if (ice_is_dvm_ena(hw) && ice_vf_is_port_vlan_ena(vf)) {
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
+ } else if (!ice_is_dvm_ena(hw) &&
+ !ice_vf_is_port_vlan_ena(vf)) {
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
+ /* configure backward compatible support for VFs that
+ * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is
+ * configured in SVM, and no port VLAN is configured
+ */
+ ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi);
+ } else if (ice_is_dvm_ena(hw)) {
+ /* configure software offloaded VLAN support when DVM
+ * is enabled, but no port VLAN is enabled
+ */
+ ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi);
+ }
+ }
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
@@ -2345,7 +472,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->num_vsis = 1;
/* Tx and Rx queue are equal for VF */
vfres->num_queue_pairs = vsi->num_txq;
- vfres->max_vectors = pf->num_msix_per_vf;
+ vfres->max_vectors = vf->pf->vfs.num_msix_per;
vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
vfres->max_mtu = ice_vc_get_max_frame_size(vf);
@@ -2384,7 +511,7 @@ err:
static void ice_vc_reset_vf_msg(struct ice_vf *vf)
{
if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
- ice_reset_vf(vf, false);
+ ice_reset_vf(vf, 0);
}
/**
@@ -2419,7 +546,7 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
vsi = ice_find_vsi_from_id(pf, vsi_id);
- return (vsi && (vsi->vf_id == vf->vf_id));
+ return (vsi && (vsi->vf == vf));
}
/**
@@ -2851,150 +978,6 @@ error_param:
}
/**
- * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
- * @vf: The VF being reset
- *
- * The max poll time is about 800 ms (ICE_MAX_VF_RESET_TRIES polls of
- * ICE_MAX_VF_RESET_SLEEP_MS each), roughly the maximum time it takes for a VF
- * to be reset and/or a VF driver to be removed.
- */
-static void ice_wait_on_vf_reset(struct ice_vf *vf)
-{
- int i;
-
- for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
- if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
- break;
- msleep(ICE_MAX_VF_RESET_SLEEP_MS);
- }
-}
-
-/**
- * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
- * @vf: VF to check if it's ready to be configured/queried
- *
- * The purpose of this function is to make sure the VF is not in reset, not
- * disabled, and initialized so it can be configured and/or queried by a host
- * administrator.
- */
-int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
-{
- struct ice_pf *pf;
-
- ice_wait_on_vf_reset(vf);
-
- if (ice_is_vf_disabled(vf))
- return -EINVAL;
-
- pf = vf->pf;
- if (ice_check_vf_init(pf, vf))
- return -EBUSY;
-
- return 0;
-}
-
-/**
- * ice_set_vf_spoofchk
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @ena: flag to enable or disable feature
- *
- * Enable or disable VF spoof checking
- */
-int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
-{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
- struct ice_vsi_ctx *ctx;
- struct ice_vsi *vf_vsi;
- struct device *dev;
- struct ice_vf *vf;
- int ret;
-
- dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- vf_vsi = ice_get_vf_vsi(vf);
- if (!vf_vsi) {
- netdev_err(netdev, "VSI %d for VF %d is null\n",
- vf->lan_vsi_idx, vf->vf_id);
- return -EINVAL;
- }
-
- if (vf_vsi->type != ICE_VSI_VF) {
- netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
- vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
- return -ENODEV;
- }
-
- if (ena == vf->spoofchk) {
- dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
- return 0;
- }
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- ctx->info.sec_flags = vf_vsi->info.sec_flags;
- ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
- if (ena) {
- ctx->info.sec_flags |=
- ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
- (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
- } else {
- ctx->info.sec_flags &=
- ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
- (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
- }
-
- ret = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
- if (ret) {
- dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d\n",
- ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, ret);
- goto out;
- }
-
- /* only update spoofchk state and VSI context on success */
- vf_vsi->info.sec_flags = ctx->info.sec_flags;
- vf->spoofchk = ena;
-
-out:
- kfree(ctx);
- return ret;
-}
-
-/**
- * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
- * @pf: PF structure for accessing VF(s)
- *
- * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
- * else return true
- */
-bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
-{
- int vf_idx;
-
- ice_for_each_vf(pf, vf_idx) {
- struct ice_vf *vf = &pf->vf[vf_idx];
-
- /* found a VF that has promiscuous mode configured */
- if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
- test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
- return true;
- }
-
- return false;
-}
-
-/**
* ice_vc_cfg_promiscuous_mode_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -3007,6 +990,7 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
bool rm_promisc, alluni = false, allmulti = false;
struct virtchnl_promisc_info *info =
(struct virtchnl_promisc_info *)msg;
+ struct ice_vsi_vlan_ops *vlan_ops;
int mcast_err = 0, ucast_err = 0;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
@@ -3030,7 +1014,7 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
}
dev = ice_pf_to_dev(pf);
- if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ if (!ice_is_vf_trusted(vf)) {
dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
vf->vf_id);
/* Leave v_ret alone, lie to the VF on purpose. */
@@ -3045,16 +1029,15 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
rm_promisc = !allmulti && !alluni;
- if (vsi->num_vlan || vf->port_vlan_info) {
- if (rm_promisc)
- ret = ice_cfg_vlan_pruning(vsi, true);
- else
- ret = ice_cfg_vlan_pruning(vsi, false);
- if (ret) {
- dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ if (rm_promisc)
+ ret = vlan_ops->ena_rx_filtering(vsi);
+ else
+ ret = vlan_ops->dis_rx_filtering(vsi);
+ if (ret) {
+ dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
}
if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
@@ -3081,7 +1064,8 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
} else {
u8 mcast_m, ucast_m;
- if (vf->port_vlan_info || vsi->num_vlan > 1) {
+ if (ice_vf_is_port_vlan_ena(vf) ||
+ ice_vsi_has_non_zero_vlans(vsi)) {
mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
} else {
@@ -3108,16 +1092,21 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
!test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
vf->vf_id);
- else if (!allmulti && test_and_clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ else if (!allmulti &&
+ test_and_clear_bit(ICE_VF_STATE_MC_PROMISC,
+ vf->vf_states))
dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n",
vf->vf_id);
}
if (!ucast_err) {
- if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
+ if (alluni &&
+ !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
vf->vf_id);
- else if (!alluni && test_and_clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
+ else if (!alluni &&
+ test_and_clear_bit(ICE_VF_STATE_UC_PROMISC,
+ vf->vf_states))
dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n",
vf->vf_id);
}
@@ -3510,7 +1499,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
* there is actually at least a single VF queue vector mapped
*/
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- pf->num_msix_per_vf < num_q_vectors_mapped ||
+ pf->vfs.num_msix_per < num_q_vectors_mapped ||
!num_q_vectors_mapped) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -3532,7 +1521,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
/* vector_id is always 0-based for each VF, and can never be
* larger than or equal to the max allowed interrupts per VF
*/
- if (!(vector_id < pf->num_msix_per_vf) ||
+ if (!(vector_id < pf->vfs.num_msix_per) ||
!ice_vc_isvalid_vsi_id(vf, vsi_id) ||
(!vector_id && (map->rxq_map || map->txq_map))) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -3661,10 +1650,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
vsi->max_frame = qpi->rxq.max_pkt_size;
- /* add space for the port VLAN since the VF driver is not
- * expected to account for it in the MTU calculation
+ /* add space for the port VLAN since the VF driver is
+ * not expected to account for it in the MTU
+ * calculation
*/
- if (vf->port_vlan_info)
+ if (ice_vf_is_port_vlan_ena(vf))
vsi->max_frame += VLAN_HLEN;
if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
@@ -3681,15 +1671,6 @@ error_param:
}
/**
- * ice_is_vf_trusted
- * @vf: pointer to the VF info
- */
-static bool ice_is_vf_trusted(struct ice_vf *vf)
-{
- return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
-}
-
-/**
* ice_can_vf_change_mac
* @vf: pointer to the VF info
*
@@ -4063,7 +2044,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
} else {
/* request is successful, then reset VF */
vf->num_req_qs = req_queues;
- ice_vc_reset_vf(vf);
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
dev_info(dev, "VF %d granted request of %u queues.\n",
vf->vf_id, req_queues);
return 0;
@@ -4076,78 +2057,91 @@ error_param:
}
/**
- * ice_set_vf_port_vlan
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @vlan_id: VLAN ID being set
- * @qos: priority setting
- * @vlan_proto: VLAN protocol
+ * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
+ * @caps: VF driver negotiated capabilities
*
- * program VF Port VLAN ID and/or QoS
+ * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
*/
-int
-ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
- __be16 vlan_proto)
+static bool ice_vf_vlan_offload_ena(u32 caps)
{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct device *dev;
- struct ice_vf *vf;
- u16 vlanprio;
- int ret;
-
- dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- if (vlan_id >= VLAN_N_VID || qos > 7) {
- dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
- vf_id, vlan_id, qos);
- return -EINVAL;
- }
-
- if (vlan_proto != htons(ETH_P_8021Q)) {
- dev_err(dev, "VF VLAN protocol is not supported\n");
- return -EPROTONOSUPPORT;
- }
+ return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
+}
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
+/**
+ * ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed
+ * @vf: VF used to determine if VLAN promiscuous config is allowed
+ */
+static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
+{
+ if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
+ test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, vf->pf->flags))
+ return true;
- vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
+ return false;
+}
- if (vf->port_vlan_info == vlanprio) {
- /* duplicate request, so just return success */
- dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
- return 0;
- }
+/**
+ * ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN
+ * @vsi: VF's VSI used to enable VLAN promiscuous mode
+ * @vlan: VLAN used to enable VLAN promiscuous
+ *
+ * This function should only be called if VLAN promiscuous mode is allowed,
+ * which can be determined via ice_is_vlan_promisc_allowed().
+ */
+static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
+ int status;
- mutex_lock(&vf->cfg_lock);
+ status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
+ vlan->vid);
+ if (status && status != -EEXIST)
+ return status;
- vf->port_vlan_info = vlanprio;
+ return 0;
+}
- if (vf->port_vlan_info)
- dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
- vlan_id, qos, vf_id);
- else
- dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
+/**
+ * ice_vf_dis_vlan_promisc - Disable Tx/Rx VLAN promiscuous for the VLAN
+ * @vsi: VF's VSI used to disable VLAN promiscuous mode for
+ * @vlan: VLAN used to disable VLAN promiscuous
+ *
+ * This function should only be called if VLAN promiscuous mode is allowed,
+ * which can be determined via ice_is_vlan_promisc_allowed().
+ */
+static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
+ int status;
- ice_vc_reset_vf(vf);
- mutex_unlock(&vf->cfg_lock);
+ status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
+ vlan->vid);
+ if (status && status != -ENOENT)
+ return status;
return 0;
}
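The two helpers above are deliberately idempotent: -EEXIST on enable and -ENOENT on disable are swallowed, so callers can assert the desired promiscuous state without querying it first. A standalone sketch of that pattern, assuming a toy filter table and hypothetical helper names (not the driver's API):

    #include <errno.h>
    #include <stdbool.h>

    static bool rule_present[4096];	/* stands in for the switch rule list */

    static int set_rule(int vid)	/* mimics ice_fltr_set_vsi_promisc() */
    {
    	if (rule_present[vid])
    		return -EEXIST;
    	rule_present[vid] = true;
    	return 0;
    }

    static int clear_rule(int vid)	/* mimics ice_fltr_clear_vsi_promisc() */
    {
    	if (!rule_present[vid])
    		return -ENOENT;
    	rule_present[vid] = false;
    	return 0;
    }

    /* same shape as ice_vf_{ena,dis}_vlan_promisc(): "already there" and
     * "already gone" both count as success
     */
    static int ena_promisc(int vid)
    {
    	int status = set_rule(vid);

    	return (status && status != -EEXIST) ? status : 0;
    }

    static int dis_promisc(int vid)
    {
    	int status = clear_rule(vid);

    	return (status && status != -ENOENT) ? status : 0;
    }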
/**
- * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
- * @caps: VF driver negotiated capabilities
+ * ice_vf_has_max_vlans - check if VF already has the max allowed VLAN filters
+ * @vf: VF to check against
+ * @vsi: VF's VSI
*
- * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
+ * If the VF is trusted then the VF is allowed to add as many VLANs as it
+ * wants to, so return false.
+ *
+ * When the VF is untrusted compare the number of non-zero VLANs + 1 to the max
+ * allowed VLANs for an untrusted VF. Return the result of this comparison.
*/
-static bool ice_vf_vlan_offload_ena(u32 caps)
+static bool ice_vf_has_max_vlans(struct ice_vf *vf, struct ice_vsi *vsi)
{
- return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
+ if (ice_is_vf_trusted(vf))
+ return false;
+
+#define ICE_VF_ADDED_VLAN_ZERO_FLTRS 1
+ return ((ice_vsi_num_non_zero_vlans(vsi) +
+ ICE_VF_ADDED_VLAN_ZERO_FLTRS) >= ICE_MAX_VLAN_PER_VF);
}
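Concretely: with ICE_MAX_VLAN_PER_VF defined as 8 and the implicit VLAN 0 filter counted via ICE_VF_ADDED_VLAN_ZERO_FLTRS, an untrusted VF tops out at 7 non-zero VLANs; once ice_vsi_num_non_zero_vlans() returns 7, the comparison 7 + 1 >= 8 holds and further adds are refused, while a trusted VF skips the check entirely.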
/**
@@ -4167,9 +2161,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
bool vlan_promisc = false;
struct ice_vsi *vsi;
struct device *dev;
- struct ice_hw *hw;
int status = 0;
- u8 promisc_m;
int i;
dev = ice_pf_to_dev(pf);
@@ -4197,15 +2189,13 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
}
}
- hw = &pf->hw;
vsi = ice_get_vf_vsi(vf);
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if (add_v && !ice_is_vf_trusted(vf) &&
- vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
+ if (add_v && ice_vf_has_max_vlans(vf, vsi)) {
dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
vf->vf_id);
/* There is no need to let VF know about being not trusted,
@@ -4214,22 +2204,28 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
- if (vsi->info.pvid) {
+ /* in DVM a VF can add/delete inner VLAN filters when
+ * VIRTCHNL_VF_OFFLOAD_VLAN is negotiated, so only reject in SVM
+ */
+ if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&pf->hw)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
- test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
- test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
- vlan_promisc = true;
+ /* in DVM VLAN promiscuous is based on the outer VLAN, which would be
+ * the port VLAN if VIRTCHNL_VF_OFFLOAD_VLAN was negotiated, so only
+ * allow vlan_promisc = true in SVM and if no port VLAN is configured
+ */
+ vlan_promisc = ice_is_vlan_promisc_allowed(vf) &&
+ !ice_is_dvm_ena(&pf->hw) &&
+ !ice_vf_is_port_vlan_ena(vf);
if (add_v) {
for (i = 0; i < vfl->num_elements; i++) {
u16 vid = vfl->vlan_id[i];
+ struct ice_vlan vlan;
- if (!ice_is_vf_trusted(vf) &&
- vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
+ if (ice_vf_has_max_vlans(vf, vsi)) {
dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
vf->vf_id);
/* There is no need to let VF know about being
@@ -4246,29 +2242,23 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
if (!vid)
continue;
- status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
+ vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
+ status = vsi->inner_vlan_ops.add_vlan(vsi, &vlan);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- /* Enable VLAN pruning when non-zero VLAN is added */
- if (!vlan_promisc && vid &&
- !ice_vsi_is_vlan_pruning_ena(vsi)) {
- status = ice_cfg_vlan_pruning(vsi, true);
- if (status) {
+ /* Enable VLAN filtering on first non-zero VLAN */
+ if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
+ status = vsi->inner_vlan_ops.ena_rx_filtering(vsi);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
+ vid, status);
goto error_param;
}
} else if (vlan_promisc) {
- /* Enable Ucast/Mcast VLAN promiscuous mode */
- promisc_m = ICE_PROMISC_VLAN_TX |
- ICE_PROMISC_VLAN_RX;
-
- status = ice_set_vsi_promisc(hw, vsi->idx,
- promisc_m, vid);
+ status = ice_vf_ena_vlan_promisc(vsi, &vlan);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
@@ -4289,6 +2279,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
num_vf_vlan = vsi->num_vlan;
for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
u16 vid = vfl->vlan_id[i];
+ struct ice_vlan vlan;
/* we add VLAN 0 by default for each VF so we can enable
* Tx VLAN anti-spoof without triggering MDD events so
@@ -4297,28 +2288,19 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
if (!vid)
continue;
- /* Make sure ice_vsi_kill_vlan is successful before
- * updating VLAN information
- */
- status = ice_vsi_kill_vlan(vsi, vid);
+ vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
+ status = vsi->inner_vlan_ops.del_vlan(vsi, &vlan);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- /* Disable VLAN pruning when only VLAN 0 is left */
- if (vsi->num_vlan == 1 &&
- ice_vsi_is_vlan_pruning_ena(vsi))
- ice_cfg_vlan_pruning(vsi, false);
+ /* Disable VLAN filtering when only VLAN 0 is left */
+ if (!ice_vsi_has_non_zero_vlans(vsi))
+ vsi->inner_vlan_ops.dis_rx_filtering(vsi);
- /* Disable Unicast/Multicast VLAN promiscuous mode */
- if (vlan_promisc) {
- promisc_m = ICE_PROMISC_VLAN_TX |
- ICE_PROMISC_VLAN_RX;
-
- ice_clear_vsi_promisc(hw, vsi->idx,
- promisc_m, vid);
- }
+ if (vlan_promisc)
+ ice_vf_dis_vlan_promisc(vsi, &vlan);
}
}
@@ -4378,7 +2360,7 @@ static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
}
vsi = ice_get_vf_vsi(vf);
- if (ice_vsi_manage_vlan_stripping(vsi, true))
+ if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q))
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
error_param:
@@ -4413,7 +2395,7 @@ static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
goto error_param;
}
- if (ice_vsi_manage_vlan_stripping(vsi, false))
+ if (vsi->inner_vlan_ops.dis_stripping(vsi))
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
error_param:
@@ -4425,11 +2407,8 @@ error_param:
* ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
* @vf: VF to enable/disable VLAN stripping for on initialization
*
- * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set enable VLAN stripping, else if
- * the flag is cleared then we want to disable stripping. For example, the flag
- * will be cleared when port VLANs are configured by the administrator before
- * passing the VF to the guest or if the AVF driver doesn't support VLAN
- * offloads.
+ * Set the default for VLAN stripping based on whether a port VLAN is configured
+ * and the current VLAN mode of the device.
*/
static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
{
@@ -4438,17 +2417,977 @@ static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
if (!vsi)
return -EINVAL;
- /* don't modify stripping if port VLAN is configured */
- if (vsi->info.pvid)
+ /* don't modify stripping if port VLAN is configured in SVM since the
+ * port VLAN is based on the inner/single VLAN in SVM
+ */
+ if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw))
return 0;
if (ice_vf_vlan_offload_ena(vf->driver_caps))
- return ice_vsi_manage_vlan_stripping(vsi, true);
+ return vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
+ else
+ return vsi->inner_vlan_ops.dis_stripping(vsi);
+}
+
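+/**
+ * ice_vc_get_max_vlan_fltrs - max VLAN filters the VF is allowed to use
+ * @vf: VF to get the maximum VLAN filter count for
+ *
+ * Trusted VFs are bounded only by the 12-bit VLAN ID space; untrusted VFs
+ * are capped at ICE_MAX_VLAN_PER_VF.
+ */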
+static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf)
+{
+ if (vf->trusted)
+ return VLAN_N_VID;
+ else
+ return ICE_MAX_VLAN_PER_VF;
+}
+
+/**
+ * ice_vf_outer_vlan_not_allowed - check if outer VLAN can be used
+ * @vf: VF being checked
+ *
+ * When the device is in double VLAN mode, check whether or not the outer VLAN
+ * is allowed.
+ */
+static bool ice_vf_outer_vlan_not_allowed(struct ice_vf *vf)
+{
+ if (ice_vf_is_port_vlan_ena(vf))
+ return true;
+
+ return false;
+}
+
+/**
+ * ice_vc_set_dvm_caps - set VLAN capabilities when the device is in DVM
+ * @vf: VF that capabilities are being set for
+ * @caps: VLAN capabilities to populate
+ *
+ * Determine VLAN capabilities support based on whether a port VLAN is
+ * configured. If a port VLAN is configured then the VF should use the inner
+ * filtering/offload capabilities since the port VLAN is using the outer VLAN
+ * capabilities.
+ */
+static void
+ice_vc_set_dvm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
+{
+ struct virtchnl_vlan_supported_caps *supported_caps;
+
+ if (ice_vf_outer_vlan_not_allowed(vf)) {
+ /* until support for inner VLAN filtering is added when a port
+ * VLAN is configured, only support software offloaded inner
+ * VLANs when a port VLAN is configured in DVM
+ */
+ supported_caps = &caps->filtering.filtering_support;
+ supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ supported_caps = &caps->offloads.stripping_support;
+ supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ supported_caps = &caps->offloads.insertion_support;
+ supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
+ caps->offloads.ethertype_match =
+ VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ } else {
+ supported_caps = &caps->filtering.filtering_support;
+ supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+ supported_caps->outer = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ VIRTCHNL_VLAN_ETHERTYPE_9100 |
+ VIRTCHNL_VLAN_ETHERTYPE_AND;
+ caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ VIRTCHNL_VLAN_ETHERTYPE_9100;
+
+ supported_caps = &caps->offloads.stripping_support;
+ supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ VIRTCHNL_VLAN_ETHERTYPE_9100 |
+ VIRTCHNL_VLAN_ETHERTYPE_XOR |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2;
+
+ supported_caps = &caps->offloads.insertion_support;
+ supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ VIRTCHNL_VLAN_ETHERTYPE_9100 |
+ VIRTCHNL_VLAN_ETHERTYPE_XOR |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2;
+
+ caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
+
+ caps->offloads.ethertype_match =
+ VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ }
+
+ caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
+}
+
+/**
+ * ice_vc_set_svm_caps - set VLAN capabilities when the device is in SVM
+ * @vf: VF that capabilities are being set for
+ * @caps: VLAN capabilities to populate
+ *
+ * Determine VLAN capabilities support based on whether a port VLAN is
+ * configured. If a port VLAN is configured then the VF does not have any VLAN
+ * filtering or offload capabilities since the port VLAN is using the inner VLAN
+ * capabilities in single VLAN mode (SVM). Otherwise allow the VF to use inner
+ * VLAN filtering and offload capabilities.
+ */
+static void
+ice_vc_set_svm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
+{
+ struct virtchnl_vlan_supported_caps *supported_caps;
+
+ if (ice_vf_is_port_vlan_ena(vf)) {
+ supported_caps = &caps->filtering.filtering_support;
+ supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ supported_caps = &caps->offloads.stripping_support;
+ supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ supported_caps = &caps->offloads.insertion_support;
+ supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ caps->offloads.ethertype_init = VIRTCHNL_VLAN_UNSUPPORTED;
+ caps->offloads.ethertype_match = VIRTCHNL_VLAN_UNSUPPORTED;
+ caps->filtering.max_filters = 0;
+ } else {
+ supported_caps = &caps->filtering.filtering_support;
+ supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+ caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
+
+ supported_caps = &caps->offloads.stripping_support;
+ supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ supported_caps = &caps->offloads.insertion_support;
+ supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
+ caps->offloads.ethertype_match =
+ VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
+ }
+}
+
+/**
+ * ice_vc_get_offload_vlan_v2_caps - determine VF's VLAN capabilities
+ * @vf: VF to determine VLAN capabilities for
+ *
+ * This will only be called if the VF and PF successfully negotiated
+ * VIRTCHNL_VF_OFFLOAD_VLAN_V2.
+ *
+ * Set VLAN capabilities based on the current VLAN mode and whether a port VLAN
+ * is configured or not.
+ */
+static int ice_vc_get_offload_vlan_v2_caps(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_caps *caps = NULL;
+ int err, len = 0;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ caps = kzalloc(sizeof(*caps), GFP_KERNEL);
+ if (!caps) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ goto out;
+ }
+ len = sizeof(*caps);
+
+ if (ice_is_dvm_ena(&vf->pf->hw))
+ ice_vc_set_dvm_caps(vf, caps);
+ else
+ ice_vc_set_svm_caps(vf, caps);
+
+ /* store negotiated caps to prevent invalid VF messages */
+ memcpy(&vf->vlan_v2_caps, caps, sizeof(*caps));
+
+out:
+ err = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
+ v_ret, (u8 *)caps, len);
+ kfree(caps);
+ return err;
+}
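The VF driver on the other side of this exchange is expected to gate its later requests on these negotiated caps rather than on the device's VLAN mode, which it cannot observe directly. A hedged consumer-side sketch; the struct layout mirrors virtchnl_vlan_supported_caps, but the flag value here is illustrative only (the real VIRTCHNL_VLAN_TOGGLE is defined in include/linux/avf/virtchnl.h):

    #include <stdbool.h>
    #include <stdint.h>

    #define VLAN_TOGGLE_FLAG 0x8000u	/* illustrative stand-in value */

    struct supported_caps { uint32_t outer, inner; };

    /* ask the PF to flip outer stripping only if the setting was
     * advertised as toggleable (DVM, no port VLAN, per the code above)
     */
    static bool may_toggle_outer_stripping(const struct supported_caps *c)
    {
    	return (c->outer & VLAN_TOGGLE_FLAG) != 0;
    }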
+
+/**
+ * ice_vc_validate_vlan_tpid - validate VLAN TPID
+ * @filtering_caps: negotiated/supported VLAN filtering capabilities
+ * @tpid: VLAN TPID used for validation
+ *
+ * Convert the VLAN TPID to a VIRTCHNL_VLAN_ETHERTYPE_* and then compare against
+ * the negotiated/supported filtering caps to see if the VLAN TPID is valid.
+ */
+static bool ice_vc_validate_vlan_tpid(u16 filtering_caps, u16 tpid)
+{
+ enum virtchnl_vlan_support vlan_ethertype = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ switch (tpid) {
+ case ETH_P_8021Q:
+ vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;
+ break;
+ case ETH_P_8021AD:
+ vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ break;
+ case ETH_P_QINQ1:
+ vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_9100;
+ break;
+ }
+
+ if (!(filtering_caps & vlan_ethertype))
+ return false;
+
+ return true;
+}
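As a worked case, if only 0x8100 filtering was negotiated, an 802.1ad (0x88a8) tag fails this check. A self-contained sketch of the mapping, with illustrative bit values (the authoritative VIRTCHNL_VLAN_ETHERTYPE_* constants live in include/linux/avf/virtchnl.h):

    #include <stdbool.h>
    #include <stdint.h>

    #define VLAN_ET_8100 0x1u	/* illustrative values only */
    #define VLAN_ET_88A8 0x2u
    #define VLAN_ET_9100 0x4u

    static bool tpid_ok(uint32_t filtering_caps, uint16_t tpid)
    {
    	uint32_t et = 0;	/* unknown TPIDs map to no capability bit */

    	switch (tpid) {
    	case 0x8100: et = VLAN_ET_8100; break;
    	case 0x88a8: et = VLAN_ET_88A8; break;
    	case 0x9100: et = VLAN_ET_9100; break;
    	}

    	return (filtering_caps & et) != 0;
    }

    /* tpid_ok(VLAN_ET_8100, 0x88a8) == false: an 802.1ad tag is rejected
     * when only 0x8100 filtering was negotiated
     */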
+
+/**
+ * ice_vc_is_valid_vlan - validate the virtchnl_vlan
+ * @vc_vlan: virtchnl_vlan to validate
+ *
+ * If either the VLAN TCI or the VLAN TPID is 0, then this filter is invalid,
+ * so return false. Otherwise return true.
+ */
+static bool ice_vc_is_valid_vlan(struct virtchnl_vlan *vc_vlan)
+{
+ if (!vc_vlan->tci || !vc_vlan->tpid)
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_vc_validate_vlan_filter_list - validate the filter list from the VF
+ * @vfc: negotiated/supported VLAN filtering capabilities
+ * @vfl: VLAN filter list from VF to validate
+ *
+ * Validate all of the filters in the VLAN filter list from the VF. If any of
+ * the checks fail then return false. Otherwise return true.
+ */
+static bool
+ice_vc_validate_vlan_filter_list(struct virtchnl_vlan_filtering_caps *vfc,
+ struct virtchnl_vlan_filter_list_v2 *vfl)
+{
+ u16 i;
+
+ if (!vfl->num_elements)
+ return false;
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ struct virtchnl_vlan_supported_caps *filtering_support =
+ &vfc->filtering_support;
+ struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
+ struct virtchnl_vlan *outer = &vlan_fltr->outer;
+ struct virtchnl_vlan *inner = &vlan_fltr->inner;
+
+ if ((ice_vc_is_valid_vlan(outer) &&
+ filtering_support->outer == VIRTCHNL_VLAN_UNSUPPORTED) ||
+ (ice_vc_is_valid_vlan(inner) &&
+ filtering_support->inner == VIRTCHNL_VLAN_UNSUPPORTED))
+ return false;
+
+ if ((outer->tci_mask &&
+ !(filtering_support->outer & VIRTCHNL_VLAN_FILTER_MASK)) ||
+ (inner->tci_mask &&
+ !(filtering_support->inner & VIRTCHNL_VLAN_FILTER_MASK)))
+ return false;
+
+ if (((outer->tci & VLAN_PRIO_MASK) &&
+ !(filtering_support->outer & VIRTCHNL_VLAN_PRIO)) ||
+ ((inner->tci & VLAN_PRIO_MASK) &&
+ !(filtering_support->inner & VIRTCHNL_VLAN_PRIO)))
+ return false;
+
+ if ((ice_vc_is_valid_vlan(outer) &&
+ !ice_vc_validate_vlan_tpid(filtering_support->outer,
+ outer->tpid)) ||
+ (ice_vc_is_valid_vlan(inner) &&
+ !ice_vc_validate_vlan_tpid(filtering_support->inner,
+ inner->tpid)))
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ice_vc_to_vlan - transform from struct virtchnl_vlan to struct ice_vlan
+ * @vc_vlan: struct virtchnl_vlan to transform
+ */
+static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan)
+{
+ struct ice_vlan vlan = { 0 };
+
+ vlan.prio = (vc_vlan->tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ vlan.vid = vc_vlan->tci & VLAN_VID_MASK;
+ vlan.tpid = vc_vlan->tpid;
+
+ return vlan;
+}
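ice_vc_to_vlan() unpacks the standard 802.1Q TCI layout: the 3-bit PCP sits in bits 15:13 (VLAN_PRIO_MASK shifted down by VLAN_PRIO_SHIFT) and the 12-bit VID in bits 11:0 (VLAN_VID_MASK). A self-contained sketch with one worked value:

    #include <stdint.h>
    #include <stdio.h>

    #define PRIO_MASK  0xe000u	/* mirrors VLAN_PRIO_MASK  */
    #define PRIO_SHIFT 13	/* mirrors VLAN_PRIO_SHIFT */
    #define VID_MASK   0x0fffu	/* mirrors VLAN_VID_MASK   */

    int main(void)
    {
    	uint16_t tci = 0xa064;	/* PCP 5, DEI 0, VID 100 */

    	printf("prio=%u vid=%u\n",
    	       (tci & PRIO_MASK) >> PRIO_SHIFT, tci & VID_MASK);
    	return 0;	/* prints: prio=5 vid=100 */
    }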
+
+/**
+ * ice_vc_vlan_action - action to perform on the virtchnl_vlan
+ * @vsi: VF's VSI used to perform the action
+ * @vlan_action: function to perform the action with (i.e. add/del)
+ * @vlan: VLAN filter to perform the action with
+ */
+static int
+ice_vc_vlan_action(struct ice_vsi *vsi,
+ int (*vlan_action)(struct ice_vsi *, struct ice_vlan *),
+ struct ice_vlan *vlan)
+{
+ int err;
+
+ err = vlan_action(vsi, vlan);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_vc_del_vlans - delete VLAN(s) from the virtchnl filter list
+ * @vf: VF used to delete the VLAN(s)
+ * @vsi: VF's VSI used to delete the VLAN(s)
+ * @vfl: virtchnl filter list used to delete the filters
+ */
+static int
+ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_vlan_filter_list_v2 *vfl)
+{
+ bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
+ int err;
+ u16 i;
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
+ struct virtchnl_vlan *vc_vlan;
+
+ vc_vlan = &vlan_fltr->outer;
+ if (ice_vc_is_valid_vlan(vc_vlan)) {
+ struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
+
+ err = ice_vc_vlan_action(vsi,
+ vsi->outer_vlan_ops.del_vlan,
+ &vlan);
+ if (err)
+ return err;
+
+ if (vlan_promisc)
+ ice_vf_dis_vlan_promisc(vsi, &vlan);
+ }
+
+ vc_vlan = &vlan_fltr->inner;
+ if (ice_vc_is_valid_vlan(vc_vlan)) {
+ struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
+
+ err = ice_vc_vlan_action(vsi,
+ vsi->inner_vlan_ops.del_vlan,
+ &vlan);
+ if (err)
+ return err;
+
+ /* no support for VLAN promiscuous on inner VLAN unless
+ * we are in Single VLAN Mode (SVM)
+ */
+ if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc)
+ ice_vf_dis_vlan_promisc(vsi, &vlan);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_remove_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_DEL_VLAN_V2
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ */
+static int ice_vc_remove_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vlan_filter_list_v2 *vfl =
+ (struct virtchnl_vlan_filter_list_v2 *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi;
+
+ if (!ice_vc_validate_vlan_filter_list(&vf->vlan_v2_caps.filtering,
+ vfl)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (ice_vc_del_vlans(vf, vsi, vfl))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN_V2, v_ret, NULL,
+ 0);
+}
+
+/**
+ * ice_vc_add_vlans - add VLAN(s) from the virtchnl filter list
+ * @vf: VF used to add the VLAN(s)
+ * @vsi: VF's VSI used to add the VLAN(s)
+ * @vfl: virtchnl filter list used to add the filters
+ */
+static int
+ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_vlan_filter_list_v2 *vfl)
+{
+ bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
+ int err;
+ u16 i;
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
+ struct virtchnl_vlan *vc_vlan;
+
+ vc_vlan = &vlan_fltr->outer;
+ if (ice_vc_is_valid_vlan(vc_vlan)) {
+ struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
+
+ err = ice_vc_vlan_action(vsi,
+ vsi->outer_vlan_ops.add_vlan,
+ &vlan);
+ if (err)
+ return err;
+
+ if (vlan_promisc) {
+ err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ if (err)
+ return err;
+ }
+ }
+
+ vc_vlan = &vlan_fltr->inner;
+ if (ice_vc_is_valid_vlan(vc_vlan)) {
+ struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
+
+ err = ice_vc_vlan_action(vsi,
+ vsi->inner_vlan_ops.add_vlan,
+ &vlan);
+ if (err)
+ return err;
+
+ /* no support for VLAN promiscuous on inner VLAN unless
+ * we are in Single VLAN Mode (SVM)
+ */
+ if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc) {
+ err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ if (err)
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_validate_add_vlan_filter_list - validate add filter list from the VF
+ * @vsi: VF VSI used to get number of existing VLAN filters
+ * @vfc: negotiated/supported VLAN filtering capabilities
+ * @vfl: VLAN filter list from VF to validate
+ *
+ * Validate all of the filters in the VLAN filter list from the VF during the
+ * VIRTCHNL_OP_ADD_VLAN_V2 opcode. If any of the checks fail then return false.
+ * Otherwise return true.
+ */
+static bool
+ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
+ struct virtchnl_vlan_filtering_caps *vfc,
+ struct virtchnl_vlan_filter_list_v2 *vfl)
+{
+ u16 num_requested_filters = vsi->num_vlan + vfl->num_elements;
+
+ if (num_requested_filters > vfc->max_filters)
+ return false;
+
+ return ice_vc_validate_vlan_filter_list(vfc, vfl);
+}
+
+/**
+ * ice_vc_add_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_ADD_VLAN_V2
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ */
+static int ice_vc_add_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_filter_list_v2 *vfl =
+ (struct virtchnl_vlan_filter_list_v2 *)msg;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_validate_add_vlan_filter_list(vsi,
+ &vf->vlan_v2_caps.filtering,
+ vfl)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (ice_vc_add_vlans(vf, vsi, vfl))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN_V2, v_ret, NULL,
+ 0);
+}
+
+/**
+ * ice_vc_valid_vlan_setting - validate VLAN setting
+ * @negotiated_settings: negotiated VLAN settings during VF init
+ * @ethertype_setting: ethertype(s) requested for the VLAN setting
+ */
+static bool
+ice_vc_valid_vlan_setting(u32 negotiated_settings, u32 ethertype_setting)
+{
+ if (ethertype_setting && !(negotiated_settings & ethertype_setting))
+ return false;
+
+ /* only allow a single VIRTCHNL_VLAN_ETHERTYPE if
+ * VIRTCHNL_VLAN_ETHERTYPE_AND is not negotiated/supported
+ */
+ if (!(negotiated_settings & VIRTCHNL_VLAN_ETHERTYPE_AND) &&
+ hweight32(ethertype_setting) > 1)
+ return false;
+
+ /* ability to modify the VLAN setting was not negotiated */
+ if (!(negotiated_settings & VIRTCHNL_VLAN_TOGGLE))
+ return false;
+
+ return true;
+}
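Worked case: with negotiated outer settings of VIRTCHNL_VLAN_ETHERTYPE_8100 | VIRTCHNL_VLAN_TOGGLE, a request for 8100 | 88A8 survives the first test (the 8100 bit intersects) but sets two ethertype bits; because VIRTCHNL_VLAN_ETHERTYPE_AND was not negotiated and hweight32() returns 2, the request is rejected.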
+
+/**
+ * ice_vc_valid_vlan_setting_msg - validate the VLAN setting message
+ * @caps: negotiated VLAN settings during VF init
+ * @msg: message to validate
+ *
+ * Used to validate any VLAN virtchnl message sent as a
+ * virtchnl_vlan_setting structure. Validates the message against the
+ * negotiated/supported caps during VF driver init.
+ */
+static bool
+ice_vc_valid_vlan_setting_msg(struct virtchnl_vlan_supported_caps *caps,
+ struct virtchnl_vlan_setting *msg)
+{
+ if ((!msg->outer_ethertype_setting &&
+ !msg->inner_ethertype_setting) ||
+ (!caps->outer && !caps->inner))
+ return false;
+
+ if (msg->outer_ethertype_setting &&
+ !ice_vc_valid_vlan_setting(caps->outer,
+ msg->outer_ethertype_setting))
+ return false;
+
+ if (msg->inner_ethertype_setting &&
+ !ice_vc_valid_vlan_setting(caps->inner,
+ msg->inner_ethertype_setting))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_vc_get_tpid - transform from VIRTCHNL_VLAN_ETHERTYPE_* to VLAN TPID
+ * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* used to get VLAN TPID
+ * @tpid: VLAN TPID to populate
+ */
+static int ice_vc_get_tpid(u32 ethertype_setting, u16 *tpid)
+{
+ switch (ethertype_setting) {
+ case VIRTCHNL_VLAN_ETHERTYPE_8100:
+ *tpid = ETH_P_8021Q;
+ break;
+ case VIRTCHNL_VLAN_ETHERTYPE_88A8:
+ *tpid = ETH_P_8021AD;
+ break;
+ case VIRTCHNL_VLAN_ETHERTYPE_9100:
+ *tpid = ETH_P_QINQ1;
+ break;
+ default:
+ *tpid = 0;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_ena_vlan_offload - enable VLAN offload based on the ethertype_setting
+ * @vsi: VF's VSI used to enable the VLAN offload
+ * @ena_offload: function used to enable the VLAN offload
+ * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* to enable offloads for
+ */
+static int
+ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
+ int (*ena_offload)(struct ice_vsi *vsi, u16 tpid),
+ u32 ethertype_setting)
+{
+ u16 tpid;
+ int err;
+
+ err = ice_vc_get_tpid(ethertype_setting, &tpid);
+ if (err)
+ return err;
+
+ err = ena_offload(vsi, tpid);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+#define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3
+#define ICE_L2TSEL_BIT_OFFSET 23
+enum ice_l2tsel {
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
+};
+
+/**
+ * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
+ * @vsi: VSI used to update l2tsel on
+ * @l2tsel: l2tsel setting requested
+ *
+ * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
+ * This will modify which descriptor field the first offloaded VLAN will be
+ * stripped into.
+ */
+static void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 l2tsel_bit;
+ int i;
+
+ if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
+ l2tsel_bit = 0;
else
- return ice_vsi_manage_vlan_stripping(vsi, false);
+ l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
+
+ for (i = 0; i < vsi->alloc_rxq; i++) {
+ u16 pfq = vsi->rxq_map[i];
+ u32 qrx_context_offset;
+ u32 regval;
+
+ qrx_context_offset =
+ QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
+
+ regval = rd32(hw, qrx_context_offset);
+ regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
+ regval |= l2tsel_bit;
+ wr32(hw, qrx_context_offset, regval);
+ }
}
-static struct ice_vc_vf_ops ice_vc_vf_dflt_ops = {
+/**
+ * ice_vc_ena_vlan_stripping_v2_msg
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ *
+ * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
+ */
+static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_supported_caps *stripping_support;
+ struct virtchnl_vlan_setting *strip_msg =
+ (struct virtchnl_vlan_setting *)msg;
+ u32 ethertype_setting;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
+ if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = strip_msg->outer_ethertype_setting;
+ if (ethertype_setting) {
+ if (ice_vc_ena_vlan_offload(vsi,
+ vsi->outer_vlan_ops.ena_stripping,
+ ethertype_setting)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ } else {
+ enum ice_l2tsel l2tsel =
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND;
+
+ /* PF tells the VF that the outer VLAN tag is always
+ * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
+ * inner is always extracted to
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
+ * support outer stripping so the first tag always ends
+ * up in L2TAG2_2ND and the second/inner tag, if
+ * enabled, is extracted in L2TAG1.
+ */
+ ice_vsi_update_l2tsel(vsi, l2tsel);
+ }
+ }
+
+ ethertype_setting = strip_msg->inner_ethertype_setting;
+ if (ethertype_setting &&
+ ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_stripping,
+ ethertype_setting)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_dis_vlan_stripping_v2_msg
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ *
+ * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
+ */
+static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_supported_caps *stripping_support;
+ struct virtchnl_vlan_setting *strip_msg =
+ (struct virtchnl_vlan_setting *)msg;
+ u32 ethertype_setting;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
+ if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = strip_msg->outer_ethertype_setting;
+ if (ethertype_setting) {
+ if (vsi->outer_vlan_ops.dis_stripping(vsi)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ } else {
+ enum ice_l2tsel l2tsel =
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1;
+
+ /* PF tells the VF that the outer VLAN tag is always
+ * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
+ * inner is always extracted to
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
+ * support inner stripping while outer stripping is
+ * disabled so that the first and only tag is extracted
+ * in L2TAG1.
+ */
+ ice_vsi_update_l2tsel(vsi, l2tsel);
+ }
+ }
+
+ ethertype_setting = strip_msg->inner_ethertype_setting;
+ if (ethertype_setting && vsi->inner_vlan_ops.dis_stripping(vsi)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_ena_vlan_insertion_v2_msg
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ *
+ * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
+ */
+static int ice_vc_ena_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_supported_caps *insertion_support;
+ struct virtchnl_vlan_setting *insertion_msg =
+ (struct virtchnl_vlan_setting *)msg;
+ u32 ethertype_setting;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
+ if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = insertion_msg->outer_ethertype_setting;
+ if (ethertype_setting &&
+ ice_vc_ena_vlan_offload(vsi, vsi->outer_vlan_ops.ena_insertion,
+ ethertype_setting)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = insertion_msg->inner_ethertype_setting;
+ if (ethertype_setting &&
+ ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_insertion,
+ ethertype_setting)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_dis_vlan_insertion_v2_msg
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ *
+ * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
+ */
+static int ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_supported_caps *insertion_support;
+ struct virtchnl_vlan_setting *insertion_msg =
+ (struct virtchnl_vlan_setting *)msg;
+ u32 ethertype_setting;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
+ if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = insertion_msg->outer_ethertype_setting;
+ if (ethertype_setting && vsi->outer_vlan_ops.dis_insertion(vsi)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = insertion_msg->inner_ethertype_setting;
+ if (ethertype_setting && vsi->inner_vlan_ops.dis_insertion(vsi)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2,
+ v_ret, NULL, 0);
+}
+
+static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.get_ver_msg = ice_vc_get_ver_msg,
.get_vf_res_msg = ice_vc_get_vf_res_msg,
.reset_vf = ice_vc_reset_vf_msg,
@@ -4470,11 +3409,22 @@ static struct ice_vc_vf_ops ice_vc_vf_dflt_ops = {
.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
.add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
.del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
+ .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
+ .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
+ .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
+ .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
+ .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
+ .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
+ .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
};
-void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops)
+/**
+ * ice_virtchnl_set_dflt_ops - Switch to default virtchnl ops
+ * @vf: the VF to switch ops
+ */
+void ice_virtchnl_set_dflt_ops(struct ice_vf *vf)
{
- *ops = ice_vc_vf_dflt_ops;
+ vf->virtchnl_ops = &ice_virtchnl_dflt_ops;
}
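Keeping both ops tables const and swapping a single pointer is what lets the eswitch code move a VF between legacy and representor behavior atomically, without patching individual handlers. A minimal standalone sketch of that dispatch shape (hypothetical handler names, not the driver's):

    #include <stdio.h>

    struct vf;
    struct ops { void (*add_mac)(struct vf *vf); };

    static void add_mac_dflt(struct vf *vf) { (void)vf; puts("legacy add"); }
    static void add_mac_repr(struct vf *vf) { (void)vf; puts("repr: denied"); }

    static const struct ops dflt_ops = { .add_mac = add_mac_dflt };
    static const struct ops repr_ops = { .add_mac = add_mac_repr };

    struct vf { const struct ops *ops; };

    int main(void)
    {
    	struct vf vf = { .ops = &dflt_ops };

    	vf.ops->add_mac(&vf);	/* legacy path */
    	vf.ops = &repr_ops;	/* switchdev mode entered */
    	vf.ops->add_mac(&vf);	/* representor path */
    	return 0;
    }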
/**
@@ -4607,15 +3557,44 @@ ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
NULL, 0);
}
-void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops)
+static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
+ .get_ver_msg = ice_vc_get_ver_msg,
+ .get_vf_res_msg = ice_vc_get_vf_res_msg,
+ .reset_vf = ice_vc_reset_vf_msg,
+ .add_mac_addr_msg = ice_vc_repr_add_mac,
+ .del_mac_addr_msg = ice_vc_repr_del_mac,
+ .cfg_qs_msg = ice_vc_cfg_qs_msg,
+ .ena_qs_msg = ice_vc_ena_qs_msg,
+ .dis_qs_msg = ice_vc_dis_qs_msg,
+ .request_qs_msg = ice_vc_request_qs_msg,
+ .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
+ .config_rss_key = ice_vc_config_rss_key,
+ .config_rss_lut = ice_vc_config_rss_lut,
+ .get_stats_msg = ice_vc_get_stats_msg,
+ .cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
+ .add_vlan_msg = ice_vc_repr_add_vlan,
+ .remove_vlan_msg = ice_vc_repr_del_vlan,
+ .ena_vlan_stripping = ice_vc_repr_ena_vlan_stripping,
+ .dis_vlan_stripping = ice_vc_repr_dis_vlan_stripping,
+ .handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
+ .add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
+ .del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
+ .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
+ .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
+ .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
+ .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
+ .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
+ .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
+ .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
+};
+
+/**
+ * ice_virtchnl_set_repr_ops - Switch to representor virtchnl ops
+ * @vf: the VF to switch ops
+ */
+void ice_virtchnl_set_repr_ops(struct ice_vf *vf)
{
- ops->add_mac_addr_msg = ice_vc_repr_add_mac;
- ops->del_mac_addr_msg = ice_vc_repr_del_mac;
- ops->add_vlan_msg = ice_vc_repr_add_vlan;
- ops->remove_vlan_msg = ice_vc_repr_del_vlan;
- ops->ena_vlan_stripping = ice_vc_repr_ena_vlan_stripping;
- ops->dis_vlan_stripping = ice_vc_repr_dis_vlan_stripping;
- ops->cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode;
+ vf->virtchnl_ops = &ice_virtchnl_repr_ops;
}
/**
@@ -4630,20 +3609,21 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
{
u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
s16 vf_id = le16_to_cpu(event->desc.retval);
+ const struct ice_virtchnl_ops *ops;
u16 msglen = event->msg_len;
- struct ice_vc_vf_ops *ops;
u8 *msg = event->msg_buf;
struct ice_vf *vf = NULL;
struct device *dev;
int err = 0;
dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id)) {
- err = -EINVAL;
- goto error_handler;
- }
- vf = &pf->vf[vf_id];
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf) {
+ dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n",
+ vf_id, v_opcode, msglen);
+ return;
+ }
/* Check if VF is disabled. */
if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
@@ -4651,7 +3631,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
goto error_handler;
}
- ops = &vf->vc_ops;
+ ops = vf->virtchnl_ops;
/* Perform basic checks on the msg */
err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
@@ -4666,6 +3646,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
ice_vc_send_msg_to_vf(vf, v_opcode,
VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
0);
+ ice_put_vf(vf);
return;
}
@@ -4675,6 +3656,7 @@ error_handler:
NULL, 0);
dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
vf_id, v_opcode, msglen, err);
+ ice_put_vf(vf);
return;
}
@@ -4684,6 +3666,7 @@ error_handler:
if (!mutex_trylock(&vf->cfg_lock)) {
dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n",
vf->vf_id);
+ ice_put_vf(vf);
return;
}
@@ -4694,7 +3677,7 @@ error_handler:
case VIRTCHNL_OP_GET_VF_RESOURCES:
err = ops->get_vf_res_msg(vf, msg);
if (ice_vf_init_vlan_stripping(vf))
- dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
+ dev_dbg(dev, "Failed to initialize VLAN stripping for VF %d\n",
vf->vf_id);
ice_vc_notify_vf_link_state(vf);
break;
@@ -4759,6 +3742,27 @@ error_handler:
case VIRTCHNL_OP_DEL_RSS_CFG:
err = ops->handle_rss_cfg_msg(vf, msg, false);
break;
+ case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
+ err = ops->get_offload_vlan_v2_caps(vf);
+ break;
+ case VIRTCHNL_OP_ADD_VLAN_V2:
+ err = ops->add_vlan_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_VLAN_V2:
+ err = ops->remove_vlan_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+ err = ops->ena_vlan_stripping_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+ err = ops->dis_vlan_stripping_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+ err = ops->ena_vlan_insertion_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+ err = ops->dis_vlan_insertion_v2_msg(vf, msg);
+ break;
case VIRTCHNL_OP_UNKNOWN:
default:
dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
@@ -4777,549 +3781,5 @@ error_handler:
}
mutex_unlock(&vf->cfg_lock);
-}
-
-/**
- * ice_get_vf_cfg
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @ivi: VF configuration structure
- *
- * return VF configuration
- */
-int
-ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vf *vf;
-
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
-
- if (ice_check_vf_init(pf, vf))
- return -EBUSY;
-
- ivi->vf = vf_id;
- ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr);
-
- /* VF configuration for VLAN and applicable QoS */
- ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
- ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
-
- ivi->trusted = vf->trusted;
- ivi->spoofchk = vf->spoofchk;
- if (!vf->link_forced)
- ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
- else if (vf->link_up)
- ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
- else
- ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
- ivi->max_tx_rate = vf->max_tx_rate;
- ivi->min_tx_rate = vf->min_tx_rate;
- return 0;
-}
-
-/**
- * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
- * @pf: PF used to reference the switch's rules
- * @umac: unicast MAC to compare against existing switch rules
- *
- * Return true on the first/any match, else return false
- */
-static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
-{
- struct ice_sw_recipe *mac_recipe_list =
- &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
- struct ice_fltr_mgmt_list_entry *list_itr;
- struct list_head *rule_head;
- struct mutex *rule_lock; /* protect MAC filter list access */
-
- rule_head = &mac_recipe_list->filt_rules;
- rule_lock = &mac_recipe_list->filt_rule_lock;
-
- mutex_lock(rule_lock);
- list_for_each_entry(list_itr, rule_head, list_entry) {
- u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
-
- if (ether_addr_equal(existing_mac, umac)) {
- mutex_unlock(rule_lock);
- return true;
- }
- }
-
- mutex_unlock(rule_lock);
-
- return false;
-}
-
-/**
- * ice_set_vf_mac
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @mac: MAC address
- *
- * program VF MAC address
- */
-int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vf *vf;
- int ret;
-
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- if (is_multicast_ether_addr(mac)) {
- netdev_err(netdev, "%pM not a valid unicast address\n", mac);
- return -EINVAL;
- }
-
- vf = &pf->vf[vf_id];
- /* nothing left to do, unicast MAC already set */
- if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
- ether_addr_equal(vf->hw_lan_addr.addr, mac))
- return 0;
-
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- if (ice_unicast_mac_exists(pf, mac)) {
- netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
- mac, vf_id, mac);
- return -EINVAL;
- }
-
- mutex_lock(&vf->cfg_lock);
-
- /* VF is notified of its new MAC via the PF's response to the
- * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
- */
- ether_addr_copy(vf->dev_lan_addr.addr, mac);
- ether_addr_copy(vf->hw_lan_addr.addr, mac);
- if (is_zero_ether_addr(mac)) {
- /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
- vf->pf_set_mac = false;
- netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
- vf->vf_id);
- } else {
- /* PF will add MAC rule for the VF */
- vf->pf_set_mac = true;
- netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
- mac, vf_id);
- }
-
- ice_vc_reset_vf(vf);
- mutex_unlock(&vf->cfg_lock);
- return 0;
-}
-
-/**
- * ice_set_vf_trust
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @trusted: Boolean value to enable/disable trusted VF
- *
- * Enable or disable a given VF as trusted
- */
-int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vf *vf;
- int ret;
-
- if (ice_is_eswitch_mode_switchdev(pf)) {
- dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
- return -EOPNOTSUPP;
- }
-
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- /* Check if already trusted */
- if (trusted == vf->trusted)
- return 0;
-
- mutex_lock(&vf->cfg_lock);
-
- vf->trusted = trusted;
- ice_vc_reset_vf(vf);
- dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
- vf_id, trusted ? "" : "un");
-
- mutex_unlock(&vf->cfg_lock);
-
- return 0;
-}
-
-/**
- * ice_set_vf_link_state
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @link_state: required link state
- *
- * Set VF's link state, irrespective of physical link state status
- */
-int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vf *vf;
- int ret;
-
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- switch (link_state) {
- case IFLA_VF_LINK_STATE_AUTO:
- vf->link_forced = false;
- break;
- case IFLA_VF_LINK_STATE_ENABLE:
- vf->link_forced = true;
- vf->link_up = true;
- break;
- case IFLA_VF_LINK_STATE_DISABLE:
- vf->link_forced = true;
- vf->link_up = false;
- break;
- default:
- return -EINVAL;
- }
-
- ice_vc_notify_vf_link_state(vf);
-
- return 0;
-}
-
-/**
- * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
- * @pf: PF associated with VFs
- */
-static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
-{
- int rate = 0, i;
-
- ice_for_each_vf(pf, i)
- rate += pf->vf[i].min_tx_rate;
-
- return rate;
-}
-
-/**
- * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
- * @vf: VF trying to configure min_tx_rate
- * @min_tx_rate: min Tx rate in Mbps
- *
- * Check if the min_tx_rate being passed in will cause oversubscription of total
- * min_tx_rate based on the current link speed and all other VFs configured
- * min_tx_rate
- *
- * Return true if the passed min_tx_rate would cause oversubscription, else
- * return false
- */
-static bool
-ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
-{
- int link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf));
- int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);
-
- /* this VF's previous rate is being overwritten */
- all_vfs_min_tx_rate -= vf->min_tx_rate;
-
- if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
- dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
- min_tx_rate, vf->vf_id,
- all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
- link_speed_mbps);
- return true;
- }
-
- return false;
-}
-
-/**
- * ice_set_vf_bw - set min/max VF bandwidth
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @min_tx_rate: Minimum Tx rate in Mbps
- * @max_tx_rate: Maximum Tx rate in Mbps
- */
-int
-ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
- int max_tx_rate)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vsi *vsi;
- struct device *dev;
- struct ice_vf *vf;
- int ret;
-
- dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- vsi = ice_get_vf_vsi(vf);
-
- /* when max_tx_rate is zero that means no max Tx rate limiting, so only
- * check if max_tx_rate is non-zero
- */
- if (max_tx_rate && min_tx_rate > max_tx_rate) {
- dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n",
- min_tx_rate, max_tx_rate);
- return -EINVAL;
- }
-
- if (min_tx_rate && ice_is_dcb_active(pf)) {
- dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
- return -EOPNOTSUPP;
- }
-
- if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate))
- return -EINVAL;
-
- if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
- ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
- if (ret) {
- dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
- vf->vf_id);
- return ret;
- }
-
- vf->min_tx_rate = min_tx_rate;
- }
-
- if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
- ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
- if (ret) {
- dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
- vf->vf_id);
- return ret;
- }
-
- vf->max_tx_rate = max_tx_rate;
- }
-
- return 0;
-}
-
-/**
- * ice_get_vf_stats - populate some stats for the VF
- * @netdev: the netdev of the PF
- * @vf_id: the host OS identifier (0-255)
- * @vf_stats: pointer to the OS memory to be initialized
- */
-int ice_get_vf_stats(struct net_device *netdev, int vf_id,
- struct ifla_vf_stats *vf_stats)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_eth_stats *stats;
- struct ice_vsi *vsi;
- struct ice_vf *vf;
- int ret;
-
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- return -EINVAL;
-
- ice_update_eth_stats(vsi);
- stats = &vsi->eth_stats;
-
- memset(vf_stats, 0, sizeof(*vf_stats));
-
- vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
- stats->rx_multicast;
- vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
- stats->tx_multicast;
- vf_stats->rx_bytes = stats->rx_bytes;
- vf_stats->tx_bytes = stats->tx_bytes;
- vf_stats->broadcast = stats->rx_broadcast;
- vf_stats->multicast = stats->rx_multicast;
- vf_stats->rx_dropped = stats->rx_discards;
- vf_stats->tx_dropped = stats->tx_discards;
-
- return 0;
-}
-
-/**
- * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
- * @vf: pointer to the VF structure
- */
-void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
- struct device *dev;
-
- dev = ice_pf_to_dev(pf);
-
- dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
- vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
- vf->dev_lan_addr.addr,
- test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
- ? "on" : "off");
-}
-
-/**
- * ice_print_vfs_mdd_events - print VFs malicious driver detect event
- * @pf: pointer to the PF structure
- *
- * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
- */
-void ice_print_vfs_mdd_events(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- int i;
-
- /* check that there are pending MDD events to print */
- if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
- return;
-
- /* VF MDD event logs are rate limited to one second intervals */
- if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
- return;
-
- pf->last_printed_mdd_jiffies = jiffies;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- /* only print Rx MDD event message if there are new events */
- if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
- vf->mdd_rx_events.last_printed =
- vf->mdd_rx_events.count;
- ice_print_vf_rx_mdd_event(vf);
- }
-
- /* only print Tx MDD event message if there are new events */
- if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
- vf->mdd_tx_events.last_printed =
- vf->mdd_tx_events.count;
-
- dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
- vf->mdd_tx_events.count, hw->pf_id, i,
- vf->dev_lan_addr.addr);
- }
- }
-}
-
-/**
- * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
- * @pdev: pointer to a pci_dev structure
- *
- * Called when recovering from a PF FLR to restore interrupt capability to
- * the VFs.
- */
-void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
-{
- u16 vf_id;
- int pos;
-
- if (!pci_num_vf(pdev))
- return;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
- if (pos) {
- struct pci_dev *vfdev;
-
- pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
- &vf_id);
- vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
- while (vfdev) {
- if (vfdev->is_virtfn && vfdev->physfn == pdev)
- pci_restore_msi_state(vfdev);
- vfdev = pci_get_device(pdev->vendor, vf_id,
- vfdev);
- }
- }
-}
-
-/**
- * ice_is_malicious_vf - helper function to detect a malicious VF
- * @pf: ptr to struct ice_pf
- * @event: pointer to the AQ event
- * @num_msg_proc: the number of messages processed so far
- * @num_msg_pending: the number of messages pending in admin queue
- */
-bool
-ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
- u16 num_msg_proc, u16 num_msg_pending)
-{
- s16 vf_id = le16_to_cpu(event->desc.retval);
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_mbx_data mbxdata;
- bool malvf = false;
- struct ice_vf *vf;
- int status;
-
- if (ice_validate_vf_id(pf, vf_id))
- return false;
-
- vf = &pf->vf[vf_id];
- /* Check if VF is disabled. */
- if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
- return false;
-
- mbxdata.num_msg_proc = num_msg_proc;
- mbxdata.num_pending_arq = num_msg_pending;
- mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
-#define ICE_MBX_OVERFLOW_WATERMARK 64
- mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
-
- /* check to see if we have a malicious VF */
- status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
- if (status)
- return false;
-
- if (malvf) {
- bool report_vf = false;
-
- /* if the VF is malicious and we haven't let the user
- * know about it, then let them know now
- */
- status = ice_mbx_report_malvf(&pf->hw, pf->malvfs,
- ICE_MAX_VF_COUNT, vf_id,
- &report_vf);
- if (status)
- dev_dbg(dev, "Error reporting malicious VF\n");
-
- if (report_vf) {
- struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
-
- if (pf_vsi)
- dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
- &vf->dev_lan_addr.addr[0],
- pf_vsi->netdev->dev_addr);
- }
-
- return true;
- }
-
- /* if there was an error in detection or the VF is not malicious then
- * return false
- */
- return false;
+ ice_put_vf(vf);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
new file mode 100644
index 000000000000..b5a3fd8adbb4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_VIRTCHNL_H_
+#define _ICE_VIRTCHNL_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/if_ether.h>
+#include <linux/avf/virtchnl.h>
+#include "ice_vf_lib.h"
+
+/* Restrict the number of MAC Addr and VLAN filters a non-trusted VF can program */
+#define ICE_MAX_VLAN_PER_VF 8
+
+/* MAC filters: 1 is reserved for the VF's default/perm_addr/LAA MAC, 1 for
+ * broadcast, and 16 for additional unicast/multicast filters
+ */
+#define ICE_MAX_MACADDR_PER_VF 18
+
+struct ice_virtchnl_ops {
+ int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
+ int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
+ void (*reset_vf)(struct ice_vf *vf);
+ int (*add_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*del_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*dis_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*request_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg);
+ int (*config_rss_key)(struct ice_vf *vf, u8 *msg);
+ int (*config_rss_lut)(struct ice_vf *vf, u8 *msg);
+ int (*get_stats_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);
+ int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
+ int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_vlan_stripping)(struct ice_vf *vf);
+ int (*dis_vlan_stripping)(struct ice_vf *vf);
+ int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add);
+ int (*add_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*del_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*get_offload_vlan_v2_caps)(struct ice_vf *vf);
+ int (*add_vlan_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*remove_vlan_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_vlan_stripping_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*dis_vlan_stripping_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*dis_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg);
+};
+
+#ifdef CONFIG_PCI_IOV
+void ice_virtchnl_set_dflt_ops(struct ice_vf *vf);
+void ice_virtchnl_set_repr_ops(struct ice_vf *vf);
+void ice_vc_notify_vf_link_state(struct ice_vf *vf);
+void ice_vc_notify_link_state(struct ice_pf *pf);
+void ice_vc_notify_reset(struct ice_pf *pf);
+int
+ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen);
+bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
+#else /* CONFIG_PCI_IOV */
+static inline void ice_virtchnl_set_dflt_ops(struct ice_vf *vf) { }
+static inline void ice_virtchnl_set_repr_ops(struct ice_vf *vf) { }
+static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { }
+static inline void ice_vc_notify_link_state(struct ice_pf *pf) { }
+static inline void ice_vc_notify_reset(struct ice_pf *pf) { }
+
+static inline int
+ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
+{
+ return false;
+}
+#endif /* !CONFIG_PCI_IOV */
+
+#endif /* _ICE_VIRTCHNL_H_ */
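
Aside: the ops table above lets the driver swap an entire set of message
handlers when a VF moves between default and switchdev (port representor)
mode. A minimal standalone sketch of that function-pointer-table pattern,
with illustrative names only, not the driver's real handlers:

#include <errno.h>
#include <stdio.h>

struct vf_ctx;					/* opaque per-VF state */

struct msg_ops {
	int (*add_mac)(struct vf_ctx *vf, const unsigned char *msg);
};

/* default mode: the PF honors the request */
static int dflt_add_mac(struct vf_ctx *vf, const unsigned char *msg)
{
	(void)vf; (void)msg;
	return 0;
}

/* representor mode: host policy owns the MAC, so reject the request */
static int repr_add_mac(struct vf_ctx *vf, const unsigned char *msg)
{
	(void)vf; (void)msg;
	return -EOPNOTSUPP;
}

static const struct msg_ops dflt_ops = { .add_mac = dflt_add_mac };
static const struct msg_ops repr_ops = { .add_mac = repr_add_mac };

int main(void)
{
	printf("default add_mac -> %d\n", dflt_ops.add_mac(NULL, NULL));
	printf("repr    add_mac -> %d\n", repr_ops.add_mac(NULL, NULL));
	return 0;
}
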
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index 9feebe5f556c..5a82216e7d03 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -55,6 +55,15 @@ static const u32 vlan_allowlist_opcodes[] = {
VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
};
+/* VIRTCHNL_VF_OFFLOAD_VLAN_V2 */
+static const u32 vlan_v2_allowlist_opcodes[] = {
+ VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS, VIRTCHNL_OP_ADD_VLAN_V2,
+ VIRTCHNL_OP_DEL_VLAN_V2, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
+ VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2,
+ VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2,
+};
+
/* VIRTCHNL_VF_OFFLOAD_RSS_PF */
static const u32 rss_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_CONFIG_RSS_KEY, VIRTCHNL_OP_CONFIG_RSS_LUT,
@@ -89,6 +98,7 @@ static const struct allowlist_opcode_info allowlist_opcodes[] = {
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_RSS_PF, rss_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF, adv_rss_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes),
+ ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN_V2, vlan_v2_allowlist_opcodes),
};
/**
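
The allowlist is consulted before a VF message is dispatched: each
capability the VF negotiates sets the bits for the opcodes it may now send,
and anything else is rejected. A self-contained model of that bitmap gating,
with made-up opcode numbers rather than virtchnl's:

#include <stdbool.h>
#include <stdio.h>

#define OP_MAX		64	/* illustrative; the driver sizes by VIRTCHNL_OP_MAX */
#define OP_ADD_VLAN_V2	21
#define OP_DEL_VLAN_V2	22

struct vf_allowlist {
	unsigned long long bits;	/* one bit per opcode, OP_MAX <= 64 */
};

static void allow_opcode(struct vf_allowlist *al, unsigned int op)
{
	al->bits |= 1ULL << op;
}

static bool opcode_allowed(const struct vf_allowlist *al, unsigned int op)
{
	return al->bits & (1ULL << op);
}

int main(void)
{
	struct vf_allowlist al = { 0 };

	/* set once the VF negotiates VIRTCHNL_VF_OFFLOAD_VLAN_V2 */
	allow_opcode(&al, OP_ADD_VLAN_V2);

	printf("add allowed=%d, del allowed=%d\n",
	       opcode_allowed(&al, OP_ADD_VLAN_V2),
	       opcode_allowed(&al, OP_DEL_VLAN_V2));	/* prints 1, 0 */
	return 0;
}
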
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index d64df81d4893..8e38ee2faf58 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -5,6 +5,7 @@
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_flow.h"
+#include "ice_vf_lib_private.h"
#define to_fltr_conf_from_desc(p) \
container_of(p, struct virtchnl_fdir_fltr_conf, input)
@@ -1288,15 +1289,16 @@ ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
union ice_32b_rx_flex_desc *rx_desc)
{
struct ice_pf *pf = ctrl_vsi->back;
+ struct ice_vf *vf = ctrl_vsi->vf;
struct ice_vf_fdir_ctx *ctx_done;
struct ice_vf_fdir_ctx *ctx_irq;
struct ice_vf_fdir *fdir;
unsigned long flags;
struct device *dev;
- struct ice_vf *vf;
int ret;
- vf = &pf->vf[ctrl_vsi->vf_id];
+ if (WARN_ON(!vf))
+ return;
fdir = &vf->fdir;
ctx_done = &fdir->ctx_done;
@@ -1571,15 +1573,16 @@ err_exit:
*/
void ice_flush_fdir_ctx(struct ice_pf *pf)
{
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
return;
- ice_for_each_vf(pf, i) {
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
struct device *dev = ice_pf_to_dev(pf);
enum virtchnl_fdir_prgm_status status;
- struct ice_vf *vf = &pf->vf[i];
struct ice_vf_fdir_ctx *ctx;
unsigned long flags;
int ret;
@@ -1633,6 +1636,7 @@ err_exit:
ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}
+ mutex_unlock(&pf->vfs.table_lock);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
index f4e629f4c09b..c5bcc8d7481c 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
@@ -6,6 +6,7 @@
struct ice_vf;
struct ice_pf;
+struct ice_vsi;
enum ice_fdir_ctx_stat {
ICE_FDIR_CTX_READY,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
deleted file mode 100644
index 752487a1bdd6..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ /dev/null
@@ -1,346 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Intel Corporation. */
-
-#ifndef _ICE_VIRTCHNL_PF_H_
-#define _ICE_VIRTCHNL_PF_H_
-#include "ice.h"
-#include "ice_virtchnl_fdir.h"
-
-/* Restrict the number of MAC addresses and VLANs that a non-trusted VF can program */
-#define ICE_MAX_VLAN_PER_VF 8
-/* MAC filters: 1 is reserved for the VF's default/perm_addr/LAA MAC, 1 for
- * broadcast, and 16 for additional unicast/multicast filters
- */
-#define ICE_MAX_MACADDR_PER_VF 18
-
-/* Malicious Driver Detection */
-#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10
-#define ICE_MDD_EVENTS_THRESHOLD 30
-
-/* Static VF transaction/status register def */
-#define VF_DEVICE_STATUS 0xAA
-#define VF_TRANS_PENDING_M 0x20
-
-/* wait defines for polling PF_PCI_CIAD register status */
-#define ICE_PCI_CIAD_WAIT_COUNT 100
-#define ICE_PCI_CIAD_WAIT_DELAY_US 1
-
-/* VF resource constraints */
-#define ICE_MAX_VF_COUNT 256
-#define ICE_MIN_QS_PER_VF 1
-#define ICE_NONQ_VECS_VF 1
-#define ICE_MAX_SCATTER_QS_PER_VF 16
-#define ICE_MAX_RSS_QS_PER_VF 16
-#define ICE_NUM_VF_MSIX_MED 17
-#define ICE_NUM_VF_MSIX_SMALL 5
-#define ICE_NUM_VF_MSIX_MULTIQ_MIN 3
-#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
-#define ICE_MAX_VF_RESET_TRIES 40
-#define ICE_MAX_VF_RESET_SLEEP_MS 20
-
-#define ice_for_each_vf(pf, i) \
- for ((i) = 0; (i) < (pf)->num_alloc_vfs; (i)++)
-
-/* Specific VF states */
-enum ice_vf_states {
- ICE_VF_STATE_INIT = 0, /* PF is initializing VF */
- ICE_VF_STATE_ACTIVE, /* VF resources are allocated for use */
- ICE_VF_STATE_QS_ENA, /* VF queue(s) enabled */
- ICE_VF_STATE_DIS,
- ICE_VF_STATE_MC_PROMISC,
- ICE_VF_STATE_UC_PROMISC,
- ICE_VF_STATES_NBITS
-};
-
-/* VF capabilities */
-enum ice_virtchnl_cap {
- ICE_VIRTCHNL_VF_CAP_L2 = 0,
- ICE_VIRTCHNL_VF_CAP_PRIVILEGE,
-};
-
-struct ice_time_mac {
- unsigned long time_modified;
- u8 addr[ETH_ALEN];
-};
-
-/* VF MDD events print structure */
-struct ice_mdd_vf_events {
- u16 count; /* total count of Rx|Tx events */
- /* count number of the last printed event */
- u16 last_printed;
-};
-
-struct ice_vf;
-
-struct ice_vc_vf_ops {
- int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
- int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
- void (*reset_vf)(struct ice_vf *vf);
- int (*add_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
- int (*del_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
- int (*cfg_qs_msg)(struct ice_vf *vf, u8 *msg);
- int (*ena_qs_msg)(struct ice_vf *vf, u8 *msg);
- int (*dis_qs_msg)(struct ice_vf *vf, u8 *msg);
- int (*request_qs_msg)(struct ice_vf *vf, u8 *msg);
- int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg);
- int (*config_rss_key)(struct ice_vf *vf, u8 *msg);
- int (*config_rss_lut)(struct ice_vf *vf, u8 *msg);
- int (*get_stats_msg)(struct ice_vf *vf, u8 *msg);
- int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);
- int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
- int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg);
- int (*ena_vlan_stripping)(struct ice_vf *vf);
- int (*dis_vlan_stripping)(struct ice_vf *vf);
- int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add);
- int (*add_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
- int (*del_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
-};
-
-/* VF information structure */
-struct ice_vf {
- struct ice_pf *pf;
-
- /* Used during virtchnl message handling and NDO ops against the VF
- * that will trigger a VFR
- */
- struct mutex cfg_lock;
-
- u16 vf_id; /* VF ID in the PF space */
- u16 lan_vsi_idx; /* index into PF struct */
- u16 ctrl_vsi_idx;
- struct ice_vf_fdir fdir;
- /* first vector index of this VF in the PF space */
- int first_vector_idx;
- struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
- struct virtchnl_version_info vf_ver;
- u32 driver_caps; /* reported by VF driver */
- struct virtchnl_ether_addr dev_lan_addr;
- struct virtchnl_ether_addr hw_lan_addr;
- struct ice_time_mac legacy_last_added_umac;
- DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);
- DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
- u16 port_vlan_info; /* Port VLAN ID and QoS */
- u8 pf_set_mac:1; /* VF MAC address set by VMM admin */
- u8 trusted:1;
- u8 spoofchk:1;
- u8 link_forced:1;
- u8 link_up:1; /* only valid if VF link is forced */
- /* VSI indices - actual VSI pointers are maintained in the PF structure
- * When assigned, these will be non-zero, because VSI 0 is always
- * the main LAN VSI for the PF.
- */
- u16 lan_vsi_num; /* ID as used by firmware */
- unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
- unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
- DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
-
- u64 num_inval_msgs; /* number of continuous invalid msgs */
- u64 num_valid_msgs; /* number of valid msgs detected */
- unsigned long vf_caps; /* VF's adv. capabilities */
- u8 num_req_qs; /* num of queue pairs requested by VF */
- u16 num_mac;
- u16 num_vf_qs; /* num of queue configured per VF */
- struct ice_mdd_vf_events mdd_rx_events;
- struct ice_mdd_vf_events mdd_tx_events;
- DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
-
- struct ice_repr *repr;
-
- struct ice_vc_vf_ops vc_ops;
-
- /* devlink port data */
- struct devlink_port devlink_port;
-};
-
-#ifdef CONFIG_PCI_IOV
-struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
-void ice_process_vflr_event(struct ice_pf *pf);
-int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
-int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
-int
-ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
-
-void ice_free_vfs(struct ice_pf *pf);
-void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
-void ice_vc_notify_link_state(struct ice_pf *pf);
-void ice_vc_notify_reset(struct ice_pf *pf);
-void ice_vc_notify_vf_link_state(struct ice_vf *vf);
-void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops);
-void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops);
-bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
-bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
-void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
-bool
-ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
- u16 num_msg_proc, u16 num_msg_pending);
-
-int
-ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
- __be16 vlan_proto);
-
-int
-ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
- int max_tx_rate);
-
-int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
-
-int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
-
-int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
-
-bool ice_is_vf_disabled(struct ice_vf *vf);
-
-int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
-
-int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
-
-void ice_set_vf_state_qs_dis(struct ice_vf *vf);
-int
-ice_get_vf_stats(struct net_device *netdev, int vf_id,
- struct ifla_vf_stats *vf_stats);
-bool ice_is_any_vf_in_promisc(struct ice_pf *pf);
-void
-ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
-void ice_print_vfs_mdd_events(struct ice_pf *pf);
-void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
-bool
-ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
-struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
-int
-ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
- enum virtchnl_status_code v_retval, u8 *msg, u16 msglen);
-bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
-#else /* CONFIG_PCI_IOV */
-static inline void ice_process_vflr_event(struct ice_pf *pf) { }
-static inline void ice_free_vfs(struct ice_pf *pf) { }
-static inline
-void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { }
-static inline void ice_vc_notify_link_state(struct ice_pf *pf) { }
-static inline void ice_vc_notify_reset(struct ice_pf *pf) { }
-static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { }
-static inline void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops) { }
-static inline void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops) { }
-static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf) { }
-static inline
-void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
-static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
-static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
-static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { }
-
-static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
-{
- return -EOPNOTSUPP;
-}
-
-static inline bool ice_is_vf_disabled(struct ice_vf *vf)
-{
- return true;
-}
-
-static inline struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
-{
- return NULL;
-}
-
-static inline bool
-ice_is_malicious_vf(struct ice_pf __always_unused *pf,
- struct ice_rq_event_info __always_unused *event,
- u16 __always_unused num_msg_proc,
- u16 __always_unused num_msg_pending)
-{
- return false;
-}
-
-static inline bool
-ice_reset_all_vfs(struct ice_pf __always_unused *pf,
- bool __always_unused is_vflr)
-{
- return true;
-}
-
-static inline bool
-ice_reset_vf(struct ice_vf __always_unused *vf, bool __always_unused is_vflr)
-{
- return true;
-}
-
-static inline int
-ice_sriov_configure(struct pci_dev __always_unused *pdev,
- int __always_unused num_vfs)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_mac(struct net_device __always_unused *netdev,
- int __always_unused vf_id, u8 __always_unused *mac)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_get_vf_cfg(struct net_device __always_unused *netdev,
- int __always_unused vf_id,
- struct ifla_vf_info __always_unused *ivi)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_trust(struct net_device __always_unused *netdev,
- int __always_unused vf_id, bool __always_unused trusted)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_port_vlan(struct net_device __always_unused *netdev,
- int __always_unused vf_id, u16 __always_unused vid,
- u8 __always_unused qos, __be16 __always_unused v_proto)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_spoofchk(struct net_device __always_unused *netdev,
- int __always_unused vf_id, bool __always_unused ena)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_link_state(struct net_device __always_unused *netdev,
- int __always_unused vf_id, int __always_unused link_state)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_bw(struct net_device __always_unused *netdev,
- int __always_unused vf_id, int __always_unused min_tx_rate,
- int __always_unused max_tx_rate)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
- struct ice_q_vector __always_unused *q_vector)
-{
- return 0;
-}
-
-static inline int
-ice_get_vf_stats(struct net_device __always_unused *netdev,
- int __always_unused vf_id,
- struct ifla_vf_stats __always_unused *vf_stats)
-{
- return -EOPNOTSUPP;
-}
-
-static inline bool ice_is_any_vf_in_promisc(struct ice_pf __always_unused *pf)
-{
- return false;
-}
-#endif /* CONFIG_PCI_IOV */
-#endif /* _ICE_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vlan.h b/drivers/net/ethernet/intel/ice/ice_vlan.h
new file mode 100644
index 000000000000..bc4550a03173
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vlan.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_VLAN_H_
+#define _ICE_VLAN_H_
+
+#include <linux/types.h>
+#include "ice_type.h"
+
+struct ice_vlan {
+ u16 tpid;
+ u16 vid;
+ u8 prio;
+};
+
+#define ICE_VLAN(tpid, vid, prio) ((struct ice_vlan){ tpid, vid, prio })
+
+#endif /* _ICE_VLAN_H_ */
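
Usage fragment for the compound-literal helper above, assuming this header
and <linux/if_ether.h> (for ETH_P_8021Q, 0x8100) are in scope:

	/* describe an 802.1Q filter for VID 100, priority 0, inline */
	struct ice_vlan vlan = ICE_VLAN(ETH_P_8021Q, 100, 0);
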
diff --git a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
new file mode 100644
index 000000000000..1b618de592b7
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
@@ -0,0 +1,439 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice_common.h"
+
+/**
+ * ice_pkg_get_supported_vlan_mode - determine if DDP supports Double VLAN mode
+ * @hw: pointer to the HW struct
+ * @dvm: output variable, set to true if the DDP supports DVM, false for SVM
+ */
+static int
+ice_pkg_get_supported_vlan_mode(struct ice_hw *hw, bool *dvm)
+{
+ u16 meta_init_size = sizeof(struct ice_meta_init_section);
+ struct ice_meta_init_section *sect;
+ struct ice_buf_build *bld;
+ int status;
+
+ /* if anything fails, we assume there is no DVM support */
+ *dvm = false;
+
+ bld = ice_pkg_buf_alloc_single_section(hw,
+ ICE_SID_RXPARSER_METADATA_INIT,
+ meta_init_size, (void **)&sect);
+ if (!bld)
+ return -ENOMEM;
+
+ /* only need to read a single section */
+ sect->count = cpu_to_le16(1);
+ sect->offset = cpu_to_le16(ICE_META_VLAN_MODE_ENTRY);
+
+ status = ice_aq_upload_section(hw,
+ (struct ice_buf_hdr *)ice_pkg_buf(bld),
+ ICE_PKG_BUF_SIZE, NULL);
+ if (!status) {
+ DECLARE_BITMAP(entry, ICE_META_INIT_BITS);
+ u32 arr[ICE_META_INIT_DW_CNT];
+ u16 i;
+
+ /* convert to host bitmap format */
+ for (i = 0; i < ICE_META_INIT_DW_CNT; i++)
+ arr[i] = le32_to_cpu(sect->entry.bm[i]);
+
+ bitmap_from_arr32(entry, arr, (u16)ICE_META_INIT_BITS);
+
+ /* check if DVM is supported */
+ *dvm = test_bit(ICE_META_VLAN_MODE_BIT, entry);
+ }
+
+ ice_pkg_buf_free(hw, bld);
+
+ return status;
+}
+
+/**
+ * ice_aq_get_vlan_mode - get the VLAN mode of the device
+ * @hw: pointer to the HW structure
+ * @get_params: structure FW fills in based on the current VLAN mode config
+ *
+ * Get VLAN Mode Parameters (0x020D)
+ */
+static int
+ice_aq_get_vlan_mode(struct ice_hw *hw,
+ struct ice_aqc_get_vlan_mode *get_params)
+{
+ struct ice_aq_desc desc;
+
+ if (!get_params)
+ return -EINVAL;
+
+ ice_fill_dflt_direct_cmd_desc(&desc,
+ ice_aqc_opc_get_vlan_mode_parameters);
+
+ return ice_aq_send_cmd(hw, &desc, get_params, sizeof(*get_params),
+ NULL);
+}
+
+/**
+ * ice_aq_is_dvm_ena - query FW to check if double VLAN mode is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Returns true if the hardware/firmware is configured in double VLAN mode,
+ * otherwise returns false, signaling that the hardware/firmware is configured
+ * in single VLAN mode.
+ *
+ * Also returns false if this call fails for any reason (e.g. the firmware
+ * doesn't support this AQ call).
+ */
+static bool ice_aq_is_dvm_ena(struct ice_hw *hw)
+{
+ struct ice_aqc_get_vlan_mode get_params = { 0 };
+ int status;
+
+ status = ice_aq_get_vlan_mode(hw, &get_params);
+ if (status) {
+ ice_debug(hw, ICE_DBG_AQ, "Failed to get VLAN mode, status %d\n",
+ status);
+ return false;
+ }
+
+ return (get_params.vlan_mode & ICE_AQ_VLAN_MODE_DVM_ENA);
+}
+
+/**
+ * ice_is_dvm_ena - check if double VLAN mode is enabled
+ * @hw: pointer to the HW structure
+ *
+ * The device is configured in single or double VLAN mode on initialization and
+ * this cannot be changed dynamically at runtime. Because of this, there is no
+ * need to make an AQ call every time the driver needs to know the VLAN mode.
+ * Instead, use the cached VLAN mode.
+ */
+bool ice_is_dvm_ena(struct ice_hw *hw)
+{
+ return hw->dvm_ena;
+}
+
+/**
+ * ice_cache_vlan_mode - cache VLAN mode after DDP is downloaded
+ * @hw: pointer to the HW structure
+ *
+ * This is only called after downloading the DDP and after the global
+ * configuration lock has been released because all ports on a device need to
+ * cache the VLAN mode.
+ */
+static void ice_cache_vlan_mode(struct ice_hw *hw)
+{
+	hw->dvm_ena = ice_aq_is_dvm_ena(hw);
+}
+
+/**
+ * ice_pkg_supports_dvm - find out if DDP supports DVM
+ * @hw: pointer to the HW structure
+ */
+static bool ice_pkg_supports_dvm(struct ice_hw *hw)
+{
+ bool pkg_supports_dvm;
+ int status;
+
+ status = ice_pkg_get_supported_vlan_mode(hw, &pkg_supports_dvm);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG, "Failed to get supported VLAN mode, status %d\n",
+ status);
+ return false;
+ }
+
+ return pkg_supports_dvm;
+}
+
+/**
+ * ice_fw_supports_dvm - find out if FW supports DVM
+ * @hw: pointer to the HW structure
+ */
+static bool ice_fw_supports_dvm(struct ice_hw *hw)
+{
+ struct ice_aqc_get_vlan_mode get_vlan_mode = { 0 };
+ int status;
+
+ /* If firmware returns success, then it supports DVM, else it only
+ * supports SVM
+ */
+ status = ice_aq_get_vlan_mode(hw, &get_vlan_mode);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to get VLAN mode, status %d\n",
+ status);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ice_is_dvm_supported - check if Double VLAN Mode is supported
+ * @hw: pointer to the hardware structure
+ *
+ * Returns true if Double VLAN Mode (DVM) is supported and false if only Single
+ * VLAN Mode (SVM) is supported. In order for DVM to be supported the DDP and
+ * firmware must support it, otherwise only SVM is supported. This function
+ * should only be called while the global config lock is held and after the
+ * package has been successfully downloaded.
+ */
+static bool ice_is_dvm_supported(struct ice_hw *hw)
+{
+ if (!ice_pkg_supports_dvm(hw)) {
+ ice_debug(hw, ICE_DBG_PKG, "DDP doesn't support DVM\n");
+ return false;
+ }
+
+ if (!ice_fw_supports_dvm(hw)) {
+ ice_debug(hw, ICE_DBG_PKG, "FW doesn't support DVM\n");
+ return false;
+ }
+
+ return true;
+}
+
+#define ICE_EXTERNAL_VLAN_ID_FV_IDX 11
+#define ICE_SW_LKUP_VLAN_LOC_LKUP_IDX 1
+#define ICE_SW_LKUP_VLAN_PKT_FLAGS_LKUP_IDX 2
+#define ICE_SW_LKUP_PROMISC_VLAN_LOC_LKUP_IDX 2
+#define ICE_PKT_FLAGS_0_TO_15_FV_IDX 1
+#define ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK 0xD000
+static struct ice_update_recipe_lkup_idx_params ice_dvm_dflt_recipes[] = {
+ {
+ /* Update recipe ICE_SW_LKUP_VLAN to filter based on the
+ * outer/single VLAN in DVM
+ */
+ .rid = ICE_SW_LKUP_VLAN,
+ .fv_idx = ICE_EXTERNAL_VLAN_ID_FV_IDX,
+ .ignore_valid = true,
+ .mask = 0,
+ .mask_valid = false, /* use pre-existing mask */
+ .lkup_idx = ICE_SW_LKUP_VLAN_LOC_LKUP_IDX,
+ },
+ {
+ /* Update recipe ICE_SW_LKUP_VLAN to filter based on the VLAN
+ * packet flags to support VLAN filtering on multiple VLAN
+ * ethertypes (i.e. 0x8100 and 0x88a8) in DVM
+ */
+ .rid = ICE_SW_LKUP_VLAN,
+ .fv_idx = ICE_PKT_FLAGS_0_TO_15_FV_IDX,
+ .ignore_valid = false,
+ .mask = ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK,
+ .mask_valid = true,
+ .lkup_idx = ICE_SW_LKUP_VLAN_PKT_FLAGS_LKUP_IDX,
+ },
+ {
+ /* Update recipe ICE_SW_LKUP_PROMISC_VLAN to filter based on the
+ * outer/single VLAN in DVM
+ */
+ .rid = ICE_SW_LKUP_PROMISC_VLAN,
+ .fv_idx = ICE_EXTERNAL_VLAN_ID_FV_IDX,
+ .ignore_valid = true,
+ .mask = 0,
+ .mask_valid = false, /* use pre-existing mask */
+ .lkup_idx = ICE_SW_LKUP_PROMISC_VLAN_LOC_LKUP_IDX,
+ },
+};
+
+/**
+ * ice_dvm_update_dflt_recipes - update default switch recipes in DVM
+ * @hw: hardware structure used to update the recipes
+ */
+static int ice_dvm_update_dflt_recipes(struct ice_hw *hw)
+{
+ unsigned long i;
+
+ for (i = 0; i < ARRAY_SIZE(ice_dvm_dflt_recipes); i++) {
+ struct ice_update_recipe_lkup_idx_params *params;
+ int status;
+
+ params = &ice_dvm_dflt_recipes[i];
+
+ status = ice_update_recipe_lkup_idx(hw, params);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to update RID %d lkup_idx %d fv_idx %d mask_valid %s mask 0x%04x\n",
+ params->rid, params->lkup_idx, params->fv_idx,
+ params->mask_valid ? "true" : "false",
+ params->mask);
+ return status;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_aq_set_vlan_mode - set the VLAN mode of the device
+ * @hw: pointer to the HW structure
+ * @set_params: requested VLAN mode configuration
+ *
+ * Set VLAN Mode Parameters (0x020C)
+ */
+static int
+ice_aq_set_vlan_mode(struct ice_hw *hw,
+ struct ice_aqc_set_vlan_mode *set_params)
+{
+ u8 rdma_packet, mng_vlan_prot_id;
+ struct ice_aq_desc desc;
+
+ if (!set_params)
+ return -EINVAL;
+
+ if (set_params->l2tag_prio_tagging > ICE_AQ_VLAN_PRIO_TAG_MAX)
+ return -EINVAL;
+
+ rdma_packet = set_params->rdma_packet;
+ if (rdma_packet != ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING &&
+ rdma_packet != ICE_AQ_DVM_VLAN_RDMA_PKT_FLAG_SETTING)
+ return -EINVAL;
+
+ mng_vlan_prot_id = set_params->mng_vlan_prot_id;
+ if (mng_vlan_prot_id != ICE_AQ_VLAN_MNG_PROTOCOL_ID_OUTER &&
+ mng_vlan_prot_id != ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER)
+ return -EINVAL;
+
+ ice_fill_dflt_direct_cmd_desc(&desc,
+ ice_aqc_opc_set_vlan_mode_parameters);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_aq_send_cmd(hw, &desc, set_params, sizeof(*set_params),
+ NULL);
+}
+
+/**
+ * ice_set_dvm - sets up software and hardware for double VLAN mode
+ * @hw: pointer to the hardware structure
+ */
+static int ice_set_dvm(struct ice_hw *hw)
+{
+ struct ice_aqc_set_vlan_mode params = { 0 };
+ int status;
+
+ params.l2tag_prio_tagging = ICE_AQ_VLAN_PRIO_TAG_OUTER_CTAG;
+ params.rdma_packet = ICE_AQ_DVM_VLAN_RDMA_PKT_FLAG_SETTING;
+ params.mng_vlan_prot_id = ICE_AQ_VLAN_MNG_PROTOCOL_ID_OUTER;
+
+ status = ice_aq_set_vlan_mode(hw, &params);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set double VLAN mode parameters, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_dvm_update_dflt_recipes(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to update default recipes for double VLAN mode, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_aq_set_port_params(hw->port_info, true, NULL);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set port in double VLAN mode, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_set_dvm_boost_entries(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set boost TCAM entries for double VLAN mode, status %d\n",
+ status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_set_svm - set single VLAN mode
+ * @hw: pointer to the HW structure
+ */
+static int ice_set_svm(struct ice_hw *hw)
+{
+ struct ice_aqc_set_vlan_mode *set_params;
+ int status;
+
+ status = ice_aq_set_port_params(hw->port_info, false, NULL);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set port parameters for single VLAN mode\n");
+ return status;
+ }
+
+ set_params = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*set_params),
+ GFP_KERNEL);
+ if (!set_params)
+ return -ENOMEM;
+
+	/* default configuration for SVM */
+ set_params->l2tag_prio_tagging = ICE_AQ_VLAN_PRIO_TAG_INNER_CTAG;
+ set_params->rdma_packet = ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING;
+ set_params->mng_vlan_prot_id = ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER;
+
+ status = ice_aq_set_vlan_mode(hw, set_params);
+ if (status)
+ ice_debug(hw, ICE_DBG_INIT, "Failed to configure port in single VLAN mode\n");
+
+ devm_kfree(ice_hw_to_dev(hw), set_params);
+ return status;
+}
+
+/**
+ * ice_set_vlan_mode - configure the VLAN mode of the device
+ * @hw: pointer to the HW structure
+ */
+int ice_set_vlan_mode(struct ice_hw *hw)
+{
+ if (!ice_is_dvm_supported(hw))
+ return 0;
+
+ if (!ice_set_dvm(hw))
+ return 0;
+
+ return ice_set_svm(hw);
+}
+
+/**
+ * ice_print_dvm_not_supported - print if DDP and/or FW doesn't support DVM
+ * @hw: pointer to the HW structure
+ *
+ * The purpose of this function is to print that QinQ is not supported due to
+ * an incompatibility in the DDP and/or FW. This gives the user a hint to
+ * update one or both components if they expect QinQ functionality.
+ */
+static void ice_print_dvm_not_supported(struct ice_hw *hw)
+{
+ bool pkg_supports_dvm = ice_pkg_supports_dvm(hw);
+ bool fw_supports_dvm = ice_fw_supports_dvm(hw);
+
+ if (!fw_supports_dvm && !pkg_supports_dvm)
+ dev_info(ice_hw_to_dev(hw), "QinQ functionality cannot be enabled on this device. Update your DDP package and NVM to versions that support QinQ.\n");
+ else if (!pkg_supports_dvm)
+ dev_info(ice_hw_to_dev(hw), "QinQ functionality cannot be enabled on this device. Update your DDP package to a version that supports QinQ.\n");
+ else if (!fw_supports_dvm)
+ dev_info(ice_hw_to_dev(hw), "QinQ functionality cannot be enabled on this device. Update your NVM to a version that supports QinQ.\n");
+}
+
+/**
+ * ice_post_pkg_dwnld_vlan_mode_cfg - configure VLAN mode after DDP download
+ * @hw: pointer to the HW structure
+ *
+ * This function is meant to configure any VLAN mode specific functionality
+ * after the global configuration lock has been released and the DDP has been
+ * downloaded.
+ *
+ * Since only one PF downloads the DDP and configures the VLAN mode, there
+ * needs to be a way to configure the other PFs after the DDP has been
+ * downloaded and the global configuration lock has been released. All such
+ * code should go in this function.
+ */
+void ice_post_pkg_dwnld_vlan_mode_cfg(struct ice_hw *hw)
+{
+ ice_cache_vlan_mode(hw);
+
+ if (ice_is_dvm_ena(hw))
+ ice_change_proto_id_to_dvm();
+ else
+ ice_print_dvm_not_supported(hw);
+}
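
Taken together, ice_set_vlan_mode() and ice_post_pkg_dwnld_vlan_mode_cfg()
implement try-DVM-else-SVM selection followed by a one-time cache of the
result. A standalone model of that flow, with stand-in stubs replacing the
AQ calls:

#include <stdbool.h>
#include <stdio.h>

struct hw { bool dvm_ena; };

/* stand-ins for the DDP/FW capability query and the AQ configuration calls */
static bool dvm_supported(void) { return true; }
static int enable_dvm(void) { return 0; }	/* 0 on success */
static int enable_svm(void) { return 0; }

static int set_vlan_mode(struct hw *hw)
{
	if (!dvm_supported())
		return 0;		/* SVM is the default; nothing to do */
	if (!enable_dvm()) {
		hw->dvm_ena = true;	/* cached, as ice_cache_vlan_mode() does */
		return 0;
	}
	return enable_svm();		/* DVM setup failed, fall back to SVM */
}

int main(void)
{
	struct hw hw = { .dvm_ena = false };

	if (set_vlan_mode(&hw))
		return 1;
	printf("dvm_ena=%d\n", hw.dvm_ena);	/* later checks read the cache */
	return 0;
}
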
diff --git a/drivers/net/ethernet/intel/ice/ice_vlan_mode.h b/drivers/net/ethernet/intel/ice/ice_vlan_mode.h
new file mode 100644
index 000000000000..a0fb743d08e2
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vlan_mode.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_VLAN_MODE_H_
+#define _ICE_VLAN_MODE_H_
+
+struct ice_hw;
+
+bool ice_is_dvm_ena(struct ice_hw *hw);
+int ice_set_vlan_mode(struct ice_hw *hw);
+void ice_post_pkg_dwnld_vlan_mode_cfg(struct ice_hw *hw);
+
+#endif /* _ICE_VLAN_MODE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
new file mode 100644
index 000000000000..5b4a0abb4607
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice_vsi_vlan_lib.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
+#include "ice.h"
+
+static void print_invalid_tpid(struct ice_vsi *vsi, u16 tpid)
+{
+ dev_err(ice_pf_to_dev(vsi->back), "%s %d specified invalid VLAN tpid 0x%04x\n",
+ ice_vsi_type_str(vsi->type), vsi->idx, tpid);
+}
+
+/**
+ * validate_vlan - check if the ice_vlan passed in is valid
+ * @vsi: VSI used for printing error message
+ * @vlan: ice_vlan structure to validate
+ *
+ * Return true if the VLAN TPID is valid, or if both the VLAN TPID and the
+ * VLAN VID are 0. This allows, respectively, non-zero VLAN filters with the
+ * specified TPID and untagged VLAN 0 filters to be added to the prune list.
+ */
+static bool validate_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ if (vlan->tpid != ETH_P_8021Q && vlan->tpid != ETH_P_8021AD &&
+ vlan->tpid != ETH_P_QINQ1 && (vlan->tpid || vlan->vid)) {
+ print_invalid_tpid(vsi, vlan->tpid);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ice_vsi_add_vlan - default add VLAN implementation for all VSI types
+ * @vsi: VSI being configured
+ * @vlan: VLAN filter to add
+ */
+int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ int err;
+
+ if (!validate_vlan(vsi, vlan))
+ return -EINVAL;
+
+ err = ice_fltr_add_vlan(vsi, vlan);
+ if (err && err != -EEXIST) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failure Adding VLAN %d on VSI %i, status %d\n",
+ vlan->vid, vsi->vsi_num, err);
+ return err;
+ }
+
+ vsi->num_vlan++;
+ return 0;
+}
+
+/**
+ * ice_vsi_del_vlan - default del VLAN implementation for all VSI types
+ * @vsi: VSI being configured
+ * @vlan: VLAN filter to delete
+ */
+int ice_vsi_del_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ int err;
+
+ if (!validate_vlan(vsi, vlan))
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(pf);
+
+ err = ice_fltr_remove_vlan(vsi, vlan);
+ if (!err)
+ vsi->num_vlan--;
+ else if (err == -ENOENT || err == -EBUSY)
+ err = 0;
+ else
+ dev_err(dev, "Error removing VLAN %d on VSI %i error: %d\n",
+ vlan->vid, vsi->vsi_num, err);
+
+ return err;
+}
+
+/**
+ * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
+ * @vsi: the VSI being changed
+ */
+static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ int err;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+	/* Here we are configuring the VSI to let the driver add VLAN tags by
+	 * setting inner_vlan_flags to ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL. The
+	 * actual VLAN tag insertion happens in the Tx hot path, in ice_tx_map.
+	 */
+ ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
+
+ /* Preserve existing VLAN strip setting */
+ ctxt->info.inner_vlan_flags |= (vsi->info.inner_vlan_flags &
+ ICE_AQ_VSI_INNER_VLAN_EMODE_M);
+
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ goto out;
+ }
+
+ vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
+out:
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
+ * @vsi: the VSI being changed
+ * @ena: boolean value indicating if this is an enable or disable request
+ */
+static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ int err;
+
+ /* do not allow modifying VLAN stripping when a port VLAN is configured
+ * on this VSI
+ */
+ if (vsi->info.port_based_inner_vlan)
+ return 0;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ /* Here we are configuring what the VSI should do with the VLAN tag in
+ * the Rx packet. We can either leave the tag in the packet or put it in
+ * the Rx descriptor.
+ */
+ if (ena)
+ /* Strip VLAN tag from Rx packet and put it in the desc */
+ ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH;
+ else
+ /* Disable stripping. Leave tag in packet */
+ ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
+
+ /* Allow all packets untagged/tagged */
+ ctxt->info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
+
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %s\n",
+ ena, err, ice_aq_str(hw->adminq.sq_last_status));
+ goto out;
+ }
+
+ vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
+out:
+ kfree(ctxt);
+ return err;
+}
+
+int ice_vsi_ena_inner_stripping(struct ice_vsi *vsi, const u16 tpid)
+{
+ if (tpid != ETH_P_8021Q) {
+ print_invalid_tpid(vsi, tpid);
+ return -EINVAL;
+ }
+
+ return ice_vsi_manage_vlan_stripping(vsi, true);
+}
+
+int ice_vsi_dis_inner_stripping(struct ice_vsi *vsi)
+{
+ return ice_vsi_manage_vlan_stripping(vsi, false);
+}
+
+int ice_vsi_ena_inner_insertion(struct ice_vsi *vsi, const u16 tpid)
+{
+ if (tpid != ETH_P_8021Q) {
+ print_invalid_tpid(vsi, tpid);
+ return -EINVAL;
+ }
+
+ return ice_vsi_manage_vlan_insertion(vsi);
+}
+
+int ice_vsi_dis_inner_insertion(struct ice_vsi *vsi)
+{
+ return ice_vsi_manage_vlan_insertion(vsi);
+}
+
+/**
+ * __ice_vsi_set_inner_port_vlan - set port VLAN VSI context settings to enable a port VLAN
+ * @vsi: the VSI to update
+ * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
+ */
+static int __ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, u16 pvid_info)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_aqc_vsi_props *info;
+ struct ice_vsi_ctx *ctxt;
+ int ret;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info = vsi->info;
+ info = &ctxt->info;
+ info->inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED |
+ ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
+ ICE_AQ_VSI_INNER_VLAN_EMODE_STR;
+ info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+
+ info->port_based_inner_vlan = cpu_to_le16(pvid_info);
+ info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret) {
+ dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
+ goto out;
+ }
+
+ vsi->info.inner_vlan_flags = info->inner_vlan_flags;
+ vsi->info.sw_flags2 = info->sw_flags2;
+ vsi->info.port_based_inner_vlan = info->port_based_inner_vlan;
+out:
+ kfree(ctxt);
+ return ret;
+}
+
+int ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ u16 port_vlan_info;
+
+ if (vlan->tpid != ETH_P_8021Q)
+ return -EINVAL;
+
+ if (vlan->prio > 7)
+ return -EINVAL;
+
+ port_vlan_info = vlan->vid | (vlan->prio << VLAN_PRIO_SHIFT);
+
+ return __ice_vsi_set_inner_port_vlan(vsi, port_vlan_info);
+}
+
+/**
+ * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
+ * @vsi: VSI to enable or disable VLAN pruning on
+ * @ena: set to true to enable VLAN pruning and false to disable it
+ *
+ * returns 0 if VSI is updated, negative otherwise
+ */
+static int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
+{
+ struct ice_vsi_ctx *ctxt;
+ struct ice_pf *pf;
+ int status;
+
+ if (!vsi)
+ return -EINVAL;
+
+ /* Don't enable VLAN pruning if the netdev is currently in promiscuous
+ * mode. VLAN pruning will be enabled when the interface exits
+ * promiscuous mode if any VLAN filters are active.
+ */
+ if (vsi->netdev && vsi->netdev->flags & IFF_PROMISC && ena)
+ return 0;
+
+ pf = vsi->back;
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info = vsi->info;
+
+ if (ena)
+ ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ else
+ ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
+
+ status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
+ if (status) {
+ netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %s\n",
+ ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
+ ice_aq_str(pf->hw.adminq.sq_last_status));
+ goto err_out;
+ }
+
+ vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+
+ kfree(ctxt);
+ return 0;
+
+err_out:
+ kfree(ctxt);
+ return status;
+}
+
+int ice_vsi_ena_rx_vlan_filtering(struct ice_vsi *vsi)
+{
+ return ice_cfg_vlan_pruning(vsi, true);
+}
+
+int ice_vsi_dis_rx_vlan_filtering(struct ice_vsi *vsi)
+{
+ return ice_cfg_vlan_pruning(vsi, false);
+}
+
+static int ice_cfg_vlan_antispoof(struct ice_vsi *vsi, bool enable)
+{
+ struct ice_vsi_ctx *ctx;
+ int err;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->info.sec_flags = vsi->info.sec_flags;
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+
+ if (enable)
+ ctx->info.sec_flags |= ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
+ else
+ ctx->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+
+ err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx VLAN anti-spoof %s for VSI %d, error %d\n",
+ enable ? "ON" : "OFF", vsi->vsi_num, err);
+ else
+ vsi->info.sec_flags = ctx->info.sec_flags;
+
+ kfree(ctx);
+
+ return err;
+}
+
+int ice_vsi_ena_tx_vlan_filtering(struct ice_vsi *vsi)
+{
+ return ice_cfg_vlan_antispoof(vsi, true);
+}
+
+int ice_vsi_dis_tx_vlan_filtering(struct ice_vsi *vsi)
+{
+ return ice_cfg_vlan_antispoof(vsi, false);
+}
+
+/**
+ * tpid_to_vsi_outer_vlan_type - convert from TPID to VSI context based tag_type
+ * @tpid: tpid used to translate into VSI context based tag_type
+ * @tag_type: output variable to hold the VSI context based tag type
+ */
+static int tpid_to_vsi_outer_vlan_type(u16 tpid, u8 *tag_type)
+{
+ switch (tpid) {
+ case ETH_P_8021Q:
+ *tag_type = ICE_AQ_VSI_OUTER_TAG_VLAN_8100;
+ break;
+ case ETH_P_8021AD:
+ *tag_type = ICE_AQ_VSI_OUTER_TAG_STAG;
+ break;
+ case ETH_P_QINQ1:
+ *tag_type = ICE_AQ_VSI_OUTER_TAG_VLAN_9100;
+ break;
+ default:
+ *tag_type = 0;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vsi_ena_outer_stripping - enable outer VLAN stripping
+ * @vsi: VSI to configure
+ * @tpid: TPID to enable outer VLAN stripping for
+ *
+ * Enable outer VLAN stripping via VSI context. This function should only be
+ * used if DVM is supported. Also, this function should never be called directly
+ * as it should be part of ice_vsi_vlan_ops if it's needed.
+ *
+ * Since the VSI context only supports a single TPID for insertion and
+ * stripping, setting the TPID for stripping will affect the TPID for insertion.
+ * Callers need to be aware of this limitation.
+ *
+ * Only modify outer VLAN stripping settings and the VLAN TPID. Outer VLAN
+ * insertion settings are unmodified.
+ *
+ * This enables the hardware to strip a VLAN tag with the specified TPID from
+ * the packet and place it in the receive descriptor.
+ */
+int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi, u16 tpid)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ u8 tag_type;
+ int err;
+
+ /* do not allow modifying VLAN stripping when a port VLAN is configured
+ * on this VSI
+ */
+ if (vsi->info.port_based_outer_vlan)
+ return 0;
+
+ if (tpid_to_vsi_outer_vlan_type(tpid, &tag_type))
+ return -EINVAL;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
+ /* clear current outer VLAN strip settings */
+ ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags &
+ ~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M);
+ ctxt->info.outer_vlan_flags |=
+ ((ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH <<
+ ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
+ ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
+ ICE_AQ_VSI_OUTER_TAG_TYPE_M));
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN stripping failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ else
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * ice_vsi_dis_outer_stripping - disable outer VLAN stripping
+ * @vsi: VSI to configure
+ *
+ * Disable outer VLAN stripping via VSI context. This function should only be
+ * used if DVM is supported. Also, this function should never be called directly
+ * as it should be part of ice_vsi_vlan_ops if it's needed.
+ *
+ * Only modify the outer VLAN stripping settings. The VLAN TPID and outer VLAN
+ * insertion settings are unmodified.
+ *
+ * This tells the hardware not to strip the VLAN tag from any tagged packets,
+ * thus leaving the tag in the packet. This enables software offloaded VLAN
+ * stripping and disables hardware offloaded VLAN stripping.
+ */
+int ice_vsi_dis_outer_stripping(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ int err;
+
+ if (vsi->info.port_based_outer_vlan)
+ return 0;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
+ /* clear current outer VLAN strip settings */
+ ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags &
+ ~ICE_AQ_VSI_OUTER_VLAN_EMODE_M;
+ ctxt->info.outer_vlan_flags |= ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING <<
+ ICE_AQ_VSI_OUTER_VLAN_EMODE_S;
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for disabling outer VLAN stripping failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ else
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * ice_vsi_ena_outer_insertion - enable outer VLAN insertion
+ * @vsi: VSI to configure
+ * @tpid: TPID to enable outer VLAN insertion for
+ *
+ * Enable outer VLAN insertion via VSI context. This function should only be
+ * used if DVM is supported. Also, this function should never be called directly
+ * as it should be part of ice_vsi_vlan_ops if it's needed.
+ *
+ * Since the VSI context only supports a single TPID for insertion and
+ * stripping, setting the TPID for insertion will affect the TPID for stripping.
+ * Callers need to be aware of this limitation.
+ *
+ * Only modify outer VLAN insertion settings and the VLAN TPID. Outer VLAN
+ * stripping settings are unmodified.
+ *
+ * This allows a VLAN tag with the specified TPID to be inserted in the transmit
+ * descriptor.
+ */
+int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ u8 tag_type;
+ int err;
+
+ if (vsi->info.port_based_outer_vlan)
+ return 0;
+
+ if (tpid_to_vsi_outer_vlan_type(tpid, &tag_type))
+ return -EINVAL;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
+ /* clear current outer VLAN insertion settings */
+ ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags &
+ ~(ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT |
+ ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC |
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M |
+ ICE_AQ_VSI_OUTER_TAG_TYPE_M);
+ ctxt->info.outer_vlan_flags |=
+ ((ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M) |
+ ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
+ ICE_AQ_VSI_OUTER_TAG_TYPE_M);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN insertion failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ else
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * ice_vsi_dis_outer_insertion - disable outer VLAN insertion
+ * @vsi: VSI to configure
+ *
+ * Disable outer VLAN insertion via VSI context. This function should only be
+ * used if DVM is supported. Also, this function should never be called directly
+ * as it should be part of ice_vsi_vlan_ops if it's needed.
+ *
+ * Only modify the outer VLAN insertion settings. The VLAN TPID and outer VLAN
+ * settings are unmodified.
+ *
+ * This tells the hardware not to allow any VLAN tagged packets in the transmit
+ * descriptor. This enables software offloaded VLAN insertion and disables
+ * hardware offloaded VLAN insertion.
+ */
+int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ int err;
+
+ if (vsi->info.port_based_outer_vlan)
+ return 0;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
+ /* clear current outer VLAN insertion settings */
+ ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags &
+ ~(ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT |
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M);
+ ctxt->info.outer_vlan_flags |=
+ ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC |
+ ((ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for disabling outer VLAN insertion failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ else
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * __ice_vsi_set_outer_port_vlan - set the outer port VLAN and related settings
+ * @vsi: VSI to configure
+ * @vlan_info: packed u16 that contains the VLAN prio and ID
+ * @tpid: TPID of the port VLAN
+ *
+ * Set the port VLAN prio, ID, and TPID.
+ *
+ * Enable VLAN pruning so the VSI doesn't receive any traffic that doesn't match
+ * a VLAN prune rule. The caller should take care to add a VLAN prune rule that
+ * matches the port VLAN ID and TPID.
+ *
+ * Tell hardware to strip the outer VLAN tag from received packets and not to
+ * expose it in the receive descriptor. VSI(s) in a port VLAN should not be
+ * aware of the port VLAN ID or TPID they are assigned to.
+ *
+ * Tell hardware to prevent outer VLAN tag insertion on transmit and only allow
+ * untagged outer packets from the transmit descriptor.
+ *
+ * Also, tell the hardware to insert the port VLAN on transmit.
+ */
+static int
+__ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ u8 tag_type;
+ int err;
+
+ if (tpid_to_vsi_outer_vlan_type(tpid, &tag_type))
+ return -EINVAL;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info = vsi->info;
+
+ ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+
+ ctxt->info.port_based_outer_vlan = cpu_to_le16(vlan_info);
+ ctxt->info.outer_vlan_flags =
+ (ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW <<
+ ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
+ ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
+ ICE_AQ_VSI_OUTER_TAG_TYPE_M) |
+ ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC |
+ (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTUNTAGGED <<
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) |
+ ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for setting outer port based VLAN failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ } else {
+ vsi->info.port_based_outer_vlan = ctxt->info.port_based_outer_vlan;
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+ vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+ }
+
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * ice_vsi_set_outer_port_vlan - public version of __ice_vsi_set_outer_port_vlan
+ * @vsi: VSI to configure
+ * @vlan: ice_vlan structure used to set the port VLAN
+ *
+ * Set the outer port VLAN via VSI context. This function should only be
+ * used if DVM is supported. Also, this function should never be called directly
+ * as it should be part of ice_vsi_vlan_ops if it's needed.
+ *
+ * This function does not support clearing the port VLAN as there is currently
+ * no use case for this.
+ *
+ * Use the ice_vlan structure passed in to set this VSI in a port VLAN.
+ */
+int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ u16 port_vlan_info;
+
+ if (vlan->prio > (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT))
+ return -EINVAL;
+
+ port_vlan_info = vlan->vid | (vlan->prio << VLAN_PRIO_SHIFT);
+
+ return __ice_vsi_set_outer_port_vlan(vsi, port_vlan_info, vlan->tpid);
+}
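
Both port-VLAN setters above pack the 16-bit VLAN TCI the same way: VID in
bits 0-11 and priority in bits 13-15 (VLAN_PRIO_SHIFT is 13 in
<linux/if_vlan.h>). A quick standalone check of the packing:

#include <stdio.h>

#define VLAN_PRIO_SHIFT	13	/* as in <linux/if_vlan.h> */

int main(void)
{
	unsigned short vid = 100, prio = 5;
	unsigned short tci = vid | (prio << VLAN_PRIO_SHIFT);

	printf("port_vlan_info = 0x%04x\n", tci);	/* prints 0xa064 */
	return 0;
}
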
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
new file mode 100644
index 000000000000..f459909490ec
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_VSI_VLAN_LIB_H_
+#define _ICE_VSI_VLAN_LIB_H_
+
+#include <linux/types.h>
+#include "ice_vlan.h"
+
+struct ice_vsi;
+
+int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+int ice_vsi_del_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+
+int ice_vsi_ena_inner_stripping(struct ice_vsi *vsi, u16 tpid);
+int ice_vsi_dis_inner_stripping(struct ice_vsi *vsi);
+int ice_vsi_ena_inner_insertion(struct ice_vsi *vsi, u16 tpid);
+int ice_vsi_dis_inner_insertion(struct ice_vsi *vsi);
+int ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+
+int ice_vsi_ena_rx_vlan_filtering(struct ice_vsi *vsi);
+int ice_vsi_dis_rx_vlan_filtering(struct ice_vsi *vsi);
+int ice_vsi_ena_tx_vlan_filtering(struct ice_vsi *vsi);
+int ice_vsi_dis_tx_vlan_filtering(struct ice_vsi *vsi);
+
+int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi, u16 tpid);
+int ice_vsi_dis_outer_stripping(struct ice_vsi *vsi);
+int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid);
+int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi);
+int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+
+#endif /* _ICE_VSI_VLAN_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
new file mode 100644
index 000000000000..4a6c850d83ac
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice_pf_vsi_vlan_ops.h"
+#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_lib.h"
+#include "ice.h"
+
+static int
+op_unsupported_vlan_arg(struct ice_vsi * __always_unused vsi,
+ struct ice_vlan * __always_unused vlan)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
+op_unsupported_tpid_arg(struct ice_vsi *__always_unused vsi,
+ u16 __always_unused tpid)
+{
+ return -EOPNOTSUPP;
+}
+
+static int op_unsupported(struct ice_vsi *__always_unused vsi)
+{
+ return -EOPNOTSUPP;
+}
+
+/* If any new ops are added to the VSI VLAN ops interface then an unsupported
+ * implementation should be set here.
+ */
+static struct ice_vsi_vlan_ops ops_unsupported = {
+ .add_vlan = op_unsupported_vlan_arg,
+ .del_vlan = op_unsupported_vlan_arg,
+ .ena_stripping = op_unsupported_tpid_arg,
+ .dis_stripping = op_unsupported,
+ .ena_insertion = op_unsupported_tpid_arg,
+ .dis_insertion = op_unsupported,
+ .ena_rx_filtering = op_unsupported,
+ .dis_rx_filtering = op_unsupported,
+ .ena_tx_filtering = op_unsupported,
+ .dis_tx_filtering = op_unsupported,
+ .set_port_vlan = op_unsupported_vlan_arg,
+};
+
+/**
+ * ice_vsi_init_unsupported_vlan_ops - init all VSI VLAN ops to unsupported
+ * @vsi: VSI whose VLAN ops should be initialized to the unsupported set
+ *
+ * By default all inner and outer VSI VLAN ops return -EOPNOTSUPP. This was
+ * done as opposed to leaving the ops null, to prevent unexpected crashes.
+ * Instead, if an unsupported VSI VLAN op is called, it just returns
+ * -EOPNOTSUPP.
+ */
+static void ice_vsi_init_unsupported_vlan_ops(struct ice_vsi *vsi)
+{
+ vsi->outer_vlan_ops = ops_unsupported;
+ vsi->inner_vlan_ops = ops_unsupported;
+}
+
+/**
+ * ice_vsi_init_vlan_ops - initialize type specific VSI VLAN ops
+ * @vsi: VSI to initialize ops for
+ *
+ * If any VSI types are added and/or require different ops than the PF or VF VSI
+ * then they will have to add a case here to handle that. Also, VSI type
+ * specific files should be added in the same manner that was done for PF VSI.
+ */
+void ice_vsi_init_vlan_ops(struct ice_vsi *vsi)
+{
+ /* Initialize all VSI types to have unsupported VSI VLAN ops */
+ ice_vsi_init_unsupported_vlan_ops(vsi);
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ case ICE_VSI_SWITCHDEV_CTRL:
+ ice_pf_vsi_init_vlan_ops(vsi);
+ break;
+ case ICE_VSI_VF:
+ ice_vf_vsi_init_vlan_ops(vsi);
+ break;
+ default:
+ dev_dbg(ice_pf_to_dev(vsi->back), "%s does not support VLAN operations\n",
+ ice_vsi_type_str(vsi->type));
+ break;
+ }
+}
+
+/**
+ * ice_get_compat_vsi_vlan_ops - Get VSI VLAN ops based on VLAN mode
+ * @vsi: VSI used to get the VSI VLAN ops
+ *
+ * This function is meant to be used when the caller doesn't know which VLAN ops
+ * to use (i.e. inner or outer). This allows backward compatibility for VLANs
+ * since most of the outer VSI VLAN functions are not supported when
+ * the device is configured in Single VLAN Mode (SVM).
+ */
+struct ice_vsi_vlan_ops *ice_get_compat_vsi_vlan_ops(struct ice_vsi *vsi)
+{
+ if (ice_is_dvm_ena(&vsi->back->hw))
+ return &vsi->outer_vlan_ops;
+ else
+ return &vsi->inner_vlan_ops;
+}
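A minimal sketch of how a consumer is expected to dispatch through this interface; the wrapper name example_add_vlan is hypothetical, while ice_get_compat_vsi_vlan_ops() and the ops table come from this patch:

	static int example_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
	{
		struct ice_vsi_vlan_ops *vlan_ops;

		/* pick the inner or outer ops table based on the VLAN mode */
		vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);

		return vlan_ops->add_vlan(vsi, vlan);
	}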
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h
new file mode 100644
index 000000000000..5b47568f6256
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_VSI_VLAN_OPS_H_
+#define _ICE_VSI_VLAN_OPS_H_
+
+#include "ice_type.h"
+#include "ice_vsi_vlan_lib.h"
+
+struct ice_vsi;
+
+struct ice_vsi_vlan_ops {
+ int (*add_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan);
+ int (*del_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan);
+ int (*ena_stripping)(struct ice_vsi *vsi, const u16 tpid);
+ int (*dis_stripping)(struct ice_vsi *vsi);
+ int (*ena_insertion)(struct ice_vsi *vsi, const u16 tpid);
+ int (*dis_insertion)(struct ice_vsi *vsi);
+ int (*ena_rx_filtering)(struct ice_vsi *vsi);
+ int (*dis_rx_filtering)(struct ice_vsi *vsi);
+ int (*ena_tx_filtering)(struct ice_vsi *vsi);
+ int (*dis_tx_filtering)(struct ice_vsi *vsi);
+ int (*set_port_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan);
+};
+
+void ice_vsi_init_vlan_ops(struct ice_vsi *vsi);
+struct ice_vsi_vlan_ops *ice_get_compat_vsi_vlan_ops(struct ice_vsi *vsi);
+
+#endif /* _ICE_VSI_VLAN_OPS_H_ */
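A type-specific init routine is expected to overwrite the unsupported defaults with the library implementations declared in ice_vsi_vlan_lib.h; a sketch under the assumption that some hypothetical VSI type only supports inner add/del and Rx filtering:

	static void example_vsi_init_vlan_ops(struct ice_vsi *vsi)
	{
		struct ice_vsi_vlan_ops *ops = &vsi->inner_vlan_ops;

		/* every op left untouched keeps returning -EOPNOTSUPP */
		ops->add_vlan = ice_vsi_add_vlan;
		ops->del_vlan = ice_vsi_del_vlan;
		ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering;
		ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
	}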
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 2388837d6d6c..88853a6ed931 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -327,6 +327,13 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
bool if_running, pool_present = !!pool;
int ret = 0, pool_failure = 0;
+ if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
+ !is_power_of_2(vsi->tx_rings[qid]->count)) {
+ netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
+ pool_failure = -EINVAL;
+ goto failure;
+ }
+
if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
if (if_running) {
@@ -349,6 +356,7 @@ xsk_pool_if_up:
netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
}
+failure:
if (pool_failure) {
netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
pool_present ? "en" : "dis", pool_failure);
@@ -359,33 +367,28 @@ xsk_pool_if_up:
}
/**
- * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
- * @rx_ring: Rx ring
+ * ice_fill_rx_descs - pick buffers from XSK buffer pool and use them
+ * @pool: XSK Buffer pool to pull the buffers from
+ * @xdp: SW ring of xdp_buff that will hold the buffers
+ * @rx_desc: Pointer to Rx descriptors that will be filled
* @count: The number of buffers to allocate
*
* This function allocates a number of Rx buffers from the fill ring
* or the internal recycle mechanism and places them on the Rx ring.
*
- * Returns true if all allocations were successful, false if any fail.
+ * Note that ring wrap should be handled by the caller of this function.
+ *
+ * Returns the number of allocated Rx descriptors
*/
-bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
+ union ice_32b_rx_flex_desc *rx_desc, u16 count)
{
- union ice_32b_rx_flex_desc *rx_desc;
- u16 ntu = rx_ring->next_to_use;
- struct xdp_buff **xdp;
- u32 nb_buffs, i;
dma_addr_t dma;
+ u16 buffs;
+ int i;
- rx_desc = ICE_RX_DESC(rx_ring, ntu);
- xdp = ice_xdp_buf(rx_ring, ntu);
-
- nb_buffs = min_t(u16, count, rx_ring->count - ntu);
- nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
- if (!nb_buffs)
- return false;
-
- i = nb_buffs;
- while (i--) {
+ buffs = xsk_buff_alloc_batch(pool, xdp, count);
+ for (i = 0; i < buffs; i++) {
dma = xsk_buff_xdp_get_dma(*xdp);
rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc->wb.status_error0 = 0;
@@ -394,13 +397,77 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
xdp++;
}
+ return buffs;
+}
+
+/**
+ * __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
+ * @rx_ring: Rx ring
+ * @count: The number of buffers to allocate
+ *
+ * Place @count descriptors onto the Rx ring. Handle the ring wrap for the
+ * case where the space from next_to_use up to the end of the ring is less
+ * than @count. Finally, do a tail bump.
+ *
+ * Returns true if all allocations were successful, false if any fail.
+ */
+static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+{
+ union ice_32b_rx_flex_desc *rx_desc;
+ u32 nb_buffs_extra = 0, nb_buffs;
+ u16 ntu = rx_ring->next_to_use;
+ u16 total_count = count;
+ struct xdp_buff **xdp;
+
+ rx_desc = ICE_RX_DESC(rx_ring, ntu);
+ xdp = ice_xdp_buf(rx_ring, ntu);
+
+ if (ntu + count >= rx_ring->count) {
+ nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
+ rx_desc,
+ rx_ring->count - ntu);
+ rx_desc = ICE_RX_DESC(rx_ring, 0);
+ xdp = ice_xdp_buf(rx_ring, 0);
+ ntu = 0;
+ count -= nb_buffs_extra;
+ ice_release_rx_desc(rx_ring, 0);
+ }
+
+ nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);
+
ntu += nb_buffs;
if (ntu == rx_ring->count)
ntu = 0;
- ice_release_rx_desc(rx_ring, ntu);
+ if (rx_ring->next_to_use != ntu)
+ ice_release_rx_desc(rx_ring, ntu);
+
+ return total_count == (nb_buffs_extra + nb_buffs);
+}
+
+/**
+ * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
+ * @rx_ring: Rx ring
+ * @count: The number of buffers to allocate
+ *
+ * Wrapper for internal allocation routine; figure out how many tail
+ * bumps should take place based on the given threshold
+ *
+ * Returns true if all calls to internal alloc routine succeeded
+ */
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+{
+ u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
+ u16 batched, leftover, i, tail_bumps;
+
+ batched = ALIGN_DOWN(count, rx_thresh);
+ tail_bumps = batched / rx_thresh;
+ leftover = count & (rx_thresh - 1);
- return count == nb_buffs;
+ for (i = 0; i < tail_bumps; i++)
+ if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
+ return false;
+ return __ice_alloc_rx_bufs_zc(rx_ring, leftover);
}
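A worked example of the split above, assuming a 512-descriptor ring so that rx_thresh = ICE_RING_QUARTER(rx_ring) = 128, and count = 300: batched = ALIGN_DOWN(300, 128) = 256, tail_bumps = 2 and leftover = 300 & 127 = 44, i.e. two full 128-buffer passes each ending in a tail bump, plus one final pass for the remaining 44. Computing leftover with a mask instead of a modulo is only correct because rx_thresh is a power of two, which is exactly the invariant the new is_power_of_2() ring-size check in ice_xsk_pool_setup() enforces.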
/**
@@ -428,20 +495,24 @@ static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
static struct sk_buff *
ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
- unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
- unsigned int datasize = xdp->data_end - xdp->data;
struct sk_buff *skb;
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
+ net_prefetch(xdp->data_meta);
+
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, xdp->data - xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), xdp->data, datasize);
- if (metasize)
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
xsk_buff_free(xdp);
return skb;
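To make the new copy scheme concrete: with metasize = 8 and a 60-byte frame, totalsize = 68 and the memcpy() moves ALIGN(68, sizeof(long)) = 72 bytes starting at data_meta, so the copy runs in whole machine words; skb_metadata_set(8) then records the metadata length and __skb_pull(8) leaves skb->data pointing at the packet itself with the metadata sitting just in front of it. The few bytes copied past totalsize are understood to land inside the allocator's rounded-up skb data area.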
@@ -528,7 +599,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
- if (!ice_test_staterr(rx_desc, stat_err_bits))
+ if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
break;
/* This memory barrier is needed to keep us from reading
@@ -583,9 +654,7 @@ construct_skb:
total_rx_bytes += skb->len;
total_rx_packets++;
- stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
- if (ice_test_staterr(rx_desc, stat_err_bits))
- vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
+ vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
ICE_RX_FLEX_DESC_PTYPE_M;
@@ -612,134 +681,221 @@ construct_skb:
}
/**
- * ice_xmit_zc - Completes AF_XDP entries, and cleans XDP entries
+ * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
* @xdp_ring: XDP Tx ring
- * @budget: max number of frames to xmit
+ * @tx_buf: Tx buffer to clean
+ */
+static void
+ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
+{
+ xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
+ xdp_ring->xdp_tx_active--;
+ dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+}
+
+/**
+ * ice_clean_xdp_irq_zc - Reclaim resources after transmit completes on XDP ring
+ * @xdp_ring: XDP ring to clean
+ * @napi_budget: number of descriptors that NAPI allows us to clean
*
- * Returns true if cleanup/transmission is done.
+ * Returns the number of cleaned descriptors
*/
-static bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, int budget)
+static u16 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring, int napi_budget)
{
- struct ice_tx_desc *tx_desc = NULL;
- bool work_done = true;
- struct xdp_desc desc;
- dma_addr_t dma;
+ u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
+ int budget = napi_budget / tx_thresh;
+ u16 next_dd = xdp_ring->next_dd;
+ u16 ntc, cleared_dds = 0;
- while (likely(budget-- > 0)) {
+ do {
+ struct ice_tx_desc *next_dd_desc;
+ u16 desc_cnt = xdp_ring->count;
struct ice_tx_buf *tx_buf;
+ u32 xsk_frames;
+ u16 i;
- if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
- xdp_ring->tx_stats.tx_busy++;
- work_done = false;
+ next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
+ if (!(next_dd_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
break;
- }
- tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
-
- if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
- break;
-
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
- desc.len);
-
- tx_buf->bytecount = desc.len;
+ cleared_dds++;
+ xsk_frames = 0;
+ if (likely(!xdp_ring->xdp_tx_active)) {
+ xsk_frames = tx_thresh;
+ goto skip;
+ }
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
- tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz =
- ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, desc.len, 0);
+ ntc = xdp_ring->next_to_clean;
- xdp_ring->next_to_use++;
- if (xdp_ring->next_to_use == xdp_ring->count)
- xdp_ring->next_to_use = 0;
- }
+ for (i = 0; i < tx_thresh; i++) {
+ tx_buf = &xdp_ring->tx_buf[ntc];
- if (tx_desc) {
- ice_xdp_ring_update_tail(xdp_ring);
- xsk_tx_release(xdp_ring->xsk_pool);
- }
+ if (tx_buf->raw_buf) {
+ ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+ tx_buf->raw_buf = NULL;
+ } else {
+ xsk_frames++;
+ }
- return budget > 0 && work_done;
+ ntc++;
+ if (ntc >= xdp_ring->count)
+ ntc = 0;
+ }
+skip:
+ xdp_ring->next_to_clean += tx_thresh;
+ if (xdp_ring->next_to_clean >= desc_cnt)
+ xdp_ring->next_to_clean -= desc_cnt;
+ if (xsk_frames)
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
+ next_dd_desc->cmd_type_offset_bsz = 0;
+ next_dd = next_dd + tx_thresh;
+ if (next_dd >= desc_cnt)
+ next_dd = tx_thresh - 1;
+ } while (budget--);
+
+ xdp_ring->next_dd = next_dd;
+
+ return cleared_dds * tx_thresh;
}
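A concrete trace of the DD bookkeeping above, for a 512-entry XDP ring where tx_thresh = 128 and assuming the ring setup in this series initializes next_dd to tx_thresh - 1 (as the wrap logic here implies): next_dd advances 127 -> 255 -> 383 -> 511 -> 127, one quarter at a time, and the RS bit that ice_fill_tx_hw_ring()/ice_xmit_pkt_batch() set at next_rs is what eventually makes hardware assert DESC_DONE at exactly those positions, so each loop pass can complete a whole quarter or bail out early if the next quarter has not finished.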
/**
- * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
- * @xdp_ring: XDP Tx ring
- * @tx_buf: Tx buffer to clean
+ * ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
+ * @xdp_ring: XDP ring to produce the HW Tx descriptor on
+ * @desc: AF_XDP descriptor to pull the DMA address and length from
+ * @total_bytes: bytes accumulator that will be used for stats update
*/
-static void
-ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
+static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
+ unsigned int *total_bytes)
{
- xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
- dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
+ struct ice_tx_desc *tx_desc;
+ dma_addr_t dma;
+
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);
+
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
+ 0, desc->len, 0);
+
+ *total_bytes += desc->len;
}
/**
- * ice_clean_tx_irq_zc - Completes AF_XDP entries, and cleans XDP entries
- * @xdp_ring: XDP Tx ring
- * @budget: NAPI budget
- *
- * Returns true if cleanup/tranmission is done.
+ * ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
+ * @total_bytes: bytes accumulator that will be used for stats update
*/
-bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget)
+static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
+ unsigned int *total_bytes)
{
- int total_packets = 0, total_bytes = 0;
- s16 ntc = xdp_ring->next_to_clean;
+ u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
+ u16 ntu = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc;
- struct ice_tx_buf *tx_buf;
- u32 xsk_frames = 0;
- bool xmit_done;
+ u32 i;
- tx_desc = ICE_TX_DESC(xdp_ring, ntc);
- tx_buf = &xdp_ring->tx_buf[ntc];
- ntc -= xdp_ring->count;
+ loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+ dma_addr_t dma;
- do {
- if (!(tx_desc->cmd_type_offset_bsz &
- cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
- break;
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len);
- total_bytes += tx_buf->bytecount;
- total_packets++;
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
+ 0, descs[i].len, 0);
- if (tx_buf->raw_buf) {
- ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
- tx_buf->raw_buf = NULL;
- } else {
- xsk_frames++;
- }
+ *total_bytes += descs[i].len;
+ }
- tx_desc->cmd_type_offset_bsz = 0;
- tx_buf++;
- tx_desc++;
- ntc++;
+ xdp_ring->next_to_use = ntu;
- if (unlikely(!ntc)) {
- ntc -= xdp_ring->count;
- tx_buf = xdp_ring->tx_buf;
- tx_desc = ICE_TX_DESC(xdp_ring, 0);
- }
+ if (xdp_ring->next_to_use > xdp_ring->next_rs) {
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+ xdp_ring->next_rs += tx_thresh;
+ }
+}
- prefetch(tx_desc);
+/**
+ * ice_fill_tx_hw_ring - produce the number of Tx descriptors onto ring
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
+ * @nb_pkts: count of packets to be sent
+ * @total_bytes: bytes accumulator that will be used for stats update
+ */
+static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
+ u32 nb_pkts, unsigned int *total_bytes)
+{
+ u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
+ u32 batched, leftover, i;
+
+ batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
+ leftover = nb_pkts & (PKTS_PER_BATCH - 1);
+ for (i = 0; i < batched; i += PKTS_PER_BATCH)
+ ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
+ for (; i < batched + leftover; i++)
+ ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
+
+ if (xdp_ring->next_to_use > xdp_ring->next_rs) {
+ struct ice_tx_desc *tx_desc;
+
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+ xdp_ring->next_rs += tx_thresh;
+ }
+}
- } while (likely(--budget));
+/**
+ * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @budget: number of free descriptors on HW Tx ring that can be used
+ * @napi_budget: number of descriptors that NAPI allows us to clean
+ *
+ * Returns true if there is no more work that needs to be done, false otherwise
+ */
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget)
+{
+ struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
+ u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
+ u32 nb_pkts, nb_processed = 0;
+ unsigned int total_bytes = 0;
+
+ if (budget < tx_thresh)
+ budget += ice_clean_xdp_irq_zc(xdp_ring, napi_budget);
+
+ nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
+ if (!nb_pkts)
+ return true;
+
+ if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
+ struct ice_tx_desc *tx_desc;
+
+ nb_processed = xdp_ring->count - xdp_ring->next_to_use;
+ ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+ xdp_ring->next_rs = tx_thresh - 1;
+ xdp_ring->next_to_use = 0;
+ }
- ntc += xdp_ring->count;
- xdp_ring->next_to_clean = ntc;
+ ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
+ &total_bytes);
- if (xsk_frames)
- xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
+ ice_xdp_ring_update_tail(xdp_ring);
+ ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);
if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
- ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
- xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
-
- return budget > 0 && xmit_done;
+ return nb_pkts < budget;
}
/**
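With the Tx clean path folded into ice_xmit_zc(), the NAPI poll routine only needs a single call per iteration; a sketch of the assumed call site, with the surrounding poll logic elided:

	/* in the NAPI poll handler, for an XDP ring backed by an XSK pool */
	bool wd = ice_xmit_zc(xdp_ring, ICE_DESC_UNUSED(xdp_ring), budget);

	/* wd == true means no Tx work is left on this ring */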
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 4c7bd8e9dfc4..21faec8e97db 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -4,7 +4,16 @@
#ifndef _ICE_XSK_H_
#define _ICE_XSK_H_
#include "ice_txrx.h"
-#include "ice.h"
+
+#define PKTS_PER_BATCH 8
+
+#ifdef __clang__
+#define loop_unrolled_for _Pragma("clang loop unroll_count(8)") for
+#elif __GNUC__ >= 8
+#define loop_unrolled_for _Pragma("GCC unroll 8") for
+#else
+#define loop_unrolled_for for
+#endif
struct ice_vsi;
@@ -12,13 +21,21 @@ struct ice_vsi;
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid);
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget);
-bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget);
#else
+static inline bool
+ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
+ u32 __always_unused budget,
+ int __always_unused napi_budget)
+{
+ return false;
+}
+
static inline int
ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
struct xsk_buff_pool __always_unused *pool,
@@ -35,13 +52,6 @@ ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
}
static inline bool
-ice_clean_tx_irq_zc(struct ice_tx_ring __always_unused *xdp_ring,
- int __always_unused budget)
-{
- return false;
-}
-
-static inline bool
ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
u16 __always_unused count)
{
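The loop_unrolled_for helper is only a compiler hint wrapped around an ordinary for statement; on clang the batch loop in ice_xmit_pkt_batch() effectively compiles as

	_Pragma("clang loop unroll_count(8)")
	for (i = 0; i < PKTS_PER_BATCH; i++)
		total_bytes += descs[i].len;	/* body replicated 8x by the compiler */

with the GCC unroll pragma used on GCC 8+ and a plain, non-unrolled loop on anything older, so correctness never depends on the pragma being honored.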
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 51a2dcaf553d..2a5782063f4c 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -965,10 +965,6 @@ static int igb_set_ringparam(struct net_device *netdev,
memcpy(&temp_ring[i], adapter->rx_ring[i],
sizeof(struct igb_ring));
- /* Clear copied XDP RX-queue info */
- memset(&temp_ring[i].xdp_rxq, 0,
- sizeof(temp_ring[i].xdp_rxq));
-
temp_ring[i].count = new_rx_count;
err = igb_setup_rx_resources(&temp_ring[i]);
if (err) {
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 38ba92022cd4..34b33b21e0dc 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3164,8 +3164,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
s32 ret_val;
static int global_quad_port_a; /* global quad port a indication */
const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
- int err, pci_using_dac;
u8 part_str[E1000_PBANUM_LENGTH];
+ int err;
/* Catch broken hardware that put the wrong VF device ID in
* the PCIe SR-IOV capability.
@@ -3180,17 +3180,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_mem_regions(pdev, igb_driver_name);
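The dropped 32-bit fallback in this and the later probe hunks (igbvf, igc, ixgb, ixgbe and ixgbevf all follow suit below) reflects the current DMA API contract: dma_set_mask_and_coherent() with a 64-bit mask only fails when the device has no usable DMA configuration at all, because the DMA layer itself applies any narrower bus limits. That makes the pci_using_dac bookkeeping dead code, and NETIF_F_HIGHDMA can be set unconditionally; probe-time DMA setup reduces to a sketch like:

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		goto err_dma;		/* no usable DMA configuration */

	netdev->features |= NETIF_F_HIGHDMA;	/* always safe now */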
@@ -3306,8 +3300,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hw->mac.type >= e1000_i350)
netdev->hw_features |= NETIF_F_NTUPLE;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
@@ -4352,7 +4345,18 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
struct igb_adapter *adapter = netdev_priv(rx_ring->netdev);
struct device *dev = rx_ring->dev;
- int size;
+ int size, res;
+
+ /* XDP RX-queue info */
+ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+ rx_ring->queue_index, 0);
+ if (res < 0) {
+ dev_err(dev, "Failed to register xdp_rxq index %u\n",
+ rx_ring->queue_index);
+ return res;
+ }
size = sizeof(struct igb_rx_buffer) * rx_ring->count;
@@ -4375,14 +4379,10 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
rx_ring->xdp_prog = adapter->xdp_prog;
- /* XDP RX-queue info */
- if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
- rx_ring->queue_index, 0) < 0)
- goto err;
-
return 0;
err:
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
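Note the ordering: the xdp_rxq info is now registered (after defensively unregistering any stale registration) before any ring memory is allocated, and the err path unregisters it again, so a failed setup never leaves a registered xdp_rxq behind. This is also why the igb_set_ringparam hunk above stops memsetting the copied xdp_rxq -- igb_setup_rx_resources() now owns the re-registration itself, and igc gets the same treatment below.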
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 6580fcddb4be..02fec948ce64 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -165,23 +165,21 @@ static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
unsigned long flags;
u64 ns;
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+
switch (adapter->hw.mac.type) {
case e1000_82576:
case e1000_82580:
case e1000_i354:
case e1000_i350:
spin_lock_irqsave(&adapter->tmreg_lock, flags);
-
ns = timecounter_cyc2time(&adapter->tc, systim);
-
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
- memset(hwtstamps, 0, sizeof(*hwtstamps));
hwtstamps->hwtstamp = ns_to_ktime(ns);
break;
case e1000_i210:
case e1000_i211:
- memset(hwtstamps, 0, sizeof(*hwtstamps));
/* Upper 32 bits contain s, lower 32 bits contain ns. */
hwtstamps->hwtstamp = ktime_set(systim >> 32,
systim & 0xFFFFFFFF);
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index b78407289741..43ced78c3a2e 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2684,25 +2684,18 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct igbvf_adapter *adapter;
struct e1000_hw *hw;
const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
-
static int cards_found;
- int err, pci_using_dac;
+ int err;
err = pci_enable_device_mem(pdev);
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_regions(pdev, igbvf_driver_name);
@@ -2783,10 +2776,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_GSO_PARTIAL |
IGBVF_GSO_PARTIAL_FEATURES;
- netdev->features = netdev->hw_features;
-
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 2f17f36e94fd..74b2c590ed5d 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -505,6 +505,9 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
u8 index = rx_ring->queue_index;
int size, desc_len, res;
+ /* XDP RX-queue info */
+ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
rx_ring->q_vector->napi.napi_id);
if (res < 0) {
@@ -2446,19 +2449,20 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
struct xdp_buff *xdp)
{
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
- unsigned int datasize = xdp->data_end - xdp->data;
- unsigned int totalsize = metasize + datasize;
struct sk_buff *skb;
- skb = __napi_alloc_skb(&ring->q_vector->napi,
- xdp->data_end - xdp->data_hard_start,
+ net_prefetch(xdp->data_meta);
+
+ skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
- memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize);
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
if (metasize) {
skb_metadata_set(skb, metasize);
__skb_pull(skb, metasize);
@@ -6251,23 +6255,17 @@ static int igc_probe(struct pci_dev *pdev,
struct net_device *netdev;
struct igc_hw *hw;
const struct igc_info *ei = igc_info_tbl[ent->driver_data];
- int err, pci_using_dac;
+ int err;
err = pci_enable_device_mem(pdev);
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_mem_regions(pdev, igc_driver_name);
@@ -6367,8 +6365,7 @@ static int igc_probe(struct pci_dev *pdev,
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= netdev->features;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 5cad31c3c7b0..40dbf4b43234 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -746,8 +746,6 @@ s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
if (ret_val)
return ret_val;
ret_val = igc_write_phy_reg_mdic(hw, offset, data);
- if (ret_val)
- return ret_val;
hw->phy.ops.release(hw);
} else {
ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr,
@@ -779,8 +777,6 @@ s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
if (ret_val)
return ret_val;
ret_val = igc_read_phy_reg_mdic(hw, offset, data);
- if (ret_val)
- return ret_val;
hw->phy.ops.release(hw);
} else {
ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr,
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 99d481904ce6..affdefcca7e3 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -361,7 +361,6 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev = NULL;
struct ixgb_adapter *adapter;
static int cards_found = 0;
- int pci_using_dac;
u8 addr[ETH_ALEN];
int i;
int err;
@@ -370,16 +369,10 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- pr_err("No usable DMA configuration, aborting\n");
- goto err_dma_mask;
- }
+ if (err) {
+ pr_err("No usable DMA configuration, aborting\n");
+ goto err_dma_mask;
}
err = pci_request_regions(pdev, ixgb_driver_name);
@@ -444,10 +437,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_RXCSUM;
- if (pci_using_dac) {
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
- }
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
/* MTU range: 68 - 16114 */
netdev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 4a69823e6abd..921a4d977d65 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -177,11 +177,14 @@ struct vf_data_storage {
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
u16 tx_rate;
+ int link_enable;
+ int link_state;
u8 spoofchk_enabled;
bool rss_query_enabled;
u8 trusted;
int xcast_mode;
unsigned int vf_api;
+ u8 primary_abort_count;
};
enum ixgbevf_xcast_modes {
@@ -556,6 +559,8 @@ struct ixgbe_mac_addr {
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
#define IXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */
+#define IXGBE_PRIMARY_ABORT_LIMIT 5
+
/* board specific private data structure */
struct ixgbe_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -614,6 +619,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_RX_LEGACY BIT(16)
#define IXGBE_FLAG2_IPSEC_ENABLED BIT(17)
#define IXGBE_FLAG2_VF_IPSEC_ENABLED BIT(18)
+#define IXGBE_FLAG2_AUTO_DISABLE_VF BIT(19)
/* Tx fast path data */
int num_tx_queues;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index e90b5047e695..4c26c4b92f07 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -30,7 +30,7 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
u16 offset);
-static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw);
/* Base table for registers values that change by MAC */
const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = {
@@ -746,10 +746,10 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
usleep_range(1000, 2000);
/*
- * Prevent the PCI-E bus from from hanging by disabling PCI-E master
+ * Prevent the PCI-E bus from hanging by disabling PCI-E primary
* access and verify no pending requests
*/
- return ixgbe_disable_pcie_master(hw);
+ return ixgbe_disable_pcie_primary(hw);
}
/**
@@ -2506,15 +2506,15 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
}
/**
- * ixgbe_disable_pcie_master - Disable PCI-express master access
+ * ixgbe_disable_pcie_primary - Disable PCI-express primary access
* @hw: pointer to hardware structure
*
- * Disables PCI-Express master access and verifies there are no pending
- * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
- * bit hasn't caused the master requests to be disabled, else 0
- * is returned signifying master requests disabled.
+ * Disables PCI-Express primary access and verifies there are no pending
+ * requests. IXGBE_ERR_PRIMARY_REQUESTS_PENDING is returned if primary disable
+ * bit hasn't caused the primary requests to be disabled, else 0
+ * is returned signifying primary requests disabled.
**/
-static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
+static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw)
{
u32 i, poll;
u16 value;
@@ -2523,23 +2523,23 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
/* Poll for bit to read as set */
- for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ for (i = 0; i < IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT; i++) {
if (IXGBE_READ_REG(hw, IXGBE_CTRL) & IXGBE_CTRL_GIO_DIS)
break;
usleep_range(100, 120);
}
- if (i >= IXGBE_PCI_MASTER_DISABLE_TIMEOUT) {
+ if (i >= IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT) {
hw_dbg(hw, "GIO disable did not set - requesting resets\n");
goto gio_disable_fail;
}
- /* Exit if master requests are blocked */
+ /* Exit if primary requests are blocked */
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
ixgbe_removed(hw->hw_addr))
return 0;
- /* Poll for master request bit to clear */
- for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ /* Poll for primary request bit to clear */
+ for (i = 0; i < IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT; i++) {
udelay(100);
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
return 0;
@@ -2547,13 +2547,13 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
/*
* Two consecutive resets are required via CTRL.RST per datasheet
- * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
- * of this need. The first reset prevents new master requests from
+ * 5.2.5.3.2 Primary Disable. We set a flag to inform the reset routine
+ * of this need. The first reset prevents new primary requests from
* being issued by our device. We then must wait 1usec or more for any
* remaining completions from the PCIe bus to trickle in, and then reset
* again to clear out any effects they may have had on our device.
*/
- hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
+ hw_dbg(hw, "GIO Primary Disable bit didn't clear - requesting resets\n");
gio_disable_fail:
hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
@@ -2575,7 +2575,7 @@ gio_disable_fail:
}
hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
- return IXGBE_ERR_MASTER_REQUESTS_PENDING;
+ return IXGBE_ERR_PRIMARY_REQUESTS_PENDING;
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index f70967c32116..628d0eb0599f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -138,6 +138,8 @@ static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
"legacy-rx",
#define IXGBE_PRIV_FLAGS_VF_IPSEC_EN BIT(1)
"vf-ipsec",
+#define IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF BIT(2)
+ "mdd-disable-vf",
};
#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
@@ -3510,6 +3512,9 @@ static u32 ixgbe_get_priv_flags(struct net_device *netdev)
if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)
priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN;
+ if (adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF)
+ priv_flags |= IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF;
+
return priv_flags;
}
@@ -3517,6 +3522,7 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
unsigned int flags2 = adapter->flags2;
+ unsigned int i;
flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
@@ -3526,6 +3532,21 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN)
flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED;
+ flags2 &= ~IXGBE_FLAG2_AUTO_DISABLE_VF;
+ if (priv_flags & IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF) {
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ /* Reset primary abort counter */
+ for (i = 0; i < adapter->num_vfs; i++)
+ adapter->vfinfo[i].primary_abort_count = 0;
+
+ flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
+ } else {
+ e_info(probe,
+ "Cannot set private flags: Operation not supported\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
if (flags2 != adapter->flags2) {
adapter->flags2 = flags2;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 89b467006291..c4a4954aa317 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -5687,6 +5687,9 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
+ /* update Rx/Tx settings for all active VFs */
+ ixgbe_set_all_vfs(adapter);
}
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
@@ -5948,8 +5951,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
case IXGBE_ERR_SFP_NOT_PRESENT:
case IXGBE_ERR_SFP_NOT_SUPPORTED:
break;
- case IXGBE_ERR_MASTER_REQUESTS_PENDING:
- e_dev_err("master disable timed out\n");
+ case IXGBE_ERR_PRIMARY_REQUESTS_PENDING:
+ e_dev_err("primary disable timed out\n");
break;
case IXGBE_ERR_EEPROM_VERSION:
/* We are running on a pre-production device, log a warning */
@@ -6144,11 +6147,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
for (i = 0 ; i < adapter->num_vfs; i++)
adapter->vfinfo[i].clear_to_send = false;
- /* ping all the active vfs to let them know we are going down */
- ixgbe_ping_all_vfs(adapter);
-
- /* Disable all VFTE/VFRE TX/RX */
- ixgbe_disable_tx_rx(adapter);
+ /* update Rx/Tx settings for all active VFs */
+ ixgbe_set_all_vfs(adapter);
}
/* disable transmits in the hardware now that interrupts are off */
@@ -7613,6 +7613,27 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
}
#ifdef CONFIG_PCI_IOV
+static void ixgbe_bad_vf_abort(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
+ adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF) {
+ adapter->vfinfo[vf].primary_abort_count++;
+ if (adapter->vfinfo[vf].primary_abort_count ==
+ IXGBE_PRIMARY_ABORT_LIMIT) {
+ ixgbe_set_vf_link_state(adapter, vf,
+ IFLA_VF_LINK_STATE_DISABLE);
+ adapter->vfinfo[vf].primary_abort_count = 0;
+
+ e_info(drv,
+ "Malicious Driver Detection event detected on PF %d VF %d MAC: %pM mdd-disable-vf=on",
+ hw->bus.func, vf,
+ adapter->vfinfo[vf].vf_mac_addresses);
+ }
+ }
+}
+
static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -7644,8 +7665,10 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
continue;
pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
- status_reg & PCI_STATUS_REC_MASTER_ABORT)
+ status_reg & PCI_STATUS_REC_MASTER_ABORT) {
+ ixgbe_bad_vf_abort(adapter, vf);
pcie_flr(vfdev);
+ }
}
}
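Putting the two hunks together: each time the watchdog's config-space scan sees PCI_STATUS_REC_MASTER_ABORT on a VF, ixgbe_bad_vf_abort() bumps primary_abort_count before the usual pcie_flr(); on an 82599 with mdd-disable-vf enabled, the fifth consecutive abort (IXGBE_PRIMARY_ABORT_LIMIT = 5) forces the VF to IFLA_VF_LINK_STATE_DISABLE and resets the counter, so a malicious or broken VF is administratively cut off instead of being FLR-reset forever.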
@@ -10284,6 +10307,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
.ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
.ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
+ .ndo_set_vf_link_state = ixgbe_ndo_set_vf_link_state,
.ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
.ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
@@ -10632,9 +10656,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgbe_adapter *adapter = NULL;
struct ixgbe_hw *hw;
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
- int i, err, pci_using_dac, expected_gts;
unsigned int indices = MAX_TX_QUEUES;
u8 part_str[IXGBE_PBANUM_LENGTH];
+ int i, err, expected_gts;
bool disable_dev = false;
#ifdef IXGBE_FCOE
u16 device_caps;
@@ -10654,16 +10678,11 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
- pci_using_dac = 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_mem_regions(pdev, ixgbe_driver_name);
@@ -10750,6 +10769,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ adapter->flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
+
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
@@ -10861,8 +10883,7 @@ skip_sriov:
netdev->hw_features |= NETIF_F_NTUPLE |
NETIF_F_HW_TC;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->hw_enc_features |= netdev->vlan_features;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index a148534d7256..8f4316b19278 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -85,6 +85,8 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_IPSEC_ADD 0x0d
#define IXGBE_VF_IPSEC_DEL 0x0e
+#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 214a38de3f41..7f11c0a8e7a9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -96,6 +96,7 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
for (i = 0; i < num_vfs; i++) {
/* enable spoof checking for all VFs */
adapter->vfinfo[i].spoofchk_enabled = true;
+ adapter->vfinfo[i].link_enable = true;
/* We support VF RSS querying only for 82599 and x540
* devices at the moment. These devices share RSS
@@ -820,6 +821,57 @@ static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf,
}
}
+/**
+ * ixgbe_set_vf_rx_tx - Set VF Rx/Tx state
+ * @adapter: Pointer to adapter struct
+ * @vf: VF identifier
+ *
+ * Set or reset the correct transmit and receive state for a VF
+ **/
+static void ixgbe_set_vf_rx_tx(struct ixgbe_adapter *adapter, int vf)
+{
+ u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg_offset, vf_shift;
+
+ vf_shift = vf % 32;
+ reg_offset = vf / 32;
+
+ reg_cur_tx = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
+ reg_cur_rx = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+
+ if (adapter->vfinfo[vf].link_enable) {
+ reg_req_tx = reg_cur_tx | 1 << vf_shift;
+ reg_req_rx = reg_cur_rx | 1 << vf_shift;
+ } else {
+ reg_req_tx = reg_cur_tx & ~(1 << vf_shift);
+ reg_req_rx = reg_cur_rx & ~(1 << vf_shift);
+ }
+
+ /* The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
+ * For more info take a look at ixgbe_set_vf_lpe
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ struct net_device *dev = adapter->netdev;
+ int pf_max_frame = dev->mtu + ETH_HLEN;
+
+#if IS_ENABLED(CONFIG_FCOE)
+ if (dev->features & NETIF_F_FCOE_MTU)
+ pf_max_frame = max_t(int, pf_max_frame,
+ IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif /* CONFIG_FCOE */
+
+ if (pf_max_frame > ETH_FRAME_LEN)
+ reg_req_rx = reg_cur_rx & ~(1 << vf_shift);
+ }
+
+ /* Enable/Disable particular VF */
+ if (reg_cur_tx != reg_req_tx)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg_req_tx);
+ if (reg_cur_rx != reg_req_rx)
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg_req_rx);
+}
+
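The register arithmetic above follows from VFTE/VFRE holding one enable bit per VF, 32 VFs per 32-bit register: reg_offset = vf / 32 selects the register and vf_shift = vf % 32 the bit, so e.g. VF 37 is controlled by bit (1 << 5) in IXGBE_VFTE(1) and IXGBE_VFRE(1).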
static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
@@ -845,11 +897,6 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
vf_shift = vf % 32;
reg_offset = vf / 32;
- /* enable transmit for vf */
- reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
- reg |= BIT(vf_shift);
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
-
/* force drop enable for all VF Rx queues */
reg = IXGBE_QDE_ENABLE;
if (adapter->vfinfo[vf].pf_vlan)
@@ -857,27 +904,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
ixgbe_write_qde(adapter, vf, reg);
- /* enable receive for vf */
- reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
- reg |= BIT(vf_shift);
- /*
- * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
- * For more info take a look at ixgbe_set_vf_lpe
- */
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- struct net_device *dev = adapter->netdev;
- int pf_max_frame = dev->mtu + ETH_HLEN;
-
-#ifdef CONFIG_FCOE
- if (dev->features & NETIF_F_FCOE_MTU)
- pf_max_frame = max_t(int, pf_max_frame,
- IXGBE_FCOE_JUMBO_FRAME_SIZE);
-
-#endif /* CONFIG_FCOE */
- if (pf_max_frame > ETH_FRAME_LEN)
- reg &= ~BIT(vf_shift);
- }
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
+ ixgbe_set_vf_rx_tx(adapter, vf);
/* enable VF mailbox for further messages */
adapter->vfinfo[vf].clear_to_send = true;
@@ -1202,6 +1229,26 @@ out:
return 0;
}
+static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ u32 *link_state = &msgbuf[1];
+
+ /* verify the PF is supporting the correct API */
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_12:
+ case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_14:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ *link_state = adapter->vfinfo[vf].link_enable;
+
+ return 0;
+}
+
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
@@ -1267,6 +1314,9 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
case IXGBE_VF_UPDATE_XCAST_MODE:
retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
break;
+ case IXGBE_VF_GET_LINK_STATE:
+ retval = ixgbe_get_vf_link_state(adapter, msgbuf, vf);
+ break;
case IXGBE_VF_IPSEC_ADD:
retval = ixgbe_ipsec_vf_add_sa(adapter, msgbuf, vf);
break;
@@ -1322,18 +1372,6 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter)
}
}
-void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
-
- /* disable transmit and receive for all vfs */
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
-
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
-}
-
static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -1359,6 +1397,21 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
}
}
+/**
+ * ixgbe_set_all_vfs - update all VFs' queue settings
+ * @adapter: Pointer to adapter struct
+ *
+ * Update the transmit and receive queue settings for all VFs
+ **/
+void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter)
+{
+ int i;
+
+ for (i = 0 ; i < adapter->num_vfs; i++)
+ ixgbe_set_vf_link_state(adapter, i,
+ adapter->vfinfo[i].link_state);
+}
+
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -1656,6 +1709,84 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
return 0;
}
+/**
+ * ixgbe_set_vf_link_state - Set link state
+ * @adapter: Pointer to adapter struct
+ * @vf: VF identifier
+ * @state: required link state
+ *
+ * Force the link state on/off for a single VF
+ **/
+void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state)
+{
+ adapter->vfinfo[vf].link_state = state;
+
+ switch (state) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ adapter->vfinfo[vf].link_enable = false;
+ else
+ adapter->vfinfo[vf].link_enable = true;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ adapter->vfinfo[vf].link_enable = true;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ adapter->vfinfo[vf].link_enable = false;
+ break;
+ }
+
+ ixgbe_set_vf_rx_tx(adapter, vf);
+
+ /* restart the VF */
+ adapter->vfinfo[vf].clear_to_send = false;
+ ixgbe_ping_vf(adapter, vf);
+}
+
+/**
+ * ixgbe_ndo_set_vf_link_state - Set link state
+ * @netdev: network interface device structure
+ * @vf: VF identifier
+ * @state: required link state
+ *
+ * Set the link state of a specified VF, regardless of physical link state
+ **/
+int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int ret = 0;
+
+ if (vf < 0 || vf >= adapter->num_vfs) {
+ dev_err(&adapter->pdev->dev,
+ "NDO set VF link - invalid VF identifier %d\n", vf);
+ return -EINVAL;
+ }
+
+ switch (state) {
+ case IFLA_VF_LINK_STATE_ENABLE:
+ dev_info(&adapter->pdev->dev,
+ "NDO set VF %d link state %d - not supported\n",
+ vf, state);
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ dev_info(&adapter->pdev->dev,
+ "NDO set VF %d link state disable\n", vf);
+ ixgbe_set_vf_link_state(adapter, vf, state);
+ break;
+ case IFLA_VF_LINK_STATE_AUTO:
+ dev_info(&adapter->pdev->dev,
+ "NDO set VF %d link state auto\n", vf);
+ ixgbe_set_vf_link_state(adapter, vf, state);
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "NDO set VF %d - invalid link state %d\n", vf, state);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
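Once this is wired up through .ndo_set_vf_link_state in the netdev ops hunk above, the state can be driven from userspace with iproute2, e.g. "ip link set <pf> vf 3 state disable" or "ip link set <pf> vf 3 state auto"; note that "state enable" is accepted but only logged as unsupported here, leaving the VF untouched.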
+
int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
bool setting)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 3ec21923c89c..0690ecb8dfa3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -17,8 +17,8 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
#endif
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
-void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
+void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
u8 qos, __be16 vlan_proto);
@@ -31,7 +31,9 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
+int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state);
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
+void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state);
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 2647937f7f4d..6da9880d766a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1247,7 +1247,7 @@ struct ixgbe_nvm_version {
#define IXGBE_PSRTYPE_RQPL_SHIFT 29
/* CTRL Bit Masks */
-#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
+#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Primary Disable bit */
#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */
#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
@@ -1811,7 +1811,7 @@ enum {
/* STATUS Bit Masks */
#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */
#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/
-#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */
+#define IXGBE_STATUS_GIO 0x00080000 /* GIO Primary Enable Status */
#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */
#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
@@ -2193,8 +2193,8 @@ enum {
#define IXGBE_PCIDEVCTRL2_4_8s 0xd
#define IXGBE_PCIDEVCTRL2_17_34s 0xe
-/* Number of 100 microseconds we wait for PCI Express master disable */
-#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+/* Number of 100 microseconds we wait for PCI Express primary disable */
+#define IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT 800
/* RAH */
#define IXGBE_RAH_VIND_MASK 0x003C0000
@@ -3671,7 +3671,7 @@ struct ixgbe_info {
#define IXGBE_ERR_ADAPTER_STOPPED -9
#define IXGBE_ERR_INVALID_MAC_ADDR -10
#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11
-#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12
+#define IXGBE_ERR_PRIMARY_REQUESTS_PENDING -12
#define IXGBE_ERR_INVALID_LINK_SETTINGS -13
#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14
#define IXGBE_ERR_RESET_FAILED -15
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index b3fd8e5cd85b..dd7ff66d422f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -207,26 +207,28 @@ bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
}
static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
- struct ixgbe_rx_buffer *bi)
+ const struct xdp_buff *xdp)
{
- unsigned int metasize = bi->xdp->data - bi->xdp->data_meta;
- unsigned int datasize = bi->xdp->data_end - bi->xdp->data;
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
+ unsigned int metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
+ net_prefetch(xdp->data_meta);
+
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- bi->xdp->data_end - bi->xdp->data_hard_start,
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, bi->xdp->data - bi->xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), bi->xdp->data, datasize);
- if (metasize)
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
- xsk_buff_free(bi->xdp);
- bi->xdp = NULL;
return skb;
}
@@ -317,12 +319,15 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
}
/* XDP_PASS path */
- skb = ixgbe_construct_skb_zc(rx_ring, bi);
+ skb = ixgbe_construct_skb_zc(rx_ring, bi->xdp);
if (!skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
break;
}
+ xsk_buff_free(bi->xdp);
+ bi->xdp = NULL;
+
cleaned_count++;
ixgbe_inc_ntc(rx_ring);
@@ -390,12 +395,14 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
u32 cmd_type;
while (budget-- > 0) {
- if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
- !netif_carrier_ok(xdp_ring->netdev)) {
+ if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
work_done = false;
break;
}
+ if (!netif_carrier_ok(xdp_ring->netdev))
+ break;
+
if (!xsk_tx_peek_desc(pool, &desc))
break;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index e257390a4f6a..149c733fcc2b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -387,6 +387,8 @@ struct ixgbevf_adapter {
u32 *rss_key;
u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
u32 flags;
+ bool link_state;
+
#define IXGBEVF_FLAGS_LEGACY_RX BIT(1)
#ifdef CONFIG_XFRM
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 0f293acd17e8..55b87bc3a938 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2298,7 +2298,9 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
struct ixgbe_hw *hw = &adapter->hw;
+ bool state;
ixgbevf_configure_msix(adapter);
@@ -2311,6 +2313,11 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
spin_unlock_bh(&adapter->mbx_lock);
+ state = adapter->link_state;
+ hw->mac.ops.get_link_state(hw, &adapter->link_state);
+ if (state && state != adapter->link_state)
+ dev_info(&pdev->dev, "VF is administratively disabled\n");
+
smp_mb__before_atomic();
clear_bit(__IXGBEVF_DOWN, &adapter->state);
ixgbevf_napi_enable_all(adapter);
@@ -2753,7 +2760,7 @@ static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
ring->reg_idx = reg_idx;
/* assign ring to adapter */
- adapter->tx_ring[txr_idx] = ring;
+ adapter->tx_ring[txr_idx] = ring;
/* update count and index */
txr_count--;
@@ -3081,6 +3088,8 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
+ adapter->link_state = true;
+
set_bit(__IXGBEVF_DOWN, &adapter->state);
return 0;
@@ -3313,7 +3322,7 @@ static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
ixgbevf_watchdog_update_link(adapter);
- if (adapter->link_up)
+ if (adapter->link_up && adapter->link_state)
ixgbevf_watchdog_link_is_up(adapter);
else
ixgbevf_watchdog_link_is_down(adapter);
@@ -4512,22 +4521,17 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgbevf_adapter *adapter = NULL;
struct ixgbe_hw *hw = NULL;
const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
- int err, pci_using_dac;
bool disable_dev = false;
+ int err;
err = pci_enable_device(pdev);
if (err)
return err;
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
- pci_using_dac = 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_regions(pdev, ixgbevf_driver_name);
@@ -4607,10 +4611,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_GSO_PARTIAL |
IXGBEVF_GSO_PARTIAL_FEATURES;
- netdev->features = netdev->hw_features;
-
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_SG |
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 7346ccf014a5..835bbcc5cc8e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -100,6 +100,8 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_IPSEC_ADD 0x0d
#define IXGBE_VF_IPSEC_DEL 0x0e
+#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 61d8970c6d1d..68fc32e36e88 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -585,6 +585,46 @@ static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
}
/**
+ * ixgbevf_get_link_state_vf - Get VF link state from PF
+ * @hw: pointer to the HW structure
+ * @link_state: link state storage
+ *
+ * Returns 0 on success, or IXGBE_ERR_MBX if the mailbox exchange fails.
+ */
+static s32 ixgbevf_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
+{
+ u32 msgbuf[2];
+ s32 ret_val;
+ s32 err;
+
+ msgbuf[0] = IXGBE_VF_GET_LINK_STATE;
+ msgbuf[1] = 0x0;
+
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+
+ if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
+ ret_val = IXGBE_ERR_MBX;
+ } else {
+ ret_val = 0;
+ *link_state = msgbuf[1];
+ }
+
+ return ret_val;
+}
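+
+/* Mailbox usage above: msgbuf[0] carries the IXGBE_VF_GET_LINK_STATE opcode
+ * on send and the PF's success/failure flags on return, while msgbuf[1]
+ * returns the administratively configured link state (non-zero = enabled).
+ */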
+
+/**
+ * ixgbevf_hv_get_link_state_vf - Hyper-V variant - just a stub.
+ * @hw: unused
+ * @link_state: unused
+ *
+ * Hyper-V variant; there is no mailbox communication.
+ */
+static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
* ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
@@ -968,6 +1008,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.set_rar = ixgbevf_set_rar_vf,
.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
.update_xcast_mode = ixgbevf_update_xcast_mode,
+ .get_link_state = ixgbevf_get_link_state_vf,
.set_uc_addr = ixgbevf_set_uc_addr_vf,
.set_vfta = ixgbevf_set_vfta_vf,
.set_rlpml = ixgbevf_set_rlpml_vf,
@@ -985,6 +1026,7 @@ static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
.set_rar = ixgbevf_hv_set_rar_vf,
.update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf,
.update_xcast_mode = ixgbevf_hv_update_xcast_mode,
+ .get_link_state = ixgbevf_hv_get_link_state_vf,
.set_uc_addr = ixgbevf_hv_set_uc_addr_vf,
.set_vfta = ixgbevf_hv_set_vfta_vf,
.set_rlpml = ixgbevf_hv_set_rlpml_vf,
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 54158dac8707..b4eef5b6c172 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -39,6 +39,7 @@ struct ixgbe_mac_operations {
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
s32 (*update_xcast_mode)(struct ixgbe_hw *, int);
+ s32 (*get_link_state)(struct ixgbe_hw *hw, bool *link_state);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 439674fc9765..b6c5122da995 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -28,6 +28,7 @@
#include <linux/udp.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
+#include <linux/jiffies.h>
#include <net/ip6_checksum.h>
#include "jme.h"
@@ -2179,7 +2180,7 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
}
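/* time_is_before_eq_jiffies(a) expands to time_after_eq(jiffies, a),
 * which compares via signed subtraction and so stays correct even
 * when jiffies wraps around.
 */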
if (unlikely(txbi->start_xmit &&
- (jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
+ time_is_before_eq_jiffies(txbi->start_xmit + TX_TIMEOUT) &&
txbi->skb)) {
netif_stop_queue(jme->dev);
netif_info(jme, tx_queued, jme->dev,
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 41d11137cde0..5712c3e94be8 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -260,9 +260,9 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
if (ctl & LTQ_DMA_EOP) {
ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
- netif_receive_skb(ch->skb_head);
net_dev->stats.rx_packets++;
net_dev->stats.rx_bytes += ch->skb_head->len;
+ netif_receive_skb(ch->skb_head);
ch->skb_head = NULL;
ch->skb_tail = NULL;
ret = XRX200_DMA_PACKET_COMPLETE;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 143ca8be5eb5..5f9ab1842d49 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1661,7 +1661,7 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er,
if (er->rx_mini_pending || er->rx_jumbo_pending)
return -EINVAL;
- mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
+ mp->rx_ring_size = min(er->rx_pending, 4096U);
mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
MV643XX_MAX_SKB_DESCS * 2, 4096);
if (mp->tx_ring_size != er->tx_pending)
@@ -3092,8 +3092,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
struct mv643xx_eth_private *mp;
struct net_device *dev;
struct phy_device *phydev = NULL;
- struct resource *res;
- int err;
+ int err, irq;
pd = dev_get_platdata(&pdev->dev);
if (pd == NULL) {
@@ -3189,9 +3188,12 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- BUG_ON(!res);
- dev->irq = res->start;
+ irq = platform_get_irq(pdev, 0);
+ if (WARN_ON(irq < 0)) {
+ err = irq;
+ goto out;
+ }
+ dev->irq = irq;
dev->netdev_ops = &mv643xx_eth_netdev_ops;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 83c8908f0cc7..934f6dd90992 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -76,6 +76,8 @@
#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE 0x2290
+#define MVNETA_AC5_CNM_DDR_TARGET 0x2
+#define MVNETA_AC5_CNM_DDR_ATTR 0xb
#define MVNETA_ACCESS_PROTECT_ENABLE 0x2294
#define MVNETA_PORT_CONFIG 0x2400
#define MVNETA_UNI_PROMISC_MODE BIT(0)
@@ -544,6 +546,7 @@ struct mvneta_port {
/* Flags for special SoC configurations */
bool neta_armada3700;
+ bool neta_ac5;
u16 rx_offset_correction;
const struct mbus_dram_target_info *dram_target_info;
};
@@ -1884,8 +1887,8 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
bytes_compl += buf->skb->len;
pkts_compl++;
dev_kfree_skb_any(buf->skb);
- } else if (buf->type == MVNETA_TYPE_XDP_TX ||
- buf->type == MVNETA_TYPE_XDP_NDO) {
+ } else if ((buf->type == MVNETA_TYPE_XDP_TX ||
+ buf->type == MVNETA_TYPE_XDP_NDO) && buf->xdpf) {
if (napi && buf->type == MVNETA_TYPE_XDP_TX)
xdp_return_frame_rx_napi(buf->xdpf);
else
@@ -2060,61 +2063,104 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
static void
mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
- struct xdp_buff *xdp, struct skb_shared_info *sinfo,
- int sync_len)
+ struct xdp_buff *xdp, int sync_len)
{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
int i;
+ if (likely(!xdp_buff_has_frags(xdp)))
+ goto out;
+
for (i = 0; i < sinfo->nr_frags; i++)
page_pool_put_full_page(rxq->page_pool,
skb_frag_page(&sinfo->frags[i]), true);
+
+out:
page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
sync_len, true);
}
static int
mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
- struct xdp_frame *xdpf, bool dma_map)
+ struct xdp_frame *xdpf, int *nxmit_byte, bool dma_map)
{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ struct device *dev = pp->dev->dev.parent;
struct mvneta_tx_desc *tx_desc;
- struct mvneta_tx_buf *buf;
- dma_addr_t dma_addr;
+ int i, num_frames = 1;
+ struct page *page;
+
+ if (unlikely(xdp_frame_has_frags(xdpf)))
+ num_frames += sinfo->nr_frags;
- if (txq->count >= txq->tx_stop_threshold)
+ if (txq->count + num_frames >= txq->size)
return MVNETA_XDP_DROPPED;
- tx_desc = mvneta_txq_next_desc_get(txq);
+ for (i = 0; i < num_frames; i++) {
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ skb_frag_t *frag = NULL;
+ int len = xdpf->len;
+ dma_addr_t dma_addr;
- buf = &txq->buf[txq->txq_put_index];
- if (dma_map) {
- /* ndo_xdp_xmit */
- dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data,
- xdpf->len, DMA_TO_DEVICE);
- if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) {
- mvneta_txq_desc_put(txq);
- return MVNETA_XDP_DROPPED;
+ if (unlikely(i)) { /* paged area */
+ frag = &sinfo->frags[i - 1];
+ len = skb_frag_size(frag);
}
- buf->type = MVNETA_TYPE_XDP_NDO;
- } else {
- struct page *page = virt_to_page(xdpf->data);
- dma_addr = page_pool_get_dma_addr(page) +
- sizeof(*xdpf) + xdpf->headroom;
- dma_sync_single_for_device(pp->dev->dev.parent, dma_addr,
- xdpf->len, DMA_BIDIRECTIONAL);
- buf->type = MVNETA_TYPE_XDP_TX;
- }
- buf->xdpf = xdpf;
+ tx_desc = mvneta_txq_next_desc_get(txq);
+ if (dma_map) {
+ /* ndo_xdp_xmit */
+ void *data;
+
+ data = unlikely(frag) ? skb_frag_address(frag)
+ : xdpf->data;
+ dma_addr = dma_map_single(dev, data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ mvneta_txq_desc_put(txq);
+ goto unmap;
+ }
- tx_desc->command = MVNETA_TXD_FLZ_DESC;
- tx_desc->buf_phys_addr = dma_addr;
- tx_desc->data_size = xdpf->len;
+ buf->type = MVNETA_TYPE_XDP_NDO;
+ } else {
+ page = unlikely(frag) ? skb_frag_page(frag)
+ : virt_to_page(xdpf->data);
+ dma_addr = page_pool_get_dma_addr(page);
+ if (unlikely(frag))
+ dma_addr += skb_frag_off(frag);
+ else
+ dma_addr += sizeof(*xdpf) + xdpf->headroom;
+ dma_sync_single_for_device(dev, dma_addr, len,
+ DMA_BIDIRECTIONAL);
+ buf->type = MVNETA_TYPE_XDP_TX;
+ }
+ buf->xdpf = unlikely(i) ? NULL : xdpf;
- mvneta_txq_inc_put(txq);
- txq->pending++;
- txq->count++;
+ tx_desc->command = unlikely(i) ? 0 : MVNETA_TXD_F_DESC;
+ tx_desc->buf_phys_addr = dma_addr;
+ tx_desc->data_size = len;
+ *nxmit_byte += len;
+
+ mvneta_txq_inc_put(txq);
+ }
+ /* last descriptor */
+ tx_desc->command |= MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
+
+ txq->pending += num_frames;
+ txq->count += num_frames;
return MVNETA_XDP_TX;
+
+unmap:
+ for (i--; i >= 0; i--) {
+ mvneta_txq_desc_put(txq);
+ tx_desc = txq->descs + txq->next_desc_to_proc;
+ dma_unmap_single(dev, tx_desc->buf_phys_addr,
+ tx_desc->data_size,
+ DMA_TO_DEVICE);
+ }
+
+ return MVNETA_XDP_DROPPED;
}
static int
@@ -2123,8 +2169,8 @@ mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
struct mvneta_tx_queue *txq;
struct netdev_queue *nq;
+ int cpu, nxmit_byte = 0;
struct xdp_frame *xdpf;
- int cpu;
u32 ret;
xdpf = xdp_convert_buff_to_frame(xdp);
@@ -2136,10 +2182,10 @@ mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
nq = netdev_get_tx_queue(pp->dev, txq->id);
__netif_tx_lock(nq, cpu);
- ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
+ ret = mvneta_xdp_submit_frame(pp, txq, xdpf, &nxmit_byte, false);
if (ret == MVNETA_XDP_TX) {
u64_stats_update_begin(&stats->syncp);
- stats->es.ps.tx_bytes += xdpf->len;
+ stats->es.ps.tx_bytes += nxmit_byte;
stats->es.ps.tx_packets++;
stats->es.ps.xdp_tx++;
u64_stats_update_end(&stats->syncp);
@@ -2178,11 +2224,11 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
__netif_tx_lock(nq, cpu);
for (i = 0; i < num_frame; i++) {
- ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
+ ret = mvneta_xdp_submit_frame(pp, txq, frames[i], &nxmit_byte,
+ true);
if (ret != MVNETA_XDP_TX)
break;
- nxmit_byte += frames[i]->len;
nxmit++;
}
@@ -2205,7 +2251,6 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
struct bpf_prog *prog, struct xdp_buff *xdp,
u32 frame_sz, struct mvneta_stats *stats)
{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
unsigned int len, data_len, sync;
u32 ret, act;
@@ -2226,7 +2271,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
err = xdp_do_redirect(pp->dev, xdp, prog);
if (unlikely(err)) {
- mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
+ mvneta_xdp_put_buff(pp, rxq, xdp, sync);
ret = MVNETA_XDP_DROPPED;
} else {
ret = MVNETA_XDP_REDIR;
@@ -2237,7 +2282,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
case XDP_TX:
ret = mvneta_xdp_xmit_back(pp, xdp);
if (ret != MVNETA_XDP_TX)
- mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
+ mvneta_xdp_put_buff(pp, rxq, xdp, sync);
break;
default:
bpf_warn_invalid_xdp_action(pp->dev, prog, act);
@@ -2246,7 +2291,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
trace_xdp_exception(pp->dev, prog, act);
fallthrough;
case XDP_DROP:
- mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
+ mvneta_xdp_put_buff(pp, rxq, xdp, sync);
ret = MVNETA_XDP_DROPPED;
stats->xdp_drop++;
break;
@@ -2269,7 +2314,6 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
int data_len = -MVNETA_MH_SIZE, len;
struct net_device *dev = pp->dev;
enum dma_data_direction dma_dir;
- struct skb_shared_info *sinfo;
if (*size > MVNETA_MAX_RX_BUF_SIZE) {
len = MVNETA_MAX_RX_BUF_SIZE;
@@ -2289,11 +2333,9 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
/* Prefetch header */
prefetch(data);
+ xdp_buff_clear_frags_flag(xdp);
xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
data_len, false);
-
- sinfo = xdp_get_shared_info_from_buff(xdp);
- sinfo->nr_frags = 0;
}
static void
@@ -2301,9 +2343,9 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc,
struct mvneta_rx_queue *rxq,
struct xdp_buff *xdp, int *size,
- struct skb_shared_info *xdp_sinfo,
struct page *page)
{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
struct net_device *dev = pp->dev;
enum dma_data_direction dma_dir;
int data_len, len;
@@ -2321,25 +2363,25 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
len, dma_dir);
rx_desc->buf_phys_addr = 0;
- if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {
- skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];
+ if (!xdp_buff_has_frags(xdp))
+ sinfo->nr_frags = 0;
+
+ if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
+ skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++];
skb_frag_off_set(frag, pp->rx_offset_correction);
skb_frag_size_set(frag, data_len);
__skb_frag_set_page(frag, page);
+
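+ /* first fragment of this frame: *size still counts every byte
+ * beyond the head buffer, so record it as the total frag length
+ * and flag the buff as fragmented
+ */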
+ if (!xdp_buff_has_frags(xdp)) {
+ sinfo->xdp_frags_size = *size;
+ xdp_buff_set_frags_flag(xdp);
+ }
+ if (page_is_pfmemalloc(page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
} else {
page_pool_put_full_page(rxq->page_pool, page, true);
}
-
- /* last fragment */
- if (len == *size) {
- struct skb_shared_info *sinfo;
-
- sinfo = xdp_get_shared_info_from_buff(xdp);
- sinfo->nr_frags = xdp_sinfo->nr_frags;
- memcpy(sinfo->frags, xdp_sinfo->frags,
- sinfo->nr_frags * sizeof(skb_frag_t));
- }
*size -= len;
}
@@ -2348,8 +2390,11 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
struct xdp_buff *xdp, u32 desc_status)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
- int i, num_frags = sinfo->nr_frags;
struct sk_buff *skb;
+ u8 num_frags;
+
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ num_frags = sinfo->nr_frags;
skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
if (!skb)
@@ -2361,13 +2406,11 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
skb_put(skb, xdp->data_end - xdp->data);
skb->ip_summed = mvneta_rx_csum(pp, desc_status);
- for (i = 0; i < num_frags; i++) {
- skb_frag_t *frag = &sinfo->frags[i];
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- skb_frag_page(frag), skb_frag_off(frag),
- skb_frag_size(frag), PAGE_SIZE);
- }
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ xdp_update_skb_shared_info(skb, num_frags,
+ sinfo->xdp_frags_size,
+ num_frags * xdp->frame_sz,
+ xdp_buff_is_frag_pfmemalloc(xdp));
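+ /* build_skb() reused xdp->data_hard_start, so the frag array is
+ * already in the skb's shared info; only the counters are updated
+ * here (truesize = num_frags * frame_sz), no frags are copied
+ */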
return skb;
}
@@ -2379,7 +2422,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
{
int rx_proc = 0, rx_todo, refill, size = 0;
struct net_device *dev = pp->dev;
- struct skb_shared_info sinfo;
struct mvneta_stats ps = {};
struct bpf_prog *xdp_prog;
u32 desc_status, frame_sz;
@@ -2388,8 +2430,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
xdp_buf.data_hard_start = NULL;
- sinfo.nr_frags = 0;
-
/* Get number of received packets */
rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
@@ -2431,7 +2471,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
}
mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
- &size, &sinfo, page);
+ &size, page);
} /* Middle or Last descriptor */
if (!(rx_status & MVNETA_RXD_LAST_DESC))
@@ -2439,7 +2479,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
continue;
if (size) {
- mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
+ mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
goto next;
}
@@ -2451,7 +2491,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
if (IS_ERR(skb)) {
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
- mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
+ mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
u64_stats_update_begin(&stats->syncp);
stats->es.skb_alloc_error++;
@@ -2468,11 +2508,10 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
napi_gro_receive(napi, skb);
next:
xdp_buf.data_hard_start = NULL;
- sinfo.nr_frags = 0;
}
if (xdp_buf.data_hard_start)
- mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
+ mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
if (ps.xdp_redirect)
xdp_do_flush_map();
@@ -3260,7 +3299,8 @@ static int mvneta_create_page_pool(struct mvneta_port *pp,
return err;
}
- err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0);
+ err = __xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0,
+ PAGE_SIZE);
if (err < 0)
goto err_free_pp;
@@ -3740,6 +3780,7 @@ static void mvneta_percpu_disable(void *arg)
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
struct mvneta_port *pp = netdev_priv(dev);
+ struct bpf_prog *prog = pp->xdp_prog;
int ret;
if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
@@ -3748,8 +3789,11 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
}
- if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) {
- netdev_info(dev, "Illegal MTU value %d for XDP mode\n", mtu);
+ if (prog && !prog->aux->xdp_has_frags &&
+ mtu > MVNETA_MAX_RX_BUF_SIZE) {
+ netdev_info(dev, "Illegal MTU %d for XDP prog without frags\n",
+ mtu);
+
return -EINVAL;
}
@@ -3969,6 +4013,15 @@ static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = {
.pcs_an_restart = mvneta_pcs_an_restart,
};
+static struct phylink_pcs *mvneta_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct mvneta_port *pp = netdev_priv(ndev);
+
+ return &pp->phylink_pcs;
+}
+
static int mvneta_mac_prepare(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
@@ -4169,13 +4222,14 @@ static void mvneta_mac_link_up(struct phylink_config *config,
mvneta_port_up(pp);
if (phy && pp->eee_enabled) {
- pp->eee_active = phy_init_eee(phy, 0) >= 0;
+ pp->eee_active = phy_init_eee(phy, false) >= 0;
mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
}
}
static const struct phylink_mac_ops mvneta_phylink_ops = {
.validate = phylink_generic_validate,
+ .mac_select_pcs = mvneta_mac_select_pcs,
.mac_prepare = mvneta_mac_prepare,
.mac_config = mvneta_mac_config,
.mac_finish = mvneta_mac_finish,
@@ -4490,8 +4544,9 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
struct mvneta_port *pp = netdev_priv(dev);
struct bpf_prog *old_prog;
- if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
- NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
+ if (prog && !prog->aux->xdp_has_frags &&
+ dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
+ NL_SET_ERR_MSG_MOD(extack, "prog does not support XDP frags");
return -EOPNOTSUPP;
}
@@ -5272,6 +5327,10 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
win_protect |= 3 << (2 * i);
}
} else {
+ if (pp->neta_ac5)
+ mvreg_write(pp, MVNETA_WIN_BASE(0),
+ (MVNETA_AC5_CNM_DDR_ATTR << 8) |
+ MVNETA_AC5_CNM_DDR_TARGET);
/* For Armada3700 open default 4GB Mbus window, leaving
* arbitration of target/attribute to a different layer
* of configuration.
@@ -5321,26 +5380,66 @@ static int mvneta_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
- dev->irq = irq_of_parse_and_map(dn, 0);
- if (dev->irq == 0)
- return -EINVAL;
+ dev->tx_queue_len = MVNETA_MAX_TXD;
+ dev->watchdog_timeo = 5 * HZ;
+ dev->netdev_ops = &mvneta_netdev_ops;
+ dev->ethtool_ops = &mvneta_eth_tool_ops;
+
+ pp = netdev_priv(dev);
+ spin_lock_init(&pp->lock);
+ pp->dn = dn;
+
+ pp->rxq_def = rxq_def;
+ pp->indir[0] = rxq_def;
err = of_get_phy_mode(dn, &phy_mode);
if (err) {
dev_err(&pdev->dev, "incorrect phy-mode\n");
- goto err_free_irq;
+ return err;
}
+ pp->phy_interface = phy_mode;
+
comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
- if (comphy == ERR_PTR(-EPROBE_DEFER)) {
- err = -EPROBE_DEFER;
- goto err_free_irq;
- } else if (IS_ERR(comphy)) {
+ if (comphy == ERR_PTR(-EPROBE_DEFER))
+ return -EPROBE_DEFER;
+
+ if (IS_ERR(comphy))
comphy = NULL;
+
+ pp->comphy = comphy;
+
+ pp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pp->base))
+ return PTR_ERR(pp->base);
+
+ /* Get special SoC configurations */
+ if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
+ pp->neta_armada3700 = true;
+ if (of_device_is_compatible(dn, "marvell,armada-ac5-neta")) {
+ pp->neta_armada3700 = true;
+ pp->neta_ac5 = true;
}
- pp = netdev_priv(dev);
- spin_lock_init(&pp->lock);
+ dev->irq = irq_of_parse_and_map(dn, 0);
+ if (dev->irq == 0)
+ return -EINVAL;
+
+ pp->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(pp->clk))
+ pp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pp->clk)) {
+ err = PTR_ERR(pp->clk);
+ goto err_free_irq;
+ }
+
+ clk_prepare_enable(pp->clk);
+
+ pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
+ if (!IS_ERR(pp->clk_bus))
+ clk_prepare_enable(pp->clk_bus);
+
+ pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
pp->phylink_config.dev = &dev->dev;
pp->phylink_config.type = PHYLINK_NETDEV;
@@ -5377,55 +5476,16 @@ static int mvneta_probe(struct platform_device *pdev)
phy_mode, &mvneta_phylink_ops);
if (IS_ERR(phylink)) {
err = PTR_ERR(phylink);
- goto err_free_irq;
- }
-
- dev->tx_queue_len = MVNETA_MAX_TXD;
- dev->watchdog_timeo = 5 * HZ;
- dev->netdev_ops = &mvneta_netdev_ops;
-
- dev->ethtool_ops = &mvneta_eth_tool_ops;
-
- pp->phylink = phylink;
- pp->comphy = comphy;
- pp->phy_interface = phy_mode;
- pp->dn = dn;
-
- pp->rxq_def = rxq_def;
- pp->indir[0] = rxq_def;
-
- /* Get special SoC configurations */
- if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
- pp->neta_armada3700 = true;
-
- pp->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(pp->clk))
- pp->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(pp->clk)) {
- err = PTR_ERR(pp->clk);
- goto err_free_phylink;
- }
-
- clk_prepare_enable(pp->clk);
-
- pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
- if (!IS_ERR(pp->clk_bus))
- clk_prepare_enable(pp->clk_bus);
-
- pp->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(pp->base)) {
- err = PTR_ERR(pp->base);
goto err_clk;
}
- pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
- phylink_set_pcs(phylink, &pp->phylink_pcs);
+ pp->phylink = phylink;
/* Alloc per-cpu port structure */
pp->ports = alloc_percpu(struct mvneta_pcpu_port);
if (!pp->ports) {
err = -ENOMEM;
- goto err_clk;
+ goto err_free_phylink;
}
/* Alloc per-cpu stats */
@@ -5569,12 +5629,12 @@ err_netdev:
free_percpu(pp->stats);
err_free_ports:
free_percpu(pp->ports);
-err_clk:
- clk_disable_unprepare(pp->clk_bus);
- clk_disable_unprepare(pp->clk);
err_free_phylink:
if (pp->phylink)
phylink_destroy(pp->phylink);
+err_clk:
+ clk_disable_unprepare(pp->clk_bus);
+ clk_disable_unprepare(pp->clk);
err_free_irq:
irq_dispose_mapping(dev->irq);
return err;
@@ -5720,6 +5780,7 @@ static const struct of_device_id mvneta_match[] = {
{ .compatible = "marvell,armada-370-neta" },
{ .compatible = "marvell,armada-xp-neta" },
{ .compatible = "marvell,armada-3700-neta" },
+ { .compatible = "marvell,armada-ac5-neta" },
{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 3631d612aaca..25491edc35ce 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -578,31 +578,78 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
}
}
+static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (is_dev_rpm(cgx))
+ return 0;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
+ return 0;
+}
+
/* Enable or disable forwarding received pause frames to Tx block */
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
+ u8 rx_pause, tx_pause;
+ bool is_pfc_enabled;
+ struct lmac *lmac;
u64 cfg;
if (!cgx)
return;
- if (enable) {
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return;
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ /* Pause frames are not enabled, just return */
+ if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
+ return;
+
+ cgx_lmac_get_pause_frm_status(cgx, lmac_id, &rx_pause, &tx_pause);
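+ /* 802.3x pause and PFC are mutually exclusive on an LMAC, so a
+ * clear rx_pause status is taken to mean PFC mode is in effect
+ */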
+ is_pfc_enabled = !rx_pause;
+
+ if (enable) {
+ if (!is_pfc_enabled) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg |= CGXX_SMUX_CBFC_CTL_BCK_EN;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+ }
} else {
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ if (!is_pfc_enabled) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg &= ~CGXX_SMUX_CBFC_CTL_BCK_EN;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+ }
}
}
@@ -722,26 +769,6 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
return !!(last & DATA_PKT_TX_EN);
}
-static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
- u8 *tx_pause, u8 *rx_pause)
-{
- struct cgx *cgx = cgxd;
- u64 cfg;
-
- if (is_dev_rpm(cgx))
- return 0;
-
- if (!is_lmac_valid(cgx, lmac_id))
- return -ENODEV;
-
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
-
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
- *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
- return 0;
-}
-
static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
u8 tx_pause, u8 rx_pause)
{
@@ -782,21 +809,8 @@ static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
if (!is_lmac_valid(cgx, lmac_id))
return;
- if (enable) {
- /* Enable receive pause frames */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
-
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
-
- /* Enable pause frames transmission */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
- cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV;
- cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+ if (enable) {
/* Set pause time and interval */
cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
DEFAULT_PAUSE_TIME);
@@ -813,21 +827,120 @@ static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
cfg &= ~0xFFFFULL;
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
cfg | (DEFAULT_PAUSE_TIME / 2));
- } else {
- /* ALL pause frames received are completely ignored */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ }
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+ /* ALL pause frames received are completely ignored */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ /* Disable pause frames transmission */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+
+ cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
+ cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
+ cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+}
+
+int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ int pfvf_idx)
+{
+ struct cgx *cgx = cgxd;
+ struct lmac *lmac;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ if (!rx_pause)
+ clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
+ else
+ set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
+
+ if (!tx_pause)
+ clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
+ else
+ set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
+
+ /* check if other pfvfs are using flow control */
+ if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) {
+ dev_warn(&cgx->pdev->dev,
+ "Receive Flow control disable not permitted as its used by other PFVFs\n");
+ return -EPERM;
+ }
+
+ if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) {
+ dev_warn(&cgx->pdev->dev,
+ "Transmit Flow control disable not permitted as its used by other PFVFs\n");
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
+ u8 rx_pause, u16 pfc_en)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ /* Return as no traffic classes are requested */
+ if (tx_pause && !pfc_en)
+ return 0;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
- /* Disable pause frames transmission */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
- cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
- cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+ if (rx_pause) {
+ cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN |
+ CGXX_SMUX_CBFC_CTL_BCK_EN |
+ CGXX_SMUX_CBFC_CTL_DRP_EN);
+ } else {
+ cfg &= ~(CGXX_SMUX_CBFC_CTL_RX_EN |
+ CGXX_SMUX_CBFC_CTL_BCK_EN |
+ CGXX_SMUX_CBFC_CTL_DRP_EN);
}
+
+ if (tx_pause)
+ cfg |= CGXX_SMUX_CBFC_CTL_TX_EN;
+ else
+ cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN;
+
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
+
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+
+ /* Write source MAC address which will be filled into PFC packet */
+ cfg = cgx_lmac_addr_get(cgx->cgx_id, lmac_id);
+ cgx_write(cgx, lmac_id, CGXX_SMUX_SMAC, cfg);
+
+ return 0;
+}
+
+int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+
+ *rx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_RX_EN);
+ *tx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_TX_EN);
+
+ return 0;
}
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
@@ -1489,6 +1602,16 @@ static int cgx_lmac_init(struct cgx *cgx)
/* Reserve first entry for default MAC address */
set_bit(0, lmac->mac_to_index_bmap.bmap);
+ lmac->rx_fc_pfvf_bmap.max = 128;
+ err = rvu_alloc_bitmap(&lmac->rx_fc_pfvf_bmap);
+ if (err)
+ goto err_dmac_bmap_free;
+
+ lmac->tx_fc_pfvf_bmap.max = 128;
+ err = rvu_alloc_bitmap(&lmac->tx_fc_pfvf_bmap);
+ if (err)
+ goto err_rx_fc_bmap_free;
+
init_waitqueue_head(&lmac->wq_cmd_cmplt);
mutex_init(&lmac->cmd_lock);
spin_lock_init(&lmac->event_cb_lock);
@@ -1505,6 +1628,10 @@ static int cgx_lmac_init(struct cgx *cgx)
return cgx_lmac_verify_fwi_version(cgx);
err_bitmap_free:
+ rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap);
+err_rx_fc_bmap_free:
+ rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap);
+err_dmac_bmap_free:
rvu_free_bitmap(&lmac->mac_to_index_bmap);
err_name_free:
kfree(lmac->name);
@@ -1572,6 +1699,8 @@ static struct mac_ops cgx_mac_ops = {
.mac_enadis_ptp_config = cgx_lmac_ptp_config,
.mac_rx_tx_enable = cgx_lmac_rx_tx_enable,
.mac_tx_enable = cgx_lmac_tx_enable,
+ .pfc_config = cgx_lmac_pfc_config,
+ .mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg,
};
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index ab1e4abdea38..bd2f33a26eee 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -76,6 +76,13 @@
#define CGXX_SMUX_TX_CTL 0x20178
#define CGXX_SMUX_TX_PAUSE_PKT_TIME 0x20110
#define CGXX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120
+#define CGXX_SMUX_SMAC 0x20108
+#define CGXX_SMUX_CBFC_CTL 0x20218
+#define CGXX_SMUX_CBFC_CTL_RX_EN BIT_ULL(0)
+#define CGXX_SMUX_CBFC_CTL_TX_EN BIT_ULL(1)
+#define CGXX_SMUX_CBFC_CTL_DRP_EN BIT_ULL(2)
+#define CGXX_SMUX_CBFC_CTL_BCK_EN BIT_ULL(3)
+#define CGX_PFC_CLASS_MASK GENMASK_ULL(47, 32)
#define CGXX_GMP_GMI_TX_PAUSE_PKT_TIME 0x38230
#define CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL 0x38248
#define CGX_SMUX_TX_CTL_L2P_BP_CONV BIT_ULL(7)
@@ -172,4 +179,10 @@ u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset);
int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index);
u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id);
u64 cgx_read_dmac_entry(void *cgxd, int index);
+int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
+int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ int pfvf_idx);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index b33e7d1d0851..f30581bf0688 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -17,6 +17,8 @@
* @resp: command response
* @link_info: link related information
* @mac_to_index_bmap: Mac address to CGX table index mapping
+ * @rx_fc_pfvf_bmap: Receive flow control enabled netdev mapping
+ * @tx_fc_pfvf_bmap: Transmit flow control enabled netdev mapping
* @event_cb: callback for linkchange events
* @event_cb_lock: lock for serializing callback with unregister
* @cgx: parent cgx port
@@ -33,6 +35,8 @@ struct lmac {
u64 resp;
struct cgx_link_user_info link_info;
struct rsrc_bmap mac_to_index_bmap;
+ struct rsrc_bmap rx_fc_pfvf_bmap;
+ struct rsrc_bmap tx_fc_pfvf_bmap;
struct cgx_event_cb event_cb;
/* lock for serializing callback with unregister */
spinlock_t event_cb_lock;
@@ -110,6 +114,12 @@ struct mac_ops {
int (*mac_rx_tx_enable)(void *cgxd, int lmac_id, bool enable);
int (*mac_tx_enable)(void *cgxd, int lmac_id, bool enable);
+ int (*pfc_config)(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause, u16 pfc_en);
+
+ int (*mac_get_pfc_frm_cfg)(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause);
};
struct cgx {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 58e2aeebc14f..550cb11197bf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -172,6 +172,8 @@ M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, msg_req, msg_rsp) \
M(CGX_MAC_ADDR_UPDATE, 0x21E, cgx_mac_addr_update, cgx_mac_addr_update_req, \
msg_rsp) \
+M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg, \
+ cgx_pfc_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
npa_lf_alloc_req, npa_lf_alloc_rsp) \
@@ -609,6 +611,21 @@ struct rpm_stats_rsp {
u64 tx_stats[RPM_TX_STATS_COUNT];
};
+struct cgx_pfc_cfg {
+ struct mbox_msghdr hdr;
+ u8 rx_pause;
+ u8 tx_pause;
+ u16 pfc_en; /* bitmap indicating pfc enabled traffic classes */
+};
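+
+/* Example: pfc_en = BIT(0) | BIT(3) requests PFC on traffic classes 0 and 3,
+ * while rx_pause/tx_pause gate the receive and transmit directions
+ * independently.
+ */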
+
+struct cgx_pfc_rsp {
+ struct mbox_msghdr hdr;
+ u8 rx_pause;
+ u8 tx_pause;
+};
+
+/* NPA mbox message formats */
+
struct npc_set_pkind {
struct mbox_msghdr hdr;
#define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0)
@@ -1603,6 +1620,8 @@ enum cgx_af_status {
LMAC_AF_ERR_INVALID_PARAM = -1101,
LMAC_AF_ERR_PF_NOT_MAPPED = -1102,
LMAC_AF_ERR_PERM_DENIED = -1103,
+ LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED = -1104,
+ LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED = -1105,
};
#endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 77fd39e2c8db..9b6e587e78b4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -455,7 +455,7 @@ struct npc_coalesced_kpu_prfl {
u8 name[NPC_NAME_LEN]; /* KPU Profile name */
u64 version; /* KPU firmware/profile version */
u8 num_prfl; /* No of NPC profiles. */
- u16 prfl_sz[0];
+ u16 prfl_sz[];
};
struct npc_mcam_kex {
@@ -482,7 +482,7 @@ struct npc_kpu_fwdata {
* struct npc_kpu_profile_cam[entries];
* struct npc_kpu_profile_action[entries];
*/
- u8 data[0];
+ u8 data[];
} __packed;
struct npc_lt_def {
@@ -572,7 +572,7 @@ struct npc_kpu_profile_fwdata {
* Custom KPU CAM and ACTION configuration entries.
* struct npc_kpu_fwdata kpu[kpus];
*/
- u8 data[0];
+ u8 data[];
} __packed;
struct rvu_npc_mcam_rule {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index e682b7bfde64..67a6821d2dff 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -25,6 +25,9 @@
#define PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP 0xB600
#define PCI_DEVID_OCTEONTX2_RST 0xA085
#define PCI_DEVID_CN10K_PTP 0xA09E
+#define PCI_SUBSYS_DEVID_CN10K_A_PTP 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_A_PTP 0xBA00
+#define PCI_SUBSYS_DEVID_CNF10K_B_PTP 0xBC00
#define PCI_PTP_BAR_NO 0
@@ -46,10 +49,105 @@
#define PTP_CLOCK_HI 0xF10ULL
#define PTP_CLOCK_COMP 0xF18ULL
#define PTP_TIMESTAMP 0xF20ULL
+#define PTP_CLOCK_SEC 0xFD0ULL
+
+#define CYCLE_MULT 1000
static struct ptp *first_ptp_block;
static const struct pci_device_id ptp_id_table[];
+static bool cn10k_ptp_errata(struct ptp *ptp)
+{
+ if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
+ ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
+ return true;
+ return false;
+}
+
+static bool is_ptp_tsfmt_sec_nsec(struct ptp *ptp)
+{
+ if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
+ ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
+ return true;
+ return false;
+}
+
+static u64 read_ptp_tstmp_sec_nsec(struct ptp *ptp)
+{
+ u64 sec, sec1, nsec;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
+ sec = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
+ nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
+ sec1 = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
+ /* check nsec rollover */
+ if (sec1 > sec) {
+ nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
+ sec = sec1;
+ }
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+
+ return sec * NSEC_PER_SEC + nsec;
+}
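+
+/* The second PTP_CLOCK_SEC read above catches a seconds rollover racing
+ * with the PTP_CLOCK_HI read; when one is seen, the nanoseconds value is
+ * re-sampled so the returned sec/nsec pair stays consistent.
+ */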
+
+static u64 read_ptp_tstmp_nsec(struct ptp *ptp)
+{
+ return readq(ptp->reg_base + PTP_CLOCK_HI);
+}
+
+static u64 ptp_calc_adjusted_comp(u64 ptp_clock_freq)
+{
+ u64 comp, adj = 0, cycles_per_sec, ns_drift = 0;
+ u32 ptp_clock_nsec, cycle_time;
+ int cycle;
+
+ /* Errata:
+ * Issue #1: At the time of 1 sec rollover of the nano-second counter,
+ * the nano-second counter is set to 0. However, it should be set to
+ * (existing counter_value - 10^9).
+ *
+ * Issue #2: The nano-second counter rolls over at 0x3B9A_C9FF.
+ * It should roll over at 0x3B9A_CA00.
+ */
+
+ /* calculate ptp_clock_comp value */
+ comp = ((u64)1000000000ULL << 32) / ptp_clock_freq;
+ /* use CYCLE_MULT to avoid accuracy loss due to integer arithmetic */
+ cycle_time = NSEC_PER_SEC * CYCLE_MULT / ptp_clock_freq;
+ /* cycles per sec */
+ cycles_per_sec = ptp_clock_freq;
+
+ /* check whether ptp nanosecond counter rolls over early */
+ cycle = cycles_per_sec - 1;
+ ptp_clock_nsec = (cycle * comp) >> 32;
+ while (ptp_clock_nsec < NSEC_PER_SEC) {
+ if (ptp_clock_nsec == 0x3B9AC9FF)
+ goto calc_adj_comp;
+ cycle++;
+ ptp_clock_nsec = (cycle * comp) >> 32;
+ }
+ /* compute nanoseconds lost per second when nsec counter rolls over */
+ ns_drift = ptp_clock_nsec - NSEC_PER_SEC;
+ /* calculate ptp_clock_comp adjustment */
+ if (ns_drift > 0) {
+ adj = comp * ns_drift;
+ adj = adj / 1000000000ULL;
+ }
+ /* speed up the ptp clock to account for nanoseconds lost */
+ comp += adj;
+ return comp;
+
+calc_adj_comp:
+ /* slow down the ptp clock to not rollover early */
+ adj = comp * cycle_time;
+ adj = adj / 1000000000ULL;
+ adj = adj / CYCLE_MULT;
+ comp -= adj;
+
+ return comp;
+}
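+
+/* Worked example: with ptp_clock_freq = 1 GHz the base value is
+ * comp = (10^9 << 32) / 10^9 = 2^32, i.e. exactly 1 ns per cycle in
+ * 32.32 fixed point; slower input clocks yield proportionally larger
+ * per-cycle increments before the errata adjustment above.
+ */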
+
struct ptp *ptp_get(void)
{
struct ptp *ptp = first_ptp_block;
@@ -77,8 +175,8 @@ void ptp_put(struct ptp *ptp)
static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
{
bool neg_adj = false;
- u64 comp;
- u64 adj;
+ u32 freq, freq_adj;
+ u64 comp, adj;
s64 ppb;
if (scaled_ppm < 0) {
@@ -100,15 +198,22 @@ static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
* where tbase is the basic compensation value calculated
* initially in the probe function.
*/
- comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
/* convert scaled_ppm to ppb */
ppb = 1 + scaled_ppm;
ppb *= 125;
ppb >>= 13;
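/* scaled_ppm carries a 16-bit binary fraction, so
 * ppb = scaled_ppm * 1000 / 2^16 = scaled_ppm * 125 >> 13,
 * which is what the two lines above compute
 */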
- adj = comp * ppb;
- adj = div_u64(adj, 1000000000ull);
- comp = neg_adj ? comp - adj : comp + adj;
+ if (cn10k_ptp_errata(ptp)) {
+ /* calculate the new frequency based on ppb */
+ freq_adj = (ptp->clock_rate * ppb) / 1000000000ULL;
+ freq = neg_adj ? ptp->clock_rate + freq_adj : ptp->clock_rate - freq_adj;
+ comp = ptp_calc_adjusted_comp(freq);
+ } else {
+ comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ adj = comp * ppb;
+ adj = div_u64(adj, 1000000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+ }
writeq(comp, ptp->reg_base + PTP_CLOCK_COMP);
return 0;
@@ -117,7 +222,7 @@ static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
static int ptp_get_clock(struct ptp *ptp, u64 *clk)
{
/* Return the current PTP clock */
- *clk = readq(ptp->reg_base + PTP_CLOCK_HI);
+ *clk = ptp->read_ptp_tstmp(ptp);
return 0;
}
@@ -166,7 +271,11 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
- clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ if (cn10k_ptp_errata(ptp))
+ clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate);
+ else
+ clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+
/* Initial compensation value to start the nanosecs counter */
writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
}
@@ -214,6 +323,12 @@ static int ptp_probe(struct pci_dev *pdev,
if (!first_ptp_block)
first_ptp_block = ptp;
+ spin_lock_init(&ptp->ptp_lock);
+ if (is_ptp_tsfmt_sec_nsec(ptp))
+ ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
+ else
+ ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
+
return 0;
error_free:
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
index 1b81a0493cd3..95a955159f40 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -15,6 +15,8 @@
struct ptp {
struct pci_dev *pdev;
void __iomem *reg_base;
+ u64 (*read_ptp_tstmp)(struct ptp *ptp);
+ spinlock_t ptp_lock; /* lock */
u32 clock_rate;
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index 9ea2f6ac38ec..47e83d7a5804 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -32,6 +32,8 @@ static struct mac_ops rpm_mac_ops = {
.mac_enadis_ptp_config = rpm_lmac_ptp_config,
.mac_rx_tx_enable = rpm_lmac_rx_tx_enable,
.mac_tx_enable = rpm_lmac_tx_enable,
+ .pfc_config = rpm_lmac_pfc_config,
+ .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
};
struct mac_ops *rpm_get_mac_ops(void)
@@ -96,11 +98,20 @@ int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable)
void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable)
{
rpm_t *rpm = rpmd;
+ struct lmac *lmac;
u64 cfg;
if (!rpm)
return;
+ lmac = lmac_pdata(lmac_id, rpm);
+ if (!lmac)
+ return;
+
+ /* Pause frames are not enabled, just return */
+ if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
+ return;
+
if (enable) {
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
@@ -122,13 +133,94 @@ int rpm_lmac_get_pause_frm_status(void *rpmd, int lmac_id,
return -ENODEV;
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ if (!(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE)) {
+ *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
+ }
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
return 0;
}
+static void rpm_cfg_pfc_quanta_thresh(rpm_t *rpm, int lmac_id,
+ unsigned long pfc_en,
+ bool enable)
+{
+ u64 quanta_offset = 0, quanta_thresh = 0, cfg;
+ int i, shift;
+
+ /* Set pause time and interval */
+ for_each_set_bit(i, &pfc_en, 16) {
+ switch (i) {
+ case 0:
+ case 1:
+ quanta_offset = RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL01_QUANTA_THRESH;
+ break;
+ case 2:
+ case 3:
+ quanta_offset = RPMX_MTI_MAC100X_CL23_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL23_QUANTA_THRESH;
+ break;
+ case 4:
+ case 5:
+ quanta_offset = RPMX_MTI_MAC100X_CL45_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL45_QUANTA_THRESH;
+ break;
+ case 6:
+ case 7:
+ quanta_offset = RPMX_MTI_MAC100X_CL67_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL67_QUANTA_THRESH;
+ break;
+ case 8:
+ case 9:
+ quanta_offset = RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL89_QUANTA_THRESH;
+ break;
+ case 10:
+ case 11:
+ quanta_offset = RPMX_MTI_MAC100X_CL1011_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH;
+ break;
+ case 12:
+ case 13:
+ quanta_offset = RPMX_MTI_MAC100X_CL1213_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH;
+ break;
+ case 14:
+ case 15:
+ quanta_offset = RPMX_MTI_MAC100X_CL1415_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH;
+ break;
+ }
+
+ if (!quanta_offset || !quanta_thresh)
+ continue;
+
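+ /* each 64-bit quanta/threshold register packs two classes:
+ * the even class in bits 15:0, the odd class in bits 31:16
+ */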
+ shift = i % 2;
+ cfg = rpm_read(rpm, lmac_id, quanta_offset);
+ if (enable) {
+ cfg |= ((u64)RPM_DEFAULT_PAUSE_TIME << shift * 16);
+ } else {
+ if (!shift)
+ cfg &= ~GENMASK_ULL(15, 0);
+ else
+ cfg &= ~GENMASK_ULL(31, 16);
+ }
+ rpm_write(rpm, lmac_id, quanta_offset, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, quanta_thresh);
+ if (enable) {
+ cfg |= ((u64)(RPM_DEFAULT_PAUSE_TIME / 2) << shift * 16);
+ } else {
+ if (!shift)
+ cfg &= ~GENMASK_ULL(15, 0);
+ else
+ cfg &= ~GENMASK_ULL(31, 16);
+ }
+ rpm_write(rpm, lmac_id, quanta_thresh, cfg);
+ }
+}
+
int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
u8 rx_pause)
{
@@ -152,8 +244,12 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
cfg = rpm_read(rpm, 0, RPMX_CMR_RX_OVR_BP);
if (tx_pause) {
+ /* Configure CL0 Pause Quanta & threshold for 802.3X frames */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true);
cfg &= ~RPMX_CMR_RX_OVR_BP_EN(lmac_id);
} else {
+ /* Disable all Pause Quanta & threshold values */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac_id);
cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac_id);
}
@@ -166,56 +262,20 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
rpm_t *rpm = rpmd;
u64 cfg;
- if (enable) {
- /* Enable 802.3 pause frame mode */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
-
- /* Enable receive pause frames */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
-
- /* Enable forward pause to TX block */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
-
- /* Enable pause frames transmission */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
-
- /* Set pause time and interval */
- cfg = rpm_read(rpm, lmac_id,
- RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA);
- cfg &= ~0xFFFFULL;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA,
- cfg | RPM_DEFAULT_PAUSE_TIME);
- /* Set pause interval as the hardware default is too short */
- cfg = rpm_read(rpm, lmac_id,
- RPMX_MTI_MAC100X_CL01_QUANTA_THRESH);
- cfg &= ~0xFFFFULL;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_CL01_QUANTA_THRESH,
- cfg | (RPM_DEFAULT_PAUSE_TIME / 2));
-
- } else {
- /* ALL pause frames received are completely ignored */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ /* ALL pause frames received are completely ignored */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- /* Disable forward pause to TX block */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ /* Disable forward pause to TX block */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- /* Disable pause frames transmission */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- }
+ /* Disable pause frames transmission */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
}
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
@@ -323,3 +383,65 @@ void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
cfg &= ~RPMX_RX_TS_PREPEND;
rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg);
}
+
+int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ /* reset PFC class quanta and threshold */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+
+ if (rx_pause) {
+ cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
+ } else {
+ cfg |= (RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
+ }
+
+ if (tx_pause) {
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, pfc_en, true);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ } else {
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ }
+
+ if (!rx_pause && !tx_pause)
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
+ else
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
+
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+ cfg = FIELD_SET(RPM_PFC_CLASS_MASK, pfc_en, cfg);
+ rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
+
+ return 0;
+}
+
+int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause, u8 *rx_pause)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE) {
+ *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
+ }
+
+ return 0;
+}
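rpm_lmac_pfc_config() above relies on rpm_cfg_pfc_quanta_thresh(), whose body is not part of this hunk. Below is a minimal sketch of what such a helper could look like, assuming each CLxy register carries two 16-bit class fields (low class in bits 15:0, high class in bits 31:16) and reusing only the register names added to rpm.h further down; this is an illustration, not the patch's actual implementation.

static void rpm_cfg_pfc_quanta_thresh_sketch(rpm_t *rpm, int lmac_id,
					     u16 pfc_en, bool enable)
{
	static const u64 quanta[8] = {
		RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA,
		RPMX_MTI_MAC100X_CL23_PAUSE_QUANTA,
		RPMX_MTI_MAC100X_CL45_PAUSE_QUANTA,
		RPMX_MTI_MAC100X_CL67_PAUSE_QUANTA,
		RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA,
		RPMX_MTI_MAC100X_CL1011_PAUSE_QUANTA,
		RPMX_MTI_MAC100X_CL1213_PAUSE_QUANTA,
		RPMX_MTI_MAC100X_CL1415_PAUSE_QUANTA,
	};
	static const u64 thresh[8] = {
		RPMX_MTI_MAC100X_CL01_QUANTA_THRESH,
		RPMX_MTI_MAC100X_CL23_QUANTA_THRESH,
		RPMX_MTI_MAC100X_CL45_QUANTA_THRESH,
		RPMX_MTI_MAC100X_CL67_QUANTA_THRESH,
		RPMX_MTI_MAC100X_CL89_QUANTA_THRESH,
		RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH,
		RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH,
		RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH,
	};
	int i, shift;
	u64 cfg;

	for (i = 0; i < 16; i++) {
		if (!(pfc_en & BIT(i)))
			continue;

		shift = (i % 2) * 16;	/* low or high class of the pair */

		/* pause quanta for this class */
		cfg = rpm_read(rpm, lmac_id, quanta[i / 2]);
		cfg &= ~(0xFFFFULL << shift);
		if (enable)
			cfg |= (u64)RPM_DEFAULT_PAUSE_TIME << shift;
		rpm_write(rpm, lmac_id, quanta[i / 2], cfg);

		/* refresh threshold, half of the pause time as above */
		cfg = rpm_read(rpm, lmac_id, thresh[i / 2]);
		cfg &= ~(0xFFFFULL << shift);
		if (enable)
			cfg |= (u64)(RPM_DEFAULT_PAUSE_TIME / 2) << shift;
		rpm_write(rpm, lmac_id, thresh[i / 2], cfg);
	}
}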
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index ff580311edd0..9ab8d49dd180 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -33,7 +33,21 @@
#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE BIT_ULL(8)
#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE BIT_ULL(19)
#define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8
+#define RPMX_MTI_MAC100X_CL23_PAUSE_QUANTA 0x80B0
+#define RPMX_MTI_MAC100X_CL45_PAUSE_QUANTA 0x80B8
+#define RPMX_MTI_MAC100X_CL67_PAUSE_QUANTA 0x80C0
#define RPMX_MTI_MAC100X_CL01_QUANTA_THRESH 0x80C8
+#define RPMX_MTI_MAC100X_CL23_QUANTA_THRESH 0x80D0
+#define RPMX_MTI_MAC100X_CL45_QUANTA_THRESH 0x80D8
+#define RPMX_MTI_MAC100X_CL67_QUANTA_THRESH 0x80E0
+#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
+#define RPMX_MTI_MAC100X_CL1011_PAUSE_QUANTA 0x8110
+#define RPMX_MTI_MAC100X_CL1213_PAUSE_QUANTA 0x8118
+#define RPMX_MTI_MAC100X_CL1415_PAUSE_QUANTA 0x8120
+#define RPMX_MTI_MAC100X_CL89_QUANTA_THRESH 0x8128
+#define RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH 0x8130
+#define RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH 0x8138
+#define RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH 0x8140
#define RPM_DEFAULT_PAUSE_TIME 0xFFFF
#define RPMX_CMR_RX_OVR_BP 0x4120
#define RPMX_CMR_RX_OVR_BP_EN(x) BIT_ULL((x) + 8)
@@ -45,6 +59,13 @@
#define RPM_LMAC_FWI 0xa
#define RPM_TX_EN BIT_ULL(0)
#define RPM_RX_EN BIT_ULL(1)
+#define RPMX_CMRX_PRT_CBFC_CTL 0x5B08
+#define RPMX_CMRX_PRT_CBFC_CTL_LOGL_EN_RX_SHIFT 33
+#define RPMX_CMRX_PRT_CBFC_CTL_PHYS_BP_SHIFT 16
+#define RPMX_CMRX_PRT_CBFC_CTL_LOGL_EN_TX_SHIFT 0
+#define RPM_PFC_CLASS_MASK GENMASK_ULL(48, 33)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_PAD_EN BIT_ULL(11)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD BIT_ULL(7)
/* Function Declarations */
int rpm_get_nr_lmacs(void *rpmd);
@@ -61,4 +87,8 @@ int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat);
void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable);
int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable);
int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
#endif /* RPM_H */
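The rpm_lmac_pfc_config() hunk above writes the PFC class bitmap through FIELD_SET(RPM_PFC_CLASS_MASK, pfc_en, cfg). Assuming FIELD_SET(mask, val, old) has the usual clear-then-insert semantics, it is equivalent to this open-coded form built on <linux/bitfield.h> (sketch for illustration, not driver code):

#include <linux/bitfield.h>

/* Replace the RX logical-class-enable field (bits 48:33, i.e.
 * RPM_PFC_CLASS_MASK) of the CBFC control value with the PFC class
 * bitmap, leaving every other field untouched.
 */
static u64 rpm_cbfc_set_classes(u64 old, u16 pfc_en)
{
	old &= ~RPM_PFC_CLASS_MASK;
	old |= FIELD_PREP(RPM_PFC_CLASS_MASK, (u64)pfc_en);
	return old;
}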
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 5ed94cfb47d2..513b43ecd5be 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -807,6 +807,9 @@ u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
+int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
int type);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 8a7ac5a8b821..9ffe99830e34 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -863,6 +863,45 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 rx_pfc = 0, tx_pfc = 0;
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
+ return 0;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX
+ * LMACs; reject it if it comes from any other PF/VF.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc);
+ if (tx_pfc || rx_pfc) {
+ dev_warn(rvu->dev,
+ "Can not configure 802.3X flow control as PFC frames are enabled");
+ return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
+ pcifunc & RVU_PFVF_FUNC_MASK)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return LMAC_AF_ERR_PERM_DENIED;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause);
+}
+
int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
struct cgx_pause_frm_cfg *req,
struct cgx_pause_frm_cfg *rsp)
@@ -870,11 +909,9 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
int pf = rvu_get_pf(req->hdr.pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
+ int err = 0;
void *cgxd;
- if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
- return 0;
-
/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
* if received from other PF/VF simply ACK, nothing to do.
*/
@@ -886,13 +923,11 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
mac_ops = get_mac_ops(cgxd);
if (req->set)
- mac_ops->mac_enadis_pause_frm(cgxd, lmac_id,
- req->tx_pause, req->rx_pause);
+ err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause);
else
- mac_ops->mac_get_pause_frm_status(cgxd, lmac_id,
- &rsp->tx_pause,
- &rsp->rx_pause);
- return 0;
+ mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
+
+ return err;
}
int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
@@ -1079,3 +1114,67 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
}
+
+int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
+ u8 rx_pause, u16 pfc_en)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 rx_8023 = 0, tx_8023 = 0;
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX
+ * LMACs; reject it if it comes from any other PF/VF.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023);
+ if (tx_8023 || rx_8023) {
+ dev_warn(rvu->dev,
+ "Can not configure PFC as 802.3X pause frames are enabled");
+ return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
+ pcifunc & RVU_PFVF_FUNC_MASK)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return LMAC_AF_ERR_PERM_DENIED;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en);
+}
+
+int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
+ struct cgx_pfc_cfg *req,
+ struct cgx_pfc_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+ int err;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX
+ * LMACs; reject it if it comes from any other PF/VF.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause,
+ req->rx_pause, req->pfc_en);
+
+ mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
+ return err;
+}
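Both mailbox paths above enforce the same mutual-exclusion rule: 802.3x link-level pause and priority flow control cannot be active at the same time, so each configuration request first queries the other feature's state and bails out if it is enabled. A condensed sketch of that guard follows; the helper is hypothetical and only composes the mac_ops callbacks introduced by this patch.

static int fc_pfc_exclusion_check(struct mac_ops *mac_ops, void *cgxd,
				  int lmac_id, bool want_pfc)
{
	u8 tx = 0, rx = 0;

	if (want_pfc)	/* enabling PFC: 802.3x pause must be off */
		mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx, &rx);
	else		/* enabling 802.3x pause: PFC must be off */
		mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx, &rx);

	return (tx || rx) ? -EPERM : 0;
}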
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index a73a8017e0ee..a79201a9a6f0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -605,6 +605,7 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
} else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) {
/* Registers that can be accessed from PF */
switch (offset) {
+ case CPT_AF_DIAG:
case CPT_AF_CTL:
case CPT_AF_PF_FUNC:
case CPT_AF_BLK_RST:
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 97fb61915379..0fa625e2528e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -296,7 +296,6 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
struct rvu_hwinfo *hw = rvu->hw;
struct sdp_node_info *sdp_info;
int pkind, pf, vf, lbkid, vfid;
- struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
bool from_vf;
int err;
@@ -326,13 +325,6 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
rvu_npc_set_pkind(rvu, pkind, pfvf);
- mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
-
- /* By default we enable pause frames */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
- mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
- rvu),
- lmac_id, true, true);
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
@@ -533,7 +525,7 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
*/
switch (type) {
case NIX_INTF_TYPE_CGX:
- if ((req->chan_base + req->chan_cnt) > 15)
+ if ((req->chan_base + req->chan_cnt) > 16)
return -EINVAL;
rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
/* Assign bpid based on cgx, lmac and chan id */
@@ -4578,6 +4570,12 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
pfvf->hw_rx_tstamp_en = false;
}
+ /* reset priority flow control config */
+ rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
+
+ /* reset 802.3x flow control config */
+ rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
+
nix_ctx_free(rvu, pfvf);
nix_free_all_bandprof(rvu, pcifunc);
@@ -5314,6 +5312,7 @@ int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
aq_req.op = NIX_AQ_INSTOP_WRITE;
memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
+ memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
/* Clear higher layer enable bit in the mid profile, just in case */
aq_req.prof.hl_en = 0;
aq_req.prof_mask.hl_en = 1;
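The added memset() above follows the NIX admin-queue write convention: a context field is updated only when the corresponding bit in the companion *_mask structure is set, so filling prof_mask with 0xff means "write the whole band profile" before individual mask bits are narrowed again. A minimal sketch of a single-field update under the same convention; the field names come from the hunk above, while the request type name is an assumption since its definition is not shown here.

/* Hypothetical single-field band-profile update: only hl_en is
 * written because only its mask bit is set.
 */
static void nix_bandprof_clear_hl_en(struct nix_cn10k_aq_enq_req *aq_req)
{
	memset(&aq_req->prof_mask, 0, sizeof(aq_req->prof_mask));
	aq_req->prof.hl_en = 0;		/* new value */
	aq_req->prof_mask.hl_en = 1;	/* update this field only */
}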
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 0048b5946712..d463dc72d80a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -11,4 +11,7 @@ rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_devlink.o
rvu_nicvf-y := otx2_vf.o otx2_devlink.o
+rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
+rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
+
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 66da31f30d3e..b9d7601138ca 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -222,8 +222,11 @@ EXPORT_SYMBOL(otx2_set_mac_address);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
{
struct nix_frs_cfg *req;
+ u16 maxlen;
int err;
+ maxlen = otx2_get_max_mtu(pfvf) + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
if (!req) {
@@ -233,6 +236,10 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+ /* Use max receive length supported by hardware for loopback devices */
+ if (is_otx2_lbkvf(pfvf->pdev))
+ req->maxlen = maxlen;
+
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
return err;
@@ -262,6 +269,7 @@ unlock:
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+EXPORT_SYMBOL(otx2_config_pause_frm);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
{
@@ -931,7 +939,11 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
if (!is_otx2_lbkvf(pfvf->pdev)) {
/* Enable receive CQ backpressure */
aq->cq.bp_ena = 1;
+#ifdef CONFIG_DCB
+ aq->cq.bpid = pfvf->bpid[pfvf->queue_to_pfc_map[qidx]];
+#else
aq->cq.bpid = pfvf->bpid[0];
+#endif
/* Set backpressure level is same as cq pass level */
aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
@@ -1036,7 +1048,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
struct nix_lf_alloc_rsp *rsp;
int err;
- pfvf->qset.xqe_size = NIX_XQESZ_W16 ? 128 : 512;
+ pfvf->qset.xqe_size = pfvf->hw.xqe_size;
/* Get memory to put this msg */
nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
@@ -1049,7 +1061,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
nixlf->cq_cnt = pfvf->qset.cq_cnt;
nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
nixlf->rss_grps = MAX_RSS_GROUPS;
- nixlf->xqe_sz = NIX_XQESZ_W16;
+ nixlf->xqe_sz = pfvf->hw.xqe_size == 128 ? NIX_XQESZ_W16 : NIX_XQESZ_W64;
/* We don't know absolute NPA LF idx attached.
* AF will replace 'RVU_DEFAULT_PF_FUNC' with
* NPA LF attached to this RVU PF/VF.
@@ -1211,7 +1223,11 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
*/
if (pfvf->nix_blkaddr == BLKADDR_NIX1)
aq->aura.bp_ena = 1;
+#ifdef CONFIG_DCB
+ aq->aura.nix0_bpid = pfvf->bpid[pfvf->queue_to_pfc_map[aura_id]];
+#else
aq->aura.nix0_bpid = pfvf->bpid[0];
+#endif
/* Set backpressure level for RQ's Aura */
aq->aura.bp = RQ_BP_LVL_AURA;
@@ -1538,11 +1554,18 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
return -ENOMEM;
req->chan_base = 0;
- req->chan_cnt = 1;
+#ifdef CONFIG_DCB
+ req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1;
+ req->bpid_per_chan = pfvf->pfc_en ? 1 : 0;
+#else
+ req->chan_cnt = 1;
req->bpid_per_chan = 0;
+#endif
+
return otx2_sync_mbox_msg(&pfvf->mbox);
}
+EXPORT_SYMBOL(otx2_nix_config_bp);
/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
@@ -1704,6 +1727,56 @@ out:
}
EXPORT_SYMBOL(otx2_get_max_mtu);
+int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features)
+{
+ netdev_features_t changed = features ^ netdev->features;
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ bool ntuple = !!(features & NETIF_F_NTUPLE);
+ bool tc = !!(features & NETIF_F_HW_TC);
+
+ if ((changed & NETIF_F_NTUPLE) && !ntuple)
+ otx2_destroy_ntuple_flows(pfvf);
+
+ if ((changed & NETIF_F_NTUPLE) && ntuple) {
+ if (!pfvf->flow_cfg->max_flows) {
+ netdev_err(netdev,
+ "Can't enable NTUPLE, MCAM entries not allocated\n");
+ return -EINVAL;
+ }
+ }
+
+ if ((changed & NETIF_F_HW_TC) && tc) {
+ if (!pfvf->flow_cfg->max_flows) {
+ netdev_err(netdev,
+ "Can't enable TC, MCAM entries not allocated\n");
+ return -EINVAL;
+ }
+ }
+
+ if ((changed & NETIF_F_HW_TC) && !tc &&
+ pfvf->flow_cfg && pfvf->flow_cfg->nr_flows) {
+ netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
+ return -EBUSY;
+ }
+
+ if ((changed & NETIF_F_NTUPLE) && ntuple &&
+ (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
+ netdev_err(netdev,
+ "Can't enable NTUPLE when TC is active, disable TC and retry\n");
+ return -EINVAL;
+ }
+
+ if ((changed & NETIF_F_HW_TC) && tc &&
+ (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
+ netdev_err(netdev,
+ "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_handle_ntuple_tc_features);
+
#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
int __weak \
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
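otx2_handle_ntuple_tc_features() above, now shared by the PF and VF paths, is built around one idiom: XOR-ing the requested feature set against the current one isolates exactly the bits being toggled. A minimal illustration of the on/off tests it composes (sketch, not driver code):

static bool feature_turning_on(netdev_features_t wanted,
			       netdev_features_t current_feats,
			       netdev_features_t flag)
{
	netdev_features_t changed = wanted ^ current_feats;

	/* the bit is toggling and the requested state has it set */
	return (changed & flag) && (wanted & flag);
}

static bool feature_turning_off(netdev_features_t wanted,
				netdev_features_t current_feats,
				netdev_features_t flag)
{
	netdev_features_t changed = wanted ^ current_feats;

	return (changed & flag) && !(wanted & flag);
}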
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 14509fc64cce..c587c14ac2a3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -17,6 +17,7 @@
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/pkt_cls.h>
#include <net/devlink.h>
+#include <linux/time64.h>
#include <mbox.h>
#include <npc.h>
@@ -178,6 +179,10 @@ struct otx2_hw {
u16 rqpool_cnt;
u16 sqpool_cnt;
+#define OTX2_DEFAULT_RBUF_LEN 2048
+ u16 rbuf_len;
+ u32 xqe_size;
+
/* NPA */
u32 stack_pg_ptrs; /* No of ptrs per stack page */
u32 stack_pg_bytes; /* Size of stack page */
@@ -272,6 +277,8 @@ struct otx2_ptp {
u64 thresh;
struct ptp_pin_desc extts_config;
+ u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
+ u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
};
#define OTX2_HW_TIMESTAMP_LEN 8
@@ -396,6 +403,11 @@ struct otx2_nic {
/* Devlink */
struct otx2_devlink *dl;
+#ifdef CONFIG_DCB
+ /* PFC */
+ u8 pfc_en;
+ u8 *queue_to_pfc_map;
+#endif
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -863,6 +875,8 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
+int otx2_handle_ntuple_tc_features(struct net_device *netdev,
+ netdev_features_t features);
/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
@@ -876,4 +890,11 @@ int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos);
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
+
+#ifdef CONFIG_DCB
+/* DCB support*/
+void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
+int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
+int otx2_dcbnl_set_ops(struct net_device *dev);
+#endif
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
new file mode 100644
index 000000000000..723d2506d309
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include "otx2_common.h"
+
+int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
+{
+ struct cgx_pfc_cfg *req;
+ struct cgx_pfc_rsp *rsp;
+ int err = 0;
+
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(&pfvf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ if (pfvf->pfc_en) {
+ req->rx_pause = true;
+ req->tx_pause = true;
+ } else {
+ req->rx_pause = false;
+ req->tx_pause = false;
+ }
+ req->pfc_en = pfvf->pfc_en;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
+ rsp = (struct cgx_pfc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) {
+ dev_warn(pfvf->dev,
+ "Failed to config PFC\n");
+ err = -EPERM;
+ }
+ }
+unlock:
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
+ bool pfc_enable)
+{
+ bool if_up = netif_running(pfvf->netdev);
+ struct npa_aq_enq_req *npa_aq;
+ struct nix_aq_enq_req *aq;
+ int err = 0;
+
+ if (pfvf->queue_to_pfc_map[qidx] && pfc_enable) {
+ dev_warn(pfvf->dev,
+ "PFC enable not permitted as Priority %d already mapped to Queue %d\n",
+ pfvf->queue_to_pfc_map[qidx], qidx);
+ return;
+ }
+
+ if (if_up) {
+ netif_tx_stop_all_queues(pfvf->netdev);
+ netif_carrier_off(pfvf->netdev);
+ }
+
+ pfvf->queue_to_pfc_map[qidx] = vlan_prio;
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ aq->cq.bpid = pfvf->bpid[vlan_prio];
+ aq->cq_mask.bpid = GENMASK(8, 0);
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+ npa_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!npa_aq) {
+ err = -ENOMEM;
+ goto out;
+ }
+ npa_aq->aura.nix0_bpid = pfvf->bpid[vlan_prio];
+ npa_aq->aura_mask.nix0_bpid = GENMASK(8, 0);
+
+ /* Fill NPA AQ info */
+ npa_aq->aura_id = qidx;
+ npa_aq->ctype = NPA_AQ_CTYPE_AURA;
+ npa_aq->op = NPA_AQ_INSTOP_WRITE;
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+out:
+ if (if_up) {
+ netif_carrier_on(pfvf->netdev);
+ netif_tx_start_all_queues(pfvf->netdev);
+ }
+
+ if (err)
+ dev_warn(pfvf->dev,
+ "Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
+ qidx, err);
+}
+
+static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
+ pfc->pfc_en = pfvf->pfc_en;
+
+ return 0;
+}
+
+static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int err;
+
+ /* Save PFC configuration to interface */
+ pfvf->pfc_en = pfc->pfc_en;
+
+ err = otx2_config_priority_flow_ctrl(pfvf);
+ if (err)
+ return err;
+
+ /* Request per-channel BPIDs */
+ if (pfc->pfc_en)
+ otx2_nix_config_bp(pfvf, true);
+
+ return 0;
+}
+
+static u8 otx2_dcbnl_getdcbx(struct net_device __always_unused *dev)
+{
+ return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 otx2_dcbnl_setdcbx(struct net_device __always_unused *dev, u8 mode)
+{
+ return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
+}
+
+static const struct dcbnl_rtnl_ops otx2_dcbnl_ops = {
+ .ieee_getpfc = otx2_dcbnl_ieee_getpfc,
+ .ieee_setpfc = otx2_dcbnl_ieee_setpfc,
+ .getdcbx = otx2_dcbnl_getdcbx,
+ .setdcbx = otx2_dcbnl_setdcbx,
+};
+
+int otx2_dcbnl_set_ops(struct net_device *dev)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ pfvf->queue_to_pfc_map = devm_kzalloc(pfvf->dev, pfvf->hw.rx_queues,
+ GFP_KERNEL);
+ if (!pfvf->queue_to_pfc_map)
+ return -ENOMEM;
+ dev->dcbnl_ops = &otx2_dcbnl_ops;
+
+ return 0;
+}
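For context, the ieee_setpfc() hook above is normally reached through the dcbnl netlink interface (for example from lldpad); pfc_en is a bitmap with one bit per traffic priority. A hypothetical in-kernel caller enabling PFC on priorities 3 and 4 would look like this (illustration only, not part of the patch):

static int example_enable_pfc(struct net_device *dev)
{
	struct ieee_pfc pfc = {
		.pfc_cap = IEEE_8021QAZ_MAX_TCS,
		.pfc_en  = BIT(3) | BIT(4),	/* priorities 3 and 4 */
	};

	if (!dev->dcbnl_ops || !dev->dcbnl_ops->ieee_setpfc)
		return -EOPNOTSUPP;

	return dev->dcbnl_ops->ieee_setpfc(dev, &pfc);
}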
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index d85db90632d6..fc328de5345e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -371,6 +371,8 @@ static void otx2_get_ringparam(struct net_device *netdev,
ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256);
ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX);
ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K);
+ kernel_ring->rx_buf_len = pfvf->hw.rbuf_len;
+ kernel_ring->cqe_size = pfvf->hw.xqe_size;
}
static int otx2_set_ringparam(struct net_device *netdev,
@@ -379,6 +381,9 @@ static int otx2_set_ringparam(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
+ u32 rx_buf_len = kernel_ring->rx_buf_len;
+ u32 old_rx_buf_len = pfvf->hw.rbuf_len;
+ u32 xqe_size = kernel_ring->cqe_size;
bool if_up = netif_running(netdev);
struct otx2_qset *qs = &pfvf->qset;
u32 rx_count, tx_count;
@@ -386,6 +391,21 @@ static int otx2_set_ringparam(struct net_device *netdev,
if (ring->rx_mini_pending || ring->rx_jumbo_pending)
return -EINVAL;
+ /* Hardware supports a maximum receive buffer size of 32K;
+ * 1536 is the typical Ethernet frame size.
+ */
+ if (rx_buf_len && (rx_buf_len < 1536 || rx_buf_len > 32768)) {
+ netdev_err(netdev,
+ "Receive buffer range is 1536 - 32768");
+ return -EINVAL;
+ }
+
+ if (xqe_size != 128 && xqe_size != 512) {
+ netdev_err(netdev,
+ "Completion event size must be 128 or 512");
+ return -EINVAL;
+ }
+
/* Permitted lengths are 16 64 256 1K 4K 16K 64K 256K 1M */
rx_count = ring->rx_pending;
/* On some silicon variants a skid or reserved CQEs are
@@ -403,7 +423,8 @@ static int otx2_set_ringparam(struct net_device *netdev,
Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX));
tx_count = Q_COUNT(Q_SIZE(tx_count, 3));
- if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt)
+ if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt &&
+ rx_buf_len == old_rx_buf_len && xqe_size == pfvf->hw.xqe_size)
return 0;
if (if_up)
@@ -413,6 +434,9 @@ static int otx2_set_ringparam(struct net_device *netdev,
qs->sqe_cnt = tx_count;
qs->rqe_cnt = rx_count;
+ pfvf->hw.rbuf_len = rx_buf_len;
+ pfvf->hw.xqe_size = xqe_size;
+
if (if_up)
return netdev->netdev_ops->ndo_open(netdev);
@@ -1207,6 +1231,8 @@ end:
static const struct ethtool_ops otx2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
+ .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
+ ETHTOOL_RING_USE_CQE_SIZE,
.get_link = otx2_get_link,
.get_drvinfo = otx2_get_drvinfo,
.get_strings = otx2_get_strings,
@@ -1326,6 +1352,8 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
static const struct ethtool_ops otx2vf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
+ .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
+ ETHTOOL_RING_USE_CQE_SIZE,
.get_link = otx2_get_link,
.get_drvinfo = otx2vf_get_drvinfo,
.get_strings = otx2vf_get_strings,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 77a13fb555fb..54f235c216a9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -21,8 +21,10 @@ struct otx2_flow {
u16 entry;
bool is_vf;
u8 rss_ctx_id;
+#define DMAC_FILTER_RULE BIT(0)
+#define PFC_FLOWCTRL_RULE BIT(1)
+ u16 rule_type;
int vf;
- bool dmac_filter;
};
enum dmac_req {
@@ -899,6 +901,9 @@ static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
u64 ring_cookie = flow->flow_spec.ring_cookie;
+#ifdef CONFIG_DCB
+ int vlan_prio, qidx, pfc_rule = 0;
+#endif
struct npc_install_flow_req *req;
int err, vf = 0;
@@ -940,6 +945,24 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
mutex_unlock(&pfvf->mbox.lock);
return -EINVAL;
}
+
+#ifdef CONFIG_DCB
+ /* Identify a PFC rule if PFC is enabled and the ntuple rule matches on VLAN */
+ if (!vf && (req->features & BIT_ULL(NPC_OUTER_VID)) &&
+ pfvf->pfc_en && req->op != NIX_RX_ACTIONOP_RSS) {
+ vlan_prio = ntohs(req->packet.vlan_tci) &
+ ntohs(req->mask.vlan_tci);
+
+ /* Get the priority */
+ vlan_prio >>= 13;
+ flow->rule_type |= PFC_FLOWCTRL_RULE;
+ /* Check if PFC enabled for this priority */
+ if (pfvf->pfc_en & BIT(vlan_prio)) {
+ pfc_rule = true;
+ qidx = req->index;
+ }
+ }
+#endif
}
/* ethtool ring_cookie has (VF + 1) for VF */
@@ -951,6 +974,12 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
/* Send message to AF */
err = otx2_sync_mbox_msg(&pfvf->mbox);
+
+#ifdef CONFIG_DCB
+ if (!err && pfc_rule)
+ otx2_update_bpid_in_rqctx(pfvf, vlan_prio, qidx, true);
+#endif
+
mutex_unlock(&pfvf->mbox.lock);
return err;
}
@@ -966,7 +995,7 @@ static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
return -ENOMEM;
pf_mac->entry = 0;
- pf_mac->dmac_filter = true;
+ pf_mac->rule_type |= DMAC_FILTER_RULE;
pf_mac->location = pfvf->flow_cfg->max_flows;
memcpy(&pf_mac->flow_spec, &flow->flow_spec,
sizeof(struct ethtool_rx_flow_spec));
@@ -1031,7 +1060,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
eth_hdr = &flow->flow_spec.h_u.ether_spec;
/* Sync dmac filter table with updated fields */
- if (flow->dmac_filter)
+ if (flow->rule_type & DMAC_FILTER_RULE)
return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
flow->entry);
@@ -1052,7 +1081,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (!test_bit(0, &flow_cfg->dmacflt_bmap))
otx2_add_flow_with_pfmac(pfvf, flow);
- flow->dmac_filter = true;
+ flow->rule_type |= DMAC_FILTER_RULE;
flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows);
fsp->location = flow_cfg->max_flows + flow->entry;
@@ -1120,7 +1149,7 @@ static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
bool found = false;
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
- if (iter->dmac_filter && iter->entry == 0) {
+ if ((iter->rule_type & DMAC_FILTER_RULE) && iter->entry == 0) {
eth_hdr = &iter->flow_spec.h_u.ether_spec;
if (req == DMAC_ADDR_DEL) {
otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
@@ -1156,7 +1185,7 @@ int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
if (!flow)
return -ENOENT;
- if (flow->dmac_filter) {
+ if (flow->rule_type & DMAC_FILTER_RULE) {
struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;
/* user not allowed to remove dmac filter with interface mac */
@@ -1174,6 +1203,13 @@ int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
flow_cfg->dmacflt_max_flows) == 1)
otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
} else {
+#ifdef CONFIG_DCB
+ if (flow->rule_type & PFC_FLOWCTRL_RULE)
+ otx2_update_bpid_in_rqctx(pfvf, 0,
+ flow->flow_spec.ring_cookie,
+ false);
+#endif
+
err = otx2_remove_flow_msg(pfvf, flow->entry, false);
}
@@ -1383,7 +1419,7 @@ void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
struct ethhdr *eth_hdr;
list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
- if (iter->dmac_filter) {
+ if (iter->rule_type & DMAC_FILTER_RULE) {
eth_hdr = &iter->flow_spec.h_u.ether_spec;
otx2_dmacflt_add(pf, eth_hdr->h_dest,
iter->entry);
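The PFC-rule detection added to otx2_add_flow_msg() above open-codes the priority extraction with ">> 13". The PCP field sits in the top three bits of the 16-bit VLAN TCI, so the same computation can be written with the <linux/if_vlan.h> constants (equivalent sketch):

#include <linux/if_vlan.h>

static u8 vlan_tci_to_prio(__be16 tci, __be16 tci_mask)
{
	u16 masked = ntohs(tci) & ntohs(tci_mask);

	return (masked & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;	/* 0..7 */
}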
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index d39341e4ab37..441aafc26a08 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1311,6 +1311,9 @@ static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
int total_size;
int rbuf_size;
+ if (pf->hw.rbuf_len)
+ return ALIGN(pf->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;
+
/* The data transferred by NIX to memory consists of actual packet
* plus additional data which has timestamp and/or EDSA/HIGIG2
* headers if interface is configured in corresponding modes.
@@ -1694,9 +1697,6 @@ int otx2_open(struct net_device *netdev)
if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
otx2_handle_link_event(pf);
- /* Restore pause frame settings */
- otx2_config_pause_frm(pf);
-
/* Install DMAC Filters */
if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
otx2_dmacflt_reinstall_flows(pf);
@@ -1863,9 +1863,7 @@ static int otx2_set_features(struct net_device *netdev,
netdev_features_t features)
{
netdev_features_t changed = features ^ netdev->features;
- bool ntuple = !!(features & NETIF_F_NTUPLE);
struct otx2_nic *pf = netdev_priv(netdev);
- bool tc = !!(features & NETIF_F_HW_TC);
if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
return otx2_cgx_config_loopback(pf,
@@ -1875,46 +1873,7 @@ static int otx2_set_features(struct net_device *netdev,
return otx2_enable_rxvlan(pf,
features & NETIF_F_HW_VLAN_CTAG_RX);
- if ((changed & NETIF_F_NTUPLE) && !ntuple)
- otx2_destroy_ntuple_flows(pf);
-
- if ((changed & NETIF_F_NTUPLE) && ntuple) {
- if (!pf->flow_cfg->max_flows) {
- netdev_err(netdev,
- "Can't enable NTUPLE, MCAM entries not allocated\n");
- return -EINVAL;
- }
- }
-
- if ((changed & NETIF_F_HW_TC) && tc) {
- if (!pf->flow_cfg->max_flows) {
- netdev_err(netdev,
- "Can't enable TC, MCAM entries not allocated\n");
- return -EINVAL;
- }
- }
-
- if ((changed & NETIF_F_HW_TC) && !tc &&
- pf->flow_cfg && pf->flow_cfg->nr_flows) {
- netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
- return -EBUSY;
- }
-
- if ((changed & NETIF_F_NTUPLE) && ntuple &&
- (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
- netdev_err(netdev,
- "Can't enable NTUPLE when TC is active, disable TC and retry\n");
- return -EINVAL;
- }
-
- if ((changed & NETIF_F_HW_TC) && tc &&
- (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
- netdev_err(netdev,
- "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
- return -EINVAL;
- }
-
- return 0;
+ return otx2_handle_ntuple_tc_features(netdev, features);
}
static void otx2_reset_task(struct work_struct *work)
@@ -2625,6 +2584,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->tx_queues = qcount;
hw->tot_tx_queues = qcount;
hw->max_queues = qcount;
+ hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
+ /* Use CQE of 128 byte descriptor size by default */
+ hw->xqe_size = 128;
num_vec = pci_msix_vec_count(pdev);
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
@@ -2778,9 +2740,11 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Enable link notifications */
otx2_cgx_config_linkevents(pf, true);
- /* Enable pause frames by default */
- pf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
- pf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+#ifdef CONFIG_DCB
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_pf_sriov_init;
+#endif
return 0;
@@ -2925,6 +2889,21 @@ static void otx2_remove(struct pci_dev *pdev)
if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
otx2_config_hw_rx_tstamp(pf, false);
+ /* Disable 802.3x pause frames */
+ if (pf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
+ (pf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
+ pf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+ pf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+ otx2_config_pause_frm(pf);
+ }
+
+#ifdef CONFIG_DCB
+ /* Disable PFC config */
+ if (pf->pfc_en) {
+ pf->pfc_en = 0;
+ otx2_config_priority_flow_ctrl(pf);
+ }
+#endif
cancel_work_sync(&pf->reset_task);
/* Disable link notifications */
otx2_cgx_config_linkevents(pf, false);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 61c20907315f..fdc2c9315b91 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -294,6 +294,14 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
goto error;
}
+ if (is_dev_otx2(pfvf->pdev)) {
+ ptp_ptr->convert_rx_ptp_tstmp = &otx2_ptp_convert_rx_timestamp;
+ ptp_ptr->convert_tx_ptp_tstmp = &otx2_ptp_convert_tx_timestamp;
+ } else {
+ ptp_ptr->convert_rx_ptp_tstmp = &cn10k_ptp_convert_timestamp;
+ ptp_ptr->convert_tx_ptp_tstmp = &cn10k_ptp_convert_timestamp;
+ }
+
pfvf->ptp = ptp_ptr;
error:
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
index 6ff284211d7b..7ff41927ceaf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
@@ -8,6 +8,21 @@
#ifndef OTX2_PTP_H
#define OTX2_PTP_H
+static inline u64 otx2_ptp_convert_rx_timestamp(u64 timestamp)
+{
+ return be64_to_cpu(*(__be64 *)&timestamp);
+}
+
+static inline u64 otx2_ptp_convert_tx_timestamp(u64 timestamp)
+{
+ return timestamp;
+}
+
+static inline u64 cn10k_ptp_convert_timestamp(u64 timestamp)
+{
+ return ((timestamp >> 32) * NSEC_PER_SEC) + (timestamp & 0xFFFFFFFFUL);
+}
+
int otx2_ptp_init(struct otx2_nic *pfvf);
void otx2_ptp_destroy(struct otx2_nic *pfvf);
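A quick worked example for cn10k_ptp_convert_timestamp() above: CN10K hardware reports seconds in the upper 32 bits and nanoseconds in the lower 32, while pre-CN10K silicon delivers a big-endian nanosecond count on RX, which is why the two convert_*_ptp_tstmp callbacks are chosen per chip family in otx2_ptp_init().

#include <linux/time64.h>

static u64 cn10k_ts_example(void)
{
	u64 raw = (2ULL << 32) | 500000000ULL;	/* 2 s + 0.5 s */

	/* ((raw >> 32) * NSEC_PER_SEC) + (raw & 0xFFFFFFFF) */
	return cn10k_ptp_convert_timestamp(raw);	/* 2500000000 ns */
}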
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 626961a41089..28b19945d716 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -58,7 +58,7 @@ int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
{
struct otx2_tc_info *tc = &nic->tc_info;
- if (!nic->flow_cfg->max_flows || is_otx2_vf(nic->pcifunc))
+ if (!nic->flow_cfg->max_flows)
return 0;
/* Max flows changed, free the existing bitmap */
@@ -190,6 +190,40 @@ static int otx2_tc_validate_flow(struct otx2_nic *nic,
return 0;
}
+static int otx2_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
struct tc_cls_matchall_offload *cls)
{
@@ -212,6 +246,10 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
entry = &cls->rule->action.entries[0];
switch (entry->id) {
case FLOW_ACTION_POLICE:
+ err = otx2_policer_validate(&cls->rule->action, entry, extack);
+ if (err)
+ return err;
+
if (entry->police.rate_pkt_ps) {
NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
return -EOPNOTSUPP;
@@ -315,6 +353,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
u8 nr_police = 0;
bool pps = false;
u64 rate;
+ int err;
int i;
if (!flow_action_has_entries(flow_action)) {
@@ -355,6 +394,10 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
return -EOPNOTSUPP;
}
+ err = otx2_policer_validate(flow_action, act, extack);
+ if (err)
+ return err;
+
if (act->police.rate_bytes_ps > 0) {
rate = act->police.rate_bytes_ps * 8;
burst = act->police.burst;
@@ -1023,6 +1066,7 @@ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
return -EOPNOTSUPP;
}
}
+EXPORT_SYMBOL(otx2_setup_tc);
static const struct rhashtable_params tc_flow_ht_params = {
.head_offset = offsetof(struct otx2_tc_flow, node),
@@ -1052,6 +1096,7 @@ int otx2_init_tc(struct otx2_nic *nic)
tc->flow_ht_params = tc_flow_ht_params;
return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
}
+EXPORT_SYMBOL(otx2_init_tc);
void otx2_shutdown_tc(struct otx2_nic *nic)
{
@@ -1060,3 +1105,4 @@ void otx2_shutdown_tc(struct otx2_nic *nic)
kfree(tc->tc_entries_bitmap);
rhashtable_destroy(&tc->flow_table);
}
+EXPORT_SYMBOL(otx2_shutdown_tc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 7c4068c5d1ac..c26de15b2ac3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -148,6 +148,7 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
if (timestamp != 1) {
+ timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp);
err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
if (!err) {
memset(&ts, 0, sizeof(ts));
@@ -167,14 +168,15 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
struct sk_buff *skb, void *data)
{
- u64 tsns;
+ u64 timestamp, tsns;
int err;
if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
return;
+ timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data);
/* The first 8 bytes is the timestamp */
- err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(__be64 *)data), &tsns);
+ err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
if (err)
return;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 925b74ebb8b0..9e87836ed8bf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -472,23 +472,7 @@ static void otx2vf_reset_task(struct work_struct *work)
static int otx2vf_set_features(struct net_device *netdev,
netdev_features_t features)
{
- netdev_features_t changed = features ^ netdev->features;
- bool ntuple_enabled = !!(features & NETIF_F_NTUPLE);
- struct otx2_nic *vf = netdev_priv(netdev);
-
- if (changed & NETIF_F_NTUPLE) {
- if (!ntuple_enabled) {
- otx2_mcam_flow_del(vf);
- return 0;
- }
-
- if (!otx2_get_maxflows(vf->flow_cfg)) {
- netdev_err(netdev,
- "Can't enable NTUPLE, MCAM entries not allocated\n");
- return -EINVAL;
- }
- }
- return 0;
+ return otx2_handle_ntuple_tc_features(netdev, features);
}
static const struct net_device_ops otx2vf_netdev_ops = {
@@ -502,6 +486,7 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_get_stats64 = otx2_get_stats64,
.ndo_tx_timeout = otx2_tx_timeout,
.ndo_eth_ioctl = otx2_ioctl,
+ .ndo_setup_tc = otx2_setup_tc,
};
static int otx2_wq_init(struct otx2_nic *vf)
@@ -586,6 +571,9 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->tx_queues = qcount;
hw->max_queues = qcount;
hw->tot_tx_queues = qcount;
+ hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
+ /* Use CQE of 128 byte descriptor size by default */
+ hw->xqe_size = 128;
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
GFP_KERNEL);
@@ -662,6 +650,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features |= NETIF_F_NTUPLE;
netdev->hw_features |= NETIF_F_RXALL;
+ netdev->hw_features |= NETIF_F_HW_TC;
netif_set_gso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
@@ -697,16 +686,24 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_unreg_netdev;
- err = otx2_register_dl(vf);
+ err = otx2_init_tc(vf);
if (err)
goto err_unreg_netdev;
- /* Enable pause frames by default */
- vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
- vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+ err = otx2_register_dl(vf);
+ if (err)
+ goto err_shutdown_tc;
+
+#ifdef CONFIG_DCB
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_shutdown_tc;
+#endif
return 0;
+err_shutdown_tc:
+ otx2_shutdown_tc(vf);
err_unreg_netdev:
unregister_netdev(netdev);
err_ptp_destroy:
@@ -739,6 +736,22 @@ static void otx2vf_remove(struct pci_dev *pdev)
vf = netdev_priv(netdev);
+ /* Disable 802.3x pause frames */
+ if (vf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
+ (vf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
+ vf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+ vf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+ otx2_config_pause_frm(vf);
+ }
+
+#ifdef CONFIG_DCB
+ /* Disable PFC config */
+ if (vf->pfc_en) {
+ vf->pfc_en = 0;
+ otx2_config_priority_flow_ctrl(vf);
+ }
+#endif
+
cancel_work_sync(&vf->reset_task);
otx2_unregister_dl(vf);
unregister_netdev(netdev);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h
index 2fd9ef2fe5d6..6f754ae2a584 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera.h
@@ -281,8 +281,11 @@ struct prestera_router {
struct prestera_switch *sw;
struct list_head vr_list;
struct list_head rif_entry_list;
+ struct rhashtable fib_ht;
+ struct rhashtable kern_fib_cache_ht;
struct notifier_block inetaddr_nb;
struct notifier_block inetaddr_valid_nb;
+ struct notifier_block fib_nb;
};
struct prestera_rxtx_params {
@@ -325,6 +328,8 @@ int prestera_port_cfg_mac_write(struct prestera_port *port,
struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev);
+void prestera_queue_work(struct work_struct *work);
+
int prestera_port_pvid_set(struct prestera_port *port, u16 vid);
bool prestera_netdev_check(const struct net_device *dev);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
index f0d9f592173b..47c899c08951 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
@@ -22,6 +22,7 @@ struct prestera_acl {
struct prestera_acl_ruleset_ht_key {
struct prestera_flow_block *block;
+ u32 chain_index;
};
struct prestera_acl_rule_entry {
@@ -34,6 +35,10 @@ struct prestera_acl_rule_entry {
u8 valid:1;
} accept, drop, trap;
struct {
+ struct prestera_acl_action_jump i;
+ u8 valid:1;
+ } jump;
+ struct {
u32 id;
struct prestera_counter_block *block;
} counter;
@@ -49,6 +54,7 @@ struct prestera_acl_ruleset {
refcount_t refcount;
void *keymask;
u32 vtcam_id;
+ u32 index;
u16 pcl_id;
bool offload;
};
@@ -83,20 +89,45 @@ static const struct rhashtable_params __prestera_acl_rule_entry_ht_params = {
.automatic_shrinking = true,
};
+int prestera_acl_chain_to_client(u32 chain_index, u32 *client)
+{
+ static const u32 client_map[] = {
+ PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0,
+ PRESTERA_HW_COUNTER_CLIENT_LOOKUP_1,
+ PRESTERA_HW_COUNTER_CLIENT_LOOKUP_2
+ };
+
+ if (chain_index >= ARRAY_SIZE(client_map))
+ return -EINVAL;
+
+ *client = client_map[chain_index];
+ return 0;
+}
+
+static bool prestera_acl_chain_is_supported(u32 chain_index)
+{
+ return (chain_index & ~PRESTERA_ACL_CHAIN_MASK) == 0;
+}
+
static struct prestera_acl_ruleset *
prestera_acl_ruleset_create(struct prestera_acl *acl,
- struct prestera_flow_block *block)
+ struct prestera_flow_block *block,
+ u32 chain_index)
{
struct prestera_acl_ruleset *ruleset;
u32 uid = 0;
int err;
+ if (!prestera_acl_chain_is_supported(chain_index))
+ return ERR_PTR(-EINVAL);
+
ruleset = kzalloc(sizeof(*ruleset), GFP_KERNEL);
if (!ruleset)
return ERR_PTR(-ENOMEM);
ruleset->acl = acl;
ruleset->ht_key.block = block;
+ ruleset->ht_key.chain_index = chain_index;
refcount_set(&ruleset->refcount, 1);
err = rhashtable_init(&ruleset->rule_ht, &prestera_acl_rule_ht_params);
@@ -108,7 +139,9 @@ prestera_acl_ruleset_create(struct prestera_acl *acl,
goto err_ruleset_create;
/* make pcl-id based on uid */
- ruleset->pcl_id = (u8)uid;
+ ruleset->pcl_id = PRESTERA_ACL_PCL_ID_MAKE((u8)uid, chain_index);
+ ruleset->index = uid;
+
err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
prestera_acl_ruleset_ht_params);
if (err)
@@ -133,35 +166,64 @@ void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset)
{
+ struct prestera_acl_iface iface;
u32 vtcam_id;
int err;
if (ruleset->offload)
return -EEXIST;
- err = prestera_acl_vtcam_id_get(ruleset->acl, 0,
+ err = prestera_acl_vtcam_id_get(ruleset->acl,
+ ruleset->ht_key.chain_index,
ruleset->keymask, &vtcam_id);
if (err)
- return err;
+ goto err_vtcam_create;
+
+ if (ruleset->ht_key.chain_index) {
+ /* for chain > 0, bind iface index to pcl-id to be able
+ * to jump from any other ruleset to this one using the index.
+ */
+ iface.index = ruleset->index;
+ iface.type = PRESTERA_ACL_IFACE_TYPE_INDEX;
+ err = prestera_hw_vtcam_iface_bind(ruleset->acl->sw, &iface,
+ vtcam_id, ruleset->pcl_id);
+ if (err)
+ goto err_ruleset_bind;
+ }
ruleset->vtcam_id = vtcam_id;
ruleset->offload = true;
return 0;
+
+err_ruleset_bind:
+ prestera_acl_vtcam_id_put(ruleset->acl, ruleset->vtcam_id);
+err_vtcam_create:
+ return err;
}
static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset)
{
struct prestera_acl *acl = ruleset->acl;
u8 uid = ruleset->pcl_id & PRESTERA_ACL_KEYMASK_PCL_ID_USER;
+ int err;
rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
prestera_acl_ruleset_ht_params);
- if (ruleset->offload)
+ if (ruleset->offload) {
+ if (ruleset->ht_key.chain_index) {
+ struct prestera_acl_iface iface = {
+ .type = PRESTERA_ACL_IFACE_TYPE_INDEX,
+ .index = ruleset->index
+ };
+ err = prestera_hw_vtcam_iface_unbind(acl->sw, &iface,
+ ruleset->vtcam_id);
+ WARN_ON(err);
+ }
WARN_ON(prestera_acl_vtcam_id_put(acl, ruleset->vtcam_id));
+ }
idr_remove(&acl->uid, uid);
-
rhashtable_destroy(&ruleset->rule_ht);
kfree(ruleset->keymask);
kfree(ruleset);
@@ -169,23 +231,26 @@ static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset)
static struct prestera_acl_ruleset *
__prestera_acl_ruleset_lookup(struct prestera_acl *acl,
- struct prestera_flow_block *block)
+ struct prestera_flow_block *block,
+ u32 chain_index)
{
struct prestera_acl_ruleset_ht_key ht_key;
memset(&ht_key, 0, sizeof(ht_key));
ht_key.block = block;
+ ht_key.chain_index = chain_index;
return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
prestera_acl_ruleset_ht_params);
}
struct prestera_acl_ruleset *
prestera_acl_ruleset_lookup(struct prestera_acl *acl,
- struct prestera_flow_block *block)
+ struct prestera_flow_block *block,
+ u32 chain_index)
{
struct prestera_acl_ruleset *ruleset;
- ruleset = __prestera_acl_ruleset_lookup(acl, block);
+ ruleset = __prestera_acl_ruleset_lookup(acl, block, chain_index);
if (!ruleset)
return ERR_PTR(-ENOENT);
@@ -195,17 +260,18 @@ prestera_acl_ruleset_lookup(struct prestera_acl *acl,
struct prestera_acl_ruleset *
prestera_acl_ruleset_get(struct prestera_acl *acl,
- struct prestera_flow_block *block)
+ struct prestera_flow_block *block,
+ u32 chain_index)
{
struct prestera_acl_ruleset *ruleset;
- ruleset = __prestera_acl_ruleset_lookup(acl, block);
+ ruleset = __prestera_acl_ruleset_lookup(acl, block, chain_index);
if (ruleset) {
refcount_inc(&ruleset->refcount);
return ruleset;
}
- return prestera_acl_ruleset_create(acl, block);
+ return prestera_acl_ruleset_create(acl, block, chain_index);
}
void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset)
@@ -293,6 +359,11 @@ prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset,
prestera_acl_rule_ht_params);
}
+u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset)
+{
+ return ruleset->index;
+}
+
bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset)
{
return ruleset->offload;
@@ -300,7 +371,7 @@ bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset)
struct prestera_acl_rule *
prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset,
- unsigned long cookie)
+ unsigned long cookie, u32 chain_index)
{
struct prestera_acl_rule *rule;
@@ -310,6 +381,7 @@ prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset,
rule->ruleset = ruleset;
rule->cookie = cookie;
+ rule->chain_index = chain_index;
refcount_inc(&ruleset->refcount);
@@ -324,6 +396,10 @@ void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
void prestera_acl_rule_destroy(struct prestera_acl_rule *rule)
{
+ if (rule->jump_ruleset)
+ /* release ruleset kept by jump action */
+ prestera_acl_ruleset_put(rule->jump_ruleset);
+
prestera_acl_ruleset_put(rule->ruleset);
kfree(rule);
}
@@ -347,7 +423,10 @@ int prestera_acl_rule_add(struct prestera_switch *sw,
/* setup counter */
rule->re_arg.count.valid = true;
- rule->re_arg.count.client = PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0;
+ err = prestera_acl_chain_to_client(ruleset->ht_key.chain_index,
+ &rule->re_arg.count.client);
+ if (err)
+ goto err_rule_add;
rule->re = prestera_acl_rule_entry_find(sw->acl, &rule->re_key);
err = WARN_ON(rule->re) ? -EEXIST : 0;
@@ -360,8 +439,10 @@ int prestera_acl_rule_add(struct prestera_switch *sw,
if (err)
goto err_rule_add;
- /* bind the block (all ports) to chain index 0 */
- if (!ruleset->rule_count) {
+ /* bind the block (all ports) to chain index 0; the rest of
+ * the chains are reached through goto actions
+ */
+ if (!ruleset->ht_key.chain_index && !ruleset->rule_count) {
err = prestera_acl_ruleset_block_bind(ruleset, block);
if (err)
goto err_acl_block_bind;
@@ -395,7 +476,7 @@ void prestera_acl_rule_del(struct prestera_switch *sw,
prestera_acl_rule_entry_destroy(sw->acl, rule->re);
/* unbind block (all ports) */
- if (!ruleset->rule_count)
+ if (!ruleset->ht_key.chain_index && !ruleset->rule_count)
prestera_acl_ruleset_block_unbind(ruleset, block);
}
@@ -459,6 +540,12 @@ static int __prestera_acl_rule_entry2hw_add(struct prestera_switch *sw,
act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_TRAP;
act_num++;
}
+ /* jump */
+ if (e->jump.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_JUMP;
+ act_hw[act_num].jump = e->jump.i;
+ act_num++;
+ }
/* counter */
if (e->counter.block) {
act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_COUNT;
@@ -505,6 +592,9 @@ __prestera_acl_rule_entry_act_construct(struct prestera_switch *sw,
e->drop.valid = arg->drop.valid;
/* trap */
e->trap.valid = arg->trap.valid;
+ /* jump */
+ e->jump.valid = arg->jump.valid;
+ e->jump.i = arg->jump.i;
/* counter */
if (arg->count.valid) {
int err;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
index 40f6c1d961fa..6d2ad27682d1 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
@@ -10,6 +10,14 @@
#define PRESTERA_ACL_KEYMASK_PCL_ID 0x3FF
#define PRESTERA_ACL_KEYMASK_PCL_ID_USER \
(PRESTERA_ACL_KEYMASK_PCL_ID & 0x00FF)
+#define PRESTERA_ACL_KEYMASK_PCL_ID_CHAIN \
+ (PRESTERA_ACL_KEYMASK_PCL_ID & 0xFF00)
+#define PRESTERA_ACL_CHAIN_MASK \
+ (PRESTERA_ACL_KEYMASK_PCL_ID >> 8)
+
+#define PRESTERA_ACL_PCL_ID_MAKE(uid, chain_id) \
+ (((uid) & PRESTERA_ACL_KEYMASK_PCL_ID_USER) | \
+ (((chain_id) << 8) & PRESTERA_ACL_KEYMASK_PCL_ID_CHAIN))
#define rule_match_set_n(match_p, type, val_p, size) \
memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type], \
@@ -46,6 +54,7 @@ enum prestera_acl_rule_action {
PRESTERA_ACL_RULE_ACTION_ACCEPT = 0,
PRESTERA_ACL_RULE_ACTION_DROP = 1,
PRESTERA_ACL_RULE_ACTION_TRAP = 2,
+ PRESTERA_ACL_RULE_ACTION_JUMP = 5,
PRESTERA_ACL_RULE_ACTION_COUNT = 7,
PRESTERA_ACL_RULE_ACTION_MAX
@@ -61,6 +70,10 @@ struct prestera_acl_match {
__be32 mask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
};
+struct prestera_acl_action_jump {
+ u32 index;
+};
+
struct prestera_acl_action_count {
u32 id;
};
@@ -74,6 +87,7 @@ struct prestera_acl_hw_action_info {
enum prestera_acl_rule_action id;
union {
struct prestera_acl_action_count count;
+ struct prestera_acl_action_jump jump;
};
};
@@ -88,6 +102,10 @@ struct prestera_acl_rule_entry_arg {
u8 valid:1;
} accept, drop, trap;
struct {
+ struct prestera_acl_action_jump i;
+ u8 valid:1;
+ } jump;
+ struct {
u8 valid:1;
u32 client;
} count;
@@ -98,7 +116,9 @@ struct prestera_acl_rule {
struct rhash_head ht_node; /* Member of acl HT */
struct list_head list;
struct prestera_acl_ruleset *ruleset;
+ struct prestera_acl_ruleset *jump_ruleset;
unsigned long cookie;
+ u32 chain_index;
u32 priority;
struct prestera_acl_rule_entry_key re_key;
struct prestera_acl_rule_entry_arg re_arg;
@@ -122,7 +142,7 @@ void prestera_acl_fini(struct prestera_switch *sw);
struct prestera_acl_rule *
prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset,
- unsigned long cookie);
+ unsigned long cookie, u32 chain_index);
void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
u32 priority);
void prestera_acl_rule_destroy(struct prestera_acl_rule *rule);
@@ -147,10 +167,12 @@ prestera_acl_rule_entry_create(struct prestera_acl *acl,
struct prestera_acl_rule_entry_arg *arg);
struct prestera_acl_ruleset *
prestera_acl_ruleset_get(struct prestera_acl *acl,
- struct prestera_flow_block *block);
+ struct prestera_flow_block *block,
+ u32 chain_index);
struct prestera_acl_ruleset *
prestera_acl_ruleset_lookup(struct prestera_acl *acl,
- struct prestera_flow_block *block);
+ struct prestera_flow_block *block,
+ u32 chain_index);
void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
void *keymask);
bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset);
@@ -160,6 +182,7 @@ int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset,
struct prestera_port *port);
int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset,
struct prestera_port *port);
+u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset);
void
prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule,
u16 pcl_id);
@@ -167,5 +190,6 @@ prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule,
int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
void *keymask, u32 *vtcam_id);
int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id);
+int prestera_acl_chain_to_client(u32 chain_index, u32 *client);
#endif /* _PRESTERA_ACL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.c b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
index d849f046ece7..05c3ad98eba9 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
@@ -29,9 +29,6 @@ static int prestera_flow_block_mall_cb(struct prestera_flow_block *block,
static int prestera_flow_block_flower_cb(struct prestera_flow_block *block,
struct flow_cls_offload *f)
{
- if (f->common.chain_index != 0)
- return -EOPNOTSUPP;
-
switch (f->command) {
case FLOW_CLS_REPLACE:
return prestera_flower_replace(block, f);
@@ -71,6 +68,7 @@ static void prestera_flow_block_destroy(void *cb_priv)
prestera_flower_template_cleanup(block);
+ WARN_ON(!list_empty(&block->template_list));
WARN_ON(!list_empty(&block->binding_list));
kfree(block);
@@ -86,6 +84,7 @@ prestera_flow_block_create(struct prestera_switch *sw, struct net *net)
return NULL;
INIT_LIST_HEAD(&block->binding_list);
+ INIT_LIST_HEAD(&block->template_list);
block->net = net;
block->sw = sw;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.h b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
index 1ea5b745bf72..6550278b166a 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
@@ -8,7 +8,6 @@
struct prestera_port;
struct prestera_switch;
-struct prestera_flower_template;
struct prestera_flow_block_binding {
struct list_head list;
@@ -22,7 +21,7 @@ struct prestera_flow_block {
struct net *net;
struct prestera_acl_ruleset *ruleset_zero;
struct flow_block_cb *block_cb;
- struct prestera_flower_template *tmplt;
+ struct list_head template_list;
unsigned int rule_count;
};
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
index 19c1417fd05f..921959a980ee 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -8,26 +8,63 @@
struct prestera_flower_template {
struct prestera_acl_ruleset *ruleset;
+ struct list_head list;
+ u32 chain_index;
};
+static void
+prestera_flower_template_free(struct prestera_flower_template *template)
+{
+ prestera_acl_ruleset_put(template->ruleset);
+ list_del(&template->list);
+ kfree(template);
+}
+
void prestera_flower_template_cleanup(struct prestera_flow_block *block)
{
- if (block->tmplt) {
- /* put the reference to the ruleset kept in create */
- prestera_acl_ruleset_put(block->tmplt->ruleset);
- kfree(block->tmplt);
- block->tmplt = NULL;
- return;
- }
+ struct prestera_flower_template *template, *tmp;
+
+	/* put the references to all rulesets taken in template create */
+ list_for_each_entry_safe(template, tmp, &block->template_list, list)
+ prestera_flower_template_free(template);
+}
+
+static int
+prestera_flower_parse_goto_action(struct prestera_flow_block *block,
+ struct prestera_acl_rule *rule,
+ u32 chain_index,
+ const struct flow_action_entry *act)
+{
+ struct prestera_acl_ruleset *ruleset;
+
+ if (act->chain_index <= chain_index)
+ /* we can jump only forward */
+ return -EINVAL;
+
+ if (rule->re_arg.jump.valid)
+ return -EEXIST;
+
+ ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
+ act->chain_index);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ rule->re_arg.jump.valid = 1;
+ rule->re_arg.jump.i.index = prestera_acl_ruleset_index_get(ruleset);
+
+ rule->jump_ruleset = ruleset;
+
+ return 0;
}
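
Worked example of the two guards above (chain numbers invented):

	/* rule in chain 0, "goto chain 2": allowed; the target ruleset's
	 *   vTCAM index becomes the HW jump destination.
	 * rule in chain 2, "goto chain 0": -EINVAL, only forward jumps.
	 * second goto on the same rule:    -EEXIST, re_arg.jump is
	 *   already valid. */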
static int prestera_flower_parse_actions(struct prestera_flow_block *block,
struct prestera_acl_rule *rule,
struct flow_action *flow_action,
+ u32 chain_index,
struct netlink_ext_ack *extack)
{
const struct flow_action_entry *act;
- int i;
+ int err, i;
/* whole struct (rule->re_arg) must be initialized with 0 */
if (!flow_action_has_entries(flow_action))
@@ -53,6 +90,13 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block,
rule->re_arg.trap.valid = 1;
break;
+ case FLOW_ACTION_GOTO:
+ err = prestera_flower_parse_goto_action(block, rule,
+ chain_index,
+ act);
+ if (err)
+ return err;
+ break;
default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
pr_err("Unsupported action\n");
@@ -259,6 +303,7 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
}
return prestera_flower_parse_actions(block, rule, &f->rule->action,
+ f->common.chain_index,
f->common.extack);
}
@@ -270,12 +315,13 @@ int prestera_flower_replace(struct prestera_flow_block *block,
struct prestera_acl_rule *rule;
int err;
- ruleset = prestera_acl_ruleset_get(acl, block);
+ ruleset = prestera_acl_ruleset_get(acl, block, f->common.chain_index);
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
/* increments the ruleset reference */
- rule = prestera_acl_rule_create(ruleset, f->cookie);
+ rule = prestera_acl_rule_create(ruleset, f->cookie,
+ f->common.chain_index);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
goto err_rule_create;
@@ -312,7 +358,8 @@ void prestera_flower_destroy(struct prestera_flow_block *block,
struct prestera_acl_ruleset *ruleset;
struct prestera_acl_rule *rule;
- ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block);
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
+ f->common.chain_index);
if (IS_ERR(ruleset))
return;
@@ -345,7 +392,8 @@ int prestera_flower_tmplt_create(struct prestera_flow_block *block,
}
prestera_acl_rule_keymask_pcl_id_set(&rule, 0);
- ruleset = prestera_acl_ruleset_get(block->sw->acl, block);
+ ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
+ f->common.chain_index);
if (IS_ERR_OR_NULL(ruleset)) {
err = -EINVAL;
goto err_ruleset_get;
@@ -364,7 +412,8 @@ int prestera_flower_tmplt_create(struct prestera_flow_block *block,
/* keep the reference to the ruleset */
template->ruleset = ruleset;
- block->tmplt = template;
+ template->chain_index = f->common.chain_index;
+ list_add_rcu(&template->list, &block->template_list);
return 0;
err_ruleset_get:
@@ -377,7 +426,14 @@ err_malloc:
void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
struct flow_cls_offload *f)
{
- prestera_flower_template_cleanup(block);
+ struct prestera_flower_template *template, *tmp;
+
+ list_for_each_entry_safe(template, tmp, &block->template_list, list)
+ if (template->chain_index == f->common.chain_index) {
+ /* put the reference to the ruleset kept in create */
+ prestera_flower_template_free(template);
+ return;
+ }
}
int prestera_flower_stats(struct prestera_flow_block *block,
@@ -390,7 +446,8 @@ int prestera_flower_stats(struct prestera_flow_block *block,
u64 bytes;
int err;
- ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block);
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
+ f->common.chain_index);
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.h b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
index dc3aa4280e9f..495f151e6fa9 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
@@ -6,7 +6,6 @@
#include <net/pkt_cls.h>
-struct prestera_switch;
struct prestera_flow_block;
int prestera_flower_replace(struct prestera_flow_block *block,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
index e6bfadc874c5..c66cc929c820 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -55,6 +55,8 @@ enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE = 0x600,
PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE = 0x601,
+ PRESTERA_CMD_TYPE_ROUTER_LPM_ADD = 0x610,
+ PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE = 0x611,
PRESTERA_CMD_TYPE_ROUTER_VR_CREATE = 0x630,
PRESTERA_CMD_TYPE_ROUTER_VR_DELETE = 0x631,
@@ -424,6 +426,9 @@ struct prestera_msg_acl_action {
__le32 __reserved;
union {
struct {
+ __le32 index;
+ } jump;
+ struct {
__le32 id;
} count;
__le32 reserved[6];
@@ -499,6 +504,15 @@ struct prestera_msg_iface {
u8 __pad[3];
};
+struct prestera_msg_ip_addr {
+ union {
+ __be32 ipv4;
+ __be32 ipv6[4];
+ } u;
+ u8 v; /* e.g. PRESTERA_IPV4 */
+ u8 __pad[3];
+};
+
struct prestera_msg_rif_req {
struct prestera_msg_cmd cmd;
struct prestera_msg_iface iif;
@@ -515,6 +529,15 @@ struct prestera_msg_rif_resp {
u8 __pad[2];
};
+struct prestera_msg_lpm_req {
+ struct prestera_msg_cmd cmd;
+ struct prestera_msg_ip_addr dst;
+ __le32 grp_id;
+ __le32 dst_len;
+ __le16 vr_id;
+ u8 __pad[2];
+};
+
struct prestera_msg_vr_req {
struct prestera_msg_cmd cmd;
__le16 vr_id;
@@ -598,9 +621,11 @@ static void prestera_hw_build_tests(void)
BUILD_BUG_ON(sizeof(struct prestera_msg_counter_stats) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_rif_req) != 36);
BUILD_BUG_ON(sizeof(struct prestera_msg_vr_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_lpm_req) != 36);
 	/* structures that are part of req/resp fw messages */
BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_ip_addr) != 20);
/* check responses */
BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8);
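
The arithmetic behind the two new asserts, for reference: the address
union is max(4, 4 * 4) = 16 bytes, plus u8 v plus 3 pad bytes = 20 for
prestera_msg_ip_addr; and, inferring a 4-byte prestera_msg_cmd from the
existing asserts, the LPM request is 4 + 20 + 4 + 4 + 2 + 2 = 36. An
equivalent layout check that could sit next to them (illustrative):

	BUILD_BUG_ON(offsetof(struct prestera_msg_ip_addr, v) != 16);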
@@ -1164,6 +1189,9 @@ prestera_acl_rule_add_put_action(struct prestera_msg_acl_action *action,
case PRESTERA_ACL_RULE_ACTION_TRAP:
/* just rule action id, no specific data */
break;
+ case PRESTERA_ACL_RULE_ACTION_JUMP:
+ action->jump.index = __cpu_to_le32(info->jump.index);
+ break;
case PRESTERA_ACL_RULE_ACTION_COUNT:
action->count.id = __cpu_to_le32(info->count.id);
break;
@@ -1891,6 +1919,33 @@ int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id)
sizeof(req));
}
+int prestera_hw_lpm_add(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len, u32 grp_id)
+{
+ struct prestera_msg_lpm_req req = {
+ .dst_len = __cpu_to_le32(dst_len),
+ .vr_id = __cpu_to_le16(vr_id),
+ .grp_id = __cpu_to_le32(grp_id),
+ .dst.u.ipv4 = dst
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_LPM_ADD, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len)
+{
+ struct prestera_msg_lpm_req req = {
+ .dst_len = __cpu_to_le32(dst_len),
+ .vr_id = __cpu_to_le16(vr_id),
+ .dst.u.ipv4 = dst
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE, &req.cmd,
+ sizeof(req));
+}
+
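
A hedged usage sketch of the new LPM PI (the address is invented; dst is
passed through in big-endian form, exactly as the router code later in
this patch hands over key->addr.u.ipv4, and grp_id 0 corresponds to the
PRESTERA_NHGR_UNUSED value the TRAP fib type uses):

	/* trap 10.0.0.0/8 to the CPU in virtual router vr_id */
	err = prestera_hw_lpm_add(sw, vr_id, cpu_to_be32(0x0a000000), 8, 0);
	if (err)
		return err;
	/* ... */
	prestera_hw_lpm_del(sw, vr_id, cpu_to_be32(0x0a000000), 8);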
int prestera_hw_rxtx_init(struct prestera_switch *sw,
struct prestera_rxtx_params *params)
{
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
index 3ff12bae5909..fd896a8838bb 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
@@ -249,6 +249,12 @@ int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id,
int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id);
int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id);
+/* LPM PI */
+int prestera_hw_lpm_add(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len, u32 grp_id);
+int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len);
+
/* Event handlers */
int prestera_hw_event_handler_register(struct prestera_switch *sw,
enum prestera_event_type type,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index cad93f747d0c..3952fdcc9240 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -28,6 +28,12 @@
#define PRESTERA_MAC_ADDR_NUM_MAX 255
static struct workqueue_struct *prestera_wq;
+static struct workqueue_struct *prestera_owq;
+
+void prestera_queue_work(struct work_struct *work)
+{
+ queue_work(prestera_owq, work);
+}
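
prestera_owq is an ordered workqueue, so items handed to
prestera_queue_work() run one at a time, in submission order; the FIB
notifier added below depends on route events being replayed in exactly
the order the kernel emitted them. The intended pattern, as a sketch
(it mirrors __prestera_router_fib_event() later in this patch):

	INIT_WORK(&fib_work->work, __prestera_router_fib_event_work);
	prestera_queue_work(&fib_work->work); /* serialized, in order */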
int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
{
@@ -554,6 +560,7 @@ static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw)
dev_info(prestera_dev(sw), "using random base mac address\n");
}
of_node_put(base_mac_np);
+ of_node_put(np);
return prestera_hw_switch_mac_set(sw, sw->base_mac);
}
@@ -1024,12 +1031,19 @@ static int __init prestera_module_init(void)
if (!prestera_wq)
return -ENOMEM;
+ prestera_owq = alloc_ordered_workqueue("prestera_ordered", 0);
+ if (!prestera_owq) {
+ destroy_workqueue(prestera_wq);
+ return -ENOMEM;
+ }
+
return 0;
}
static void __exit prestera_module_exit(void)
{
destroy_workqueue(prestera_wq);
+ destroy_workqueue(prestera_owq);
}
module_init(prestera_module_init);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c
index 6ef4d32b8fdd..6c5618cf4f08 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c
@@ -5,10 +5,39 @@
#include <linux/types.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
+#include <linux/rhashtable.h>
#include "prestera.h"
#include "prestera_router_hw.h"
+struct prestera_kern_fib_cache_key {
+ struct prestera_ip_addr addr;
+ u32 prefix_len;
+ u32 kern_tb_id; /* tb_id from kernel (not fixed) */
+};
+
+/* Subscribing to neighbours in kernel */
+struct prestera_kern_fib_cache {
+ struct prestera_kern_fib_cache_key key;
+ struct {
+ struct prestera_fib_key fib_key;
+ enum prestera_fib_type fib_type;
+ } lpm_info; /* hold prepared lpm info */
+ /* Indicate if route is not overlapped by another table */
+ struct rhash_head ht_node; /* node of prestera_router */
+ struct fib_info *fi;
+ u8 kern_tos;
+ u8 kern_type;
+ bool reachable;
+};
+
+static const struct rhashtable_params __prestera_kern_fib_cache_ht_params = {
+ .key_offset = offsetof(struct prestera_kern_fib_cache, key),
+ .head_offset = offsetof(struct prestera_kern_fib_cache, ht_node),
+ .key_len = sizeof(struct prestera_kern_fib_cache_key),
+ .automatic_shrinking = true,
+};
+
/* This util to be used, to convert kernel rules for default vr in hw_vr */
static u32 prestera_fix_tb_id(u32 tb_id)
{
@@ -20,6 +49,290 @@ static u32 prestera_fix_tb_id(u32 tb_id)
return tb_id;
}
+static void
+prestera_util_fen_info2fib_cache_key(struct fib_entry_notifier_info *fen_info,
+ struct prestera_kern_fib_cache_key *key)
+{
+ memset(key, 0, sizeof(*key));
+ key->addr.u.ipv4 = cpu_to_be32(fen_info->dst);
+ key->prefix_len = fen_info->dst_len;
+ key->kern_tb_id = fen_info->tb_id;
+}
+
+static struct prestera_kern_fib_cache *
+prestera_kern_fib_cache_find(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache_key *key)
+{
+ struct prestera_kern_fib_cache *fib_cache;
+
+ fib_cache =
+ rhashtable_lookup_fast(&sw->router->kern_fib_cache_ht, key,
+ __prestera_kern_fib_cache_ht_params);
+ return fib_cache;
+}
+
+static void
+prestera_kern_fib_cache_destroy(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fib_cache)
+{
+ fib_info_put(fib_cache->fi);
+ rhashtable_remove_fast(&sw->router->kern_fib_cache_ht,
+ &fib_cache->ht_node,
+ __prestera_kern_fib_cache_ht_params);
+ kfree(fib_cache);
+}
+
+/* Operations on fi (offload, etc) must be wrapped in utils.
+ * This function just creates storage.
+ */
+static struct prestera_kern_fib_cache *
+prestera_kern_fib_cache_create(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache_key *key,
+ struct fib_info *fi, u8 tos, u8 type)
+{
+ struct prestera_kern_fib_cache *fib_cache;
+ int err;
+
+ fib_cache = kzalloc(sizeof(*fib_cache), GFP_KERNEL);
+ if (!fib_cache)
+ goto err_kzalloc;
+
+ memcpy(&fib_cache->key, key, sizeof(*key));
+ fib_info_hold(fi);
+ fib_cache->fi = fi;
+ fib_cache->kern_tos = tos;
+ fib_cache->kern_type = type;
+
+ err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht,
+ &fib_cache->ht_node,
+ __prestera_kern_fib_cache_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ return fib_cache;
+
+err_ht_insert:
+ fib_info_put(fi);
+ kfree(fib_cache);
+err_kzalloc:
+ return NULL;
+}
+
+static void
+__prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc,
+ bool fail, bool offload, bool trap)
+{
+ struct fib_rt_info fri;
+
+ if (fc->key.addr.v != PRESTERA_IPV4)
+ return;
+
+ fri.fi = fc->fi;
+ fri.tb_id = fc->key.kern_tb_id;
+ fri.dst = fc->key.addr.u.ipv4;
+ fri.dst_len = fc->key.prefix_len;
+ fri.tos = fc->kern_tos;
+ fri.type = fc->kern_type;
+ /* flags begin */
+ fri.offload = offload;
+ fri.trap = trap;
+ fri.offload_failed = fail;
+ /* flags end */
+ fib_alias_hw_flags_set(&init_net, &fri);
+}
+
+static int
+__prestera_pr_k_arb_fc_lpm_info_calc(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ memset(&fc->lpm_info, 0, sizeof(fc->lpm_info));
+
+ switch (fc->fi->fib_type) {
+ case RTN_UNICAST:
+ fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP;
+ break;
+ /* Unsupported. Leave it for kernel: */
+ case RTN_BROADCAST:
+ case RTN_MULTICAST:
+ /* Routes we must trap by design: */
+ case RTN_LOCAL:
+ case RTN_UNREACHABLE:
+ case RTN_PROHIBIT:
+ fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP;
+ break;
+ case RTN_BLACKHOLE:
+ fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_DROP;
+ break;
+ default:
+ dev_err(sw->dev->dev, "Unsupported fib_type");
+ return -EOPNOTSUPP;
+ }
+
+ fc->lpm_info.fib_key.addr = fc->key.addr;
+ fc->lpm_info.fib_key.prefix_len = fc->key.prefix_len;
+ fc->lpm_info.fib_key.tb_id = prestera_fix_tb_id(fc->key.kern_tb_id);
+
+ return 0;
+}
+
+static int __prestera_k_arb_f_lpm_set(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc,
+ bool enabled)
+{
+ struct prestera_fib_node *fib_node;
+
+ fib_node = prestera_fib_node_find(sw, &fc->lpm_info.fib_key);
+ if (fib_node)
+ prestera_fib_node_destroy(sw, fib_node);
+
+ if (!enabled)
+ return 0;
+
+ fib_node = prestera_fib_node_create(sw, &fc->lpm_info.fib_key,
+ fc->lpm_info.fib_type);
+
+ if (!fib_node) {
+ dev_err(sw->dev->dev, "fib_node=NULL %pI4n/%d kern_tb_id = %d",
+ &fc->key.addr.u.ipv4, fc->key.prefix_len,
+ fc->key.kern_tb_id);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int __prestera_k_arb_fc_apply(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ int err;
+
+ err = __prestera_pr_k_arb_fc_lpm_info_calc(sw, fc);
+ if (err)
+ return err;
+
+ err = __prestera_k_arb_f_lpm_set(sw, fc, fc->reachable);
+ if (err) {
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc,
+ true, false, false);
+ return err;
+ }
+
+ switch (fc->lpm_info.fib_type) {
+ case PRESTERA_FIB_TYPE_TRAP:
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc, false,
+ false, fc->reachable);
+ break;
+ case PRESTERA_FIB_TYPE_DROP:
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc, false, true,
+ fc->reachable);
+ break;
+ case PRESTERA_FIB_TYPE_INVALID:
+ break;
+ }
+
+ return 0;
+}
+
+static struct prestera_kern_fib_cache *
+__prestera_k_arb_util_fib_overlaps(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *rfc;
+
+ /* TODO: parse kernel rules */
+ rfc = NULL;
+ if (fc->key.kern_tb_id == RT_TABLE_LOCAL) {
+ memcpy(&fc_key, &fc->key, sizeof(fc_key));
+ fc_key.kern_tb_id = RT_TABLE_MAIN;
+ rfc = prestera_kern_fib_cache_find(sw, &fc_key);
+ }
+
+ return rfc;
+}
+
+static struct prestera_kern_fib_cache *
+__prestera_k_arb_util_fib_overlapped(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *rfc;
+
+ /* TODO: parse kernel rules */
+ rfc = NULL;
+ if (fc->key.kern_tb_id == RT_TABLE_MAIN) {
+ memcpy(&fc_key, &fc->key, sizeof(fc_key));
+ fc_key.kern_tb_id = RT_TABLE_LOCAL;
+ rfc = prestera_kern_fib_cache_find(sw, &fc_key);
+ }
+
+ return rfc;
+}
+
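
A worked example of the LOCAL/MAIN arbitration these two helpers feed
(addresses invented):

	/* The kernel installs 192.168.1.1/32 into both RT_TABLE_LOCAL
	 * and RT_TABLE_MAIN; prestera_fix_tb_id() maps both to the same
	 * HW table, so only the top (LOCAL) cache entry may own the LPM
	 * slot:
	 *
	 *     LOCAL/32 -> reachable = true   (programmed to HW)
	 *     MAIN/32  -> reachable = false  (shadowed)
	 *
	 * On FIB_EVENT_ENTRY_DEL of the LOCAL entry,
	 * __prestera_k_arb_util_fib_overlaps() finds the MAIN twin and
	 * prestera_k_arb_fib_evt() below flips it to reachable. */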
+static int
+prestera_k_arb_fib_evt(struct prestera_switch *sw,
+ bool replace, /* replace or del */
+ struct fib_entry_notifier_info *fen_info)
+{
+ struct prestera_kern_fib_cache *tfib_cache, *bfib_cache; /* top/btm */
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *fib_cache;
+ int err;
+
+ prestera_util_fen_info2fib_cache_key(fen_info, &fc_key);
+ fib_cache = prestera_kern_fib_cache_find(sw, &fc_key);
+ if (fib_cache) {
+ fib_cache->reachable = false;
+ err = __prestera_k_arb_fc_apply(sw, fib_cache);
+ if (err)
+ dev_err(sw->dev->dev,
+ "Applying destroyed fib_cache failed");
+
+ bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache);
+ tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache);
+ if (!tfib_cache && bfib_cache) {
+ bfib_cache->reachable = true;
+ err = __prestera_k_arb_fc_apply(sw, bfib_cache);
+ if (err)
+ dev_err(sw->dev->dev,
+ "Applying fib_cache btm failed");
+ }
+
+ prestera_kern_fib_cache_destroy(sw, fib_cache);
+ }
+
+ if (replace) {
+ fib_cache = prestera_kern_fib_cache_create(sw, &fc_key,
+ fen_info->fi,
+ fen_info->tos,
+ fen_info->type);
+ if (!fib_cache) {
+ dev_err(sw->dev->dev, "fib_cache == NULL");
+ return -ENOENT;
+ }
+
+ bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache);
+ tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache);
+ if (!tfib_cache)
+ fib_cache->reachable = true;
+
+ if (bfib_cache) {
+ bfib_cache->reachable = false;
+ err = __prestera_k_arb_fc_apply(sw, bfib_cache);
+ if (err)
+ dev_err(sw->dev->dev,
+ "Applying fib_cache btm failed");
+ }
+
+ err = __prestera_k_arb_fc_apply(sw, fib_cache);
+ if (err)
+ dev_err(sw->dev->dev, "Applying fib_cache failed");
+ }
+
+ return 0;
+}
+
static int __prestera_inetaddr_port_event(struct net_device *port_dev,
unsigned long event,
struct netlink_ext_ack *extack)
@@ -137,6 +450,89 @@ out:
return notifier_from_errno(err);
}
+struct prestera_fib_event_work {
+ struct work_struct work;
+ struct prestera_switch *sw;
+ struct fib_entry_notifier_info fen_info;
+ unsigned long event;
+};
+
+static void __prestera_router_fib_event_work(struct work_struct *work)
+{
+ struct prestera_fib_event_work *fib_work =
+ container_of(work, struct prestera_fib_event_work, work);
+ struct prestera_switch *sw = fib_work->sw;
+ int err;
+
+ rtnl_lock();
+
+ switch (fib_work->event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = prestera_k_arb_fib_evt(sw, true, &fib_work->fen_info);
+ if (err)
+ goto err_out;
+
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ err = prestera_k_arb_fib_evt(sw, false, &fib_work->fen_info);
+ if (err)
+ goto err_out;
+
+ break;
+ }
+
+ goto out;
+
+err_out:
+ dev_err(sw->dev->dev, "Error when processing %pI4h/%d",
+ &fib_work->fen_info.dst,
+ fib_work->fen_info.dst_len);
+out:
+ fib_info_put(fib_work->fen_info.fi);
+ rtnl_unlock();
+ kfree(fib_work);
+}
+
+/* Called with rcu_read_lock() */
+static int __prestera_router_fib_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct prestera_fib_event_work *fib_work;
+ struct fib_entry_notifier_info *fen_info;
+ struct fib_notifier_info *info = ptr;
+ struct prestera_router *router;
+
+ if (info->family != AF_INET)
+ return NOTIFY_DONE;
+
+ router = container_of(nb, struct prestera_router, fib_nb);
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_DEL:
+ fen_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ if (!fen_info->fi)
+ return NOTIFY_DONE;
+
+ fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
+ if (WARN_ON(!fib_work))
+ return NOTIFY_BAD;
+
+ fib_info_hold(fen_info->fi);
+ fib_work->fen_info = *fen_info;
+ fib_work->event = event;
+ fib_work->sw = router->sw;
+ INIT_WORK(&fib_work->work, __prestera_router_fib_event_work);
+ prestera_queue_work(&fib_work->work);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_DONE;
+}
+
int prestera_router_init(struct prestera_switch *sw)
{
struct prestera_router *router;
@@ -153,6 +549,11 @@ int prestera_router_init(struct prestera_switch *sw)
if (err)
goto err_router_lib_init;
+ err = rhashtable_init(&router->kern_fib_cache_ht,
+ &__prestera_kern_fib_cache_ht_params);
+ if (err)
+ goto err_kern_fib_cache_ht_init;
+
router->inetaddr_valid_nb.notifier_call = __prestera_inetaddr_valid_cb;
err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
if (err)
@@ -163,11 +564,21 @@ int prestera_router_init(struct prestera_switch *sw)
if (err)
goto err_register_inetaddr_notifier;
+ router->fib_nb.notifier_call = __prestera_router_fib_event;
+ err = register_fib_notifier(&init_net, &router->fib_nb,
+ /* TODO: flush fib entries */ NULL, NULL);
+ if (err)
+ goto err_register_fib_notifier;
+
return 0;
+err_register_fib_notifier:
+ unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
err_register_inetaddr_validator_notifier:
+ rhashtable_destroy(&router->kern_fib_cache_ht);
+err_kern_fib_cache_ht_init:
prestera_router_hw_fini(sw);
err_router_lib_init:
kfree(sw->router);
@@ -178,6 +589,7 @@ void prestera_router_fini(struct prestera_switch *sw)
{
unregister_inetaddr_notifier(&sw->router->inetaddr_nb);
unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb);
+ rhashtable_destroy(&sw->router->kern_fib_cache_ht);
prestera_router_hw_fini(sw);
kfree(sw->router);
sw->router = NULL;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
index e5592b69ad37..5b0cf3be9a9e 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
@@ -9,23 +9,41 @@
#include "prestera_acl.h"
/* +--+
- * +------->|vr|
- * | +--+
- * |
- * +-+-------+
- * |rif_entry|
- * +---------+
- * Rif is
+ * +------->|vr|<-+
+ * | +--+ |
+ * | |
+ * +-+-------+ +--+---+-+
+ * |rif_entry| |fib_node|
+ * +---------+ +--------+
+ * Rif is Fib - is exit point
* used as
* entry point
* for vr in hw
*/
+#define PRESTERA_NHGR_UNUSED (0)
+#define PRESTERA_NHGR_DROP (0xFFFFFFFF)
+
+static const struct rhashtable_params __prestera_fib_ht_params = {
+ .key_offset = offsetof(struct prestera_fib_node, key),
+ .head_offset = offsetof(struct prestera_fib_node, ht_node),
+ .key_len = sizeof(struct prestera_fib_key),
+ .automatic_shrinking = true,
+};
+
int prestera_router_hw_init(struct prestera_switch *sw)
{
+ int err;
+
+ err = rhashtable_init(&sw->router->fib_ht,
+ &__prestera_fib_ht_params);
+ if (err)
+ goto err_fib_ht_init;
+
INIT_LIST_HEAD(&sw->router->vr_list);
INIT_LIST_HEAD(&sw->router->rif_entry_list);
-	return 0;
+	return 0;
+
+err_fib_ht_init:
+	return err;
}
@@ -33,6 +51,7 @@ void prestera_router_hw_fini(struct prestera_switch *sw)
{
WARN_ON(!list_empty(&sw->router->vr_list));
WARN_ON(!list_empty(&sw->router->rif_entry_list));
+ rhashtable_destroy(&sw->router->fib_ht);
}
static struct prestera_vr *__prestera_vr_find(struct prestera_switch *sw,
@@ -212,3 +231,102 @@ err_key_copy:
err_kzalloc:
return NULL;
}
+
+struct prestera_fib_node *
+prestera_fib_node_find(struct prestera_switch *sw, struct prestera_fib_key *key)
+{
+ struct prestera_fib_node *fib_node;
+
+ fib_node = rhashtable_lookup_fast(&sw->router->fib_ht, key,
+ __prestera_fib_ht_params);
+ return fib_node;
+}
+
+static void __prestera_fib_node_destruct(struct prestera_switch *sw,
+ struct prestera_fib_node *fib_node)
+{
+ struct prestera_vr *vr;
+
+ vr = fib_node->info.vr;
+ prestera_hw_lpm_del(sw, vr->hw_vr_id, fib_node->key.addr.u.ipv4,
+ fib_node->key.prefix_len);
+ switch (fib_node->info.type) {
+ case PRESTERA_FIB_TYPE_TRAP:
+ break;
+ case PRESTERA_FIB_TYPE_DROP:
+ break;
+ default:
+ pr_err("Unknown fib_node->info.type = %d",
+ fib_node->info.type);
+ }
+
+ prestera_vr_put(sw, vr);
+}
+
+void prestera_fib_node_destroy(struct prestera_switch *sw,
+ struct prestera_fib_node *fib_node)
+{
+ __prestera_fib_node_destruct(sw, fib_node);
+ rhashtable_remove_fast(&sw->router->fib_ht, &fib_node->ht_node,
+ __prestera_fib_ht_params);
+ kfree(fib_node);
+}
+
+struct prestera_fib_node *
+prestera_fib_node_create(struct prestera_switch *sw,
+ struct prestera_fib_key *key,
+ enum prestera_fib_type fib_type)
+{
+ struct prestera_fib_node *fib_node;
+	struct prestera_vr *vr;
+	u32 grp_id;
+ int err;
+
+ fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
+ if (!fib_node)
+ goto err_kzalloc;
+
+ memcpy(&fib_node->key, key, sizeof(*key));
+ fib_node->info.type = fib_type;
+
+ vr = prestera_vr_get(sw, key->tb_id, NULL);
+ if (IS_ERR(vr))
+ goto err_vr_get;
+
+ fib_node->info.vr = vr;
+
+ switch (fib_type) {
+ case PRESTERA_FIB_TYPE_TRAP:
+ grp_id = PRESTERA_NHGR_UNUSED;
+ break;
+ case PRESTERA_FIB_TYPE_DROP:
+ grp_id = PRESTERA_NHGR_DROP;
+ break;
+ default:
+ pr_err("Unsupported fib_type %d", fib_type);
+ goto err_nh_grp_get;
+ }
+
+ err = prestera_hw_lpm_add(sw, vr->hw_vr_id, key->addr.u.ipv4,
+ key->prefix_len, grp_id);
+ if (err)
+ goto err_lpm_add;
+
+ err = rhashtable_insert_fast(&sw->router->fib_ht, &fib_node->ht_node,
+ __prestera_fib_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ return fib_node;
+
+err_ht_insert:
+ prestera_hw_lpm_del(sw, vr->hw_vr_id, key->addr.u.ipv4,
+ key->prefix_len);
+err_lpm_add:
+err_nh_grp_get:
+ prestera_vr_put(sw, vr);
+err_vr_get:
+ kfree(fib_node);
+err_kzalloc:
+ return NULL;
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
index b6b028551868..67dbb49c8bd4 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
@@ -22,6 +22,42 @@ struct prestera_rif_entry {
struct list_head router_node; /* ht */
};
+struct prestera_ip_addr {
+ union {
+ __be32 ipv4;
+ struct in6_addr ipv6;
+ } u;
+ enum {
+ PRESTERA_IPV4 = 0,
+ PRESTERA_IPV6
+ } v;
+};
+
+struct prestera_fib_key {
+ struct prestera_ip_addr addr;
+ u32 prefix_len;
+ u32 tb_id;
+};
+
+struct prestera_fib_info {
+ struct prestera_vr *vr;
+ struct list_head vr_node;
+ enum prestera_fib_type {
+ PRESTERA_FIB_TYPE_INVALID = 0,
+		/* It can be a connected route and will be
+		 * overlapped with neighbours
+ */
+ PRESTERA_FIB_TYPE_TRAP,
+ PRESTERA_FIB_TYPE_DROP
+ } type;
+};
+
+struct prestera_fib_node {
+ struct rhash_head ht_node; /* node of prestera_vr */
+ struct prestera_fib_key key;
+ struct prestera_fib_info info; /* action related info */
+};
+
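
A hedged consumer-side sketch of this API (the arbiter in
prestera_router.c follows the same steps; the prefix is invented):

	struct prestera_fib_key fkey = {
		.addr = {
			.v = PRESTERA_IPV4,
			.u.ipv4 = cpu_to_be32(0xc0a80100), /* 192.168.1.0 */
		},
		.prefix_len = 24,
		.tb_id = prestera_fix_tb_id(RT_TABLE_MAIN),
	};
	struct prestera_fib_node *node;

	node = prestera_fib_node_create(sw, &fkey, PRESTERA_FIB_TYPE_TRAP);
	if (!node)
		return -ENOENT;
	/* ... */
	prestera_fib_node_destroy(sw, node);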
struct prestera_rif_entry *
prestera_rif_entry_find(const struct prestera_switch *sw,
const struct prestera_rif_entry_key *k);
@@ -31,6 +67,14 @@ struct prestera_rif_entry *
prestera_rif_entry_create(struct prestera_switch *sw,
struct prestera_rif_entry_key *k,
u32 tb_id, const unsigned char *addr);
+struct prestera_fib_node *prestera_fib_node_find(struct prestera_switch *sw,
+ struct prestera_fib_key *key);
+void prestera_fib_node_destroy(struct prestera_switch *sw,
+ struct prestera_fib_node *fib_node);
+struct prestera_fib_node *
+prestera_fib_node_create(struct prestera_switch *sw,
+ struct prestera_fib_key *key,
+ enum prestera_fib_type fib_type);
int prestera_router_hw_init(struct prestera_switch *sw);
void prestera_router_hw_fini(struct prestera_switch *sw);
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 89ca7960b225..4cd0747edaff 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -1556,6 +1556,7 @@ static int mtk_star_probe(struct platform_device *pdev)
return devm_register_netdev(dev, ndev);
}
+#ifdef CONFIG_OF
static const struct of_device_id mtk_star_of_match[] = {
{ .compatible = "mediatek,mt8516-eth", },
{ .compatible = "mediatek,mt8518-eth", },
@@ -1563,6 +1564,7 @@ static const struct of_device_id mtk_star_of_match[] = {
{ }
};
MODULE_DEVICE_TABLE(of, mtk_star_of_match);
+#endif
static SIMPLE_DEV_PM_OPS(mtk_star_pm_ops,
mtk_star_suspend, mtk_star_resume);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 8cfc649f226b..8f762fc170b3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1067,7 +1067,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
struct mlx4_qp_context *context;
int err = 0;
- context = kmalloc(sizeof(*context), GFP_KERNEL);
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
@@ -1078,7 +1078,6 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
}
qp->event = mlx4_en_sqp_event;
- memset(context, 0, sizeof(*context));
mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
qpn, ring->cqn, -1, context);
context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 817f4154b86d..f777151d226f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -42,7 +42,6 @@
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
-#include <linux/moduleparam.h>
#include <linux/indirect_call_wrapper.h>
#include "mlx4_en.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index fcfd38fa9e6c..4bc666714a35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -28,7 +28,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
- en/qos.o en/trap.o en/fs_tt_redirect.o
+ en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o
#
# Netdev extra
@@ -55,7 +55,11 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/a
en/tc/act/ct.o en/tc/act/sample.o en/tc/act/ptype.o \
en/tc/act/redirect_ingress.o
-mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
+ifneq ($(CONFIG_MLX5_TC_CT),)
+ mlx5_core-y += en/tc_ct.o en/tc/ct_fs_dmfs.o
+ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += en/tc/ct_fs_smfs.o
+endif
+
mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
#
@@ -103,9 +107,10 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_icm_pool.o steering/dr_buddy.o \
steering/dr_ste.o steering/dr_send.o \
steering/dr_ste_v0.o steering/dr_ste_v1.o \
+ steering/dr_ste_v2.o \
steering/dr_cmd.o steering/dr_fw.o \
steering/dr_action.o steering/fs_dr.o \
- steering/dr_dbg.o
+ steering/dr_dbg.o lib/smfs.o
#
# SF device
#
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 291e427e9e4f..e52b0bac09da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -71,53 +71,6 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
return cpu_handle;
}
-static int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
- struct mlx5_frag_buf *buf, int node)
-{
- dma_addr_t t;
-
- buf->size = size;
- buf->npages = 1;
- buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
-
- buf->frags = kzalloc(sizeof(*buf->frags), GFP_KERNEL);
- if (!buf->frags)
- return -ENOMEM;
-
- buf->frags->buf = mlx5_dma_zalloc_coherent_node(dev, size,
- &t, node);
- if (!buf->frags->buf)
- goto err_out;
-
- buf->frags->map = t;
-
- while (t & ((1 << buf->page_shift) - 1)) {
- --buf->page_shift;
- buf->npages *= 2;
- }
-
- return 0;
-err_out:
- kfree(buf->frags);
- return -ENOMEM;
-}
-
-int mlx5_buf_alloc(struct mlx5_core_dev *dev,
- int size, struct mlx5_frag_buf *buf)
-{
- return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
-}
-EXPORT_SYMBOL(mlx5_buf_alloc);
-
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
-{
- dma_free_coherent(mlx5_core_dma_dev(dev), buf->size, buf->frags->buf,
- buf->frags->map);
-
- kfree(buf->frags);
-}
-EXPORT_SYMBOL_GPL(mlx5_buf_free);
-
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_frag_buf *buf, int node)
{
@@ -183,11 +136,11 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
u32 db_per_page = PAGE_SIZE / cache_line_size();
struct mlx5_db_pgdir *pgdir;
- pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
+ pgdir = kzalloc_node(sizeof(*pgdir), GFP_KERNEL, node);
if (!pgdir)
return NULL;
- pgdir->bitmap = bitmap_zalloc(db_per_page, GFP_KERNEL);
+ pgdir->bitmap = bitmap_zalloc_node(db_per_page, GFP_KERNEL, node);
if (!pgdir->bitmap) {
kfree(pgdir);
return NULL;
@@ -286,19 +239,6 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
}
EXPORT_SYMBOL_GPL(mlx5_db_free);
-void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas)
-{
- u64 addr;
- int i;
-
- for (i = 0; i < buf->npages; i++) {
- addr = buf->frags->map + (i << buf->page_shift);
-
- pas[i] = cpu_to_be64(addr);
- }
-}
-EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
-
void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm)
{
int i;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 17fe05809653..26ba94cb432e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -31,7 +31,6 @@
*/
#include <linux/highmem.h>
-#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
@@ -131,11 +130,8 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd)
static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cmd->alloc_lock, flags);
+ lockdep_assert_held(&cmd->alloc_lock);
set_bit(idx, &cmd->bitmask);
- spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
@@ -145,17 +141,21 @@ static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
{
+ struct mlx5_cmd *cmd = ent->cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cmd->alloc_lock, flags);
if (!refcount_dec_and_test(&ent->refcnt))
- return;
+ goto out;
if (ent->idx >= 0) {
- struct mlx5_cmd *cmd = ent->cmd;
-
cmd_free_index(cmd, ent->idx);
up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
}
cmd_free_ent(ent);
+out:
+ spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
@@ -190,10 +190,10 @@ static int verify_block_sig(struct mlx5_cmd_prot_block *block)
int xor_len = sizeof(*block) - sizeof(block->data) - 1;
if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
- return -EINVAL;
+ return -EHWPOISON;
if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
- return -EINVAL;
+ return -EHWPOISON;
return 0;
}
@@ -259,12 +259,12 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
if (sig != 0xff)
- return -EINVAL;
+ return -EHWPOISON;
for (i = 0; i < n && next; i++) {
err = verify_block_sig(next->buf);
if (err)
- return err;
+ return -EHWPOISON;
next = next->next;
}
@@ -477,9 +477,14 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_VHCA_STATE:
case MLX5_CMD_OP_MODIFY_VHCA_STATE:
case MLX5_CMD_OP_ALLOC_SF:
+ case MLX5_CMD_OP_SUSPEND_VHCA:
+ case MLX5_CMD_OP_RESUME_VHCA:
+ case MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE:
+ case MLX5_CMD_OP_SAVE_VHCA_STATE:
+ case MLX5_CMD_OP_LOAD_VHCA_STATE:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
- return -EIO;
+ return -ENOLINK;
default:
mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
return -EINVAL;
@@ -674,6 +679,11 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(MODIFY_VHCA_STATE);
MLX5_COMMAND_STR_CASE(ALLOC_SF);
MLX5_COMMAND_STR_CASE(DEALLOC_SF);
+ MLX5_COMMAND_STR_CASE(SUSPEND_VHCA);
+ MLX5_COMMAND_STR_CASE(RESUME_VHCA);
+ MLX5_COMMAND_STR_CASE(QUERY_VHCA_MIGRATION_STATE);
+ MLX5_COMMAND_STR_CASE(SAVE_VHCA_STATE);
+ MLX5_COMMAND_STR_CASE(LOAD_VHCA_STATE);
default: return "unknown command opcode";
}
}
@@ -760,44 +770,72 @@ struct mlx5_ifc_mbox_in_bits {
u8 reserved_at_40[0x40];
};
-void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
+void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
{
- *status = MLX5_GET(mbox_out, out, status);
- *syndrome = MLX5_GET(mbox_out, out, syndrome);
+ u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
+ u8 status = MLX5_GET(mbox_out, out, status);
+
+ mlx5_core_err_rl(dev,
+ "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n",
+ mlx5_command_str(opcode), opcode, op_mod,
+ cmd_status_str(status), status, syndrome, cmd_status_to_err(status));
}
+EXPORT_SYMBOL(mlx5_cmd_out_err);
-static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
+static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
{
+ u16 opcode, op_mod;
u32 syndrome;
u8 status;
- u16 opcode;
- u16 op_mod;
u16 uid;
+ int err;
- mlx5_cmd_mbox_status(out, &status, &syndrome);
- if (!status)
- return 0;
+ syndrome = MLX5_GET(mbox_out, out, syndrome);
+ status = MLX5_GET(mbox_out, out, status);
opcode = MLX5_GET(mbox_in, in, opcode);
op_mod = MLX5_GET(mbox_in, in, op_mod);
uid = MLX5_GET(mbox_in, in, uid);
+ err = cmd_status_to_err(status);
+
if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
- mlx5_core_err_rl(dev,
- "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
- mlx5_command_str(opcode), opcode, op_mod,
- cmd_status_str(status), status, syndrome);
+ mlx5_cmd_out_err(dev, opcode, op_mod, out);
else
mlx5_core_dbg(dev,
- "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
- mlx5_command_str(opcode),
- opcode, op_mod,
- cmd_status_str(status),
- status,
- syndrome);
+ "%s(0x%x) op_mod(0x%x) uid(%d) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n",
+ mlx5_command_str(opcode), opcode, op_mod, uid,
+ cmd_status_str(status), status, syndrome, err);
+}
- return cmd_status_to_err(status);
+int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out)
+{
+ /* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */
+ if (err == -ENXIO) {
+ u16 opcode = MLX5_GET(mbox_in, in, opcode);
+ u32 syndrome;
+ u8 status;
+
+ /* PCI Error, emulate command return status, for smooth reset */
+ err = mlx5_internal_err_ret_value(dev, opcode, &syndrome, &status);
+ MLX5_SET(mbox_out, out, status, status);
+ MLX5_SET(mbox_out, out, syndrome, syndrome);
+ if (!err)
+ return 0;
+ }
+
+ /* driver or FW delivery error */
+ if (err != -EREMOTEIO && err)
+ return err;
+
+ /* check outbox status */
+ err = cmd_status_to_err(MLX5_GET(mbox_out, out, status));
+ if (err)
+ cmd_status_print(dev, in, out);
+
+ return err;
}
+EXPORT_SYMBOL(mlx5_cmd_check);
static void dump_command(struct mlx5_core_dev *dev,
struct mlx5_cmd_work_ent *ent, int input)
@@ -980,13 +1018,7 @@ static void cmd_work_handler(struct work_struct *work)
/* Skip sending command to fw if internal error */
if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
- u8 status = 0;
- u32 drv_synd;
-
- ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
- MLX5_SET(mbox_out, ent->out, status, status);
- MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
-
+ ent->ret = -ENXIO;
mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
return;
}
@@ -1005,6 +1037,31 @@ static void cmd_work_handler(struct work_struct *work)
}
}
+static int deliv_status_to_err(u8 status)
+{
+ switch (status) {
+ case MLX5_CMD_DELIVERY_STAT_OK:
+ case MLX5_DRIVER_STATUS_ABORTED:
+ return 0;
+ case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
+ case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
+ return -EBADR;
+ case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
+ case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
+ case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
+ return -EFAULT; /* Bad address */
+ case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
+ case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
+ case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
+ case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
+ return -ENOMSG;
+ case MLX5_CMD_DELIVERY_STAT_FW_ERR:
+ return -EIO;
+ default:
+ return -EINVAL;
+ }
+}
+
static const char *deliv_status_to_str(u8 status)
{
switch (status) {
@@ -1101,16 +1158,27 @@ out_err:
/* Notes:
* 1. Callback functions may not sleep
 * 2. page queue commands do not support asynchronous completion
+ *
+ * return value in case (!callback):
+ * ret < 0 : Command execution couldn't be submitted by driver
+ * ret > 0 : Command execution couldn't be performed by firmware
+ * ret == 0: Command was executed by FW, caller must check FW outbox status.
+ *
+ * return value in case (callback):
+ * ret < 0 : Command execution couldn't be submitted by driver
+ * ret == 0: Command will be submitted to FW for execution
+ * and the callback will be called for further status updates
*/
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
struct mlx5_cmd_msg *out, void *uout, int uout_size,
mlx5_cmd_cbk_t callback,
- void *context, int page_queue, u8 *status,
+ void *context, int page_queue,
u8 token, bool force_polling)
{
struct mlx5_cmd *cmd = &dev->cmd;
struct mlx5_cmd_work_ent *ent;
struct mlx5_cmd_stats *stats;
+ u8 status = 0;
int err = 0;
s64 ds;
u16 op;
@@ -1141,12 +1209,12 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
cmd_work_handler(&ent->work);
} else if (!queue_work(cmd->wq, &ent->work)) {
mlx5_core_warn(dev, "failed to queue work\n");
- err = -ENOMEM;
+ err = -EALREADY;
goto out_free;
}
if (callback)
- goto out; /* mlx5_cmd_comp_handler() will put(ent) */
+ return 0; /* mlx5_cmd_comp_handler() will put(ent) */
err = wait_func(dev, ent);
if (err == -ETIMEDOUT || err == -ECANCELED)
@@ -1164,12 +1232,11 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
"fw exec time for %s is %lld nsec\n",
mlx5_command_str(op), ds);
- *status = ent->status;
out_free:
+ status = ent->status;
cmd_ent_put(ent);
-out:
- return err;
+ return err ? : status;
}
static ssize_t dbg_write(struct file *filp, const char __user *buf,
@@ -1486,7 +1553,7 @@ static void create_debugfs_files(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
- dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
+ dbg->dbg_root = debugfs_create_dir("cmd", mlx5_debugfs_get_dev_root(dev));
debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops);
debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops);
@@ -1612,15 +1679,15 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
ent->ts2 = ktime_get_ns();
memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
dump_command(dev, ent, 0);
- if (!ent->ret) {
+
+ if (vec & MLX5_TRIGGERED_CMD_COMP)
+ ent->ret = -ENXIO;
+
+ if (!ent->ret) { /* Command completed by FW */
if (!cmd->checksum_disabled)
ent->ret = verify_signature(ent);
- else
- ent->ret = 0;
- if (vec & MLX5_TRIGGERED_CMD_COMP)
- ent->status = MLX5_DRIVER_STATUS_ABORTED;
- else
- ent->status = ent->lay->status_own >> 1;
+
+ ent->status = ent->lay->status_own >> 1;
mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
ent->ret, deliv_status_to_str(ent->status), ent->status);
@@ -1638,21 +1705,18 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
callback = ent->callback;
context = ent->context;
- err = ent->ret;
- if (!err) {
+ err = ent->ret ? : ent->status;
+ if (err > 0) /* Failed in FW, command didn't execute */
+ err = deliv_status_to_err(err);
+
+ if (!err)
err = mlx5_copy_from_msg(ent->uout,
ent->out,
ent->uout_size);
- err = err ? err : mlx5_cmd_check(dev,
- ent->in->first.data,
- ent->uout);
- }
-
mlx5_free_cmd_msg(dev, ent->out);
free_msg(dev, ent->in);
- err = err ? err : ent->status;
/* final consumer is done, release ent */
cmd_ent_put(ent);
callback(err, context);
@@ -1666,7 +1730,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
}
}
-void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
+static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
{
struct mlx5_cmd *cmd = &dev->cmd;
unsigned long bitmask;
@@ -1719,31 +1783,6 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
up(&cmd->sem);
}
-static int status_to_err(u8 status)
-{
- switch (status) {
- case MLX5_CMD_DELIVERY_STAT_OK:
- case MLX5_DRIVER_STATUS_ABORTED:
- return 0;
- case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
- case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
- return -EBADR;
- case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
- case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
- case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
- return -EFAULT; /* Bad address */
- case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
- case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
- case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
- case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
- return -ENOMSG;
- case MLX5_CMD_DELIVERY_STAT_FW_ERR:
- return -EIO;
- default:
- return -EINVAL;
- }
-}
-
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
gfp_t gfp)
{
@@ -1787,27 +1826,23 @@ static int is_manage_pages(void *in)
return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}
+/* Notes:
+ * 1. Callback functions may not sleep
+ * 2. Page queue commands do not support asynchronous completion
+ */
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size, mlx5_cmd_cbk_t callback, void *context,
bool force_polling)
{
- struct mlx5_cmd_msg *inb;
- struct mlx5_cmd_msg *outb;
+ u16 opcode = MLX5_GET(mbox_in, in, opcode);
+ struct mlx5_cmd_msg *inb, *outb;
int pages_queue;
gfp_t gfp;
- int err;
- u8 status = 0;
- u32 drv_synd;
- u16 opcode;
u8 token;
+ int err;
- opcode = MLX5_GET(mbox_in, in, opcode);
- if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode)) {
- err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
- MLX5_SET(mbox_out, out, status, status);
- MLX5_SET(mbox_out, out, syndrome, drv_synd);
- return err;
- }
+ if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
+ return -ENXIO;
pages_queue = is_manage_pages(in);
gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
@@ -1833,39 +1868,133 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
}
err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
- pages_queue, &status, token, force_polling);
+ pages_queue, token, force_polling);
+ if (callback)
+ return err;
+
+ if (err > 0) /* Failed in FW, command didn't execute */
+ err = deliv_status_to_err(err);
+
if (err)
goto out_out;
- mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
- if (status) {
- err = status_to_err(status);
- goto out_out;
+ /* command completed by FW */
+ err = mlx5_copy_from_msg(out, outb, out_size);
+out_out:
+ mlx5_free_cmd_msg(dev, outb);
+out_in:
+ free_msg(dev, inb);
+ return err;
+}
+
+static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, int err)
+{
+ struct mlx5_cmd_stats *stats;
+
+ if (!err)
+ return;
+
+ stats = &dev->cmd.stats[opcode];
+ spin_lock_irq(&stats->lock);
+ stats->failed++;
+ if (err < 0)
+ stats->last_failed_errno = -err;
+ if (err == -EREMOTEIO) {
+ stats->failed_mbox_status++;
+ stats->last_failed_mbox_status = status;
}
+ spin_unlock_irq(&stats->lock);
+}
- if (!callback)
- err = mlx5_copy_from_msg(out, outb, out_size);
+/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
+static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *out)
+{
+ u8 status = MLX5_GET(mbox_out, out, status);
-out_out:
- if (!callback)
- mlx5_free_cmd_msg(dev, outb);
+ if (err == -EREMOTEIO) /* -EREMOTEIO is preserved */
+ err = -EIO;
-out_in:
- if (!callback)
- free_msg(dev, inb);
+ if (!err && status != MLX5_CMD_STAT_OK)
+ err = -EREMOTEIO;
+
+ cmd_status_log(dev, opcode, status, err);
+ return err;
+}
+
+/**
+ * mlx5_cmd_do - Executes a fw command and waits for completion.
+ * Unlike mlx5_cmd_exec, this function will not translate or intercept
+ * outbox.status and will return -EREMOTEIO when
+ * outbox.status != MLX5_CMD_STAT_OK
+ *
+ * @dev: mlx5 core device
+ * @in: inbox mlx5_ifc command buffer
+ * @in_size: inbox buffer size
+ * @out: outbox mlx5_ifc buffer
+ * @out_size: outbox size
+ *
+ * @return:
+ * -EREMOTEIO : Command executed by FW, outbox.status != MLX5_CMD_STAT_OK.
+ * Caller must check FW outbox status.
+ * 0 : Command execution successful, outbox.status == MLX5_CMD_STAT_OK.
+ * < 0 : Command execution couldn't be performed by firmware or driver
+ */
+int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size)
+{
+ int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
+ u16 opcode = MLX5_GET(mbox_in, in, opcode);
+
+ err = cmd_status_err(dev, err, opcode, out);
return err;
}
+EXPORT_SYMBOL(mlx5_cmd_do);
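
A hedged caller sketch of the new contract (the command choice and the
surrounding names are illustrative; the mlx5_ifc accessors are the usual
ones):

	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
	int err;

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	err = mlx5_cmd_do(dev, in, sizeof(in), out, sizeof(out));
	if (err == -EREMOTEIO) {
		/* FW executed and rejected the command: inspect the raw
		 * outbox instead of a translated errno */
		u8 status = MLX5_GET(mbox_out, out, status);
		u32 synd = MLX5_GET(mbox_out, out, syndrome);
		/* ... */
	} else if (err) {
		return err;	/* driver or delivery failure */
	}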
+/**
+ * mlx5_cmd_exec - Executes a fw command and waits for completion
+ *
+ * @dev: mlx5 core device
+ * @in: inbox mlx5_ifc command buffer
+ * @in_size: inbox buffer size
+ * @out: outbox mlx5_ifc buffer
+ * @out_size: outbox size
+ *
+ * @return: 0 if no error, FW command execution was successful
+ * and outbox status is ok.
+ */
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size)
{
- int err;
+ int err = mlx5_cmd_do(dev, in, in_size, out, out_size);
- err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
- return err ? : mlx5_cmd_check(dev, in, out);
+ return mlx5_cmd_check(dev, err, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
+/**
+ * mlx5_cmd_exec_polling - Executes a fw command and polls for completion
+ * Needed for driver force teardown, when command completion EQ
+ * will not be available to complete the command
+ *
+ * @dev: mlx5 core device
+ * @in: inbox mlx5_ifc command buffer
+ * @in_size: inbox buffer size
+ * @out: outbox mlx5_ifc buffer
+ * @out_size: outbox size
+ *
+ * @return: 0 if no error, FW command execution was successful
+ * and outbox status is ok.
+ */
+int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
+ void *out, int out_size)
+{
+ int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
+ u16 opcode = MLX5_GET(mbox_in, in, opcode);
+
+ err = cmd_status_err(dev, err, opcode, out);
+ return mlx5_cmd_check(dev, err, in, out);
+}
+EXPORT_SYMBOL(mlx5_cmd_exec_polling);
+
void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
struct mlx5_async_ctx *ctx)
{
@@ -1894,8 +2023,10 @@ EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);
static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
struct mlx5_async_work *work = _work;
- struct mlx5_async_ctx *ctx = work->ctx;
+ struct mlx5_async_ctx *ctx;
+ ctx = work->ctx;
+ status = cmd_status_err(ctx->dev, status, work->opcode, work->out);
work->user_callback(status, work);
if (atomic_dec_and_test(&ctx->num_inflight))
wake_up(&ctx->wait);
@@ -1909,6 +2040,8 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
work->ctx = ctx;
work->user_callback = callback;
+ work->opcode = MLX5_GET(mbox_in, in, opcode);
+ work->out = out;
if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
return -EIO;
ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
@@ -1920,17 +2053,6 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
-int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
- void *out, int out_size)
-{
- int err;
-
- err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
-
- return err ? : mlx5_cmd_check(dev, in, out);
-}
-EXPORT_SYMBOL(mlx5_cmd_exec_polling);
-
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
struct cmd_msg_cache *ch;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 5371ad0a12eb..4caa1b6f40ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
#include <rdma/ib_verbs.h>
@@ -86,8 +85,9 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
}
-int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- u32 *in, int inlen, u32 *out, int outlen)
+/* Callers must verify outbox status in case of err */
+int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u32 *in, int inlen, u32 *out, int outlen)
{
int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
c_eqn_or_apu_element);
@@ -101,7 +101,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
memset(out, 0, outlen);
MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
- err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
+ err = mlx5_cmd_do(dev, in, inlen, out, outlen);
if (err)
return err;
@@ -148,6 +148,16 @@ err_cmd:
mlx5_cmd_exec_in(dev, destroy_cq, din);
return err;
}
+EXPORT_SYMBOL(mlx5_create_cq);
+
+/* outbox is checked and the error value is normalized */
+int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u32 *in, int inlen, u32 *out, int outlen)
+{
+ int err = mlx5_create_cq(dev, cq, in, inlen, out, outlen);
+
+ return mlx5_cmd_check(dev, err, in, out);
+}
EXPORT_SYMBOL(mlx5_core_create_cq);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
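The CQ path now comes in two flavors: mlx5_create_cq() leaves the outbox status to the caller, while mlx5_core_create_cq() keeps the old checked semantics. A hedged sketch of a caller using the unchecked variant to keep the FW syndrome for diagnostics (the error-handling shape is an assumption):

	static int example_create_cq(struct mlx5_core_dev *mdev,
				     struct mlx5_core_cq *mcq, u32 *in, int inlen)
	{
		u32 out[MLX5_ST_SZ_DW(create_cq_out)] = {};
		int err;

		err = mlx5_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
		if (err == -EREMOTEIO)	/* failed in FW; outbox is valid */
			mlx5_core_warn(mdev, "CREATE_CQ syndrome 0x%x\n",
				       MLX5_GET(create_cq_out, out, syndrome));

		return mlx5_cmd_check(mdev, err, in, out);
	}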
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 10d195042ab5..3d3e55a5cb11 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
@@ -99,26 +98,32 @@ void mlx5_unregister_debugfs(void)
debugfs_remove(mlx5_debugfs_root);
}
+struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev)
+{
+ return dev->priv.dbg.dbg_root;
+}
+EXPORT_SYMBOL(mlx5_debugfs_get_dev_root);
+
void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
- dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root);
+ dev->priv.dbg.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg.dbg_root);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_init);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- debugfs_remove_recursive(dev->priv.qp_debugfs);
+ debugfs_remove_recursive(dev->priv.dbg.qp_debugfs);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_cleanup);
void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
- dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root);
+ dev->priv.dbg.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg.dbg_root);
}
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- debugfs_remove_recursive(dev->priv.eq_debugfs);
+ debugfs_remove_recursive(dev->priv.dbg.eq_debugfs);
}
static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
@@ -168,8 +173,8 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
const char *namep;
int i;
- cmd = &dev->priv.cmdif_debugfs;
- *cmd = debugfs_create_dir("commands", dev->priv.dbg_root);
+ cmd = &dev->priv.dbg.cmdif_debugfs;
+ *cmd = debugfs_create_dir("commands", dev->priv.dbg.dbg_root);
for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
stats = &dev->cmd.stats[i];
@@ -180,23 +185,51 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
debugfs_create_file("average", 0400, stats->root, stats,
&stats_fops);
debugfs_create_u64("n", 0400, stats->root, &stats->n);
+ debugfs_create_u64("failed", 0400, stats->root, &stats->failed);
+ debugfs_create_u64("failed_mbox_status", 0400, stats->root,
+ &stats->failed_mbox_status);
+ debugfs_create_u32("last_failed_errno", 0400, stats->root,
+ &stats->last_failed_errno);
+ debugfs_create_u8("last_failed_mbox_status", 0400, stats->root,
+ &stats->last_failed_mbox_status);
}
}
}
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- debugfs_remove_recursive(dev->priv.cmdif_debugfs);
+ debugfs_remove_recursive(dev->priv.dbg.cmdif_debugfs);
}
void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
{
- dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root);
+ dev->priv.dbg.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg.dbg_root);
}
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- debugfs_remove_recursive(dev->priv.cq_debugfs);
+ debugfs_remove_recursive(dev->priv.dbg.cq_debugfs);
+}
+
+void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
+{
+ struct dentry *pages;
+
+ dev->priv.dbg.pages_debugfs = debugfs_create_dir("pages", dev->priv.dbg.dbg_root);
+ pages = dev->priv.dbg.pages_debugfs;
+
+ debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
+ debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.vfs_pages);
+ debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.host_pf_pages);
+ debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
+ debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped);
+ debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages,
+ &dev->priv.reclaim_pages_discard);
+}
+
+void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev)
+{
+ debugfs_remove_recursive(dev->priv.dbg.pages_debugfs);
}
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
@@ -441,7 +474,7 @@ int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
if (!mlx5_debugfs_root)
return 0;
- err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs,
+ err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.dbg.qp_debugfs,
&qp->dbg, qp->qpn, qp_fields,
ARRAY_SIZE(qp_fields), qp);
if (err)
@@ -468,7 +501,7 @@ int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
if (!mlx5_debugfs_root)
return 0;
- err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs,
+ err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.dbg.eq_debugfs,
&eq->dbg, eq->eqn, eq_fields,
ARRAY_SIZE(eq_fields), eq);
if (err)
@@ -493,7 +526,7 @@ int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
if (!mlx5_debugfs_root)
return 0;
- err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs,
+ err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.dbg.cq_debugfs,
&cq->dbg, cq->cqn, cq_fields,
ARRAY_SIZE(cq_fields), cq);
if (err)
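Since mlx5_debugfs_get_dev_root() is now exported, sub-modules can attach entries under the per-device debugfs directory without reaching into priv. A minimal sketch (the counter and file name are illustrative):

	static void example_debugfs_init(struct mlx5_core_dev *dev, u32 *counter)
	{
		struct dentry *root = mlx5_debugfs_get_dev_root(dev);

		/* 0400 (root-readable) matches the files added above */
		debugfs_create_u32("example_counter", 0400, root, counter);
	}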
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index d1093bb2d436..057dde6f4417 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -100,15 +100,11 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
}
net_port_alive = !!(reset_type & MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE);
- err = mlx5_fw_reset_set_reset_sync(dev, net_port_alive);
+ err = mlx5_fw_reset_set_reset_sync(dev, net_port_alive, extack);
if (err)
- goto out;
+ return err;
- err = mlx5_fw_reset_wait_reset_done(dev);
-out:
- if (err)
- NL_SET_ERR_MSG_MOD(extack, "FW activate command failed");
- return err;
+ return mlx5_fw_reset_wait_reset_done(dev);
}
static int mlx5_devlink_trigger_fw_live_patch(struct devlink *devlink,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index c14e06ca64d8..8653ac0fd865 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -59,6 +59,7 @@
#include "lib/hv_vhca.h"
#include "lib/clock.h"
#include "en/rx_res.h"
+#include "en/selq.h"
extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;
@@ -172,8 +173,9 @@ struct page_pool;
#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
-#define MLX5E_MAX_KLM_PER_WQE \
- MLX5E_KLM_ENTRIES_PER_WQE(MLX5E_TX_MPW_MAX_NUM_DS << MLX5_MKEY_BSF_OCTO_SIZE)
+#define MLX5E_MAX_KLM_PER_WQE(mdev) \
+ MLX5E_KLM_ENTRIES_PER_WQE(mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)) \
+ << MLX5_MKEY_BSF_OCTO_SIZE)
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
@@ -221,6 +223,32 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
}
+/* The maximum WQE size can be retrieved by max_wqe_sz_sq in
+ * bytes units. Driver hardens the limitation to 1KB (16
+ * WQEBBs), unless firmware capability is stricter.
+ */
+static inline u16 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev)
+{
+ return min_t(u16, MLX5_SEND_WQE_MAX_WQEBBS,
+ MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB);
+}
+
+static inline u16 mlx5e_get_sw_max_sq_mpw_wqebbs(u16 max_sq_wqebbs)
+{
+/* The return value will be multiplied by MLX5_SEND_WQEBB_NUM_DS.
+ * Since max_sq_wqebbs may be up to MLX5_SEND_WQE_MAX_WQEBBS == 16,
+ * see mlx5e_get_max_sq_wqebbs(), the multiplication (16 * 4 == 64)
+ * overflows the 6-bit DS field of Ctrl Segment. Use a bound lower
+ * than MLX5_SEND_WQE_MAX_WQEBBS to let a full-session WQE be
+ * cache-aligned.
+ */
+#if L1_CACHE_BYTES < 128
+ return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
+#else
+ return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 2);
+#endif
+}
+
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_eth_seg eth;
@@ -377,6 +405,7 @@ enum {
MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
MLX5E_SQ_STATE_PENDING_XSK_TX,
MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
+ MLX5E_SQ_STATE_XDP_MULTIBUF,
};
struct mlx5e_tx_mpwqe {
@@ -427,12 +456,12 @@ struct mlx5e_txqsq {
struct netdev_queue *txq;
u32 sqn;
u16 stop_room;
+ u16 max_sq_mpw_wqebbs;
u8 min_inline_mode;
struct device *pdev;
__be32 mkey_be;
unsigned long state;
unsigned int hw_mtu;
- struct hwtstamp_config *tstamp;
struct mlx5_clock *clock;
struct net_device *netdev;
struct mlx5_core_dev *mdev;
@@ -446,6 +475,7 @@ struct mlx5e_txqsq {
struct work_struct recover_work;
struct mlx5e_ptpsq *ptpsq;
cqe_ts_to_ns ptp_cyc2time;
+ u16 max_sq_wqebbs;
} ____cacheline_aligned_in_smp;
struct mlx5e_dma_info {
@@ -486,7 +516,7 @@ struct mlx5e_xdp_info {
} frame;
struct {
struct mlx5e_rq *rq;
- struct mlx5e_dma_info di;
+ struct page *page;
} page;
};
};
@@ -508,7 +538,7 @@ struct mlx5e_xdpsq;
typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
struct mlx5e_xmit_data *,
- struct mlx5e_xdp_info *,
+ struct skb_shared_info *,
int);
struct mlx5e_xdpsq {
@@ -540,6 +570,8 @@ struct mlx5e_xdpsq {
u32 sqn;
struct device *pdev;
__be32 mkey_be;
+ u16 stop_room;
+ u16 max_sq_mpw_wqebbs;
u8 min_inline_mode;
unsigned long state;
unsigned int hw_mtu;
@@ -547,6 +579,7 @@ struct mlx5e_xdpsq {
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
struct mlx5e_channel *channel;
+ u16 max_sq_wqebbs;
} ____cacheline_aligned_in_smp;
struct mlx5e_ktls_resync_resp;
@@ -575,6 +608,7 @@ struct mlx5e_icosq {
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
struct mlx5e_channel *channel;
+ u16 max_sq_wqebbs;
struct work_struct recover_work;
} ____cacheline_aligned_in_smp;
@@ -681,6 +715,7 @@ struct mlx5e_rq {
u8 umr_in_progress;
u8 umr_last_bulk;
u8 umr_completed;
+ u8 min_wqe_bulk;
struct mlx5e_shampo_hd *shampo;
} mpwqe;
};
@@ -876,9 +911,8 @@ struct mlx5e_trap;
struct mlx5e_priv {
/* priv data path fields - start */
+ struct mlx5e_selq selq;
struct mlx5e_txqsq **txq2sq;
- int **channel_tc2realtxq;
- int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx_dp dcbx_dp;
#endif
@@ -921,7 +955,6 @@ struct mlx5e_priv {
u16 drop_rq_q_counter;
struct notifier_block events_nb;
struct notifier_block blocking_events_nb;
- int num_tc_x_num_ch;
struct udp_tunnel_nic_info nic_info;
#ifdef CONFIG_MLX5_CORE_EN_DCB
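To make the bound in mlx5e_get_sw_max_sq_mpw_wqebbs() concrete: the ctrl segment's DS field is 6 bits wide, so it can describe at most 2^6 - 1 = 63 data segments, while 16 WQEBBs * MLX5_SEND_WQEBB_NUM_DS (4) = 64 would overflow it. A cap of 15 WQEBBs (960 B) fits and stays 64-byte aligned; with 128-byte cachelines the cap drops to 14 (14 * 64 B = 896 B) so a full MPWQE session remains cache-aligned.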
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 66180ffb4606..08fd1370a8b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -178,16 +178,28 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
}
+u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)
+{
+#define UMR_WQE_BULK (2)
+ return min_t(unsigned int, UMR_WQE_BULK, wq_sz / 2 - 1);
+}
+
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- bool is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
- mlx5e_rx_is_linear_skb(params, xsk) :
- mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
+ u16 linear_headroom = mlx5e_get_linear_rq_headroom(params, xsk);
+
+ if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
+ return linear_headroom;
+
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
+ return linear_headroom;
+
+ if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
+ return linear_headroom;
- return is_linear_skb || params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO ?
- mlx5e_get_linear_rq_headroom(params, xsk) : 0;
+ return 0;
}
u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
@@ -196,13 +208,13 @@ u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par
u16 stop_room;
stop_room = mlx5e_tls_get_stop_room(mdev, params);
- stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+ stop_room += mlx5e_stop_room_for_max_wqe(mdev);
if (is_mpwqe)
/* A MPWQE can take up to the maximum-sized WQE + all the normal
* stop room can be taken if a new packet breaks the active
* MPWQE session and allocates its WQEs right away.
*/
- stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+ stop_room += mlx5e_stop_room_for_max_wqe(mdev);
return stop_room;
}
@@ -359,12 +371,13 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
{
/* Prefer Striding RQ, unless any of the following holds:
* - Striding RQ configuration is not possible/supported.
- * - Slow PCI heuristic.
+ * - CQE compression is ON, and stride_index mini_cqe layout is not supported.
* - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
*
* No XSK params: checking the availability of striding RQ in general.
*/
- if (!slow_pci_heuristic(mdev) &&
+ if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ||
+ MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
mlx5e_striding_rq_possible(mdev, params) &&
(mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
!mlx5e_rx_is_linear_skb(params, NULL)))
@@ -385,16 +398,29 @@ void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e
};
}
+static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size, bool xdp)
+{
+ if (xdp)
+ /* XDP requires all fragments to be of the same size. */
+ return first_frag_size + (MLX5E_MAX_RX_FRAGS - 1) * frag_size;
+
+ /* Optimization for small packets: the last fragment is bigger than the others. */
+ return first_frag_size + (MLX5E_MAX_RX_FRAGS - 2) * frag_size + PAGE_SIZE;
+}
+
#define DEFAULT_FRAG_SIZE (2048)
-static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
- struct mlx5e_rq_frags_info *info)
+static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_frags_info *info)
{
u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
int frag_size_max = DEFAULT_FRAG_SIZE;
+ int first_frag_size_max;
u32 buf_size = 0;
+ u16 headroom;
+ int max_mtu;
int i;
if (mlx5_fpga_is_ipsec_device(mdev))
@@ -413,21 +439,48 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
goto out;
}
- if (byte_count > PAGE_SIZE +
- (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
+ headroom = mlx5e_get_linear_rq_headroom(params, xsk);
+ first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);
+
+ max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
+ params->xdp_prog);
+ if (byte_count > max_mtu || params->xdp_prog) {
frag_size_max = PAGE_SIZE;
+ first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);
+
+ max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
+ params->xdp_prog);
+ if (byte_count > max_mtu) {
+ mlx5_core_err(mdev, "MTU %u is too big for non-linear legacy RQ (max %d)\n",
+ params->sw_mtu, max_mtu);
+ return -EINVAL;
+ }
+ }
i = 0;
while (buf_size < byte_count) {
int frag_size = byte_count - buf_size;
- if (i < MLX5E_MAX_RX_FRAGS - 1)
+ if (i == 0)
+ frag_size = min(frag_size, first_frag_size_max);
+ else if (i < MLX5E_MAX_RX_FRAGS - 1)
frag_size = min(frag_size, frag_size_max);
info->arr[i].frag_size = frag_size;
- info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
-
buf_size += frag_size;
+
+ if (params->xdp_prog) {
+ /* XDP multi buffer expects fragments of the same size. */
+ info->arr[i].frag_stride = frag_size_max;
+ } else {
+ if (i == 0) {
+ /* Ensure that headroom and tailroom are included. */
+ frag_size += headroom;
+ frag_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
+ info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
+ }
+
i++;
}
info->num_frags = i;
@@ -437,6 +490,8 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
out:
info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
info->log_num_frags = order_base_2(info->num_frags);
+
+ return 0;
}
static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
@@ -533,6 +588,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
int ndsegs = 1;
+ int err;
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
@@ -572,7 +628,9 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
}
default: /* MLX5_WQ_TYPE_CYCLIC */
MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
- mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
+ err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
+ if (err)
+ return err;
ndsegs = param->frags_info.num_frags;
}
@@ -717,7 +775,7 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
u32 wqebbs;
- max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE;
+ max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
rest = max_hd_per_wqe % max_klm_per_umr;
@@ -774,10 +832,10 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
mlx5e_build_sq_param_common(mdev, param);
- param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
+ param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */
param->is_tls = mlx5e_accel_is_ktls_rx(mdev);
if (param->is_tls)
- param->stop_room += mlx5e_stop_room_for_wqe(1); /* for TLS RX resync NOP */
+ param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
@@ -785,6 +843,7 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
@@ -793,6 +852,7 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
+ param->is_xdp_mb = !mlx5e_rx_is_linear_skb(params, xsk);
mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
@@ -812,7 +872,7 @@ int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);
mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
- mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
+ mlx5e_build_xdpsq_param(mdev, params, NULL, &cparam->xdp_sq);
mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);
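A worked example of the new non-linear legacy-RQ sizing, assuming 4 KiB pages, MLX5E_MAX_RX_FRAGS == 4 and 256 B of headroom: first_frag_size_max = SKB_WITH_OVERHEAD(2048 - 256) = 1792 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), roughly 1472 B on x86-64. Without XDP, mlx5e_max_nonlinear_mtu() then allows 1472 + 2 * 2048 + 4096 = 9664 B, since the last fragment grows to a full page. With an XDP program, all fragments must share one stride, so the driver moves straight to page-sized fragments and recomputes the limit; an MTU beyond the recomputed bound now fails cleanly with -EINVAL instead of silently miscomputing the layout. Similarly, mlx5e_mpwqe_get_min_wqe_bulk() yields min(2, wq_sz / 2 - 1), i.e. a bulk of 2 UMR WQEs for any wq_sz >= 8.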
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 433e6967692d..f5c46e78eebc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -31,6 +31,7 @@ struct mlx5e_sq_param {
struct mlx5_wq_param wq;
bool is_mpw;
bool is_tls;
+ bool is_xdp_mb;
u16 stop_room;
};
@@ -129,6 +130,7 @@ u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
+u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz);
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
@@ -154,6 +156,7 @@ void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
struct mlx5e_cq_param *param);
void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
struct mlx5e_sq_param *param);
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 82baafd3c00c..335b20b6383b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -195,7 +195,6 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
int node;
sq->pdev = c->pdev;
- sq->tstamp = c->tstamp;
sq->clock = &mdev->clock;
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
@@ -449,7 +448,7 @@ static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev,
wq = MLX5_ADDR_OF(sqc, sqc, wq);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
- param->stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+ param->stop_room = mlx5e_stop_room_for_max_wqe(mdev);
mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index c1e07496c89c..9db677e9ca9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -50,7 +50,6 @@ static int mlx5e_find_unused_qos_qid(struct mlx5e_priv *priv)
struct mlx5e_qos_node {
struct hlist_node hnode;
- struct rcu_head rcu;
struct mlx5e_qos_node *parent;
u64 rate;
u32 bw_share;
@@ -132,7 +131,11 @@ static void mlx5e_sw_node_delete(struct mlx5e_priv *priv, struct mlx5e_qos_node
__clear_bit(node->qid, priv->htb.qos_used_qids);
mlx5e_update_tx_netdev_queues(priv);
}
- kfree_rcu(node, rcu);
+ /* Make sure this qid is no longer selected by mlx5e_select_queue, so
+ * that mlx5e_reactivate_qos_sq can safely restart the netdev TX queue.
+ */
+ synchronize_net();
+ kfree(node);
}
/* TX datapath API */
@@ -273,10 +276,18 @@ err_free_sq:
static void mlx5e_activate_qos_sq(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
{
struct mlx5e_txqsq *sq;
+ u16 qid;
sq = mlx5e_get_qos_sq(priv, node->qid);
- WRITE_ONCE(priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, node->qid)], sq);
+ qid = mlx5e_qid_from_qos(&priv->channels, node->qid);
+
+ /* If it's a new queue, it will be marked as started at this point.
+ * Stop it before updating txq2sq.
+ */
+ mlx5e_tx_disable_queue(netdev_get_tx_queue(priv->netdev, qid));
+
+ priv->txq2sq[qid] = sq;
/* Make the change to txq2sq visible before the queue is started.
* As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
@@ -299,8 +310,13 @@ static void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
qos_dbg(priv->mdev, "Deactivate QoS SQ qid %u\n", qid);
mlx5e_deactivate_txqsq(sq);
- /* The queue is disabled, no synchronization with datapath is needed. */
priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;
+
+ /* Make the change to txq2sq visible before the queue is started again.
+ * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
+ * which pairs with this barrier.
+ */
+ smp_wmb();
}
static void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
@@ -485,9 +501,11 @@ int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
if (opened) {
+ mlx5e_selq_prepare(&priv->selq, &priv->channels.params, true);
+
err = mlx5e_qos_alloc_queues(priv, &priv->channels);
if (err)
- return err;
+ goto err_cancel_selq;
}
root = mlx5e_sw_node_create_root(priv);
@@ -508,6 +526,9 @@ int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
*/
smp_store_release(&priv->htb.maj_id, htb_maj_id);
+ if (opened)
+ mlx5e_selq_apply(&priv->selq);
+
return 0;
err_sw_node_delete:
@@ -516,6 +537,8 @@ err_sw_node_delete:
err_free_queues:
if (opened)
mlx5e_qos_close_all_queues(&priv->channels);
+err_cancel_selq:
+ mlx5e_selq_cancel(&priv->selq);
return err;
}
@@ -526,8 +549,15 @@ int mlx5e_htb_root_del(struct mlx5e_priv *priv)
qos_dbg(priv->mdev, "TC_HTB_DESTROY\n");
+ /* Wait until real_num_tx_queues is updated for mlx5e_select_queue,
+ * so that we can safely switch to its non-HTB non-PTP fastpath.
+ */
+ synchronize_net();
+
+ mlx5e_selq_prepare(&priv->selq, &priv->channels.params, false);
+ mlx5e_selq_apply(&priv->selq);
+
WRITE_ONCE(priv->htb.maj_id, 0);
- synchronize_rcu(); /* Sync with mlx5e_select_htb_queue and TX data path. */
root = mlx5e_sw_node_find(priv, MLX5E_HTB_CLASSID_ROOT);
if (!root) {
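The txq2sq ordering above follows the standard publish pattern: make the pointer visible, then start the queue; the datapath relies on the ACQUIRE implied by taking the txq spinlock in mlx5e_xmit. Distilled into a sketch (not verbatim driver code):

	static void example_publish_sq(struct mlx5e_priv *priv, u16 qid,
				       struct mlx5e_txqsq *sq)
	{
		priv->txq2sq[qid] = sq;		/* publish */
		smp_wmb();			/* pairs with the xmit-side ACQUIRE */
		netif_tx_start_queue(netdev_get_tx_queue(priv->netdev, qid));
	}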
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
index b7558907ba20..5d9bd91d86c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
@@ -18,7 +18,6 @@ int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv);
/* TX datapath API */
int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid);
-struct mlx5e_txqsq *mlx5e_get_sq(struct mlx5e_priv *priv, int qid);
/* SQ lifecycle */
int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 0991345c4ae5..86fa0bdbee36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -263,14 +263,14 @@ int mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv)
INIT_LIST_HEAD(&uplink_priv->unready_flows);
/* init shared tc flow table */
- err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
+ err = mlx5e_tc_esw_init(uplink_priv);
return err;
}
void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv)
{
/* delete shared tc flow table */
- mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
+ mlx5e_tc_esw_cleanup(&rpriv->uplink_priv);
mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index c1cdd8c2e37a..7f93426b88b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -442,7 +442,7 @@ int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
goto inner_tir;
err = mlx5e_tir_modify(tir, builder);
if (err) {
- mlx5e_rss_warn(rss->mdev, "Failed to update LRO state of indirect TIR %#x for traffic type %d: err = %d\n",
+ mlx5e_rss_warn(rss->mdev, "Failed to update packet merge state of indirect TIR %#x for traffic type %d: err = %d\n",
mlx5e_tir_get_tirn(tir), tt, err);
if (!final_err)
final_err = err;
@@ -457,7 +457,7 @@ inner_tir:
continue;
err = mlx5e_tir_modify(tir, builder);
if (err) {
- mlx5e_rss_warn(rss->mdev, "Failed to update LRO state of inner indirect TIR %#x for traffic type %d: err = %d\n",
+ mlx5e_rss_warn(rss->mdev, "Failed to update packet merge state of inner indirect TIR %#x for traffic type %d: err = %d\n",
mlx5e_tir_get_tirn(tir), tt, err);
if (!final_err)
final_err = err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
new file mode 100644
index 000000000000..d98a277eb7f8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "selq.h"
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/rcupdate.h>
+#include "en.h"
+#include "en/ptp.h"
+
+struct mlx5e_selq_params {
+ unsigned int num_regular_queues;
+ unsigned int num_channels;
+ unsigned int num_tcs;
+ union {
+ u8 is_special_queues;
+ struct {
+ bool is_htb : 1;
+ bool is_ptp : 1;
+ };
+ };
+};
+
+int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
+{
+ struct mlx5e_selq_params *init_params;
+
+ selq->state_lock = state_lock;
+
+ selq->standby = kvzalloc(sizeof(*selq->standby), GFP_KERNEL);
+ if (!selq->standby)
+ return -ENOMEM;
+
+ init_params = kvzalloc(sizeof(*selq->active), GFP_KERNEL);
+ if (!init_params) {
+ kvfree(selq->standby);
+ selq->standby = NULL;
+ return -ENOMEM;
+ }
+ /* Assign dummy values, so that mlx5e_select_queue won't crash. */
+ *init_params = (struct mlx5e_selq_params) {
+ .num_regular_queues = 1,
+ .num_channels = 1,
+ .num_tcs = 1,
+ .is_htb = false,
+ .is_ptp = false,
+ };
+ rcu_assign_pointer(selq->active, init_params);
+
+ return 0;
+}
+
+void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
+{
+ WARN_ON_ONCE(selq->is_prepared);
+
+ kvfree(selq->standby);
+ selq->standby = NULL;
+ selq->is_prepared = true;
+
+ mlx5e_selq_apply(selq);
+
+ kvfree(selq->standby);
+ selq->standby = NULL;
+}
+
+void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb)
+{
+ lockdep_assert_held(selq->state_lock);
+ WARN_ON_ONCE(selq->is_prepared);
+
+ selq->is_prepared = true;
+
+ selq->standby->num_channels = params->num_channels;
+ selq->standby->num_tcs = mlx5e_get_dcb_num_tc(params);
+ selq->standby->num_regular_queues =
+ selq->standby->num_channels * selq->standby->num_tcs;
+ selq->standby->is_htb = htb;
+ selq->standby->is_ptp = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS);
+}
+
+void mlx5e_selq_apply(struct mlx5e_selq *selq)
+{
+ struct mlx5e_selq_params *old_params;
+
+ WARN_ON_ONCE(!selq->is_prepared);
+
+ selq->is_prepared = false;
+
+ old_params = rcu_replace_pointer(selq->active, selq->standby,
+ lockdep_is_held(selq->state_lock));
+ synchronize_net(); /* Wait until ndo_select_queue starts emitting correct values. */
+ selq->standby = old_params;
+}
+
+void mlx5e_selq_cancel(struct mlx5e_selq *selq)
+{
+ lockdep_assert_held(selq->state_lock);
+ WARN_ON_ONCE(!selq->is_prepared);
+
+ selq->is_prepared = false;
+}
+
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+static int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
+{
+ int dscp_cp = 0;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+
+ return priv->dcbx_dp.dscp2prio[dscp_cp];
+}
+#endif
+
+static int mlx5e_get_up(struct mlx5e_priv *priv, struct sk_buff *skb)
+{
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ if (READ_ONCE(priv->dcbx_dp.trust_state) == MLX5_QPTS_TRUST_DSCP)
+ return mlx5e_get_dscp_up(priv, skb);
+#endif
+ if (skb_vlan_tag_present(skb))
+ return skb_vlan_tag_get_prio(skb);
+ return 0;
+}
+
+static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb,
+ struct mlx5e_selq_params *selq)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int up;
+
+ up = selq->num_tcs > 1 ? mlx5e_get_up(priv, skb) : 0;
+
+ return selq->num_regular_queues + up;
+}
+
+static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb)
+{
+ u16 classid;
+
+ /* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
+ if ((TC_H_MAJ(skb->priority) >> 16) == smp_load_acquire(&priv->htb.maj_id))
+ classid = TC_H_MIN(skb->priority);
+ else
+ classid = READ_ONCE(priv->htb.defcls);
+
+ if (!classid)
+ return 0;
+
+ return mlx5e_get_txq_by_classid(priv, classid);
+}
+
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_selq_params *selq;
+ int txq_ix, up;
+
+ selq = rcu_dereference_bh(priv->selq.active);
+
+ /* This is a workaround needed only for the mlx5e_netdev_change_profile
+ * flow that zeroes out the whole priv without unregistering the netdev
+ * and without preventing ndo_select_queue from being called.
+ */
+ if (unlikely(!selq))
+ return 0;
+
+ if (likely(!selq->is_special_queues)) {
+ /* No special queues, netdev_pick_tx returns one of the regular ones. */
+
+ txq_ix = netdev_pick_tx(dev, skb, NULL);
+
+ if (selq->num_tcs <= 1)
+ return txq_ix;
+
+ up = mlx5e_get_up(priv, skb);
+
+ /* Normalize any picked txq_ix to [0, num_channels),
+ * so we can return a txq_ix that matches the channel and
+ * the packet's UP.
+ */
+ return mlx5e_txq_to_ch_ix(txq_ix, selq->num_channels) +
+ up * selq->num_channels;
+ }
+
+ if (unlikely(selq->is_htb)) {
+ /* num_tcs == 1 when HTB is enabled, so the PTP SQ index below is simply num_channels. */
+
+ txq_ix = mlx5e_select_htb_queue(priv, skb);
+ if (txq_ix > 0)
+ return txq_ix;
+
+ if (unlikely(selq->is_ptp && mlx5e_use_ptpsq(skb)))
+ return selq->num_channels;
+
+ txq_ix = netdev_pick_tx(dev, skb, NULL);
+
+ /* netdev_pick_tx() must not choose the ptp_channel or HTB txqs;
+ * if it does, switch to a regular queue. The driver selects those
+ * queues only in mlx5e_select_ptpsq() and mlx5e_select_htb_queue().
+ */
+ return mlx5e_txq_to_ch_ix_htb(txq_ix, selq->num_channels);
+ }
+
+ /* PTP is enabled */
+
+ if (mlx5e_use_ptpsq(skb))
+ return mlx5e_select_ptpsq(dev, skb, selq);
+
+ txq_ix = netdev_pick_tx(dev, skb, NULL);
+
+ /* Normalize any picked txq_ix to [0, num_channels). Queues in range
+ * [0, num_regular_queues) will be mapped to the corresponding channel
+ * index, so that we can apply the packet's UP (if num_tcs > 1).
+ * If netdev_pick_tx() picks ptp_channel, switch to a regular queue,
+ * because driver should select the PTP only at mlx5e_select_ptpsq().
+ */
+ txq_ix = mlx5e_txq_to_ch_ix(txq_ix, selq->num_channels);
+
+ if (selq->num_tcs <= 1)
+ return txq_ix;
+
+ up = mlx5e_get_up(priv, skb);
+
+ return txq_ix + up * selq->num_channels;
+}
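selq is a double-buffered parameter set: mlx5e_selq_prepare() fills the standby copy under state_lock, mlx5e_selq_apply() RCU-swaps it into ->active and waits for in-flight ndo_select_queue calls via synchronize_net(), and mlx5e_selq_cancel() abandons a staged copy. A hedged control-path sketch (the reconfigure step is illustrative):

	static int example_switch_params(struct mlx5e_priv *priv,
					 struct mlx5e_params *new_params, bool htb)
	{
		int err;

		mlx5e_selq_prepare(&priv->selq, new_params, htb);

		err = example_reconfigure(priv, new_params); /* illustrative */
		if (err) {
			mlx5e_selq_cancel(&priv->selq);	/* drop the staged copy */
			return err;
		}

		mlx5e_selq_apply(&priv->selq);	/* swap + synchronize_net() */
		return 0;
	}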
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h
new file mode 100644
index 000000000000..6c070141d8f1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_SELQ_H__
+#define __MLX5_EN_SELQ_H__
+
+#include <linux/kernel.h>
+
+struct mlx5e_selq_params;
+
+struct mlx5e_selq {
+ struct mlx5e_selq_params __rcu *active;
+ struct mlx5e_selq_params *standby;
+ struct mutex *state_lock; /* points to priv->state_lock */
+ bool is_prepared;
+};
+
+struct mlx5e_params;
+struct net_device;
+struct sk_buff;
+
+int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock);
+void mlx5e_selq_cleanup(struct mlx5e_selq *selq);
+void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb);
+void mlx5e_selq_apply(struct mlx5e_selq *selq);
+void mlx5e_selq_cancel(struct mlx5e_selq *selq);
+
+static inline u16 mlx5e_txq_to_ch_ix(u16 txq, u16 num_channels)
+{
+ while (unlikely(txq >= num_channels))
+ txq -= num_channels;
+ return txq;
+}
+
+static inline u16 mlx5e_txq_to_ch_ix_htb(u16 txq, u16 num_channels)
+{
+ if (unlikely(txq >= num_channels)) {
+ if (unlikely(txq >= num_channels << 3))
+ txq %= num_channels;
+ else
+ do
+ txq -= num_channels;
+ while (txq >= num_channels);
+ }
+ return txq;
+}
+
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev);
+
+#endif /* __MLX5_EN_SELQ_H__ */
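A worked example for mlx5e_txq_to_ch_ix_htb(): with num_channels = 5, txq = 13 normalizes by two subtractions (13 -> 8 -> 3). Regular (non-HTB) txqs are bounded by num_tcs * num_channels, and num_tcs is at most 8 (MLX5E_MAX_NUM_TC), so the subtraction loop runs at most about 8 times; only genuinely out-of-range values (txq >= num_channels << 3, i.e. HTB qids) pay for the integer modulo.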
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
index b0de6b999675..21aab96357b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
@@ -7,7 +7,8 @@
static bool
tc_act_can_offload_accept(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
return true;
}
@@ -18,9 +19,8 @@ tc_act_parse_accept(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->flags |= MLX5_ATTR_FLAG_ACCEPT;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
index e600924e30ea..af37a8d247a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -2,6 +2,7 @@
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
+#include "en/tc/post_act.h"
#include "en/tc_priv.h"
#include "mlx5_core.h"
@@ -34,6 +35,13 @@ static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
NULL, /* FLOW_ACTION_CT_METADATA, */
&mlx5e_tc_act_mpls_push,
&mlx5e_tc_act_mpls_pop,
+ NULL, /* FLOW_ACTION_MPLS_MANGLE, */
+ NULL, /* FLOW_ACTION_GATE, */
+ NULL, /* FLOW_ACTION_PPPOE_PUSH, */
+ NULL, /* FLOW_ACTION_JUMP, */
+ NULL, /* FLOW_ACTION_PIPE, */
+ &mlx5e_tc_act_vlan,
+ &mlx5e_tc_act_vlan,
};
/* Must be aligned with enum flow_action_id. */
@@ -101,3 +109,75 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
parse_state->num_actions = flow_action->num_entries;
parse_state->extack = extack;
}
+
+void
+mlx5e_tc_act_reorder_flow_actions(struct flow_action *flow_action,
+ struct mlx5e_tc_flow_action *flow_action_reorder)
+{
+ struct flow_action_entry *act;
+ int i, j = 0;
+
+ flow_action_for_each(i, act, flow_action) {
+ /* Add CT action to be first. */
+ if (act->id == FLOW_ACTION_CT)
+ flow_action_reorder->entries[j++] = act;
+ }
+
+ flow_action_for_each(i, act, flow_action) {
+ if (act->id == FLOW_ACTION_CT)
+ continue;
+ flow_action_reorder->entries[j++] = act;
+ }
+}
+
+int
+mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
+ struct flow_action *flow_action,
+ struct mlx5_flow_attr *attr,
+ enum mlx5_flow_namespace_type ns_type)
+{
+ struct flow_action_entry *act;
+ struct mlx5e_tc_act *tc_act;
+ struct mlx5e_priv *priv;
+ int err = 0, i;
+
+ priv = parse_state->flow->priv;
+
+ flow_action_for_each(i, act, flow_action) {
+ tc_act = mlx5e_tc_act_get(act->id, ns_type);
+ if (!tc_act || !tc_act->post_parse ||
+ !tc_act->can_offload(parse_state, act, i, attr))
+ continue;
+
+ err = tc_act->post_parse(parse_state, priv, attr);
+ if (err)
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+int
+mlx5e_tc_act_set_next_post_act(struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_attr *next_attr)
+{
+ struct mlx5_core_dev *mdev = flow->priv->mdev;
+ struct mlx5e_tc_mod_hdr_acts *mod_acts;
+ int err;
+
+ mod_acts = &attr->parse_attr->mod_hdr_acts;
+
+ /* Set handle on current post act rule to next post act rule. */
+ err = mlx5e_tc_post_act_set_handle(mdev, next_attr->post_act_handle, mod_acts);
+ if (err) {
+ mlx5_core_warn(mdev, "Failed setting post action handle");
+ return err;
+ }
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ return 0;
+}
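mlx5e_tc_act_reorder_flow_actions() above is a stable two-pass partition: CT entries keep their relative order at the front and everything else follows in the original order, e.g. [pedit, ct, mirred, ct] becomes [ct, ct, pedit, mirred].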
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index 9cc844bd00f5..f34714c5ddd4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -16,14 +16,17 @@ struct mlx5e_tc_act_parse_state {
unsigned int num_actions;
struct mlx5e_tc_flow *flow;
struct netlink_ext_ack *extack;
+ u32 actions;
+ bool ct;
bool ct_clear;
bool encap;
bool decap;
bool mpls_push;
+ bool eth_push;
+ bool eth_pop;
bool ptype_host;
const struct ip_tunnel_info *tun_info;
struct mlx5e_mpls_info mpls_info;
- struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
int if_count;
struct mlx5_tc_ct_priv *ct_priv;
@@ -32,7 +35,8 @@ struct mlx5e_tc_act_parse_state {
struct mlx5e_tc_act {
bool (*can_offload)(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index);
+ int act_index,
+ struct mlx5_flow_attr *attr);
int (*parse_action)(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -42,6 +46,15 @@ struct mlx5e_tc_act {
int (*post_parse)(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr);
+
+ bool (*is_multi_table_act)(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5_flow_attr *attr);
+};
+
+struct mlx5e_tc_flow_action {
+ unsigned int num_entries;
+ struct flow_action_entry **entries;
};
extern struct mlx5e_tc_act mlx5e_tc_act_drop;
@@ -74,4 +87,19 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
struct flow_action *flow_action,
struct netlink_ext_ack *extack);
+void
+mlx5e_tc_act_reorder_flow_actions(struct flow_action *flow_action,
+ struct mlx5e_tc_flow_action *flow_action_reorder);
+
+int
+mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
+ struct flow_action *flow_action,
+ struct mlx5_flow_attr *attr,
+ enum mlx5_flow_namespace_type ns_type);
+
+int
+mlx5e_tc_act_set_next_post_act(struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_attr *next_attr);
+
#endif /* __MLX5_EN_TC_ACT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c
index 29920ef0180a..c0f08ae6a57f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c
@@ -38,11 +38,12 @@ csum_offload_supported(struct mlx5e_priv *priv,
static bool
tc_act_can_offload_csum(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct mlx5e_tc_flow *flow = parse_state->flow;
- return csum_offload_supported(flow->priv, flow->attr->action,
+ return csum_offload_supported(flow->priv, attr->action,
act->csum_flags, parse_state->extack);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
index 58cc33f1363d..b9d38fe807df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -8,13 +8,14 @@
static bool
tc_act_can_offload_ct(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
+ bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
struct netlink_ext_ack *extack = parse_state->extack;
- if (flow_flag_test(parse_state->flow, SAMPLE)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Sample action with connection tracking is not supported");
+ if (parse_state->ct && !clear_action) {
+ NL_SET_ERR_MSG_MOD(extack, "Multiple CT actions are not supported");
return false;
}
@@ -40,18 +41,34 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
if (err)
return err;
- flow_flag_set(parse_state->flow, CT);
if (mlx5e_is_eswitch_flow(parse_state->flow))
attr->esw_attr->split_count = attr->esw_attr->out_count;
+ if (!clear_action) {
+ attr->flags |= MLX5_ATTR_FLAG_CT;
+ flow_flag_set(parse_state->flow, CT);
+ parse_state->ct = true;
+ }
parse_state->ct_clear = clear_action;
return 0;
}
+static bool
+tc_act_is_multi_table_act_ct(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5_flow_attr *attr)
+{
+ if (act->ct.action & TCA_CT_ACT_CLEAR)
+ return false;
+
+ return true;
+}
+
struct mlx5e_tc_act mlx5e_tc_act_ct = {
.can_offload = tc_act_can_offload_ct,
.parse_action = tc_act_parse_ct,
+ .is_multi_table_act = tc_act_is_multi_table_act_ct,
};
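The new is_multi_table_act() callback flags actions whose offload spans more than one flow table (CT here, sample below), so the parser can split the rule across post-action tables. A sketch of a consumer, assuming the real call site lives in en_tc.c outside this hunk:

	static int example_count_multi_table_acts(struct mlx5e_priv *priv,
						  struct flow_action *flow_action,
						  struct mlx5_flow_attr *attr,
						  enum mlx5_flow_namespace_type ns_type)
	{
		struct flow_action_entry *act;
		struct mlx5e_tc_act *tc_act;
		int i, n = 0;

		flow_action_for_each(i, act, flow_action) {
			tc_act = mlx5e_tc_act_get(act->id, ns_type);
			if (tc_act && tc_act->is_multi_table_act &&
			    tc_act->is_multi_table_act(priv, act, attr))
				n++;	/* this action ends its flow table */
		}
		return n;
	}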
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
index 2e29a23bed12..dd025a95c439 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
@@ -7,7 +7,8 @@
static bool
tc_act_can_offload_drop(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
return true;
}
@@ -18,8 +19,7 @@ tc_act_parse_drop(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
index f44515061228..4726bcb46eec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
@@ -8,6 +8,7 @@
static int
validate_goto_chain(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
@@ -32,7 +33,7 @@ validate_goto_chain(struct mlx5e_priv *priv,
}
if (!mlx5_chains_backwards_supported(chains) &&
- dest_chain <= flow->attr->chain) {
+ dest_chain <= attr->chain) {
NL_SET_ERR_MSG_MOD(extack, "Goto lower numbered chain isn't supported");
return -EOPNOTSUPP;
}
@@ -43,8 +44,8 @@ validate_goto_chain(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (flow->attr->action & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
- MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
+ if (attr->action & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
!reformat_and_fwd) {
NL_SET_ERR_MSG_MOD(extack,
"Goto chain is not allowed if action has reformat or decap");
@@ -57,12 +58,13 @@ validate_goto_chain(struct mlx5e_priv *priv,
static bool
tc_act_can_offload_goto(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
- if (validate_goto_chain(flow->priv, flow, act, extack))
+ if (validate_goto_chain(flow->priv, flow, attr, act, extack))
return false;
return true;
@@ -74,8 +76,7 @@ tc_act_parse_goto(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
attr->dest_chain = act->chain_index;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c
index d775c3d9edf3..e8d227595b3e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c
@@ -7,7 +7,8 @@
static bool
tc_act_can_offload_mark(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
if (act->mark & ~MLX5E_TC_FLOW_ID_MASK) {
NL_SET_ERR_MSG_MOD(parse_state->extack, "Bad flow mark, only 16 bit supported");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
index 2e615e0ba972..2b002c6a2e73 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -99,7 +99,8 @@ get_fdb_out_dev(struct net_device *uplink_dev, struct net_device *out_dev)
static bool
tc_act_can_offload_mirred(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
@@ -108,8 +109,8 @@ tc_act_can_offload_mirred(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv = flow->priv;
struct mlx5_esw_flow_attr *esw_attr;
- parse_attr = flow->attr->parse_attr;
- esw_attr = flow->attr->esw_attr;
+ parse_attr = attr->parse_attr;
+ esw_attr = attr->esw_attr;
if (!out_dev) {
/* out_dev is NULL when filters with
@@ -124,6 +125,16 @@ tc_act_can_offload_mirred(struct mlx5e_tc_act_parse_state *parse_state,
return false;
}
+ if (parse_state->eth_pop && !parse_state->mpls_push) {
+ NL_SET_ERR_MSG_MOD(extack, "vlan pop eth is supported only with mpls push");
+ return false;
+ }
+
+ if (flow_flag_test(parse_state->flow, L3_TO_L2_DECAP) && !parse_state->eth_push) {
+ NL_SET_ERR_MSG_MOD(extack, "mpls pop is only supported with vlan eth push");
+ return false;
+ }
+
if (mlx5e_is_ft_flow(flow) && out_dev == priv->netdev) {
/* Ignore forward to self rules generated
* by adding both mlx5 devs to the flow table
@@ -301,8 +312,7 @@ tc_act_parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
if (err)
return err;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
index 2c74567b6d25..90b4c1b34776 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
@@ -7,7 +7,8 @@
static bool
tc_act_can_offload_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
@@ -39,8 +40,7 @@ tc_act_parse_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state,
{
attr->parse_attr->mirred_ifindex[0] = act->dev->ifindex;
flow_flag_set(parse_state->flow, HAIRPIN);
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
index 89ca88c78840..f106190bf37c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
@@ -8,7 +8,8 @@
static bool
tc_act_can_offload_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_priv *priv = parse_state->flow->priv;
@@ -47,21 +48,22 @@ tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
static bool
tc_act_can_offload_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
- struct mlx5e_tc_flow *flow = parse_state->flow;
struct net_device *filter_dev;
- filter_dev = flow->attr->parse_attr->filter_dev;
+ filter_dev = attr->parse_attr->filter_dev;
/* we only support mpls pop if it is the first action
+ * or the second action after a tunnel key unset,
* and the filter net device is bareudp. Subsequent
* actions can be pedit and the last can be mirred
* egress redirect.
*/
- if (act_index) {
- NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only as first action");
+ if ((act_index == 1 && !parse_state->decap) || act_index > 1) {
+ NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only as first action or with decap");
return false;
}
@@ -79,7 +81,7 @@ tc_act_parse_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- attr->parse_attr->eth.h_proto = act->mpls_pop.proto;
+ attr->esw_attr->eth.h_proto = act->mpls_pop.proto;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
flow_flag_set(parse_state->flow, L3_TO_L2_DECAP);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
index 79addbbef087..47597c524e59 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
@@ -42,12 +42,11 @@ out_err:
return -EOPNOTSUPP;
}
-static int
-parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
- const struct flow_action_entry *act, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
- struct netlink_ext_ack *extack)
+int
+mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act, int namespace,
+ struct pedit_headers_action *hdrs,
+ struct netlink_ext_ack *extack)
{
u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
u8 htype = act->mangle.htype;
@@ -79,51 +78,11 @@ out_err:
return err;
}
-static int
-parse_pedit_to_reformat(const struct flow_action_entry *act,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct netlink_ext_ack *extack)
-{
- u32 mask, val, offset;
- u32 *p;
-
- if (act->id != FLOW_ACTION_MANGLE) {
- NL_SET_ERR_MSG_MOD(extack, "Unsupported action id");
- return -EOPNOTSUPP;
- }
-
- if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
- NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
- return -EOPNOTSUPP;
- }
-
- mask = ~act->mangle.mask;
- val = act->mangle.val;
- offset = act->mangle.offset;
- p = (u32 *)&parse_attr->eth;
- *(p + (offset >> 2)) |= (val & mask);
-
- return 0;
-}
-
-int
-mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
- const struct flow_action_entry *act, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
-{
- if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
- return parse_pedit_to_reformat(act, parse_attr, extack);
-
- return parse_pedit_to_modify_hdr(priv, act, namespace, parse_attr, hdrs, extack);
-}
-
static bool
tc_act_can_offload_pedit(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
return true;
}
@@ -141,21 +100,16 @@ tc_act_parse_pedit(struct mlx5e_tc_act_parse_state *parse_state,
ns_type = mlx5e_get_flow_namespace(flow);
- err = mlx5e_tc_act_pedit_parse_action(flow->priv, act, ns_type,
- attr->parse_attr, parse_state->hdrs,
- flow, parse_state->extack);
+ err = mlx5e_tc_act_pedit_parse_action(flow->priv, act, ns_type, attr->parse_attr->hdrs,
+ parse_state->extack);
if (err)
return err;
- if (flow_flag_test(flow, L3_TO_L2_DECAP))
- goto out;
-
attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
esw_attr->split_count = esw_attr->out_count;
-out:
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h
index da8ab03af58f..434c8bd710a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h
@@ -24,9 +24,7 @@ struct pedit_headers_action {
int
mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
- struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack);
#endif /* __MLX5_EN_TC_ACT_PEDIT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c
index 0819110193dc..6454b031ff7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c
@@ -7,7 +7,8 @@
static bool
tc_act_can_offload_ptype(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
return true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
index 1c32e24e528d..ad09a8a5f36e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
@@ -7,16 +7,16 @@
static bool
tc_act_can_offload_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
- struct mlx5e_tc_flow *flow = parse_state->flow;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct net_device *out_dev = act->dev;
struct mlx5_esw_flow_attr *esw_attr;
- parse_attr = flow->attr->parse_attr;
- esw_attr = flow->attr->esw_attr;
+ parse_attr = attr->parse_attr;
+ esw_attr = attr->esw_attr;
if (!out_dev)
return false;
@@ -58,8 +58,7 @@ tc_act_parse_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
struct net_device *out_dev = act->dev;
int err;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
MLX5E_TC_INT_PORT_INGRESS,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
index 6699bdf5cf01..2c0196431302 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
@@ -4,17 +4,21 @@
#include <net/psample.h>
#include "act.h"
#include "en/tc_priv.h"
+#include "en/tc/act/sample.h"
static bool
tc_act_can_offload_sample(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
+ bool ct_nat;
- if (flow_flag_test(parse_state->flow, CT)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Sample action with connection tracking is not supported");
+ ct_nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
+
+ if (flow_flag_test(parse_state->flow, CT) && ct_nat) {
+ NL_SET_ERR_MSG_MOD(extack, "Sample action with CT NAT is not supported");
return false;
}
@@ -27,11 +31,7 @@ tc_act_parse_sample(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- struct mlx5e_sample_attr *sample_attr;
-
- sample_attr = kzalloc(sizeof(*attr->sample_attr), GFP_KERNEL);
- if (!sample_attr)
- return -ENOMEM;
+ struct mlx5e_sample_attr *sample_attr = &attr->sample_attr;
sample_attr->rate = act->sample.rate;
sample_attr->group_num = act->sample.psample_group->group_num;
@@ -39,13 +39,33 @@ tc_act_parse_sample(struct mlx5e_tc_act_parse_state *parse_state,
if (act->sample.truncate)
sample_attr->trunc_size = act->sample.trunc_size;
- attr->sample_attr = sample_attr;
+ attr->flags |= MLX5_ATTR_FLAG_SAMPLE;
flow_flag_set(parse_state->flow, SAMPLE);
return 0;
}
+bool
+mlx5e_tc_act_sample_is_multi_table(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_attr *attr)
+{
+ if (MLX5_CAP_GEN(mdev, reg_c_preserve) ||
+ attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
+ return true;
+
+ return false;
+}
+
+static bool
+tc_act_is_multi_table_act_sample(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5_flow_attr *attr)
+{
+ return mlx5e_tc_act_sample_is_multi_table(priv->mdev, attr);
+}
+
struct mlx5e_tc_act mlx5e_tc_act_sample = {
.can_offload = tc_act_can_offload_sample,
.parse_action = tc_act_parse_sample,
+ .is_multi_table_act = tc_act_is_multi_table_act_sample,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.h
new file mode 100644
index 000000000000..3efb3a15c5d2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_TC_ACT_SAMPLE_H__
+#define __MLX5_EN_TC_ACT_SAMPLE_H__
+
+#include <net/flow_offload.h>
+#include "en/tc_priv.h"
+
+bool
+mlx5e_tc_act_sample_is_multi_table(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_attr *attr);
+
+#endif /* __MLX5_EN_TC_ACT_SAMPLE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
index 046b64c2cec4..a7d9eab19e4a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
@@ -7,7 +7,8 @@
static bool
tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
@@ -25,9 +26,8 @@ tc_act_parse_trap(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c
index 6f4a2cf46afd..b4fa2de9711d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c
@@ -8,7 +8,8 @@
static bool
tc_act_can_offload_tun_encap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
if (!act->tunnel) {
NL_SET_ERR_MSG_MOD(parse_state->extack,
@@ -34,7 +35,8 @@ tc_act_parse_tun_encap(struct mlx5e_tc_act_parse_state *parse_state,
static bool
tc_act_can_offload_tun_decap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
return true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
index 70fc0c2d8813..b86ac604d0c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
@@ -9,7 +9,6 @@
static int
add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
u32 *action, struct netlink_ext_ack *extack)
{
const struct flow_action_entry prio_tag_act = {
@@ -26,7 +25,7 @@ add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
};
return mlx5e_tc_act_vlan_add_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
- &prio_tag_act, parse_attr, hdrs, action,
+ &prio_tag_act, parse_attr, action,
extack);
}
@@ -35,7 +34,8 @@ parse_tc_vlan_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_esw_flow_attr *attr,
u32 *action,
- struct netlink_ext_ack *extack)
+ struct netlink_ext_ack *extack,
+ struct mlx5e_tc_act_parse_state *parse_state)
{
u8 vlan_idx = attr->total_vlan;
@@ -85,6 +85,16 @@ parse_tc_vlan_action(struct mlx5e_priv *priv,
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
}
break;
+ case FLOW_ACTION_VLAN_POP_ETH:
+ parse_state->eth_pop = true;
+ break;
+ case FLOW_ACTION_VLAN_PUSH_ETH:
+ if (!flow_flag_test(parse_state->flow, L3_TO_L2_DECAP))
+ return -EOPNOTSUPP;
+ parse_state->eth_push = true;
+ memcpy(attr->eth.h_dest, act->vlan_push_eth.dst, ETH_ALEN);
+ memcpy(attr->eth.h_source, act->vlan_push_eth.src, ETH_ALEN);
+ break;
default:
NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN");
return -EINVAL;
@@ -110,7 +120,7 @@ mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv,
};
int err;
- err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, extack);
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, extack, NULL);
if (err)
return err;
@@ -140,7 +150,7 @@ mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv,
priv->netdev->lower_level;
while (nest_level--) {
err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action,
- extack);
+ extack, NULL);
if (err)
return err;
}
@@ -151,7 +161,8 @@ mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv,
static bool
tc_act_can_offload_vlan(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
return true;
}
@@ -170,11 +181,11 @@ tc_act_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
/* Replace vlan pop+push with vlan modify */
attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
err = mlx5e_tc_act_vlan_add_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB, act,
- attr->parse_attr, parse_state->hdrs,
- &attr->action, parse_state->extack);
+ attr->parse_attr, &attr->action,
+ parse_state->extack);
} else {
err = parse_tc_vlan_action(priv, act, esw_attr, &attr->action,
- parse_state->extack);
+ parse_state->extack, parse_state);
}
if (err)
@@ -191,7 +202,6 @@ tc_act_post_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5_flow_attr *attr)
{
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
- struct pedit_headers_action *hdrs = parse_state->hdrs;
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int err;
@@ -202,7 +212,7 @@ tc_act_post_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
* tag rewrite.
*/
attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
- err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
+ err = add_vlan_prio_tag_rewrite_action(priv, parse_attr,
&attr->action, extack);
if (err)
return err;
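
The new FLOW_ACTION_VLAN_POP_ETH / FLOW_ACTION_VLAN_PUSH_ETH cases only record intent in the parse state and stash the new MAC addresses; nothing is emitted until the whole action list has been walked. A standalone sketch of that record-now, apply-later shape (simplified stand-in types, not the driver's structs):

```c
#include <string.h>
#include <stdbool.h>
#include <errno.h>

#define ETH_ALEN 6

struct eth_hdr { unsigned char h_dest[ETH_ALEN], h_source[ETH_ALEN]; };
struct parse_state { bool eth_pop, eth_push; };

enum vlan_act_id { VLAN_POP_ETH, VLAN_PUSH_ETH };

struct vlan_act_entry {
	enum vlan_act_id id;
	unsigned char dst[ETH_ALEN], src[ETH_ALEN];
};

/* Record the ETH pop/push request; the actual rewrite is emitted later,
 * once the whole action list has been walked.
 */
static int record_eth_action(struct parse_state *ps, struct eth_hdr *eth,
			     const struct vlan_act_entry *act,
			     bool l3_to_l2_decap)
{
	switch (act->id) {
	case VLAN_POP_ETH:
		ps->eth_pop = true;
		return 0;
	case VLAN_PUSH_ETH:
		/* Only meaningful while rebuilding L2 after an L3 decap */
		if (!l3_to_l2_decap)
			return -EOPNOTSUPP;
		ps->eth_push = true;
		memcpy(eth->h_dest, act->dst, ETH_ALEN);
		memcpy(eth->h_source, act->src, ETH_ALEN);
		return 0;
	}
	return -EINVAL;
}
```
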
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h
index 3d62f13ab61f..2fa58c6f44eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h
@@ -24,7 +24,6 @@ int
mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
const struct flow_action_entry *act,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
u32 *action, struct netlink_ext_ack *extack);
#endif /* __MLX5_EN_TC_ACT_VLAN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
index 63e36e7f53e3..9a8a1a6bd99e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
@@ -12,7 +12,6 @@ int
mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
const struct flow_action_entry *act,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
u32 *action, struct netlink_ext_ack *extack)
{
u16 mask16 = VLAN_VID_MASK;
@@ -44,8 +43,8 @@ mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
return -EOPNOTSUPP;
}
- err = mlx5e_tc_act_pedit_parse_action(priv, &pedit_act, namespace, parse_attr, hdrs,
- NULL, extack);
+ err = mlx5e_tc_act_pedit_parse_action(priv, &pedit_act, namespace, parse_attr->hdrs,
+ extack);
*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
return err;
@@ -54,7 +53,8 @@ mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
static bool
tc_act_can_offload_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
return true;
}
@@ -69,8 +69,7 @@ tc_act_parse_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
int err;
ns_type = mlx5e_get_flow_namespace(parse_state->flow);
- err = mlx5e_tc_act_vlan_add_rewrite_action(priv, ns_type, act,
- attr->parse_attr, parse_state->hdrs,
+ err = mlx5e_tc_act_vlan_add_rewrite_action(priv, ns_type, act, attr->parse_attr,
&attr->action, parse_state->extack);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
new file mode 100644
index 000000000000..bb6b1a979ba1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5_EN_TC_CT_FS_H__
+#define __MLX5_EN_TC_CT_FS_H__
+
+struct mlx5_ct_fs {
+ const struct net_device *netdev;
+ struct mlx5_core_dev *dev;
+
+ /* private data */
+ void *priv_data[];
+};
+
+struct mlx5_ct_fs_rule {
+};
+
+struct mlx5_ct_fs_ops {
+ int (*init)(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
+ struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct);
+ void (*destroy)(struct mlx5_ct_fs *fs);
+
+ struct mlx5_ct_fs_rule * (*ct_rule_add)(struct mlx5_ct_fs *fs,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr,
+ struct flow_rule *flow_rule);
+ void (*ct_rule_del)(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule);
+
+ size_t priv_size;
+};
+
+static inline void *mlx5_ct_fs_priv(struct mlx5_ct_fs *fs)
+{
+ return &fs->priv_data;
+}
+
+struct mlx5_ct_fs_ops *mlx5_ct_fs_dmfs_ops_get(void);
+
+#if IS_ENABLED(CONFIG_MLX5_SW_STEERING)
+struct mlx5_ct_fs_ops *mlx5_ct_fs_smfs_ops_get(void);
+#else
+static inline struct mlx5_ct_fs_ops *
+mlx5_ct_fs_smfs_ops_get(void)
+{
+ return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_MLX5_SW_STEERING) */
+
+#endif /* __MLX5_EN_TC_CT_FS_H__ */
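
`priv_size` together with the `priv_data[]` flexible array member lets the core make one allocation that carries a backend's private state, which `mlx5_ct_fs_priv()` then exposes. A standalone sketch of the pattern (illustrative names, userspace allocator):

```c
#include <stdlib.h>
#include <stddef.h>

struct fs_base {
	void *dev;
	void *priv_data[];	/* backend-private storage follows the struct */
};

struct smfs_priv {		/* one backend's private state */
	int num_matchers;
};

static struct fs_base *fs_alloc(size_t priv_size)
{
	/* A single allocation covers the base struct plus the backend part,
	 * like kzalloc(sizeof(*fs) + fs_ops->priv_size) in the ct code.
	 */
	return calloc(1, sizeof(struct fs_base) + priv_size);
}

static void *fs_priv(struct fs_base *fs)
{
	return &fs->priv_data;	/* points just past the base struct */
}
```

A caller would allocate with `fs_alloc(sizeof(struct smfs_priv))` and then treat `fs_priv(fs)` as its `struct smfs_priv *`, with no second allocation to track or free.
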
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c
new file mode 100644
index 000000000000..ae4f55be48ce
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#include "en_tc.h"
+#include "en/tc_ct.h"
+#include "en/tc/ct_fs.h"
+
+#define ct_dbg(fmt, args...)\
+ netdev_dbg(fs->netdev, "ct_fs_dmfs debug: " fmt "\n", ##args)
+
+struct mlx5_ct_fs_dmfs_rule {
+ struct mlx5_ct_fs_rule fs_rule;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_attr *attr;
+};
+
+static int
+mlx5_ct_fs_dmfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
+ struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct)
+{
+ return 0;
+}
+
+static void
+mlx5_ct_fs_dmfs_destroy(struct mlx5_ct_fs *fs)
+{
+}
+
+static struct mlx5_ct_fs_rule *
+mlx5_ct_fs_dmfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
+{
+ struct mlx5e_priv *priv = netdev_priv(fs->netdev);
+ struct mlx5_ct_fs_dmfs_rule *dmfs_rule;
+ int err;
+
+ dmfs_rule = kzalloc(sizeof(*dmfs_rule), GFP_KERNEL);
+ if (!dmfs_rule)
+ return ERR_PTR(-ENOMEM);
+
+ dmfs_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
+ if (IS_ERR(dmfs_rule->rule)) {
+ err = PTR_ERR(dmfs_rule->rule);
+ ct_dbg("Failed to add ct entry fs rule");
+ goto err_insert;
+ }
+
+ dmfs_rule->attr = attr;
+
+ return &dmfs_rule->fs_rule;
+
+err_insert:
+ kfree(dmfs_rule);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_ct_fs_dmfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule)
+{
+ struct mlx5_ct_fs_dmfs_rule *dmfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_dmfs_rule,
+ fs_rule);
+
+ mlx5_tc_rule_delete(netdev_priv(fs->netdev), dmfs_rule->rule, dmfs_rule->attr);
+ kfree(dmfs_rule);
+}
+
+static struct mlx5_ct_fs_ops dmfs_ops = {
+ .ct_rule_add = mlx5_ct_fs_dmfs_ct_rule_add,
+ .ct_rule_del = mlx5_ct_fs_dmfs_ct_rule_del,
+
+ .init = mlx5_ct_fs_dmfs_init,
+ .destroy = mlx5_ct_fs_dmfs_destroy,
+};
+
+struct mlx5_ct_fs_ops *mlx5_ct_fs_dmfs_ops_get(void)
+{
+ return &dmfs_ops;
+}
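
The dmfs backend above hands callers only the embedded generic handle and recovers its wrapper with container_of() on deletion. A standalone sketch of that embed-and-recover pattern (container_of spelled out for a userspace build; the empty handle struct relies on the same GNU C extension the kernel builds with):

```c
#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fs_rule { };			/* empty generic handle (GNU C extension) */

struct dmfs_rule {
	struct fs_rule fs_rule;		/* must stay embedded */
	void *hw_rule;			/* backend-specific state */
};

static struct fs_rule *rule_add(void *hw_rule)
{
	struct dmfs_rule *r = calloc(1, sizeof(*r));

	if (!r)
		return NULL;
	r->hw_rule = hw_rule;
	return &r->fs_rule;		/* callers only ever see the handle */
}

static void rule_del(struct fs_rule *handle)
{
	struct dmfs_rule *r = container_of(handle, struct dmfs_rule, fs_rule);

	/* teardown of r->hw_rule would go here */
	free(r);
}
```
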
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
new file mode 100644
index 000000000000..59988e24b704
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#include <linux/refcount.h>
+
+#include "en_tc.h"
+#include "en/tc_priv.h"
+#include "en/tc_ct.h"
+#include "en/tc/ct_fs.h"
+
+#include "lib/smfs.h"
+
+#define INIT_ERR_PREFIX "ct_fs_smfs init failed"
+#define ct_dbg(fmt, args...)\
+ netdev_dbg(fs->netdev, "ct_fs_smfs debug: " fmt "\n", ##args)
+#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16)
+
+struct mlx5_ct_fs_smfs_matcher {
+ struct mlx5dr_matcher *dr_matcher;
+ struct list_head list;
+ int prio;
+ refcount_t ref;
+};
+
+struct mlx5_ct_fs_smfs_matchers {
+ struct mlx5_ct_fs_smfs_matcher smfs_matchers[4];
+ struct list_head used;
+};
+
+struct mlx5_ct_fs_smfs {
+ struct mlx5dr_table *ct_tbl, *ct_nat_tbl;
+ struct mlx5_ct_fs_smfs_matchers matchers;
+ struct mlx5_ct_fs_smfs_matchers matchers_nat;
+ struct mlx5dr_action *fwd_action;
+ struct mlx5_flow_table *ct_nat;
+ struct mutex lock; /* Guards matchers */
+};
+
+struct mlx5_ct_fs_smfs_rule {
+ struct mlx5_ct_fs_rule fs_rule;
+ struct mlx5dr_rule *rule;
+ struct mlx5dr_action *count_action;
+ struct mlx5_ct_fs_smfs_matcher *smfs_matcher;
+};
+
+static inline void
+mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp)
+{
+ void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
+
+ if (likely(MLX5_CAP_FLOWTABLE_NIC_RX(fs->dev, ft_field_support.outer_ip_version)))
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
+ else
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
+
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
+ if (likely(ipv4)) {
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ } else {
+ memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ 0xFF,
+ MLX5_FLD_SZ_BYTES(fte_match_set_lyr_2_4,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6));
+ memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ 0xFF,
+ MLX5_FLD_SZ_BYTES(fte_match_set_lyr_2_4,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6));
+ }
+
+ if (likely(tcp)) {
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_sport);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_dport);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
+ ntohs(MLX5_CT_TCP_FLAGS_MASK));
+ } else {
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_sport);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_dport);
+ }
+
+ mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, 0, MLX5_CT_ZONE_MASK);
+}
+
+static struct mlx5dr_matcher *
+mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl, bool ipv4,
+ bool tcp, u32 priority)
+{
+ struct mlx5dr_matcher *dr_matcher;
+ struct mlx5_flow_spec *spec;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp);
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;
+
+ dr_matcher = mlx5_smfs_matcher_create(tbl, priority, spec);
+ kfree(spec);
+ if (!dr_matcher)
+ return ERR_PTR(-EINVAL);
+
+ return dr_matcher;
+}
+
+static struct mlx5_ct_fs_smfs_matcher *
+mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp)
+{
+ struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+ struct mlx5_ct_fs_smfs_matcher *m, *smfs_matcher;
+ struct mlx5_ct_fs_smfs_matchers *matchers;
+ struct mlx5dr_matcher *dr_matcher;
+ struct mlx5dr_table *tbl;
+ struct list_head *prev;
+ int prio;
+
+ matchers = nat ? &fs_smfs->matchers_nat : &fs_smfs->matchers;
+ smfs_matcher = &matchers->smfs_matchers[ipv4 * 2 + tcp];
+
+ if (refcount_inc_not_zero(&smfs_matcher->ref))
+ return smfs_matcher;
+
+ mutex_lock(&fs_smfs->lock);
+
+	/* Retry under the lock, as another thread might have already
+	 * created the relevant matcher before we acquired it.
+	 */
+ if (refcount_inc_not_zero(&smfs_matcher->ref))
+ goto out_unlock;
+
+	/* Find next available priority in sorted used list */
+ prio = 0;
+ prev = &matchers->used;
+ list_for_each_entry(m, &matchers->used, list) {
+ prev = &m->list;
+
+ if (m->prio == prio)
+ prio = m->prio + 1;
+ else
+ break;
+ }
+
+ tbl = nat ? fs_smfs->ct_nat_tbl : fs_smfs->ct_tbl;
+ dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, prio);
+ if (IS_ERR(dr_matcher)) {
+ netdev_warn(fs->netdev,
+ "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d), err: %ld\n",
+ nat, ipv4, tcp, PTR_ERR(dr_matcher));
+
+ smfs_matcher = ERR_CAST(dr_matcher);
+ goto out_unlock;
+ }
+
+ smfs_matcher->dr_matcher = dr_matcher;
+ smfs_matcher->prio = prio;
+ list_add(&smfs_matcher->list, prev);
+ refcount_set(&smfs_matcher->ref, 1);
+
+out_unlock:
+ mutex_unlock(&fs_smfs->lock);
+ return smfs_matcher;
+}
+
+static void
+mlx5_ct_fs_smfs_matcher_put(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_smfs_matcher *smfs_matcher)
+{
+ struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+
+ if (!refcount_dec_and_mutex_lock(&smfs_matcher->ref, &fs_smfs->lock))
+ return;
+
+ mlx5_smfs_matcher_destroy(smfs_matcher->dr_matcher);
+ list_del(&smfs_matcher->list);
+ mutex_unlock(&fs_smfs->lock);
+}
+
+static int
+mlx5_ct_fs_smfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
+ struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct)
+{
+ struct mlx5dr_table *ct_tbl, *ct_nat_tbl, *post_ct_tbl;
+ struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+
+ post_ct_tbl = mlx5_smfs_table_get_from_fs_ft(post_ct);
+ ct_nat_tbl = mlx5_smfs_table_get_from_fs_ft(ct_nat);
+ ct_tbl = mlx5_smfs_table_get_from_fs_ft(ct);
+ fs_smfs->ct_nat = ct_nat;
+
+ if (!ct_tbl || !ct_nat_tbl || !post_ct_tbl) {
+		netdev_warn(fs->netdev, "ct_fs_smfs: failed to init, missing backing dr tables\n");
+ return -EOPNOTSUPP;
+ }
+
+ ct_dbg("using smfs steering");
+
+ fs_smfs->fwd_action = mlx5_smfs_action_create_dest_table(post_ct_tbl);
+	if (!fs_smfs->fwd_action)
+		return -EINVAL;
+
+ fs_smfs->ct_tbl = ct_tbl;
+ fs_smfs->ct_nat_tbl = ct_nat_tbl;
+ mutex_init(&fs_smfs->lock);
+ INIT_LIST_HEAD(&fs_smfs->matchers.used);
+ INIT_LIST_HEAD(&fs_smfs->matchers_nat.used);
+
+ return 0;
+}
+
+static void
+mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
+{
+ struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+
+ mlx5_smfs_action_destroy(fs_smfs->fwd_action);
+}
+
+static inline bool
+mlx5_tc_ct_valid_used_dissector_keys(const u32 used_keys)
+{
+#define DISSECTOR_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
+ const u32 basic_keys = DISSECTOR_BIT(BASIC) | DISSECTOR_BIT(CONTROL) |
+ DISSECTOR_BIT(PORTS) | DISSECTOR_BIT(META);
+ const u32 ipv4_tcp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS) | DISSECTOR_BIT(TCP);
+ const u32 ipv4_udp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS);
+ const u32 ipv6_tcp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS) | DISSECTOR_BIT(TCP);
+ const u32 ipv6_udp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS);
+
+ return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
+ used_keys == ipv6_udp);
+}
+
+static bool
+mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *flow_rule)
+{
+ struct flow_match_ipv4_addrs ipv4_addrs;
+ struct flow_match_ipv6_addrs ipv6_addrs;
+ struct flow_match_control control;
+ struct flow_match_basic basic;
+ struct flow_match_ports ports;
+ struct flow_match_tcp tcp;
+
+ if (!mlx5_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) {
+ ct_dbg("rule uses unexpected dissectors (0x%08x)",
+ flow_rule->match.dissector->used_keys);
+ return false;
+ }
+
+ flow_rule_match_basic(flow_rule, &basic);
+ flow_rule_match_control(flow_rule, &control);
+ flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
+ flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
+ flow_rule_match_ports(flow_rule, &ports);
+ flow_rule_match_tcp(flow_rule, &tcp);
+
+ if (basic.mask->n_proto != htons(0xFFFF) ||
+ (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
+ basic.mask->ip_proto != 0xFF ||
+ (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP)) {
+ ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
+ ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
+ basic.key->ip_proto, basic.mask->ip_proto);
+ return false;
+ }
+
+ if (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF)) {
+ ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)",
+ ports.mask->src, ports.mask->dst);
+ return false;
+ }
+
+ if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) {
+ ct_dbg("rule uses unexpected tcp match (flags 0x%02x)", tcp.mask->flags);
+ return false;
+ }
+
+ return true;
+}
+
+static struct mlx5_ct_fs_rule *
+mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
+{
+ struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+ struct mlx5_ct_fs_smfs_matcher *smfs_matcher;
+ struct mlx5_ct_fs_smfs_rule *smfs_rule;
+ struct mlx5dr_action *actions[5];
+ struct mlx5dr_rule *rule;
+ int num_actions = 0, err;
+ bool nat, tcp, ipv4;
+
+ if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ smfs_rule = kzalloc(sizeof(*smfs_rule), GFP_KERNEL);
+ if (!smfs_rule)
+ return ERR_PTR(-ENOMEM);
+
+ smfs_rule->count_action = mlx5_smfs_action_create_flow_counter(mlx5_fc_id(attr->counter));
+ if (!smfs_rule->count_action) {
+ err = -EINVAL;
+ goto err_count;
+ }
+
+ actions[num_actions++] = smfs_rule->count_action;
+ actions[num_actions++] = attr->modify_hdr->action.dr_action;
+ actions[num_actions++] = fs_smfs->fwd_action;
+
+ nat = (attr->ft == fs_smfs->ct_nat);
+ ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
+ tcp = MLX5_GET(fte_match_param, spec->match_value,
+ outer_headers.ip_protocol) == IPPROTO_TCP;
+
+ smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp);
+ if (IS_ERR(smfs_matcher)) {
+ err = PTR_ERR(smfs_matcher);
+ goto err_matcher;
+ }
+
+ rule = mlx5_smfs_rule_create(smfs_matcher->dr_matcher, spec, num_actions, actions,
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT);
+ if (!rule) {
+ err = -EINVAL;
+ goto err_create;
+ }
+
+ smfs_rule->rule = rule;
+ smfs_rule->smfs_matcher = smfs_matcher;
+
+ return &smfs_rule->fs_rule;
+
+err_create:
+ mlx5_ct_fs_smfs_matcher_put(fs, smfs_matcher);
+err_matcher:
+ mlx5_smfs_action_destroy(smfs_rule->count_action);
+err_count:
+ kfree(smfs_rule);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_ct_fs_smfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule)
+{
+ struct mlx5_ct_fs_smfs_rule *smfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_smfs_rule,
+ fs_rule);
+
+ mlx5_smfs_rule_destroy(smfs_rule->rule);
+ mlx5_ct_fs_smfs_matcher_put(fs, smfs_rule->smfs_matcher);
+ mlx5_smfs_action_destroy(smfs_rule->count_action);
+ kfree(smfs_rule);
+}
+
+static struct mlx5_ct_fs_ops fs_smfs_ops = {
+ .ct_rule_add = mlx5_ct_fs_smfs_ct_rule_add,
+ .ct_rule_del = mlx5_ct_fs_smfs_ct_rule_del,
+
+ .init = mlx5_ct_fs_smfs_init,
+ .destroy = mlx5_ct_fs_smfs_destroy,
+
+ .priv_size = sizeof(struct mlx5_ct_fs_smfs),
+};
+
+struct mlx5_ct_fs_ops *
+mlx5_ct_fs_smfs_ops_get(void)
+{
+ return &fs_smfs_ops;
+}
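
The matcher cache above pairs refcount_inc_not_zero() on the fast path with a locked re-check before creation, and refcount_dec_and_mutex_lock() on release. A standalone approximation with pthreads (the kernel's refcount_t is atomic; the plain int here is only for illustration):

```c
#include <pthread.h>
#include <stdbool.h>

struct matcher {
	int ref;			/* stands in for an atomic refcount_t */
	pthread_mutex_t *lock;		/* guards creation and destruction */
};

static bool ref_inc_not_zero(struct matcher *m)
{
	if (m->ref == 0)
		return false;
	m->ref++;
	return true;
}

static struct matcher *matcher_get(struct matcher *m)
{
	/* Fast path: the matcher is live, no lock needed */
	if (ref_inc_not_zero(m))
		return m;

	pthread_mutex_lock(m->lock);

	/* Re-check under the lock: another thread may have created the
	 * matcher between our failed fast path and acquiring the lock.
	 */
	if (ref_inc_not_zero(m))
		goto out_unlock;

	/* ... expensive matcher creation would happen here ... */
	m->ref = 1;

out_unlock:
	pthread_mutex_unlock(m->lock);
	return m;
}

static void matcher_put(struct matcher *m)
{
	pthread_mutex_lock(m->lock);
	if (--m->ref == 0) {
		/* last reference: destroy the matcher under the lock */
	}
	pthread_mutex_unlock(m->lock);
}
```
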
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
index 31b4e39be2d3..dea137dd744b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#include "en/tc_priv.h"
#include "en_tc.h"
#include "post_act.h"
#include "mlx5_core.h"
@@ -75,21 +76,47 @@ mlx5e_tc_post_act_destroy(struct mlx5e_post_act *post_act)
kfree(post_act);
}
+int
+mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
+ struct mlx5e_post_act_handle *handle)
+{
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+	/* The post action rule matches on fte_id and executes the original rule's tc actions */
+ mlx5e_tc_match_to_reg_match(spec, FTEID_TO_REG, handle->id, MLX5_POST_ACTION_MASK);
+
+ handle->rule = mlx5e_tc_rule_offload(post_act->priv, spec, handle->attr);
+ if (IS_ERR(handle->rule)) {
+ err = PTR_ERR(handle->rule);
+		netdev_warn(post_act->priv->netdev, "Failed to add post action rule\n");
+ goto err_rule;
+ }
+
+ kvfree(spec);
+ return 0;
+
+err_rule:
+ kvfree(spec);
+ return err;
+}
+
struct mlx5e_post_act_handle *
mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *attr)
{
u32 attr_sz = ns_to_attr_sz(post_act->ns_type);
- struct mlx5e_post_act_handle *handle = NULL;
- struct mlx5_flow_attr *post_attr = NULL;
- struct mlx5_flow_spec *spec = NULL;
+ struct mlx5e_post_act_handle *handle;
+ struct mlx5_flow_attr *post_attr;
int err;
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
post_attr = mlx5_alloc_flow_attr(post_act->ns_type);
- if (!handle || !spec || !post_attr) {
+ if (!handle || !post_attr) {
kfree(post_attr);
- kvfree(spec);
kfree(handle);
return ERR_PTR(-ENOMEM);
}
@@ -100,7 +127,7 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
post_attr->ft = post_act->ft;
post_attr->inner_match_level = MLX5_MATCH_NONE;
post_attr->outer_match_level = MLX5_MATCH_NONE;
- post_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
+ post_attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_DECAP;
handle->ns_type = post_act->ns_type;
/* Splits were handled before post action */
@@ -112,36 +139,29 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
if (err)
goto err_xarray;
- /* Post action rule matches on fte_id and executes original rule's
- * tc rule action
- */
- mlx5e_tc_match_to_reg_match(spec, FTEID_TO_REG,
- handle->id, MLX5_POST_ACTION_MASK);
-
- handle->rule = mlx5_tc_rule_insert(post_act->priv, spec, post_attr);
- if (IS_ERR(handle->rule)) {
- err = PTR_ERR(handle->rule);
- netdev_warn(post_act->priv->netdev, "Failed to add post action rule");
- goto err_rule;
- }
handle->attr = post_attr;
- kvfree(spec);
return handle;
-err_rule:
- xa_erase(&post_act->ids, handle->id);
err_xarray:
kfree(post_attr);
- kvfree(spec);
kfree(handle);
return ERR_PTR(err);
}
void
+mlx5e_tc_post_act_unoffload(struct mlx5e_post_act *post_act,
+ struct mlx5e_post_act_handle *handle)
+{
+ mlx5e_tc_rule_unoffload(post_act->priv, handle->rule, handle->attr);
+ handle->rule = NULL;
+}
+
+void
mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle)
{
- mlx5_tc_rule_delete(post_act->priv, handle->rule, handle->attr);
+ if (!IS_ERR_OR_NULL(handle->rule))
+ mlx5e_tc_post_act_unoffload(post_act, handle);
xa_erase(&post_act->ids, handle->id);
kfree(handle->attr);
kfree(handle);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h
index b530ec1981a5..f476774c0b75 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h
@@ -24,6 +24,14 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
void
mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle);
+int
+mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
+ struct mlx5e_post_act_handle *handle);
+
+void
+mlx5e_tc_post_act_unoffload(struct mlx5e_post_act *post_act,
+ struct mlx5e_post_act_handle *handle);
+
struct mlx5_flow_table *
mlx5e_tc_post_act_get_ft(struct mlx5e_post_act *post_act);
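
The two declarations added above split the post-action lifecycle into create, offload, unoffload and delete steps, so a handle can outlive its rule. A hedged usage sketch of the intended calling order (in-kernel context assumed; IS_ERR()/PTR_ERR() come from <linux/err.h>, error handling abbreviated):

```c
/* Sketch only: assumes post_act and attr were set up elsewhere. */
static int post_act_lifecycle_example(struct mlx5e_post_act *post_act,
				      struct mlx5_flow_attr *attr)
{
	struct mlx5e_post_act_handle *handle;
	int err;

	handle = mlx5e_tc_post_act_add(post_act, attr);	/* allocate id + attr */
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = mlx5e_tc_post_act_offload(post_act, handle);	/* install the rule */
	if (err)
		goto out_del;

	/* ... later, e.g. around a neigh update, the rule alone can be
	 * removed and re-installed while the handle and its id stay valid ...
	 */
	mlx5e_tc_post_act_unoffload(post_act, handle);

out_del:
	mlx5e_tc_post_act_del(post_act, handle);	/* drop id and handle */
	return err;
}
```
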
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
index ff4b4f8a5a9d..fd4504518578 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
@@ -5,6 +5,7 @@
#include <net/psample.h>
#include "en/mapping.h"
#include "en/tc/post_act.h"
+#include "en/tc/act/sample.h"
#include "en/mod_hdr.h"
#include "sample.h"
#include "eswitch.h"
@@ -46,14 +47,12 @@ struct mlx5e_sample_flow {
struct mlx5_flow_handle *pre_rule;
struct mlx5_flow_attr *post_attr;
struct mlx5_flow_handle *post_rule;
- struct mlx5e_post_act_handle *post_act_handle;
};
struct mlx5e_sample_restore {
struct hlist_node hlist;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_flow_handle *rule;
- struct mlx5e_post_act_handle *post_act_handle;
u32 obj_id;
int count;
};
@@ -231,69 +230,46 @@ sampler_put(struct mlx5e_tc_psample *tc_psample, struct mlx5e_sampler *sampler)
*/
static struct mlx5_modify_hdr *
sample_modify_hdr_get(struct mlx5_core_dev *mdev, u32 obj_id,
- struct mlx5e_post_act_handle *handle)
+ struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
- struct mlx5e_tc_mod_hdr_acts mod_acts = {};
struct mlx5_modify_hdr *modify_hdr;
int err;
- err = mlx5e_tc_match_to_reg_set(mdev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
+ err = mlx5e_tc_match_to_reg_set(mdev, mod_acts, MLX5_FLOW_NAMESPACE_FDB,
CHAIN_TO_REG, obj_id);
if (err)
goto err_set_regc0;
- if (handle) {
- err = mlx5e_tc_post_act_set_handle(mdev, handle, &mod_acts);
- if (err)
- goto err_post_act;
- }
-
modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB,
- mod_acts.num_actions,
- mod_acts.actions);
+ mod_acts->num_actions,
+ mod_acts->actions);
if (IS_ERR(modify_hdr)) {
err = PTR_ERR(modify_hdr);
goto err_modify_hdr;
}
- mlx5e_mod_hdr_dealloc(&mod_acts);
+ mlx5e_mod_hdr_dealloc(mod_acts);
return modify_hdr;
err_modify_hdr:
-err_post_act:
- mlx5e_mod_hdr_dealloc(&mod_acts);
+ mlx5e_mod_hdr_dealloc(mod_acts);
err_set_regc0:
return ERR_PTR(err);
}
-static u32
-restore_hash(u32 obj_id, struct mlx5e_post_act_handle *post_act_handle)
-{
- return jhash_2words(obj_id, hash32_ptr(post_act_handle), 0);
-}
-
-static bool
-restore_equal(struct mlx5e_sample_restore *restore, u32 obj_id,
- struct mlx5e_post_act_handle *post_act_handle)
-{
- return restore->obj_id == obj_id && restore->post_act_handle == post_act_handle;
-}
-
static struct mlx5e_sample_restore *
sample_restore_get(struct mlx5e_tc_psample *tc_psample, u32 obj_id,
- struct mlx5e_post_act_handle *post_act_handle)
+ struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
struct mlx5_eswitch *esw = tc_psample->esw;
struct mlx5_core_dev *mdev = esw->dev;
struct mlx5e_sample_restore *restore;
struct mlx5_modify_hdr *modify_hdr;
- u32 hash_key;
int err;
mutex_lock(&tc_psample->restore_lock);
- hash_key = restore_hash(obj_id, post_act_handle);
- hash_for_each_possible(tc_psample->restore_hashtbl, restore, hlist, hash_key)
- if (restore_equal(restore, obj_id, post_act_handle))
+ hash_for_each_possible(tc_psample->restore_hashtbl, restore, hlist, obj_id)
+ if (restore->obj_id == obj_id)
goto add_ref;
restore = kzalloc(sizeof(*restore), GFP_KERNEL);
@@ -302,9 +278,8 @@ sample_restore_get(struct mlx5e_tc_psample *tc_psample, u32 obj_id,
goto err_alloc;
}
restore->obj_id = obj_id;
- restore->post_act_handle = post_act_handle;
- modify_hdr = sample_modify_hdr_get(mdev, obj_id, post_act_handle);
+ modify_hdr = sample_modify_hdr_get(mdev, obj_id, mod_acts);
if (IS_ERR(modify_hdr)) {
err = PTR_ERR(modify_hdr);
goto err_modify_hdr;
@@ -317,7 +292,7 @@ sample_restore_get(struct mlx5e_tc_psample *tc_psample, u32 obj_id,
goto err_restore;
}
- hash_add(tc_psample->restore_hashtbl, &restore->hlist, hash_key);
+ hash_add(tc_psample->restore_hashtbl, &restore->hlist, obj_id);
add_ref:
restore->count++;
mutex_unlock(&tc_psample->restore_lock);
@@ -403,7 +378,7 @@ add_post_rule(struct mlx5_eswitch *esw, struct mlx5e_sample_flow *sample_flow,
post_attr->chain = 0;
post_attr->prio = 0;
post_attr->ft = default_tbl;
- post_attr->flags = MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
+ post_attr->flags = MLX5_ATTR_FLAG_NO_IN_PORT;
/* When offloading sample and encap action, if there is no valid
* neigh data struct, a slow path rule is offloaded first. Source
@@ -492,16 +467,16 @@ del_post_rule(struct mlx5_eswitch *esw, struct mlx5e_sample_flow *sample_flow,
struct mlx5_flow_handle *
mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
struct mlx5_flow_spec *spec,
- struct mlx5_flow_attr *attr,
- u32 tunnel_id)
+ struct mlx5_flow_attr *attr)
{
- struct mlx5e_post_act_handle *post_act_handle = NULL;
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_esw_flow_attr *pre_esw_attr;
struct mlx5_mapped_obj restore_obj = {};
+ struct mlx5e_tc_mod_hdr_acts *mod_acts;
struct mlx5e_sample_flow *sample_flow;
struct mlx5e_sample_attr *sample_attr;
struct mlx5_flow_attr *pre_attr;
+ u32 tunnel_id = attr->tunnel_id;
struct mlx5_eswitch *esw;
u32 default_tbl_id;
u32 obj_id;
@@ -513,7 +488,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
sample_flow = kzalloc(sizeof(*sample_flow), GFP_KERNEL);
if (!sample_flow)
return ERR_PTR(-ENOMEM);
- sample_attr = attr->sample_attr;
+ sample_attr = &attr->sample_attr;
sample_attr->sample_flow = sample_flow;
/* For NICs with reg_c_preserve support or decap action, use
@@ -522,18 +497,11 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
* original flow table.
*/
esw = tc_psample->esw;
- if (MLX5_CAP_GEN(esw->dev, reg_c_preserve) ||
- attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+ if (mlx5e_tc_act_sample_is_multi_table(esw->dev, attr)) {
struct mlx5_flow_table *ft;
ft = mlx5e_tc_post_act_get_ft(tc_psample->post_act);
default_tbl_id = ft->id;
- post_act_handle = mlx5e_tc_post_act_add(tc_psample->post_act, attr);
- if (IS_ERR(post_act_handle)) {
- err = PTR_ERR(post_act_handle);
- goto err_post_act;
- }
- sample_flow->post_act_handle = post_act_handle;
} else {
err = add_post_rule(esw, sample_flow, spec, attr, &default_tbl_id);
if (err)
@@ -546,6 +514,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
err = PTR_ERR(sample_flow->sampler);
goto err_sampler;
}
+ sample_attr->sampler_id = sample_flow->sampler->sampler_id;
/* Create an id mapping reg_c0 value to sample object. */
restore_obj.type = MLX5_MAPPED_OBJ_SAMPLE;
@@ -559,7 +528,8 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
sample_attr->restore_obj_id = obj_id;
/* Create sample restore context. */
- sample_flow->restore = sample_restore_get(tc_psample, obj_id, post_act_handle);
+ mod_acts = &attr->parse_attr->mod_hdr_acts;
+ sample_flow->restore = sample_restore_get(tc_psample, obj_id, mod_acts);
if (IS_ERR(sample_flow->restore)) {
err = PTR_ERR(sample_flow->restore);
goto err_sample_restore;
@@ -580,13 +550,13 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
if (tunnel_id)
pre_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
pre_attr->modify_hdr = sample_flow->restore->modify_hdr;
- pre_attr->flags = MLX5_ESW_ATTR_FLAG_SAMPLE;
+ pre_attr->flags = MLX5_ATTR_FLAG_SAMPLE;
pre_attr->inner_match_level = attr->inner_match_level;
pre_attr->outer_match_level = attr->outer_match_level;
pre_attr->chain = attr->chain;
pre_attr->prio = attr->prio;
- pre_attr->sample_attr = attr->sample_attr;
- sample_attr->sampler_id = sample_flow->sampler->sampler_id;
+ pre_attr->ft = attr->ft;
+ pre_attr->sample_attr = *sample_attr;
pre_esw_attr = pre_attr->esw_attr;
pre_esw_attr->in_mdev = esw_attr->in_mdev;
pre_esw_attr->in_rep = esw_attr->in_rep;
@@ -611,9 +581,6 @@ err_sampler:
if (sample_flow->post_rule)
del_post_rule(esw, sample_flow, attr);
err_post_rule:
- if (post_act_handle)
- mlx5e_tc_post_act_del(tc_psample->post_act, post_act_handle);
-err_post_act:
kfree(sample_flow);
return ERR_PTR(err);
}
@@ -633,15 +600,13 @@ mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
* will hit fw syndromes.
*/
esw = tc_psample->esw;
- sample_flow = attr->sample_attr->sample_flow;
+ sample_flow = attr->sample_attr.sample_flow;
mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, sample_flow->pre_attr);
sample_restore_put(tc_psample, sample_flow->restore);
- mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr->restore_obj_id);
+ mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr.restore_obj_id);
sampler_put(tc_psample, sample_flow->sampler);
- if (sample_flow->post_act_handle)
- mlx5e_tc_post_act_del(tc_psample->post_act, sample_flow->post_act_handle);
- else
+ if (sample_flow->post_rule)
del_post_rule(esw, sample_flow, attr);
kfree(sample_flow->pre_attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
index 9ef8a49d7801..a569367eae4d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
@@ -26,8 +26,7 @@ void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj
struct mlx5_flow_handle *
mlx5e_tc_sample_offload(struct mlx5e_tc_psample *sample_priv,
struct mlx5_flow_spec *spec,
- struct mlx5_flow_attr *attr,
- u32 tunnel_id);
+ struct mlx5_flow_attr *attr);
void
mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *sample_priv,
@@ -45,8 +44,7 @@ mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample);
static inline struct mlx5_flow_handle *
mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
struct mlx5_flow_spec *spec,
- struct mlx5_flow_attr *attr,
- u32 tunnel_id)
+ struct mlx5_flow_attr *attr)
{ return ERR_PTR(-EOPNOTSUPP); }
static inline void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 4a0d38d219ed..e49f51124c74 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -18,15 +18,16 @@
#include "lib/fs_chains.h"
#include "en/tc_ct.h"
+#include "en/tc/ct_fs.h"
+#include "en/tc_priv.h"
#include "en/mod_hdr.h"
#include "en/mapping.h"
#include "en/tc/post_act.h"
#include "en.h"
#include "en_tc.h"
#include "en_rep.h"
+#include "fs_core.h"
-#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen)
-#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0)
#define MLX5_CT_STATE_ESTABLISHED_BIT BIT(1)
#define MLX5_CT_STATE_TRK_BIT BIT(2)
#define MLX5_CT_STATE_NAT_BIT BIT(3)
@@ -62,19 +63,20 @@ struct mlx5_tc_ct_priv {
struct mapping_ctx *labels_mapping;
enum mlx5_flow_namespace_type ns_type;
struct mlx5_fs_chains *chains;
+ struct mlx5_ct_fs *fs;
+ struct mlx5_ct_fs_ops *fs_ops;
spinlock_t ht_lock; /* protects ft entries */
};
struct mlx5_ct_flow {
struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_flow_handle *pre_ct_rule;
- struct mlx5e_post_act_handle *post_act_handle;
struct mlx5_ct_ft *ft;
u32 chain_mapping;
};
struct mlx5_ct_zone_rule {
- struct mlx5_flow_handle *rule;
+ struct mlx5_ct_fs_rule *rule;
struct mlx5e_mod_hdr_handle *mh;
struct mlx5_flow_attr *attr;
bool nat;
@@ -258,7 +260,8 @@ mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
return -EOPNOTSUPP;
}
} else {
- return -EOPNOTSUPP;
+ if (tuple->ip_proto != IPPROTO_GRE)
+ return -EOPNOTSUPP;
}
return 0;
@@ -505,7 +508,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone);
- mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
+ ct_priv->fs_ops->ct_rule_del(ct_priv->fs, zone_rule->rule);
mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh);
mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
kfree(attr);
@@ -807,16 +810,20 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
attr->dest_chain = 0;
attr->dest_ft = mlx5e_tc_post_act_get_ft(ct_priv->post_act);
attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
- attr->outer_match_level = MLX5_MATCH_L4;
+ if (entry->tuple.ip_proto == IPPROTO_TCP ||
+ entry->tuple.ip_proto == IPPROTO_UDP)
+ attr->outer_match_level = MLX5_MATCH_L4;
+ else
+ attr->outer_match_level = MLX5_MATCH_L3;
attr->counter = entry->counter->counter;
- attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
+ attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;
if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB)
attr->esw_attr->in_mdev = priv->mdev;
mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
- zone_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
+ zone_rule->rule = ct_priv->fs_ops->ct_rule_add(ct_priv->fs, spec, attr, flow_rule);
if (IS_ERR(zone_rule->rule)) {
err = PTR_ERR(zone_rule->rule);
ct_dbg("Failed to add ct entry rule, nat: %d", nat);
@@ -1154,7 +1161,6 @@ mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft,
}
rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
- mlx5_tc_ct_entry_remove_from_tuples(entry);
spin_unlock_bh(&ct_priv->ht_lock);
mlx5_tc_ct_entry_put(entry);
@@ -1224,16 +1230,20 @@ mlx5_tc_ct_skb_to_tuple(struct sk_buff *skb, struct mlx5_ct_tuple *tuple,
struct flow_keys flow_keys;
skb_reset_network_header(skb);
- skb_flow_dissect_flow_keys(skb, &flow_keys, 0);
+ skb_flow_dissect_flow_keys(skb, &flow_keys, FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);
tuple->zone = zone;
if (flow_keys.basic.ip_proto != IPPROTO_TCP &&
- flow_keys.basic.ip_proto != IPPROTO_UDP)
+ flow_keys.basic.ip_proto != IPPROTO_UDP &&
+ flow_keys.basic.ip_proto != IPPROTO_GRE)
return false;
- tuple->port.src = flow_keys.ports.src;
- tuple->port.dst = flow_keys.ports.dst;
+ if (flow_keys.basic.ip_proto == IPPROTO_TCP ||
+ flow_keys.basic.ip_proto == IPPROTO_UDP) {
+ tuple->port.src = flow_keys.ports.src;
+ tuple->port.dst = flow_keys.ports.dst;
+ }
tuple->n_proto = flow_keys.basic.n_proto;
tuple->ip_proto = flow_keys.basic.ip_proto;
@@ -1756,7 +1766,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
/* We translate the tc filter with CT action to the following HW model:
*
* +---------------------+
- * + ft prio (tc chain) +
+ * + ft prio (tc chain) +
* + original match +
* +---------------------+
* | set chain miss mapping
@@ -1766,7 +1776,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
* v
* +---------------------+
* + pre_ct/pre_ct_nat + if matches +-------------------------+
- * + zone+nat match +---------------->+ post_act (see below) +
+ * + zone+nat match +---------------->+ post_act (see below) +
* +---------------------+ set zone +-------------------------+
* | set zone
* v
@@ -1781,21 +1791,19 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
* | do nat (if needed)
* v
* +--------------+
- * + post_act + original filter actions
+ * + post_act + original filter actions
* + fte_id match +------------------------>
* +--------------+
*/
static struct mlx5_flow_handle *
__mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
- struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *orig_spec,
struct mlx5_flow_attr *attr)
{
bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
- struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
+ struct mlx5e_tc_mod_hdr_acts *pre_mod_acts;
u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type);
- struct mlx5e_post_act_handle *handle;
struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_modify_hdr *mod_hdr;
struct mlx5_ct_flow *ct_flow;
@@ -1818,14 +1826,6 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
ct_flow->ft = ft;
- handle = mlx5e_tc_post_act_add(ct_priv->post_act, attr);
- if (IS_ERR(handle)) {
- err = PTR_ERR(handle);
- ct_dbg("Failed to allocate post action handle");
- goto err_post_act_handle;
- }
- ct_flow->post_act_handle = handle;
-
/* Base flow attributes of both rules on original rule attribute */
ct_flow->pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
if (!ct_flow->pre_ct_attr) {
@@ -1835,6 +1835,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
pre_ct_attr = ct_flow->pre_ct_attr;
memcpy(pre_ct_attr, attr, attr_sz);
+ pre_mod_acts = &pre_ct_attr->parse_attr->mod_hdr_acts;
/* Modify the original rule's action to fwd and modify, leave decap */
pre_ct_attr->action = attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP;
@@ -1853,30 +1854,22 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
ct_flow->chain_mapping = chain_mapping;
- err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts, ct_priv->ns_type,
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts, ct_priv->ns_type,
CHAIN_TO_REG, chain_mapping);
if (err) {
ct_dbg("Failed to set chain register mapping");
goto err_mapping;
}
- err = mlx5e_tc_post_act_set_handle(priv->mdev, handle, &pre_mod_acts);
- if (err) {
- ct_dbg("Failed to set post action handle");
- goto err_mapping;
- }
-
/* If original flow is decap, we do it before going into ct table
* so add a rewrite for the tunnel match_id.
*/
if ((pre_ct_attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
attr->chain == 0) {
- u32 tun_id = mlx5e_tc_get_flow_tun_id(flow);
-
- err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts,
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts,
ct_priv->ns_type,
TUNNEL_TO_REG,
- tun_id);
+ attr->tunnel_id);
if (err) {
ct_dbg("Failed to set tunnel register mapping");
goto err_mapping;
@@ -1884,8 +1877,8 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
- pre_mod_acts.num_actions,
- pre_mod_acts.actions);
+ pre_mod_acts->num_actions,
+ pre_mod_acts->actions);
if (IS_ERR(mod_hdr)) {
err = PTR_ERR(mod_hdr);
ct_dbg("Failed to create pre ct mod hdr");
@@ -1905,20 +1898,18 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
attr->ct_attr.ct_flow = ct_flow;
- mlx5e_mod_hdr_dealloc(&pre_mod_acts);
+ mlx5e_mod_hdr_dealloc(pre_mod_acts);
return ct_flow->pre_ct_rule;
err_insert_orig:
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
err_mapping:
- mlx5e_mod_hdr_dealloc(&pre_mod_acts);
+ mlx5e_mod_hdr_dealloc(pre_mod_acts);
mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
err_get_chain:
kfree(ct_flow->pre_ct_attr);
err_alloc_pre:
- mlx5e_tc_post_act_del(ct_priv->post_act, handle);
-err_post_act_handle:
mlx5_tc_ct_del_ft_cb(ct_priv, ft);
err_ft:
kfree(ct_flow);
@@ -1926,87 +1917,19 @@ err_ft:
return ERR_PTR(err);
}
-static struct mlx5_flow_handle *
-__mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv,
- struct mlx5_flow_spec *orig_spec,
- struct mlx5_flow_attr *attr,
- struct mlx5e_tc_mod_hdr_acts *mod_acts)
-{
- struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
- u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type);
- struct mlx5_flow_attr *pre_ct_attr;
- struct mlx5_modify_hdr *mod_hdr;
- struct mlx5_flow_handle *rule;
- struct mlx5_ct_flow *ct_flow;
- int err;
-
- ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
- if (!ct_flow)
- return ERR_PTR(-ENOMEM);
-
- /* Base esw attributes on original rule attribute */
- pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
- if (!pre_ct_attr) {
- err = -ENOMEM;
- goto err_attr;
- }
-
- memcpy(pre_ct_attr, attr, attr_sz);
-
- mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
- mod_acts->num_actions,
- mod_acts->actions);
- if (IS_ERR(mod_hdr)) {
- err = PTR_ERR(mod_hdr);
- ct_dbg("Failed to add create ct clear mod hdr");
- goto err_mod_hdr;
- }
-
- pre_ct_attr->modify_hdr = mod_hdr;
-
- rule = mlx5_tc_rule_insert(priv, orig_spec, pre_ct_attr);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- ct_dbg("Failed to add ct clear rule");
- goto err_insert;
- }
-
- attr->ct_attr.ct_flow = ct_flow;
- ct_flow->pre_ct_attr = pre_ct_attr;
- ct_flow->pre_ct_rule = rule;
- return rule;
-
-err_insert:
- mlx5_modify_header_dealloc(priv->mdev, mod_hdr);
-err_mod_hdr:
- netdev_warn(priv->netdev,
- "Failed to offload ct clear flow, err %d\n", err);
- kfree(pre_ct_attr);
-err_attr:
- kfree(ct_flow);
-
- return ERR_PTR(err);
-}
-
struct mlx5_flow_handle *
mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
- struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
{
- bool clear_action = attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
struct mlx5_flow_handle *rule;
if (!priv)
return ERR_PTR(-EOPNOTSUPP);
mutex_lock(&priv->control_lock);
-
- if (clear_action)
- rule = __mlx5_tc_ct_flow_offload_clear(priv, spec, attr, mod_hdr_acts);
- else
- rule = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr);
+ rule = __mlx5_tc_ct_flow_offload(priv, spec, attr);
mutex_unlock(&priv->control_lock);
return rule;
@@ -2014,21 +1937,17 @@ mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
static void
__mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
- struct mlx5e_tc_flow *flow,
- struct mlx5_ct_flow *ct_flow)
+ struct mlx5_ct_flow *ct_flow,
+ struct mlx5_flow_attr *attr)
{
struct mlx5_flow_attr *pre_ct_attr = ct_flow->pre_ct_attr;
struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
- mlx5_tc_rule_delete(priv, ct_flow->pre_ct_rule,
- pre_ct_attr);
+ mlx5_tc_rule_delete(priv, ct_flow->pre_ct_rule, pre_ct_attr);
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
- if (ct_flow->post_act_handle) {
- mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
- mlx5e_tc_post_act_del(ct_priv->post_act, ct_flow->post_act_handle);
- mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
- }
+ mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
+ mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
kfree(ct_flow->pre_ct_attr);
kfree(ct_flow);
@@ -2036,7 +1955,6 @@ __mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
void
mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
- struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr)
{
struct mlx5_ct_flow *ct_flow = attr->ct_attr.ct_flow;
@@ -2048,11 +1966,43 @@ mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
return;
mutex_lock(&priv->control_lock);
- __mlx5_tc_ct_delete_flow(priv, flow, ct_flow);
+ __mlx5_tc_ct_delete_flow(priv, ct_flow, attr);
mutex_unlock(&priv->control_lock);
}
static int
+mlx5_tc_ct_fs_init(struct mlx5_tc_ct_priv *ct_priv)
+{
+ struct mlx5_flow_table *post_ct = mlx5e_tc_post_act_get_ft(ct_priv->post_act);
+ struct mlx5_ct_fs_ops *fs_ops = mlx5_ct_fs_dmfs_ops_get();
+ int err;
+
+ if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
+ ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) {
+ ct_dbg("Using SMFS ct flow steering provider");
+ fs_ops = mlx5_ct_fs_smfs_ops_get();
+ }
+
+ ct_priv->fs = kzalloc(sizeof(*ct_priv->fs) + fs_ops->priv_size, GFP_KERNEL);
+ if (!ct_priv->fs)
+ return -ENOMEM;
+
+ ct_priv->fs->netdev = ct_priv->netdev;
+ ct_priv->fs->dev = ct_priv->dev;
+ ct_priv->fs_ops = fs_ops;
+
+ err = ct_priv->fs_ops->init(ct_priv->fs, ct_priv->ct, ct_priv->ct_nat, post_ct);
+ if (err)
+ goto err_init;
+
+ return 0;
+
+err_init:
+ kfree(ct_priv->fs);
+ return err;
+}
+
+static int
mlx5_tc_ct_init_check_esw_support(struct mlx5_eswitch *esw,
const char **err_msg)
{
@@ -2190,8 +2140,14 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params))
goto err_ct_tuples_nat_ht;
+ err = mlx5_tc_ct_fs_init(ct_priv);
+ if (err)
+ goto err_init_fs;
+
return ct_priv;
+err_init_fs:
+ rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
err_ct_tuples_nat_ht:
rhashtable_destroy(&ct_priv->ct_tuples_ht);
err_ct_tuples_ht:
@@ -2222,6 +2178,9 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
chains = ct_priv->chains;
+ ct_priv->fs_ops->destroy(ct_priv->fs);
+ kfree(ct_priv->fs);
+
mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
mlx5_chains_destroy_global_table(chains, ct_priv->ct);
mapping_destroy(ct_priv->zone_mapping);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 99662af1e41a..36d3652bf829 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -86,6 +86,8 @@ struct mlx5_ct_attr {
#define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen)
#define REG_MAPPING_MOFFSET(reg) (mlx5e_tc_attr_to_reg_mappings[reg].moffset)
+#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen)
+#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0)
#if IS_ENABLED(CONFIG_MLX5_TC_CT)
@@ -116,13 +118,11 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_handle *
mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
- struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
void
mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
- struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr);
bool
@@ -183,7 +183,6 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
static inline struct mlx5_flow_handle *
mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
- struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
@@ -193,7 +192,6 @@ mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
static inline void
mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
- struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr)
{
}
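
The zone macros relocated into this header expand to a GENMASK over the register mapping length `mlen`. Assuming an illustrative `mlen` of 16 bits (the CT zone is a u16), a standalone expansion shows the resulting mask:

```c
#include <stdio.h>

#define BITS_PER_LONG 64
/* Same shape as the kernel's GENMASK() from include/linux/bits.h */
#define GENMASK(h, l) \
	((~0UL << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

#define CT_ZONE_BITS 16				/* illustrative mlen */
#define CT_ZONE_MASK GENMASK(CT_ZONE_BITS - 1, 0)

int main(void)
{
	printf("zone mask: 0x%lx\n", CT_ZONE_MASK);	/* prints 0xffff */
	return 0;
}
```
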
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index 70b40ae384e4..3b74a6fd5c43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -38,9 +38,9 @@ struct mlx5e_tc_flow_parse_attr {
struct mlx5e_mpls_info mpls_info[MLX5_MAX_FLOW_FWD_VPORTS];
struct net_device *filter_dev;
struct mlx5_flow_spec spec;
+ struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
- struct ethhdr eth;
struct mlx5e_tc_act_parse_state parse_state;
};
@@ -108,10 +108,20 @@ struct mlx5e_tc_flow {
struct rcu_head rcu_head;
struct completion init_done;
struct completion del_hw_done;
- int tunnel_id; /* the mapped tunnel id of this flow */
struct mlx5_flow_attr *attr;
+ struct list_head attrs;
};
+struct mlx5_flow_handle *
+mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr);
+
+void
+mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr);
+
u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer);
struct mlx5_flow_handle *
@@ -120,6 +130,12 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr);
+struct mlx5_flow_attr *
+mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow);
+
+void mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow);
+int mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow);
+
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);
@@ -174,6 +190,7 @@ struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec);
+
void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr);
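Replacing the single tunnel_id with a list_head of attributes is what allows post-action splitting to hang several mlx5_flow_attr instances off one flow. Walking such a list uses the standard list_for_each_entry pattern; a sketch assuming a `list` member links the attributes (the member name here is an assumption, not taken from the patch):

    #include <linux/list.h>
    #include <linux/printk.h>
    #include <linux/types.h>

    struct ex_attr {
        struct list_head list;  /* linked into flow->attrs; name assumed */
        u32 chain;
    };

    static void ex_walk_attrs(struct list_head *attrs)
    {
        struct ex_attr *attr;

        list_for_each_entry(attr, attrs, list)
            pr_debug("attr for chain %u\n", attr->chain);
    }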
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index d39d0dae22fc..5aff97914367 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -173,19 +173,29 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
list_for_each_entry(flow, flow_list, tmp_list) {
if (!mlx5e_is_offloaded_flow(flow) || !flow_flag_test(flow, SLOW))
continue;
- attr = flow->attr;
- esw_attr = attr->esw_attr;
- spec = &attr->parse_attr->spec;
+ spec = &flow->attr->parse_attr->spec;
+
+ attr = mlx5e_tc_get_encap_attr(flow);
+ esw_attr = attr->esw_attr;
esw_attr->dests[flow->tmp_entry_index].pkt_reformat = e->pkt_reformat;
esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
/* Do not offload flows with unresolved neighbors */
if (!mlx5e_tc_flow_all_encaps_valid(esw_attr))
continue;
+
+ err = mlx5e_tc_offload_flow_post_acts(flow);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "Failed to update flow post acts, %d\n",
+ err);
+ continue;
+ }
+
/* update from slow path rule to encap rule */
- rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, flow->attr);
if (IS_ERR(rule)) {
+ mlx5e_tc_unoffload_flow_post_acts(flow);
err = PTR_ERR(rule);
mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
err);
@@ -214,12 +224,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
list_for_each_entry(flow, flow_list, tmp_list) {
if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))
continue;
- attr = flow->attr;
- esw_attr = attr->esw_attr;
- spec = &attr->parse_attr->spec;
+ spec = &flow->attr->parse_attr->spec;
/* update from encap rule to slow path rule */
rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
+
+ attr = mlx5e_tc_get_encap_attr(flow);
+ esw_attr = attr->esw_attr;
/* mark the flow's encap dest as non-valid */
esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
@@ -230,7 +241,8 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
continue;
}
- mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr);
+ mlx5e_tc_unoffload_flow_post_acts(flow);
flow->rule[0] = rule;
/* was unset when fast path rule removed */
flow_flag_set(flow, OFFLOADED);
@@ -488,12 +500,17 @@ static void mlx5e_detach_encap_route(struct mlx5e_priv *priv,
int out_index);
void mlx5e_detach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow, int out_index)
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ int out_index)
{
struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- if (flow->attr->esw_attr->dests[out_index].flags &
+ if (!mlx5e_is_eswitch_flow(flow))
+ return;
+
+ if (attr->esw_attr->dests[out_index].flags &
MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
mlx5e_detach_encap_route(priv, flow, out_index);
@@ -733,6 +750,7 @@ static unsigned int mlx5e_route_tbl_get_last_update(struct mlx5e_priv *priv)
static int mlx5e_attach_encap_route(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
struct mlx5e_encap_entry *e,
bool new_encap_entry,
unsigned long tbl_time_before,
@@ -740,6 +758,7 @@ static int mlx5e_attach_encap_route(struct mlx5e_priv *priv,
int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
struct net_device *mirred_dev,
int out_index,
struct netlink_ext_ack *extack,
@@ -748,7 +767,6 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5_flow_attr *attr = flow->attr;
const struct ip_tunnel_info *tun_info;
const struct mlx5e_mpls_info *mpls_info;
unsigned long tbl_time_before = 0;
@@ -837,8 +855,8 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
e->compl_result = 1;
attach_flow:
- err = mlx5e_attach_encap_route(priv, flow, e, entry_created, tbl_time_before,
- out_index);
+ err = mlx5e_attach_encap_route(priv, flow, attr, e, entry_created,
+ tbl_time_before, out_index);
if (err)
goto out_err;
@@ -888,20 +906,18 @@ int mlx5e_attach_decap(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
struct mlx5_pkt_reformat_params reformat_params;
- struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_decap_entry *d;
struct mlx5e_decap_key key;
uintptr_t hash_key;
int err = 0;
- parse_attr = flow->attr->parse_attr;
- if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
+ if (sizeof(attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
NL_SET_ERR_MSG_MOD(extack,
"encap header larger than max supported");
return -EOPNOTSUPP;
}
- key.key = parse_attr->eth;
+ key.key = attr->eth;
hash_key = hash_decap_info(&key);
mutex_lock(&esw->offloads.decap_tbl_lock);
d = mlx5e_decap_get(priv, &key, hash_key);
@@ -931,8 +947,8 @@ int mlx5e_attach_decap(struct mlx5e_priv *priv,
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
- reformat_params.size = sizeof(parse_attr->eth);
- reformat_params.data = &parse_attr->eth;
+ reformat_params.size = sizeof(attr->eth);
+ reformat_params.data = &attr->eth;
d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
&reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
@@ -1201,6 +1217,7 @@ out:
static int mlx5e_attach_encap_route(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
struct mlx5e_encap_entry *e,
bool new_encap_entry,
unsigned long tbl_time_before,
@@ -1209,7 +1226,6 @@ static int mlx5e_attach_encap_route(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
unsigned long tbl_time_after = tbl_time_before;
struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5_flow_attr *attr = flow->attr;
const struct ip_tunnel_info *tun_info;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5e_route_entry *r;
@@ -1360,17 +1376,19 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
list_for_each_entry(flow, encap_flows, tmp_list) {
struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_flow_handle *rule;
+ struct mlx5_flow_attr *attr;
struct mlx5_flow_spec *spec;
if (flow_flag_test(flow, FAILED))
continue;
+ spec = &flow->attr->parse_attr->spec;
+
+ attr = mlx5e_tc_get_encap_attr(flow);
esw_attr = attr->esw_attr;
parse_attr = attr->parse_attr;
- spec = &parse_attr->spec;
err = mlx5e_update_vf_tunnel(esw, esw_attr, &parse_attr->mod_hdr_acts,
e->out_dev, e->route_dev_ifindex,
@@ -1380,7 +1398,7 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
continue;
}
- err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow);
+ err = mlx5e_tc_add_flow_mod_hdr(priv, flow, attr);
if (err) {
mlx5_core_warn(priv->mdev, "Failed to update flow mod_hdr err=%d",
err);
@@ -1392,9 +1410,18 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
if (!mlx5e_tc_flow_all_encaps_valid(esw_attr))
goto offload_to_slow_path;
+
+ err = mlx5e_tc_offload_flow_post_acts(flow);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "Failed to update flow post acts, %d\n",
+ err);
+ goto offload_to_slow_path;
+ }
+
/* update from slow path rule to encap rule */
- rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, flow->attr);
if (IS_ERR(rule)) {
+ mlx5e_tc_unoffload_flow_post_acts(flow);
err = PTR_ERR(rule);
mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
index 3391504d9a08..d542b8476491 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
@@ -7,15 +7,19 @@
#include "tc_priv.h"
void mlx5e_detach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow, int out_index);
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ int out_index);
int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
struct net_device *mirred_dev,
int out_index,
struct netlink_ext_ack *extack,
struct net_device **encap_dev,
bool *encap_valid);
+
int mlx5e_attach_decap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack);
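The signature changes above are mechanical but deliberate: once a flow can own several attributes, helpers must act on the attribute the caller resolved (for example via mlx5e_tc_get_encap_attr()) rather than silently dereferencing flow->attr. A compressed sketch of the convention, ex_* names illustrative:

    struct ex_attr { int ndests; };
    struct ex_flow { struct ex_attr *attr; };

    static void ex_release_dest(struct ex_attr *attr, int out_index) {}

    /* The caller picks the attribute; the helper never assumes that
     * flow->attr is the one carrying the encap destination.
     */
    static void ex_detach(struct ex_flow *flow, struct ex_attr *attr, int out_index)
    {
        ex_release_dest(attr, out_index);
    }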
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
index da169b816665..d4239e3b3c88 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
@@ -88,9 +88,6 @@ void mlx5e_tir_builder_build_packet_merge(struct mlx5e_tir_builder *builder,
(MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8);
MLX5_SET(tirc, tirc, lro_timeout_period_usecs, pkt_merge_param->timeout);
break;
- case MLX5E_PACKET_MERGE_SHAMPO:
- MLX5_SET(tirc, tirc, packet_merge_mask, MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO);
- break;
default:
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index b789af07829c..c208ea307bff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -9,19 +9,6 @@
#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
-/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
- * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment.
- * We use a bound lower that MLX5_SEND_WQE_MAX_WQEBBS to let a
- * full-session WQE be cache-aligned.
- */
-#if L1_CACHE_BYTES < 128
-#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
-#else
-#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
-#endif
-
-#define MLX5E_TX_MPW_MAX_NUM_DS (MLX5E_TX_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS)
-
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
@@ -57,10 +44,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget);
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
/* RX */
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
-void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info,
- bool recycle);
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page);
+void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle);
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
@@ -68,8 +53,6 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
/* TX */
-u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
@@ -308,9 +291,9 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more);
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);
-static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
+static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
{
- return session->ds_count == MLX5E_TX_MPW_MAX_NUM_DS;
+ return session->ds_count == max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
}
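The "session full" test is now bounded by a per-SQ value instead of a compile-time constant. Each 64-byte WQEBB carries four 16-byte data segments (MLX5_SEND_WQEBB_NUM_DS == 4), so an SQ whose MPW limit is, say, 15 WQEBBs closes the session at 60 data segments. Illustrative arithmetic:

    #define EX_SEND_WQEBB_NUM_DS    4   /* 64-byte WQEBB / 16-byte DS */
    #define EX_MAX_SQ_MPW_WQEBBS    15  /* example per-SQ limit */
    #define EX_MPWQE_FULL_DS        (EX_MAX_SQ_MPW_WQEBBS * EX_SEND_WQEBB_NUM_DS) /* 60 */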
static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
@@ -431,10 +414,10 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
}
}
-static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
-{
- BUILD_BUG_ON(PAGE_SIZE / MLX5_SEND_WQE_BB < MLX5_SEND_WQE_MAX_WQEBBS);
+#define MLX5E_STOP_ROOM(wqebbs) ((wqebbs) * 2 - 1)
+static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
+{
/* A WQE must not cross the page boundary, hence two conditions:
* 1. Its size must not exceed the page size.
* 2. If the WQE size is X, and the space remaining in a page is less
@@ -443,18 +426,28 @@ static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
* stop room of X-1 + X.
* WQE size is also limited by the hardware limit.
*/
+ WARN_ONCE(wqe_size > mlx5e_get_max_sq_wqebbs(mdev),
+ "wqe_size %u is greater than max SQ WQEBBs %u",
+ wqe_size, mlx5e_get_max_sq_wqebbs(mdev));
- if (__builtin_constant_p(wqe_size))
- BUILD_BUG_ON(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);
- else
- WARN_ON_ONCE(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);
- return wqe_size * 2 - 1;
+ return MLX5E_STOP_ROOM(wqe_size);
+}
+
+static inline u16 mlx5e_stop_room_for_max_wqe(struct mlx5_core_dev *mdev)
+{
+ return MLX5E_STOP_ROOM(mlx5e_get_max_sq_wqebbs(mdev));
}
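MLX5E_STOP_ROOM(X) = 2X - 1 encodes the padding rule described in the comment above: a WQE of X WQEBBs may need up to X - 1 WQEBBs of NOP fill to avoid straddling a page, plus X for itself. For a 16-WQEBB maximum WQE (the historical MLX5_SEND_WQE_MAX_WQEBBS value), the SQ therefore keeps 31 WQEBBs in reserve:

    #include <linux/types.h>

    #define EX_STOP_ROOM(wqebbs)    ((wqebbs) * 2 - 1)

    static const u16 ex_stop_room_max = EX_STOP_ROOM(16);  /* 15 padding + 16 = 31 */

Making the bound a runtime query (mlx5e_get_max_sq_wqebbs()) lets devices with a smaller max_wqe_sz_sq reserve proportionally less.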
static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
{
- u16 room = sq->reserved_room + mlx5e_stop_room_for_wqe(wqe_size);
+ u16 room = sq->reserved_room;
+
+ WARN_ONCE(wqe_size > sq->max_sq_wqebbs,
+ "wqe_size %u is greater than max SQ WQEBBs %u",
+ wqe_size, sq->max_sq_wqebbs);
+
+ room += MLX5E_STOP_ROOM(wqe_size);
return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 56e10c84a706..8f321a6c0809 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -57,12 +57,14 @@ int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
static inline bool
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
- struct mlx5e_dma_info *di, struct xdp_buff *xdp)
+ struct page *page, struct xdp_buff *xdp)
{
+ struct skb_shared_info *sinfo = NULL;
struct mlx5e_xmit_data xdptxd;
struct mlx5e_xdp_info xdpi;
struct xdp_frame *xdpf;
dma_addr_t dma_addr;
+ int i;
xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
@@ -96,46 +98,77 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
xdptxd.dma_addr = dma_addr;
xdpi.frame.xdpf = xdpf;
xdpi.frame.dma_addr = dma_addr;
- } else {
- /* Driver assumes that xdp_convert_buff_to_frame returns
- * an xdp_frame that points to the same memory region as
- * the original xdp_buff. It allows to map the memory only
- * once and to use the DMA_BIDIRECTIONAL mode.
- */
- xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
+ if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
+ mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0)))
+ return false;
+
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+ return true;
+ }
+
+ /* Driver assumes that xdp_convert_buff_to_frame returns an xdp_frame
+ * that points to the same memory region as the original xdp_buff. It
+ * allows to map the memory only once and to use the DMA_BIDIRECTIONAL
+ * mode.
+ */
+
+ xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
+ xdpi.page.rq = rq;
+
+ dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
+ dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_TO_DEVICE);
+
+ if (unlikely(xdp_frame_has_frags(xdpf))) {
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
- dma_addr = di->addr + (xdpf->data - (void *)xdpf);
- dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len,
- DMA_TO_DEVICE);
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+ dma_addr_t addr;
+ u32 len;
- xdptxd.dma_addr = dma_addr;
- xdpi.page.rq = rq;
- xdpi.page.di = *di;
+ addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
+ skb_frag_off(frag);
+ len = skb_frag_size(frag);
+ dma_sync_single_for_device(sq->pdev, addr, len,
+ DMA_TO_DEVICE);
+ }
}
- return INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);
+ xdptxd.dma_addr = dma_addr;
+
+ if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
+ mlx5e_xmit_xdp_frame, sq, &xdptxd, sinfo, 0)))
+ return false;
+
+ xdpi.page.page = page;
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+
+ if (unlikely(xdp_frame_has_frags(xdpf))) {
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+
+ xdpi.page.page = skb_frag_page(frag);
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+ }
+ }
+
+ return true;
}
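Note the ordering in the rewritten mlx5e_xmit_xdp_buff(): the xdpi FIFO entries (one for the head page, one per fragment) are pushed only after the send succeeded, so the completion path can pop exactly one entry per page and release each page individually. A sketch of the matching consume side under that assumption, ex_* names illustrative:

    #include <linux/types.h>

    struct ex_fifo;
    struct page;
    struct page *ex_fifo_pop(struct ex_fifo *fifo);
    void ex_page_release(struct page *page, bool recycle);

    static void ex_complete_frame(struct ex_fifo *fifo, int num_pages, bool recycle)
    {
        int i;

        /* one pushed entry per page of the frame, popped in order */
        for (i = 0; i < num_pages; i++)
            ex_page_release(ex_fifo_pop(fifo), recycle);
    }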
/* returns true if packet was consumed by xdp */
-bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
- u32 *len, struct xdp_buff *xdp)
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
+ struct bpf_prog *prog, struct xdp_buff *xdp)
{
- struct bpf_prog *prog = rcu_dereference(rq->xdp_prog);
u32 act;
int err;
- if (!prog)
- return false;
-
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
- *len = xdp->data_end - xdp->data;
return false;
case XDP_TX:
- if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, di, xdp)))
+ if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, page, xdp)))
goto xdp_abort;
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
return true;
@@ -147,7 +180,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
__set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
- mlx5e_page_dma_unmap(rq, di);
+ mlx5e_page_dma_unmap(rq, page);
rq->stats->xdp_redirect++;
return true;
default:
@@ -199,7 +232,7 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
struct mlx5e_tx_wqe *wqe;
u16 pi;
- pi = mlx5e_xdpsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
+ pi = mlx5e_xdpsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
net_prefetchw(wqe->data);
@@ -245,10 +278,8 @@ enum {
INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
{
if (unlikely(!sq->mpwqe.wqe)) {
- const u16 stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
-
if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
- stop_room))) {
+ sq->stop_room))) {
/* SQ is full, ring doorbell */
mlx5e_xmit_xdp_doorbell(sq);
sq->stats->full++;
@@ -262,12 +293,26 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
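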
}
INDIRECT_CALLABLE_SCOPE bool
+mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
+ struct skb_shared_info *sinfo, int check_result);
+
+INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
- struct mlx5e_xdp_info *xdpi, int check_result)
+ struct skb_shared_info *sinfo, int check_result)
{
struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5e_xdpsq_stats *stats = sq->stats;
+ if (unlikely(sinfo)) {
+ /* MPWQE is enabled, but a multi-buffer packet is queued for
+ * transmission. MPWQE can't send fragmented packets, so close
+ * the current session and fall back to a regular WQE.
+ */
+ if (unlikely(sq->mpwqe.wqe))
+ mlx5e_xdp_mpwqe_complete(sq);
+ return mlx5e_xmit_xdp_frame(sq, xdptxd, sinfo, 0);
+ }
+
if (unlikely(xdptxd->len > sq->hw_mtu)) {
stats->err++;
return false;
@@ -288,17 +333,16 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);
- if (unlikely(mlx5e_xdp_mpqwe_is_full(session)))
+ if (unlikely(mlx5e_xdp_mpqwe_is_full(session, sq->max_sq_mpw_wqebbs)))
mlx5e_xdp_mpwqe_complete(sq);
- mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
stats->xmit++;
return true;
}
-INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
+static int mlx5e_xmit_xdp_frame_check_stop_room(struct mlx5e_xdpsq *sq, int stop_room)
{
- if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
+ if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, stop_room))) {
/* SQ is full, ring doorbell */
mlx5e_xmit_xdp_doorbell(sq);
sq->stats->full++;
@@ -308,45 +352,76 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
return MLX5E_XDP_CHECK_OK;
}
+INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
+{
+ return mlx5e_xmit_xdp_frame_check_stop_room(sq, 1);
+}
+
INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
- struct mlx5e_xdp_info *xdpi, int check_result)
+ struct skb_shared_info *sinfo, int check_result)
{
struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
- struct mlx5_wqe_data_seg *dseg = wqe->data;
+ struct mlx5_wqe_ctrl_seg *cseg;
+ struct mlx5_wqe_data_seg *dseg;
+ struct mlx5_wqe_eth_seg *eseg;
+ struct mlx5e_tx_wqe *wqe;
dma_addr_t dma_addr = xdptxd->dma_addr;
u32 dma_len = xdptxd->len;
+ u16 ds_cnt, inline_hdr_sz;
+ u8 num_wqebbs = 1;
+ int num_frags = 0;
+ u16 pi;
struct mlx5e_xdpsq_stats *stats = sq->stats;
- net_prefetchw(wqe);
-
if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
stats->err++;
return false;
}
- if (!check_result)
- check_result = mlx5e_xmit_xdp_frame_check(sq);
+ ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
+ if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE)
+ ds_cnt++;
+
+ /* check_result must be 0 if sinfo is passed. */
+ if (!check_result) {
+ int stop_room = 1;
+
+ if (unlikely(sinfo)) {
+ ds_cnt += sinfo->nr_frags;
+ num_frags = sinfo->nr_frags;
+ num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
+ /* Assuming MLX5_CAP_GEN(mdev, max_wqe_sz_sq) is big
+ * enough to hold all fragments.
+ */
+ stop_room = MLX5E_STOP_ROOM(num_wqebbs);
+ }
+
+ check_result = mlx5e_xmit_xdp_frame_check_stop_room(sq, stop_room);
+ }
if (unlikely(check_result < 0))
return false;
- cseg->fm_ce_se = 0;
+ pi = mlx5e_xdpsq_get_next_pi(sq, num_wqebbs);
+ wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ net_prefetchw(wqe);
+
+ cseg = &wqe->ctrl;
+ eseg = &wqe->eth;
+ dseg = wqe->data;
+
+ inline_hdr_sz = 0;
/* copy the inline part if required */
if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
memcpy(eseg->inline_hdr.start, xdptxd->data, sizeof(eseg->inline_hdr.start));
- eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
memcpy(dseg, xdptxd->data + sizeof(eseg->inline_hdr.start),
MLX5E_XDP_MIN_INLINE - sizeof(eseg->inline_hdr.start));
dma_len -= MLX5E_XDP_MIN_INLINE;
dma_addr += MLX5E_XDP_MIN_INLINE;
+ inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
dseg++;
}
@@ -356,11 +431,45 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
- sq->pc++;
+ if (unlikely(test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state))) {
+ u8 num_pkts = 1 + num_frags;
+ int i;
+
+ memset(&cseg->trailer, 0, sizeof(cseg->trailer));
+ memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
+
+ eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+ dseg->lkey = sq->mkey_be;
+
+ for (i = 0; i < num_frags; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+ dma_addr_t addr;
+
+ addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
+ skb_frag_off(frag);
+
+ dseg++;
+ dseg->addr = cpu_to_be64(addr);
+ dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
+ dseg->lkey = sq->mkey_be;
+ }
+
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+
+ sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
+ .num_wqebbs = num_wqebbs,
+ .num_pkts = num_pkts,
+ };
+
+ sq->pc += num_wqebbs;
+ } else {
+ cseg->fm_ce_se = 0;
+
+ sq->pc++;
+ }
sq->doorbell_cseg = cseg;
- mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
stats->xmit++;
return true;
}
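The descriptor accounting for the multi-buffer send path: ds_cnt starts at the control plus ethernet segments (MLX5E_TX_WQE_EMPTY_DS_COUNT) plus one data segment for the linear part, one more if an inline header is used, and one per fragment; the WQE then occupies DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS) WQEBBs. A worked example with illustrative numbers, 3 base segments and 2 fragments:

    #include <linux/kernel.h>   /* DIV_ROUND_UP */
    #include <linux/types.h>

    static inline u8 ex_num_wqebbs(u16 ds_cnt)
    {
        return DIV_ROUND_UP(ds_cnt, 4); /* 4 data segments per WQEBB */
    }
    /* ex_num_wqebbs(3 + 2) == 2: a 5-DS WQE spans two WQEBBs */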
@@ -386,7 +495,7 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
break;
case MLX5E_XDP_XMIT_MODE_PAGE:
/* XDP_TX from the regular RQ */
- mlx5e_page_release_dynamic(xdpi.page.rq, &xdpi.page.di, recycle);
+ mlx5e_page_release_dynamic(xdpi.page.rq, xdpi.page.page, recycle);
break;
case MLX5E_XDP_XMIT_MODE_XSK:
/* AF_XDP send */
@@ -539,12 +648,13 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
xdpi.frame.dma_addr = xdptxd.dma_addr;
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);
+ mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0);
if (unlikely(!ret)) {
dma_unmap_single(sq->pdev, xdptxd.dma_addr,
xdptxd.len, DMA_TO_DEVICE);
break;
}
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
nxmit++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 8d991c3b7a50..287e17911251 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -38,7 +38,6 @@
#include "en/txrx.h"
#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-#define MLX5E_XDP_TX_DS_COUNT (MLX5E_TX_WQE_EMPTY_DS_COUNT + 1 /* SG DS */)
#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT 16
#define MLX5E_XDP_INLINE_WQE_SZ_THRSD \
@@ -47,8 +46,8 @@
struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
-bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
- u32 *len, struct xdp_buff *xdp);
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
+ struct bpf_prog *prog, struct xdp_buff *xdp);
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
@@ -59,11 +58,11 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
struct mlx5e_xmit_data *xdptxd,
- struct mlx5e_xdp_info *xdpi,
+ struct skb_shared_info *sinfo,
int check_result));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
struct mlx5e_xmit_data *xdptxd,
- struct mlx5e_xdp_info *xdpi,
+ struct skb_shared_info *sinfo,
int check_result));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
@@ -123,12 +122,13 @@ static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur)
return cur;
}
-static inline bool mlx5e_xdp_mpqwe_is_full(struct mlx5e_tx_mpwqe *session)
+static inline bool mlx5e_xdp_mpqwe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
{
if (session->inline_on)
return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT >
- MLX5E_TX_MPW_MAX_NUM_DS;
- return mlx5e_tx_mpwqe_is_full(session);
+ max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
+
+ return mlx5e_tx_mpwqe_is_full(session, max_sq_mpw_wqebbs);
}
struct mlx5e_xdp_wqe_info {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 8e7b877d8a12..021da085e603 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -4,6 +4,7 @@
#include "rx.h"
#include "en/xdp.h"
#include <net/xdp_sock_drv.h>
+#include <linux/filter.h>
/* RX data path */
@@ -30,7 +31,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
u32 page_idx)
{
struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk;
- u32 cqe_bcnt32 = cqe_bcnt;
+ struct bpf_prog *prog;
/* Check packet size. Note LRO doesn't use linear SKB */
if (unlikely(cqe_bcnt > rq->hw_mtu)) {
@@ -45,7 +46,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
*/
WARN_ON_ONCE(head_offset);
- xdp->data_end = xdp->data + cqe_bcnt32;
+ xdp->data_end = xdp->data + cqe_bcnt;
xdp_set_data_meta_invalid(xdp);
xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
net_prefetch(xdp->data);
@@ -65,7 +66,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
* allocated first from the Reuse Ring, so it has enough space.
*/
- if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp))) {
+ prog = rcu_dereference(rq->xdp_prog);
+ if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp))) {
if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
return NULL; /* page/packet was consumed by XDP */
@@ -74,7 +76,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
/* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
* frame. On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt32);
+ return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
}
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
@@ -83,6 +85,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
u32 cqe_bcnt)
{
struct xdp_buff *xdp = wi->di->xsk;
+ struct bpf_prog *prog;
/* wi->offset is not used in this function, because xdp->data and the
* DMA address point directly to the necessary place. Furthermore, the
@@ -101,12 +104,13 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
return NULL;
}
- if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp)))
+ prog = rcu_dereference(rq->xdp_prog);
+ if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp)))
return NULL; /* page/packet was consumed by XDP */
/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
* will be handled by mlx5e_put_rx_frag.
* On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt);
+ return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 25eac9e20342..3ad7f1301fa8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -43,7 +43,7 @@ static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev,
struct mlx5e_channel_param *cparam)
{
mlx5e_build_rq_param(mdev, params, xsk, q_counter, &cparam->rq);
- mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
+ mlx5e_build_xdpsq_param(mdev, params, xsk, &cparam->xdp_sq);
}
static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 8e96260fce1d..3ec0c17db010 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -103,12 +103,15 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
xsk_buff_raw_dma_sync_for_device(pool, xdptxd.dma_addr, xdptxd.len);
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, check_result);
+ mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL,
+ check_result);
if (unlikely(!ret)) {
if (sq->mpwqe.wqe)
mlx5e_xdp_mpwqe_complete(sq);
mlx5e_xsk_tx_post_err(sq, &xdpi);
+ } else {
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
}
flush = true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index d964665eaa63..62cde3e87c2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -139,15 +139,6 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
return true;
}
-static inline bool mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state *state)
-{
-#ifdef CONFIG_MLX5_EN_IPSEC
- return mlx5e_ipsec_is_tx_flow(&state->ipsec);
-#else
- return false;
-#endif
-}
-
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
struct mlx5e_accel_tx_state *state)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 7cab08a2f715..299e3f0fcb5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -35,7 +35,6 @@
#include <crypto/aead.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
-#include <linux/module.h>
#include "en.h"
#include "en_accel/ipsec.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 9ad3459fb63a..aaf11c66bf4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -32,9 +32,9 @@ u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);
- stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
- stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
- stop_room += num_dumps * mlx5e_stop_room_for_wqe(MLX5E_KTLS_DUMP_WQEBBS);
+ stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
+ stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
+ stop_room += num_dumps * mlx5e_stop_room_for_wqe(mdev, MLX5E_KTLS_DUMP_WQEBBS);
return stop_room;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index 7a700f913582..a05580cea481 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -386,5 +386,5 @@ u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
/* FPGA */
/* Resync SKB. */
- return mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+ return mlx5e_stop_room_for_max_wqe(mdev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index a4c8d8d00d5a..d659fe07d464 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1142,7 +1142,7 @@ static int mlx5e_update_trust_state_hw(struct mlx5e_priv *priv, void *context)
err = mlx5_set_trust_state(priv->mdev, *trust_state);
if (err)
return err;
- priv->dcbx_dp.trust_state = *trust_state;
+ WRITE_ONCE(priv->dcbx_dp.trust_state, *trust_state);
return 0;
}
@@ -1187,16 +1187,18 @@ static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio)
static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
+ u8 trust_state;
int err;
- priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
-
- if (!MLX5_DSCP_SUPPORTED(mdev))
+ if (!MLX5_DSCP_SUPPORTED(mdev)) {
+ WRITE_ONCE(priv->dcbx_dp.trust_state, MLX5_QPTS_TRUST_PCP);
return 0;
+ }
- err = mlx5_query_trust_state(priv->mdev, &priv->dcbx_dp.trust_state);
+ err = mlx5_query_trust_state(priv->mdev, &trust_state);
if (err)
return err;
+ WRITE_ONCE(priv->dcbx_dp.trust_state, trust_state);
mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params,
priv->dcbx_dp.trust_state);
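trust_state is promoted to WRITE_ONCE() stores because the datapath now reads it locklessly while DCB configuration may change it concurrently; the reader side pairs these with READ_ONCE(). A minimal sketch of the pairing, with an illustrative field:

    #include <linux/compiler.h>
    #include <linux/types.h>

    static u8 ex_trust_state;

    static void ex_set_trust(u8 ts)     /* config path */
    {
        WRITE_ONCE(ex_trust_state, ts); /* no store tearing */
    }

    static u8 ex_get_trust(void)        /* datapath, no lock held */
    {
        return READ_ONCE(ex_trust_state);
    }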
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index bf80fb612449..2f1dedc721d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -72,12 +72,13 @@
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
- bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
- MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
- MLX5_CAP_ETH(mdev, reg_umr_sq);
- u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
- bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;
+ bool striding_rq_umr, inline_umr;
+ u16 max_wqe_sz_cap;
+ striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
+ MLX5_CAP_ETH(mdev, reg_umr_sq);
+ max_wqe_sz_cap = mlx5e_get_max_sq_wqebbs(mdev) * MLX5_SEND_WQE_BB;
+ inline_umr = max_wqe_sz_cap >= MLX5E_UMR_WQE_INLINE_SZ;
if (!striding_rq_umr)
return false;
if (!inline_umr) {
@@ -594,6 +595,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
rq->mpwqe.num_strides =
BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
+ rq->mpwqe.min_wqe_bulk = mlx5e_mpwqe_get_min_wqe_bulk(wq_sz);
rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);
@@ -778,7 +780,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
* entered, and it's safe to call mlx5e_page_release_dynamic
* directly.
*/
- mlx5e_page_release_dynamic(rq, dma_info, false);
+ mlx5e_page_release_dynamic(rq, dma_info->page, false);
}
xdp_rxq_info_unreg(&rq->xdp_rxq);
@@ -1164,6 +1166,9 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
is_redirect ?
&c->priv->channel_stats[c->ix]->xdpsq :
&c->priv->channel_stats[c->ix]->rq_xdpsq;
+ sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
+ sq->stop_room = MLX5E_STOP_ROOM(sq->max_sq_wqebbs);
+ sq->max_sq_mpw_wqebbs = mlx5e_get_sw_max_sq_mpw_wqebbs(sq->max_sq_wqebbs);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1238,6 +1243,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
sq->channel = c;
sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->reserved_room = param->stop_room;
+ sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1313,7 +1319,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
int err;
sq->pdev = c->pdev;
- sq->tstamp = c->tstamp;
sq->clock = &mdev->clock;
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
@@ -1324,6 +1329,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
+ sq->max_sq_mpw_wqebbs = mlx5e_get_sw_max_sq_mpw_wqebbs(sq->max_sq_wqebbs);
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
@@ -1659,14 +1666,22 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+
+ /* Don't enable multi buffer on XDP_REDIRECT SQ, as it's not yet
+ * supported by upstream, and there is no defined trigger to allow
+ * transmitting redirected multi-buffer frames.
+ */
+ if (param->is_xdp_mb && !is_redirect)
+ set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
+
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_xdpsq;
mlx5e_set_xmit_fp(sq, param->is_mpw);
- if (!param->is_mpw) {
- unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
+ if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
+ unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
unsigned int inline_hdr_sz = 0;
int i;
@@ -2677,39 +2692,41 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
struct mlx5e_txqsq *sq = &c->sq[tc];
priv->txq2sq[sq->txq_ix] = sq;
- priv->channel_tc2realtxq[i][tc] = i + tc * ch;
}
}
if (!priv->channels.ptp)
- return;
+ goto out;
if (!test_bit(MLX5E_PTP_STATE_TX, priv->channels.ptp->state))
- return;
+ goto out;
for (tc = 0; tc < num_tc; tc++) {
struct mlx5e_ptp *c = priv->channels.ptp;
struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
priv->txq2sq[sq->txq_ix] = sq;
- priv->port_ptp_tc2realtxq[tc] = priv->num_tc_x_num_ch + tc;
}
-}
-static void mlx5e_update_num_tc_x_num_ch(struct mlx5e_priv *priv)
-{
- /* Sync with mlx5e_select_queue. */
- WRITE_ONCE(priv->num_tc_x_num_ch,
- mlx5e_get_dcb_num_tc(&priv->channels.params) * priv->channels.num);
+out:
+ /* Make the change to txq2sq visible before the queue is started.
+ * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
+ * which pairs with this barrier.
+ */
+ smp_wmb();
}
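The smp_wmb() orders the txq2sq[] pointer stores against whatever later tells the datapath the queues are usable; per the comment, the ACQUIRE implied by the TX queue spinlock taken around mlx5e_xmit() is the pairing read-side. The publish pattern in isolation, ex_* names illustrative:

    #include <asm/barrier.h>

    struct ex_sq;
    void ex_start_queue(int ix);

    static void ex_publish(struct ex_sq **map, struct ex_sq *sq, int ix)
    {
        map[ix] = sq;       /* plain store of the mapping ...        */
        smp_wmb();          /* ... made visible before the queue is  */
        ex_start_queue(ix); /* started; the consumer's ACQUIRE pairs */
    }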
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
- mlx5e_update_num_tc_x_num_ch(priv);
mlx5e_build_txq_maps(priv);
mlx5e_activate_channels(&priv->channels);
mlx5e_qos_activate_queues(priv);
mlx5e_xdp_tx_enable(priv);
+
+ /* dev_watchdog() wants all TX queues to be started when the carrier is
+ * OK, including the ones in range real_num_tx_queues..num_tx_queues-1.
+ * Make it happy to avoid TX timeout false alarms.
+ */
netif_tx_start_all_queues(priv->netdev);
if (mlx5e_is_vport_rep(priv))
@@ -2729,11 +2746,13 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
if (mlx5e_is_vport_rep(priv))
mlx5e_remove_sqs_fwd_rules(priv);
- /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
- * polling for inactive tx queues.
+ /* The results of ndo_select_queue are unreliable, while netdev config
+ * is being changed (real_num_tx_queues, num_tc). Stop all queues to
+ * prevent ndo_start_xmit from being called, so that it can assume that
+ * the selected queue is always valid.
*/
- netif_tx_stop_all_queues(priv->netdev);
netif_tx_disable(priv->netdev);
+
mlx5e_xdp_tx_disable(priv);
mlx5e_deactivate_channels(&priv->channels);
}
@@ -2793,6 +2812,7 @@ static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
mlx5e_close_channels(&old_chs);
priv->profile->update_rx(priv);
+ mlx5e_selq_apply(&priv->selq);
out:
mlx5e_activate_priv_channels(priv);
@@ -2816,13 +2836,24 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
return mlx5e_switch_priv_params(priv, params, preactivate, context);
new_chs.params = *params;
+
+ mlx5e_selq_prepare(&priv->selq, &new_chs.params, !!priv->htb.maj_id);
+
err = mlx5e_open_channels(priv, &new_chs);
if (err)
- return err;
+ goto err_cancel_selq;
+
err = mlx5e_switch_priv_channels(priv, &new_chs, preactivate, context);
if (err)
- mlx5e_close_channels(&new_chs);
+ goto err_close;
+
+ return 0;
+
+err_close:
+ mlx5e_close_channels(&new_chs);
+err_cancel_selq:
+ mlx5e_selq_cancel(&priv->selq);
return err;
}
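mlx5e_selq_{prepare,apply,cancel} give the channel switch transactional semantics: prepare stages the new TX selection state under the state lock, apply flips the datapath to it only once the new channels exist, and cancel discards the staged state on any failure, as the unwind labels above show. The call pattern condensed, ex_* names are placeholders:

    struct ex_priv;
    struct ex_params;
    void ex_selq_prepare(struct ex_priv *priv, struct ex_params *p);
    void ex_selq_apply(struct ex_priv *priv);
    void ex_selq_cancel(struct ex_priv *priv);
    int ex_open_channels(struct ex_priv *priv, struct ex_params *p);

    static int ex_switch(struct ex_priv *priv, struct ex_params *new_params)
    {
        int err;

        ex_selq_prepare(priv, new_params);  /* staged, not yet visible */

        err = ex_open_channels(priv, new_params);
        if (err)
            goto err_cancel;

        ex_selq_apply(priv);                /* commit to the datapath */
        return 0;

    err_cancel:
        ex_selq_cancel(priv);               /* drop the staged state */
        return err;
    }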
@@ -2862,6 +2893,8 @@ int mlx5e_open_locked(struct net_device *netdev)
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
+ mlx5e_selq_prepare(&priv->selq, &priv->channels.params, !!priv->htb.maj_id);
+
set_bit(MLX5E_STATE_OPENED, &priv->state);
err = mlx5e_open_channels(priv, &priv->channels);
@@ -2869,6 +2902,7 @@ int mlx5e_open_locked(struct net_device *netdev)
goto err_clear_state_opened_flag;
priv->profile->update_rx(priv);
+ mlx5e_selq_apply(&priv->selq);
mlx5e_activate_priv_channels(priv);
mlx5e_apply_traps(priv, true);
if (priv->profile->update_carrier)
@@ -2879,6 +2913,7 @@ int mlx5e_open_locked(struct net_device *netdev)
err_clear_state_opened_flag:
clear_bit(MLX5E_STATE_OPENED, &priv->state);
+ mlx5e_selq_cancel(&priv->selq);
return err;
}
@@ -3616,8 +3651,7 @@ static int set_feature_hw_gro(struct net_device *netdev, bool enable)
goto out;
}
- err = mlx5e_safe_switch_params(priv, &new_params,
- mlx5e_modify_tirs_packet_merge_ctx, NULL, reset);
+ err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
out:
mutex_unlock(&priv->state_lock);
return err;
@@ -3919,6 +3953,31 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
return true;
}
+static bool mlx5e_params_validate_xdp(struct net_device *netdev, struct mlx5e_params *params)
+{
+ bool is_linear;
+
+ /* No XSK params: AF_XDP can't be enabled yet at the point of setting
+ * the XDP program.
+ */
+ is_linear = mlx5e_rx_is_linear_skb(params, NULL);
+
+ if (!is_linear && params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
+ netdev_warn(netdev, "XDP is not allowed with striding RQ and MTU(%d) > %d\n",
+ params->sw_mtu,
+ mlx5e_xdp_max_mtu(params, NULL));
+ return false;
+ }
+ if (!is_linear && !params->xdp_prog->aux->xdp_has_frags) {
+ netdev_warn(netdev, "MTU(%d) > %d, too big for an XDP program not aware of multi buffer\n",
+ params->sw_mtu,
+ mlx5e_xdp_max_mtu(params, NULL));
+ return false;
+ }
+
+ return true;
+}
+
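The xdp_has_frags gate checked in mlx5e_params_validate_xdp() is set on the program by the loader when it declares multi-buffer support; with libbpf the usual way is the "xdp.frags" section name. A minimal frags-aware program for illustration (everything beyond the section-name convention is an example):

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp.frags")            /* marks the program as multi-buffer aware */
    int ex_xdp_prog(struct xdp_md *ctx)
    {
        return XDP_PASS;        /* a real program would inspect the frags */
    }

    char _license[] SEC("license") = "GPL";

Without that declaration, a non-linear MTU is refused here rather than letting the program see truncated packets.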
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
mlx5e_fp_preactivate preactivate)
{
@@ -3938,10 +3997,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
if (err)
goto out;
- if (params->xdp_prog &&
- !mlx5e_rx_is_linear_skb(&new_params, NULL)) {
- netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
- new_mtu, mlx5e_xdp_max_mtu(params, NULL));
+ if (new_params.xdp_prog && !mlx5e_params_validate_xdp(netdev, &new_params)) {
err = -EINVAL;
goto out;
}
@@ -4424,15 +4480,8 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
new_params = priv->channels.params;
new_params.xdp_prog = prog;
- /* No XSK params: AF_XDP can't be enabled yet at the point of setting
- * the XDP program.
- */
- if (!mlx5e_rx_is_linear_skb(&new_params, NULL)) {
- netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
- new_params.sw_mtu,
- mlx5e_xdp_max_mtu(&new_params, NULL));
+ if (!mlx5e_params_validate_xdp(netdev, &new_params))
return -EINVAL;
- }
return 0;
}
@@ -4637,11 +4686,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu)
priv->max_nch);
mlx5e_params_mqprio_reset(params);
- /* Set an initial non-zero value, so that mlx5e_select_queue won't
- * divide by zero if called before first activating channels.
- */
- priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc;
-
/* SQ */
params->log_sq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
@@ -5194,7 +5238,8 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
struct net_device *netdev,
struct mlx5_core_dev *mdev)
{
- int nch, num_txqs, node, i;
+ int nch, num_txqs, node;
+ int err;
num_txqs = netdev->num_tx_queues;
nch = mlx5e_calc_max_nch(mdev, netdev, profile);
@@ -5211,6 +5256,11 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
return -ENOMEM;
mutex_init(&priv->state_lock);
+
+ err = mlx5e_selq_init(&priv->selq, &priv->state_lock);
+ if (err)
+ goto err_free_cpumask;
+
hash_init(priv->htb.qos_tc2node);
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
@@ -5219,7 +5269,7 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
priv->wq = create_singlethread_workqueue("mlx5e");
if (!priv->wq)
- goto err_free_cpumask;
+ goto err_free_selq;
priv->txq2sq = kcalloc_node(num_txqs, sizeof(*priv->txq2sq), GFP_KERNEL, node);
if (!priv->txq2sq)
@@ -5229,36 +5279,21 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
if (!priv->tx_rates)
goto err_free_txq2sq;
- priv->channel_tc2realtxq =
- kcalloc_node(nch, sizeof(*priv->channel_tc2realtxq), GFP_KERNEL, node);
- if (!priv->channel_tc2realtxq)
- goto err_free_tx_rates;
-
- for (i = 0; i < nch; i++) {
- priv->channel_tc2realtxq[i] =
- kcalloc_node(profile->max_tc, sizeof(**priv->channel_tc2realtxq),
- GFP_KERNEL, node);
- if (!priv->channel_tc2realtxq[i])
- goto err_free_channel_tc2realtxq;
- }
-
priv->channel_stats =
kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node);
if (!priv->channel_stats)
- goto err_free_channel_tc2realtxq;
+ goto err_free_tx_rates;
return 0;
-err_free_channel_tc2realtxq:
- while (--i >= 0)
- kfree(priv->channel_tc2realtxq[i]);
- kfree(priv->channel_tc2realtxq);
err_free_tx_rates:
kfree(priv->tx_rates);
err_free_txq2sq:
kfree(priv->txq2sq);
err_destroy_workqueue:
destroy_workqueue(priv->wq);
+err_free_selq:
+ mlx5e_selq_cleanup(&priv->selq);
err_free_cpumask:
free_cpumask_var(priv->scratchpad.cpumask);
return -ENOMEM;
@@ -5275,12 +5310,12 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
for (i = 0; i < priv->stats_nch; i++)
kvfree(priv->channel_stats[i]);
kfree(priv->channel_stats);
- for (i = 0; i < priv->max_nch; i++)
- kfree(priv->channel_tc2realtxq[i]);
- kfree(priv->channel_tc2realtxq);
kfree(priv->tx_rates);
kfree(priv->txq2sq);
destroy_workqueue(priv->wq);
+ mutex_lock(&priv->state_lock);
+ mlx5e_selq_cleanup(&priv->selq);
+ mutex_unlock(&priv->state_lock);
free_cpumask_var(priv->scratchpad.cpumask);
for (i = 0; i < priv->htb.max_qos_sqs; i++)
@@ -5346,6 +5381,7 @@ mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *prof
}
netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
dev_net_set(netdev, mlx5_core_net(mdev));
return netdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 06d1f46f1688..6b7e7ea6ded2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -55,6 +55,7 @@
#include "diag/en_rep_tracepoint.h"
#include "en_accel/ipsec.h"
#include "en/tc/int_port.h"
+#include "en/ptp.h"
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
@@ -401,13 +402,18 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
+ int n, tc, nch, num_sqs = 0;
struct mlx5e_channel *c;
- int n, tc, num_sqs = 0;
int err = -ENOMEM;
+ bool ptp_sq;
u32 *sqs;
- sqs = kcalloc(priv->channels.num * mlx5e_get_dcb_num_tc(&priv->channels.params),
- sizeof(*sqs), GFP_KERNEL);
+ ptp_sq = !!(priv->channels.ptp &&
+ MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS));
+ nch = priv->channels.num + ptp_sq;
+
+ sqs = kcalloc(nch * mlx5e_get_dcb_num_tc(&priv->channels.params), sizeof(*sqs),
+ GFP_KERNEL);
if (!sqs)
goto out;
@@ -416,6 +422,12 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
for (tc = 0; tc < c->num_tc; tc++)
sqs[num_sqs++] = c->sq[tc].sqn;
}
+ if (ptp_sq) {
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
+
+ for (tc = 0; tc < ptp_ch->num_tc; tc++)
+ sqs[num_sqs++] = ptp_ch->ptpsq[tc].txqsq.sqn;
+ }
err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
kfree(sqs);
@@ -632,11 +644,6 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
params->mqprio.num_tc = 1;
params->tunneled_offload_en = false;
- /* Set an initial non-zero value, so that mlx5e_select_queue won't
- * divide by zero if called before first activating channels.
- */
- priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc;
-
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}
@@ -935,15 +942,21 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
return err;
}
+ err = mlx5e_tc_ht_init(&rpriv->tc_ht);
+ if (err)
+ goto err_ht_init;
+
if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
err = mlx5e_init_uplink_rep_tx(rpriv);
if (err)
- goto destroy_tises;
+ goto err_init_tx;
}
return 0;
-destroy_tises:
+err_init_tx:
+ mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
+err_ht_init:
mlx5e_destroy_tises(priv);
return err;
}
@@ -963,6 +976,8 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
mlx5e_cleanup_uplink_rep_tx(rpriv);
+
+ mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
}
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
@@ -1099,6 +1114,7 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
&MLX5E_STATS_GRP(ipsec_sw),
&MLX5E_STATS_GRP(ipsec_hw),
#endif
+ &MLX5E_STATS_GRP(ptp),
};
static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index b3f7520dfd08..adf5cc6a7b8c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -64,11 +64,6 @@ struct mlx5e_tc_tun_encap;
struct mlx5e_post_act;
struct mlx5_rep_uplink_priv {
- /* Filters DB - instantiated by the uplink representor and shared by
- * the uplink's VFs
- */
- struct rhashtable tc_ht;
-
/* indirect block callbacks are invoked on bind/unbind events
* on registered higher level devices (e.g. tunnel devices)
*
@@ -113,6 +108,7 @@ struct mlx5e_rep_priv {
struct list_head vport_sqs_list;
struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
struct rtnl_link_stats64 prev_vf_vport_stats;
+ struct rhashtable tc_ht;
};
static inline
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 6530d7bd5045..56bb58704bf9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -34,6 +34,7 @@
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/bitmap.h>
+#include <linux/filter.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
@@ -221,8 +222,7 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
}
-static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
+static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, struct page *page)
{
struct mlx5e_page_cache *cache = &rq->page_cache;
u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
@@ -233,12 +233,13 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
return false;
}
- if (!dev_page_is_reusable(dma_info->page)) {
+ if (!dev_page_is_reusable(page)) {
stats->cache_waive++;
return false;
}
- cache->page_cache[cache->tail] = *dma_info;
+ cache->page_cache[cache->tail].page = page;
+ cache->page_cache[cache->tail].addr = page_pool_get_dma_addr(page);
cache->tail = tail_next;
return true;
}
@@ -286,6 +287,7 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
dma_info->page = NULL;
return -ENOMEM;
}
+ page_pool_set_dma_addr(dma_info->page, dma_info->addr);
return 0;
}
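Storing the mapping with page_pool_set_dma_addr() at allocation time is what lets the series retire struct mlx5e_dma_info on these paths: any holder of the page can recover its DMA address with page_pool_get_dma_addr(), as the reworked unmap and release paths below do. The round trip with the existing page_pool helpers:

    #include <net/page_pool.h>

    static void ex_stash(struct page *page, dma_addr_t addr)
    {
        page_pool_set_dma_addr(page, addr);     /* kept in struct page */
    }

    static dma_addr_t ex_recover(struct page *page)
    {
        return page_pool_get_dma_addr(page);    /* no side lookup table */
    }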
@@ -299,26 +301,27 @@ static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
return mlx5e_page_alloc_pool(rq, dma_info);
}
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page)
{
- dma_unmap_page_attrs(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir,
+ dma_addr_t dma_addr = page_pool_get_dma_addr(page);
+
+ dma_unmap_page_attrs(rq->pdev, dma_addr, PAGE_SIZE, rq->buff.map_dir,
DMA_ATTR_SKIP_CPU_SYNC);
+ page_pool_set_dma_addr(page, 0);
}
-void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info,
- bool recycle)
+void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle)
{
if (likely(recycle)) {
- if (mlx5e_rx_cache_put(rq, dma_info))
+ if (mlx5e_rx_cache_put(rq, page))
return;
- mlx5e_page_dma_unmap(rq, dma_info);
- page_pool_recycle_direct(rq->page_pool, dma_info->page);
+ mlx5e_page_dma_unmap(rq, page);
+ page_pool_recycle_direct(rq->page_pool, page);
} else {
- mlx5e_page_dma_unmap(rq, dma_info);
- page_pool_release_page(rq->page_pool, dma_info->page);
- put_page(dma_info->page);
+ mlx5e_page_dma_unmap(rq, page);
+ page_pool_release_page(rq->page_pool, page);
+ put_page(page);
}
}
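The hunks above replace the driver-private {page, addr} pair (mlx5e_dma_info) with the page itself: the DMA address is stashed in the page via the page_pool accessors at map time and recovered at unmap time. A minimal sketch of that accessor pattern, not the driver's exact code (the example_* helpers are hypothetical; page_pool_{get,set}_dma_addr and the DMA mapping calls are the real APIs):

    #include <linux/dma-mapping.h>
    #include <net/page_pool.h>

    /* Sketch: key the DMA address off the page, as the hunks above do. */
    static int example_map(struct device *dev, struct page *page)
    {
            dma_addr_t addr = dma_map_page(dev, page, 0, PAGE_SIZE,
                                           DMA_FROM_DEVICE);

            if (dma_mapping_error(dev, addr))
                    return -ENOMEM;
            /* Later code can recover the address from the page alone. */
            page_pool_set_dma_addr(page, addr);
            return 0;
    }

    static void example_unmap(struct device *dev, struct page *page)
    {
            dma_unmap_page(dev, page_pool_get_dma_addr(page), PAGE_SIZE,
                           DMA_FROM_DEVICE);
            page_pool_set_dma_addr(page, 0); /* mirrors mlx5e_page_dma_unmap() */
    }

With the address carried by the page, the release helpers above can take a bare struct page * instead of a dma_info.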
@@ -333,7 +336,7 @@ static inline void mlx5e_page_release(struct mlx5e_rq *rq,
*/
xsk_buff_free(dma_info->xsk);
else
- mlx5e_page_release_dynamic(rq, dma_info, recycle);
+ mlx5e_page_release_dynamic(rq, dma_info->page, recycle);
}
static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
@@ -373,12 +376,15 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
int i;
for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
+ u16 headroom;
+
err = mlx5e_get_rx_frag(rq, frag);
if (unlikely(err))
goto free_frags;
+ headroom = i == 0 ? rq->buff.headroom : 0;
wqe->data[i].addr = cpu_to_be64(frag->di->addr +
- frag->offset + rq->buff.headroom);
+ frag->offset + headroom);
}
return 0;
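With this change only the first fragment of a multi-fragment WQE gets the RX headroom; later fragments start at their raw offset. A worked sketch of the address computation with assumed, illustrative values (2048-byte strides, 256-byte headroom):

    #include <linux/types.h>

    /* Sketch: addresses programmed into wqe->data[i].addr above,
     * with assumed illustrative values.
     */
    static void example_addrs(void)
    {
            u64 base     = 0x100000; /* frag->di->addr */
            u32 stride   = 2048;     /* per-fragment offset step */
            u16 headroom = 256;      /* rq->buff.headroom */

            u64 addr0 = base + 0 * stride + headroom; /* i == 0: 0x100100 */
            u64 addr1 = base + 1 * stride;            /* i  > 0: 0x100800 */

            (void)addr0; (void)addr1;
    }

Previously every fragment was offset by the headroom, which wasted stride space on fragments that never hold a packet start.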
@@ -620,7 +626,7 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
struct mlx5e_icosq *sq = rq->icosq;
int i, err, max_klm_entries, len;
- max_klm_entries = MLX5E_MAX_KLM_PER_WQE;
+ max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev);
klm_entries = bitmap_find_window(shampo->bitmap,
shampo->hd_per_wqe,
shampo->hd_per_wq, shampo->pi);
@@ -960,8 +966,7 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
rq->stats->congst_umr++;
-#define UMR_WQE_BULK (2)
- if (likely(missing < UMR_WQE_BULK))
+ if (likely(missing < rq->mpwqe.min_wqe_bulk))
return false;
if (rq->page_pool)
@@ -1490,7 +1495,7 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
static inline
struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
u32 frag_size, u16 headroom,
- u32 cqe_bcnt)
+ u32 cqe_bcnt, u32 metasize)
{
struct sk_buff *skb = build_skb(va, frag_size);
@@ -1502,6 +1507,9 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
skb_reserve(skb, headroom);
skb_put(skb, cqe_bcnt);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
return skb;
}
@@ -1509,7 +1517,7 @@ static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
u32 len, struct xdp_buff *xdp)
{
xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
- xdp_prepare_buff(xdp, va, headroom, len, false);
+ xdp_prepare_buff(xdp, va, headroom, len, true);
}
static struct sk_buff *
@@ -1518,8 +1526,9 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
{
struct mlx5e_dma_info *di = wi->di;
u16 rx_headroom = rq->buff.headroom;
- struct xdp_buff xdp;
+ struct bpf_prog *prog;
struct sk_buff *skb;
+ u32 metasize = 0;
void *va, *data;
u32 frag_size;
@@ -1529,16 +1538,23 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
frag_size, DMA_FROM_DEVICE);
- net_prefetchw(va); /* xdp_frame data area */
net_prefetch(data);
- mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
- if (mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp))
- return NULL; /* page/packet was consumed by XDP */
+ prog = rcu_dereference(rq->xdp_prog);
+ if (prog) {
+ struct xdp_buff xdp;
- rx_headroom = xdp.data - xdp.data_hard_start;
+ net_prefetchw(va); /* xdp_frame data area */
+ mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
+ if (mlx5e_xdp_handle(rq, di->page, prog, &xdp))
+ return NULL; /* page/packet was consumed by XDP */
+
+ rx_headroom = xdp.data - xdp.data_hard_start;
+ metasize = xdp.data - xdp.data_meta;
+ cqe_bcnt = xdp.data_end - xdp.data;
+ }
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
+ skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
if (unlikely(!skb))
return NULL;
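Because the XDP program may move data, data_meta, and data_end, the skb geometry is re-derived from the xdp_buff after mlx5e_xdp_handle() returns. A minimal sketch of those three derivations, using only the standard xdp_buff fields (the helper name is hypothetical):

    #include <linux/types.h>
    #include <net/xdp.h>

    /* Sketch: recompute skb geometry after a BPF program ran. */
    static void example_geometry(const struct xdp_buff *xdp, u16 *headroom,
                                 u32 *metasize, u32 *len)
    {
            *headroom = xdp->data - xdp->data_hard_start; /* room before payload */
            *metasize = xdp->data - xdp->data_meta;       /* XDP metadata bytes */
            *len      = xdp->data_end - xdp->data;        /* payload length */
    }

The metasize then flows into mlx5e_build_linear_skb(), whose new skb_metadata_set() call keeps the XDP metadata visible on the skb path.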
@@ -1554,41 +1570,103 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
{
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
struct mlx5e_wqe_frag_info *head_wi = wi;
- u16 headlen = min_t(u32, MLX5E_RX_MAX_HEAD, cqe_bcnt);
- u16 frag_headlen = headlen;
- u16 byte_cnt = cqe_bcnt - headlen;
+ u16 rx_headroom = rq->buff.headroom;
+ struct mlx5e_dma_info *di = wi->di;
+ struct skb_shared_info *sinfo;
+ u32 frag_consumed_bytes;
+ struct bpf_prog *prog;
+ struct xdp_buff xdp;
struct sk_buff *skb;
+ u32 truesize;
+ void *va;
- /* XDP is not supported in this configuration, as incoming packets
- * might spread among multiple pages.
- */
- skb = napi_alloc_skb(rq->cq.napi,
- ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
- if (unlikely(!skb)) {
- rq->stats->buff_alloc_err++;
- return NULL;
- }
+ va = page_address(di->page) + wi->offset;
+ frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
- net_prefetchw(skb->data);
+ dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
+ rq->buff.frame0_sz, DMA_FROM_DEVICE);
+ net_prefetchw(va); /* xdp_frame data area */
+ net_prefetch(va + rx_headroom);
+
+ mlx5e_fill_xdp_buff(rq, va, rx_headroom, frag_consumed_bytes, &xdp);
+ sinfo = xdp_get_shared_info_from_buff(&xdp);
+ truesize = 0;
+
+ cqe_bcnt -= frag_consumed_bytes;
+ frag_info++;
+ wi++;
+
+ while (cqe_bcnt) {
+ skb_frag_t *frag;
+
+ di = wi->di;
+
+ frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
+
+ dma_sync_single_for_cpu(rq->pdev, di->addr + wi->offset,
+ frag_consumed_bytes, DMA_FROM_DEVICE);
+
+ if (!xdp_buff_has_frags(&xdp)) {
+ /* Init on the first fragment to avoid cold cache access
+ * when possible.
+ */
+ sinfo->nr_frags = 0;
+ sinfo->xdp_frags_size = 0;
+ xdp_buff_set_frags_flag(&xdp);
+ }
- while (byte_cnt) {
- u16 frag_consumed_bytes =
- min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt);
+ frag = &sinfo->frags[sinfo->nr_frags++];
+ __skb_frag_set_page(frag, di->page);
+ skb_frag_off_set(frag, wi->offset);
+ skb_frag_size_set(frag, frag_consumed_bytes);
- mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen,
- frag_consumed_bytes, frag_info->frag_stride);
- byte_cnt -= frag_consumed_bytes;
- frag_headlen = 0;
+ if (page_is_pfmemalloc(di->page))
+ xdp_buff_set_frag_pfmemalloc(&xdp);
+
+ sinfo->xdp_frags_size += frag_consumed_bytes;
+ truesize += frag_info->frag_stride;
+
+ cqe_bcnt -= frag_consumed_bytes;
frag_info++;
wi++;
}
- /* copy header */
- mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, head_wi->offset,
- headlen);
- /* skb linear part was allocated with headlen and aligned to long */
- skb->tail += headlen;
- skb->len += headlen;
+ di = head_wi->di;
+
+ prog = rcu_dereference(rq->xdp_prog);
+ if (prog && mlx5e_xdp_handle(rq, di->page, prog, &xdp)) {
+ if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
+ int i;
+
+ for (i = wi - head_wi; i < rq->wqe.info.num_frags; i++)
+ mlx5e_put_rx_frag(rq, &head_wi[i], true);
+ }
+ return NULL; /* page/packet was consumed by XDP */
+ }
+
+ skb = mlx5e_build_linear_skb(rq, xdp.data_hard_start, rq->buff.frame0_sz,
+ xdp.data - xdp.data_hard_start,
+ xdp.data_end - xdp.data,
+ xdp.data - xdp.data_meta);
+ if (unlikely(!skb))
+ return NULL;
+
+ page_ref_inc(di->page);
+
+ if (unlikely(xdp_buff_has_frags(&xdp))) {
+ int i;
+
+ /* sinfo->nr_frags is reset by build_skb, so recompute it. */
+ xdp_update_skb_shared_info(skb, wi - head_wi - 1,
+ sinfo->xdp_frags_size, truesize,
+ xdp_buff_is_frag_pfmemalloc(&xdp));
+
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+
+ page_ref_inc(skb_frag_page(frag));
+ }
+ }
return skb;
}
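The rewritten nonlinear path builds an XDP multi-buffer frame: tail fragments are attached to the skb_shared_info embedded in the xdp_buff, with the frags flag and pfmemalloc bit tracked on the buff itself. A condensed sketch of the attach step (the helper name is hypothetical; the xdp_buff and skb_frag calls are the real APIs of this kernel generation):

    #include <linux/mm.h>
    #include <linux/skbuff.h>
    #include <net/xdp.h>

    /* Sketch: attach one tail fragment to a multi-buffer xdp_buff. */
    static void example_add_frag(struct xdp_buff *xdp, struct page *page,
                                 u32 offset, u32 size)
    {
            struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
            skb_frag_t *frag;

            if (!xdp_buff_has_frags(xdp)) {
                    sinfo->nr_frags = 0;       /* first frag: init shared info */
                    sinfo->xdp_frags_size = 0;
                    xdp_buff_set_frags_flag(xdp);
            }

            frag = &sinfo->frags[sinfo->nr_frags++];
            __skb_frag_set_page(frag, page);
            skb_frag_off_set(frag, offset);
            skb_frag_size_set(frag, size);
            sinfo->xdp_frags_size += size;

            if (page_is_pfmemalloc(page))
                    xdp_buff_set_frag_pfmemalloc(xdp);
    }

Note the page_ref_inc() calls after build_skb() above: the skb takes its own reference on each page because the RQ still owns the originals.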
@@ -1832,9 +1910,9 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
{
struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
u16 rx_headroom = rq->buff.headroom;
- u32 cqe_bcnt32 = cqe_bcnt;
- struct xdp_buff xdp;
+ struct bpf_prog *prog;
struct sk_buff *skb;
+ u32 metasize = 0;
void *va, *data;
u32 frag_size;
@@ -1846,23 +1924,30 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
va = page_address(di->page) + head_offset;
data = va + rx_headroom;
- frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
frag_size, DMA_FROM_DEVICE);
- net_prefetchw(va); /* xdp_frame data area */
net_prefetch(data);
- mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
- if (mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp)) {
- if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
- __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
- return NULL; /* page/packet was consumed by XDP */
- }
+ prog = rcu_dereference(rq->xdp_prog);
+ if (prog) {
+ struct xdp_buff xdp;
- rx_headroom = xdp.data - xdp.data_hard_start;
- frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
- skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
+ net_prefetchw(va); /* xdp_frame data area */
+ mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
+ if (mlx5e_xdp_handle(rq, di->page, prog, &xdp)) {
+ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
+ __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
+ return NULL; /* page/packet was consumed by XDP */
+ }
+
+ rx_headroom = xdp.data - xdp.data_hard_start;
+ metasize = xdp.data - xdp.data_meta;
+ cqe_bcnt = xdp.data_end - xdp.data;
+ }
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
+ skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
if (unlikely(!skb))
return NULL;
@@ -1893,7 +1978,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE);
prefetchw(hdr);
prefetch(data);
- skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size);
+ skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
if (unlikely(!skb))
return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 00f1d16db456..bdc870f9c2f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -37,6 +37,10 @@
#include "en/ptp.h"
#include "en/port.h"
+#ifdef CONFIG_PAGE_POOL_STATS
+#include <net/page_pool.h>
+#endif
+
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
return !priv->profile->stats_grps_num ? 0 :
@@ -183,6 +187,19 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
+#ifdef CONFIG_PAGE_POOL_STATS
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_empty) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_refill) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_waive) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cached) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cache_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
+#endif
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
@@ -349,6 +366,19 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_congst_umr += rq_stats->congst_umr;
s->rx_arfs_err += rq_stats->arfs_err;
s->rx_recover += rq_stats->recover;
+#ifdef CONFIG_PAGE_POOL_STATS
+ s->rx_pp_alloc_fast += rq_stats->pp_alloc_fast;
+ s->rx_pp_alloc_slow += rq_stats->pp_alloc_slow;
+ s->rx_pp_alloc_empty += rq_stats->pp_alloc_empty;
+ s->rx_pp_alloc_refill += rq_stats->pp_alloc_refill;
+ s->rx_pp_alloc_waive += rq_stats->pp_alloc_waive;
+ s->rx_pp_alloc_slow_high_order += rq_stats->pp_alloc_slow_high_order;
+ s->rx_pp_recycle_cached += rq_stats->pp_recycle_cached;
+ s->rx_pp_recycle_cache_full += rq_stats->pp_recycle_cache_full;
+ s->rx_pp_recycle_ring += rq_stats->pp_recycle_ring;
+ s->rx_pp_recycle_ring_full += rq_stats->pp_recycle_ring_full;
+ s->rx_pp_recycle_released_ref += rq_stats->pp_recycle_released_ref;
+#endif
#ifdef CONFIG_MLX5_EN_TLS
s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
@@ -455,6 +485,35 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
}
}
+#ifdef CONFIG_PAGE_POOL_STATS
+static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
+{
+ struct mlx5e_rq_stats *rq_stats = c->rq.stats;
+ struct page_pool *pool = c->rq.page_pool;
+ struct page_pool_stats stats = { 0 };
+
+ if (!page_pool_get_stats(pool, &stats))
+ return;
+
+ rq_stats->pp_alloc_fast = stats.alloc_stats.fast;
+ rq_stats->pp_alloc_slow = stats.alloc_stats.slow;
+ rq_stats->pp_alloc_slow_high_order = stats.alloc_stats.slow_high_order;
+ rq_stats->pp_alloc_empty = stats.alloc_stats.empty;
+ rq_stats->pp_alloc_waive = stats.alloc_stats.waive;
+ rq_stats->pp_alloc_refill = stats.alloc_stats.refill;
+
+ rq_stats->pp_recycle_cached = stats.recycle_stats.cached;
+ rq_stats->pp_recycle_cache_full = stats.recycle_stats.cache_full;
+ rq_stats->pp_recycle_ring = stats.recycle_stats.ring;
+ rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
+ rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
+}
+#else
+static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
+{
+}
+#endif
+
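page_pool_get_stats() fills a caller-provided struct page_pool_stats and returns false when it cannot (e.g. a NULL pool), which is why the update helper above returns early. A minimal consumer sketch under the same config gate (the helper name is hypothetical):

    #ifdef CONFIG_PAGE_POOL_STATS
    #include <net/page_pool.h>

    /* Sketch: total pages recycled by one pool, from its snapshot stats. */
    static u64 example_recycled(struct page_pool *pool)
    {
            struct page_pool_stats stats = { 0 };

            if (!page_pool_get_stats(pool, &stats))
                    return 0;

            return stats.recycle_stats.cached + stats.recycle_stats.ring;
    }
    #endif

The driver assigns the snapshot into the per-RQ counters rather than accumulating, since page_pool_get_stats() already reports running totals.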
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -462,9 +521,13 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
memset(s, 0, sizeof(*s));
+ for (i = 0; i < priv->channels.num; i++) /* for active channels only */
+ mlx5e_stats_update_stats_rq_page_pool(priv->channels.c[i]);
+
for (i = 0; i < priv->stats_nch; i++) {
struct mlx5e_channel_stats *channel_stats =
priv->channel_stats[i];
+
int j;
mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
@@ -1887,6 +1950,19 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
+#ifdef CONFIG_PAGE_POOL_STATS
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_empty) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_refill) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_waive) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cached) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cache_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
+#endif
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
@@ -2348,7 +2424,7 @@ MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
-static MLX5E_DEFINE_STATS_GRP(ptp, 0);
+MLX5E_DEFINE_STATS_GRP(ptp, 0);
static MLX5E_DEFINE_STATS_GRP(qos, 0);
/* The stats groups order is opposite to the update_stats() order calls */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 2c1ed5b81be6..a7a025d15c14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -205,7 +205,19 @@ struct mlx5e_sw_stats {
u64 ch_aff_change;
u64 ch_force_irq;
u64 ch_eq_rearm;
-
+#ifdef CONFIG_PAGE_POOL_STATS
+ u64 rx_pp_alloc_fast;
+ u64 rx_pp_alloc_slow;
+ u64 rx_pp_alloc_slow_high_order;
+ u64 rx_pp_alloc_empty;
+ u64 rx_pp_alloc_refill;
+ u64 rx_pp_alloc_waive;
+ u64 rx_pp_recycle_cached;
+ u64 rx_pp_recycle_cache_full;
+ u64 rx_pp_recycle_ring;
+ u64 rx_pp_recycle_ring_full;
+ u64 rx_pp_recycle_released_ref;
+#endif
#ifdef CONFIG_MLX5_EN_TLS
u64 tx_tls_encrypted_packets;
u64 tx_tls_encrypted_bytes;
@@ -352,6 +364,19 @@ struct mlx5e_rq_stats {
u64 congst_umr;
u64 arfs_err;
u64 recover;
+#ifdef CONFIG_PAGE_POOL_STATS
+ u64 pp_alloc_fast;
+ u64 pp_alloc_slow;
+ u64 pp_alloc_slow_high_order;
+ u64 pp_alloc_empty;
+ u64 pp_alloc_refill;
+ u64 pp_alloc_waive;
+ u64 pp_recycle_cached;
+ u64 pp_recycle_cache_full;
+ u64 pp_recycle_ring;
+ u64 pp_recycle_ring_full;
+ u64 pp_recycle_released_ref;
+#endif
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_decrypted_packets;
u64 tls_decrypted_bytes;
@@ -459,5 +484,6 @@ extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
+extern MLX5E_DECLARE_STATS_GRP(ptp);
#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index b27532a9301e..e3fc15ae7bb1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -115,6 +115,7 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
static struct lock_class_key tc_ht_lock_key;
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
+static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
@@ -273,6 +274,23 @@ get_sample_priv(struct mlx5e_priv *priv)
return NULL;
}
+static struct mlx5e_post_act *
+get_post_action(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ if (is_mdev_switchdev_mode(priv->mdev)) {
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ return uplink_priv->post_act;
+ }
+
+ return priv->fs.tc.post_act;
+}
+
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
@@ -295,13 +313,62 @@ mlx5_tc_rule_delete(struct mlx5e_priv *priv,
if (is_mdev_switchdev_mode(priv->mdev)) {
mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
-
return;
}
mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}
+struct mlx5_flow_handle *
+mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (attr->flags & MLX5_ATTR_FLAG_CT) {
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts =
+ &attr->parse_attr->mod_hdr_acts;
+
+ return mlx5_tc_ct_flow_offload(get_ct_priv(priv),
+ spec, attr,
+ mod_hdr_acts);
+ }
+
+ if (!is_mdev_switchdev_mode(priv->mdev))
+ return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
+
+ if (attr->flags & MLX5_ATTR_FLAG_SAMPLE)
+ return mlx5e_tc_sample_offload(get_sample_priv(priv), spec, attr);
+
+ return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+}
+
+void
+mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (attr->flags & MLX5_ATTR_FLAG_CT) {
+ mlx5_tc_ct_delete_flow(get_ct_priv(priv), attr);
+ return;
+ }
+
+ if (!is_mdev_switchdev_mode(priv->mdev)) {
+ mlx5e_del_offloaded_nic_rule(priv, rule, attr);
+ return;
+ }
+
+ if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
+ mlx5e_tc_sample_unoffload(get_sample_priv(priv), rule, attr);
+ return;
+ }
+
+ mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+}
+
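mlx5e_tc_rule_offload() and mlx5e_tc_rule_unoffload() are deliberately symmetric dispatchers (CT first, then NIC vs. switchdev, then sample), so callers no longer open-code the per-type branches. A hedged usage sketch of the pairing (the wrapper is hypothetical and error handling is condensed):

    /* Sketch: offload, then roll back with the matching twin on failure. */
    static int example_offload_pair(struct mlx5e_priv *priv,
                                    struct mlx5_flow_spec *spec,
                                    struct mlx5_flow_attr *attr)
    {
            struct mlx5_flow_handle *rule;

            rule = mlx5e_tc_rule_offload(priv, spec, attr);
            if (IS_ERR(rule))
                    return PTR_ERR(rule);

            /* ... later setup here; on failure, undo with the twin helper: */
            mlx5e_tc_rule_unoffload(priv, rule, attr);
            return 0;
    }

mlx5e_tc_offload_fdb_rules() below uses exactly this shape in its err_rule1 path.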
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
@@ -1039,6 +1106,21 @@ err_ft_get:
}
static int
+alloc_flow_attr_counter(struct mlx5_core_dev *counter_dev,
+ struct mlx5_flow_attr *attr)
+
+{
+ struct mlx5_fc *counter;
+
+ counter = mlx5_fc_create(counter_dev, true);
+ if (IS_ERR(counter))
+ return PTR_ERR(counter);
+
+ attr->counter = counter;
+ return 0;
+}
+
+static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
@@ -1046,7 +1128,6 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_core_dev *dev = priv->mdev;
- struct mlx5_fc *counter;
int err;
parse_attr = attr->parse_attr;
@@ -1058,11 +1139,9 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- counter = mlx5_fc_create(dev, true);
- if (IS_ERR(counter))
- return PTR_ERR(counter);
-
- attr->counter = counter;
+ err = alloc_flow_attr_counter(dev, attr);
+ if (err)
+ return err;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
@@ -1072,8 +1151,8 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
return err;
}
- if (flow_flag_test(flow, CT))
- flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), flow, &parse_attr->spec,
+ if (attr->flags & MLX5_ATTR_FLAG_CT)
+ flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), &parse_attr->spec,
attr, &parse_attr->mod_hdr_acts);
else
flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
@@ -1107,8 +1186,8 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
flow_flag_clear(flow, OFFLOADED);
- if (flow_flag_test(flow, CT))
- mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
+ if (attr->flags & MLX5_ATTR_FLAG_CT)
+ mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);
else if (!IS_ERR_OR_NULL(flow->rule[0]))
mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);
@@ -1132,6 +1211,8 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
if (flow_flag_test(flow, HAIRPIN))
mlx5e_hairpin_flow_del(priv, flow);
+ free_flow_post_acts(flow);
+
kvfree(attr->parse_attr);
kfree(flow->attr);
}
@@ -1142,40 +1223,27 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
- struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct mlx5_flow_handle *rule;
- if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+ if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
- if (flow_flag_test(flow, CT)) {
- mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
-
- rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
- flow, spec, attr,
- mod_hdr_acts);
- } else if (flow_flag_test(flow, SAMPLE)) {
- rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr,
- mlx5e_tc_get_flow_tun_id(flow));
- } else {
- rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
- }
+ rule = mlx5e_tc_rule_offload(flow->priv, spec, attr);
if (IS_ERR(rule))
return rule;
if (attr->esw_attr->split_count) {
flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
- if (IS_ERR(flow->rule[1])) {
- if (flow_flag_test(flow, CT))
- mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
- else
- mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
- return flow->rule[1];
- }
+ if (IS_ERR(flow->rule[1]))
+ goto err_rule1;
}
return rule;
+
+err_rule1:
+ mlx5e_tc_rule_unoffload(flow->priv, rule, attr);
+ return flow->rule[1];
}
void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
@@ -1184,19 +1252,13 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
{
flow_flag_clear(flow, OFFLOADED);
- if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
- goto offload_rule_0;
+ if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
+ return mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
- if (flow_flag_test(flow, CT))
- mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
- else if (flow_flag_test(flow, SAMPLE))
- mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
- else
-offload_rule_0:
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
+ mlx5e_tc_rule_unoffload(flow->priv, flow->rule[0], attr);
}
struct mlx5_flow_handle *
@@ -1214,7 +1276,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->esw_attr->split_count = 0;
- slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
if (!IS_ERR(rule))
@@ -1239,7 +1301,7 @@ void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->esw_attr->split_count = 0;
- slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
flow_flag_clear(flow, SLOW);
kfree(slow_attr);
@@ -1348,10 +1410,10 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
}
int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr)
{
- struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &parse_attr->mod_hdr_acts;
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
struct mlx5_modify_hdr *mod_hdr;
mod_hdr = mlx5_modify_header_alloc(priv->mdev,
@@ -1361,13 +1423,107 @@ int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
if (IS_ERR(mod_hdr))
return PTR_ERR(mod_hdr);
- WARN_ON(flow->attr->modify_hdr);
- flow->attr->modify_hdr = mod_hdr;
+ WARN_ON(attr->modify_hdr);
+ attr->modify_hdr = mod_hdr;
return 0;
}
static int
+set_encap_dests(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ struct netlink_ext_ack *extack,
+ bool *encap_valid,
+ bool *vf_tun)
+{
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct net_device *encap_dev = NULL;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_priv *out_priv;
+ int out_index;
+ int err = 0;
+
+ if (!mlx5e_is_eswitch_flow(flow))
+ return 0;
+
+ parse_attr = attr->parse_attr;
+ esw_attr = attr->esw_attr;
+ *vf_tun = false;
+ *encap_valid = true;
+
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+ struct net_device *out_dev;
+ int mirred_ifindex;
+
+ if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
+ continue;
+
+ mirred_ifindex = parse_attr->mirred_ifindex[out_index];
+ out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
+ if (!out_dev) {
+ NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
+ err = -ENODEV;
+ goto out;
+ }
+ err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index,
+ extack, &encap_dev, encap_valid);
+ dev_put(out_dev);
+ if (err)
+ goto out;
+
+ if (esw_attr->dests[out_index].flags &
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
+ !esw_attr->dest_int_port)
+ *vf_tun = true;
+
+ out_priv = netdev_priv(encap_dev);
+ rpriv = out_priv->ppriv;
+ esw_attr->dests[out_index].rep = rpriv->rep;
+ esw_attr->dests[out_index].mdev = out_priv->mdev;
+ }
+
+ if (*vf_tun && esw_attr->out_count > 1) {
+ NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+static void
+clean_encap_dests(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ bool *vf_tun)
+{
+ struct mlx5_esw_flow_attr *esw_attr;
+ int out_index;
+
+ if (!mlx5e_is_eswitch_flow(flow))
+ return;
+
+ esw_attr = attr->esw_attr;
+ *vf_tun = false;
+
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+ if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
+ continue;
+
+ if (esw_attr->dests[out_index].flags &
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
+ !esw_attr->dest_int_port)
+ *vf_tun = true;
+
+ mlx5e_detach_encap(priv, flow, attr, out_index);
+ kfree(attr->parse_attr->tun_info[out_index]);
+ }
+}
+
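set_encap_dests() and clean_encap_dests() are mirror images over the same MLX5_ESW_DEST_ENCAP loop, keeping the FDB add and delete paths symmetric. A sketch of the intended pairing (both helpers are static in this file; the wrapper exists only to illustrate the lifecycle):

    /* Sketch: attach encap destinations on add, detach the same set on del. */
    static int example_encap_lifecycle(struct mlx5e_priv *priv,
                                       struct mlx5e_tc_flow *flow,
                                       struct mlx5_flow_attr *attr,
                                       struct netlink_ext_ack *extack)
    {
            bool encap_valid, vf_tun;
            int err;

            err = set_encap_dests(priv, flow, attr, extack, &encap_valid, &vf_tun);
            if (err)
                    return err;
            /* ... offload the rule; on delete or error, detach the same set: */
            clean_encap_dests(priv, flow, attr, &vf_tun);
            return 0;
    }

mlx5e_tc_add_fdb_flow() and mlx5e_tc_del_fdb_flow() below are rewired onto exactly this pair.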
+static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
@@ -1375,15 +1531,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
- bool vf_tun = false, encap_valid = true;
- struct net_device *encap_dev = NULL;
struct mlx5_esw_flow_attr *esw_attr;
- struct mlx5e_rep_priv *rpriv;
- struct mlx5e_priv *out_priv;
- struct mlx5_fc *counter;
+ bool vf_tun, encap_valid;
u32 max_prio, max_chain;
int err = 0;
- int out_index;
parse_attr = attr->parse_attr;
esw_attr = attr->esw_attr;
@@ -1472,50 +1623,17 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr->int_port = int_port;
}
- for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
- struct net_device *out_dev;
- int mirred_ifindex;
-
- if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
- continue;
-
- mirred_ifindex = parse_attr->mirred_ifindex[out_index];
- out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
- if (!out_dev) {
- NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
- err = -ENODEV;
- goto err_out;
- }
- err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
- extack, &encap_dev, &encap_valid);
- dev_put(out_dev);
- if (err)
- goto err_out;
-
- if (esw_attr->dests[out_index].flags &
- MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
- !esw_attr->dest_int_port)
- vf_tun = true;
- out_priv = netdev_priv(encap_dev);
- rpriv = out_priv->ppriv;
- esw_attr->dests[out_index].rep = rpriv->rep;
- esw_attr->dests[out_index].mdev = out_priv->mdev;
- }
-
- if (vf_tun && esw_attr->out_count > 1) {
- NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
- err = -EOPNOTSUPP;
+ err = set_encap_dests(priv, flow, attr, extack, &encap_valid, &vf_tun);
+ if (err)
goto err_out;
- }
err = mlx5_eswitch_add_vlan_action(esw, attr);
if (err)
goto err_out;
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
- !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
if (vf_tun) {
- err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow);
+ err = mlx5e_tc_add_flow_mod_hdr(priv, flow, attr);
if (err)
goto err_out;
} else {
@@ -1526,20 +1644,16 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- counter = mlx5_fc_create(esw_attr->counter_dev, true);
- if (IS_ERR(counter)) {
- err = PTR_ERR(counter);
+ err = alloc_flow_attr_counter(esw_attr->counter_dev, attr);
+ if (err)
goto err_out;
- }
-
- attr->counter = counter;
}
/* we get here if one of the following takes place:
* (1) there's no error
* (2) there's an encap action and we don't have valid neigh
*/
- if (!encap_valid)
+ if (!encap_valid || flow_flag_test(flow, SLOW))
flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
else
flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
@@ -1576,8 +1690,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
- bool vf_tun = false;
- int out_index;
+ bool vf_tun;
esw_attr = attr->esw_attr;
mlx5e_put_flow_tunnel_id(flow);
@@ -1601,16 +1714,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (flow->decap_route)
mlx5e_detach_decap_route(priv, flow);
- for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
- if (esw_attr->dests[out_index].flags &
- MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
- !esw_attr->dest_int_port)
- vf_tun = true;
- if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
- mlx5e_detach_encap(priv, flow, out_index);
- kfree(attr->parse_attr->tun_info[out_index]);
- }
- }
+ clean_encap_dests(priv, flow, attr, &vf_tun);
mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
@@ -1634,7 +1738,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (flow_flag_test(flow, L3_TO_L2_DECAP))
mlx5e_detach_decap(priv, flow);
- kfree(attr->sample_attr);
+ free_flow_post_acts(flow);
+
kvfree(attr->esw_attr->rx_tun_attr);
kvfree(attr->parse_attr);
kfree(flow->attr);
@@ -1642,7 +1747,10 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
- return flow->attr->counter;
+ struct mlx5_flow_attr *attr;
+
+ attr = list_first_entry(&flow->attrs, struct mlx5_flow_attr, list);
+ return attr->counter;
}
/* Iterate over tmp_list of flows attached to flow_list head. */
@@ -1854,7 +1962,7 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
}
- flow->tunnel_id = value;
+ flow->attr->tunnel_id = value;
return 0;
err_set:
@@ -1868,8 +1976,8 @@ err_enc_opts:
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
- u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
- u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
+ u32 enc_opts_id = flow->attr->tunnel_id & ENC_OPTS_BITS_MASK;
+ u32 tun_id = flow->attr->tunnel_id >> ENC_OPTS_BITS;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
struct mlx5_eswitch *esw;
@@ -1885,11 +1993,6 @@ static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
enc_opts_id);
}
-u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
-{
- return flow->tunnel_id;
-}
-
void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
struct flow_match_basic *match, bool outer,
void *headers_c, void *headers_v)
@@ -2811,14 +2914,15 @@ static unsigned long mask_to_le(unsigned long mask, int size)
return mask;
}
+
static int offload_pedit_fields(struct mlx5e_priv *priv,
int namespace,
- struct pedit_headers_action *hdrs,
struct mlx5e_tc_flow_parse_attr *parse_attr,
u32 *action_flags,
struct netlink_ext_ack *extack)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
+ struct pedit_headers_action *hdrs = parse_attr->hdrs;
void *headers_c, *headers_v, *action, *vals_p;
u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
struct mlx5e_tc_mod_hdr_acts *mod_acts;
@@ -2944,35 +3048,43 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
static const struct pedit_headers zero_masks = {};
-static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
- u32 *action_flags,
- struct netlink_ext_ack *extack)
+static int verify_offload_pedit_fields(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct netlink_ext_ack *extack)
{
struct pedit_headers *cmd_masks;
- int err;
u8 cmd;
- err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
- action_flags, extack);
- if (err < 0)
- goto out_dealloc_parsed_actions;
-
for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
- cmd_masks = &hdrs[cmd].masks;
+ cmd_masks = &parse_attr->hdrs[cmd].masks;
if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
- NL_SET_ERR_MSG_MOD(extack,
- "attempt to offload an unsupported field");
+ NL_SET_ERR_MSG_MOD(extack, "attempt to offload an unsupported field");
netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
16, 1, cmd_masks, sizeof(zero_masks), true);
- err = -EOPNOTSUPP;
- goto out_dealloc_parsed_actions;
+ return -EOPNOTSUPP;
}
}
return 0;
+}
+
+static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ u32 *action_flags,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = offload_pedit_fields(priv, namespace, parse_attr, action_flags, extack);
+ if (err)
+ goto out_dealloc_parsed_actions;
+
+ err = verify_offload_pedit_fields(priv, parse_attr, extack);
+ if (err)
+ goto out_dealloc_parsed_actions;
+
+ return 0;
out_dealloc_parsed_actions:
mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
@@ -3176,11 +3288,11 @@ actions_match_supported_fdb(struct mlx5e_priv *priv,
static bool
actions_match_supported(struct mlx5e_priv *priv,
struct flow_action *flow_action,
+ u32 actions,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
- u32 actions = flow->attr->action;
bool ct_flow, ct_clear;
ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
@@ -3248,57 +3360,13 @@ bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
}
static int
-parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
- struct flow_action *flow_action)
-{
- struct netlink_ext_ack *extack = parse_state->extack;
- struct mlx5e_tc_flow *flow = parse_state->flow;
- struct mlx5_flow_attr *attr = flow->attr;
- enum mlx5_flow_namespace_type ns_type;
- struct mlx5e_priv *priv = flow->priv;
- const struct flow_action_entry *act;
- struct mlx5e_tc_act *tc_act;
- int err, i;
-
- ns_type = mlx5e_get_flow_namespace(flow);
-
- flow_action_for_each(i, act, flow_action) {
- tc_act = mlx5e_tc_act_get(act->id, ns_type);
- if (!tc_act) {
- NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
- return -EOPNOTSUPP;
- }
-
- if (!tc_act->can_offload(parse_state, act, i))
- return -EOPNOTSUPP;
-
- err = tc_act->parse_action(parse_state, act, priv, attr);
- if (err)
- return err;
- }
-
- flow_action_for_each(i, act, flow_action) {
- tc_act = mlx5e_tc_act_get(act->id, ns_type);
- if (!tc_act || !tc_act->post_parse ||
- !tc_act->can_offload(parse_state, act, i))
- continue;
-
- err = tc_act->post_parse(parse_state, priv, attr);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int
actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr,
- struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ struct pedit_headers_action *hdrs = parse_attr->hdrs;
enum mlx5_flow_namespace_type ns_type;
int err;
@@ -3308,8 +3376,7 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
ns_type = mlx5e_get_flow_namespace(flow);
- err = alloc_tc_pedit_action(priv, ns_type, parse_attr, hdrs,
- &attr->action, extack);
+ err = alloc_tc_pedit_action(priv, ns_type, parse_attr, &attr->action, extack);
if (err)
return err;
@@ -3330,6 +3397,299 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
return 0;
}
+static struct mlx5_flow_attr *
+mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
+ enum mlx5_flow_namespace_type ns_type)
+{
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ u32 attr_sz = ns_to_attr_sz(ns_type);
+ struct mlx5_flow_attr *attr2;
+
+ attr2 = mlx5_alloc_flow_attr(ns_type);
+ parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
+ if (!attr2 || !parse_attr) {
+ kvfree(parse_attr);
+ kfree(attr2);
+ return NULL;
+ }
+
+ memcpy(attr2, attr, attr_sz);
+ INIT_LIST_HEAD(&attr2->list);
+ parse_attr->filter_dev = attr->parse_attr->filter_dev;
+ attr2->action = 0;
+ attr2->flags = 0;
+ attr2->parse_attr = parse_attr;
+ return attr2;
+}
+
+static struct mlx5_core_dev *
+get_flow_counter_dev(struct mlx5e_tc_flow *flow)
+{
+ return mlx5e_is_eswitch_flow(flow) ? flow->attr->esw_attr->counter_dev : flow->priv->mdev;
+}
+
+struct mlx5_flow_attr *
+mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5_flow_attr *attr;
+ int i;
+
+ list_for_each_entry(attr, &flow->attrs, list) {
+ esw_attr = attr->esw_attr;
+ for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
+ if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP)
+ return attr;
+ }
+ }
+
+ return NULL;
+}
+
+void
+mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow)
+{
+ struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+ struct mlx5_flow_attr *attr;
+
+ list_for_each_entry(attr, &flow->attrs, list) {
+ if (list_is_last(&attr->list, &flow->attrs))
+ break;
+
+ mlx5e_tc_post_act_unoffload(post_act, attr->post_act_handle);
+ }
+}
+
+static void
+free_flow_post_acts(struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
+ struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+ struct mlx5_flow_attr *attr, *tmp;
+ bool vf_tun;
+
+ list_for_each_entry_safe(attr, tmp, &flow->attrs, list) {
+ if (list_is_last(&attr->list, &flow->attrs))
+ break;
+
+ if (attr->post_act_handle)
+ mlx5e_tc_post_act_del(post_act, attr->post_act_handle);
+
+ clean_encap_dests(flow->priv, flow, attr, &vf_tun);
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
+ mlx5_fc_destroy(counter_dev, attr->counter);
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
+ if (attr->modify_hdr)
+ mlx5_modify_header_dealloc(flow->priv->mdev, attr->modify_hdr);
+ }
+
+ list_del(&attr->list);
+ kvfree(attr->parse_attr);
+ kfree(attr);
+ }
+}
+
+int
+mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow)
+{
+ struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+ struct mlx5_flow_attr *attr;
+ int err = 0;
+
+ list_for_each_entry(attr, &flow->attrs, list) {
+ if (list_is_last(&attr->list, &flow->attrs))
+ break;
+
+ err = mlx5e_tc_post_act_offload(post_act, attr->post_act_handle);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/* TC filter rule HW translation:
+ *
+ * +---------------------+
+ * + ft prio (tc chain) +
+ * + original match +
+ * +---------------------+
+ * |
+ * | if multi table action
+ * |
+ * v
+ * +---------------------+
+ * + post act ft |<----.
+ * + match fte id | | split on multi table action
+ * + do actions |-----'
+ * +---------------------+
+ * |
+ * |
+ * v
+ * Do rest of the actions after last multi table action.
+ */
+static int
+alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
+{
+ struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+ struct mlx5_flow_attr *attr, *next_attr = NULL;
+ struct mlx5e_post_act_handle *handle;
+ bool vf_tun, encap_valid = true;
+ int err;
+
+ /* The attrs list was built with list_add(), which prepends, so this
+ * walks the attributes in reverse order: the first entry is the
+ * last attribute.
+ */
+ list_for_each_entry(attr, &flow->attrs, list) {
+ if (!next_attr) {
+ /* Set counter action on last post act rule. */
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ } else {
+ err = mlx5e_tc_act_set_next_post_act(flow, attr, next_attr);
+ if (err)
+ goto out_free;
+ }
+
+ /* Don't add post_act rule for first attr (last in the list).
+ * It's being handled by the caller.
+ */
+ if (list_is_last(&attr->list, &flow->attrs))
+ break;
+
+ err = set_encap_dests(flow->priv, flow, attr, extack, &encap_valid, &vf_tun);
+ if (err)
+ goto out_free;
+
+ if (!encap_valid)
+ flow_flag_set(flow, SLOW);
+
+ err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
+ if (err)
+ goto out_free;
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr);
+ if (err)
+ goto out_free;
+ }
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr);
+ if (err)
+ goto out_free;
+ }
+
+ handle = mlx5e_tc_post_act_add(post_act, attr);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ goto out_free;
+ }
+
+ attr->post_act_handle = handle;
+ next_attr = attr;
+ }
+
+ if (flow_flag_test(flow, SLOW))
+ goto out;
+
+ err = mlx5e_tc_offload_flow_post_acts(flow);
+ if (err)
+ goto out_free;
+
+out:
+ return 0;
+
+out_free:
+ free_flow_post_acts(flow);
+ return err;
+}
+
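alloc_flow_post_acts() depends on the attrs list being built with list_add(), which prepends: a forward walk therefore visits attributes newest-first, and list_is_last() marks the original first attribute, which the caller offloads itself. The idiom in isolation (example types are hypothetical):

    #include <linux/list.h>

    struct example_item { int id; struct list_head list; };

    /* Sketch: a forward walk over a list_add()-built list runs newest-first;
     * the tail (oldest entry) is skipped for the caller to handle.
     */
    static void example_walk(struct list_head *head)
    {
            struct example_item *it;

            list_for_each_entry(it, head, list) {
                    if (list_is_last(&it->list, head))
                            break;
                    /* process it->id, newest to oldest */
            }
    }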
+static int
+parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
+ struct flow_action *flow_action)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow_action flow_action_reorder;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+ struct mlx5_flow_attr *attr = flow->attr;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5e_priv *priv = flow->priv;
+ struct flow_action_entry *act, **_act;
+ struct mlx5e_tc_act *tc_act;
+ int err, i;
+
+ flow_action_reorder.num_entries = flow_action->num_entries;
+ flow_action_reorder.entries = kcalloc(flow_action->num_entries,
+ sizeof(flow_action), GFP_KERNEL);
+ if (!flow_action_reorder.entries)
+ return -ENOMEM;
+
+ mlx5e_tc_act_reorder_flow_actions(flow_action, &flow_action_reorder);
+
+ ns_type = mlx5e_get_flow_namespace(flow);
+ list_add(&attr->list, &flow->attrs);
+
+ flow_action_for_each(i, _act, &flow_action_reorder) {
+ act = *_act;
+ tc_act = mlx5e_tc_act_get(act->id, ns_type);
+ if (!tc_act) {
+ NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
+ err = -EOPNOTSUPP;
+ goto out_free;
+ }
+
+ if (!tc_act->can_offload(parse_state, act, i, attr)) {
+ err = -EOPNOTSUPP;
+ goto out_free;
+ }
+
+ err = tc_act->parse_action(parse_state, act, priv, attr);
+ if (err)
+ goto out_free;
+
+ parse_state->actions |= attr->action;
+
+ /* Split attr for multi table act if not the last act. */
+ if (tc_act->is_multi_table_act &&
+ tc_act->is_multi_table_act(priv, act, attr) &&
+ i < flow_action_reorder.num_entries - 1) {
+ err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
+ if (err)
+ goto out_free;
+
+ attr = mlx5e_clone_flow_attr_for_post_act(flow->attr, ns_type);
+ if (!attr) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ list_add(&attr->list, &flow->attrs);
+ }
+ }
+
+ kfree(flow_action_reorder.entries);
+
+ err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
+ if (err)
+ goto out_free_post_acts;
+
+ err = alloc_flow_post_acts(flow, extack);
+ if (err)
+ goto out_free_post_acts;
+
+ return 0;
+
+out_free:
+ kfree(flow_action_reorder.entries);
+out_free_post_acts:
+ free_flow_post_acts(flow);
+
+ return err;
+}
+
static int
flow_action_supported(struct flow_action *flow_action,
struct netlink_ext_ack *extack)
@@ -3357,7 +3717,6 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
struct mlx5e_tc_act_parse_state *parse_state;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
- struct pedit_headers_action *hdrs;
int err;
err = flow_action_supported(flow_action, extack);
@@ -3369,17 +3728,17 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
parse_state = &parse_attr->parse_state;
mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
parse_state->ct_priv = get_ct_priv(priv);
- hdrs = parse_state->hdrs;
err = parse_tc_actions(parse_state, flow_action);
if (err)
return err;
- err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
+ err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
if (err)
return err;
- if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
+ if (!actions_match_supported(priv, flow_action, parse_state->actions,
+ parse_attr, flow, extack))
return -EOPNOTSUPP;
return 0;
@@ -3480,7 +3839,6 @@ parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
- struct pedit_headers_action *hdrs;
int err;
err = flow_action_supported(flow_action, extack);
@@ -3492,7 +3850,6 @@ parse_tc_fdb_actions(struct mlx5e_priv *priv,
parse_state = &parse_attr->parse_state;
mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
parse_state->ct_priv = get_ct_priv(priv);
- hdrs = parse_state->hdrs;
err = parse_tc_actions(parse_state, flow_action);
if (err)
@@ -3506,11 +3863,12 @@ parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
+ err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
if (err)
return err;
- if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
+ if (!actions_match_supported(priv, flow_action, parse_state->actions,
+ parse_attr, flow, extack))
return -EOPNOTSUPP;
return 0;
@@ -3545,12 +3903,11 @@ static const struct rhashtable_params tc_ht_params = {
static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
unsigned long flags)
{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5e_rep_priv *rpriv;
if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- return &uplink_rpriv->uplink_priv.tc_ht;
+ rpriv = priv->ppriv;
+ return &rpriv->tc_ht;
} else /* NIC offload */
return &priv->fs.tc.ht;
}
@@ -3585,7 +3942,12 @@ mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
sizeof(struct mlx5_nic_flow_attr);
struct mlx5_flow_attr *attr;
- return kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
+ attr = kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
+ if (!attr)
+ return attr;
+
+ INIT_LIST_HEAD(&attr->list);
+ return attr;
}
static int
@@ -3619,6 +3981,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
INIT_LIST_HEAD(&flow->encaps[out_index].list);
INIT_LIST_HEAD(&flow->hairpin);
INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
+ INIT_LIST_HEAD(&flow->attrs);
refcount_set(&flow->refcnt, 1);
init_completion(&flow->init_done);
init_completion(&flow->del_hw_done);
@@ -4119,6 +4482,46 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
return err;
}
+static int mlx5e_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct netlink_ext_ack *extack)
@@ -4146,10 +4549,10 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
- if (act->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
+ err = mlx5e_policer_validate(flow_action, act, extack);
+ if (err)
+ return err;
+
err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
if (err)
return err;
@@ -4383,10 +4786,27 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
mlx5_chains_destroy(tc->chains);
}
-int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
+int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
+{
+ int err;
+
+ err = rhashtable_init(tc_ht, &tc_ht_params);
+ if (err)
+ return err;
+
+ lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
+
+ return 0;
+}
+
+void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht)
+{
+ rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
+}
+
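Factoring the hashtable lifecycle into mlx5e_tc_ht_init()/mlx5e_tc_ht_cleanup() lets each representor own its tc_ht (see the en_rep.h hunk earlier) instead of reaching into the uplink's. The init idiom in isolation, with placeholder params standing in for the driver's tc_ht_params:

    #include <linux/rhashtable.h>

    struct example_obj { unsigned long key; struct rhash_head node; };

    static struct lock_class_key example_ht_lock_key;
    static const struct rhashtable_params example_ht_params = {
            .head_offset = offsetof(struct example_obj, node),
            .key_offset  = offsetof(struct example_obj, key),
            .key_len     = sizeof(unsigned long),
            .automatic_shrinking = true,
    };

    /* Sketch: per-instance rhashtable with its own lockdep class. */
    static int example_ht_init(struct rhashtable *ht)
    {
            int err = rhashtable_init(ht, &example_ht_params);

            if (err)
                    return err;
            lockdep_set_class(&ht->mutex, &example_ht_lock_key);
            return 0;
    }

The explicit lock class mirrors the driver's tc_ht_lock_key, keeping lockdep reports for these tables distinct from other rhashtables.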
+int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
{
const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
- struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *rpriv;
struct mapping_ctx *mapping;
struct mlx5_eswitch *esw;
@@ -4394,7 +4814,6 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
u64 mapping_id;
int err = 0;
- uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
priv = netdev_priv(rpriv->netdev);
esw = priv->mdev->priv.eswitch;
@@ -4434,12 +4853,6 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
}
uplink_priv->tunnel_enc_opts_mapping = mapping;
- err = rhashtable_init(tc_ht, &tc_ht_params);
- if (err)
- goto err_ht_init;
-
- lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
-
uplink_priv->encap = mlx5e_tc_tun_init(priv);
if (IS_ERR(uplink_priv->encap)) {
err = PTR_ERR(uplink_priv->encap);
@@ -4449,8 +4862,6 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
return 0;
err_register_fib_notifier:
- rhashtable_destroy(tc_ht);
-err_ht_init:
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
mapping_destroy(uplink_priv->tunnel_mapping);
@@ -4464,13 +4875,8 @@ err_tun_mapping:
return err;
}
-void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
+void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
{
- struct mlx5_rep_uplink_priv *uplink_priv;
-
- uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
-
- rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
mlx5e_tc_tun_cleanup(uplink_priv->encap);
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 5ffae9b13066..a80b00946f1b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -53,7 +53,6 @@
ESW_FLOW_ATTR_SZ :\
NIC_FLOW_ATTR_SZ)
-
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
struct mlx5e_tc_update_priv {
@@ -71,7 +70,7 @@ struct mlx5_flow_attr {
struct mlx5_fc *counter;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_ct_attr ct_attr;
- struct mlx5e_sample_attr *sample_attr;
+ struct mlx5e_sample_attr sample_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
u32 chain;
u16 prio;
@@ -82,13 +81,33 @@ struct mlx5_flow_attr {
u8 outer_match_level;
u8 ip_version;
u8 tun_ip_version;
+ int tunnel_id; /* mapped tunnel id */
u32 flags;
+ struct list_head list;
+ struct mlx5e_post_act_handle *post_act_handle;
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
};
};
+enum {
+ MLX5_ATTR_FLAG_VLAN_HANDLED = BIT(0),
+ MLX5_ATTR_FLAG_SLOW_PATH = BIT(1),
+ MLX5_ATTR_FLAG_NO_IN_PORT = BIT(2),
+ MLX5_ATTR_FLAG_SRC_REWRITE = BIT(3),
+ MLX5_ATTR_FLAG_SAMPLE = BIT(4),
+ MLX5_ATTR_FLAG_ACCEPT = BIT(5),
+ MLX5_ATTR_FLAG_CT = BIT(6),
+};
+
+/* Returns true if any of the flags that require skipping further TC/NF processing are set. */
+static inline bool
+mlx5e_tc_attr_flags_skip(u32 attr_flags)
+{
+ return attr_flags & (MLX5_ATTR_FLAG_SLOW_PATH | MLX5_ATTR_FLAG_ACCEPT);
+}
+
struct mlx5_rx_tun_attr {
u16 decap_vport;
union {
@@ -149,8 +168,11 @@ enum {
#define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)
-int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
-void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
+int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv);
+void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv);
+
+int mlx5e_tc_ht_init(struct rhashtable *tc_ht);
+void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht);
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags);
@@ -243,11 +265,8 @@ int mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
u32 data);
int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow);
-
-struct mlx5e_tc_flow;
-u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow);
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr);
void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
struct flow_match_basic *match, bool outer,
@@ -289,6 +308,8 @@ int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
#else /* CONFIG_MLX5_CLS_ACT */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
+static inline int mlx5e_tc_ht_init(struct rhashtable *tc_ht) { return 0; }
+static inline void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht) {}
static inline int
mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{ return -EOPNOTSUPP; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index ee7ecb88adc1..2dc48406cd08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -53,117 +53,6 @@ static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
}
}
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
-{
- int dscp_cp = 0;
-
- if (skb->protocol == htons(ETH_P_IP))
- dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
- else if (skb->protocol == htons(ETH_P_IPV6))
- dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
-
- return priv->dcbx_dp.dscp2prio[dscp_cp];
-}
-#endif
-
-static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
- int up = 0;
-
- if (!netdev_get_num_tc(dev))
- goto return_txq;
-
-#ifdef CONFIG_MLX5_CORE_EN_DCB
- if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
- up = mlx5e_get_dscp_up(priv, skb);
- else
-#endif
- if (skb_vlan_tag_present(skb))
- up = skb_vlan_tag_get_prio(skb);
-
-return_txq:
- return priv->port_ptp_tc2realtxq[up];
-}
-
-static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
- u16 htb_maj_id)
-{
- u16 classid;
-
- if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id)
- classid = TC_H_MIN(skb->priority);
- else
- classid = READ_ONCE(priv->htb.defcls);
-
- if (!classid)
- return 0;
-
- return mlx5e_get_txq_by_classid(priv, classid);
-}
-
-u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
- int num_tc_x_num_ch;
- int txq_ix;
- int up = 0;
- int ch_ix;
-
- /* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
- num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
- if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) {
- struct mlx5e_ptp *ptp_channel;
-
- /* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
- u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id);
-
- if (unlikely(htb_maj_id)) {
- txq_ix = mlx5e_select_htb_queue(priv, skb, htb_maj_id);
- if (txq_ix > 0)
- return txq_ix;
- }
-
- ptp_channel = READ_ONCE(priv->channels.ptp);
- if (unlikely(ptp_channel &&
- test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
- mlx5e_use_ptpsq(skb)))
- return mlx5e_select_ptpsq(dev, skb);
-
- txq_ix = netdev_pick_tx(dev, skb, NULL);
- /* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs.
- * If they are selected, switch to regular queues.
- * Driver to select these queues only at mlx5e_select_ptpsq()
- * and mlx5e_select_htb_queue().
- */
- if (unlikely(txq_ix >= num_tc_x_num_ch))
- txq_ix %= num_tc_x_num_ch;
- } else {
- txq_ix = netdev_pick_tx(dev, skb, NULL);
- }
-
- if (!netdev_get_num_tc(dev))
- return txq_ix;
-
-#ifdef CONFIG_MLX5_CORE_EN_DCB
- if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
- up = mlx5e_get_dscp_up(priv, skb);
- else
-#endif
- if (skb_vlan_tag_present(skb))
- up = skb_vlan_tag_get_prio(skb);
-
- /* Normalize any picked txq_ix to [0, num_channels),
- * So we can return a txq_ix that matches the channel and
- * packet UP.
- */
- ch_ix = priv->txq2sq[txq_ix]->ch_ix;
-
- return priv->channel_tc2realtxq[ch_ix][up];
-}
-
static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
@@ -544,7 +433,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe *wqe;
u16 pi;
- pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
+ pi = mlx5e_txqsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
net_prefetchw(wqe->data);
@@ -645,7 +534,7 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_tx_skb_update_hwts_flags(skb);
- if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) {
+ if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) {
/* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
cseg = mlx5e_tx_mpwqe_session_complete(sq);
@@ -691,8 +580,21 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
struct mlx5e_txqsq *sq;
u16 pi;
+ /* All changes to txq2sq are performed in sync with mlx5e_xmit, when the
+ * queue being changed is disabled, and smp_wmb guarantees that the
+ * changes are visible before mlx5e_xmit tries to read from txq2sq. It
+ * guarantees that the value of txq2sq[qid] doesn't change while
+ * mlx5e_xmit is running on queue number qid. smp_wmb is paired with
+ * HARD_TX_LOCK around ndo_start_xmit, which serves as an ACQUIRE.
+ */
sq = priv->txq2sq[skb_get_queue_mapping(skb)];
if (unlikely(!sq)) {
+ /* Two cases when sq can be NULL:
+ * 1. The HTB node is registered, and mlx5e_select_queue
+ * selected its queue ID, but the SQ itself is not yet created.
+ * 2. HTB SQ creation failed. Similar to the previous case, but
+ * the SQ won't be created.
+ */
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
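The new comment in mlx5e_xmit() documents a release/acquire pairing: txq2sq slots are only updated while the queue is disabled, published with smp_wmb(), and observed under HARD_TX_LOCK. A standalone C11 analogue of the same publication pattern (userspace sketch, not driver code):

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

/* The writer fills the slot while the "queue" is disabled, then publishes
 * with a release store; the reader's acquire load stands in for the
 * HARD_TX_LOCK the real driver relies on.
 */
static int sq_storage = 42;
static _Atomic(int *) txq2sq_slot;

static int writer(void *arg)
{
	(void)arg;
	atomic_store_explicit(&txq2sq_slot, &sq_storage, memory_order_release);
	return 0;
}

static int reader(void *arg)
{
	int *sq = atomic_load_explicit(&txq2sq_slot, memory_order_acquire);

	(void)arg;
	if (sq) /* mlx5e_xmit drops the skb when the slot is still NULL */
		printf("sq value: %d\n", *sq);
	return 0;
}

int main(void)
{
	thrd_t w, r;

	thrd_create(&w, writer, NULL);
	thrd_create(&r, reader, NULL);
	thrd_join(w, NULL);
	thrd_join(r, NULL);
	return 0;
}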
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 48a45aa54a3c..229728c80233 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -5,7 +5,6 @@
#include <linux/interrupt.h>
#include <linux/notifier.h>
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
@@ -439,7 +438,8 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
struct mlx5_eq_table *eq_table;
int i;
- eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);
+ eq_table = kvzalloc_node(sizeof(*eq_table), GFP_KERNEL,
+ dev->priv.numa_node);
if (!eq_table)
return -ENOMEM;
@@ -728,7 +728,8 @@ struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev,
struct mlx5_eq_param *param)
{
- struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
+ struct mlx5_eq *eq = kvzalloc_node(sizeof(*eq), GFP_KERNEL,
+ dev->priv.numa_node);
int err;
if (!eq)
@@ -888,10 +889,11 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
return ncomp_eqs;
INIT_LIST_HEAD(&table->comp_eqs_list);
nent = comp_eq_depth_devlink_param_get(dev);
+
for (i = 0; i < ncomp_eqs; i++) {
struct mlx5_eq_param param = {};
- eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+ eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node);
if (!eq) {
err = -ENOMEM;
goto clean;
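The eq.c hunks move the EQ table and EQ objects onto the device's NUMA node via the *_node allocator variants. A minimal kernel-style sketch of the pattern (struct and function names here are illustrative, not part of the patch):

#include <linux/slab.h>

/* Put per-device state on the NUMA node closest to the device so
 * hot-path accesses stay node-local. The *_node variants fall back to
 * any node if local memory is unavailable.
 */
struct eq_table_sketch {
	int nent;
};

static struct eq_table_sketch *eq_table_alloc(int numa_node)
{
	return kvzalloc_node(sizeof(struct eq_table_sketch), GFP_KERNEL,
			     numa_node);
}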
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
index 39e948bc1204..a994e71e05c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
@@ -92,6 +92,7 @@ static int esw_acl_ingress_mod_metadata_create(struct mlx5_eswitch *esw,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
+ flow_act.fg = vport->ingress.offloads.metadata_allmatch_grp;
vport->ingress.offloads.modify_metadata_rule =
mlx5_add_flow_rules(vport->ingress.acl,
NULL, &flow_act, NULL, 0);
@@ -117,6 +118,36 @@ static void esw_acl_ingress_mod_metadata_destroy(struct mlx5_eswitch *esw,
vport->ingress.offloads.modify_metadata_rule = NULL;
}
+static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *flow_rule;
+ int err = 0;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ flow_act.fg = vport->ingress.offloads.drop_grp;
+ flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ goto out;
+ }
+
+ vport->ingress.offloads.drop_rule = flow_rule;
+out:
+ return err;
+}
+
+static void esw_acl_ingress_src_port_drop_destroy(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (!vport->ingress.offloads.drop_rule)
+ return;
+
+ mlx5_del_flow_rules(vport->ingress.offloads.drop_rule);
+ vport->ingress.offloads.drop_rule = NULL;
+}
+
static int esw_acl_ingress_ofld_rules_create(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
@@ -154,6 +185,7 @@ static void esw_acl_ingress_ofld_rules_destroy(struct mlx5_eswitch *esw,
{
esw_acl_ingress_allow_rule_destroy(vport);
esw_acl_ingress_mod_metadata_destroy(esw, vport);
+ esw_acl_ingress_src_port_drop_destroy(esw, vport);
}
static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw,
@@ -170,10 +202,29 @@ static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw,
if (!flow_group_in)
return -ENOMEM;
+ if (vport->vport == MLX5_VPORT_UPLINK) {
+ /* This group can hold an FTE to drop all traffic.
+ * Needed in case LAG is enabled.
+ */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+ if (IS_ERR(g)) {
+ ret = PTR_ERR(g);
+ esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
+ vport->vport, ret);
+ goto drop_err;
+ }
+ vport->ingress.offloads.drop_grp = g;
+ flow_index++;
+ }
+
if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
/* This group is to hold FTE to match untagged packets when prio_tag
* is enabled.
*/
+ memset(flow_group_in, 0, inlen);
match_criteria = MLX5_ADDR_OF(create_flow_group_in,
flow_group_in, match_criteria);
MLX5_SET(create_flow_group_in, flow_group_in,
@@ -221,6 +272,11 @@ metadata_err:
vport->ingress.offloads.metadata_prio_tag_grp = NULL;
}
prio_tag_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.offloads.drop_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp);
+ vport->ingress.offloads.drop_grp = NULL;
+ }
+drop_err:
kvfree(flow_group_in);
return ret;
}
@@ -236,6 +292,11 @@ static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
vport->ingress.offloads.metadata_prio_tag_grp = NULL;
}
+
+ if (vport->ingress.offloads.drop_grp) {
+ mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp);
+ vport->ingress.offloads.drop_grp = NULL;
+ }
}
int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
@@ -252,6 +313,8 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
if (mlx5_eswitch_vport_match_metadata_enabled(esw))
num_ftes++;
+ if (vport->vport == MLX5_VPORT_UPLINK)
+ num_ftes++;
if (esw_acl_ingress_prio_tag_enabled(esw, vport))
num_ftes++;
@@ -320,3 +383,27 @@ out:
vport->metadata = vport->default_metadata;
return err;
}
+
+int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+ if (IS_ERR(vport)) {
+ esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
+ return PTR_ERR(vport);
+ }
+
+ return esw_acl_ingress_src_port_drop_create(esw, vport);
+}
+
+void mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+ if (WARN_ON_ONCE(IS_ERR(vport))) {
+ esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
+ return;
+ }
+
+ esw_acl_ingress_src_port_drop_destroy(esw, vport);
+}
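These two exported helpers are consumed by the LAG code further down in this patch. A hedged sketch of the intended failover sequence (failover_to() is hypothetical): first remove any stale drop rule, then install a catch-all drop on the now-inactive uplink's ingress ACL, mirroring mlx5_lag_drop_rule_setup() below.

static void failover_to(struct mlx5_eswitch *now_active,
			struct mlx5_eswitch *now_inactive)
{
	int err;

	/* Remove the old rule first so the active port never drops. */
	mlx5_esw_acl_ingress_vport_drop_rule_destroy(now_active,
						     MLX5_VPORT_UPLINK);
	err = mlx5_esw_acl_ingress_vport_drop_rule_create(now_inactive,
							  MLX5_VPORT_UPLINK);
	if (err)
		pr_err("failed to install uplink drop rule: %d\n", err);
}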
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
index c57869b93d60..11d3d3978848 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
@@ -6,6 +6,7 @@
#include "eswitch.h"
+#ifdef CONFIG_MLX5_ESWITCH
/* Eswitch acl egress external APIs */
int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void esw_acl_egress_ofld_cleanup(struct mlx5_vport *vport);
@@ -25,5 +26,19 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vpor
void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_num,
u32 metadata);
+void mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw, u16 vport_num);
+#else /* CONFIG_MLX5_ESWITCH */
+static inline void
+mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw,
+ u16 vport_num)
+{}
+
+static inline int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw,
+ u16 vport_num)
+{
+ return 0;
+}
+#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_ACL_OFLD_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
index c275fe028b6d..0abef71cb839 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
@@ -86,7 +86,7 @@ mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
mlx5_eswitch_is_vf_vport(esw, vport_num) &&
esw->dev == dest_mdev &&
attr->ip_version &&
- attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
+ attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE;
}
u16
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index ead5e8acc8be..bac5160837c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -113,8 +113,11 @@ struct vport_ingress {
* packet with metadata.
*/
struct mlx5_flow_group *metadata_allmatch_grp;
+ /* Optional group to add a drop all rule */
+ struct mlx5_flow_group *drop_grp;
struct mlx5_modify_hdr *modify_metadata;
struct mlx5_flow_handle *modify_metadata_rule;
+ struct mlx5_flow_handle *drop_rule;
} offloads;
};
@@ -448,22 +451,6 @@ enum {
MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE = BIT(2),
};
-enum {
- MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0),
- MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1),
- MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2),
- MLX5_ESW_ATTR_FLAG_SRC_REWRITE = BIT(3),
- MLX5_ESW_ATTR_FLAG_SAMPLE = BIT(4),
- MLX5_ESW_ATTR_FLAG_ACCEPT = BIT(5),
-};
-
-/* Returns true if any of the flags that require skipping further TC/NF processing are set. */
-static inline bool
-mlx5_esw_attr_flags_skip(u32 attr_flags)
-{
- return attr_flags & (MLX5_ESW_ATTR_FLAG_SLOW_PATH | MLX5_ESW_ATTR_FLAG_ACCEPT);
-}
-
struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
struct mlx5_core_dev *in_mdev;
@@ -487,6 +474,7 @@ struct mlx5_esw_flow_attr {
int src_port_rewrite_act_id;
} dests[MLX5_MAX_FLOW_FWD_VPORTS];
struct mlx5_rx_tun_attr *rx_tun_attr;
+ struct ethhdr eth;
struct mlx5_pkt_reformat *decap_pkt_reformat;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index cfcd72bad9af..3f63df127091 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -180,7 +180,7 @@ esw_setup_decap_indir(struct mlx5_eswitch *esw,
{
struct mlx5_flow_table *ft;
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
+ if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
return -EOPNOTSUPP;
ft = mlx5_esw_indir_table_get(esw, attr, spec,
@@ -201,12 +201,12 @@ esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
static int
esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
- struct mlx5_flow_attr *attr,
+ u32 sampler_id,
int i)
{
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
- dest[i].sampler_id = attr->sample_attr->sampler_id;
+ dest[i].sampler_id = sampler_id;
return 0;
}
@@ -297,7 +297,7 @@ esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
int err;
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
+ if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
return -EOPNOTSUPP;
/* flow steering cannot handle more than one dest with the same ft
@@ -364,7 +364,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
int j, err;
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
+ if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
return -EOPNOTSUPP;
for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
@@ -463,15 +463,16 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
esw_src_port_rewrite_supported(esw))
- attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
+ attr->flags |= MLX5_ATTR_FLAG_SRC_REWRITE;
- if (attr->flags & MLX5_ESW_ATTR_FLAG_SAMPLE) {
- esw_setup_sampler_dest(dest, flow_act, attr, *i);
+ if (attr->flags & MLX5_ATTR_FLAG_SAMPLE &&
+ !(attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)) {
+ esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
(*i)++;
} else if (attr->dest_ft) {
esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
(*i)++;
- } else if (mlx5_esw_attr_flags_skip(attr->flags)) {
+ } else if (mlx5e_tc_attr_flags_skip(attr->flags)) {
esw_setup_slow_path_dest(dest, flow_act, chains, *i);
(*i)++;
} else if (attr->dest_chain) {
@@ -498,7 +499,7 @@ esw_cleanup_dests(struct mlx5_eswitch *esw,
if (attr->dest_ft) {
esw_cleanup_decap_indir(esw, attr);
- } else if (!mlx5_esw_attr_flags_skip(attr->flags)) {
+ } else if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
if (attr->dest_chain)
esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
else if (esw_is_indir_table(esw, attr))
@@ -589,7 +590,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
else
fdb = attr->ft;
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
+ if (!(attr->flags & MLX5_ATTR_FLAG_NO_IN_PORT))
mlx5_eswitch_set_rule_source_port(esw, spec, attr,
esw_attr->in_mdev->priv.eswitch,
esw_attr->in_rep->vport);
@@ -721,7 +722,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
mlx5_del_flow_rules(rule);
- if (!mlx5_esw_attr_flags_skip(attr->flags)) {
+ if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
/* unref the term table */
for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
if (esw_attr->dests[i].termtbl)
@@ -863,7 +864,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (err)
goto unlock;
- attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
+ attr->flags &= ~MLX5_ATTR_FLAG_VLAN_HANDLED;
vport = esw_vlan_action_get_vport(esw_attr, push, pop);
@@ -871,7 +872,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
/* tracks VF --> wire rules without vlan push action */
if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
vport->vlan_refcount++;
- attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
+ attr->flags |= MLX5_ATTR_FLAG_VLAN_HANDLED;
}
goto unlock;
@@ -902,7 +903,7 @@ skip_set_push:
}
out:
if (!err)
- attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
+ attr->flags |= MLX5_ATTR_FLAG_VLAN_HANDLED;
unlock:
mutex_unlock(&esw->state_lock);
return err;
@@ -921,7 +922,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
return 0;
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
+ if (!(attr->flags & MLX5_ATTR_FLAG_VLAN_HANDLED))
return 0;
push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
@@ -2378,60 +2379,6 @@ void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
}
-static int esw_set_uplink_slave_ingress_root(struct mlx5_core_dev *master,
- struct mlx5_core_dev *slave)
-{
- u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
- u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
- struct mlx5_eswitch *esw;
- struct mlx5_flow_root_namespace *root;
- struct mlx5_flow_namespace *ns;
- struct mlx5_vport *vport;
- int err;
-
- MLX5_SET(set_flow_table_root_in, in, opcode,
- MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
- MLX5_SET(set_flow_table_root_in, in, table_type, FS_FT_ESW_INGRESS_ACL);
- MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
- MLX5_SET(set_flow_table_root_in, in, vport_number, MLX5_VPORT_UPLINK);
-
- if (master) {
- esw = master->priv.eswitch;
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
- MLX5_SET(set_flow_table_root_in, in, table_of_other_vport, 1);
- MLX5_SET(set_flow_table_root_in, in, table_vport_number,
- MLX5_VPORT_UPLINK);
-
- ns = mlx5_get_flow_vport_acl_namespace(master,
- MLX5_FLOW_NAMESPACE_ESW_INGRESS,
- vport->index);
- root = find_root(&ns->node);
- mutex_lock(&root->chain_lock);
-
- MLX5_SET(set_flow_table_root_in, in,
- table_eswitch_owner_vhca_id_valid, 1);
- MLX5_SET(set_flow_table_root_in, in,
- table_eswitch_owner_vhca_id,
- MLX5_CAP_GEN(master, vhca_id));
- MLX5_SET(set_flow_table_root_in, in, table_id,
- root->root_ft->id);
- } else {
- esw = slave->priv.eswitch;
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
- ns = mlx5_get_flow_vport_acl_namespace(slave,
- MLX5_FLOW_NAMESPACE_ESW_INGRESS,
- vport->index);
- root = find_root(&ns->node);
- mutex_lock(&root->chain_lock);
- MLX5_SET(set_flow_table_root_in, in, table_id, root->root_ft->id);
- }
-
- err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
- mutex_unlock(&root->chain_lock);
-
- return err;
-}
-
static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
struct mlx5_core_dev *slave)
{
@@ -2613,15 +2560,10 @@ int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
{
int err;
- err = esw_set_uplink_slave_ingress_root(master_esw->dev,
- slave_esw->dev);
- if (err)
- return -EINVAL;
-
err = esw_set_slave_root_fdb(master_esw->dev,
slave_esw->dev);
if (err)
- goto err_fdb;
+ return err;
err = esw_set_master_egress_rule(master_esw->dev,
slave_esw->dev);
@@ -2633,9 +2575,6 @@ int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
err_acl:
esw_set_slave_root_fdb(NULL, slave_esw->dev);
-err_fdb:
- esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev);
-
return err;
}
@@ -2644,7 +2583,6 @@ void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
{
esw_unset_master_egress_rule(master_esw->dev);
esw_set_slave_root_fdb(NULL, slave_esw->dev);
- esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev);
}
#define ESW_OFFLOADS_DEVCOM_PAIR (0)
@@ -2841,6 +2779,19 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
return true;
}
+#define MLX5_ESW_METADATA_RSVD_UPLINK 1
+
+/* Share the same metadata for both uplinks. This is fine because:
+ * (a) In shared FDB mode (LAG) both uplinks are treated the
+ * same and tagged with the same metadata.
+ * (b) In non-shared FDB mode, packets from physical port0
+ * cannot hit the eswitch of PF1 and vice versa.
+ */
+static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw)
+{
+ return MLX5_ESW_METADATA_RSVD_UPLINK;
+}
+
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
@@ -2855,8 +2806,10 @@ u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
return 0;
/* Metadata is 4 bits of PFNUM and 12 bits of unique id */
- /* Use only non-zero vport_id (1-4095) for all PF's */
- id = ida_alloc_range(&esw->offloads.vport_metadata_ida, 1, vport_end_ida, GFP_KERNEL);
+ /* Use only non-zero vport_id (2-4095) for all PFs */
+ id = ida_alloc_range(&esw->offloads.vport_metadata_ida,
+ MLX5_ESW_METADATA_RSVD_UPLINK + 1,
+ vport_end_ida, GFP_KERNEL);
if (id < 0)
return 0;
id = (pf_num << ESW_VPORT_BITS) | id;
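The allocator now starts at MLX5_ESW_METADATA_RSVD_UPLINK + 1 so that metadata value 1 can be handed to every uplink unconditionally. A minimal sketch of the reservation scheme (names are illustrative; the 12-bit range follows the comment above):

#include <linux/idr.h>

#define METADATA_RSVD_UPLINK 1	/* stands in for MLX5_ESW_METADATA_RSVD_UPLINK */

static DEFINE_IDA(metadata_ida_sketch);

/* 0 means "no metadata" and 1 is shared by every uplink, so regular
 * vports draw their unique id from [2, 4095].
 */
static int metadata_alloc_sketch(void)
{
	return ida_alloc_range(&metadata_ida_sketch, METADATA_RSVD_UPLINK + 1,
			       (1 << 12) - 1, GFP_KERNEL);
}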
@@ -2874,7 +2827,11 @@ void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
+ if (vport->vport == MLX5_VPORT_UPLINK)
+ vport->default_metadata = mlx5_esw_match_metadata_reserved(esw);
+ else
+ vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
+
vport->metadata = vport->default_metadata;
return vport->metadata ? 0 : -ENOSPC;
}
@@ -2885,6 +2842,9 @@ static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
if (!vport->default_metadata)
return;
+ if (vport->vport == MLX5_VPORT_UPLINK)
+ return;
+
WARN_ON(vport->metadata != vport->default_metadata);
mlx5_esw_match_metadata_free(esw, vport->default_metadata);
}
@@ -3377,6 +3337,27 @@ static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
!mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
}
+/* FIXME: devl_unlock() followed by devl_lock() inside driver callback
+ * is never correct and prone to races. It's a transitional workaround;
+ * never repeat this pattern.
+ *
+ * This code MUST be fixed before removing devlink_mutex as it is safe
+ * to do only because of that mutex.
+ */
+static void mlx5_eswitch_mode_callback_enter(struct devlink *devlink,
+ struct mlx5_eswitch *esw)
+{
+ devl_unlock(devlink);
+ down_write(&esw->mode_lock);
+}
+
+static void mlx5_eswitch_mode_callback_exit(struct devlink *devlink,
+ struct mlx5_eswitch *esw)
+{
+ up_write(&esw->mode_lock);
+ devl_lock(devlink);
+}
+
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
@@ -3391,6 +3372,15 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
+ /* FIXME: devl_unlock() followed by devl_lock() inside driver callback
+ * is never correct and prone to races. It's a transitional workaround;
+ * never repeat this pattern.
+ *
+ * This code MUST be fixed before removing devlink_mutex as it is safe
+ * to do only because of that mutex.
+ */
+ devl_unlock(devlink);
+
mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
@@ -3421,6 +3411,7 @@ unlock:
mlx5_esw_unlock(esw);
enable_lag:
mlx5_lag_enable_change(esw->dev);
+ devl_lock(devlink);
return err;
}
@@ -3433,14 +3424,14 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_write(&esw->mode_lock);
+ mlx5_eswitch_mode_callback_enter(devlink, esw);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto unlock;
err = esw_mode_to_devlink(esw->mode, mode);
unlock:
- up_write(&esw->mode_lock);
+ mlx5_eswitch_mode_callback_exit(devlink, esw);
return err;
}
@@ -3487,7 +3478,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_write(&esw->mode_lock);
+ mlx5_eswitch_mode_callback_enter(devlink, esw);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto out;
@@ -3524,11 +3515,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
goto out;
esw->offloads.inline_mode = mlx5_mode;
- up_write(&esw->mode_lock);
+ mlx5_eswitch_mode_callback_exit(devlink, esw);
return 0;
out:
- up_write(&esw->mode_lock);
+ mlx5_eswitch_mode_callback_exit(devlink, esw);
return err;
}
@@ -3541,14 +3532,14 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_write(&esw->mode_lock);
+ mlx5_eswitch_mode_callback_enter(devlink, esw);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto unlock;
err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
unlock:
- up_write(&esw->mode_lock);
+ mlx5_eswitch_mode_callback_exit(devlink, esw);
return err;
}
@@ -3564,7 +3555,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_write(&esw->mode_lock);
+ mlx5_eswitch_mode_callback_enter(devlink, esw);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto unlock;
@@ -3610,7 +3601,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
}
unlock:
- up_write(&esw->mode_lock);
+ mlx5_eswitch_mode_callback_exit(devlink, esw);
return err;
}
@@ -3624,15 +3615,14 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
if (IS_ERR(esw))
return PTR_ERR(esw);
-
- down_write(&esw->mode_lock);
+ mlx5_eswitch_mode_callback_enter(devlink, esw);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto unlock;
*encap = esw->offloads.encap;
unlock:
- up_write(&esw->mode_lock);
+ mlx5_eswitch_mode_callback_exit(devlink, esw);
return err;
}
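Each devlink eswitch callback in this file is now bracketed by the enter/exit helpers so that esw->mode_lock is never taken under the devlink instance lock. A hedged template for a hypothetical future getter (mlx5_devlink_eswitch_foo_get() does not exist in this patch):

static int mlx5_devlink_eswitch_foo_get(struct devlink *devlink, u8 *foo)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	mlx5_eswitch_mode_callback_enter(devlink, esw);
	err = eswitch_devlink_esw_mode_check(esw);
	if (!err)
		*foo = 0;	/* read device state under mode_lock */
	mlx5_eswitch_mode_callback_exit(devlink, esw);
	return err;
}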
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index 182306bbefaa..ee568bf34ae2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -219,12 +219,14 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level) ||
- mlx5_esw_attr_flags_skip(attr->flags) ||
+ mlx5e_tc_attr_flags_skip(attr->flags) ||
(!mlx5_eswitch_offload_is_uplink_port(esw, spec) && !esw_attr->int_port))
return false;
/* push vlan on RX */
- if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)
+ if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
+ !(mlx5_fs_get_capabilities(esw->dev, MLX5_FLOW_NAMESPACE_FDB) &
+ MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX))
return true;
/* hairpin */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index 2ce4241459ce..39c03dcbd196 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index dafe341358c7..a0ac17c3f12f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -152,6 +152,12 @@ static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
return 0;
}
+static u32 mlx5_cmd_stub_get_capabilities(struct mlx5_flow_root_namespace *ns,
+ enum fs_flow_table_type ft_type)
+{
+ return 0;
+}
+
static int mlx5_cmd_set_slave_root_fdb(struct mlx5_core_dev *master,
struct mlx5_core_dev *slave,
bool ft_id_valid,
@@ -971,6 +977,12 @@ static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns,
return err ? err : MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
}
+static u32 mlx5_cmd_get_capabilities(struct mlx5_flow_root_namespace *ns,
+ enum fs_flow_table_type ft_type)
+{
+ return 0;
+}
+
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.create_flow_table = mlx5_cmd_create_flow_table,
.destroy_flow_table = mlx5_cmd_destroy_flow_table,
@@ -990,6 +1002,7 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.set_peer = mlx5_cmd_stub_set_peer,
.create_ns = mlx5_cmd_stub_create_ns,
.destroy_ns = mlx5_cmd_stub_destroy_ns,
+ .get_capabilities = mlx5_cmd_get_capabilities,
};
static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
@@ -1011,6 +1024,7 @@ static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
.set_peer = mlx5_cmd_stub_set_peer,
.create_ns = mlx5_cmd_stub_create_ns,
.destroy_ns = mlx5_cmd_stub_destroy_ns,
+ .get_capabilities = mlx5_cmd_stub_get_capabilities,
};
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 220ec632d35a..274004e80f03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -101,6 +101,9 @@ struct mlx5_flow_cmds {
u16 format_id, u32 *match_mask);
int (*destroy_match_definer)(struct mlx5_flow_root_namespace *ns,
int definer_id);
+
+ u32 (*get_capabilities)(struct mlx5_flow_root_namespace *ns,
+ enum fs_flow_table_type ft_type);
};
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 537c82b9aa53..816d991f7621 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1696,6 +1696,7 @@ static void free_match_list(struct match_list *head, bool ft_locked)
static int build_match_list(struct match_list *match_head,
struct mlx5_flow_table *ft,
const struct mlx5_flow_spec *spec,
+ struct mlx5_flow_group *fg,
bool ft_locked)
{
struct rhlist_head *tmp, *list;
@@ -1710,6 +1711,9 @@ static int build_match_list(struct match_list *match_head,
rhl_for_each_entry_rcu(g, tmp, list, hash) {
struct match_list *curr_match;
+ if (fg && fg != g)
+ continue;
+
if (unlikely(!tree_get_node(&g->node)))
continue;
@@ -1889,6 +1893,9 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
if (!check_valid_spec(spec))
return ERR_PTR(-EINVAL);
+ if (flow_act->fg && ft->autogroup.active)
+ return ERR_PTR(-EINVAL);
+
for (i = 0; i < dest_num; i++) {
if (!dest_is_valid(&dest[i], flow_act, ft))
return ERR_PTR(-EINVAL);
@@ -1898,7 +1905,7 @@ search_again_locked:
version = atomic_read(&ft->node.version);
/* Collect all fgs which has a matching match_criteria */
- err = build_match_list(&match_head, ft, spec, take_write);
+ err = build_match_list(&match_head, ft, spec, flow_act->fg, take_write);
if (err) {
if (take_write)
up_write_ref_node(&ft->node, false);
@@ -3042,6 +3049,22 @@ void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
steering->esw_ingress_root_ns = NULL;
}
+u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type)
+{
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_flow_namespace *ns;
+
+ ns = mlx5_get_flow_namespace(dev, type);
+ if (!ns)
+ return 0;
+
+ root = find_root(&ns->node);
+ if (!root)
+ return 0;
+
+ return root->cmds->get_capabilities(root, root->table_type);
+}
+
static int init_egress_root_ns(struct mlx5_flow_steering *steering)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 5469b08d635f..c488a7c5b07e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -120,6 +120,11 @@ enum mlx5_flow_steering_mode {
MLX5_FLOW_STEERING_MODE_SMFS
};
+enum mlx5_flow_steering_capabilty {
+ MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX = 1UL << 0,
+ MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX = 1UL << 1,
+};
+
struct mlx5_flow_steering {
struct mlx5_core_dev *dev;
enum mlx5_flow_steering_mode mode;
@@ -301,6 +306,8 @@ void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports);
void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev);
+u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type);
+
struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
#define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); }
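mlx5_fs_get_capabilities() gives callers a single bitmask to branch on; the termtbl hunk above is the first in-tree user. A minimal caller-side sketch (this wrapper function is illustrative only):

static bool fdb_can_push_vlan_on_rx(struct mlx5_core_dev *dev)
{
	/* Query the steering backend's capability mask once and test
	 * individual MLX5_FLOW_STEERING_CAP_* bits.
	 */
	return mlx5_fs_get_capabilities(dev, MLX5_FLOW_NAMESPACE_FDB) &
	       MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX;
}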
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 2d8406fab844..614687e0e3d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -32,7 +32,6 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
-#include <linux/module.h>
#include "mlx5_core.h"
#include "../../mlxfw/mlxfw.h"
#include "lib/tout.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 84dbe46d5ede..4aa22dce9b77 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -57,7 +57,8 @@ static int mlx5_reg_mfrl_set(struct mlx5_core_dev *dev, u8 reset_level,
return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MFRL, 0, 1);
}
-static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type)
+static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level,
+ u8 *reset_type, u8 *reset_state)
{
u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
@@ -71,25 +72,67 @@ static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *r
*reset_level = MLX5_GET(mfrl_reg, out, reset_level);
if (reset_type)
*reset_type = MLX5_GET(mfrl_reg, out, reset_type);
+ if (reset_state)
+ *reset_state = MLX5_GET(mfrl_reg, out, reset_state);
return 0;
}
int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type)
{
- return mlx5_reg_mfrl_query(dev, reset_level, reset_type);
+ return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL);
}
-int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel)
+static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev,
+ struct netlink_ext_ack *extack)
+{
+ u8 reset_state;
+
+ if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state))
+ goto out;
+
+ switch (reset_state) {
+ case MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION:
+ case MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS:
+ NL_SET_ERR_MSG_MOD(extack, "Sync reset was already triggered");
+ return -EBUSY;
+ case MLX5_MFRL_REG_RESET_STATE_TIMEOUT:
+ NL_SET_ERR_MSG_MOD(extack, "Sync reset timed out");
+ return -ETIMEDOUT;
+ case MLX5_MFRL_REG_RESET_STATE_NACK:
+ NL_SET_ERR_MSG_MOD(extack, "One of the hosts disabled reset");
+ return -EPERM;
+ }
+
+out:
+ NL_SET_ERR_MSG_MOD(extack, "Sync reset failed");
+ return -EIO;
+}
+
+int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
+ struct netlink_ext_ack *extack)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
int err;
set_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
- err = mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, reset_type_sel, 0, true);
- if (err)
- clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
- return err;
+
+ MLX5_SET(mfrl_reg, in, reset_level, MLX5_MFRL_REG_RESET_LEVEL3);
+ MLX5_SET(mfrl_reg, in, rst_type_sel, reset_type_sel);
+ MLX5_SET(mfrl_reg, in, pci_sync_for_fw_update_start, 1);
+ err = mlx5_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MFRL, 0, 1, false);
+ if (!err)
+ return 0;
+
+ clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
+ if (err == -EREMOTEIO && MLX5_CAP_MCAM_FEATURE(dev, reset_state))
+ return mlx5_fw_reset_get_reset_state_err(dev, extack);
+
+ NL_SET_ERR_MSG_MOD(extack, "Sync reset command failed");
+ return mlx5_cmd_check(dev, err, in, out);
}
int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
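The reset_state values above come from the MFRL register layout in mlx5_ifc. A hypothetical logging helper over the same values (not part of the patch):

static const char *reset_state_str(u8 reset_state)
{
	switch (reset_state) {
	case MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION:
		return "in negotiation";
	case MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS:
		return "in progress";
	case MLX5_MFRL_REG_RESET_STATE_TIMEOUT:
		return "timed out";
	case MLX5_MFRL_REG_RESET_STATE_NACK:
		return "nacked by a host";
	default:
		return "idle";
	}
}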
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
index 7761ee5fc7d0..694fc7cb2684 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
@@ -9,7 +9,8 @@
void mlx5_fw_reset_enable_remote_dev_reset_set(struct mlx5_core_dev *dev, bool enable);
bool mlx5_fw_reset_enable_remote_dev_reset_get(struct mlx5_core_dev *dev);
int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type);
-int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel);
+int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
+ struct netlink_ext_ack *extack);
int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 737df402c927..659021c31cbd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 4ddf6b330a44..6cad3b72c133 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -31,15 +31,22 @@
*/
#include <linux/netdevice.h>
+#include <net/bonding.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include "lib/devcom.h"
#include "mlx5_core.h"
#include "eswitch.h"
+#include "esw/acl/ofld.h"
#include "lag.h"
#include "mp.h"
+enum {
+ MLX5_LAG_EGRESS_PORT_1 = 1,
+ MLX5_LAG_EGRESS_PORT_2,
+};
+
/* General purpose, use for short periods of time.
* Beware of lock dependencies (preferably, no locks should be acquired
* under it).
@@ -193,15 +200,71 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
p2en = tracker->netdev_state[MLX5_LAG_P2].tx_enabled &&
tracker->netdev_state[MLX5_LAG_P2].link_up;
- *port1 = 1;
- *port2 = 2;
+ *port1 = MLX5_LAG_EGRESS_PORT_1;
+ *port2 = MLX5_LAG_EGRESS_PORT_2;
if ((!p1en && !p2en) || (p1en && p2en))
return;
if (p1en)
- *port2 = 1;
+ *port2 = MLX5_LAG_EGRESS_PORT_1;
+ else
+ *port1 = MLX5_LAG_EGRESS_PORT_2;
+}
+
+static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev)
+{
+ return ldev->pf[MLX5_LAG_P1].has_drop || ldev->pf[MLX5_LAG_P2].has_drop;
+}
+
+static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev)
+{
+ int i;
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ if (!ldev->pf[i].has_drop)
+ continue;
+
+ mlx5_esw_acl_ingress_vport_drop_rule_destroy(ldev->pf[i].dev->priv.eswitch,
+ MLX5_VPORT_UPLINK);
+ ldev->pf[i].has_drop = false;
+ }
+}
+
+static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
+ struct mlx5_core_dev *inactive;
+ u8 v2p_port1, v2p_port2;
+ int inactive_idx;
+ int err;
+
+ /* First delete the current drop rule so there won't be any dropped
+ * packets
+ */
+ mlx5_lag_drop_rule_cleanup(ldev);
+
+ if (!ldev->tracker.has_inactive)
+ return;
+
+ mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1, &v2p_port2);
+
+ if (v2p_port1 == MLX5_LAG_EGRESS_PORT_1) {
+ inactive = dev1;
+ inactive_idx = MLX5_LAG_P2;
+ } else {
+ inactive = dev0;
+ inactive_idx = MLX5_LAG_P1;
+ }
+
+ err = mlx5_esw_acl_ingress_vport_drop_rule_create(inactive->priv.eswitch,
+ MLX5_VPORT_UPLINK);
+ if (!err)
+ ldev->pf[inactive_idx].has_drop = true;
else
- *port1 = 2;
+ mlx5_core_err(inactive,
+ "Failed to create lag drop rule, error: %d", err);
}
static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 v2p_port1, u8 v2p_port2)
@@ -238,6 +301,10 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
ldev->v2p_map[MLX5_LAG_P1],
ldev->v2p_map[MLX5_LAG_P2]);
}
+
+ if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
+ !(ldev->flags & MLX5_LAG_FLAG_ROCE))
+ mlx5_lag_drop_rule_setup(ldev, tracker);
}
static void mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
@@ -339,6 +406,10 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
return err;
}
+ if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
+ !roce_lag)
+ mlx5_lag_drop_rule_setup(ldev, tracker);
+
ldev->flags |= flags;
ldev->shared_fdb = shared_fdb;
return 0;
@@ -347,6 +418,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
u8 flags = ldev->flags;
@@ -356,8 +428,8 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
mlx5_lag_mp_reset(ldev);
if (ldev->shared_fdb) {
- mlx5_eswitch_offloads_destroy_single_fdb(ldev->pf[MLX5_LAG_P1].dev->priv.eswitch,
- ldev->pf[MLX5_LAG_P2].dev->priv.eswitch);
+ mlx5_eswitch_offloads_destroy_single_fdb(dev0->priv.eswitch,
+ dev1->priv.eswitch);
ldev->shared_fdb = false;
}
@@ -372,11 +444,15 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
"Failed to deactivate VF LAG; driver restart required\n"
"Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
}
- } else if (flags & MLX5_LAG_FLAG_HASH_BASED) {
- mlx5_lag_port_sel_destroy(ldev);
+ return err;
}
- return err;
+ if (flags & MLX5_LAG_FLAG_HASH_BASED)
+ mlx5_lag_port_sel_destroy(ldev);
+ if (mlx5_lag_has_drop_rule(ldev))
+ mlx5_lag_drop_rule_cleanup(ldev);
+
+ return 0;
}
static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
@@ -613,6 +689,8 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
struct net_device *upper = info->upper_dev, *ndev_tmp;
struct netdev_lag_upper_info *lag_upper_info = NULL;
bool is_bonded, is_in_lag, mode_supported;
+ bool has_inactive = false;
+ struct slave *slave;
int bond_status = 0;
int num_slaves = 0;
int changed = 0;
@@ -632,8 +710,12 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
rcu_read_lock();
for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
- if (idx >= 0)
+ if (idx >= 0) {
+ slave = bond_slave_get_rcu(ndev_tmp);
+ if (slave)
+ has_inactive |= bond_is_slave_inactive(slave);
bond_status |= (1 << idx);
+ }
num_slaves++;
}
@@ -648,6 +730,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
tracker->hash_type = lag_upper_info->hash_type;
}
+ tracker->has_inactive = has_inactive;
/* Determine bonding status:
* A device is considered bonded if both its physical ports are slaves
* of the same lag master, and only them.
@@ -704,6 +787,38 @@ static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
return 1;
}
+static int mlx5_handle_changeinfodata_event(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker,
+ struct net_device *ndev)
+{
+ struct net_device *ndev_tmp;
+ struct slave *slave;
+ bool has_inactive = false;
+ int idx;
+
+ if (!netif_is_lag_master(ndev))
+ return 0;
+
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(ndev, ndev_tmp) {
+ idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
+ if (idx < 0)
+ continue;
+
+ slave = bond_slave_get_rcu(ndev_tmp);
+ if (slave)
+ has_inactive |= bond_is_slave_inactive(slave);
+ }
+ rcu_read_unlock();
+
+ if (tracker->has_inactive == has_inactive)
+ return 0;
+
+ tracker->has_inactive = has_inactive;
+
+ return 1;
+}
+
static int mlx5_lag_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -712,7 +827,9 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
struct mlx5_lag *ldev;
int changed = 0;
- if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
+ if (event != NETDEV_CHANGEUPPER &&
+ event != NETDEV_CHANGELOWERSTATE &&
+ event != NETDEV_CHANGEINFODATA)
return NOTIFY_DONE;
ldev = container_of(this, struct mlx5_lag, nb);
@@ -728,6 +845,9 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
ndev, ptr);
break;
+ case NETDEV_CHANGEINFODATA:
+ changed = mlx5_handle_changeinfodata_event(ldev, &tracker, ndev);
+ break;
}
ldev->tracker = tracker;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index e5d231c31b54..cbf9a9003e55 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -28,6 +28,7 @@ enum {
struct lag_func {
struct mlx5_core_dev *dev;
struct net_device *netdev;
+ bool has_drop;
};
/* Used for collection of netdev event info. */
@@ -35,6 +36,7 @@ struct lag_tracker {
enum netdev_lag_tx_type tx_type;
struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS];
unsigned int is_bonded:1;
+ unsigned int has_inactive:1;
enum netdev_lag_hash hash_type;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index 1ca01a5b6cdd..4a6ec15ef046 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -50,7 +50,7 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev,
enum mlx5_lag_port_affinity port)
{
- struct lag_tracker tracker;
+ struct lag_tracker tracker = {};
if (!__mlx5_lag_is_multipath(ldev))
return;
@@ -126,6 +126,10 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
return;
}
+ /* Handle multipath entry with lower priority value */
+ if (mp->mfi && mp->mfi != fi && fi->fib_priority >= mp->mfi->fib_priority)
+ return;
+
/* Handle add/replace event */
nhs = fib_info_num_path(fi);
if (nhs == 1) {
@@ -135,12 +139,13 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);
if (i < 0)
- i = MLX5_LAG_NORMAL_AFFINITY;
- else
- ++i;
+ return;
+ i++;
mlx5_lag_set_port_affinity(ldev, i);
}
+
+ mp->mfi = fi;
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index 1e8ec4f236b2..df58cba37930 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -121,9 +121,6 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
{
- if (!mlx5_chains_prios_supported(chains))
- return 1;
-
if (mlx5_chains_ignore_flow_level_supported(chains))
return UINT_MAX;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h
index 4bad6a5fde56..f240ffe5116c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h
@@ -92,13 +92,6 @@ mlx5_hv_vhca_agent_create(struct mlx5_hv_vhca *hv_vhca,
static inline void mlx5_hv_vhca_agent_destroy(struct mlx5_hv_vhca_agent *agent)
{
}
-
-static inline int
-mlx5_hv_vhca_write_agent(struct mlx5_hv_vhca_agent *agent,
- void *buf, int len)
-{
- return 0;
-}
#endif
#endif /* __LIB_HV_VHCA_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
index e042e0924079..4571c56ec3c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/port.h>
#include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c
new file mode 100644
index 000000000000..9b8c051ccf65
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#include <linux/kernel.h>
+#include <linux/mlx5/driver.h>
+
+#include "smfs.h"
+
+struct mlx5dr_matcher *
+mlx5_smfs_matcher_create(struct mlx5dr_table *table, u32 priority, struct mlx5_flow_spec *spec)
+{
+ struct mlx5dr_match_parameters matcher_mask = {};
+
+ matcher_mask.match_buf = (u64 *)&spec->match_criteria;
+ matcher_mask.match_sz = DR_SZ_MATCH_PARAM;
+
+ return mlx5dr_matcher_create(table, priority, spec->match_criteria_enable, &matcher_mask);
+}
+
+void
+mlx5_smfs_matcher_destroy(struct mlx5dr_matcher *matcher)
+{
+ mlx5dr_matcher_destroy(matcher);
+}
+
+struct mlx5dr_table *
+mlx5_smfs_table_get_from_fs_ft(struct mlx5_flow_table *ft)
+{
+ return mlx5dr_table_get_from_fs_ft(ft);
+}
+
+struct mlx5dr_action *
+mlx5_smfs_action_create_dest_table(struct mlx5dr_table *table)
+{
+ return mlx5dr_action_create_dest_table(table);
+}
+
+struct mlx5dr_action *
+mlx5_smfs_action_create_flow_counter(u32 counter_id)
+{
+ return mlx5dr_action_create_flow_counter(counter_id);
+}
+
+void
+mlx5_smfs_action_destroy(struct mlx5dr_action *action)
+{
+ mlx5dr_action_destroy(action);
+}
+
+struct mlx5dr_rule *
+mlx5_smfs_rule_create(struct mlx5dr_matcher *matcher, struct mlx5_flow_spec *spec,
+ size_t num_actions, struct mlx5dr_action *actions[],
+ u32 flow_source)
+{
+ struct mlx5dr_match_parameters value = {};
+
+ value.match_buf = (u64 *)spec->match_value;
+ value.match_sz = DR_SZ_MATCH_PARAM;
+
+ return mlx5dr_rule_create(matcher, &value, num_actions, actions, flow_source);
+}
+
+void
+mlx5_smfs_rule_destroy(struct mlx5dr_rule *rule)
+{
+ mlx5dr_rule_destroy(rule);
+}
+
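The new smfs.c wrappers are thin shims over the mlx5dr_* API so that callers outside the steering directory avoid the dr_types internals. A hedged sketch of how the calls compose (smfs_offload_one() is illustrative; the NULL-on-failure convention and error codes are assumptions):

static int smfs_offload_one(struct mlx5_flow_table *ft,
			    struct mlx5_flow_spec *spec, u32 counter_id)
{
	struct mlx5dr_table *tbl = mlx5_smfs_table_get_from_fs_ft(ft);
	struct mlx5dr_action *actions[1];
	struct mlx5dr_matcher *matcher;
	struct mlx5dr_rule *rule;

	matcher = mlx5_smfs_matcher_create(tbl, 0, spec);
	if (!matcher)
		return -ENOMEM;

	actions[0] = mlx5_smfs_action_create_flow_counter(counter_id);
	if (!actions[0]) {
		mlx5_smfs_matcher_destroy(matcher);
		return -ENOMEM;
	}

	rule = mlx5_smfs_rule_create(matcher, spec, 1, actions, 0);
	if (!rule) {
		mlx5_smfs_action_destroy(actions[0]);
		mlx5_smfs_matcher_destroy(matcher);
		return -ENOMEM;
	}

	/* Teardown later runs in reverse order: rule, action, matcher. */
	return 0;
}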
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h
new file mode 100644
index 000000000000..452d0df339ac
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5_LIB_SMFS_H__
+#define __MLX5_LIB_SMFS_H__
+
+#include "steering/mlx5dr.h"
+#include "steering/dr_types.h"
+
+struct mlx5dr_matcher *
+mlx5_smfs_matcher_create(struct mlx5dr_table *table, u32 priority, struct mlx5_flow_spec *spec);
+
+void
+mlx5_smfs_matcher_destroy(struct mlx5dr_matcher *matcher);
+
+struct mlx5dr_table *
+mlx5_smfs_table_get_from_fs_ft(struct mlx5_flow_table *ft);
+
+struct mlx5dr_action *
+mlx5_smfs_action_create_dest_table(struct mlx5dr_table *table);
+
+struct mlx5dr_action *
+mlx5_smfs_action_create_flow_counter(u32 counter_id);
+
+void
+mlx5_smfs_action_destroy(struct mlx5dr_action *action);
+
+struct mlx5dr_rule *
+mlx5_smfs_rule_create(struct mlx5dr_matcher *matcher, struct mlx5_flow_spec *spec,
+ size_t num_actions, struct mlx5dr_action *actions[],
+ u32 flow_source);
+
+void
+mlx5_smfs_rule_destroy(struct mlx5dr_rule *rule);
+
+#endif /* __MLX5_LIB_SMFS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index e3b0a131c3e1..d55e15c1f380 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/mlx5/driver.h>
#include <net/vxlan.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index bba72b220cc3..2589e39eb9c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -736,10 +736,9 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
err = mlx5_cmd_exec_inout(dev, query_issi, query_in, query_out);
if (err) {
- u32 syndrome;
- u8 status;
+ u32 syndrome = MLX5_GET(query_issi_out, query_out, syndrome);
+ u8 status = MLX5_GET(query_issi_out, query_out, status);
- mlx5_cmd_mbox_status(query_out, &status, &syndrome);
if (!status || syndrome == MLX5_DRIVER_SYND) {
mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
err, status, syndrome);
@@ -1488,8 +1487,8 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
INIT_LIST_HEAD(&priv->pgdir_list);
priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));
- priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
- mlx5_debugfs_root);
+ priv->dbg.dbg_root = debugfs_create_dir(dev_name(dev->device),
+ mlx5_debugfs_root);
INIT_LIST_HEAD(&priv->traps);
err = mlx5_tout_init(dev);
@@ -1525,7 +1524,7 @@ err_pagealloc_init:
err_health_init:
mlx5_tout_cleanup(dev);
err_timeout_init:
- debugfs_remove(dev->priv.dbg_root);
+ debugfs_remove(dev->priv.dbg.dbg_root);
mutex_destroy(&priv->pgdir_mutex);
mutex_destroy(&priv->alloc_mutex);
mutex_destroy(&priv->bfregs.wc_head.lock);
@@ -1543,7 +1542,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
mlx5_tout_cleanup(dev);
- debugfs_remove_recursive(dev->priv.dbg_root);
+ debugfs_remove_recursive(dev->priv.dbg.dbg_root);
mutex_destroy(&priv->pgdir_mutex);
mutex_destroy(&priv->alloc_mutex);
mutex_destroy(&priv->bfregs.wc_head.lock);
@@ -1620,6 +1619,7 @@ static void remove_one(struct pci_dev *pdev)
struct devlink *devlink = priv_to_devlink(dev);
devlink_unregister(devlink);
+ mlx5_sriov_disable(pdev);
mlx5_crdump_disable(dev);
mlx5_drain_health_wq(dev);
mlx5_uninit_one(dev);
@@ -1882,6 +1882,50 @@ static struct pci_driver mlx5_core_driver = {
.sriov_set_msix_vec_count = mlx5_core_sriov_set_msix_vec_count,
};
+/**
+ * mlx5_vf_get_core_dev - Get the mlx5 core device from a given VF PCI device if
+ * mlx5_core is its driver.
+ * @pdev: The associated PCI device.
+ *
+ * Upon return the interface state lock stays held so the caller can use it safely.
+ * The caller must use the returned mlx5 device only for a narrow window
+ * and put it back with mlx5_vf_put_core_dev() immediately once usage is over.
+ *
+ * Return: Pointer to the associated mlx5_core_dev or NULL.
+ */
+struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev)
+ __acquires(&mdev->intf_state_mutex)
+{
+ struct mlx5_core_dev *mdev;
+
+ mdev = pci_iov_get_pf_drvdata(pdev, &mlx5_core_driver);
+ if (IS_ERR(mdev))
+ return NULL;
+
+ mutex_lock(&mdev->intf_state_mutex);
+ if (!test_bit(MLX5_INTERFACE_STATE_UP, &mdev->intf_state)) {
+ mutex_unlock(&mdev->intf_state_mutex);
+ return NULL;
+ }
+
+ return mdev;
+}
+EXPORT_SYMBOL(mlx5_vf_get_core_dev);
+
+/**
+ * mlx5_vf_put_core_dev - Put the mlx5 core device back.
+ * @mdev: The mlx5 core device.
+ *
+ * Upon return the interface state lock is unlocked and the caller must not
+ * access the mdev any more.
+ */
+void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev)
+ __releases(&mdev->intf_state_mutex)
+{
+ mutex_unlock(&mdev->intf_state_mutex);
+}
+EXPORT_SYMBOL(mlx5_vf_put_core_dev);
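The pair above defines a bracketed access protocol: the get takes the PF's
intf_state_mutex and the put releases it. A minimal usage sketch follows,
assuming a hypothetical VF-side caller (example_read_pf_state is illustrative
and not part of this patch):

/* Minimal usage sketch, assuming a hypothetical VF-side caller.
 * The PF core device may only be touched between get and put, since
 * mlx5_vf_put_core_dev() releases the lock taken by the get.
 */
static int example_read_pf_state(struct pci_dev *vf_pdev)
{
	struct mlx5_core_dev *mdev;

	mdev = mlx5_vf_get_core_dev(vf_pdev);
	if (!mdev)
		return -ENODEV; /* PF not bound to mlx5_core, or not up */

	/* ... narrow window: query or configure the PF via mdev ... */

	mlx5_vf_put_core_dev(mdev); /* unlocks intf_state_mutex */
	return 0;
}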
+
static void mlx5_core_verify_params(void)
{
if (prof_sel >= ARRAY_SIZE(profile)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
index e019d68062d8..495cca58dccc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 6f8baa0f2a73..a9b2d6ead542 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -164,6 +164,7 @@ void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
int mlx5_sriov_attach(struct mlx5_core_dev *dev);
void mlx5_sriov_detach(struct mlx5_core_dev *dev);
int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
+void mlx5_sriov_disable(struct pci_dev *pdev);
int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count);
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
@@ -176,7 +177,6 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
u32 element_id);
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages);
-void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev);
void mlx5_cmd_flush(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index f099a087400e..9d735c343a3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index f6b5451328fc..ec76a8b1acc1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -32,7 +32,6 @@
#include <linux/highmem.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/xarray.h>
@@ -327,11 +326,12 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
}
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
- int notify_fail, bool ec_function)
+ int event, bool ec_function)
{
u32 function = get_function(func_id, ec_function);
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
+ int notify_fail = event;
u64 addr;
int err;
u32 *in;
@@ -351,8 +351,10 @@ retry:
if (err) {
if (err == -ENOMEM)
err = alloc_system_page(dev, function);
- if (err)
+ if (err) {
+ dev->priv.fw_pages_alloc_failed += (npages - i);
goto out_4k;
+ }
goto retry;
}
@@ -365,11 +367,20 @@ retry:
MLX5_SET(manage_pages_in, in, input_num_entries, npages);
MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_do(dev, in, inlen, out, sizeof(out));
+ if (err == -EREMOTEIO) {
+ notify_fail = 0;
+ /* if the request was both triggered and failed by FW, ignore it */
+ if (event) {
+ err = 0;
+ goto out_dropped;
+ }
+ }
if (err) {
+ err = mlx5_cmd_check(dev, err, in, out);
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
func_id, npages, err);
- goto out_4k;
+ goto out_dropped;
}
dev->priv.fw_pages += npages;
@@ -384,6 +395,8 @@ retry:
kvfree(in);
return 0;
+out_dropped:
+ dev->priv.give_pages_dropped += npages;
out_4k:
for (i--; i >= 0; i--)
free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
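The hunk above is part of a wider migration from mlx5_cmd_exec() to the
mlx5_cmd_do()/mlx5_cmd_check() pair: mlx5_cmd_do() does not log on failure
and returns -EREMOTEIO when the FW itself reported bad status, so the caller
can silently drop failures of FW-triggered requests and only translate and
log the rest. A condensed sketch of the pattern, using the signatures seen in
this patch (example_exec_cmd is illustrative, not part of the driver):

static int example_exec_cmd(struct mlx5_core_dev *dev, void *in, int inlen,
			    void *out, int outlen, bool fw_triggered)
{
	int err = mlx5_cmd_do(dev, in, inlen, out, outlen);

	if (fw_triggered && err == -EREMOTEIO)
		return 0; /* triggered by FW and failed by FW: ignore */
	if (err)
		err = mlx5_cmd_check(dev, err, in, out); /* map status + log */
	return err;
}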
@@ -455,7 +468,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
u32 i = 0;
if (!mlx5_cmd_is_down(dev))
- return mlx5_cmd_exec(dev, in, in_size, out, out_size);
+ return mlx5_cmd_do(dev, in, in_size, out, out_size);
/* No hard feelings, we want our pages back! */
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
@@ -479,7 +492,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
}
static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
- int *nclaimed, bool ec_function)
+ int *nclaimed, bool event, bool ec_function)
{
u32 function = get_function(func_id, ec_function);
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
@@ -507,6 +520,14 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
func_id, npages, outlen);
err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
if (err) {
+ npages = MLX5_GET(manage_pages_in, in, input_num_entries);
+ dev->priv.reclaim_pages_discard += npages;
+ }
+ /* if the reclaim was triggered by a FW event and failed by FW, ignore it */
+ if (event && err == -EREMOTEIO)
+ err = 0;
+ if (err) {
+ err = mlx5_cmd_check(dev, err, in, out);
mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}
@@ -546,7 +567,7 @@ static void pages_work_handler(struct work_struct *work)
release_all_pages(dev, req->func_id, req->ec_function);
else if (req->npages < 0)
err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
- req->ec_function);
+ true, req->ec_function);
else if (req->npages > 0)
err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);
@@ -645,7 +666,7 @@ static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
int err;
err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
- &nclaimed, mlx5_core_is_ecpf(dev));
+ &nclaimed, false, mlx5_core_is_ecpf(dev));
if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
err, func_id);
@@ -700,12 +721,14 @@ int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
return -ENOMEM;
xa_init(&dev->priv.page_root_xa);
+ mlx5_pages_debugfs_init(dev);
return 0;
}
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
+ mlx5_pages_debugfs_cleanup(dev);
xa_destroy(&dev->priv.page_root_xa);
destroy_workqueue(dev->priv.pg_wq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 41807ef55201..db77f1d2eeb4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -3,7 +3,6 @@
#include <linux/interrupt.h>
#include <linux/notifier.h>
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "mlx5_irq.h"
@@ -601,7 +600,8 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev)
if (mlx5_core_is_sf(dev))
return 0;
- irq_table = kvzalloc(sizeof(*irq_table), GFP_KERNEL);
+ irq_table = kvzalloc_node(sizeof(*irq_table), GFP_KERNEL,
+ dev->priv.numa_node);
if (!irq_table)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
index aabc53ad8bdd..ee5ffdeb9015 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 7b16a1188aab..e1bd54574ea5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -33,9 +33,10 @@
#include <linux/mlx5/port.h>
#include "mlx5_core.h"
-int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
- int size_in, void *data_out, int size_out,
- u16 reg_id, int arg, int write)
+/* Calling with verbose set to false will not print errors to the log */
+int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in,
+ void *data_out, int size_out, u16 reg_id, int arg,
+ int write, bool verbose)
{
int outlen = MLX5_ST_SZ_BYTES(access_register_out) + size_out;
int inlen = MLX5_ST_SZ_BYTES(access_register_in) + size_in;
@@ -57,7 +58,9 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
MLX5_SET(access_register_in, in, argument, arg);
MLX5_SET(access_register_in, in, register_id, reg_id);
- err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
+ err = mlx5_cmd_do(dev, in, inlen, out, outlen);
+ if (verbose)
+ err = mlx5_cmd_check(dev, err, in, out);
if (err)
goto out;
@@ -69,6 +72,15 @@ out:
kvfree(in);
return err;
}
+EXPORT_SYMBOL_GPL(mlx5_access_reg);
+
+int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
+ int size_in, void *data_out, int size_out,
+ u16 reg_id, int arg, int write)
+{
+ return mlx5_access_reg(dev, data_in, size_in, data_out, size_out,
+ reg_id, arg, write, true);
+}
EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
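mlx5_access_reg() exposes the verbose knob so that callers probing for an
optionally supported register can fail quietly, while mlx5_core_access_reg()
keeps the old always-logging behavior. A hedged sketch of a quiet probe
(example_probe_reg and its parameters are illustrative):

static int example_probe_reg(struct mlx5_core_dev *dev, u16 reg_id,
			     void *in, int size_in, void *out, int size_out)
{
	/* write = 0 (read), arg = 0, verbose = false: no log on failure */
	return mlx5_access_reg(dev, in, size_in, out, size_out,
			       reg_id, 0, 0, false);
}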
int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
@@ -263,7 +275,6 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
{
u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pmlp_reg)];
- int module_mapping;
int err;
MLX5_SET(pmlp_reg, in, local_port, 1);
@@ -272,8 +283,9 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
if (err)
return err;
- module_mapping = MLX5_GET(pmlp_reg, out, lane0_module_mapping);
- *module_num = module_mapping & MLX5_EEPROM_IDENTIFIER_BYTE_MASK;
+ *module_num = MLX5_GET(lane_2_module_mapping,
+ MLX5_ADDR_OF(pmlp_reg, out, lane0_module_mapping),
+ module);
return 0;
}
@@ -353,6 +365,12 @@ static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset
*offset -= MLX5_EEPROM_PAGE_LENGTH;
}
+static int mlx5_mcia_max_bytes(struct mlx5_core_dev *dev)
+{
+ /* mcia supports either 12 dwords or 32 dwords */
+ return (MLX5_CAP_MCAM_FEATURE(dev, mcia_32dwords) ? 32 : 12) * sizeof(u32);
+}
+
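As a worked example of the helper above: with mcia_32dwords set, the cap
yields 32 * sizeof(u32) = 128 bytes per MCIA access; otherwise
12 * sizeof(u32) = 48 bytes, which matches the fixed MLX5_EEPROM_MAX_BYTES
limit that the next hunk replaces (assuming that macro is the legacy
48-byte cap).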
static int mlx5_query_mcia(struct mlx5_core_dev *dev,
struct mlx5_module_eeprom_query_params *params, u8 *data)
{
@@ -362,7 +380,7 @@ static int mlx5_query_mcia(struct mlx5_core_dev *dev,
void *ptr;
u16 size;
- size = min_t(int, params->size, MLX5_EEPROM_MAX_BYTES);
+ size = min_t(int, params->size, mlx5_mcia_max_bytes(dev));
MLX5_SET(mcia_reg, in, l, 0);
MLX5_SET(mcia_reg, in, size, size);
@@ -433,35 +451,12 @@ int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
struct mlx5_module_eeprom_query_params *params,
u8 *data)
{
- u8 module_id;
int err;
err = mlx5_query_module_num(dev, &params->module_number);
if (err)
return err;
- err = mlx5_query_module_id(dev, params->module_number, &module_id);
- if (err)
- return err;
-
- switch (module_id) {
- case MLX5_MODULE_ID_SFP:
- if (params->page > 0)
- return -EINVAL;
- break;
- case MLX5_MODULE_ID_QSFP:
- case MLX5_MODULE_ID_QSFP28:
- case MLX5_MODULE_ID_QSFP_PLUS:
- if (params->page > 3)
- return -EINVAL;
- break;
- case MLX5_MODULE_ID_DSFP:
- break;
- default:
- mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
- return -EINVAL;
- }
-
if (params->i2c_address != MLX5_I2C_ADDR_HIGH &&
params->i2c_address != MLX5_I2C_ADDR_LOW) {
mlx5_core_err(dev, "I2C address not recognized: 0x%x\n", params->i2c_address);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index 7161220afe30..9f8b4005f4bd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index e8185b69ac6c..887ee0f729d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -161,7 +161,7 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
return err;
}
-static void mlx5_sriov_disable(struct pci_dev *pdev)
+void mlx5_sriov_disable(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
int num_vfs = pci_num_vf(dev->pdev);
@@ -205,19 +205,8 @@ int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count)
mlx5_get_default_msix_vec_count(dev, pci_num_vf(pf));
sriov = &dev->priv.sriov;
-
- /* Reversed translation of PCI VF function number to the internal
- * function_id, which exists in the name of virtfn symlink.
- */
- for (id = 0; id < pci_num_vf(pf); id++) {
- if (!sriov->vfs_ctx[id].enabled)
- continue;
-
- if (vf->devfn == pci_iov_virtfn_devfn(pf, id))
- break;
- }
-
- if (id == pci_num_vf(pf) || !sriov->vfs_ctx[id].enabled)
+ id = pci_iov_vf_id(vf);
+ if (id < 0 || !sriov->vfs_ctx[id].enabled)
return -EINVAL;
return mlx5_set_msix_vec_count(dev, id + 1, msix_vec_count);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index c61a5e83c78c..850937cd8bf9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -570,6 +570,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
for (i = 0; i < num_actions; i++) {
struct mlx5dr_action_dest_tbl *dest_tbl;
+ struct mlx5dr_icm_chunk *chunk;
struct mlx5dr_action *action;
int max_actions_type = 1;
u32 action_type;
@@ -598,9 +599,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
matcher->tbl->level,
dest_tbl->tbl->level);
}
- attr.final_icm_addr = rx_rule ?
- dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
- dest_tbl->tbl->tx.s_anchor->chunk->icm_addr;
+ chunk = rx_rule ? dest_tbl->tbl->rx.s_anchor->chunk :
+ dest_tbl->tbl->tx.s_anchor->chunk;
+ attr.final_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(chunk);
} else {
struct mlx5dr_cmd_query_flow_table_details output;
int ret;
@@ -669,15 +670,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
case DR_ACTION_TYP_VPORT:
attr.hit_gvmi = action->vport->caps->vhca_gvmi;
dest_action = action;
- if (rx_rule) {
- if (action->vport->caps->num == MLX5_VPORT_UPLINK) {
- mlx5dr_dbg(dmn, "Device doesn't support Loopback on WIRE vport\n");
- return -EOPNOTSUPP;
- }
- attr.final_icm_addr = action->vport->caps->icm_address_rx;
- } else {
- attr.final_icm_addr = action->vport->caps->icm_address_tx;
- }
+ attr.final_icm_addr = rx_rule ?
+ action->vport->caps->icm_address_rx :
+ action->vport->caps->icm_address_tx;
break;
case DR_ACTION_TYP_POP_VLAN:
if (!rx_rule && !(dmn->ste_ctx->actions_caps &
@@ -1129,7 +1124,8 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
}
action->rewrite->data = (void *)hw_actions;
- action->rewrite->index = (action->rewrite->chunk->icm_addr -
+ action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr
+ (action->rewrite->chunk) -
dmn->info.caps.hdr_modify_icm_addr) /
ACTION_CACHE_LINE_SIZE;
@@ -1708,7 +1704,7 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
action->rewrite->modify_ttl = modify_ttl;
action->rewrite->data = (u8 *)hw_actions;
action->rewrite->num_of_actions = num_hw_actions;
- action->rewrite->index = (chunk->icm_addr -
+ action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr(chunk) -
dmn->info.caps.hdr_modify_icm_addr) /
ACTION_CACHE_LINE_SIZE;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
index 2784cd59fefe..d5998ef59be4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -3,7 +3,6 @@
#include <linux/debugfs.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/seq_file.h>
#include "dr_types.h"
@@ -218,7 +217,8 @@ dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V1;
}
- dr_dump_hex_print(hw_ste_dump, (char *)ste->hw_ste, DR_STE_SIZE_REDUCED);
+ dr_dump_hex_print(hw_ste_dump, (char *)mlx5dr_ste_get_hw_ste(ste),
+ DR_STE_SIZE_REDUCED);
seq_printf(file, "%d,0x%llx,0x%llx,%s\n", mem_rec_type,
dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)), rule_id,
@@ -347,16 +347,19 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
const u64 matcher_id)
{
enum dr_dump_rec_type rec_type;
+ u64 s_icm_addr, e_icm_addr;
int i, ret;
rec_type = is_rx ? DR_DUMP_REC_TYPE_MATCHER_RX :
DR_DUMP_REC_TYPE_MATCHER_TX;
+ s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->s_htbl->chunk);
+ e_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->e_anchor->chunk);
seq_printf(file, "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
matcher_id, matcher_rx_tx->num_of_builders,
- dr_dump_icm_to_idx(matcher_rx_tx->s_htbl->chunk->icm_addr),
- dr_dump_icm_to_idx(matcher_rx_tx->e_anchor->chunk->icm_addr));
+ dr_dump_icm_to_idx(s_icm_addr),
+ dr_dump_icm_to_idx(e_icm_addr));
for (i = 0; i < matcher_rx_tx->num_of_builders; i++) {
ret = dr_dump_matcher_builder(file,
@@ -427,12 +430,14 @@ dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
const u64 table_id)
{
enum dr_dump_rec_type rec_type;
+ u64 s_icm_addr;
rec_type = is_rx ? DR_DUMP_REC_TYPE_TABLE_RX :
DR_DUMP_REC_TYPE_TABLE_TX;
+ s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(table_rx_tx->s_anchor->chunk);
seq_printf(file, "%d,0x%llx,0x%llx\n", rec_type, table_id,
- dr_dump_icm_to_idx(table_rx_tx->s_anchor->chunk->icm_addr));
+ dr_dump_icm_to_idx(s_icm_addr));
return 0;
}
@@ -630,7 +635,7 @@ void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn)
}
dmn->dump_info.steering_debugfs =
- debugfs_create_dir("steering", dev->priv.dbg_root);
+ debugfs_create_dir("steering", mlx5_debugfs_get_dev_root(dev));
dmn->dump_info.fdb_debugfs =
debugfs_create_dir("fdb", dmn->dump_info.steering_debugfs);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 5fa7f9d6d8b9..fc6ae49b5ecc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -8,7 +8,7 @@
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
((dmn)->info.caps.dmn_type##_sw_owner || \
((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
- (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))
+ (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7))
static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index e289cfdbce07..4ca67fa24cc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -57,6 +57,36 @@ static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
return mlx5_core_create_mkey(mdev, mkey, in, inlen);
}
+u64 mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk *chunk)
+{
+ u32 offset = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
+
+ return (u64)offset * chunk->seg;
+}
+
+u32 mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk *chunk)
+{
+ return chunk->buddy_mem->icm_mr->mkey;
+}
+
+u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk)
+{
+ u32 size = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
+
+ return (u64)chunk->buddy_mem->icm_mr->icm_start_addr + size * chunk->seg;
+}
+
+u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk)
+{
+ return mlx5dr_icm_pool_chunk_size_to_byte(chunk->size,
+ chunk->buddy_mem->pool->icm_type);
+}
+
+u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk)
+{
+ return mlx5dr_icm_pool_chunk_size_to_entries(chunk->size);
+}
+
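These getters recompute values that were previously cached per chunk (rkey,
mr_addr, icm_addr, byte_size, num_of_entries; see the fields dropped from
dr_icm_chunk_create() below) from buddy_mem, seg and size alone, shrinking
struct mlx5dr_icm_chunk. For example, assuming an STE pool whose entry size
is 64 bytes, a chunk at seg 5 derives mr_addr = 64 * 5 = 320 and
icm_addr = icm_start_addr + 320.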
static struct mlx5dr_icm_mr *
dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
{
@@ -158,12 +188,13 @@ static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
{
+ int num_of_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
memset(chunk->hw_ste_arr, 0,
- chunk->num_of_entries * dr_icm_buddy_get_ste_size(buddy));
+ num_of_entries * dr_icm_buddy_get_ste_size(buddy));
memset(chunk->ste_arr, 0,
- chunk->num_of_entries * sizeof(chunk->ste_arr[0]));
+ num_of_entries * sizeof(chunk->ste_arr[0]));
}
static enum mlx5dr_icm_type
@@ -177,7 +208,7 @@ static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk,
{
enum mlx5dr_icm_type icm_type = get_chunk_icm_type(chunk);
- buddy->used_memory -= chunk->byte_size;
+ buddy->used_memory -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
list_del(&chunk->chunk_list);
if (icm_type == DR_ICM_TYPE_STE)
@@ -298,21 +329,14 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;
- chunk->rkey = buddy_mem_pool->icm_mr->mkey;
- chunk->mr_addr = offset;
- chunk->icm_addr =
- (uintptr_t)buddy_mem_pool->icm_mr->icm_start_addr + offset;
- chunk->num_of_entries =
- mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
- chunk->byte_size =
- mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type);
chunk->seg = seg;
+ chunk->size = chunk_size;
chunk->buddy_mem = buddy_mem_pool;
if (pool->icm_type == DR_ICM_TYPE_STE)
dr_icm_chunk_ste_init(chunk, offset);
- buddy_mem_pool->used_memory += chunk->byte_size;
+ buddy_mem_pool->used_memory += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
INIT_LIST_HEAD(&chunk->chunk_list);
/* chunk now is part of the used_list */
@@ -336,6 +360,7 @@ static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
{
struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
+ u32 num_entries;
int err;
err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
@@ -348,9 +373,9 @@ static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
struct mlx5dr_icm_chunk *chunk, *tmp_chunk;
list_for_each_entry_safe(chunk, tmp_chunk, &buddy->hot_list, chunk_list) {
- mlx5dr_buddy_free_mem(buddy, chunk->seg,
- ilog2(chunk->num_of_entries));
- pool->hot_memory_size -= chunk->byte_size;
+ num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
+ mlx5dr_buddy_free_mem(buddy, chunk->seg, ilog2(num_entries));
+ pool->hot_memory_size -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
dr_icm_chunk_destroy(chunk, buddy);
}
@@ -448,7 +473,7 @@ void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
/* move the memory to the waiting list AKA "hot" */
mutex_lock(&pool->mutex);
list_move_tail(&chunk->chunk_list, &buddy->hot_list);
- pool->hot_memory_size += chunk->byte_size;
+ pool->hot_memory_size += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
/* Check if we have chunks that are waiting for sync-ste */
if (dr_icm_pool_is_sync_required(pool))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index 38971fe1dfe1..0726848eb3ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -47,6 +47,11 @@ static bool dr_mask_is_ttl_set(struct mlx5dr_match_spec *spec)
return spec->ttl_hoplimit;
}
+static bool dr_mask_is_ipv4_ihl_set(struct mlx5dr_match_spec *spec)
+{
+ return spec->ipv4_ihl;
+}
+
#define DR_MASK_IS_L2_DST(_spec, _misc, _inner_outer) (_spec.first_vid || \
(_spec).first_cfi || (_spec).first_prio || (_spec).cvlan_tag || \
(_spec).svlan_tag || (_spec).dmac_47_16 || (_spec).dmac_15_0 || \
@@ -103,7 +108,7 @@ dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
static bool
dr_matcher_supp_vxlan_gpe(struct mlx5dr_cmd_caps *caps)
{
- return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+ return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
(caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED);
}
@@ -144,7 +149,7 @@ static bool dr_mask_is_tnl_geneve_tlv_opt_exist_set(struct mlx5dr_match_misc *mi
static bool
dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps)
{
- return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+ return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
(caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED);
}
@@ -261,13 +266,13 @@ static bool dr_mask_is_tnl_gtpu_any(struct mlx5dr_match_param *mask,
static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps)
{
- return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+ return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED);
}
static int dr_matcher_supp_icmp_v6(struct mlx5dr_cmd_caps *caps)
{
- return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+ return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED);
}
@@ -507,7 +512,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
&mask, inner, rx);
- if (dr_mask_is_ttl_set(&mask.outer))
+ if (dr_mask_is_ttl_set(&mask.outer) ||
+ dr_mask_is_ipv4_ihl_set(&mask.outer))
mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
&mask, inner, rx);
}
@@ -614,7 +620,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
&mask, inner, rx);
- if (dr_mask_is_ttl_set(&mask.inner))
+ if (dr_mask_is_ttl_set(&mask.inner) ||
+ dr_mask_is_ipv4_ihl_set(&mask.inner))
mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
&mask, inner, rx);
}
@@ -698,7 +705,7 @@ static int dr_nic_matcher_connect(struct mlx5dr_domain *dmn,
/* Connect start hash table to end anchor */
info.type = CONNECT_MISS;
- info.miss_icm_addr = curr_nic_matcher->e_anchor->chunk->icm_addr;
+ info.miss_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(curr_nic_matcher->e_anchor->chunk);
ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
curr_nic_matcher->s_htbl,
&info, false);
@@ -719,12 +726,14 @@ static int dr_nic_matcher_connect(struct mlx5dr_domain *dmn,
return ret;
/* Update the pointing ste and next hash table */
- curr_nic_matcher->s_htbl->pointing_ste = prev_htbl->ste_arr;
- prev_htbl->ste_arr[0].next_htbl = curr_nic_matcher->s_htbl;
+ curr_nic_matcher->s_htbl->pointing_ste = prev_htbl->chunk->ste_arr;
+ prev_htbl->chunk->ste_arr[0].next_htbl = curr_nic_matcher->s_htbl;
if (next_nic_matcher) {
- next_nic_matcher->s_htbl->pointing_ste = curr_nic_matcher->e_anchor->ste_arr;
- curr_nic_matcher->e_anchor->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
+ next_nic_matcher->s_htbl->pointing_ste =
+ curr_nic_matcher->e_anchor->chunk->ste_arr;
+ curr_nic_matcher->e_anchor->chunk->ste_arr[0].next_htbl =
+ next_nic_matcher->s_htbl;
}
return 0;
@@ -1036,12 +1045,12 @@ static int dr_matcher_disconnect_nic(struct mlx5dr_domain *dmn,
if (next_nic_matcher) {
info.type = CONNECT_HIT;
info.hit_next_htbl = next_nic_matcher->s_htbl;
- next_nic_matcher->s_htbl->pointing_ste = prev_anchor->ste_arr;
- prev_anchor->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
+ next_nic_matcher->s_htbl->pointing_ste = prev_anchor->chunk->ste_arr;
+ prev_anchor->chunk->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
} else {
info.type = CONNECT_MISS;
info.miss_icm_addr = nic_tbl->default_icm_addr;
- prev_anchor->ste_arr[0].next_htbl = NULL;
+ prev_anchor->chunk->ste_arr[0].next_htbl = NULL;
}
return mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, prev_anchor,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index b4374578425b..ddfaf7891188 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -21,12 +21,12 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
if (!ste_info_last)
return -ENOMEM;
- mlx5dr_ste_set_miss_addr(ste_ctx, last_ste->hw_ste,
+ mlx5dr_ste_set_miss_addr(ste_ctx, mlx5dr_ste_get_hw_ste(last_ste),
mlx5dr_ste_get_icm_addr(new_last_ste));
list_add_tail(&new_last_ste->miss_list_node, miss_list);
mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_CTRL,
- 0, last_ste->hw_ste,
+ 0, mlx5dr_ste_get_hw_ste(last_ste),
ste_info_last, send_list, true);
return 0;
@@ -41,6 +41,7 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_htbl *new_htbl;
struct mlx5dr_ste *ste;
+ u64 icm_addr;
/* Create new table for miss entry */
new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
@@ -53,9 +54,9 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
}
/* One and only entry, never grows */
- ste = new_htbl->ste_arr;
- mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste,
- nic_matcher->e_anchor->chunk->icm_addr);
+ ste = new_htbl->chunk->ste_arr;
+ icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
+ mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);
mlx5dr_htbl_get(new_htbl);
return ste;
@@ -79,7 +80,7 @@ dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
ste->htbl->pointing_ste = orig_ste->htbl->pointing_ste;
/* In collision entry, all members share the same miss_list_head */
- ste->htbl->miss_list = mlx5dr_ste_get_miss_list(orig_ste);
+ ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(orig_ste);
/* Next table */
if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
@@ -107,9 +108,11 @@ dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
* is already written to the hw.
*/
if (ste_info->size == DR_STE_SIZE_CTRL)
- memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_CTRL);
+ memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
+ ste_info->data, DR_STE_SIZE_CTRL);
else
- memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);
+ memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
+ ste_info->data, DR_STE_SIZE_REDUCED);
ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
ste_info->size, ste_info->offset);
@@ -159,7 +162,7 @@ dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
/* Check if hw_ste is present in the list */
list_for_each_entry(ste, miss_list, miss_list_node) {
- if (mlx5dr_ste_equal_tag(ste->hw_ste, hw_ste))
+ if (mlx5dr_ste_equal_tag(mlx5dr_ste_get_hw_ste(ste), hw_ste))
return ste;
}
@@ -185,7 +188,7 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
new_ste->htbl->pointing_ste = col_ste->htbl->pointing_ste;
/* In collision entry, all members share the same miss_list_head */
- new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);
+ new_ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(col_ste);
/* Update the previous from the list */
ret = dr_rule_append_to_miss_list(dmn->ste_ctx, new_ste,
@@ -235,6 +238,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
bool use_update_list = false;
u8 hw_ste[DR_STE_SIZE] = {};
struct mlx5dr_ste *new_ste;
+ u64 icm_addr;
int new_idx;
u8 sb_idx;
@@ -243,12 +247,12 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);
/* Copy STE control and tag */
- memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
- mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
- nic_matcher->e_anchor->chunk->icm_addr);
+ icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
+ memcpy(hw_ste, mlx5dr_ste_get_hw_ste(cur_ste), DR_STE_SIZE_REDUCED);
+ mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);
new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
- new_ste = &new_htbl->ste_arr[new_idx];
+ new_ste = &new_htbl->chunk->ste_arr[new_idx];
if (mlx5dr_ste_is_not_used(new_ste)) {
mlx5dr_htbl_get(new_htbl);
@@ -269,7 +273,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
use_update_list = true;
}
- memcpy(new_ste->hw_ste, hw_ste, DR_STE_SIZE_REDUCED);
+ memcpy(mlx5dr_ste_get_hw_ste(new_ste), hw_ste, DR_STE_SIZE_REDUCED);
new_htbl->ctrl.num_of_valid_entries++;
@@ -334,7 +338,7 @@ static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
int err = 0;
int i;
- cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk_size);
+ cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk->size);
if (cur_entries < 1) {
mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
@@ -342,7 +346,7 @@ static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
}
for (i = 0; i < cur_entries; i++) {
- cur_ste = &cur_htbl->ste_arr[i];
+ cur_ste = &cur_htbl->chunk->ste_arr[i];
if (mlx5dr_ste_is_not_used(cur_ste)) /* Empty, nothing to copy */
continue;
@@ -398,7 +402,7 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
/* Write new table to HW */
info.type = CONNECT_MISS;
- info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
+ info.miss_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
dmn->info.caps.gvmi,
nic_dmn->type,
@@ -446,21 +450,21 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
* (48B len) which works only on first 32B
*/
mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
- prev_htbl->ste_arr[0].hw_ste,
- new_htbl->chunk->icm_addr,
- new_htbl->chunk->num_of_entries);
+ prev_htbl->chunk->hw_ste_arr,
+ mlx5dr_icm_pool_get_chunk_icm_addr(new_htbl->chunk),
+ mlx5dr_icm_pool_get_chunk_num_of_entries(new_htbl->chunk));
- ste_to_update = &prev_htbl->ste_arr[0];
+ ste_to_update = &prev_htbl->chunk->ste_arr[0];
} else {
mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
- cur_htbl->pointing_ste->hw_ste,
+ mlx5dr_ste_get_hw_ste(cur_htbl->pointing_ste),
new_htbl);
ste_to_update = cur_htbl->pointing_ste;
}
mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_CTRL,
- 0, ste_to_update->hw_ste, ste_info,
- update_list, false);
+ 0, mlx5dr_ste_get_hw_ste(ste_to_update),
+ ste_info, update_list, false);
return new_htbl;
@@ -489,10 +493,10 @@ static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
enum mlx5dr_icm_chunk_size new_size;
- new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk_size);
+ new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk->size);
new_size = min_t(u32, new_size, dmn->info.max_log_sw_icm_sz);
- if (new_size == cur_htbl->chunk_size)
+ if (new_size == cur_htbl->chunk->size)
return NULL; /* Skip rehash, we already at the max size */
return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
@@ -659,13 +663,13 @@ static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
int threshold;
- if (dmn->info.max_log_sw_icm_sz <= htbl->chunk_size)
+ if (dmn->info.max_log_sw_icm_sz <= htbl->chunk->size)
return false;
if (!mlx5dr_ste_htbl_may_grow(htbl))
return false;
- if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
+ if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk->size)
return false;
threshold = mlx5dr_ste_htbl_increase_threshold(htbl);
@@ -755,6 +759,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_send_info *ste_info;
+ u64 icm_addr;
/* Take ref on table, only on first time this ste is used */
mlx5dr_htbl_get(cur_htbl);
@@ -762,8 +767,8 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
/* new entry -> new branch */
list_add_tail(&ste->miss_list_node, miss_list);
- mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
- nic_matcher->e_anchor->chunk->icm_addr);
+ icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
+ mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);
ste->ste_chain_location = ste_location;
@@ -822,7 +827,7 @@ dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
again:
index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
miss_list = &cur_htbl->chunk->miss_list[index];
- ste = &cur_htbl->ste_arr[index];
+ ste = &cur_htbl->chunk->ste_arr[index];
if (mlx5dr_ste_is_not_used(ste)) {
if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
@@ -858,7 +863,7 @@ again:
ste_location, send_ste_list);
if (!new_htbl) {
mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
- cur_htbl->chunk_size);
+ cur_htbl->chunk->size);
mlx5dr_htbl_put(cur_htbl);
} else {
cur_htbl = new_htbl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 00aef47d7682..ef19a66f5233 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -407,17 +407,17 @@ static int dr_get_tbl_copy_details(struct mlx5dr_domain *dmn,
int *iterations,
int *num_stes)
{
+ u32 chunk_byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
int alloc_size;
- if (htbl->chunk->byte_size > dmn->send_ring->max_post_send_size) {
- *iterations = htbl->chunk->byte_size /
- dmn->send_ring->max_post_send_size;
+ if (chunk_byte_size > dmn->send_ring->max_post_send_size) {
+ *iterations = chunk_byte_size / dmn->send_ring->max_post_send_size;
*byte_size = dmn->send_ring->max_post_send_size;
alloc_size = *byte_size;
*num_stes = *byte_size / DR_STE_SIZE;
} else {
*iterations = 1;
- *num_stes = htbl->chunk->num_of_entries;
+ *num_stes = mlx5dr_icm_pool_get_chunk_num_of_entries(htbl->chunk);
alloc_size = *num_stes * DR_STE_SIZE;
}
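A worked example of the split above, with illustrative sizes: for a 64 KiB
chunk and an 8 KiB max_post_send_size, iterations = 8 and, assuming
DR_STE_SIZE is 64 bytes, each iteration carries 8192 / 64 = 128 STEs; a
chunk at or below the ring limit goes out in a single post covering all of
its entries.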
@@ -453,7 +453,7 @@ int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn, struct mlx5dr_ste *ste,
send_info.write.length = size;
send_info.write.lkey = 0;
send_info.remote_addr = mlx5dr_ste_get_mr_addr(ste) + offset;
- send_info.rkey = ste->htbl->chunk->rkey;
+ send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(ste->htbl->chunk);
return dr_postsend_icm_data(dmn, &send_info);
}
@@ -462,7 +462,7 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
struct mlx5dr_ste_htbl *htbl,
u8 *formatted_ste, u8 *mask)
{
- u32 byte_size = htbl->chunk->byte_size;
+ u32 byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
int num_stes_per_iter;
int iterations;
u8 *data;
@@ -486,7 +486,7 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
* need to add the bit_mask
*/
for (j = 0; j < num_stes_per_iter; j++) {
- struct mlx5dr_ste *ste = &htbl->ste_arr[ste_index + j];
+ struct mlx5dr_ste *ste = &htbl->chunk->ste_arr[ste_index + j];
u32 ste_off = j * DR_STE_SIZE;
if (mlx5dr_ste_is_not_used(ste)) {
@@ -495,7 +495,8 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
} else {
/* Copy data */
memcpy(data + ste_off,
- htbl->ste_arr[ste_index + j].hw_ste,
+ htbl->chunk->hw_ste_arr +
+ DR_STE_SIZE_REDUCED * (ste_index + j),
DR_STE_SIZE_REDUCED);
/* Copy bit_mask */
memcpy(data + ste_off + DR_STE_SIZE_REDUCED,
@@ -511,8 +512,8 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
send_info.write.length = byte_size;
send_info.write.lkey = 0;
send_info.remote_addr =
- mlx5dr_ste_get_mr_addr(htbl->ste_arr + ste_index);
- send_info.rkey = htbl->chunk->rkey;
+ mlx5dr_ste_get_mr_addr(htbl->chunk->ste_arr + ste_index);
+ send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(htbl->chunk);
ret = dr_postsend_icm_data(dmn, &send_info);
if (ret)
@@ -530,7 +531,7 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
u8 *ste_init_data,
bool update_hw_ste)
{
- u32 byte_size = htbl->chunk->byte_size;
+ u32 byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
int iterations;
int num_stes;
u8 *copy_dst;
@@ -546,7 +547,7 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
if (update_hw_ste) {
/* Copy the reduced STE to hash table ste_arr */
for (i = 0; i < num_stes; i++) {
- copy_dst = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
+ copy_dst = htbl->chunk->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
memcpy(copy_dst, ste_init_data, DR_STE_SIZE_REDUCED);
}
}
@@ -568,8 +569,8 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
send_info.write.length = byte_size;
send_info.write.lkey = 0;
send_info.remote_addr =
- mlx5dr_ste_get_mr_addr(htbl->ste_arr + ste_index);
- send_info.rkey = htbl->chunk->rkey;
+ mlx5dr_ste_get_mr_addr(htbl->chunk->ste_arr + ste_index);
+ send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(htbl->chunk);
ret = dr_postsend_icm_data(dmn, &send_info);
if (ret)
@@ -591,8 +592,9 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
send_info.write.length = action->rewrite->num_of_actions *
DR_MODIFY_ACTION_SIZE;
send_info.write.lkey = 0;
- send_info.remote_addr = action->rewrite->chunk->mr_addr;
- send_info.rkey = action->rewrite->chunk->rkey;
+ send_info.remote_addr =
+ mlx5dr_icm_pool_get_chunk_mr_addr(action->rewrite->chunk);
+ send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(action->rewrite->chunk);
ret = dr_postsend_icm_data(dmn, &send_info);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 187e29b409b6..09ebd3088857 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -25,6 +25,7 @@ bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
{
+ u32 num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(htbl->chunk);
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
u8 masked[DR_STE_SIZE_TAG] = {};
u32 crc32, index;
@@ -32,7 +33,7 @@ u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
int i;
/* Don't calculate CRC if the result is predicted */
- if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0)
+ if (num_entries == 1 || htbl->byte_mask == 0)
return 0;
/* Mask tag using byte mask, bit per byte */
@@ -45,7 +46,7 @@ u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
}
crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
- index = crc32 & (htbl->chunk->num_of_entries - 1);
+ index = crc32 & (num_entries - 1);
return index;
}
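The function above masks the STE tag byte-by-byte (one byte_mask bit per tag
byte), CRC32s the result, and reduces it with an AND, which only works
because the table size is a power of two. A self-contained sketch of the
same idea (illustrative only: the real code hashes the STE tag layout with
dr_ste_crc32_calc(), and the 16-byte tag size is an assumption here):

#include <linux/crc32.h>

static u32 example_hash_index(const u8 *tag, u16 byte_mask, u32 num_entries)
{
	u8 masked[16] = {}; /* DR_STE_SIZE_TAG assumed to be 16 */
	int i;

	/* byte_mask has one bit per tag byte: keep only the masked bytes */
	for (i = 0; i < 16; i++)
		if (byte_mask & (1u << i))
			masked[i] = tag[i];

	/* num_entries must be a power of two for the AND to act as modulo */
	return crc32(0, masked, sizeof(masked)) & (num_entries - 1);
}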
@@ -96,13 +97,11 @@ void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
}
static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
- struct mlx5dr_ste *ste, u64 miss_addr)
+ u8 *hw_ste, u64 miss_addr)
{
- u8 *hw_ste_p = ste->hw_ste;
-
- ste_ctx->set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
- ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
- dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
+ ste_ctx->set_next_lu_type(hw_ste, MLX5DR_STE_LU_TYPE_DONT_CARE);
+ ste_ctx->set_miss_addr(hw_ste, miss_addr);
+ dr_ste_set_always_miss((struct dr_hw_ste_format *)hw_ste);
}
void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
@@ -113,37 +112,45 @@ void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
{
- u32 index = ste - ste->htbl->ste_arr;
+ u64 base_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(ste->htbl->chunk);
+ u32 index = ste - ste->htbl->chunk->ste_arr;
- return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index;
+ return base_icm_addr + DR_STE_SIZE * index;
}
u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
{
- u32 index = ste - ste->htbl->ste_arr;
+ u32 index = ste - ste->htbl->chunk->ste_arr;
- return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index;
+ return mlx5dr_icm_pool_get_chunk_mr_addr(ste->htbl->chunk) + DR_STE_SIZE * index;
+}
+
+u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste)
+{
+ u64 index = ste - ste->htbl->chunk->ste_arr;
+
+ return ste->htbl->chunk->hw_ste_arr + DR_STE_SIZE_REDUCED * index;
}
struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
{
- u32 index = ste - ste->htbl->ste_arr;
+ u32 index = ste - ste->htbl->chunk->ste_arr;
- return &ste->htbl->miss_list[index];
+ return &ste->htbl->chunk->miss_list[index];
}
static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
- struct mlx5dr_ste *ste,
+ u8 *hw_ste,
struct mlx5dr_ste_htbl *next_htbl)
{
struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
- u8 *hw_ste = ste->hw_ste;
ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
- ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
+ ste_ctx->set_hit_addr(hw_ste, mlx5dr_icm_pool_get_chunk_icm_addr(chunk),
+ mlx5dr_icm_pool_get_chunk_num_of_entries(chunk));
- dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
+ dr_ste_set_always_hit((struct dr_hw_ste_format *)hw_ste);
}
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
@@ -166,7 +173,8 @@ bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
*/
static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
{
- memcpy(dst->hw_ste, src->hw_ste, DR_STE_SIZE_REDUCED);
+ memcpy(mlx5dr_ste_get_hw_ste(dst), mlx5dr_ste_get_hw_ste(src),
+ DR_STE_SIZE_REDUCED);
dst->next_htbl = src->next_htbl;
if (dst->next_htbl)
dst->next_htbl->pointing_ste = dst;
@@ -184,18 +192,17 @@ dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_htbl *stats_tbl)
{
u8 tmp_data_ste[DR_STE_SIZE] = {};
- struct mlx5dr_ste tmp_ste = {};
u64 miss_addr;
- tmp_ste.hw_ste = tmp_data_ste;
+ miss_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
/* Use temp ste because dr_ste_always_miss_addr
* touches bit_mask area which doesn't exist at ste->hw_ste.
+ * Need to use a full-sized (DR_STE_SIZE) hw_ste.
*/
- memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
- miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
- dr_ste_always_miss_addr(ste_ctx, &tmp_ste, miss_addr);
- memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);
+ memcpy(tmp_data_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
+ dr_ste_always_miss_addr(ste_ctx, tmp_data_ste, miss_addr);
+ memcpy(mlx5dr_ste_get_hw_ste(ste), tmp_data_ste, DR_STE_SIZE_REDUCED);
list_del_init(&ste->miss_list_node);
@@ -237,7 +244,7 @@ dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false);
/* Copy all 64 hw_ste bytes */
- memcpy(hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
+ memcpy(hw_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
sb_idx = ste->ste_chain_location - 1;
mlx5dr_ste_set_bit_mask(hw_ste,
nic_matcher->ste_builder[sb_idx].bit_mask);
@@ -273,12 +280,13 @@ static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
if (WARN_ON(!prev_ste))
return;
- miss_addr = ste_ctx->get_miss_addr(ste->hw_ste);
- ste_ctx->set_miss_addr(prev_ste->hw_ste, miss_addr);
+ miss_addr = ste_ctx->get_miss_addr(mlx5dr_ste_get_hw_ste(ste));
+ ste_ctx->set_miss_addr(mlx5dr_ste_get_hw_ste(prev_ste), miss_addr);
mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0,
- prev_ste->hw_ste, ste_info,
- send_ste_list, true /* Copy data*/);
+ mlx5dr_ste_get_hw_ste(prev_ste),
+ ste_info, send_ste_list,
+ true /* Copy data*/);
list_del_init(&ste->miss_list_node);
@@ -364,9 +372,11 @@ void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
u8 *hw_ste,
struct mlx5dr_ste_htbl *next_htbl)
{
- struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
+ u64 icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(next_htbl->chunk);
+ u32 num_entries =
+ mlx5dr_icm_pool_get_chunk_num_of_entries(next_htbl->chunk);
- ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
+ ste_ctx->set_hit_addr(hw_ste, icm_addr, num_entries);
}
void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
@@ -385,15 +395,22 @@ void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_htbl_connect_info *connect_info)
{
bool is_rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
- struct mlx5dr_ste ste = {};
+ u8 tmp_hw_ste[DR_STE_SIZE] = {0};
ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi);
- ste.hw_ste = formatted_ste;
+ /* Use a temp ste because dr_ste_always_miss_addr/hit_htbl
+ * touch the bit_mask area, which doesn't exist in the reduced
+ * copy at ste->hw_ste, so a full-sized (DR_STE_SIZE) hw_ste is needed.
+ */
+ memcpy(tmp_hw_ste, formatted_ste, DR_STE_SIZE_REDUCED);
if (connect_info->type == CONNECT_HIT)
- dr_ste_always_hit_htbl(ste_ctx, &ste, connect_info->hit_next_htbl);
+ dr_ste_always_hit_htbl(ste_ctx, tmp_hw_ste,
+ connect_info->hit_next_htbl);
else
- dr_ste_always_miss_addr(ste_ctx, &ste, connect_info->miss_icm_addr);
+ dr_ste_always_miss_addr(ste_ctx, tmp_hw_ste,
+ connect_info->miss_icm_addr);
+ memcpy(formatted_ste, tmp_hw_ste, DR_STE_SIZE_REDUCED);
}
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
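The temp-buffer copies above (and in dr_ste_remove_head_ste()) exist because
only a reduced STE is cached: assuming the usual layout, DR_STE_SIZE is 64
bytes of which the trailing DR_STE_SIZE_MASK (16) bytes hold the bit mask,
leaving DR_STE_SIZE_REDUCED = 48 cached bytes. Helpers that write the
bit_mask area therefore need a full 64-byte scratch buffer, and only the
first 48 bytes are copied back.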
@@ -444,7 +461,8 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
/* Write new table to HW */
info.type = CONNECT_MISS;
- info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
+ info.miss_icm_addr =
+ mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
&info, false)) {
mlx5dr_info(dmn, "Failed writing table to HW\n");
@@ -470,6 +488,7 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
{
struct mlx5dr_icm_chunk *chunk;
struct mlx5dr_ste_htbl *htbl;
+ u32 num_entries;
int i;
htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
@@ -483,22 +502,18 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
htbl->chunk = chunk;
htbl->lu_type = lu_type;
htbl->byte_mask = byte_mask;
- htbl->ste_arr = chunk->ste_arr;
- htbl->hw_ste_arr = chunk->hw_ste_arr;
- htbl->miss_list = chunk->miss_list;
htbl->refcount = 0;
+ num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
- for (i = 0; i < chunk->num_of_entries; i++) {
- struct mlx5dr_ste *ste = &htbl->ste_arr[i];
+ for (i = 0; i < num_entries; i++) {
+ struct mlx5dr_ste *ste = &chunk->ste_arr[i];
- ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
ste->htbl = htbl;
ste->refcount = 0;
INIT_LIST_HEAD(&ste->miss_list_node);
- INIT_LIST_HEAD(&htbl->miss_list[i]);
+ INIT_LIST_HEAD(&chunk->miss_list[i]);
}
- htbl->chunk_size = chunk_size;
return htbl;
out_free_htbl:
@@ -523,8 +538,8 @@ void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
- ste_ctx->set_actions_tx(dmn, action_type_set, hw_ste_arr,
- attr, added_stes);
+ ste_ctx->set_actions_tx(dmn, action_type_set, ste_ctx->actions_caps,
+ hw_ste_arr, attr, added_stes);
}
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
@@ -534,8 +549,8 @@ void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
- ste_ctx->set_actions_rx(dmn, action_type_set, hw_ste_arr,
- attr, added_stes);
+ ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps,
+ hw_ste_arr, attr, added_stes);
}
const struct mlx5dr_ste_action_modify_field *
@@ -793,6 +808,7 @@ static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec, bo
spec->tcp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_sport, clr);
spec->tcp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_dport, clr);
+ spec->ipv4_ihl = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ipv4_ihl, clr);
spec->ttl_hoplimit = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ttl_hoplimit, clr);
spec->udp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_sport, clr);
@@ -1360,15 +1376,14 @@ void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
ste_ctx->build_tnl_header_0_1_init(sb, mask);
}
-static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
- [MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
- [MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
-};
-
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
{
- if (version > MLX5_STEERING_FORMAT_CONNECTX_6DX)
- return NULL;
+ if (version == MLX5_STEERING_FORMAT_CONNECTX_5)
+ return mlx5dr_ste_get_ctx_v0();
+ else if (version == MLX5_STEERING_FORMAT_CONNECTX_6DX)
+ return mlx5dr_ste_get_ctx_v1();
+ else if (version == MLX5_STEERING_FORMAT_CONNECTX_7)
+ return mlx5dr_ste_get_ctx_v2();
- return mlx5dr_ste_ctx_arr[version];
+ return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
index ca8fa32b8680..17513baff9b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
@@ -161,11 +161,13 @@ struct mlx5dr_ste_ctx {
u32 actions_caps;
void (*set_actions_rx)(struct mlx5dr_domain *dmn,
u8 *action_type_set,
+ u32 actions_caps,
u8 *hw_ste_arr,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes);
void (*set_actions_tx)(struct mlx5dr_domain *dmn,
u8 *action_type_set,
+ u32 actions_caps,
u8 *hw_ste_arr,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes);
@@ -197,7 +199,8 @@ struct mlx5dr_ste_ctx {
void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size);
};
-extern struct mlx5dr_ste_ctx ste_ctx_v0;
-extern struct mlx5dr_ste_ctx ste_ctx_v1;
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void);
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void);
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void);
#endif /* _DR_STE_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
index 2d62950f7a29..5a322335f204 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -408,6 +408,7 @@ static void dr_ste_v0_arr_init_next(u8 **last_ste,
static void
dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
u8 *action_type_set,
+ u32 actions_caps,
u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
@@ -477,6 +478,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
static void
dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
u8 *action_type_set,
+ u32 actions_caps,
u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
@@ -1152,6 +1154,7 @@ dr_ste_v0_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
+ DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, ihl, spec, ipv4_ihl);
return 0;
}
@@ -1897,7 +1900,7 @@ static void dr_ste_v0_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v0_build_tnl_header_0_1_tag;
}
-struct mlx5dr_ste_ctx ste_ctx_v0 = {
+static struct mlx5dr_ste_ctx ste_ctx_v0 = {
/* Builders */
.build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init,
.build_eth_l3_ipv6_src_init = &dr_ste_v0_build_eth_l3_ipv6_src_init,
@@ -1950,3 +1953,8 @@ struct mlx5dr_ste_ctx ste_ctx_v0 = {
.set_action_copy = &dr_ste_v0_set_action_copy,
.set_action_decap_l3_list = &dr_ste_v0_set_action_decap_l3_list,
};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void)
+{
+ return &ste_ctx_v0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index 6ca06800f1d9..fcb962c6db2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -3,7 +3,7 @@
#include <linux/types.h>
#include "mlx5_ifc_dr_ste_v1.h"
-#include "dr_ste.h"
+#include "dr_ste_v1.h"
#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
@@ -121,12 +121,12 @@ enum {
DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2 = 0x8c,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3 = 0x8d,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4 = 0x8e,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5 = 0x8f,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6 = 0x90,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7 = 0x91,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
};
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
@@ -223,22 +223,22 @@ static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
- .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6, .start = 0, .end = 31,
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
- .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7, .start = 0, .end = 31,
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
- .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4, .start = 0, .end = 31,
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
- .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5, .start = 0, .end = 31,
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
- .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2, .start = 0, .end = 31,
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
- .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3, .start = 0, .end = 31,
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
@@ -262,7 +262,7 @@ static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type);
}
-static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
+void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
u64 index = miss_addr >> 6;
@@ -270,7 +270,7 @@ static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6, index);
}
-static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
+u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
{
u64 index =
((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
@@ -279,12 +279,12 @@ static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
return index << 6;
}
-static void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
+void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
{
MLX5_SET(ste_match_bwc_v1, hw_ste_p, byte_mask, byte_mask);
}
-static u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p)
+u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p)
{
return MLX5_GET(ste_match_bwc_v1, hw_ste_p, byte_mask);
}
@@ -295,13 +295,13 @@ static void dr_ste_v1_set_lu_type(u8 *hw_ste_p, u16 lu_type)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, match_definer_ctx_idx, lu_type & 0xFF);
}
-static void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
+void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
{
MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_entry_format, lu_type >> 8);
MLX5_SET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx, lu_type & 0xFF);
}
-static u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p)
+u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p)
{
u8 mode = MLX5_GET(ste_match_bwc_v1, hw_ste_p, next_entry_format);
u8 index = MLX5_GET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx);
@@ -314,7 +314,7 @@ static void dr_ste_v1_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
}
-static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
+void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
{
u64 index = (icm_addr >> 5) | ht_size;
@@ -322,8 +322,7 @@ static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_31_5_size, index);
}
-static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type,
- bool is_rx, u16 gvmi)
+void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi)
{
dr_ste_v1_set_lu_type(hw_ste_p, lu_type);
dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
@@ -333,8 +332,7 @@ static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type,
MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_63_48, gvmi);
}
-static void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p,
- u32 ste_size)
+void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size)
{
u8 *tag = hw_ste_p + DR_STE_SIZE_CTRL;
u8 *mask = tag + DR_STE_SIZE_TAG;
@@ -511,11 +509,12 @@ static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action));
}
-static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
- u8 *action_type_set,
- u8 *last_ste,
- struct mlx5dr_ste_actions_attr *attr,
- u32 *added_stes)
+void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u32 actions_caps,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
{
u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
@@ -533,7 +532,10 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
- allow_modify_hdr = false;
+
+ /* Check if vlan_pop and modify_hdr on the same STE is supported */
+ if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
+ allow_modify_hdr = false;
}
if (action_type_set[DR_ACTION_TYP_CTR])
@@ -631,11 +633,12 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
-static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
- u8 *action_type_set,
- u8 *last_ste,
- struct mlx5dr_ste_actions_attr *attr,
- u32 *added_stes)
+void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u32 actions_caps,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
{
u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
@@ -677,13 +680,16 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
- allow_modify_hdr = false;
- allow_ctr = false;
}
dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
+ allow_ctr = false;
+
+ /* Check if vlan_pop and modify_hdr on the same STE is supported */
+ if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
+ allow_modify_hdr = false;
}
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
@@ -731,9 +737,9 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_modify_hdr = true;
- allow_ctr = false;
}
dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
+ allow_ctr = false;
}
if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
@@ -800,11 +806,11 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
-static void dr_ste_v1_set_action_set(u8 *d_action,
- u8 hw_field,
- u8 shifter,
- u8 length,
- u32 data)
+void dr_ste_v1_set_action_set(u8 *d_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
{
shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
MLX5_SET(ste_double_action_set_v1, d_action, action_id, DR_STE_V1_ACTION_ID_SET);
@@ -814,11 +820,11 @@ static void dr_ste_v1_set_action_set(u8 *d_action,
MLX5_SET(ste_double_action_set_v1, d_action, inline_data, data);
}
-static void dr_ste_v1_set_action_add(u8 *d_action,
- u8 hw_field,
- u8 shifter,
- u8 length,
- u32 data)
+void dr_ste_v1_set_action_add(u8 *d_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
{
shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
MLX5_SET(ste_double_action_add_v1, d_action, action_id, DR_STE_V1_ACTION_ID_ADD);
@@ -828,12 +834,12 @@ static void dr_ste_v1_set_action_add(u8 *d_action,
MLX5_SET(ste_double_action_add_v1, d_action, add_value, data);
}
-static void dr_ste_v1_set_action_copy(u8 *d_action,
- u8 dst_hw_field,
- u8 dst_shifter,
- u8 dst_len,
- u8 src_hw_field,
- u8 src_shifter)
+void dr_ste_v1_set_action_copy(u8 *d_action,
+ u8 dst_hw_field,
+ u8 dst_shifter,
+ u8 dst_len,
+ u8 src_hw_field,
+ u8 src_shifter)
{
dst_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
src_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
@@ -848,11 +854,11 @@ static void dr_ste_v1_set_action_copy(u8 *d_action,
#define DR_STE_DECAP_L3_ACTION_NUM 8
#define DR_STE_L2_HDR_MAX_SZ 20
-static int dr_ste_v1_set_action_decap_l3_list(void *data,
- u32 data_sz,
- u8 *hw_action,
- u32 hw_action_sz,
- u16 *used_hw_action_num)
+int dr_ste_v1_set_action_decap_l3_list(void *data,
+ u32 data_sz,
+ u8 *hw_action,
+ u32 hw_action_sz,
+ u16 *used_hw_action_num)
{
u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
void *data_ptr = padded_data;
@@ -977,8 +983,8 @@ static int dr_ste_v1_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);
@@ -1001,8 +1007,8 @@ static int dr_ste_v1_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);
@@ -1025,8 +1031,8 @@ static int dr_ste_v1_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
@@ -1060,8 +1066,8 @@ static int dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *va
return 0;
}
-static void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
@@ -1079,8 +1085,8 @@ static void dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_vlan_id, mask, first_vid);
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_cfi, mask, first_cfi);
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_priority, mask, first_prio);
- DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag); // ?
- DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype); // ?
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag);
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype);
DR_STE_SET_ONES(eth_l2_src_v1, bit_mask, l3_type, mask, ip_version);
if (mask->svlan_tag || mask->cvlan_tag) {
@@ -1201,8 +1207,8 @@ static int dr_ste_v1_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
-static void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
@@ -1234,8 +1240,8 @@ static int dr_ste_v1_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
-static void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l2_dst_bit_mask(mask, sb->inner, sb->bit_mask);
@@ -1314,8 +1320,8 @@ static int dr_ste_v1_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
@@ -1331,12 +1337,13 @@ static int dr_ste_v1_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, time_to_live, spec, ttl_hoplimit);
+ DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, ihl, spec, ipv4_ihl);
return 0;
}
-static void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
@@ -1375,8 +1382,8 @@ static int dr_ste_v1_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
@@ -1399,8 +1406,8 @@ static int dr_ste_v1_build_mpls_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_mpls_tag(mask, sb, sb->bit_mask);
@@ -1426,8 +1433,8 @@ static int dr_ste_v1_build_tnl_gre_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_tnl_gre_tag(mask, sb, sb->bit_mask);
@@ -1471,8 +1478,8 @@ static int dr_ste_v1_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
@@ -1506,8 +1513,8 @@ static int dr_ste_v1_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *valu
return 0;
}
-static void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);
@@ -1547,8 +1554,8 @@ static int dr_ste_v1_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *valu
return 0;
}
-static void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);
@@ -1594,8 +1601,8 @@ static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask);
@@ -1616,8 +1623,8 @@ static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_general_purpose_tag(mask, sb, sb->bit_mask);
@@ -1643,8 +1650,8 @@ static int dr_ste_v1_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
@@ -1673,9 +1680,8 @@ dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void
-dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
@@ -1703,9 +1709,8 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void
-dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
@@ -1726,8 +1731,8 @@ static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
dr_ste_v1_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
@@ -1749,8 +1754,8 @@ static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_register_0_tag(mask, sb, sb->bit_mask);
@@ -1773,8 +1778,8 @@ static int dr_ste_v1_build_register_1_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_register_1_tag(mask, sb, sb->bit_mask);
@@ -1837,8 +1842,8 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
@@ -1892,8 +1897,8 @@ static int dr_ste_v1_build_felx_parser_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
@@ -1901,8 +1906,8 @@ static void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
}
-static void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
@@ -1926,7 +1931,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *va
return 0;
}
-static void
+void
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
@@ -1959,7 +1964,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_par
return 0;
}
-static void
+void
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
@@ -1982,8 +1987,8 @@ static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *v
return 0;
}
-static void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
@@ -2008,7 +2013,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void
+void
dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
@@ -2035,7 +2040,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void
+void
dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
@@ -2046,7 +2051,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag;
}
-struct mlx5dr_ste_ctx ste_ctx_v1 = {
+static struct mlx5dr_ste_ctx ste_ctx_v1 = {
/* Builders */
.build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
.build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
@@ -2091,7 +2096,8 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = {
/* Actions */
.actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP |
DR_STE_CTX_ACTION_CAP_RX_PUSH |
- DR_STE_CTX_ACTION_CAP_RX_ENCAP,
+ DR_STE_CTX_ACTION_CAP_RX_ENCAP |
+ DR_STE_CTX_ACTION_CAP_POP_MDFY,
.set_actions_rx = &dr_ste_v1_set_actions_rx,
.set_actions_tx = &dr_ste_v1_set_actions_tx,
.modify_field_arr_sz = ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
@@ -2103,3 +2109,8 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = {
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void)
+{
+ return &ste_ctx_v1;
+}
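
The set_actions_rx/tx callbacks now take the context's actions_caps, so dr_ste_v1_set_actions_rx() can keep vlan_pop and modify_hdr on one STE when DR_STE_CTX_ACTION_CAP_POP_MDFY is set. A sketch of how the common wrapper presumably threads the capability bits through (the wrapper lives in dr_ste.c, which this excerpt does not show; treat the body as an assumption):

void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	/* Assumed plumbing: pass the per-version capability bitmask down */
	ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps,
				hw_ste_arr, attr, added_stes);
}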
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
new file mode 100644
index 000000000000..8a1d49790c6e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef _DR_STE_V1_
+#define _DR_STE_V1_
+
+#include "dr_types.h"
+#include "dr_ste.h"
+
+void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr);
+u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p);
+void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask);
+u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p);
+void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type);
+u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p);
+void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
+void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi);
+void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size);
+void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, u8 *action_type_set,
+ u32 actions_caps, u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
+void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, u8 *action_type_set,
+ u32 actions_caps, u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
+void dr_ste_v1_set_action_set(u8 *d_action, u8 hw_field, u8 shifter,
+ u8 length, u32 data);
+void dr_ste_v1_set_action_add(u8 *d_action, u8 hw_field, u8 shifter,
+ u8 length, u32 data);
+void dr_ste_v1_set_action_copy(u8 *d_action, u8 dst_hw_field, u8 dst_shifter,
+ u8 dst_len, u8 src_hw_field, u8 src_shifter);
+int dr_ste_v1_set_action_decap_l3_list(void *data, u32 data_sz, u8 *hw_action,
+ u32 hw_action_sz, u16 *used_hw_action_num);
+void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+
+#endif /* _DR_STE_V1_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
new file mode 100644
index 000000000000..c60fddd125d2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "dr_ste_v1.h"
+
+enum {
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
+ DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
+ DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
+ DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
+ DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
+ DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
+ DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
+ DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
+ DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
+ DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
+ DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
+ DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0 = 0x90,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1 = 0x91,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0 = 0x92,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1 = 0x93,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0 = 0x94,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1 = 0x95,
+};
+
+static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = {
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
+ },
+};
+
+static struct mlx5dr_ste_ctx ste_ctx_v2 = {
+ /* Builders */
+ .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
+ .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
+ .build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
+ .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
+ .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init,
+ .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init,
+ .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init,
+ .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
+ .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
+ .build_mpls_init = &dr_ste_v1_build_mpls_init,
+ .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
+ .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
+ .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init,
+ .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init,
+ .build_icmp_init = &dr_ste_v1_build_icmp_init,
+ .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
+ .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
+ .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
+ .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
+ .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
+ .build_tnl_geneve_tlv_opt_exist_init =
+ &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
+ .build_register_0_init = &dr_ste_v1_build_register_0_init,
+ .build_register_1_init = &dr_ste_v1_build_register_1_init,
+ .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
+ .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
+ .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
+ .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
+ .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init,
+ .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
+ .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
+
+ /* Getters and Setters */
+ .ste_init = &dr_ste_v1_init,
+ .set_next_lu_type = &dr_ste_v1_set_next_lu_type,
+ .get_next_lu_type = &dr_ste_v1_get_next_lu_type,
+ .set_miss_addr = &dr_ste_v1_set_miss_addr,
+ .get_miss_addr = &dr_ste_v1_get_miss_addr,
+ .set_hit_addr = &dr_ste_v1_set_hit_addr,
+ .set_byte_mask = &dr_ste_v1_set_byte_mask,
+ .get_byte_mask = &dr_ste_v1_get_byte_mask,
+
+ /* Actions */
+ .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP |
+ DR_STE_CTX_ACTION_CAP_RX_PUSH |
+ DR_STE_CTX_ACTION_CAP_RX_ENCAP,
+ .set_actions_rx = &dr_ste_v1_set_actions_rx,
+ .set_actions_tx = &dr_ste_v1_set_actions_tx,
+ .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr),
+ .modify_field_arr = dr_ste_v2_action_modify_field_arr,
+ .set_action_set = &dr_ste_v1_set_action_set,
+ .set_action_add = &dr_ste_v1_set_action_add,
+ .set_action_copy = &dr_ste_v1_set_action_copy,
+ .set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
+
+ /* Send */
+ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
+};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void)
+{
+ return &ste_ctx_v2;
+}
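
The v2 context reuses every v1 builder and action setter; the v2-specific pieces are the modify-header field encodings (the REGISTER_* ids move from 0x8c-0x91 to 0x90-0x95) and the absence of DR_STE_CTX_ACTION_CAP_POP_MDFY. A sketch of how the field array is meant to be consumed through the ste_ctx ops (helper name is illustrative):

/* Illustrative lookup -- zeroed slots (end == 0) mean "unsupported". */
static const struct mlx5dr_ste_action_modify_field *
example_v2_get_mdfy_field(u16 sw_field)
{
	if (sw_field >= ARRAY_SIZE(dr_ste_v2_action_modify_field_arr) ||
	    !dr_ste_v2_action_modify_field_arr[sw_field].end)
		return NULL;
	return &dr_ste_v2_action_modify_field_arr[sw_field];
}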
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index 8ca110643cc0..e5f6412baea9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -10,6 +10,7 @@ static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn,
struct mlx5dr_matcher_rx_tx *last_nic_matcher = NULL;
struct mlx5dr_htbl_connect_info info;
struct mlx5dr_ste_htbl *last_htbl;
+ struct mlx5dr_icm_chunk *chunk;
int ret;
if (!list_empty(&nic_tbl->nic_matcher_list))
@@ -22,13 +23,14 @@ static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn,
else
last_htbl = nic_tbl->s_anchor;
- if (action)
- nic_tbl->default_icm_addr =
- nic_tbl->nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX ?
- action->dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
- action->dest_tbl->tbl->tx.s_anchor->chunk->icm_addr;
- else
+ if (action) {
+ chunk = nic_tbl->nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX ?
+ action->dest_tbl->tbl->rx.s_anchor->chunk :
+ action->dest_tbl->tbl->tx.s_anchor->chunk;
+ nic_tbl->default_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(chunk);
+ } else {
nic_tbl->default_icm_addr = nic_tbl->nic_dmn->default_icm_addr;
+ }
info.type = CONNECT_MISS;
info.miss_icm_addr = nic_tbl->default_icm_addr;
@@ -222,10 +224,10 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
int ret;
if (tbl->rx.s_anchor)
- icm_addr_rx = tbl->rx.s_anchor->chunk->icm_addr;
+ icm_addr_rx = mlx5dr_icm_pool_get_chunk_icm_addr(tbl->rx.s_anchor->chunk);
if (tbl->tx.s_anchor)
- icm_addr_tx = tbl->tx.s_anchor->chunk->icm_addr;
+ icm_addr_tx = mlx5dr_icm_pool_get_chunk_icm_addr(tbl->tx.s_anchor->chunk);
ft_attr.table_type = tbl->table_type;
ft_attr.icm_addr_rx = icm_addr_rx;
@@ -305,3 +307,8 @@ u32 mlx5dr_table_get_id(struct mlx5dr_table *tbl)
{
return tbl->table_id;
}
+
+struct mlx5dr_table *mlx5dr_table_get_from_fs_ft(struct mlx5_flow_table *ft)
+{
+ return ft->fs_dr_table.dr_table;
+}
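
dr_table.c now goes through mlx5dr_icm_pool_get_chunk_icm_addr() instead of reading chunk->icm_addr, which the dr_types.h hunk below deletes from the chunk struct. The accessor's body is not in this excerpt; one plausible shape, assuming the address is recomputed from the buddy allocation rather than cached per chunk (every field name past 'seg' is an assumption):

u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk)
{
	/* Assumed: entry size comes from the owning pool's ICM type */
	u32 size = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);

	return (u64)chunk->buddy_mem->icm_mr->icm_start_addr + size * chunk->seg;
}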
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 55fcb751e24a..46866a5fc5ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -91,6 +91,7 @@ enum mlx5dr_ste_ctx_action_cap {
DR_STE_CTX_ACTION_CAP_TX_POP = 1 << 0,
DR_STE_CTX_ACTION_CAP_RX_PUSH = 1 << 1,
DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 2,
+ DR_STE_CTX_ACTION_CAP_POP_MDFY = 1 << 3,
};
enum {
@@ -146,10 +147,12 @@ struct mlx5dr_matcher_rx_tx;
struct mlx5dr_ste_ctx;
struct mlx5dr_ste {
- u8 *hw_ste;
/* refcount: indicates the number of rules that are using this ste */
u32 refcount;
+ /* this ste is part of a rule, located in ste's chain */
+ u8 ste_chain_location;
+
/* attached to the miss_list head at each htbl entry */
struct list_head miss_list_node;
@@ -160,9 +163,6 @@ struct mlx5dr_ste {
/* The rule this STE belongs to */
struct mlx5dr_rule_rx_tx *rule_rx_tx;
-
- /* this ste is part of a rule, located in ste's chain */
- u8 ste_chain_location;
};
struct mlx5dr_ste_htbl_ctrl {
@@ -180,14 +180,7 @@ struct mlx5dr_ste_htbl {
u16 byte_mask;
u32 refcount;
struct mlx5dr_icm_chunk *chunk;
- struct mlx5dr_ste *ste_arr;
- u8 *hw_ste_arr;
-
- struct list_head *miss_list;
-
- enum mlx5dr_icm_chunk_size chunk_size;
struct mlx5dr_ste *pointing_ste;
-
struct mlx5dr_ste_htbl_ctrl ctrl;
};
@@ -555,7 +548,9 @@ struct mlx5dr_match_spec {
*/
u32 tcp_dport:16;
- u32 reserved_auto1:24;
+ u32 reserved_auto1:16;
+ u32 ipv4_ihl:4;
+ u32 reserved_auto2:4;
u32 ttl_hoplimit:8;
/* UDP source port; tcp and udp sport/dport are mutually exclusive */
@@ -1094,16 +1089,12 @@ int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
struct mlx5dr_icm_chunk {
struct mlx5dr_icm_buddy_mem *buddy_mem;
struct list_head chunk_list;
- u32 rkey;
- u32 num_of_entries;
- u32 byte_size;
- u64 icm_addr;
- u64 mr_addr;
/* indicates the index of this chunk in the whole memory,
* used for deleting the chunk from the buddy
*/
unsigned int seg;
+ enum mlx5dr_icm_chunk_size size;
/* Memory optimisation */
struct mlx5dr_ste *ste_arr;
@@ -1143,6 +1134,13 @@ int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
enum mlx5dr_ipv outer_ipv,
enum mlx5dr_ipv inner_ipv);
+u64 mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk *chunk);
+u32 mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk *chunk);
+u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk);
+u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk);
+u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk);
+u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste);
+
static inline int
mlx5dr_icm_pool_dm_type_to_entry_size(enum mlx5dr_icm_type icm_type)
{
@@ -1175,7 +1173,7 @@ static inline int
mlx5dr_ste_htbl_increase_threshold(struct mlx5dr_ste_htbl *htbl)
{
int num_of_entries =
- mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size);
+ mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk->size);
/* Threshold is 50%, one is added to table of size 1 */
return (num_of_entries + 1) / 2;
@@ -1184,7 +1182,7 @@ mlx5dr_ste_htbl_increase_threshold(struct mlx5dr_ste_htbl *htbl)
static inline bool
mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl *htbl)
{
- if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
+ if (htbl->chunk->size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
return false;
return true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 3f311462bedf..045b0cf90063 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -754,6 +754,16 @@ static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns)
return mlx5dr_domain_destroy(ns->fs_dr_domain.dr_domain);
}
+static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
+ enum fs_flow_table_type ft_type)
+{
+ if (ft_type != FS_FT_FDB ||
+ MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5)
+ return 0;
+
+ return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX | MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX;
+}
+
bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
{
return mlx5dr_is_supported(dev);
@@ -778,6 +788,7 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds_dr = {
.set_peer = mlx5_cmd_dr_set_peer,
.create_ns = mlx5_cmd_dr_create_ns,
.destroy_ns = mlx5_cmd_dr_destroy_ns,
+ .get_capabilities = mlx5_cmd_dr_get_capabilities,
};
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
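
With get_capabilities wired into mlx5_flow_cmds, fs_core can ask the SW-steering backend which optional features a table type supports before accepting a rule. An illustrative check (the helper and call site are hypothetical; only the cap flags and mlx5_cmd_dr_get_capabilities() come from this patch):

static bool example_fdb_allows_rx_vlan_push(struct mlx5_flow_root_namespace *ns)
{
	u32 caps = mlx5_cmd_dr_get_capabilities(ns, FS_FT_FDB);

	return caps & MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX;
}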
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index dfa223415fe2..ec5cbec0d455 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -53,6 +53,9 @@ void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_table *
mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags);
+struct mlx5dr_table *
+mlx5dr_table_get_from_fs_ft(struct mlx5_flow_table *ft);
+
int mlx5dr_table_destroy(struct mlx5dr_table *table);
u32 mlx5dr_table_get_id(struct mlx5dr_table *table);
@@ -136,7 +139,7 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev)
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
(MLX5_CAP_GEN(dev, steering_format_version) <=
- MLX5_STEERING_FORMAT_CONNECTX_6DX)));
+ MLX5_STEERING_FORMAT_CONNECTX_7)));
}
/* buddy functions & structure */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 01e9c412977c..8455e79bc44a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
@@ -100,19 +99,21 @@ static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
int err = -ENOMEM;
phys_addr_t pfn;
int bfregs;
+ int node;
int i;
bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;
- up = kzalloc(sizeof(*up), GFP_KERNEL);
+ node = mdev->priv.numa_node;
+ up = kzalloc_node(sizeof(*up), GFP_KERNEL, node);
if (!up)
return ERR_PTR(err);
up->mdev = mdev;
- up->reg_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL);
+ up->reg_bitmap = bitmap_zalloc_node(bfregs, GFP_KERNEL, node);
if (!up->reg_bitmap)
goto error1;
- up->fp_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL);
+ up->fp_bitmap = bitmap_zalloc_node(bfregs, GFP_KERNEL, node);
if (!up->fp_bitmap)
goto error1;
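
The UAR page and its bitmaps are now allocated on the device's NUMA node. The same pattern, restated as a minimal self-contained sketch (struct example_ctx is a placeholder):

struct example_ctx {
	unsigned long *bitmap;
};

static struct example_ctx *example_alloc(struct mlx5_core_dev *mdev,
					 unsigned int nbits)
{
	int node = mdev->priv.numa_node;
	struct example_ctx *ctx;

	/* Keep control structures local to the device's NUMA node */
	ctx = kzalloc_node(sizeof(*ctx), GFP_KERNEL, node);
	if (!ctx)
		return NULL;

	ctx->bitmap = bitmap_zalloc_node(nbits, GFP_KERNEL, node);
	if (!ctx->bitmap) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}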
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 866b9357939b..b13e0f8d232a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -177,17 +177,6 @@ void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);
-bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core)
-{
- return mlxsw_core->driver->res_query_enabled;
-}
-EXPORT_SYMBOL(mlxsw_core_res_query_enabled);
-
-bool mlxsw_core_temp_warn_enabled(const struct mlxsw_core *mlxsw_core)
-{
- return mlxsw_core->driver->temp_warn_enabled;
-}
-
bool
mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
const struct mlxsw_fw_rev *req_rev)
@@ -212,6 +201,32 @@ struct mlxsw_event_listener_item {
void *priv;
};
+static const u8 mlxsw_core_trap_groups[] = {
+ MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+ MLXSW_REG_HTGT_TRAP_GROUP_CORE_EVENT,
+};
+
+static int mlxsw_core_trap_groups_set(struct mlxsw_core *mlxsw_core)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+ int err;
+ int i;
+
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_core_trap_groups); i++) {
+ mlxsw_reg_htgt_pack(htgt_pl, mlxsw_core_trap_groups[i],
+ MLXSW_REG_HTGT_INVALID_POLICER,
+ MLXSW_REG_HTGT_DEFAULT_PRIORITY,
+ MLXSW_REG_HTGT_DEFAULT_TC);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
/******************
* EMAD processing
******************/
@@ -777,16 +792,10 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
if (err)
goto err_trap_register;
- err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
- if (err)
- goto err_emad_trap_set;
mlxsw_core->emad.use_emad = true;
return 0;
-err_emad_trap_set:
- mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
- mlxsw_core);
err_trap_register:
destroy_workqueue(mlxsw_core->emad_wq);
return err;
@@ -1208,36 +1217,37 @@ static void mlxsw_core_fw_params_unregister(struct mlxsw_core *mlxsw_core)
ARRAY_SIZE(mlxsw_core_fw_devlink_params));
}
+static void *__dl_port(struct devlink_port *devlink_port)
+{
+ return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
+}
+
static int mlxsw_devlink_port_split(struct devlink *devlink,
- unsigned int port_index,
+ struct devlink_port *port,
unsigned int count,
struct netlink_ext_ack *extack)
{
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(port);
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- if (port_index >= mlxsw_core->max_ports) {
- NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
- return -EINVAL;
- }
if (!mlxsw_core->driver->port_split)
return -EOPNOTSUPP;
- return mlxsw_core->driver->port_split(mlxsw_core, port_index, count,
- extack);
+ return mlxsw_core->driver->port_split(mlxsw_core,
+ mlxsw_core_port->local_port,
+ count, extack);
}
static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
- unsigned int port_index,
+ struct devlink_port *port,
struct netlink_ext_ack *extack)
{
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(port);
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- if (port_index >= mlxsw_core->max_ports) {
- NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
- return -EINVAL;
- }
if (!mlxsw_core->driver->port_unsplit)
return -EOPNOTSUPP;
- return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index,
+ return mlxsw_core->driver->port_unsplit(mlxsw_core,
+ mlxsw_core_port->local_port,
extack);
}
@@ -1271,11 +1281,6 @@ mlxsw_devlink_sb_pool_set(struct devlink *devlink,
extack);
}
-static void *__dl_port(struct devlink_port *devlink_port)
-{
- return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
-}
-
static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
enum devlink_port_type port_type)
{
@@ -1706,7 +1711,7 @@ static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg,
}
static const struct mlxsw_listener mlxsw_core_health_listener =
- MLXSW_EVENTL(mlxsw_core_health_listener_func, MFDE, MFDE);
+ MLXSW_CORE_EVENTL(mlxsw_core_health_listener_func, MFDE);
static int
mlxsw_core_health_fw_fatal_dump_fatal_cause(const char *mfde_pl,
@@ -2019,7 +2024,7 @@ static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
struct devlink_health_reporter *fw_fatal;
int err;
- if (!mlxsw_core->driver->fw_fatal_enabled)
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
return 0;
fw_fatal = devlink_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
@@ -2049,7 +2054,7 @@ err_trap_register:
static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core)
{
- if (!mlxsw_core->driver->fw_fatal_enabled)
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
return;
mlxsw_core_health_fw_fatal_config(mlxsw_core, false);
@@ -2069,7 +2074,6 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const char *device_kind = mlxsw_bus_info->device_kind;
struct mlxsw_core *mlxsw_core;
struct mlxsw_driver *mlxsw_driver;
- struct mlxsw_res *res;
size_t alloc_size;
int err;
@@ -2095,8 +2099,8 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
mlxsw_core->bus_priv = bus_priv;
mlxsw_core->bus_info = mlxsw_bus_info;
- res = mlxsw_driver->res_query_enabled ? &mlxsw_core->res : NULL;
- err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, res);
+ err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
+ &mlxsw_core->res);
if (err)
goto err_bus_init;
@@ -2122,6 +2126,10 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
}
}
+ err = mlxsw_core_trap_groups_set(mlxsw_core);
+ if (err)
+ goto err_trap_groups_set;
+
err = mlxsw_emad_init(mlxsw_core);
if (err)
goto err_emad_init;
@@ -2181,6 +2189,7 @@ err_fw_rev_validate:
err_register_params:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
+err_trap_groups_set:
kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
mlxsw_ports_fini(mlxsw_core, reload);
@@ -2500,6 +2509,9 @@ int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
char hpkt_pl[MLXSW_REG_HPKT_LEN];
int err;
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
+ return 0;
+
err = mlxsw_core_listener_register(mlxsw_core, listener, priv,
listener->enabled_on_register);
if (err)
@@ -2529,6 +2541,9 @@ void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
{
char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
+ return;
+
if (!listener->is_event) {
mlxsw_reg_hpkt_pack(hpkt_pl, listener->dis_action,
listener->trap_id, listener->dis_trap_group,
@@ -2540,6 +2555,45 @@ void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);
+int mlxsw_core_traps_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listeners,
+ size_t listeners_count, void *priv)
+{
+ int i, err;
+
+ for (i = 0; i < listeners_count; i++) {
+ err = mlxsw_core_trap_register(mlxsw_core,
+ &listeners[i],
+ priv);
+ if (err)
+ goto err_listener_register;
+ }
+ return 0;
+
+err_listener_register:
+ for (i--; i >= 0; i--) {
+ mlxsw_core_trap_unregister(mlxsw_core,
+ &listeners[i],
+ priv);
+ }
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_core_traps_register);
+
+void mlxsw_core_traps_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listeners,
+ size_t listeners_count, void *priv)
+{
+ int i;
+
+ for (i = 0; i < listeners_count; i++) {
+ mlxsw_core_trap_unregister(mlxsw_core,
+ &listeners[i],
+ priv);
+ }
+}
+EXPORT_SYMBOL(mlxsw_core_traps_unregister);
+
int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core,
const struct mlxsw_listener *listener,
bool enabled)
@@ -2925,7 +2979,7 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
attrs.switch_id.id_len = switch_id_len;
mlxsw_core_port->local_port = local_port;
devlink_port_attrs_set(devlink_port, &attrs);
- err = devlink_port_register(devlink, devlink_port, local_port);
+ err = devl_port_register(devlink, devlink_port, local_port);
if (err)
memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
return err;
@@ -2937,7 +2991,7 @@ static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port
&mlxsw_core->ports[local_port];
struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
- devlink_port_unregister(devlink_port);
+ devl_port_unregister(devlink_port);
memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
@@ -3181,9 +3235,6 @@ int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox,
u16 id;
int err;
- if (!res)
- return 0;
-
mlxsw_cmd_mbox_zero(mbox);
for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
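
mlxsw_core_traps_register()/unregister() fold the usual register-all-or-unwind loop into core. A hypothetical driver-side use (the listener table and callback are illustrative; MLXSW_CORE_EVENTL and the MFDE trap are the ones this patch shows):

static void example_event_func(const struct mlxsw_reg_info *reg,
			       char *payload, void *priv)
{
	/* Handle the event payload */
}

static const struct mlxsw_listener example_listeners[] = {
	MLXSW_CORE_EVENTL(example_event_func, MFDE),
};

static int example_traps_init(struct mlxsw_core *mlxsw_core, void *priv)
{
	/* On failure the helper unregisters whatever it managed to register */
	return mlxsw_core_traps_register(mlxsw_core, example_listeners,
					 ARRAY_SIZE(example_listeners), priv);
}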
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index f30bb8614e69..16ee5e90973d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -35,10 +35,6 @@ unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core);
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
-bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core);
-
-bool mlxsw_core_temp_warn_enabled(const struct mlxsw_core *mlxsw_core);
-
bool
mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
const struct mlxsw_fw_rev *req_rev);
@@ -163,6 +159,9 @@ struct mlxsw_listener {
.enabled_on_register = true, \
}
+#define MLXSW_CORE_EVENTL(_func, _trap_id) \
+ MLXSW_EVENTL(_func, _trap_id, CORE_EVENT)
+
int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
const struct mlxsw_rx_listener *rxl,
void *priv, bool enabled);
@@ -181,6 +180,12 @@ int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
const struct mlxsw_listener *listener,
void *priv);
+int mlxsw_core_traps_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listeners,
+ size_t listeners_count, void *priv);
+void mlxsw_core_traps_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listeners,
+ size_t listeners_count, void *priv);
int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core,
const struct mlxsw_listener *listener,
bool enabled);
@@ -315,7 +320,6 @@ struct mlxsw_driver {
const struct mlxsw_bus_info *mlxsw_bus_info,
struct netlink_ext_ack *extack);
void (*fini)(struct mlxsw_core *mlxsw_core);
- int (*basic_trap_groups_set)(struct mlxsw_core *mlxsw_core);
int (*port_type_set)(struct mlxsw_core *mlxsw_core, u16 local_port,
enum devlink_port_type new_type);
int (*port_split)(struct mlxsw_core *mlxsw_core, u16 local_port,
@@ -398,9 +402,6 @@ struct mlxsw_driver {
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
- bool res_query_enabled;
- bool fw_fatal_enabled;
- bool temp_warn_enabled;
};
int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 77e82e6cf6e8..fa33caecc91d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -1957,6 +1957,83 @@ int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
}
EXPORT_SYMBOL(mlxsw_afa_block_append_mcrouter);
+/* SIP DIP Action
+ * --------------
+ * The SIP_DIP_ACTION is used for modifying the SIP and DIP fields of the
+ * packet, e.g. for NAT. The L3 checksum is updated. The L4 checksum is also
+ * updated if the L4 protocol is TCP, or if it is UDP with a non-zero
+ * checksum field.
+ */
+
+#define MLXSW_AFA_IP_CODE 0x11
+#define MLXSW_AFA_IP_SIZE 2
+
+enum mlxsw_afa_ip_s_d {
+ /* ip refers to dip */
+ MLXSW_AFA_IP_S_D_DIP,
+ /* ip refers to sip */
+ MLXSW_AFA_IP_S_D_SIP,
+};
+
+/* afa_ip_s_d
+ * Source or destination.
+ */
+MLXSW_ITEM32(afa, ip, s_d, 0x00, 31, 1);
+
+enum mlxsw_afa_ip_m_l {
+ /* LSB: ip[63:0] refers to ip[63:0] */
+ MLXSW_AFA_IP_M_L_LSB,
+ /* MSB: ip[63:0] refers to ip[127:64] */
+ MLXSW_AFA_IP_M_L_MSB,
+};
+
+/* afa_ip_m_l
+ * MSB or LSB.
+ */
+MLXSW_ITEM32(afa, ip, m_l, 0x00, 30, 1);
+
+/* afa_ip_ip_63_32
+ * Bits [63:32] in the IP address to change to.
+ */
+MLXSW_ITEM32(afa, ip, ip_63_32, 0x08, 0, 32);
+
+/* afa_ip_ip_31_0
+ * Bits [31:0] in the IP address to change to.
+ */
+MLXSW_ITEM32(afa, ip, ip_31_0, 0x0C, 0, 32);
+
+static void mlxsw_afa_ip_pack(char *payload, enum mlxsw_afa_ip_s_d s_d,
+ enum mlxsw_afa_ip_m_l m_l, u32 ip_31_0,
+ u32 ip_63_32)
+{
+ mlxsw_afa_ip_s_d_set(payload, s_d);
+ mlxsw_afa_ip_m_l_set(payload, m_l);
+ mlxsw_afa_ip_ip_31_0_set(payload, ip_31_0);
+ mlxsw_afa_ip_ip_63_32_set(payload, ip_63_32);
+}
+
+int mlxsw_afa_block_append_ip(struct mlxsw_afa_block *block, bool is_dip,
+ bool is_lsb, u32 val_31_0, u32 val_63_32,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_afa_ip_s_d s_d = is_dip ? MLXSW_AFA_IP_S_D_DIP :
+ MLXSW_AFA_IP_S_D_SIP;
+ enum mlxsw_afa_ip_m_l m_l = is_lsb ? MLXSW_AFA_IP_M_L_LSB :
+ MLXSW_AFA_IP_M_L_MSB;
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_IP_CODE,
+ MLXSW_AFA_IP_SIZE);
+
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append IP action");
+ return PTR_ERR(act);
+ }
+
+ mlxsw_afa_ip_pack(act, s_d, m_l, val_31_0, val_63_32);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_ip);
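Each SIP_DIP action carries only 64 bits of address, with m_l selecting which half of a 128-bit field it targets, so rewriting a full IPv6 address takes two appended actions. A minimal sketch, assuming a hypothetical helper whose word[] argument holds the address as four host-order 32-bit words, word[0] being ip[127:96]:

	static int my_append_ipv6_sip(struct mlxsw_afa_block *block,
				      const u32 word[4],
				      struct netlink_ext_ack *extack)
	{
		int err;

		/* Lower half: ip[63:0] = {word[2], word[3]} */
		err = mlxsw_afa_block_append_ip(block, false, true,
						word[3], word[2], extack);
		if (err)
			return err;

		/* Upper half: ip[127:64] = {word[0], word[1]} */
		return mlxsw_afa_block_append_ip(block, false, false,
						 word[1], word[0], extack);
	}

For IPv4, a single call with is_lsb=true and val_63_32=0 suffices, as the IP4_SIP/IP4_DIP cases added to spectrum_acl.c later in this patch do.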
+
/* L4 Port Action
* --------------
* The L4_PORT_ACTION is used for modifying the sport and dport fields of the
* packet, e.g. for NAT.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index 16cbd6acbb01..db58037be46e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -92,6 +92,9 @@ int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
u16 expected_irif, u16 min_mtu,
bool rmid_valid, u32 kvdl_index);
+int mlxsw_afa_block_append_ip(struct mlxsw_afa_block *block, bool is_dip,
+ bool is_lsb, u32 val_31_0, u32 val_63_32,
+ struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_l4port(struct mlxsw_afa_block *block, bool is_dport, u16 l4_port,
struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_police(struct mlxsw_afa_block *block,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 6dd4ae2f45f4..29a74b8bd5b5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -18,6 +18,7 @@ struct mlxsw_env_module_info {
int num_ports_mapped;
int num_ports_up;
enum ethtool_module_power_mode_policy power_mode_policy;
+ enum mlxsw_reg_pmtm_module_type type;
};
struct mlxsw_env {
@@ -27,14 +28,47 @@ struct mlxsw_env {
struct mlxsw_env_module_info module_info[];
};
-static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
- bool *qsfp, bool *cmis)
+static int __mlxsw_env_validate_module_type(struct mlxsw_core *core, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(core);
+ int err;
+
+ switch (mlxsw_env->module_info[module].type) {
+ case MLXSW_REG_PMTM_MODULE_TYPE_TWISTED_PAIR:
+ err = -EINVAL;
+ break;
+ default:
+ err = 0;
+ }
+
+ return err;
+}
+
+static int mlxsw_env_validate_module_type(struct mlxsw_core *core, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(core);
+ int err;
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+ err = __mlxsw_env_validate_module_type(core, module);
+ mutex_unlock(&mlxsw_env->module_info_lock);
+
+ return err;
+}
+
+static int
+mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, bool *qsfp,
+ bool *cmis)
{
char mcia_pl[MLXSW_REG_MCIA_LEN];
char *eeprom_tmp;
u8 ident;
int err;
+ err = mlxsw_env_validate_module_type(core, id);
+ if (err)
+ return err;
+
mlxsw_reg_mcia_pack(mcia_pl, id, 0, MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, 1,
MLXSW_REG_MCIA_I2C_ADDR_LOW);
err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl);
@@ -53,6 +87,7 @@ static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
*qsfp = true;
break;
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD:
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_OSFP:
*qsfp = true;
*cmis = true;
break;
@@ -206,7 +241,8 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
return 0;
}
-int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
+int mlxsw_env_get_module_info(struct net_device *netdev,
+ struct mlxsw_core *mlxsw_core, int module,
struct ethtool_modinfo *modinfo)
{
u8 module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE];
@@ -215,6 +251,13 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
unsigned int read_size;
int err;
+ err = mlxsw_env_validate_module_type(mlxsw_core, module);
+ if (err) {
+ netdev_err(netdev,
+			   "EEPROM is not equipped on port module type\n");
+ return err;
+ }
+
err = mlxsw_env_query_module_eeprom(mlxsw_core, module, 0, offset,
module_info, false, &read_size);
if (err)
@@ -261,6 +304,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN / 2;
break;
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD:
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_OSFP:
/* Use SFF_8636 as base type. ethtool should recognize specific
* type through the identifier value.
*/
@@ -356,6 +400,13 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
{
u32 bytes_read = 0;
u16 device_addr;
+ int err;
+
+ err = mlxsw_env_validate_module_type(mlxsw_core, module);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "EEPROM is not equipped on port module type");
+ return err;
+ }
/* Offset cannot be larger than 2 * ETH_MODULE_EEPROM_PAGE_LEN */
device_addr = page->offset;
@@ -364,7 +415,6 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
char mcia_pl[MLXSW_REG_MCIA_LEN];
char *eeprom_tmp;
u8 size;
- int err;
size = min_t(u8, page->length - bytes_read,
MLXSW_REG_MCIA_EEPROM_SIZE);
@@ -414,11 +464,14 @@ int mlxsw_env_reset_module(struct net_device *netdev,
!(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT)))
return 0;
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return -EINVAL;
-
mutex_lock(&mlxsw_env->module_info_lock);
+ err = __mlxsw_env_validate_module_type(mlxsw_core, module);
+ if (err) {
+ netdev_err(netdev, "Reset module is not supported on port module type\n");
+ goto out;
+ }
+
if (mlxsw_env->module_info[module].num_ports_up) {
netdev_err(netdev, "Cannot reset module when ports using it are administratively up\n");
err = -EINVAL;
@@ -456,11 +509,14 @@ mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
u32 status_bits;
int err;
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return -EINVAL;
-
mutex_lock(&mlxsw_env->module_info_lock);
+ err = __mlxsw_env_validate_module_type(mlxsw_core, module);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Power mode is not supported on port module type");
+ goto out;
+ }
+
params->policy = mlxsw_env->module_info[module].power_mode_policy;
mlxsw_reg_mcion_pack(mcion_pl, module);
@@ -560,9 +616,6 @@ mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
bool low_power;
int err = 0;
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return -EINVAL;
-
if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH &&
policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy");
@@ -571,6 +624,13 @@ mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
mutex_lock(&mlxsw_env->module_info_lock);
+ err = __mlxsw_env_validate_module_type(mlxsw_core, module);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Power mode set is not supported on port module type");
+ goto out;
+ }
+
if (mlxsw_env->module_info[module].power_mode_policy == policy)
goto out;
@@ -661,13 +721,12 @@ static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core,
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtmp), mtmp_pl);
}
-static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core,
- u8 module_count)
+static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core)
{
int i, err, sensor_index;
bool has_temp_sensor;
- for (i = 0; i < module_count; i++) {
+ for (i = 0; i < mlxsw_core_env(mlxsw_core)->module_count; i++) {
err = mlxsw_env_module_has_temp_sensor(mlxsw_core, i,
&has_temp_sensor);
if (err)
@@ -759,15 +818,12 @@ mlxsw_env_mtwe_listener_func(const struct mlxsw_reg_info *reg, char *mtwe_pl,
}
static const struct mlxsw_listener mlxsw_env_temp_warn_listener =
- MLXSW_EVENTL(mlxsw_env_mtwe_listener_func, MTWE, MTWE);
+ MLXSW_CORE_EVENTL(mlxsw_env_mtwe_listener_func, MTWE);
static int mlxsw_env_temp_warn_event_register(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
- if (!mlxsw_core_temp_warn_enabled(mlxsw_core))
- return 0;
-
return mlxsw_core_trap_register(mlxsw_core,
&mlxsw_env_temp_warn_listener,
mlxsw_env);
@@ -775,9 +831,6 @@ static int mlxsw_env_temp_warn_event_register(struct mlxsw_core *mlxsw_core)
static void mlxsw_env_temp_warn_event_unregister(struct mlxsw_env *mlxsw_env)
{
- if (!mlxsw_core_temp_warn_enabled(mlxsw_env->core))
- return;
-
mlxsw_core_trap_unregister(mlxsw_env->core,
&mlxsw_env_temp_warn_listener, mlxsw_env);
}
@@ -849,16 +902,13 @@ mlxsw_env_pmpe_listener_func(const struct mlxsw_reg_info *reg, char *pmpe_pl,
}
static const struct mlxsw_listener mlxsw_env_module_plug_listener =
- MLXSW_EVENTL(mlxsw_env_pmpe_listener_func, PMPE, PMPE);
+ MLXSW_CORE_EVENTL(mlxsw_env_pmpe_listener_func, PMPE);
static int
mlxsw_env_module_plug_event_register(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
- if (!mlxsw_core_temp_warn_enabled(mlxsw_core))
- return 0;
-
return mlxsw_core_trap_register(mlxsw_core,
&mlxsw_env_module_plug_listener,
mlxsw_env);
@@ -867,21 +917,17 @@ mlxsw_env_module_plug_event_register(struct mlxsw_core *mlxsw_core)
static void
mlxsw_env_module_plug_event_unregister(struct mlxsw_env *mlxsw_env)
{
- if (!mlxsw_core_temp_warn_enabled(mlxsw_env->core))
- return;
-
mlxsw_core_trap_unregister(mlxsw_env->core,
&mlxsw_env_module_plug_listener,
mlxsw_env);
}
static int
-mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core,
- u8 module_count)
+mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core)
{
int i, err;
- for (i = 0; i < module_count; i++) {
+ for (i = 0; i < mlxsw_core_env(mlxsw_core)->module_count; i++) {
char pmaos_pl[MLXSW_REG_PMAOS_LEN];
mlxsw_reg_pmaos_pack(pmaos_pl, i);
@@ -901,9 +947,6 @@ mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return -EINVAL;
-
mutex_lock(&mlxsw_env->module_info_lock);
*p_counter = mlxsw_env->module_info[module].module_overheat_counter;
mutex_unlock(&mlxsw_env->module_info_lock);
@@ -916,9 +959,6 @@ void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return;
-
mutex_lock(&mlxsw_env->module_info_lock);
mlxsw_env->module_info[module].num_ports_mapped++;
mutex_unlock(&mlxsw_env->module_info_lock);
@@ -929,9 +969,6 @@ void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return;
-
mutex_lock(&mlxsw_env->module_info_lock);
mlxsw_env->module_info[module].num_ports_mapped--;
mutex_unlock(&mlxsw_env->module_info_lock);
@@ -943,9 +980,6 @@ int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module)
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
int err = 0;
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return -EINVAL;
-
mutex_lock(&mlxsw_env->module_info_lock);
if (mlxsw_env->module_info[module].power_mode_policy !=
@@ -975,9 +1009,6 @@ void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return;
-
mutex_lock(&mlxsw_env->module_info_lock);
mlxsw_env->module_info[module].num_ports_up--;
@@ -999,6 +1030,28 @@ out_unlock:
}
EXPORT_SYMBOL(mlxsw_env_module_port_down);
+static int
+mlxsw_env_module_type_set(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ int i;
+
+ for (i = 0; i < mlxsw_env->module_count; i++) {
+ char pmtm_pl[MLXSW_REG_PMTM_LEN];
+ int err;
+
+ mlxsw_reg_pmtm_pack(pmtm_pl, 0, i);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl);
+ if (err)
+ return err;
+
+ mlxsw_env->module_info[i].type =
+ mlxsw_reg_pmtm_module_type_get(pmtm_pl);
+ }
+
+ return 0;
+}
+
int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
{
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
@@ -1037,17 +1090,21 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
if (err)
goto err_module_plug_event_register;
- err = mlxsw_env_module_oper_state_event_enable(mlxsw_core,
- env->module_count);
+ err = mlxsw_env_module_oper_state_event_enable(mlxsw_core);
if (err)
goto err_oper_state_event_enable;
- err = mlxsw_env_module_temp_event_enable(mlxsw_core, env->module_count);
+ err = mlxsw_env_module_temp_event_enable(mlxsw_core);
if (err)
goto err_temp_event_enable;
+ err = mlxsw_env_module_type_set(mlxsw_core);
+ if (err)
+ goto err_type_set;
+
return 0;
+err_type_set:
err_temp_event_enable:
err_oper_state_event_enable:
mlxsw_env_module_plug_event_unregister(env);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
index da121b1a84b4..ec6564e5d2ee 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -12,7 +12,8 @@ struct ethtool_eeprom;
int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
int off, int *temp);
-int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
+int mlxsw_env_get_module_info(struct net_device *netdev,
+ struct mlxsw_core *mlxsw_core, int module,
struct ethtool_modinfo *modinfo);
int mlxsw_env_get_module_eeprom(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index d41afdfbd085..8b170ad92302 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -57,14 +57,14 @@ static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
int temp, index;
int err;
- index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
+ index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index,
mlxsw_hwmon->module_sensor_max);
mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
@@ -80,14 +80,14 @@ static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
int temp_max, index;
int err;
- index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
+ index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index,
mlxsw_hwmon->module_sensor_max);
mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
@@ -103,9 +103,9 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
unsigned long val;
int index;
@@ -117,7 +117,7 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
if (val != 1)
return -EINVAL;
- index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
+ index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index,
mlxsw_hwmon->module_sensor_max);
mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, index);
@@ -138,13 +138,13 @@ static ssize_t mlxsw_hwmon_fan_rpm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
char mfsm_pl[MLXSW_REG_MFSM_LEN];
int err;
- mlxsw_reg_mfsm_pack(mfsm_pl, mlwsw_hwmon_attr->type_index);
+ mlxsw_reg_mfsm_pack(mfsm_pl, mlxsw_hwmon_attr->type_index);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mfsm), mfsm_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query fan\n");
@@ -157,9 +157,9 @@ static ssize_t mlxsw_hwmon_fan_fault_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
char fore_pl[MLXSW_REG_FORE_LEN];
bool fault;
int err;
@@ -169,7 +169,7 @@ static ssize_t mlxsw_hwmon_fan_fault_show(struct device *dev,
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query fan\n");
return err;
}
- mlxsw_reg_fore_unpack(fore_pl, mlwsw_hwmon_attr->type_index, &fault);
+ mlxsw_reg_fore_unpack(fore_pl, mlxsw_hwmon_attr->type_index, &fault);
return sprintf(buf, "%u\n", fault);
}
@@ -178,13 +178,13 @@ static ssize_t mlxsw_hwmon_pwm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
int err;
- mlxsw_reg_mfsc_pack(mfsc_pl, mlwsw_hwmon_attr->type_index, 0);
+ mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_hwmon_attr->type_index, 0);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mfsc), mfsc_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query PWM\n");
@@ -198,9 +198,9 @@ static ssize_t mlxsw_hwmon_pwm_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
unsigned long val;
int err;
@@ -211,7 +211,7 @@ static ssize_t mlxsw_hwmon_pwm_store(struct device *dev,
if (val > 255)
return -EINVAL;
- mlxsw_reg_mfsc_pack(mfsc_pl, mlwsw_hwmon_attr->type_index, val);
+ mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_hwmon_attr->type_index, val);
err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mfsc), mfsc_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to write PWM\n");
@@ -224,14 +224,14 @@ static int mlxsw_hwmon_module_temp_get(struct device *dev,
struct device_attribute *attr,
int *p_temp)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
u8 module;
int err;
- module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module,
false, false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
@@ -261,15 +261,15 @@ static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0};
u8 module, fault;
u16 temp;
int err;
- module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module,
1);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl);
@@ -303,13 +303,13 @@ static int mlxsw_hwmon_module_temp_critical_get(struct device *dev,
struct device_attribute *attr,
int *p_temp)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
u8 module;
int err;
- module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module,
SFP_TEMP_HIGH_WARN, p_temp);
if (err) {
@@ -337,13 +337,13 @@ static int mlxsw_hwmon_module_temp_emergency_get(struct device *dev,
struct device_attribute *attr,
int *p_temp)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
u8 module;
int err;
- module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module,
SFP_TEMP_HIGH_ALARM, p_temp);
if (err) {
@@ -373,11 +373,11 @@ mlxsw_hwmon_module_temp_label_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
return sprintf(buf, "front panel %03u\n",
- mlwsw_hwmon_attr->type_index);
+ mlxsw_hwmon_attr->type_index);
}
static ssize_t
@@ -385,10 +385,10 @@ mlxsw_hwmon_gbox_temp_label_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
- int index = mlwsw_hwmon_attr->type_index -
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ int index = mlxsw_hwmon_attr->type_index -
mlxsw_hwmon->module_sensor_max + 1;
return sprintf(buf, "gearbox %03u\n", index);
@@ -655,9 +655,6 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
u8 module_sensor_max;
int i, err;
- if (!mlxsw_core_res_query_enabled(mlxsw_hwmon->core))
- return 0;
-
mlxsw_reg_mgpir_pack(mgpir_pl);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index b29824448aa8..05f54bd982c0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -357,6 +357,10 @@ static int mlxsw_thermal_trend_get(struct thermal_zone_device *tzdev,
return 0;
}
+static struct thermal_zone_params mlxsw_thermal_params = {
+ .no_hwmon = true,
+};
+
static struct thermal_zone_device_ops mlxsw_thermal_ops = {
.bind = mlxsw_thermal_bind,
.unbind = mlxsw_thermal_unbind,
@@ -388,11 +392,11 @@ static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev,
trip->min_state,
THERMAL_WEIGHT_DEFAULT);
if (err < 0)
- goto err_bind_cooling_device;
+ goto err_thermal_zone_bind_cooling_device;
}
return 0;
-err_bind_cooling_device:
+err_thermal_zone_bind_cooling_device:
for (j = i - 1; j >= 0; j--)
thermal_zone_unbind_cooling_device(tzdev, j, cdev);
return err;
@@ -678,7 +682,8 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
MLXSW_THERMAL_TRIP_MASK,
module_tz,
&mlxsw_thermal_module_ops,
- NULL, 0,
+ &mlxsw_thermal_params,
+ 0,
module_tz->parent->polling_delay);
if (IS_ERR(module_tz->tzdev)) {
err = PTR_ERR(module_tz->tzdev);
@@ -741,9 +746,6 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
int i, err;
- if (!mlxsw_core_res_query_enabled(core))
- return 0;
-
mlxsw_reg_mgpir_pack(mgpir_pl);
err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
@@ -761,7 +763,7 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
for (i = 0; i < thermal->tz_module_num; i++) {
err = mlxsw_thermal_module_init(dev, core, thermal, i);
if (err)
- goto err_unreg_tz_module_arr;
+ goto err_thermal_module_init;
}
for (i = 0; i < thermal->tz_module_num; i++) {
@@ -770,12 +772,13 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
continue;
err = mlxsw_thermal_module_tz_init(module_tz);
if (err)
- goto err_unreg_tz_module_arr;
+ goto err_thermal_module_tz_init;
}
return 0;
-err_unreg_tz_module_arr:
+err_thermal_module_tz_init:
+err_thermal_module_init:
for (i = thermal->tz_module_num - 1; i >= 0; i--)
mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
kfree(thermal->tz_module_arr);
@@ -787,9 +790,6 @@ mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal)
{
int i;
- if (!mlxsw_core_res_query_enabled(thermal->core))
- return;
-
for (i = thermal->tz_module_num - 1; i >= 0; i--)
mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
kfree(thermal->tz_module_arr);
@@ -808,7 +808,7 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
MLXSW_THERMAL_TRIP_MASK,
gearbox_tz,
&mlxsw_thermal_gearbox_ops,
- NULL, 0,
+ &mlxsw_thermal_params, 0,
gearbox_tz->parent->polling_delay);
if (IS_ERR(gearbox_tz->tzdev))
return PTR_ERR(gearbox_tz->tzdev);
@@ -837,9 +837,6 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
int i;
int err;
- if (!mlxsw_core_res_query_enabled(core))
- return 0;
-
mlxsw_reg_mgpir_pack(mgpir_pl);
err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
@@ -866,12 +863,12 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
gearbox_tz->parent = thermal;
err = mlxsw_thermal_gearbox_tz_init(gearbox_tz);
if (err)
- goto err_unreg_tz_gearbox;
+ goto err_thermal_gearbox_tz_init;
}
return 0;
-err_unreg_tz_gearbox:
+err_thermal_gearbox_tz_init:
for (i--; i >= 0; i--)
mlxsw_thermal_gearbox_tz_fini(&thermal->tz_gearbox_arr[i]);
kfree(thermal->tz_gearbox_arr);
@@ -883,9 +880,6 @@ mlxsw_thermal_gearboxes_fini(struct mlxsw_thermal *thermal)
{
int i;
- if (!mlxsw_core_res_query_enabled(thermal->core))
- return;
-
for (i = thermal->tz_gearbox_num - 1; i >= 0; i--)
mlxsw_thermal_gearbox_tz_fini(&thermal->tz_gearbox_arr[i]);
kfree(thermal->tz_gearbox_arr);
@@ -915,7 +909,7 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfcr), mfcr_pl);
if (err) {
dev_err(dev, "Failed to probe PWMs\n");
- goto err_free_thermal;
+ goto err_reg_query;
}
mlxsw_reg_mfcr_unpack(mfcr_pl, &freq, &tacho_active, &pwm_active);
@@ -929,14 +923,14 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsl),
mfsl_pl);
if (err)
- goto err_free_thermal;
+ goto err_reg_query;
/* set the minimal RPMs to 0 */
mlxsw_reg_mfsl_tach_min_set(mfsl_pl, 0);
err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsl),
mfsl_pl);
if (err)
- goto err_free_thermal;
+ goto err_reg_write;
}
}
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) {
@@ -949,7 +943,7 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
if (IS_ERR(cdev)) {
err = PTR_ERR(cdev);
dev_err(dev, "Failed to register cooling device\n");
- goto err_unreg_cdevs;
+ goto err_thermal_cooling_device_register;
}
thermal->cdevs[i] = cdev;
}
@@ -968,43 +962,45 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
MLXSW_THERMAL_TRIP_MASK,
thermal,
&mlxsw_thermal_ops,
- NULL, 0,
+ &mlxsw_thermal_params, 0,
thermal->polling_delay);
if (IS_ERR(thermal->tzdev)) {
err = PTR_ERR(thermal->tzdev);
dev_err(dev, "Failed to register thermal zone\n");
- goto err_unreg_cdevs;
+ goto err_thermal_zone_device_register;
}
err = mlxsw_thermal_modules_init(dev, core, thermal);
if (err)
- goto err_unreg_tzdev;
+ goto err_thermal_modules_init;
err = mlxsw_thermal_gearboxes_init(dev, core, thermal);
if (err)
- goto err_unreg_modules_tzdev;
+ goto err_thermal_gearboxes_init;
err = thermal_zone_device_enable(thermal->tzdev);
if (err)
- goto err_unreg_gearboxes;
+ goto err_thermal_zone_device_enable;
*p_thermal = thermal;
return 0;
-err_unreg_gearboxes:
+err_thermal_zone_device_enable:
mlxsw_thermal_gearboxes_fini(thermal);
-err_unreg_modules_tzdev:
+err_thermal_gearboxes_init:
mlxsw_thermal_modules_fini(thermal);
-err_unreg_tzdev:
+err_thermal_modules_init:
if (thermal->tzdev) {
thermal_zone_device_unregister(thermal->tzdev);
thermal->tzdev = NULL;
}
-err_unreg_cdevs:
+err_thermal_zone_device_register:
+err_thermal_cooling_device_register:
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
if (thermal->cdevs[i])
thermal_cooling_device_unregister(thermal->cdevs[i]);
-err_free_thermal:
+err_reg_write:
+err_reg_query:
devm_kfree(dev, thermal);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 10d13f5f9c7d..3bc012dafd08 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -110,7 +110,8 @@ static int mlxsw_m_get_module_info(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_info(core, mlxsw_m_port->module, modinfo);
+ return mlxsw_env_get_module_info(netdev, core, mlxsw_m_port->module,
+ modinfo);
}
static int
@@ -421,6 +422,7 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
struct netlink_ext_ack *extack)
{
struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
int err;
mlxsw_m->core = mlxsw_core;
@@ -436,7 +438,9 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
return err;
}
+ devl_lock(devlink);
err = mlxsw_m_ports_create(mlxsw_m);
+ devl_unlock(devlink);
if (err) {
dev_err(mlxsw_m->bus_info->dev, "Failed to create ports\n");
return err;
@@ -448,8 +452,11 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
static void mlxsw_m_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ devl_lock(devlink);
mlxsw_m_ports_remove(mlxsw_m);
+ devl_unlock(devlink);
}
static const struct mlxsw_config_profile mlxsw_m_config_profile;
@@ -460,7 +467,6 @@ static struct mlxsw_driver mlxsw_m_driver = {
.init = mlxsw_m_init,
.fini = mlxsw_m_fini,
.profile = &mlxsw_m_config_profile,
- .res_query_enabled = true,
};
static const struct i2c_device_id mlxsw_m_i2c_id[] = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 24cc65018b41..67b1a2f8397f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4482,6 +4482,8 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_cap, 0x08, 0, 32);
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 BIT(21)
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 BIT(22)
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4 BIT(23)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_T BIT(24)
+#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_T BIT(25)
#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR BIT(27)
#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR BIT(28)
#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR BIT(29)
@@ -6062,6 +6064,58 @@ static inline void mlxsw_reg_pllp_unpack(char *payload, u8 *label_port,
*slot_index = mlxsw_reg_pllp_slot_index_get(payload);
}
+/* PMTM - Port Module Type Mapping Register
+ * ----------------------------------------
+ * The PMTM register allows querying or configuring module types. The
+ * register can only be set when the module is disabled by the PMAOS register.
+ */
+#define MLXSW_REG_PMTM_ID 0x5067
+#define MLXSW_REG_PMTM_LEN 0x10
+
+MLXSW_REG_DEFINE(pmtm, MLXSW_REG_PMTM_ID, MLXSW_REG_PMTM_LEN);
+
+/* reg_pmtm_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtm, slot_index, 0x00, 24, 4);
+
+/* reg_pmtm_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtm, module, 0x00, 16, 8);
+
+enum mlxsw_reg_pmtm_module_type {
+ MLXSW_REG_PMTM_MODULE_TYPE_BACKPLANE_4_LANES = 0,
+ MLXSW_REG_PMTM_MODULE_TYPE_QSFP = 1,
+ MLXSW_REG_PMTM_MODULE_TYPE_SFP = 2,
+ MLXSW_REG_PMTM_MODULE_TYPE_BACKPLANE_SINGLE_LANE = 4,
+ MLXSW_REG_PMTM_MODULE_TYPE_BACKPLANE_2_LANES = 8,
+ MLXSW_REG_PMTM_MODULE_TYPE_CHIP2CHIP4X = 10,
+ MLXSW_REG_PMTM_MODULE_TYPE_CHIP2CHIP2X = 11,
+ MLXSW_REG_PMTM_MODULE_TYPE_CHIP2CHIP1X = 12,
+ MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD = 14,
+ MLXSW_REG_PMTM_MODULE_TYPE_OSFP = 15,
+ MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD = 16,
+ MLXSW_REG_PMTM_MODULE_TYPE_DSFP = 17,
+ MLXSW_REG_PMTM_MODULE_TYPE_CHIP2CHIP8X = 18,
+ MLXSW_REG_PMTM_MODULE_TYPE_TWISTED_PAIR = 19,
+};
+
+/* reg_pmtm_module_type
+ * Module type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmtm, module_type, 0x04, 0, 5);
+
+static inline void mlxsw_reg_pmtm_pack(char *payload, u8 slot_index, u8 module)
+{
+ MLXSW_REG_ZERO(pmtm, payload);
+ mlxsw_reg_pmtm_slot_index_set(payload, slot_index);
+ mlxsw_reg_pmtm_module_set(payload, module);
+}
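A short sketch of driving the new register, assuming mlxsw_core and module are in scope and that the module was already disabled through the PMAOS register (a prerequisite for PMTM writes). The module_type setter is generated by the MLXSW_ITEM32() macro above, alongside the getter:

	char pmtm_pl[MLXSW_REG_PMTM_LEN];
	int err;

	mlxsw_reg_pmtm_pack(pmtm_pl, 0, module);	/* slot 0 */
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl);
	if (err)
		return err;

	if (mlxsw_reg_pmtm_module_type_get(pmtm_pl) !=
	    MLXSW_REG_PMTM_MODULE_TYPE_SFP) {
		mlxsw_reg_pmtm_module_type_set(pmtm_pl,
					       MLXSW_REG_PMTM_MODULE_TYPE_SFP);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl);
	}
	return err;

The read-only path is what mlxsw_env_module_type_set() in core_env.c (above) uses to cache each module's type at init time.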
+
/* HTGT - Host Trap Group Table
* ----------------------------
* Configures the properties for forwarding to CPU.
@@ -6087,9 +6141,7 @@ MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
- MLXSW_REG_HTGT_TRAP_GROUP_MFDE,
- MLXSW_REG_HTGT_TRAP_GROUP_MTWE,
- MLXSW_REG_HTGT_TRAP_GROUP_PMPE,
+ MLXSW_REG_HTGT_TRAP_GROUP_CORE_EVENT,
MLXSW_REG_HTGT_TRAP_GROUP_SP_STP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP,
@@ -6732,12 +6784,14 @@ static inline void mlxsw_reg_ritr_counter_pack(char *payload, u32 index,
set_type = MLXSW_REG_RITR_COUNTER_SET_TYPE_BASIC;
else
set_type = MLXSW_REG_RITR_COUNTER_SET_TYPE_NO_COUNT;
- mlxsw_reg_ritr_egress_counter_set_type_set(payload, set_type);
- if (egress)
+ if (egress) {
+ mlxsw_reg_ritr_egress_counter_set_type_set(payload, set_type);
mlxsw_reg_ritr_egress_counter_index_set(payload, index);
- else
+ } else {
+ mlxsw_reg_ritr_ingress_counter_set_type_set(payload, set_type);
mlxsw_reg_ritr_ingress_counter_index_set(payload, index);
+ }
}
static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif)
@@ -9985,6 +10039,7 @@ enum mlxsw_reg_mcia_eeprom_module_info_id {
MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D,
MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11,
MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD = 0x18,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_OSFP = 0x19,
};
enum mlxsw_reg_mcia_eeprom_module_info {
@@ -11271,24 +11326,24 @@ enum mlxsw_reg_mgpir_device_type {
MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE,
};
-/* device_type
+/* mgpir_device_type
* Access: RO
*/
MLXSW_ITEM32(reg, mgpir, device_type, 0x00, 24, 4);
-/* devices_per_flash
+/* mgpir_devices_per_flash
* Number of devices of device_type per flash (can be shared by a few devices).
* Access: RO
*/
MLXSW_ITEM32(reg, mgpir, devices_per_flash, 0x00, 16, 8);
-/* num_of_devices
+/* mgpir_num_of_devices
* Number of devices of device_type.
* Access: RO
*/
MLXSW_ITEM32(reg, mgpir, num_of_devices, 0x00, 0, 8);
-/* num_of_modules
+/* mgpir_num_of_modules
* Number of modules.
* Access: RO
*/
@@ -12568,6 +12623,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(pddr),
MLXSW_REG(pmmp),
MLXSW_REG(pllp),
+ MLXSW_REG(pmtm),
MLXSW_REG(htgt),
MLXSW_REG(hpkt),
MLXSW_REG(rgcr),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index c7fc650608eb..daacf6291253 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -33,6 +33,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_ACL_MAX_REGIONS,
MLXSW_RES_ID_ACL_MAX_GROUPS,
MLXSW_RES_ID_ACL_MAX_GROUP_SIZE,
+ MLXSW_RES_ID_ACL_MAX_DEFAULT_ACTIONS,
MLXSW_RES_ID_ACL_FLEX_KEYS,
MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE,
MLXSW_RES_ID_ACL_ACTIONS_PER_SET,
@@ -90,6 +91,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_ACL_MAX_REGIONS] = 0x2903,
[MLXSW_RES_ID_ACL_MAX_GROUPS] = 0x2904,
[MLXSW_RES_ID_ACL_MAX_GROUP_SIZE] = 0x2905,
+ [MLXSW_RES_ID_ACL_MAX_DEFAULT_ACTIONS] = 0x2908,
[MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910,
[MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911,
[MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index aa411dec62f0..8eb05090ffec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -45,52 +45,49 @@
#include "spectrum_ptp.h"
#include "spectrum_trap.h"
+#define MLXSW_SP_FWREV_MINOR 2010
+#define MLXSW_SP_FWREV_SUBMINOR 1006
+
#define MLXSW_SP1_FWREV_MAJOR 13
-#define MLXSW_SP1_FWREV_MINOR 2010
-#define MLXSW_SP1_FWREV_SUBMINOR 1006
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
.major = MLXSW_SP1_FWREV_MAJOR,
- .minor = MLXSW_SP1_FWREV_MINOR,
- .subminor = MLXSW_SP1_FWREV_SUBMINOR,
+ .minor = MLXSW_SP_FWREV_MINOR,
+ .subminor = MLXSW_SP_FWREV_SUBMINOR,
.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};
#define MLXSW_SP1_FW_FILENAME \
"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
- "." __stringify(MLXSW_SP1_FWREV_MINOR) \
- "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
+ "." __stringify(MLXSW_SP_FWREV_MINOR) \
+ "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
#define MLXSW_SP2_FWREV_MAJOR 29
-#define MLXSW_SP2_FWREV_MINOR 2010
-#define MLXSW_SP2_FWREV_SUBMINOR 1006
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
.major = MLXSW_SP2_FWREV_MAJOR,
- .minor = MLXSW_SP2_FWREV_MINOR,
- .subminor = MLXSW_SP2_FWREV_SUBMINOR,
+ .minor = MLXSW_SP_FWREV_MINOR,
+ .subminor = MLXSW_SP_FWREV_SUBMINOR,
};
#define MLXSW_SP2_FW_FILENAME \
"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
- "." __stringify(MLXSW_SP2_FWREV_MINOR) \
- "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"
+ "." __stringify(MLXSW_SP_FWREV_MINOR) \
+ "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
#define MLXSW_SP3_FWREV_MAJOR 30
-#define MLXSW_SP3_FWREV_MINOR 2010
-#define MLXSW_SP3_FWREV_SUBMINOR 1006
static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
.major = MLXSW_SP3_FWREV_MAJOR,
- .minor = MLXSW_SP3_FWREV_MINOR,
- .subminor = MLXSW_SP3_FWREV_SUBMINOR,
+ .minor = MLXSW_SP_FWREV_MINOR,
+ .subminor = MLXSW_SP_FWREV_SUBMINOR,
};
#define MLXSW_SP3_FW_FILENAME \
"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
- "." __stringify(MLXSW_SP3_FWREV_MINOR) \
- "." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"
+ "." __stringify(MLXSW_SP_FWREV_MINOR) \
+ "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
@@ -2148,13 +2145,11 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_port *mlxsw_sp_port;
enum mlxsw_reg_pude_oper_status status;
- unsigned int max_ports;
u16 local_port;
- max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
local_port = mlxsw_reg_pude_local_port_get(pude_pl);
- if (WARN_ON_ONCE(!local_port || local_port >= max_ports))
+ if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
return;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port)
@@ -2393,45 +2388,6 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
return 0;
}
-static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_listener listeners[],
- size_t listeners_count)
-{
- int i;
- int err;
-
- for (i = 0; i < listeners_count; i++) {
- err = mlxsw_core_trap_register(mlxsw_sp->core,
- &listeners[i],
- mlxsw_sp);
- if (err)
- goto err_listener_register;
-
- }
- return 0;
-
-err_listener_register:
- for (i--; i >= 0; i--) {
- mlxsw_core_trap_unregister(mlxsw_sp->core,
- &listeners[i],
- mlxsw_sp);
- }
- return err;
-}
-
-static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_listener listeners[],
- size_t listeners_count)
-{
- int i;
-
- for (i = 0; i < listeners_count; i++) {
- mlxsw_core_trap_unregister(mlxsw_sp->core,
- &listeners[i],
- mlxsw_sp);
- }
-}
-
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_trap *trap;
@@ -2456,21 +2412,23 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_trap_groups_set;
- err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
- ARRAY_SIZE(mlxsw_sp_listener));
+ err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener,
+ ARRAY_SIZE(mlxsw_sp_listener),
+ mlxsw_sp);
if (err)
goto err_traps_register;
- err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
- mlxsw_sp->listeners_count);
+ err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners,
+ mlxsw_sp->listeners_count, mlxsw_sp);
if (err)
goto err_extra_traps_init;
return 0;
err_extra_traps_init:
- mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
- ARRAY_SIZE(mlxsw_sp_listener));
+ mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
+ ARRAY_SIZE(mlxsw_sp_listener),
+ mlxsw_sp);
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
@@ -2480,10 +2438,11 @@ err_cpu_policers_set:
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
- mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
- mlxsw_sp->listeners_count);
- mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
- ARRAY_SIZE(mlxsw_sp_listener));
+ mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners,
+ mlxsw_sp->listeners_count,
+ mlxsw_sp);
+ mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
+ ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp);
kfree(mlxsw_sp->trap);
}
@@ -2528,42 +2487,6 @@ static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
kfree(mlxsw_sp->lags);
}
-static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
-{
- char htgt_pl[MLXSW_REG_HTGT_LEN];
- int err;
-
- mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
- MLXSW_REG_HTGT_INVALID_POLICER,
- MLXSW_REG_HTGT_DEFAULT_PRIORITY,
- MLXSW_REG_HTGT_DEFAULT_TC);
- err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
- if (err)
- return err;
-
- mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MFDE,
- MLXSW_REG_HTGT_INVALID_POLICER,
- MLXSW_REG_HTGT_DEFAULT_PRIORITY,
- MLXSW_REG_HTGT_DEFAULT_TC);
- err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
- if (err)
- return err;
-
- mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MTWE,
- MLXSW_REG_HTGT_INVALID_POLICER,
- MLXSW_REG_HTGT_DEFAULT_PRIORITY,
- MLXSW_REG_HTGT_DEFAULT_TC);
- err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
- if (err)
- return err;
-
- mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_PMPE,
- MLXSW_REG_HTGT_INVALID_POLICER,
- MLXSW_REG_HTGT_DEFAULT_PRIORITY,
- MLXSW_REG_HTGT_DEFAULT_TC);
- return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
-}
-
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
.clock_init = mlxsw_sp1_ptp_clock_init,
.clock_fini = mlxsw_sp1_ptp_clock_fini,
@@ -2895,6 +2818,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
int err;
mlxsw_sp->core = mlxsw_core;
@@ -3055,7 +2979,9 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_sample_trigger_init;
}
+ devl_lock(devlink);
err = mlxsw_sp_ports_create(mlxsw_sp);
+ devl_unlock(devlink);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
goto err_ports_create;
@@ -3236,8 +3162,12 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ devl_lock(devlink);
mlxsw_sp_ports_remove(mlxsw_sp);
+ devl_unlock(devlink);
+
rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
mlxsw_sp_port_module_info_fini(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
@@ -3677,7 +3607,6 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.fw_filename = MLXSW_SP1_FW_FILENAME,
.init = mlxsw_sp1_init,
.fini = mlxsw_sp_fini,
- .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
.sb_pool_get = mlxsw_sp_sb_pool_get,
@@ -3705,9 +3634,6 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp1_config_profile,
- .res_query_enabled = true,
- .fw_fatal_enabled = true,
- .temp_warn_enabled = true,
};
static struct mlxsw_driver mlxsw_sp2_driver = {
@@ -3717,7 +3643,6 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.fw_filename = MLXSW_SP2_FW_FILENAME,
.init = mlxsw_sp2_init,
.fini = mlxsw_sp_fini,
- .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
.sb_pool_get = mlxsw_sp_sb_pool_get,
@@ -3746,9 +3671,6 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
- .res_query_enabled = true,
- .fw_fatal_enabled = true,
- .temp_warn_enabled = true,
};
static struct mlxsw_driver mlxsw_sp3_driver = {
@@ -3758,7 +3680,6 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.fw_filename = MLXSW_SP3_FW_FILENAME,
.init = mlxsw_sp3_init,
.fini = mlxsw_sp_fini,
- .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
.sb_pool_get = mlxsw_sp_sb_pool_get,
@@ -3787,9 +3708,6 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
- .res_query_enabled = true,
- .fw_fatal_enabled = true,
- .temp_warn_enabled = true,
};
static struct mlxsw_driver mlxsw_sp4_driver = {
@@ -3797,7 +3715,6 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.priv_size = sizeof(struct mlxsw_sp),
.init = mlxsw_sp4_init,
.fini = mlxsw_sp_fini,
- .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
.sb_pool_get = mlxsw_sp_sb_pool_get,
@@ -3826,9 +3743,6 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
- .res_query_enabled = true,
- .fw_fatal_enabled = true,
- .temp_warn_enabled = true,
};
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
@@ -4916,6 +4830,22 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+static bool mlxsw_sp_netdevice_event_is_router(unsigned long event)
+{
+ switch (event) {
+ case NETDEV_PRE_CHANGEADDR:
+ case NETDEV_CHANGEADDR:
+ case NETDEV_CHANGEMTU:
+ case NETDEV_OFFLOAD_XSTATS_ENABLE:
+ case NETDEV_OFFLOAD_XSTATS_DISABLE:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -4940,9 +4870,7 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
event, ptr);
- else if (event == NETDEV_PRE_CHANGEADDR ||
- event == NETDEV_CHANGEADDR ||
- event == NETDEV_CHANGEMTU)
+ else if (mlxsw_sp_netdevice_event_is_router(event))
err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
else if (mlxsw_sp_is_vrf_event(event, ptr))
err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index bb2442e1f705..20588e699588 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -481,6 +481,13 @@ int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool is_8021ad_tagged,
bool is_8021q_tagged);
+static inline bool
+mlxsw_sp_local_port_is_valid(struct mlxsw_sp *mlxsw_sp, u16 local_port)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+
+ return local_port < max_ports && local_port;
+}
/* spectrum_buffers.c */
struct mlxsw_sp_hdroom_prio {
@@ -813,6 +820,24 @@ int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
/* spectrum2_kvdl.c */
extern const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops;
+enum mlxsw_sp_acl_mangle_field {
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_SPORT,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP4_SIP,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP4_DIP,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_1,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_2,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_3,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_4,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_1,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_2,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_3,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_4,
+};
+
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
struct mlxsw_afk_element_values values;
@@ -821,9 +846,14 @@ struct mlxsw_sp_acl_rule_info {
ingress_bind_blocker:1,
egress_bind_blocker:1,
counter_valid:1,
- policer_index_valid:1;
+ policer_index_valid:1,
+ ipv6_valid:1;
unsigned int counter_index;
u16 policer_index;
+ struct {
+ u32 prev_val;
+ enum mlxsw_sp_acl_mangle_field prev_field;
+ } ipv6;
};
/* spectrum_flow.c */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
index a9fff8adc75e..d20e794e01ca 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
@@ -213,7 +213,6 @@ mlxsw_sp1_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp1_kvdl_part *part;
bool need_update = true;
unsigned int nr_entries;
- size_t usage_size;
u64 resource_size;
int err;
@@ -225,8 +224,8 @@ mlxsw_sp1_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
}
nr_entries = div_u64(resource_size, info->alloc_size);
- usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
- part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
+ part = kzalloc(struct_size(part, usage, BITS_TO_LONGS(nr_entries)),
+ GFP_KERNEL);
if (!part)
return ERR_PTR(-ENOMEM);
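struct_size(part, usage, n) from <linux/overflow.h> computes sizeof(*part) plus storage for n trailing flexible-array elements, saturating to SIZE_MAX on overflow so the allocation fails instead of silently under-allocating. A minimal sketch of the pattern, assuming the struct's flexible array member:

	struct mlxsw_sp1_kvdl_part {
		/* ... */
		unsigned long usage[];	/* entry usage bitmap */
	};

	part = kzalloc(struct_size(part, usage, BITS_TO_LONGS(nr_entries)),
		       GFP_KERNEL);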
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
index ad69913f19c1..5b0210862655 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
@@ -77,7 +77,14 @@ static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
int i;
int err;
+	/* Some TCAM regions are not exposed to the host and are used
+	 * internally by the device. Allocate KVDL entries for the default
+	 * actions of these regions to prevent the host from overwriting them.
+	 */
tcam->kvdl_count = _tcam->max_regions;
+ if (MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_DEFAULT_ACTIONS))
+ tcam->kvdl_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_DEFAULT_ACTIONS);
err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
tcam->kvdl_count, &tcam->kvdl_index);
if (err)
@@ -97,7 +104,10 @@ static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
goto err_afa_block_continue;
enc_actions = mlxsw_afa_block_cur_set(afa_block);
- for (i = 0; i < tcam->kvdl_count; i++) {
+ /* Only write to KVDL entries used by TCAM regions exposed to the
+ * host.
+ */
+ for (i = 0; i < _tcam->max_regions; i++) {
mlxsw_reg_pefa_pack(pefa_pl, tcam->kvdl_index + i,
true, enc_actions);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
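To make the split concrete with hypothetical numbers (not taken from the PRM): if the device exposes max_regions = 1024 to the host but reports ACL_MAX_DEFAULT_ACTIONS = 1400, then 1400 KVDL action-set entries are reserved starting at kvdl_index, yet only the first 1024 are programmed with the continue action; the remaining 376 stay untouched so the device-internal regions keep their default actions.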
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 70c11bfac08f..6c5af018546f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -505,14 +505,6 @@ int mlxsw_sp_acl_rulei_act_priority(struct mlxsw_sp *mlxsw_sp,
extack);
}
-enum mlxsw_sp_acl_mangle_field {
- MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD,
- MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP,
- MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN,
- MLXSW_SP_ACL_MANGLE_FIELD_IP_SPORT,
- MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT,
-};
-
struct mlxsw_sp_acl_mangle_action {
enum flow_action_mangle_base htype;
/* Offset is u32-aligned. */
@@ -561,6 +553,18 @@ static struct mlxsw_sp_acl_mangle_action mlxsw_sp_acl_mangle_actions[] = {
MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0x0000ffff, 16, IP_SPORT),
MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0xffff0000, 0, IP_DPORT),
+
+ MLXSW_SP_ACL_MANGLE_ACTION_IP4(12, 0x00000000, 0, IP4_SIP),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP4(16, 0x00000000, 0, IP4_DIP),
+
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(8, 0x00000000, 0, IP6_SIP_1),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(12, 0x00000000, 0, IP6_SIP_2),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(16, 0x00000000, 0, IP6_SIP_3),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(20, 0x00000000, 0, IP6_SIP_4),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(24, 0x00000000, 0, IP6_DIP_1),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(28, 0x00000000, 0, IP6_DIP_2),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(32, 0x00000000, 0, IP6_DIP_3),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(36, 0x00000000, 0, IP6_DIP_4),
};
static int
@@ -599,6 +603,22 @@ static int mlxsw_sp1_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
return err;
}
+static int
+mlxsw_sp2_acl_rulei_act_mangle_field_ip_odd(struct mlxsw_sp_acl_rule_info *rulei,
+ enum mlxsw_sp_acl_mangle_field field,
+ u32 val, struct netlink_ext_ack *extack)
+{
+ if (!rulei->ipv6_valid) {
+ rulei->ipv6.prev_val = val;
+ rulei->ipv6_valid = true;
+ rulei->ipv6.prev_field = field;
+ return 0;
+ }
+
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field order");
+ return -EOPNOTSUPP;
+}
+
static int mlxsw_sp2_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct mlxsw_sp_acl_mangle_action *mact,
@@ -615,6 +635,61 @@ static int mlxsw_sp2_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
return mlxsw_afa_block_append_l4port(rulei->act_block, false, val, extack);
case MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT:
return mlxsw_afa_block_append_l4port(rulei->act_block, true, val, extack);
+ /* IPv4 fields */
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP4_SIP:
+ return mlxsw_afa_block_append_ip(rulei->act_block, false,
+ true, val, 0, extack);
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP4_DIP:
+ return mlxsw_afa_block_append_ip(rulei->act_block, true,
+ true, val, 0, extack);
+ /* IPv6 fields */
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_1:
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_3:
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_1:
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_3:
+ return mlxsw_sp2_acl_rulei_act_mangle_field_ip_odd(rulei,
+ mact->field,
+ val, extack);
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_2:
+ if (rulei->ipv6_valid &&
+ rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_1) {
+ rulei->ipv6_valid = false;
+ return mlxsw_afa_block_append_ip(rulei->act_block,
+ false, false, val,
+ rulei->ipv6.prev_val,
+ extack);
+ }
+ break;
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_4:
+ if (rulei->ipv6_valid &&
+ rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_3) {
+ rulei->ipv6_valid = false;
+ return mlxsw_afa_block_append_ip(rulei->act_block,
+ false, true, val,
+ rulei->ipv6.prev_val,
+ extack);
+ }
+ break;
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_2:
+ if (rulei->ipv6_valid &&
+ rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_1) {
+ rulei->ipv6_valid = false;
+ return mlxsw_afa_block_append_ip(rulei->act_block,
+ true, false, val,
+ rulei->ipv6.prev_val,
+ extack);
+ }
+ break;
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_4:
+ if (rulei->ipv6_valid &&
+ rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_3) {
+ rulei->ipv6_valid = false;
+ return mlxsw_afa_block_append_ip(rulei->act_block,
+ true, true, val,
+ rulei->ipv6.prev_val,
+ extack);
+ }
+ break;
default:
break;
}
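The IP6_*_1/IP6_*_3 cases stash the first 32-bit half and the IP6_*_2/IP6_*_4 cases commit both halves in a single action, because the hardware updates the address 64 bits at a time. A stripped-down sketch of the same two-phase bookkeeping (all names here are hypothetical):

#include <linux/errno.h>
#include <linux/types.h>

enum demo_field { DEMO_WORD_HI, DEMO_WORD_LO };

struct demo_pair_state {
	bool have_prev;
	enum demo_field prev_field;
	u32 prev_val;
};

/* First half: stash the value, commit nothing yet. */
static int demo_mangle_first_half(struct demo_pair_state *st, u32 val)
{
	if (st->have_prev)
		return -EOPNOTSUPP;	/* two first halves in a row */

	st->have_prev = true;
	st->prev_field = DEMO_WORD_HI;
	st->prev_val = val;
	return 0;
}

/* Second half: combine with the stashed half and commit as 64 bits. */
static int demo_mangle_second_half(struct demo_pair_state *st, u32 val,
				   u64 *out)
{
	if (!st->have_prev || st->prev_field != DEMO_WORD_HI)
		return -EOPNOTSUPP;	/* halves arrived out of order */

	st->have_prev = false;
	*out = ((u64)st->prev_val << 32) | val;
	return 0;
}

If have_prev is still set once all actions are parsed, a half was left dangling; that is exactly what the rulei->ipv6_valid check added to spectrum_flower.c below rejects.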
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 1a2fef2a5379..5d494fabf93d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -266,10 +266,10 @@ static int mlxsw_sp_dpipe_table_erif_counters_update(void *priv, bool enable)
if (!rif)
continue;
if (enable)
- mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
+ mlxsw_sp_rif_counter_alloc(rif,
MLXSW_SP_RIF_COUNTER_EGRESS);
else
- mlxsw_sp_rif_counter_free(mlxsw_sp, rif,
+ mlxsw_sp_rif_counter_free(rif,
MLXSW_SP_RIF_COUNTER_EGRESS);
}
mutex_unlock(&mlxsw_sp->router->lock);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 20530712eadb..8b5d7f83b9b0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -1034,13 +1034,10 @@ static int mlxsw_sp_get_module_info(struct net_device *netdev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- int err;
-
- err = mlxsw_env_get_module_info(mlxsw_sp->core,
- mlxsw_sp_port->mapping.module,
- modinfo);
- return err;
+ return mlxsw_env_get_module_info(netdev, mlxsw_sp->core,
+ mlxsw_sp_port->mapping.module,
+ modinfo);
}
static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
@@ -1048,13 +1045,10 @@ static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- int err;
-
- err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core,
- mlxsw_sp_port->mapping.module, ee,
- data);
- return err;
+ return mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core,
+ mlxsw_sp_port->mapping.module, ee,
+ data);
}
static int
@@ -1273,12 +1267,22 @@ struct mlxsw_sp1_port_link_mode {
static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
{
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ .speed = SPEED_100,
+ },
+ {
.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
.mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
.speed = SPEED_1000,
},
{
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_1000BASE_T,
+ .mask_ethtool = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ .speed = SPEED_1000,
+ },
+ {
.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index bb417db773b9..e91fb205e0b4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -15,6 +15,46 @@
#include "spectrum.h"
#include "core_acl_flex_keys.h"
+static int mlxsw_sp_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
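Condensed, the constraints mlxsw_sp_policer_validate() enforces read as a single predicate; a demo-only restatement (not driver code), assuming the flow_offload API as used above:

#include <net/flow_offload.h>

static bool demo_police_offloadable(const struct flow_action *action,
				    const struct flow_action_entry *act)
{
	/* exceed must drop; conform must be pipe, or ok only as the
	 * last action; no peakrate/avrate/overhead; byte rates only.
	 */
	return act->police.exceed.act_id == FLOW_ACTION_DROP &&
	       (act->police.notexceed.act_id == FLOW_ACTION_PIPE ||
		(act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
		 flow_action_is_last_entry(action, act))) &&
	       !act->police.peakrate_bytes_ps &&
	       !act->police.avrate &&
	       !act->police.overhead &&
	       !act->police.rate_pkt_ps;
}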
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
@@ -191,10 +231,9 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
- if (act->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
+ err = mlxsw_sp_policer_validate(flow_action, act, extack);
+ if (err)
+ return err;
/* The kernel might adjust the requested burst size so
* that it is not exactly a power of two. Re-adjust it
@@ -233,6 +272,12 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
}
+
+ if (rulei->ipv6_valid) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
+ return -EOPNOTSUPP;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 0ff163fbc775..35422e64d89f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -568,12 +568,11 @@ void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
u8 domain_number, u16 sequence_id,
u64 timestamp)
{
- unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp1_ptp_key key;
u8 types;
- if (WARN_ON_ONCE(local_port >= max_ports))
+ if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
return;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index d40762cfc453..79deb19e3a19 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -225,6 +225,64 @@ int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+struct mlxsw_sp_rif_counter_set_basic {
+ u64 good_unicast_packets;
+ u64 good_multicast_packets;
+ u64 good_broadcast_packets;
+ u64 good_unicast_bytes;
+ u64 good_multicast_bytes;
+ u64 good_broadcast_bytes;
+ u64 error_packets;
+ u64 discard_packets;
+ u64 error_bytes;
+ u64 discard_bytes;
+};
+
+static int
+mlxsw_sp_rif_counter_fetch_clear(struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir,
+ struct mlxsw_sp_rif_counter_set_basic *set)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ char ricnt_pl[MLXSW_REG_RICNT_LEN];
+ unsigned int *p_counter_index;
+ int err;
+
+ if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
+ return -EINVAL;
+
+ p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
+ if (!p_counter_index)
+ return -EINVAL;
+
+ mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
+ MLXSW_REG_RICNT_OPCODE_CLEAR);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
+ if (err)
+ return err;
+
+ if (!set)
+ return 0;
+
+#define MLXSW_SP_RIF_COUNTER_EXTRACT(NAME) \
+ (set->NAME = mlxsw_reg_ricnt_ ## NAME ## _get(ricnt_pl))
+
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_bytes);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_bytes);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_bytes);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(error_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(discard_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(error_bytes);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(discard_bytes);
+
+#undef MLXSW_SP_RIF_COUNTER_EXTRACT
+
+ return 0;
+}
+
static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index)
{
@@ -235,16 +293,20 @@ static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}
-int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif,
+int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir)
{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
unsigned int *p_counter_index;
int err;
+ if (mlxsw_sp_rif_counter_valid_get(rif, dir))
+ return 0;
+
p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
if (!p_counter_index)
return -EINVAL;
+
err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
p_counter_index);
if (err)
@@ -268,10 +330,10 @@ err_counter_clear:
return err;
}
-void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif,
+void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir)
{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
unsigned int *p_counter_index;
if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
@@ -296,14 +358,12 @@ static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
if (!devlink_dpipe_table_counter_enabled(devlink,
MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
return;
- mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+ mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}
static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
- struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
-
- mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
@@ -8148,6 +8208,166 @@ u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
return lb_rif->ul_rif_id;
}
+static bool
+mlxsw_sp_router_port_l3_stats_enabled(struct mlxsw_sp_rif *rif)
+{
+ return mlxsw_sp_rif_counter_valid_get(rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS) &&
+ mlxsw_sp_rif_counter_valid_get(rif,
+ MLXSW_SP_RIF_COUNTER_INGRESS);
+}
+
+static int
+mlxsw_sp_router_port_l3_stats_enable(struct mlxsw_sp_rif *rif)
+{
+ int err;
+
+ err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
+ if (err)
+ return err;
+
+ /* Clear stale data. */
+ err = mlxsw_sp_rif_counter_fetch_clear(rif,
+ MLXSW_SP_RIF_COUNTER_INGRESS,
+ NULL);
+ if (err)
+ goto err_clear_ingress;
+
+ err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+ if (err)
+ goto err_alloc_egress;
+
+ /* Clear stale data. */
+ err = mlxsw_sp_rif_counter_fetch_clear(rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS,
+ NULL);
+ if (err)
+ goto err_clear_egress;
+
+ return 0;
+
+err_clear_egress:
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+err_alloc_egress:
+err_clear_ingress:
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
+ return err;
+}
+
+static void
+mlxsw_sp_router_port_l3_stats_disable(struct mlxsw_sp_rif *rif)
+{
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
+}
+
+static void
+mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif *rif,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
+ return;
+ netdev_offload_xstats_report_used(info->report_used);
+}
+
+static int
+mlxsw_sp_router_port_l3_stats_fetch(struct mlxsw_sp_rif *rif,
+ struct rtnl_hw_stats64 *p_stats)
+{
+ struct mlxsw_sp_rif_counter_set_basic ingress;
+ struct mlxsw_sp_rif_counter_set_basic egress;
+ int err;
+
+ err = mlxsw_sp_rif_counter_fetch_clear(rif,
+ MLXSW_SP_RIF_COUNTER_INGRESS,
+ &ingress);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_counter_fetch_clear(rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS,
+ &egress);
+ if (err)
+ return err;
+
+#define MLXSW_SP_ROUTER_ALL_GOOD(SET, SFX) \
+ ((SET.good_unicast_ ## SFX) + \
+ (SET.good_multicast_ ## SFX) + \
+ (SET.good_broadcast_ ## SFX))
+
+ p_stats->rx_packets = MLXSW_SP_ROUTER_ALL_GOOD(ingress, packets);
+ p_stats->tx_packets = MLXSW_SP_ROUTER_ALL_GOOD(egress, packets);
+ p_stats->rx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(ingress, bytes);
+ p_stats->tx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(egress, bytes);
+ p_stats->rx_errors = ingress.error_packets;
+ p_stats->tx_errors = egress.error_packets;
+ p_stats->rx_dropped = ingress.discard_packets;
+ p_stats->tx_dropped = egress.discard_packets;
+ p_stats->multicast = ingress.good_multicast_packets +
+ ingress.good_broadcast_packets;
+
+#undef MLXSW_SP_ROUTER_ALL_GOOD
+
+ return 0;
+}
+
+static int
+mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ struct rtnl_hw_stats64 stats = {};
+ int err;
+
+ if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
+ return 0;
+
+ err = mlxsw_sp_router_port_l3_stats_fetch(rif, &stats);
+ if (err)
+ return err;
+
+ netdev_offload_xstats_report_delta(info->report_delta, &stats);
+ return 0;
+}
+
+struct mlxsw_sp_router_hwstats_notify_work {
+ struct work_struct work;
+ struct net_device *dev;
+};
+
+static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
+{
+ struct mlxsw_sp_router_hwstats_notify_work *hws_work =
+ container_of(work, struct mlxsw_sp_router_hwstats_notify_work,
+ work);
+
+ rtnl_lock();
+ rtnl_offload_xstats_notify(hws_work->dev);
+ rtnl_unlock();
+ dev_put(hws_work->dev);
+ kfree(hws_work);
+}
+
+static void
+mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev)
+{
+ struct mlxsw_sp_router_hwstats_notify_work *hws_work;
+
+ /* To collect notification payload, the core ends up sending another
+ * notifier block message, which would deadlock on the attempt to
+ * acquire the router lock again. Just postpone the notification until
+ * later.
+ */
+
+ hws_work = kzalloc(sizeof(*hws_work), GFP_KERNEL);
+ if (!hws_work)
+ return;
+
+ INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work);
+ dev_hold(dev);
+ hws_work->dev = dev;
+ mlxsw_core_schedule_work(&hws_work->work);
+}
+
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
{
return rif->dev->ifindex;
@@ -8158,6 +8378,16 @@ const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
return rif->dev;
}
+static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif)
+{
+ struct rtnl_hw_stats64 stats = {};
+
+ if (!mlxsw_sp_router_port_l3_stats_fetch(rif, &stats))
+ netdev_offload_xstats_push_delta(rif->dev,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3,
+ &stats);
+}
+
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_rif_params *params,
@@ -8218,10 +8448,19 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
goto err_mr_rif_add;
}
- mlxsw_sp_rif_counters_alloc(rif);
+ if (netdev_offload_xstats_enabled(rif->dev,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
+ err = mlxsw_sp_router_port_l3_stats_enable(rif);
+ if (err)
+ goto err_stats_enable;
+ mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
+ } else {
+ mlxsw_sp_rif_counters_alloc(rif);
+ }
return rif;
+err_stats_enable:
err_mr_rif_add:
for (i--; i >= 0; i--)
mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
@@ -8251,7 +8490,15 @@ static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
vr = &mlxsw_sp->router->vrs[rif->vr_id];
- mlxsw_sp_rif_counters_free(rif);
+ if (netdev_offload_xstats_enabled(rif->dev,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
+ mlxsw_sp_rif_push_l3_stats(rif);
+ mlxsw_sp_router_port_l3_stats_disable(rif);
+ mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
+ } else {
+ mlxsw_sp_rif_counters_free(rif);
+ }
+
for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
ops->deconfigure(rif);
@@ -9128,6 +9375,35 @@ static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
return -ENOBUFS;
}
+static int
+mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif,
+ unsigned long event,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ switch (info->type) {
+ case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
+ break;
+ default:
+ return 0;
+ }
+
+ switch (event) {
+ case NETDEV_OFFLOAD_XSTATS_ENABLE:
+ return mlxsw_sp_router_port_l3_stats_enable(rif);
+ case NETDEV_OFFLOAD_XSTATS_DISABLE:
+ mlxsw_sp_router_port_l3_stats_disable(rif);
+ return 0;
+ case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
+ mlxsw_sp_router_port_l3_stats_report_used(rif, info);
+ return 0;
+ case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
+ return mlxsw_sp_router_port_l3_stats_report_delta(rif, info);
+ }
+
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
unsigned long event, void *ptr)
{
@@ -9153,6 +9429,15 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
case NETDEV_PRE_CHANGEADDR:
err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
break;
+ case NETDEV_OFFLOAD_XSTATS_ENABLE:
+ case NETDEV_OFFLOAD_XSTATS_DISABLE:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
+ err = mlxsw_sp_router_port_offload_xstats_cmd(rif, event, ptr);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
}
out:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 99e8371a82a5..fa829658a11b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -159,11 +159,9 @@ int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir,
u64 *cnt);
-void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif,
+void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir);
-int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif,
+int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir);
struct mlxsw_sp_neigh_entry *
mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index f9671cc53002..b73466470f75 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -269,8 +269,7 @@ mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
return NULL;
- if (!vid ||
- br_vlan_get_info(br_dev, vid, &vinfo) ||
+ if (!vid || br_vlan_get_info(br_dev, vid, &vinfo) ||
!(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 65c1724c63b0..3bf12092a8a2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1234,8 +1234,7 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
if (netif_is_bridge_master(orig_dev)) {
int err = 0;
- if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
- br_vlan_enabled(orig_dev))
+ if (br_vlan_enabled(orig_dev))
err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
orig_dev, vlan);
if (!err)
@@ -2616,7 +2615,6 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
char *sfn_pl, int rec_index,
bool adding)
{
- unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
@@ -2630,7 +2628,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
- if (WARN_ON_ONCE(local_port >= max_ports))
+ if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
return;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 0303e727e99f..82d55fc27edc 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -293,7 +293,7 @@ static void ks8851_wrfifo_spi(struct ks8851_net *ks, struct sk_buff *txp,
*/
static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb)
{
- netif_rx_ni(skb);
+ netif_rx(skb);
}
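netif_rx_ni() disappears here because netif_rx() itself now disables bottom halves when invoked from process context, so one entry point works everywhere. A sketch under that assumption:

/* Assumes a kernel where netif_rx() handles process-context callers
 * itself (the core change this patch adapts to); no _ni variant needed.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void demo_deliver_skb(struct sk_buff *skb)
{
	netif_rx(skb);	/* usable from hard irq, softirq, or process context */
}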
/**
@@ -452,11 +452,9 @@ static int ks8851_probe_spi(struct spi_device *spi)
return ks8851_probe_common(netdev, dev, msg_enable);
}
-static int ks8851_remove_spi(struct spi_device *spi)
+static void ks8851_remove_spi(struct spi_device *spi)
{
ks8851_remove_common(&spi->dev);
-
- return 0;
}
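This and the other SPI remove conversions below follow the spi core change that made struct spi_driver::remove return void. A hypothetical driver skeleton under that assumption:

#include <linux/slab.h>
#include <linux/spi/spi.h>

struct demo_priv {
	int placeholder;	/* hypothetical per-device state */
};

static void demo_spi_remove(struct spi_device *spi)
{
	struct demo_priv *priv = spi_get_drvdata(spi);

	kfree(priv);
	/* No return value: by remove time there is nothing useful the
	 * caller could do with an error anyway.
	 */
}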
static const struct of_device_id ks8851_match_table[] = {
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index d024983815da..2b3eb5ed8233 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5225,7 +5225,6 @@ static irqreturn_t netdev_intr(int irq, void *dev_id)
* Linux network device functions
*/
-static unsigned long next_jiffies;
#ifdef CONFIG_NET_POLL_CONTROLLER
static void netdev_netpoll(struct net_device *dev)
@@ -5411,10 +5410,12 @@ static int netdev_open(struct net_device *dev)
struct dev_info *hw_priv = priv->adapter;
struct ksz_hw *hw = &hw_priv->hw;
struct ksz_port *port = &priv->port;
+ unsigned long next_jiffies;
int i;
int p;
int rc = 0;
+ next_jiffies = jiffies + HZ * 2;
priv->multicast = 0;
priv->promiscuous = 0;
@@ -5428,10 +5429,7 @@ static int netdev_open(struct net_device *dev)
if (rc)
return rc;
for (i = 0; i < hw->mib_port_cnt; i++) {
- if (next_jiffies < jiffies)
- next_jiffies = jiffies + HZ * 2;
- else
- next_jiffies += HZ * 1;
+ next_jiffies += HZ * 1;
hw_priv->counter[i].time = next_jiffies;
hw->port_mib[i].state = media_disconnected;
port_init_cnt(hw, i);
@@ -6563,6 +6561,7 @@ static void mib_read_work(struct work_struct *work)
struct dev_info *hw_priv =
container_of(work, struct dev_info, mib_read);
struct ksz_hw *hw = &hw_priv->hw;
+ unsigned long next_jiffies;
struct ksz_port_mib *mib;
int i;
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 634ac7649c43..559ad94a44d0 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -975,7 +975,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
/* update statistics */
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += len;
- netif_rx_ni(skb);
+ netif_rx(skb);
}
}
/*
@@ -1612,15 +1612,13 @@ error_alloc:
return ret;
}
-static int enc28j60_remove(struct spi_device *spi)
+static void enc28j60_remove(struct spi_device *spi)
{
struct enc28j60_net *priv = spi_get_drvdata(spi);
unregister_netdev(priv->netdev);
free_irq(spi->irq, priv);
free_netdev(priv->netdev);
-
- return 0;
}
static const struct of_device_id enc28j60_dt_ids[] = {
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index b90efc80fb59..dc1840cb5b10 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -1093,7 +1093,7 @@ error_out:
return ret;
}
-static int encx24j600_spi_remove(struct spi_device *spi)
+static void encx24j600_spi_remove(struct spi_device *spi)
{
struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev);
@@ -1101,8 +1101,6 @@ static int encx24j600_spi_remove(struct spi_device *spi)
kthread_stop(priv->kworker_task);
free_netdev(priv->ndev);
-
- return 0;
}
static const struct spi_device_id encx24j600_spi_id_table[] = {
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 91a755efe2e6..c8fe8b31f07b 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -7,6 +7,8 @@
#include <linux/phy.h>
#include "lan743x_main.h"
#include "lan743x_ethtool.h"
+#include <linux/sched.h>
+#include <linux/iopoll.h>
/* eeprom */
#define LAN743X_EEPROM_MAGIC (0x74A5)
@@ -19,6 +21,10 @@
#define OTP_INDICATOR_1 (0xF3)
#define OTP_INDICATOR_2 (0xF7)
+#define LOCK_TIMEOUT_MAX_CNT (100) // 1 sec (10 msec * 100)
+
+#define LAN743X_CSR_READ_OP(offset) lan743x_csr_read(adapter, offset)
+
static int lan743x_otp_power_up(struct lan743x_adapter *adapter)
{
u32 reg_value;
@@ -149,6 +155,217 @@ static int lan743x_otp_write(struct lan743x_adapter *adapter, u32 offset,
return 0;
}
+static int lan743x_hs_syslock_acquire(struct lan743x_adapter *adapter,
+ u16 timeout)
+{
+ u16 timeout_cnt = 0;
+ u32 val;
+
+ do {
+ spin_lock(&adapter->eth_syslock_spinlock);
+ if (adapter->eth_syslock_acquire_cnt == 0) {
+ lan743x_csr_write(adapter, ETH_SYSTEM_SYS_LOCK_REG,
+ SYS_LOCK_REG_ENET_SS_LOCK_);
+ val = lan743x_csr_read(adapter,
+ ETH_SYSTEM_SYS_LOCK_REG);
+ if (val & SYS_LOCK_REG_ENET_SS_LOCK_) {
+ adapter->eth_syslock_acquire_cnt++;
+ WARN_ON(adapter->eth_syslock_acquire_cnt == 0);
+ spin_unlock(&adapter->eth_syslock_spinlock);
+ break;
+ }
+ } else {
+ adapter->eth_syslock_acquire_cnt++;
+ WARN_ON(adapter->eth_syslock_acquire_cnt == 0);
+ spin_unlock(&adapter->eth_syslock_spinlock);
+ break;
+ }
+
+ spin_unlock(&adapter->eth_syslock_spinlock);
+
+ if (timeout_cnt++ < timeout)
+ usleep_range(10000, 11000);
+ else
+ return -ETIMEDOUT;
+ } while (true);
+
+ return 0;
+}
+
+static void lan743x_hs_syslock_release(struct lan743x_adapter *adapter)
+{
+ u32 val;
+
+ spin_lock(&adapter->eth_syslock_spinlock);
+ WARN_ON(adapter->eth_syslock_acquire_cnt == 0);
+
+ if (adapter->eth_syslock_acquire_cnt) {
+ adapter->eth_syslock_acquire_cnt--;
+ if (adapter->eth_syslock_acquire_cnt == 0) {
+ lan743x_csr_write(adapter, ETH_SYSTEM_SYS_LOCK_REG, 0);
+ val = lan743x_csr_read(adapter,
+ ETH_SYSTEM_SYS_LOCK_REG);
+ WARN_ON((val & SYS_LOCK_REG_ENET_SS_LOCK_) != 0);
+ }
+ }
+
+ spin_unlock(&adapter->eth_syslock_spinlock);
+}
+
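eth_syslock_acquire_cnt makes the hardware system lock effectively recursive within the driver: only the 0->1 transition claims SYS_LOCK_REG_ENET_SS_LOCK_ and only the 1->0 transition releases it. A usage sketch with a hypothetical caller (acquire sleeps via usleep_range(), so process context only):

static int demo_locked_csr_read(struct lan743x_adapter *adapter,
				u32 offset, u32 *val)
{
	int ret;

	ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
	if (ret < 0)
		return ret;	/* -ETIMEDOUT: another agent holds the lock */

	*val = lan743x_csr_read(adapter, offset);

	lan743x_hs_syslock_release(adapter);
	return 0;
}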
+static void lan743x_hs_otp_power_up(struct lan743x_adapter *adapter)
+{
+ u32 reg_value;
+
+ reg_value = lan743x_csr_read(adapter, HS_OTP_PWR_DN);
+ if (reg_value & OTP_PWR_DN_PWRDN_N_) {
+ reg_value &= ~OTP_PWR_DN_PWRDN_N_;
+ lan743x_csr_write(adapter, HS_OTP_PWR_DN, reg_value);
+ /* Flush the posted write so the subsequent delay is
+ * guaranteed to happen after the write reaches the hardware.
+ */
+ lan743x_csr_read(adapter, HS_OTP_PWR_DN);
+ udelay(1);
+ }
+}
+
+static void lan743x_hs_otp_power_down(struct lan743x_adapter *adapter)
+{
+ u32 reg_value;
+
+ reg_value = lan743x_csr_read(adapter, HS_OTP_PWR_DN);
+ if (!(reg_value & OTP_PWR_DN_PWRDN_N_)) {
+ reg_value |= OTP_PWR_DN_PWRDN_N_;
+ lan743x_csr_write(adapter, HS_OTP_PWR_DN, reg_value);
+ /* Flush the posted write so the subsequent delay is
+ * guaranteed to happen after the write reaches the hardware.
+ */
+ lan743x_csr_read(adapter, HS_OTP_PWR_DN);
+ udelay(1);
+ }
+}
+
+static void lan743x_hs_otp_set_address(struct lan743x_adapter *adapter,
+ u32 address)
+{
+ lan743x_csr_write(adapter, HS_OTP_ADDR_HIGH, (address >> 8) & 0x03);
+ lan743x_csr_write(adapter, HS_OTP_ADDR_LOW, address & 0xFF);
+}
+
+static void lan743x_hs_otp_read_go(struct lan743x_adapter *adapter)
+{
+ lan743x_csr_write(adapter, HS_OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+ lan743x_csr_write(adapter, HS_OTP_CMD_GO, OTP_CMD_GO_GO_);
+}
+
+static int lan743x_hs_otp_cmd_cmplt_chk(struct lan743x_adapter *adapter)
+{
+ u32 val;
+
+ return readx_poll_timeout(LAN743X_CSR_READ_OP, HS_OTP_STATUS, val,
+ !(val & OTP_STATUS_BUSY_),
+ 80, 10000);
+}
+
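readx_poll_timeout() from <linux/iopoll.h> expands to a loop of "val = op(addr)" with usleep_range() between polls, so the op must take exactly one argument; the LAN743X_CSR_READ_OP macro defined earlier binds "adapter" from the caller's scope to satisfy that. A generic sketch (the register/busy-bit parameters are hypothetical):

/* Polls every ~80us for up to 10ms; sleeps, so process context only. */
static int demo_poll_not_busy(struct lan743x_adapter *adapter,
			      u32 reg, u32 busy_bit)
{
	u32 val;

	/* LAN743X_CSR_READ_OP picks up "adapter" lexically. */
	return readx_poll_timeout(LAN743X_CSR_READ_OP, reg, val,
				  !(val & busy_bit), 80, 10000);
}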
+static int lan743x_hs_otp_read(struct lan743x_adapter *adapter, u32 offset,
+ u32 length, u8 *data)
+{
+ int ret;
+ int i;
+
+ ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+ lan743x_hs_otp_power_up(adapter);
+
+ ret = lan743x_hs_otp_cmd_cmplt_chk(adapter);
+ if (ret < 0)
+ goto power_down;
+
+ lan743x_hs_syslock_release(adapter);
+
+ for (i = 0; i < length; i++) {
+ ret = lan743x_hs_syslock_acquire(adapter,
+ LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+ lan743x_hs_otp_set_address(adapter, offset + i);
+
+ lan743x_hs_otp_read_go(adapter);
+ ret = lan743x_hs_otp_cmd_cmplt_chk(adapter);
+ if (ret < 0)
+ goto power_down;
+
+ data[i] = lan743x_csr_read(adapter, HS_OTP_READ_DATA);
+
+ lan743x_hs_syslock_release(adapter);
+ }
+
+ ret = lan743x_hs_syslock_acquire(adapter,
+ LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+power_down:
+ lan743x_hs_otp_power_down(adapter);
+ lan743x_hs_syslock_release(adapter);
+
+ return ret;
+}
+
+static int lan743x_hs_otp_write(struct lan743x_adapter *adapter, u32 offset,
+ u32 length, u8 *data)
+{
+ int ret;
+ int i;
+
+ ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+ lan743x_hs_otp_power_up(adapter);
+
+ ret = lan743x_hs_otp_cmd_cmplt_chk(adapter);
+ if (ret < 0)
+ goto power_down;
+
+ /* set to BYTE program mode */
+ lan743x_csr_write(adapter, HS_OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+
+ lan743x_hs_syslock_release(adapter);
+
+ for (i = 0; i < length; i++) {
+ ret = lan743x_hs_syslock_acquire(adapter,
+ LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+ lan743x_hs_otp_set_address(adapter, offset + i);
+
+ lan743x_csr_write(adapter, HS_OTP_PRGM_DATA, data[i]);
+ lan743x_csr_write(adapter, HS_OTP_TST_CMD,
+ OTP_TST_CMD_PRGVRFY_);
+ lan743x_csr_write(adapter, HS_OTP_CMD_GO, OTP_CMD_GO_GO_);
+
+ ret = lan743x_hs_otp_cmd_cmplt_chk(adapter);
+ if (ret < 0)
+ goto power_down;
+
+ lan743x_hs_syslock_release(adapter);
+ }
+
+ ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+power_down:
+ lan743x_hs_otp_power_down(adapter);
+ lan743x_hs_syslock_release(adapter);
+
+ return ret;
+}
+
static int lan743x_eeprom_wait(struct lan743x_adapter *adapter)
{
unsigned long start_time = jiffies;
@@ -263,6 +480,100 @@ static int lan743x_eeprom_write(struct lan743x_adapter *adapter,
return 0;
}
+static int lan743x_hs_eeprom_cmd_cmplt_chk(struct lan743x_adapter *adapter)
+{
+ u32 val;
+
+ return readx_poll_timeout(LAN743X_CSR_READ_OP, HS_E2P_CMD, val,
+ (!(val & HS_E2P_CMD_EPC_BUSY_) ||
+ (val & HS_E2P_CMD_EPC_TIMEOUT_)),
+ 50, 10000);
+}
+
+static int lan743x_hs_eeprom_read(struct lan743x_adapter *adapter,
+ u32 offset, u32 length, u8 *data)
+{
+ int retval;
+ u32 val;
+ int i;
+
+ retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ if (retval < 0)
+ return retval;
+
+ retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter);
+ lan743x_hs_syslock_release(adapter);
+ if (retval < 0)
+ return retval;
+
+ for (i = 0; i < length; i++) {
+ retval = lan743x_hs_syslock_acquire(adapter,
+ LOCK_TIMEOUT_MAX_CNT);
+ if (retval < 0)
+ return retval;
+
+ val = HS_E2P_CMD_EPC_BUSY_ | HS_E2P_CMD_EPC_CMD_READ_;
+ val |= (offset & HS_E2P_CMD_EPC_ADDR_MASK_);
+ lan743x_csr_write(adapter, HS_E2P_CMD, val);
+ retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter);
+ if (retval < 0) {
+ lan743x_hs_syslock_release(adapter);
+ return retval;
+ }
+
+ val = lan743x_csr_read(adapter, HS_E2P_DATA);
+
+ lan743x_hs_syslock_release(adapter);
+
+ data[i] = val & 0xFF;
+ offset++;
+ }
+
+ return 0;
+}
+
+static int lan743x_hs_eeprom_write(struct lan743x_adapter *adapter,
+ u32 offset, u32 length, u8 *data)
+{
+ int retval;
+ u32 val;
+ int i;
+
+ retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ if (retval < 0)
+ return retval;
+
+ retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter);
+ lan743x_hs_syslock_release(adapter);
+ if (retval < 0)
+ return retval;
+
+ for (i = 0; i < length; i++) {
+ retval = lan743x_hs_syslock_acquire(adapter,
+ LOCK_TIMEOUT_MAX_CNT);
+ if (retval < 0)
+ return retval;
+
+ /* Fill data register */
+ val = data[i];
+ lan743x_csr_write(adapter, HS_E2P_DATA, val);
+
+ /* Send "write" command */
+ val = HS_E2P_CMD_EPC_BUSY_ | HS_E2P_CMD_EPC_CMD_WRITE_;
+ val |= (offset & HS_E2P_CMD_EPC_ADDR_MASK_);
+ lan743x_csr_write(adapter, HS_E2P_CMD, val);
+
+ retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter);
+ lan743x_hs_syslock_release(adapter);
+ if (retval < 0)
+ return retval;
+
+ offset++;
+ }
+
+ return 0;
+}
+
static void lan743x_ethtool_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -304,10 +615,21 @@ static int lan743x_ethtool_get_eeprom(struct net_device *netdev,
struct lan743x_adapter *adapter = netdev_priv(netdev);
int ret = 0;
- if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP)
- ret = lan743x_otp_read(adapter, ee->offset, ee->len, data);
- else
- ret = lan743x_eeprom_read(adapter, ee->offset, ee->len, data);
+ if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP) {
+ if (adapter->is_pci11x1x)
+ ret = lan743x_hs_otp_read(adapter, ee->offset,
+ ee->len, data);
+ else
+ ret = lan743x_otp_read(adapter, ee->offset,
+ ee->len, data);
+ } else {
+ if (adapter->is_pci11x1x)
+ ret = lan743x_hs_eeprom_read(adapter, ee->offset,
+ ee->len, data);
+ else
+ ret = lan743x_eeprom_read(adapter, ee->offset,
+ ee->len, data);
+ }
return ret;
}
@@ -321,13 +643,22 @@ static int lan743x_ethtool_set_eeprom(struct net_device *netdev,
if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP) {
/* Beware! OTP is One Time Programming ONLY! */
if (ee->magic == LAN743X_OTP_MAGIC) {
- ret = lan743x_otp_write(adapter, ee->offset,
- ee->len, data);
+ if (adapter->is_pci11x1x)
+ ret = lan743x_hs_otp_write(adapter, ee->offset,
+ ee->len, data);
+ else
+ ret = lan743x_otp_write(adapter, ee->offset,
+ ee->len, data);
}
} else {
if (ee->magic == LAN743X_EEPROM_MAGIC) {
- ret = lan743x_eeprom_write(adapter, ee->offset,
- ee->len, data);
+ if (adapter->is_pci11x1x)
+ ret = lan743x_hs_eeprom_write(adapter,
+ ee->offset,
+ ee->len, data);
+ else
+ ret = lan743x_eeprom_write(adapter, ee->offset,
+ ee->len, data);
}
}
@@ -365,6 +696,14 @@ static const char lan743x_set1_sw_cnt_strings[][ETH_GSTRING_LEN] = {
"RX Queue 3 Frames",
};
+static const char lan743x_tx_queue_cnt_strings[][ETH_GSTRING_LEN] = {
+ "TX Queue 0 Frames",
+ "TX Queue 1 Frames",
+ "TX Queue 2 Frames",
+ "TX Queue 3 Frames",
+ "TX Total Queue Frames",
+};
+
static const char lan743x_set2_hw_cnt_strings[][ETH_GSTRING_LEN] = {
"RX Total Frames",
"EEE RX LPI Transitions",
@@ -462,6 +801,8 @@ static const char lan743x_priv_flags_strings[][ETH_GSTRING_LEN] = {
static void lan743x_ethtool_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
switch (stringset) {
case ETH_SS_STATS:
memcpy(data, lan743x_set0_hw_cnt_strings,
@@ -473,6 +814,13 @@ static void lan743x_ethtool_get_strings(struct net_device *netdev,
sizeof(lan743x_set1_sw_cnt_strings)],
lan743x_set2_hw_cnt_strings,
sizeof(lan743x_set2_hw_cnt_strings));
+ if (adapter->is_pci11x1x) {
+ memcpy(&data[sizeof(lan743x_set0_hw_cnt_strings) +
+ sizeof(lan743x_set1_sw_cnt_strings) +
+ sizeof(lan743x_set2_hw_cnt_strings)],
+ lan743x_tx_queue_cnt_strings,
+ sizeof(lan743x_tx_queue_cnt_strings));
+ }
break;
case ETH_SS_PRIV_FLAGS:
memcpy(data, lan743x_priv_flags_strings,
@@ -486,7 +834,9 @@ static void lan743x_ethtool_get_ethtool_stats(struct net_device *netdev,
u64 *data)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u64 total_queue_count = 0;
int data_index = 0;
+ u64 pkt_cnt;
u32 buf;
int i;
@@ -500,6 +850,14 @@ static void lan743x_ethtool_get_ethtool_stats(struct net_device *netdev,
buf = lan743x_csr_read(adapter, lan743x_set2_hw_cnt_addr[i]);
data[data_index++] = (u64)buf;
}
+ if (adapter->is_pci11x1x) {
+ for (i = 0; i < ARRAY_SIZE(adapter->tx); i++) {
+ pkt_cnt = (u64)(adapter->tx[i].frame_count);
+ data[data_index++] = pkt_cnt;
+ total_queue_count += pkt_cnt;
+ }
+ data[data_index++] = total_queue_count;
+ }
}
static u32 lan743x_ethtool_get_priv_flags(struct net_device *netdev)
@@ -520,6 +878,8 @@ static int lan743x_ethtool_set_priv_flags(struct net_device *netdev, u32 flags)
static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset)
{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
switch (sset) {
case ETH_SS_STATS:
{
@@ -528,6 +888,8 @@ static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset)
ret = ARRAY_SIZE(lan743x_set0_hw_cnt_strings);
ret += ARRAY_SIZE(lan743x_set1_sw_cnt_strings);
ret += ARRAY_SIZE(lan743x_set2_hw_cnt_strings);
+ if (adapter->is_pci11x1x)
+ ret += ARRAY_SIZE(lan743x_tx_queue_cnt_strings);
return ret;
}
case ETH_SS_PRIV_FLAGS:
@@ -750,7 +1112,7 @@ static int lan743x_ethtool_set_eee(struct net_device *netdev,
}
if (eee->eee_enabled) {
- ret = phy_init_eee(phydev, 0);
+ ret = phy_init_eee(phydev, false);
if (ret) {
netif_err(adapter, drv, adapter->netdev,
"EEE initialization failed\n");
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 8c6390d95158..9ac0c2b96a15 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -18,6 +18,51 @@
#include "lan743x_main.h"
#include "lan743x_ethtool.h"
+#define MMD_ACCESS_ADDRESS 0
+#define MMD_ACCESS_WRITE 1
+#define MMD_ACCESS_READ 2
+#define MMD_ACCESS_READ_INC 3
+
+static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter)
+{
+ u32 chip_rev;
+ u32 strap;
+
+ strap = lan743x_csr_read(adapter, STRAP_READ);
+ if (strap & STRAP_READ_USE_SGMII_EN_) {
+ if (strap & STRAP_READ_SGMII_EN_)
+ adapter->is_sgmii_en = true;
+ else
+ adapter->is_sgmii_en = false;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "STRAP_READ: 0x%08X\n", strap);
+ } else {
+ chip_rev = lan743x_csr_read(adapter, FPGA_REV);
+ if (chip_rev) {
+ if (chip_rev & FPGA_SGMII_OP)
+ adapter->is_sgmii_en = true;
+ else
+ adapter->is_sgmii_en = false;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "FPGA_REV: 0x%08X\n", chip_rev);
+ } else {
+ adapter->is_sgmii_en = false;
+ }
+ }
+}
+
+static bool is_pci11x1x_chip(struct lan743x_adapter *adapter)
+{
+ struct lan743x_csr *csr = &adapter->csr;
+ u32 id_rev = csr->id_rev;
+
+ if (((id_rev & 0xFFFF0000) == ID_REV_ID_A011_) ||
+ ((id_rev & 0xFFFF0000) == ID_REV_ID_A041_)) {
+ return true;
+ }
+ return false;
+}
+
static void lan743x_pci_cleanup(struct lan743x_adapter *adapter)
{
pci_release_selected_regions(adapter->pdev,
@@ -250,7 +295,7 @@ static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
}
}
if (int_sts & INT_BIT_ALL_TX_) {
- for (channel = 0; channel < LAN743X_USED_TX_CHANNELS;
+ for (channel = 0; channel < adapter->used_tx_channels;
channel++) {
u32 int_bit = INT_BIT_DMA_TX_(channel);
@@ -410,7 +455,7 @@ static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter,
{
int index;
- for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
+ for (index = 0; index < adapter->max_vector_count; index++) {
if (adapter->intr.vector_list[index].int_mask & int_mask)
return adapter->intr.vector_list[index].flags;
}
@@ -423,9 +468,12 @@ static void lan743x_intr_close(struct lan743x_adapter *adapter)
int index = 0;
lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
- lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);
+ if (adapter->is_pci11x1x)
+ lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x0000FFFF);
+ else
+ lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);
- for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
+ for (index = 0; index < intr->number_of_vectors; index++) {
if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) {
lan743x_intr_unregister_isr(adapter, index);
intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index);
@@ -445,9 +493,11 @@ static void lan743x_intr_close(struct lan743x_adapter *adapter)
static int lan743x_intr_open(struct lan743x_adapter *adapter)
{
- struct msix_entry msix_entries[LAN743X_MAX_VECTOR_COUNT];
+ struct msix_entry msix_entries[PCI11X1X_MAX_VECTOR_COUNT];
struct lan743x_intr *intr = &adapter->intr;
+ unsigned int used_tx_channels;
u32 int_vec_en_auto_clr = 0;
+ u8 max_vector_count;
u32 int_vec_map0 = 0;
u32 int_vec_map1 = 0;
int ret = -ENODEV;
@@ -457,13 +507,15 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
intr->number_of_vectors = 0;
/* Try to set up MSIX interrupts */
+ max_vector_count = adapter->max_vector_count;
memset(&msix_entries[0], 0,
- sizeof(struct msix_entry) * LAN743X_MAX_VECTOR_COUNT);
- for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++)
+ sizeof(struct msix_entry) * max_vector_count);
+ for (index = 0; index < max_vector_count; index++)
msix_entries[index].entry = index;
+ used_tx_channels = adapter->used_tx_channels;
ret = pci_enable_msix_range(adapter->pdev,
msix_entries, 1,
- 1 + LAN743X_USED_TX_CHANNELS +
+ 1 + used_tx_channels +
LAN743X_USED_RX_CHANNELS);
if (ret > 0) {
@@ -556,8 +608,15 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD);
lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD);
lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD);
- lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
- lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
+ if (adapter->is_pci11x1x) {
+ lan743x_csr_write(adapter, INT_MOD_CFG8, LAN743X_INT_MOD);
+ lan743x_csr_write(adapter, INT_MOD_CFG9, LAN743X_INT_MOD);
+ lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00007654);
+ lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00003210);
+ } else {
+ lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
+ lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
+ }
lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF);
}
@@ -570,8 +629,8 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
if (intr->number_of_vectors > 1) {
int number_of_tx_vectors = intr->number_of_vectors - 1;
- if (number_of_tx_vectors > LAN743X_USED_TX_CHANNELS)
- number_of_tx_vectors = LAN743X_USED_TX_CHANNELS;
+ if (number_of_tx_vectors > used_tx_channels)
+ number_of_tx_vectors = used_tx_channels;
flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
@@ -609,9 +668,9 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
INT_VEC_EN_(vector));
}
}
- if ((intr->number_of_vectors - LAN743X_USED_TX_CHANNELS) > 1) {
+ if ((intr->number_of_vectors - used_tx_channels) > 1) {
int number_of_rx_vectors = intr->number_of_vectors -
- LAN743X_USED_TX_CHANNELS - 1;
+ used_tx_channels - 1;
if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS)
number_of_rx_vectors = LAN743X_USED_RX_CHANNELS;
@@ -632,7 +691,7 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
}
for (index = 0; index < number_of_rx_vectors; index++) {
- int vector = index + 1 + LAN743X_USED_TX_CHANNELS;
+ int vector = index + 1 + used_tx_channels;
u32 int_bit = INT_BIT_DMA_RX_(index);
/* map RX interrupt to vector */
@@ -760,6 +819,96 @@ static int lan743x_mdiobus_write(struct mii_bus *bus,
return ret;
}
+static u32 lan743x_mac_mmd_access(int id, int index, int op)
+{
+ u16 dev_addr;
+ u32 ret;
+
+ dev_addr = (index >> 16) & 0x1f;
+ ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
+ MAC_MII_ACC_PHY_ADDR_MASK_;
+ ret |= (dev_addr << MAC_MII_ACC_MIIMMD_SHIFT_) &
+ MAC_MII_ACC_MIIMMD_MASK_;
+ if (op == MMD_ACCESS_WRITE)
+ ret |= MAC_MII_ACC_MIICMD_WRITE_;
+ else if (op == MMD_ACCESS_READ)
+ ret |= MAC_MII_ACC_MIICMD_READ_;
+ else if (op == MMD_ACCESS_READ_INC)
+ ret |= MAC_MII_ACC_MIICMD_READ_INC_;
+ else
+ ret |= MAC_MII_ACC_MIICMD_ADDR_;
+ ret |= (MAC_MII_ACC_MII_BUSY_ | MAC_MII_ACC_MIICL45_);
+
+ return ret;
+}
+
+static int lan743x_mdiobus_c45_read(struct mii_bus *bus, int phy_id, int index)
+{
+ struct lan743x_adapter *adapter = bus->priv;
+ u32 mmd_access;
+ int ret;
+
+ /* confirm MII not busy */
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ if (index & MII_ADDR_C45) {
+ /* Load Register Address */
+ lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff));
+ mmd_access = lan743x_mac_mmd_access(phy_id, index,
+ MMD_ACCESS_ADDRESS);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ /* Read Data */
+ mmd_access = lan743x_mac_mmd_access(phy_id, index,
+ MMD_ACCESS_READ);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ ret = lan743x_csr_read(adapter, MAC_MII_DATA);
+ return (int)(ret & 0xFFFF);
+ }
+
+ ret = lan743x_mdiobus_read(bus, phy_id, index);
+ return ret;
+}
+
+static int lan743x_mdiobus_c45_write(struct mii_bus *bus,
+ int phy_id, int index, u16 regval)
+{
+ struct lan743x_adapter *adapter = bus->priv;
+ u32 mmd_access;
+ int ret;
+
+ /* confirm MII not busy */
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ if (index & MII_ADDR_C45) {
+ /* Load Register Address */
+ lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff));
+ mmd_access = lan743x_mac_mmd_access(phy_id, index,
+ MMD_ACCESS_ADDRESS);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ /* Write Data */
+ lan743x_csr_write(adapter, MAC_MII_DATA, (u32)regval);
+ mmd_access = lan743x_mac_mmd_access(phy_id, index,
+ MMD_ACCESS_WRITE);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ } else {
+ ret = lan743x_mdiobus_write(bus, phy_id, index, regval);
+ }
+
+ return ret;
+}
+
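Both helpers decode the clause-45 device address from bits 20:16 of the register index, matching the MII_ADDR_C45 convention from <linux/mdio.h> (assumed here to be the pre-C45-split encoding, with bit 30 flagging a clause-45 access). A small sketch of building such an index; the helper is hypothetical:

#include <linux/mdio.h>

/* Pack devad and regnum the way lan743x_mdiobus_c45_read()/_write()
 * above expect to unpack them.
 */
static int demo_c45_index(int devad, int regnum)
{
	return MII_ADDR_C45 | ((devad & 0x1f) << 16) | (regnum & 0xffff);
}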
static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
u8 *addr)
{
@@ -1627,6 +1776,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
dev_kfree_skb_irq(skb);
goto unlock;
}
+ tx->frame_count++;
if (gso)
lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);
@@ -2491,7 +2641,8 @@ static int lan743x_netdev_close(struct net_device *netdev)
struct lan743x_adapter *adapter = netdev_priv(netdev);
int index;
- lan743x_tx_close(&adapter->tx[0]);
+ for (index = 0; index < adapter->used_tx_channels; index++)
+ lan743x_tx_close(&adapter->tx[index]);
for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
lan743x_rx_close(&adapter->rx[index]);
@@ -2537,12 +2688,19 @@ static int lan743x_netdev_open(struct net_device *netdev)
goto close_rx;
}
- ret = lan743x_tx_open(&adapter->tx[0]);
- if (ret)
- goto close_rx;
-
+ for (index = 0; index < adapter->used_tx_channels; index++) {
+ ret = lan743x_tx_open(&adapter->tx[index]);
+ if (ret)
+ goto close_tx;
+ }
return 0;
+close_tx:
+ for (index = 0; index < adapter->used_tx_channels; index++) {
+ if (adapter->tx[index].ring_cpu_ptr)
+ lan743x_tx_close(&adapter->tx[index]);
+ }
+
close_rx:
for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
if (adapter->rx[index].ring_cpu_ptr)
@@ -2569,8 +2727,12 @@ static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u8 ch = 0;
- return lan743x_tx_xmit_frame(&adapter->tx[0], skb);
+ if (adapter->is_pci11x1x)
+ ch = skb->queue_mapping % PCI11X1X_USED_TX_CHANNELS;
+
+ return lan743x_tx_xmit_frame(&adapter->tx[ch], skb);
}
static int lan743x_netdev_ioctl(struct net_device *netdev,
@@ -2701,6 +2863,19 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
int index;
int ret;
+ adapter->is_pci11x1x = is_pci11x1x_chip(adapter);
+ if (adapter->is_pci11x1x) {
+ adapter->max_tx_channels = PCI11X1X_MAX_TX_CHANNELS;
+ adapter->used_tx_channels = PCI11X1X_USED_TX_CHANNELS;
+ adapter->max_vector_count = PCI11X1X_MAX_VECTOR_COUNT;
+ pci11x1x_strap_get_status(adapter);
+ spin_lock_init(&adapter->eth_syslock_spinlock);
+ } else {
+ adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
+ adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
+ adapter->max_vector_count = LAN743X_MAX_VECTOR_COUNT;
+ }
+
adapter->intr.irq = adapter->pdev->irq;
lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
@@ -2731,15 +2906,19 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
adapter->rx[index].channel_number = index;
}
- tx = &adapter->tx[0];
- tx->adapter = adapter;
- tx->channel_number = 0;
- spin_lock_init(&tx->ring_lock);
+ for (index = 0; index < adapter->used_tx_channels; index++) {
+ tx = &adapter->tx[index];
+ tx->adapter = adapter;
+ tx->channel_number = index;
+ spin_lock_init(&tx->ring_lock);
+ }
+
return 0;
}
static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
{
+ u32 sgmii_ctl;
int ret;
adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
@@ -2749,9 +2928,35 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
}
adapter->mdiobus->priv = (void *)adapter;
- adapter->mdiobus->read = lan743x_mdiobus_read;
- adapter->mdiobus->write = lan743x_mdiobus_write;
- adapter->mdiobus->name = "lan743x-mdiobus";
+ if (adapter->is_pci11x1x) {
+ if (adapter->is_sgmii_en) {
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII operation\n");
+ } else {
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+ netif_dbg(adapter, drv, adapter->netdev,
+ "(R)GMII operation\n");
+ }
+
+ adapter->mdiobus->probe_capabilities = MDIOBUS_C22_C45;
+ adapter->mdiobus->read = lan743x_mdiobus_c45_read;
+ adapter->mdiobus->write = lan743x_mdiobus_c45_write;
+ adapter->mdiobus->name = "lan743x-mdiobus-c45";
+ netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus-c45\n");
+ } else {
+ adapter->mdiobus->read = lan743x_mdiobus_read;
+ adapter->mdiobus->write = lan743x_mdiobus_write;
+ adapter->mdiobus->name = "lan743x-mdiobus";
+ netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus\n");
+ }
+
snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
"pci-%s", pci_name(adapter->pdev));
@@ -2786,8 +2991,17 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
struct net_device *netdev = NULL;
int ret = -ENODEV;
- netdev = devm_alloc_etherdev(&pdev->dev,
- sizeof(struct lan743x_adapter));
+ if (id->device == PCI_DEVICE_ID_SMSC_A011 ||
+ id->device == PCI_DEVICE_ID_SMSC_A041) {
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct lan743x_adapter),
+ PCI11X1X_USED_TX_CHANNELS,
+ LAN743X_USED_RX_CHANNELS);
+ } else {
+ netdev = devm_alloc_etherdev(&pdev->dev,
+ sizeof(struct lan743x_adapter));
+ }
+
if (!netdev)
goto return_error;
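devm_alloc_etherdev() is the single-queue case of devm_alloc_etherdev_mqs(); the PCI11x1x devices get their TX queues up front because queue counts must be fixed at allocation time. A sketch of that decision point (the demo function is hypothetical):

static struct net_device *demo_alloc_netdev(struct device *dev,
					    bool is_pci11x1x)
{
	unsigned int txqs = is_pci11x1x ? PCI11X1X_USED_TX_CHANNELS : 1;

	/* Queue counts are fixed here; lan743x_netdev_xmit_frame()
	 * later maps skb->queue_mapping onto these TX channels.
	 */
	return devm_alloc_etherdev_mqs(dev, sizeof(struct lan743x_adapter),
				       txqs, LAN743X_USED_RX_CHANNELS);
}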
@@ -3056,6 +3270,8 @@ static const struct dev_pm_ops lan743x_pm_ops = {
static const struct pci_device_id lan743x_pcidev_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A011) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A041) },
{ 0, }
};
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index aaf7aaeaba0c..1ca5f3216403 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -16,8 +16,13 @@
#define ID_REV_ID_MASK_ (0xFFFF0000)
#define ID_REV_ID_LAN7430_ (0x74300000)
#define ID_REV_ID_LAN7431_ (0x74310000)
-#define ID_REV_IS_VALID_CHIP_ID_(id_rev) \
- (((id_rev) & 0xFFF00000) == 0x74300000)
+#define ID_REV_ID_LAN743X_ (0x74300000)
+#define ID_REV_ID_A011_ (0xA0110000) // PCI11010
+#define ID_REV_ID_A041_ (0xA0410000) // PCI11414
+#define ID_REV_ID_A0X1_ (0xA0010000)
+#define ID_REV_IS_VALID_CHIP_ID_(id_rev) \
+ ((((id_rev) & 0xFFF00000) == ID_REV_ID_LAN743X_) || \
+ (((id_rev) & 0xFF0F0000) == ID_REV_ID_A0X1_))
#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
#define ID_REV_CHIP_REV_A0_ (0x00000000)
#define ID_REV_CHIP_REV_B0_ (0x00000010)
@@ -25,6 +30,17 @@
#define FPGA_REV (0x04)
#define FPGA_REV_GET_MINOR_(fpga_rev) (((fpga_rev) >> 8) & 0x000000FF)
#define FPGA_REV_GET_MAJOR_(fpga_rev) ((fpga_rev) & 0x000000FF)
+#define FPGA_SGMII_OP BIT(24)
+
+#define STRAP_READ (0x0C)
+#define STRAP_READ_USE_SGMII_EN_ BIT(22)
+#define STRAP_READ_SGMII_EN_ BIT(6)
+#define STRAP_READ_SGMII_REFCLK_ BIT(5)
+#define STRAP_READ_SGMII_2_5G_ BIT(4)
+#define STRAP_READ_BASE_X_ BIT(3)
+#define STRAP_READ_RGMII_TXC_DELAY_EN_ BIT(2)
+#define STRAP_READ_RGMII_RXC_DELAY_EN_ BIT(1)
+#define STRAP_READ_ADV_PM_DISABLE_ BIT(0)
#define HW_CFG (0x010)
#define HW_CFG_RELOAD_TYPE_ALL_ (0x00000FC0)
@@ -70,6 +86,40 @@
#define E2P_DATA (0x044)
+/* Hearthstone top level & System Reg Addresses */
+#define ETH_CTRL_REG_ADDR_BASE (0x0000)
+#define ETH_SYS_REG_ADDR_BASE (0x4000)
+#define CONFIG_REG_ADDR_BASE (0x0000)
+#define ETH_EEPROM_REG_ADDR_BASE (0x0E00)
+#define ETH_OTP_REG_ADDR_BASE (0x1000)
+#define SYS_LOCK_REG (0x00A0)
+#define SYS_LOCK_REG_MAIN_LOCK_ BIT(7)
+#define SYS_LOCK_REG_GEN_PERI_LOCK_ BIT(5)
+#define SYS_LOCK_REG_SPI_PERI_LOCK_ BIT(4)
+#define SYS_LOCK_REG_SMBUS_PERI_LOCK_ BIT(3)
+#define SYS_LOCK_REG_UART_SS_LOCK_ BIT(2)
+#define SYS_LOCK_REG_ENET_SS_LOCK_ BIT(1)
+#define SYS_LOCK_REG_USB_SS_LOCK_ BIT(0)
+#define ETH_SYSTEM_SYS_LOCK_REG (ETH_SYS_REG_ADDR_BASE + \
+ CONFIG_REG_ADDR_BASE + \
+ SYS_LOCK_REG)
+#define HS_EEPROM_REG_ADDR_BASE (ETH_SYS_REG_ADDR_BASE + \
+ ETH_EEPROM_REG_ADDR_BASE)
+#define HS_E2P_CMD (HS_EEPROM_REG_ADDR_BASE + 0x0000)
+#define HS_E2P_CMD_EPC_BUSY_ BIT(31)
+#define HS_E2P_CMD_EPC_CMD_WRITE_ GENMASK(29, 28)
+#define HS_E2P_CMD_EPC_CMD_READ_ (0x0)
+#define HS_E2P_CMD_EPC_TIMEOUT_ BIT(17)
+#define HS_E2P_CMD_EPC_ADDR_MASK_ GENMASK(15, 0)
+#define HS_E2P_DATA (HS_EEPROM_REG_ADDR_BASE + 0x0004)
+#define HS_E2P_DATA_MASK_ GENMASK(7, 0)
+#define HS_E2P_CFG (HS_EEPROM_REG_ADDR_BASE + 0x0008)
+#define HS_E2P_CFG_I2C_PULSE_MASK_ GENMASK(19, 16)
+#define HS_E2P_CFG_EEPROM_SIZE_SEL_ BIT(12)
+#define HS_E2P_CFG_I2C_BAUD_RATE_MASK_ GENMASK(9, 8)
+#define HS_E2P_CFG_TEST_EEPR_TO_BYP_ BIT(0)
+#define HS_E2P_PAD_CTL (HS_EEPROM_REG_ADDR_BASE + 0x000C)
+
#define GPIO_CFG0 (0x050)
#define GPIO_CFG0_GPIO_DIR_BIT_(bit) BIT(16 + (bit))
#define GPIO_CFG0_GPIO_DATA_BIT_(bit) BIT(0 + (bit))
@@ -135,6 +185,13 @@
#define MAC_RX_ADDRL (0x11C)
#define MAC_MII_ACC (0x120)
+#define MAC_MII_ACC_MDC_CYCLE_SHIFT_ (16)
+#define MAC_MII_ACC_MDC_CYCLE_MASK_ (0x00070000)
+#define MAC_MII_ACC_MDC_CYCLE_2_5MHZ_ (0)
+#define MAC_MII_ACC_MDC_CYCLE_5MHZ_ (1)
+#define MAC_MII_ACC_MDC_CYCLE_12_5MHZ_ (2)
+#define MAC_MII_ACC_MDC_CYCLE_25MHZ_ (3)
+#define MAC_MII_ACC_MDC_CYCLE_1_25MHZ_ (4)
#define MAC_MII_ACC_PHY_ADDR_SHIFT_ (11)
#define MAC_MII_ACC_PHY_ADDR_MASK_ (0x0000F800)
#define MAC_MII_ACC_MIIRINDA_SHIFT_ (6)
@@ -143,6 +200,15 @@
#define MAC_MII_ACC_MII_WRITE_ (0x00000002)
#define MAC_MII_ACC_MII_BUSY_ BIT(0)
+#define MAC_MII_ACC_MIIMMD_SHIFT_ (6)
+#define MAC_MII_ACC_MIIMMD_MASK_ (0x000007C0)
+#define MAC_MII_ACC_MIICL45_ BIT(3)
+#define MAC_MII_ACC_MIICMD_MASK_ (0x00000006)
+#define MAC_MII_ACC_MIICMD_ADDR_ (0x00000000)
+#define MAC_MII_ACC_MIICMD_WRITE_ (0x00000002)
+#define MAC_MII_ACC_MIICMD_READ_ (0x00000004)
+#define MAC_MII_ACC_MIICMD_READ_INC_ (0x00000006)
+
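The new fields extend MAC_MII_ACC for IEEE 802.3 Clause 45 access. A sketch of how a C45 address phase might be composed from them follows; the sequencing against MAC_MII_DATA and the busy-wait are the driver's job, and build_c45_addr_cmd is a hypothetical helper, not a function in this patch:

    static u32 build_c45_addr_cmd(int phy_addr, int mmd)
    {
        u32 cmd;

        cmd = (phy_addr << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
              MAC_MII_ACC_PHY_ADDR_MASK_;
        cmd |= (mmd << MAC_MII_ACC_MIIMMD_SHIFT_) & MAC_MII_ACC_MIIMMD_MASK_;
        cmd |= MAC_MII_ACC_MIICL45_;
        cmd |= MAC_MII_ACC_MIICMD_ADDR_; /* address phase; value 0, kept for intent */
        return cmd | MAC_MII_ACC_MII_BUSY_;
    }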
#define MAC_MII_DATA (0x124)
#define MAC_EEE_TX_LPI_REQ_DLY_CNT (0x130)
@@ -214,6 +280,11 @@
#define MAC_WUCSR2 (0x600)
+#define SGMII_CTL (0x728)
+#define SGMII_CTL_SGMII_ENABLE_ BIT(31)
+#define SGMII_CTL_LINK_STATUS_SOURCE_ BIT(8)
+#define SGMII_CTL_SGMII_POWER_DN_ BIT(1)
+
#define INT_STS (0x780)
#define INT_BIT_DMA_RX_(channel) BIT(24 + (channel))
#define INT_BIT_ALL_RX_ (0x0F000000)
@@ -261,8 +332,11 @@
#define INT_MOD_CFG5 (0x7D4)
#define INT_MOD_CFG6 (0x7D8)
#define INT_MOD_CFG7 (0x7DC)
+#define INT_MOD_CFG8 (0x7E0)
+#define INT_MOD_CFG9 (0x7E4)
#define PTP_CMD_CTL (0x0A00)
+#define PTP_CMD_CTL_PTP_LTC_TARGET_READ_ BIT(13)
#define PTP_CMD_CTL_PTP_CLK_STP_NSEC_ BIT(6)
#define PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_ BIT(5)
#define PTP_CMD_CTL_PTP_CLOCK_LOAD_ BIT(4)
@@ -284,9 +358,51 @@
(((value) & 0x7) << (1 + ((channel) << 2)))
#define PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel) (BIT((channel) << 2))
+#define HS_PTP_GENERAL_CONFIG (0x0A04)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_(channel) \
+ (0xf << (4 + ((channel) << 2)))
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_ (0)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500NS_ (1)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1US_ (2)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5US_ (3)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_ (4)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50US_ (5)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_ (6)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500US_ (7)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_ (8)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5MS_ (9)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_ (10)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50MS_ (11)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100MS_ (12)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_ (13)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGG_ (14)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_INT_ (15)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_(channel, value) \
+ (((value) & 0xf) << (4 + ((channel) << 2)))
+#define HS_PTP_GENERAL_CONFIG_EVENT_POL_X_(channel) (BIT(1 + ((channel) * 2)))
+#define HS_PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel) (BIT((channel) * 2))
+
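Each of the two event channels owns a 4-bit pulse-width field plus a polarity bit and a reload bit inside HS_PTP_GENERAL_CONFIG. Expanding the macros for channel 1 makes the packing concrete; a sketch, not driver code:

    u32 mask = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_(1);  /* 0xF << 8 */
    u32 set  = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_(1,
                   HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_); /* 13 << 8  */
    u32 pol  = HS_PTP_GENERAL_CONFIG_EVENT_POL_X_(1);         /* BIT(3)   */
    u32 rld  = HS_PTP_GENERAL_CONFIG_RELOAD_ADD_X_(1);        /* BIT(2)   */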
#define PTP_INT_STS (0x0A08)
+#define PTP_INT_IO_FE_MASK_ GENMASK(31, 24)
+#define PTP_INT_IO_FE_SHIFT_ (24)
+#define PTP_INT_IO_FE_SET_(channel) BIT(24 + (channel))
+#define PTP_INT_IO_RE_MASK_ GENMASK(23, 16)
+#define PTP_INT_IO_RE_SHIFT_ (16)
+#define PTP_INT_IO_RE_SET_(channel) BIT(16 + (channel))
+#define PTP_INT_TX_TS_OVRFL_INT_ BIT(14)
+#define PTP_INT_TX_SWTS_ERR_INT_ BIT(13)
+#define PTP_INT_TX_TS_INT_ BIT(12)
+#define PTP_INT_RX_TS_OVRFL_INT_ BIT(9)
+#define PTP_INT_RX_TS_INT_ BIT(8)
+#define PTP_INT_TIMER_INT_B_ BIT(1)
+#define PTP_INT_TIMER_INT_A_ BIT(0)
#define PTP_INT_EN_SET (0x0A0C)
+#define PTP_INT_EN_FE_EN_SET_(channel) BIT(24 + (channel))
+#define PTP_INT_EN_RE_EN_SET_(channel) BIT(16 + (channel))
+#define PTP_INT_EN_TIMER_SET_(channel) BIT(channel)
#define PTP_INT_EN_CLR (0x0A10)
+#define PTP_INT_EN_FE_EN_CLR_(channel) BIT(24 + (channel))
+#define PTP_INT_EN_RE_EN_CLR_(channel) BIT(16 + (channel))
#define PTP_INT_BIT_TX_SWTS_ERR_ BIT(13)
#define PTP_INT_BIT_TX_TS_ BIT(12)
#define PTP_INT_BIT_TIMER_B_ BIT(1)
@@ -304,6 +420,16 @@
#define PTP_CLOCK_TARGET_NS_X(channel) (0x0A34 + ((channel) << 4))
#define PTP_CLOCK_TARGET_RELOAD_SEC_X(channel) (0x0A38 + ((channel) << 4))
#define PTP_CLOCK_TARGET_RELOAD_NS_X(channel) (0x0A3C + ((channel) << 4))
+#define PTP_LTC_SET_SEC_HI (0x0A50)
+#define PTP_LTC_SET_SEC_HI_SEC_47_32_MASK_ GENMASK(15, 0)
+#define PTP_VERSION (0x0A54)
+#define PTP_VERSION_TX_UP_MASK_ GENMASK(31, 24)
+#define PTP_VERSION_TX_LO_MASK_ GENMASK(23, 16)
+#define PTP_VERSION_RX_UP_MASK_ GENMASK(15, 8)
+#define PTP_VERSION_RX_LO_MASK_ GENMASK(7, 0)
+#define PTP_IO_SEL (0x0A58)
+#define PTP_IO_SEL_MASK_ GENMASK(10, 8)
+#define PTP_IO_SEL_SHIFT_ (8)
#define PTP_LATENCY (0x0A5C)
#define PTP_LATENCY_TX_SET_(tx_latency) (((u32)(tx_latency)) << 16)
#define PTP_LATENCY_RX_SET_(rx_latency) \
@@ -328,6 +454,59 @@
#define PTP_TX_MSG_HEADER_MSG_TYPE_ (0x000F0000)
#define PTP_TX_MSG_HEADER_MSG_TYPE_SYNC_ (0x00000000)
+#define PTP_TX_CAP_INFO (0x0AB8)
+#define PTP_TX_CAP_INFO_TX_CH_MASK_ GENMASK(1, 0)
+#define PTP_TX_DOMAIN (0x0ABC)
+#define PTP_TX_DOMAIN_MASK_ GENMASK(23, 16)
+#define PTP_TX_DOMAIN_RANGE_EN_ BIT(15)
+#define PTP_TX_DOMAIN_RANGE_MASK_ GENMASK(7, 0)
+#define PTP_TX_SDOID (0x0AC0)
+#define PTP_TX_SDOID_MASK_ GENMASK(23, 16)
+#define PTP_TX_SDOID_RANGE_EN_ BIT(15)
+#define PTP_TX_SDOID_11_0_MASK_ GENMASK(7, 0)
+#define PTP_IO_CAP_CONFIG (0x0AC4)
+#define PTP_IO_CAP_CONFIG_LOCK_FE_(channel) BIT(24 + (channel))
+#define PTP_IO_CAP_CONFIG_LOCK_RE_(channel) BIT(16 + (channel))
+#define PTP_IO_CAP_CONFIG_FE_CAP_EN_(channel) BIT(8 + (channel))
+#define PTP_IO_CAP_CONFIG_RE_CAP_EN_(channel) BIT(0 + (channel))
+#define PTP_IO_RE_LTC_SEC_CAP_X (0x0AC8)
+#define PTP_IO_RE_LTC_NS_CAP_X (0x0ACC)
+#define PTP_IO_FE_LTC_SEC_CAP_X (0x0AD0)
+#define PTP_IO_FE_LTC_NS_CAP_X (0x0AD4)
+#define PTP_IO_EVENT_OUTPUT_CFG (0x0AD8)
+#define PTP_IO_EVENT_OUTPUT_CFG_SEL_(channel) BIT(16 + (channel))
+#define PTP_IO_EVENT_OUTPUT_CFG_EN_(channel) BIT(0 + (channel))
+#define PTP_IO_PIN_CFG (0x0ADC)
+#define PTP_IO_PIN_CFG_OBUF_TYPE_(channel) BIT(0 + (channel))
+#define PTP_LTC_RD_SEC_HI (0x0AF0)
+#define PTP_LTC_RD_SEC_HI_SEC_47_32_MASK_ GENMASK(15, 0)
+#define PTP_LTC_RD_SEC_LO (0x0AF4)
+#define PTP_LTC_RD_NS (0x0AF8)
+#define PTP_LTC_RD_NS_29_0_MASK_ GENMASK(29, 0)
+#define PTP_LTC_RD_SUBNS (0x0AFC)
+#define PTP_RX_USER_MAC_HI (0x0B00)
+#define PTP_RX_USER_MAC_HI_47_32_MASK_ GENMASK(15, 0)
+#define PTP_RX_USER_MAC_LO (0x0B04)
+#define PTP_RX_USER_IP_ADDR_0 (0x0B20)
+#define PTP_RX_USER_IP_ADDR_1 (0x0B24)
+#define PTP_RX_USER_IP_ADDR_2 (0x0B28)
+#define PTP_RX_USER_IP_ADDR_3 (0x0B2C)
+#define PTP_RX_USER_IP_MASK_0 (0x0B30)
+#define PTP_RX_USER_IP_MASK_1 (0x0B34)
+#define PTP_RX_USER_IP_MASK_2 (0x0B38)
+#define PTP_RX_USER_IP_MASK_3 (0x0B3C)
+#define PTP_TX_USER_MAC_HI (0x0B40)
+#define PTP_TX_USER_MAC_HI_47_32_MASK_ GENMASK(15, 0)
+#define PTP_TX_USER_MAC_LO (0x0B44)
+#define PTP_TX_USER_IP_ADDR_0 (0x0B60)
+#define PTP_TX_USER_IP_ADDR_1 (0x0B64)
+#define PTP_TX_USER_IP_ADDR_2 (0x0B68)
+#define PTP_TX_USER_IP_ADDR_3 (0x0B6C)
+#define PTP_TX_USER_IP_MASK_0 (0x0B70)
+#define PTP_TX_USER_IP_MASK_1 (0x0B74)
+#define PTP_TX_USER_IP_MASK_2 (0x0B78)
+#define PTP_TX_USER_IP_MASK_3 (0x0B7C)
+
#define DMAC_CFG (0xC00)
#define DMAC_CFG_COAL_EN_ BIT(16)
#define DMAC_CFG_CH_ARB_SEL_RX_HIGH_ (0x00000000)
@@ -483,6 +662,20 @@
#define OTP_STATUS (0x1030)
#define OTP_STATUS_BUSY_ BIT(0)
+/* Hearthstone OTP block registers */
+#define HS_OTP_BLOCK_BASE (ETH_SYS_REG_ADDR_BASE + \
+ ETH_OTP_REG_ADDR_BASE)
+#define HS_OTP_PWR_DN (HS_OTP_BLOCK_BASE + 0x0)
+#define HS_OTP_ADDR_HIGH (HS_OTP_BLOCK_BASE + 0x4)
+#define HS_OTP_ADDR_LOW (HS_OTP_BLOCK_BASE + 0x8)
+#define HS_OTP_PRGM_DATA (HS_OTP_BLOCK_BASE + 0x10)
+#define HS_OTP_PRGM_MODE (HS_OTP_BLOCK_BASE + 0x14)
+#define HS_OTP_READ_DATA (HS_OTP_BLOCK_BASE + 0x18)
+#define HS_OTP_FUNC_CMD (HS_OTP_BLOCK_BASE + 0x20)
+#define HS_OTP_TST_CMD (HS_OTP_BLOCK_BASE + 0x24)
+#define HS_OTP_CMD_GO (HS_OTP_BLOCK_BASE + 0x28)
+#define HS_OTP_STATUS (HS_OTP_BLOCK_BASE + 0x30)
+
/* MAC statistics registers */
#define STAT_RX_FCS_ERRORS (0x1200)
#define STAT_RX_ALIGNMENT_ERRORS (0x1204)
@@ -541,10 +734,12 @@
#define LAN743X_MAX_RX_CHANNELS (4)
#define LAN743X_MAX_TX_CHANNELS (1)
+#define PCI11X1X_MAX_TX_CHANNELS (4)
struct lan743x_adapter;
#define LAN743X_USED_RX_CHANNELS (4)
#define LAN743X_USED_TX_CHANNELS (1)
+#define PCI11X1X_USED_TX_CHANNELS (4)
#define LAN743X_INT_MOD (400)
#if (LAN743X_USED_RX_CHANNELS > LAN743X_MAX_RX_CHANNELS)
@@ -553,12 +748,17 @@ struct lan743x_adapter;
#if (LAN743X_USED_TX_CHANNELS > LAN743X_MAX_TX_CHANNELS)
#error Invalid LAN743X_USED_TX_CHANNELS
#endif
+#if (PCI11X1X_USED_TX_CHANNELS > PCI11X1X_MAX_TX_CHANNELS)
+#error Invalid PCI11X1X_USED_TX_CHANNELS
+#endif
/* PCI */
/* SMSC acquired EFAR late 1990's, MCHP acquired SMSC 2012 */
#define PCI_VENDOR_ID_SMSC PCI_VENDOR_ID_EFAR
#define PCI_DEVICE_ID_SMSC_LAN7430 (0x7430)
#define PCI_DEVICE_ID_SMSC_LAN7431 (0x7431)
+#define PCI_DEVICE_ID_SMSC_A011 (0xA011)
+#define PCI_DEVICE_ID_SMSC_A041 (0xA041)
#define PCI_CONFIG_LENGTH (0x1000)
@@ -607,13 +807,14 @@ struct lan743x_vector {
};
#define LAN743X_MAX_VECTOR_COUNT (8)
+#define PCI11X1X_MAX_VECTOR_COUNT (16)
struct lan743x_intr {
int flags;
unsigned int irq;
- struct lan743x_vector vector_list[LAN743X_MAX_VECTOR_COUNT];
+ struct lan743x_vector vector_list[PCI11X1X_MAX_VECTOR_COUNT];
int number_of_vectors;
bool using_vectors;
@@ -668,6 +869,7 @@ struct lan743x_tx {
int last_tail;
struct napi_struct napi;
+ u32 frame_count;
struct sk_buff *overflow_skb;
};
@@ -721,8 +923,17 @@ struct lan743x_adapter {
u8 mac_address[ETH_ALEN];
struct lan743x_phy phy;
- struct lan743x_tx tx[LAN743X_MAX_TX_CHANNELS];
- struct lan743x_rx rx[LAN743X_MAX_RX_CHANNELS];
+ struct lan743x_tx tx[PCI11X1X_USED_TX_CHANNELS];
+ struct lan743x_rx rx[LAN743X_USED_RX_CHANNELS];
+ bool is_pci11x1x;
+ bool is_sgmii_en;
+ /* protect ethernet syslock */
+ spinlock_t eth_syslock_spinlock;
+ bool eth_syslock_en;
+ u32 eth_syslock_acquire_cnt;
+ u8 max_tx_channels;
+ u8 used_tx_channels;
+ u8 max_vector_count;
#define LAN743X_ADAPTER_FLAG_OTP BIT(0)
u32 flags;
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 8b7a8d879083..6a11e2ceb013 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -25,6 +25,18 @@ static void lan743x_ptp_clock_set(struct lan743x_adapter *adapter,
u32 seconds, u32 nano_seconds,
u32 sub_nano_seconds);
+static int lan743x_get_channel(u32 ch_map)
+{
+ int idx;
+
+ for (idx = 0; idx < 32; idx++) {
+ if (ch_map & (0x1 << idx))
+ return idx;
+ }
+
+ return -EINVAL;
+}
+
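lan743x_get_channel() above is a linear find-first-set over the interrupt channel map. The kernel's ffs() expresses the same operation; an equivalent sketch, not what the patch uses:

    static int lan743x_get_channel_ffs(u32 ch_map)
    {
        /* ffs() is 1-based and returns 0 for an empty map */
        return ch_map ? ffs(ch_map) - 1 : -EINVAL;
    }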
int lan743x_gpio_init(struct lan743x_adapter *adapter)
{
struct lan743x_gpio *gpio = &adapter->gpio;
@@ -179,6 +191,8 @@ static void lan743x_ptp_release_event_ch(struct lan743x_adapter *adapter,
static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter,
u32 *seconds, u32 *nano_seconds,
u32 *sub_nano_seconds);
+static void lan743x_ptp_io_clock_get(struct lan743x_adapter *adapter,
+ u32 *sec, u32 *nsec, u32 *sub_nsec);
static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
s64 time_step_ns);
@@ -407,7 +421,11 @@ static int lan743x_ptpci_gettime64(struct ptp_clock_info *ptpci,
u32 nano_seconds = 0;
u32 seconds = 0;
- lan743x_ptp_clock_get(adapter, &seconds, &nano_seconds, NULL);
+ if (adapter->is_pci11x1x)
+ lan743x_ptp_io_clock_get(adapter, &seconds, &nano_seconds,
+ NULL);
+ else
+ lan743x_ptp_clock_get(adapter, &seconds, &nano_seconds, NULL);
ts->tv_sec = seconds;
ts->tv_nsec = nano_seconds;
@@ -671,6 +689,322 @@ failed:
return ret;
}
+static void lan743x_ptp_io_perout_off(struct lan743x_adapter *adapter,
+ u32 index)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ int perout_pin;
+ int event_ch;
+ u32 gen_cfg;
+ int val;
+
+ event_ch = ptp->ptp_io_perout[index];
+ if (event_ch >= 0) {
+ /* set target to far in the future, effectively disabling it */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(event_ch),
+ 0xFFFF0000);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(event_ch),
+ 0);
+
+ gen_cfg = lan743x_csr_read(adapter, HS_PTP_GENERAL_CONFIG);
+ gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_
+ (event_ch));
+ gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_EVENT_POL_X_(event_ch));
+ gen_cfg |= HS_PTP_GENERAL_CONFIG_RELOAD_ADD_X_(event_ch);
+ lan743x_csr_write(adapter, HS_PTP_GENERAL_CONFIG, gen_cfg);
+ if (event_ch)
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_TIMER_INT_B_);
+ else
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_TIMER_INT_A_);
+ lan743x_ptp_release_event_ch(adapter, event_ch);
+ ptp->ptp_io_perout[index] = -1;
+ }
+
+ perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT, index);
+
+ /* Deselect Event output */
+ val = lan743x_csr_read(adapter, PTP_IO_EVENT_OUTPUT_CFG);
+
+ /* Disables the output of Local Time Target compare events */
+ val &= ~PTP_IO_EVENT_OUTPUT_CFG_EN_(perout_pin);
+ lan743x_csr_write(adapter, PTP_IO_EVENT_OUTPUT_CFG, val);
+
+ /* Configure the pin as an open-drain driver */
+ val = lan743x_csr_read(adapter, PTP_IO_PIN_CFG);
+ val &= ~PTP_IO_PIN_CFG_OBUF_TYPE_(perout_pin);
+ lan743x_csr_write(adapter, PTP_IO_PIN_CFG, val);
+ /* Dummy read to make sure the write operation succeeded */
+ val = lan743x_csr_read(adapter, PTP_IO_PIN_CFG);
+}
+
+static int lan743x_ptp_io_perout(struct lan743x_adapter *adapter, int on,
+ struct ptp_perout_request *perout_request)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 period_sec, period_nsec;
+ u32 start_sec, start_nsec;
+ u32 pulse_sec, pulse_nsec;
+ int pulse_width;
+ int perout_pin;
+ int event_ch;
+ u32 gen_cfg;
+ u32 index;
+ int val;
+
+ index = perout_request->index;
+ event_ch = ptp->ptp_io_perout[index];
+
+ if (on) {
+ perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT, index);
+ if (perout_pin < 0)
+ return -EBUSY;
+ } else {
+ lan743x_ptp_io_perout_off(adapter, index);
+ return 0;
+ }
+
+ if (event_ch >= LAN743X_PTP_N_EVENT_CHAN) {
+ /* already on, turn off first */
+ lan743x_ptp_io_perout_off(adapter, index);
+ }
+
+ event_ch = lan743x_ptp_reserve_event_ch(adapter, index);
+ if (event_ch < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "Failed to reserve event channel %d for PEROUT\n",
+ index);
+ goto failed;
+ }
+ ptp->ptp_io_perout[index] = event_ch;
+
+ if (perout_request->flags & PTP_PEROUT_DUTY_CYCLE) {
+ pulse_sec = perout_request->on.sec;
+ pulse_sec += perout_request->on.nsec / 1000000000;
+ pulse_nsec = perout_request->on.nsec % 1000000000;
+ } else {
+ pulse_sec = perout_request->period.sec;
+ pulse_sec += perout_request->period.nsec / 1000000000;
+ pulse_nsec = perout_request->period.nsec % 1000000000;
+ }
+
+ if (pulse_sec == 0) {
+ if (pulse_nsec >= 400000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ } else if (pulse_nsec >= 200000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100MS_;
+ } else if (pulse_nsec >= 100000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50MS_;
+ } else if (pulse_nsec >= 20000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_;
+ } else if (pulse_nsec >= 10000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5MS_;
+ } else if (pulse_nsec >= 2000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_;
+ } else if (pulse_nsec >= 1000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500US_;
+ } else if (pulse_nsec >= 200000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_;
+ } else if (pulse_nsec >= 100000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50US_;
+ } else if (pulse_nsec >= 20000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_;
+ } else if (pulse_nsec >= 10000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5US_;
+ } else if (pulse_nsec >= 2000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1US_;
+ } else if (pulse_nsec >= 1000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500NS_;
+ } else if (pulse_nsec >= 200) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_;
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "perout period too small, min is 200nS\n");
+ goto failed;
+ }
+ } else {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ }
+
+ /* turn off by setting the target far in the future */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(event_ch),
+ 0xFFFF0000);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(event_ch), 0);
+
+ /* Configure to pulse every period */
+ gen_cfg = lan743x_csr_read(adapter, HS_PTP_GENERAL_CONFIG);
+ gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_(event_ch));
+ gen_cfg |= HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_
+ (event_ch, pulse_width);
+ gen_cfg |= HS_PTP_GENERAL_CONFIG_EVENT_POL_X_(event_ch);
+ gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_RELOAD_ADD_X_(event_ch));
+ lan743x_csr_write(adapter, HS_PTP_GENERAL_CONFIG, gen_cfg);
+
+ /* set the reload to one toggle cycle */
+ period_sec = perout_request->period.sec;
+ period_sec += perout_request->period.nsec / 1000000000;
+ period_nsec = perout_request->period.nsec % 1000000000;
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_RELOAD_SEC_X(event_ch),
+ period_sec);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_RELOAD_NS_X(event_ch),
+ period_nsec);
+
+ start_sec = perout_request->start.sec;
+ start_sec += perout_request->start.nsec / 1000000000;
+ start_nsec = perout_request->start.nsec % 1000000000;
+
+ /* set the start time */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(event_ch),
+ start_sec);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(event_ch),
+ start_nsec);
+
+ /* Enable LTC Target Read */
+ val = lan743x_csr_read(adapter, PTP_CMD_CTL);
+ val |= PTP_CMD_CTL_PTP_LTC_TARGET_READ_;
+ lan743x_csr_write(adapter, PTP_CMD_CTL, val);
+
+ /* Configure as a push/pull driver */
+ val = lan743x_csr_read(adapter, PTP_IO_PIN_CFG);
+ val |= PTP_IO_PIN_CFG_OBUF_TYPE_(perout_pin);
+ lan743x_csr_write(adapter, PTP_IO_PIN_CFG, val);
+
+ /* Select Event output */
+ val = lan743x_csr_read(adapter, PTP_IO_EVENT_OUTPUT_CFG);
+ if (event_ch)
+ /* Channel B as the output */
+ val |= PTP_IO_EVENT_OUTPUT_CFG_SEL_(perout_pin);
+ else
+ /* Channel A as the output */
+ val &= ~PTP_IO_EVENT_OUTPUT_CFG_SEL_(perout_pin);
+
+ /* Enables the output of Local Time Target compare events */
+ val |= PTP_IO_EVENT_OUTPUT_CFG_EN_(perout_pin);
+ lan743x_csr_write(adapter, PTP_IO_EVENT_OUTPUT_CFG, val);
+
+ return 0;
+
+failed:
+ lan743x_ptp_io_perout_off(adapter, index);
+ return -ENODEV;
+}
+
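lan743x_ptp_io_perout() is reached through the standard PTP character-device UAPI. A hedged user-space sketch that would exercise it, assuming the clock shows up as /dev/ptp0 and pin 0 has already been assigned PTP_PF_PEROUT (e.g. via the PTP_PIN_SETFUNC ioctl); start is left at zero for brevity:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/ptp_clock.h>

    int start_1hz_output(void)
    {
        struct ptp_perout_request req;
        int fd = open("/dev/ptp0", O_RDWR); /* device path is an assumption */

        if (fd < 0)
            return -1;
        memset(&req, 0, sizeof(req));
        req.index = 0;      /* maps to ptp_io_perout[0] above */
        req.period.sec = 1; /* 1 s period; pulse_sec != 0 picks the 200 ms width */
        return ioctl(fd, PTP_PEROUT_REQUEST, &req);
    }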
+static void lan743x_ptp_io_extts_off(struct lan743x_adapter *adapter,
+ u32 index)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ struct lan743x_extts *extts;
+ int val;
+
+ extts = &ptp->extts[index];
+ /* PTP Interrupt Enable Clear Register */
+ if (extts->flags & PTP_FALLING_EDGE)
+ val = PTP_INT_EN_FE_EN_CLR_(index);
+ else
+ val = PTP_INT_EN_RE_EN_CLR_(index);
+ lan743x_csr_write(adapter, PTP_INT_EN_CLR, val);
+
+ /* Disables PTP-IO edge lock */
+ val = lan743x_csr_read(adapter, PTP_IO_CAP_CONFIG);
+ if (extts->flags & PTP_FALLING_EDGE) {
+ val &= ~PTP_IO_CAP_CONFIG_LOCK_FE_(index);
+ val &= ~PTP_IO_CAP_CONFIG_FE_CAP_EN_(index);
+ } else {
+ val &= ~PTP_IO_CAP_CONFIG_LOCK_RE_(index);
+ val &= ~PTP_IO_CAP_CONFIG_RE_CAP_EN_(index);
+ }
+ lan743x_csr_write(adapter, PTP_IO_CAP_CONFIG, val);
+
+ /* PTP-IO De-select register */
+ val = lan743x_csr_read(adapter, PTP_IO_SEL);
+ val &= ~PTP_IO_SEL_MASK_;
+ lan743x_csr_write(adapter, PTP_IO_SEL, val);
+
+ /* Clear timestamp */
+ memset(&extts->ts, 0, sizeof(struct timespec64));
+ extts->flags = 0;
+}
+
+static int lan743x_ptp_io_event_cap_en(struct lan743x_adapter *adapter,
+ u32 flags, u32 channel)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ int val;
+
+ if ((flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ptp->command_lock);
+ /* PTP-IO Event Capture Enable */
+ val = lan743x_csr_read(adapter, PTP_IO_CAP_CONFIG);
+ if (flags & PTP_FALLING_EDGE) {
+ val &= ~PTP_IO_CAP_CONFIG_LOCK_RE_(channel);
+ val &= ~PTP_IO_CAP_CONFIG_RE_CAP_EN_(channel);
+ val |= PTP_IO_CAP_CONFIG_LOCK_FE_(channel);
+ val |= PTP_IO_CAP_CONFIG_FE_CAP_EN_(channel);
+ } else {
+ /* Rising edge event as default */
+ val &= ~PTP_IO_CAP_CONFIG_LOCK_FE_(channel);
+ val &= ~PTP_IO_CAP_CONFIG_FE_CAP_EN_(channel);
+ val |= PTP_IO_CAP_CONFIG_LOCK_RE_(channel);
+ val |= PTP_IO_CAP_CONFIG_RE_CAP_EN_(channel);
+ }
+ lan743x_csr_write(adapter, PTP_IO_CAP_CONFIG, val);
+
+ /* PTP-IO Select */
+ val = lan743x_csr_read(adapter, PTP_IO_SEL);
+ val &= ~PTP_IO_SEL_MASK_;
+ val |= channel << PTP_IO_SEL_SHIFT_;
+ lan743x_csr_write(adapter, PTP_IO_SEL, val);
+
+ /* PTP Interrupt Enable Register */
+ if (flags & PTP_FALLING_EDGE)
+ val = PTP_INT_EN_FE_EN_SET_(channel);
+ else
+ val = PTP_INT_EN_RE_EN_SET_(channel);
+ lan743x_csr_write(adapter, PTP_INT_EN_SET, val);
+
+ mutex_unlock(&ptp->command_lock);
+
+ return 0;
+}
+
+static int lan743x_ptp_io_extts(struct lan743x_adapter *adapter, int on,
+ struct ptp_extts_request *extts_request)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 flags = extts_request->flags;
+ u32 index = extts_request->index;
+ struct lan743x_extts *extts;
+ int extts_pin;
+ int ret = 0;
+
+ extts = &ptp->extts[index];
+
+ if (on) {
+ extts_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, index);
+ if (extts_pin < 0)
+ return -EBUSY;
+
+ ret = lan743x_ptp_io_event_cap_en(adapter, flags, index);
+ if (!ret)
+ extts->flags = flags;
+ } else {
+ lan743x_ptp_io_extts_off(adapter, index);
+ }
+
+ return ret;
+}
+
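The EXTTS path is likewise driven by the generic UAPI. A minimal sketch under the same /dev/ptp0 assumption, arming rising-edge capture on channel 0 and blocking for one event:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/ptp_clock.h>

    int read_one_extts(void)
    {
        struct ptp_extts_request req;
        struct ptp_extts_event ev;
        int fd = open("/dev/ptp0", O_RDWR);

        if (fd < 0)
            return -1;
        memset(&req, 0, sizeof(req));
        req.index = 0;
        req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;
        if (ioctl(fd, PTP_EXTTS_REQUEST, &req))
            return -1;
        /* captured timestamps are delivered via read() on the chardev */
        if (read(fd, &ev, sizeof(ev)) != sizeof(ev))
            return -1;
        return 0;
    }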
static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
struct ptp_clock_request *request, int on)
{
@@ -682,11 +1016,19 @@ static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
if (request) {
switch (request->type) {
case PTP_CLK_REQ_EXTTS:
+ if (request->extts.index < ptpci->n_ext_ts)
+ return lan743x_ptp_io_extts(adapter, on,
+ &request->extts);
return -EINVAL;
case PTP_CLK_REQ_PEROUT:
- if (request->perout.index < ptpci->n_per_out)
- return lan743x_ptp_perout(adapter, on,
+ if (request->perout.index < ptpci->n_per_out) {
+ if (adapter->is_pci11x1x)
+ return lan743x_ptp_io_perout(adapter, on,
+ &request->perout);
+ else
+ return lan743x_ptp_perout(adapter, on,
&request->perout);
+ }
return -EINVAL;
case PTP_CLK_REQ_PPS:
return -EINVAL;
@@ -715,8 +1057,8 @@ static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp,
switch (func) {
case PTP_PF_NONE:
case PTP_PF_PEROUT:
- break;
case PTP_PF_EXTTS:
+ break;
case PTP_PF_PHYSYNC:
default:
result = -1;
@@ -725,6 +1067,33 @@ static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp,
return result;
}
+static void lan743x_ptp_io_event_clock_get(struct lan743x_adapter *adapter,
+ bool fe, u8 channel,
+ struct timespec64 *ts)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ struct lan743x_extts *extts;
+ u32 sec, nsec;
+
+ mutex_lock(&ptp->command_lock);
+ if (fe) {
+ sec = lan743x_csr_read(adapter, PTP_IO_FE_LTC_SEC_CAP_X);
+ nsec = lan743x_csr_read(adapter, PTP_IO_FE_LTC_NS_CAP_X);
+ } else {
+ sec = lan743x_csr_read(adapter, PTP_IO_RE_LTC_SEC_CAP_X);
+ nsec = lan743x_csr_read(adapter, PTP_IO_RE_LTC_NS_CAP_X);
+ }
+
+ mutex_unlock(&ptp->command_lock);
+
+ /* Update Local timestamp */
+ extts = &ptp->extts[channel];
+ extts->ts.tv_sec = sec;
+ extts->ts.tv_nsec = nsec;
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+}
+
static long lan743x_ptpci_do_aux_work(struct ptp_clock_info *ptpci)
{
struct lan743x_ptp *ptp =
@@ -733,41 +1102,121 @@ static long lan743x_ptpci_do_aux_work(struct ptp_clock_info *ptpci)
container_of(ptp, struct lan743x_adapter, ptp);
u32 cap_info, cause, header, nsec, seconds;
bool new_timestamp_available = false;
+ struct ptp_clock_event ptp_event;
+ struct timespec64 ts;
+ int ptp_int_sts;
int count = 0;
+ int channel;
+ s64 ns;
- while ((count < 100) &&
- (lan743x_csr_read(adapter, PTP_INT_STS) & PTP_INT_BIT_TX_TS_)) {
+ ptp_int_sts = lan743x_csr_read(adapter, PTP_INT_STS);
+ while ((count < 100) && ptp_int_sts) {
count++;
- cap_info = lan743x_csr_read(adapter, PTP_CAP_INFO);
-
- if (PTP_CAP_INFO_TX_TS_CNT_GET_(cap_info) > 0) {
- seconds = lan743x_csr_read(adapter,
- PTP_TX_EGRESS_SEC);
- nsec = lan743x_csr_read(adapter, PTP_TX_EGRESS_NS);
- cause = (nsec &
- PTP_TX_EGRESS_NS_CAPTURE_CAUSE_MASK_);
- header = lan743x_csr_read(adapter,
- PTP_TX_MSG_HEADER);
-
- if (cause == PTP_TX_EGRESS_NS_CAPTURE_CAUSE_SW_) {
- nsec &= PTP_TX_EGRESS_NS_TS_NS_MASK_;
- lan743x_ptp_tx_ts_enqueue_ts(adapter,
- seconds, nsec,
- header);
- new_timestamp_available = true;
- } else if (cause ==
- PTP_TX_EGRESS_NS_CAPTURE_CAUSE_AUTO_) {
- netif_err(adapter, drv, adapter->netdev,
- "Auto capture cause not supported\n");
+
+ if (ptp_int_sts & PTP_INT_BIT_TX_TS_) {
+ cap_info = lan743x_csr_read(adapter, PTP_CAP_INFO);
+
+ if (PTP_CAP_INFO_TX_TS_CNT_GET_(cap_info) > 0) {
+ seconds = lan743x_csr_read(adapter,
+ PTP_TX_EGRESS_SEC);
+ nsec = lan743x_csr_read(adapter,
+ PTP_TX_EGRESS_NS);
+ cause = (nsec &
+ PTP_TX_EGRESS_NS_CAPTURE_CAUSE_MASK_);
+ header = lan743x_csr_read(adapter,
+ PTP_TX_MSG_HEADER);
+
+ if (cause ==
+ PTP_TX_EGRESS_NS_CAPTURE_CAUSE_SW_) {
+ nsec &= PTP_TX_EGRESS_NS_TS_NS_MASK_;
+ lan743x_ptp_tx_ts_enqueue_ts(adapter,
+ seconds,
+ nsec,
+ header);
+ new_timestamp_available = true;
+ } else if (cause ==
+ PTP_TX_EGRESS_NS_CAPTURE_CAUSE_AUTO_) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Auto capture cause not supported\n");
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "unknown tx timestamp capture cause\n");
+ }
} else {
netif_warn(adapter, drv, adapter->netdev,
- "unknown tx timestamp capture cause\n");
+ "TX TS INT but no TX TS CNT\n");
}
- } else {
- netif_warn(adapter, drv, adapter->netdev,
- "TX TS INT but no TX TS CNT\n");
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_BIT_TX_TS_);
+ }
+
+ if (ptp_int_sts & PTP_INT_IO_FE_MASK_) {
+ do {
+ channel = lan743x_get_channel((ptp_int_sts &
+ PTP_INT_IO_FE_MASK_) >>
+ PTP_INT_IO_FE_SHIFT_);
+ if (channel >= 0 &&
+ channel < PCI11X1X_PTP_IO_MAX_CHANNELS) {
+ lan743x_ptp_io_event_clock_get(adapter,
+ true,
+ channel,
+ &ts);
+ /* PTP Falling Event post */
+ ns = timespec64_to_ns(&ts);
+ ptp_event.timestamp = ns;
+ ptp_event.index = channel;
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_clock_event(ptp->ptp_clock,
+ &ptp_event);
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_IO_FE_SET_
+ (channel));
+ ptp_int_sts &= ~(1 <<
+ (PTP_INT_IO_FE_SHIFT_ +
+ channel));
+ } else {
+ /* Clear falling event interrupts */
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_IO_FE_MASK_);
+ ptp_int_sts &= ~PTP_INT_IO_FE_MASK_;
+ }
+ } while (ptp_int_sts & PTP_INT_IO_FE_MASK_);
+ }
+
+ if (ptp_int_sts & PTP_INT_IO_RE_MASK_) {
+ do {
+ channel = lan743x_get_channel((ptp_int_sts &
+ PTP_INT_IO_RE_MASK_) >>
+ PTP_INT_IO_RE_SHIFT_);
+ if (channel >= 0 &&
+ channel < PCI11X1X_PTP_IO_MAX_CHANNELS) {
+ lan743x_ptp_io_event_clock_get(adapter,
+ false,
+ channel,
+ &ts);
+ /* PTP Rising Event post */
+ ns = timespec64_to_ns(&ts);
+ ptp_event.timestamp = ns;
+ ptp_event.index = channel;
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_clock_event(ptp->ptp_clock,
+ &ptp_event);
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_IO_RE_SET_
+ (channel));
+ ptp_int_sts &= ~(1 <<
+ (PTP_INT_IO_RE_SHIFT_ +
+ channel));
+ } else {
+ /* Clear Rising event interrupt */
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_IO_RE_MASK_);
+ ptp_int_sts &= ~PTP_INT_IO_RE_MASK_;
+ }
+ } while (ptp_int_sts & PTP_INT_IO_RE_MASK_);
}
- lan743x_csr_write(adapter, PTP_INT_STS, PTP_INT_BIT_TX_TS_);
+
+ ptp_int_sts = lan743x_csr_read(adapter, PTP_INT_STS);
}
if (new_timestamp_available)
@@ -802,6 +1251,28 @@ static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter,
mutex_unlock(&ptp->command_lock);
}
+static void lan743x_ptp_io_clock_get(struct lan743x_adapter *adapter,
+ u32 *sec, u32 *nsec, u32 *sub_nsec)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_);
+ lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_CLOCK_READ_);
+
+ if (sec)
+ (*sec) = lan743x_csr_read(adapter, PTP_LTC_RD_SEC_LO);
+
+ if (nsec)
+ (*nsec) = lan743x_csr_read(adapter, PTP_LTC_RD_NS);
+
+ if (sub_nsec)
+ (*sub_nsec) =
+ lan743x_csr_read(adapter, PTP_LTC_RD_SUBNS);
+
+ mutex_unlock(&ptp->command_lock);
+}
+
static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
s64 time_step_ns)
{
@@ -815,8 +1286,12 @@ static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
if (time_step_ns > 15000000000LL) {
/* convert to clock set */
- lan743x_ptp_clock_get(adapter, &unsigned_seconds,
- &nano_seconds, NULL);
+ if (adapter->is_pci11x1x)
+ lan743x_ptp_io_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
+ else
+ lan743x_ptp_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
unsigned_seconds += div_u64_rem(time_step_ns, 1000000000LL,
&remainder);
nano_seconds += remainder;
@@ -831,8 +1306,13 @@ static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
/* convert to clock set */
time_step_ns = -time_step_ns;
- lan743x_ptp_clock_get(adapter, &unsigned_seconds,
- &nano_seconds, NULL);
+ if (adapter->is_pci11x1x) {
+ lan743x_ptp_io_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
+ } else {
+ lan743x_ptp_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
+ }
unsigned_seconds -= div_u64_rem(time_step_ns, 1000000000LL,
&remainder);
nano_seconds_step = remainder;
@@ -1061,6 +1541,8 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
n_pins = LAN7430_N_GPIO;
break;
case ID_REV_ID_LAN7431_:
+ case ID_REV_ID_A011_:
+ case ID_REV_ID_A041_:
n_pins = LAN7431_N_GPIO;
break;
default:
@@ -1088,10 +1570,10 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
adapter->netdev->dev_addr);
ptp->ptp_clock_info.max_adj = LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB;
ptp->ptp_clock_info.n_alarm = 0;
- ptp->ptp_clock_info.n_ext_ts = 0;
+ ptp->ptp_clock_info.n_ext_ts = LAN743X_PTP_N_EXTTS;
ptp->ptp_clock_info.n_per_out = LAN743X_PTP_N_EVENT_CHAN;
ptp->ptp_clock_info.n_pins = n_pins;
- ptp->ptp_clock_info.pps = 0;
+ ptp->ptp_clock_info.pps = LAN743X_PTP_N_PPS;
ptp->ptp_clock_info.pin_config = ptp->pin_config;
ptp->ptp_clock_info.adjfine = lan743x_ptpci_adjfine;
ptp->ptp_clock_info.adjfreq = lan743x_ptpci_adjfreq;
@@ -1307,21 +1789,21 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
- for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
- index++)
+ for (index = 0; index < adapter->used_tx_channels;
+ index++)
lan743x_tx_set_timestamping_mode(&adapter->tx[index],
false, false);
lan743x_ptp_set_sync_ts_insert(adapter, false);
break;
case HWTSTAMP_TX_ON:
- for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
+ for (index = 0; index < adapter->used_tx_channels;
index++)
lan743x_tx_set_timestamping_mode(&adapter->tx[index],
true, false);
lan743x_ptp_set_sync_ts_insert(adapter, false);
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
- for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
+ for (index = 0; index < adapter->used_tx_channels;
index++)
lan743x_tx_set_timestamping_mode(&adapter->tx[index],
true, true);
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h
index 7663bf5d2e33..e26d4eff7133 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.h
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.h
@@ -18,6 +18,9 @@
*/
#define LAN743X_PTP_N_EVENT_CHAN 2
#define LAN743X_PTP_N_PEROUT LAN743X_PTP_N_EVENT_CHAN
+#define LAN743X_PTP_N_EXTTS 4
+#define LAN743X_PTP_N_PPS 0
+#define PCI11X1X_PTP_IO_MAX_CHANNELS 8
struct lan743x_adapter;
@@ -60,6 +63,11 @@ struct lan743x_ptp_perout {
int gpio_pin; /* GPIO pin where output appears */
};
+struct lan743x_extts {
+ int flags;
+ struct timespec64 ts;
+};
+
struct lan743x_ptp {
int flags;
@@ -72,6 +80,8 @@ struct lan743x_ptp {
unsigned long used_event_ch;
struct lan743x_ptp_perout perout[LAN743X_PTP_N_PEROUT];
+ int ptp_io_perout[LAN743X_PTP_N_PEROUT]; /* PTP event channel (0=channel A, 1=channel B) */
+ struct lan743x_extts extts[LAN743X_PTP_N_EXTTS];
bool leds_multiplexed;
bool led_enabled[LAN7430_N_LED];
diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig
index ac273f84b69e..4241ff0e5098 100644
--- a/drivers/net/ethernet/microchip/lan966x/Kconfig
+++ b/drivers/net/ethernet/microchip/lan966x/Kconfig
@@ -1,5 +1,6 @@
config LAN966X_SWITCH
tristate "Lan966x switch driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on HAS_IOMEM
depends on OF
depends on NET_SWITCHDEV
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
index 040cfff9f577..a9ffc719aa0e 100644
--- a/drivers/net/ethernet/microchip/lan966x/Makefile
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_LAN966X_SWITCH) += lan966x-switch.o
lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
lan966x_mac.o lan966x_ethtool.o lan966x_switchdev.o \
- lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o
+ lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o \
+ lan966x_ptp.o
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
index 614f12c2fe6a..e58a27fd8b50 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
@@ -545,6 +545,39 @@ static int lan966x_set_pauseparam(struct net_device *dev,
return phylink_ethtool_set_pauseparam(port->phylink, pause);
}
+static int lan966x_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_phc *phc;
+
+ if (!lan966x->ptp)
+ return ethtool_op_get_ts_info(dev, info);
+
+ phc = &lan966x->phc[LAN966X_PHC_PORT];
+
+ info->phc_index = phc->clock ? ptp_clock_index(phc->clock) : -1;
+ if (info->phc_index == -1) {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ }
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
const struct ethtool_ops lan966x_ethtool_ops = {
.get_link_ksettings = lan966x_get_link_ksettings,
.set_link_ksettings = lan966x_set_link_ksettings,
@@ -556,6 +589,7 @@ const struct ethtool_ops lan966x_ethtool_ops = {
.get_eth_mac_stats = lan966x_get_eth_mac_stats,
.get_rmon_stats = lan966x_get_eth_rmon_stats,
.get_link = ethtool_op_get_link,
+ .get_ts_info = lan966x_get_ts_info,
};
static void lan966x_check_stats_work(struct work_struct *work)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 1f60fd125a1d..e1bcb28039dc 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -4,11 +4,13 @@
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/iopoll.h>
+#include <linux/ip.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/packing.h>
#include <linux/phy/phy.h>
#include <linux/reset.h>
+#include <net/addrconf.h>
#include "lan966x_main.h"
@@ -44,6 +46,7 @@ static const struct lan966x_main_io_resource lan966x_main_iomap[] = {
{ TARGET_ORG, 0, 1 }, /* 0xe2000000 */
{ TARGET_GCB, 0x4000, 1 }, /* 0xe2004000 */
{ TARGET_QS, 0x8000, 1 }, /* 0xe2008000 */
+ { TARGET_PTP, 0xc000, 1 }, /* 0xe200c000 */
{ TARGET_CHIP_TOP, 0x10000, 1 }, /* 0xe2010000 */
{ TARGET_REW, 0x14000, 1 }, /* 0xe2014000 */
{ TARGET_SYS, 0x28000, 1 }, /* 0xe2028000 */
@@ -182,6 +185,9 @@ static int lan966x_port_inj_ready(struct lan966x *lan966x, u8 grp)
{
u32 val;
+ if (lan_rd(lan966x, QS_INJ_STATUS) & QS_INJ_STATUS_FIFO_RDY_SET(BIT(grp)))
+ return 0;
+
return readx_poll_timeout_atomic(lan966x_port_inj_status, lan966x, val,
QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp),
READL_SLEEP_US, READL_TIMEOUT_US);
@@ -201,7 +207,7 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb,
val = lan_rd(lan966x, QS_INJ_STATUS);
if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp)) ||
(QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)))
- return NETDEV_TX_BUSY;
+ goto err;
/* Write start of frame */
lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
@@ -213,7 +219,7 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb,
/* Wait until the fifo is ready */
err = lan966x_port_inj_ready(lan966x, grp);
if (err)
- return NETDEV_TX_BUSY;
+ goto err;
lan_wr((__force u32)ifh[i], lan966x, QS_INJ_WR(grp));
}
@@ -225,7 +231,7 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb,
/* Wait until the fifo is ready */
err = lan966x_port_inj_ready(lan966x, grp);
if (err)
- return NETDEV_TX_BUSY;
+ goto err;
lan_wr(((u32 *)skb->data)[i], lan966x, QS_INJ_WR(grp));
}
@@ -235,7 +241,7 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb,
/* Wait until the fifo is ready */
err = lan966x_port_inj_ready(lan966x, grp);
if (err)
- return NETDEV_TX_BUSY;
+ goto err;
lan_wr(0, lan966x, QS_INJ_WR(grp));
++i;
@@ -255,8 +261,19 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb,
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ return NETDEV_TX_OK;
+
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
+
+err:
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ lan966x_ptp_txtstamp_release(port, skb);
+
+ return NETDEV_TX_BUSY;
}
static void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
@@ -289,10 +306,24 @@ static void lan966x_ifh_set_vid(void *ifh, u64 vid)
IFH_POS_TCI, IFH_LEN * 4, PACK, 0);
}
+static void lan966x_ifh_set_rew_op(void *ifh, u64 rew_op)
+{
+ packing(ifh, &rew_op, IFH_POS_REW_CMD + IFH_WID_REW_CMD - 1,
+ IFH_POS_REW_CMD, IFH_LEN * 4, PACK, 0);
+}
+
+static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp)
+{
+ packing(ifh, &timestamp, IFH_POS_TIMESTAMP + IFH_WID_TIMESTAMP - 1,
+ IFH_POS_TIMESTAMP, IFH_LEN * 4, PACK, 0);
+}
+
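The IFH setters above use the kernel's packing() helper, which addresses a field by its most and least significant bit positions inside the whole header. The same call, annotated as a sketch:

    /* Pack rew_op into IFH bits [POS + WID - 1 .. POS]. IFH_LEN * 4 is the
     * buffer length in bytes, PACK selects the direction (UNPACK reverses
     * it), and the trailing 0 means no bit/byte-order quirks.
     */
    u64 rew_op = IFH_REW_OP_TWO_STEP_PTP;
    packing(ifh, &rew_op, IFH_POS_REW_CMD + IFH_WID_REW_CMD - 1,
            IFH_POS_REW_CMD, IFH_LEN * 4, PACK, 0);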
static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
__be32 ifh[IFH_LEN];
+ int err;
memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
@@ -302,7 +333,20 @@ static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
lan966x_ifh_set_ipv(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
lan966x_ifh_set_vid(ifh, skb_vlan_tag_get(skb));
- return lan966x_port_ifh_xmit(skb, ifh, dev);
+ if (port->lan966x->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ err = lan966x_ptp_txtstamp_request(port, skb);
+ if (err)
+ return err;
+
+ lan966x_ifh_set_rew_op(ifh, LAN966X_SKB_CB(skb)->rew_op);
+ lan966x_ifh_set_timestamp(ifh, LAN966X_SKB_CB(skb)->ts_id);
+ }
+
+ spin_lock(&lan966x->tx_lock);
+ err = lan966x_port_ifh_xmit(skb, ifh, dev);
+ spin_unlock(&lan966x->tx_lock);
+
+ return err;
}
static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)
@@ -350,6 +394,23 @@ static int lan966x_port_get_parent_id(struct net_device *dev,
return 0;
}
+static int lan966x_port_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ if (!phy_has_hwtstamp(dev->phydev) && port->lan966x->ptp) {
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return lan966x_ptp_hwtstamp_set(port, ifr);
+ case SIOCGHWTSTAMP:
+ return lan966x_ptp_hwtstamp_get(port, ifr);
+ }
+ }
+
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
+}
+
static const struct net_device_ops lan966x_port_netdev_ops = {
.ndo_open = lan966x_port_open,
.ndo_stop = lan966x_port_stop,
@@ -360,6 +421,7 @@ static const struct net_device_ops lan966x_port_netdev_ops = {
.ndo_get_stats64 = lan966x_stats_get,
.ndo_set_mac_address = lan966x_port_set_mac_address,
.ndo_get_port_parent_id = lan966x_port_get_parent_id,
+ .ndo_eth_ioctl = lan966x_port_ioctl,
};
bool lan966x_netdevice_check(const struct net_device *dev)
@@ -367,6 +429,33 @@ bool lan966x_netdevice_check(const struct net_device *dev)
return dev->netdev_ops == &lan966x_port_netdev_ops;
}
+static bool lan966x_hw_offload(struct lan966x *lan966x, u32 port,
+ struct sk_buff *skb)
+{
+ u32 val;
+
+ /* IGMP and MLD frames are not forwarded by the HW if
+ * multicast snooping is enabled, therefore don't mark them as
+ * offloaded, to allow the SW to forward the frames accordingly.
+ */
+ val = lan_rd(lan966x, ANA_CPU_FWD_CFG(port));
+ if (!(val & (ANA_CPU_FWD_CFG_IGMP_REDIR_ENA |
+ ANA_CPU_FWD_CFG_MLD_REDIR_ENA)))
+ return true;
+
+ if (skb->protocol == htons(ETH_P_IP) &&
+ ip_hdr(skb)->protocol == IPPROTO_IGMP)
+ return false;
+
+ if (IS_ENABLED(CONFIG_IPV6) &&
+ skb->protocol == htons(ETH_P_IPV6) &&
+ ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) &&
+ !ipv6_mc_check_mld(skb))
+ return false;
+
+ return true;
+}
+
static int lan966x_port_xtr_status(struct lan966x *lan966x, u8 grp)
{
return lan_rd(lan966x, QS_XTR_RD(grp));
@@ -434,6 +523,12 @@ static void lan966x_ifh_get_len(void *ifh, u64 *len)
IFH_POS_LEN, IFH_LEN * 4, UNPACK, 0);
}
+static void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp)
+{
+ packing(ifh, timestamp, IFH_POS_TIMESTAMP + IFH_WID_TIMESTAMP - 1,
+ IFH_POS_TIMESTAMP, IFH_LEN * 4, UNPACK, 0);
+}
+
static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
{
struct lan966x *lan966x = args;
@@ -443,10 +538,10 @@ static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
return IRQ_NONE;
do {
+ u64 src_port, len, timestamp;
struct net_device *dev;
struct sk_buff *skb;
int sz = 0, buf_len;
- u64 src_port, len;
u32 ifh[IFH_LEN];
u32 *buf;
u32 val;
@@ -461,6 +556,7 @@ static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
lan966x_ifh_get_src_port(ifh, &src_port);
lan966x_ifh_get_len(ifh, &len);
+ lan966x_ifh_get_timestamp(ifh, &timestamp);
WARN_ON(src_port >= lan966x->num_phys_ports);
@@ -501,12 +597,20 @@ static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
*buf = val;
}
+ lan966x_ptp_rxtstamp(lan966x, skb, timestamp);
skb->protocol = eth_type_trans(skb, dev);
- if (lan966x->bridge_mask & BIT(src_port))
+ if (lan966x->bridge_mask & BIT(src_port)) {
skb->offload_fwd_mark = 1;
- netif_rx_ni(skb);
+ skb_reset_network_header(skb);
+ if (!lan966x_hw_offload(lan966x, src_port, skb))
+ skb->offload_fwd_mark = 0;
+ }
+
+ if (!skb_defer_rx_timestamp(skb))
+ netif_rx(skb);
+
dev->stats.rx_bytes += len;
dev->stats.rx_packets++;
@@ -628,7 +732,6 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
}
port->phylink = phylink;
- phylink_set_pcs(phylink, &port->phylink_pcs);
err = register_netdev(dev);
if (err) {
@@ -708,7 +811,7 @@ static void lan966x_init(struct lan966x *lan966x)
/* Setup flooding PGIDs */
lan_wr(ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(PGID_MCIPV4) |
ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(PGID_MC) |
- ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(PGID_MC) |
+ ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(PGID_MCIPV6) |
ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(PGID_MC),
lan966x, ANA_FLOODING_IPMC);
@@ -770,6 +873,10 @@ static void lan966x_init(struct lan966x *lan966x)
ANA_PGID_PGID,
lan966x, ANA_PGID(PGID_MCIPV4));
+ lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_MCIPV6));
+
/* Unicast to all other ports */
lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
ANA_PGID_PGID,
@@ -786,6 +893,8 @@ static void lan966x_init(struct lan966x *lan966x)
lan_rmw(ANA_ANAINTR_INTR_ENA_SET(1),
ANA_ANAINTR_INTR_ENA,
lan966x, ANA_ANAINTR);
+
+ spin_lock_init(&lan966x->tx_lock);
}
static int lan966x_ram_init(struct lan966x *lan966x)
@@ -897,6 +1006,17 @@ static int lan966x_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, err, "Unable to use ana irq");
}
+ lan966x->ptp_irq = platform_get_irq_byname(pdev, "ptp");
+ if (lan966x->ptp_irq > 0) {
+ err = devm_request_threaded_irq(&pdev->dev, lan966x->ptp_irq, NULL,
+ lan966x_ptp_irq_handler, IRQF_ONESHOT,
+ "ptp irq", lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Unable to use ptp irq");
+
+ lan966x->ptp = 1;
+ }
+
/* init switch */
lan966x_init(lan966x);
lan966x_stats_init(lan966x);
@@ -931,8 +1051,15 @@ static int lan966x_probe(struct platform_device *pdev)
if (err)
goto cleanup_ports;
+ err = lan966x_ptp_init(lan966x);
+ if (err)
+ goto cleanup_fdb;
+
return 0;
+cleanup_fdb:
+ lan966x_fdb_deinit(lan966x);
+
cleanup_ports:
fwnode_handle_put(portnp);
@@ -958,6 +1085,7 @@ static int lan966x_remove(struct platform_device *pdev)
lan966x_mac_purge_entries(lan966x);
lan966x_mdb_deinit(lan966x);
lan966x_fdb_deinit(lan966x);
+ lan966x_ptp_deinit(lan966x);
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 99c6d0a9f946..ae282da1da74 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -8,6 +8,7 @@
#include <linux/jiffies.h>
#include <linux/phy.h>
#include <linux/phylink.h>
+#include <linux/ptp_clock_kernel.h>
#include <net/switchdev.h>
#include "lan966x_regs.h"
@@ -50,6 +51,13 @@
#define LAN966X_SPEED_100 2
#define LAN966X_SPEED_10 3
+#define LAN966X_PHC_COUNT 3
+#define LAN966X_PHC_PORT 0
+
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_ONE_STEP_PTP 0x3
+#define IFH_REW_OP_TWO_STEP_PTP 0x4
+
/* MAC table entry types.
* ENTRYTYPE_NORMAL is subject to aging.
* ENTRYTYPE_LOCKED is not subject to aging.
@@ -70,6 +78,24 @@ struct lan966x_stat_layout {
char name[ETH_GSTRING_LEN];
};
+struct lan966x_phc {
+ struct ptp_clock *clock;
+ struct ptp_clock_info info;
+ struct hwtstamp_config hwtstamp_config;
+ struct lan966x *lan966x;
+ u8 index;
+};
+
+struct lan966x_skb_cb {
+ u8 rew_op;
+ u16 ts_id;
+ unsigned long jiffies;
+};
+
+#define LAN966X_PTP_TIMEOUT msecs_to_jiffies(10)
+#define LAN966X_SKB_CB(skb) \
+ ((struct lan966x_skb_cb *)((skb)->cb))
+
struct lan966x {
struct device *dev;
@@ -82,6 +108,8 @@ struct lan966x {
u8 base_mac[ETH_ALEN];
+ spinlock_t tx_lock; /* lock for frame transmission */
+
struct net_device *bridge;
u16 bridge_mask;
u16 bridge_fwd_mask;
@@ -105,6 +133,7 @@ struct lan966x {
/* interrupts */
int xtr_irq;
int ana_irq;
+ int ptp_irq;
/* workqueue for fdb */
struct workqueue_struct *fdb_work;
@@ -113,6 +142,14 @@ struct lan966x {
/* mdb */
struct list_head mdb_entries;
struct list_head pgid_entries;
+
+ /* ptp */
+ bool ptp;
+ struct lan966x_phc phc[LAN966X_PHC_COUNT];
+ spinlock_t ptp_clock_lock; /* lock for phc */
+ spinlock_t ptp_ts_id_lock; /* lock for ts_id */
+ struct mutex ptp_lock; /* lock for ptp interface state */
+ u16 ptp_skbs;
};
struct lan966x_port_config {
@@ -135,6 +172,7 @@ struct lan966x_port {
bool vlan_aware;
bool learn_ena;
+ bool mcast_ena;
struct phylink_config phylink_config;
struct phylink_pcs phylink_pcs;
@@ -142,6 +180,10 @@ struct lan966x_port {
struct phylink *phylink;
struct phy *serdes;
struct fwnode_handle *fwnode;
+
+ u8 ptp_cmd;
+ u16 ts_id;
+ struct sk_buff_head tx_skbs;
};
extern const struct phylink_mac_ops lan966x_phylink_mac_ops;
@@ -227,6 +269,20 @@ int lan966x_handle_port_mdb_del(struct lan966x_port *port,
const struct switchdev_obj *obj);
void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid);
void lan966x_mdb_write_entries(struct lan966x *lan966x, u16 vid);
+void lan966x_mdb_clear_entries(struct lan966x *lan966x);
+void lan966x_mdb_restore_entries(struct lan966x *lan966x);
+
+int lan966x_ptp_init(struct lan966x *lan966x);
+void lan966x_ptp_deinit(struct lan966x *lan966x);
+int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr);
+int lan966x_ptp_hwtstamp_get(struct lan966x_port *port, struct ifreq *ifr);
+void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
+ u64 timestamp);
+int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
+ struct sk_buff *skb);
+void lan966x_ptp_txtstamp_release(struct lan966x_port *port,
+ struct sk_buff *skb);
+irqreturn_t lan966x_ptp_irq_handler(int irq, void *args);
static inline void __iomem *lan_addr(void __iomem *base[],
int id, int tinst, int tcnt,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c
index c68d0a99d292..2af55268bf4d 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c
@@ -504,3 +504,48 @@ void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid)
lan966x_mdb_l2_cpu_remove(lan966x, mdb_entry, type);
}
}
+
+void lan966x_mdb_clear_entries(struct lan966x *lan966x)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+ enum macaccess_entry_type type;
+ unsigned char mac[ETH_ALEN];
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ type = lan966x_mdb_classify(mdb_entry->mac);
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ /* Remove just the MAC entry, but keep the PGID for L2
+ * entries, because it can be restored at a later point
+ */
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ }
+}
+
+void lan966x_mdb_restore_entries(struct lan966x *lan966x)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+ enum macaccess_entry_type type;
+ unsigned char mac[ETH_ALEN];
+ bool cpu_copy = false;
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ type = lan966x_mdb_classify(mdb_entry->mac);
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6) {
+ /* Copy the frame to CPU only if the CPU is in the VLAN */
+ if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ mdb_entry->vid) &&
+ mdb_entry->cpu_copy)
+ cpu_copy = true;
+
+ lan966x_mac_ip_learn(lan966x, cpu_copy, mac,
+ mdb_entry->vid, type);
+ } else {
+ lan966x_mac_learn(lan966x, mdb_entry->pgid->index,
+ mdb_entry->mac,
+ mdb_entry->vid, type);
+ }
+ }
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
index b66a9aa00ea4..38a7e95d69b4 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
@@ -9,6 +9,14 @@
#include "lan966x_main.h"
+static struct phylink_pcs *lan966x_phylink_mac_select(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+
+ return &port->phylink_pcs;
+}
+
static void lan966x_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
@@ -114,6 +122,7 @@ static void lan966x_pcs_aneg_restart(struct phylink_pcs *pcs)
const struct phylink_mac_ops lan966x_phylink_mac_ops = {
.validate = phylink_generic_validate,
+ .mac_select_pcs = lan966x_phylink_mac_select,
.mac_config = lan966x_phylink_mac_config,
.mac_prepare = lan966x_phylink_mac_prepare,
.mac_link_down = lan966x_phylink_mac_link_down,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
new file mode 100644
index 000000000000..ae782778d6dd
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -0,0 +1,618 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/ptp_classify.h>
+
+#include "lan966x_main.h"
+
+#define LAN966X_MAX_PTP_ID 512
+
+/* Represents a 1 ppm adjustment in 2^59 fixed-point format with 6.037735849 ns
+ * as reference. The value is calculated as follows:
+ * (1/1000000)/((2^-59)/6.037735849)
+ */
+#define LAN966X_1PPM_FORMAT 3480517749723LL
+
+/* Represents a 1 ppb adjustment in 2^59 fixed-point format with 6.037735849 ns
+ * as reference. The value is calculated as follows:
+ * (1/1000000000)/((2^-59)/6.037735849)
+ */
+#define LAN966X_1PPB_FORMAT 3480517749LL
+
+#define TOD_ACC_PIN 0x5
+
+enum {
+ PTP_PIN_ACTION_IDLE = 0,
+ PTP_PIN_ACTION_LOAD,
+ PTP_PIN_ACTION_SAVE,
+ PTP_PIN_ACTION_CLOCK,
+ PTP_PIN_ACTION_DELTA,
+ PTP_PIN_ACTION_TOD
+};
+
+static u64 lan966x_ptp_get_nominal_value(void)
+{
+ u64 res = 0x304d2df1;
+
+ res <<= 32;
+ return res;
+}
+
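The nominal increment and the 1 ppm/1 ppb constants are the same quantity at different scales. Checking the arithmetic as a sketch, values rounded:

    /* (0x304d2df1ULL << 32) / 2^59 ~= 6.037735849 ns, i.e. one period of a
     * ~165.625 MHz reference clock.
     */
    u64 nominal = 0x304d2df1ULL << 32;
    u64 one_ppm = nominal / 1000000;    /* ~= LAN966X_1PPM_FORMAT */
    u64 one_ppb = nominal / 1000000000; /* ~= LAN966X_1PPB_FORMAT */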
+int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct hwtstamp_config cfg;
+ struct lan966x_phc *phc;
+
+ /* For now don't allow running PTP on ports that are part of a bridge,
+ * because in the transparent-clock case the HW would still forward
+ * the frames, resulting in duplicates
+ */
+ if (lan966x->bridge_mask & BIT(port->chip_port))
+ return -EINVAL;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_ON:
+ port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ port->ptp_cmd = IFH_REW_OP_ONE_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_OFF:
+ port->ptp_cmd = IFH_REW_OP_NOOP;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* Commit back the result & save it */
+ mutex_lock(&lan966x->ptp_lock);
+ phc = &lan966x->phc[LAN966X_PHC_PORT];
+ memcpy(&phc->hwtstamp_config, &cfg, sizeof(cfg));
+ mutex_unlock(&lan966x->ptp_lock);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
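lan966x_ptp_hwtstamp_set() implements the standard SIOCSHWTSTAMP contract, so the usual user-space enablement applies. A hedged sketch; the interface name is an assumption:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/sockios.h>
    #include <linux/net_tstamp.h>

    int enable_hwtstamp(int sock)
    {
        struct hwtstamp_config cfg;
        struct ifreq ifr;

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_ON;        /* -> IFH_REW_OP_TWO_STEP_PTP */
        cfg.rx_filter = HWTSTAMP_FILTER_ALL; /* driver collapses PTP filters to ALL */
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&cfg;
        return ioctl(sock, SIOCSHWTSTAMP, &ifr);
    }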
+int lan966x_ptp_hwtstamp_get(struct lan966x_port *port, struct ifreq *ifr)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_phc *phc;
+
+ phc = &lan966x->phc[LAN966X_PHC_PORT];
+ return copy_to_user(ifr->ifr_data, &phc->hwtstamp_config,
+ sizeof(phc->hwtstamp_config)) ? -EFAULT : 0;
+}
+
+static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb)
+{
+ struct ptp_header *header;
+ u8 msgtype;
+ int type;
+
+ if (port->ptp_cmd == IFH_REW_OP_NOOP)
+ return IFH_REW_OP_NOOP;
+
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return IFH_REW_OP_NOOP;
+
+ header = ptp_parse_header(skb, type);
+ if (!header)
+ return IFH_REW_OP_NOOP;
+
+ if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
+ return IFH_REW_OP_TWO_STEP_PTP;
+
+ /* If it is a sync frame and one-step mode is configured, use the
+ * one-step operation; otherwise fall back to two-step
+ */
+ msgtype = ptp_get_msgtype(header, type);
+ if ((msgtype & 0xf) == 0)
+ return IFH_REW_OP_ONE_STEP_PTP;
+
+ return IFH_REW_OP_TWO_STEP_PTP;
+}
+
+static void lan966x_ptp_txtstamp_old_release(struct lan966x_port *port)
+{
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (time_after(LAN966X_SKB_CB(skb)->jiffies + LAN966X_PTP_TIMEOUT,
+ jiffies))
+ break;
+
+ __skb_unlink(skb, &port->tx_skbs);
+ dev_kfree_skb_any(skb);
+ }
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+}
+
+int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
+ struct sk_buff *skb)
+{
+ struct lan966x *lan966x = port->lan966x;
+ unsigned long flags;
+ u8 rew_op;
+
+ rew_op = lan966x_ptp_classify(port, skb);
+ LAN966X_SKB_CB(skb)->rew_op = rew_op;
+
+ if (rew_op != IFH_REW_OP_TWO_STEP_PTP)
+ return 0;
+
+ lan966x_ptp_txtstamp_old_release(port);
+
+ spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags);
+ if (lan966x->ptp_skbs == LAN966X_MAX_PTP_ID) {
+ spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
+ return -EBUSY;
+ }
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ skb_queue_tail(&port->tx_skbs, skb);
+ LAN966X_SKB_CB(skb)->ts_id = port->ts_id;
+ LAN966X_SKB_CB(skb)->jiffies = jiffies;
+
+ lan966x->ptp_skbs++;
+ port->ts_id++;
+ if (port->ts_id == LAN966X_MAX_PTP_ID)
+ port->ts_id = 0;
+
+ spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
+
+ return 0;
+}
+
+void lan966x_ptp_txtstamp_release(struct lan966x_port *port,
+ struct sk_buff *skb)
+{
+ struct lan966x *lan966x = port->lan966x;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags);
+ port->ts_id--;
+ lan966x->ptp_skbs--;
+ skb_unlink(skb, &port->tx_skbs);
+ spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
+}
+
+static void lan966x_get_hwtimestamp(struct lan966x *lan966x,
+ struct timespec64 *ts,
+ u32 nsec)
+{
+ /* Read current PTP time to get seconds */
+ unsigned long flags;
+ u32 curr_nsec;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PIN_CFG_PIN_DOM_SET(LAN966X_PHC_PORT) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ ts->tv_sec = lan_rd(lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ curr_nsec = lan_rd(lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ ts->tv_nsec = nsec;
+
+	/* The seconds counter has incremented since the timestamp was
+	 * captured, so compensate for the wrap
+	 */
+ if (curr_nsec < nsec)
+ ts->tv_sec--;
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+}
+
+irqreturn_t lan966x_ptp_irq_handler(int irq, void *args)
+{
+ int budget = LAN966X_MAX_PTP_ID;
+ struct lan966x *lan966x = args;
+
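+	/* Each two-step frame leaves a pair of entries in the timestamp
+	 * FIFO: the TX timestamp itself, followed by an entry whose nsec
+	 * field carries the ts_id of the frame, so every iteration below
+	 * consumes two entries.
+	 */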
+ while (budget--) {
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct lan966x_port *port;
+ struct timespec64 ts;
+ unsigned long flags;
+ u32 val, id, txport;
+ u32 delay;
+
+ val = lan_rd(lan966x, PTP_TWOSTEP_CTRL);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & PTP_TWOSTEP_CTRL_VLD))
+ break;
+
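+		/* OVFL set means the two-step timestamp FIFO overflowed
+		 * and timestamps were lost
+		 */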
+ WARN_ON(val & PTP_TWOSTEP_CTRL_OVFL);
+
+ if (!(val & PTP_TWOSTEP_CTRL_STAMP_TX))
+ continue;
+
+		/* Retrieve the Tx port of the timestamp */
+ txport = PTP_TWOSTEP_CTRL_STAMP_PORT_GET(val);
+
+ /* Retrieve its associated skb */
+ port = lan966x->ports[txport];
+
+ /* Retrieve the delay */
+ delay = lan_rd(lan966x, PTP_TWOSTEP_STAMP);
+ delay = PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(delay);
+
+		/* Get the next timestamp from the FIFO; this entry must be
+		 * the RX timestamp, which carries the ID of the frame
+		 */
+ lan_rmw(PTP_TWOSTEP_CTRL_NXT_SET(1),
+ PTP_TWOSTEP_CTRL_NXT,
+ lan966x, PTP_TWOSTEP_CTRL);
+
+ val = lan_rd(lan966x, PTP_TWOSTEP_CTRL);
+
+		/* Check if a timestamp can be retrieved */
+ if (!(val & PTP_TWOSTEP_CTRL_VLD))
+ break;
+
+ /* Read RX timestamping to get the ID */
+ id = lan_rd(lan966x, PTP_TWOSTEP_STAMP);
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (LAN966X_SKB_CB(skb)->ts_id != id)
+ continue;
+
+ __skb_unlink(skb, &port->tx_skbs);
+ skb_match = skb;
+ break;
+ }
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+
+		/* Advance past this entry to the next timestamp */
+ lan_rmw(PTP_TWOSTEP_CTRL_NXT_SET(1),
+ PTP_TWOSTEP_CTRL_NXT,
+ lan966x, PTP_TWOSTEP_CTRL);
+
+ if (WARN_ON(!skb_match))
+ continue;
+
+ spin_lock(&lan966x->ptp_ts_id_lock);
+ lan966x->ptp_skbs--;
+ spin_unlock(&lan966x->ptp_ts_id_lock);
+
+ /* Get the h/w timestamp */
+ lan966x_get_hwtimestamp(lan966x, &ts, delay);
+
+ /* Set the timestamp into the skb */
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb_match, &shhwtstamps);
+
+ dev_kfree_skb_any(skb_match);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int lan966x_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ unsigned long flags;
+	bool neg_adj = false;
+ u64 tod_inc;
+ u64 ref;
+
+ if (!scaled_ppm)
+ return 0;
+
+ if (scaled_ppm < 0) {
+ neg_adj = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ tod_inc = lan966x_ptp_get_nominal_value();
+
+	/* The multiplication is split into two separate additions to avoid
+	 * overflow: multiplying scaled_ppm (which has a 16-bit fractional
+	 * part) directly would overflow for adjustments bigger than
+	 * roughly 20 ppm.
+	 */
+ ref = LAN966X_1PPM_FORMAT * (scaled_ppm >> 16);
+ ref += (LAN966X_1PPM_FORMAT * (0xffff & scaled_ppm)) >> 16;
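+	/* e.g. scaled_ppm = 3 << 16 (a 3 ppm adjustment) gives
+	 * ref = 3 * LAN966X_1PPM_FORMAT; the fractional term adds 0 here
+	 */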
+ tod_inc = neg_adj ? tod_inc - ref : tod_inc + ref;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+	lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(BIT(phc->index)),
+ PTP_DOM_CFG_CLKCFG_DIS,
+ lan966x, PTP_DOM_CFG);
+
+ lan_wr((u32)tod_inc & 0xFFFFFFFF, lan966x,
+ PTP_CLK_PER_CFG(phc->index, 0));
+ lan_wr((u32)(tod_inc >> 32), lan966x,
+ PTP_CLK_PER_CFG(phc->index, 1));
+
+ lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0),
+ PTP_DOM_CFG_CLKCFG_DIS,
+ lan966x, PTP_DOM_CFG);
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+static int lan966x_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ /* Must be in IDLE mode before the time can be loaded */
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ /* Set new value */
+ lan_wr(PTP_TOD_SEC_MSB_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)),
+ lan966x, PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ lan_wr(lower_32_bits(ts->tv_sec),
+ lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ lan_wr(ts->tv_nsec, lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ /* Apply new values */
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+static int lan966x_ptp_gettime64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ unsigned long flags;
+ time64_t s;
+ s64 ns;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ s = lan_rd(lan966x, PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ s <<= 32;
+ s |= lan_rd(lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ ns = lan_rd(lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
+ ns &= PTP_TOD_NSEC_TOD_NSEC;
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ /* Deal with negative values */
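+	/* e.g. a raw value of 0x3FFFFFF3 encodes -13 ns: the seconds part
+	 * is decremented and the nsec becomes 999999987 below
+	 */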
+ if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+
+ set_normalized_timespec64(ts, s, ns);
+ return 0;
+}
+
+static int lan966x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+
+ if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ /* Must be in IDLE mode before the time can be loaded */
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ lan_wr(PTP_TOD_NSEC_TOD_NSEC_SET(delta),
+ lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ /* Adjust time with the value of PTP_TOD_NSEC */
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+ } else {
+		/* Fall back to lan966x_ptp_settime64(), which is not exact */
+ struct timespec64 ts;
+ u64 now;
+
+ lan966x_ptp_gettime64(ptp, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ lan966x_ptp_settime64(ptp, &ts);
+ }
+
+ return 0;
+}
+
+static struct ptp_clock_info lan966x_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "lan966x ptp",
+ .max_adj = 200000,
+ .gettime64 = lan966x_ptp_gettime64,
+ .settime64 = lan966x_ptp_settime64,
+ .adjtime = lan966x_ptp_adjtime,
+ .adjfine = lan966x_ptp_adjfine,
+};
+
+static int lan966x_ptp_phc_init(struct lan966x *lan966x,
+ int index,
+ struct ptp_clock_info *clock_info)
+{
+ struct lan966x_phc *phc = &lan966x->phc[index];
+
+ phc->info = *clock_info;
+ phc->clock = ptp_clock_register(&phc->info, lan966x->dev);
+ if (IS_ERR(phc->clock))
+ return PTR_ERR(phc->clock);
+
+ phc->index = index;
+ phc->lan966x = lan966x;
+
+ /* PTP Rx stamping is always enabled. */
+ phc->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+
+ return 0;
+}
+
+int lan966x_ptp_init(struct lan966x *lan966x)
+{
+ u64 tod_adj = lan966x_ptp_get_nominal_value();
+ struct lan966x_port *port;
+ int err, i;
+
+ if (!lan966x->ptp)
+ return 0;
+
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ err = lan966x_ptp_phc_init(lan966x, i, &lan966x_ptp_clock_info);
+ if (err)
+ return err;
+ }
+
+ spin_lock_init(&lan966x->ptp_clock_lock);
+ spin_lock_init(&lan966x->ptp_ts_id_lock);
+ mutex_init(&lan966x->ptp_lock);
+
+ /* Disable master counters */
+ lan_wr(PTP_DOM_CFG_ENA_SET(0), lan966x, PTP_DOM_CFG);
+
+ /* Configure the nominal TOD increment per clock cycle */
+ lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0x7),
+ PTP_DOM_CFG_CLKCFG_DIS,
+ lan966x, PTP_DOM_CFG);
+
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ lan_wr((u32)tod_adj & 0xFFFFFFFF, lan966x,
+ PTP_CLK_PER_CFG(i, 0));
+ lan_wr((u32)(tod_adj >> 32), lan966x,
+ PTP_CLK_PER_CFG(i, 1));
+ }
+
+ lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0),
+ PTP_DOM_CFG_CLKCFG_DIS,
+ lan966x, PTP_DOM_CFG);
+
+ /* Enable master counters */
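+	/* 0x7 sets the enable bit for each of the 3 PHC domains */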
+ lan_wr(PTP_DOM_CFG_ENA_SET(0x7), lan966x, PTP_DOM_CFG);
+
+ for (i = 0; i < lan966x->num_phys_ports; i++) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ skb_queue_head_init(&port->tx_skbs);
+ }
+
+ return 0;
+}
+
+void lan966x_ptp_deinit(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; i++) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ skb_queue_purge(&port->tx_skbs);
+ }
+
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i)
+ ptp_clock_unregister(lan966x->phc[i].clock);
+}
+
+void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
+ u64 timestamp)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct lan966x_phc *phc;
+ struct timespec64 ts;
+ u64 full_ts_in_ns;
+
+ if (!lan966x->ptp)
+ return;
+
+ phc = &lan966x->phc[LAN966X_PHC_PORT];
+ lan966x_ptp_gettime64(&phc->info, &ts);
+
+	/* Drop the sub-ns precision: the two least-significant bits of
+	 * the stamp hold the fractional nanoseconds
+	 */
+ timestamp = timestamp >> 2;
+ if (ts.tv_nsec < timestamp)
+ ts.tv_sec--;
+ ts.tv_nsec = timestamp;
+ full_ts_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = full_ts_in_ns;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
index 797560172aca..0c0b3e173d53 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
@@ -19,6 +19,7 @@ enum lan966x_target {
TARGET_DEV = 13,
TARGET_GCB = 27,
TARGET_ORG = 36,
+ TARGET_PTP = 41,
TARGET_QS = 42,
TARGET_QSYS = 46,
TARGET_REW = 47,
@@ -298,6 +299,24 @@ enum lan966x_target {
/* ANA:PORT:CPU_FWD_CFG */
#define ANA_CPU_FWD_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 96, 0, 1, 4)
+#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA BIT(6)
+#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(x)\
+ FIELD_PREP(ANA_CPU_FWD_CFG_MLD_REDIR_ENA, x)
+#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA_GET(x)\
+ FIELD_GET(ANA_CPU_FWD_CFG_MLD_REDIR_ENA, x)
+
+#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA BIT(5)
+#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(x)\
+ FIELD_PREP(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA, x)
+#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_GET(x)\
+ FIELD_GET(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA, x)
+
+#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA BIT(4)
+#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(x)\
+ FIELD_PREP(ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA, x)
+#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_GET(x)\
+ FIELD_GET(ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA, x)
+
#define ANA_CPU_FWD_CFG_SRC_COPY_ENA BIT(3)
#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_SET(x)\
FIELD_PREP(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x)
@@ -559,6 +578,108 @@ enum lan966x_target {
#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\
FIELD_GET(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+/* PTP:PTP_CFG:PTP_DOM_CFG */
+#define PTP_DOM_CFG __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 12, 0, 1, 4)
+
+#define PTP_DOM_CFG_ENA GENMASK(11, 9)
+#define PTP_DOM_CFG_ENA_SET(x)\
+ FIELD_PREP(PTP_DOM_CFG_ENA, x)
+#define PTP_DOM_CFG_ENA_GET(x)\
+ FIELD_GET(PTP_DOM_CFG_ENA, x)
+
+#define PTP_DOM_CFG_CLKCFG_DIS GENMASK(2, 0)
+#define PTP_DOM_CFG_CLKCFG_DIS_SET(x)\
+ FIELD_PREP(PTP_DOM_CFG_CLKCFG_DIS, x)
+#define PTP_DOM_CFG_CLKCFG_DIS_GET(x)\
+ FIELD_GET(PTP_DOM_CFG_CLKCFG_DIS, x)
+
+/* PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */
+#define PTP_CLK_PER_CFG(g, r) __REG(TARGET_PTP, 0, 1, 528, g, 3, 28, 0, r, 2, 4)
+
+/* PTP:PTP_PINS:PTP_PIN_CFG */
+#define PTP_PIN_CFG(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 0, 0, 1, 4)
+
+#define PTP_PIN_CFG_PIN_ACTION GENMASK(29, 27)
+#define PTP_PIN_CFG_PIN_ACTION_SET(x)\
+ FIELD_PREP(PTP_PIN_CFG_PIN_ACTION, x)
+#define PTP_PIN_CFG_PIN_ACTION_GET(x)\
+ FIELD_GET(PTP_PIN_CFG_PIN_ACTION, x)
+
+#define PTP_PIN_CFG_PIN_SYNC GENMASK(26, 25)
+#define PTP_PIN_CFG_PIN_SYNC_SET(x)\
+ FIELD_PREP(PTP_PIN_CFG_PIN_SYNC, x)
+#define PTP_PIN_CFG_PIN_SYNC_GET(x)\
+ FIELD_GET(PTP_PIN_CFG_PIN_SYNC, x)
+
+#define PTP_PIN_CFG_PIN_DOM GENMASK(17, 16)
+#define PTP_PIN_CFG_PIN_DOM_SET(x)\
+ FIELD_PREP(PTP_PIN_CFG_PIN_DOM, x)
+#define PTP_PIN_CFG_PIN_DOM_GET(x)\
+ FIELD_GET(PTP_PIN_CFG_PIN_DOM, x)
+
+/* PTP:PTP_PINS:PTP_TOD_SEC_MSB */
+#define PTP_TOD_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 4, 0, 1, 4)
+
+#define PTP_TOD_SEC_MSB_TOD_SEC_MSB GENMASK(15, 0)
+#define PTP_TOD_SEC_MSB_TOD_SEC_MSB_SET(x)\
+ FIELD_PREP(PTP_TOD_SEC_MSB_TOD_SEC_MSB, x)
+#define PTP_TOD_SEC_MSB_TOD_SEC_MSB_GET(x)\
+ FIELD_GET(PTP_TOD_SEC_MSB_TOD_SEC_MSB, x)
+
+/* PTP:PTP_PINS:PTP_TOD_SEC_LSB */
+#define PTP_TOD_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 8, 0, 1, 4)
+
+/* PTP:PTP_PINS:PTP_TOD_NSEC */
+#define PTP_TOD_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 12, 0, 1, 4)
+
+#define PTP_TOD_NSEC_TOD_NSEC GENMASK(29, 0)
+#define PTP_TOD_NSEC_TOD_NSEC_SET(x)\
+ FIELD_PREP(PTP_TOD_NSEC_TOD_NSEC, x)
+#define PTP_TOD_NSEC_TOD_NSEC_GET(x)\
+ FIELD_GET(PTP_TOD_NSEC_TOD_NSEC, x)
+
+/* PTP:PTP_TS_FIFO:PTP_TWOSTEP_CTRL */
+#define PTP_TWOSTEP_CTRL __REG(TARGET_PTP, 0, 1, 612, 0, 1, 12, 0, 0, 1, 4)
+
+#define PTP_TWOSTEP_CTRL_NXT BIT(11)
+#define PTP_TWOSTEP_CTRL_NXT_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_NXT, x)
+#define PTP_TWOSTEP_CTRL_NXT_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_NXT, x)
+
+#define PTP_TWOSTEP_CTRL_VLD BIT(10)
+#define PTP_TWOSTEP_CTRL_VLD_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_VLD, x)
+#define PTP_TWOSTEP_CTRL_VLD_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_VLD, x)
+
+#define PTP_TWOSTEP_CTRL_STAMP_TX BIT(9)
+#define PTP_TWOSTEP_CTRL_STAMP_TX_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_STAMP_TX, x)
+#define PTP_TWOSTEP_CTRL_STAMP_TX_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_STAMP_TX, x)
+
+#define PTP_TWOSTEP_CTRL_STAMP_PORT GENMASK(8, 1)
+#define PTP_TWOSTEP_CTRL_STAMP_PORT_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+#define PTP_TWOSTEP_CTRL_STAMP_PORT_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+
+#define PTP_TWOSTEP_CTRL_OVFL BIT(0)
+#define PTP_TWOSTEP_CTRL_OVFL_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_OVFL, x)
+#define PTP_TWOSTEP_CTRL_OVFL_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_OVFL, x)
+
+/* PTP:PTP_TS_FIFO:PTP_TWOSTEP_STAMP */
+#define PTP_TWOSTEP_STAMP __REG(TARGET_PTP, 0, 1, 612, 0, 1, 12, 4, 0, 1, 4)
+
+#define PTP_TWOSTEP_STAMP_STAMP_NSEC GENMASK(31, 2)
+#define PTP_TWOSTEP_STAMP_STAMP_NSEC_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
+#define PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
+
/* DEVCPU_QS:XTR:XTR_GRP_CFG */
#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
index 7de55f6a4da8..e3555c94294d 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
@@ -9,6 +9,37 @@ static struct notifier_block lan966x_netdevice_nb __read_mostly;
static struct notifier_block lan966x_switchdev_nb __read_mostly;
static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly;
+static void lan966x_port_set_mcast_ip_flood(struct lan966x_port *port,
+ u32 pgid_ip)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 flood_mask_ip;
+
+ flood_mask_ip = lan_rd(lan966x, ANA_PGID(pgid_ip));
+ flood_mask_ip = ANA_PGID_PGID_GET(flood_mask_ip);
+
+	/* If multicast snooping is not enabled, use the multicast flood
+	 * mask to decide whether to enable multicast flooding on this port.
+	 */
+ if (!port->mcast_ena) {
+ u32 flood_mask;
+
+ flood_mask = lan_rd(lan966x, ANA_PGID(PGID_MC));
+ flood_mask = ANA_PGID_PGID_GET(flood_mask);
+
+ if (flood_mask & BIT(port->chip_port))
+ flood_mask_ip |= BIT(port->chip_port);
+ else
+ flood_mask_ip &= ~BIT(port->chip_port);
+ } else {
+ flood_mask_ip &= ~BIT(port->chip_port);
+ }
+
+ lan_rmw(ANA_PGID_PGID_SET(flood_mask_ip),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_ip));
+}
+
static void lan966x_port_set_mcast_flood(struct lan966x_port *port,
bool enabled)
{
@@ -23,6 +54,11 @@ static void lan966x_port_set_mcast_flood(struct lan966x_port *port,
lan_rmw(ANA_PGID_PGID_SET(val),
ANA_PGID_PGID,
port->lan966x, ANA_PGID(PGID_MC));
+
+ if (!port->mcast_ena) {
+ lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
+ lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
+ }
}
static void lan966x_port_set_ucast_flood(struct lan966x_port *port,
@@ -144,6 +180,28 @@ static void lan966x_port_ageing_set(struct lan966x_port *port,
lan966x_mac_set_ageing(port->lan966x, ageing_time);
}
+static void lan966x_port_mc_set(struct lan966x_port *port, bool mcast_ena)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ port->mcast_ena = mcast_ena;
+ if (mcast_ena)
+ lan966x_mdb_restore_entries(lan966x);
+ else
+ lan966x_mdb_clear_entries(lan966x);
+
+ lan_rmw(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(mcast_ena) |
+ ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(mcast_ena) |
+ ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(mcast_ena),
+ ANA_CPU_FWD_CFG_IGMP_REDIR_ENA |
+ ANA_CPU_FWD_CFG_MLD_REDIR_ENA |
+ ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA,
+ lan966x, ANA_CPU_FWD_CFG(port->chip_port));
+
+ lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
+ lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
+}
+
static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
@@ -171,6 +229,9 @@ static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
lan966x_vlan_port_set_vlan_aware(port, attr->u.vlan_filtering);
lan966x_vlan_port_apply(port);
break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
+ lan966x_port_mc_set(port, !attr->u.mc_disabled);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -358,6 +419,9 @@ static int lan966x_netdevice_event(struct notifier_block *nb,
return notifier_from_errno(ret);
}
+/* We don't offload uppers such as LAG as bridge ports, so every device except
+ * the bridge itself is foreign.
+ */
static bool lan966x_foreign_dev_check(const struct net_device *dev,
const struct net_device *foreign_dev)
{
@@ -365,10 +429,10 @@ static bool lan966x_foreign_dev_check(const struct net_device *dev,
struct lan966x *lan966x = port->lan966x;
if (netif_is_bridge_master(foreign_dev))
- if (lan966x->bridge != foreign_dev)
- return true;
+ if (lan966x->bridge == foreign_dev)
+ return false;
- return false;
+ return true;
}
static int lan966x_switchdev_event(struct notifier_block *nb,
@@ -388,8 +452,7 @@ static int lan966x_switchdev_event(struct notifier_block *nb,
err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
lan966x_netdevice_check,
lan966x_foreign_dev_check,
- lan966x_handle_fdb,
- NULL);
+ lan966x_handle_fdb);
return notifier_from_errno(err);
}
@@ -402,18 +465,6 @@ static int lan966x_handle_port_vlan_add(struct lan966x_port *port,
const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
struct lan966x *lan966x = port->lan966x;
- /* When adding a port to a vlan, we get a callback for the port but
- * also for the bridge. When get the callback for the bridge just bail
- * out. Then when the bridge is added to the vlan, then we get a
- * callback here but in this case the flags has set:
- * BRIDGE_VLAN_INFO_BRENTRY. In this case it means that the CPU
- * port is added to the vlan, so the broadcast frames and unicast frames
- * with dmac of the bridge should be foward to CPU.
- */
- if (netif_is_bridge_master(obj->orig_dev) &&
- !(v->flags & BRIDGE_VLAN_INFO_BRENTRY))
- return 0;
-
if (!netif_is_bridge_master(obj->orig_dev))
lan966x_vlan_port_add_vlan(port, v->vid,
v->flags & BRIDGE_VLAN_INFO_PVID,
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
index c271e86ee292..4402c3ed1dc5 100644
--- a/drivers/net/ethernet/microchip/sparx5/Makefile
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_SPARX5_SWITCH) += sparx5-switch.o
sparx5-switch-objs := sparx5_main.o sparx5_packet.o \
sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \
- sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o
+ sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \
+ sparx5_ptp.o sparx5_pgid.o
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
index 10b866e9f726..6b0febcb7fa9 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -1183,6 +1183,39 @@ static void sparx5_config_port_stats(struct sparx5 *sparx5, int portno)
sparx5, ANA_AC_PORT_STAT_CFG(portno, SPX5_PORT_POLICER_DROPS));
}
+static int sparx5_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_phc *phc;
+
+ if (!sparx5->ptp)
+ return ethtool_op_get_ts_info(dev, info);
+
+ phc = &sparx5->phc[SPARX5_PHC_PORT];
+
+ info->phc_index = phc->clock ? ptp_clock_index(phc->clock) : -1;
+ if (info->phc_index == -1) {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ }
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
const struct ethtool_ops sparx5_ethtool_ops = {
.get_sset_count = sparx5_get_sset_count,
.get_strings = sparx5_get_sset_strings,
@@ -1194,6 +1227,7 @@ const struct ethtool_ops sparx5_ethtool_ops = {
.get_eth_mac_stats = sparx5_get_eth_mac_stats,
.get_eth_ctrl_stats = sparx5_get_eth_mac_ctrl_stats,
.get_rmon_stats = sparx5_get_eth_rmon_stats,
+ .get_ts_info = sparx5_get_ts_info,
};
int sparx_stats_init(struct sparx5 *sparx5)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
index 7436f62fa152..2dc87584023a 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
@@ -240,6 +240,8 @@ static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx
skb_pull(skb, IFH_LEN * sizeof(u32));
if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
skb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
skb->protocol = eth_type_trans(skb, skb->dev);
/* Everything we see on an interface that is in the HW bridge
* has already been forwarded
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
index 9a8e4f201eb1..35abb3d0ce19 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
@@ -186,11 +186,11 @@ bool sparx5_mact_getnext(struct sparx5 *sparx5,
return ret == 0;
}
-static int sparx5_mact_lookup(struct sparx5 *sparx5,
- const unsigned char mac[ETH_ALEN],
- u16 vid)
+bool sparx5_mact_find(struct sparx5 *sparx5,
+ const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2)
{
int ret;
+ u32 cfg2;
mutex_lock(&sparx5->lock);
@@ -202,16 +202,29 @@ static int sparx5_mact_lookup(struct sparx5 *sparx5,
sparx5, LRN_COMMON_ACCESS_CTRL);
ret = sparx5_mact_wait_for_completion(sparx5);
- if (ret)
- goto out;
-
- ret = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET
- (spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2));
+ if (ret == 0) {
+ cfg2 = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2);
+ if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(cfg2))
+ *pcfg2 = cfg2;
+ else
+ ret = -ENOENT;
+ }
-out:
mutex_unlock(&sparx5->lock);
- return ret;
+ return ret == 0;
+}
+
+static int sparx5_mact_lookup(struct sparx5 *sparx5,
+ const unsigned char mac[ETH_ALEN],
+ u16 vid)
+{
+ u32 pcfg2;
+
+ if (sparx5_mact_find(sparx5, mac, vid, &pcfg2))
+ return 1;
+
+ return 0;
}
int sparx5_mact_forget(struct sparx5 *sparx5,
@@ -286,7 +299,8 @@ static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type,
}
int sparx5_add_mact_entry(struct sparx5 *sparx5,
- struct sparx5_port *port,
+ struct net_device *dev,
+ u16 portno,
const unsigned char *addr, u16 vid)
{
struct sparx5_mact_entry *mact_entry;
@@ -302,14 +316,14 @@ int sparx5_add_mact_entry(struct sparx5 *sparx5,
* mact thread to start the frame will reach CPU and the CPU will
* add the entry but without the extern_learn flag.
*/
- mact_entry = find_mact_entry(sparx5, addr, vid, port->portno);
+ mact_entry = find_mact_entry(sparx5, addr, vid, portno);
if (mact_entry)
goto update_hw;
/* Add the entry in SW MAC table not to get the notification when
* SW is pulling again
*/
- mact_entry = alloc_mact_entry(sparx5, addr, vid, port->portno);
+ mact_entry = alloc_mact_entry(sparx5, addr, vid, portno);
if (!mact_entry)
return -ENOMEM;
@@ -318,13 +332,13 @@ int sparx5_add_mact_entry(struct sparx5 *sparx5,
mutex_unlock(&sparx5->mact_lock);
update_hw:
- ret = sparx5_mact_learn(sparx5, port->portno, addr, vid);
+ ret = sparx5_mact_learn(sparx5, portno, addr, vid);
/* New entry? */
if (mact_entry->flags == 0) {
mact_entry->flags |= MAC_ENT_LOCK; /* Don't age this */
sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, addr, vid,
- port->ndev, true);
+ dev, true);
}
return ret;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 16266275dd36..01be7bd84181 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -190,6 +190,7 @@ static const struct sparx5_main_io_resource sparx5_main_iomap[] = {
{ TARGET_ASM, 0x10600000, 1 }, /* 0x610600000 */
{ TARGET_GCB, 0x11010000, 2 }, /* 0x611010000 */
{ TARGET_QS, 0x11030000, 2 }, /* 0x611030000 */
+ { TARGET_PTP, 0x11040000, 2 }, /* 0x611040000 */
{ TARGET_ANA_ACL, 0x11050000, 2 }, /* 0x611050000 */
{ TARGET_LRN, 0x11060000, 2 }, /* 0x611060000 */
{ TARGET_VCAP_SUPER, 0x11080000, 2 }, /* 0x611080000 */
@@ -291,7 +292,6 @@ static int sparx5_create_port(struct sparx5 *sparx5,
/* Create a phylink for PHY management. Also handles SFPs */
spx5_port->phylink_config.dev = &spx5_port->ndev->dev;
spx5_port->phylink_config.type = PHYLINK_NETDEV;
- spx5_port->phylink_config.pcs_poll = true;
spx5_port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD |
MAC_2500FD | MAC_5000FD | MAC_10000FD | MAC_25000FD;
@@ -328,7 +328,6 @@ static int sparx5_create_port(struct sparx5 *sparx5,
return PTR_ERR(phylink);
spx5_port->phylink = phylink;
- phylink_set_pcs(phylink, &spx5_port->phylink_pcs);
return 0;
}
@@ -627,6 +626,9 @@ static int sparx5_start(struct sparx5 *sparx5)
/* Init MAC table, ageing */
sparx5_mact_init(sparx5);
+ /* Init PGID table arbitrator */
+ sparx5_pgid_init(sparx5);
+
/* Setup VLANs */
sparx5_vlan_init(sparx5);
@@ -694,6 +696,18 @@ static int sparx5_start(struct sparx5 *sparx5)
} else {
sparx5->xtr_irq = -ENXIO;
}
+
+ if (sparx5->ptp_irq >= 0) {
+ err = devm_request_threaded_irq(sparx5->dev, sparx5->ptp_irq,
+ NULL, sparx5_ptp_irq_handler,
+ IRQF_ONESHOT, "sparx5-ptp",
+ sparx5);
+ if (err)
+ sparx5->ptp_irq = -ENXIO;
+
+ sparx5->ptp = 1;
+ }
+
return err;
}
@@ -810,6 +824,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
sparx5->fdma_irq = platform_get_irq_byname(sparx5->pdev, "fdma");
sparx5->xtr_irq = platform_get_irq_byname(sparx5->pdev, "xtr");
+ sparx5->ptp_irq = platform_get_irq_byname(sparx5->pdev, "ptp");
/* Read chip ID to check CPU interface */
sparx5->chip_id = spx5_rd(sparx5, GCB_CHIP_ID);
@@ -848,6 +863,12 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
dev_err(sparx5->dev, "Start failed\n");
goto cleanup_ports;
}
+
+ err = sparx5_ptp_init(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "PTP failed\n");
+ goto cleanup_ports;
+ }
goto cleanup_config;
cleanup_ports:
@@ -871,6 +892,7 @@ static int mchp_sparx5_remove(struct platform_device *pdev)
disable_irq(sparx5->fdma_irq);
sparx5->fdma_irq = -ENXIO;
}
+ sparx5_ptp_deinit(sparx5);
sparx5_fdma_stop(sparx5);
sparx5_cleanup_ports(sparx5);
/* Unregister netdevs */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index a1acc9b461f2..7a04b8f2a546 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -14,8 +14,12 @@
#include <linux/if_vlan.h>
#include <linux/bitmap.h>
#include <linux/phylink.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/hrtimer.h>
+#include "sparx5_main_regs.h"
+
/* Target chip type */
enum spx5_target_chiptype {
SPX5_TARGET_CT_7546 = 0x7546, /* SparX-5-64 Enterprise */
@@ -62,6 +66,12 @@ enum sparx5_vlan_port_type {
#define PGID_BCAST (PGID_BASE + 6)
#define PGID_CPU (PGID_BASE + 7)
+#define PGID_TABLE_SIZE 3290
+
+#define PGID_MCAST_START 65
+#define PGID_GLAG_START 833
+#define PGID_GLAG_END 1088
+
#define IFH_LEN 9 /* 36 bytes */
#define NULL_VID 0
#define SPX5_MACT_PULL_DELAY (2 * HZ)
@@ -77,6 +87,18 @@ enum sparx5_vlan_port_type {
#define FDMA_RX_DCB_MAX_DBS 15
#define FDMA_TX_DCB_MAX_DBS 1
+#define SPARX5_PHC_COUNT 3
+#define SPARX5_PHC_PORT 0
+
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_ONE_STEP_PTP 0x3
+#define IFH_REW_OP_TWO_STEP_PTP 0x4
+
+#define IFH_PDU_TYPE_NONE 0x0
+#define IFH_PDU_TYPE_PTP 0x5
+#define IFH_PDU_TYPE_IPV4_UDP_PTP 0x6
+#define IFH_PDU_TYPE_IPV6_UDP_PTP 0x7
+
struct sparx5;
struct sparx5_db_hw {
@@ -165,9 +187,12 @@ struct sparx5_port {
enum sparx5_port_max_tags max_vlan_tags;
enum sparx5_vlan_port_type vlan_type;
u32 custom_etype;
- u32 ifh[IFH_LEN];
bool vlan_aware;
struct hrtimer inj_timer;
+ /* ptp */
+ u8 ptp_cmd;
+ u16 ts_id;
+ struct sk_buff_head tx_skbs;
};
enum sparx5_core_clockfreq {
@@ -177,6 +202,26 @@ enum sparx5_core_clockfreq {
SPX5_CORE_CLOCK_625MHZ, /* 625MHZ core clock frequency */
};
+struct sparx5_phc {
+ struct ptp_clock *clock;
+ struct ptp_clock_info info;
+ struct hwtstamp_config hwtstamp_config;
+ struct sparx5 *sparx5;
+ u8 index;
+};
+
+struct sparx5_skb_cb {
+ u8 rew_op;
+ u8 pdu_type;
+ u8 pdu_w16_offset;
+ u16 ts_id;
+ unsigned long jiffies;
+};
+
+#define SPARX5_PTP_TIMEOUT msecs_to_jiffies(10)
+#define SPARX5_SKB_CB(skb) \
+ ((struct sparx5_skb_cb *)((skb)->cb))
+
struct sparx5 {
struct platform_device *pdev;
struct device *dev;
@@ -224,6 +269,16 @@ struct sparx5 {
int fdma_irq;
struct sparx5_rx rx;
struct sparx5_tx tx;
+ /* PTP */
+ bool ptp;
+ struct sparx5_phc phc[SPARX5_PHC_COUNT];
+ spinlock_t ptp_clock_lock; /* lock for phc */
+ spinlock_t ptp_ts_id_lock; /* lock for ts_id */
+ struct mutex ptp_lock; /* lock for ptp interface state */
+ u16 ptp_skbs;
+ int ptp_irq;
+ /* PGID allocation map */
+ u8 pgid_map[PGID_TABLE_SIZE];
};
/* sparx5_switchdev.c */
@@ -233,6 +288,7 @@ void sparx5_unregister_notifier_blocks(struct sparx5 *sparx5);
/* sparx5_packet.c */
struct frame_info {
int src_port;
+ u32 timestamp;
};
void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp);
@@ -254,10 +310,13 @@ int sparx5_mact_learn(struct sparx5 *sparx5, int port,
const unsigned char mac[ETH_ALEN], u16 vid);
bool sparx5_mact_getnext(struct sparx5 *sparx5,
unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2);
+bool sparx5_mact_find(struct sparx5 *sparx5,
+ const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2);
int sparx5_mact_forget(struct sparx5 *sparx5,
const unsigned char mac[ETH_ALEN], u16 vid);
int sparx5_add_mact_entry(struct sparx5 *sparx5,
- struct sparx5_port *port,
+ struct net_device *dev,
+ u16 portno,
const unsigned char *addr, u16 vid);
int sparx5_del_mact_entry(struct sparx5 *sparx5,
const unsigned char *addr,
@@ -286,12 +345,43 @@ void sparx5_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats
int sparx_stats_init(struct sparx5 *sparx5);
/* sparx5_netdev.c */
+void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp);
+void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op);
+void sparx5_set_port_ifh_pdu_type(void *ifh_hdr, u32 pdu_type);
+void sparx5_set_port_ifh_pdu_w16_offset(void *ifh_hdr, u32 pdu_w16_offset);
+void sparx5_set_port_ifh(void *ifh_hdr, u16 portno);
bool sparx5_netdevice_check(const struct net_device *dev);
struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno);
int sparx5_register_netdevs(struct sparx5 *sparx5);
void sparx5_destroy_netdevs(struct sparx5 *sparx5);
void sparx5_unregister_netdevs(struct sparx5 *sparx5);
+/* sparx5_ptp.c */
+int sparx5_ptp_init(struct sparx5 *sparx5);
+void sparx5_ptp_deinit(struct sparx5 *sparx5);
+int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr);
+int sparx5_ptp_hwtstamp_get(struct sparx5_port *port, struct ifreq *ifr);
+void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb,
+ u64 timestamp);
+int sparx5_ptp_txtstamp_request(struct sparx5_port *port,
+ struct sk_buff *skb);
+void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
+ struct sk_buff *skb);
+irqreturn_t sparx5_ptp_irq_handler(int irq, void *args);
+
+/* sparx5_pgid.c */
+enum sparx5_pgid_type {
+ SPX5_PGID_FREE,
+ SPX5_PGID_RESERVED,
+ SPX5_PGID_MULTICAST,
+ SPX5_PGID_GLAG
+};
+
+void sparx5_pgid_init(struct sparx5 *spx5);
+int sparx5_pgid_alloc_glag(struct sparx5 *spx5, u16 *idx);
+int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx);
+int sparx5_pgid_free(struct sparx5 *spx5, u16 idx);
+
/* Clock period in picoseconds */
static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock)
{
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
index 5ab2373a7178..c94de436b281 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
@@ -4,8 +4,8 @@
* Copyright (c) 2021 Microchip Technology Inc.
*/
-/* This file is autogenerated by cml-utils 2021-05-06 13:06:37 +0200.
- * Commit ID: 9ae4ec441e25e4b9003f4e514df5cb12a36b84d3
+/* This file is autogenerated by cml-utils 2022-02-26 14:15:01 +0100.
+ * Commit ID: 98bdd3d171cc2a1afd30d241d41a4281d471a48c (dirty)
*/
#ifndef _SPARX5_MAIN_REGS_H_
@@ -40,6 +40,7 @@ enum sparx5_target {
TARGET_PCS25G_BR = 144,
TARGET_PCS5G_BR = 160,
TARGET_PORT_CONF = 173,
+ TARGET_PTP = 174,
TARGET_QFWD = 175,
TARGET_QRES = 176,
TARGET_QS = 177,
@@ -4156,6 +4157,249 @@ enum sparx5_target {
#define PORT_CONF_USGMII_CFG_QUAD_MODE_GET(x)\
FIELD_GET(PORT_CONF_USGMII_CFG_QUAD_MODE, x)
+/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR */
+#define PTP_PTP_PIN_INTR __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 0, 0, 1, 4)
+
+#define PTP_PTP_PIN_INTR_INTR_PTP GENMASK(4, 0)
+#define PTP_PTP_PIN_INTR_INTR_PTP_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_INTR_INTR_PTP, x)
+#define PTP_PTP_PIN_INTR_INTR_PTP_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_INTR_INTR_PTP, x)
+
+/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR_ENA */
+#define PTP_PTP_PIN_INTR_ENA __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 4, 0, 1, 4)
+
+#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA GENMASK(4, 0)
+#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x)
+#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x)
+
+/* DEVCPU_PTP:PTP_CFG:PTP_INTR_IDENT */
+#define PTP_PTP_INTR_IDENT __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 8, 0, 1, 4)
+
+#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT GENMASK(4, 0)
+#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_SET(x)\
+ FIELD_PREP(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x)
+#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_GET(x)\
+ FIELD_GET(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x)
+
+/* DEVCPU_PTP:PTP_CFG:PTP_DOM_CFG */
+#define PTP_PTP_DOM_CFG __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 12, 0, 1, 4)
+
+#define PTP_PTP_DOM_CFG_PTP_ENA GENMASK(11, 9)
+#define PTP_PTP_DOM_CFG_PTP_ENA_SET(x)\
+ FIELD_PREP(PTP_PTP_DOM_CFG_PTP_ENA, x)
+#define PTP_PTP_DOM_CFG_PTP_ENA_GET(x)\
+ FIELD_GET(PTP_PTP_DOM_CFG_PTP_ENA, x)
+
+#define PTP_PTP_DOM_CFG_PTP_HOLD GENMASK(8, 6)
+#define PTP_PTP_DOM_CFG_PTP_HOLD_SET(x)\
+ FIELD_PREP(PTP_PTP_DOM_CFG_PTP_HOLD, x)
+#define PTP_PTP_DOM_CFG_PTP_HOLD_GET(x)\
+ FIELD_GET(PTP_PTP_DOM_CFG_PTP_HOLD, x)
+
+#define PTP_PTP_DOM_CFG_PTP_TOD_FREEZE GENMASK(5, 3)
+#define PTP_PTP_DOM_CFG_PTP_TOD_FREEZE_SET(x)\
+ FIELD_PREP(PTP_PTP_DOM_CFG_PTP_TOD_FREEZE, x)
+#define PTP_PTP_DOM_CFG_PTP_TOD_FREEZE_GET(x)\
+ FIELD_GET(PTP_PTP_DOM_CFG_PTP_TOD_FREEZE, x)
+
+#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS GENMASK(2, 0)
+#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(x)\
+ FIELD_PREP(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, x)
+#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_GET(x)\
+ FIELD_GET(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, x)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */
+#define PTP_CLK_PER_CFG(g, r) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 0, r, 2, 4)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC */
+#define PTP_PTP_CUR_NSEC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 8, 0, 1, 4)
+
+#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC GENMASK(29, 0)
+#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_SET(x)\
+ FIELD_PREP(PTP_PTP_CUR_NSEC_PTP_CUR_NSEC, x)
+#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_GET(x)\
+ FIELD_GET(PTP_PTP_CUR_NSEC_PTP_CUR_NSEC, x)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC_FRAC */
+#define PTP_PTP_CUR_NSEC_FRAC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 12, 0, 1, 4)
+
+#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC GENMASK(7, 0)
+#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_SET(x)\
+ FIELD_PREP(PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC, x)
+#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_GET(x)\
+ FIELD_GET(PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC, x)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_LSB */
+#define PTP_PTP_CUR_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 16, 0, 1, 4)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_MSB */
+#define PTP_PTP_CUR_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 20, 0, 1, 4)
+
+#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB GENMASK(15, 0)
+#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_SET(x)\
+ FIELD_PREP(PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB, x)
+#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_GET(x)\
+ FIELD_GET(PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB, x)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:NTP_CUR_NSEC */
+#define PTP_NTP_CUR_NSEC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 24, 0, 1, 4)
+
+/* DEVCPU_PTP:PTP_PINS:PTP_PIN_CFG */
+#define PTP_PTP_PIN_CFG(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 0, 0, 1, 4)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION GENMASK(28, 26)
+#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC GENMASK(25, 24)
+#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL BIT(23)
+#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT GENMASK(22, 21)
+#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x)
+
+#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT GENMASK(20, 18)
+#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_CLK_SELECT, x)
+#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_CLK_SELECT, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_DOM GENMASK(17, 16)
+#define PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_DOM, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_DOM_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_DOM, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_OPT GENMASK(15, 14)
+#define PTP_PTP_PIN_CFG_PTP_PIN_OPT_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_OPT, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_OPT_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_OPT, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK BIT(13)
+#define PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS GENMASK(12, 0)
+#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS, x)
+
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_MSB */
+#define PTP_PTP_TOD_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 4, 0, 1, 4)
+
+#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB GENMASK(15, 0)
+#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(x)\
+ FIELD_PREP(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB, x)
+#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_GET(x)\
+ FIELD_GET(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB, x)
+
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_LSB */
+#define PTP_PTP_TOD_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 8, 0, 1, 4)
+
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC */
+#define PTP_PTP_TOD_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 12, 0, 1, 4)
+
+#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC GENMASK(29, 0)
+#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(x)\
+ FIELD_PREP(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC, x)
+#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_GET(x)\
+ FIELD_GET(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC, x)
+
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC_FRAC */
+#define PTP_PTP_TOD_NSEC_FRAC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 16, 0, 1, 4)
+
+#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC GENMASK(7, 0)
+#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_SET(x)\
+ FIELD_PREP(PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC, x)
+#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_GET(x)\
+ FIELD_GET(PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC, x)
+
+/* DEVCPU_PTP:PTP_PINS:NTP_NSEC */
+#define PTP_NTP_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 20, 0, 1, 4)
+
+/* DEVCPU_PTP:PTP_PINS:PIN_WF_HIGH_PERIOD */
+#define PTP_PIN_WF_HIGH_PERIOD(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 24, 0, 1, 4)
+
+#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH GENMASK(29, 0)
+#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_SET(x)\
+ FIELD_PREP(PTP_PIN_WF_HIGH_PERIOD_PIN_WFH, x)
+#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_GET(x)\
+ FIELD_GET(PTP_PIN_WF_HIGH_PERIOD_PIN_WFH, x)
+
+/* DEVCPU_PTP:PTP_PINS:PIN_WF_LOW_PERIOD */
+#define PTP_PIN_WF_LOW_PERIOD(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 28, 0, 1, 4)
+
+#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL GENMASK(29, 0)
+#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_SET(x)\
+ FIELD_PREP(PTP_PIN_WF_LOW_PERIOD_PIN_WFL, x)
+#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_GET(x)\
+ FIELD_GET(PTP_PIN_WF_LOW_PERIOD_PIN_WFL, x)
+
+/* DEVCPU_PTP:PTP_PINS:PIN_IOBOUNCH_DELAY */
+#define PTP_PIN_IOBOUNCH_DELAY(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 32, 0, 1, 4)
+
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL GENMASK(18, 3)
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL_SET(x)\
+ FIELD_PREP(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL, x)
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL_GET(x)\
+ FIELD_GET(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL, x)
+
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG GENMASK(2, 0)
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG_SET(x)\
+ FIELD_PREP(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG, x)
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG_GET(x)\
+ FIELD_GET(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG, x)
+
+/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CTRL */
+#define PTP_PHAD_CTRL(g) __REG(TARGET_PTP, 0, 1, 420, g, 5, 8, 0, 0, 1, 4)
+
+#define PTP_PHAD_CTRL_PHAD_ENA BIT(7)
+#define PTP_PHAD_CTRL_PHAD_ENA_SET(x)\
+ FIELD_PREP(PTP_PHAD_CTRL_PHAD_ENA, x)
+#define PTP_PHAD_CTRL_PHAD_ENA_GET(x)\
+ FIELD_GET(PTP_PHAD_CTRL_PHAD_ENA, x)
+
+#define PTP_PHAD_CTRL_PHAD_FAILED BIT(6)
+#define PTP_PHAD_CTRL_PHAD_FAILED_SET(x)\
+ FIELD_PREP(PTP_PHAD_CTRL_PHAD_FAILED, x)
+#define PTP_PHAD_CTRL_PHAD_FAILED_GET(x)\
+ FIELD_GET(PTP_PHAD_CTRL_PHAD_FAILED, x)
+
+#define PTP_PHAD_CTRL_REDUCED_RES GENMASK(5, 3)
+#define PTP_PHAD_CTRL_REDUCED_RES_SET(x)\
+ FIELD_PREP(PTP_PHAD_CTRL_REDUCED_RES, x)
+#define PTP_PHAD_CTRL_REDUCED_RES_GET(x)\
+ FIELD_GET(PTP_PHAD_CTRL_REDUCED_RES, x)
+
+#define PTP_PHAD_CTRL_LOCK_ACC GENMASK(2, 0)
+#define PTP_PHAD_CTRL_LOCK_ACC_SET(x)\
+ FIELD_PREP(PTP_PHAD_CTRL_LOCK_ACC, x)
+#define PTP_PHAD_CTRL_LOCK_ACC_GET(x)\
+ FIELD_GET(PTP_PHAD_CTRL_LOCK_ACC, x)
+
+/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CYC_STAT */
+#define PTP_PHAD_CYC_STAT(g) __REG(TARGET_PTP, 0, 1, 420, g, 5, 8, 4, 0, 1, 4)
+
/* QFWD:SYSTEM:SWITCH_PORT_MODE */
#define QFWD_SWITCH_PORT_MODE(r) __REG(TARGET_QFWD, 0, 1, 0, 0, 1, 340, 0, r, 70, 4)
@@ -4528,6 +4772,93 @@ enum sparx5_target {
#define REW_TAG_CTRL_TAG_DEI_CFG_GET(x)\
FIELD_GET(REW_TAG_CTRL_TAG_DEI_CFG, x)
+/* REW:PTP_CTRL:PTP_TWOSTEP_CTRL */
+#define REW_PTP_TWOSTEP_CTRL __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 0, 0, 1, 4)
+
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA BIT(12)
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA, x)
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA, x)
+
+#define REW_PTP_TWOSTEP_CTRL_PTP_NXT BIT(11)
+#define REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_NXT, x)
+#define REW_PTP_TWOSTEP_CTRL_PTP_NXT_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_NXT, x)
+
+#define REW_PTP_TWOSTEP_CTRL_PTP_VLD BIT(10)
+#define REW_PTP_TWOSTEP_CTRL_PTP_VLD_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_VLD, x)
+#define REW_PTP_TWOSTEP_CTRL_PTP_VLD_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_VLD, x)
+
+#define REW_PTP_TWOSTEP_CTRL_STAMP_TX BIT(9)
+#define REW_PTP_TWOSTEP_CTRL_STAMP_TX_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_STAMP_TX, x)
+#define REW_PTP_TWOSTEP_CTRL_STAMP_TX_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_STAMP_TX, x)
+
+#define REW_PTP_TWOSTEP_CTRL_STAMP_PORT GENMASK(8, 1)
+#define REW_PTP_TWOSTEP_CTRL_STAMP_PORT_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+#define REW_PTP_TWOSTEP_CTRL_STAMP_PORT_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL BIT(0)
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_OVFL, x)
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_OVFL, x)
+
+/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP */
+#define REW_PTP_TWOSTEP_STAMP __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 4, 0, 1, 4)
+
+#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC GENMASK(29, 0)
+#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
+#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
+
+/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP_SUBNS */
+#define REW_PTP_TWOSTEP_STAMP_SUBNS __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 8, 0, 1, 4)
+
+#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC GENMASK(7, 0)
+#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC, x)
+#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC, x)
+
+/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO */
+#define REW_PTP_RSRV_NOT_ZERO __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 12, 0, 1, 4)
+
+/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO1 */
+#define REW_PTP_RSRV_NOT_ZERO1 __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 16, 0, 1, 4)
+
+/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO2 */
+#define REW_PTP_RSRV_NOT_ZERO2 __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 20, 0, 1, 4)
+
+#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2 GENMASK(5, 0)
+#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_SET(x)\
+ FIELD_PREP(REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2, x)
+#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_GET(x)\
+ FIELD_GET(REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2, x)
+
+/* REW:PTP_CTRL:PTP_GEN_STAMP_FMT */
+#define REW_PTP_GEN_STAMP_FMT(r) __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 24, r, 4, 4)
+
+#define REW_PTP_GEN_STAMP_FMT_RT_OFS GENMASK(6, 2)
+#define REW_PTP_GEN_STAMP_FMT_RT_OFS_SET(x)\
+ FIELD_PREP(REW_PTP_GEN_STAMP_FMT_RT_OFS, x)
+#define REW_PTP_GEN_STAMP_FMT_RT_OFS_GET(x)\
+ FIELD_GET(REW_PTP_GEN_STAMP_FMT_RT_OFS, x)
+
+#define REW_PTP_GEN_STAMP_FMT_RT_FMT GENMASK(1, 0)
+#define REW_PTP_GEN_STAMP_FMT_RT_FMT_SET(x)\
+ FIELD_PREP(REW_PTP_GEN_STAMP_FMT_RT_FMT, x)
+#define REW_PTP_GEN_STAMP_FMT_RT_FMT_GET(x)\
+ FIELD_GET(REW_PTP_GEN_STAMP_FMT_RT_FMT, x)
+
/* REW:RAM_CTRL:RAM_INIT */
#define REW_RAM_INIT __REG(TARGET_REW, 0, 1, 378696, 0, 1, 4, 0, 0, 1, 4)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
index e042f117dc7a..af4d3e1f1a6d 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
@@ -54,7 +54,7 @@ static void __ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
ifh_hdr[byte - 5] |= (u8)((encode & 0xFF0000000000) >> 40);
}
-static void sparx5_set_port_ifh(void *ifh_hdr, u16 portno)
+void sparx5_set_port_ifh(void *ifh_hdr, u16 portno)
{
/* VSTAX.RSV = 1. MSBit must be 1 */
ifh_encode_bitfield(ifh_hdr, 1, VSTAX + 79, 1);
@@ -74,6 +74,26 @@ static void sparx5_set_port_ifh(void *ifh_hdr, u16 portno)
ifh_encode_bitfield(ifh_hdr, 1, 67, 1);
}
+void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op)
+{
+ ifh_encode_bitfield(ifh_hdr, rew_op, VSTAX + 32, 10);
+}
+
+void sparx5_set_port_ifh_pdu_type(void *ifh_hdr, u32 pdu_type)
+{
+ ifh_encode_bitfield(ifh_hdr, pdu_type, 191, 4);
+}
+
+void sparx5_set_port_ifh_pdu_w16_offset(void *ifh_hdr, u32 pdu_w16_offset)
+{
+ ifh_encode_bitfield(ifh_hdr, pdu_w16_offset, 195, 6);
+}
+
+void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp)
+{
+ ifh_encode_bitfield(ifh_hdr, timestamp, 232, 40);
+}
+
static int sparx5_port_open(struct net_device *ndev)
{
struct sparx5_port *port = netdev_priv(ndev);
@@ -179,6 +199,24 @@ static int sparx5_get_port_parent_id(struct net_device *dev,
return 0;
}
+static int sparx5_port_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ struct sparx5_port *sparx5_port = netdev_priv(dev);
+ struct sparx5 *sparx5 = sparx5_port->sparx5;
+
+ if (!phy_has_hwtstamp(dev->phydev) && sparx5->ptp) {
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return sparx5_ptp_hwtstamp_set(sparx5_port, ifr);
+ case SIOCGHWTSTAMP:
+ return sparx5_ptp_hwtstamp_get(sparx5_port, ifr);
+ }
+ }
+
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
+}
+
static const struct net_device_ops sparx5_port_netdev_ops = {
.ndo_open = sparx5_port_open,
.ndo_stop = sparx5_port_stop,
@@ -189,6 +227,7 @@ static const struct net_device_ops sparx5_port_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_get_stats64 = sparx5_get_stats64,
.ndo_get_port_parent_id = sparx5_get_port_parent_id,
+ .ndo_eth_ioctl = sparx5_port_ioctl,
};
bool sparx5_netdevice_check(const struct net_device *dev)
@@ -210,7 +249,6 @@ struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno)
spx5_port->ndev = ndev;
spx5_port->sparx5 = sparx5;
spx5_port->portno = portno;
- sparx5_set_port_ifh(spx5_port->ifh, portno);
ndev->netdev_ops = &sparx5_port_netdev_ops;
ndev->ethtool_ops = &sparx5_ethtool_ops;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index 148d431fcde4..304f84aadc36 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -44,6 +44,12 @@ void sparx5_ifh_parse(u32 *ifh, struct frame_info *info)
((u32)xtr_hdr[30] << 0);
fwd = (fwd >> 5);
info->src_port = FIELD_GET(GENMASK(7, 1), fwd);
+
+ info->timestamp =
+ ((u64)xtr_hdr[2] << 24) |
+ ((u64)xtr_hdr[3] << 16) |
+ ((u64)xtr_hdr[4] << 8) |
+ ((u64)xtr_hdr[5] << 0);
}
static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
@@ -144,6 +150,7 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
/* Finish up skb */
skb_put(skb, byte_cnt - ETH_FCS_LEN);
eth_skb_pad(skb);
+ sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
skb->protocol = eth_type_trans(skb, netdev);
netdev->stats.rx_bytes += skb->len;
netdev->stats.rx_packets++;
@@ -218,20 +225,44 @@ int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
struct net_device_stats *stats = &dev->stats;
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *sparx5 = port->sparx5;
+ u32 ifh[IFH_LEN];
int ret;
+ memset(ifh, 0, IFH_LEN * 4);
+ sparx5_set_port_ifh(ifh, port->portno);
+
+ if (sparx5->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ ret = sparx5_ptp_txtstamp_request(port, skb);
+ if (ret)
+ return ret;
+
+ sparx5_set_port_ifh_rew_op(ifh, SPARX5_SKB_CB(skb)->rew_op);
+ sparx5_set_port_ifh_pdu_type(ifh, SPARX5_SKB_CB(skb)->pdu_type);
+ sparx5_set_port_ifh_pdu_w16_offset(ifh, SPARX5_SKB_CB(skb)->pdu_w16_offset);
+ sparx5_set_port_ifh_timestamp(ifh, SPARX5_SKB_CB(skb)->ts_id);
+ }
+
+ skb_tx_timestamp(skb);
if (sparx5->fdma_irq > 0)
- ret = sparx5_fdma_xmit(sparx5, port->ifh, skb);
+ ret = sparx5_fdma_xmit(sparx5, ifh, skb);
else
- ret = sparx5_inject(sparx5, port->ifh, skb, dev);
+ ret = sparx5_inject(sparx5, ifh, skb, dev);
if (ret == NETDEV_TX_OK) {
stats->tx_bytes += skb->len;
stats->tx_packets++;
- skb_tx_timestamp(skb);
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ return ret;
+
dev_kfree_skb_any(skb);
} else {
stats->tx_dropped++;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ sparx5_ptp_txtstamp_release(port, skb);
}
return ret;
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c b/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c
new file mode 100644
index 000000000000..90366fcb9958
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include "sparx5_main.h"
+
+void sparx5_pgid_init(struct sparx5 *spx5)
+{
+ int i;
+
+ for (i = 0; i < PGID_TABLE_SIZE; i++)
+ spx5->pgid_map[i] = SPX5_PGID_FREE;
+
+ /* Reserved for unicast, flood control, broadcast, and CPU.
+ * These cannot be freed.
+ */
+ for (i = 0; i <= PGID_CPU; i++)
+ spx5->pgid_map[i] = SPX5_PGID_RESERVED;
+}
+
+int sparx5_pgid_alloc_glag(struct sparx5 *spx5, u16 *idx)
+{
+ int i;
+
+ for (i = PGID_GLAG_START; i <= PGID_GLAG_END; i++)
+ if (spx5->pgid_map[i] == SPX5_PGID_FREE) {
+ spx5->pgid_map[i] = SPX5_PGID_GLAG;
+ *idx = i;
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx)
+{
+ int i;
+
+ for (i = PGID_MCAST_START; i < PGID_TABLE_SIZE; i++) {
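+		/* The GLAG range is reserved, so the scan below steps over
+		 * it, e.g. jumping from index 833 (PGID_GLAG_START)
+		 * straight to 1089
+		 */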
+ if (i == PGID_GLAG_START)
+ i = PGID_GLAG_END + 1;
+
+ if (spx5->pgid_map[i] == SPX5_PGID_FREE) {
+ spx5->pgid_map[i] = SPX5_PGID_MULTICAST;
+ *idx = i;
+ return 0;
+ }
+ }
+
+ return -EBUSY;
+}
+
+int sparx5_pgid_free(struct sparx5 *spx5, u16 idx)
+{
+ if (idx <= PGID_CPU || idx >= PGID_TABLE_SIZE)
+ return -EINVAL;
+
+ if (spx5->pgid_map[idx] == SPX5_PGID_FREE)
+ return -EINVAL;
+
+ spx5->pgid_map[idx] = SPX5_PGID_FREE;
+ return 0;
+}
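
A minimal usage sketch for the new allocator, assuming a valid struct sparx5 *spx5 (illustrative only; the real callers appear in sparx5_switchdev.c below):

	u16 pgid;
	int err;

	err = sparx5_pgid_alloc_mcast(spx5, &pgid);
	if (err)
		return err;	/* multicast PGID table is full */

	/* ... program the port mask for this PGID ... */

	err = sparx5_pgid_free(spx5, pgid);	/* -EINVAL for reserved or already-free entries */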
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
index 8ba33bc1a001..830da0e5ff27 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
@@ -26,6 +26,15 @@ static bool port_conf_has_changed(struct sparx5_port_config *a, struct sparx5_po
return false;
}
+static struct phylink_pcs *
+sparx5_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct sparx5_port *port = netdev_priv(to_net_dev(config->dev));
+
+ return &port->phylink_pcs;
+}
+
static void sparx5_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
@@ -130,6 +139,7 @@ const struct phylink_pcs_ops sparx5_phylink_pcs_ops = {
const struct phylink_mac_ops sparx5_phylink_mac_ops = {
.validate = phylink_generic_validate,
+ .mac_select_pcs = sparx5_phylink_mac_select_pcs,
.mac_config = sparx5_phylink_mac_config,
.mac_link_down = sparx5_phylink_mac_link_down,
.mac_link_up = sparx5_phylink_mac_link_up,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
new file mode 100644
index 000000000000..0ed1ea7727c5
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
@@ -0,0 +1,685 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ *
+ * The Sparx5 Chip Register Model can be browsed at this location:
+ * https://github.com/microchip-ung/sparx-5_reginfo
+ */
+#include <linux/ptp_classify.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+#define SPARX5_MAX_PTP_ID 512
+
+#define TOD_ACC_PIN 0x4
+
+enum {
+ PTP_PIN_ACTION_IDLE = 0,
+ PTP_PIN_ACTION_LOAD,
+ PTP_PIN_ACTION_SAVE,
+ PTP_PIN_ACTION_CLOCK,
+ PTP_PIN_ACTION_DELTA,
+ PTP_PIN_ACTION_TOD
+};
+
+static u64 sparx5_ptp_get_1ppm(struct sparx5 *sparx5)
+{
+	/* Represents a 1ppm adjustment in 2^59 fixed-point format, using the
+	 * nominal TOD increments 1.59687500000 ns (625 MHz),
+	 * 1.99609375000 ns (500 MHz) and 3.99218750000 ns (250 MHz) as
+	 * reference. The value is calculated as follows:
+	 * (1/1000000)/((2^-59)/X)
+	 */
+
+ u64 res = 0;
+
+ switch (sparx5->coreclock) {
+ case SPX5_CORE_CLOCK_250MHZ:
+ res = 2301339409586;
+ break;
+ case SPX5_CORE_CLOCK_500MHZ:
+ res = 1150669704793;
+ break;
+ case SPX5_CORE_CLOCK_625MHZ:
+ res = 920535763834;
+ break;
+ default:
+ WARN(1, "Invalid core clock");
+ break;
+ }
+
+ return res;
+}
+
+static u64 sparx5_ptp_get_nominal_value(struct sparx5 *sparx5)
+{
+ u64 res = 0;
+
+ switch (sparx5->coreclock) {
+ case SPX5_CORE_CLOCK_250MHZ:
+ res = 0x1FF0000000000000;
+ break;
+ case SPX5_CORE_CLOCK_500MHZ:
+ res = 0x0FF8000000000000;
+ break;
+ case SPX5_CORE_CLOCK_625MHZ:
+ res = 0x0CC6666666666666;
+ break;
+ default:
+ WARN(1, "Invalid core clock");
+ break;
+ }
+
+ return res;
+}
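
Sanity check of the constants above, taking the 500 MHz case: the nominal TOD increment is 0x0FF8000000000000 / 2^59 = 1.99609375 ns per clock cycle, and one ppm of that increment is 0x0FF8000000000000 / 10^6 ≈ 1150669704793, the value returned by sparx5_ptp_get_1ppm() for SPX5_CORE_CLOCK_500MHZ. The 250 MHz and 625 MHz constants follow the same (1/1000000)/((2^-59)/X) relation.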
+
+int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ struct hwtstamp_config cfg;
+ struct sparx5_phc *phc;
+
+	/* For now, don't allow PTP to run on ports that are part of a bridge:
+	 * with a transparent clock the HW would still forward the frames, so
+	 * there would be duplicate frames
+	 */
+
+ if (test_bit(port->portno, sparx5->bridge_mask))
+ return -EINVAL;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_ON:
+ port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ port->ptp_cmd = IFH_REW_OP_ONE_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_OFF:
+ port->ptp_cmd = IFH_REW_OP_NOOP;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* Commit back the result & save it */
+ mutex_lock(&sparx5->ptp_lock);
+ phc = &sparx5->phc[SPARX5_PHC_PORT];
+ memcpy(&phc->hwtstamp_config, &cfg, sizeof(cfg));
+ mutex_unlock(&sparx5->ptp_lock);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+int sparx5_ptp_hwtstamp_get(struct sparx5_port *port, struct ifreq *ifr)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_phc *phc;
+
+ phc = &sparx5->phc[SPARX5_PHC_PORT];
+ return copy_to_user(ifr->ifr_data, &phc->hwtstamp_config,
+ sizeof(phc->hwtstamp_config)) ? -EFAULT : 0;
+}
+
+static void sparx5_ptp_classify(struct sparx5_port *port, struct sk_buff *skb,
+ u8 *rew_op, u8 *pdu_type, u8 *pdu_w16_offset)
+{
+ struct ptp_header *header;
+ u8 msgtype;
+ int type;
+
+ if (port->ptp_cmd == IFH_REW_OP_NOOP) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ *pdu_w16_offset = 0;
+ return;
+ }
+
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ *pdu_w16_offset = 0;
+ return;
+ }
+
+ header = ptp_parse_header(skb, type);
+ if (!header) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ *pdu_w16_offset = 0;
+ return;
+ }
+
+ *pdu_w16_offset = 7;
+ if (type & PTP_CLASS_L2)
+ *pdu_type = IFH_PDU_TYPE_PTP;
+ if (type & PTP_CLASS_IPV4)
+ *pdu_type = IFH_PDU_TYPE_IPV4_UDP_PTP;
+ if (type & PTP_CLASS_IPV6)
+ *pdu_type = IFH_PDU_TYPE_IPV6_UDP_PTP;
+
+ if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ *rew_op = IFH_REW_OP_TWO_STEP_PTP;
+ return;
+ }
+
+	/* If this is a Sync message and one-step is configured, use the
+	 * one-step operation, otherwise fall back to two-step
+	 */
+ msgtype = ptp_get_msgtype(header, type);
+ if ((msgtype & 0xf) == 0) {
+ *rew_op = IFH_REW_OP_ONE_STEP_PTP;
+ return;
+ }
+
+ *rew_op = IFH_REW_OP_TWO_STEP_PTP;
+}
+
+static void sparx5_ptp_txtstamp_old_release(struct sparx5_port *port)
+{
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+		if (time_after(SPARX5_SKB_CB(skb)->jiffies + SPARX5_PTP_TIMEOUT,
+			       jiffies))
+ break;
+
+ __skb_unlink(skb, &port->tx_skbs);
+ dev_kfree_skb_any(skb);
+ }
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+}
+
+int sparx5_ptp_txtstamp_request(struct sparx5_port *port,
+ struct sk_buff *skb)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ u8 rew_op, pdu_type, pdu_w16_offset;
+ unsigned long flags;
+
+ sparx5_ptp_classify(port, skb, &rew_op, &pdu_type, &pdu_w16_offset);
+ SPARX5_SKB_CB(skb)->rew_op = rew_op;
+ SPARX5_SKB_CB(skb)->pdu_type = pdu_type;
+ SPARX5_SKB_CB(skb)->pdu_w16_offset = pdu_w16_offset;
+
+ if (rew_op != IFH_REW_OP_TWO_STEP_PTP)
+ return 0;
+
+ sparx5_ptp_txtstamp_old_release(port);
+
+ spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags);
+ if (sparx5->ptp_skbs == SPARX5_MAX_PTP_ID) {
+ spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
+ return -EBUSY;
+ }
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ skb_queue_tail(&port->tx_skbs, skb);
+ SPARX5_SKB_CB(skb)->ts_id = port->ts_id;
+ SPARX5_SKB_CB(skb)->jiffies = jiffies;
+
+ sparx5->ptp_skbs++;
+ port->ts_id++;
+ if (port->ts_id == SPARX5_MAX_PTP_ID)
+ port->ts_id = 0;
+
+ spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
+
+ return 0;
+}
+
+void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
+ struct sk_buff *skb)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags);
+ port->ts_id--;
+ sparx5->ptp_skbs--;
+ skb_unlink(skb, &port->tx_skbs);
+ spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
+}
+
+static void sparx5_get_hwtimestamp(struct sparx5 *sparx5,
+ struct timespec64 *ts,
+ u32 nsec)
+{
+ /* Read current PTP time to get seconds */
+ unsigned long flags;
+ u32 curr_nsec;
+
+ spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
+
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(SPARX5_PHC_PORT) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ ts->tv_sec = spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ curr_nsec = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ ts->tv_nsec = nsec;
+
+ /* Sec has incremented since the ts was registered */
+ if (curr_nsec < nsec)
+ ts->tv_sec--;
+
+ spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
+}
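
Worked example of the rollover correction above: suppose the hardware stamped a frame at nsec = 999999900, but by the time the SAVE action latches the PHC, tv_sec reads 100 and curr_nsec reads 50. The seconds counter has wrapped past the stamp, so curr_nsec < nsec holds, tv_sec is decremented, and the reconstructed timestamp is 99.999999900 s.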
+
+irqreturn_t sparx5_ptp_irq_handler(int irq, void *args)
+{
+ int budget = SPARX5_MAX_PTP_ID;
+ struct sparx5 *sparx5 = args;
+
+ while (budget--) {
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sparx5_port *port;
+ struct timespec64 ts;
+ unsigned long flags;
+ u32 val, id, txport;
+ u32 delay;
+
+ val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD))
+ break;
+
+ WARN_ON(val & REW_PTP_TWOSTEP_CTRL_PTP_OVFL);
+
+ if (!(val & REW_PTP_TWOSTEP_CTRL_STAMP_TX))
+ continue;
+
+ /* Retrieve the ts Tx port */
+ txport = REW_PTP_TWOSTEP_CTRL_STAMP_PORT_GET(val);
+
+ /* Retrieve its associated skb */
+ port = sparx5->ports[txport];
+
+ /* Retrieve the delay */
+ delay = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP);
+ delay = REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(delay);
+
+		/* Get the next timestamp from the FIFO; it must be the RX
+		 * timestamp, which carries the ID of the frame
+		 */
+ spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1),
+ REW_PTP_TWOSTEP_CTRL_PTP_NXT,
+ sparx5, REW_PTP_TWOSTEP_CTRL);
+
+ val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL);
+
+		/* Check if a timestamp can be retrieved */
+ if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD))
+ break;
+
+ /* Read RX timestamping to get the ID */
+ id = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP);
+ id <<= 8;
+ id |= spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP_SUBNS);
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (SPARX5_SKB_CB(skb)->ts_id != id)
+ continue;
+
+ __skb_unlink(skb, &port->tx_skbs);
+ skb_match = skb;
+ break;
+ }
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+
+ /* Next ts */
+ spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1),
+ REW_PTP_TWOSTEP_CTRL_PTP_NXT,
+ sparx5, REW_PTP_TWOSTEP_CTRL);
+
+ if (WARN_ON(!skb_match))
+ continue;
+
+ spin_lock(&sparx5->ptp_ts_id_lock);
+ sparx5->ptp_skbs--;
+ spin_unlock(&sparx5->ptp_ts_id_lock);
+
+ /* Get the h/w timestamp */
+ sparx5_get_hwtimestamp(sparx5, &ts, delay);
+
+ /* Set the timestamp into the skb */
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb_match, &shhwtstamps);
+
+ dev_kfree_skb_any(skb_match);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int sparx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
+ struct sparx5 *sparx5 = phc->sparx5;
+ unsigned long flags;
+	bool neg_adj = false;
+ u64 tod_inc;
+ u64 ref;
+
+ if (!scaled_ppm)
+ return 0;
+
+ if (scaled_ppm < 0) {
+		neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ tod_inc = sparx5_ptp_get_nominal_value(sparx5);
+
+	/* The multiplication is split into two separate additions to avoid
+	 * overflow: if scaled_ppm, with its 16-bit fractional part, were
+	 * bigger than 20 ppm, a direct multiplication would overflow.
+	 */
+ ref = sparx5_ptp_get_1ppm(sparx5) * (scaled_ppm >> 16);
+ ref += (sparx5_ptp_get_1ppm(sparx5) * (0xffff & scaled_ppm)) >> 16;
+ tod_inc = neg_adj ? tod_inc - ref : tod_inc + ref;
+
+ spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
+
+	spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(BIT(phc->index)),
+ PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
+ sparx5, PTP_PTP_DOM_CFG);
+
+ spx5_wr((u32)tod_inc & 0xFFFFFFFF, sparx5,
+ PTP_CLK_PER_CFG(phc->index, 0));
+ spx5_wr((u32)(tod_inc >> 32), sparx5,
+ PTP_CLK_PER_CFG(phc->index, 1));
+
+ spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0),
+ PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, sparx5,
+ PTP_PTP_DOM_CFG);
+
+ spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
+
+ return 0;
+}
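
Worked example of the split multiplication, for +10 ppm at 500 MHz core clock: scaled_ppm = 10 * 2^16 = 655360, so the integer part contributes 1150669704793 * 10 = 11506697047930 and the fractional part contributes (1150669704793 * 0) >> 16 = 0; tod_inc becomes 0x0FF8000000000000 + 11506697047930. Splitting the product this way keeps every intermediate within 64 bits even near max_adj.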
+
+static int sparx5_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
+ struct sparx5 *sparx5 = phc->sparx5;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
+
+ /* Must be in IDLE mode before the time can be loaded */
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ /* Set new value */
+ spx5_wr(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)),
+ sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ spx5_wr(lower_32_bits(ts->tv_sec),
+ sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ spx5_wr(ts->tv_nsec, sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ /* Apply new values */
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+static int sparx5_ptp_gettime64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
+ struct sparx5 *sparx5 = phc->sparx5;
+ unsigned long flags;
+ time64_t s;
+ s64 ns;
+
+ spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
+
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ s = spx5_rd(sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ s <<= 32;
+ s |= spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ ns = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+ ns &= PTP_PTP_TOD_NSEC_PTP_TOD_NSEC;
+
+ spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
+
+ /* Deal with negative values */
+ if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+
+ set_normalized_timespec64(ts, s, ns);
+ return 0;
+}
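
Worked example of the negative-nanoseconds encoding: a raw readout of ns = 0x3FFFFFFF falls inside the 0x3FFFFFF0 window and represents -1 ns relative to the captured second. The code keeps ns & 0xf = 15, adds 999999984, and decrements the second, yielding (s - 1) s plus 999999999 ns.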
+
+static int sparx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
+ struct sparx5 *sparx5 = phc->sparx5;
+
+ if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
+
+ /* Must be in IDLE mode before the time can be loaded */
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spx5_wr(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(delta),
+ sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ /* Adjust time with the value of PTP_TOD_NSEC */
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
+ } else {
+ /* Fall back using sparx5_ptp_settime64 which is not exact */
+ struct timespec64 ts;
+ u64 now;
+
+ sparx5_ptp_gettime64(ptp, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ sparx5_ptp_settime64(ptp, &ts);
+ }
+
+ return 0;
+}
+
+static struct ptp_clock_info sparx5_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "sparx5 ptp",
+ .max_adj = 200000,
+ .gettime64 = sparx5_ptp_gettime64,
+ .settime64 = sparx5_ptp_settime64,
+ .adjtime = sparx5_ptp_adjtime,
+ .adjfine = sparx5_ptp_adjfine,
+};
+
+static int sparx5_ptp_phc_init(struct sparx5 *sparx5,
+ int index,
+ struct ptp_clock_info *clock_info)
+{
+ struct sparx5_phc *phc = &sparx5->phc[index];
+
+ phc->info = *clock_info;
+ phc->clock = ptp_clock_register(&phc->info, sparx5->dev);
+ if (IS_ERR(phc->clock))
+ return PTR_ERR(phc->clock);
+
+ phc->index = index;
+ phc->sparx5 = sparx5;
+
+ /* PTP Rx stamping is always enabled. */
+ phc->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+
+ return 0;
+}
+
+int sparx5_ptp_init(struct sparx5 *sparx5)
+{
+ u64 tod_adj = sparx5_ptp_get_nominal_value(sparx5);
+ struct sparx5_port *port;
+ int err, i;
+
+ if (!sparx5->ptp)
+ return 0;
+
+ for (i = 0; i < SPARX5_PHC_COUNT; ++i) {
+ err = sparx5_ptp_phc_init(sparx5, i, &sparx5_ptp_clock_info);
+ if (err)
+ return err;
+ }
+
+ spin_lock_init(&sparx5->ptp_clock_lock);
+ spin_lock_init(&sparx5->ptp_ts_id_lock);
+ mutex_init(&sparx5->ptp_lock);
+
+ /* Disable master counters */
+ spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0), sparx5, PTP_PTP_DOM_CFG);
+
+ /* Configure the nominal TOD increment per clock cycle */
+ spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0x7),
+ PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
+ sparx5, PTP_PTP_DOM_CFG);
+
+ for (i = 0; i < SPARX5_PHC_COUNT; ++i) {
+ spx5_wr((u32)tod_adj & 0xFFFFFFFF, sparx5,
+ PTP_CLK_PER_CFG(i, 0));
+ spx5_wr((u32)(tod_adj >> 32), sparx5,
+ PTP_CLK_PER_CFG(i, 1));
+ }
+
+ spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0),
+ PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
+ sparx5, PTP_PTP_DOM_CFG);
+
+ /* Enable master counters */
+ spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG);
+
+ for (i = 0; i < sparx5->port_count; i++) {
+ port = sparx5->ports[i];
+ if (!port)
+ continue;
+
+ skb_queue_head_init(&port->tx_skbs);
+ }
+
+ return 0;
+}
+
+void sparx5_ptp_deinit(struct sparx5 *sparx5)
+{
+ struct sparx5_port *port;
+ int i;
+
+ for (i = 0; i < sparx5->port_count; i++) {
+ port = sparx5->ports[i];
+ if (!port)
+ continue;
+
+ skb_queue_purge(&port->tx_skbs);
+ }
+
+ for (i = 0; i < SPARX5_PHC_COUNT; ++i)
+ ptp_clock_unregister(sparx5->phc[i].clock);
+}
+
+void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb,
+ u64 timestamp)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct sparx5_phc *phc;
+ struct timespec64 ts;
+ u64 full_ts_in_ns;
+
+ if (!sparx5->ptp)
+ return;
+
+ phc = &sparx5->phc[SPARX5_PHC_PORT];
+ sparx5_ptp_gettime64(&phc->info, &ts);
+
+ if (ts.tv_nsec < timestamp)
+ ts.tv_sec--;
+ ts.tv_nsec = timestamp;
+ full_ts_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = full_ts_in_ns;
+}
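
The extraction header carries only the nanosecond part of the RX timestamp, so the seconds must be reconstructed from the PHC. For example, if the PHC currently reads 200.000000040 s while the stamped nanoseconds are 999999990, then ts.tv_nsec < timestamp reveals that the seconds counter incremented after stamping, and the frame is timestamped 199.999999990 s.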
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
index 649ca609884a..2d8e0b81c839 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
@@ -16,14 +16,31 @@ struct sparx5_switchdev_event_work {
struct work_struct work;
struct switchdev_notifier_fdb_info fdb_info;
struct net_device *dev;
+ struct sparx5 *sparx5;
unsigned long event;
};
+static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
struct switchdev_brport_flags flags)
{
+ int pgid;
+
if (flags.mask & BR_MCAST_FLOOD)
- sparx5_pgid_update_mask(port, PGID_MC_FLOOD, true);
+ for (pgid = PGID_MC_FLOOD; pgid <= PGID_IPV6_MC_CTRL; pgid++)
+ sparx5_pgid_update_mask(port, pgid, !!(flags.val & BR_MCAST_FLOOD));
+ if (flags.mask & BR_FLOOD)
+ sparx5_pgid_update_mask(port, PGID_UC_FLOOD, !!(flags.val & BR_FLOOD));
+ if (flags.mask & BR_BCAST_FLOOD)
+ sparx5_pgid_update_mask(port, PGID_BCAST, !!(flags.val & BR_BCAST_FLOOD));
}
static void sparx5_attr_stp_state_set(struct sparx5_port *port,
@@ -72,6 +89,9 @@ static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
struct sparx5_port *port = netdev_priv(dev);
switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ return sparx5_port_attr_pre_bridge_flags(port,
+ attr->u.brport_flags);
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
break;
@@ -82,6 +102,11 @@ static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+		/* Use PVID 1 when default_pvid is 0, to avoid
+ * collision with non-bridged ports.
+ */
+ if (port->pvid == 0)
+ port->pvid = 1;
port->vlan_aware = attr->u.vlan_filtering;
sparx5_vlan_port_apply(port->sparx5, port);
break;
@@ -117,6 +142,9 @@ static int sparx5_port_bridge_join(struct sparx5_port *port,
if (err)
goto err_switchdev_offload;
+ /* Remove standalone port entry */
+ sparx5_mact_forget(sparx5, ndev->dev_addr, 0);
+
	/* Port enters bridge mode, therefore we don't need to copy multicast
	 * frames to the CPU unless the bridge requests them
*/
@@ -145,6 +173,9 @@ static void sparx5_port_bridge_leave(struct sparx5_port *port,
port->pvid = NULL_VID;
port->vid = NULL_VID;
+ /* Forward frames to CPU */
+ sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, 0);
+
	/* Port returns to host mode, therefore restore the mc list */
__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
}
@@ -228,31 +259,43 @@ static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
struct switchdev_notifier_fdb_info *fdb_info;
struct sparx5_port *port;
struct sparx5 *sparx5;
+ bool host_addr;
+ u16 vid;
rtnl_lock();
- if (!sparx5_netdevice_check(dev))
- goto out;
-
- port = netdev_priv(dev);
- sparx5 = port->sparx5;
+ if (!sparx5_netdevice_check(dev)) {
+ host_addr = true;
+ sparx5 = switchdev_work->sparx5;
+ } else {
+ host_addr = false;
+ sparx5 = switchdev_work->sparx5;
+ port = netdev_priv(dev);
+ }
fdb_info = &switchdev_work->fdb_info;
+	/* Use PVID 1 when default_pvid is 0, to avoid
+ * collision with non-bridged ports.
+ */
+ if (fdb_info->vid == 0)
+ vid = 1;
+ else
+ vid = fdb_info->vid;
+
switch (switchdev_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
- if (!fdb_info->added_by_user)
- break;
- sparx5_add_mact_entry(sparx5, port, fdb_info->addr,
- fdb_info->vid);
+ if (host_addr)
+ sparx5_add_mact_entry(sparx5, dev, PGID_CPU,
+ fdb_info->addr, vid);
+ else
+ sparx5_add_mact_entry(sparx5, port->ndev, port->portno,
+ fdb_info->addr, vid);
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
- if (!fdb_info->added_by_user)
- break;
- sparx5_del_mact_entry(sparx5, fdb_info->addr, fdb_info->vid);
+ sparx5_del_mact_entry(sparx5, fdb_info->addr, vid);
break;
}
-out:
rtnl_unlock();
kfree(switchdev_work->fdb_info.addr);
kfree(switchdev_work);
@@ -264,15 +307,18 @@ static void sparx5_schedule_work(struct work_struct *work)
queue_work(sparx5_owq, work);
}
-static int sparx5_switchdev_event(struct notifier_block *unused,
+static int sparx5_switchdev_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
struct sparx5_switchdev_event_work *switchdev_work;
struct switchdev_notifier_fdb_info *fdb_info;
struct switchdev_notifier_info *info = ptr;
+ struct sparx5 *spx5;
int err;
+ spx5 = container_of(nb, struct sparx5, switchdev_nb);
+
switch (event) {
case SWITCHDEV_PORT_ATTR_SET:
err = switchdev_handle_port_attr_set(dev, ptr,
@@ -288,6 +334,7 @@ static int sparx5_switchdev_event(struct notifier_block *unused,
switchdev_work->dev = dev;
switchdev_work->event = event;
+ switchdev_work->sparx5 = spx5;
fdb_info = container_of(info,
struct switchdev_notifier_fdb_info,
@@ -314,77 +361,132 @@ err_addr_alloc:
return NOTIFY_BAD;
}
-static void sparx5_sync_port_dev_addr(struct sparx5 *sparx5,
- struct sparx5_port *port,
- u16 vid, bool add)
+static int sparx5_handle_port_vlan_add(struct net_device *dev,
+ struct notifier_block *nb,
+ const struct switchdev_obj_port_vlan *v)
{
- if (!port ||
- !test_bit(port->portno, sparx5->bridge_mask))
- return; /* Skip null/host interfaces */
-
- /* Bridge connects to vid? */
- if (add) {
- /* Add port MAC address from the VLAN */
- sparx5_mact_learn(sparx5, PGID_CPU,
- port->ndev->dev_addr, vid);
- } else {
- /* Control port addr visibility depending on
- * port VLAN connectivity.
- */
- if (test_bit(port->portno, sparx5->vlan_mask[vid]))
- sparx5_mact_learn(sparx5, PGID_CPU,
- port->ndev->dev_addr, vid);
- else
- sparx5_mact_forget(sparx5,
- port->ndev->dev_addr, vid);
+ struct sparx5_port *port = netdev_priv(dev);
+
+ if (netif_is_bridge_master(dev)) {
+ struct sparx5 *sparx5 =
+ container_of(nb, struct sparx5,
+ switchdev_blocking_nb);
+
+ /* Flood broadcast to CPU */
+ sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
+ v->vid);
+ return 0;
}
+
+ if (!sparx5_netdevice_check(dev))
+ return -EOPNOTSUPP;
+
+ return sparx5_vlan_vid_add(port, v->vid,
+ v->flags & BRIDGE_VLAN_INFO_PVID,
+ v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
}
-static void sparx5_sync_bridge_dev_addr(struct net_device *dev,
- struct sparx5 *sparx5,
- u16 vid, bool add)
+static int sparx5_handle_port_mdb_add(struct net_device *dev,
+ struct notifier_block *nb,
+ const struct switchdev_obj_port_mdb *v)
{
- int i;
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *spx5 = port->sparx5;
+ u16 pgid_idx, vid;
+ u32 mact_entry;
+ int res, err;
- /* First, handle bridge address'es */
- if (add) {
- sparx5_mact_learn(sparx5, PGID_CPU, dev->dev_addr,
- vid);
- sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
- vid);
+	/* When the bridge is VLAN-unaware, the VLAN tag is not parsed and we
+	 * receive vid 0. Fall back to bridge vid 1.
+	 */
+ if (!br_vlan_enabled(spx5->hw_bridge_dev))
+ vid = 1;
+ else
+ vid = v->vid;
+
+ res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);
+
+ if (res) {
+ pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);
+
+ /* MC_IDX has an offset of 65 in the PGID table. */
+ pgid_idx += PGID_MCAST_START;
+ sparx5_pgid_update_mask(port, pgid_idx, true);
} else {
- sparx5_mact_forget(sparx5, dev->dev_addr, vid);
- sparx5_mact_forget(sparx5, dev->broadcast, vid);
+ err = sparx5_pgid_alloc_mcast(spx5, &pgid_idx);
+ if (err) {
+ netdev_warn(dev, "multicast pgid table full\n");
+ return err;
+ }
+ sparx5_pgid_update_mask(port, pgid_idx, true);
+ err = sparx5_mact_learn(spx5, pgid_idx, v->addr, vid);
+ if (err) {
+ netdev_warn(dev, "could not learn mac address %pM\n", v->addr);
+ sparx5_pgid_update_mask(port, pgid_idx, false);
+ return err;
+ }
}
- /* Now look at bridged ports */
- for (i = 0; i < SPX5_PORTS; i++)
- sparx5_sync_port_dev_addr(sparx5, sparx5->ports[i], vid, add);
+ return 0;
}
-static int sparx5_handle_port_vlan_add(struct net_device *dev,
- struct notifier_block *nb,
- const struct switchdev_obj_port_vlan *v)
+static int sparx5_mdb_del_entry(struct net_device *dev,
+ struct sparx5 *spx5,
+ const unsigned char mac[ETH_ALEN],
+ const u16 vid,
+ u16 pgid_idx)
+{
+ int err;
+
+ err = sparx5_mact_forget(spx5, mac, vid);
+ if (err) {
+ netdev_warn(dev, "could not forget mac address %pM", mac);
+ return err;
+ }
+ err = sparx5_pgid_free(spx5, pgid_idx);
+ if (err) {
+ netdev_err(dev, "attempted to free already freed pgid\n");
+ return err;
+ }
+ return 0;
+}
+
+static int sparx5_handle_port_mdb_del(struct net_device *dev,
+ struct notifier_block *nb,
+ const struct switchdev_obj_port_mdb *v)
{
struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *spx5 = port->sparx5;
+ u16 pgid_idx, vid;
+ u32 mact_entry, res, pgid_entry[3];
+ int err;
- if (netif_is_bridge_master(dev)) {
- if (v->flags & BRIDGE_VLAN_INFO_BRENTRY) {
- struct sparx5 *sparx5 =
- container_of(nb, struct sparx5,
- switchdev_blocking_nb);
+ if (!br_vlan_enabled(spx5->hw_bridge_dev))
+ vid = 1;
+ else
+ vid = v->vid;
- sparx5_sync_bridge_dev_addr(dev, sparx5, v->vid, true);
+ res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);
+
+ if (res) {
+ pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);
+
+ /* MC_IDX has an offset of 65 in the PGID table. */
+ pgid_idx += PGID_MCAST_START;
+ sparx5_pgid_update_mask(port, pgid_idx, false);
+
+ pgid_entry[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid_idx));
+ pgid_entry[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid_idx));
+ pgid_entry[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid_idx));
+ if (pgid_entry[0] == 0 && pgid_entry[1] == 0 && pgid_entry[2] == 0) {
+ /* No ports are in MC group. Remove entry */
+ err = sparx5_mdb_del_entry(dev, spx5, v->addr, vid, pgid_idx);
+ if (err)
+ return err;
}
- return 0;
}
- if (!sparx5_netdevice_check(dev))
- return -EOPNOTSUPP;
-
- return sparx5_vlan_vid_add(port, v->vid,
- v->flags & BRIDGE_VLAN_INFO_PVID,
- v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ return 0;
}
static int sparx5_handle_port_obj_add(struct net_device *dev,
@@ -399,6 +501,10 @@ static int sparx5_handle_port_obj_add(struct net_device *dev,
err = sparx5_handle_port_vlan_add(dev, nb,
SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ err = sparx5_handle_port_mdb_add(dev, nb,
+ SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -421,7 +527,7 @@ static int sparx5_handle_port_vlan_del(struct net_device *dev,
container_of(nb, struct sparx5,
switchdev_blocking_nb);
- sparx5_sync_bridge_dev_addr(dev, sparx5, vid, false);
+ sparx5_mact_forget(sparx5, dev->broadcast, vid);
return 0;
}
@@ -432,9 +538,6 @@ static int sparx5_handle_port_vlan_del(struct net_device *dev,
if (ret)
return ret;
- /* Delete the port MAC address with the matching VLAN information */
- sparx5_mact_forget(port->sparx5, port->ndev->dev_addr, vid);
-
return 0;
}
@@ -450,6 +553,10 @@ static int sparx5_handle_port_obj_del(struct net_device *dev,
err = sparx5_handle_port_vlan_del(dev, nb,
SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ err = sparx5_handle_port_mdb_del(dev, nb,
+ SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
default:
err = -EOPNOTSUPP;
break;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
index 4ce490a25f33..8e56ffa1c4f7 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
@@ -58,16 +58,6 @@ int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
struct sparx5 *sparx5 = port->sparx5;
int ret;
- /* Make the port a member of the VLAN */
- set_bit(port->portno, sparx5->vlan_mask[vid]);
- ret = sparx5_vlant_set_mask(sparx5, vid);
- if (ret)
- return ret;
-
- /* Default ingress vlan classification */
- if (pvid)
- port->pvid = vid;
-
/* Untagged egress vlan classification */
if (untagged && port->vid != vid) {
if (port->vid) {
@@ -79,6 +69,16 @@ int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
port->vid = vid;
}
+ /* Make the port a member of the VLAN */
+ set_bit(port->portno, sparx5->vlan_mask[vid]);
+ ret = sparx5_vlant_set_mask(sparx5, vid);
+ if (ret)
+ return ret;
+
+ /* Default ingress vlan classification */
+ if (pvid)
+ port->pvid = vid;
+
sparx5_vlan_port_apply(sparx5, port);
return 0;
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 636dfef24a6c..49b85ca578b0 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -663,7 +663,7 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
struct gdma_context *gc = gd->gdma_context;
struct hw_channel_context *hwc;
u32 length = gmi->length;
- u32 req_msg_size;
+ size_t req_msg_size;
int err;
int i;
@@ -674,7 +674,7 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
return -EINVAL;
hwc = gc->hwc.driver_data;
- req_msg_size = sizeof(*req) + num_page * sizeof(u64);
+ req_msg_size = struct_size(req, page_addr_list, num_page);
if (req_msg_size > hwc->max_req_msg_size)
return -EINVAL;
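
struct_size() computes the size of a structure with a trailing flexible array while saturating to SIZE_MAX on overflow instead of silently wrapping. A self-contained userspace sketch of the equivalent (unchecked) arithmetic, using a hypothetical stand-in for the request layout:

	#include <stddef.h>
	#include <stdint.h>

	struct dma_region_req {
		uint64_t hdr[4];		/* stand-in for the fixed header fields */
		uint64_t page_addr_list[];	/* flexible array of page addresses */
	};

	/* struct_size(req, page_addr_list, num_page) is roughly: */
	static size_t req_msg_size(size_t num_page)
	{
		/* the kernel helper additionally saturates on overflow */
		return sizeof(struct dma_region_req) +
		       num_page * sizeof(uint64_t);
	}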
diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h
index 9a12607fb511..d36405af9432 100644
--- a/drivers/net/ethernet/microsoft/mana/mana.h
+++ b/drivers/net/ethernet/microsoft/mana/mana.h
@@ -48,7 +48,15 @@ enum TRI_STATE {
#define MAX_PORTS_IN_MANA_DEV 256
-struct mana_stats {
+struct mana_stats_rx {
+ u64 packets;
+ u64 bytes;
+ u64 xdp_drop;
+ u64 xdp_tx;
+ struct u64_stats_sync syncp;
+};
+
+struct mana_stats_tx {
u64 packets;
u64 bytes;
struct u64_stats_sync syncp;
@@ -76,7 +84,7 @@ struct mana_txq {
atomic_t pending_sends;
- struct mana_stats stats;
+ struct mana_stats_tx stats;
};
/* skb data and frags dma mappings */
@@ -298,10 +306,11 @@ struct mana_rxq {
u32 buf_index;
- struct mana_stats stats;
+ struct mana_stats_rx stats;
struct bpf_prog __rcu *bpf_prog;
struct xdp_rxq_info xdp_rxq;
+ struct page *xdp_save_page;
/* MUST BE THE LAST MEMBER:
* Each receive buffer has an associated mana_recv_buf_oob.
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 498d0f999275..b7d3ba1b4d17 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -136,7 +136,7 @@ int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
bool ipv4 = false, ipv6 = false;
struct mana_tx_package pkg = {};
struct netdev_queue *net_txq;
- struct mana_stats *tx_stats;
+ struct mana_stats_tx *tx_stats;
struct gdma_queue *gdma_sq;
unsigned int csum_type;
struct mana_txq *txq;
@@ -299,7 +299,8 @@ static void mana_get_stats64(struct net_device *ndev,
{
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int num_queues = apc->num_queues;
- struct mana_stats *stats;
+ struct mana_stats_rx *rx_stats;
+ struct mana_stats_tx *tx_stats;
unsigned int start;
u64 packets, bytes;
int q;
@@ -310,26 +311,26 @@ static void mana_get_stats64(struct net_device *ndev,
netdev_stats_to_stats64(st, &ndev->stats);
for (q = 0; q < num_queues; q++) {
- stats = &apc->rxqs[q]->stats;
+ rx_stats = &apc->rxqs[q]->stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
st->rx_packets += packets;
st->rx_bytes += bytes;
}
for (q = 0; q < num_queues; q++) {
- stats = &apc->tx_qp[q].txq.stats;
+ tx_stats = &apc->tx_qp[q].txq.stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
st->tx_packets += packets;
st->tx_bytes += bytes;
@@ -986,7 +987,7 @@ static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
struct mana_rxq *rxq)
{
- struct mana_stats *rx_stats = &rxq->stats;
+ struct mana_stats_rx *rx_stats = &rxq->stats;
struct net_device *ndev = rxq->ndev;
uint pkt_len = cqe->ppi[0].pkt_len;
u16 rxq_idx = rxq->rxq_idx;
@@ -1007,7 +1008,7 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
if (act != XDP_PASS && act != XDP_TX)
- goto drop;
+ goto drop_xdp;
skb = mana_build_skb(buf_va, pkt_len, &xdp);
@@ -1034,6 +1035,14 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
}
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->packets++;
+ rx_stats->bytes += pkt_len;
+
+ if (act == XDP_TX)
+ rx_stats->xdp_tx++;
+ u64_stats_update_end(&rx_stats->syncp);
+
if (act == XDP_TX) {
skb_set_queue_mapping(skb, rxq_idx);
mana_xdp_tx(skb, ndev);
@@ -1042,15 +1051,19 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
napi_gro_receive(napi, skb);
+ return;
+
+drop_xdp:
u64_stats_update_begin(&rx_stats->syncp);
- rx_stats->packets++;
- rx_stats->bytes += pkt_len;
+ rx_stats->xdp_drop++;
u64_stats_update_end(&rx_stats->syncp);
- return;
drop:
- free_page((unsigned long)buf_va);
+ WARN_ON_ONCE(rxq->xdp_save_page);
+ rxq->xdp_save_page = virt_to_page(buf_va);
+
++ndev->stats.rx_dropped;
+
return;
}
@@ -1072,8 +1085,10 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
break;
case CQE_RX_TRUNCATED:
- netdev_err(ndev, "Dropped a truncated packet\n");
- return;
+ ++ndev->stats.rx_dropped;
+ rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
+ netdev_warn_once(ndev, "Dropped a truncated packet\n");
+ goto drop;
case CQE_RX_COALESCED_4:
netdev_err(ndev, "RX coalescing is unsupported\n");
@@ -1089,9 +1104,6 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
return;
}
- if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
- return;
-
pktlen = oob->ppi[0].pkt_len;
if (pktlen == 0) {
@@ -1105,7 +1117,13 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
rxbuf_oob = &rxq->rx_oobs[curr];
WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
- new_page = alloc_page(GFP_ATOMIC);
+ /* Reuse XDP dropped page if available */
+ if (rxq->xdp_save_page) {
+ new_page = rxq->xdp_save_page;
+ rxq->xdp_save_page = NULL;
+ } else {
+ new_page = alloc_page(GFP_ATOMIC);
+ }
if (new_page) {
da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
@@ -1135,6 +1153,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
mana_rx_skb(old_buf, oob, rxq);
+drop:
mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
mana_post_pkt_rxq(rxq);
@@ -1392,6 +1411,9 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
mana_deinit_cq(apc, &rxq->rx_cq);
+ if (rxq->xdp_save_page)
+ __free_page(rxq->xdp_save_page);
+
for (i = 0; i < rxq->num_rx_buf; i++) {
rx_oob = &rxq->rx_oobs[i];
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index c3c81ae3fafd..e13f2453eabb 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -23,7 +23,7 @@ static int mana_get_sset_count(struct net_device *ndev, int stringset)
if (stringset != ETH_SS_STATS)
return -EINVAL;
- return ARRAY_SIZE(mana_eth_stats) + num_queues * 4;
+ return ARRAY_SIZE(mana_eth_stats) + num_queues * 6;
}
static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
@@ -46,6 +46,10 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
sprintf(p, "rx_%d_bytes", i);
p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_%d_xdp_drop", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_%d_xdp_tx", i);
+ p += ETH_GSTRING_LEN;
}
for (i = 0; i < num_queues; i++) {
@@ -62,9 +66,12 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int num_queues = apc->num_queues;
void *eth_stats = &apc->eth_stats;
- struct mana_stats *stats;
+ struct mana_stats_rx *rx_stats;
+ struct mana_stats_tx *tx_stats;
unsigned int start;
u64 packets, bytes;
+ u64 xdp_drop;
+ u64 xdp_tx;
int q, i = 0;
if (!apc->port_is_up)
@@ -74,26 +81,30 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);
for (q = 0; q < num_queues; q++) {
- stats = &apc->rxqs[q]->stats;
+ rx_stats = &apc->rxqs[q]->stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ xdp_drop = rx_stats->xdp_drop;
+ xdp_tx = rx_stats->xdp_tx;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
+ data[i++] = xdp_drop;
+ data[i++] = xdp_tx;
}
for (q = 0; q < num_queues; q++) {
- stats = &apc->tx_qp[q].txq.stats;
+ tx_stats = &apc->tx_qp[q].txq.stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
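
The 4 -> 6 change in mana_get_sset_count() matches the new layout: each RX queue now exports four values (packets, bytes, xdp_drop, xdp_tx) and each TX queue two (packets, bytes), so each queue pair contributes 4 + 2 = 6 entries on top of ARRAY_SIZE(mana_eth_stats).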
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 15179b9529e1..afb7dcadb8d2 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -510,14 +510,14 @@ static int moxart_mac_probe(struct platform_device *pdev)
}
priv->tx_buf_base = kmalloc_array(priv->tx_buf_size, TX_DESC_NUM,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!priv->tx_buf_base) {
ret = -ENOMEM;
goto init_fail;
}
priv->rx_buf_base = kmalloc_array(priv->rx_buf_size, RX_DESC_NUM,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!priv->rx_buf_base) {
ret = -ENOMEM;
goto init_fail;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index fd3ceb74620d..e443bd8b2d09 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -13,6 +13,7 @@
#define TABLE_UPDATE_SLEEP_US 10
#define TABLE_UPDATE_TIMEOUT_US 100000
+#define OCELOT_RSV_VLAN_RANGE_START 4000
struct ocelot_mact_entry {
u8 mac[ETH_ALEN];
@@ -221,6 +222,35 @@ static void ocelot_vcap_enable(struct ocelot *ocelot, int port)
REW_PORT_CFG, port);
}
+static int ocelot_single_vlan_aware_bridge(struct ocelot *ocelot,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *bridge = NULL;
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (!ocelot_port || !ocelot_port->bridge ||
+ !br_vlan_enabled(ocelot_port->bridge))
+ continue;
+
+ if (!bridge) {
+ bridge = ocelot_port->bridge;
+ continue;
+ }
+
+ if (bridge == ocelot_port->bridge)
+ continue;
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one VLAN-aware bridge is supported");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot)
{
return ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
@@ -347,12 +377,45 @@ static void ocelot_port_manage_port_tag(struct ocelot *ocelot, int port)
}
}
+int ocelot_bridge_num_find(struct ocelot *ocelot,
+ const struct net_device *bridge)
+{
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (ocelot_port && ocelot_port->bridge == bridge)
+ return ocelot_port->bridge_num;
+ }
+
+ return -1;
+}
+EXPORT_SYMBOL_GPL(ocelot_bridge_num_find);
+
+static u16 ocelot_vlan_unaware_pvid(struct ocelot *ocelot,
+ const struct net_device *bridge)
+{
+ int bridge_num;
+
+ /* Standalone ports use VID 0 */
+ if (!bridge)
+ return 0;
+
+ bridge_num = ocelot_bridge_num_find(ocelot, bridge);
+ if (WARN_ON(bridge_num < 0))
+ return 0;
+
+ /* VLAN-unaware bridges use a reserved VID going from 4095 downwards */
+ return VLAN_N_VID - bridge_num - 1;
+}
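
Worked example of the reserved-PVID mapping (VLAN_N_VID is 4096): bridge_num 0 maps to VID 4095, bridge_num 1 to VID 4094, and so on downwards, while standalone ports keep VID 0. This is the range that the OCELOT_RSV_VLAN_RANGE_START = 4000 reservation further below protects.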
+
 /* Default vlan to classify for untagged frames (may be zero) */
static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
const struct ocelot_bridge_vlan *pvid_vlan)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- u16 pvid = OCELOT_VLAN_UNAWARE_PVID;
+ u16 pvid = ocelot_vlan_unaware_pvid(ocelot, ocelot_port->bridge);
u32 val = 0;
ocelot_port->pvid_vlan = pvid_vlan;
@@ -466,12 +529,29 @@ static int ocelot_vlan_member_del(struct ocelot *ocelot, int port, u16 vid)
return 0;
}
+static int ocelot_add_vlan_unaware_pvid(struct ocelot *ocelot, int port,
+ const struct net_device *bridge)
+{
+ u16 vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
+
+ return ocelot_vlan_member_add(ocelot, port, vid, true);
+}
+
+static int ocelot_del_vlan_unaware_pvid(struct ocelot *ocelot, int port,
+ const struct net_device *bridge)
+{
+ u16 vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
+
+ return ocelot_vlan_member_del(ocelot, port, vid);
+}
+
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
bool vlan_aware, struct netlink_ext_ack *extack)
{
struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct ocelot_vcap_filter *filter;
+ int err;
u32 val;
list_for_each_entry(filter, &block->rules, list) {
@@ -483,6 +563,19 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
}
}
+ err = ocelot_single_vlan_aware_bridge(ocelot, extack);
+ if (err)
+ return err;
+
+ if (vlan_aware)
+ err = ocelot_del_vlan_unaware_pvid(ocelot, port,
+ ocelot_port->bridge);
+ else
+ err = ocelot_add_vlan_unaware_pvid(ocelot, port,
+ ocelot_port->bridge);
+ if (err)
+ return err;
+
ocelot_port->vlan_aware = vlan_aware;
if (vlan_aware)
@@ -521,6 +614,12 @@ int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid,
}
}
+	if (vid >= OCELOT_RSV_VLAN_RANGE_START) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VLAN range 4000-4095 reserved for VLAN-unaware bridging");
+ return -EBUSY;
+ }
+
return 0;
}
EXPORT_SYMBOL(ocelot_vlan_prepare);
@@ -584,11 +683,11 @@ static void ocelot_vlan_init(struct ocelot *ocelot)
for (vid = 1; vid < VLAN_N_VID; vid++)
ocelot_vlant_set_mask(ocelot, vid, 0);
- /* Because VLAN filtering is enabled, we need VID 0 to get untagged
- * traffic. It is added automatically if 8021q module is loaded, but
- * we can't rely on it since module may be not loaded.
+ /* We need VID 0 to get traffic on standalone ports.
+ * It is added automatically if the 8021q module is loaded, but we
+ * can't rely on that since it might not be.
*/
- ocelot_vlant_set_mask(ocelot, OCELOT_VLAN_UNAWARE_PVID, all_ports);
+ ocelot_vlant_set_mask(ocelot, OCELOT_STANDALONE_PVID, all_ports);
/* Set vlan ingress filter mask to all ports but the CPU port by
* default.
@@ -1237,21 +1336,27 @@ void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp)
}
EXPORT_SYMBOL(ocelot_drain_cpu_queue);
-int ocelot_fdb_add(struct ocelot *ocelot, int port,
- const unsigned char *addr, u16 vid)
+int ocelot_fdb_add(struct ocelot *ocelot, int port, const unsigned char *addr,
+ u16 vid, const struct net_device *bridge)
{
int pgid = port;
if (port == ocelot->npi)
pgid = PGID_CPU;
+ if (!vid)
+ vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
+
return ocelot_mact_learn(ocelot, pgid, addr, vid, ENTRYTYPE_LOCKED);
}
EXPORT_SYMBOL(ocelot_fdb_add);
-int ocelot_fdb_del(struct ocelot *ocelot, int port,
- const unsigned char *addr, u16 vid)
+int ocelot_fdb_del(struct ocelot *ocelot, int port, const unsigned char *addr,
+ u16 vid, const struct net_device *bridge)
{
+ if (!vid)
+ vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
+
return ocelot_mact_forget(ocelot, addr, vid);
}
EXPORT_SYMBOL(ocelot_fdb_del);
@@ -1413,6 +1518,12 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port,
is_static = (entry.type == ENTRYTYPE_LOCKED);
+ /* Hide the reserved VLANs used for
+ * VLAN-unaware bridging.
+ */
+ if (entry.vid > OCELOT_RSV_VLAN_RANGE_START)
+ entry.vid = 0;
+
err = cb(entry.mac, entry.vid, is_static, data);
if (err)
break;
@@ -1472,9 +1583,9 @@ ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
trap->key.ipv6.dport.mask = 0xffff;
}
-static int ocelot_trap_add(struct ocelot *ocelot, int port,
- unsigned long cookie,
- void (*populate)(struct ocelot_vcap_filter *f))
+int ocelot_trap_add(struct ocelot *ocelot, int port,
+ unsigned long cookie, bool take_ts,
+ void (*populate)(struct ocelot_vcap_filter *f))
{
struct ocelot_vcap_block *block_vcap_is2;
struct ocelot_vcap_filter *trap;
@@ -1500,6 +1611,8 @@ static int ocelot_trap_add(struct ocelot *ocelot, int port,
trap->action.cpu_copy_ena = true;
trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
trap->action.port_mask = 0;
+ trap->take_ts = take_ts;
+ list_add_tail(&trap->trap_list, &ocelot->traps);
new = true;
}
@@ -1511,16 +1624,17 @@ static int ocelot_trap_add(struct ocelot *ocelot, int port,
err = ocelot_vcap_filter_replace(ocelot, trap);
if (err) {
trap->ingress_port_mask &= ~BIT(port);
- if (!trap->ingress_port_mask)
+ if (!trap->ingress_port_mask) {
+ list_del(&trap->trap_list);
kfree(trap);
+ }
return err;
}
return 0;
}
-static int ocelot_trap_del(struct ocelot *ocelot, int port,
- unsigned long cookie)
+int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie)
{
struct ocelot_vcap_block *block_vcap_is2;
struct ocelot_vcap_filter *trap;
@@ -1533,39 +1647,42 @@ static int ocelot_trap_del(struct ocelot *ocelot, int port,
return 0;
trap->ingress_port_mask &= ~BIT(port);
- if (!trap->ingress_port_mask)
+ if (!trap->ingress_port_mask) {
+ list_del(&trap->trap_list);
+
return ocelot_vcap_filter_del(ocelot, trap);
+ }
return ocelot_vcap_filter_replace(ocelot, trap);
}
static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port)
{
- unsigned long l2_cookie = ocelot->num_phys_ports + 1;
+ unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
- return ocelot_trap_add(ocelot, port, l2_cookie,
+ return ocelot_trap_add(ocelot, port, l2_cookie, true,
ocelot_populate_l2_ptp_trap_key);
}
static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port)
{
- unsigned long l2_cookie = ocelot->num_phys_ports + 1;
+ unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
return ocelot_trap_del(ocelot, port, l2_cookie);
}
static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port)
{
- unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2;
- unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3;
+ unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
int err;
- err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie,
+ err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie, true,
ocelot_populate_ipv4_ptp_event_trap_key);
if (err)
return err;
- err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie,
+ err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie, false,
ocelot_populate_ipv4_ptp_general_trap_key);
if (err)
ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
@@ -1575,8 +1692,8 @@ static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port)
static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port)
{
- unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2;
- unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3;
+ unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
int err;
err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
@@ -1586,16 +1703,16 @@ static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port)
static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port)
{
- unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4;
- unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5;
+ unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
int err;
- err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie,
+ err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie, true,
ocelot_populate_ipv6_ptp_event_trap_key);
if (err)
return err;
- err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie,
+ err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie, false,
ocelot_populate_ipv6_ptp_general_trap_key);
if (err)
ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
@@ -1605,8 +1722,8 @@ static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port)
static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port)
{
- unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4;
- unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5;
+ unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
int err;
err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
@@ -1750,28 +1867,36 @@ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
EXPORT_SYMBOL(ocelot_get_strings);
/* Caller must hold &ocelot->stats_lock */
-static void ocelot_update_stats(struct ocelot *ocelot)
+static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
{
- int i, j;
+ unsigned int idx = port * ocelot->num_stats;
+ struct ocelot_stats_region *region;
+	int err = 0, j;
- for (i = 0; i < ocelot->num_phys_ports; i++) {
- /* Configure the port to read the stats from */
- ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG);
+ /* Configure the port to read the stats from */
+ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), SYS_STAT_CFG);
- for (j = 0; j < ocelot->num_stats; j++) {
- u32 val;
- unsigned int idx = i * ocelot->num_stats + j;
+ list_for_each_entry(region, &ocelot->stats_regions, node) {
+ err = ocelot_bulk_read_rix(ocelot, SYS_COUNT_RX_OCTETS,
+ region->offset, region->buf,
+ region->count);
+ if (err)
+ return err;
- val = ocelot_read_rix(ocelot, SYS_COUNT_RX_OCTETS,
- ocelot->stats_layout[j].offset);
+ for (j = 0; j < region->count; j++) {
+ u64 *stat = &ocelot->stats[idx + j];
+ u64 val = region->buf[j];
- if (val < (ocelot->stats[idx] & U32_MAX))
- ocelot->stats[idx] += (u64)1 << 32;
+ if (val < (*stat & U32_MAX))
+ *stat += (u64)1 << 32;
- ocelot->stats[idx] = (ocelot->stats[idx] &
- ~(u64)U32_MAX) + val;
+ *stat = (*stat & ~(u64)U32_MAX) + val;
}
+
+ idx += region->count;
}
+
+ return err;
}
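
Worked example of the 32-to-64-bit extension above: if the stored stat is 0x1FFFFFFF0 (low word 0xFFFFFFF0) and the bulk read returns val = 0x10, then val < (*stat & U32_MAX), so 2^32 is added first; the final value, (0x2FFFFFFF0 & ~U32_MAX) + 0x10 = 0x200000010, correctly reflects an advance of 0x20 across the 32-bit hardware counter rollover.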
static void ocelot_check_stats_work(struct work_struct *work)
@@ -1779,29 +1904,40 @@ static void ocelot_check_stats_work(struct work_struct *work)
struct delayed_work *del_work = to_delayed_work(work);
struct ocelot *ocelot = container_of(del_work, struct ocelot,
stats_work);
+	int i, err = 0;
mutex_lock(&ocelot->stats_lock);
- ocelot_update_stats(ocelot);
+ for (i = 0; i < ocelot->num_phys_ports; i++) {
+ err = ocelot_port_update_stats(ocelot, i);
+ if (err)
+ break;
+ }
mutex_unlock(&ocelot->stats_lock);
+ if (err)
+ dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
+
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
}
void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
{
- int i;
+ int i, err;
mutex_lock(&ocelot->stats_lock);
/* check and update now */
- ocelot_update_stats(ocelot);
+ err = ocelot_port_update_stats(ocelot, port);
/* Copy all counters */
for (i = 0; i < ocelot->num_stats; i++)
*data++ = ocelot->stats[port * ocelot->num_stats + i];
mutex_unlock(&ocelot->stats_lock);
+
+ if (err)
+ dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
}
EXPORT_SYMBOL(ocelot_get_ethtool_stats);
@@ -1814,6 +1950,41 @@ int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
}
EXPORT_SYMBOL(ocelot_get_sset_count);
+static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
+{
+ struct ocelot_stats_region *region = NULL;
+ unsigned int last;
+ int i;
+
+ INIT_LIST_HEAD(&ocelot->stats_regions);
+
+ for (i = 0; i < ocelot->num_stats; i++) {
+ if (region && ocelot->stats_layout[i].offset == last + 1) {
+ region->count++;
+ } else {
+ region = devm_kzalloc(ocelot->dev, sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ region->offset = ocelot->stats_layout[i].offset;
+ region->count = 1;
+ list_add_tail(&region->node, &ocelot->stats_regions);
+ }
+
+ last = ocelot->stats_layout[i].offset;
+ }
+
+ list_for_each_entry(region, &ocelot->stats_regions, node) {
+ region->buf = devm_kcalloc(ocelot->dev, region->count,
+ sizeof(*region->buf), GFP_KERNEL);
+ if (!region->buf)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
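
For instance, with a hypothetical layout whose counters sit at offsets {0x00, 0x01, 0x02, 0x40, 0x41}, the loop above builds two regions — {offset 0x00, count 3} and {offset 0x40, count 2} — so each polling pass issues two bulk reads instead of five individual register reads.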
+
int ocelot_get_ts_info(struct ocelot *ocelot, int port,
struct ethtool_ts_info *info)
{
@@ -1847,6 +2018,8 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond)
u32 mask = 0;
int port;
+ lockdep_assert_held(&ocelot->fwd_domain_lock);
+
for (port = 0; port < ocelot->num_phys_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
@@ -1860,6 +2033,19 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond)
return mask;
}
+/* The logical port number of a LAG is equal to the lowest numbered physical
+ * port ID present in that LAG. It may change if that port ever leaves the LAG.
+ */
+static int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond)
+{
+ int bond_mask = ocelot_get_bond_mask(ocelot, bond);
+
+ if (!bond_mask)
+ return -ENOENT;
+
+ return __ffs(bond_mask);
+}
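
Since the logical port ID is defined as the lowest-numbered LAG member, __ffs() on the bond mask yields it directly, and the ID silently changes when that member leaves. A tiny illustration (userspace C, __builtin_ctz standing in for __ffs):

#include <stdio.h>

int main(void)
{
	/* Ports 3, 5 and 6 aggregated: the lowest set bit, 3, is the
	 * LAG's logical port ID. Once port 3 leaves, the ID becomes 5,
	 * which is why FDB entries pointing at the LAG must then be
	 * migrated (see ocelot_migrate_lag_fdbs() below).
	 */
	unsigned int mask = (1u << 3) | (1u << 5) | (1u << 6);

	printf("lag id = %d\n", __builtin_ctz(mask));		/* 3 */
	printf("after port 3 leaves = %d\n",
	       __builtin_ctz(mask & ~(1u << 3)));		/* 5 */
	return 0;
}
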
+
u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port)
{
struct ocelot_port *ocelot_port = ocelot->ports[src_port];
@@ -1979,6 +2165,28 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining)
}
EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask);
+void ocelot_port_set_dsa_8021q_cpu(struct ocelot *ocelot, int port)
+{
+ u16 vid;
+
+ ocelot->ports[port]->is_dsa_8021q_cpu = true;
+
+ for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
+ ocelot_vlan_member_add(ocelot, port, vid, true);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_set_dsa_8021q_cpu);
+
+void ocelot_port_unset_dsa_8021q_cpu(struct ocelot *ocelot, int port)
+{
+ u16 vid;
+
+ ocelot->ports[port]->is_dsa_8021q_cpu = false;
+
+ for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
+ ocelot_vlan_member_del(ocelot, port, vid);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_unset_dsa_8021q_cpu);
+
void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
@@ -2123,7 +2331,8 @@ static void ocelot_encode_ports_to_mdb(unsigned char *addr,
}
int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ const struct net_device *bridge)
{
unsigned char addr[ETH_ALEN];
struct ocelot_multicast *mc;
@@ -2133,6 +2342,9 @@ int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
if (port == ocelot->npi)
port = ocelot->num_phys_ports;
+ if (!vid)
+ vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
+
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc) {
/* New entry */
@@ -2179,7 +2391,8 @@ int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
EXPORT_SYMBOL(ocelot_port_mdb_add);
int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ const struct net_device *bridge)
{
unsigned char addr[ETH_ALEN];
struct ocelot_multicast *mc;
@@ -2189,6 +2402,9 @@ int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
if (port == ocelot->npi)
port = ocelot->num_phys_ports;
+ if (!vid)
+ vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
+
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc)
return -ENOENT;
@@ -2222,18 +2438,30 @@ int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL(ocelot_port_mdb_del);
-void ocelot_port_bridge_join(struct ocelot *ocelot, int port,
- struct net_device *bridge)
+int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
+ struct net_device *bridge, int bridge_num,
+ struct netlink_ext_ack *extack)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int err;
+
+ err = ocelot_single_vlan_aware_bridge(ocelot, extack);
+ if (err)
+ return err;
mutex_lock(&ocelot->fwd_domain_lock);
ocelot_port->bridge = bridge;
+ ocelot_port->bridge_num = bridge_num;
ocelot_apply_bridge_fwd_mask(ocelot, true);
mutex_unlock(&ocelot->fwd_domain_lock);
+
+ if (br_vlan_enabled(bridge))
+ return 0;
+
+ return ocelot_add_vlan_unaware_pvid(ocelot, port, bridge);
}
EXPORT_SYMBOL(ocelot_port_bridge_join);
@@ -2244,7 +2472,11 @@ void ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
mutex_lock(&ocelot->fwd_domain_lock);
+ if (!br_vlan_enabled(bridge))
+ ocelot_del_vlan_unaware_pvid(ocelot, port, bridge);
+
ocelot_port->bridge = NULL;
+ ocelot_port->bridge_num = -1;
ocelot_port_set_pvid(ocelot, port, NULL);
ocelot_port_manage_port_tag(ocelot, port);
@@ -2353,7 +2585,7 @@ static void ocelot_setup_logical_port_ids(struct ocelot *ocelot)
bond = ocelot_port->bond;
if (bond) {
- int lag = __ffs(ocelot_get_bond_mask(ocelot, bond));
+ int lag = ocelot_bond_get_id(ocelot, bond);
ocelot_rmw_gix(ocelot,
ANA_PORT_PORT_CFG_PORTID_VAL(lag),
@@ -2368,6 +2600,46 @@ static void ocelot_setup_logical_port_ids(struct ocelot *ocelot)
}
}
+/* Documentation for PORTID_VAL says:
+ * Logical port number for front port. If port is not a member of a LLAG,
+ * then PORTID must be set to the physical port number.
+ * If port is a member of a LLAG, then PORTID must be set to the common
+ * PORTID_VAL used for all member ports of the LLAG.
+ * The value must not exceed the number of physical ports on the device.
+ *
+ * This means we have little choice but to migrate FDB entries pointing towards
+ * a logical port when that changes.
+ */
+static void ocelot_migrate_lag_fdbs(struct ocelot *ocelot,
+ struct net_device *bond,
+ int lag)
+{
+ struct ocelot_lag_fdb *fdb;
+ int err;
+
+ lockdep_assert_held(&ocelot->fwd_domain_lock);
+
+ list_for_each_entry(fdb, &ocelot->lag_fdbs, list) {
+ if (fdb->bond != bond)
+ continue;
+
+ err = ocelot_mact_forget(ocelot, fdb->addr, fdb->vid);
+ if (err) {
+ dev_err(ocelot->dev,
+ "failed to delete LAG %s FDB %pM vid %d: %pe\n",
+ bond->name, fdb->addr, fdb->vid, ERR_PTR(err));
+ }
+
+ err = ocelot_mact_learn(ocelot, lag, fdb->addr, fdb->vid,
+ ENTRYTYPE_LOCKED);
+ if (err) {
+ dev_err(ocelot->dev,
+ "failed to migrate LAG %s FDB %pM vid %d: %pe\n",
+ bond->name, fdb->addr, fdb->vid, ERR_PTR(err));
+ }
+ }
+}
+
int ocelot_port_lag_join(struct ocelot *ocelot, int port,
struct net_device *bond,
struct netdev_lag_upper_info *info)
@@ -2392,14 +2664,23 @@ EXPORT_SYMBOL(ocelot_port_lag_join);
void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
struct net_device *bond)
{
+ int old_lag_id, new_lag_id;
+
mutex_lock(&ocelot->fwd_domain_lock);
+ old_lag_id = ocelot_bond_get_id(ocelot, bond);
+
ocelot->ports[port]->bond = NULL;
ocelot_setup_logical_port_ids(ocelot);
ocelot_apply_bridge_fwd_mask(ocelot, false);
ocelot_set_aggr_pgids(ocelot);
+ new_lag_id = ocelot_bond_get_id(ocelot, bond);
+
+ if (new_lag_id >= 0 && old_lag_id != new_lag_id)
+ ocelot_migrate_lag_fdbs(ocelot, bond, new_lag_id);
+
mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_lag_leave);
@@ -2408,13 +2689,83 @@ void ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot_port->lag_tx_active = lag_tx_active;
/* Rebalance the LAGs */
ocelot_set_aggr_pgids(ocelot);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_lag_change);
+int ocelot_lag_fdb_add(struct ocelot *ocelot, struct net_device *bond,
+ const unsigned char *addr, u16 vid,
+ const struct net_device *bridge)
+{
+ struct ocelot_lag_fdb *fdb;
+ int lag, err;
+
+ fdb = kzalloc(sizeof(*fdb), GFP_KERNEL);
+ if (!fdb)
+ return -ENOMEM;
+
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ if (!vid)
+ vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
+
+ ether_addr_copy(fdb->addr, addr);
+ fdb->vid = vid;
+ fdb->bond = bond;
+
+ lag = ocelot_bond_get_id(ocelot, bond);
+
+ err = ocelot_mact_learn(ocelot, lag, addr, vid, ENTRYTYPE_LOCKED);
+ if (err) {
+ mutex_unlock(&ocelot->fwd_domain_lock);
+ kfree(fdb);
+ return err;
+ }
+
+ list_add_tail(&fdb->list, &ocelot->lag_fdbs);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_lag_fdb_add);
+
+int ocelot_lag_fdb_del(struct ocelot *ocelot, struct net_device *bond,
+ const unsigned char *addr, u16 vid,
+ const struct net_device *bridge)
+{
+ struct ocelot_lag_fdb *fdb, *tmp;
+
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ if (!vid)
+ vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
+
+ list_for_each_entry_safe(fdb, tmp, &ocelot->lag_fdbs, list) {
+ if (!ether_addr_equal(fdb->addr, addr) || fdb->vid != vid ||
+ fdb->bond != bond)
+ continue;
+
+ ocelot_mact_forget(ocelot, addr, vid);
+ list_del(&fdb->list);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+ kfree(fdb);
+
+ return 0;
+ }
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(ocelot_lag_fdb_del);
+
/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
* The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
* In the special case that it's the NPI port that we're configuring, the
@@ -2535,6 +2886,9 @@ EXPORT_SYMBOL(ocelot_port_pre_bridge_flags);
void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
struct switchdev_brport_flags flags)
{
+ if (port == ocelot->npi)
+ port = ocelot->num_phys_ports;
+
if (flags.mask & BR_LEARNING)
ocelot_port_set_learning(ocelot, port,
!!(flags.val & BR_LEARNING));
@@ -2553,6 +2907,198 @@ void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL(ocelot_port_bridge_flags);
+int ocelot_port_get_default_prio(struct ocelot *ocelot, int port)
+{
+ int val = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port);
+
+ return ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(val);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_default_prio);
+
+int ocelot_port_set_default_prio(struct ocelot *ocelot, int port, u8 prio)
+{
+ if (prio >= OCELOT_NUM_TC)
+ return -ERANGE;
+
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL(prio),
+ ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_M,
+ ANA_PORT_QOS_CFG,
+ port);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_set_default_prio);
+
+int ocelot_port_get_dscp_prio(struct ocelot *ocelot, int port, u8 dscp)
+{
+ int qos_cfg = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port);
+ int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp);
+
+ /* Return error if DSCP prioritization isn't enabled */
+ if (!(qos_cfg & ANA_PORT_QOS_CFG_QOS_DSCP_ENA))
+ return -EOPNOTSUPP;
+
+ if (qos_cfg & ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA) {
+ dscp = ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_X(dscp_cfg);
+ /* Re-read ANA_DSCP_CFG for the translated DSCP */
+ dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp);
+ }
+
+ /* If the DSCP value is not trusted, the QoS classification falls back
+ * to VLAN PCP or port-based default.
+ */
+ if (!(dscp_cfg & ANA_DSCP_CFG_DSCP_TRUST_ENA))
+ return -EOPNOTSUPP;
+
+ return ANA_DSCP_CFG_QOS_DSCP_VAL_X(dscp_cfg);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_dscp_prio);
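
The getter above is a two-stage walk: the raw DSCP may first be translated to another code point, and only a trusted (post-translation) code point yields a QoS class; otherwise classification falls back to PCP or the port default. A simplified model with the register fields flattened into a plain table (illustrative only):

#include <stdbool.h>

struct dscp_cfg {
	bool trust;			/* DSCP_TRUST_ENA */
	unsigned int translate_to;	/* DSCP_TRANSLATE_VAL */
	unsigned int qos;		/* QOS_DSCP_VAL */
};

/* Returns the QoS class for @dscp, or -1 when classification would
 * fall back to VLAN PCP or the port-based default.
 */
static int dscp_to_prio(const struct dscp_cfg table[64], bool dscp_ena,
			bool translate_ena, unsigned int dscp)
{
	if (!dscp_ena)
		return -1;

	if (translate_ena)
		dscp = table[dscp].translate_to;	/* second walk */

	return table[dscp].trust ? (int)table[dscp].qos : -1;
}
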
+
+int ocelot_port_add_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio)
+{
+ int mask, val;
+
+ if (prio >= OCELOT_NUM_TC)
+ return -ERANGE;
+
+ /* There is at least one app table priority (this one), so we need to
+ * make sure DSCP prioritization is enabled on the port.
+ * Also make sure DSCP translation is disabled
+ * (dcbnl doesn't support it).
+ */
+ mask = ANA_PORT_QOS_CFG_QOS_DSCP_ENA |
+ ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA;
+
+ ocelot_rmw_gix(ocelot, ANA_PORT_QOS_CFG_QOS_DSCP_ENA, mask,
+ ANA_PORT_QOS_CFG, port);
+
+ /* Trust this DSCP value and map it to the given QoS class */
+ val = ANA_DSCP_CFG_DSCP_TRUST_ENA | ANA_DSCP_CFG_QOS_DSCP_VAL(prio);
+
+ ocelot_write_rix(ocelot, val, ANA_DSCP_CFG, dscp);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_add_dscp_prio);
+
+int ocelot_port_del_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio)
+{
+ int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp);
+ int mask, i;
+
+ /* During a "dcb app replace" command, the new app table entry will be
+ * added first, then the old one will be deleted. But the hardware only
+ * supports one QoS class per DSCP value (duh), so if we blindly delete
+ * the app table entry for this DSCP value, we end up deleting the
+ * entry with the new priority. Avoid that by checking whether user
+ * space wants to delete the priority which is currently configured, or
+ * something else which is no longer current.
+ */
+ if (ANA_DSCP_CFG_QOS_DSCP_VAL_X(dscp_cfg) != prio)
+ return 0;
+
+ /* Untrust this DSCP value */
+ ocelot_write_rix(ocelot, 0, ANA_DSCP_CFG, dscp);
+
+ for (i = 0; i < 64; i++) {
+ int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, i);
+
+ /* There are still app table entries on the port, so we need to
+		 * keep DSCP classification enabled; nothing more to do.
+ */
+ if (dscp_cfg & ANA_DSCP_CFG_DSCP_TRUST_ENA)
+ return 0;
+ }
+
+ /* Disable DSCP QoS classification if there isn't any trusted
+ * DSCP value left.
+ */
+ mask = ANA_PORT_QOS_CFG_QOS_DSCP_ENA |
+ ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA;
+
+ ocelot_rmw_gix(ocelot, 0, mask, ANA_PORT_QOS_CFG, port);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_del_dscp_prio);
+
+struct ocelot_mirror *ocelot_mirror_get(struct ocelot *ocelot, int to,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot_mirror *m = ocelot->mirror;
+
+ if (m) {
+ if (m->to != to) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Mirroring already configured towards different egress port");
+ return ERR_PTR(-EBUSY);
+ }
+
+ refcount_inc(&m->refcount);
+ return m;
+ }
+
+ m = kzalloc(sizeof(*m), GFP_KERNEL);
+ if (!m)
+ return ERR_PTR(-ENOMEM);
+
+ m->to = to;
+ refcount_set(&m->refcount, 1);
+ ocelot->mirror = m;
+
+ /* Program the mirror port to hardware */
+ ocelot_write(ocelot, BIT(to), ANA_MIRRORPORTS);
+
+ return m;
+}
+
+void ocelot_mirror_put(struct ocelot *ocelot)
+{
+ struct ocelot_mirror *m = ocelot->mirror;
+
+ if (!refcount_dec_and_test(&m->refcount))
+ return;
+
+ ocelot_write(ocelot, 0, ANA_MIRRORPORTS);
+ ocelot->mirror = NULL;
+ kfree(m);
+}
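
ocelot_mirror_get()/ocelot_mirror_put() form a refcounted singleton: the switch has a single mirror destination, so every user must agree on the egress port, and ANA_MIRRORPORTS is only cleared when the last user drops its reference. The pattern in isolation (locking and the extack plumbing elided):

#include <stdlib.h>

struct mirror { int to; int refcount; };
static struct mirror *the_mirror;	/* at most one per switch */

static struct mirror *mirror_get(int to)
{
	if (the_mirror) {
		if (the_mirror->to != to)
			return NULL;	/* driver returns -EBUSY here */
		the_mirror->refcount++;
		return the_mirror;
	}

	the_mirror = calloc(1, sizeof(*the_mirror));
	if (!the_mirror)
		return NULL;

	the_mirror->to = to;
	the_mirror->refcount = 1;
	/* hardware: ANA_MIRRORPORTS = BIT(to) */
	return the_mirror;
}

static void mirror_put(void)
{
	if (--the_mirror->refcount)
		return;

	/* hardware: ANA_MIRRORPORTS = 0 */
	free(the_mirror);
	the_mirror = NULL;
}
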
+
+int ocelot_port_mirror_add(struct ocelot *ocelot, int from, int to,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct ocelot_mirror *m = ocelot_mirror_get(ocelot, to, extack);
+
+ if (IS_ERR(m))
+ return PTR_ERR(m);
+
+ if (ingress) {
+ ocelot_rmw_gix(ocelot, ANA_PORT_PORT_CFG_SRC_MIRROR_ENA,
+ ANA_PORT_PORT_CFG_SRC_MIRROR_ENA,
+ ANA_PORT_PORT_CFG, from);
+ } else {
+ ocelot_rmw(ocelot, BIT(from), BIT(from),
+ ANA_EMIRRORPORTS);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_mirror_add);
+
+void ocelot_port_mirror_del(struct ocelot *ocelot, int from, bool ingress)
+{
+ if (ingress) {
+ ocelot_rmw_gix(ocelot, 0, ANA_PORT_PORT_CFG_SRC_MIRROR_ENA,
+ ANA_PORT_PORT_CFG, from);
+ } else {
+ ocelot_rmw(ocelot, 0, BIT(from), ANA_EMIRRORPORTS);
+ }
+
+ ocelot_mirror_put(ocelot);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_mirror_del);
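
Both directions above are read-modify-write updates of a mask: ingress mirroring toggles the per-port SRC_MIRROR_ENA bit in ANA_PORT_PORT_CFG, while egress mirroring toggles the port's bit in the shared ANA_EMIRRORPORTS mask. The rmw shape they rely on, shown standalone:

#include <stdint.h>

/* Clear @mask, then OR in @val: setting a bit passes val == mask,
 * clearing it passes val == 0, exactly how the add/del paths above
 * drive ocelot_rmw()/ocelot_rmw_gix().
 */
static uint32_t rmw(uint32_t reg, uint32_t val, uint32_t mask)
{
	return (reg & ~mask) | (val & mask);
}
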
+
void ocelot_init_port(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
@@ -2647,7 +3193,7 @@ static void ocelot_cpu_port_init(struct ocelot *ocelot)
/* Configure the CPU port to be VLAN aware */
ocelot_write_gix(ocelot,
- ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_VLAN_UNAWARE_PVID) |
+ ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_STANDALONE_PVID) |
ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
ANA_PORT_VLAN_CFG, cpu);
@@ -2709,6 +3255,7 @@ int ocelot_init(struct ocelot *ocelot)
INIT_LIST_HEAD(&ocelot->multicast);
INIT_LIST_HEAD(&ocelot->pgids);
INIT_LIST_HEAD(&ocelot->vlans);
+ INIT_LIST_HEAD(&ocelot->lag_fdbs);
ocelot_detect_features(ocelot);
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
@@ -2814,6 +3361,13 @@ int ocelot_init(struct ocelot *ocelot)
ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
ANA_CPUQ_8021_CFG, i);
+ ret = ocelot_prepare_stats_regions(ocelot);
+ if (ret) {
+ destroy_workqueue(ocelot->stats_queue);
+ destroy_workqueue(ocelot->owq);
+ return ret;
+ }
+
INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index bf4eff6d7086..d0fa8ab6cc81 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -21,11 +21,12 @@
#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot_ptp.h>
+#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot.h>
#include "ocelot_rew.h"
#include "ocelot_qs.h"
-#define OCELOT_VLAN_UNAWARE_PVID 0
+#define OCELOT_STANDALONE_PVID 0
#define OCELOT_BUFFER_CELL_SZ 60
#define OCELOT_STATS_CHECK_DELAY (2 * HZ)
@@ -37,7 +38,8 @@
struct ocelot_port_tc {
bool block_shared;
unsigned long offload_cnt;
-
+ unsigned long ingress_mirred_id;
+ unsigned long egress_mirred_id;
unsigned long police_id;
};
@@ -80,6 +82,9 @@ struct ocelot_multicast {
struct ocelot_pgid *pgid;
};
+int ocelot_bridge_num_find(struct ocelot *ocelot,
+ const struct net_device *bridge);
+
int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
bool is_static, void *data);
int ocelot_mact_learn(struct ocelot *ocelot, int port,
@@ -102,6 +107,15 @@ int ocelot_port_devlink_init(struct ocelot *ocelot, int port,
enum devlink_port_flavour flavour);
void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port);
+int ocelot_trap_add(struct ocelot *ocelot, int port,
+ unsigned long cookie, bool take_ts,
+ void (*populate)(struct ocelot_vcap_filter *f));
+int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie);
+
+struct ocelot_mirror *ocelot_mirror_get(struct ocelot *ocelot, int to,
+ struct netlink_ext_ack *extack);
+void ocelot_mirror_put(struct ocelot *ocelot);
+
extern struct notifier_block ocelot_netdevice_nb;
extern struct notifier_block ocelot_switchdev_nb;
extern struct notifier_block ocelot_switchdev_blocking_nb;
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 949858891973..03b5e59d033e 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -6,6 +6,7 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <soc/mscc/ocelot_vcap.h>
+#include "ocelot_police.h"
#include "ocelot_vcap.h"
/* Arbitrarily chosen constants for encoding the VCAP block and lookup number
@@ -60,6 +61,12 @@ static int ocelot_chain_to_block(int chain, bool ingress)
*/
static int ocelot_chain_to_lookup(int chain)
{
+ /* Backwards compatibility with older, single-chain tc-flower
+ * offload support in Ocelot
+ */
+ if (chain == 0)
+ return 0;
+
return (chain / VCAP_LOOKUP) % 10;
}
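
With multi-chain support, the tc chain number encodes both the VCAP lookup and the PAG, and chain 0 is special-cased so older single-chain users keep hitting lookup 0 / PAG 0. A toy decoder with the same shape (the VCAP_LOOKUP value here is an assumption; only the arithmetic mirrors the driver):

#include <stdio.h>

#define VCAP_LOOKUP 100000	/* assumed spacing between lookups */

static int chain_to_lookup(int chain)
{
	if (chain == 0)
		return 0;	/* legacy single-chain offload */

	return (chain / VCAP_LOOKUP) % 10;
}

int main(void)
{
	/* chain 0 -> lookup 0; chain 200000 -> lookup 2 */
	printf("%d %d\n", chain_to_lookup(0), chain_to_lookup(200000));
	return 0;
}
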
@@ -68,7 +75,15 @@ static int ocelot_chain_to_lookup(int chain)
*/
static int ocelot_chain_to_pag(int chain)
{
- int lookup = ocelot_chain_to_lookup(chain);
+ int lookup;
+
+ /* Backwards compatibility with older, single-chain tc-flower
+ * offload support in Ocelot
+ */
+ if (chain == 0)
+ return 0;
+
+ lookup = ocelot_chain_to_lookup(chain);
/* calculate PAG value as chain index relative to the first PAG */
return chain - VCAP_IS2_CHAIN(lookup, 0);
@@ -217,6 +232,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
bool ingress, struct flow_cls_offload *f,
struct ocelot_vcap_filter *filter)
{
+ const struct flow_action *action = &f->rule->action;
struct netlink_ext_ack *extack = f->common.extack;
bool allow_missing_goto_target = false;
const struct flow_action_entry *a;
@@ -244,7 +260,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->goto_target = -1;
filter->type = OCELOT_VCAP_FILTER_DUMMY;
- flow_action_for_each(i, a, &f->rule->action) {
+ flow_action_for_each(i, a, action) {
switch (a->id) {
case FLOW_ACTION_DROP:
if (filter->block_id != VCAP_IS2) {
@@ -279,6 +295,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->action.cpu_copy_ena = true;
filter->action.cpu_qu_num = 0;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ list_add_tail(&filter->trap_list, &ocelot->traps);
break;
case FLOW_ACTION_POLICE:
if (filter->block_id == PSFP_BLOCK_ID) {
@@ -296,11 +313,11 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
"Last action must be GOTO");
return -EOPNOTSUPP;
}
- if (a->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
+
+ err = ocelot_policer_validate(action, a, extack);
+ if (err)
+ return err;
+
filter->action.police_ena = true;
pol_ix = a->hw_index + ocelot->vcap_pol.base;
@@ -342,6 +359,27 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->action.port_mask = BIT(egress_port);
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
+ case FLOW_ACTION_MIRRED:
+ if (filter->block_id != VCAP_IS2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Mirror action can only be offloaded to VCAP IS2");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ egress_port = ocelot->ops->netdev_to_port(a->dev);
+ if (egress_port < 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not an ocelot port");
+ return -EOPNOTSUPP;
+ }
+ filter->egress_port.value = egress_port;
+ filter->action.mirror_ena = true;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
case FLOW_ACTION_VLAN_POP:
if (filter->block_id != VCAP_IS1) {
NL_SET_ERR_MSG_MOD(extack,
@@ -840,6 +878,8 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
ret = ocelot_flower_parse(ocelot, port, ingress, f, filter);
if (ret) {
+ if (!list_empty(&filter->trap_list))
+ list_del(&filter->trap_list);
kfree(filter);
return ret;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c
index 7390fa3980ec..2067382d0ee1 100644
--- a/drivers/net/ethernet/mscc/ocelot_io.c
+++ b/drivers/net/ethernet/mscc/ocelot_io.c
@@ -10,6 +10,19 @@
#include "ocelot.h"
+int __ocelot_bulk_read_ix(struct ocelot *ocelot, u32 reg, u32 offset, void *buf,
+ int count)
+{
+ u16 target = reg >> TARGET_OFFSET;
+
+ WARN_ON(!target);
+
+ return regmap_bulk_read(ocelot->targets[target],
+ ocelot->map[target][reg & REG_MASK] + offset,
+ buf, count);
+}
+EXPORT_SYMBOL_GPL(__ocelot_bulk_read_ix);
+
u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset)
{
u16 target = reg >> TARGET_OFFSET;
diff --git a/drivers/net/ethernet/mscc/ocelot_mrp.c b/drivers/net/ethernet/mscc/ocelot_mrp.c
index 1fa58546abdc..3ccec488a304 100644
--- a/drivers/net/ethernet/mscc/ocelot_mrp.c
+++ b/drivers/net/ethernet/mscc/ocelot_mrp.c
@@ -60,7 +60,7 @@ static int ocelot_mrp_redirect_add_vcap(struct ocelot *ocelot, int src_port,
filter->key_type = OCELOT_VCAP_KEY_ETYPE;
filter->prio = 1;
- filter->id.cookie = src_port;
+ filter->id.cookie = OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, src_port);
filter->id.tc_offload = false;
filter->block_id = VCAP_IS2;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -77,55 +77,46 @@ static int ocelot_mrp_redirect_add_vcap(struct ocelot *ocelot, int src_port,
return err;
}
-static int ocelot_mrp_copy_add_vcap(struct ocelot *ocelot, int port,
- int prio, unsigned long cookie)
+static void ocelot_populate_mrp_trap_key(struct ocelot_vcap_filter *filter)
{
const u8 mrp_mask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
- struct ocelot_vcap_filter *filter;
- int err;
-
- filter = kzalloc(sizeof(*filter), GFP_KERNEL);
- if (!filter)
- return -ENOMEM;
- filter->key_type = OCELOT_VCAP_KEY_ETYPE;
- filter->prio = prio;
- filter->id.cookie = cookie;
- filter->id.tc_offload = false;
- filter->block_id = VCAP_IS2;
- filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
- filter->ingress_port_mask = BIT(port);
	/* Here it is possible to use control or test dmac because the mask
* doesn't cover the LSB
*/
ether_addr_copy(filter->key.etype.dmac.value, mrp_test_dmac);
ether_addr_copy(filter->key.etype.dmac.mask, mrp_mask);
- filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
- filter->action.port_mask = 0x0;
- filter->action.cpu_copy_ena = true;
- filter->action.cpu_qu_num = OCELOT_MRP_CPUQ;
+}
- err = ocelot_vcap_filter_add(ocelot, filter, NULL);
- if (err)
- kfree(filter);
+static int ocelot_mrp_trap_add(struct ocelot *ocelot, int port)
+{
+ unsigned long cookie = OCELOT_VCAP_IS2_MRP_TRAP(ocelot);
- return err;
+ return ocelot_trap_add(ocelot, port, cookie, false,
+ ocelot_populate_mrp_trap_key);
+}
+
+static int ocelot_mrp_trap_del(struct ocelot *ocelot, int port)
+{
+ unsigned long cookie = OCELOT_VCAP_IS2_MRP_TRAP(ocelot);
+
+ return ocelot_trap_del(ocelot, port, cookie);
}
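
The refactor also replaces the ad-hoc cookies (port, then port + num_phys_ports) with dedicated cookie macros, so each class of IS2 rule owns a distinct, non-overlapping cookie range. The real OCELOT_VCAP_IS2_MRP_* definitions live elsewhere in the tree; a hypothetical layout just to show the intent:

/* Hypothetical cookie windows: per-port MRP redirect rules occupy
 * cookies 0..nports-1, and the single shared MRP trap rule takes
 * the next slot, so the two can never alias.
 */
#define MRP_REDIRECT_COOKIE(nports, port)	(port)
#define MRP_TRAP_COOKIE(nports)			(nports)
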
static void ocelot_mrp_save_mac(struct ocelot *ocelot,
struct ocelot_port *port)
{
ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_test_dmac,
- OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
+ OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED);
ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_control_dmac,
- OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
+ OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED);
}
static void ocelot_mrp_del_mac(struct ocelot *ocelot,
struct ocelot_port *port)
{
- ocelot_mact_forget(ocelot, mrp_test_dmac, OCELOT_VLAN_UNAWARE_PVID);
- ocelot_mact_forget(ocelot, mrp_control_dmac, OCELOT_VLAN_UNAWARE_PVID);
+ ocelot_mact_forget(ocelot, mrp_test_dmac, OCELOT_STANDALONE_PVID);
+ ocelot_mact_forget(ocelot, mrp_control_dmac, OCELOT_STANDALONE_PVID);
}
int ocelot_mrp_add(struct ocelot *ocelot, int port,
@@ -186,7 +177,7 @@ int ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port,
ocelot_mrp_save_mac(ocelot, ocelot_port);
if (mrp->ring_role != BR_MRP_RING_ROLE_MRC)
- return ocelot_mrp_copy_add_vcap(ocelot, port, 1, port);
+ return ocelot_mrp_trap_add(ocelot, port);
dst_port = ocelot_mrp_find_partner_port(ocelot, ocelot_port);
if (dst_port == -1)
@@ -196,10 +187,10 @@ int ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port,
if (err)
return err;
- err = ocelot_mrp_copy_add_vcap(ocelot, port, 2,
- port + ocelot->num_phys_ports);
+ err = ocelot_mrp_trap_add(ocelot, port);
if (err) {
- ocelot_mrp_del_vcap(ocelot, port);
+ ocelot_mrp_del_vcap(ocelot,
+ OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port));
return err;
}
@@ -211,7 +202,7 @@ int ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port,
const struct switchdev_obj_ring_role_mrp *mrp)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- int i;
+ int err, i;
if (!ocelot_port)
return -EOPNOTSUPP;
@@ -222,8 +213,11 @@ int ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port,
if (ocelot_port->mrp_ring_id != mrp->ring_id)
return 0;
- ocelot_mrp_del_vcap(ocelot, port);
- ocelot_mrp_del_vcap(ocelot, port + ocelot->num_phys_ports);
+ err = ocelot_mrp_trap_del(ocelot, port);
+ if (err)
+ return err;
+
+ ocelot_mrp_del_vcap(ocelot, OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port));
for (i = 0; i < ocelot->num_phys_ports; ++i) {
ocelot_port = ocelot->ports[i];
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index e271b6225b72..247bc105bdd2 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -14,11 +14,14 @@
#include <linux/phy/phy.h>
#include <net/pkt_cls.h>
#include "ocelot.h"
+#include "ocelot_police.h"
#include "ocelot_vcap.h"
#include "ocelot_fdma.h"
#define OCELOT_MAC_QUIRKS OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP
+static bool ocelot_netdevice_dev_check(const struct net_device *dev);
+
static struct ocelot *devlink_port_to_ocelot(struct devlink_port *dlp)
{
return devlink_priv(dlp->devlink);
@@ -215,14 +218,14 @@ int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
}
}
-static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
- struct tc_cls_matchall_offload *f,
- bool ingress)
+static int ocelot_setup_tc_cls_matchall_police(struct ocelot_port_private *priv,
+ struct tc_cls_matchall_offload *f,
+ bool ingress,
+ struct netlink_ext_ack *extack)
{
- struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_action_entry *action = &f->rule->action.entries[0];
struct ocelot *ocelot = priv->port.ocelot;
struct ocelot_policer pol = { 0 };
- struct flow_action_entry *action;
int port = priv->chip_port;
int err;
@@ -231,6 +234,119 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
return -EOPNOTSUPP;
}
+ if (priv->tc.police_id && priv->tc.police_id != f->cookie) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one policer per port is supported");
+ return -EEXIST;
+ }
+
+ err = ocelot_policer_validate(&f->rule->action, action, extack);
+ if (err)
+ return err;
+
+ pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8;
+ pol.burst = action->police.burst;
+
+ err = ocelot_port_policer_add(ocelot, port, &pol);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Could not add policer");
+ return err;
+ }
+
+ priv->tc.police_id = f->cookie;
+ priv->tc.offload_cnt++;
+
+ return 0;
+}
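
The policer rate conversion above turns tc's bytes-per-second into the kilobits-per-second unit the hardware policer takes (divide by 1000, multiply by 8). For example:

#include <stdint.h>

static uint32_t bytes_ps_to_kbps(uint64_t rate_bytes_ps)
{
	/* e.g. 12,500,000 B/s -> 100,000 kbit/s (100 Mbit/s) */
	return (uint32_t)(rate_bytes_ps / 1000) * 8;
}
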
+
+static int ocelot_setup_tc_cls_matchall_mirred(struct ocelot_port_private *priv,
+ struct tc_cls_matchall_offload *f,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_action *action = &f->rule->action;
+ struct ocelot *ocelot = priv->port.ocelot;
+ struct ocelot_port_private *other_priv;
+ const struct flow_action_entry *a;
+ int err;
+
+ if (f->common.protocol != htons(ETH_P_ALL))
+ return -EOPNOTSUPP;
+
+ if (!flow_action_basic_hw_stats_check(action, extack))
+ return -EOPNOTSUPP;
+
+ a = &action->entries[0];
+ if (!a->dev)
+ return -EINVAL;
+
+ if (!ocelot_netdevice_dev_check(a->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not an ocelot port");
+ return -EOPNOTSUPP;
+ }
+
+ other_priv = netdev_priv(a->dev);
+
+ err = ocelot_port_mirror_add(ocelot, priv->chip_port,
+ other_priv->chip_port, ingress, extack);
+ if (err)
+ return err;
+
+ if (ingress)
+ priv->tc.ingress_mirred_id = f->cookie;
+ else
+ priv->tc.egress_mirred_id = f->cookie;
+ priv->tc.offload_cnt++;
+
+ return 0;
+}
+
+static int ocelot_del_tc_cls_matchall_police(struct ocelot_port_private *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+ int err;
+
+ err = ocelot_port_policer_del(ocelot, port);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Could not delete policer");
+ return err;
+ }
+
+ priv->tc.police_id = 0;
+ priv->tc.offload_cnt--;
+
+ return 0;
+}
+
+static int ocelot_del_tc_cls_matchall_mirred(struct ocelot_port_private *priv,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ ocelot_port_mirror_del(ocelot, port, ingress);
+
+ if (ingress)
+ priv->tc.ingress_mirred_id = 0;
+ else
+ priv->tc.egress_mirred_id = 0;
+ priv->tc.offload_cnt--;
+
+ return 0;
+}
+
+static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_action_entry *action;
+
switch (f->command) {
case TC_CLSMATCHALL_REPLACE:
if (!flow_offload_has_one_action(&f->rule->action)) {
@@ -241,54 +357,41 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
if (priv->tc.block_shared) {
NL_SET_ERR_MSG_MOD(extack,
- "Rate limit is not supported on shared blocks");
+ "Matchall offloads not supported on shared blocks");
return -EOPNOTSUPP;
}
action = &f->rule->action.entries[0];
- if (action->id != FLOW_ACTION_POLICE) {
+ switch (action->id) {
+ case FLOW_ACTION_POLICE:
+ return ocelot_setup_tc_cls_matchall_police(priv, f,
+ ingress,
+ extack);
+ break;
+ case FLOW_ACTION_MIRRED:
+ return ocelot_setup_tc_cls_matchall_mirred(priv, f,
+ ingress,
+ extack);
+ default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
return -EOPNOTSUPP;
}
- if (priv->tc.police_id && priv->tc.police_id != f->cookie) {
- NL_SET_ERR_MSG_MOD(extack,
- "Only one policer per port is supported");
- return -EEXIST;
- }
-
- if (action->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
-
- pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8;
- pol.burst = action->police.burst;
-
- err = ocelot_port_policer_add(ocelot, port, &pol);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Could not add policer");
- return err;
- }
-
- priv->tc.police_id = f->cookie;
- priv->tc.offload_cnt++;
- return 0;
+ break;
case TC_CLSMATCHALL_DESTROY:
- if (priv->tc.police_id != f->cookie)
+ action = &f->rule->action.entries[0];
+
+ if (f->cookie == priv->tc.police_id)
+ return ocelot_del_tc_cls_matchall_police(priv, extack);
+ else if (f->cookie == priv->tc.ingress_mirred_id ||
+ f->cookie == priv->tc.egress_mirred_id)
+ return ocelot_del_tc_cls_matchall_mirred(priv, ingress,
+ extack);
+ else
return -ENOENT;
- err = ocelot_port_policer_del(ocelot, port);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack,
- "Could not delete policer");
- return err;
- }
- priv->tc.police_id = 0;
- priv->tc.offload_cnt--;
- return 0;
+ break;
case TC_CLSMATCHALL_STATS:
default:
return -EOPNOTSUPP;
@@ -419,7 +522,7 @@ static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
* with VLAN filtering feature. We need to keep it to receive
* untagged traffic.
*/
- if (vid == OCELOT_VLAN_UNAWARE_PVID)
+ if (vid == OCELOT_STANDALONE_PVID)
return 0;
ret = ocelot_vlan_del(ocelot, port, vid);
@@ -559,7 +662,7 @@ static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr)
struct ocelot_mact_work_ctx w;
ether_addr_copy(w.forget.addr, addr);
- w.forget.vid = OCELOT_VLAN_UNAWARE_PVID;
+ w.forget.vid = OCELOT_STANDALONE_PVID;
w.type = OCELOT_MACT_FORGET;
return ocelot_enqueue_mact_action(ocelot, &w);
@@ -573,7 +676,7 @@ static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr)
struct ocelot_mact_work_ctx w;
ether_addr_copy(w.learn.addr, addr);
- w.learn.vid = OCELOT_VLAN_UNAWARE_PVID;
+ w.learn.vid = OCELOT_STANDALONE_PVID;
w.learn.pgid = PGID_CPU;
w.learn.entry_type = ENTRYTYPE_LOCKED;
w.type = OCELOT_MACT_LEARN;
@@ -608,9 +711,9 @@ static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
/* Learn the new net device MAC address in the mac table. */
ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data,
- OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
+ OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED);
/* Then forget the previous one. */
- ocelot_mact_forget(ocelot, dev->dev_addr, OCELOT_VLAN_UNAWARE_PVID);
+ ocelot_mact_forget(ocelot, dev->dev_addr, OCELOT_STANDALONE_PVID);
eth_hw_addr_set(dev, addr->sa_data);
return 0;
@@ -662,10 +765,11 @@ static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
- struct ocelot *ocelot = priv->port.ocelot;
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->chip_port;
- return ocelot_fdb_add(ocelot, port, addr, vid);
+ return ocelot_fdb_add(ocelot, port, addr, vid, ocelot_port->bridge);
}
static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
@@ -673,10 +777,11 @@ static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
const unsigned char *addr, u16 vid)
{
struct ocelot_port_private *priv = netdev_priv(dev);
- struct ocelot *ocelot = priv->port.ocelot;
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->chip_port;
- return ocelot_fdb_del(ocelot, port, addr, vid);
+ return ocelot_fdb_del(ocelot, port, addr, vid, ocelot_port->bridge);
}
static int ocelot_port_fdb_dump(struct sk_buff *skb,
@@ -988,7 +1093,7 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev,
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->chip_port;
- return ocelot_port_mdb_add(ocelot, port, mdb);
+ return ocelot_port_mdb_add(ocelot, port, mdb, ocelot_port->bridge);
}
static int ocelot_port_obj_del_mdb(struct net_device *dev,
@@ -999,7 +1104,7 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev,
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->chip_port;
- return ocelot_port_mdb_del(ocelot, port, mdb);
+ return ocelot_port_mdb_del(ocelot, port, mdb, ocelot_port->bridge);
}
static int ocelot_port_obj_mrp_add(struct net_device *dev,
@@ -1173,6 +1278,33 @@ static int ocelot_switchdev_unsync(struct ocelot *ocelot, int port)
return 0;
}
+static int ocelot_bridge_num_get(struct ocelot *ocelot,
+ const struct net_device *bridge_dev)
+{
+ int bridge_num = ocelot_bridge_num_find(ocelot, bridge_dev);
+
+ if (bridge_num < 0) {
+ /* First port that offloads this bridge */
+ bridge_num = find_first_zero_bit(&ocelot->bridges,
+ ocelot->num_phys_ports);
+
+ set_bit(bridge_num, &ocelot->bridges);
+ }
+
+ return bridge_num;
+}
+
+static void ocelot_bridge_num_put(struct ocelot *ocelot,
+ const struct net_device *bridge_dev,
+ int bridge_num)
+{
+ /* Check if the bridge is still in use, otherwise it is time
+ * to clean it up so we can reuse this bridge_num later.
+ */
+ if (!ocelot_bridge_num_find(ocelot, bridge_dev))
+ clear_bit(bridge_num, &ocelot->bridges);
+}
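
Bridge numbers come from a bitmap allocator: the first port to offload a given bridge claims the lowest free bit, later member ports reuse it, and the bit is returned once no port references that bridge anymore. The allocator shape in plain C (find_first_zero_bit replaced by a loop):

#include <limits.h>

static unsigned long bridges;	/* one bit per offloaded bridge */

static int bridge_num_alloc(void)
{
	for (int bit = 0; bit < (int)(sizeof(bridges) * CHAR_BIT); bit++) {
		if (!(bridges & (1UL << bit))) {
			bridges |= 1UL << bit;	/* claim lowest free bit */
			return bit;
		}
	}

	return -1;	/* cannot happen: at most num_phys_ports bridges */
}

static void bridge_num_free(int bit)
{
	bridges &= ~(1UL << bit);	/* last port left the bridge */
}
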
+
static int ocelot_netdevice_bridge_join(struct net_device *dev,
struct net_device *brport_dev,
struct net_device *bridge,
@@ -1182,9 +1314,14 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev,
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->chip_port;
- int err;
+ int bridge_num, err;
+
+ bridge_num = ocelot_bridge_num_get(ocelot, bridge);
- ocelot_port_bridge_join(ocelot, port, bridge);
+ err = ocelot_port_bridge_join(ocelot, port, bridge, bridge_num,
+ extack);
+ if (err)
+ goto err_join;
err = switchdev_bridge_port_offload(brport_dev, dev, priv,
&ocelot_switchdev_nb,
@@ -1205,6 +1342,8 @@ err_switchdev_sync:
&ocelot_switchdev_blocking_nb);
err_switchdev_offload:
ocelot_port_bridge_leave(ocelot, port, bridge);
+err_join:
+ ocelot_bridge_num_put(ocelot, bridge, bridge_num);
return err;
}
@@ -1225,6 +1364,7 @@ static int ocelot_netdevice_bridge_leave(struct net_device *dev,
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
+ int bridge_num = ocelot_port->bridge_num;
int port = priv->chip_port;
int err;
@@ -1233,6 +1373,7 @@ static int ocelot_netdevice_bridge_leave(struct net_device *dev,
return err;
ocelot_port_bridge_leave(ocelot, port, bridge);
+ ocelot_bridge_num_put(ocelot, bridge, bridge_num);
return 0;
}
@@ -1700,7 +1841,7 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
eth_hw_addr_gen(dev, ocelot->base_mac, port);
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr,
- OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
+ OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED);
ocelot_init_port(ocelot, port);
diff --git a/drivers/net/ethernet/mscc/ocelot_police.c b/drivers/net/ethernet/mscc/ocelot_police.c
index 6f5068c1041a..a65606bb84a0 100644
--- a/drivers/net/ethernet/mscc/ocelot_police.c
+++ b/drivers/net/ethernet/mscc/ocelot_police.c
@@ -154,6 +154,47 @@ int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix,
return 0;
}
+int ocelot_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *a,
+ struct netlink_ext_ack *extack)
+{
+ if (a->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (a->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ a->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (a->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, a)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but police action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (a->police.peakrate_bytes_ps ||
+ a->police.avrate || a->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (a->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload does not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_policer_validate);
+
int ocelot_port_policer_add(struct ocelot *ocelot, int port,
struct ocelot_policer *pol)
{
diff --git a/drivers/net/ethernet/mscc/ocelot_police.h b/drivers/net/ethernet/mscc/ocelot_police.h
index 7adb05f71999..7552995f8b17 100644
--- a/drivers/net/ethernet/mscc/ocelot_police.h
+++ b/drivers/net/ethernet/mscc/ocelot_police.h
@@ -8,6 +8,7 @@
#define _MSCC_OCELOT_POLICE_H_
#include "ocelot.h"
+#include <net/flow_offload.h>
enum mscc_qos_rate_mode {
MSCC_QOS_RATE_MODE_DISABLED, /* Policer/shaper disabled */
@@ -33,4 +34,8 @@ struct qos_policer_conf {
int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix,
struct qos_policer_conf *conf);
+int ocelot_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *a,
+ struct netlink_ext_ack *extack);
+
#endif /* _MSCC_OCELOT_POLICE_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index d3544413a8a4..c8701ac955a8 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -335,6 +335,7 @@ static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data,
vcap_action_set(vcap, data, VCAP_IS2_ACT_MASK_MODE, a->mask_mode);
vcap_action_set(vcap, data, VCAP_IS2_ACT_PORT_MASK, a->port_mask);
+ vcap_action_set(vcap, data, VCAP_IS2_ACT_MIRROR_ENA, a->mirror_ena);
vcap_action_set(vcap, data, VCAP_IS2_ACT_POLICE_ENA, a->police_ena);
vcap_action_set(vcap, data, VCAP_IS2_ACT_POLICE_IDX, a->pol_ix);
vcap_action_set(vcap, data, VCAP_IS2_ACT_CPU_QU_NUM, a->cpu_qu_num);
@@ -564,9 +565,9 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
val = proto.value[0];
msk = proto.mask[0];
type = IS2_TYPE_IP_UDP_TCP;
- if (msk == 0xff && (val == 6 || val == 17)) {
+ if (msk == 0xff && (val == IPPROTO_TCP || val == IPPROTO_UDP)) {
/* UDP/TCP protocol match */
- tcp = (val == 6 ?
+ tcp = (val == IPPROTO_TCP ?
OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_TCP, tcp);
vcap_key_l4_port_set(vcap, &data,
@@ -955,14 +956,21 @@ int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix)
}
EXPORT_SYMBOL(ocelot_vcap_policer_del);
-static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
- struct ocelot_vcap_block *block,
- struct ocelot_vcap_filter *filter)
+static int
+ocelot_vcap_filter_add_aux_resources(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter,
+ struct netlink_ext_ack *extack)
{
- struct ocelot_vcap_filter *tmp;
- struct list_head *pos, *n;
+ struct ocelot_mirror *m;
int ret;
+ if (filter->block_id == VCAP_IS2 && filter->action.mirror_ena) {
+ m = ocelot_mirror_get(ocelot, filter->egress_port.value,
+ extack);
+ if (IS_ERR(m))
+ return PTR_ERR(m);
+ }
+
if (filter->block_id == VCAP_IS2 && filter->action.police_ena) {
ret = ocelot_vcap_policer_add(ocelot, filter->action.pol_ix,
&filter->action.pol);
@@ -970,6 +978,33 @@ static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
return ret;
}
+ return 0;
+}
+
+static void
+ocelot_vcap_filter_del_aux_resources(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter)
+{
+ if (filter->block_id == VCAP_IS2 && filter->action.police_ena)
+ ocelot_vcap_policer_del(ocelot, filter->action.pol_ix);
+
+ if (filter->block_id == VCAP_IS2 && filter->action.mirror_ena)
+ ocelot_mirror_put(ocelot);
+}
+
+static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
+ struct ocelot_vcap_block *block,
+ struct ocelot_vcap_filter *filter,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot_vcap_filter *tmp;
+ struct list_head *pos, *n;
+ int ret;
+
+ ret = ocelot_vcap_filter_add_aux_resources(ocelot, filter, extack);
+ if (ret)
+ return ret;
+
block->count++;
if (list_empty(&block->rules)) {
@@ -1168,7 +1203,7 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
}
/* Add filter to the linked list */
- ret = ocelot_vcap_filter_add_to_block(ocelot, block, filter);
+ ret = ocelot_vcap_filter_add_to_block(ocelot, block, filter, extack);
if (ret)
return ret;
@@ -1195,18 +1230,12 @@ static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot,
struct ocelot_vcap_block *block,
struct ocelot_vcap_filter *filter)
{
- struct ocelot_vcap_filter *tmp;
- struct list_head *pos, *q;
+ struct ocelot_vcap_filter *tmp, *n;
- list_for_each_safe(pos, q, &block->rules) {
- tmp = list_entry(pos, struct ocelot_vcap_filter, list);
+ list_for_each_entry_safe(tmp, n, &block->rules, list) {
if (ocelot_vcap_filter_equal(filter, tmp)) {
- if (tmp->block_id == VCAP_IS2 &&
- tmp->action.police_ena)
- ocelot_vcap_policer_del(ocelot,
- tmp->action.pol_ix);
-
- list_del(pos);
+ ocelot_vcap_filter_del_aux_resources(ocelot, tmp);
+ list_del(&tmp->list);
kfree(tmp);
}
}
@@ -1401,6 +1430,7 @@ int ocelot_vcap_init(struct ocelot *ocelot)
}
INIT_LIST_HEAD(&ocelot->dummy_rules);
+ INIT_LIST_HEAD(&ocelot->traps);
INIT_LIST_HEAD(&ocelot->vcap_pol.pol_list);
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 9cff3d48acbc..9c0861d03634 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -5,6 +5,7 @@ nfp-objs := \
nfpcore/nfp6000_pcie.o \
nfpcore/nfp_cppcore.o \
nfpcore/nfp_cpplib.o \
+ nfpcore/nfp_dev.o \
nfpcore/nfp_hwinfo.o \
nfpcore/nfp_mip.o \
nfpcore/nfp_mutex.o \
@@ -19,18 +20,25 @@ nfp-objs := \
ccm_mbox.o \
devlink_param.o \
nfp_asm.o \
+ nfd3/dp.o \
+ nfd3/rings.o \
+ nfd3/xsk.o \
+ nfdk/dp.o \
+ nfdk/rings.o \
nfp_app.o \
nfp_app_nic.o \
nfp_devlink.o \
nfp_hwmon.o \
nfp_main.o \
nfp_net_common.o \
+ nfp_net_dp.o \
nfp_net_ctrl.o \
nfp_net_debugdump.o \
nfp_net_ethtool.o \
nfp_net_main.o \
nfp_net_repr.o \
nfp_net_sriov.o \
+ nfp_net_xsk.o \
nfp_netvf_main.o \
nfp_port.o \
nfp_shared_buf.o \
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index a3242b36e216..1b9421e844a9 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -922,6 +922,51 @@ nfp_fl_pedit(const struct flow_action_entry *act,
}
}
+static struct nfp_fl_meter *nfp_fl_meter(char *act_data)
+{
+ size_t act_size = sizeof(struct nfp_fl_meter);
+ struct nfp_fl_meter *meter_act;
+
+ meter_act = (struct nfp_fl_meter *)act_data;
+
+ memset(meter_act, 0, act_size);
+
+ meter_act->head.jump_id = NFP_FL_ACTION_OPCODE_METER;
+ meter_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+ return meter_act;
+}
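
NFP firmware actions are packed back-to-back into a flat buffer; every entry leads with a header holding the opcode (jump_id) and its length in 32-bit words (len_lw). A self-contained model of appending the meter action, under the assumption that NFP_FL_LW_SIZ is 2 (length counted in 4-byte words) and with __builtin_bswap32 standing in for cpu_to_be32 on a little-endian host:

#include <stdint.h>
#include <string.h>

struct act_head { uint8_t jump_id; uint8_t len_lw; };
struct meter_act {
	struct act_head head;
	uint16_t reserved;
	uint32_t meter_id;	/* big-endian on the wire */
};

static int append_meter(uint8_t *buf, int *a_len, int max, uint32_t id)
{
	struct meter_act *m = (struct meter_act *)(buf + *a_len);

	if (*a_len + (int)sizeof(*m) > max)
		return -1;	/* action list full */

	memset(m, 0, sizeof(*m));
	m->head.jump_id = 24;			/* NFP_FL_ACTION_OPCODE_METER */
	m->head.len_lw = sizeof(*m) >> 2;	/* length in 32-bit words */
	m->meter_id = __builtin_bswap32(id);	/* cpu_to_be32() equivalent */
	*a_len += sizeof(*m);

	return 0;
}
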
+
+static int
+nfp_flower_meter_action(struct nfp_app *app,
+ const struct flow_action_entry *action,
+ struct nfp_fl_payload *nfp_fl, int *a_len,
+ struct net_device *netdev,
+ struct netlink_ext_ack *extack)
+{
+ struct nfp_fl_meter *fl_meter;
+ u32 meter_id;
+
+ if (*a_len + sizeof(struct nfp_fl_meter) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack,
+				   "unsupported offload: meter action size beyond the allowed maximum");
+ return -EOPNOTSUPP;
+ }
+
+ meter_id = action->hw_index;
+ if (!nfp_flower_search_meter_entry(app, meter_id)) {
+ NL_SET_ERR_MSG_MOD(extack,
+				   "cannot offload flow table with unsupported police action.");
+ return -EOPNOTSUPP;
+ }
+
+ fl_meter = nfp_fl_meter(&nfp_fl->action_data[*a_len]);
+ *a_len += sizeof(struct nfp_fl_meter);
+ fl_meter->meter_id = cpu_to_be32(meter_id);
+
+ return 0;
+}
+
static int
nfp_flower_output_action(struct nfp_app *app,
const struct flow_action_entry *act,
@@ -985,6 +1030,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
struct netlink_ext_ack *extack, int act_idx)
{
+ struct nfp_flower_priv *fl_priv = app->priv;
struct nfp_fl_pre_tunnel *pre_tun;
struct nfp_fl_set_tun *set_tun;
struct nfp_fl_push_vlan *psh_v;
@@ -1149,6 +1195,18 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
*pkt_host = true;
break;
+ case FLOW_ACTION_POLICE:
+ if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_METER)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: unsupported police action in action list");
+ return -EOPNOTSUPP;
+ }
+
+ err = nfp_flower_meter_action(app, act, nfp_fl, a_len, netdev,
+ extack);
+ if (err)
+ return err;
+ break;
default:
/* Currently we do not handle any other actions. */
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 1543e47456d5..68e8a2fb1a29 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -85,6 +85,7 @@
#define NFP_FL_ACTION_OPCODE_SET_TCP 15
#define NFP_FL_ACTION_OPCODE_PRE_LAG 16
#define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17
+#define NFP_FL_ACTION_OPCODE_METER 24
#define NFP_FL_ACTION_OPCODE_PUSH_GENEVE 26
#define NFP_FL_ACTION_OPCODE_NUM 32
@@ -260,6 +261,12 @@ struct nfp_fl_set_mpls {
__be32 lse;
};
+struct nfp_fl_meter {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ __be32 meter_id;
+};
+
/* Metadata with L2 (1W/4B)
* ----------------------------------------------------------------
* 3 2 1
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index ac1dcfa1d179..4d960a9641b3 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -266,7 +266,7 @@ nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
int i, err, count = 0;
reprs = rcu_dereference_protected(app->reprs[type],
- lockdep_is_held(&app->pf->lock));
+ nfp_app_is_locked(app));
if (!reprs)
return 0;
@@ -295,7 +295,7 @@ nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
if (!tot_repl)
return 0;
- lockdep_assert_held(&app->pf->lock);
+ assert_nfp_app_locked(app);
if (!wait_event_timeout(priv->reify_wait_queue,
atomic_read(replies) >= tot_repl,
NFP_FL_REPLY_TIMEOUT)) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 917c450a7aad..fa902ce2dd82 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -12,7 +12,9 @@
#include <linux/rhashtable.h>
#include <linux/time64.h>
#include <linux/types.h>
+#include <net/flow_offload.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/tcp.h>
#include <linux/workqueue.h>
#include <linux/idr.h>
@@ -48,6 +50,7 @@ struct nfp_app;
#define NFP_FL_FEATS_IPV6_TUN BIT(7)
#define NFP_FL_FEATS_VLAN_QINQ BIT(8)
#define NFP_FL_FEATS_QOS_PPS BIT(9)
+#define NFP_FL_FEATS_QOS_METER BIT(10)
#define NFP_FL_FEATS_HOST_ACK BIT(31)
#define NFP_FL_ENABLE_FLOW_MERGE BIT(0)
@@ -63,7 +66,8 @@ struct nfp_app;
NFP_FL_FEATS_PRE_TUN_RULES | \
NFP_FL_FEATS_IPV6_TUN | \
NFP_FL_FEATS_VLAN_QINQ | \
- NFP_FL_FEATS_QOS_PPS)
+ NFP_FL_FEATS_QOS_PPS | \
+ NFP_FL_FEATS_QOS_METER)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
@@ -191,6 +195,8 @@ struct nfp_fl_internal_ports {
* @qos_stats_work: Workqueue for qos stats processing
* @qos_rate_limiters: Current active qos rate limiters
* @qos_stats_lock: Lock on qos stats updates
+ * @meter_stats_lock: Lock on meter stats updates
+ * @meter_table: Hash table used to store the meter table
* @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded
* @merge_table: Hash table to store merged flows
* @ct_zone_table: Hash table used to store the different zones
@@ -228,6 +234,8 @@ struct nfp_flower_priv {
struct delayed_work qos_stats_work;
unsigned int qos_rate_limiters;
spinlock_t qos_stats_lock; /* Protect the qos stats */
+ struct mutex meter_stats_lock; /* Protect the meter stats */
+ struct rhashtable meter_table;
int pre_tun_rule_cnt;
struct rhashtable merge_table;
struct rhashtable ct_zone_table;
@@ -374,6 +382,31 @@ struct nfp_fl_stats_frame {
__be64 stats_cookie;
};
+struct nfp_meter_stats_entry {
+ u64 pkts;
+ u64 bytes;
+ u64 drops;
+};
+
+struct nfp_meter_entry {
+ struct rhash_head ht_node;
+ u32 meter_id;
+ bool bps;
+ u32 rate;
+ u32 burst;
+ u64 used;
+ struct nfp_meter_stats {
+ u64 update;
+ struct nfp_meter_stats_entry curr;
+ struct nfp_meter_stats_entry prev;
+ } stats;
+};
+
+enum nfp_meter_op {
+ NFP_METER_ADD,
+ NFP_METER_DEL,
+};
+
static inline bool
nfp_flower_internal_port_can_offload(struct nfp_app *app,
struct net_device *netdev)
@@ -569,4 +602,18 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
void
nfp_flower_update_merge_stats(struct nfp_app *app,
struct nfp_fl_payload *sub_flow);
+
+int nfp_setup_tc_act_offload(struct nfp_app *app,
+ struct flow_offload_action *fl_act);
+int nfp_init_meter_table(struct nfp_app *app);
+void nfp_flower_stats_meter_request_all(struct nfp_flower_priv *fl_priv);
+void nfp_act_stats_reply(struct nfp_app *app, void *pmsg);
+int nfp_flower_offload_one_police(struct nfp_app *app, bool ingress,
+ bool pps, u32 id, u32 rate, u32 burst);
+int nfp_flower_setup_meter_entry(struct nfp_app *app,
+ const struct flow_action_entry *action,
+ enum nfp_meter_op op,
+ u32 meter_id);
+struct nfp_meter_entry *
+nfp_flower_search_meter_entry(struct nfp_app *app, u32 meter_id);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index f97eff5afd12..92e8ade4854e 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1861,6 +1861,20 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, str
return 0;
}
+static int
+nfp_setup_tc_no_dev(struct nfp_app *app, enum tc_setup_type type, void *data)
+{
+ if (!data)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_ACT:
+ return nfp_setup_tc_act_offload(app, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
enum tc_setup_type type, void *type_data,
@@ -1868,7 +1882,7 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *
void (*cleanup)(struct flow_block_cb *block_cb))
{
if (!netdev)
- return -EOPNOTSUPP;
+ return nfp_setup_tc_no_dev(cb_priv, type, data);
if (!nfp_fl_is_netdev_to_offload(netdev))
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
index 784c6dbf8bc4..3206ba83b1aa 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -1,7 +1,11 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */
+#include <linux/hash.h>
+#include <linux/hashtable.h>
+#include <linux/jhash.h>
#include <linux/math64.h>
+#include <linux/vmalloc.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
@@ -11,10 +15,14 @@
#define NFP_FL_QOS_UPDATE msecs_to_jiffies(1000)
#define NFP_FL_QOS_PPS BIT(15)
+#define NFP_FL_QOS_METER BIT(10)
struct nfp_police_cfg_head {
__be32 flags_opts;
- __be32 port;
+ union {
+ __be32 meter_id;
+ __be32 port;
+ };
};
enum NFP_FL_QOS_TYPES {
@@ -46,7 +54,15 @@ enum NFP_FL_QOS_TYPES {
* | Committed Information Rate |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* Word[0](FLag options):
- * [15] p(pps) 1 for pps ,0 for bps
+ * [15] p(pps) 1 for pps, 0 for bps
+ *
+ * Meter control message
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-------------------------------+-+---+-----+-+---------+-+---+-+
+ * | Reserved |p| Y |TYPE |E|TSHFV |P| PC|R|
+ * +-------------------------------+-+---+-----+-+---------+-+---+-+
+ * | meter ID |
+ * +-------------------------------+-------------------------------+
*
*/
struct nfp_police_config {
@@ -67,6 +83,74 @@ struct nfp_police_stats_reply {
__be64 drop_pkts;
};
+int nfp_flower_offload_one_police(struct nfp_app *app, bool ingress,
+ bool pps, u32 id, u32 rate, u32 burst)
+{
+ struct nfp_police_config *config;
+ struct sk_buff *skb;
+
+ skb = nfp_flower_cmsg_alloc(app, sizeof(struct nfp_police_config),
+ NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ config = nfp_flower_cmsg_get_data(skb);
+ memset(config, 0, sizeof(struct nfp_police_config));
+ if (pps)
+ config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_PPS);
+ if (!ingress)
+ config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_METER);
+
+ if (ingress)
+ config->head.port = cpu_to_be32(id);
+ else
+ config->head.meter_id = cpu_to_be32(id);
+
+ config->bkt_tkn_p = cpu_to_be32(burst);
+ config->bkt_tkn_c = cpu_to_be32(burst);
+ config->pbs = cpu_to_be32(burst);
+ config->cbs = cpu_to_be32(burst);
+ config->pir = cpu_to_be32(rate);
+ config->cir = cpu_to_be32(rate);
+ nfp_ctrl_tx(app->ctrl, skb);
+
+ return 0;
+}
+
+static int nfp_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int
nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_matchall_offload *flow,
@@ -77,15 +161,15 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
struct nfp_flower_priv *fl_priv = app->priv;
struct flow_action_entry *action = NULL;
struct nfp_flower_repr_priv *repr_priv;
- struct nfp_police_config *config;
u32 netdev_port_id, i;
struct nfp_repr *repr;
- struct sk_buff *skb;
bool pps_support;
u32 bps_num = 0;
u32 pps_num = 0;
u32 burst;
+ bool pps;
u64 rate;
+ int err;
if (!nfp_netdev_is_nfp_repr(netdev)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
@@ -132,6 +216,11 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
"unsupported offload: qos rate limit offload requires police action");
return -EOPNOTSUPP;
}
+
+ err = nfp_policer_validate(&flow->rule->action, action, extack);
+ if (err)
+ return err;
+
if (action->police.rate_bytes_ps > 0) {
if (bps_num++) {
NL_SET_ERR_MSG_MOD(extack,
@@ -169,23 +258,12 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
}
if (rate != 0) {
- skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
- NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- config = nfp_flower_cmsg_get_data(skb);
- memset(config, 0, sizeof(struct nfp_police_config));
+ pps = false;
if (action->police.rate_pkt_ps > 0)
- config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_PPS);
- config->head.port = cpu_to_be32(netdev_port_id);
- config->bkt_tkn_p = cpu_to_be32(burst);
- config->bkt_tkn_c = cpu_to_be32(burst);
- config->pbs = cpu_to_be32(burst);
- config->cbs = cpu_to_be32(burst);
- config->pir = cpu_to_be32(rate);
- config->cir = cpu_to_be32(rate);
- nfp_ctrl_tx(repr->app->ctrl, skb);
+ pps = true;
+ nfp_flower_offload_one_police(repr->app, true,
+ pps, netdev_port_id,
+ rate, burst);
}
}
repr_priv->qos_table.netdev_port_id = netdev_port_id;
@@ -266,6 +344,9 @@ void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb)
u32 netdev_port_id;
msg = nfp_flower_cmsg_get_data(skb);
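+ /* Meter (tc action) stats replies carry the NFP_FL_QOS_METER flag;
+ * route them to the action stats handler instead of the per-port path.
+ */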
+ if (be32_to_cpu(msg->head.flags_opts) & NFP_FL_QOS_METER)
+ return nfp_act_stats_reply(app, msg);
+
netdev_port_id = be32_to_cpu(msg->head.port);
rcu_read_lock();
netdev = nfp_app_dev_get(app, netdev_port_id, NULL);
@@ -297,7 +378,7 @@ exit_unlock_rcu:
static void
nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
- u32 netdev_port_id)
+ u32 id, bool ingress)
{
struct nfp_police_cfg_head *head;
struct sk_buff *skb;
@@ -308,10 +389,15 @@ nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
GFP_ATOMIC);
if (!skb)
return;
-
head = nfp_flower_cmsg_get_data(skb);
+
memset(head, 0, sizeof(struct nfp_police_cfg_head));
- head->port = cpu_to_be32(netdev_port_id);
+ if (ingress) {
+ head->port = cpu_to_be32(id);
+ } else {
+ head->flags_opts = cpu_to_be32(NFP_FL_QOS_METER);
+ head->meter_id = cpu_to_be32(id);
+ }
nfp_ctrl_tx(fl_priv->app->ctrl, skb);
}
@@ -341,7 +427,8 @@ nfp_flower_stats_rlim_request_all(struct nfp_flower_priv *fl_priv)
if (!netdev_port_id)
continue;
- nfp_flower_stats_rlim_request(fl_priv, netdev_port_id);
+ nfp_flower_stats_rlim_request(fl_priv,
+ netdev_port_id, true);
}
}
@@ -359,6 +446,8 @@ static void update_stats_cache(struct work_struct *work)
qos_stats_work);
nfp_flower_stats_rlim_request_all(fl_priv);
+ nfp_flower_stats_meter_request_all(fl_priv);
+
schedule_delayed_work(&fl_priv->qos_stats_work, NFP_FL_QOS_UPDATE);
}
@@ -406,6 +495,9 @@ void nfp_flower_qos_init(struct nfp_app *app)
struct nfp_flower_priv *fl_priv = app->priv;
spin_lock_init(&fl_priv->qos_stats_lock);
+ mutex_init(&fl_priv->meter_stats_lock);
+ nfp_init_meter_table(app);
+
INIT_DELAYED_WORK(&fl_priv->qos_stats_work, &update_stats_cache);
}
@@ -441,3 +533,333 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
}
}
+
+/* offload tc action, currently only for tc police */
+
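+/* Meter entries live in an rhashtable keyed by the 32-bit meter id (the tc
+ * action hw_index), so firmware stats replies can be matched back to their
+ * software entry without a linear scan.
+ */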
+static const struct rhashtable_params stats_meter_table_params = {
+ .key_offset = offsetof(struct nfp_meter_entry, meter_id),
+ .head_offset = offsetof(struct nfp_meter_entry, ht_node),
+ .key_len = sizeof(u32),
+};
+
+struct nfp_meter_entry *
+nfp_flower_search_meter_entry(struct nfp_app *app, u32 meter_id)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ return rhashtable_lookup_fast(&priv->meter_table, &meter_id,
+ stats_meter_table_params);
+}
+
+static struct nfp_meter_entry *
+nfp_flower_add_meter_entry(struct nfp_app *app, u32 meter_id)
+{
+ struct nfp_meter_entry *meter_entry = NULL;
+ struct nfp_flower_priv *priv = app->priv;
+
+ meter_entry = rhashtable_lookup_fast(&priv->meter_table,
+ &meter_id,
+ stats_meter_table_params);
+ if (meter_entry)
+ return meter_entry;
+
+ meter_entry = kzalloc(sizeof(*meter_entry), GFP_KERNEL);
+ if (!meter_entry)
+ return NULL;
+
+ meter_entry->meter_id = meter_id;
+ meter_entry->used = jiffies;
+ if (rhashtable_insert_fast(&priv->meter_table, &meter_entry->ht_node,
+ stats_meter_table_params)) {
+ kfree(meter_entry);
+ return NULL;
+ }
+
+ priv->qos_rate_limiters++;
+ if (priv->qos_rate_limiters == 1)
+ schedule_delayed_work(&priv->qos_stats_work,
+ NFP_FL_QOS_UPDATE);
+
+ return meter_entry;
+}
+
+static void nfp_flower_del_meter_entry(struct nfp_app *app, u32 meter_id)
+{
+ struct nfp_meter_entry *meter_entry = NULL;
+ struct nfp_flower_priv *priv = app->priv;
+
+ meter_entry = rhashtable_lookup_fast(&priv->meter_table, &meter_id,
+ stats_meter_table_params);
+ if (!meter_entry)
+ return;
+
+ rhashtable_remove_fast(&priv->meter_table,
+ &meter_entry->ht_node,
+ stats_meter_table_params);
+ kfree(meter_entry);
+ priv->qos_rate_limiters--;
+ if (!priv->qos_rate_limiters)
+ cancel_delayed_work_sync(&priv->qos_stats_work);
+}
+
+int nfp_flower_setup_meter_entry(struct nfp_app *app,
+ const struct flow_action_entry *action,
+ enum nfp_meter_op op,
+ u32 meter_id)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct nfp_meter_entry *meter_entry = NULL;
+ int err = 0;
+
+ mutex_lock(&fl_priv->meter_stats_lock);
+
+ switch (op) {
+ case NFP_METER_DEL:
+ nfp_flower_del_meter_entry(app, meter_id);
+ goto exit_unlock;
+ case NFP_METER_ADD:
+ meter_entry = nfp_flower_add_meter_entry(app, meter_id);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ goto exit_unlock;
+ }
+
+ if (!meter_entry) {
+ err = -ENOMEM;
+ goto exit_unlock;
+ }
+
+ if (action->police.rate_bytes_ps > 0) {
+ meter_entry->bps = true;
+ meter_entry->rate = action->police.rate_bytes_ps;
+ meter_entry->burst = action->police.burst;
+ } else {
+ meter_entry->bps = false;
+ meter_entry->rate = action->police.rate_pkt_ps;
+ meter_entry->burst = action->police.burst_pkt;
+ }
+
+exit_unlock:
+ mutex_unlock(&fl_priv->meter_stats_lock);
+ return err;
+}
+
+int nfp_init_meter_table(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ return rhashtable_init(&priv->meter_table, &stats_meter_table_params);
+}
+
+void
+nfp_flower_stats_meter_request_all(struct nfp_flower_priv *fl_priv)
+{
+ struct nfp_meter_entry *meter_entry = NULL;
+ struct rhashtable_iter iter;
+
+ mutex_lock(&fl_priv->meter_stats_lock);
+ rhashtable_walk_enter(&fl_priv->meter_table, &iter);
+ rhashtable_walk_start(&iter);
+
+ while ((meter_entry = rhashtable_walk_next(&iter)) != NULL) {
+ if (IS_ERR(meter_entry))
+ continue;
+ nfp_flower_stats_rlim_request(fl_priv,
+ meter_entry->meter_id, false);
+ }
+
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+ mutex_unlock(&fl_priv->meter_stats_lock);
+}
+
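+/* FLOW_ACT_REPLACE handler: walk every entry of the action offload request,
+ * create a meter table entry for each police action and program it to the
+ * firmware. Fails with -EOPNOTSUPP if nothing could be offloaded.
+ */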
+static int
+nfp_act_install_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_action_entry *paction = &fl_act->action.entries[0];
+ u32 action_num = fl_act->action.num_entries;
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct flow_action_entry *action = NULL;
+ u32 burst, i, meter_id;
+ bool pps_support, pps;
+ bool add = false;
+ u64 rate;
+
+ pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);
+
+ for (i = 0; i < action_num; i++) {
+ /* Set QoS associated data for this interface */
+ action = paction + i;
+ if (action->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: qos rate limit offload requires police action");
+ continue;
+ }
+ if (action->police.rate_bytes_ps > 0) {
+ rate = action->police.rate_bytes_ps;
+ burst = action->police.burst;
+ } else if (action->police.rate_pkt_ps > 0 && pps_support) {
+ rate = action->police.rate_pkt_ps;
+ burst = action->police.burst_pkt;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: unsupported qos rate limit");
+ continue;
+ }
+
+ if (rate != 0) {
+ meter_id = action->hw_index;
+ if (nfp_flower_setup_meter_entry(app, action, NFP_METER_ADD, meter_id))
+ continue;
+
+ pps = false;
+ if (action->police.rate_pkt_ps > 0)
+ pps = true;
+ nfp_flower_offload_one_police(app, false, pps, meter_id,
+ rate, burst);
+ add = true;
+ }
+ }
+
+ return add ? 0 : -EOPNOTSUPP;
+}
+
+static int
+nfp_act_remove_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
+ struct netlink_ext_ack *extack)
+{
+ struct nfp_meter_entry *meter_entry = NULL;
+ struct nfp_police_config *config;
+ struct sk_buff *skb;
+ u32 meter_id;
+ bool pps;
+
+ /* Delete QoS associated data for this interface */
+ if (fl_act->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: qos rate limit offload requires police action");
+ return -EOPNOTSUPP;
+ }
+
+ meter_id = fl_act->index;
+ meter_entry = nfp_flower_search_meter_entry(app, meter_id);
+ if (!meter_entry) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "no meter entry when delete the action index.");
+ return -ENOENT;
+ }
+ pps = !meter_entry->bps;
+
+ skb = nfp_flower_cmsg_alloc(app, sizeof(struct nfp_police_config),
+ NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ config = nfp_flower_cmsg_get_data(skb);
+ memset(config, 0, sizeof(struct nfp_police_config));
+ config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_METER);
+ config->head.meter_id = cpu_to_be32(meter_id);
+ if (pps)
+ config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_PPS);
+
+ nfp_ctrl_tx(app->ctrl, skb);
+ nfp_flower_setup_meter_entry(app, NULL, NFP_METER_DEL, meter_id);
+
+ return 0;
+}
+
+void
+nfp_act_stats_reply(struct nfp_app *app, void *pmsg)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct nfp_meter_entry *meter_entry = NULL;
+ struct nfp_police_stats_reply *msg = pmsg;
+ u32 meter_id;
+
+ meter_id = be32_to_cpu(msg->head.meter_id);
+ mutex_lock(&fl_priv->meter_stats_lock);
+
+ meter_entry = nfp_flower_search_meter_entry(app, meter_id);
+ if (!meter_entry)
+ goto exit_unlock;
+
+ meter_entry->stats.curr.pkts = be64_to_cpu(msg->pass_pkts) +
+ be64_to_cpu(msg->drop_pkts);
+ meter_entry->stats.curr.bytes = be64_to_cpu(msg->pass_bytes) +
+ be64_to_cpu(msg->drop_bytes);
+ meter_entry->stats.curr.drops = be64_to_cpu(msg->drop_pkts);
+ if (!meter_entry->stats.update) {
+ meter_entry->stats.prev.pkts = meter_entry->stats.curr.pkts;
+ meter_entry->stats.prev.bytes = meter_entry->stats.curr.bytes;
+ meter_entry->stats.prev.drops = meter_entry->stats.curr.drops;
+ }
+
+ meter_entry->stats.update = jiffies;
+
+exit_unlock:
+ mutex_unlock(&fl_priv->meter_stats_lock);
+}
+
+static int
+nfp_act_stats_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
+ struct netlink_ext_ack *extack)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct nfp_meter_entry *meter_entry = NULL;
+ u64 diff_bytes, diff_pkts, diff_drops;
+ int err = 0;
+
+ if (fl_act->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: qos rate limit offload requires police action");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&fl_priv->meter_stats_lock);
+ meter_entry = nfp_flower_search_meter_entry(app, fl_act->index);
+ if (!meter_entry) {
+ err = -ENOENT;
+ goto exit_unlock;
+ }
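+ /* Report only the delta accumulated since the previous query; the
+ * comparisons clamp to zero in case the firmware counters were reset.
+ */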
+ diff_pkts = meter_entry->stats.curr.pkts > meter_entry->stats.prev.pkts ?
+ meter_entry->stats.curr.pkts - meter_entry->stats.prev.pkts : 0;
+ diff_bytes = meter_entry->stats.curr.bytes > meter_entry->stats.prev.bytes ?
+ meter_entry->stats.curr.bytes - meter_entry->stats.prev.bytes : 0;
+ diff_drops = meter_entry->stats.curr.drops > meter_entry->stats.prev.drops ?
+ meter_entry->stats.curr.drops - meter_entry->stats.prev.drops : 0;
+
+ flow_stats_update(&fl_act->stats, diff_bytes, diff_pkts, diff_drops,
+ meter_entry->stats.update,
+ FLOW_ACTION_HW_STATS_DELAYED);
+
+ meter_entry->stats.prev.pkts = meter_entry->stats.curr.pkts;
+ meter_entry->stats.prev.bytes = meter_entry->stats.curr.bytes;
+ meter_entry->stats.prev.drops = meter_entry->stats.curr.drops;
+
+exit_unlock:
+ mutex_unlock(&fl_priv->meter_stats_lock);
+ return err;
+}
+
+int nfp_setup_tc_act_offload(struct nfp_app *app,
+ struct flow_offload_action *fl_act)
+{
+ struct netlink_ext_ack *extack = fl_act->extack;
+ struct nfp_flower_priv *fl_priv = app->priv;
+
+ if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_METER))
+ return -EOPNOTSUPP;
+
+ switch (fl_act->command) {
+ case FLOW_ACT_REPLACE:
+ return nfp_act_install_actions(app, fl_act, extack);
+ case FLOW_ACT_DESTROY:
+ return nfp_act_remove_actions(app, fl_act, extack);
+ case FLOW_ACT_STATS:
+ return nfp_act_stats_actions(app, fl_act, extack);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index cb43651ea9ba..c71bd555f482 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -356,7 +356,7 @@ __nfp_tun_add_route_to_cache(struct list_head *route_list,
return 0;
}
- entry = kmalloc(sizeof(*entry) + add_len, GFP_ATOMIC);
+ entry = kmalloc(struct_size(entry, ip_add, add_len), GFP_ATOMIC);
if (!entry) {
spin_unlock_bh(list_lock);
return -ENOMEM;
@@ -942,8 +942,8 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
if (!nfp_mac_idx) {
/* Assign a global index if non-repr or MAC is now shared. */
if (entry || !port) {
- ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
- NFP_MAX_MAC_INDEX, GFP_KERNEL);
+ ida_idx = ida_alloc_max(&priv->tun.mac_off_ids,
+ NFP_MAX_MAC_INDEX, GFP_KERNEL);
if (ida_idx < 0)
return ida_idx;
@@ -998,7 +998,7 @@ err_free_entry:
kfree(entry);
err_free_ida:
if (ida_idx != -1)
- ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+ ida_free(&priv->tun.mac_off_ids, ida_idx);
return err;
}
@@ -1061,7 +1061,7 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
}
ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
- ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+ ida_free(&priv->tun.mac_off_ids, ida_idx);
entry->index = nfp_mac_idx;
return 0;
}
@@ -1081,7 +1081,7 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
/* If MAC has global ID then extract and free the ida entry. */
if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) {
ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
- ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+ ida_free(&priv->tun.mac_off_ids, ida_idx);
}
kfree(entry);
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
new file mode 100644
index 000000000000..7db56abaa582
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -0,0 +1,1350 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
+
+#include <linux/bpf_trace.h>
+#include <linux/netdevice.h>
+
+#include "../nfp_app.h"
+#include "../nfp_net.h"
+#include "../nfp_net_dp.h"
+#include "../nfp_net_xsk.h"
+#include "../crypto/crypto.h"
+#include "../crypto/fw.h"
+#include "nfd3.h"
+
+/* Transmit processing
+ *
+ * One queue controller peripheral queue is used for transmit. The
+ * driver enqueues packets for transmit by advancing the write
+ * pointer. The device indicates that packets have been transmitted by
+ * advancing the read pointer. The driver maintains a local copy of
+ * the read and write pointer in @struct nfp_net_tx_ring. The driver
+ * keeps @wr_p in sync with the queue controller write pointer and can
+ * determine how many packets have been transmitted by comparing its
+ * copy of the read pointer @rd_p with the read pointer maintained by
+ * the queue controller peripheral.
+ */
+
+/* Wrappers for deciding when to stop and restart TX queues */
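+/* Stop the queue once fewer than MAX_SKB_FRAGS + 1 free descriptors remain
+ * (one skb may need that many); wake it only when MAX_SKB_FRAGS * 4 are
+ * free again, which adds hysteresis against rapid stop/wake cycling.
+ */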
+static int nfp_nfd3_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
+{
+ return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
+}
+
+static int nfp_nfd3_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
+{
+ return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
+}
+
+/**
+ * nfp_nfd3_tx_ring_stop() - stop tx ring
+ * @nd_q: netdev queue
+ * @tx_ring: driver tx queue structure
+ *
+ * Safely stop TX ring. Remember that while we are running .start_xmit()
+ * someone else may be cleaning the TX ring completions so we need to be
+ * extra careful here.
+ */
+static void
+nfp_nfd3_tx_ring_stop(struct netdev_queue *nd_q,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ netif_tx_stop_queue(nd_q);
+
+ /* We can race with the TX completion out of NAPI so recheck */
+ smp_mb();
+ if (unlikely(nfp_nfd3_tx_ring_should_wake(tx_ring)))
+ netif_tx_start_queue(nd_q);
+}
+
+/**
+ * nfp_nfd3_tx_tso() - Set up Tx descriptor for LSO
+ * @r_vec: per-ring structure
+ * @txbuf: Pointer to driver soft TX descriptor
+ * @txd: Pointer to HW TX descriptor
+ * @skb: Pointer to SKB
+ * @md_bytes: Prepend length
+ *
+ * Set up the Tx descriptor for LSO; do nothing for non-LSO skbs.
+ */
+static void
+nfp_nfd3_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfd3_tx_buf *txbuf,
+ struct nfp_nfd3_tx_desc *txd, struct sk_buff *skb, u32 md_bytes)
+{
+ u32 l3_offset, l4_offset, hdrlen;
+ u16 mss;
+
+ if (!skb_is_gso(skb))
+ return;
+
+ if (!skb->encapsulation) {
+ l3_offset = skb_network_offset(skb);
+ l4_offset = skb_transport_offset(skb);
+ hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ } else {
+ l3_offset = skb_inner_network_offset(skb);
+ l4_offset = skb_inner_transport_offset(skb);
+ hdrlen = skb_inner_transport_header(skb) - skb->data +
+ inner_tcp_hdrlen(skb);
+ }
+
+ txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
+ txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
+
+ mss = skb_shinfo(skb)->gso_size & NFD3_DESC_TX_MSS_MASK;
+ txd->l3_offset = l3_offset - md_bytes;
+ txd->l4_offset = l4_offset - md_bytes;
+ txd->lso_hdrlen = hdrlen - md_bytes;
+ txd->mss = cpu_to_le16(mss);
+ txd->flags |= NFD3_DESC_TX_LSO;
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_lso++;
+ u64_stats_update_end(&r_vec->tx_sync);
+}
+
+/**
+ * nfp_nfd3_tx_csum() - Set TX CSUM offload flags in TX descriptor
+ * @dp: NFP Net data path struct
+ * @r_vec: per-ring structure
+ * @txbuf: Pointer to driver soft TX descriptor
+ * @txd: Pointer to TX descriptor
+ * @skb: Pointer to SKB
+ *
+ * This function sets the TX checksum flags in the TX descriptor based
+ * on the configuration and the protocol of the packet to be transmitted.
+ */
+static void
+nfp_nfd3_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct nfp_nfd3_tx_buf *txbuf, struct nfp_nfd3_tx_desc *txd,
+ struct sk_buff *skb)
+{
+ struct ipv6hdr *ipv6h;
+ struct iphdr *iph;
+ u8 l4_hdr;
+
+ if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
+ return;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return;
+
+ txd->flags |= NFD3_DESC_TX_CSUM;
+ if (skb->encapsulation)
+ txd->flags |= NFD3_DESC_TX_ENCAP;
+
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
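+ /* The version field is at the same offset in IPv4 and IPv6 headers,
+ * so it is safe to inspect before the address family is known.
+ */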
+ if (iph->version == 4) {
+ txd->flags |= NFD3_DESC_TX_IP4_CSUM;
+ l4_hdr = iph->protocol;
+ } else if (ipv6h->version == 6) {
+ l4_hdr = ipv6h->nexthdr;
+ } else {
+ nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
+ return;
+ }
+
+ switch (l4_hdr) {
+ case IPPROTO_TCP:
+ txd->flags |= NFD3_DESC_TX_TCP_CSUM;
+ break;
+ case IPPROTO_UDP:
+ txd->flags |= NFD3_DESC_TX_UDP_CSUM;
+ break;
+ default:
+ nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
+ return;
+ }
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ if (skb->encapsulation)
+ r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
+ else
+ r_vec->hw_csum_tx += txbuf->pkt_cnt;
+ u64_stats_update_end(&r_vec->tx_sync);
+}
+
+static int nfp_nfd3_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ unsigned char *data;
+ u32 meta_id = 0;
+ int md_bytes;
+
+ if (likely(!md_dst && !tls_handle))
+ return 0;
+ if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX)) {
+ if (!tls_handle)
+ return 0;
+ md_dst = NULL;
+ }
+
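+ /* Prepend layout: one 4B meta_id word, plus a 4B port id when md_dst
+ * is set and an 8B TLS connection handle when one is present.
+ */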
+ md_bytes = 4 + !!md_dst * 4 + !!tls_handle * 8;
+
+ if (unlikely(skb_cow_head(skb, md_bytes)))
+ return -ENOMEM;
+
+ meta_id = 0;
+ data = skb_push(skb, md_bytes) + md_bytes;
+ if (md_dst) {
+ data -= 4;
+ put_unaligned_be32(md_dst->u.port_info.port_id, data);
+ meta_id = NFP_NET_META_PORTID;
+ }
+ if (tls_handle) {
+ /* conn handle is opaque, we just use u64 to be able to quickly
+ * compare it to zero
+ */
+ data -= 8;
+ memcpy(data, &tls_handle, sizeof(tls_handle));
+ meta_id <<= NFP_NET_META_FIELD_SIZE;
+ meta_id |= NFP_NET_META_CONN_HANDLE;
+ }
+
+ data -= 4;
+ put_unaligned_be32(meta_id, data);
+
+ return md_bytes;
+}
+
+/**
+ * nfp_nfd3_tx() - Main transmit entry point
+ * @skb: SKB to transmit
+ * @netdev: netdev structure
+ *
+ * Return: NETDEV_TX_OK on success.
+ */
+netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ int f, nr_frags, wr_idx, md_bytes;
+ struct nfp_net_tx_ring *tx_ring;
+ struct nfp_net_r_vector *r_vec;
+ struct nfp_nfd3_tx_buf *txbuf;
+ struct nfp_nfd3_tx_desc *txd;
+ struct netdev_queue *nd_q;
+ const skb_frag_t *frag;
+ struct nfp_net_dp *dp;
+ dma_addr_t dma_addr;
+ unsigned int fsize;
+ u64 tls_handle = 0;
+ u16 qidx;
+
+ dp = &nn->dp;
+ qidx = skb_get_queue_mapping(skb);
+ tx_ring = &dp->tx_rings[qidx];
+ r_vec = tx_ring->r_vec;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
+ nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
+ qidx, tx_ring->wr_p, tx_ring->rd_p);
+ nd_q = netdev_get_tx_queue(dp->netdev, qidx);
+ netif_tx_stop_queue(nd_q);
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_busy++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ return NETDEV_TX_BUSY;
+ }
+
+ skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
+ if (unlikely(!skb)) {
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ return NETDEV_TX_OK;
+ }
+
+ md_bytes = nfp_nfd3_prep_tx_meta(skb, tls_handle);
+ if (unlikely(md_bytes < 0))
+ goto err_flush;
+
+ /* Start with the head skbuf */
+ dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, dma_addr))
+ goto err_dma_err;
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+ /* Stash the soft descriptor of the head then initialize it */
+ txbuf = &tx_ring->txbufs[wr_idx];
+ txbuf->skb = skb;
+ txbuf->dma_addr = dma_addr;
+ txbuf->fidx = -1;
+ txbuf->pkt_cnt = 1;
+ txbuf->real_len = skb->len;
+
+ /* Build TX descriptor */
+ txd = &tx_ring->txds[wr_idx];
+ txd->offset_eop = (nr_frags ? 0 : NFD3_DESC_TX_EOP) | md_bytes;
+ txd->dma_len = cpu_to_le16(skb_headlen(skb));
+ nfp_desc_set_dma_addr(txd, dma_addr);
+ txd->data_len = cpu_to_le16(skb->len);
+
+ txd->flags = 0;
+ txd->mss = 0;
+ txd->lso_hdrlen = 0;
+
+ /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
+ nfp_nfd3_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
+ nfp_nfd3_tx_csum(dp, r_vec, txbuf, txd, skb);
+ if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
+ txd->flags |= NFD3_DESC_TX_VLAN;
+ txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
+ }
+
+ /* Gather DMA */
+ if (nr_frags > 0) {
+ __le64 second_half;
+
+ /* all descs must match except in addr, length and eop */
+ second_half = txd->vals8[1];
+
+ for (f = 0; f < nr_frags; f++) {
+ frag = &skb_shinfo(skb)->frags[f];
+ fsize = skb_frag_size(frag);
+
+ dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
+ fsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, dma_addr))
+ goto err_unmap;
+
+ wr_idx = D_IDX(tx_ring, wr_idx + 1);
+ tx_ring->txbufs[wr_idx].skb = skb;
+ tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
+ tx_ring->txbufs[wr_idx].fidx = f;
+
+ txd = &tx_ring->txds[wr_idx];
+ txd->dma_len = cpu_to_le16(fsize);
+ nfp_desc_set_dma_addr(txd, dma_addr);
+ txd->offset_eop = md_bytes |
+ ((f == nr_frags - 1) ? NFD3_DESC_TX_EOP : 0);
+ txd->vals8[1] = second_half;
+ }
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_gather++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ }
+
+ skb_tx_timestamp(skb);
+
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+
+ tx_ring->wr_p += nr_frags + 1;
+ if (nfp_nfd3_tx_ring_should_stop(tx_ring))
+ nfp_nfd3_tx_ring_stop(nd_q, tx_ring);
+
+ tx_ring->wr_ptr_add += nr_frags + 1;
+ if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
+ nfp_net_tx_xmit_more_flush(tx_ring);
+
+ return NETDEV_TX_OK;
+
+err_unmap:
+ while (--f >= 0) {
+ frag = &skb_shinfo(skb)->frags[f];
+ dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ tx_ring->txbufs[wr_idx].skb = NULL;
+ tx_ring->txbufs[wr_idx].dma_addr = 0;
+ tx_ring->txbufs[wr_idx].fidx = -2;
+ wr_idx = wr_idx - 1;
+ if (wr_idx < 0)
+ wr_idx += tx_ring->cnt;
+ }
+ dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ tx_ring->txbufs[wr_idx].skb = NULL;
+ tx_ring->txbufs[wr_idx].dma_addr = 0;
+ tx_ring->txbufs[wr_idx].fidx = -2;
+err_dma_err:
+ nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
+err_flush:
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_errors++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ nfp_net_tls_tx_undo(skb, tls_handle);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * nfp_nfd3_tx_complete() - Handle completed TX packets
+ * @tx_ring: TX ring structure
+ * @budget: NAPI budget (only used as bool to determine if in NAPI context)
+ */
+void nfp_nfd3_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ u32 done_pkts = 0, done_bytes = 0;
+ struct netdev_queue *nd_q;
+ u32 qcp_rd_p;
+ int todo;
+
+ if (tx_ring->wr_p == tx_ring->rd_p)
+ return;
+
+ /* Work out how many descriptors have been transmitted */
+ qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);
+
+ if (qcp_rd_p == tx_ring->qcp_rd_p)
+ return;
+
+ todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+
+ while (todo--) {
+ const skb_frag_t *frag;
+ struct nfp_nfd3_tx_buf *tx_buf;
+ struct sk_buff *skb;
+ int fidx, nr_frags;
+ int idx;
+
+ idx = D_IDX(tx_ring, tx_ring->rd_p++);
+ tx_buf = &tx_ring->txbufs[idx];
+
+ skb = tx_buf->skb;
+ if (!skb)
+ continue;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ fidx = tx_buf->fidx;
+
+ if (fidx == -1) {
+ /* unmap head */
+ dma_unmap_single(dp->dev, tx_buf->dma_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
+ done_pkts += tx_buf->pkt_cnt;
+ done_bytes += tx_buf->real_len;
+ } else {
+ /* unmap fragment */
+ frag = &skb_shinfo(skb)->frags[fidx];
+ dma_unmap_page(dp->dev, tx_buf->dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ }
+
+ /* check for last gather fragment */
+ if (fidx == nr_frags - 1)
+ napi_consume_skb(skb, budget);
+
+ tx_buf->dma_addr = 0;
+ tx_buf->skb = NULL;
+ tx_buf->fidx = -2;
+ }
+
+ tx_ring->qcp_rd_p = qcp_rd_p;
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_bytes += done_bytes;
+ r_vec->tx_pkts += done_pkts;
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ if (!dp->netdev)
+ return;
+
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+ netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
+ if (nfp_nfd3_tx_ring_should_wake(tx_ring)) {
+ /* Make sure TX thread will see updated tx_ring->rd_p */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(nd_q)))
+ netif_tx_wake_queue(nd_q);
+ }
+
+ WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+ "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+ tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+}
+
+static bool nfp_nfd3_xdp_complete(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ u32 done_pkts = 0, done_bytes = 0;
+ bool done_all;
+ int idx, todo;
+ u32 qcp_rd_p;
+
+ /* Work out how many descriptors have been transmitted */
+ qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);
+
+ if (qcp_rd_p == tx_ring->qcp_rd_p)
+ return true;
+
+ todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+
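+ /* Bound the reclaim work done from the RX poll loop; done_all tells
+ * the caller whether the ring was fully drained.
+ */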
+ done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
+ todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
+
+ tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
+
+ done_pkts = todo;
+ while (todo--) {
+ idx = D_IDX(tx_ring, tx_ring->rd_p);
+ tx_ring->rd_p++;
+
+ done_bytes += tx_ring->txbufs[idx].real_len;
+ }
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_bytes += done_bytes;
+ r_vec->tx_pkts += done_pkts;
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+ "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+ tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+
+ return done_all;
+}
+
+/* Receive processing
+ */
+
+static void *
+nfp_nfd3_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
+{
+ void *frag;
+
+ if (!dp->xdp_prog) {
+ frag = napi_alloc_frag(dp->fl_bufsz);
+ if (unlikely(!frag))
+ return NULL;
+ } else {
+ struct page *page;
+
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return NULL;
+ frag = page_address(page);
+ }
+
+ *dma_addr = nfp_net_dma_map_rx(dp, frag);
+ if (dma_mapping_error(dp->dev, *dma_addr)) {
+ nfp_net_free_frag(frag, dp->xdp_prog);
+ nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
+ return NULL;
+ }
+
+ return frag;
+}
+
+/**
+ * nfp_nfd3_rx_give_one() - Put mapped skb on the software and hardware rings
+ * @dp: NFP Net data path struct
+ * @rx_ring: RX ring structure
+ * @frag: page fragment buffer
+ * @dma_addr: DMA address of skb mapping
+ */
+static void
+nfp_nfd3_rx_give_one(const struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring,
+ void *frag, dma_addr_t dma_addr)
+{
+ unsigned int wr_idx;
+
+ wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
+
+ nfp_net_dma_sync_dev_rx(dp, dma_addr);
+
+ /* Stash SKB and DMA address away */
+ rx_ring->rxbufs[wr_idx].frag = frag;
+ rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
+
+ /* Fill freelist descriptor */
+ rx_ring->rxds[wr_idx].fld.reserved = 0;
+ rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
+ nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
+ dma_addr + dp->rx_dma_off);
+
+ rx_ring->wr_p++;
+ if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
+ /* Update write pointer of the freelist queue. Make
+ * sure all writes are flushed before telling the hardware.
+ */
+ wmb();
+ nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
+ }
+}
+
+/**
+ * nfp_nfd3_rx_ring_fill_freelist() - Give buffers from the ring to FW
+ * @dp: NFP Net data path struct
+ * @rx_ring: RX ring to fill
+ */
+void nfp_nfd3_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int i;
+
+ if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
+ return nfp_net_xsk_rx_ring_fill_freelist(rx_ring);
+
+ for (i = 0; i < rx_ring->cnt - 1; i++)
+ nfp_nfd3_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
+ rx_ring->rxbufs[i].dma_addr);
+}
+
+/**
+ * nfp_nfd3_rx_csum_has_errors() - check whether the rxd reports any csum errors
+ * @flags: RX descriptor flags field in CPU byte order
+ */
+static int nfp_nfd3_rx_csum_has_errors(u16 flags)
+{
+ u16 csum_all_checked, csum_all_ok;
+
+ csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
+ csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
+
+ return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
+}
+
+/**
+ * nfp_nfd3_rx_csum() - set SKB checksum field based on RX descriptor flags
+ * @dp: NFP Net data path struct
+ * @r_vec: per-ring structure
+ * @rxd: Pointer to RX descriptor
+ * @meta: Parsed metadata prepend
+ * @skb: Pointer to SKB
+ */
+void
+nfp_nfd3_rx_csum(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ const struct nfp_net_rx_desc *rxd,
+ const struct nfp_meta_parsed *meta, struct sk_buff *skb)
+{
+ skb_checksum_none_assert(skb);
+
+ if (!(dp->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (meta->csum_type) {
+ skb->ip_summed = meta->csum_type;
+ skb->csum = meta->csum;
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_complete++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ return;
+ }
+
+ if (nfp_nfd3_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_error++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ return;
+ }
+
+ /* Assume that the firmware will never report inner CSUM_OK unless outer
+ * L4 headers were successfully parsed. FW will always report zero UDP
+ * checksum as CSUM_OK.
+ */
+ if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
+ rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
+ __skb_incr_checksum_unnecessary(skb);
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_ok++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ }
+
+ if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
+ rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
+ __skb_incr_checksum_unnecessary(skb);
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_inner_ok++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ }
+}
+
+static void
+nfp_nfd3_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ unsigned int type, __be32 *hash)
+{
+ if (!(netdev->features & NETIF_F_RXHASH))
+ return;
+
+ switch (type) {
+ case NFP_NET_RSS_IPV4:
+ case NFP_NET_RSS_IPV6:
+ case NFP_NET_RSS_IPV6_EX:
+ meta->hash_type = PKT_HASH_TYPE_L3;
+ break;
+ default:
+ meta->hash_type = PKT_HASH_TYPE_L4;
+ break;
+ }
+
+ meta->hash = get_unaligned_be32(hash);
+}
+
+static void
+nfp_nfd3_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ void *data, struct nfp_net_rx_desc *rxd)
+{
+ struct nfp_net_rx_hash *rx_hash = data;
+
+ if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
+ return;
+
+ nfp_nfd3_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
+ &rx_hash->hash);
+}
+
+bool
+nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ void *data, void *pkt, unsigned int pkt_len, int meta_len)
+{
+ u32 meta_info;
+
+ meta_info = get_unaligned_be32(data);
+ data += 4;
+
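+ /* meta_info packs a chain of 4-bit field type ids, consumed from the
+ * low bits up; each field's payload follows in the data stream.
+ */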
+ while (meta_info) {
+ switch (meta_info & NFP_NET_META_FIELD_MASK) {
+ case NFP_NET_META_HASH:
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ nfp_nfd3_set_hash(netdev, meta,
+ meta_info & NFP_NET_META_FIELD_MASK,
+ (__be32 *)data);
+ data += 4;
+ break;
+ case NFP_NET_META_MARK:
+ meta->mark = get_unaligned_be32(data);
+ data += 4;
+ break;
+ case NFP_NET_META_PORTID:
+ meta->portid = get_unaligned_be32(data);
+ data += 4;
+ break;
+ case NFP_NET_META_CSUM:
+ meta->csum_type = CHECKSUM_COMPLETE;
+ meta->csum =
+ (__force __wsum)__get_unaligned_cpu32(data);
+ data += 4;
+ break;
+ case NFP_NET_META_RESYNC_INFO:
+ if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
+ pkt_len))
+ return false;
+ data += sizeof(struct nfp_net_tls_resync_req);
+ break;
+ default:
+ return true;
+ }
+
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ }
+
+ return data != pkt;
+}
+
+static void
+nfp_nfd3_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
+ struct sk_buff *skb)
+{
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_drops++;
+ /* If we have both skb and rxbuf the replacement buffer allocation
+ * must have failed, count this as an alloc failure.
+ */
+ if (skb && rxbuf)
+ r_vec->rx_replace_buf_alloc_fail++;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ /* The skb is built on top of the frag; freeing the skb would free
+ * the frag, so take an extra page ref to be able to reuse it.
+ */
+ if (skb && rxbuf && skb->head == rxbuf->frag)
+ page_ref_inc(virt_to_head_page(rxbuf->frag));
+ if (rxbuf)
+ nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
+ if (skb)
+ dev_kfree_skb_any(skb);
+}
+
+static bool
+nfp_nfd3_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
+ struct nfp_net_tx_ring *tx_ring,
+ struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
+ unsigned int pkt_len, bool *completed)
+{
+ unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
+ struct nfp_nfd3_tx_buf *txbuf;
+ struct nfp_nfd3_tx_desc *txd;
+ int wr_idx;
+
+ /* Reject if xdp_adjust_tail grew the packet beyond the DMA area */
+ if (pkt_len + dma_off > dma_map_sz)
+ return false;
+
+ if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+ if (!*completed) {
+ nfp_nfd3_xdp_complete(tx_ring);
+ *completed = true;
+ }
+
+ if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+ nfp_nfd3_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
+ NULL);
+ return false;
+ }
+ }
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+ /* Stash the soft descriptor of the head then initialize it */
+ txbuf = &tx_ring->txbufs[wr_idx];
+
+ nfp_nfd3_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);
+
+ txbuf->frag = rxbuf->frag;
+ txbuf->dma_addr = rxbuf->dma_addr;
+ txbuf->fidx = -1;
+ txbuf->pkt_cnt = 1;
+ txbuf->real_len = pkt_len;
+
+ dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
+ pkt_len, DMA_BIDIRECTIONAL);
+
+ /* Build TX descriptor */
+ txd = &tx_ring->txds[wr_idx];
+ txd->offset_eop = NFD3_DESC_TX_EOP;
+ txd->dma_len = cpu_to_le16(pkt_len);
+ nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
+ txd->data_len = cpu_to_le16(pkt_len);
+
+ txd->flags = 0;
+ txd->mss = 0;
+ txd->lso_hdrlen = 0;
+
+ tx_ring->wr_p++;
+ tx_ring->wr_ptr_add++;
+ return true;
+}
+
+/**
+ * nfp_nfd3_rx() - receive up to @budget packets on @rx_ring
+ * @rx_ring: RX ring to receive from
+ * @budget: NAPI budget
+ *
+ * Note, this function is factored out of the napi poll function to keep
+ * the packet receive code separate from the other bookkeeping performed
+ * there.
+ *
+ * Return: Number of packets received.
+ */
+static int nfp_nfd3_rx(struct nfp_net_rx_ring *rx_ring, int budget)
+{
+ struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ struct nfp_net_tx_ring *tx_ring;
+ struct bpf_prog *xdp_prog;
+ bool xdp_tx_cmpl = false;
+ unsigned int true_bufsz;
+ struct sk_buff *skb;
+ int pkts_polled = 0;
+ struct xdp_buff xdp;
+ int idx;
+
+ xdp_prog = READ_ONCE(dp->xdp_prog);
+ true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
+ xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
+ &rx_ring->xdp_rxq);
+ tx_ring = r_vec->xdp_ring;
+
+ while (pkts_polled < budget) {
+ unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
+ struct nfp_net_rx_buf *rxbuf;
+ struct nfp_net_rx_desc *rxd;
+ struct nfp_meta_parsed meta;
+ bool redir_egress = false;
+ struct net_device *netdev;
+ dma_addr_t new_dma_addr;
+ u32 meta_len_xdp = 0;
+ void *new_frag;
+
+ idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+ rxd = &rx_ring->rxds[idx];
+ if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+ break;
+
+ /* Memory barrier to ensure that we won't do other reads
+ * before the DD bit.
+ */
+ dma_rmb();
+
+ memset(&meta, 0, sizeof(meta));
+
+ rx_ring->rd_p++;
+ pkts_polled++;
+
+ rxbuf = &rx_ring->rxbufs[idx];
+ /* < meta_len >
+ * <-- [rx_offset] -->
+ * ---------------------------------------------------------
+ * | [XX] | metadata | packet | XXXX |
+ * ---------------------------------------------------------
+ * <---------------- data_len --------------->
+ *
+ * The rx_offset is fixed for all packets, the meta_len can vary
+ * on a packet by packet basis. If rx_offset is set to zero
+ * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
+ * buffer and is immediately followed by the packet (no [XX]).
+ */
+ meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+ data_len = le16_to_cpu(rxd->rxd.data_len);
+ pkt_len = data_len - meta_len;
+
+ pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ pkt_off += meta_len;
+ else
+ pkt_off += dp->rx_offset;
+ meta_off = pkt_off - meta_len;
+
+ /* Stats update */
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_pkts++;
+ r_vec->rx_bytes += pkt_len;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
+ (dp->rx_offset && meta_len > dp->rx_offset))) {
+ nn_dp_warn(dp, "oversized RX packet metadata %u\n",
+ meta_len);
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ continue;
+ }
+
+ nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
+ data_len);
+
+ if (!dp->chained_metadata_format) {
+ nfp_nfd3_set_hash_desc(dp->netdev, &meta,
+ rxbuf->frag + meta_off, rxd);
+ } else if (meta_len) {
+ if (unlikely(nfp_nfd3_parse_meta(dp->netdev, &meta,
+ rxbuf->frag + meta_off,
+ rxbuf->frag + pkt_off,
+ pkt_len, meta_len))) {
+ nn_dp_warn(dp, "invalid RX packet metadata\n");
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf,
+ NULL);
+ continue;
+ }
+ }
+
+ if (xdp_prog && !meta.portid) {
+ void *orig_data = rxbuf->frag + pkt_off;
+ unsigned int dma_off;
+ int act;
+
+ xdp_prepare_buff(&xdp,
+ rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
+ pkt_off - NFP_NET_RX_BUF_HEADROOM,
+ pkt_len, true);
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
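+ /* The program may have moved the packet start or end
+ * (adjust_head/adjust_tail); recompute length and
+ * offset before acting on the verdict.
+ */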
+ pkt_len = xdp.data_end - xdp.data;
+ pkt_off += xdp.data - orig_data;
+
+ switch (act) {
+ case XDP_PASS:
+ meta_len_xdp = xdp.data - xdp.data_meta;
+ break;
+ case XDP_TX:
+ dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
+ if (unlikely(!nfp_nfd3_tx_xdp_buf(dp, rx_ring,
+ tx_ring,
+ rxbuf,
+ dma_off,
+ pkt_len,
+ &xdp_tx_cmpl)))
+ trace_xdp_exception(dp->netdev,
+ xdp_prog, act);
+ continue;
+ default:
+ bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(dp->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag,
+ rxbuf->dma_addr);
+ continue;
+ }
+ }
+
+ if (likely(!meta.portid)) {
+ netdev = dp->netdev;
+ } else if (meta.portid == NFP_META_PORT_ID_CTRL) {
+ struct nfp_net *nn = netdev_priv(dp->netdev);
+
+ nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
+ pkt_len);
+ nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag,
+ rxbuf->dma_addr);
+ continue;
+ } else {
+ struct nfp_net *nn;
+
+ nn = netdev_priv(dp->netdev);
+ netdev = nfp_app_dev_get(nn->app, meta.portid,
+ &redir_egress);
+ if (unlikely(!netdev)) {
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf,
+ NULL);
+ continue;
+ }
+
+ if (nfp_netdev_is_nfp_repr(netdev))
+ nfp_repr_inc_rx_stats(netdev, pkt_len);
+ }
+
+ skb = build_skb(rxbuf->frag, true_bufsz);
+ if (unlikely(!skb)) {
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ continue;
+ }
+ new_frag = nfp_nfd3_napi_alloc_one(dp, &new_dma_addr);
+ if (unlikely(!new_frag)) {
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+ continue;
+ }
+
+ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+ nfp_nfd3_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
+ skb_reserve(skb, pkt_off);
+ skb_put(skb, pkt_len);
+
+ skb->mark = meta.mark;
+ skb_set_hash(skb, meta.hash, meta.hash_type);
+
+ skb_record_rx_queue(skb, rx_ring->idx);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ nfp_nfd3_rx_csum(dp, r_vec, rxd, &meta, skb);
+
+#ifdef CONFIG_TLS_DEVICE
+ if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
+ skb->decrypted = true;
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_tls_rx++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ }
+#endif
+
+ if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ le16_to_cpu(rxd->rxd.vlan));
+ if (meta_len_xdp)
+ skb_metadata_set(skb, meta_len_xdp);
+
+ if (likely(!redir_egress)) {
+ napi_gro_receive(&rx_ring->r_vec->napi, skb);
+ } else {
+ skb->dev = netdev;
+ skb_reset_network_header(skb);
+ __skb_push(skb, ETH_HLEN);
+ dev_queue_xmit(skb);
+ }
+ }
+
+ if (xdp_prog) {
+ if (tx_ring->wr_ptr_add)
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
+ !xdp_tx_cmpl)
+ if (!nfp_nfd3_xdp_complete(tx_ring))
+ pkts_polled = budget;
+ }
+
+ return pkts_polled;
+}
+
+/**
+ * nfp_nfd3_poll() - napi poll function
+ * @napi: NAPI structure
+ * @budget: NAPI budget
+ *
+ * Return: number of packets polled.
+ */
+int nfp_nfd3_poll(struct napi_struct *napi, int budget)
+{
+ struct nfp_net_r_vector *r_vec =
+ container_of(napi, struct nfp_net_r_vector, napi);
+ unsigned int pkts_polled = 0;
+
+ if (r_vec->tx_ring)
+ nfp_nfd3_tx_complete(r_vec->tx_ring, budget);
+ if (r_vec->rx_ring)
+ pkts_polled = nfp_nfd3_rx(r_vec->rx_ring, budget);
+
+ if (pkts_polled < budget)
+ if (napi_complete_done(napi, pkts_polled))
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+
+ if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
+ struct dim_sample dim_sample = {};
+ unsigned int start;
+ u64 pkts, bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&r_vec->rx_sync);
+ pkts = r_vec->rx_pkts;
+ bytes = r_vec->rx_bytes;
+ } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+
+ dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
+ net_dim(&r_vec->rx_dim, dim_sample);
+ }
+
+ if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
+ struct dim_sample dim_sample = {};
+ unsigned int start;
+ u64 pkts, bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&r_vec->tx_sync);
+ pkts = r_vec->tx_pkts;
+ bytes = r_vec->tx_bytes;
+ } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+
+ dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
+ net_dim(&r_vec->tx_dim, dim_sample);
+ }
+
+ return pkts_polled;
+}
+
+/* Control device data path
+ */
+
+bool
+nfp_nfd3_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, bool old)
+{
+ unsigned int real_len = skb->len, meta_len = 0;
+ struct nfp_net_tx_ring *tx_ring;
+ struct nfp_nfd3_tx_buf *txbuf;
+ struct nfp_nfd3_tx_desc *txd;
+ struct nfp_net_dp *dp;
+ dma_addr_t dma_addr;
+ int wr_idx;
+
+ dp = &r_vec->nfp_net->dp;
+ tx_ring = r_vec->tx_ring;
+
+ if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
+ nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
+ goto err_free;
+ }
+
+ if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_busy++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ if (!old)
+ __skb_queue_tail(&r_vec->queue, skb);
+ else
+ __skb_queue_head(&r_vec->queue, skb);
+ return true;
+ }
+
+ if (nfp_app_ctrl_has_meta(nn->app)) {
+ if (unlikely(skb_headroom(skb) < 8)) {
+ nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
+ goto err_free;
+ }
+ meta_len = 8;
+ put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
+ put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
+ }
+
+ /* Start with the head skbuf */
+ dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, dma_addr))
+ goto err_dma_warn;
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+ /* Stash the soft descriptor of the head then initialize it */
+ txbuf = &tx_ring->txbufs[wr_idx];
+ txbuf->skb = skb;
+ txbuf->dma_addr = dma_addr;
+ txbuf->fidx = -1;
+ txbuf->pkt_cnt = 1;
+ txbuf->real_len = real_len;
+
+ /* Build TX descriptor */
+ txd = &tx_ring->txds[wr_idx];
+ txd->offset_eop = meta_len | NFD3_DESC_TX_EOP;
+ txd->dma_len = cpu_to_le16(skb_headlen(skb));
+ nfp_desc_set_dma_addr(txd, dma_addr);
+ txd->data_len = cpu_to_le16(skb->len);
+
+ txd->flags = 0;
+ txd->mss = 0;
+ txd->lso_hdrlen = 0;
+
+ tx_ring->wr_p++;
+ tx_ring->wr_ptr_add++;
+ nfp_net_tx_xmit_more_flush(tx_ring);
+
+ return false;
+
+err_dma_warn:
+ nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
+err_free:
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_errors++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&r_vec->queue)))
+ if (nfp_nfd3_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
+ return;
+}
+
+static bool
+nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
+{
+ u32 meta_type, meta_tag;
+
+ if (!nfp_app_ctrl_has_meta(nn->app))
+ return !meta_len;
+
+ if (meta_len != 8)
+ return false;
+
+ meta_type = get_unaligned_be32(data);
+ meta_tag = get_unaligned_be32(data + 4);
+
+ return (meta_type == NFP_NET_META_PORTID &&
+ meta_tag == NFP_META_PORT_ID_CTRL);
+}
+
+static bool
+nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
+ struct nfp_net_rx_buf *rxbuf;
+ struct nfp_net_rx_desc *rxd;
+ dma_addr_t new_dma_addr;
+ struct sk_buff *skb;
+ void *new_frag;
+ int idx;
+
+ idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+ rxd = &rx_ring->rxds[idx];
+ if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+ return false;
+
+ /* Memory barrier to ensure that we won't do other reads
+ * before the DD bit.
+ */
+ dma_rmb();
+
+ rx_ring->rd_p++;
+
+ rxbuf = &rx_ring->rxbufs[idx];
+ meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+ data_len = le16_to_cpu(rxd->rxd.data_len);
+ pkt_len = data_len - meta_len;
+
+ pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ pkt_off += meta_len;
+ else
+ pkt_off += dp->rx_offset;
+ meta_off = pkt_off - meta_len;
+
+ /* Stats update */
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_pkts++;
+ r_vec->rx_bytes += pkt_len;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
+
+ if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
+ nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
+ meta_len);
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ return true;
+ }
+
+ skb = build_skb(rxbuf->frag, dp->fl_bufsz);
+ if (unlikely(!skb)) {
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ return true;
+ }
+ new_frag = nfp_nfd3_napi_alloc_one(dp, &new_dma_addr);
+ if (unlikely(!new_frag)) {
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+ return true;
+ }
+
+ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+ nfp_nfd3_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
+ skb_reserve(skb, pkt_off);
+ skb_put(skb, pkt_len);
+
+ nfp_app_ctrl_rx(nn->app, skb);
+
+ return true;
+}
+
+static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+{
+ struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
+ struct nfp_net *nn = r_vec->nfp_net;
+ struct nfp_net_dp *dp = &nn->dp;
+ unsigned int budget = 512;
+
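+ /* The control path runs from a tasklet rather than NAPI, so use a
+ * fixed budget and let the caller reschedule if it is exhausted.
+ */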
+ while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
+ continue;
+
+ return budget;
+}
+
+void nfp_nfd3_ctrl_poll(struct tasklet_struct *t)
+{
+ struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);
+
+ spin_lock(&r_vec->lock);
+ nfp_nfd3_tx_complete(r_vec->tx_ring, 0);
+ __nfp_ctrl_tx_queued(r_vec);
+ spin_unlock(&r_vec->lock);
+
+ if (nfp_ctrl_rx(r_vec)) {
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+ } else {
+ tasklet_schedule(&r_vec->tasklet);
+ nn_dp_warn(&r_vec->nfp_net->dp,
+ "control message budget exceeded!\n");
+ }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h b/drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h
new file mode 100644
index 000000000000..7a0df9e6c3c4
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
+
+#ifndef _NFP_DP_NFD3_H_
+#define _NFP_DP_NFD3_H_
+
+struct sk_buff;
+struct net_device;
+
+/* TX descriptor format */
+
+#define NFD3_DESC_TX_EOP BIT(7)
+#define NFD3_DESC_TX_OFFSET_MASK GENMASK(6, 0)
+#define NFD3_DESC_TX_MSS_MASK GENMASK(13, 0)
+
+/* Flags in the host TX descriptor */
+#define NFD3_DESC_TX_CSUM BIT(7)
+#define NFD3_DESC_TX_IP4_CSUM BIT(6)
+#define NFD3_DESC_TX_TCP_CSUM BIT(5)
+#define NFD3_DESC_TX_UDP_CSUM BIT(4)
+#define NFD3_DESC_TX_VLAN BIT(3)
+#define NFD3_DESC_TX_LSO BIT(2)
+#define NFD3_DESC_TX_ENCAP BIT(1)
+#define NFD3_DESC_TX_O_IP4_CSUM BIT(0)
+
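+/* The vals/vals8 members overlay the descriptor fields so the TX gather
+ * path can copy the invariant second half of a descriptor in one store.
+ */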
+struct nfp_nfd3_tx_desc {
+ union {
+ struct {
+ u8 dma_addr_hi; /* High bits of host buf address */
+ __le16 dma_len; /* Length to DMA for this desc */
+ u8 offset_eop; /* Offset in buf where pkt starts +
+ * highest bit is eop flag.
+ */
+ __le32 dma_addr_lo; /* Low 32bit of host buf addr */
+
+ __le16 mss; /* MSS to be used for LSO */
+ u8 lso_hdrlen; /* LSO, TCP payload offset */
+ u8 flags; /* TX Flags, see @NFD3_DESC_TX_* */
+ union {
+ struct {
+ u8 l3_offset; /* L3 header offset */
+ u8 l4_offset; /* L4 header offset */
+ };
+ __le16 vlan; /* VLAN tag to add if indicated */
+ };
+ __le16 data_len; /* Length of frame + meta data */
+ } __packed;
+ __le32 vals[4];
+ __le64 vals8[2];
+ };
+};
+
+/**
+ * struct nfp_nfd3_tx_buf - software TX buffer descriptor
+ * @skb: normal ring, sk_buff associated with this buffer
+ * @frag: XDP ring, page frag associated with this buffer
+ * @xdp: XSK buffer pool handle (for AF_XDP)
+ * @dma_addr: DMA mapping address of the buffer
+ * @fidx: Fragment index (-1 for the head and [0..nr_frags-1] for frags)
+ * @pkt_cnt: Number of packets to be produced out of the skb associated
+ * with this buffer (valid only on the head's buffer).
+ * Will be 1 for all non-TSO packets.
+ * @is_xsk_tx: Flag if buffer is an RX buffer after an XDP_TX action and not
+ * a buffer from the TX queue (for AF_XDP).
+ * @real_len: Number of bytes to be produced out of the skb (valid only on
+ * the head's buffer). Equal to skb->len for non-TSO packets.
+ */
+struct nfp_nfd3_tx_buf {
+ union {
+ struct sk_buff *skb;
+ void *frag;
+ struct xdp_buff *xdp;
+ };
+ dma_addr_t dma_addr;
+ union {
+ struct {
+ short int fidx;
+ u16 pkt_cnt;
+ };
+ struct {
+ bool is_xsk_tx;
+ };
+ };
+ u32 real_len;
+};
+
+void
+nfp_nfd3_rx_csum(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ const struct nfp_net_rx_desc *rxd,
+ const struct nfp_meta_parsed *meta, struct sk_buff *skb);
+bool
+nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ void *data, void *pkt, unsigned int pkt_len, int meta_len);
+void nfp_nfd3_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget);
+int nfp_nfd3_poll(struct napi_struct *napi, int budget);
+netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev);
+bool
+nfp_nfd3_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, bool old);
+void nfp_nfd3_ctrl_poll(struct tasklet_struct *t);
+void nfp_nfd3_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring);
+void nfp_nfd3_xsk_tx_free(struct nfp_nfd3_tx_buf *txbuf);
+int nfp_nfd3_xsk_poll(struct napi_struct *napi, int budget);
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/rings.c b/drivers/net/ethernet/netronome/nfp/nfd3/rings.c
new file mode 100644
index 000000000000..47604d5e25eb
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/rings.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
+
+#include <linux/seq_file.h>
+
+#include "../nfp_net.h"
+#include "../nfp_net_dp.h"
+#include "../nfp_net_xsk.h"
+#include "nfd3.h"
+
+static void nfp_nfd3_xsk_tx_bufs_free(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_nfd3_tx_buf *txbuf;
+ unsigned int idx;
+
+ while (tx_ring->rd_p != tx_ring->wr_p) {
+ idx = D_IDX(tx_ring, tx_ring->rd_p);
+ txbuf = &tx_ring->txbufs[idx];
+
+ txbuf->real_len = 0;
+
+ tx_ring->qcp_rd_p++;
+ tx_ring->rd_p++;
+
+ if (tx_ring->r_vec->xsk_pool) {
+ if (txbuf->is_xsk_tx)
+ nfp_nfd3_xsk_tx_free(txbuf);
+
+ xsk_tx_completed(tx_ring->r_vec->xsk_pool, 1);
+ }
+ }
+}
+
+/**
+ * nfp_nfd3_tx_ring_reset() - Free any untransmitted buffers and reset pointers
+ * @dp: NFP Net data path struct
+ * @tx_ring: TX ring structure
+ *
+ * Assumes that the device is stopped, must be idempotent.
+ */
+static void
+nfp_nfd3_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+ struct netdev_queue *nd_q;
+ const skb_frag_t *frag;
+
+ while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
+ struct nfp_nfd3_tx_buf *tx_buf;
+ struct sk_buff *skb;
+ int idx, nr_frags;
+
+ idx = D_IDX(tx_ring, tx_ring->rd_p);
+ tx_buf = &tx_ring->txbufs[idx];
+
+ skb = tx_ring->txbufs[idx].skb;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (tx_buf->fidx == -1) {
+ /* unmap head */
+ dma_unmap_single(dp->dev, tx_buf->dma_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ } else {
+ /* unmap fragment */
+ frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
+ dma_unmap_page(dp->dev, tx_buf->dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ }
+
+ /* check for last gather fragment */
+ if (tx_buf->fidx == nr_frags - 1)
+ dev_kfree_skb_any(skb);
+
+ tx_buf->dma_addr = 0;
+ tx_buf->skb = NULL;
+ tx_buf->fidx = -2;
+
+ tx_ring->qcp_rd_p++;
+ tx_ring->rd_p++;
+ }
+
+ if (tx_ring->is_xdp)
+ nfp_nfd3_xsk_tx_bufs_free(tx_ring);
+
+ memset(tx_ring->txds, 0, tx_ring->size);
+ tx_ring->wr_p = 0;
+ tx_ring->rd_p = 0;
+ tx_ring->qcp_rd_p = 0;
+ tx_ring->wr_ptr_add = 0;
+
+ if (tx_ring->is_xdp || !dp->netdev)
+ return;
+
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+ netdev_tx_reset_queue(nd_q);
+}
+
+/**
+ * nfp_nfd3_tx_ring_free() - Free resources allocated to a TX ring
+ * @tx_ring: TX ring to free
+ */
+static void nfp_nfd3_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+
+ kvfree(tx_ring->txbufs);
+
+ if (tx_ring->txds)
+ dma_free_coherent(dp->dev, tx_ring->size,
+ tx_ring->txds, tx_ring->dma);
+
+ tx_ring->cnt = 0;
+ tx_ring->txbufs = NULL;
+ tx_ring->txds = NULL;
+ tx_ring->dma = 0;
+ tx_ring->size = 0;
+}
+
+/**
+ * nfp_nfd3_tx_ring_alloc() - Allocate resources for a TX ring
+ * @dp:        NFP Net data path struct
+ * @tx_ring:   TX ring structure to allocate
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int
+nfp_nfd3_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+
+ tx_ring->cnt = dp->txd_cnt;
+
+ tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
+ tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
+ &tx_ring->dma,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!tx_ring->txds) {
+ netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
+ tx_ring->cnt);
+ goto err_alloc;
+ }
+
+ tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs),
+ GFP_KERNEL);
+ if (!tx_ring->txbufs)
+ goto err_alloc;
+
+ if (!tx_ring->is_xdp && dp->netdev)
+ netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
+ tx_ring->idx);
+
+ return 0;
+
+err_alloc:
+ nfp_nfd3_tx_ring_free(tx_ring);
+ return -ENOMEM;
+}
+
+static void
+nfp_nfd3_tx_ring_bufs_free(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ unsigned int i;
+
+ if (!tx_ring->is_xdp)
+ return;
+
+ for (i = 0; i < tx_ring->cnt; i++) {
+ if (!tx_ring->txbufs[i].frag)
+ return;
+
+ nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
+ __free_page(virt_to_page(tx_ring->txbufs[i].frag));
+ }
+}
+
+static int
+nfp_nfd3_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_nfd3_tx_buf *txbufs = tx_ring->txbufs;
+ unsigned int i;
+
+ if (!tx_ring->is_xdp)
+ return 0;
+
+ for (i = 0; i < tx_ring->cnt; i++) {
+ txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr);
+ if (!txbufs[i].frag) {
+ nfp_nfd3_tx_ring_bufs_free(dp, tx_ring);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void
+nfp_nfd3_print_tx_descs(struct seq_file *file,
+ struct nfp_net_r_vector *r_vec,
+ struct nfp_net_tx_ring *tx_ring,
+ u32 d_rd_p, u32 d_wr_p)
+{
+ struct nfp_nfd3_tx_desc *txd;
+ u32 txd_cnt = tx_ring->cnt;
+ int i;
+
+ for (i = 0; i < txd_cnt; i++) {
+ struct xdp_buff *xdp;
+ struct sk_buff *skb;
+
+ txd = &tx_ring->txds[i];
+ seq_printf(file, "%04d: 0x%08x 0x%08x 0x%08x 0x%08x", i,
+ txd->vals[0], txd->vals[1],
+ txd->vals[2], txd->vals[3]);
+
+ if (!tx_ring->is_xdp) {
+ skb = READ_ONCE(tx_ring->txbufs[i].skb);
+ if (skb)
+ seq_printf(file, " skb->head=%p skb->data=%p",
+ skb->head, skb->data);
+ } else {
+ xdp = READ_ONCE(tx_ring->txbufs[i].xdp);
+ if (xdp)
+ seq_printf(file, " xdp->data=%p", xdp->data);
+ }
+
+ if (tx_ring->txbufs[i].dma_addr)
+ seq_printf(file, " dma_addr=%pad",
+ &tx_ring->txbufs[i].dma_addr);
+
+ if (i == tx_ring->rd_p % txd_cnt)
+ seq_puts(file, " H_RD");
+ if (i == tx_ring->wr_p % txd_cnt)
+ seq_puts(file, " H_WR");
+ if (i == d_rd_p % txd_cnt)
+ seq_puts(file, " D_RD");
+ if (i == d_wr_p % txd_cnt)
+ seq_puts(file, " D_WR");
+
+ seq_putc(file, '\n');
+ }
+}
+
+#define NFP_NFD3_CFG_CTRL_SUPPORTED \
+ (NFP_NET_CFG_CTRL_ENABLE | NFP_NET_CFG_CTRL_PROMISC | \
+ NFP_NET_CFG_CTRL_L2BC | NFP_NET_CFG_CTRL_L2MC | \
+ NFP_NET_CFG_CTRL_RXCSUM | NFP_NET_CFG_CTRL_TXCSUM | \
+ NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_TXVLAN | \
+ NFP_NET_CFG_CTRL_GATHER | NFP_NET_CFG_CTRL_LSO | \
+ NFP_NET_CFG_CTRL_CTAG_FILTER | NFP_NET_CFG_CTRL_CMSG_DATA | \
+ NFP_NET_CFG_CTRL_RINGCFG | NFP_NET_CFG_CTRL_RSS | \
+ NFP_NET_CFG_CTRL_IRQMOD | NFP_NET_CFG_CTRL_TXRWB | \
+ NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE | \
+ NFP_NET_CFG_CTRL_BPF | NFP_NET_CFG_CTRL_LSO2 | \
+ NFP_NET_CFG_CTRL_RSS2 | NFP_NET_CFG_CTRL_CSUM_COMPLETE | \
+ NFP_NET_CFG_CTRL_LIVE_ADDR)
+
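+/* Hooks through which the generic nfp_net code drives the NFD3 datapath;
+ * cap_mask lists the control-word features this datapath supports.
+ */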
+const struct nfp_dp_ops nfp_nfd3_ops = {
+ .version = NFP_NFD_VER_NFD3,
+ .tx_min_desc_per_pkt = 1,
+ .cap_mask = NFP_NFD3_CFG_CTRL_SUPPORTED,
+ .poll = nfp_nfd3_poll,
+ .xsk_poll = nfp_nfd3_xsk_poll,
+ .ctrl_poll = nfp_nfd3_ctrl_poll,
+ .xmit = nfp_nfd3_tx,
+ .ctrl_tx_one = nfp_nfd3_ctrl_tx_one,
+ .rx_ring_fill_freelist = nfp_nfd3_rx_ring_fill_freelist,
+ .tx_ring_alloc = nfp_nfd3_tx_ring_alloc,
+ .tx_ring_reset = nfp_nfd3_tx_ring_reset,
+ .tx_ring_free = nfp_nfd3_tx_ring_free,
+ .tx_ring_bufs_alloc = nfp_nfd3_tx_ring_bufs_alloc,
+ .tx_ring_bufs_free = nfp_nfd3_tx_ring_bufs_free,
+ .print_tx_descs = nfp_nfd3_print_tx_descs
+};
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
new file mode 100644
index 000000000000..c16c4b42ecfd
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc */
+/* Copyright (C) 2021 Corigine, Inc */
+
+#include <linux/bpf_trace.h>
+#include <linux/netdevice.h>
+
+#include "../nfp_app.h"
+#include "../nfp_net.h"
+#include "../nfp_net_dp.h"
+#include "../nfp_net_xsk.h"
+#include "nfd3.h"
+
+static bool
+nfp_nfd3_xsk_tx_xdp(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct nfp_net_rx_ring *rx_ring,
+ struct nfp_net_tx_ring *tx_ring,
+ struct nfp_net_xsk_rx_buf *xrxbuf, unsigned int pkt_len,
+ int pkt_off)
+{
+ struct xsk_buff_pool *pool = r_vec->xsk_pool;
+ struct nfp_nfd3_tx_buf *txbuf;
+ struct nfp_nfd3_tx_desc *txd;
+ unsigned int wr_idx;
+
+ if (nfp_net_tx_space(tx_ring) < 1)
+ return false;
+
+ xsk_buff_raw_dma_sync_for_device(pool, xrxbuf->dma_addr + pkt_off,
+ pkt_len);
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+ txbuf = &tx_ring->txbufs[wr_idx];
+ txbuf->xdp = xrxbuf->xdp;
+ txbuf->real_len = pkt_len;
+ txbuf->is_xsk_tx = true;
+
+ /* Build TX descriptor */
+ txd = &tx_ring->txds[wr_idx];
+ txd->offset_eop = NFD3_DESC_TX_EOP;
+ txd->dma_len = cpu_to_le16(pkt_len);
+ nfp_desc_set_dma_addr(txd, xrxbuf->dma_addr + pkt_off);
+ txd->data_len = cpu_to_le16(pkt_len);
+
+ txd->flags = 0;
+ txd->mss = 0;
+ txd->lso_hdrlen = 0;
+
+ tx_ring->wr_ptr_add++;
+ tx_ring->wr_p++;
+
+ return true;
+}
+
+static void nfp_nfd3_xsk_rx_skb(struct nfp_net_rx_ring *rx_ring,
+ const struct nfp_net_rx_desc *rxd,
+ struct nfp_net_xsk_rx_buf *xrxbuf,
+ const struct nfp_meta_parsed *meta,
+ unsigned int pkt_len,
+ bool meta_xdp,
+ unsigned int *skbs_polled)
+{
+ struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ struct net_device *netdev;
+ struct sk_buff *skb;
+
+ if (likely(!meta->portid)) {
+ netdev = dp->netdev;
+ } else {
+ struct nfp_net *nn = netdev_priv(dp->netdev);
+
+ netdev = nfp_app_dev_get(nn->app, meta->portid, NULL);
+ if (unlikely(!netdev)) {
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ return;
+ }
+ nfp_repr_inc_rx_stats(netdev, pkt_len);
+ }
+
+ skb = napi_alloc_skb(&r_vec->napi, pkt_len);
+ if (!skb) {
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ return;
+ }
+ memcpy(skb_put(skb, pkt_len), xrxbuf->xdp->data, pkt_len);
+
+ skb->mark = meta->mark;
+ skb_set_hash(skb, meta->hash, meta->hash_type);
+
+ skb_record_rx_queue(skb, rx_ring->idx);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ nfp_nfd3_rx_csum(dp, r_vec, rxd, meta, skb);
+
+ if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ le16_to_cpu(rxd->rxd.vlan));
+ if (meta_xdp)
+ skb_metadata_set(skb,
+ xrxbuf->xdp->data - xrxbuf->xdp->data_meta);
+
+ napi_gro_receive(&rx_ring->r_vec->napi, skb);
+
+ nfp_net_xsk_rx_free(xrxbuf);
+
+ (*skbs_polled)++;
+}
+
+static unsigned int
+nfp_nfd3_xsk_rx(struct nfp_net_rx_ring *rx_ring, int budget,
+ unsigned int *skbs_polled)
+{
+ struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ struct nfp_net_tx_ring *tx_ring;
+ struct bpf_prog *xdp_prog;
+ bool xdp_redir = false;
+ int pkts_polled = 0;
+
+ xdp_prog = READ_ONCE(dp->xdp_prog);
+ tx_ring = r_vec->xdp_ring;
+
+ while (pkts_polled < budget) {
+ unsigned int meta_len, data_len, pkt_len, pkt_off;
+ struct nfp_net_xsk_rx_buf *xrxbuf;
+ struct nfp_net_rx_desc *rxd;
+ struct nfp_meta_parsed meta;
+ int idx, act;
+
+ idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+ rxd = &rx_ring->rxds[idx];
+ if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+ break;
+
+ rx_ring->rd_p++;
+ pkts_polled++;
+
+ xrxbuf = &rx_ring->xsk_rxbufs[idx];
+
+ /* If starved of buffers "drop" it and scream. */
+ if (rx_ring->rd_p >= rx_ring->wr_p) {
+ nn_dp_warn(dp, "Starved of RX buffers\n");
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ break;
+ }
+
+ /* Memory barrier to ensure that we won't do other reads
+ * before the DD bit.
+ */
+ dma_rmb();
+
+ memset(&meta, 0, sizeof(meta));
+
+ /* Only supporting AF_XDP with dynamic metadata so buffer layout
+ * is always:
+ *
+ * ---------------------------------------------------------
+ * | off | metadata | packet | XXXX |
+ * ---------------------------------------------------------
+ */
+ meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+ data_len = le16_to_cpu(rxd->rxd.data_len);
+ pkt_len = data_len - meta_len;
+
+ if (unlikely(meta_len > NFP_NET_MAX_PREPEND)) {
+ nn_dp_warn(dp, "Oversized RX packet metadata %u\n",
+ meta_len);
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ continue;
+ }
+
+ /* Stats update. */
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_pkts++;
+ r_vec->rx_bytes += pkt_len;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ xrxbuf->xdp->data += meta_len;
+ xrxbuf->xdp->data_end = xrxbuf->xdp->data + pkt_len;
+ xdp_set_data_meta_invalid(xrxbuf->xdp);
+ xsk_buff_dma_sync_for_cpu(xrxbuf->xdp, r_vec->xsk_pool);
+ net_prefetch(xrxbuf->xdp->data);
+
+ if (meta_len) {
+ if (unlikely(nfp_nfd3_parse_meta(dp->netdev, &meta,
+ xrxbuf->xdp->data -
+ meta_len,
+ xrxbuf->xdp->data,
+ pkt_len, meta_len))) {
+ nn_dp_warn(dp, "Invalid RX packet metadata\n");
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ continue;
+ }
+
+ if (unlikely(meta.portid)) {
+ struct nfp_net *nn = netdev_priv(dp->netdev);
+
+ if (meta.portid != NFP_META_PORT_ID_CTRL) {
+ nfp_nfd3_xsk_rx_skb(rx_ring, rxd,
+ xrxbuf, &meta,
+ pkt_len, false,
+ skbs_polled);
+ continue;
+ }
+
+ nfp_app_ctrl_rx_raw(nn->app, xrxbuf->xdp->data,
+ pkt_len);
+ nfp_net_xsk_rx_free(xrxbuf);
+ continue;
+ }
+ }
+
+ act = bpf_prog_run_xdp(xdp_prog, xrxbuf->xdp);
+
+ pkt_len = xrxbuf->xdp->data_end - xrxbuf->xdp->data;
+ pkt_off = xrxbuf->xdp->data - xrxbuf->xdp->data_hard_start;
+
+ switch (act) {
+ case XDP_PASS:
+ nfp_nfd3_xsk_rx_skb(rx_ring, rxd, xrxbuf, &meta, pkt_len,
+ true, skbs_polled);
+ break;
+ case XDP_TX:
+ if (!nfp_nfd3_xsk_tx_xdp(dp, r_vec, rx_ring, tx_ring,
+ xrxbuf, pkt_len, pkt_off))
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ else
+ nfp_net_xsk_rx_unstash(xrxbuf);
+ break;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(dp->netdev, xrxbuf->xdp, xdp_prog)) {
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ } else {
+ nfp_net_xsk_rx_unstash(xrxbuf);
+ xdp_redir = true;
+ }
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(dp->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ break;
+ }
+ }
+
+ nfp_net_xsk_rx_ring_fill_freelist(r_vec->rx_ring);
+
+ if (xdp_redir)
+ xdp_do_flush_map();
+
+ if (tx_ring->wr_ptr_add)
+ nfp_net_tx_xmit_more_flush(tx_ring);
+
+ return pkts_polled;
+}
+
+void nfp_nfd3_xsk_tx_free(struct nfp_nfd3_tx_buf *txbuf)
+{
+ xsk_buff_free(txbuf->xdp);
+
+ txbuf->dma_addr = 0;
+ txbuf->xdp = NULL;
+}
+
+static bool nfp_nfd3_xsk_complete(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ u32 done_pkts = 0, done_bytes = 0, reused = 0;
+ bool done_all;
+ int idx, todo;
+ u32 qcp_rd_p;
+
+ if (tx_ring->wr_p == tx_ring->rd_p)
+ return true;
+
+ /* Work out how many descriptors have been transmitted. */
+ qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
+
+ if (qcp_rd_p == tx_ring->qcp_rd_p)
+ return true;
+
+ todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+
+ done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
+ todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
+
+ tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
+
+ done_pkts = todo;
+ while (todo--) {
+ struct nfp_nfd3_tx_buf *txbuf;
+
+ idx = D_IDX(tx_ring, tx_ring->rd_p);
+ tx_ring->rd_p++;
+
+ txbuf = &tx_ring->txbufs[idx];
+ if (unlikely(!txbuf->real_len))
+ continue;
+
+ done_bytes += txbuf->real_len;
+ txbuf->real_len = 0;
+
+ if (txbuf->is_xsk_tx) {
+ nfp_nfd3_xsk_tx_free(txbuf);
+ reused++;
+ }
+ }
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_bytes += done_bytes;
+ r_vec->tx_pkts += done_pkts;
+ u64_stats_update_end(&r_vec->tx_sync);
+
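+	/* Buffers recycled from RX via XDP_TX (is_xsk_tx) were never taken
+	 * from the AF_XDP TX ring, so they must not be reported to user
+	 * space as completed TX descriptors.
+	 */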
+ xsk_tx_completed(r_vec->xsk_pool, done_pkts - reused);
+
+ WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+ "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+ tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+
+ return done_all;
+}
+
+static void nfp_nfd3_xsk_tx(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct xdp_desc desc[NFP_NET_XSK_TX_BATCH];
+ struct xsk_buff_pool *xsk_pool;
+ struct nfp_nfd3_tx_desc *txd;
+ u32 pkts = 0, wr_idx;
+ u32 i, got;
+
+ xsk_pool = r_vec->xsk_pool;
+
+ while (nfp_net_tx_space(tx_ring) >= NFP_NET_XSK_TX_BATCH) {
+ for (i = 0; i < NFP_NET_XSK_TX_BATCH; i++)
+ if (!xsk_tx_peek_desc(xsk_pool, &desc[i]))
+ break;
+ got = i;
+ if (!got)
+ break;
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p + i);
+ prefetchw(&tx_ring->txds[wr_idx]);
+
+ for (i = 0; i < got; i++)
+ xsk_buff_raw_dma_sync_for_device(xsk_pool, desc[i].addr,
+ desc[i].len);
+
+ for (i = 0; i < got; i++) {
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p + i);
+
+ tx_ring->txbufs[wr_idx].real_len = desc[i].len;
+ tx_ring->txbufs[wr_idx].is_xsk_tx = false;
+
+ /* Build TX descriptor. */
+ txd = &tx_ring->txds[wr_idx];
+ nfp_desc_set_dma_addr(txd,
+ xsk_buff_raw_get_dma(xsk_pool,
+ desc[i].addr
+ ));
+ txd->offset_eop = NFD3_DESC_TX_EOP;
+ txd->dma_len = cpu_to_le16(desc[i].len);
+ txd->data_len = cpu_to_le16(desc[i].len);
+ }
+
+ tx_ring->wr_p += got;
+ pkts += got;
+ }
+
+ if (!pkts)
+ return;
+
+ xsk_tx_release(xsk_pool);
+ /* Ensure all records are visible before incrementing write counter. */
+ wmb();
+ nfp_qcp_wr_ptr_add(tx_ring->qcp_q, pkts);
+}
+
+int nfp_nfd3_xsk_poll(struct napi_struct *napi, int budget)
+{
+ struct nfp_net_r_vector *r_vec =
+ container_of(napi, struct nfp_net_r_vector, napi);
+ unsigned int pkts_polled, skbs = 0;
+
+ pkts_polled = nfp_nfd3_xsk_rx(r_vec->rx_ring, budget, &skbs);
+
+ if (pkts_polled < budget) {
+ if (r_vec->tx_ring)
+ nfp_nfd3_tx_complete(r_vec->tx_ring, budget);
+
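+		/* If completion work remains, claim the full budget so NAPI
+		 * keeps polling instead of re-arming the interrupt.
+		 */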
+ if (!nfp_nfd3_xsk_complete(r_vec->xdp_ring))
+ pkts_polled = budget;
+
+ nfp_nfd3_xsk_tx(r_vec->xdp_ring);
+
+ if (pkts_polled < budget && napi_complete_done(napi, skbs))
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+ }
+
+ return pkts_polled;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
new file mode 100644
index 000000000000..e3da9ac20e57
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -0,0 +1,1524 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
+
+#include <linux/bpf_trace.h>
+#include <linux/netdevice.h>
+#include <linux/overflow.h>
+#include <linux/sizes.h>
+#include <linux/bitfield.h>
+
+#include "../nfp_app.h"
+#include "../nfp_net.h"
+#include "../nfp_net_dp.h"
+#include "../crypto/crypto.h"
+#include "../crypto/fw.h"
+#include "nfdk.h"
+
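+/* The wake threshold is twice the stop threshold to provide hysteresis,
+ * so the queue does not bounce between stopped and running.
+ */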
+static int nfp_nfdk_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
+{
+ return !nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT * 2);
+}
+
+static int nfp_nfdk_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
+{
+ return nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT);
+}
+
+static void nfp_nfdk_tx_ring_stop(struct netdev_queue *nd_q,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ netif_tx_stop_queue(nd_q);
+
+ /* We can race with the TX completion out of NAPI so recheck */
+ smp_mb();
+ if (unlikely(nfp_nfdk_tx_ring_should_wake(tx_ring)))
+ netif_tx_start_queue(nd_q);
+}
+
+static __le64
+nfp_nfdk_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfdk_tx_buf *txbuf,
+ struct sk_buff *skb)
+{
+ u32 segs, hdrlen, l3_offset, l4_offset;
+ struct nfp_nfdk_tx_desc txd;
+ u16 mss;
+
+ if (!skb->encapsulation) {
+ l3_offset = skb_network_offset(skb);
+ l4_offset = skb_transport_offset(skb);
+ hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ } else {
+ l3_offset = skb_inner_network_offset(skb);
+ l4_offset = skb_inner_transport_offset(skb);
+ hdrlen = skb_inner_transport_header(skb) - skb->data +
+ inner_tcp_hdrlen(skb);
+ }
+
+ segs = skb_shinfo(skb)->gso_segs;
+ mss = skb_shinfo(skb)->gso_size & NFDK_DESC_TX_MSS_MASK;
+
+ /* Note: TSO of the packet with metadata prepended to skb is not
+	 * supported yet, in which case l3/l4_offset and lso_hdrlen need to
+	 * be correctly handled here.
+ * Concern:
+ * The driver doesn't have md_bytes easily available at this point.
+ * The PCI.IN PD ME won't have md_bytes bytes to add to lso_hdrlen,
+ * so it needs the full length there. The app MEs might prefer
+ * l3_offset and l4_offset relative to the start of packet data,
+ * but could probably cope with it being relative to the CTM buf
+ * data offset.
+ */
+ txd.l3_offset = l3_offset;
+ txd.l4_offset = l4_offset;
+ txd.lso_meta_res = 0;
+ txd.mss = cpu_to_le16(mss);
+ txd.lso_hdrlen = hdrlen;
+ txd.lso_totsegs = segs;
+
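+	/* Each segment carries its own copy of the headers on the wire, so
+	 * the produced byte count is skb->len plus one extra hdrlen per
+	 * additional segment.
+	 */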
+ txbuf->pkt_cnt = segs;
+ txbuf->real_len = skb->len + hdrlen * (txbuf->pkt_cnt - 1);
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_lso++;
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ return txd.raw;
+}
+
+static u8
+nfp_nfdk_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ unsigned int pkt_cnt, struct sk_buff *skb, u64 flags)
+{
+ struct ipv6hdr *ipv6h;
+ struct iphdr *iph;
+
+ if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
+ return flags;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return flags;
+
+ flags |= NFDK_DESC_TX_L4_CSUM;
+
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+ /* L3 checksum offloading flag is not required for ipv6 */
+ if (iph->version == 4) {
+ flags |= NFDK_DESC_TX_L3_CSUM;
+ } else if (ipv6h->version != 6) {
+ nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
+ return flags;
+ }
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ if (!skb->encapsulation) {
+ r_vec->hw_csum_tx += pkt_cnt;
+ } else {
+ flags |= NFDK_DESC_TX_ENCAP;
+ r_vec->hw_csum_tx_inner += pkt_cnt;
+ }
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ return flags;
+}
+
+static int
+nfp_nfdk_tx_maybe_close_block(struct nfp_net_tx_ring *tx_ring,
+ unsigned int nr_frags, struct sk_buff *skb)
+{
+ unsigned int n_descs, wr_p, nop_slots;
+ const skb_frag_t *frag, *fend;
+ struct nfp_nfdk_tx_desc *txd;
+ unsigned int wr_idx;
+ int err;
+
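+	/* NFDK descriptors are laid out in fixed-size blocks of
+	 * NFDK_TX_DESC_BLOCK_CNT entries. A packet's descriptors must not
+	 * straddle a block boundary, and a block may carry at most
+	 * NFDK_TX_MAX_DATA_PER_BLOCK bytes of data, so pad the rest of the
+	 * current block with NOP descriptors when either limit would be hit.
+	 */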
+recount_descs:
+ n_descs = nfp_nfdk_headlen_to_segs(skb_headlen(skb));
+
+ frag = skb_shinfo(skb)->frags;
+ fend = frag + nr_frags;
+ for (; frag < fend; frag++)
+ n_descs += DIV_ROUND_UP(skb_frag_size(frag),
+ NFDK_TX_MAX_DATA_PER_DESC);
+
+ if (unlikely(n_descs > NFDK_TX_DESC_GATHER_MAX)) {
+ if (skb_is_nonlinear(skb)) {
+ err = skb_linearize(skb);
+ if (err)
+ return err;
+ goto recount_descs;
+ }
+ return -EINVAL;
+ }
+
+	/* Undercount by 1 (the metadata descriptor is not counted) so that
+	 * the round-down boundary check below works out.
+	 */
+ n_descs += !!skb_is_gso(skb);
+
+ if (round_down(tx_ring->wr_p, NFDK_TX_DESC_BLOCK_CNT) !=
+ round_down(tx_ring->wr_p + n_descs, NFDK_TX_DESC_BLOCK_CNT))
+ goto close_block;
+
+ if ((u32)tx_ring->data_pending + skb->len > NFDK_TX_MAX_DATA_PER_BLOCK)
+ goto close_block;
+
+ return 0;
+
+close_block:
+ wr_p = tx_ring->wr_p;
+ nop_slots = D_BLOCK_CPL(wr_p);
+
+ wr_idx = D_IDX(tx_ring, wr_p);
+ tx_ring->ktxbufs[wr_idx].skb = NULL;
+ txd = &tx_ring->ktxds[wr_idx];
+
+ memset(txd, 0, array_size(nop_slots, sizeof(struct nfp_nfdk_tx_desc)));
+
+ tx_ring->data_pending = 0;
+ tx_ring->wr_p += nop_slots;
+ tx_ring->wr_ptr_add += nop_slots;
+
+ return 0;
+}
+
+static int nfp_nfdk_prep_port_id(struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ unsigned char *data;
+
+ if (likely(!md_dst))
+ return 0;
+ if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
+ return 0;
+
+	/* Note: TSO of an skb with metadata prepended is not supported; see
+	 * the comments in nfp_nfdk_tx_tso() for details.
+	 */
+ if (unlikely(md_dst && skb_is_gso(skb)))
+ return -EOPNOTSUPP;
+
+ if (unlikely(skb_cow_head(skb, sizeof(md_dst->u.port_info.port_id))))
+ return -ENOMEM;
+
+ data = skb_push(skb, sizeof(md_dst->u.port_info.port_id));
+ put_unaligned_be32(md_dst->u.port_info.port_id, data);
+
+ return sizeof(md_dst->u.port_info.port_id);
+}
+
+static int
+nfp_nfdk_prep_tx_meta(struct nfp_app *app, struct sk_buff *skb,
+ struct nfp_net_r_vector *r_vec)
+{
+ unsigned char *data;
+ int res, md_bytes;
+ u32 meta_id = 0;
+
+ res = nfp_nfdk_prep_port_id(skb);
+ if (unlikely(res <= 0))
+ return res;
+
+ md_bytes = res;
+ meta_id = NFP_NET_META_PORTID;
+
+ if (unlikely(skb_cow_head(skb, sizeof(meta_id))))
+ return -ENOMEM;
+
+ md_bytes += sizeof(meta_id);
+
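+	/* The first 32-bit word of the prepend encodes the total prepend
+	 * length and the chain of field types that follow it.
+	 */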
+ meta_id = FIELD_PREP(NFDK_META_LEN, md_bytes) |
+ FIELD_PREP(NFDK_META_FIELDS, meta_id);
+
+ data = skb_push(skb, sizeof(meta_id));
+ put_unaligned_be32(meta_id, data);
+
+ return NFDK_DESC_TX_CHAIN_META;
+}
+
+/**
+ * nfp_nfdk_tx() - Main transmit entry point
+ * @skb: SKB to transmit
+ * @netdev: netdev structure
+ *
+ * Return: NETDEV_TX_OK on success.
+ */
+netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_nfdk_tx_buf *txbuf, *etxbuf;
+ u32 cnt, tmp_dlen, dlen_type = 0;
+ struct nfp_net_tx_ring *tx_ring;
+ struct nfp_net_r_vector *r_vec;
+ const skb_frag_t *frag, *fend;
+ struct nfp_nfdk_tx_desc *txd;
+ unsigned int real_len, qidx;
+ unsigned int dma_len, type;
+ struct netdev_queue *nd_q;
+ struct nfp_net_dp *dp;
+ int nr_frags, wr_idx;
+ dma_addr_t dma_addr;
+ u64 metadata;
+
+ dp = &nn->dp;
+ qidx = skb_get_queue_mapping(skb);
+ tx_ring = &dp->tx_rings[qidx];
+ r_vec = tx_ring->r_vec;
+ nd_q = netdev_get_tx_queue(dp->netdev, qidx);
+
+ /* Don't bother counting frags, assume the worst */
+ if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
+ nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
+ qidx, tx_ring->wr_p, tx_ring->rd_p);
+ netif_tx_stop_queue(nd_q);
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_busy++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ return NETDEV_TX_BUSY;
+ }
+
+ metadata = nfp_nfdk_prep_tx_meta(nn->app, skb, r_vec);
+ if (unlikely((int)metadata < 0))
+ goto err_flush;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ if (nfp_nfdk_tx_maybe_close_block(tx_ring, nr_frags, skb))
+ goto err_flush;
+
+ /* DMA map all */
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+ txd = &tx_ring->ktxds[wr_idx];
+ txbuf = &tx_ring->ktxbufs[wr_idx];
+
+ dma_len = skb_headlen(skb);
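+	/* Choose the head descriptor type: TSO for GSO packets, SIMPLE for
+	 * a small linear packet that fits one descriptor, GATHER otherwise.
+	 */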
+ if (skb_is_gso(skb))
+ type = NFDK_DESC_TX_TYPE_TSO;
+ else if (!nr_frags && dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+ type = NFDK_DESC_TX_TYPE_SIMPLE;
+ else
+ type = NFDK_DESC_TX_TYPE_GATHER;
+
+ dma_addr = dma_map_single(dp->dev, skb->data, dma_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, dma_addr))
+ goto err_warn_dma;
+
+ txbuf->skb = skb;
+ txbuf++;
+
+ txbuf->dma_addr = dma_addr;
+ txbuf++;
+
+ /* FIELD_PREP() implicitly truncates to chunk */
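+	/* Illustrative arithmetic: stored lengths are len - 1, and the head
+	 * field keeps only the low DMA_LEN_HEAD bits, e.g. a 9000 byte head
+	 * becomes an 808 byte head chunk ((9000 - 1) & 0xfff = 807) followed
+	 * by a single 8192 byte gather chunk below.
+	 */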
+ dma_len -= 1;
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
+ FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
+
+ txd->dma_len_type = cpu_to_le16(dlen_type);
+ nfp_desc_set_dma_addr(txd, dma_addr);
+
+ /* starts at bit 0 */
+ BUILD_BUG_ON(!(NFDK_DESC_TX_DMA_LEN_HEAD & 1));
+
+	/* Preserve the original dlen_type so that the EOP logic below can
+	 * use it.
+	 */
+ tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
+ dma_len -= tmp_dlen;
+ dma_addr += tmp_dlen + 1;
+ txd++;
+
+	/* The rest of the data (if any) will be in larger DMA descriptors
+	 * and is handled with the fragment loop.
+	 */
+ frag = skb_shinfo(skb)->frags;
+ fend = frag + nr_frags;
+
+ while (true) {
+ while (dma_len > 0) {
+ dma_len -= 1;
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
+
+ txd->dma_len_type = cpu_to_le16(dlen_type);
+ nfp_desc_set_dma_addr(txd, dma_addr);
+
+ dma_len -= dlen_type;
+ dma_addr += dlen_type + 1;
+ txd++;
+ }
+
+ if (frag >= fend)
+ break;
+
+ dma_len = skb_frag_size(frag);
+ dma_addr = skb_frag_dma_map(dp->dev, frag, 0, dma_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, dma_addr))
+ goto err_unmap;
+
+ txbuf->dma_addr = dma_addr;
+ txbuf++;
+
+ frag++;
+ }
+
+ (txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);
+
+ if (!skb_is_gso(skb)) {
+ real_len = skb->len;
+ /* Metadata desc */
+ metadata = nfp_nfdk_tx_csum(dp, r_vec, 1, skb, metadata);
+ txd->raw = cpu_to_le64(metadata);
+ txd++;
+ } else {
+ /* lso desc should be placed after metadata desc */
+ (txd + 1)->raw = nfp_nfdk_tx_tso(r_vec, txbuf, skb);
+ real_len = txbuf->real_len;
+ /* Metadata desc */
+ metadata = nfp_nfdk_tx_csum(dp, r_vec, txbuf->pkt_cnt, skb, metadata);
+ txd->raw = cpu_to_le64(metadata);
+ txd += 2;
+ txbuf++;
+ }
+
+ cnt = txd - tx_ring->ktxds - wr_idx;
+ if (unlikely(round_down(wr_idx, NFDK_TX_DESC_BLOCK_CNT) !=
+ round_down(wr_idx + cnt - 1, NFDK_TX_DESC_BLOCK_CNT)))
+ goto err_warn_overflow;
+
+ skb_tx_timestamp(skb);
+
+ tx_ring->wr_p += cnt;
+ if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT)
+ tx_ring->data_pending += skb->len;
+ else
+ tx_ring->data_pending = 0;
+
+ if (nfp_nfdk_tx_ring_should_stop(tx_ring))
+ nfp_nfdk_tx_ring_stop(nd_q, tx_ring);
+
+ tx_ring->wr_ptr_add += cnt;
+ if (__netdev_tx_sent_queue(nd_q, real_len, netdev_xmit_more()))
+ nfp_net_tx_xmit_more_flush(tx_ring);
+
+ return NETDEV_TX_OK;
+
+err_warn_overflow:
+ WARN_ONCE(1, "unable to fit packet into a descriptor wr_idx:%d head:%d frags:%d cnt:%d",
+ wr_idx, skb_headlen(skb), nr_frags, cnt);
+ if (skb_is_gso(skb))
+ txbuf--;
+err_unmap:
+ /* txbuf pointed to the next-to-use */
+ etxbuf = txbuf;
+ /* first txbuf holds the skb */
+ txbuf = &tx_ring->ktxbufs[wr_idx + 1];
+ if (txbuf < etxbuf) {
+ dma_unmap_single(dp->dev, txbuf->dma_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ txbuf->raw = 0;
+ txbuf++;
+ }
+ frag = skb_shinfo(skb)->frags;
+	while (txbuf < etxbuf) {
+ dma_unmap_page(dp->dev, txbuf->dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ txbuf->raw = 0;
+ frag++;
+ txbuf++;
+ }
+err_warn_dma:
+ nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
+err_flush:
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_errors++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * nfp_nfdk_tx_complete() - Handle completed TX packets
+ * @tx_ring: TX ring structure
+ * @budget: NAPI budget (only used as bool to determine if in NAPI context)
+ */
+static void nfp_nfdk_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ u32 done_pkts = 0, done_bytes = 0;
+ struct nfp_nfdk_tx_buf *ktxbufs;
+ struct device *dev = dp->dev;
+ struct netdev_queue *nd_q;
+ u32 rd_p, qcp_rd_p;
+ int todo;
+
+ rd_p = tx_ring->rd_p;
+ if (tx_ring->wr_p == rd_p)
+ return;
+
+ /* Work out how many descriptors have been transmitted */
+ qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);
+
+ if (qcp_rd_p == tx_ring->qcp_rd_p)
+ return;
+
+ todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+ ktxbufs = tx_ring->ktxbufs;
+
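+	/* Per-packet ktxbufs layout: slot 0 holds the skb pointer, the
+	 * following slots hold the DMA addresses of the head and each frag.
+	 * A NULL skb in slot 0 marks NOP padding from a closed block.
+	 */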
+ while (todo > 0) {
+ const skb_frag_t *frag, *fend;
+ unsigned int size, n_descs = 1;
+ struct nfp_nfdk_tx_buf *txbuf;
+ struct sk_buff *skb;
+
+ txbuf = &ktxbufs[D_IDX(tx_ring, rd_p)];
+ skb = txbuf->skb;
+ txbuf++;
+
+ /* Closed block */
+ if (!skb) {
+ n_descs = D_BLOCK_CPL(rd_p);
+ goto next;
+ }
+
+ /* Unmap head */
+ size = skb_headlen(skb);
+ n_descs += nfp_nfdk_headlen_to_segs(size);
+ dma_unmap_single(dev, txbuf->dma_addr, size, DMA_TO_DEVICE);
+ txbuf++;
+
+ /* Unmap frags */
+ frag = skb_shinfo(skb)->frags;
+ fend = frag + skb_shinfo(skb)->nr_frags;
+ for (; frag < fend; frag++) {
+ size = skb_frag_size(frag);
+ n_descs += DIV_ROUND_UP(size,
+ NFDK_TX_MAX_DATA_PER_DESC);
+ dma_unmap_page(dev, txbuf->dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ txbuf++;
+ }
+
+ if (!skb_is_gso(skb)) {
+ done_bytes += skb->len;
+ done_pkts++;
+ } else {
+ done_bytes += txbuf->real_len;
+ done_pkts += txbuf->pkt_cnt;
+ n_descs++;
+ }
+
+ napi_consume_skb(skb, budget);
+next:
+ rd_p += n_descs;
+ todo -= n_descs;
+ }
+
+ tx_ring->rd_p = rd_p;
+ tx_ring->qcp_rd_p = qcp_rd_p;
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_bytes += done_bytes;
+ r_vec->tx_pkts += done_pkts;
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ if (!dp->netdev)
+ return;
+
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+ netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
+ if (nfp_nfdk_tx_ring_should_wake(tx_ring)) {
+ /* Make sure TX thread will see updated tx_ring->rd_p */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(nd_q)))
+ netif_tx_wake_queue(nd_q);
+ }
+
+ WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+ "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+ tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+}
+
+/* Receive processing */
+static void *
+nfp_nfdk_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
+{
+ void *frag;
+
+ if (!dp->xdp_prog) {
+ frag = napi_alloc_frag(dp->fl_bufsz);
+ if (unlikely(!frag))
+ return NULL;
+ } else {
+ struct page *page;
+
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return NULL;
+ frag = page_address(page);
+ }
+
+ *dma_addr = nfp_net_dma_map_rx(dp, frag);
+ if (dma_mapping_error(dp->dev, *dma_addr)) {
+ nfp_net_free_frag(frag, dp->xdp_prog);
+ nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
+ return NULL;
+ }
+
+ return frag;
+}
+
+/**
+ * nfp_nfdk_rx_give_one() - Put a mapped buffer on the software and hardware rings
+ * @dp:		NFP Net data path struct
+ * @rx_ring:	RX ring structure
+ * @frag:	page fragment buffer
+ * @dma_addr:	DMA address of the buffer mapping
+ */
+static void
+nfp_nfdk_rx_give_one(const struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring,
+ void *frag, dma_addr_t dma_addr)
+{
+ unsigned int wr_idx;
+
+ wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
+
+ nfp_net_dma_sync_dev_rx(dp, dma_addr);
+
+ /* Stash SKB and DMA address away */
+ rx_ring->rxbufs[wr_idx].frag = frag;
+ rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
+
+ /* Fill freelist descriptor */
+ rx_ring->rxds[wr_idx].fld.reserved = 0;
+ rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
+ nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
+ dma_addr + dp->rx_dma_off);
+
+ rx_ring->wr_p++;
+ if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
+ /* Update write pointer of the freelist queue. Make
+ * sure all writes are flushed before telling the hardware.
+ */
+ wmb();
+ nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
+ }
+}
+
+/**
+ * nfp_nfdk_rx_ring_fill_freelist() - Give buffers from the ring to FW
+ * @dp: NFP Net data path struct
+ * @rx_ring: RX ring to fill
+ */
+void nfp_nfdk_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int i;
+
+ for (i = 0; i < rx_ring->cnt - 1; i++)
+ nfp_nfdk_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
+ rx_ring->rxbufs[i].dma_addr);
+}
+
+/**
+ * nfp_nfdk_rx_csum_has_errors() - check if the rxd reports any checksum errors
+ * @flags: RX descriptor flags field in CPU byte order
+ */
+static int nfp_nfdk_rx_csum_has_errors(u16 flags)
+{
+ u16 csum_all_checked, csum_all_ok;
+
+ csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
+ csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
+
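+	/* Every flag in the "checked" group must have its corresponding
+	 * "OK" bit set; the shift lines the two bit groups up for a single
+	 * comparison.
+	 */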
+ return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
+}
+
+/**
+ * nfp_nfdk_rx_csum() - set SKB checksum field based on RX descriptor flags
+ * @dp: NFP Net data path struct
+ * @r_vec: per-ring structure
+ * @rxd: Pointer to RX descriptor
+ * @meta: Parsed metadata prepend
+ * @skb: Pointer to SKB
+ */
+static void
+nfp_nfdk_rx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct nfp_net_rx_desc *rxd, struct nfp_meta_parsed *meta,
+ struct sk_buff *skb)
+{
+ skb_checksum_none_assert(skb);
+
+ if (!(dp->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (meta->csum_type) {
+ skb->ip_summed = meta->csum_type;
+ skb->csum = meta->csum;
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_complete++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ return;
+ }
+
+ if (nfp_nfdk_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_error++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ return;
+ }
+
+ /* Assume that the firmware will never report inner CSUM_OK unless outer
+ * L4 headers were successfully parsed. FW will always report zero UDP
+ * checksum as CSUM_OK.
+ */
+ if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
+ rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
+ __skb_incr_checksum_unnecessary(skb);
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_ok++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ }
+
+ if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
+ rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
+ __skb_incr_checksum_unnecessary(skb);
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_inner_ok++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ }
+}
+
+static void
+nfp_nfdk_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ unsigned int type, __be32 *hash)
+{
+ if (!(netdev->features & NETIF_F_RXHASH))
+ return;
+
+ switch (type) {
+ case NFP_NET_RSS_IPV4:
+ case NFP_NET_RSS_IPV6:
+ case NFP_NET_RSS_IPV6_EX:
+ meta->hash_type = PKT_HASH_TYPE_L3;
+ break;
+ default:
+ meta->hash_type = PKT_HASH_TYPE_L4;
+ break;
+ }
+
+ meta->hash = get_unaligned_be32(hash);
+}
+
+static bool
+nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ void *data, void *pkt, unsigned int pkt_len, int meta_len)
+{
+ u32 meta_info;
+
+ meta_info = get_unaligned_be32(data);
+ data += 4;
+
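+	/* The first word is a chain of 4-bit field types, consumed from the
+	 * least significant bits up; each field's payload follows in order
+	 * in the prepend (4 bytes for most field types).
+	 */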
+ while (meta_info) {
+ switch (meta_info & NFP_NET_META_FIELD_MASK) {
+ case NFP_NET_META_HASH:
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ nfp_nfdk_set_hash(netdev, meta,
+ meta_info & NFP_NET_META_FIELD_MASK,
+ (__be32 *)data);
+ data += 4;
+ break;
+ case NFP_NET_META_MARK:
+ meta->mark = get_unaligned_be32(data);
+ data += 4;
+ break;
+ case NFP_NET_META_PORTID:
+ meta->portid = get_unaligned_be32(data);
+ data += 4;
+ break;
+ case NFP_NET_META_CSUM:
+ meta->csum_type = CHECKSUM_COMPLETE;
+ meta->csum =
+ (__force __wsum)__get_unaligned_cpu32(data);
+ data += 4;
+ break;
+ case NFP_NET_META_RESYNC_INFO:
+ if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
+ pkt_len))
+ return false;
+ data += sizeof(struct nfp_net_tls_resync_req);
+ break;
+ default:
+ return true;
+ }
+
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ }
+
+ return data != pkt;
+}
+
+static void
+nfp_nfdk_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
+ struct sk_buff *skb)
+{
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_drops++;
+ /* If we have both skb and rxbuf the replacement buffer allocation
+ * must have failed, count this as an alloc failure.
+ */
+ if (skb && rxbuf)
+ r_vec->rx_replace_buf_alloc_fail++;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+	/* The skb is built on top of the frag, so freeing the skb would also
+	 * free the frag; take an extra ref to be able to reuse it.
+	 */
+ if (skb && rxbuf && skb->head == rxbuf->frag)
+ page_ref_inc(virt_to_head_page(rxbuf->frag));
+ if (rxbuf)
+ nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
+ if (skb)
+ dev_kfree_skb_any(skb);
+}
+
+static bool nfp_nfdk_xdp_complete(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ struct nfp_net_rx_ring *rx_ring;
+ u32 qcp_rd_p, done = 0;
+ bool done_all;
+ int todo;
+
+ /* Work out how many descriptors have been transmitted */
+ qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);
+ if (qcp_rd_p == tx_ring->qcp_rd_p)
+ return true;
+
+ todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+
+ done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
+ todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
+
+ rx_ring = r_vec->rx_ring;
+ while (todo > 0) {
+ int idx = D_IDX(tx_ring, tx_ring->rd_p + done);
+ struct nfp_nfdk_tx_buf *txbuf;
+ unsigned int step = 1;
+
+ txbuf = &tx_ring->ktxbufs[idx];
+ if (!txbuf->raw)
+ goto next;
+
+ if (NFDK_TX_BUF_INFO(txbuf->val) != NFDK_TX_BUF_INFO_SOP) {
+ WARN_ONCE(1, "Unexpected TX buffer in XDP TX ring\n");
+ goto next;
+ }
+
+ /* Two successive txbufs are used to stash virtual and dma
+ * address respectively, recycle and clean them here.
+ */
+ nfp_nfdk_rx_give_one(dp, rx_ring,
+ (void *)NFDK_TX_BUF_PTR(txbuf[0].val),
+ txbuf[1].dma_addr);
+ txbuf[0].raw = 0;
+ txbuf[1].raw = 0;
+ step = 2;
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ /* Note: tx_bytes not accumulated. */
+ r_vec->tx_pkts++;
+ u64_stats_update_end(&r_vec->tx_sync);
+next:
+ todo -= step;
+ done += step;
+ }
+
+ tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + done);
+ tx_ring->rd_p += done;
+
+ WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+ "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+ tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+
+ return done_all;
+}
+
+static bool
+nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
+ struct nfp_net_tx_ring *tx_ring,
+ struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
+ unsigned int pkt_len, bool *completed)
+{
+ unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
+ unsigned int dma_len, type, cnt, dlen_type, tmp_dlen;
+ struct nfp_nfdk_tx_buf *txbuf;
+ struct nfp_nfdk_tx_desc *txd;
+ unsigned int n_descs;
+ dma_addr_t dma_addr;
+ int wr_idx;
+
+	/* Reject if xdp_adjust_tail grew the packet beyond the DMA area */
+ if (pkt_len + dma_off > dma_map_sz)
+ return false;
+
+ /* Make sure there's still at least one block available after
+ * aligning to block boundary, so that the txds used below
+ * won't wrap around the tx_ring.
+ */
+ if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
+ if (!*completed) {
+ nfp_nfdk_xdp_complete(tx_ring);
+ *completed = true;
+ }
+
+ if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
+ nfp_nfdk_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
+ NULL);
+ return false;
+ }
+ }
+
+ /* Check if cross block boundary */
+ n_descs = nfp_nfdk_headlen_to_segs(pkt_len);
+ if ((round_down(tx_ring->wr_p, NFDK_TX_DESC_BLOCK_CNT) !=
+ round_down(tx_ring->wr_p + n_descs, NFDK_TX_DESC_BLOCK_CNT)) ||
+ ((u32)tx_ring->data_pending + pkt_len >
+ NFDK_TX_MAX_DATA_PER_BLOCK)) {
+ unsigned int nop_slots = D_BLOCK_CPL(tx_ring->wr_p);
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+ txd = &tx_ring->ktxds[wr_idx];
+ memset(txd, 0,
+ array_size(nop_slots, sizeof(struct nfp_nfdk_tx_desc)));
+
+ tx_ring->data_pending = 0;
+ tx_ring->wr_p += nop_slots;
+ tx_ring->wr_ptr_add += nop_slots;
+ }
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+ txbuf = &tx_ring->ktxbufs[wr_idx];
+
+ txbuf[0].val = (unsigned long)rxbuf->frag | NFDK_TX_BUF_INFO_SOP;
+ txbuf[1].dma_addr = rxbuf->dma_addr;
+ /* Note: pkt len not stored */
+
+ dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
+ pkt_len, DMA_BIDIRECTIONAL);
+
+ /* Build TX descriptor */
+ txd = &tx_ring->ktxds[wr_idx];
+ dma_len = pkt_len;
+ dma_addr = rxbuf->dma_addr + dma_off;
+
+ if (dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+ type = NFDK_DESC_TX_TYPE_SIMPLE;
+ else
+ type = NFDK_DESC_TX_TYPE_GATHER;
+
+ /* FIELD_PREP() implicitly truncates to chunk */
+ dma_len -= 1;
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
+ FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
+
+ txd->dma_len_type = cpu_to_le16(dlen_type);
+ nfp_desc_set_dma_addr(txd, dma_addr);
+
+ tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
+ dma_len -= tmp_dlen;
+ dma_addr += tmp_dlen + 1;
+ txd++;
+
+ while (dma_len > 0) {
+ dma_len -= 1;
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
+ txd->dma_len_type = cpu_to_le16(dlen_type);
+ nfp_desc_set_dma_addr(txd, dma_addr);
+
+ dlen_type &= NFDK_DESC_TX_DMA_LEN;
+ dma_len -= dlen_type;
+ dma_addr += dlen_type + 1;
+ txd++;
+ }
+
+ (txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);
+
+ /* Metadata desc */
+ txd->raw = 0;
+ txd++;
+
+ cnt = txd - tx_ring->ktxds - wr_idx;
+ tx_ring->wr_p += cnt;
+ if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT)
+ tx_ring->data_pending += pkt_len;
+ else
+ tx_ring->data_pending = 0;
+
+ tx_ring->wr_ptr_add += cnt;
+ return true;
+}
+
+/**
+ * nfp_nfdk_rx() - receive up to @budget packets on @rx_ring
+ * @rx_ring: RX ring to receive from
+ * @budget: NAPI budget
+ *
+ * Note, this function is separated out from the napi poll function so that
+ * the packet receive code is kept apart from the other bookkeeping the
+ * poll function performs.
+ *
+ * Return: Number of packets received.
+ */
+static int nfp_nfdk_rx(struct nfp_net_rx_ring *rx_ring, int budget)
+{
+ struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ struct nfp_net_tx_ring *tx_ring;
+ struct bpf_prog *xdp_prog;
+ bool xdp_tx_cmpl = false;
+ unsigned int true_bufsz;
+ struct sk_buff *skb;
+ int pkts_polled = 0;
+ struct xdp_buff xdp;
+ int idx;
+
+ xdp_prog = READ_ONCE(dp->xdp_prog);
+ true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
+ xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
+ &rx_ring->xdp_rxq);
+ tx_ring = r_vec->xdp_ring;
+
+ while (pkts_polled < budget) {
+ unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
+ struct nfp_net_rx_buf *rxbuf;
+ struct nfp_net_rx_desc *rxd;
+ struct nfp_meta_parsed meta;
+ bool redir_egress = false;
+ struct net_device *netdev;
+ dma_addr_t new_dma_addr;
+ u32 meta_len_xdp = 0;
+ void *new_frag;
+
+ idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+ rxd = &rx_ring->rxds[idx];
+ if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+ break;
+
+ /* Memory barrier to ensure that we won't do other reads
+ * before the DD bit.
+ */
+ dma_rmb();
+
+ memset(&meta, 0, sizeof(meta));
+
+ rx_ring->rd_p++;
+ pkts_polled++;
+
+ rxbuf = &rx_ring->rxbufs[idx];
+ /* < meta_len >
+ * <-- [rx_offset] -->
+ * ---------------------------------------------------------
+ * | [XX] | metadata | packet | XXXX |
+ * ---------------------------------------------------------
+ * <---------------- data_len --------------->
+ *
+ * The rx_offset is fixed for all packets, the meta_len can vary
+ * on a packet by packet basis. If rx_offset is set to zero
+ * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
+ * buffer and is immediately followed by the packet (no [XX]).
+ */
+ meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+ data_len = le16_to_cpu(rxd->rxd.data_len);
+ pkt_len = data_len - meta_len;
+
+ pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ pkt_off += meta_len;
+ else
+ pkt_off += dp->rx_offset;
+ meta_off = pkt_off - meta_len;
+
+ /* Stats update */
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_pkts++;
+ r_vec->rx_bytes += pkt_len;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
+ (dp->rx_offset && meta_len > dp->rx_offset))) {
+ nn_dp_warn(dp, "oversized RX packet metadata %u\n",
+ meta_len);
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ continue;
+ }
+
+ nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
+ data_len);
+
+ if (meta_len) {
+ if (unlikely(nfp_nfdk_parse_meta(dp->netdev, &meta,
+ rxbuf->frag + meta_off,
+ rxbuf->frag + pkt_off,
+ pkt_len, meta_len))) {
+ nn_dp_warn(dp, "invalid RX packet metadata\n");
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf,
+ NULL);
+ continue;
+ }
+ }
+
+ if (xdp_prog && !meta.portid) {
+ void *orig_data = rxbuf->frag + pkt_off;
+ unsigned int dma_off;
+ int act;
+
+ xdp_prepare_buff(&xdp,
+ rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
+ pkt_off - NFP_NET_RX_BUF_HEADROOM,
+ pkt_len, true);
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ pkt_len = xdp.data_end - xdp.data;
+ pkt_off += xdp.data - orig_data;
+
+ switch (act) {
+ case XDP_PASS:
+ meta_len_xdp = xdp.data - xdp.data_meta;
+ break;
+ case XDP_TX:
+ dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
+ if (unlikely(!nfp_nfdk_tx_xdp_buf(dp, rx_ring,
+ tx_ring,
+ rxbuf,
+ dma_off,
+ pkt_len,
+ &xdp_tx_cmpl)))
+ trace_xdp_exception(dp->netdev,
+ xdp_prog, act);
+ continue;
+ default:
+ bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(dp->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag,
+ rxbuf->dma_addr);
+ continue;
+ }
+ }
+
+ if (likely(!meta.portid)) {
+ netdev = dp->netdev;
+ } else if (meta.portid == NFP_META_PORT_ID_CTRL) {
+ struct nfp_net *nn = netdev_priv(dp->netdev);
+
+ nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
+ pkt_len);
+ nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag,
+ rxbuf->dma_addr);
+ continue;
+ } else {
+ struct nfp_net *nn;
+
+ nn = netdev_priv(dp->netdev);
+ netdev = nfp_app_dev_get(nn->app, meta.portid,
+ &redir_egress);
+ if (unlikely(!netdev)) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf,
+ NULL);
+ continue;
+ }
+
+ if (nfp_netdev_is_nfp_repr(netdev))
+ nfp_repr_inc_rx_stats(netdev, pkt_len);
+ }
+
+ skb = build_skb(rxbuf->frag, true_bufsz);
+ if (unlikely(!skb)) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ continue;
+ }
+ new_frag = nfp_nfdk_napi_alloc_one(dp, &new_dma_addr);
+ if (unlikely(!new_frag)) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+ continue;
+ }
+
+ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+ nfp_nfdk_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
+ skb_reserve(skb, pkt_off);
+ skb_put(skb, pkt_len);
+
+ skb->mark = meta.mark;
+ skb_set_hash(skb, meta.hash, meta.hash_type);
+
+ skb_record_rx_queue(skb, rx_ring->idx);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ nfp_nfdk_rx_csum(dp, r_vec, rxd, &meta, skb);
+
+ if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ le16_to_cpu(rxd->rxd.vlan));
+ if (meta_len_xdp)
+ skb_metadata_set(skb, meta_len_xdp);
+
+ if (likely(!redir_egress)) {
+ napi_gro_receive(&rx_ring->r_vec->napi, skb);
+ } else {
+ skb->dev = netdev;
+ skb_reset_network_header(skb);
+ __skb_push(skb, ETH_HLEN);
+ dev_queue_xmit(skb);
+ }
+ }
+
+ if (xdp_prog) {
+ if (tx_ring->wr_ptr_add)
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
+ !xdp_tx_cmpl)
+ if (!nfp_nfdk_xdp_complete(tx_ring))
+ pkts_polled = budget;
+ }
+
+ return pkts_polled;
+}
+
+/**
+ * nfp_nfdk_poll() - napi poll function
+ * @napi: NAPI structure
+ * @budget: NAPI budget
+ *
+ * Return: number of packets polled.
+ */
+int nfp_nfdk_poll(struct napi_struct *napi, int budget)
+{
+ struct nfp_net_r_vector *r_vec =
+ container_of(napi, struct nfp_net_r_vector, napi);
+ unsigned int pkts_polled = 0;
+
+ if (r_vec->tx_ring)
+ nfp_nfdk_tx_complete(r_vec->tx_ring, budget);
+ if (r_vec->rx_ring)
+ pkts_polled = nfp_nfdk_rx(r_vec->rx_ring, budget);
+
+ if (pkts_polled < budget)
+ if (napi_complete_done(napi, pkts_polled))
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+
+ if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
+ struct dim_sample dim_sample = {};
+ unsigned int start;
+ u64 pkts, bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&r_vec->rx_sync);
+ pkts = r_vec->rx_pkts;
+ bytes = r_vec->rx_bytes;
+ } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+
+ dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
+ net_dim(&r_vec->rx_dim, dim_sample);
+ }
+
+ if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
+ struct dim_sample dim_sample = {};
+ unsigned int start;
+ u64 pkts, bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&r_vec->tx_sync);
+ pkts = r_vec->tx_pkts;
+ bytes = r_vec->tx_bytes;
+ } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+
+ dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
+ net_dim(&r_vec->tx_dim, dim_sample);
+ }
+
+ return pkts_polled;
+}
+
+/* Control device data path
+ */
+
+bool
+nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, bool old)
+{
+ u32 cnt, tmp_dlen, dlen_type = 0;
+ struct nfp_net_tx_ring *tx_ring;
+ struct nfp_nfdk_tx_buf *txbuf;
+ struct nfp_nfdk_tx_desc *txd;
+ unsigned int dma_len, type;
+ struct nfp_net_dp *dp;
+ dma_addr_t dma_addr;
+ u64 metadata = 0;
+ int wr_idx;
+
+ dp = &r_vec->nfp_net->dp;
+ tx_ring = r_vec->tx_ring;
+
+ if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
+ nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
+ goto err_free;
+ }
+
+ /* Don't bother counting frags, assume the worst */
+ if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_busy++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ if (!old)
+ __skb_queue_tail(&r_vec->queue, skb);
+ else
+ __skb_queue_head(&r_vec->queue, skb);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (nfp_app_ctrl_has_meta(nn->app)) {
+ if (unlikely(skb_headroom(skb) < 8)) {
+ nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
+ goto err_free;
+ }
+ metadata = NFDK_DESC_TX_CHAIN_META;
+ put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
+ put_unaligned_be32(FIELD_PREP(NFDK_META_LEN, 8) |
+ FIELD_PREP(NFDK_META_FIELDS,
+ NFP_NET_META_PORTID),
+ skb_push(skb, 4));
+ }
+
+ if (nfp_nfdk_tx_maybe_close_block(tx_ring, 0, skb))
+ goto err_free;
+
+ /* DMA map all */
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+ txd = &tx_ring->ktxds[wr_idx];
+ txbuf = &tx_ring->ktxbufs[wr_idx];
+
+ dma_len = skb_headlen(skb);
+ if (dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+ type = NFDK_DESC_TX_TYPE_SIMPLE;
+ else
+ type = NFDK_DESC_TX_TYPE_GATHER;
+
+ dma_addr = dma_map_single(dp->dev, skb->data, dma_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, dma_addr))
+ goto err_warn_dma;
+
+ txbuf->skb = skb;
+ txbuf++;
+
+ txbuf->dma_addr = dma_addr;
+ txbuf++;
+
+ dma_len -= 1;
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
+ FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
+
+ txd->dma_len_type = cpu_to_le16(dlen_type);
+ nfp_desc_set_dma_addr(txd, dma_addr);
+
+ tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
+ dma_len -= tmp_dlen;
+ dma_addr += tmp_dlen + 1;
+ txd++;
+
+ while (dma_len > 0) {
+ dma_len -= 1;
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
+ txd->dma_len_type = cpu_to_le16(dlen_type);
+ nfp_desc_set_dma_addr(txd, dma_addr);
+
+ dlen_type &= NFDK_DESC_TX_DMA_LEN;
+ dma_len -= dlen_type;
+ dma_addr += dlen_type + 1;
+ txd++;
+ }
+
+ (txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);
+
+ /* Metadata desc */
+ txd->raw = cpu_to_le64(metadata);
+ txd++;
+
+ cnt = txd - tx_ring->ktxds - wr_idx;
+ if (unlikely(round_down(wr_idx, NFDK_TX_DESC_BLOCK_CNT) !=
+ round_down(wr_idx + cnt - 1, NFDK_TX_DESC_BLOCK_CNT)))
+ goto err_warn_overflow;
+
+ tx_ring->wr_p += cnt;
+ if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT)
+ tx_ring->data_pending += skb->len;
+ else
+ tx_ring->data_pending = 0;
+
+ tx_ring->wr_ptr_add += cnt;
+ nfp_net_tx_xmit_more_flush(tx_ring);
+
+ return NETDEV_TX_OK;
+
+err_warn_overflow:
+ WARN_ONCE(1, "unable to fit packet into a descriptor wr_idx:%d head:%d frags:%d cnt:%d",
+ wr_idx, skb_headlen(skb), 0, cnt);
+ txbuf--;
+ dma_unmap_single(dp->dev, txbuf->dma_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ txbuf->raw = 0;
+err_warn_dma:
+ nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
+err_free:
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_errors++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&r_vec->queue)))
+ if (nfp_nfdk_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
+ return;
+}
+
+static bool
+nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
+{
+ u32 meta_type, meta_tag;
+
+ if (!nfp_app_ctrl_has_meta(nn->app))
+ return !meta_len;
+
+ if (meta_len != 8)
+ return false;
+
+ meta_type = get_unaligned_be32(data);
+ meta_tag = get_unaligned_be32(data + 4);
+
+ return (meta_type == NFP_NET_META_PORTID &&
+ meta_tag == NFP_META_PORT_ID_CTRL);
+}
+
+static bool
+nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
+ struct nfp_net_rx_buf *rxbuf;
+ struct nfp_net_rx_desc *rxd;
+ dma_addr_t new_dma_addr;
+ struct sk_buff *skb;
+ void *new_frag;
+ int idx;
+
+ idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+ rxd = &rx_ring->rxds[idx];
+ if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+ return false;
+
+ /* Memory barrier to ensure that we won't do other reads
+ * before the DD bit.
+ */
+ dma_rmb();
+
+ rx_ring->rd_p++;
+
+ rxbuf = &rx_ring->rxbufs[idx];
+ meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+ data_len = le16_to_cpu(rxd->rxd.data_len);
+ pkt_len = data_len - meta_len;
+
+ pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ pkt_off += meta_len;
+ else
+ pkt_off += dp->rx_offset;
+ meta_off = pkt_off - meta_len;
+
+ /* Stats update */
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_pkts++;
+ r_vec->rx_bytes += pkt_len;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
+
+ if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
+ nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
+ meta_len);
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ return true;
+ }
+
+ skb = build_skb(rxbuf->frag, dp->fl_bufsz);
+ if (unlikely(!skb)) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ return true;
+ }
+ new_frag = nfp_nfdk_napi_alloc_one(dp, &new_dma_addr);
+ if (unlikely(!new_frag)) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+ return true;
+ }
+
+ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+ nfp_nfdk_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
+ skb_reserve(skb, pkt_off);
+ skb_put(skb, pkt_len);
+
+ nfp_app_ctrl_rx(nn->app, skb);
+
+ return true;
+}
+
+static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+{
+ struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
+ struct nfp_net *nn = r_vec->nfp_net;
+ struct nfp_net_dp *dp = &nn->dp;
+ unsigned int budget = 512;
+
+ while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
+ continue;
+
+ return budget;
+}
+
+void nfp_nfdk_ctrl_poll(struct tasklet_struct *t)
+{
+ struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);
+
+ spin_lock(&r_vec->lock);
+ nfp_nfdk_tx_complete(r_vec->tx_ring, 0);
+ __nfp_ctrl_tx_queued(r_vec);
+ spin_unlock(&r_vec->lock);
+
+ if (nfp_ctrl_rx(r_vec)) {
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+ } else {
+ tasklet_schedule(&r_vec->tasklet);
+ nn_dp_warn(&r_vec->nfp_net->dp,
+ "control message budget exceeded!\n");
+ }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h b/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
new file mode 100644
index 000000000000..c41e0975eb73
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef _NFP_DP_NFDK_H_
+#define _NFP_DP_NFDK_H_
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+#define NFDK_TX_DESC_PER_SIMPLE_PKT 2
+
+#define NFDK_TX_MAX_DATA_PER_HEAD SZ_4K
+#define NFDK_TX_MAX_DATA_PER_DESC SZ_16K
+#define NFDK_TX_DESC_BLOCK_SZ 256
+#define NFDK_TX_DESC_BLOCK_CNT (NFDK_TX_DESC_BLOCK_SZ / \
+ sizeof(struct nfp_nfdk_tx_desc))
+#define NFDK_TX_DESC_STOP_CNT (NFDK_TX_DESC_BLOCK_CNT * \
+ NFDK_TX_DESC_PER_SIMPLE_PKT)
+#define NFDK_TX_MAX_DATA_PER_BLOCK SZ_64K
+#define NFDK_TX_DESC_GATHER_MAX 17
+
+/* TX descriptor format */
+
+#define NFDK_DESC_TX_MSS_MASK GENMASK(13, 0)
+
+#define NFDK_DESC_TX_CHAIN_META BIT(3)
+#define NFDK_DESC_TX_ENCAP BIT(2)
+#define NFDK_DESC_TX_L4_CSUM BIT(1)
+#define NFDK_DESC_TX_L3_CSUM BIT(0)
+
+#define NFDK_DESC_TX_DMA_LEN_HEAD GENMASK(11, 0)
+#define NFDK_DESC_TX_TYPE_HEAD GENMASK(15, 12)
+#define NFDK_DESC_TX_DMA_LEN GENMASK(13, 0)
+#define NFDK_DESC_TX_TYPE_NOP 0
+#define NFDK_DESC_TX_TYPE_GATHER 1
+#define NFDK_DESC_TX_TYPE_TSO 2
+#define NFDK_DESC_TX_TYPE_SIMPLE 8
+#define NFDK_DESC_TX_EOP BIT(14)
+
+#define NFDK_META_LEN GENMASK(7, 0)
+#define NFDK_META_FIELDS GENMASK(31, 8)
+
+#define D_BLOCK_CPL(idx) (NFDK_TX_DESC_BLOCK_CNT - \
+ (idx) % NFDK_TX_DESC_BLOCK_CNT)
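
NFDK groups TX descriptors into 256-byte blocks, and the ring-reset path below uses D_BLOCK_CPL() to skip the padding slots (those with no skb attached) up to the next block boundary. A worked example, given the 8-byte descriptor defined just after:

/* NFDK_TX_DESC_BLOCK_CNT = 256 / sizeof(struct nfp_nfdk_tx_desc)
 *                        = 256 / 8 = 32 descriptors per block
 *
 * D_BLOCK_CPL(30) = 32 - 30 % 32 = 2   -> two slots left in this block
 * D_BLOCK_CPL(32) = 32 - 32 % 32 = 32  -> index 32 starts a fresh block
 */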
+
+struct nfp_nfdk_tx_desc {
+ union {
+ struct {
+ u8 dma_addr_hi; /* High bits of host buf address */
+ u8 padding; /* Must be zero */
+ __le16 dma_len_type; /* Length to DMA for this desc */
+ __le32 dma_addr_lo; /* Low 32bit of host buf addr */
+ };
+
+ struct {
+ __le16 mss; /* MSS to be used for LSO */
+ u8 lso_hdrlen; /* LSO, TCP payload offset */
+ u8 lso_totsegs; /* LSO, total segments */
+ u8 l3_offset; /* L3 header offset */
+ u8 l4_offset; /* L4 header offset */
+ __le16 lso_meta_res; /* Rsvd bits in TSO metadata */
+ };
+
+ struct {
+ u8 flags; /* TX Flags, see @NFDK_DESC_TX_* */
+ u8 reserved[7]; /* meta byte placeholder */
+ };
+
+ __le32 vals[2];
+ __le64 raw;
+ };
+};
+
+/* The device doesn't make use of the 2 or 3 least significant bits of the
+ * address due to alignment constraints, so the driver can use those bits to
+ * carry information about the buffer before giving it to the device.
+ *
+ * NOTE: The driver must clear the lower bits before handing the buffer to the
+ * device.
+ *
+ * - NFDK_TX_BUF_INFO_SOP - Start of a packet
+ * Marks the buffer as the start of a packet. This is used in the XDP TX
+ * process to stash the virtual and DMA addresses so that they can be
+ * recycled when the TX operation is completed.
+ */
+#define NFDK_TX_BUF_PTR(val) ((val) & ~(sizeof(void *) - 1))
+#define NFDK_TX_BUF_INFO(val) ((val) & (sizeof(void *) - 1))
+#define NFDK_TX_BUF_INFO_SOP BIT(0)
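
Since buffers are at least pointer-aligned, the low bits of the stored value are free to carry flags, and the two macros above split a stashed value back apart. A hedged sketch of the round trip (the locals are hypothetical):

unsigned long val = (unsigned long)frag | NFDK_TX_BUF_INFO_SOP;

void *buf = (void *)NFDK_TX_BUF_PTR(val);                  /* clean pointer */
bool sop  = NFDK_TX_BUF_INFO(val) & NFDK_TX_BUF_INFO_SOP;  /* SOP flag */
/* per the NOTE above, only the cleaned pointer may reach the device */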
+
+struct nfp_nfdk_tx_buf {
+ union {
+ /* First slot */
+ union {
+ struct sk_buff *skb;
+ void *frag;
+ unsigned long val;
+ };
+
+ /* 1 + nr_frags next slots */
+ dma_addr_t dma_addr;
+
+ /* TSO (optional) */
+ struct {
+ u32 pkt_cnt;
+ u32 real_len;
+ };
+
+ u64 raw;
+ };
+};
+
+static inline int nfp_nfdk_headlen_to_segs(unsigned int headlen)
+{
+ /* The first descriptor fits less data (SZ_4K vs SZ_16K), so adjust for that */
+ return DIV_ROUND_UP(headlen +
+ NFDK_TX_MAX_DATA_PER_DESC -
+ NFDK_TX_MAX_DATA_PER_HEAD,
+ NFDK_TX_MAX_DATA_PER_DESC);
+}
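
Worked numbers for the helper above, using the SZ_4K head limit and the SZ_16K per-descriptor limit:

/* nfp_nfdk_headlen_to_segs(headlen)
 *   = DIV_ROUND_UP(headlen + SZ_16K - SZ_4K, SZ_16K)
 *
 * headlen = 4096  -> DIV_ROUND_UP(16384, 16384) = 1 segment
 * headlen = 4097  -> DIV_ROUND_UP(16385, 16384) = 2 segments
 * headlen = 20481 -> DIV_ROUND_UP(32769, 16384) = 3 segments
 *                    (4K head descriptor + 16K + the final byte)
 */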
+
+int nfp_nfdk_poll(struct napi_struct *napi, int budget);
+netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev);
+bool
+nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, bool old);
+void nfp_nfdk_ctrl_poll(struct tasklet_struct *t);
+void nfp_nfdk_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring);
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/rings.c b/drivers/net/ethernet/netronome/nfp/nfdk/rings.c
new file mode 100644
index 000000000000..301f11108826
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/rings.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/seq_file.h>
+
+#include "../nfp_net.h"
+#include "../nfp_net_dp.h"
+#include "nfdk.h"
+
+static void
+nfp_nfdk_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+ struct device *dev = dp->dev;
+ struct netdev_queue *nd_q;
+
+ while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
+ const skb_frag_t *frag, *fend;
+ unsigned int size, n_descs = 1;
+ struct nfp_nfdk_tx_buf *txbuf;
+ int nr_frags, rd_idx;
+ struct sk_buff *skb;
+
+ rd_idx = D_IDX(tx_ring, tx_ring->rd_p);
+ txbuf = &tx_ring->ktxbufs[rd_idx];
+
+ skb = txbuf->skb;
+ if (!skb) {
+ n_descs = D_BLOCK_CPL(tx_ring->rd_p);
+ goto next;
+ }
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ txbuf++;
+
+ /* Unmap head */
+ size = skb_headlen(skb);
+ dma_unmap_single(dev, txbuf->dma_addr, size, DMA_TO_DEVICE);
+ n_descs += nfp_nfdk_headlen_to_segs(size);
+ txbuf++;
+
+ frag = skb_shinfo(skb)->frags;
+ fend = frag + nr_frags;
+ for (; frag < fend; frag++) {
+ size = skb_frag_size(frag);
+ dma_unmap_page(dev, txbuf->dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ n_descs += DIV_ROUND_UP(size,
+ NFDK_TX_MAX_DATA_PER_DESC);
+ txbuf++;
+ }
+
+ if (skb_is_gso(skb))
+ n_descs++;
+
+ dev_kfree_skb_any(skb);
+next:
+ tx_ring->rd_p += n_descs;
+ }
+
+ memset(tx_ring->txds, 0, tx_ring->size);
+ tx_ring->data_pending = 0;
+ tx_ring->wr_p = 0;
+ tx_ring->rd_p = 0;
+ tx_ring->qcp_rd_p = 0;
+ tx_ring->wr_ptr_add = 0;
+
+ if (tx_ring->is_xdp || !dp->netdev)
+ return;
+
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+ netdev_tx_reset_queue(nd_q);
+}
+
+static void nfp_nfdk_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+
+ kvfree(tx_ring->ktxbufs);
+
+ if (tx_ring->ktxds)
+ dma_free_coherent(dp->dev, tx_ring->size,
+ tx_ring->ktxds, tx_ring->dma);
+
+ tx_ring->cnt = 0;
+ tx_ring->txbufs = NULL;
+ tx_ring->txds = NULL;
+ tx_ring->dma = 0;
+ tx_ring->size = 0;
+}
+
+static int
+nfp_nfdk_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+
+ tx_ring->cnt = dp->txd_cnt * NFDK_TX_DESC_PER_SIMPLE_PKT;
+ tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->ktxds));
+ tx_ring->ktxds = dma_alloc_coherent(dp->dev, tx_ring->size,
+ &tx_ring->dma,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!tx_ring->ktxds) {
+ netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
+ tx_ring->cnt);
+ goto err_alloc;
+ }
+
+ tx_ring->ktxbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->ktxbufs),
+ GFP_KERNEL);
+ if (!tx_ring->ktxbufs)
+ goto err_alloc;
+
+ if (!tx_ring->is_xdp && dp->netdev)
+ netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
+ tx_ring->idx);
+
+ return 0;
+
+err_alloc:
+ nfp_nfdk_tx_ring_free(tx_ring);
+ return -ENOMEM;
+}
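
Since every simple packet needs two NFDK descriptor slots, the ring is sized at twice the configured descriptor count. An illustrative footprint, assuming the driver default of 4096 TX descriptors:

/* cnt  = 4096 * NFDK_TX_DESC_PER_SIMPLE_PKT           = 8192 slots
 * size = 8192 * sizeof(struct nfp_nfdk_tx_desc) (8 B) = 64 KiB of coherent
 * DMA memory, plus 8192 kvcalloc()'ed nfp_nfdk_tx_buf soft-ring entries.
 */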
+
+static void
+nfp_nfdk_tx_ring_bufs_free(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
+{
+}
+
+static int
+nfp_nfdk_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ return 0;
+}
+
+static void
+nfp_nfdk_print_tx_descs(struct seq_file *file,
+ struct nfp_net_r_vector *r_vec,
+ struct nfp_net_tx_ring *tx_ring,
+ u32 d_rd_p, u32 d_wr_p)
+{
+ struct nfp_nfdk_tx_desc *txd;
+ u32 txd_cnt = tx_ring->cnt;
+ int i;
+
+ for (i = 0; i < txd_cnt; i++) {
+ txd = &tx_ring->ktxds[i];
+
+ seq_printf(file, "%04d: 0x%08x 0x%08x 0x%016llx", i,
+ txd->vals[0], txd->vals[1], tx_ring->ktxbufs[i].raw);
+
+ if (i == tx_ring->rd_p % txd_cnt)
+ seq_puts(file, " H_RD");
+ if (i == tx_ring->wr_p % txd_cnt)
+ seq_puts(file, " H_WR");
+ if (i == d_rd_p % txd_cnt)
+ seq_puts(file, " D_RD");
+ if (i == d_wr_p % txd_cnt)
+ seq_puts(file, " D_WR");
+
+ seq_putc(file, '\n');
+ }
+}
+
+#define NFP_NFDK_CFG_CTRL_SUPPORTED \
+ (NFP_NET_CFG_CTRL_ENABLE | NFP_NET_CFG_CTRL_PROMISC | \
+ NFP_NET_CFG_CTRL_L2BC | NFP_NET_CFG_CTRL_L2MC | \
+ NFP_NET_CFG_CTRL_RXCSUM | NFP_NET_CFG_CTRL_TXCSUM | \
+ NFP_NET_CFG_CTRL_RXVLAN | \
+ NFP_NET_CFG_CTRL_GATHER | NFP_NET_CFG_CTRL_LSO | \
+ NFP_NET_CFG_CTRL_CTAG_FILTER | NFP_NET_CFG_CTRL_CMSG_DATA | \
+ NFP_NET_CFG_CTRL_RINGCFG | NFP_NET_CFG_CTRL_IRQMOD | \
+ NFP_NET_CFG_CTRL_TXRWB | \
+ NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE | \
+ NFP_NET_CFG_CTRL_BPF | NFP_NET_CFG_CTRL_LSO2 | \
+ NFP_NET_CFG_CTRL_RSS2 | NFP_NET_CFG_CTRL_CSUM_COMPLETE | \
+ NFP_NET_CFG_CTRL_LIVE_ADDR)
+
+const struct nfp_dp_ops nfp_nfdk_ops = {
+ .version = NFP_NFD_VER_NFDK,
+ .tx_min_desc_per_pkt = NFDK_TX_DESC_PER_SIMPLE_PKT,
+ .cap_mask = NFP_NFDK_CFG_CTRL_SUPPORTED,
+ .poll = nfp_nfdk_poll,
+ .ctrl_poll = nfp_nfdk_ctrl_poll,
+ .xmit = nfp_nfdk_tx,
+ .ctrl_tx_one = nfp_nfdk_ctrl_tx_one,
+ .rx_ring_fill_freelist = nfp_nfdk_rx_ring_fill_freelist,
+ .tx_ring_alloc = nfp_nfdk_tx_ring_alloc,
+ .tx_ring_reset = nfp_nfdk_tx_ring_reset,
+ .tx_ring_free = nfp_nfdk_tx_ring_free,
+ .tx_ring_bufs_alloc = nfp_nfdk_tx_ring_bufs_alloc,
+ .tx_ring_bufs_free = nfp_nfdk_tx_ring_bufs_free,
+ .print_tx_descs = nfp_nfdk_print_tx_descs
+};
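
The ops table is what keeps the core datapath-agnostic: ring management and xmit entry points are reached through struct nfp_dp_ops rather than by name. A minimal sketch of such a caller (the wrapper name is hypothetical; it only illustrates the dispatch):

static int nfp_net_dp_tx_ring_alloc(struct nfp_net_dp *dp,
				    struct nfp_net_tx_ring *tx_ring)
{
	/* resolves to nfp_nfdk_tx_ring_alloc() for NFDK firmware */
	return dp->ops->tx_ring_alloc(dp, tx_ring);
}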
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 3a973282b2bb..09f250e74dfa 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -121,7 +121,7 @@ struct nfp_reprs *
nfp_reprs_get_locked(struct nfp_app *app, enum nfp_repr_type type)
{
return rcu_dereference_protected(app->reprs[type],
- lockdep_is_held(&app->pf->lock));
+ nfp_app_is_locked(app));
}
struct nfp_reprs *
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 3e9baff07100..dd56207df246 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -75,7 +75,7 @@ extern const struct nfp_app_type app_abm;
* @bpf: BPF ndo offload-related calls
* @xdp_offload: offload an XDP program
* @eswitch_mode_get: get SR-IOV eswitch mode
- * @eswitch_mode_set: set SR-IOV eswitch mode (under pf->lock)
+ * @eswitch_mode_set: set SR-IOV eswitch mode
* @sriov_enable: app-specific sriov initialisation
* @sriov_disable: app-specific sriov clean-up
* @dev_get: get representor or internal port representing netdev
@@ -174,6 +174,16 @@ struct nfp_app {
void *priv;
};
+static inline void assert_nfp_app_locked(struct nfp_app *app)
+{
+ devl_assert_locked(priv_to_devlink(app->pf));
+}
+
+static inline bool nfp_app_is_locked(struct nfp_app *app)
+{
+ return devl_lock_is_held(priv_to_devlink(app->pf));
+}
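
These helpers replace checks against the old pf->lock mutex with the devlink instance lock. A hedged usage sketch (the function itself is hypothetical):

static void nfp_app_update_reprs(struct nfp_app *app)
{
	assert_nfp_app_locked(app);	/* lockdep splat unless devl_lock() is held */
	/* ... safe to touch app->reprs[], eswitch mode, etc. ... */
}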
+
void nfp_check_rhashtable_empty(void *ptr, void *arg);
bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index bea978df7713..405786c00334 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -26,12 +26,11 @@ nfp_devlink_fill_eth_port(struct nfp_port *port,
}
static int
-nfp_devlink_fill_eth_port_from_id(struct nfp_pf *pf, unsigned int port_index,
+nfp_devlink_fill_eth_port_from_id(struct nfp_pf *pf,
+ struct devlink_port *dl_port,
struct nfp_eth_table_port *copy)
{
- struct nfp_port *port;
-
- port = nfp_port_from_id(pf, NFP_PORT_PHYS_PORT, port_index);
+ struct nfp_port *port = container_of(dl_port, struct nfp_port, dl_port);
return nfp_devlink_fill_eth_port(port, copy);
}
@@ -62,7 +61,7 @@ nfp_devlink_set_lanes(struct nfp_pf *pf, unsigned int idx, unsigned int lanes)
}
static int
-nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
+nfp_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
unsigned int count, struct netlink_ext_ack *extack)
{
struct nfp_pf *pf = devlink_priv(devlink);
@@ -70,33 +69,25 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
unsigned int lanes;
int ret;
- mutex_lock(&pf->lock);
-
rtnl_lock();
- ret = nfp_devlink_fill_eth_port_from_id(pf, port_index, &eth_port);
+ ret = nfp_devlink_fill_eth_port_from_id(pf, port, &eth_port);
rtnl_unlock();
if (ret)
- goto out;
+ return ret;
- if (eth_port.port_lanes % count) {
- ret = -EINVAL;
- goto out;
- }
+ if (eth_port.port_lanes % count)
+ return -EINVAL;
/* Special case the 100G CXP -> 2x40G split */
lanes = eth_port.port_lanes / count;
if (eth_port.lanes == 10 && count == 2)
lanes = 8 / count;
- ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
-out:
- mutex_unlock(&pf->lock);
-
- return ret;
+ return nfp_devlink_set_lanes(pf, eth_port.index, lanes);
}
static int
-nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index,
+nfp_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
struct netlink_ext_ack *extack)
{
struct nfp_pf *pf = devlink_priv(devlink);
@@ -104,29 +95,21 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index,
unsigned int lanes;
int ret;
- mutex_lock(&pf->lock);
-
rtnl_lock();
- ret = nfp_devlink_fill_eth_port_from_id(pf, port_index, &eth_port);
+ ret = nfp_devlink_fill_eth_port_from_id(pf, port, &eth_port);
rtnl_unlock();
if (ret)
- goto out;
+ return ret;
- if (!eth_port.is_split) {
- ret = -EINVAL;
- goto out;
- }
+ if (!eth_port.is_split)
+ return -EINVAL;
/* Special case the 100G CXP -> 2x40G unsplit */
lanes = eth_port.port_lanes;
if (eth_port.port_lanes == 8)
lanes = 10;
- ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
-out:
- mutex_unlock(&pf->lock);
-
- return ret;
+ return nfp_devlink_set_lanes(pf, eth_port.index, lanes);
}
static int
@@ -161,13 +144,8 @@ static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
struct nfp_pf *pf = devlink_priv(devlink);
- int ret;
-
- mutex_lock(&pf->lock);
- ret = nfp_app_eswitch_mode_set(pf->app, mode);
- mutex_unlock(&pf->lock);
- return ret;
+ return nfp_app_eswitch_mode_set(pf->app, mode);
}
static const struct nfp_devlink_versions_simple {
@@ -375,12 +353,12 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
devlink = priv_to_devlink(app->pf);
- return devlink_port_register(devlink, &port->dl_port, port->eth_id);
+ return devl_port_register(devlink, &port->dl_port, port->eth_id);
}
void nfp_devlink_port_unregister(struct nfp_port *port)
{
- devlink_port_unregister(&port->dl_port);
+ devl_port_unregister(&port->dl_port);
}
void nfp_devlink_port_type_eth_set(struct nfp_port *port)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index bb3b8a7f6c5d..eeda39e34f84 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -19,6 +19,7 @@
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp.h"
@@ -32,17 +33,21 @@
static const char nfp_driver_name[] = "nfp";
static const struct pci_device_id nfp_pci_device_ids[] = {
- { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000,
+ { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP3800,
PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
- PCI_ANY_ID, 0,
+ PCI_ANY_ID, 0, NFP_DEV_NFP3800,
+ },
+ { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000,
+ PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
+ PCI_ANY_ID, 0, NFP_DEV_NFP6000,
},
{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP5000,
PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
- PCI_ANY_ID, 0,
+ PCI_ANY_ID, 0, NFP_DEV_NFP6000,
},
- { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000,
+ { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000,
PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
- PCI_ANY_ID, 0,
+ PCI_ANY_ID, 0, NFP_DEV_NFP6000,
},
{ 0, } /* Required last entry. */
};
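
driver_data in each entry is now an index into the per-ASIC parameter table, which is how the probe path further down picks the matching DMA mask and queue-controller layout:

/* As used in nfp_pci_probe() below:
 *   dev_info = &nfp_dev_info[pci_id->driver_data];
 *   dma_set_mask_and_coherent(&pdev->dev, dev_info->dma_mask);
 */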
@@ -222,6 +227,7 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
struct nfp_pf *pf = pci_get_drvdata(pdev);
+ struct devlink *devlink;
int err;
if (num_vfs > pf->limit_vfs) {
@@ -236,7 +242,8 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
return err;
}
- mutex_lock(&pf->lock);
+ devlink = priv_to_devlink(pf);
+ devl_lock(devlink);
err = nfp_app_sriov_enable(pf->app, num_vfs);
if (err) {
@@ -250,11 +257,11 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
dev_dbg(&pdev->dev, "Created %d VFs.\n", pf->num_vfs);
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
return num_vfs;
err_sriov_disable:
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
pci_disable_sriov(pdev);
return err;
#endif
@@ -265,8 +272,10 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
{
#ifdef CONFIG_PCI_IOV
struct nfp_pf *pf = pci_get_drvdata(pdev);
+ struct devlink *devlink;
- mutex_lock(&pf->lock);
+ devlink = priv_to_devlink(pf);
+ devl_lock(devlink);
/* If the VFs are assigned we cannot shut down SR-IOV without
* causing issues, so just leave the hardware available but
@@ -274,7 +283,7 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
*/
if (pci_vfs_assigned(pdev)) {
dev_warn(&pdev->dev, "Disabling while VFs assigned - VFs will not be deallocated\n");
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
return -EPERM;
}
@@ -282,7 +291,7 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
pf->num_vfs = 0;
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
pci_disable_sriov(pdev);
dev_dbg(&pdev->dev, "Removed VFs.\n");
@@ -667,6 +676,7 @@ static int nfp_pf_find_rtsyms(struct nfp_pf *pf)
static int nfp_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
+ const struct nfp_dev_info *dev_info;
struct devlink *devlink;
struct nfp_pf *pf;
int err;
@@ -675,14 +685,15 @@ static int nfp_pci_probe(struct pci_dev *pdev,
pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000_VF)
dev_warn(&pdev->dev, "Binding NFP VF device to the NFP PF driver, the VF driver is called 'nfp_netvf'\n");
+ dev_info = &nfp_dev_info[pci_id->driver_data];
+
err = pci_enable_device(pdev);
if (err < 0)
return err;
pci_set_master(pdev);
- err = dma_set_mask_and_coherent(&pdev->dev,
- DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS));
+ err = dma_set_mask_and_coherent(&pdev->dev, dev_info->dma_mask);
if (err)
goto err_pci_disable;
@@ -700,9 +711,9 @@ static int nfp_pci_probe(struct pci_dev *pdev,
pf = devlink_priv(devlink);
INIT_LIST_HEAD(&pf->vnics);
INIT_LIST_HEAD(&pf->ports);
- mutex_init(&pf->lock);
pci_set_drvdata(pdev, pf);
pf->pdev = pdev;
+ pf->dev_info = dev_info;
pf->wq = alloc_workqueue("nfp-%s", 0, 2, pci_name(pdev));
if (!pf->wq) {
@@ -710,7 +721,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
goto err_pci_priv_unset;
}
- pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev);
+ pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev, dev_info);
if (IS_ERR(pf->cpp)) {
err = PTR_ERR(pf->cpp);
goto err_disable_msix;
@@ -790,7 +801,6 @@ err_disable_msix:
destroy_workqueue(pf->wq);
err_pci_priv_unset:
pci_set_drvdata(pdev, NULL);
- mutex_destroy(&pf->lock);
devlink_free(devlink);
err_rel_regions:
pci_release_regions(pdev);
@@ -827,7 +837,6 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw)
kfree(pf->eth_tbl);
kfree(pf->nspi);
- mutex_destroy(&pf->lock);
devlink_free(priv_to_devlink(pf));
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index a7dede946a33..f56ca11de134 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -13,7 +13,6 @@
#include <linux/list.h>
#include <linux/types.h>
#include <linux/msi.h>
-#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <net/devlink.h>
@@ -48,6 +47,7 @@ struct nfp_dumpspec {
/**
* struct nfp_pf - NFP PF-specific device structure
* @pdev: Backpointer to PCI device
+ * @dev_info: NFP ASIC params
* @cpp: Pointer to the CPP handle
* @app: Pointer to the APP handle
* @data_vnic_bar: Pointer to the CPP area for the data vNICs' BARs
@@ -84,10 +84,12 @@ struct nfp_dumpspec {
* @port_refresh_work: Work entry for taking netdevs out
* @shared_bufs: Array of shared buffer structures if FW has any SBs
* @num_shared_bufs: Number of elements in @shared_bufs
- * @lock: Protects all fields which may change after probe
+ *
+ * Fields which may change after probe are protected by the devlink instance lock.
*/
struct nfp_pf {
struct pci_dev *pdev;
+ const struct nfp_dev_info *dev_info;
struct nfp_cpp *cpp;
@@ -139,8 +141,6 @@ struct nfp_pf {
struct nfp_shared_buf *shared_bufs;
unsigned int num_shared_bufs;
-
- struct mutex lock;
};
extern struct pci_driver nfp_netvf_pci_driver;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 0b1865e9f0b5..428783b7018b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -63,9 +63,6 @@
#define NFP_NET_Q0_BAR 2
#define NFP_NET_Q1_BAR 4 /* OBSOLETE */
-/* Max bits in DMA address */
-#define NFP_NET_MAX_DMA_BITS 40
-
/* Default size for MTU and freelist buffer sizes */
#define NFP_NET_DEFAULT_MTU 1500U
@@ -85,11 +82,6 @@
NFP_NET_MAX_TX_RINGS : NFP_NET_MAX_RX_RINGS)
#define NFP_NET_MAX_IRQS (NFP_NET_NON_Q_VECTORS + NFP_NET_MAX_R_VECS)
-#define NFP_NET_MIN_TX_DESCS 256 /* Min. # of Tx descs per ring */
-#define NFP_NET_MIN_RX_DESCS 256 /* Min. # of Rx descs per ring */
-#define NFP_NET_MAX_TX_DESCS (256 * 1024) /* Max. # of Tx descs per ring */
-#define NFP_NET_MAX_RX_DESCS (256 * 1024) /* Max. # of Rx descs per ring */
-
#define NFP_NET_TX_DESCS_DEFAULT 4096 /* Default # of Tx descs per ring */
#define NFP_NET_RX_DESCS_DEFAULT 4096 /* Default # of Rx descs per ring */
@@ -105,10 +97,19 @@
/* Forward declarations */
struct nfp_cpp;
+struct nfp_dev_info;
+struct nfp_dp_ops;
struct nfp_eth_table_port;
struct nfp_net;
struct nfp_net_r_vector;
struct nfp_port;
+struct xsk_buff_pool;
+
+struct nfp_nfd3_tx_desc;
+struct nfp_nfd3_tx_buf;
+
+struct nfp_nfdk_tx_desc;
+struct nfp_nfdk_tx_buf;
/* Convenience macro for wrapping descriptor index on ring size */
#define D_IDX(ring, idx) ((idx) & ((ring)->cnt - 1))
@@ -123,86 +124,25 @@ struct nfp_port;
__d->dma_addr_hi = upper_32_bits(__addr) & 0xff; \
} while (0)
-/* TX descriptor format */
-
-#define PCIE_DESC_TX_EOP BIT(7)
-#define PCIE_DESC_TX_OFFSET_MASK GENMASK(6, 0)
-#define PCIE_DESC_TX_MSS_MASK GENMASK(13, 0)
-
-/* Flags in the host TX descriptor */
-#define PCIE_DESC_TX_CSUM BIT(7)
-#define PCIE_DESC_TX_IP4_CSUM BIT(6)
-#define PCIE_DESC_TX_TCP_CSUM BIT(5)
-#define PCIE_DESC_TX_UDP_CSUM BIT(4)
-#define PCIE_DESC_TX_VLAN BIT(3)
-#define PCIE_DESC_TX_LSO BIT(2)
-#define PCIE_DESC_TX_ENCAP BIT(1)
-#define PCIE_DESC_TX_O_IP4_CSUM BIT(0)
-
-struct nfp_net_tx_desc {
- union {
- struct {
- u8 dma_addr_hi; /* High bits of host buf address */
- __le16 dma_len; /* Length to DMA for this desc */
- u8 offset_eop; /* Offset in buf where pkt starts +
- * highest bit is eop flag.
- */
- __le32 dma_addr_lo; /* Low 32bit of host buf addr */
-
- __le16 mss; /* MSS to be used for LSO */
- u8 lso_hdrlen; /* LSO, TCP payload offset */
- u8 flags; /* TX Flags, see @PCIE_DESC_TX_* */
- union {
- struct {
- u8 l3_offset; /* L3 header offset */
- u8 l4_offset; /* L4 header offset */
- };
- __le16 vlan; /* VLAN tag to add if indicated */
- };
- __le16 data_len; /* Length of frame + meta data */
- } __packed;
- __le32 vals[4];
- __le64 vals8[2];
- };
-};
-
-/**
- * struct nfp_net_tx_buf - software TX buffer descriptor
- * @skb: normal ring, sk_buff associated with this buffer
- * @frag: XDP ring, page frag associated with this buffer
- * @dma_addr: DMA mapping address of the buffer
- * @fidx: Fragment index (-1 for the head and [0..nr_frags-1] for frags)
- * @pkt_cnt: Number of packets to be produced out of the skb associated
- * with this buffer (valid only on the head's buffer).
- * Will be 1 for all non-TSO packets.
- * @real_len: Number of bytes which to be produced out of the skb (valid only
- * on the head's buffer). Equal to skb->len for non-TSO packets.
- */
-struct nfp_net_tx_buf {
- union {
- struct sk_buff *skb;
- void *frag;
- };
- dma_addr_t dma_addr;
- short int fidx;
- u16 pkt_cnt;
- u32 real_len;
-};
-
/**
* struct nfp_net_tx_ring - TX ring structure
* @r_vec: Back pointer to ring vector structure
* @idx: Ring index from Linux's perspective
- * @qcidx: Queue Controller Peripheral (QCP) queue index for the TX queue
+ * @data_pending: Number of bytes added to the current descriptor block (NFDK only)
* @qcp_q: Pointer to base of the QCP TX queue
+ * @txrwb: TX pointer write back area
* @cnt: Size of the queue in number of descriptors
* @wr_p: TX ring write pointer (free running)
* @rd_p: TX ring read pointer (free running)
* @qcp_rd_p: Local copy of QCP TX queue read pointer
* @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer
* (used for .xmit_more delayed kick)
- * @txbufs: Array of transmitted TX buffers, to free on transmit
- * @txds: Virtual address of TX ring in host memory
+ * @txbufs: Array of transmitted TX buffers, to free on transmit (NFD3)
+ * @ktxbufs: Array of transmitted TX buffers, to free on transmit (NFDK)
+ * @txds: Virtual address of TX ring in host memory (NFD3)
+ * @ktxds: Virtual address of TX ring in host memory (NFDK)
+ *
+ * @qcidx: Queue Controller Peripheral (QCP) queue index for the TX queue
* @dma: DMA address of the TX ring
* @size: Size, in bytes, of the TX ring (needed to free)
* @is_xdp: Is this a XDP TX ring?
@@ -210,9 +150,10 @@ struct nfp_net_tx_buf {
struct nfp_net_tx_ring {
struct nfp_net_r_vector *r_vec;
- u32 idx;
- int qcidx;
+ u16 idx;
+ u16 data_pending;
u8 __iomem *qcp_q;
+ u64 *txrwb;
u32 cnt;
u32 wr_p;
@@ -221,8 +162,17 @@ struct nfp_net_tx_ring {
u32 wr_ptr_add;
- struct nfp_net_tx_buf *txbufs;
- struct nfp_net_tx_desc *txds;
+ union {
+ struct nfp_nfd3_tx_buf *txbufs;
+ struct nfp_nfdk_tx_buf *ktxbufs;
+ };
+ union {
+ struct nfp_nfd3_tx_desc *txds;
+ struct nfp_nfdk_tx_desc *ktxds;
+ };
+
+ /* Cold data follows */
+ int qcidx;
dma_addr_t dma;
size_t size;
@@ -315,6 +265,16 @@ struct nfp_net_rx_buf {
};
/**
+ * struct nfp_net_xsk_rx_buf - software RX XSK buffer descriptor
+ * @dma_addr: DMA mapping address of the buffer
+ * @xdp: XSK buffer pool handle (for AF_XDP)
+ */
+struct nfp_net_xsk_rx_buf {
+ dma_addr_t dma_addr;
+ struct xdp_buff *xdp;
+};
+
+/**
* struct nfp_net_rx_ring - RX ring structure
* @r_vec: Back pointer to ring vector structure
* @cnt: Size of the queue in number of descriptors
@@ -324,6 +284,7 @@ struct nfp_net_rx_buf {
* @fl_qcidx: Queue Controller Peripheral (QCP) queue index for the freelist
* @qcp_fl: Pointer to base of the QCP freelist queue
* @rxbufs: Array of transmitted FL/RX buffers
+ * @xsk_rxbufs: Array of transmitted FL/RX buffers (for AF_XDP)
* @rxds: Virtual address of FL/RX ring in host memory
* @xdp_rxq: RX-ring info avail for XDP
* @dma: DMA address of the FL/RX ring
@@ -342,6 +303,7 @@ struct nfp_net_rx_ring {
u8 __iomem *qcp_fl;
struct nfp_net_rx_buf *rxbufs;
+ struct nfp_net_xsk_rx_buf *xsk_rxbufs;
struct nfp_net_rx_desc *rxds;
struct xdp_rxq_info xdp_rxq;
@@ -360,6 +322,7 @@ struct nfp_net_rx_ring {
* @tx_ring: Pointer to TX ring
* @rx_ring: Pointer to RX ring
* @xdp_ring: Pointer to an extra TX ring for XDP
+ * @xsk_pool: XSK buffer pool active on vector queue pair (for AF_XDP)
* @irq_entry: MSI-X table entry (use for talking to the device)
* @event_ctr: Number of interrupt
* @rx_dim: Dynamic interrupt moderation structure for RX
@@ -431,6 +394,7 @@ struct nfp_net_r_vector {
u64 rx_replace_buf_alloc_fail;
struct nfp_net_tx_ring *xdp_ring;
+ struct xsk_buff_pool *xsk_pool;
struct u64_stats_sync tx_sync;
u64 tx_pkts;
@@ -460,13 +424,17 @@ struct nfp_net_fw_version {
u8 minor;
u8 major;
u8 class;
- u8 resv;
+
+ /* This byte is available for future extension; currently
+ * BIT(0): dp type, bits [7:1]: reserved
+ */
+ u8 extend;
} __packed;
static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
- u8 resv, u8 class, u8 major, u8 minor)
+ u8 extend, u8 class, u8 major, u8 minor)
{
- return fw_ver->resv == resv &&
+ return fw_ver->extend == extend &&
fw_ver->class == class &&
fw_ver->major == major &&
fw_ver->minor == minor;
@@ -494,13 +462,17 @@ struct nfp_stat_pair {
* @rx_rings: Array of pre-allocated RX ring structures
* @ctrl_bar: Pointer to mapped control BAR
*
- * @txd_cnt: Size of the TX ring in number of descriptors
- * @rxd_cnt: Size of the RX ring in number of descriptors
+ * @ops: Callbacks and parameters for this vNIC's NFD version
+ * @txrwb: TX pointer write back area (indexed by queue id)
+ * @txrwb_dma: TX pointer write back area DMA address
+ * @txd_cnt: Size of the TX ring in number of min size packets
+ * @rxd_cnt: Size of the RX ring in number of min size packets
* @num_r_vecs: Number of used ring vectors
* @num_tx_rings: Currently configured number of TX rings
* @num_stack_tx_rings: Number of TX rings used by the stack (not XDP)
* @num_rx_rings: Currently configured number of RX rings
* @mtu: Device MTU
+ * @xsk_pools: XSK buffer pools, @max_r_vecs in size (for AF_XDP).
*/
struct nfp_net_dp {
struct device *dev;
@@ -527,6 +499,11 @@ struct nfp_net_dp {
/* Cold data follows */
+ const struct nfp_dp_ops *ops;
+
+ u64 *txrwb;
+ dma_addr_t txrwb_dma;
+
unsigned int txd_cnt;
unsigned int rxd_cnt;
@@ -537,11 +514,14 @@ struct nfp_net_dp {
unsigned int num_rx_rings;
unsigned int mtu;
+
+ struct xsk_buff_pool **xsk_pools;
};
/**
* struct nfp_net - NFP network device structure
* @dp: Datapath structure
+ * @dev_info: NFP ASIC params
* @id: vNIC id within the PF (0 for VFs)
* @fw_ver: Firmware version
* @cap: Capabilities advertised by the Firmware
@@ -615,6 +595,7 @@ struct nfp_net_dp {
struct nfp_net {
struct nfp_net_dp dp;
+ const struct nfp_dev_info *dev_info;
struct nfp_net_fw_version fw_ver;
u32 id;
@@ -767,7 +748,6 @@ static inline void nn_pci_flush(struct nfp_net *nn)
* either add to a pointer or to read the pointer value.
*/
#define NFP_QCP_QUEUE_ADDR_SZ 0x800
-#define NFP_QCP_QUEUE_AREA_SZ 0x80000
#define NFP_QCP_QUEUE_OFF(_x) ((_x) * NFP_QCP_QUEUE_ADDR_SZ)
#define NFP_QCP_QUEUE_ADD_RPTR 0x0000
#define NFP_QCP_QUEUE_ADD_WPTR 0x0004
@@ -776,50 +756,21 @@ static inline void nn_pci_flush(struct nfp_net *nn)
#define NFP_QCP_QUEUE_STS_HI 0x000c
#define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask 0x3ffff
-/* The offset of a QCP queues in the PCIe Target */
-#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))
-
/* nfp_qcp_ptr - Read or Write Pointer of a queue */
enum nfp_qcp_ptr {
NFP_QCP_READ_PTR = 0,
NFP_QCP_WRITE_PTR
};
-/* There appear to be an *undocumented* upper limit on the value which
- * one can add to a queue and that value is either 0x3f or 0x7f. We
- * go with 0x3f as a conservative measure.
- */
-#define NFP_QCP_MAX_ADD 0x3f
-
-static inline void _nfp_qcp_ptr_add(u8 __iomem *q,
- enum nfp_qcp_ptr ptr, u32 val)
-{
- u32 off;
-
- if (ptr == NFP_QCP_READ_PTR)
- off = NFP_QCP_QUEUE_ADD_RPTR;
- else
- off = NFP_QCP_QUEUE_ADD_WPTR;
-
- while (val > NFP_QCP_MAX_ADD) {
- writel(NFP_QCP_MAX_ADD, q + off);
- val -= NFP_QCP_MAX_ADD;
- }
-
- writel(val, q + off);
-}
-
/**
* nfp_qcp_rd_ptr_add() - Add the value to the read pointer of a queue
*
* @q: Base address for queue structure
* @val: Value to add to the queue pointer
- *
- * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
*/
static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
{
- _nfp_qcp_ptr_add(q, NFP_QCP_READ_PTR, val);
+ writel(val, q + NFP_QCP_QUEUE_ADD_RPTR);
}
/**
@@ -827,12 +778,10 @@ static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
*
* @q: Base address for queue structure
* @val: Value to add to the queue pointer
- *
- * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
*/
static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val)
{
- _nfp_qcp_ptr_add(q, NFP_QCP_WRITE_PTR, val);
+ writel(val, q + NFP_QCP_QUEUE_ADD_WPTR);
}
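
With the chunked-add workaround gone, advancing a queue pointer is a single posted MMIO write no matter the increment. The call pattern (as in the freelist refill path removed further down) is unchanged:

/* Order ring/descriptor writes before the pointer bump, then one writel(): */
wmb();
nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);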
static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr)
@@ -875,6 +824,8 @@ static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR);
}
+u32 nfp_qcp_queue_offset(const struct nfp_dev_info *dev_info, u16 queue);
+
static inline bool nfp_net_is_data_vnic(struct nfp_net *nn)
{
WARN_ON_ONCE(!nn->dp.netdev && nn->port);
@@ -921,11 +872,13 @@ static inline void nn_ctrl_bar_unlock(struct nfp_net *nn)
/* Globals */
extern const char nfp_driver_version[];
-extern const struct net_device_ops nfp_net_netdev_ops;
+extern const struct net_device_ops nfp_nfd3_netdev_ops;
+extern const struct net_device_ops nfp_nfdk_netdev_ops;
static inline bool nfp_netdev_is_nfp_net(struct net_device *netdev)
{
- return netdev->netdev_ops == &nfp_net_netdev_ops;
+ return netdev->netdev_ops == &nfp_nfd3_netdev_ops ||
+ netdev->netdev_ops == &nfp_nfdk_netdev_ops;
}
static inline int nfp_net_coalesce_para_check(u32 usecs, u32 pkts)
@@ -941,7 +894,8 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
void __iomem *ctrl_bar);
struct nfp_net *
-nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
+nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
+ void __iomem *ctrl_bar, bool needs_netdev,
unsigned int max_tx_rings, unsigned int max_rx_rings);
void nfp_net_free(struct nfp_net *nn);
@@ -972,6 +926,10 @@ void nfp_net_irqs_disable(struct pci_dev *pdev);
void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
unsigned int n);
+struct sk_buff *
+nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, u64 *tls_handle, int *nr_frags);
+void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle);
struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 79257ec41987..b412670d89b2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
/*
* nfp_net_common.c
@@ -13,7 +13,6 @@
#include <linux/bitfield.h>
#include <linux/bpf.h>
-#include <linux/bpf_trace.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -38,13 +37,17 @@
#include <net/tls.h>
#include <net/vxlan.h>
+#include <net/xdp_sock_drv.h>
+#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nsp.h"
#include "ccm.h"
#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
+#include "nfp_net_dp.h"
#include "nfp_net_sriov.h"
+#include "nfp_net_xsk.h"
#include "nfp_port.h"
#include "crypto/crypto.h"
#include "crypto/fw.h"
@@ -63,33 +66,10 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
put_unaligned_le32(reg, fw_ver);
}
-static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
+u32 nfp_qcp_queue_offset(const struct nfp_dev_info *dev_info, u16 queue)
{
- return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
- dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
- dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-}
-
-static void
-nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
-{
- dma_sync_single_for_device(dp->dev, dma_addr,
- dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
- dp->rx_dma_dir);
-}
-
-static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr)
-{
- dma_unmap_single_attrs(dp->dev, dma_addr,
- dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
- dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-}
-
-static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr,
- unsigned int len)
-{
- dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
- len, dp->rx_dma_dir);
+ queue &= dev_info->qc_idx_mask;
+ return dev_info->qc_addr_offset + NFP_QCP_QUEUE_ADDR_SZ * queue;
}
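
For NFP6000-class parts this reproduces the old NFP_PCIE_QUEUE() macro (0x80000 base, 8-bit queue index) being removed from nfp_net.h below. A worked example with those values:

/* qc_addr_offset = 0x80000, qc_idx_mask = 0xff (NFP6000 values):
 * nfp_qcp_queue_offset(dev_info, 0x105)
 *   = 0x80000 + NFP_QCP_QUEUE_ADDR_SZ * (0x105 & 0xff)
 *   = 0x80000 + 0x800 * 5 = 0x82800
 */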
/* Firmware reconfig
@@ -375,19 +355,6 @@ int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
*/
/**
- * nfp_net_irq_unmask() - Unmask automasked interrupt
- * @nn: NFP Network structure
- * @entry_nr: MSI-X table entry
- *
- * Clear the ICR for the IRQ entry.
- */
-static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
-{
- nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
- nn_pci_flush(nn);
-}
-
-/**
* nfp_net_irqs_alloc() - allocates MSI-X irqs
* @pdev: PCI device structure
* @irq_entries: Array to be initialized and used to hold the irq entries
@@ -569,49 +536,6 @@ static irqreturn_t nfp_net_irq_exn(int irq, void *data)
}
/**
- * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
- * @tx_ring: TX ring structure
- * @r_vec: IRQ vector servicing this ring
- * @idx: Ring index
- * @is_xdp: Is this an XDP TX ring?
- */
-static void
-nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
- struct nfp_net_r_vector *r_vec, unsigned int idx,
- bool is_xdp)
-{
- struct nfp_net *nn = r_vec->nfp_net;
-
- tx_ring->idx = idx;
- tx_ring->r_vec = r_vec;
- tx_ring->is_xdp = is_xdp;
- u64_stats_init(&tx_ring->r_vec->tx_sync);
-
- tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
- tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
-}
-
-/**
- * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
- * @rx_ring: RX ring structure
- * @r_vec: IRQ vector servicing this ring
- * @idx: Ring index
- */
-static void
-nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
- struct nfp_net_r_vector *r_vec, unsigned int idx)
-{
- struct nfp_net *nn = r_vec->nfp_net;
-
- rx_ring->idx = idx;
- rx_ring->r_vec = r_vec;
- u64_stats_init(&rx_ring->r_vec->rx_sync);
-
- rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
- rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
-}
-
-/**
* nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
* @nn: NFP Network structure
* @ctrl_offset: Control BAR offset where IRQ configuration should be written
@@ -658,178 +582,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
free_irq(nn->irq_entries[vector_idx].vector, nn);
}
-/* Transmit
- *
- * One queue controller peripheral queue is used for transmit. The
- * driver en-queues packets for transmit by advancing the write
- * pointer. The device indicates that packets have transmitted by
- * advancing the read pointer. The driver maintains a local copy of
- * the read and write pointer in @struct nfp_net_tx_ring. The driver
- * keeps @wr_p in sync with the queue controller write pointer and can
- * determine how many packets have been transmitted by comparing its
- * copy of the read pointer @rd_p with the read pointer maintained by
- * the queue controller peripheral.
- */
-
-/**
- * nfp_net_tx_full() - Check if the TX ring is full
- * @tx_ring: TX ring to check
- * @dcnt: Number of descriptors that need to be enqueued (must be >= 1)
- *
- * This function checks, based on the *host copy* of read/write
- * pointer if a given TX ring is full. The real TX queue may have
- * some newly made available slots.
- *
- * Return: True if the ring is full.
- */
-static int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
-{
- return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
-}
-
-/* Wrappers for deciding when to stop and restart TX queues */
-static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
-{
- return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
-}
-
-static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
-{
- return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
-}
-
-/**
- * nfp_net_tx_ring_stop() - stop tx ring
- * @nd_q: netdev queue
- * @tx_ring: driver tx queue structure
- *
- * Safely stop TX ring. Remember that while we are running .start_xmit()
- * someone else may be cleaning the TX ring completions so we need to be
- * extra careful here.
- */
-static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
- struct nfp_net_tx_ring *tx_ring)
-{
- netif_tx_stop_queue(nd_q);
-
- /* We can race with the TX completion out of NAPI so recheck */
- smp_mb();
- if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
- netif_tx_start_queue(nd_q);
-}
-
-/**
- * nfp_net_tx_tso() - Set up Tx descriptor for LSO
- * @r_vec: per-ring structure
- * @txbuf: Pointer to driver soft TX descriptor
- * @txd: Pointer to HW TX descriptor
- * @skb: Pointer to SKB
- * @md_bytes: Prepend length
- *
- * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
- * Return error on packet header greater than maximum supported LSO header size.
- */
-static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
- struct nfp_net_tx_buf *txbuf,
- struct nfp_net_tx_desc *txd, struct sk_buff *skb,
- u32 md_bytes)
-{
- u32 l3_offset, l4_offset, hdrlen;
- u16 mss;
-
- if (!skb_is_gso(skb))
- return;
-
- if (!skb->encapsulation) {
- l3_offset = skb_network_offset(skb);
- l4_offset = skb_transport_offset(skb);
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
- } else {
- l3_offset = skb_inner_network_offset(skb);
- l4_offset = skb_inner_transport_offset(skb);
- hdrlen = skb_inner_transport_header(skb) - skb->data +
- inner_tcp_hdrlen(skb);
- }
-
- txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
- txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
-
- mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
- txd->l3_offset = l3_offset - md_bytes;
- txd->l4_offset = l4_offset - md_bytes;
- txd->lso_hdrlen = hdrlen - md_bytes;
- txd->mss = cpu_to_le16(mss);
- txd->flags |= PCIE_DESC_TX_LSO;
-
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_lso++;
- u64_stats_update_end(&r_vec->tx_sync);
-}
-
-/**
- * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
- * @dp: NFP Net data path struct
- * @r_vec: per-ring structure
- * @txbuf: Pointer to driver soft TX descriptor
- * @txd: Pointer to TX descriptor
- * @skb: Pointer to SKB
- *
- * This function sets the TX checksum flags in the TX descriptor based
- * on the configuration and the protocol of the packet to be transmitted.
- */
-static void nfp_net_tx_csum(struct nfp_net_dp *dp,
- struct nfp_net_r_vector *r_vec,
- struct nfp_net_tx_buf *txbuf,
- struct nfp_net_tx_desc *txd, struct sk_buff *skb)
-{
- struct ipv6hdr *ipv6h;
- struct iphdr *iph;
- u8 l4_hdr;
-
- if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
- return;
-
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- return;
-
- txd->flags |= PCIE_DESC_TX_CSUM;
- if (skb->encapsulation)
- txd->flags |= PCIE_DESC_TX_ENCAP;
-
- iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
- ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
-
- if (iph->version == 4) {
- txd->flags |= PCIE_DESC_TX_IP4_CSUM;
- l4_hdr = iph->protocol;
- } else if (ipv6h->version == 6) {
- l4_hdr = ipv6h->nexthdr;
- } else {
- nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
- return;
- }
-
- switch (l4_hdr) {
- case IPPROTO_TCP:
- txd->flags |= PCIE_DESC_TX_TCP_CSUM;
- break;
- case IPPROTO_UDP:
- txd->flags |= PCIE_DESC_TX_UDP_CSUM;
- break;
- default:
- nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
- return;
- }
-
- u64_stats_update_begin(&r_vec->tx_sync);
- if (skb->encapsulation)
- r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
- else
- r_vec->hw_csum_tx += txbuf->pkt_cnt;
- u64_stats_update_end(&r_vec->tx_sync);
-}
-
-static struct sk_buff *
+struct sk_buff *
nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
struct sk_buff *skb, u64 *tls_handle, int *nr_frags)
{
@@ -901,7 +654,7 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
return skb;
}
-static void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
+void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
{
#ifdef CONFIG_TLS_DEVICE
struct nfp_net_tls_offload_ctx *ntls;
@@ -923,411 +676,6 @@ static void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
#endif
}
-static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
-{
- wmb();
- nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
- tx_ring->wr_ptr_add = 0;
-}
-
-static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
-{
- struct metadata_dst *md_dst = skb_metadata_dst(skb);
- unsigned char *data;
- u32 meta_id = 0;
- int md_bytes;
-
- if (likely(!md_dst && !tls_handle))
- return 0;
- if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX)) {
- if (!tls_handle)
- return 0;
- md_dst = NULL;
- }
-
- md_bytes = 4 + !!md_dst * 4 + !!tls_handle * 8;
-
- if (unlikely(skb_cow_head(skb, md_bytes)))
- return -ENOMEM;
-
- meta_id = 0;
- data = skb_push(skb, md_bytes) + md_bytes;
- if (md_dst) {
- data -= 4;
- put_unaligned_be32(md_dst->u.port_info.port_id, data);
- meta_id = NFP_NET_META_PORTID;
- }
- if (tls_handle) {
- /* conn handle is opaque, we just use u64 to be able to quickly
- * compare it to zero
- */
- data -= 8;
- memcpy(data, &tls_handle, sizeof(tls_handle));
- meta_id <<= NFP_NET_META_FIELD_SIZE;
- meta_id |= NFP_NET_META_CONN_HANDLE;
- }
-
- data -= 4;
- put_unaligned_be32(meta_id, data);
-
- return md_bytes;
-}
-
-/**
- * nfp_net_tx() - Main transmit entry point
- * @skb: SKB to transmit
- * @netdev: netdev structure
- *
- * Return: NETDEV_TX_OK on success.
- */
-static netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
-{
- struct nfp_net *nn = netdev_priv(netdev);
- const skb_frag_t *frag;
- int f, nr_frags, wr_idx, md_bytes;
- struct nfp_net_tx_ring *tx_ring;
- struct nfp_net_r_vector *r_vec;
- struct nfp_net_tx_buf *txbuf;
- struct nfp_net_tx_desc *txd;
- struct netdev_queue *nd_q;
- struct nfp_net_dp *dp;
- dma_addr_t dma_addr;
- unsigned int fsize;
- u64 tls_handle = 0;
- u16 qidx;
-
- dp = &nn->dp;
- qidx = skb_get_queue_mapping(skb);
- tx_ring = &dp->tx_rings[qidx];
- r_vec = tx_ring->r_vec;
-
- nr_frags = skb_shinfo(skb)->nr_frags;
-
- if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
- nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
- qidx, tx_ring->wr_p, tx_ring->rd_p);
- nd_q = netdev_get_tx_queue(dp->netdev, qidx);
- netif_tx_stop_queue(nd_q);
- nfp_net_tx_xmit_more_flush(tx_ring);
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_busy++;
- u64_stats_update_end(&r_vec->tx_sync);
- return NETDEV_TX_BUSY;
- }
-
- skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
- if (unlikely(!skb)) {
- nfp_net_tx_xmit_more_flush(tx_ring);
- return NETDEV_TX_OK;
- }
-
- md_bytes = nfp_net_prep_tx_meta(skb, tls_handle);
- if (unlikely(md_bytes < 0))
- goto err_flush;
-
- /* Start with the head skbuf */
- dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dp->dev, dma_addr))
- goto err_dma_err;
-
- wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
-
- /* Stash the soft descriptor of the head then initialize it */
- txbuf = &tx_ring->txbufs[wr_idx];
- txbuf->skb = skb;
- txbuf->dma_addr = dma_addr;
- txbuf->fidx = -1;
- txbuf->pkt_cnt = 1;
- txbuf->real_len = skb->len;
-
- /* Build TX descriptor */
- txd = &tx_ring->txds[wr_idx];
- txd->offset_eop = (nr_frags ? 0 : PCIE_DESC_TX_EOP) | md_bytes;
- txd->dma_len = cpu_to_le16(skb_headlen(skb));
- nfp_desc_set_dma_addr(txd, dma_addr);
- txd->data_len = cpu_to_le16(skb->len);
-
- txd->flags = 0;
- txd->mss = 0;
- txd->lso_hdrlen = 0;
-
- /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
- nfp_net_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
- nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
- if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
- txd->flags |= PCIE_DESC_TX_VLAN;
- txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
- }
-
- /* Gather DMA */
- if (nr_frags > 0) {
- __le64 second_half;
-
- /* all descs must match except for in addr, length and eop */
- second_half = txd->vals8[1];
-
- for (f = 0; f < nr_frags; f++) {
- frag = &skb_shinfo(skb)->frags[f];
- fsize = skb_frag_size(frag);
-
- dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
- fsize, DMA_TO_DEVICE);
- if (dma_mapping_error(dp->dev, dma_addr))
- goto err_unmap;
-
- wr_idx = D_IDX(tx_ring, wr_idx + 1);
- tx_ring->txbufs[wr_idx].skb = skb;
- tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
- tx_ring->txbufs[wr_idx].fidx = f;
-
- txd = &tx_ring->txds[wr_idx];
- txd->dma_len = cpu_to_le16(fsize);
- nfp_desc_set_dma_addr(txd, dma_addr);
- txd->offset_eop = md_bytes |
- ((f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0);
- txd->vals8[1] = second_half;
- }
-
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_gather++;
- u64_stats_update_end(&r_vec->tx_sync);
- }
-
- skb_tx_timestamp(skb);
-
- nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
-
- tx_ring->wr_p += nr_frags + 1;
- if (nfp_net_tx_ring_should_stop(tx_ring))
- nfp_net_tx_ring_stop(nd_q, tx_ring);
-
- tx_ring->wr_ptr_add += nr_frags + 1;
- if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
- nfp_net_tx_xmit_more_flush(tx_ring);
-
- return NETDEV_TX_OK;
-
-err_unmap:
- while (--f >= 0) {
- frag = &skb_shinfo(skb)->frags[f];
- dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
- skb_frag_size(frag), DMA_TO_DEVICE);
- tx_ring->txbufs[wr_idx].skb = NULL;
- tx_ring->txbufs[wr_idx].dma_addr = 0;
- tx_ring->txbufs[wr_idx].fidx = -2;
- wr_idx = wr_idx - 1;
- if (wr_idx < 0)
- wr_idx += tx_ring->cnt;
- }
- dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
- skb_headlen(skb), DMA_TO_DEVICE);
- tx_ring->txbufs[wr_idx].skb = NULL;
- tx_ring->txbufs[wr_idx].dma_addr = 0;
- tx_ring->txbufs[wr_idx].fidx = -2;
-err_dma_err:
- nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
-err_flush:
- nfp_net_tx_xmit_more_flush(tx_ring);
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_errors++;
- u64_stats_update_end(&r_vec->tx_sync);
- nfp_net_tls_tx_undo(skb, tls_handle);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
-}
-
-/**
- * nfp_net_tx_complete() - Handled completed TX packets
- * @tx_ring: TX ring structure
- * @budget: NAPI budget (only used as bool to determine if in NAPI context)
- */
-static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
-{
- struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
- struct netdev_queue *nd_q;
- u32 done_pkts = 0, done_bytes = 0;
- u32 qcp_rd_p;
- int todo;
-
- if (tx_ring->wr_p == tx_ring->rd_p)
- return;
-
- /* Work out how many descriptors have been transmitted */
- qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
-
- if (qcp_rd_p == tx_ring->qcp_rd_p)
- return;
-
- todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
-
- while (todo--) {
- const skb_frag_t *frag;
- struct nfp_net_tx_buf *tx_buf;
- struct sk_buff *skb;
- int fidx, nr_frags;
- int idx;
-
- idx = D_IDX(tx_ring, tx_ring->rd_p++);
- tx_buf = &tx_ring->txbufs[idx];
-
- skb = tx_buf->skb;
- if (!skb)
- continue;
-
- nr_frags = skb_shinfo(skb)->nr_frags;
- fidx = tx_buf->fidx;
-
- if (fidx == -1) {
- /* unmap head */
- dma_unmap_single(dp->dev, tx_buf->dma_addr,
- skb_headlen(skb), DMA_TO_DEVICE);
-
- done_pkts += tx_buf->pkt_cnt;
- done_bytes += tx_buf->real_len;
- } else {
- /* unmap fragment */
- frag = &skb_shinfo(skb)->frags[fidx];
- dma_unmap_page(dp->dev, tx_buf->dma_addr,
- skb_frag_size(frag), DMA_TO_DEVICE);
- }
-
- /* check for last gather fragment */
- if (fidx == nr_frags - 1)
- napi_consume_skb(skb, budget);
-
- tx_buf->dma_addr = 0;
- tx_buf->skb = NULL;
- tx_buf->fidx = -2;
- }
-
- tx_ring->qcp_rd_p = qcp_rd_p;
-
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_bytes += done_bytes;
- r_vec->tx_pkts += done_pkts;
- u64_stats_update_end(&r_vec->tx_sync);
-
- if (!dp->netdev)
- return;
-
- nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
- netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
- if (nfp_net_tx_ring_should_wake(tx_ring)) {
- /* Make sure TX thread will see updated tx_ring->rd_p */
- smp_mb();
-
- if (unlikely(netif_tx_queue_stopped(nd_q)))
- netif_tx_wake_queue(nd_q);
- }
-
- WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
- "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
- tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
-}
-
-static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
-{
- struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- u32 done_pkts = 0, done_bytes = 0;
- bool done_all;
- int idx, todo;
- u32 qcp_rd_p;
-
- /* Work out how many descriptors have been transmitted */
- qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
-
- if (qcp_rd_p == tx_ring->qcp_rd_p)
- return true;
-
- todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
-
- done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
- todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
-
- tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
-
- done_pkts = todo;
- while (todo--) {
- idx = D_IDX(tx_ring, tx_ring->rd_p);
- tx_ring->rd_p++;
-
- done_bytes += tx_ring->txbufs[idx].real_len;
- }
-
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_bytes += done_bytes;
- r_vec->tx_pkts += done_pkts;
- u64_stats_update_end(&r_vec->tx_sync);
-
- WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
- "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
- tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
-
- return done_all;
-}
-
-/**
- * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
- * @dp: NFP Net data path struct
- * @tx_ring: TX ring structure
- *
- * Assumes that the device is stopped, must be idempotent.
- */
-static void
-nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
-{
- const skb_frag_t *frag;
- struct netdev_queue *nd_q;
-
- while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
- struct nfp_net_tx_buf *tx_buf;
- struct sk_buff *skb;
- int idx, nr_frags;
-
- idx = D_IDX(tx_ring, tx_ring->rd_p);
- tx_buf = &tx_ring->txbufs[idx];
-
- skb = tx_ring->txbufs[idx].skb;
- nr_frags = skb_shinfo(skb)->nr_frags;
-
- if (tx_buf->fidx == -1) {
- /* unmap head */
- dma_unmap_single(dp->dev, tx_buf->dma_addr,
- skb_headlen(skb), DMA_TO_DEVICE);
- } else {
- /* unmap fragment */
- frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
- dma_unmap_page(dp->dev, tx_buf->dma_addr,
- skb_frag_size(frag), DMA_TO_DEVICE);
- }
-
- /* check for last gather fragment */
- if (tx_buf->fidx == nr_frags - 1)
- dev_kfree_skb_any(skb);
-
- tx_buf->dma_addr = 0;
- tx_buf->skb = NULL;
- tx_buf->fidx = -2;
-
- tx_ring->qcp_rd_p++;
- tx_ring->rd_p++;
- }
-
- memset(tx_ring->txds, 0, tx_ring->size);
- tx_ring->wr_p = 0;
- tx_ring->rd_p = 0;
- tx_ring->qcp_rd_p = 0;
- tx_ring->wr_ptr_add = 0;
-
- if (tx_ring->is_xdp || !dp->netdev)
- return;
-
- nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
- netdev_tx_reset_queue(nd_q);
-}
-
static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct nfp_net *nn = netdev_priv(netdev);
@@ -1335,1008 +683,43 @@ static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
nn_warn(nn, "TX watchdog timeout on ring: %u\n", txqueue);
}
-/* Receive processing
- */
+/* Receive processing */
static unsigned int
-nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
+nfp_net_calc_fl_bufsz_data(struct nfp_net_dp *dp)
{
- unsigned int fl_bufsz;
+ unsigned int fl_bufsz = 0;
- fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
- fl_bufsz += dp->rx_dma_off;
if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
fl_bufsz += NFP_NET_MAX_PREPEND;
else
fl_bufsz += dp->rx_offset;
fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;
- fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
- fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
return fl_bufsz;
}
-static void
-nfp_net_free_frag(void *frag, bool xdp)
-{
- if (!xdp)
- skb_free_frag(frag);
- else
- __free_page(virt_to_page(frag));
-}
-
-/**
- * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
- * @dp: NFP Net data path struct
- * @dma_addr: Pointer to storage for DMA address (output param)
- *
- * This function will allcate a new page frag, map it for DMA.
- *
- * Return: allocated page frag or NULL on failure.
- */
-static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
-{
- void *frag;
-
- if (!dp->xdp_prog) {
- frag = netdev_alloc_frag(dp->fl_bufsz);
- } else {
- struct page *page;
-
- page = alloc_page(GFP_KERNEL);
- frag = page ? page_address(page) : NULL;
- }
- if (!frag) {
- nn_dp_warn(dp, "Failed to alloc receive page frag\n");
- return NULL;
- }
-
- *dma_addr = nfp_net_dma_map_rx(dp, frag);
- if (dma_mapping_error(dp->dev, *dma_addr)) {
- nfp_net_free_frag(frag, dp->xdp_prog);
- nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
- return NULL;
- }
-
- return frag;
-}
-
-static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
-{
- void *frag;
-
- if (!dp->xdp_prog) {
- frag = napi_alloc_frag(dp->fl_bufsz);
- if (unlikely(!frag))
- return NULL;
- } else {
- struct page *page;
-
- page = dev_alloc_page();
- if (unlikely(!page))
- return NULL;
- frag = page_address(page);
- }
-
- *dma_addr = nfp_net_dma_map_rx(dp, frag);
- if (dma_mapping_error(dp->dev, *dma_addr)) {
- nfp_net_free_frag(frag, dp->xdp_prog);
- nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
- return NULL;
- }
-
- return frag;
-}
-
-/**
- * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
- * @dp: NFP Net data path struct
- * @rx_ring: RX ring structure
- * @frag: page fragment buffer
- * @dma_addr: DMA address of skb mapping
- */
-static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
- struct nfp_net_rx_ring *rx_ring,
- void *frag, dma_addr_t dma_addr)
-{
- unsigned int wr_idx;
-
- wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
-
- nfp_net_dma_sync_dev_rx(dp, dma_addr);
-
- /* Stash SKB and DMA address away */
- rx_ring->rxbufs[wr_idx].frag = frag;
- rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
-
- /* Fill freelist descriptor */
- rx_ring->rxds[wr_idx].fld.reserved = 0;
- rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
- nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
- dma_addr + dp->rx_dma_off);
-
- rx_ring->wr_p++;
- if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
- /* Update write pointer of the freelist queue. Make
- * sure all writes are flushed before telling the hardware.
- */
- wmb();
- nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
- }
-}
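
Both rings use free-running write/read pointers that are masked down to a
slot index only on access (the D_IDX() macro), which requires power-of-two
ring sizes; the freelist doorbell is batched so the wmb() and the MMIO
pointer update are amortized over NFP_NET_FL_BATCH buffers. A standalone
sketch of the masking, with a hypothetical ring size:

#include <assert.h>

#define RING_CNT	512u			/* must be a power of two */
#define D_IDX(p)	((p) & (RING_CNT - 1))	/* free-running -> slot */

static void d_idx_example(void)
{
	unsigned int wr_p = 1023;	/* pointers increment forever */

	assert(D_IDX(wr_p) == 511);
	assert(D_IDX(wr_p + 1) == 0);	/* masking performs the wrap */
}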
-
-/**
- * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
- * @rx_ring: RX ring structure
- *
- * Assumes that the device is stopped, must be idempotent.
- */
-static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
-{
- unsigned int wr_idx, last_idx;
-
- /* wr_p == rd_p means ring was never fed FL bufs. RX rings are always
- * kept at cnt - 1 FL bufs.
- */
- if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
- return;
-
- /* Move the empty entry to the end of the list */
- wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
- last_idx = rx_ring->cnt - 1;
- rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
- rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag;
- rx_ring->rxbufs[last_idx].dma_addr = 0;
- rx_ring->rxbufs[last_idx].frag = NULL;
-
- memset(rx_ring->rxds, 0, rx_ring->size);
- rx_ring->wr_p = 0;
- rx_ring->rd_p = 0;
-}
-
-/**
- * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
- * @dp: NFP Net data path struct
- * @rx_ring: RX ring to remove buffers from
- *
- * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
- * entries. After device is disabled nfp_net_rx_ring_reset() must be called
- * to restore required ring geometry.
- */
-static void
-nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
- struct nfp_net_rx_ring *rx_ring)
-{
- unsigned int i;
-
- for (i = 0; i < rx_ring->cnt - 1; i++) {
- /* NULL skb can only happen when initial filling of the ring
- * fails to allocate enough buffers and calls here to free
- * already allocated ones.
- */
- if (!rx_ring->rxbufs[i].frag)
- continue;
-
- nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
- nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
- rx_ring->rxbufs[i].dma_addr = 0;
- rx_ring->rxbufs[i].frag = NULL;
- }
-}
-
-/**
- * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
- * @dp: NFP Net data path struct
- * @rx_ring: RX ring to remove buffers from
- */
-static int
-nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
- struct nfp_net_rx_ring *rx_ring)
-{
- struct nfp_net_rx_buf *rxbufs;
- unsigned int i;
-
- rxbufs = rx_ring->rxbufs;
-
- for (i = 0; i < rx_ring->cnt - 1; i++) {
- rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
- if (!rxbufs[i].frag) {
- nfp_net_rx_ring_bufs_free(dp, rx_ring);
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-/**
- * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
- * @dp: NFP Net data path struct
- * @rx_ring: RX ring to fill
- */
-static void
-nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
- struct nfp_net_rx_ring *rx_ring)
-{
- unsigned int i;
-
- for (i = 0; i < rx_ring->cnt - 1; i++)
- nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
- rx_ring->rxbufs[i].dma_addr);
-}
-
-/**
- * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors
- * @flags: RX descriptor flags field in CPU byte order
- */
-static int nfp_net_rx_csum_has_errors(u16 flags)
-{
- u16 csum_all_checked, csum_all_ok;
-
- csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
- csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
-
- return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
-}
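
The group check works because the descriptor flag layout pairs every
"checksum checked" bit with an "OK" bit a fixed shift away, so a single
comparison covers TCP, UDP and the inner variants at once: any protocol
that was checked but not flagged OK makes the two shifted sets differ. A
toy version with made-up bit positions (the real __PCIE_DESC_RX_CSUM_*
masks are driver-internal):

#define CSUM_OK_SHIFT	1		/* "checked" bit = "ok" bit << 1 */
#define CSUM_ALL_OK	0x0055		/* hypothetical "ok" bits */
#define CSUM_ALL	(CSUM_ALL_OK << CSUM_OK_SHIFT)

static int csum_has_errors(unsigned short flags)
{
	unsigned short checked = flags & CSUM_ALL;
	unsigned short ok = flags & CSUM_ALL_OK;

	return checked != (ok << CSUM_OK_SHIFT);
}

With flags == (CSUM_ALL | CSUM_ALL_OK) this returns 0; clearing any single
"ok" bit while its "checked" bit stays set makes it non-zero.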
-
-/**
- * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
- * @dp: NFP Net data path struct
- * @r_vec: per-ring structure
- * @rxd: Pointer to RX descriptor
- * @meta: Parsed metadata prepend
- * @skb: Pointer to SKB
- */
-static void nfp_net_rx_csum(struct nfp_net_dp *dp,
- struct nfp_net_r_vector *r_vec,
- struct nfp_net_rx_desc *rxd,
- struct nfp_meta_parsed *meta, struct sk_buff *skb)
-{
- skb_checksum_none_assert(skb);
-
- if (!(dp->netdev->features & NETIF_F_RXCSUM))
- return;
-
- if (meta->csum_type) {
- skb->ip_summed = meta->csum_type;
- skb->csum = meta->csum;
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->hw_csum_rx_complete++;
- u64_stats_update_end(&r_vec->rx_sync);
- return;
- }
-
- if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->hw_csum_rx_error++;
- u64_stats_update_end(&r_vec->rx_sync);
- return;
- }
-
- /* Assume that the firmware will never report inner CSUM_OK unless outer
- * L4 headers were successfully parsed. FW will always report zero UDP
- * checksum as CSUM_OK.
- */
- if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
- rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
- __skb_incr_checksum_unnecessary(skb);
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->hw_csum_rx_ok++;
- u64_stats_update_end(&r_vec->rx_sync);
- }
-
- if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
- rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
- __skb_incr_checksum_unnecessary(skb);
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->hw_csum_rx_inner_ok++;
- u64_stats_update_end(&r_vec->rx_sync);
- }
-}
-
-static void
-nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
- unsigned int type, __be32 *hash)
-{
- if (!(netdev->features & NETIF_F_RXHASH))
- return;
-
- switch (type) {
- case NFP_NET_RSS_IPV4:
- case NFP_NET_RSS_IPV6:
- case NFP_NET_RSS_IPV6_EX:
- meta->hash_type = PKT_HASH_TYPE_L3;
- break;
- default:
- meta->hash_type = PKT_HASH_TYPE_L4;
- break;
- }
-
- meta->hash = get_unaligned_be32(hash);
-}
-
-static void
-nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
- void *data, struct nfp_net_rx_desc *rxd)
-{
- struct nfp_net_rx_hash *rx_hash = data;
-
- if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
- return;
-
- nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
- &rx_hash->hash);
-}
-
-static bool
-nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
- void *data, void *pkt, unsigned int pkt_len, int meta_len)
+static unsigned int nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
{
- u32 meta_info;
-
- meta_info = get_unaligned_be32(data);
- data += 4;
-
- while (meta_info) {
- switch (meta_info & NFP_NET_META_FIELD_MASK) {
- case NFP_NET_META_HASH:
- meta_info >>= NFP_NET_META_FIELD_SIZE;
- nfp_net_set_hash(netdev, meta,
- meta_info & NFP_NET_META_FIELD_MASK,
- (__be32 *)data);
- data += 4;
- break;
- case NFP_NET_META_MARK:
- meta->mark = get_unaligned_be32(data);
- data += 4;
- break;
- case NFP_NET_META_PORTID:
- meta->portid = get_unaligned_be32(data);
- data += 4;
- break;
- case NFP_NET_META_CSUM:
- meta->csum_type = CHECKSUM_COMPLETE;
- meta->csum =
- (__force __wsum)__get_unaligned_cpu32(data);
- data += 4;
- break;
- case NFP_NET_META_RESYNC_INFO:
- if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
- pkt_len))
- return false;
- data += sizeof(struct nfp_net_tls_resync_req);
- break;
- default:
- return true;
- }
-
- meta_info >>= NFP_NET_META_FIELD_SIZE;
- }
-
- return data != pkt;
-}
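
The chained format parsed above packs a sequence of 4-bit field-type codes
into the leading 32-bit word, consumed low nibble first; HASH consumes a
second nibble for the hash type, and every field except RESYNC_INFO is
followed by one 4-byte value. The final "data != pkt" check rejects
headers that do not consume exactly meta_len bytes. A runnable decode of a
sample header, using the type codes from nfp_net_ctrl.h (HASH=1, MARK=2):

#include <stdio.h>

#define META_FIELD_SIZE	4
#define META_FIELD_MASK	0xf
#define META_HASH	1
#define META_MARK	2

int main(void)
{
	unsigned int meta_info = 0x231;	/* HASH with type 3, then MARK */

	while (meta_info) {
		switch (meta_info & META_FIELD_MASK) {
		case META_HASH:
			meta_info >>= META_FIELD_SIZE;
			printf("hash, type %u, 4B value follows\n",
			       meta_info & META_FIELD_MASK);
			break;
		case META_MARK:
			printf("mark, 4B value follows\n");
			break;
		}
		meta_info >>= META_FIELD_SIZE;
	}
	return 0;
}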
-
-static void
-nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
- struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
- struct sk_buff *skb)
-{
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->rx_drops++;
- /* If we have both skb and rxbuf the replacement buffer allocation
- * must have failed, count this as an alloc failure.
- */
- if (skb && rxbuf)
- r_vec->rx_replace_buf_alloc_fail++;
- u64_stats_update_end(&r_vec->rx_sync);
-
- /* skb is build based on the frag, free_skb() would free the frag
- * so to be able to reuse it we need an extra ref.
- */
- if (skb && rxbuf && skb->head == rxbuf->frag)
- page_ref_inc(virt_to_head_page(rxbuf->frag));
- if (rxbuf)
- nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
- if (skb)
- dev_kfree_skb_any(skb);
-}
-
-static bool
-nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
- struct nfp_net_tx_ring *tx_ring,
- struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
- unsigned int pkt_len, bool *completed)
-{
- unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
- struct nfp_net_tx_buf *txbuf;
- struct nfp_net_tx_desc *txd;
- int wr_idx;
-
- /* Reject if xdp_adjust_tail grew the packet beyond the DMA area */
- if (pkt_len + dma_off > dma_map_sz)
- return false;
-
- if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
- if (!*completed) {
- nfp_net_xdp_complete(tx_ring);
- *completed = true;
- }
-
- if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
- nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
- NULL);
- return false;
- }
- }
-
- wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
-
- /* Stash the soft descriptor of the head then initialize it */
- txbuf = &tx_ring->txbufs[wr_idx];
-
- nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);
-
- txbuf->frag = rxbuf->frag;
- txbuf->dma_addr = rxbuf->dma_addr;
- txbuf->fidx = -1;
- txbuf->pkt_cnt = 1;
- txbuf->real_len = pkt_len;
-
- dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
- pkt_len, DMA_BIDIRECTIONAL);
-
- /* Build TX descriptor */
- txd = &tx_ring->txds[wr_idx];
- txd->offset_eop = PCIE_DESC_TX_EOP;
- txd->dma_len = cpu_to_le16(pkt_len);
- nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
- txd->data_len = cpu_to_le16(pkt_len);
-
- txd->flags = 0;
- txd->mss = 0;
- txd->lso_hdrlen = 0;
-
- tx_ring->wr_p++;
- tx_ring->wr_ptr_add++;
- return true;
-}
-
-/**
- * nfp_net_rx() - receive up to @budget packets on @rx_ring
- * @rx_ring: RX ring to receive from
- * @budget: NAPI budget
- *
- * Note, this function is separated out from the napi poll function to
- * more cleanly separate packet receive code from other bookkeeping
- * functions performed in the napi poll function.
- *
- * Return: Number of packets received.
- */
-static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
-{
- struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
- struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
- struct nfp_net_tx_ring *tx_ring;
- struct bpf_prog *xdp_prog;
- bool xdp_tx_cmpl = false;
- unsigned int true_bufsz;
- struct sk_buff *skb;
- int pkts_polled = 0;
- struct xdp_buff xdp;
- int idx;
-
- xdp_prog = READ_ONCE(dp->xdp_prog);
- true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
- xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
- &rx_ring->xdp_rxq);
- tx_ring = r_vec->xdp_ring;
-
- while (pkts_polled < budget) {
- unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
- struct nfp_net_rx_buf *rxbuf;
- struct nfp_net_rx_desc *rxd;
- struct nfp_meta_parsed meta;
- bool redir_egress = false;
- struct net_device *netdev;
- dma_addr_t new_dma_addr;
- u32 meta_len_xdp = 0;
- void *new_frag;
-
- idx = D_IDX(rx_ring, rx_ring->rd_p);
-
- rxd = &rx_ring->rxds[idx];
- if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
- break;
-
- /* Memory barrier to ensure that we won't do other reads
- * before the DD bit.
- */
- dma_rmb();
-
- memset(&meta, 0, sizeof(meta));
-
- rx_ring->rd_p++;
- pkts_polled++;
-
- rxbuf = &rx_ring->rxbufs[idx];
- /* < meta_len >
- * <-- [rx_offset] -->
- * ---------------------------------------------------------
- * | [XX] | metadata | packet | XXXX |
- * ---------------------------------------------------------
- * <---------------- data_len --------------->
- *
- * The rx_offset is fixed for all packets, the meta_len can vary
- * on a packet by packet basis. If rx_offset is set to zero
- * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
- * buffer and is immediately followed by the packet (no [XX]).
- */
- meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
- data_len = le16_to_cpu(rxd->rxd.data_len);
- pkt_len = data_len - meta_len;
-
- pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
- if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
- pkt_off += meta_len;
- else
- pkt_off += dp->rx_offset;
- meta_off = pkt_off - meta_len;
-
- /* Stats update */
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->rx_pkts++;
- r_vec->rx_bytes += pkt_len;
- u64_stats_update_end(&r_vec->rx_sync);
-
- if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
- (dp->rx_offset && meta_len > dp->rx_offset))) {
- nn_dp_warn(dp, "oversized RX packet metadata %u\n",
- meta_len);
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
- continue;
- }
-
- nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
- data_len);
-
- if (!dp->chained_metadata_format) {
- nfp_net_set_hash_desc(dp->netdev, &meta,
- rxbuf->frag + meta_off, rxd);
- } else if (meta_len) {
- if (unlikely(nfp_net_parse_meta(dp->netdev, &meta,
- rxbuf->frag + meta_off,
- rxbuf->frag + pkt_off,
- pkt_len, meta_len))) {
- nn_dp_warn(dp, "invalid RX packet metadata\n");
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
- NULL);
- continue;
- }
- }
-
- if (xdp_prog && !meta.portid) {
- void *orig_data = rxbuf->frag + pkt_off;
- unsigned int dma_off;
- int act;
-
- xdp_prepare_buff(&xdp,
- rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
- pkt_off - NFP_NET_RX_BUF_HEADROOM,
- pkt_len, true);
-
- act = bpf_prog_run_xdp(xdp_prog, &xdp);
-
- pkt_len = xdp.data_end - xdp.data;
- pkt_off += xdp.data - orig_data;
-
- switch (act) {
- case XDP_PASS:
- meta_len_xdp = xdp.data - xdp.data_meta;
- break;
- case XDP_TX:
- dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
- if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
- tx_ring, rxbuf,
- dma_off,
- pkt_len,
- &xdp_tx_cmpl)))
- trace_xdp_exception(dp->netdev,
- xdp_prog, act);
- continue;
- default:
- bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
- fallthrough;
- case XDP_ABORTED:
- trace_xdp_exception(dp->netdev, xdp_prog, act);
- fallthrough;
- case XDP_DROP:
- nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
- rxbuf->dma_addr);
- continue;
- }
- }
-
- if (likely(!meta.portid)) {
- netdev = dp->netdev;
- } else if (meta.portid == NFP_META_PORT_ID_CTRL) {
- struct nfp_net *nn = netdev_priv(dp->netdev);
-
- nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
- pkt_len);
- nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
- rxbuf->dma_addr);
- continue;
- } else {
- struct nfp_net *nn;
-
- nn = netdev_priv(dp->netdev);
- netdev = nfp_app_dev_get(nn->app, meta.portid,
- &redir_egress);
- if (unlikely(!netdev)) {
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
- NULL);
- continue;
- }
-
- if (nfp_netdev_is_nfp_repr(netdev))
- nfp_repr_inc_rx_stats(netdev, pkt_len);
- }
-
- skb = build_skb(rxbuf->frag, true_bufsz);
- if (unlikely(!skb)) {
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
- continue;
- }
- new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
- if (unlikely(!new_frag)) {
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
- continue;
- }
-
- nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
-
- nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
-
- skb_reserve(skb, pkt_off);
- skb_put(skb, pkt_len);
-
- skb->mark = meta.mark;
- skb_set_hash(skb, meta.hash, meta.hash_type);
-
- skb_record_rx_queue(skb, rx_ring->idx);
- skb->protocol = eth_type_trans(skb, netdev);
-
- nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
-
-#ifdef CONFIG_TLS_DEVICE
- if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
- skb->decrypted = true;
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->hw_tls_rx++;
- u64_stats_update_end(&r_vec->rx_sync);
- }
-#endif
-
- if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- le16_to_cpu(rxd->rxd.vlan));
- if (meta_len_xdp)
- skb_metadata_set(skb, meta_len_xdp);
-
- if (likely(!redir_egress)) {
- napi_gro_receive(&rx_ring->r_vec->napi, skb);
- } else {
- skb->dev = netdev;
- skb_reset_network_header(skb);
- __skb_push(skb, ETH_HLEN);
- dev_queue_xmit(skb);
- }
- }
-
- if (xdp_prog) {
- if (tx_ring->wr_ptr_add)
- nfp_net_tx_xmit_more_flush(tx_ring);
- else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
- !xdp_tx_cmpl)
- if (!nfp_net_xdp_complete(tx_ring))
- pkts_polled = budget;
- }
-
- return pkts_polled;
-}
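
The offset arithmetic in the receive loop is easy to verify by hand: the
packet starts at the DMA headroom plus either the fixed rx_offset or, in
dynamic mode (rx_offset == 0), the per-packet meta_len, with the metadata
sitting immediately before the packet. A sketch with a hypothetical
headroom value (the rx_dma_off term is dropped for brevity):

#include <stdio.h>

#define HEADROOM	64	/* stand-in for NFP_NET_RX_BUF_HEADROOM */

static unsigned int pkt_off(unsigned int rx_offset, unsigned int meta_len)
{
	return HEADROOM + (rx_offset ? rx_offset : meta_len);
}

int main(void)
{
	/* fixed offset 32, 8B of metadata: packet at 96, metadata at 88 */
	printf("fixed:   pkt_off=%u meta_off=%u\n",
	       pkt_off(32, 8), pkt_off(32, 8) - 8);
	/* dynamic mode: metadata opens the buffer, packet right behind it */
	printf("dynamic: pkt_off=%u meta_off=%u\n",
	       pkt_off(0, 8), pkt_off(0, 8) - 8);
	return 0;
}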
-
-/**
- * nfp_net_poll() - napi poll function
- * @napi: NAPI structure
- * @budget: NAPI budget
- *
- * Return: number of packets polled.
- */
-static int nfp_net_poll(struct napi_struct *napi, int budget)
-{
- struct nfp_net_r_vector *r_vec =
- container_of(napi, struct nfp_net_r_vector, napi);
- unsigned int pkts_polled = 0;
-
- if (r_vec->tx_ring)
- nfp_net_tx_complete(r_vec->tx_ring, budget);
- if (r_vec->rx_ring)
- pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
-
- if (pkts_polled < budget)
- if (napi_complete_done(napi, pkts_polled))
- nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
-
- if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
- struct dim_sample dim_sample = {};
- unsigned int start;
- u64 pkts, bytes;
-
- do {
- start = u64_stats_fetch_begin(&r_vec->rx_sync);
- pkts = r_vec->rx_pkts;
- bytes = r_vec->rx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
-
- dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
- net_dim(&r_vec->rx_dim, dim_sample);
- }
-
- if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
- struct dim_sample dim_sample = {};
- unsigned int start;
- u64 pkts, bytes;
-
- do {
- start = u64_stats_fetch_begin(&r_vec->tx_sync);
- pkts = r_vec->tx_pkts;
- bytes = r_vec->tx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
-
- dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
- net_dim(&r_vec->tx_dim, dim_sample);
- }
-
- return pkts_polled;
-}
-
-/* Control device data path
- */
-
-static bool
-nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
- struct sk_buff *skb, bool old)
-{
- unsigned int real_len = skb->len, meta_len = 0;
- struct nfp_net_tx_ring *tx_ring;
- struct nfp_net_tx_buf *txbuf;
- struct nfp_net_tx_desc *txd;
- struct nfp_net_dp *dp;
- dma_addr_t dma_addr;
- int wr_idx;
-
- dp = &r_vec->nfp_net->dp;
- tx_ring = r_vec->tx_ring;
-
- if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
- nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
- goto err_free;
- }
-
- if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_busy++;
- u64_stats_update_end(&r_vec->tx_sync);
- if (!old)
- __skb_queue_tail(&r_vec->queue, skb);
- else
- __skb_queue_head(&r_vec->queue, skb);
- return true;
- }
-
- if (nfp_app_ctrl_has_meta(nn->app)) {
- if (unlikely(skb_headroom(skb) < 8)) {
- nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
- goto err_free;
- }
- meta_len = 8;
- put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
- put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
- }
-
- /* Start with the head skbuf */
- dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dp->dev, dma_addr))
- goto err_dma_warn;
-
- wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
-
- /* Stash the soft descriptor of the head then initialize it */
- txbuf = &tx_ring->txbufs[wr_idx];
- txbuf->skb = skb;
- txbuf->dma_addr = dma_addr;
- txbuf->fidx = -1;
- txbuf->pkt_cnt = 1;
- txbuf->real_len = real_len;
-
- /* Build TX descriptor */
- txd = &tx_ring->txds[wr_idx];
- txd->offset_eop = meta_len | PCIE_DESC_TX_EOP;
- txd->dma_len = cpu_to_le16(skb_headlen(skb));
- nfp_desc_set_dma_addr(txd, dma_addr);
- txd->data_len = cpu_to_le16(skb->len);
-
- txd->flags = 0;
- txd->mss = 0;
- txd->lso_hdrlen = 0;
-
- tx_ring->wr_p++;
- tx_ring->wr_ptr_add++;
- nfp_net_tx_xmit_more_flush(tx_ring);
-
- return false;
-
-err_dma_warn:
- nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
-err_free:
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_errors++;
- u64_stats_update_end(&r_vec->tx_sync);
- dev_kfree_skb_any(skb);
- return false;
-}
-
-bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
-{
- struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
-
- return nfp_ctrl_tx_one(nn, r_vec, skb, false);
-}
-
-bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
-{
- struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
- bool ret;
-
- spin_lock_bh(&r_vec->lock);
- ret = nfp_ctrl_tx_one(nn, r_vec, skb, false);
- spin_unlock_bh(&r_vec->lock);
-
- return ret;
-}
-
-static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
-{
- struct sk_buff *skb;
-
- while ((skb = __skb_dequeue(&r_vec->queue)))
- if (nfp_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
- return;
-}
-
-static bool
-nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
-{
- u32 meta_type, meta_tag;
-
- if (!nfp_app_ctrl_has_meta(nn->app))
- return !meta_len;
-
- if (meta_len != 8)
- return false;
-
- meta_type = get_unaligned_be32(data);
- meta_tag = get_unaligned_be32(data + 4);
-
- return (meta_type == NFP_NET_META_PORTID &&
- meta_tag == NFP_META_PORT_ID_CTRL);
-}
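
This check is the receive-side mirror of the prepend nfp_ctrl_tx_one()
builds with its two skb_push() calls: a 4-byte field-type word followed by
a 4-byte ~0U port tag, both big-endian. A userspace sketch of the same
8-byte layout (the numeric NFP_NET_META_PORTID code is assumed here, not
taken from this diff):

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

#define META_PORTID		5u	/* assumed NFP_NET_META_PORTID */
#define META_PORT_ID_CTRL	(~0u)	/* NFP_META_PORT_ID_CTRL */

static void build_ctrl_meta(uint8_t buf[8])
{
	uint32_t type = htonl(META_PORTID);
	uint32_t tag = htonl(META_PORT_ID_CTRL);

	memcpy(buf, &type, 4);		/* meta_type, read at data */
	memcpy(buf + 4, &tag, 4);	/* meta_tag, read at data + 4 */
}

static int ctrl_meta_ok(const uint8_t buf[8])
{
	uint32_t type, tag;

	memcpy(&type, buf, 4);
	memcpy(&tag, buf + 4, 4);
	return ntohl(type) == META_PORTID && ntohl(tag) == META_PORT_ID_CTRL;
}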
-
-static bool
-nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
- struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
-{
- unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
- struct nfp_net_rx_buf *rxbuf;
- struct nfp_net_rx_desc *rxd;
- dma_addr_t new_dma_addr;
- struct sk_buff *skb;
- void *new_frag;
- int idx;
-
- idx = D_IDX(rx_ring, rx_ring->rd_p);
-
- rxd = &rx_ring->rxds[idx];
- if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
- return false;
-
- /* Memory barrier to ensure that we won't do other reads
- * before the DD bit.
- */
- dma_rmb();
-
- rx_ring->rd_p++;
-
- rxbuf = &rx_ring->rxbufs[idx];
- meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
- data_len = le16_to_cpu(rxd->rxd.data_len);
- pkt_len = data_len - meta_len;
-
- pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
- if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
- pkt_off += meta_len;
- else
- pkt_off += dp->rx_offset;
- meta_off = pkt_off - meta_len;
-
- /* Stats update */
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->rx_pkts++;
- r_vec->rx_bytes += pkt_len;
- u64_stats_update_end(&r_vec->rx_sync);
-
- nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
-
- if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
- nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
- meta_len);
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
- return true;
- }
-
- skb = build_skb(rxbuf->frag, dp->fl_bufsz);
- if (unlikely(!skb)) {
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
- return true;
- }
- new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
- if (unlikely(!new_frag)) {
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
- return true;
- }
-
- nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
-
- nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
-
- skb_reserve(skb, pkt_off);
- skb_put(skb, pkt_len);
-
- nfp_app_ctrl_rx(nn->app, skb);
-
- return true;
-}
+ unsigned int fl_bufsz;
-static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
-{
- struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
- struct nfp_net *nn = r_vec->nfp_net;
- struct nfp_net_dp *dp = &nn->dp;
- unsigned int budget = 512;
+ fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
+ fl_bufsz += dp->rx_dma_off;
+ fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);
- while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
- continue;
+ fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
+ fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- return budget;
+ return fl_bufsz;
}
-static void nfp_ctrl_poll(struct tasklet_struct *t)
+static unsigned int nfp_net_calc_fl_bufsz_xsk(struct nfp_net_dp *dp)
{
- struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);
+ unsigned int fl_bufsz;
- spin_lock(&r_vec->lock);
- nfp_net_tx_complete(r_vec->tx_ring, 0);
- __nfp_ctrl_tx_queued(r_vec);
- spin_unlock(&r_vec->lock);
+ fl_bufsz = XDP_PACKET_HEADROOM;
+ fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);
- if (nfp_ctrl_rx(r_vec)) {
- nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
- } else {
- tasklet_schedule(&r_vec->tasklet);
- nn_dp_warn(&r_vec->nfp_net->dp,
- "control message budget exceeded!\n");
- }
+ return fl_bufsz;
}
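
The refactor splits the part of the buffer-size calculation shared by both
datapaths -- prepend space, Ethernet header, two VLAN tags and the MTU --
into nfp_net_calc_fl_bufsz_data(); the standard path then adds its
headroom plus skb_shared_info overhead, while the AF_XDP path adds only
XDP_PACKET_HEADROOM. Worked numbers assuming MTU 1500 and the fixed
rx_offset of 32 (alignment and shared-info sizes are arch-dependent and
omitted):

static unsigned int example_fl_bufsz_data(void)
{
	/* rx_offset(32) + ETH_HLEN(14) + 2 * VLAN_HLEN(4) + MTU(1500) */
	return 32 + 14 + 2 * 4 + 1500;		/* = 1554 */
}

static unsigned int example_fl_bufsz_xsk(void)
{
	return 256 + example_fl_bufsz_data();	/* XDP_PACKET_HEADROOM + data */
}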
/* Setup and Configuration
@@ -2371,7 +754,7 @@ static void nfp_net_vecs_init(struct nfp_net *nn)
__skb_queue_head_init(&r_vec->queue);
spin_lock_init(&r_vec->lock);
- tasklet_setup(&r_vec->tasklet, nfp_ctrl_poll);
+ tasklet_setup(&r_vec->tasklet, nn->dp.ops->ctrl_poll);
tasklet_disable(&r_vec->tasklet);
}
@@ -2379,263 +762,25 @@ static void nfp_net_vecs_init(struct nfp_net *nn)
}
}
-/**
- * nfp_net_tx_ring_free() - Free resources allocated to a TX ring
- * @tx_ring: TX ring to free
- */
-static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
-{
- struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
-
- kvfree(tx_ring->txbufs);
-
- if (tx_ring->txds)
- dma_free_coherent(dp->dev, tx_ring->size,
- tx_ring->txds, tx_ring->dma);
-
- tx_ring->cnt = 0;
- tx_ring->txbufs = NULL;
- tx_ring->txds = NULL;
- tx_ring->dma = 0;
- tx_ring->size = 0;
-}
-
-/**
- * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
- * @dp: NFP Net data path struct
- * @tx_ring: TX Ring structure to allocate
- *
- * Return: 0 on success, negative errno otherwise.
- */
-static int
-nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
-{
- struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
-
- tx_ring->cnt = dp->txd_cnt;
-
- tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
- tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
- &tx_ring->dma,
- GFP_KERNEL | __GFP_NOWARN);
- if (!tx_ring->txds) {
- netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
- tx_ring->cnt);
- goto err_alloc;
- }
-
- tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs),
- GFP_KERNEL);
- if (!tx_ring->txbufs)
- goto err_alloc;
-
- if (!tx_ring->is_xdp && dp->netdev)
- netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
- tx_ring->idx);
-
- return 0;
-
-err_alloc:
- nfp_net_tx_ring_free(tx_ring);
- return -ENOMEM;
-}
-
static void
-nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
- struct nfp_net_tx_ring *tx_ring)
-{
- unsigned int i;
-
- if (!tx_ring->is_xdp)
- return;
-
- for (i = 0; i < tx_ring->cnt; i++) {
- if (!tx_ring->txbufs[i].frag)
- return;
-
- nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
- __free_page(virt_to_page(tx_ring->txbufs[i].frag));
- }
-}
-
-static int
-nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
- struct nfp_net_tx_ring *tx_ring)
-{
- struct nfp_net_tx_buf *txbufs = tx_ring->txbufs;
- unsigned int i;
-
- if (!tx_ring->is_xdp)
- return 0;
-
- for (i = 0; i < tx_ring->cnt; i++) {
- txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr);
- if (!txbufs[i].frag) {
- nfp_net_tx_ring_bufs_free(dp, tx_ring);
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
-{
- unsigned int r;
-
- dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
- GFP_KERNEL);
- if (!dp->tx_rings)
- return -ENOMEM;
-
- for (r = 0; r < dp->num_tx_rings; r++) {
- int bias = 0;
-
- if (r >= dp->num_stack_tx_rings)
- bias = dp->num_stack_tx_rings;
-
- nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias],
- r, bias);
-
- if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
- goto err_free_prev;
-
- if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
- goto err_free_ring;
- }
-
- return 0;
-
-err_free_prev:
- while (r--) {
- nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
-err_free_ring:
- nfp_net_tx_ring_free(&dp->tx_rings[r]);
- }
- kfree(dp->tx_rings);
- return -ENOMEM;
-}
-
-static void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
-{
- unsigned int r;
-
- for (r = 0; r < dp->num_tx_rings; r++) {
- nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
- nfp_net_tx_ring_free(&dp->tx_rings[r]);
- }
-
- kfree(dp->tx_rings);
-}
-
-/**
- * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
- * @rx_ring: RX ring to free
- */
-static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
+nfp_net_napi_add(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, int idx)
{
- struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
- struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
-
if (dp->netdev)
- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- kvfree(rx_ring->rxbufs);
-
- if (rx_ring->rxds)
- dma_free_coherent(dp->dev, rx_ring->size,
- rx_ring->rxds, rx_ring->dma);
-
- rx_ring->cnt = 0;
- rx_ring->rxbufs = NULL;
- rx_ring->rxds = NULL;
- rx_ring->dma = 0;
- rx_ring->size = 0;
-}
-
-/**
- * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
- * @dp: NFP Net data path struct
- * @rx_ring: RX ring to allocate
- *
- * Return: 0 on success, negative errno otherwise.
- */
-static int
-nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
-{
- int err;
-
- if (dp->netdev) {
- err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
- rx_ring->idx, rx_ring->r_vec->napi.napi_id);
- if (err < 0)
- return err;
- }
-
- rx_ring->cnt = dp->rxd_cnt;
- rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
- rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
- &rx_ring->dma,
- GFP_KERNEL | __GFP_NOWARN);
- if (!rx_ring->rxds) {
- netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
- rx_ring->cnt);
- goto err_alloc;
- }
-
- rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs),
- GFP_KERNEL);
- if (!rx_ring->rxbufs)
- goto err_alloc;
-
- return 0;
-
-err_alloc:
- nfp_net_rx_ring_free(rx_ring);
- return -ENOMEM;
-}
-
-static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
-{
- unsigned int r;
-
- dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
- GFP_KERNEL);
- if (!dp->rx_rings)
- return -ENOMEM;
-
- for (r = 0; r < dp->num_rx_rings; r++) {
- nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);
-
- if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
- goto err_free_prev;
-
- if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
- goto err_free_ring;
- }
-
- return 0;
-
-err_free_prev:
- while (r--) {
- nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
-err_free_ring:
- nfp_net_rx_ring_free(&dp->rx_rings[r]);
- }
- kfree(dp->rx_rings);
- return -ENOMEM;
+ netif_napi_add(dp->netdev, &r_vec->napi,
+ nfp_net_has_xsk_pool_slow(dp, idx) ?
+ dp->ops->xsk_poll : dp->ops->poll,
+ NAPI_POLL_WEIGHT);
+ else
+ tasklet_enable(&r_vec->tasklet);
}
-static void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
+static void
+nfp_net_napi_del(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec)
{
- unsigned int r;
-
- for (r = 0; r < dp->num_rx_rings; r++) {
- nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
- nfp_net_rx_ring_free(&dp->rx_rings[r]);
- }
-
- kfree(dp->rx_rings);
+ if (dp->netdev)
+ netif_napi_del(&r_vec->napi);
+ else
+ tasklet_disable(&r_vec->tasklet);
}
static void
@@ -2648,6 +793,17 @@ nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
&dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;
+
+ if (nfp_net_has_xsk_pool_slow(dp, idx) || r_vec->xsk_pool) {
+ r_vec->xsk_pool = dp->xdp_prog ? dp->xsk_pools[idx] : NULL;
+
+ if (r_vec->xsk_pool)
+ xsk_pool_set_rxq_info(r_vec->xsk_pool,
+ &r_vec->rx_ring->xdp_rxq);
+
+ nfp_net_napi_del(dp, r_vec);
+ nfp_net_napi_add(dp, r_vec, idx);
+ }
}
static int
@@ -2656,23 +812,14 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
{
int err;
- /* Setup NAPI */
- if (nn->dp.netdev)
- netif_napi_add(nn->dp.netdev, &r_vec->napi,
- nfp_net_poll, NAPI_POLL_WEIGHT);
- else
- tasklet_enable(&r_vec->tasklet);
+ nfp_net_napi_add(&nn->dp, r_vec, idx);
snprintf(r_vec->name, sizeof(r_vec->name),
"%s-rxtx-%d", nfp_net_name(nn), idx);
err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
r_vec);
if (err) {
- if (nn->dp.netdev)
- netif_napi_del(&r_vec->napi);
- else
- tasklet_disable(&r_vec->tasklet);
-
+ nfp_net_napi_del(&nn->dp, r_vec);
nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
return err;
}
@@ -2690,11 +837,7 @@ static void
nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
{
irq_set_affinity_hint(r_vec->irq_vector, NULL);
- if (nn->dp.netdev)
- netif_napi_del(&r_vec->napi);
- else
- tasklet_disable(&r_vec->tasklet);
-
+ nfp_net_napi_del(&nn->dp, r_vec);
free_irq(r_vec->irq_vector, r_vec);
}
@@ -2768,17 +911,6 @@ static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr)
nn_writew(nn, NFP_NET_CFG_MACADDR + 6, get_unaligned_be16(addr + 4));
}
-static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
-{
- nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
- nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
- nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
-
- nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
- nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
- nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
-}
-
/**
* nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
* @nn: NFP Net device to reconfigure
@@ -2808,8 +940,11 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
if (err)
nn_err(nn, "Could not disable device: %d\n", err);
- for (r = 0; r < nn->dp.num_rx_rings; r++)
+ for (r = 0; r < nn->dp.num_rx_rings; r++) {
nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
+ if (nfp_net_has_xsk_pool_slow(&nn->dp, nn->dp.rx_rings[r].idx))
+ nfp_net_xsk_rx_bufs_free(&nn->dp.rx_rings[r]);
+ }
for (r = 0; r < nn->dp.num_tx_rings; r++)
nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
for (r = 0; r < nn->dp.num_r_vecs; r++)
@@ -2818,25 +953,6 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
nn->dp.ctrl = new_ctrl;
}
-static void
-nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
- struct nfp_net_rx_ring *rx_ring, unsigned int idx)
-{
- /* Write the DMA address, size and MSI-X info to the device */
- nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
- nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
- nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
-}
-
-static void
-nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
- struct nfp_net_tx_ring *tx_ring, unsigned int idx)
-{
- nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
- nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
- nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
-}
-
/**
* nfp_net_set_config_and_enable() - Write control BAR and enable NFP
* @nn: NFP Net device to reconfigure
@@ -2866,11 +982,11 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
for (r = 0; r < nn->dp.num_rx_rings; r++)
nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);
- nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ?
- 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1);
+ nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE,
+ U64_MAX >> (64 - nn->dp.num_tx_rings));
- nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ?
- 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1);
+ nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE,
+ U64_MAX >> (64 - nn->dp.num_rx_rings));
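
The new mask expression folds the old 64-ring special case into a single
shift: for any ring count n in [1, 64], U64_MAX >> (64 - n) yields the
low-n-bit mask, and it avoids the undefined 1ULL << 64 that forced the old
conditional. A quick self-check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	for (unsigned int n = 1; n <= 64; n++) {
		uint64_t cond = n == 64 ? UINT64_MAX : ((uint64_t)1 << n) - 1;

		assert(cond == UINT64_MAX >> (64 - n));
	}
	return 0;
}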
if (nn->dp.netdev)
nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
@@ -3296,20 +1412,39 @@ struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
*new = nn->dp;
+ new->xsk_pools = kmemdup(new->xsk_pools,
+ array_size(nn->max_r_vecs,
+ sizeof(new->xsk_pools)),
+ GFP_KERNEL);
+ if (!new->xsk_pools) {
+ kfree(new);
+ return NULL;
+ }
+
/* Clear things which need to be recomputed */
new->fl_bufsz = 0;
new->tx_rings = NULL;
new->rx_rings = NULL;
new->num_r_vecs = 0;
new->num_stack_tx_rings = 0;
+ new->txrwb = NULL;
+ new->txrwb_dma = 0;
return new;
}
+static void nfp_net_free_dp(struct nfp_net_dp *dp)
+{
+ kfree(dp->xsk_pools);
+ kfree(dp);
+}
+
static int
nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
struct netlink_ext_ack *extack)
{
+ unsigned int r, xsk_min_fl_bufsz;
+
/* XDP-enabled tests */
if (!dp->xdp_prog)
return 0;
@@ -3322,6 +1457,18 @@ nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
return -EINVAL;
}
+ xsk_min_fl_bufsz = nfp_net_calc_fl_bufsz_xsk(dp);
+ for (r = 0; r < nn->max_r_vecs; r++) {
+ if (!dp->xsk_pools[r])
+ continue;
+
+ if (xsk_pool_get_rx_frame_size(dp->xsk_pools[r]) < xsk_min_fl_bufsz) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "XSK buffer pool chunk size too small");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -3389,7 +1536,7 @@ int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp,
nfp_net_open_stack(nn);
exit_free_dp:
- kfree(dp);
+ nfp_net_free_dp(dp);
return err;
@@ -3398,7 +1545,7 @@ err_free_rx:
err_cleanup_vecs:
for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
- kfree(dp);
+ nfp_net_free_dp(dp);
return err;
}
@@ -3716,6 +1863,9 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
return nfp_net_xdp_setup_drv(nn, xdp);
case XDP_SETUP_PROG_HW:
return nfp_net_xdp_setup_hw(nn, xdp);
+ case XDP_SETUP_XSK_POOL:
+ return nfp_net_xsk_setup_pool(netdev, xdp->xsk.pool,
+ xdp->xsk.queue_id);
default:
return nfp_app_bpf(nn->app, nn, xdp);
}
@@ -3742,7 +1892,35 @@ static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
return 0;
}
-const struct net_device_ops nfp_net_netdev_ops = {
+const struct net_device_ops nfp_nfd3_netdev_ops = {
+ .ndo_init = nfp_app_ndo_init,
+ .ndo_uninit = nfp_app_ndo_uninit,
+ .ndo_open = nfp_net_netdev_open,
+ .ndo_stop = nfp_net_netdev_close,
+ .ndo_start_xmit = nfp_net_tx,
+ .ndo_get_stats64 = nfp_net_stat64,
+ .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
+ .ndo_set_vf_mac = nfp_app_set_vf_mac,
+ .ndo_set_vf_vlan = nfp_app_set_vf_vlan,
+ .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
+ .ndo_set_vf_trust = nfp_app_set_vf_trust,
+ .ndo_get_vf_config = nfp_app_get_vf_config,
+ .ndo_set_vf_link_state = nfp_app_set_vf_link_state,
+ .ndo_setup_tc = nfp_port_setup_tc,
+ .ndo_tx_timeout = nfp_net_tx_timeout,
+ .ndo_set_rx_mode = nfp_net_set_rx_mode,
+ .ndo_change_mtu = nfp_net_change_mtu,
+ .ndo_set_mac_address = nfp_net_set_mac_address,
+ .ndo_set_features = nfp_net_set_features,
+ .ndo_features_check = nfp_net_features_check,
+ .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
+ .ndo_bpf = nfp_net_xdp,
+ .ndo_xsk_wakeup = nfp_net_xsk_wakeup,
+ .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
+};
+
+const struct net_device_ops nfp_nfdk_netdev_ops = {
.ndo_init = nfp_app_ndo_init,
.ndo_uninit = nfp_app_ndo_uninit,
.ndo_open = nfp_net_netdev_open,
@@ -3811,10 +1989,10 @@ void nfp_net_info(struct nfp_net *nn)
nn->dp.num_tx_rings, nn->max_tx_rings,
nn->dp.num_rx_rings, nn->max_rx_rings);
nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
- nn->fw_ver.resv, nn->fw_ver.class,
+ nn->fw_ver.extend, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -3832,6 +2010,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
+ nn->cap & NFP_NET_CFG_CTRL_TXRWB ? "TXRWB " : "",
nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
@@ -3843,6 +2022,7 @@ void nfp_net_info(struct nfp_net *nn)
/**
* nfp_net_alloc() - Allocate netdev and related structure
* @pdev: PCI device
+ * @dev_info: NFP ASIC params
* @ctrl_bar: PCI IOMEM with vNIC config memory
* @needs_netdev: Whether to allocate a netdev for this vNIC
* @max_tx_rings: Maximum number of TX rings supported by device
@@ -3855,7 +2035,8 @@ void nfp_net_info(struct nfp_net *nn)
* Return: NFP Net device structure, or ERR_PTR on error.
*/
struct nfp_net *
-nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
+nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
+ void __iomem *ctrl_bar, bool needs_netdev,
unsigned int max_tx_rings, unsigned int max_rx_rings)
{
struct nfp_net *nn;
@@ -3880,7 +2061,28 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
nn->dp.dev = &pdev->dev;
nn->dp.ctrl_bar = ctrl_bar;
+ nn->dev_info = dev_info;
nn->pdev = pdev;
+ nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
+
+ switch (FIELD_GET(NFP_NET_CFG_VERSION_DP_MASK, nn->fw_ver.extend)) {
+ case NFP_NET_CFG_VERSION_DP_NFD3:
+ nn->dp.ops = &nfp_nfd3_ops;
+ break;
+ case NFP_NET_CFG_VERSION_DP_NFDK:
+ if (nn->fw_ver.major < 5) {
+ dev_err(&pdev->dev,
+ "NFDK must use ABI 5 or newer, found: %d\n",
+ nn->fw_ver.major);
+ err = -EINVAL;
+ goto err_free_nn;
+ }
+ nn->dp.ops = &nfp_nfdk_ops;
+ break;
+ default:
+ err = -EINVAL;
+ goto err_free_nn;
+ }
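
Datapath selection keys off the firmware version word: the low bit of the
"extend" byte (NFP_NET_CFG_VERSION_DP_MASK, defined later in this diff)
distinguishes the classic NFD3 descriptor format from NFDK, with NFDK
additionally gated on ABI major version 5. Since the mask is a single bit,
the FIELD_GET() from <linux/bitfield.h> reduces to a plain bit test:

#define VERSION_DP_NFD3	0
#define VERSION_DP_NFDK	1
#define VERSION_DP_MASK	1

static const char *dp_variant(unsigned int extend)
{
	/* equivalent of FIELD_GET(NFP_NET_CFG_VERSION_DP_MASK, extend) */
	return (extend & VERSION_DP_MASK) == VERSION_DP_NFDK ? "NFDK" : "NFD3";
}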
nn->max_tx_rings = max_tx_rings;
nn->max_rx_rings = max_rx_rings;
@@ -3893,6 +2095,14 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
nn->dp.num_r_vecs = min_t(unsigned int,
nn->dp.num_r_vecs, num_online_cpus());
+ nn->max_r_vecs = nn->dp.num_r_vecs;
+
+ nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(nn->dp.xsk_pools),
+ GFP_KERNEL);
+ if (!nn->dp.xsk_pools) {
+ err = -ENOMEM;
+ goto err_free_nn;
+ }
nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
@@ -3932,6 +2142,7 @@ void nfp_net_free(struct nfp_net *nn)
WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
nfp_ccm_mbox_free(nn);
+ kfree(nn->dp.xsk_pools);
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
else
@@ -4090,7 +2301,15 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
/* Finalise the netdev setup */
- netdev->netdev_ops = &nfp_net_netdev_ops;
+ switch (nn->dp.ops->version) {
+ case NFP_NFD_VER_NFD3:
+ netdev->netdev_ops = &nfp_nfd3_netdev_ops;
+ break;
+ case NFP_NFD_VER_NFDK:
+ netdev->netdev_ops = &nfp_nfdk_netdev_ops;
+ break;
+ }
+
netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
/* MTU range: 68 - hw-specific max */
@@ -4138,6 +2357,9 @@ static int nfp_net_read_caps(struct nfp_net *nn)
nn->dp.rx_offset = NFP_NET_RX_OFFSET;
}
+ /* Mask out NFD-version-specific features */
+ nn->cap &= nn->dp.ops->cap_mask;
+
/* For control vNICs mask out the capabilities app doesn't want. */
if (!nn->dp.netdev)
nn->cap &= nn->app->type->ctrl_cap_mask;
@@ -4190,6 +2412,10 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
}
+ /* Enable TX pointer writeback, if supported */
+ if (nn->cap & NFP_NET_CFG_CTRL_TXRWB)
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXRWB;
+
/* Stash the re-configuration queue away. First odd queue in TX Bar */
nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 3d61a8cb60b0..8892a94f00c3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -1,8 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
-/*
- * nfp_net_ctrl.h
+/* nfp_net_ctrl.h
* Netronome network device driver: Control BAR layout
* Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
* Jason McMullan <jason.mcmullan@netronome.com>
@@ -15,30 +14,24 @@
#include <linux/types.h>
-/**
- * Configuration BAR size.
+/* Configuration BAR size.
*
* The configuration BAR is 8K in size, but due to
* THB-350, 32k needs to be reserved.
*/
#define NFP_NET_CFG_BAR_SZ (32 * 1024)
-/**
- * Offset in Freelist buffer where packet starts on RX
- */
+/* Offset in Freelist buffer where packet starts on RX */
#define NFP_NET_RX_OFFSET 32
-/**
- * LSO parameters
+/* LSO parameters
* %NFP_NET_LSO_MAX_HDR_SZ: Maximum header size supported for LSO frames
* %NFP_NET_LSO_MAX_SEGS: Maximum number of segments LSO frame can produce
*/
#define NFP_NET_LSO_MAX_HDR_SZ 255
#define NFP_NET_LSO_MAX_SEGS 64
-/**
- * Prepend field types
- */
+/* Prepend field types */
#define NFP_NET_META_FIELD_SIZE 4
#define NFP_NET_META_HASH 1 /* next field carries hash type */
#define NFP_NET_META_MARK 2
@@ -49,9 +42,7 @@
#define NFP_META_PORT_ID_CTRL ~0U
-/**
- * Hash type pre-pended when a RSS hash was computed
- */
+/* Hash type pre-pended when a RSS hash was computed */
#define NFP_NET_RSS_NONE 0
#define NFP_NET_RSS_IPV4 1
#define NFP_NET_RSS_IPV6 2
@@ -63,16 +54,14 @@
#define NFP_NET_RSS_IPV6_UDP 8
#define NFP_NET_RSS_IPV6_EX_UDP 9
-/**
- * Ring counts
+/* Ring counts
* %NFP_NET_TXR_MAX: Maximum number of TX rings
* %NFP_NET_RXR_MAX: Maximum number of RX rings
*/
#define NFP_NET_TXR_MAX 64
#define NFP_NET_RXR_MAX 64
-/**
- * Read/Write config words (0x0000 - 0x002c)
+/* Read/Write config words (0x0000 - 0x002c)
* %NFP_NET_CFG_CTRL: Global control
* %NFP_NET_CFG_UPDATE: Indicate which fields are updated
* %NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings
@@ -103,7 +92,6 @@
#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */
#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS (version 1) */
#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */
-#define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */
#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */
#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */
@@ -147,8 +135,7 @@
#define NFP_NET_CFG_LSC 0x0020
#define NFP_NET_CFG_MACADDR 0x0024
-/**
- * Read-only words (0x0030 - 0x0050):
+/* Read-only words (0x0030 - 0x0050):
* %NFP_NET_CFG_VERSION: Firmware version number
* %NFP_NET_CFG_STS: Status
* %NFP_NET_CFG_CAP: Capabilities (same bits as %NFP_NET_CFG_CTRL)
@@ -162,7 +149,10 @@
* - define more STS bits
*/
#define NFP_NET_CFG_VERSION 0x0030
-#define NFP_NET_CFG_VERSION_RESERVED_MASK (0xff << 24)
+#define NFP_NET_CFG_VERSION_RESERVED_MASK (0xfe << 24)
+#define NFP_NET_CFG_VERSION_DP_NFD3 0
+#define NFP_NET_CFG_VERSION_DP_NFDK 1
+#define NFP_NET_CFG_VERSION_DP_MASK 1
#define NFP_NET_CFG_VERSION_CLASS_MASK (0xff << 16)
#define NFP_NET_CFG_VERSION_CLASS(x) (((x) & 0xff) << 16)
#define NFP_NET_CFG_VERSION_CLASS_GENERIC 0
@@ -193,36 +183,31 @@
#define NFP_NET_CFG_START_TXQ 0x0048
#define NFP_NET_CFG_START_RXQ 0x004c
-/**
- * Prepend configuration
+/* Prepend configuration
*/
#define NFP_NET_CFG_RX_OFFSET 0x0050
#define NFP_NET_CFG_RX_OFFSET_DYNAMIC 0 /* Prepend mode */
-/**
- * RSS capabilities
+/* RSS capabilities
* %NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as
* %NFP_NET_CFG_RSS_HFUNC)
*/
#define NFP_NET_CFG_RSS_CAP 0x0054
#define NFP_NET_CFG_RSS_CAP_HFUNC 0xff000000
-/**
- * TLV area start
+/* TLV area start
* %NFP_NET_CFG_TLV_BASE: start anchor of the TLV area
*/
#define NFP_NET_CFG_TLV_BASE 0x0058
-/**
- * VXLAN/UDP encap configuration
+/* VXLAN/UDP encap configuration
* %NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports
* %NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes
*/
#define NFP_NET_CFG_VXLAN_PORT 0x0060
#define NFP_NET_CFG_VXLAN_SZ 0x0008
-/**
- * BPF section
+/* BPF section
* %NFP_NET_CFG_BPF_ABI: BPF ABI version
* %NFP_NET_CFG_BPF_CAP: BPF capabilities
* %NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes
@@ -247,14 +232,12 @@
#define NFP_NET_CFG_BPF_CFG_MASK 7ULL
#define NFP_NET_CFG_BPF_ADDR_MASK (~NFP_NET_CFG_BPF_CFG_MASK)
-/**
- * 40B reserved for future use (0x0098 - 0x00c0)
+/* 40B reserved for future use (0x0098 - 0x00c0)
*/
#define NFP_NET_CFG_RESERVED 0x0098
#define NFP_NET_CFG_RESERVED_SZ 0x0028
-/**
- * RSS configuration (0x0100 - 0x01ac):
+/* RSS configuration (0x0100 - 0x01ac):
* Used only when NFP_NET_CFG_CTRL_RSS is enabled
* %NFP_NET_CFG_RSS_CFG: RSS configuration word
* %NFP_NET_CFG_RSS_KEY: RSS "secret" key
@@ -281,8 +264,7 @@
NFP_NET_CFG_RSS_KEY_SZ)
#define NFP_NET_CFG_RSS_ITBL_SZ 0x80
-/**
- * TX ring configuration (0x200 - 0x800)
+/* TX ring configuration (0x200 - 0x800)
* %NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration
* %NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries)
* %NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries)
@@ -301,8 +283,7 @@
#define NFP_NET_CFG_TXR_IRQ_MOD(_x) (NFP_NET_CFG_TXR_BASE + 0x500 + \
((_x) * 0x4))
-/**
- * RX ring configuration (0x0800 - 0x0c00)
+/* RX ring configuration (0x0800 - 0x0c00)
* %NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration
* %NFP_NET_CFG_RXR_ADDR: Per RX ring DMA address (8B entries)
* %NFP_NET_CFG_RXR_SZ: Per RX ring ring size (1B entries)
@@ -318,8 +299,7 @@
#define NFP_NET_CFG_RXR_IRQ_MOD(_x) (NFP_NET_CFG_RXR_BASE + 0x300 + \
((_x) * 0x4))
-/**
- * Interrupt Control/Cause registers (0x0c00 - 0x0d00)
+/* Interrupt Control/Cause registers (0x0c00 - 0x0d00)
* These registers are only used when MSI-X auto-masking is not
* enabled (%NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is index
* by MSI-X entry and are 1B in size. If an entry is zero, the
@@ -334,8 +314,7 @@
#define NFP_NET_CFG_ICR_RXTX 0x1
#define NFP_NET_CFG_ICR_LSC 0x2
-/**
- * General device stats (0x0d00 - 0x0d90)
+/* General device stats (0x0d00 - 0x0d90)
* all counters are 64bit.
*/
#define NFP_NET_CFG_STATS_BASE 0x0d00
@@ -368,8 +347,7 @@
#define NFP_NET_CFG_STATS_APP3_FRAMES (NFP_NET_CFG_STATS_BASE + 0xc0)
#define NFP_NET_CFG_STATS_APP3_BYTES (NFP_NET_CFG_STATS_BASE + 0xc8)
-/**
- * Per ring stats (0x1000 - 0x1800)
+/* Per ring stats (0x1000 - 0x1800)
* options, 64bit per entry
* %NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count)
* %NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count)
@@ -381,8 +359,7 @@
#define NFP_NET_CFG_RXR_STATS(_x) (NFP_NET_CFG_RXR_STATS_BASE + \
((_x) * 0x10))
-/**
- * General use mailbox area (0x1800 - 0x19ff)
+/* General use mailbox area (0x1800 - 0x19ff)
* 4B used for update command and 4B return code
* followed by a max of 504B of variable length value
*/
@@ -399,8 +376,7 @@
#define NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET 5
#define NFP_NET_CFG_MBOX_CMD_TLV_CMSG 6
-/**
- * VLAN filtering using general use mailbox
+/* VLAN filtering using general use mailbox
* %NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
* %NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter
* %NFP_NET_CFG_VLAN_FILTER_PROTO: VLAN proto to filter
@@ -411,8 +387,7 @@
#define NFP_NET_CFG_VLAN_FILTER_PROTO (NFP_NET_CFG_VLAN_FILTER + 2)
#define NFP_NET_CFG_VLAN_FILTER_SZ 0x0004
-/**
- * TLV capabilities
+/* TLV capabilities
* %NFP_NET_CFG_TLV_TYPE: Offset of type within the TLV
* %NFP_NET_CFG_TLV_TYPE_REQUIRED: Driver must be able to parse the TLV
* %NFP_NET_CFG_TLV_LENGTH: Offset of length within the TLV
@@ -438,8 +413,7 @@
#define NFP_NET_CFG_TLV_HEADER_TYPE 0x7fff0000
#define NFP_NET_CFG_TLV_HEADER_LENGTH 0x0000ffff
-/**
- * Capability TLV types
+/* Capability TLV types
*
* %NFP_NET_CFG_TLV_TYPE_UNKNOWN:
* Special TLV type to catch bugs, should never be encountered. Drivers should
@@ -512,8 +486,7 @@
struct device;
-/**
- * struct nfp_net_tlv_caps - parsed control BAR TLV capabilities
+/* struct nfp_net_tlv_caps - parsed control BAR TLV capabilities
* @me_freq_mhz: ME clock_freq (MHz)
* @mbox_off: vNIC mailbox area offset
* @mbox_len: vNIC mailbox area length
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index 553c708694e8..d8b735ccf899 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include "nfp_net.h"
+#include "nfp_net_dp.h"
static struct dentry *nfp_dir;
@@ -42,13 +43,19 @@ static int nfp_rx_q_show(struct seq_file *file, void *data)
seq_printf(file, "%04d: 0x%08x 0x%08x", i,
rxd->vals[0], rxd->vals[1]);
- frag = READ_ONCE(rx_ring->rxbufs[i].frag);
- if (frag)
- seq_printf(file, " frag=%p", frag);
+ if (!r_vec->xsk_pool) {
+ frag = READ_ONCE(rx_ring->rxbufs[i].frag);
+ if (frag)
+ seq_printf(file, " frag=%p", frag);
- if (rx_ring->rxbufs[i].dma_addr)
- seq_printf(file, " dma_addr=%pad",
- &rx_ring->rxbufs[i].dma_addr);
+ if (rx_ring->rxbufs[i].dma_addr)
+ seq_printf(file, " dma_addr=%pad",
+ &rx_ring->rxbufs[i].dma_addr);
+ } else {
+ if (rx_ring->xsk_rxbufs[i].dma_addr)
+ seq_printf(file, " dma_addr=%pad",
+ &rx_ring->xsk_rxbufs[i].dma_addr);
+ }
if (i == rx_ring->rd_p % rxd_cnt)
seq_puts(file, " H_RD ");
@@ -74,10 +81,8 @@ static int nfp_tx_q_show(struct seq_file *file, void *data)
{
struct nfp_net_r_vector *r_vec = file->private;
struct nfp_net_tx_ring *tx_ring;
- struct nfp_net_tx_desc *txd;
- int d_rd_p, d_wr_p, txd_cnt;
struct nfp_net *nn;
- int i;
+ int d_rd_p, d_wr_p;
rtnl_lock();
@@ -91,49 +96,20 @@ static int nfp_tx_q_show(struct seq_file *file, void *data)
if (!nfp_net_running(nn))
goto out;
- txd_cnt = tx_ring->cnt;
-
d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q);
- seq_printf(file, "TX[%02d,%02d%s]: cnt=%u dma=%pad host=%p H_RD=%u H_WR=%u D_RD=%u D_WR=%u\n",
+ seq_printf(file, "TX[%02d,%02d%s]: cnt=%u dma=%pad host=%p H_RD=%u H_WR=%u D_RD=%u D_WR=%u",
tx_ring->idx, tx_ring->qcidx,
tx_ring == r_vec->tx_ring ? "" : "xdp",
tx_ring->cnt, &tx_ring->dma, tx_ring->txds,
tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p);
+ if (tx_ring->txrwb)
+ seq_printf(file, " TXRWB=%llu", *tx_ring->txrwb);
+ seq_putc(file, '\n');
- for (i = 0; i < txd_cnt; i++) {
- txd = &tx_ring->txds[i];
- seq_printf(file, "%04d: 0x%08x 0x%08x 0x%08x 0x%08x", i,
- txd->vals[0], txd->vals[1],
- txd->vals[2], txd->vals[3]);
-
- if (tx_ring == r_vec->tx_ring) {
- struct sk_buff *skb = READ_ONCE(tx_ring->txbufs[i].skb);
-
- if (skb)
- seq_printf(file, " skb->head=%p skb->data=%p",
- skb->head, skb->data);
- } else {
- seq_printf(file, " frag=%p",
- READ_ONCE(tx_ring->txbufs[i].frag));
- }
-
- if (tx_ring->txbufs[i].dma_addr)
- seq_printf(file, " dma_addr=%pad",
- &tx_ring->txbufs[i].dma_addr);
-
- if (i == tx_ring->rd_p % txd_cnt)
- seq_puts(file, " H_RD");
- if (i == tx_ring->wr_p % txd_cnt)
- seq_puts(file, " H_WR");
- if (i == d_rd_p % txd_cnt)
- seq_puts(file, " D_RD");
- if (i == d_wr_p % txd_cnt)
- seq_puts(file, " D_WR");
-
- seq_putc(file, '\n');
- }
+ nfp_net_debugfs_print_tx_descs(file, &nn->dp, r_vec, tx_ring,
+ d_rd_p, d_wr_p);
out:
rtnl_unlock();
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c
new file mode 100644
index 000000000000..34dd94811df3
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
+
+#include "nfp_app.h"
+#include "nfp_net_dp.h"
+#include "nfp_net_xsk.h"
+
+/**
+ * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
+ * @dp: NFP Net data path struct
+ * @dma_addr: Pointer to storage for DMA address (output param)
+ *
+ * This function will allocate a new page frag and map it for DMA.
+ *
+ * Return: allocated page frag or NULL on failure.
+ */
+void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
+{
+ void *frag;
+
+ if (!dp->xdp_prog) {
+ frag = netdev_alloc_frag(dp->fl_bufsz);
+ } else {
+ struct page *page;
+
+ page = alloc_page(GFP_KERNEL);
+ frag = page ? page_address(page) : NULL;
+ }
+ if (!frag) {
+ nn_dp_warn(dp, "Failed to alloc receive page frag\n");
+ return NULL;
+ }
+
+ *dma_addr = nfp_net_dma_map_rx(dp, frag);
+ if (dma_mapping_error(dp->dev, *dma_addr)) {
+ nfp_net_free_frag(frag, dp->xdp_prog);
+ nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
+ return NULL;
+ }
+
+ return frag;
+}
+
+/**
+ * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
+ * @tx_ring: TX ring structure
+ * @dp: NFP Net data path struct
+ * @r_vec: IRQ vector servicing this ring
+ * @idx: Ring index
+ * @is_xdp: Is this an XDP TX ring?
+ */
+static void
+nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec, unsigned int idx,
+ bool is_xdp)
+{
+ struct nfp_net *nn = r_vec->nfp_net;
+
+ tx_ring->idx = idx;
+ tx_ring->r_vec = r_vec;
+ tx_ring->is_xdp = is_xdp;
+ u64_stats_init(&tx_ring->r_vec->tx_sync);
+
+ tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
+ tx_ring->txrwb = dp->txrwb ? &dp->txrwb[idx] : NULL;
+ tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
+}
+
+/**
+ * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
+ * @rx_ring: RX ring structure
+ * @r_vec: IRQ vector servicing this ring
+ * @idx: Ring index
+ */
+static void
+nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
+ struct nfp_net_r_vector *r_vec, unsigned int idx)
+{
+ struct nfp_net *nn = r_vec->nfp_net;
+
+ rx_ring->idx = idx;
+ rx_ring->r_vec = r_vec;
+ u64_stats_init(&rx_ring->r_vec->rx_sync);
+
+ rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
+ rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
+}
+
+/**
+ * nfp_net_rx_ring_reset() - Reflect the freelist state in SW after disable
+ * @rx_ring: RX ring structure
+ *
+ * Assumes that the device is stopped, must be idempotent.
+ */
+void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int wr_idx, last_idx;
+
+ /* wr_p == rd_p means ring was never fed FL bufs. RX rings are always
+ * kept at cnt - 1 FL bufs.
+ */
+ if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
+ return;
+
+ /* Move the empty entry to the end of the list */
+ wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
+ last_idx = rx_ring->cnt - 1;
+ if (rx_ring->r_vec->xsk_pool) {
+ rx_ring->xsk_rxbufs[wr_idx] = rx_ring->xsk_rxbufs[last_idx];
+ memset(&rx_ring->xsk_rxbufs[last_idx], 0,
+ sizeof(*rx_ring->xsk_rxbufs));
+ } else {
+ rx_ring->rxbufs[wr_idx] = rx_ring->rxbufs[last_idx];
+ memset(&rx_ring->rxbufs[last_idx], 0, sizeof(*rx_ring->rxbufs));
+ }
+
+ memset(rx_ring->rxds, 0, rx_ring->size);
+ rx_ring->wr_p = 0;
+ rx_ring->rd_p = 0;
+}
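The reset path above indexes the ring through D_IDX(). Ring sizes in this driver are powers of two (the ethtool path further down rounds requests with roundup_pow_of_two()), which lets a free-running pointer be reduced to a slot index with a mask instead of a modulo. A minimal sketch of the assumed macro, as defined in nfp_net.h:

/* Assumed definition (sketch); valid only because cnt is a power of two. */
#define D_IDX(ring, idx)	((idx) & ((ring)->cnt - 1))

/* e.g. cnt == 8: wr_p == 9 maps to slot (9 & 7) == 1, with no division. */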
+
+/**
+ * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
+ * @dp: NFP Net data path struct
+ * @rx_ring: RX ring to remove buffers from
+ *
+ * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
+ * entries. After the device is disabled, nfp_net_rx_ring_reset() must be called
+ * to restore required ring geometry.
+ */
+static void
+nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int i;
+
+ if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
+ return;
+
+ for (i = 0; i < rx_ring->cnt - 1; i++) {
+ /* NULL frag can only happen when initial filling of the ring
+ * fails to allocate enough buffers and calls here to free
+ * already allocated ones.
+ */
+ if (!rx_ring->rxbufs[i].frag)
+ continue;
+
+ nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
+ nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
+ rx_ring->rxbufs[i].dma_addr = 0;
+ rx_ring->rxbufs[i].frag = NULL;
+ }
+}
+
+/**
+ * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
+ * @dp: NFP Net data path struct
+ * @rx_ring: RX ring to allocate buffers for
+ */
+static int
+nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
+{
+ struct nfp_net_rx_buf *rxbufs;
+ unsigned int i;
+
+ if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
+ return 0;
+
+ rxbufs = rx_ring->rxbufs;
+
+ for (i = 0; i < rx_ring->cnt - 1; i++) {
+ rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
+ if (!rxbufs[i].frag) {
+ nfp_net_rx_ring_bufs_free(dp, rx_ring);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
+{
+ unsigned int r;
+
+ dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
+ GFP_KERNEL);
+ if (!dp->tx_rings)
+ return -ENOMEM;
+
+ if (dp->ctrl & NFP_NET_CFG_CTRL_TXRWB) {
+ dp->txrwb = dma_alloc_coherent(dp->dev,
+ dp->num_tx_rings * sizeof(u64),
+ &dp->txrwb_dma, GFP_KERNEL);
+ if (!dp->txrwb)
+ goto err_free_rings;
+ }
+
+ for (r = 0; r < dp->num_tx_rings; r++) {
+ int bias = 0;
+
+ if (r >= dp->num_stack_tx_rings)
+ bias = dp->num_stack_tx_rings;
+
+ nfp_net_tx_ring_init(&dp->tx_rings[r], dp,
+ &nn->r_vecs[r - bias], r, bias);
+
+ if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
+ goto err_free_prev;
+
+ if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
+ goto err_free_ring;
+ }
+
+ return 0;
+
+err_free_prev:
+ while (r--) {
+ nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
+err_free_ring:
+ nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
+ }
+ if (dp->txrwb)
+ dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64),
+ dp->txrwb, dp->txrwb_dma);
+err_free_rings:
+ kfree(dp->tx_rings);
+ return -ENOMEM;
+}
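The unwind above uses the less common label-inside-loop idiom: err_free_ring sits inside the while (r--) loop, so a failure after ring r's descriptor memory was allocated first frees just that piece, then falls into the loop to fully unwind rings r-1 down to 0. The same shape in a generic sketch (alloc_a/alloc_b and the free helpers are hypothetical):

/* Generic sketch of the label-inside-unwind-loop idiom (hypothetical helpers). */
static int example_prepare(int n)
{
	int r;

	for (r = 0; r < n; r++) {
		if (alloc_a(r))
			goto err_unwind;	/* nothing of ring r allocated yet */
		if (alloc_b(r))
			goto err_free_a;	/* only a[r] needs freeing */
	}
	return 0;

err_unwind:
	while (r--) {
		free_b(r);
err_free_a:
		free_a(r);
	}
	return -ENOMEM;
}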
+
+void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
+{
+ unsigned int r;
+
+ for (r = 0; r < dp->num_tx_rings; r++) {
+ nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
+ nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
+ }
+
+ if (dp->txrwb)
+ dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64),
+ dp->txrwb, dp->txrwb_dma);
+ kfree(dp->tx_rings);
+}
+
+/**
+ * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
+ * @rx_ring: RX ring to free
+ */
+static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
+{
+ struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+
+ if (dp->netdev)
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+
+ if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
+ kvfree(rx_ring->xsk_rxbufs);
+ else
+ kvfree(rx_ring->rxbufs);
+
+ if (rx_ring->rxds)
+ dma_free_coherent(dp->dev, rx_ring->size,
+ rx_ring->rxds, rx_ring->dma);
+
+ rx_ring->cnt = 0;
+ rx_ring->rxbufs = NULL;
+ rx_ring->xsk_rxbufs = NULL;
+ rx_ring->rxds = NULL;
+ rx_ring->dma = 0;
+ rx_ring->size = 0;
+}
+
+/**
+ * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
+ * @dp: NFP Net data path struct
+ * @rx_ring: RX ring to allocate
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int
+nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
+{
+ enum xdp_mem_type mem_type;
+ size_t rxbuf_sw_desc_sz;
+ int err;
+
+ if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
+ mem_type = MEM_TYPE_XSK_BUFF_POOL;
+ rxbuf_sw_desc_sz = sizeof(*rx_ring->xsk_rxbufs);
+ } else {
+ mem_type = MEM_TYPE_PAGE_ORDER0;
+ rxbuf_sw_desc_sz = sizeof(*rx_ring->rxbufs);
+ }
+
+ if (dp->netdev) {
+ err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
+ rx_ring->idx, rx_ring->r_vec->napi.napi_id);
+ if (err < 0)
+ return err;
+
+ err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, mem_type, NULL);
+ if (err)
+ goto err_alloc;
+ }
+
+ rx_ring->cnt = dp->rxd_cnt;
+ rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
+ rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
+ &rx_ring->dma,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!rx_ring->rxds) {
+ netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
+ rx_ring->cnt);
+ goto err_alloc;
+ }
+
+ if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
+ rx_ring->xsk_rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
+ GFP_KERNEL);
+ if (!rx_ring->xsk_rxbufs)
+ goto err_alloc;
+ } else {
+ rx_ring->rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
+ GFP_KERNEL);
+ if (!rx_ring->rxbufs)
+ goto err_alloc;
+ }
+
+ return 0;
+
+err_alloc:
+ nfp_net_rx_ring_free(rx_ring);
+ return -ENOMEM;
+}
+
+int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
+{
+ unsigned int r;
+
+ dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
+ GFP_KERNEL);
+ if (!dp->rx_rings)
+ return -ENOMEM;
+
+ for (r = 0; r < dp->num_rx_rings; r++) {
+ nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);
+
+ if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
+ goto err_free_prev;
+
+ if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
+ goto err_free_ring;
+ }
+
+ return 0;
+
+err_free_prev:
+ while (r--) {
+ nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
+err_free_ring:
+ nfp_net_rx_ring_free(&dp->rx_rings[r]);
+ }
+ kfree(dp->rx_rings);
+ return -ENOMEM;
+}
+
+void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
+{
+ unsigned int r;
+
+ for (r = 0; r < dp->num_rx_rings; r++) {
+ nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
+ nfp_net_rx_ring_free(&dp->rx_rings[r]);
+ }
+
+ kfree(dp->rx_rings);
+}
+
+void
+nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
+ struct nfp_net_rx_ring *rx_ring, unsigned int idx)
+{
+ /* Write the DMA address, size and MSI-X info to the device */
+ nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
+ nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
+ nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
+}
+
+void
+nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
+ struct nfp_net_tx_ring *tx_ring, unsigned int idx)
+{
+ nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
+ if (tx_ring->txrwb) {
+ *tx_ring->txrwb = 0;
+ nn_writeq(nn, NFP_NET_CFG_TXR_WB_ADDR(idx),
+ nn->dp.txrwb_dma + idx * sizeof(u64));
+ }
+ nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
+ nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
+}
+
+void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
+{
+ nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
+
+ nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
+ nn_writeq(nn, NFP_NET_CFG_TXR_WB_ADDR(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
+}
+
+netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ return nn->dp.ops->xmit(skb, netdev);
+}
+
+bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
+{
+ struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
+
+ return nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false);
+}
+
+bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
+{
+ struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
+ bool ret;
+
+ spin_lock_bh(&r_vec->lock);
+ ret = nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false);
+ spin_unlock_bh(&r_vec->lock);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h
new file mode 100644
index 000000000000..c934cc2d3208
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef _NFP_NET_DP_
+#define _NFP_NET_DP_
+
+#include "nfp_net.h"
+
+static inline dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
+{
+ return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
+ dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+ dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+}
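nfp_net_dma_map_rx() maps only the data portion of the fragment: NFP_NET_RX_BUF_HEADROOM is skipped at the front and NFP_NET_RX_BUF_NON_DATA is excluded from the mapped length. A sketch of the assumed layout (treating the non-data tail as headroom plus an skb_shared_info region is an assumption, not taken from this patch):

/*
 * Assumed RX fragment layout (sketch, not authoritative):
 *
 *  frag                                                  frag + fl_bufsz
 *   |<-- headroom -->|<------ DMA-mapped data ------>|<-- tail/shinfo -->|
 *                    ^
 *                    mapping starts at frag + NFP_NET_RX_BUF_HEADROOM,
 *                    length fl_bufsz - NFP_NET_RX_BUF_NON_DATA
 */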
+
+static inline void
+nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
+{
+ dma_sync_single_for_device(dp->dev, dma_addr,
+ dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+ dp->rx_dma_dir);
+}
+
+static inline void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp,
+ dma_addr_t dma_addr)
+{
+ dma_unmap_single_attrs(dp->dev, dma_addr,
+ dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+ dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static inline void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp,
+ dma_addr_t dma_addr,
+ unsigned int len)
+{
+ dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
+ len, dp->rx_dma_dir);
+}
+
+/**
+ * nfp_net_tx_full() - check if the TX ring is full
+ * @tx_ring: TX ring to check
+ * @dcnt: Number of descriptors that need to be enqueued (must be >= 1)
+ *
+ * This function checks, based on the *host copy* of the read/write
+ * pointers, whether a given TX ring is full. The real TX queue may have
+ * some newly made available slots.
+ *
+ * Return: True if the ring is full.
+ */
+static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
+{
+ return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
+}
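wr_p and rd_p are free-running counters (the debugfs code above reduces them with "% cnt"), so the subtraction here is wrap-safe in unsigned arithmetic. A worked example with hypothetical values:

/* Worked example (hypothetical values), cnt = 1024, dcnt = 1:
 *   wr_p = 4096, rd_p = 3073  ->  4096 - 3073 = 1023 >= 1023  ->  full
 *   wr_p = 4096, rd_p = 3074  ->  1022 < 1023                 ->  not full
 * Wrap case with u32 counters:
 *   wr_p = 5, rd_p = 0xfffffffe  ->  (u32)(5 - 0xfffffffe) = 7  ->  correct
 */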
+
+static inline void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
+{
+ wmb(); /* drain writebuffer */
+ nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
+ tx_ring->wr_ptr_add = 0;
+}
+
+static inline u32
+nfp_net_read_tx_cmpl(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp)
+{
+ if (tx_ring->txrwb)
+ return *tx_ring->txrwb;
+ return nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
+}
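When the firmware supports TX read write-back (TXRWB), the device DMAs its completion pointer into host memory, so this helper reads cheap cached DRAM instead of issuing an MMIO read across PCIe on every poll. A hedged sketch of the consumer side, assuming the ring caches the previous value in qcp_rd_p and using a hypothetical reclaim_one() helper:

/* Completion-poll sketch (assumptions as noted above). */
u32 qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);
u32 todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);

while (todo--)
	reclaim_one(tx_ring, D_IDX(tx_ring, tx_ring->rd_p++));

tx_ring->qcp_rd_p = qcp_rd_p;	/* remember how far the device got */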
+
+static inline void nfp_net_free_frag(void *frag, bool xdp)
+{
+ if (!xdp)
+ skb_free_frag(frag);
+ else
+ __free_page(virt_to_page(frag));
+}
+
+/**
+ * nfp_net_irq_unmask() - Unmask automasked interrupt
+ * @nn: NFP Network structure
+ * @entry_nr: MSI-X table entry
+ *
+ * Clear the ICR for the IRQ entry.
+ */
+static inline void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
+{
+ nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
+ nn_pci_flush(nn);
+}
+
+struct seq_file;
+
+/* Common */
+void
+nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
+ struct nfp_net_rx_ring *rx_ring, unsigned int idx);
+void
+nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
+ struct nfp_net_tx_ring *tx_ring, unsigned int idx);
+void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx);
+
+void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr);
+int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
+int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
+void nfp_net_rx_rings_free(struct nfp_net_dp *dp);
+void nfp_net_tx_rings_free(struct nfp_net_dp *dp);
+void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring);
+
+enum nfp_nfd_version {
+ NFP_NFD_VER_NFD3,
+ NFP_NFD_VER_NFDK,
+};
+
+/**
+ * struct nfp_dp_ops - Hooks wrapping the different data path implementations
+ * @version: Indicates the data path type
+ * @tx_min_desc_per_pkt: Minimum number of TX descriptors needed per packet
+ * @cap_mask: Mask of supported features
+ * @poll: Napi poll for normal rx/tx
+ * @xsk_poll: Napi poll when xsk is enabled
+ * @ctrl_poll: Tasklet poll for ctrl rx/tx
+ * @xmit: Xmit for normal path
+ * @ctrl_tx_one: Xmit for ctrl path
+ * @rx_ring_fill_freelist: Give buffers from the ring to FW
+ * @tx_ring_alloc: Allocate resource for a TX ring
+ * @tx_ring_reset: Free any untransmitted buffers and reset pointers
+ * @tx_ring_free: Free resources allocated to a TX ring
+ * @tx_ring_bufs_alloc: Allocate resource for each TX buffer
+ * @tx_ring_bufs_free: Free resources allocated to each TX buffer
+ * @print_tx_descs: Show TX ring's info for debug purposes
+ */
+struct nfp_dp_ops {
+ enum nfp_nfd_version version;
+ unsigned int tx_min_desc_per_pkt;
+ u32 cap_mask;
+
+ int (*poll)(struct napi_struct *napi, int budget);
+ int (*xsk_poll)(struct napi_struct *napi, int budget);
+ void (*ctrl_poll)(struct tasklet_struct *t);
+ netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *netdev);
+ bool (*ctrl_tx_one)(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, bool old);
+ void (*rx_ring_fill_freelist)(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring);
+ int (*tx_ring_alloc)(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring);
+ void (*tx_ring_reset)(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring);
+ void (*tx_ring_free)(struct nfp_net_tx_ring *tx_ring);
+ int (*tx_ring_bufs_alloc)(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring);
+ void (*tx_ring_bufs_free)(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring);
+
+ void (*print_tx_descs)(struct seq_file *file,
+ struct nfp_net_r_vector *r_vec,
+ struct nfp_net_tx_ring *tx_ring,
+ u32 d_rd_p, u32 d_wr_p);
+};
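Concrete hook tables live in the NFD3 and NFDK datapath code (nfp_nfd3_ops and nfp_nfdk_ops, declared below). Purely to illustrate the shape of such a table, not the real contents, a hypothetical instance might look like this:

/* Hypothetical example only; the real tables are nfp_nfd3_ops/nfp_nfdk_ops.
 * Unlisted members are zero-initialized.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	return 0;	/* stub: report no outstanding work */
}

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	dev_kfree_skb_any(skb);	/* stub: drop the packet */
	return NETDEV_TX_OK;
}

static const struct nfp_dp_ops example_dp_ops = {
	.version		= NFP_NFD_VER_NFD3,
	.tx_min_desc_per_pkt	= 1,
	.poll			= example_poll,
	.xmit			= example_xmit,
};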
+
+static inline void
+nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+ return dp->ops->tx_ring_reset(dp, tx_ring);
+}
+
+static inline void
+nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
+{
+ dp->ops->rx_ring_fill_freelist(dp, rx_ring);
+}
+
+static inline int
+nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+ return dp->ops->tx_ring_alloc(dp, tx_ring);
+}
+
+static inline void
+nfp_net_tx_ring_free(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+ dp->ops->tx_ring_free(tx_ring);
+}
+
+static inline int
+nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ return dp->ops->tx_ring_bufs_alloc(dp, tx_ring);
+}
+
+static inline void
+nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ dp->ops->tx_ring_bufs_free(dp, tx_ring);
+}
+
+static inline void
+nfp_net_debugfs_print_tx_descs(struct seq_file *file, struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec,
+ struct nfp_net_tx_ring *tx_ring,
+ u32 d_rd_p, u32 d_wr_p)
+{
+ dp->ops->print_tx_descs(file, r_vec, tx_ring, d_rd_p, d_wr_p);
+}
+
+extern const struct nfp_dp_ops nfp_nfd3_ops;
+extern const struct nfp_dp_ops nfp_nfdk_ops;
+
+netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev);
+
+#endif /* _NFP_NET_DP_ */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index e0c27471bcdb..61c8b450aafb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -21,10 +21,12 @@
#include <linux/sfp.h>
#include "nfpcore/nfp.h"
+#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net_ctrl.h"
+#include "nfp_net_dp.h"
#include "nfp_net.h"
#include "nfp_port.h"
@@ -217,7 +219,7 @@ nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
struct nfp_net *nn = netdev_priv(netdev);
snprintf(vnic_version, sizeof(vnic_version), "%d.%d.%d.%d",
- nn->fw_ver.resv, nn->fw_ver.class,
+ nn->fw_ver.extend, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor);
strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
sizeof(drvinfo->bus_info));
@@ -386,9 +388,10 @@ static void nfp_net_get_ringparam(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct nfp_net *nn = netdev_priv(netdev);
+ u32 qc_max = nn->dev_info->max_qc_size;
- ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
- ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
+ ring->rx_max_pending = qc_max;
+ ring->tx_max_pending = qc_max / nn->dp.ops->tx_min_desc_per_pkt;
ring->rx_pending = nn->dp.rxd_cnt;
ring->tx_pending = nn->dp.txd_cnt;
}
@@ -412,19 +415,22 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
+ u32 tx_dpp, qc_min, qc_max, rxd_cnt, txd_cnt;
struct nfp_net *nn = netdev_priv(netdev);
- u32 rxd_cnt, txd_cnt;
/* We don't have separate queues/rings for small/large frames. */
if (ring->rx_mini_pending || ring->rx_jumbo_pending)
return -EINVAL;
+ qc_min = nn->dev_info->min_qc_size;
+ qc_max = nn->dev_info->max_qc_size;
+ tx_dpp = nn->dp.ops->tx_min_desc_per_pkt;
/* Round up to supported values */
rxd_cnt = roundup_pow_of_two(ring->rx_pending);
txd_cnt = roundup_pow_of_two(ring->tx_pending);
- if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS ||
- txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
+ if (rxd_cnt < qc_min || rxd_cnt > qc_max ||
+ txd_cnt < qc_min / tx_dpp || txd_cnt > qc_max / tx_dpp)
return -EINVAL;
if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 751f76cd4f79..ca4e05650fe6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -22,6 +22,7 @@
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"
@@ -116,13 +117,12 @@ nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
/* Allocate and initialise the vNIC */
- nn = nfp_net_alloc(pf->pdev, ctrl_bar, needs_netdev,
+ nn = nfp_net_alloc(pf->pdev, pf->dev_info, ctrl_bar, needs_netdev,
n_tx_rings, n_rx_rings);
if (IS_ERR(nn))
return nn;
nn->app = pf->app;
- nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->dp.is_vf = 0;
@@ -307,6 +307,7 @@ err_prev_deinit:
static int
nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
{
+ struct devlink *devlink = priv_to_devlink(pf);
u8 __iomem *ctrl_bar;
int err;
@@ -314,9 +315,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
if (IS_ERR(pf->app))
return PTR_ERR(pf->app);
- mutex_lock(&pf->lock);
+ devl_lock(devlink);
err = nfp_app_init(pf->app);
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
if (err)
goto err_free;
@@ -343,9 +344,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
err_unmap:
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_app_clean:
- mutex_lock(&pf->lock);
+ devl_lock(devlink);
nfp_app_clean(pf->app);
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
err_free:
nfp_app_free(pf->app);
pf->app = NULL;
@@ -354,14 +355,16 @@ err_free:
static void nfp_net_pf_app_clean(struct nfp_pf *pf)
{
+ struct devlink *devlink = priv_to_devlink(pf);
+
if (pf->ctrl_vnic) {
nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
}
- mutex_lock(&pf->lock);
+ devl_lock(devlink);
nfp_app_clean(pf->app);
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
nfp_app_free(pf->app);
pf->app = NULL;
@@ -495,8 +498,9 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf)
}
cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
- mem = nfp_cpp_map_area(pf->cpp, "net.qc", cpp_id, NFP_PCIE_QUEUE(0),
- NFP_QCP_QUEUE_AREA_SZ, &pf->qc_area);
+ mem = nfp_cpp_map_area(pf->cpp, "net.qc", cpp_id,
+ nfp_qcp_queue_offset(pf->dev_info, 0),
+ pf->dev_info->qc_area_sz, &pf->qc_area);
if (IS_ERR(mem)) {
nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
err = PTR_ERR(mem);
@@ -546,12 +550,13 @@ nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
{
+ struct devlink *devlink = priv_to_devlink(pf);
struct nfp_eth_table *eth_table;
struct nfp_net *nn, *next;
struct nfp_port *port;
int err;
- lockdep_assert_held(&pf->lock);
+ devl_assert_locked(devlink);
/* Check for nfp_net_pci_remove() racing against us */
if (list_empty(&pf->vnics))
@@ -600,10 +605,11 @@ static void nfp_net_refresh_vnics(struct work_struct *work)
{
struct nfp_pf *pf = container_of(work, struct nfp_pf,
port_refresh_work);
+ struct devlink *devlink = priv_to_devlink(pf);
- mutex_lock(&pf->lock);
+ devl_lock(devlink);
nfp_net_refresh_port_table_sync(pf);
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
}
void nfp_net_refresh_port_table(struct nfp_port *port)
@@ -672,9 +678,11 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
}
nfp_net_get_fw_version(&fw_ver, ctrl_bar);
- if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
+ if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK ||
+ fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
- fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
+ fw_ver.extend, fw_ver.class,
+ fw_ver.major, fw_ver.minor);
err = -EINVAL;
goto err_unmap;
}
@@ -690,7 +698,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
break;
default:
nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
- fw_ver.resv, fw_ver.class,
+ fw_ver.extend, fw_ver.class,
fw_ver.major, fw_ver.minor);
err = -EINVAL;
goto err_unmap;
@@ -709,7 +717,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_shared_buf_unreg;
- mutex_lock(&pf->lock);
+ devl_lock(devlink);
pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
/* Allocate the vnics and do basic init */
@@ -729,7 +737,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_stop_app;
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
devlink_register(devlink);
return 0;
@@ -742,7 +750,7 @@ err_free_vnics:
nfp_net_pf_free_vnics(pf);
err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir);
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
nfp_devlink_params_unregister(pf);
err_shared_buf_unreg:
nfp_shared_buf_unregister(pf);
@@ -756,10 +764,11 @@ err_unmap:
void nfp_net_pci_remove(struct nfp_pf *pf)
{
+ struct devlink *devlink = priv_to_devlink(pf);
struct nfp_net *nn, *next;
devlink_unregister(priv_to_devlink(pf));
- mutex_lock(&pf->lock);
+ devl_lock(devlink);
list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
if (!nfp_net_is_data_vnic(nn))
continue;
@@ -771,7 +780,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
/* stop app first, to avoid double free of ctrl vNIC's ddir */
nfp_net_debugfs_dir_clean(&pf->ddir);
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
nfp_devlink_params_unregister(pf);
nfp_shared_buf_unregister(pf);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 181ac8e789a3..ba3fa7eac98d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -20,7 +20,7 @@ struct net_device *
nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set, unsigned int id)
{
return rcu_dereference_protected(set->reprs[id],
- lockdep_is_held(&app->pf->lock));
+ nfp_app_is_locked(app));
}
static void
@@ -476,7 +476,7 @@ nfp_reprs_clean_and_free_by_type(struct nfp_app *app, enum nfp_repr_type type)
int i;
reprs = rcu_dereference_protected(app->reprs[type],
- lockdep_is_held(&app->pf->lock));
+ nfp_app_is_locked(app));
if (!reprs)
return;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
index a3db0cbf6425..786be58a907e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
@@ -4,8 +4,7 @@
#ifndef _NFP_NET_SRIOV_H_
#define _NFP_NET_SRIOV_H_
-/**
- * SRIOV VF configuration.
+/* SRIOV VF configuration.
* The configuration memory begins with a mailbox region for communication with
* the firmware followed by individual VF entries.
*/
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c b/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c
new file mode 100644
index 000000000000..86829446c637
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc */
+/* Copyright (C) 2021 Corigine, Inc */
+
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <net/xdp_sock_drv.h>
+#include <trace/events/xdp.h>
+
+#include "nfp_app.h"
+#include "nfp_net.h"
+#include "nfp_net_dp.h"
+#include "nfp_net_xsk.h"
+
+static void
+nfp_net_xsk_rx_bufs_stash(struct nfp_net_rx_ring *rx_ring, unsigned int idx,
+ struct xdp_buff *xdp)
+{
+ unsigned int headroom;
+
+ headroom = xsk_pool_get_headroom(rx_ring->r_vec->xsk_pool);
+
+ rx_ring->rxds[idx].fld.reserved = 0;
+ rx_ring->rxds[idx].fld.meta_len_dd = 0;
+
+ rx_ring->xsk_rxbufs[idx].xdp = xdp;
+ rx_ring->xsk_rxbufs[idx].dma_addr =
+ xsk_buff_xdp_get_frame_dma(xdp) + headroom;
+}
+
+void nfp_net_xsk_rx_unstash(struct nfp_net_xsk_rx_buf *rxbuf)
+{
+ rxbuf->dma_addr = 0;
+ rxbuf->xdp = NULL;
+}
+
+void nfp_net_xsk_rx_free(struct nfp_net_xsk_rx_buf *rxbuf)
+{
+ if (rxbuf->xdp)
+ xsk_buff_free(rxbuf->xdp);
+
+ nfp_net_xsk_rx_unstash(rxbuf);
+}
+
+void nfp_net_xsk_rx_bufs_free(struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int i;
+
+ if (!rx_ring->cnt)
+ return;
+
+ for (i = 0; i < rx_ring->cnt - 1; i++)
+ nfp_net_xsk_rx_free(&rx_ring->xsk_rxbufs[i]);
+}
+
+void nfp_net_xsk_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
+{
+ struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+ struct xsk_buff_pool *pool = r_vec->xsk_pool;
+ unsigned int wr_idx, wr_ptr_add = 0;
+ struct xdp_buff *xdp;
+
+ while (nfp_net_rx_space(rx_ring)) {
+ wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
+
+ xdp = xsk_buff_alloc(pool);
+ if (!xdp)
+ break;
+
+ nfp_net_xsk_rx_bufs_stash(rx_ring, wr_idx, xdp);
+
+ nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
+ rx_ring->xsk_rxbufs[wr_idx].dma_addr);
+
+ rx_ring->wr_p++;
+ wr_ptr_add++;
+ }
+
+ /* Ensure all records are visible before incrementing write counter. */
+ wmb();
+ nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, wr_ptr_add);
+}
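The wmb() ahead of nfp_qcp_wr_ptr_add() is the standard descriptor-publish ordering: every descriptor write must be globally visible before the queue pointer tells the device that new freelist entries exist. The same pattern in reduced form (a sketch; variable names are illustrative):

/* Publish pattern sketch: fill descriptors, fence, then bump the pointer. */
nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld, dma_addr);	/* fill */

wmb();	/* descriptor contents visible to the device before the pointer */

nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, added);	/* device may DMA from now on */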
+
+void nfp_net_xsk_rx_drop(struct nfp_net_r_vector *r_vec,
+ struct nfp_net_xsk_rx_buf *xrxbuf)
+{
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_drops++;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ nfp_net_xsk_rx_free(xrxbuf);
+}
+
+static void nfp_net_xsk_pool_unmap(struct device *dev,
+ struct xsk_buff_pool *pool)
+{
+ return xsk_pool_dma_unmap(pool, 0);
+}
+
+static int nfp_net_xsk_pool_map(struct device *dev, struct xsk_buff_pool *pool)
+{
+ return xsk_pool_dma_map(pool, dev, 0);
+}
+
+int nfp_net_xsk_setup_pool(struct net_device *netdev,
+ struct xsk_buff_pool *pool, u16 queue_id)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ struct xsk_buff_pool *prev_pool;
+ struct nfp_net_dp *dp;
+ int err;
+
+ /* NFDK doesn't implement xsk yet. */
+ if (nn->dp.ops->version == NFP_NFD_VER_NFDK)
+ return -EOPNOTSUPP;
+
+ /* Reject on old FWs so we can drop some checks on datapath. */
+ if (nn->dp.rx_offset != NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ return -EOPNOTSUPP;
+ if (!nn->dp.chained_metadata_format)
+ return -EOPNOTSUPP;
+
+ /* Install */
+ if (pool) {
+ err = nfp_net_xsk_pool_map(nn->dp.dev, pool);
+ if (err)
+ return err;
+ }
+
+ /* Reconfig/swap */
+ dp = nfp_net_clone_dp(nn);
+ if (!dp) {
+ err = -ENOMEM;
+ goto err_unmap;
+ }
+
+ prev_pool = dp->xsk_pools[queue_id];
+ dp->xsk_pools[queue_id] = pool;
+
+ err = nfp_net_ring_reconfig(nn, dp, NULL);
+ if (err)
+ goto err_unmap;
+
+ /* Uninstall */
+ if (prev_pool)
+ nfp_net_xsk_pool_unmap(nn->dp.dev, prev_pool);
+
+ return 0;
+err_unmap:
+ if (pool)
+ nfp_net_xsk_pool_unmap(nn->dp.dev, pool);
+
+ return err;
+}
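Pool installation follows the driver's transactional reconfig pattern: clone the live data path, mutate only the copy, and let nfp_net_ring_reconfig() either swap it in or leave the original untouched on failure, so the datapath is never observed half-updated. Reduced to its skeleton (error unwinding elided; a sketch, not the full function):

/* Skeleton of the clone-and-swap reconfig pattern (a sketch). */
struct nfp_net_dp *dp = nfp_net_clone_dp(nn);	/* copy of the live config */
if (!dp)
	return -ENOMEM;

dp->xsk_pools[queue_id] = pool;			/* mutate the copy only */

return nfp_net_ring_reconfig(nn, dp, NULL);	/* swap in, or roll back */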
+
+int nfp_net_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ /* queue_id comes from a zero-copy socket, installed with XDP_SETUP_XSK_POOL,
+ * so it must be within our vector range. Moreover, our napi structs
+ * are statically allocated, so we can always kick them without worrying
+ * whether a reconfig is in progress or the interface is down.
+ */
+ napi_schedule(&nn->r_vecs[queue_id].napi);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.h b/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.h
new file mode 100644
index 000000000000..6d281eb2fc1c
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2018 Netronome Systems, Inc */
+/* Copyright (C) 2021 Corigine, Inc */
+
+#ifndef _NFP_XSK_H_
+#define _NFP_XSK_H_
+
+#include <net/xdp_sock_drv.h>
+
+#define NFP_NET_XSK_TX_BATCH 16 /* XSK TX transmission batch size. */
+
+static inline bool nfp_net_has_xsk_pool_slow(struct nfp_net_dp *dp,
+ unsigned int qid)
+{
+ return dp->xdp_prog && dp->xsk_pools[qid];
+}
+
+static inline int nfp_net_rx_space(struct nfp_net_rx_ring *rx_ring)
+{
+ return rx_ring->cnt - rx_ring->wr_p + rx_ring->rd_p - 1;
+}
+
+static inline int nfp_net_tx_space(struct nfp_net_tx_ring *tx_ring)
+{
+ return tx_ring->cnt - tx_ring->wr_p + tx_ring->rd_p - 1;
+}
+
+void nfp_net_xsk_rx_unstash(struct nfp_net_xsk_rx_buf *rxbuf);
+void nfp_net_xsk_rx_free(struct nfp_net_xsk_rx_buf *rxbuf);
+void nfp_net_xsk_rx_drop(struct nfp_net_r_vector *r_vec,
+ struct nfp_net_xsk_rx_buf *xrxbuf);
+int nfp_net_xsk_setup_pool(struct net_device *netdev, struct xsk_buff_pool *pool,
+ u16 queue_id);
+
+void nfp_net_xsk_rx_bufs_free(struct nfp_net_rx_ring *rx_ring);
+
+void nfp_net_xsk_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring);
+
+int nfp_net_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
+
+#endif /* _NFP_XSK_H_ */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 87f2268b16d6..a51eb26dd977 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/etherdevice.h>
+#include "nfpcore/nfp_dev.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_main.h"
@@ -36,11 +37,14 @@ struct nfp_net_vf {
static const char nfp_net_driver_name[] = "nfp_netvf";
-#define PCI_DEVICE_NFP6000VF 0x6003
static const struct pci_device_id nfp_netvf_pci_device_ids[] = {
- { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP6000VF,
+ { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP3800_VF,
PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
- PCI_ANY_ID, 0,
+ PCI_ANY_ID, 0, NFP_DEV_NFP3800_VF,
+ },
+ { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000_VF,
+ PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
+ PCI_ANY_ID, 0, NFP_DEV_NFP6000_VF,
},
{ 0, } /* Required last entry. */
};
@@ -65,6 +69,7 @@ static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
static int nfp_netvf_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
+ const struct nfp_dev_info *dev_info;
struct nfp_net_fw_version fw_ver;
int max_tx_rings, max_rx_rings;
u32 tx_bar_off, rx_bar_off;
@@ -78,6 +83,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
int stride;
int err;
+ dev_info = &nfp_dev_info[pci_id->driver_data];
+
vf = kzalloc(sizeof(*vf), GFP_KERNEL);
if (!vf)
return -ENOMEM;
@@ -95,8 +102,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- err = dma_set_mask_and_coherent(&pdev->dev,
- DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS));
+ err = dma_set_mask_and_coherent(&pdev->dev, dev_info->dma_mask);
if (err)
goto err_pci_regions;
@@ -116,9 +122,11 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
}
nfp_net_get_fw_version(&fw_ver, ctrl_bar);
- if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
+ if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK ||
+ fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n",
- fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
+ fw_ver.extend, fw_ver.class,
+ fw_ver.major, fw_ver.minor);
err = -EINVAL;
goto err_ctrl_unmap;
}
@@ -138,7 +146,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
break;
default:
dev_err(&pdev->dev, "Unsupported Firmware ABI %d.%d.%d.%d\n",
- fw_ver.resv, fw_ver.class,
+ fw_ver.extend, fw_ver.class,
fw_ver.major, fw_ver.minor);
err = -EINVAL;
goto err_ctrl_unmap;
@@ -167,19 +175,19 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
}
startq = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
- tx_bar_off = NFP_PCIE_QUEUE(startq);
+ tx_bar_off = nfp_qcp_queue_offset(dev_info, startq);
startq = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
- rx_bar_off = NFP_PCIE_QUEUE(startq);
+ rx_bar_off = nfp_qcp_queue_offset(dev_info, startq);
/* Allocate and initialise the netdev */
- nn = nfp_net_alloc(pdev, ctrl_bar, true, max_tx_rings, max_rx_rings);
+ nn = nfp_net_alloc(pdev, dev_info, ctrl_bar, true,
+ max_tx_rings, max_rx_rings);
if (IS_ERR(nn)) {
err = PTR_ERR(nn);
goto err_ctrl_unmap;
}
vf->nn = nn;
- nn->fw_ver = fw_ver;
nn->dp.is_vf = 1;
nn->stride_tx = stride;
nn->stride_rx = stride;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c
index 93c5bfc0510b..4f2308570dcf 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c
@@ -75,23 +75,6 @@ int nfp_port_set_features(struct net_device *netdev, netdev_features_t features)
return 0;
}
-struct nfp_port *
-nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id)
-{
- struct nfp_port *port;
-
- lockdep_assert_held(&pf->lock);
-
- if (type != NFP_PORT_PHYS_PORT)
- return NULL;
-
- list_for_each_entry(port, &pf->ports, port_list)
- if (port->eth_id == id)
- return port;
-
- return NULL;
-}
-
struct nfp_eth_table_port *__nfp_port_get_eth_port(struct nfp_port *port)
{
if (!port)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index ae4da189d955..d1ebe6c72f7f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -106,8 +106,6 @@ nfp_port_set_features(struct net_device *netdev, netdev_features_t features);
struct nfp_port *nfp_port_from_netdev(struct net_device *netdev);
int nfp_port_get_port_parent_id(struct net_device *netdev,
struct netdev_phys_item_id *ppid);
-struct nfp_port *
-nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id);
struct nfp_eth_table_port *__nfp_port_get_eth_port(struct nfp_port *port);
struct nfp_eth_table_port *nfp_port_get_eth_port(struct nfp_port *port);
@@ -132,8 +130,7 @@ void nfp_devlink_port_unregister(struct nfp_port *port);
void nfp_devlink_port_type_eth_set(struct nfp_port *port);
void nfp_devlink_port_type_clear(struct nfp_port *port);
-/**
- * Mac stats (0x0000 - 0x0200)
+/* Mac stats (0x0000 - 0x0200)
* all counters are 64bit.
*/
#define NFP_MAC_STATS_BASE 0x0000
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 252fe06f58aa..0d1d39edbbae 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -28,6 +28,7 @@
#include <linux/pci.h>
#include "nfp_cpp.h"
+#include "nfp_dev.h"
#include "nfp6000/nfp6000.h"
@@ -100,11 +101,7 @@
#define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4))
#define NFP_PCIE_P2C_GENERAL_SIZE(bar) (1 << ((bar)->bitsize - 4))
-#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \
- (0x400 + ((bar) * 8 + (slot)) * 4)
-
-#define NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \
- (((bar) * 8 + (slot)) * 4)
+#define NFP_PCIE_P2C_EXPBAR_OFFSET(bar_index) ((bar_index) * 4)
/* The number of explicit BARs to reserve.
* Minimum is 0, maximum is 4 on the NFP6000.
@@ -145,6 +142,7 @@ struct nfp_bar {
struct nfp6000_pcie {
struct pci_dev *pdev;
struct device *dev;
+ const struct nfp_dev_info *dev_info;
/* PCI BAR management */
spinlock_t bar_lock; /* Protect the PCI2CPP BAR cache */
@@ -269,19 +267,16 @@ compute_bar(const struct nfp6000_pcie *nfp, const struct nfp_bar *bar,
static int
nfp6000_bar_write(struct nfp6000_pcie *nfp, struct nfp_bar *bar, u32 newcfg)
{
- int base, slot;
- int xbar;
+ unsigned int xbar;
- base = bar->index >> 3;
- slot = bar->index & 7;
+ xbar = NFP_PCIE_P2C_EXPBAR_OFFSET(bar->index);
if (nfp->iomem.csr) {
- xbar = NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(base, slot);
writel(newcfg, nfp->iomem.csr + xbar);
/* Readback to ensure BAR is flushed */
readl(nfp->iomem.csr + xbar);
} else {
- xbar = NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(base, slot);
+ xbar += nfp->dev_info->pcie_cfg_expbar_offset;
pci_write_config_dword(nfp->pdev, xbar, newcfg);
}
@@ -622,7 +617,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
nfp6000_bar_write(nfp, bar, barcfg_msix_general);
- nfp->expl.data = bar->iomem + NFP_PCIE_SRAM + 0x1000;
+ nfp->expl.data = bar->iomem + NFP_PCIE_SRAM +
+ nfp->dev_info->pcie_expl_offset;
switch (nfp->pdev->device) {
case PCI_DEVICE_ID_NETRONOME_NFP3800:
@@ -1306,18 +1302,20 @@ static const struct nfp_cpp_operations nfp6000_pcie_ops = {
/**
* nfp_cpp_from_nfp6000_pcie() - Build a NFP CPP bus from a NFP6000 PCI device
* @pdev: NFP6000 PCI device
+ * @dev_info: NFP ASIC params
*
* Return: NFP CPP handle
*/
-struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev)
+struct nfp_cpp *
+nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info)
{
struct nfp6000_pcie *nfp;
u16 interface;
int err;
/* Finished with card initialization. */
- dev_info(&pdev->dev,
- "Netronome Flow Processor NFP4000/NFP5000/NFP6000 PCIe Card Probe\n");
+ dev_info(&pdev->dev, "Netronome Flow Processor %s PCIe Card Probe\n",
+ dev_info->chip_names);
pcie_print_link_status(pdev);
nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
@@ -1328,6 +1326,7 @@ struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev)
nfp->dev = &pdev->dev;
nfp->pdev = pdev;
+ nfp->dev_info = dev_info;
init_waitqueue_head(&nfp->bar_waiters);
spin_lock_init(&nfp->bar_lock);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
index 6d1bffa6eac6..097660b673db 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
@@ -11,6 +11,7 @@
#include "nfp_cpp.h"
-struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev);
+struct nfp_cpp *
+nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info);
#endif /* NFP6000_PCIE_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index 2dd0f5842873..3d379e937184 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -32,10 +32,6 @@
#define PCI_64BIT_BAR_COUNT 3
-/* NFP hardware vendor/device ids.
- */
-#define PCI_DEVICE_ID_NETRONOME_NFP3800 0x3800
-
#define NFP_CPP_NUM_TARGETS 16
/* Max size of area it should be safe to request */
#define NFP_CPP_SAFE_AREA_SIZE SZ_2M
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
index 85734c6badf5..508ae6b571ca 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
@@ -22,6 +22,7 @@
#include "nfp6000/nfp_xpb.h"
/* NFP6000 PL */
+#define NFP_PL_DEVICE_PART_NFP6000 0x6200
#define NFP_PL_DEVICE_ID 0x00000004
#define NFP_PL_DEVICE_ID_MASK GENMASK(7, 0)
#define NFP_PL_DEVICE_PART_MASK GENMASK(31, 16)
@@ -130,8 +131,12 @@ int nfp_cpp_model_autodetect(struct nfp_cpp *cpp, u32 *model)
return err;
*model = reg & NFP_PL_DEVICE_MODEL_MASK;
- if (*model & NFP_PL_DEVICE_ID_MASK)
- *model -= 0x10;
+ /* Disambiguate the NFP4000/NFP5000/NFP6000 chips */
+ if (FIELD_GET(NFP_PL_DEVICE_PART_MASK, reg) ==
+ NFP_PL_DEVICE_PART_NFP6000) {
+ if (*model & NFP_PL_DEVICE_ID_MASK)
+ *model -= 0x10;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c
new file mode 100644
index 000000000000..28384d6d1c6f
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+
+#include "nfp_dev.h"
+
+const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT] = {
+ [NFP_DEV_NFP3800] = {
+ .dma_mask = DMA_BIT_MASK(40),
+ .qc_idx_mask = GENMASK(8, 0),
+ .qc_addr_offset = 0x400000,
+ .min_qc_size = 512,
+ .max_qc_size = SZ_64K,
+
+ .chip_names = "NFP3800",
+ .pcie_cfg_expbar_offset = 0x0a00,
+ .pcie_expl_offset = 0xd000,
+ .qc_area_sz = 0x100000,
+ },
+ [NFP_DEV_NFP3800_VF] = {
+ .dma_mask = DMA_BIT_MASK(40),
+ .qc_idx_mask = GENMASK(8, 0),
+ .qc_addr_offset = 0,
+ .min_qc_size = 512,
+ .max_qc_size = SZ_64K,
+ },
+ [NFP_DEV_NFP6000] = {
+ .dma_mask = DMA_BIT_MASK(40),
+ .qc_idx_mask = GENMASK(7, 0),
+ .qc_addr_offset = 0x80000,
+ .min_qc_size = 256,
+ .max_qc_size = SZ_256K,
+
+ .chip_names = "NFP4000/NFP5000/NFP6000",
+ .pcie_cfg_expbar_offset = 0x0400,
+ .pcie_expl_offset = 0x1000,
+ .qc_area_sz = 0x80000,
+ },
+ [NFP_DEV_NFP6000_VF] = {
+ .dma_mask = DMA_BIT_MASK(40),
+ .qc_idx_mask = GENMASK(7, 0),
+ .qc_addr_offset = 0,
+ .min_qc_size = 256,
+ .max_qc_size = SZ_256K,
+ },
+};
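These per-ASIC parameters feed the queue controller address math: a queue's QCP window is expected to sit at qc_addr_offset plus the masked queue index times a fixed per-queue stride. A sketch of the assumed helper, modeled on nfp_qcp_queue_offset() used elsewhere in this series (the stride constant NFP_QCP_QUEUE_ADDR_SZ is assumed here):

/* Assumed shape of the QCP offset helper (sketch, not the exact source). */
static inline u32 example_qcp_queue_offset(const struct nfp_dev_info *dev_info,
					   u16 queue)
{
	queue &= dev_info->qc_idx_mask;	/* e.g. GENMASK(8, 0) on NFP3800 */

	return dev_info->qc_addr_offset + NFP_QCP_QUEUE_ADDR_SZ * queue;
}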
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h
new file mode 100644
index 000000000000..d4189869cf7b
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef _NFP_DEV_H_
+#define _NFP_DEV_H_
+
+#include <linux/types.h>
+
+enum nfp_dev_id {
+ NFP_DEV_NFP3800,
+ NFP_DEV_NFP3800_VF,
+ NFP_DEV_NFP6000,
+ NFP_DEV_NFP6000_VF,
+ NFP_DEV_CNT,
+};
+
+struct nfp_dev_info {
+ /* Required fields */
+ u64 dma_mask;
+ u32 qc_idx_mask;
+ u32 qc_addr_offset;
+ u32 min_qc_size;
+ u32 max_qc_size;
+
+ /* PF-only fields */
+ const char *chip_names;
+ u32 pcie_cfg_expbar_offset;
+ u32 pcie_expl_offset;
+ u32 qc_area_sz;
+};
+
+extern const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT];
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 10e7d8b21c46..730fea214b8a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -513,7 +513,7 @@ nfp_nsp_command_buf_dma_sg(struct nfp_nsp *nsp,
dma_size = BIT_ULL(dma_order);
nseg = DIV_ROUND_UP(max_size, chunk_size);
- chunks = kzalloc(array_size(sizeof(*chunks), nseg), GFP_KERNEL);
+ chunks = kcalloc(nseg, sizeof(*chunks), GFP_KERNEL);
if (!chunks)
return -ENOMEM;
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 07a00dd9cfe0..4b3482ce90a1 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -324,8 +324,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev)
+ sizeof(*priv->rx_bd_v) *
((i + 1) % RX_BD_NUM));
- skb = netdev_alloc_skb_ip_align(ndev,
- NIXGE_MAX_JUMBO_FRAME_SIZE);
+ skb = __netdev_alloc_skb_ip_align(ndev,
+ NIXGE_MAX_JUMBO_FRAME_SIZE,
+ GFP_KERNEL);
if (!skb)
goto out;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index bc39558fe82b..756f97dce85b 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1471,6 +1471,7 @@ static int lpc_eth_drv_resume(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct netdata_local *pldat;
+ int ret;
if (device_may_wakeup(&pdev->dev))
disable_irq_wake(ndev->irq);
@@ -1480,7 +1481,9 @@ static int lpc_eth_drv_resume(struct platform_device *pdev)
pldat = netdev_priv(ndev);
/* Enable interface clock */
- clk_enable(pldat->clk);
+ ret = clk_enable(pldat->clk);
+ if (ret)
+ return ret;
/* Reset and initialize */
__lpc_eth_reset(pldat);
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 12105f62cbdd..03650022d444 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -191,7 +191,7 @@ IV. Notes
Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
-and an AlphaStation to verifty the Alpha port!
+and an AlphaStation to verify the Alpha port!
IVb. References
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 5e25411ff02f..602f4d45d529 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -18,7 +18,7 @@ struct ionic_lif;
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
-#define DEVCMD_TIMEOUT 10
+#define DEVCMD_TIMEOUT 5
#define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100)
#define IONIC_PHC_UPDATE_NS 10000000000 /* 10s in nanoseconds */
@@ -78,6 +78,9 @@ void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
u8 status, int err);
int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait);
+int ionic_dev_cmd_wait_nomsg(struct ionic *ionic, unsigned long max_wait);
+void ionic_dev_cmd_dev_err_print(struct ionic *ionic, u8 opcode, u8 status,
+ int err);
int ionic_set_dma_mask(struct ionic *ionic);
int ionic_setup(struct ionic *ionic);
@@ -89,4 +92,6 @@ int ionic_port_identify(struct ionic *ionic);
int ionic_port_init(struct ionic *ionic);
int ionic_port_reset(struct ionic *ionic);
+const char *ionic_vf_attr_to_str(enum ionic_vf_attr attr);
+
#endif /* _IONIC_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 7e296fa71b36..6ffc62c41165 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -109,8 +109,8 @@ void ionic_bus_unmap_dbpage(struct ionic *ionic, void __iomem *page)
static void ionic_vf_dealloc_locked(struct ionic *ionic)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_STATSADDR };
struct ionic_vf *v;
- dma_addr_t dma = 0;
int i;
if (!ionic->vfs)
@@ -120,9 +120,8 @@ static void ionic_vf_dealloc_locked(struct ionic *ionic)
v = &ionic->vfs[i];
if (v->stats_pa) {
- (void)ionic_set_vf_config(ionic, i,
- IONIC_VF_ATTR_STATSADDR,
- (u8 *)&dma);
+ vfc.stats_pa = 0;
+ (void)ionic_set_vf_config(ionic, i, &vfc);
dma_unmap_single(ionic->dev, v->stats_pa,
sizeof(v->stats), DMA_FROM_DEVICE);
v->stats_pa = 0;
@@ -143,6 +142,7 @@ static void ionic_vf_dealloc(struct ionic *ionic)
static int ionic_vf_alloc(struct ionic *ionic, int num_vfs)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_STATSADDR };
struct ionic_vf *v;
int err = 0;
int i;
@@ -166,9 +166,10 @@ static int ionic_vf_alloc(struct ionic *ionic, int num_vfs)
}
ionic->num_vfs++;
+
/* ignore failures from older FW, we just won't get stats */
- (void)ionic_set_vf_config(ionic, i, IONIC_VF_ATTR_STATSADDR,
- (u8 *)&v->stats_pa);
+ vfc.stats_pa = cpu_to_le64(v->stats_pa);
+ (void)ionic_set_vf_config(ionic, i, &vfc);
}
out:
@@ -331,6 +332,9 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_deregister_lifs;
}
+ mod_timer(&ionic->watchdog_timer,
+ round_jiffies(jiffies + ionic->watchdog_period));
+
return 0;
err_out_deregister_lifs:
@@ -348,7 +352,6 @@ err_out_port_reset:
err_out_reset:
ionic_reset(ionic);
err_out_teardown:
- del_timer_sync(&ionic->watchdog_timer);
pci_clear_master(pdev);
/* Don't fail the probe for these errors, keep
* the hw interface around for inspection
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index d57e80d44c9d..9d0514cfeb5c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -33,7 +33,8 @@ static void ionic_watchdog_cb(struct timer_list *t)
!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
- if (test_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state)) {
+ if (test_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state) &&
+ !test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work) {
netdev_err(lif->netdev, "rxmode change dropped\n");
@@ -46,6 +47,24 @@ static void ionic_watchdog_cb(struct timer_list *t)
}
}
+static void ionic_watchdog_init(struct ionic *ionic)
+{
+ struct ionic_dev *idev = &ionic->idev;
+
+ timer_setup(&ionic->watchdog_timer, ionic_watchdog_cb, 0);
+ ionic->watchdog_period = IONIC_WATCHDOG_SECS * HZ;
+
+ /* set times to ensure the first check will proceed */
+ atomic_long_set(&idev->last_check_time, jiffies - 2 * HZ);
+ idev->last_hb_time = jiffies - 2 * ionic->watchdog_period;
+ /* init as ready, so no transition if the first check succeeds */
+ idev->last_fw_hb = 0;
+ idev->fw_hb_ready = true;
+ idev->fw_status_ready = true;
+ idev->fw_generation = IONIC_FW_STS_F_GENERATION &
+ ioread8(&idev->dev_info_regs->fw_status);
+}
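Backdating last_check_time and last_hb_time by two intervals is what lets the very first heartbeat check proceed: the rate limiter in ionic_heartbeat_check() compares against "last time plus one period", and a timestamp two periods in the past always fails that comparison. Schematically (a sketch of the check, not the exact source):

/* With last_check_time backdated to jiffies - 2 * HZ at init: */
if (time_before(check_time, last_check_time + HZ))
	return 0;	/* rate-limited; false on the first call after init */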
+
void ionic_init_devinfo(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
@@ -109,21 +128,7 @@ int ionic_dev_setup(struct ionic *ionic)
return -EFAULT;
}
- timer_setup(&ionic->watchdog_timer, ionic_watchdog_cb, 0);
- ionic->watchdog_period = IONIC_WATCHDOG_SECS * HZ;
-
- /* set times to ensure the first check will proceed */
- atomic_long_set(&idev->last_check_time, jiffies - 2 * HZ);
- idev->last_hb_time = jiffies - 2 * ionic->watchdog_period;
- /* init as ready, so no transition if the first check succeeds */
- idev->last_fw_hb = 0;
- idev->fw_hb_ready = true;
- idev->fw_status_ready = true;
- idev->fw_generation = IONIC_FW_STS_F_GENERATION &
- ioread8(&idev->dev_info_regs->fw_status);
-
- mod_timer(&ionic->watchdog_timer,
- round_jiffies(jiffies + ionic->watchdog_period));
+ ionic_watchdog_init(ionic);
idev->db_pages = bar->vaddr;
idev->phy_db_pages = bar->bus_addr;
@@ -132,10 +137,21 @@ int ionic_dev_setup(struct ionic *ionic)
}
/* Devcmd Interface */
+bool ionic_is_fw_running(struct ionic_dev *idev)
+{
+ u8 fw_status = ioread8(&idev->dev_info_regs->fw_status);
+
+ /* firmware is useful only if the running bit is set and
+ * fw_status != 0xff (bad PCI read)
+ */
+ return (fw_status != 0xff) && (fw_status & IONIC_FW_STS_F_RUNNING);
+}
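The fw_status != 0xff half of this test is the usual guard against a device that has dropped off the bus: a failed PCI read completes as all-ones rather than an error, so 0xff in a status register must be treated as "device gone", not as a set of valid flags. The same guard in generic form (names illustrative):

/* Common "did this PCI read really reach the device?" guard (sketch). */
u8 status = ioread8(status_reg);

if (status == 0xff)	/* master abort / surprise removal reads back ~0 */
	return false;	/* none of the bits can be trusted */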
+
int ionic_heartbeat_check(struct ionic *ionic)
{
- struct ionic_dev *idev = &ionic->idev;
unsigned long check_time, last_check_time;
+ struct ionic_dev *idev = &ionic->idev;
+ struct ionic_lif *lif = ionic->lif;
bool fw_status_ready = true;
bool fw_hb_ready;
u8 fw_generation;
@@ -155,13 +171,10 @@ do_check_time:
goto do_check_time;
}
- /* firmware is useful only if the running bit is set and
- * fw_status != 0xff (bad PCI read)
- * If fw_status is not ready don't bother with the generation.
- */
fw_status = ioread8(&idev->dev_info_regs->fw_status);
- if (fw_status == 0xff || !(fw_status & IONIC_FW_STS_F_RUNNING)) {
+ /* If fw_status is not ready don't bother with the generation */
+ if (!ionic_is_fw_running(idev)) {
fw_status_ready = false;
} else {
fw_generation = fw_status & IONIC_FW_STS_F_GENERATION;
@@ -176,26 +189,40 @@ do_check_time:
* the down, the next watchdog will see the fw is up
* and the generation value stable, so will trigger
* the fw-up activity.
+ *
+ * If we had already moved to FW_RESET from a RESET event,
+ * it is possible that we never saw the fw_status go to 0,
+ * so we fake the current idev->fw_status_ready here to
+ * force the transition and get FW up again.
*/
- fw_status_ready = false;
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ idev->fw_status_ready = false; /* go to running */
+ else
+ fw_status_ready = false; /* go to down */
}
}
+ dev_dbg(ionic->dev, "fw_status 0x%02x ready %d idev->ready %d last_hb 0x%x state 0x%02lx\n",
+ fw_status, fw_status_ready, idev->fw_status_ready,
+ idev->last_fw_hb, lif->state[0]);
+
/* is this a transition? */
- if (fw_status_ready != idev->fw_status_ready) {
- struct ionic_lif *lif = ionic->lif;
+ if (fw_status_ready != idev->fw_status_ready &&
+ !test_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
bool trigger = false;
idev->fw_status_ready = fw_status_ready;
- if (!fw_status_ready) {
- dev_info(ionic->dev, "FW stopped %u\n", fw_status);
- if (lif && !test_bit(IONIC_LIF_F_FW_RESET, lif->state))
- trigger = true;
- } else {
- dev_info(ionic->dev, "FW running %u\n", fw_status);
- if (lif && test_bit(IONIC_LIF_F_FW_RESET, lif->state))
- trigger = true;
+ if (!fw_status_ready &&
+ !test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
+ !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
+ dev_info(ionic->dev, "FW stopped 0x%02x\n", fw_status);
+ trigger = true;
+
+ } else if (fw_status_ready &&
+ test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
+ dev_info(ionic->dev, "FW running 0x%02x\n", fw_status);
+ trigger = true;
}
if (trigger) {
@@ -210,12 +237,14 @@ do_check_time:
}
}
- if (!fw_status_ready)
+ if (!idev->fw_status_ready)
return -ENXIO;
- /* wait at least one watchdog period since the last heartbeat */
+ /* Because of some variability in the actual FW heartbeat, we
+ * wait longer than the DEVCMD_TIMEOUT before checking again.
+ */
last_check_time = idev->last_hb_time;
- if (time_before(check_time, last_check_time + ionic->watchdog_period))
+ if (time_before(check_time, last_check_time + DEVCMD_TIMEOUT * 2 * HZ))
return 0;
fw_hb = ioread32(&idev->dev_info_regs->fw_heartbeat);
@@ -392,60 +421,63 @@ void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type)
}
/* VF commands */
-int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data)
+int ionic_set_vf_config(struct ionic *ionic, int vf,
+ struct ionic_vf_setattr_cmd *vfc)
{
union ionic_dev_cmd cmd = {
.vf_setattr.opcode = IONIC_CMD_VF_SETATTR,
- .vf_setattr.attr = attr,
+ .vf_setattr.attr = vfc->attr,
.vf_setattr.vf_index = cpu_to_le16(vf),
};
int err;
+ memcpy(cmd.vf_setattr.pad, vfc->pad, sizeof(vfc->pad));
+
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_go(&ionic->idev, &cmd);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ return err;
+}
+
+int ionic_dev_cmd_vf_getattr(struct ionic *ionic, int vf, u8 attr,
+ struct ionic_vf_getattr_comp *comp)
+{
+ union ionic_dev_cmd cmd = {
+ .vf_getattr.opcode = IONIC_CMD_VF_GETATTR,
+ .vf_getattr.attr = attr,
+ .vf_getattr.vf_index = cpu_to_le16(vf),
+ };
+ int err;
+
+ if (vf >= ionic->num_vfs)
+ return -EINVAL;
+
switch (attr) {
case IONIC_VF_ATTR_SPOOFCHK:
- cmd.vf_setattr.spoofchk = *data;
- dev_dbg(ionic->dev, "%s: vf %d spoof %d\n",
- __func__, vf, *data);
- break;
case IONIC_VF_ATTR_TRUST:
- cmd.vf_setattr.trust = *data;
- dev_dbg(ionic->dev, "%s: vf %d trust %d\n",
- __func__, vf, *data);
- break;
case IONIC_VF_ATTR_LINKSTATE:
- cmd.vf_setattr.linkstate = *data;
- dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n",
- __func__, vf, *data);
- break;
case IONIC_VF_ATTR_MAC:
- ether_addr_copy(cmd.vf_setattr.macaddr, data);
- dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n",
- __func__, vf, data);
- break;
case IONIC_VF_ATTR_VLAN:
- cmd.vf_setattr.vlanid = cpu_to_le16(*(u16 *)data);
- dev_dbg(ionic->dev, "%s: vf %d vlan %d\n",
- __func__, vf, *(u16 *)data);
- break;
case IONIC_VF_ATTR_RATE:
- cmd.vf_setattr.maxrate = cpu_to_le32(*(u32 *)data);
- dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n",
- __func__, vf, *(u32 *)data);
break;
case IONIC_VF_ATTR_STATSADDR:
- cmd.vf_setattr.stats_pa = cpu_to_le64(*(u64 *)data);
- dev_dbg(ionic->dev, "%s: vf %d stats_pa 0x%08llx\n",
- __func__, vf, *(u64 *)data);
- break;
default:
return -EINVAL;
}
mutex_lock(&ionic->dev_cmd_lock);
ionic_dev_cmd_go(&ionic->idev, &cmd);
- err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ err = ionic_dev_cmd_wait_nomsg(ionic, DEVCMD_TIMEOUT);
+ memcpy_fromio(comp, &ionic->idev.dev_cmd_regs->comp.vf_getattr,
+ sizeof(*comp));
mutex_unlock(&ionic->dev_cmd_lock);
+ if (err && comp->status != IONIC_RC_ENOSUPP)
+ ionic_dev_cmd_dev_err_print(ionic, cmd.vf_getattr.opcode,
+ comp->status, err);
+
return err;
}
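
The memcpy of vf_setattr.pad above is what makes the new typed interface work: pad is sized to cover every member of the attribute union, so copying it carries whichever field the caller filled in, regardless of attribute type. A stand-alone sketch with a hypothetical layout (not the real ionic_vf_setattr_cmd):

/* Copying a command union through its raw pad bytes; hypothetical
 * layout, illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct setattr_cmd {
	uint8_t opcode;
	uint8_t attr;
	uint16_t vf_index;
	union {
		uint8_t  macaddr[6];
		uint16_t vlanid;
		uint32_t maxrate;
		uint8_t  pad[60];	/* covers every union member */
	};
};

int main(void)
{
	struct setattr_cmd from = { .attr = 2, .vlanid = 100 };
	struct setattr_cmd to = { .opcode = 1, .attr = from.attr };

	/* copying pad carries whichever member the caller filled */
	memcpy(to.pad, from.pad, sizeof(from.pad));
	printf("vlanid carried over: %u\n", (unsigned)to.vlanid);
	return 0;
}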
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index e5acf3bd62b2..563c302eb033 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -318,7 +318,10 @@ void ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, u8 an_enable);
void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type);
void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type);
-int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data);
+int ionic_set_vf_config(struct ionic *ionic, int vf,
+ struct ionic_vf_setattr_cmd *vfc);
+int ionic_dev_cmd_vf_getattr(struct ionic *ionic, int vf, u8 attr,
+ struct ionic_vf_getattr_comp *comp);
void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
u16 lif_type, u8 qtype, u8 qver);
void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver);
@@ -353,5 +356,6 @@ void ionic_q_rewind(struct ionic_queue *q, struct ionic_desc_info *start);
void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
unsigned int stop_index);
int ionic_heartbeat_check(struct ionic *ionic);
+bool ionic_is_fw_running(struct ionic_dev *idev);
#endif /* _IONIC_DEV_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 386a5cf1e224..01c22701482d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -74,10 +74,10 @@ static void ionic_get_drvinfo(struct net_device *netdev,
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
- strlcpy(drvinfo->driver, IONIC_DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, ionic->idev.dev_info.fw_version,
+ strscpy(drvinfo->driver, IONIC_DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, ionic->idev.dev_info.fw_version,
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, ionic_bus_info(ionic),
+ strscpy(drvinfo->bus_info, ionic_bus_info(ionic),
sizeof(drvinfo->bus_info));
}
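
The strlcpy() -> strscpy() conversions are behavior-safe: strscpy() never reads the source past what fits in the destination and reports truncation as -E2BIG instead of returning the full source length. A user-space model of its semantics (illustrative, not the kernel implementation):

/* User-space model of strscpy() semantics. Illustrative only. */
#include <stdio.h>
#include <string.h>

#define E2BIG 7

static long model_strscpy(char *dst, const char *src, size_t size)
{
	size_t i;

	if (!size)
		return -E2BIG;
	for (i = 0; i < size - 1 && src[i]; i++)
		dst[i] = src[i];
	dst[i] = '\0';
	/* report truncation instead of the full source length */
	return src[i] ? -E2BIG : (long)i;
}

int main(void)
{
	char buf[8];

	printf("%ld '%s'\n", model_strscpy(buf, "ionic", sizeof(buf)), buf);
	printf("%ld '%s'\n", model_strscpy(buf, "averylongname", sizeof(buf)), buf);
	return 0;	/* prints: 5 'ionic' then -7 'averylo' */
}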
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 278610ed7227..4a90f611c611 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -759,7 +759,7 @@ enum ionic_txq_desc_opcode {
* IONIC_TXQ_DESC_OPCODE_CSUM_HW:
* Offload 16-bit checksum computation to hardware.
* If @csum_l3 is set then the packet's L3 checksum is
- * updated. Similarly, if @csum_l4 is set the the L4
+ * updated. Similarly, if @csum_l4 is set the L4
* checksum is updated. If @encap is set then encap header
* checksums are also updated.
*
@@ -1368,9 +1368,9 @@ union ionic_port_config {
* @status: link status (enum ionic_port_oper_status)
* @id: port id
* @speed: link speed (in Mbps)
- * @link_down_count: number of times link went from from up to down
+ * @link_down_count: number of times link went from up to down
* @fec_type: fec type (enum ionic_port_fec_type)
- * @xcvr: tranceiver status
+ * @xcvr: transceiver status
*/
struct ionic_port_status {
__le32 id;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 2ff7be17e5af..f3568901eb91 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -12,6 +12,7 @@
#include <linux/pci.h>
#include <linux/cpumask.h>
#include <linux/crash_dump.h>
+#include <linux/vmalloc.h>
#include "ionic.h"
#include "ionic_bus.h"
@@ -393,11 +394,11 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
ionic_qcq_intr_free(lif, qcq);
if (qcq->cq.info) {
- devm_kfree(dev, qcq->cq.info);
+ vfree(qcq->cq.info);
qcq->cq.info = NULL;
}
if (qcq->q.info) {
- devm_kfree(dev, qcq->q.info);
+ vfree(qcq->q.info);
qcq->q.info = NULL;
}
}
@@ -528,8 +529,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
new->q.dev = dev;
new->flags = flags;
- new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info),
- GFP_KERNEL);
+ new->q.info = vzalloc(num_descs * sizeof(*new->q.info));
if (!new->q.info) {
netdev_err(lif->netdev, "Cannot allocate queue info\n");
err = -ENOMEM;
@@ -550,8 +550,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
if (err)
goto err_out;
- new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
- GFP_KERNEL);
+ new->cq.info = vzalloc(num_descs * sizeof(*new->cq.info));
if (!new->cq.info) {
netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
err = -ENOMEM;
@@ -640,14 +639,14 @@ err_out_free_cq:
err_out_free_q:
dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
err_out_free_cq_info:
- devm_kfree(dev, new->cq.info);
+ vfree(new->cq.info);
err_out_free_irq:
if (flags & IONIC_QCQ_F_INTR) {
devm_free_irq(dev, new->intr.vector, &new->napi);
ionic_intr_free(lif->ionic, new->intr.index);
}
err_out_free_q_info:
- devm_kfree(dev, new->q.info);
+ vfree(new->q.info);
err_out_free_qcq:
devm_kfree(dev, new);
err_out:
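
The devm_kcalloc() -> vzalloc() switch trades physically contiguous, order-limited allocations for virtually contiguous page-granular ones, which matters when num_descs is large; the cost is that every error and teardown path must now call vfree() by hand, as the hunks above do. One caveat: the open-coded multiplication gives up kcalloc()'s overflow check. A kernel-style sketch of the allocation shape using array_size() to keep it (hypothetical names):

/* Kernel-style sketch; array_size() from <linux/overflow.h>
 * restores the overflow check the open-coded multiply drops.
 */
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

struct q_info {
	u64 desc;
	void *buf;
};

static struct q_info *alloc_q_info(unsigned int num_descs)
{
	/* zeroed, virtually contiguous: no high-order pages needed */
	return vzalloc(array_size(num_descs, sizeof(struct q_info)));
}

static void free_q_info(struct q_info *info)
{
	vfree(info);	/* NULL-safe */
}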
@@ -1112,12 +1111,17 @@ static bool ionic_notifyq_service(struct ionic_cq *cq,
ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
break;
case IONIC_EVENT_RESET:
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work) {
- netdev_err(lif->netdev, "Reset event dropped\n");
- } else {
- work->type = IONIC_DW_TYPE_LIF_RESET;
- ionic_lif_deferred_enqueue(&lif->deferred, work);
+ if (lif->ionic->idev.fw_status_ready &&
+ !test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
+ !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ netdev_err(lif->netdev, "Reset event dropped\n");
+ clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
+ } else {
+ work->type = IONIC_DW_TYPE_LIF_RESET;
+ ionic_lif_deferred_enqueue(&lif->deferred, work);
+ }
}
break;
default:
@@ -1782,7 +1786,7 @@ static void ionic_lif_quiesce(struct ionic_lif *lif)
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
- netdev_err(lif->netdev, "lif quiesce failed %d\n", err);
+ netdev_dbg(lif->netdev, "lif quiesce failed %d\n", err);
}
static void ionic_txrx_disable(struct ionic_lif *lif)
@@ -2152,6 +2156,76 @@ static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd
}
}
+static int ionic_update_cached_vf_config(struct ionic *ionic, int vf)
+{
+ struct ionic_vf_getattr_comp comp = { 0 };
+ int err;
+ u8 attr;
+
+ attr = IONIC_VF_ATTR_VLAN;
+ err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
+ if (err && comp.status != IONIC_RC_ENOSUPP)
+ goto err_out;
+ if (!err)
+ ionic->vfs[vf].vlanid = comp.vlanid;
+
+ attr = IONIC_VF_ATTR_SPOOFCHK;
+ err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
+ if (err && comp.status != IONIC_RC_ENOSUPP)
+ goto err_out;
+ if (!err)
+ ionic->vfs[vf].spoofchk = comp.spoofchk;
+
+ attr = IONIC_VF_ATTR_LINKSTATE;
+ err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
+ if (err && comp.status != IONIC_RC_ENOSUPP)
+ goto err_out;
+ if (!err) {
+ switch (comp.linkstate) {
+ case IONIC_VF_LINK_STATUS_UP:
+ ionic->vfs[vf].linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ break;
+ case IONIC_VF_LINK_STATUS_DOWN:
+ ionic->vfs[vf].linkstate = IFLA_VF_LINK_STATE_DISABLE;
+ break;
+ case IONIC_VF_LINK_STATUS_AUTO:
+ ionic->vfs[vf].linkstate = IFLA_VF_LINK_STATE_AUTO;
+ break;
+ default:
+ dev_warn(ionic->dev, "Unexpected link state %u\n", comp.linkstate);
+ break;
+ }
+ }
+
+ attr = IONIC_VF_ATTR_RATE;
+ err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
+ if (err && comp.status != IONIC_RC_ENOSUPP)
+ goto err_out;
+ if (!err)
+ ionic->vfs[vf].maxrate = comp.maxrate;
+
+ attr = IONIC_VF_ATTR_TRUST;
+ err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
+ if (err && comp.status != IONIC_RC_ENOSUPP)
+ goto err_out;
+ if (!err)
+ ionic->vfs[vf].trusted = comp.trust;
+
+ attr = IONIC_VF_ATTR_MAC;
+ err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
+ if (err && comp.status != IONIC_RC_ENOSUPP)
+ goto err_out;
+ if (!err)
+ ether_addr_copy(ionic->vfs[vf].macaddr, comp.macaddr);
+
+err_out:
+ if (err)
+ dev_err(ionic->dev, "Failed to get %s for VF %d\n",
+ ionic_vf_attr_to_str(attr), vf);
+
+ return err;
+}
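
The error policy in ionic_update_cached_vf_config() is worth spelling out: a transport failure aborts the refresh, IONIC_RC_ENOSUPP is tolerated so older firmware simply leaves that cached field untouched, and only a clean completion overwrites the cache. A stand-alone sketch of that three-way split (stubbed device call, illustrative only):

/* getattr() is a stub for the device call; -95 mimics EOPNOTSUPP. */
#include <stdio.h>

#define RC_OK      0
#define RC_ENOSUPP 1

static int getattr(int attr, int *status)
{
	if (attr == 2) {		/* pretend attr 2 is unsupported */
		*status = RC_ENOSUPP;
		return -95;
	}
	*status = RC_OK;
	return 0;
}

int main(void)
{
	for (int attr = 0; attr < 4; attr++) {
		int status, err = getattr(attr, &status);

		if (err && status != RC_ENOSUPP)
			return err;	/* real failure: abort refresh */
		if (!err)
			printf("cached attr %d\n", attr);
		/* ENOSUPP: keep the old cached value, keep going */
	}
	return 0;
}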
+
static int ionic_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivf)
{
@@ -2167,14 +2241,18 @@ static int ionic_get_vf_config(struct net_device *netdev,
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ivf->vf = vf;
- ivf->vlan = le16_to_cpu(ionic->vfs[vf].vlanid);
- ivf->qos = 0;
- ivf->spoofchk = ionic->vfs[vf].spoofchk;
- ivf->linkstate = ionic->vfs[vf].linkstate;
- ivf->max_tx_rate = le32_to_cpu(ionic->vfs[vf].maxrate);
- ivf->trusted = ionic->vfs[vf].trusted;
- ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
+ ivf->vf = vf;
+ ivf->qos = 0;
+
+ ret = ionic_update_cached_vf_config(ionic, vf);
+ if (!ret) {
+ ivf->vlan = le16_to_cpu(ionic->vfs[vf].vlanid);
+ ivf->spoofchk = ionic->vfs[vf].spoofchk;
+ ivf->linkstate = ionic->vfs[vf].linkstate;
+ ivf->max_tx_rate = le32_to_cpu(ionic->vfs[vf].maxrate);
+ ivf->trusted = ionic->vfs[vf].trusted;
+ ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
+ }
}
up_read(&ionic->vf_op_lock);
@@ -2220,6 +2298,7 @@ static int ionic_get_vf_stats(struct net_device *netdev, int vf,
static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_MAC };
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
int ret;
@@ -2235,7 +2314,11 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
+ ether_addr_copy(vfc.macaddr, mac);
+ dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n",
+ __func__, vf, vfc.macaddr);
+
+ ret = ionic_set_vf_config(ionic, vf, &vfc);
if (!ret)
ether_addr_copy(ionic->vfs[vf].macaddr, mac);
}
@@ -2247,6 +2330,7 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
u8 qos, __be16 proto)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_VLAN };
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
int ret;
@@ -2269,8 +2353,11 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ret = ionic_set_vf_config(ionic, vf,
- IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
+ vfc.vlanid = cpu_to_le16(vlan);
+ dev_dbg(ionic->dev, "%s: vf %d vlan %d\n",
+ __func__, vf, le16_to_cpu(vfc.vlanid));
+
+ ret = ionic_set_vf_config(ionic, vf, &vfc);
if (!ret)
ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
}
@@ -2282,6 +2369,7 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
static int ionic_set_vf_rate(struct net_device *netdev, int vf,
int tx_min, int tx_max)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_RATE };
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
int ret;
@@ -2298,8 +2386,11 @@ static int ionic_set_vf_rate(struct net_device *netdev, int vf,
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ret = ionic_set_vf_config(ionic, vf,
- IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
+ vfc.maxrate = cpu_to_le32(tx_max);
+ dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n",
+ __func__, vf, le32_to_cpu(vfc.maxrate));
+
+ ret = ionic_set_vf_config(ionic, vf, &vfc);
if (!ret)
lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
}
@@ -2310,9 +2401,9 @@ static int ionic_set_vf_rate(struct net_device *netdev, int vf,
static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_SPOOFCHK };
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
- u8 data = set; /* convert to u8 for config */
int ret;
if (!netif_device_present(netdev))
@@ -2323,10 +2414,13 @@ static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ret = ionic_set_vf_config(ionic, vf,
- IONIC_VF_ATTR_SPOOFCHK, &data);
+ vfc.spoofchk = set;
+ dev_dbg(ionic->dev, "%s: vf %d spoof %d\n",
+ __func__, vf, vfc.spoofchk);
+
+ ret = ionic_set_vf_config(ionic, vf, &vfc);
if (!ret)
- ionic->vfs[vf].spoofchk = data;
+ ionic->vfs[vf].spoofchk = set;
}
up_write(&ionic->vf_op_lock);
@@ -2335,9 +2429,9 @@ static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_TRUST };
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
- u8 data = set; /* convert to u8 for config */
int ret;
if (!netif_device_present(netdev))
@@ -2348,10 +2442,13 @@ static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ret = ionic_set_vf_config(ionic, vf,
- IONIC_VF_ATTR_TRUST, &data);
+ vfc.trust = set;
+ dev_dbg(ionic->dev, "%s: vf %d trust %d\n",
+ __func__, vf, vfc.trust);
+
+ ret = ionic_set_vf_config(ionic, vf, &vfc);
if (!ret)
- ionic->vfs[vf].trusted = data;
+ ionic->vfs[vf].trusted = set;
}
up_write(&ionic->vf_op_lock);
@@ -2360,20 +2457,21 @@ static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_LINKSTATE };
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
- u8 data;
+ u8 vfls;
int ret;
switch (set) {
case IFLA_VF_LINK_STATE_ENABLE:
- data = IONIC_VF_LINK_STATUS_UP;
+ vfls = IONIC_VF_LINK_STATUS_UP;
break;
case IFLA_VF_LINK_STATE_DISABLE:
- data = IONIC_VF_LINK_STATUS_DOWN;
+ vfls = IONIC_VF_LINK_STATUS_DOWN;
break;
case IFLA_VF_LINK_STATE_AUTO:
- data = IONIC_VF_LINK_STATUS_AUTO;
+ vfls = IONIC_VF_LINK_STATUS_AUTO;
break;
default:
return -EINVAL;
@@ -2387,8 +2485,11 @@ static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ret = ionic_set_vf_config(ionic, vf,
- IONIC_VF_ATTR_LINKSTATE, &data);
+ vfc.linkstate = vfls;
+ dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n",
+ __func__, vf, vfc.linkstate);
+
+ ret = ionic_set_vf_config(ionic, vf, &vfc);
if (!ret)
ionic->vfs[vf].linkstate = set;
}
@@ -2835,6 +2936,7 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
mutex_unlock(&lif->queue_lock);
+ clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
dev_info(ionic->dev, "FW Down: LIFs stopped\n");
}
@@ -2934,8 +3036,6 @@ void ionic_lif_free(struct ionic_lif *lif)
/* unmap doorbell page */
ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
lif->kern_dbpage = NULL;
- kfree(lif->dbid_inuse);
- lif->dbid_inuse = NULL;
mutex_destroy(&lif->config_lock);
mutex_destroy(&lif->queue_lock);
@@ -3135,22 +3235,12 @@ int ionic_lif_init(struct ionic_lif *lif)
return -EINVAL;
}
- lif->dbid_inuse = bitmap_zalloc(lif->dbid_count, GFP_KERNEL);
- if (!lif->dbid_inuse) {
- dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
- return -ENOMEM;
- }
-
- /* first doorbell id reserved for kernel (dbid aka pid == zero) */
- set_bit(0, lif->dbid_inuse);
lif->kern_pid = 0;
-
dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
if (!lif->kern_dbpage) {
dev_err(dev, "Cannot map dbpage, aborting\n");
- err = -ENOMEM;
- goto err_out_free_dbid;
+ return -ENOMEM;
}
err = ionic_lif_adminq_init(lif);
@@ -3186,15 +3276,13 @@ int ionic_lif_init(struct ionic_lif *lif)
return 0;
err_out_notifyq_deinit:
+ napi_disable(&lif->adminqcq->napi);
ionic_lif_qcq_deinit(lif, lif->notifyqcq);
err_out_adminq_deinit:
ionic_lif_qcq_deinit(lif, lif->adminqcq);
ionic_lif_reset(lif);
ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
lif->kern_dbpage = NULL;
-err_out_free_dbid:
- kfree(lif->dbid_inuse);
- lif->dbid_inuse = NULL;
return err;
}
@@ -3214,7 +3302,7 @@ static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
},
};
- strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
+ strscpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
sizeof(ctx.cmd.lif_setattr.name));
ionic_adminq_post_wait(lif, &ctx);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 9f7ab2f17f93..a53984bf3544 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -135,6 +135,7 @@ enum ionic_lif_state_flags {
IONIC_LIF_F_LINK_CHECK_REQUESTED,
IONIC_LIF_F_FILTER_SYNC_NEEDED,
IONIC_LIF_F_FW_RESET,
+ IONIC_LIF_F_FW_STOPPING,
IONIC_LIF_F_SPLIT_INTR,
IONIC_LIF_F_BROKEN,
IONIC_LIF_F_TX_DIM_INTR,
@@ -213,7 +214,6 @@ struct ionic_lif {
u32 rx_coalesce_hw; /* what the hw is using */
u32 tx_coalesce_usecs; /* what the user asked for */
u32 tx_coalesce_hw; /* what the hw is using */
- unsigned long *dbid_inuse;
unsigned int dbid_count;
struct ionic_phc *phc;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 875f4ec42efe..4029b4e021f8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -188,6 +188,28 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
}
}
+const char *ionic_vf_attr_to_str(enum ionic_vf_attr attr)
+{
+ switch (attr) {
+ case IONIC_VF_ATTR_SPOOFCHK:
+ return "IONIC_VF_ATTR_SPOOFCHK";
+ case IONIC_VF_ATTR_TRUST:
+ return "IONIC_VF_ATTR_TRUST";
+ case IONIC_VF_ATTR_LINKSTATE:
+ return "IONIC_VF_ATTR_LINKSTATE";
+ case IONIC_VF_ATTR_MAC:
+ return "IONIC_VF_ATTR_MAC";
+ case IONIC_VF_ATTR_VLAN:
+ return "IONIC_VF_ATTR_VLAN";
+ case IONIC_VF_ATTR_RATE:
+ return "IONIC_VF_ATTR_RATE";
+ case IONIC_VF_ATTR_STATSADDR:
+ return "IONIC_VF_ATTR_STATSADDR";
+ default:
+ return "IONIC_VF_ATTR_UNKNOWN";
+ }
+}
+
static void ionic_adminq_flush(struct ionic_lif *lif)
{
struct ionic_desc_info *desc_info;
@@ -215,9 +237,13 @@ static void ionic_adminq_flush(struct ionic_lif *lif)
void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
u8 status, int err)
{
+ const char *stat_str;
+
+ stat_str = (err == -ETIMEDOUT) ? "TIMEOUT" :
+ ionic_error_to_str(status);
+
netdev_err(lif->netdev, "%s (%d) failed: %s (%d)\n",
- ionic_opcode_to_str(opcode), opcode,
- ionic_error_to_str(status), err);
+ ionic_opcode_to_str(opcode), opcode, stat_str, err);
}
static int ionic_adminq_check_err(struct ionic_lif *lif,
@@ -318,6 +344,7 @@ int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
if (do_msg && !test_bit(IONIC_LIF_F_FW_RESET, lif->state))
netdev_err(netdev, "Posting of %s (%d) failed: %d\n",
name, ctx->cmd.cmd.opcode, err);
+ ctx->comp.comp.status = IONIC_RC_ERROR;
return err;
}
@@ -331,11 +358,15 @@ int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
if (remaining)
break;
- /* interrupt the wait if FW stopped */
- if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
+ /* force a check of FW status and break out if FW reset */
+ (void)ionic_heartbeat_check(lif->ionic);
+ if ((test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
+ !lif->ionic->idev.fw_status_ready) ||
+ test_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
if (do_msg)
- netdev_err(netdev, "%s (%d) interrupted, FW in reset\n",
- name, ctx->cmd.cmd.opcode);
+ netdev_warn(netdev, "%s (%d) interrupted, FW in reset\n",
+ name, ctx->cmd.cmd.opcode);
+ ctx->comp.comp.status = IONIC_RC_ERROR;
return -ENXIO;
}
@@ -370,21 +401,34 @@ int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *
static void ionic_dev_cmd_clean(struct ionic *ionic)
{
- union __iomem ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs;
+ struct ionic_dev *idev = &ionic->idev;
- iowrite32(0, &regs->doorbell);
- memset_io(&regs->cmd, 0, sizeof(regs->cmd));
+ iowrite32(0, &idev->dev_cmd_regs->doorbell);
+ memset_io(&idev->dev_cmd_regs->cmd, 0, sizeof(idev->dev_cmd_regs->cmd));
}
-int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
+void ionic_dev_cmd_dev_err_print(struct ionic *ionic, u8 opcode, u8 status,
+ int err)
+{
+ const char *stat_str;
+
+ stat_str = (err == -ETIMEDOUT) ? "TIMEOUT" :
+ ionic_error_to_str(status);
+
+ dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) failed\n",
+ ionic_opcode_to_str(opcode), opcode, stat_str, err);
+}
+
+static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
+ const bool do_msg)
{
struct ionic_dev *idev = &ionic->idev;
unsigned long start_time;
unsigned long max_wait;
unsigned long duration;
+ int done = 0;
+ bool fw_up;
int opcode;
- int hb = 0;
- int done;
int err;
/* Wait for dev cmd to complete, retrying if we get EAGAIN,
@@ -394,31 +438,24 @@ int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
try_again:
opcode = readb(&idev->dev_cmd_regs->cmd.cmd.opcode);
start_time = jiffies;
- do {
+ for (fw_up = ionic_is_fw_running(idev);
+ !done && fw_up && time_before(jiffies, max_wait);
+ fw_up = ionic_is_fw_running(idev)) {
done = ionic_dev_cmd_done(idev);
if (done)
break;
usleep_range(100, 200);
-
- /* Don't check the heartbeat on FW_CONTROL commands as they are
- * notorious for interrupting the firmware's heartbeat update.
- */
- if (opcode != IONIC_CMD_FW_CONTROL)
- hb = ionic_heartbeat_check(ionic);
- } while (!done && !hb && time_before(jiffies, max_wait));
+ }
duration = jiffies - start_time;
dev_dbg(ionic->dev, "DEVCMD %s (%d) done=%d took %ld secs (%ld jiffies)\n",
ionic_opcode_to_str(opcode), opcode,
done, duration / HZ, duration);
- if (!done && hb) {
- /* It is possible (but unlikely) that FW was busy and missed a
- * heartbeat check but is still alive and will process this
- * request, so don't clean the dev_cmd in this case.
- */
- dev_dbg(ionic->dev, "DEVCMD %s (%d) failed - FW halted\n",
- ionic_opcode_to_str(opcode), opcode);
+ if (!done && !fw_up) {
+ ionic_dev_cmd_clean(ionic);
+ dev_warn(ionic->dev, "DEVCMD %s (%d) interrupted - FW is down\n",
+ ionic_opcode_to_str(opcode), opcode);
return -ENXIO;
}
@@ -444,9 +481,9 @@ try_again:
}
if (!(opcode == IONIC_CMD_FW_CONTROL && err == IONIC_RC_EAGAIN))
- dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) failed\n",
- ionic_opcode_to_str(opcode), opcode,
- ionic_error_to_str(err), err);
+ if (do_msg)
+ ionic_dev_cmd_dev_err_print(ionic, opcode, err,
+ ionic_error_to_errno(err));
return ionic_error_to_errno(err);
}
@@ -454,6 +491,16 @@ try_again:
return 0;
}
+int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
+{
+ return __ionic_dev_cmd_wait(ionic, max_seconds, true);
+}
+
+int ionic_dev_cmd_wait_nomsg(struct ionic *ionic, unsigned long max_seconds)
+{
+ return __ionic_dev_cmd_wait(ionic, max_seconds, false);
+}
+
int ionic_setup(struct ionic *ionic)
{
int err;
@@ -540,6 +587,9 @@ int ionic_reset(struct ionic *ionic)
struct ionic_dev *idev = &ionic->idev;
int err;
+ if (!ionic_is_fw_running(idev))
+ return 0;
+
mutex_lock(&ionic->dev_cmd_lock);
ionic_dev_cmd_reset(idev);
err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
@@ -612,15 +662,17 @@ int ionic_port_init(struct ionic *ionic)
int ionic_port_reset(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
- int err;
+ int err = 0;
if (!idev->port_info)
return 0;
- mutex_lock(&ionic->dev_cmd_lock);
- ionic_dev_cmd_port_reset(idev);
- err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
- mutex_unlock(&ionic->dev_cmd_lock);
+ if (ionic_is_fw_running(idev)) {
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_port_reset(idev);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+ }
dma_free_coherent(ionic->dev, idev->port_info_sz,
idev->port_info, idev->port_info_pa);
@@ -628,9 +680,6 @@ int ionic_port_reset(struct ionic *ionic)
idev->port_info = NULL;
idev->port_info_pa = 0;
- if (err)
- dev_err(ionic->dev, "Failed to reset port\n");
-
return err;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
index f6e785f949f9..b7363376dfc8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -376,10 +376,24 @@ static int ionic_lif_filter_add(struct ionic_lif *lif,
spin_unlock_bh(&lif->rx_filters.lock);
- if (err == -ENOSPC) {
- if (le16_to_cpu(ctx.cmd.rx_filter_add.match) == IONIC_RX_FILTER_MATCH_VLAN)
- lif->max_vlans = lif->nvlans;
+ /* store the max_vlans limit that we found */
+ if (err == -ENOSPC &&
+ le16_to_cpu(ctx.cmd.rx_filter_add.match) == IONIC_RX_FILTER_MATCH_VLAN)
+ lif->max_vlans = lif->nvlans;
+
+ /* Prevent unnecessary error messages on recoverable
+ * errors as the filter will get retried on the next
+ * sync attempt.
+ */
+ switch (err) {
+ case -ENOSPC:
+ case -ENXIO:
+ case -ETIMEDOUT:
+ case -EAGAIN:
+ case -EBUSY:
return 0;
+ default:
+ break;
}
ionic_adminq_netdev_err_print(lif, ctx.cmd.cmd.opcode,
@@ -494,9 +508,22 @@ static int ionic_lif_filter_del(struct ionic_lif *lif,
spin_unlock_bh(&lif->rx_filters.lock);
if (state != IONIC_FILTER_STATE_NEW) {
- err = ionic_adminq_post_wait(lif, &ctx);
- if (err && err != -EEXIST)
+ err = ionic_adminq_post_wait_nomsg(lif, &ctx);
+
+ switch (err) {
+ /* ignore these errors */
+ case -EEXIST:
+ case -ENXIO:
+ case -ETIMEDOUT:
+ case -EAGAIN:
+ case -EBUSY:
+ case 0:
+ break;
+ default:
+ ionic_adminq_netdev_err_print(lif, ctx.cmd.cmd.opcode,
+ ctx.comp.comp.status, err);
return err;
+ }
}
return 0;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index fd6806b4a1b9..9859a4432985 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -151,7 +151,6 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
IONIC_RX_STAT_DESC(vlan_stripped),
};
-
#define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc)
#define IONIC_NUM_PORT_STATS ARRAY_SIZE(ionic_port_stats_desc)
#define IONIC_NUM_TX_STATS ARRAY_SIZE(ionic_tx_stats_desc)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 94384f5d2a22..f54035455ad6 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -10,7 +10,6 @@
#include "ionic_lif.h"
#include "ionic_txrx.h"
-
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
{
@@ -669,27 +668,37 @@ dma_fail:
return -EIO;
}
+static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info)
+{
+ struct ionic_buf_info *buf_info = desc_info->bufs;
+ struct device *dev = q->dev;
+ unsigned int i;
+
+ if (!desc_info->nbufs)
+ return;
+
+ dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
+ buf_info->len, DMA_TO_DEVICE);
+ buf_info++;
+ for (i = 1; i < desc_info->nbufs; i++, buf_info++)
+ dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
+ buf_info->len, DMA_TO_DEVICE);
+
+ desc_info->nbufs = 0;
+}
+
static void ionic_tx_clean(struct ionic_queue *q,
struct ionic_desc_info *desc_info,
struct ionic_cq_info *cq_info,
void *cb_arg)
{
- struct ionic_buf_info *buf_info = desc_info->bufs;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_qcq *qcq = q_to_qcq(q);
struct sk_buff *skb = cb_arg;
- struct device *dev = q->dev;
- unsigned int i;
u16 qi;
- if (desc_info->nbufs) {
- dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
- buf_info->len, DMA_TO_DEVICE);
- buf_info++;
- for (i = 1; i < desc_info->nbufs; i++, buf_info++)
- dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
- buf_info->len, DMA_TO_DEVICE);
- }
+ ionic_tx_desc_unmap_bufs(q, desc_info);
if (!skb)
return;
@@ -931,8 +940,11 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
err = ionic_tx_tcp_inner_pseudo_csum(skb);
else
err = ionic_tx_tcp_pseudo_csum(skb);
- if (err)
+ if (err) {
+ /* clean up mapping from ionic_tx_map_skb */
+ ionic_tx_desc_unmap_bufs(q, desc_info);
return err;
+ }
if (encap)
hdrlen = skb_inner_transport_header(skb) - skb->data +
@@ -1003,8 +1015,8 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
return 0;
}
-static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
- struct ionic_desc_info *desc_info)
+static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
+ struct ionic_desc_info *desc_info)
{
struct ionic_txq_desc *desc = desc_info->txq_desc;
struct ionic_buf_info *buf_info = desc_info->bufs;
@@ -1038,12 +1050,10 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
stats->crc32_csum++;
else
stats->csum++;
-
- return 0;
}
-static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
- struct ionic_desc_info *desc_info)
+static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
+ struct ionic_desc_info *desc_info)
{
struct ionic_txq_desc *desc = desc_info->txq_desc;
struct ionic_buf_info *buf_info = desc_info->bufs;
@@ -1074,12 +1084,10 @@ static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
desc->csum_offset = 0;
stats->csum_none++;
-
- return 0;
}
-static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
- struct ionic_desc_info *desc_info)
+static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
+ struct ionic_desc_info *desc_info)
{
struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
struct ionic_buf_info *buf_info = &desc_info->bufs[1];
@@ -1093,31 +1101,24 @@ static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
}
stats->frags += skb_shinfo(skb)->nr_frags;
-
- return 0;
}
static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
struct ionic_desc_info *desc_info = &q->info[q->head_idx];
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- int err;
if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
return -EIO;
/* set up the initial descriptor */
if (skb->ip_summed == CHECKSUM_PARTIAL)
- err = ionic_tx_calc_csum(q, skb, desc_info);
+ ionic_tx_calc_csum(q, skb, desc_info);
else
- err = ionic_tx_calc_no_csum(q, skb, desc_info);
- if (err)
- return err;
+ ionic_tx_calc_no_csum(q, skb, desc_info);
/* add frags */
- err = ionic_tx_skb_frags(q, skb, desc_info);
- if (err)
- return err;
+ ionic_tx_skb_frags(q, skb, desc_info);
skb_tx_timestamp(skb);
stats->pkts++;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index cc4ec2bb36db..672480c9d195 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -3098,6 +3098,9 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
continue;
}
+ /* Some flows may leave mcp_handling_status set from a previous run */
+ p_hwfn->mcp_info->mcp_handling_status = 0;
+
rc = qed_calc_hw_mode(p_hwfn);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 0ce37f2460a4..407029a36fa1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -1835,8 +1835,6 @@ struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
if (!allocated_mem)
return NULL;
- memset(allocated_mem, 0, NUM_STORMS * sizeof(struct phys_mem_desc));
-
/* For each Storm, set physical address in RAM */
while (buf_offset < buf_size) {
struct phys_mem_desc *storm_mem_desc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index da1eadabcb41..9fb1fa479d4b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -140,7 +140,7 @@ static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
if (p_hwfn->mcp_info) {
- struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;
+ struct qed_mcp_cmd_elem *p_cmd_elem = NULL, *p_tmp;
kfree(p_hwfn->mcp_info->mfw_mb_cur);
kfree(p_hwfn->mcp_info->mfw_mb_shadow);
@@ -249,6 +249,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
/* Initialize the MFW spinlock */
spin_lock_init(&p_info->cmd_lock);
spin_lock_init(&p_info->link_lock);
+ spin_lock_init(&p_info->unload_lock);
INIT_LIST_HEAD(&p_info->cmd_list);
@@ -614,12 +615,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
usecs);
}
-int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 cmd,
- u32 param,
- u32 *o_mcp_resp,
- u32 *o_mcp_param)
+static int _qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ bool can_sleep)
{
struct qed_mcp_mb_params mb_params;
int rc;
@@ -627,6 +629,7 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = cmd;
mb_params.param = param;
+ mb_params.flags = can_sleep ? QED_MB_FLAG_CAN_SLEEP : 0;
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
@@ -638,6 +641,28 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
+{
+ return (_qed_mcp_cmd(p_hwfn, p_ptt, cmd, param,
+ o_mcp_resp, o_mcp_param, true));
+}
+
+int qed_mcp_cmd_nosleep(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
+{
+ return (_qed_mcp_cmd(p_hwfn, p_ptt, cmd, param,
+ o_mcp_resp, o_mcp_param, false));
+}
+
static int
qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -1071,10 +1096,15 @@ int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return 0;
}
+#define MFW_COMPLETION_MAX_ITER 5000
+#define MFW_COMPLETION_INTERVAL_MS 1
+
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_mb_params mb_params;
+ u32 cnt = MFW_COMPLETION_MAX_ITER;
u32 wol_param;
+ int rc;
switch (p_hwfn->cdev->wol_config) {
case QED_OV_WOL_DISABLED:
@@ -1097,7 +1127,23 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
mb_params.param = wol_param;
mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
- return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ spin_lock_bh(&p_hwfn->mcp_info->unload_lock);
+ set_bit(QED_MCP_BYPASS_PROC_BIT,
+ &p_hwfn->mcp_info->mcp_handling_status);
+ spin_unlock_bh(&p_hwfn->mcp_info->unload_lock);
+
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+
+ while (test_bit(QED_MCP_IN_PROCESSING_BIT,
+ &p_hwfn->mcp_info->mcp_handling_status) && --cnt)
+ msleep(MFW_COMPLETION_INTERVAL_MS);
+
+ if (!cnt)
+ DP_NOTICE(p_hwfn,
+ "Failed to wait MFW event completion after %d msec\n",
+ MFW_COMPLETION_MAX_ITER * MFW_COMPLETION_INTERVAL_MS);
+
+ return rc;
}
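
The unload path now sets a bypass bit, sends the unload request, then waits for any in-flight MFW event handler to drain, polling IN_PROCESSING up to 5000 times at 1 ms. A stand-alone sketch of this bounded-poll shape (stubbed flag, usleep() in place of msleep(), illustrative only):

/* in_processing() is a stub that clears after three polls. */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_ITER    5000
#define INTERVAL_MS 1

static bool in_processing(int iter)
{
	return iter < 3;	/* stub: handler drains after 3 polls */
}

int main(void)
{
	int cnt = MAX_ITER;
	int iter = 0;

	while (in_processing(iter++) && --cnt)
		usleep(INTERVAL_MS * 1000);

	if (!cnt)
		printf("gave up after %d msec\n", MAX_ITER * INTERVAL_MS);
	else
		printf("flag cleared after %d checks\n", iter);
	return 0;
}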
int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -1728,8 +1774,8 @@ static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);
/* Acknowledge the MFW */
- qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
- &param);
+ qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
+ &param);
}
static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -1766,8 +1812,8 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
/* Acknowledge the MFW */
- qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
- &resp, &param);
+ qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
+ &resp, &param);
}
static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn,
@@ -1997,6 +2043,19 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
"Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
+ spin_lock_bh(&p_hwfn->mcp_info->unload_lock);
+ if (test_bit(QED_MCP_BYPASS_PROC_BIT,
+ &p_hwfn->mcp_info->mcp_handling_status)) {
+ spin_unlock_bh(&p_hwfn->mcp_info->unload_lock);
+ DP_INFO(p_hwfn,
+ "Msg [%d] is bypassed on unload flow\n", i);
+ continue;
+ }
+
+ set_bit(QED_MCP_IN_PROCESSING_BIT,
+ &p_hwfn->mcp_info->mcp_handling_status);
+ spin_unlock_bh(&p_hwfn->mcp_info->unload_lock);
+
switch (i) {
case MFW_DRV_MSG_LINK_CHANGE:
qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
@@ -2050,6 +2109,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
rc = -EINVAL;
}
+
+ clear_bit(QED_MCP_IN_PROCESSING_BIT,
+ &p_hwfn->mcp_info->mcp_handling_status);
}
/* ACK everything */
@@ -3675,8 +3737,8 @@ static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
{
int rc;
- rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
- p_mcp_resp, p_mcp_param);
+ rc = qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD,
+ param, p_mcp_resp, p_mcp_param);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 369e1892450a..9bd0565fe8ab 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -393,11 +393,12 @@ int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *p_board_config);
/**
- * qed_mcp_cmd(): General function for sending commands to the MCP
+ * qed_mcp_cmd(): Sleepable function for sending commands to the MCP
 * mailbox. It acquires the mutex lock for the entire
* operation, from sending the request until the MCP
* response. Waiting for MCP response will be checked up
- * to 5 seconds every 5ms.
+ * to 5 seconds every 10ms. Should not be called from atomic
+ * context.
*
* @p_hwfn: HW device data.
* @p_ptt: PTT required for register access.
@@ -417,6 +418,31 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
u32 *o_mcp_param);
/**
+ * qed_mcp_cmd_nosleep(): Function for sending commands to the MCP
+ * mailbox. It acquires the mutex lock for the entire
+ * operation, from sending the request until the MCP
+ * response. Waiting for MCP response will be checked up
+ * to 5 seconds every 10us. Should be called when sleep
+ * is not allowed.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @cmd: command to be sent to the MCP.
+ * @param: Optional param
+ * @o_mcp_resp: The MCP response code (exclude sequence).
+ * @o_mcp_param: Optional parameter provided by the MCP
+ * response
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_cmd_nosleep(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param);
+
+/**
* qed_mcp_drain(): drains the nig, allowing completion to pass in
* case of pauses.
* (Should be called only from sleepable context)
@@ -762,6 +788,14 @@ struct qed_mcp_info {
/* S/N for debug data mailbox commands */
atomic_t dbg_data_seq;
+
+ /* Spinlock used to sync the flag mcp_handling_status with
+ * the mfw events handler
+ */
+ spinlock_t unload_lock;
+ unsigned long mcp_handling_status;
+#define QED_MCP_BYPASS_PROC_BIT 0
+#define QED_MCP_IN_PROCESSING_BIT 1
};
struct qed_mcp_mb_params {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
index b70ee8200e15..6459dd3feb37 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
@@ -2470,6 +2470,6 @@ struct nvm_meta_bin_t {
u32 version;
#define NVM_META_BIN_VERSION 1
u32 num_options;
- u32 options[0];
+ u32 options[];
};
#endif
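
Replacing options[0] with options[] swaps the old GNU zero-length-array idiom for a C99 flexible array member, which compilers and fortify checks understand properly. A stand-alone sketch of how such a struct is sized and used (kernel code would use struct_size() for the overflow-checked form):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct meta_bin {
	uint32_t version;
	uint32_t num_options;
	uint32_t options[];	/* flexible array member */
};

int main(void)
{
	uint32_t n = 4;
	struct meta_bin *bin = malloc(sizeof(*bin) + n * sizeof(bin->options[0]));

	if (!bin)
		return 1;
	bin->num_options = n;
	for (uint32_t i = 0; i < n; i++)
		bin->options[i] = i * 10;
	printf("options[3] = %u\n", bin->options[3]);	/* 30 */
	free(bin);
	return 0;
}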
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 8ac38828ba45..0848b5529d48 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -2984,12 +2984,16 @@ static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
struct qed_filter_accept_flags *flags = &params->accept_flags;
struct qed_public_vf_info *vf_info;
+ u16 tlv_mask;
+
+ tlv_mask = BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM) |
+ BIT(QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN);
/* Untrusted VFs can't even be trusted to know that fact.
* Simply indicate everything is configured fine, and trace
* configuration 'behind their back'.
*/
- if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM)))
+ if (!(*tlvs & tlv_mask))
return 0;
vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
@@ -3006,6 +3010,13 @@ static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
flags->tx_accept_filter &= ~mask;
}
+ if (params->update_accept_any_vlan_flg) {
+ vf_info->accept_any_vlan = params->accept_any_vlan;
+
+ if (vf_info->forced_vlan && !vf_info->is_trusted_configured)
+ params->accept_any_vlan = false;
+ }
+
return 0;
}
@@ -3806,11 +3817,11 @@ bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
return found;
}
-static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
- u16 vfid,
- struct qed_mcp_link_params *p_params,
- struct qed_mcp_link_state *p_link,
- struct qed_mcp_link_capabilities *p_caps)
+static int qed_iov_get_link(struct qed_hwfn *p_hwfn,
+ u16 vfid,
+ struct qed_mcp_link_params *p_params,
+ struct qed_mcp_link_state *p_link,
+ struct qed_mcp_link_capabilities *p_caps)
{
struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
vfid,
@@ -3818,7 +3829,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
struct qed_bulletin_content *p_bulletin;
if (!p_vf)
- return;
+ return -EINVAL;
p_bulletin = p_vf->bulletin.p_virt;
@@ -3828,6 +3839,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
__qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
if (p_caps)
__qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+ return 0;
}
static int
@@ -4686,6 +4698,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
struct qed_public_vf_info *vf_info;
struct qed_mcp_link_state link;
u32 tx_rate;
+ int ret;
/* Sanitize request */
if (IS_VF(cdev))
@@ -4699,7 +4712,9 @@ static int qed_get_vf_config(struct qed_dev *cdev,
vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
- qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
+ ret = qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
+ if (ret)
+ return ret;
/* Fill information about VF */
ivi->vf = vf_id;
@@ -4715,6 +4730,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
tx_rate = vf_info->tx_rate;
ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
+ ivi->trusted = vf_info->is_trusted_request;
return 0;
}
@@ -5145,6 +5161,12 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
params.update_ctl_frame_check = 1;
params.mac_chk_en = !vf_info->is_trusted_configured;
+ params.update_accept_any_vlan_flg = 0;
+
+ if (vf_info->accept_any_vlan && vf_info->forced_vlan) {
+ params.update_accept_any_vlan_flg = 1;
+ params.accept_any_vlan = vf_info->accept_any_vlan;
+ }
if (vf_info->rx_accept_mode & mask) {
flags->update_rx_mode_config = 1;
@@ -5160,13 +5182,20 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
if (!vf_info->is_trusted_configured) {
flags->rx_accept_filter &= ~mask;
flags->tx_accept_filter &= ~mask;
+ params.accept_any_vlan = false;
}
if (flags->update_rx_mode_config ||
flags->update_tx_mode_config ||
- params.update_ctl_frame_check)
+ params.update_ctl_frame_check ||
+ params.update_accept_any_vlan_flg) {
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "vport update config for %s VF[abs 0x%x rel 0x%x]\n",
+ vf_info->is_trusted_configured ? "trusted" : "untrusted",
+ vf->abs_vf_id, vf->relative_vf_id);
qed_sp_vport_update(hwfn, &params,
QED_SPQ_MODE_EBLOCK, NULL);
+ }
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index f448e3dd6c8b..6ee2493de164 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -62,6 +62,7 @@ struct qed_public_vf_info {
bool is_trusted_request;
u8 rx_accept_mode;
u8 tx_accept_mode;
+ bool accept_any_vlan;
};
struct qed_iov_vf_init_params {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 597cd9cd57b5..7b0e390c0b07 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -513,6 +513,9 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
p_iov->bulletin.size,
&p_iov->bulletin.phys,
GFP_KERNEL);
+ if (!p_iov->bulletin.p_virt)
+ goto free_pf2vf_reply;
+
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
p_iov->bulletin.p_virt,
@@ -552,6 +555,10 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
return rc;
+free_pf2vf_reply:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union pfvf_tlvs),
+ p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
free_vf2pf_request:
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(union vfpf_tlvs),
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index e10fe071a40f..54a2d653be63 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1355,7 +1355,7 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
memset(data, 0, stats->n_stats * sizeof(u64));
- for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
tx_ring = &adapter->tx_ring[ring];
data = qlcnic_fill_tx_queue_stats(data, tx_ring);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 29cdcb2285b1..bcf3746220df 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -10,6 +10,7 @@
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <linux/printk.h>
+#include <linux/jiffies.h>
#include "qlcnic.h"
@@ -332,7 +333,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
if (ether_addr_equal(tmp_fil->faddr, (u8 *)&src_addr) &&
tmp_fil->vlan_id == vlan_id) {
- if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
+ if (time_is_before_jiffies(QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
qlcnic_change_filter(adapter, &src_addr,
vlan_id, tx_ring);
tmp_fil->ftime = jiffies;
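
The jiffies comparison fix matters because a plain '>' breaks once jiffies wraps; time_is_before_jiffies() and friends compare via signed subtraction, which stays correct across the wrap. A stand-alone sketch of the underlying trick (32-bit counter for brevity, illustrative only):

/* Same signed-subtraction trick as the kernel's time_after(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool time_after32(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

int main(void)
{
	uint32_t deadline = 0xfffffff0u;	/* just before the wrap */
	uint32_t now = 0x00000010u;		/* just after the wrap */

	printf("plain now > deadline: %d\n", now > deadline);		/* 0, wrong */
	printf("wrap-safe time_after: %d\n", time_after32(now, deadline)); /* 1 */
	return 0;
}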
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 955cce644392..c865a4be05ee 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -435,7 +435,7 @@ qcaspi_receive(struct qcaspi *qca)
qca->rx_skb->protocol = eth_type_trans(
qca->rx_skb, qca->rx_skb->dev);
skb_checksum_none_assert(qca->rx_skb);
- netif_rx_ni(qca->rx_skb);
+ netif_rx(qca->rx_skb);
qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
net_dev->mtu + VLAN_ETH_HLEN);
if (!qca->rx_skb) {
@@ -1001,7 +1001,7 @@ qca_spi_probe(struct spi_device *spi)
return 0;
}
-static int
+static void
qca_spi_remove(struct spi_device *spi)
{
struct net_device *qcaspi_devs = spi_get_drvdata(spi);
@@ -1011,8 +1011,6 @@ qca_spi_remove(struct spi_device *spi)
unregister_netdev(qcaspi_devs);
free_netdev(qcaspi_devs);
-
- return 0;
}
static const struct spi_device_id qca_spi_id[] = {
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 27c4f43176aa..26646cb6a20a 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -108,7 +108,7 @@ qca_tty_receive(struct serdev_device *serdev, const unsigned char *data,
qca->rx_skb->protocol = eth_type_trans(
qca->rx_skb, qca->rx_skb->dev);
skb_checksum_none_assert(qca->rx_skb);
- netif_rx_ni(qca->rx_skb);
+ netif_rx(qca->rx_skb);
qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
netdev->mtu +
VLAN_ETH_HLEN);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index bfbd7847f946..a313242a762e 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -207,7 +207,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
dev = skb->dev;
port = rmnet_get_port_rcu(dev);
if (unlikely(!port)) {
- atomic_long_inc(&skb->dev->rx_nohandler);
+ dev_core_stats_rx_nohandler_inc(skb->dev);
kfree_skb(skb);
goto done;
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 3676976c875b..ba194698cc14 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -298,7 +298,6 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
{
struct rmnet_map_header *map_header;
u32 padding, map_datalen;
- u8 *padbytes;
map_datalen = skb->len - hdrlen;
map_header = (struct rmnet_map_header *)
@@ -323,8 +322,7 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
if (skb_tailroom(skb) < padding)
return NULL;
- padbytes = (u8 *)skb_put(skb, padding);
- memset(padbytes, 0, padding);
+ skb_put_zero(skb, padding);
done:
map_header->pkt_len = htons(map_datalen + padding);
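
skb_put_zero() is simply the fused form of skb_put() followed by memset(), so the conversion above is behavior-neutral. A kernel-style sketch of the equivalence (assumes sufficient tailroom, as the code checks just above; hypothetical function name):

#include <linux/skbuff.h>
#include <linux/string.h>

static void pad_frame(struct sk_buff *skb, unsigned int padding)
{
	/* before: grow the tail, then clear the new bytes */
	/* u8 *p = skb_put(skb, padding); memset(p, 0, padding); */

	/* after: one call, same result */
	skb_put_zero(skb, padding);
}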
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 19e2621e0645..33f5c5698ccb 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -1397,8 +1397,11 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
rtl_lock_config_regs(tp);
device_set_wakeup_enable(tp_to_dev(tp), wolopts);
- rtl_set_d3_pll_down(tp, !wolopts);
- tp->dev->wol_enabled = wolopts ? 1 : 0;
+
+ if (tp->dash_type == RTL_DASH_NONE) {
+ rtl_set_d3_pll_down(tp, !wolopts);
+ tp->dev->wol_enabled = wolopts ? 1 : 0;
+ }
}
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -2667,10 +2670,7 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38:
rtl_eri_set_bits(tp, 0xd4, 0x0c00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
- rtl_eri_set_bits(tp, 0xd4, 0x1f80);
- break;
- case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80);
break;
default:
@@ -2678,13 +2678,48 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp)
}
}
+static void rtl_disable_exit_l1(struct rtl8169_private *tp)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
+ rtl_eri_clear_bits(tp, 0xd4, 0x1f00);
+ break;
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0);
+ break;
+ default:
+ break;
+ }
+}
+
static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
{
/* Don't enable ASPM in the chip if OS can't control ASPM */
if (enable && tp->aspm_manageable) {
RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_45 ... RTL_GIGA_MAC_VER_48:
+ case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
+ /* reset ephy tx/rx disable timer */
+ r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0);
+ /* chip can trigger L1.2 */
+ r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, BIT(2));
+ break;
+ default:
+ break;
+ }
} else {
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_45 ... RTL_GIGA_MAC_VER_48:
+ case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
+ r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0);
+ break;
+ default:
+ break;
+ }
+
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
}
@@ -4683,7 +4718,7 @@ static void rtl8169_down(struct rtl8169_private *tp)
rtl_pci_commit(tp);
rtl8169_cleanup(tp, true);
-
+ rtl_disable_exit_l1(tp);
rtl_prepare_power_down(tp);
}
@@ -4843,8 +4878,6 @@ static void rtl8169_net_suspend(struct rtl8169_private *tp)
rtl8169_down(tp);
}
-#ifdef CONFIG_PM
-
static int rtl8169_runtime_resume(struct device *dev)
{
struct rtl8169_private *tp = dev_get_drvdata(dev);
@@ -4860,7 +4893,7 @@ static int rtl8169_runtime_resume(struct device *dev)
return 0;
}
-static int __maybe_unused rtl8169_suspend(struct device *device)
+static int rtl8169_suspend(struct device *device)
{
struct rtl8169_private *tp = dev_get_drvdata(device);
@@ -4873,7 +4906,7 @@ static int __maybe_unused rtl8169_suspend(struct device *device)
return 0;
}
-static int __maybe_unused rtl8169_resume(struct device *device)
+static int rtl8169_resume(struct device *device)
{
struct rtl8169_private *tp = dev_get_drvdata(device);
@@ -4908,6 +4941,9 @@ static int rtl8169_runtime_idle(struct device *device)
{
struct rtl8169_private *tp = dev_get_drvdata(device);
+ if (tp->dash_type != RTL_DASH_NONE)
+ return -EBUSY;
+
if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev))
pm_schedule_suspend(device, 10000);
@@ -4915,13 +4951,11 @@ static int rtl8169_runtime_idle(struct device *device)
}
static const struct dev_pm_ops rtl8169_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume)
- SET_RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume,
- rtl8169_runtime_idle)
+ SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume)
+ RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume,
+ rtl8169_runtime_idle)
};
-#endif /* CONFIG_PM */
-
static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
{
/* WoL fails with 8168b when the receiver is disabled. */
@@ -4950,7 +4984,8 @@ static void rtl_shutdown(struct pci_dev *pdev)
/* Restore original MAC address */
rtl_rar_set(tp, tp->dev->perm_addr);
- if (system_state == SYSTEM_POWER_OFF) {
+ if (system_state == SYSTEM_POWER_OFF &&
+ tp->dash_type == RTL_DASH_NONE) {
if (tp->saved_wolopts)
rtl_wol_shutdown_quirk(tp);
@@ -5255,6 +5290,16 @@ done:
rtl_rar_set(tp, mac_addr);
}
+/* The register is set if the system vendor successfully tested ASPM 1.2 */
+static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
+{
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_60 &&
+ r8168_mac_ocp_read(tp, 0xc0b2) & 0xf)
+ return true;
+
+ return false;
+}
+
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct rtl8169_private *tp;
@@ -5333,7 +5378,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* Chips from RTL8168h partially have issues with L1.2, but seem
* to work fine with L1 and L1.1.
*/
- if (tp->mac_version >= RTL_GIGA_MAC_VER_45)
+ if (rtl_aspm_is_safe(tp))
+ rc = 0;
+ else if (tp->mac_version >= RTL_GIGA_MAC_VER_45)
rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
else
rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
@@ -5409,7 +5456,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* configure chip for default features */
rtl8169_set_features(dev, dev->features);
- rtl_set_d3_pll_down(tp, true);
+ if (tp->dash_type == RTL_DASH_NONE) {
+ rtl_set_d3_pll_down(tp, true);
+ } else {
+ rtl_set_d3_pll_down(tp, false);
+ dev->wol_enabled = 1;
+ }
jumbo_max = rtl_jumbo_max(tp);
if (jumbo_max)
@@ -5460,9 +5512,7 @@ static struct pci_driver rtl8169_pci_driver = {
.probe = rtl_init_one,
.remove = rtl_remove_one,
.shutdown = rtl_shutdown,
-#ifdef CONFIG_PM
- .driver.pm = &rtl8169_pm_ops,
-#endif
+ .driver.pm = pm_ptr(&rtl8169_pm_ops),
};
module_pci_driver(rtl8169_pci_driver);
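The PM rework above follows the pm_ptr() pattern. A minimal sketch of that pattern, using a hypothetical foo driver rather than anything from this patch: the unprefixed SYSTEM_SLEEP_PM_OPS()/RUNTIME_PM_OPS() macros always reference their callbacks, so no __maybe_unused annotations or #ifdef CONFIG_PM guards are needed, and pm_ptr() resolves to NULL when CONFIG_PM is disabled, letting the compiler discard the unused callbacks.

#include <linux/device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	return 0;	/* quiesce the device */
}

static int foo_resume(struct device *dev)
{
	return 0;	/* re-initialize the device */
}

static const struct dev_pm_ops foo_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};

/* In the driver definition: .driver.pm = pm_ptr(&foo_pm_ops), */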
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index f7ad5487879b..15c295f90196 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -429,15 +429,6 @@ static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = {
{ 0x0d, 0xf880 }
};
-static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = {
- { 0x1f, 0x0002 },
- { 0x05, 0x669a },
- { 0x1f, 0x0005 },
- { 0x05, 0x8330 },
- { 0x06, 0x669a },
- { 0x1f, 0x0002 }
-};
-
static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp,
struct phy_device *phydev,
u16 val)
@@ -455,6 +446,29 @@ static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp,
r8169_apply_firmware(tp);
}
+static void rtl8168d_1_common(struct phy_device *phydev)
+{
+ u16 val;
+
+ phy_write_paged(phydev, 0x0002, 0x05, 0x669a);
+ r8168d_phy_param(phydev, 0x8330, 0xffff, 0x669a);
+ phy_write(phydev, 0x1f, 0x0002);
+
+ val = phy_read(phydev, 0x0d);
+
+ if ((val & 0x00ff) != 0x006c) {
+ static const u16 set[] = {
+ 0x0065, 0x0066, 0x0067, 0x0068,
+ 0x0069, 0x006a, 0x006b, 0x006c
+ };
+ int i;
+
+ val &= 0xff00;
+ for (i = 0; i < ARRAY_SIZE(set); i++)
+ phy_write(phydev, 0x0d, val | set[i]);
+ }
+}
+
static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -469,25 +483,7 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp,
phy_modify(phydev, 0x0c, 0x5d00, 0xa200);
if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
- int val;
-
- rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1);
-
- val = phy_read(phydev, 0x0d);
-
- if ((val & 0x00ff) != 0x006c) {
- static const u32 set[] = {
- 0x0065, 0x0066, 0x0067, 0x0068,
- 0x0069, 0x006a, 0x006b, 0x006c
- };
- int i;
-
- phy_write(phydev, 0x1f, 0x0002);
-
- val &= 0xff00;
- for (i = 0; i < ARRAY_SIZE(set); i++)
- phy_write(phydev, 0x0d, val | set[i]);
- }
+ rtl8168d_1_common(phydev);
} else {
phy_write_paged(phydev, 0x0002, 0x05, 0x6662);
r8168d_phy_param(phydev, 0x8330, 0xffff, 0x6662);
@@ -513,24 +509,7 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp,
rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0);
if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
- int val;
-
- rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1);
-
- val = phy_read(phydev, 0x0d);
- if ((val & 0x00ff) != 0x006c) {
- static const u32 set[] = {
- 0x0065, 0x0066, 0x0067, 0x0068,
- 0x0069, 0x006a, 0x006b, 0x006c
- };
- int i;
-
- phy_write(phydev, 0x1f, 0x0002);
-
- val &= 0xff00;
- for (i = 0; i < ARRAY_SIZE(set); i++)
- phy_write(phydev, 0x0d, val | set[i]);
- }
+ rtl8168d_1_common(phydev);
} else {
phy_write_paged(phydev, 0x0002, 0x05, 0x2642);
r8168d_phy_param(phydev, 0x8330, 0xffff, 0x2642);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index b215cde68e10..525d66f71f02 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -475,7 +475,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
goto error;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- skb = netdev_alloc_skb(ndev, info->max_rx_len);
+ skb = __netdev_alloc_skb(ndev, info->max_rx_len, GFP_KERNEL);
if (!skb)
goto error;
ravb_set_buffer_align(skb);
@@ -1432,11 +1432,7 @@ static int ravb_phy_init(struct net_device *ndev)
* at this time.
*/
if (soc_device_match(r8a7795es10)) {
- err = phy_set_max_speed(phydev, SPEED_100);
- if (err) {
- netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
- goto err_phy_disconnect;
- }
+ phy_set_max_speed(phydev, SPEED_100);
netdev_info(ndev, "limited PHY to 100Mbit/s\n");
}
@@ -1457,8 +1453,6 @@ static int ravb_phy_init(struct net_device *ndev)
return 0;
-err_phy_disconnect:
- phy_disconnect(phydev);
err_deregister_fixed_link:
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
@@ -2854,7 +2848,6 @@ static int ravb_wol_restore(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- int ret;
if (info->nc_queues)
napi_enable(&priv->napi[RAVB_NC]);
@@ -2863,9 +2856,7 @@ static int ravb_wol_restore(struct net_device *ndev)
/* Disable MagicPacket */
ravb_modify(ndev, ECMR, ECMR_MPDE, 0);
- ret = ravb_close(ndev);
- if (ret < 0)
- return ret;
+ ravb_close(ndev);
return disable_irq_wake(priv->emac_irq);
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index d947a628e166..67ade78fb767 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2026,14 +2026,8 @@ static int sh_eth_phy_init(struct net_device *ndev)
}
/* mask with MAC supported features */
- if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) {
- int err = phy_set_max_speed(phydev, SPEED_100);
- if (err) {
- netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n");
- phy_disconnect(phydev);
- return err;
- }
- }
+ if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
+ phy_set_max_speed(phydev, SPEED_100);
phy_attached_info(phydev);
@@ -3450,9 +3444,7 @@ static int sh_eth_wol_restore(struct net_device *ndev)
* both be reset and all registers restored. This is what
* happens during suspend and resume without WoL enabled.
*/
- ret = sh_eth_close(ndev);
- if (ret < 0)
- return ret;
+ sh_eth_close(ndev);
ret = sh_eth_open(ndev);
if (ret < 0)
return ret;
@@ -3464,7 +3456,7 @@ static int sh_eth_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct sh_eth_private *mdp = netdev_priv(ndev);
- int ret = 0;
+ int ret;
if (!netif_running(ndev))
return 0;
@@ -3483,7 +3475,7 @@ static int sh_eth_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct sh_eth_private *mdp = netdev_priv(ndev);
- int ret = 0;
+ int ret;
if (!netif_running(ndev))
return 0;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 32161a56726c..407a1f8e3059 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -127,7 +127,7 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
/* MAC core supports the EEE feature. */
if (priv->hw_cap.eee) {
/* Check if the PHY supports EEE */
- if (phy_init_eee(ndev->phydev, 1))
+ if (phy_init_eee(ndev->phydev, true))
return false;
priv->eee_active = 1;
@@ -2285,18 +2285,18 @@ static int __init sxgbe_cmdline_opt(char *str)
char *opt;
if (!str || !*str)
- return -EINVAL;
+ return 1;
while ((opt = strsep(&str, ",")) != NULL) {
if (!strncmp(opt, "eee_timer:", 10)) {
if (kstrtoint(opt + 10, 0, &eee_timer))
goto err;
}
}
- return 0;
+ return 1;
err:
pr_err("%s: ERROR broken module parameter conversion\n", __func__);
- return -EINVAL;
+ return 1;
}
__setup("sxgbeeth=", sxgbe_cmdline_opt);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index cf366ed2557c..50d535981a35 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3990,6 +3990,30 @@ static unsigned int ef10_check_caps(const struct efx_nic *efx,
}
}
+static unsigned int efx_ef10_recycle_ring_size(const struct efx_nic *efx)
+{
+ unsigned int ret = EFX_RECYCLE_RING_SIZE_10G;
+
+ /* There is no difference between PFs and VFs. The size is based on
+ * the maximum link speed of a given NIC.
+ */
+ switch (efx->pci_dev->device & 0xfff) {
+ case 0x0903: /* Farmingdale can do up to 10G */
+ break;
+ case 0x0923: /* Greenport can do up to 40G */
+ case 0x0a03: /* Medford can do up to 40G */
+ ret *= 4;
+ break;
+ default: /* Medford2 can do up to 100G */
+ ret *= 10;
+ }
+
+ if (IS_ENABLED(CONFIG_PPC64))
+ ret *= 4;
+
+ return ret;
+}
+
#define EF10_OFFLOAD_FEATURES \
(NETIF_F_IP_CSUM | \
NETIF_F_HW_VLAN_CTAG_FILTER | \
@@ -4106,6 +4130,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.check_caps = ef10_check_caps,
.print_additional_fwver = efx_ef10_print_additional_fwver,
.sensor_event = efx_mcdi_sensor_event,
+ .rx_recycle_ring_size = efx_ef10_recycle_ring_size,
};
const struct efx_nic_type efx_hunt_a0_nic_type = {
@@ -4243,4 +4268,5 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.check_caps = ef10_check_caps,
.print_additional_fwver = efx_ef10_print_additional_fwver,
.sensor_event = efx_mcdi_sensor_event,
+ .rx_recycle_ring_size = efx_ef10_recycle_ring_size,
};
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index f79b14a119ae..a07cbf45a326 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -23,6 +23,7 @@
#include "ef100_rx.h"
#include "ef100_tx.h"
#include "ef100_netdev.h"
+#include "rx_common.h"
#define EF100_MAX_VIS 4096
#define EF100_NUM_MCDI_BUFFERS 1
@@ -696,6 +697,12 @@ static unsigned int ef100_check_caps(const struct efx_nic *efx,
}
}
+static unsigned int efx_ef100_recycle_ring_size(const struct efx_nic *efx)
+{
+ /* Maximum link speed for Riverhead is 100G */
+ return 10 * EFX_RECYCLE_RING_SIZE_10G;
+}
+
/* NIC level access functions
*/
#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM | \
@@ -770,6 +777,7 @@ const struct efx_nic_type ef100_pf_nic_type = {
.rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
.rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
.rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+ .rx_recycle_ring_size = efx_ef100_recycle_ring_size,
.reconfigure_mac = ef100_reconfigure_mac,
.reconfigure_port = efx_mcdi_port_reconfigure,
@@ -849,6 +857,7 @@ const struct efx_nic_type ef100_vf_nic_type = {
.rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
.rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
.rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+ .rx_recycle_ring_size = efx_ef100_recycle_ring_size,
.reconfigure_mac = ef100_reconfigure_mac,
.test_nvram = efx_new_mcdi_nvram_test_all,
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index ead550ae2709..d6fdcdc530ca 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -78,31 +78,48 @@ static const struct efx_channel_type efx_default_channel_type = {
* INTERRUPTS
*************/
-static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
+static unsigned int count_online_cores(struct efx_nic *efx, bool local_node)
{
- cpumask_var_t thread_mask;
+ cpumask_var_t filter_mask;
unsigned int count;
int cpu;
+ if (unlikely(!zalloc_cpumask_var(&filter_mask, GFP_KERNEL))) {
+ netif_warn(efx, probe, efx->net_dev,
+ "RSS disabled due to allocation failure\n");
+ return 1;
+ }
+
+ cpumask_copy(filter_mask, cpu_online_mask);
+ if (local_node) {
+ int numa_node = pcibus_to_node(efx->pci_dev->bus);
+
+ cpumask_and(filter_mask, filter_mask, cpumask_of_node(numa_node));
+ }
+
+ count = 0;
+ for_each_cpu(cpu, filter_mask) {
+ ++count;
+ cpumask_andnot(filter_mask, filter_mask, topology_sibling_cpumask(cpu));
+ }
+
+ free_cpumask_var(filter_mask);
+
+ return count;
+}
+
+static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
+{
+ unsigned int count;
+
if (rss_cpus) {
count = rss_cpus;
} else {
- if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
- netif_warn(efx, probe, efx->net_dev,
- "RSS disabled due to allocation failure\n");
- return 1;
- }
+ count = count_online_cores(efx, true);
- count = 0;
- for_each_online_cpu(cpu) {
- if (!cpumask_test_cpu(cpu, thread_mask)) {
- ++count;
- cpumask_or(thread_mask, thread_mask,
- topology_sibling_cpumask(cpu));
- }
- }
-
- free_cpumask_var(thread_mask);
+ /* If there are no online CPUs in the local node, fall back to any online CPU */
+ if (count == 0)
+ count = count_online_cores(efx, false);
}
if (count > EFX_MAX_RX_QUEUES) {
@@ -369,12 +386,20 @@ int efx_probe_interrupts(struct efx_nic *efx)
#if defined(CONFIG_SMP)
void efx_set_interrupt_affinity(struct efx_nic *efx)
{
+ int numa_node = pcibus_to_node(efx->pci_dev->bus);
+ const struct cpumask *numa_mask = cpumask_of_node(numa_node);
struct efx_channel *channel;
unsigned int cpu;
+ /* If there are no online CPUs in the local node, fall back to any online CPU */
+ if (cpumask_first_and(cpu_online_mask, numa_mask) >= nr_cpu_ids)
+ numa_mask = cpu_online_mask;
+
+ cpu = -1;
efx_for_each_channel(channel, efx) {
- cpu = cpumask_local_spread(channel->channel,
- pcibus_to_node(efx->pci_dev->bus));
+ cpu = cpumask_next_and(cpu, cpu_online_mask, numa_mask);
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_first_and(cpu_online_mask, numa_mask);
irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
}
}
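The affinity loop above round-robins channels over the NUMA-local online CPUs, wrapping when the mask is exhausted. A minimal standalone sketch of that wrap-around idiom (helper name is illustrative, not from the patch):

#include <linux/cpumask.h>

static int next_wrapped_cpu(int prev, const struct cpumask *mask)
{
	int cpu = cpumask_next_and(prev, cpu_online_mask, mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first_and(cpu_online_mask, mask);
	return cpu;
}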
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index be6bfd6b7ec7..50baf62b2cbc 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -163,9 +163,9 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
spin_lock_bh(&mcdi->iface_lock);
++mcdi->seqno;
+ seqno = mcdi->seqno & SEQ_MASK;
spin_unlock_bh(&mcdi->iface_lock);
- seqno = mcdi->seqno & SEQ_MASK;
xflags = 0;
if (mcdi->mode == MCDI_MODE_EVENTS)
xflags |= MCDI_HEADER_XFLAGS_EVREQ;
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index cc15ee8812d9..c75dc75e2857 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1282,6 +1282,7 @@ struct efx_udp_tunnel {
* @udp_tnl_has_port: Check if a port has been added as UDP tunnel
* @print_additional_fwver: Dump NIC-specific additional FW version info
* @sensor_event: Handle a sensor event from MCDI
+ * @rx_recycle_ring_size: Size of the RX recycle ring
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1460,6 +1461,7 @@ struct efx_nic_type {
size_t (*print_additional_fwver)(struct efx_nic *efx, char *buf,
size_t len);
void (*sensor_event)(struct efx_nic *efx, efx_qword_t *ev);
+ unsigned int (*rx_recycle_ring_size)(const struct efx_nic *efx);
int revision;
unsigned int txd_ptr_tbl_base;
diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h
index b9cafe9cd568..0cef35c0c559 100644
--- a/drivers/net/ethernet/sfc/nic_common.h
+++ b/drivers/net/ethernet/sfc/nic_common.h
@@ -195,6 +195,11 @@ static inline void efx_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
efx->type->sensor_event(efx, ev);
}
+static inline unsigned int efx_rx_recycle_ring_size(const struct efx_nic *efx)
+{
+ return efx->type->rx_recycle_ring_size(efx);
+}
+
/* Some statistics are computed as A - B where A and B each increase
* linearly with some hardware counter(s) and the counters are read
* asynchronously. If the counters contributing to B are always read
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index 633ca77a26fd..1b22c7be0088 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -23,13 +23,6 @@ module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
"RX descriptor ring refill threshold (%)");
-/* Number of RX buffers to recycle pages for. When creating the RX page recycle
- * ring, this number is divided by the number of buffers per page to calculate
- * the number of pages to store in the RX page recycle ring.
- */
-#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
-#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
-
/* RX maximum head room required.
*
* This must be at least 1 to prevent overflow, plus one packet-worth
@@ -141,16 +134,7 @@ static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
unsigned int bufs_in_recycle_ring, page_ring_size;
struct efx_nic *efx = rx_queue->efx;
- /* Set the RX recycle ring size */
-#ifdef CONFIG_PPC64
- bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
-#else
- if (iommu_present(&pci_bus_type))
- bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
- else
- bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
-#endif /* CONFIG_PPC64 */
-
+ bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx);
page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
efx->rx_bufs_per_page);
rx_queue->page_ring = kcalloc(page_ring_size,
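Putting the pieces together, a worked example under assumed values (a Medford 40G NIC on PPC64, with two RX buffers per page):

#include <linux/log2.h>

static unsigned int example_page_ring_size(void)
{
	unsigned int bufs = 4 * 256 * 4;	/* ef10 hook: 256 * 4 for 40G, * 4 again on PPC64 */
	unsigned int bufs_per_page = 2;		/* assumed */

	return roundup_pow_of_two(bufs / bufs_per_page);	/* 2048 pages */
}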
diff --git a/drivers/net/ethernet/sfc/rx_common.h b/drivers/net/ethernet/sfc/rx_common.h
index 207ccd8ba062..fbd2769307f9 100644
--- a/drivers/net/ethernet/sfc/rx_common.h
+++ b/drivers/net/ethernet/sfc/rx_common.h
@@ -18,6 +18,12 @@
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
EFX_RX_USR_BUF_SIZE)
+/* Number of RX buffers to recycle pages for. When creating the RX page recycle
+ * ring, this number is divided by the number of buffers per page to calculate
+ * the number of pages to store in the RX page recycle ring.
+ */
+#define EFX_RECYCLE_RING_SIZE_10G 256
+
static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
return page_address(buf->page) + buf->page_offset;
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 16347a6d0c47..ce3060e15b54 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -25,6 +25,7 @@
#include "mcdi_port_common.h"
#include "selftest.h"
#include "siena_sriov.h"
+#include "rx_common.h"
/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
@@ -958,6 +959,12 @@ static unsigned int siena_check_caps(const struct efx_nic *efx,
return 0;
}
+static unsigned int efx_siena_recycle_ring_size(const struct efx_nic *efx)
+{
+ /* Maximum link speed is 10G */
+ return EFX_RECYCLE_RING_SIZE_10G;
+}
+
/**************************************************************************
*
* Revision-dependent attributes used by efx.c and nic.c
@@ -1098,4 +1105,5 @@ const struct efx_nic_type siena_a0_nic_type = {
.rx_hash_key_size = 16,
.check_caps = siena_check_caps,
.sensor_event = efx_mcdi_sensor_event,
+ .rx_recycle_ring_size = efx_siena_recycle_ring_size,
};
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 556bd353dd42..b0c5a44785fa 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1044,7 +1044,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
"rx failed to build skb\n");
break;
}
- page_pool_release_page(dring->page_pool, page);
+ skb_mark_for_recycle(skb);
skb_reserve(skb, xdp.data - xdp.data_hard_start);
skb_put(skb, xdp.data_end - xdp.data);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 8e8778cfbbad..63754a9c4ba7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -383,10 +383,10 @@ static int intel_crosststamp(ktime_t *device,
/* Repeat until the timestamps are from the FIFO last segment */
for (i = 0; i < num_snapshot; i++) {
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ read_lock_irqsave(&priv->ptp_lock, flags);
stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
*device = ns_to_ktime(ptp_time);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
*system = convert_art_to_tsc(art_time);
}
@@ -721,6 +721,7 @@ static int tgl_common_data(struct pci_dev *pdev,
plat->rx_queues_to_use = 6;
plat->tx_queues_to_use = 4;
plat->clk_ptp_rate = 200000000;
+ plat->speed_mode_2500 = intel_speed_mode_2500;
plat->safety_feat_cfg->tsoee = 1;
plat->safety_feat_cfg->mrxpee = 0;
@@ -740,7 +741,6 @@ static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
{
plat->bus_id = 1;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return tgl_common_data(pdev, plat);
@@ -755,7 +755,6 @@ static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
{
plat->bus_id = 2;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return tgl_common_data(pdev, plat);
@@ -1160,6 +1159,7 @@ static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G 0xa0ac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0 0x7aac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1 0x7aad
+#define PCI_DEVICE_ID_INTEL_ADLN_SGMII1G 0x54ac
static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) },
@@ -1177,6 +1177,7 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
+ { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &tgl_sgmii1g_phy0_info) },
{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 58c0feaa8131..6ff88df58767 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -9,7 +9,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/stmmac.h>
@@ -40,6 +39,33 @@
#define ETH_FINE_DLY_GTXC BIT(1)
#define ETH_FINE_DLY_RXC BIT(0)
+/* Peri Configuration register for mt8195 */
+#define MT8195_PERI_ETH_CTRL0 0xFD0
+#define MT8195_RMII_CLK_SRC_INTERNAL BIT(28)
+#define MT8195_RMII_CLK_SRC_RXC BIT(27)
+#define MT8195_ETH_INTF_SEL GENMASK(26, 24)
+#define MT8195_RGMII_TXC_PHASE_CTRL BIT(22)
+#define MT8195_EXT_PHY_MODE BIT(21)
+#define MT8195_DLY_GTXC_INV BIT(12)
+#define MT8195_DLY_GTXC_ENABLE BIT(5)
+#define MT8195_DLY_GTXC_STAGES GENMASK(4, 0)
+
+#define MT8195_PERI_ETH_CTRL1 0xFD4
+#define MT8195_DLY_RXC_INV BIT(25)
+#define MT8195_DLY_RXC_ENABLE BIT(18)
+#define MT8195_DLY_RXC_STAGES GENMASK(17, 13)
+#define MT8195_DLY_TXC_INV BIT(12)
+#define MT8195_DLY_TXC_ENABLE BIT(5)
+#define MT8195_DLY_TXC_STAGES GENMASK(4, 0)
+
+#define MT8195_PERI_ETH_CTRL2 0xFD8
+#define MT8195_DLY_RMII_RXC_INV BIT(25)
+#define MT8195_DLY_RMII_RXC_ENABLE BIT(18)
+#define MT8195_DLY_RMII_RXC_STAGES GENMASK(17, 13)
+#define MT8195_DLY_RMII_TXC_INV BIT(12)
+#define MT8195_DLY_RMII_TXC_ENABLE BIT(5)
+#define MT8195_DLY_RMII_TXC_STAGES GENMASK(4, 0)
+
struct mac_delay_struct {
u32 tx_delay;
u32 rx_delay;
@@ -50,19 +76,21 @@ struct mac_delay_struct {
struct mediatek_dwmac_plat_data {
const struct mediatek_dwmac_variant *variant;
struct mac_delay_struct mac_delay;
+ struct clk *rmii_internal_clk;
struct clk_bulk_data *clks;
- struct device_node *np;
struct regmap *peri_regmap;
+ struct device_node *np;
struct device *dev;
phy_interface_t phy_mode;
- int num_clks_to_config;
bool rmii_clk_from_mac;
bool rmii_rxc;
+ bool mac_wol;
};
struct mediatek_dwmac_variant {
int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat);
int (*dwmac_set_delay)(struct mediatek_dwmac_plat_data *plat);
+ void (*dwmac_fix_mac_speed)(void *priv, unsigned int speed);
/* clock ids to be requested */
const char * const *clk_list;
@@ -75,7 +103,11 @@ struct mediatek_dwmac_variant {
/* list of clocks required for mac */
static const char * const mt2712_dwmac_clk_l[] = {
- "axi", "apb", "mac_main", "ptp_ref", "rmii_internal"
+ "axi", "apb", "mac_main", "ptp_ref"
+};
+
+static const char * const mt8195_dwmac_clk_l[] = {
+ "axi", "apb", "mac_cg", "mac_main", "ptp_ref"
};
static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat)
@@ -84,23 +116,12 @@ static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat)
int rmii_rxc = plat->rmii_rxc ? RMII_CLK_SRC_RXC : 0;
u32 intf_val = 0;
- /* The clock labeled as "rmii_internal" in mt2712_dwmac_clk_l is needed
- * only in RMII(when MAC provides the reference clock), and useless for
- * RGMII/MII/RMII(when PHY provides the reference clock).
- * num_clks_to_config indicates the real number of clocks should be
- * configured, equals to (plat->variant->num_clks - 1) in default for all the case,
- * then +1 for rmii_clk_from_mac case.
- */
- plat->num_clks_to_config = plat->variant->num_clks - 1;
-
/* select phy interface in top control domain */
switch (plat->phy_mode) {
case PHY_INTERFACE_MODE_MII:
intf_val |= PHY_INTF_MII;
break;
case PHY_INTERFACE_MODE_RMII:
- if (plat->rmii_clk_from_mac)
- plat->num_clks_to_config++;
intf_val |= (PHY_INTF_RMII | rmii_rxc | rmii_clk_from_mac);
break;
case PHY_INTERFACE_MODE_RGMII:
@@ -268,6 +289,193 @@ static const struct mediatek_dwmac_variant mt2712_gmac_variant = {
.tx_delay_max = 17600,
};
+static int mt8195_set_interface(struct mediatek_dwmac_plat_data *plat)
+{
+ int rmii_clk_from_mac = plat->rmii_clk_from_mac ? MT8195_RMII_CLK_SRC_INTERNAL : 0;
+ int rmii_rxc = plat->rmii_rxc ? MT8195_RMII_CLK_SRC_RXC : 0;
+ u32 intf_val = 0;
+
+ /* select phy interface in top control domain */
+ switch (plat->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_MII);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ intf_val |= (rmii_rxc | rmii_clk_from_mac);
+ intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_RMII);
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_RGMII);
+ break;
+ default:
+ dev_err(plat->dev, "phy interface not supported\n");
+ return -EINVAL;
+ }
+
+ /* MT8195 only supports an external PHY */
+ intf_val |= MT8195_EXT_PHY_MODE;
+
+ regmap_write(plat->peri_regmap, MT8195_PERI_ETH_CTRL0, intf_val);
+
+ return 0;
+}
+
+static void mt8195_delay_ps2stage(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+
+ /* 290ps per stage */
+ mac_delay->tx_delay /= 290;
+ mac_delay->rx_delay /= 290;
+}
+
+static void mt8195_delay_stage2ps(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+
+ /* 290ps per stage */
+ mac_delay->tx_delay *= 290;
+ mac_delay->rx_delay *= 290;
+}
+
+static int mt8195_set_delay(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+ u32 gtxc_delay_val = 0, delay_val = 0, rmii_delay_val = 0;
+
+ mt8195_delay_ps2stage(plat);
+
+ switch (plat->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_ENABLE, !!mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_STAGES, mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_INV, mac_delay->tx_inv);
+
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_INV, mac_delay->rx_inv);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (plat->rmii_clk_from_mac) {
+ /* case 1: the MAC provides the RMII reference clock,
+ * and the clock is output on the TXC pin.
+ * The egress timing can be adjusted by the RMII_TXC delay macro circuit.
+ * The ingress timing can be adjusted by the RMII_RXC delay macro circuit.
+ */
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_TXC_ENABLE,
+ !!mac_delay->tx_delay);
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_TXC_STAGES,
+ mac_delay->tx_delay);
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_TXC_INV,
+ mac_delay->tx_inv);
+
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_RXC_ENABLE,
+ !!mac_delay->rx_delay);
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_RXC_STAGES,
+ mac_delay->rx_delay);
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_RXC_INV,
+ mac_delay->rx_inv);
+ } else {
+ /* case 2: the RMII reference clock comes from an external PHY,
+ * and the property "rmii_rxc" indicates which pin (TXC/RXC)
+ * the reference clock is connected to. The reference clock is a
+ * received signal, so rx_delay/rx_inv are used to describe
+ * the reference clock timing adjustment.
+ */
+ if (plat->rmii_rxc) {
+ /* the external RMII reference clock is connected
+ * to the RXC pin, so the reference clock is adjusted
+ * by the RXC delay macro circuit.
+ */
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_ENABLE,
+ !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_STAGES,
+ mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_INV,
+ mac_delay->rx_inv);
+ } else {
+ /* the external RMII reference clock is connected
+ * to the TXC pin, so the reference clock is adjusted
+ * by the TXC delay macro circuit.
+ */
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_ENABLE,
+ !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_STAGES,
+ mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_INV,
+ mac_delay->rx_inv);
+ }
+ }
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ gtxc_delay_val |= FIELD_PREP(MT8195_DLY_GTXC_ENABLE, !!mac_delay->tx_delay);
+ gtxc_delay_val |= FIELD_PREP(MT8195_DLY_GTXC_STAGES, mac_delay->tx_delay);
+ gtxc_delay_val |= FIELD_PREP(MT8195_DLY_GTXC_INV, mac_delay->tx_inv);
+
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_INV, mac_delay->rx_inv);
+
+ break;
+ default:
+ dev_err(plat->dev, "phy interface not supported\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(plat->peri_regmap,
+ MT8195_PERI_ETH_CTRL0,
+ MT8195_RGMII_TXC_PHASE_CTRL |
+ MT8195_DLY_GTXC_INV |
+ MT8195_DLY_GTXC_ENABLE |
+ MT8195_DLY_GTXC_STAGES,
+ gtxc_delay_val);
+ regmap_write(plat->peri_regmap, MT8195_PERI_ETH_CTRL1, delay_val);
+ regmap_write(plat->peri_regmap, MT8195_PERI_ETH_CTRL2, rmii_delay_val);
+
+ mt8195_delay_stage2ps(plat);
+
+ return 0;
+}
+
+static void mt8195_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct mediatek_dwmac_plat_data *priv_plat = priv;
+
+ if (phy_interface_mode_is_rgmii(priv_plat->phy_mode)) {
+ /* Prefer the 2ns fixed delay controlled by TXC_PHASE_CTRL when
+ * the link speed is 1Gbps on an RGMII interface; fall back to the
+ * delay macro circuit for 10/100Mbps link speeds.
+ */
+ if (speed == SPEED_1000)
+ regmap_update_bits(priv_plat->peri_regmap,
+ MT8195_PERI_ETH_CTRL0,
+ MT8195_RGMII_TXC_PHASE_CTRL |
+ MT8195_DLY_GTXC_ENABLE |
+ MT8195_DLY_GTXC_INV |
+ MT8195_DLY_GTXC_STAGES,
+ MT8195_RGMII_TXC_PHASE_CTRL);
+ else
+ mt8195_set_delay(priv_plat);
+ }
+}
+
+static const struct mediatek_dwmac_variant mt8195_gmac_variant = {
+ .dwmac_set_phy_interface = mt8195_set_interface,
+ .dwmac_set_delay = mt8195_set_delay,
+ .dwmac_fix_mac_speed = mt8195_fix_mac_speed,
+ .clk_list = mt8195_dwmac_clk_l,
+ .num_clks = ARRAY_SIZE(mt8195_dwmac_clk_l),
+ .dma_bit_mask = 35,
+ .rx_delay_max = 9280,
+ .tx_delay_max = 9280,
+};
+
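A quick worked example of the 290ps-per-stage conversion used by mt8195_delay_ps2stage()/mt8195_delay_stage2ps() above, with assumed device-tree values: a requested TX delay of 2030ps divides evenly into 2030 / 290 = 7 stages and reads back as exactly 2030ps, whereas 2000ps truncates to 6 stages and reads back as 6 * 290 = 1740ps.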
static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
{
struct mac_delay_struct *mac_delay = &plat->mac_delay;
@@ -308,6 +516,7 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
mac_delay->rx_inv = of_property_read_bool(plat->np, "mediatek,rxc-inverse");
plat->rmii_rxc = of_property_read_bool(plat->np, "mediatek,rmii-rxc");
plat->rmii_clk_from_mac = of_property_read_bool(plat->np, "mediatek,rmii-clk-from-mac");
+ plat->mac_wol = of_property_read_bool(plat->np, "mediatek,mac-wol");
return 0;
}
@@ -315,18 +524,34 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
static int mediatek_dwmac_clk_init(struct mediatek_dwmac_plat_data *plat)
{
const struct mediatek_dwmac_variant *variant = plat->variant;
- int i, num = variant->num_clks;
+ int i, ret;
- plat->clks = devm_kcalloc(plat->dev, num, sizeof(*plat->clks), GFP_KERNEL);
+ plat->clks = devm_kcalloc(plat->dev, variant->num_clks, sizeof(*plat->clks), GFP_KERNEL);
if (!plat->clks)
return -ENOMEM;
- for (i = 0; i < num; i++)
+ for (i = 0; i < variant->num_clks; i++)
plat->clks[i].id = variant->clk_list[i];
- plat->num_clks_to_config = variant->num_clks;
+ ret = devm_clk_bulk_get(plat->dev, variant->num_clks, plat->clks);
+ if (ret)
+ return ret;
+
+ /* The clock labeled "rmii_internal" is needed only in RMII mode when
+ * the MAC provides the reference clock, and is unused for RGMII/MII
+ * or for RMII when the PHY provides the reference clock.
+ * So the "rmii_internal" clock is acquired and configured only when
+ * the RMII reference clock comes from the MAC.
+ */
+ if (plat->rmii_clk_from_mac) {
+ plat->rmii_internal_clk = devm_clk_get(plat->dev, "rmii_internal");
+ if (IS_ERR(plat->rmii_internal_clk))
+ ret = PTR_ERR(plat->rmii_internal_clk);
+ } else {
+ plat->rmii_internal_clk = NULL;
+ }
- return devm_clk_bulk_get(plat->dev, num, plat->clks);
+ return ret;
}
static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
@@ -335,44 +560,117 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
const struct mediatek_dwmac_variant *variant = plat->variant;
int ret;
- ret = dma_set_mask_and_coherent(plat->dev, DMA_BIT_MASK(variant->dma_bit_mask));
- if (ret) {
- dev_err(plat->dev, "No suitable DMA available, err = %d\n", ret);
- return ret;
+ if (variant->dwmac_set_phy_interface) {
+ ret = variant->dwmac_set_phy_interface(plat);
+ if (ret) {
+ dev_err(plat->dev, "failed to set phy interface, err = %d\n", ret);
+ return ret;
+ }
}
- ret = variant->dwmac_set_phy_interface(plat);
- if (ret) {
- dev_err(plat->dev, "failed to set phy interface, err = %d\n", ret);
- return ret;
+ if (variant->dwmac_set_delay) {
+ ret = variant->dwmac_set_delay(plat);
+ if (ret) {
+ dev_err(plat->dev, "failed to set delay value, err = %d\n", ret);
+ return ret;
+ }
}
- ret = variant->dwmac_set_delay(plat);
+ ret = clk_bulk_prepare_enable(variant->num_clks, plat->clks);
if (ret) {
- dev_err(plat->dev, "failed to set delay value, err = %d\n", ret);
+ dev_err(plat->dev, "failed to enable clks, err = %d\n", ret);
return ret;
}
- ret = clk_bulk_prepare_enable(plat->num_clks_to_config, plat->clks);
+ ret = clk_prepare_enable(plat->rmii_internal_clk);
if (ret) {
- dev_err(plat->dev, "failed to enable clks, err = %d\n", ret);
- return ret;
+ dev_err(plat->dev, "failed to enable rmii internal clk, err = %d\n", ret);
+ goto err_clk;
}
- pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
-
return 0;
+
+err_clk:
+ clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
+ return ret;
}
static void mediatek_dwmac_exit(struct platform_device *pdev, void *priv)
{
struct mediatek_dwmac_plat_data *plat = priv;
+ const struct mediatek_dwmac_variant *variant = plat->variant;
- clk_bulk_disable_unprepare(plat->num_clks_to_config, plat->clks);
+ clk_disable_unprepare(plat->rmii_internal_clk);
+ clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
+}
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
+static int mediatek_dwmac_clks_config(void *priv, bool enabled)
+{
+ struct mediatek_dwmac_plat_data *plat = priv;
+ const struct mediatek_dwmac_variant *variant = plat->variant;
+ int ret = 0;
+
+ if (enabled) {
+ ret = clk_bulk_prepare_enable(variant->num_clks, plat->clks);
+ if (ret) {
+ dev_err(plat->dev, "failed to enable clks, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(plat->rmii_internal_clk);
+ if (ret) {
+ dev_err(plat->dev, "failed to enable rmii internal clk, err = %d\n", ret);
+ return ret;
+ }
+ } else {
+ clk_disable_unprepare(plat->rmii_internal_clk);
+ clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
+ }
+
+ return ret;
+}
+
+static int mediatek_dwmac_common_data(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat,
+ struct mediatek_dwmac_plat_data *priv_plat)
+{
+ int i;
+
+ plat->interface = priv_plat->phy_mode;
+ plat->use_phy_wol = priv_plat->mac_wol ? 0 : 1;
+ plat->riwt_off = 1;
+ plat->maxmtu = ETH_DATA_LEN;
+ plat->addr64 = priv_plat->variant->dma_bit_mask;
+ plat->bsp_priv = priv_plat;
+ plat->init = mediatek_dwmac_init;
+ plat->exit = mediatek_dwmac_exit;
+ plat->clks_config = mediatek_dwmac_clks_config;
+ if (priv_plat->variant->dwmac_fix_mac_speed)
+ plat->fix_mac_speed = priv_plat->variant->dwmac_fix_mac_speed;
+
+ plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->safety_feat_cfg),
+ GFP_KERNEL);
+ if (!plat->safety_feat_cfg)
+ return -ENOMEM;
+
+ plat->safety_feat_cfg->tsoee = 1;
+ plat->safety_feat_cfg->mrxpee = 0;
+ plat->safety_feat_cfg->mestee = 1;
+ plat->safety_feat_cfg->mrxee = 1;
+ plat->safety_feat_cfg->mtxee = 1;
+ plat->safety_feat_cfg->epsi = 0;
+ plat->safety_feat_cfg->edpp = 1;
+ plat->safety_feat_cfg->prtyen = 1;
+ plat->safety_feat_cfg->tmouten = 1;
+
+ for (i = 0; i < plat->tx_queues_to_use; i++) {
+ /* Default TX Q0 to use TSO and the remaining TX queues for TBS */
+ if (i > 0)
+ plat->tx_queues_cfg[i].tbs_en = 1;
+ }
+
+ return 0;
}
static int mediatek_dwmac_probe(struct platform_device *pdev)
@@ -411,15 +709,7 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
- plat_dat->interface = priv_plat->phy_mode;
- plat_dat->has_gmac4 = 1;
- plat_dat->has_gmac = 0;
- plat_dat->pmt = 0;
- plat_dat->riwt_off = 1;
- plat_dat->maxmtu = ETH_DATA_LEN;
- plat_dat->bsp_priv = priv_plat;
- plat_dat->init = mediatek_dwmac_init;
- plat_dat->exit = mediatek_dwmac_exit;
+ mediatek_dwmac_common_data(pdev, plat_dat, priv_plat);
mediatek_dwmac_init(pdev, priv_plat);
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
@@ -434,6 +724,8 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
static const struct of_device_id mediatek_dwmac_match[] = {
{ .compatible = "mediatek,mt2712-gmac",
.data = &mt2712_gmac_variant },
+ { .compatible = "mediatek,mt8195-gmac",
+ .data = &mt8195_gmac_variant },
{ }
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 2ffa0a11eea5..0cc28c79cc61 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -78,6 +78,7 @@ struct ethqos_emac_por {
struct ethqos_emac_driver_data {
const struct ethqos_emac_por *por;
unsigned int num_por;
+ bool rgmii_config_loopback_en;
};
struct qcom_ethqos {
@@ -90,6 +91,7 @@ struct qcom_ethqos {
const struct ethqos_emac_por *por;
unsigned int num_por;
+ bool rgmii_config_loopback_en;
};
static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
@@ -181,6 +183,22 @@ static const struct ethqos_emac_por emac_v2_3_0_por[] = {
static const struct ethqos_emac_driver_data emac_v2_3_0_data = {
.por = emac_v2_3_0_por,
.num_por = ARRAY_SIZE(emac_v2_3_0_por),
+ .rgmii_config_loopback_en = true,
+};
+
+static const struct ethqos_emac_por emac_v2_1_0_por[] = {
+ { .offset = RGMII_IO_MACRO_CONFIG, .value = 0x40C01343 },
+ { .offset = SDCC_HC_REG_DLL_CONFIG, .value = 0x2004642C },
+ { .offset = SDCC_HC_REG_DDR_CONFIG, .value = 0x00000000 },
+ { .offset = SDCC_HC_REG_DLL_CONFIG2, .value = 0x00200000 },
+ { .offset = SDCC_USR_CTL, .value = 0x00010800 },
+ { .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 },
+};
+
+static const struct ethqos_emac_driver_data emac_v2_1_0_data = {
+ .por = emac_v2_1_0_por,
+ .num_por = ARRAY_SIZE(emac_v2_1_0_por),
+ .rgmii_config_loopback_en = false,
};
static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
@@ -297,8 +315,12 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN,
SDCC_DDR_CONFIG_PRG_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
- RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ if (ethqos->rgmii_config_loopback_en)
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ else
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ 0, RGMII_IO_MACRO_CONFIG);
break;
case SPEED_100:
@@ -331,8 +353,13 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
- RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ if (ethqos->rgmii_config_loopback_en)
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ else
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ 0, RGMII_IO_MACRO_CONFIG);
+
break;
case SPEED_10:
@@ -504,6 +531,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
data = of_device_get_match_data(&pdev->dev);
ethqos->por = data->por;
ethqos->num_por = data->num_por;
+ ethqos->rgmii_config_loopback_en = data->rgmii_config_loopback_en;
ethqos->rgmii_clk = devm_clk_get(&pdev->dev, "rgmii");
if (IS_ERR(ethqos->rgmii_clk)) {
@@ -558,6 +586,7 @@ static int qcom_ethqos_remove(struct platform_device *pdev)
static const struct of_device_id qcom_ethqos_match[] = {
{ .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_data},
+ { .compatible = "qcom,sm8150-ethqos", .data = &emac_v2_1_0_data},
{ }
};
MODULE_DEVICE_TABLE(of, qcom_ethqos_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 09644ab0d87a..f86cc83003f2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -16,6 +16,7 @@
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/regmap.h>
#include <linux/stmmac.h>
@@ -57,7 +58,6 @@ struct emac_variant {
};
/* struct sunxi_priv_data - hold all sunxi private data
- * @tx_clk: reference to MAC TX clock
* @ephy_clk: reference to the optional EPHY clock for the internal PHY
* @regulator: reference to the optional regulator
* @rst_ephy: reference to the optional EPHY reset for the internal PHY
@@ -68,7 +68,6 @@ struct emac_variant {
* @mux_handle: Internal pointer used by mdio-mux lib
*/
struct sunxi_priv_data {
- struct clk *tx_clk;
struct clk *ephy_clk;
struct regulator *regulator;
struct reset_control *rst_ephy;
@@ -579,22 +578,14 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
}
}
- ret = clk_prepare_enable(gmac->tx_clk);
- if (ret) {
- dev_err(&pdev->dev, "Could not enable AHB clock\n");
- goto err_disable_regulator;
- }
-
if (gmac->use_internal_phy) {
ret = sun8i_dwmac_power_internal_phy(netdev_priv(ndev));
if (ret)
- goto err_disable_clk;
+ goto err_disable_regulator;
}
return 0;
-err_disable_clk:
- clk_disable_unprepare(gmac->tx_clk);
err_disable_regulator:
if (gmac->regulator)
regulator_disable(gmac->regulator);
@@ -1043,8 +1034,6 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
if (gmac->variant->soc_has_internal_phy)
sun8i_dwmac_unpower_internal_phy(gmac);
- clk_disable_unprepare(gmac->tx_clk);
-
if (gmac->regulator)
regulator_disable(gmac->regulator);
}
@@ -1167,12 +1156,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
return -EINVAL;
}
- gmac->tx_clk = devm_clk_get(dev, "stmmaceth");
- if (IS_ERR(gmac->tx_clk)) {
- dev_err(dev, "Could not get TX clock\n");
- return PTR_ERR(gmac->tx_clk);
- }
-
/* Optional regulator for PHY */
gmac->regulator = devm_regulator_get_optional(dev, "phy");
if (IS_ERR(gmac->regulator)) {
@@ -1254,6 +1237,12 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
ndev = dev_get_drvdata(&pdev->dev);
priv = netdev_priv(ndev);
+ /* The MAC is runtime suspended after stmmac_dvr_probe(), so we
+ * need to ensure it is resumed before performing other operations
+ * such as reset.
+ */
+ pm_runtime_get_sync(&pdev->dev);
+
/* The mux must be registered after parent MDIO
* so after stmmac_dvr_probe()
*/
@@ -1272,12 +1261,15 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
goto dwmac_remove;
}
+ pm_runtime_put(&pdev->dev);
+
return 0;
dwmac_mux:
reset_control_put(gmac->rst_ephy);
clk_put(gmac->ephy_clk);
dwmac_remove:
+ pm_runtime_put_noidle(&pdev->dev);
stmmac_dvr_remove(&pdev->dev);
dwmac_exit:
sun8i_dwmac_exit(pdev, gmac);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 5b195d5051d6..57970ae2178d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -263,7 +263,7 @@ struct stmmac_priv {
u32 adv_ts;
int use_riwt;
int irq_wake;
- spinlock_t ptp_lock;
+ rwlock_t ptp_lock;
/* Protects auxiliary snapshot registers from concurrent access. */
struct mutex aux_ts_lock;
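The spinlock-to-rwlock conversion lets the timestamp-snapshot and gettime paths run concurrently as readers while frequency and time adjustments serialize as writers. A minimal sketch of the locking pattern, using a hypothetical stand-in lock rather than the driver's ptp_lock:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_ptp_lock);

static void example_read_path(void)
{
	unsigned long flags;

	read_lock_irqsave(&example_ptp_lock, flags);
	/* read clock registers; other readers may run concurrently */
	read_unlock_irqrestore(&example_ptp_lock, flags);
}

static void example_write_path(void)
{
	unsigned long flags;

	write_lock_irqsave(&example_ptp_lock, flags);
	/* adjust frequency or set time; excludes readers and writers */
	write_unlock_irqrestore(&example_ptp_lock, flags);
}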
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index a7ec9f4d46ce..22fea0f67245 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -196,9 +196,9 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
GMAC_TIMESTAMP_ATSNS_SHIFT;
for (i = 0; i < num_snapshot; i++) {
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ read_lock_irqsave(&priv->ptp_lock, flags);
get_ptptime(priv->ptpaddr, &ptp_time);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
event.type = PTP_CLOCK_EXTTS;
event.index = 0;
event.timestamp = ptp_time;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index bde76ea2deec..4a4b3651ab3e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -938,105 +938,15 @@ static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
priv->pause, tx_cnt);
}
-static void stmmac_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
+static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- int tx_cnt = priv->plat->tx_queues_to_use;
- int max_speed = priv->plat->max_speed;
-
- phylink_set(mac_supported, 10baseT_Half);
- phylink_set(mac_supported, 10baseT_Full);
- phylink_set(mac_supported, 100baseT_Half);
- phylink_set(mac_supported, 100baseT_Full);
- phylink_set(mac_supported, 1000baseT_Half);
- phylink_set(mac_supported, 1000baseT_Full);
- phylink_set(mac_supported, 1000baseKX_Full);
-
- phylink_set(mac_supported, Autoneg);
- phylink_set(mac_supported, Pause);
- phylink_set(mac_supported, Asym_Pause);
- phylink_set_port_modes(mac_supported);
-
- /* Cut down 1G if asked to */
- if ((max_speed > 0) && (max_speed < 1000)) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- } else if (priv->plat->has_gmac4) {
- if (!max_speed || max_speed >= 2500) {
- phylink_set(mac_supported, 2500baseT_Full);
- phylink_set(mac_supported, 2500baseX_Full);
- }
- } else if (priv->plat->has_xgmac) {
- if (!max_speed || (max_speed >= 2500)) {
- phylink_set(mac_supported, 2500baseT_Full);
- phylink_set(mac_supported, 2500baseX_Full);
- }
- if (!max_speed || (max_speed >= 5000)) {
- phylink_set(mac_supported, 5000baseT_Full);
- }
- if (!max_speed || (max_speed >= 10000)) {
- phylink_set(mac_supported, 10000baseSR_Full);
- phylink_set(mac_supported, 10000baseLR_Full);
- phylink_set(mac_supported, 10000baseER_Full);
- phylink_set(mac_supported, 10000baseLRM_Full);
- phylink_set(mac_supported, 10000baseT_Full);
- phylink_set(mac_supported, 10000baseKX4_Full);
- phylink_set(mac_supported, 10000baseKR_Full);
- }
- if (!max_speed || (max_speed >= 25000)) {
- phylink_set(mac_supported, 25000baseCR_Full);
- phylink_set(mac_supported, 25000baseKR_Full);
- phylink_set(mac_supported, 25000baseSR_Full);
- }
- if (!max_speed || (max_speed >= 40000)) {
- phylink_set(mac_supported, 40000baseKR4_Full);
- phylink_set(mac_supported, 40000baseCR4_Full);
- phylink_set(mac_supported, 40000baseSR4_Full);
- phylink_set(mac_supported, 40000baseLR4_Full);
- }
- if (!max_speed || (max_speed >= 50000)) {
- phylink_set(mac_supported, 50000baseCR2_Full);
- phylink_set(mac_supported, 50000baseKR2_Full);
- phylink_set(mac_supported, 50000baseSR2_Full);
- phylink_set(mac_supported, 50000baseKR_Full);
- phylink_set(mac_supported, 50000baseSR_Full);
- phylink_set(mac_supported, 50000baseCR_Full);
- phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
- phylink_set(mac_supported, 50000baseDR_Full);
- }
- if (!max_speed || (max_speed >= 100000)) {
- phylink_set(mac_supported, 100000baseKR4_Full);
- phylink_set(mac_supported, 100000baseSR4_Full);
- phylink_set(mac_supported, 100000baseCR4_Full);
- phylink_set(mac_supported, 100000baseLR4_ER4_Full);
- phylink_set(mac_supported, 100000baseKR2_Full);
- phylink_set(mac_supported, 100000baseSR2_Full);
- phylink_set(mac_supported, 100000baseCR2_Full);
- phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
- phylink_set(mac_supported, 100000baseDR2_Full);
- }
- }
- /* Half-Duplex can only work with single queue */
- if (tx_cnt > 1) {
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 1000baseT_Half);
- }
-
- linkmode_and(supported, supported, mac_supported);
- linkmode_andnot(supported, supported, mask);
-
- linkmode_and(state->advertising, state->advertising, mac_supported);
- linkmode_andnot(state->advertising, state->advertising, mask);
+ if (!priv->hw->xpcs)
+ return NULL;
- /* If PCS is supported, check which modes it supports. */
- if (priv->hw->xpcs)
- xpcs_validate(priv->hw->xpcs, supported, state);
+ return &priv->hw->xpcs->pcs;
}
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
@@ -1175,7 +1085,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
- .validate = stmmac_validate,
+ .validate = phylink_generic_validate,
+ .mac_select_pcs = stmmac_mac_select_pcs,
.mac_config = stmmac_mac_config,
.mac_link_down = stmmac_mac_link_down,
.mac_link_up = stmmac_mac_link_up,
@@ -1255,12 +1166,12 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
{
struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
+ int max_speed = priv->plat->max_speed;
int mode = priv->plat->phy_interface;
struct phylink *phylink;
priv->phylink_config.dev = &priv->dev->dev;
priv->phylink_config.type = PHYLINK_NETDEV;
- priv->phylink_config.pcs_poll = true;
if (priv->plat->mdio_bus_data)
priv->phylink_config.ovr_an_inband =
mdio_bus_data->xpcs_an_inband;
@@ -1268,14 +1179,50 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
if (!fwnode)
fwnode = dev_fwnode(priv->device);
+ /* Set the platform/firmware specified interface mode */
+ __set_bit(mode, priv->phylink_config.supported_interfaces);
+
+ /* If we have an xpcs, it defines which PHY interfaces are supported. */
+ if (priv->hw->xpcs)
+ xpcs_get_interfaces(priv->hw->xpcs,
+ priv->phylink_config.supported_interfaces);
+
+ priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
+
+ if (!max_speed || max_speed >= 1000)
+ priv->phylink_config.mac_capabilities |= MAC_1000;
+
+ if (priv->plat->has_gmac4) {
+ if (!max_speed || max_speed >= 2500)
+ priv->phylink_config.mac_capabilities |= MAC_2500FD;
+ } else if (priv->plat->has_xgmac) {
+ if (!max_speed || max_speed >= 2500)
+ priv->phylink_config.mac_capabilities |= MAC_2500FD;
+ if (!max_speed || max_speed >= 5000)
+ priv->phylink_config.mac_capabilities |= MAC_5000FD;
+ if (!max_speed || max_speed >= 10000)
+ priv->phylink_config.mac_capabilities |= MAC_10000FD;
+ if (!max_speed || max_speed >= 25000)
+ priv->phylink_config.mac_capabilities |= MAC_25000FD;
+ if (!max_speed || max_speed >= 40000)
+ priv->phylink_config.mac_capabilities |= MAC_40000FD;
+ if (!max_speed || max_speed >= 50000)
+ priv->phylink_config.mac_capabilities |= MAC_50000FD;
+ if (!max_speed || max_speed >= 100000)
+ priv->phylink_config.mac_capabilities |= MAC_100000FD;
+ }
+
+ /* Half-duplex can only work with a single queue */
+ if (priv->plat->tx_queues_to_use > 1)
+ priv->phylink_config.mac_capabilities &=
+ ~(MAC_10HD | MAC_100HD | MAC_1000HD);
+
phylink = phylink_create(&priv->phylink_config, fwnode,
mode, &stmmac_phylink_mac_ops);
if (IS_ERR(phylink))
return PTR_ERR(phylink);
- if (priv->hw->xpcs)
- phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);
-
priv->phylink = phylink;
return 0;
}
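The validate rework follows the phylink_generic_validate() contract: once supported_interfaces and mac_capabilities are populated, phylink derives the supported link modes itself, and the PCS is handed over through mac_select_pcs() instead of a phylink_set_pcs() call after creation. A minimal sketch of the ops side, with hypothetical foo_* names:

#include <linux/phylink.h>

static struct phylink_pcs *foo_select_pcs(struct phylink_config *config,
					  phy_interface_t interface)
{
	return NULL;	/* no separate PCS in this sketch */
}

static const struct phylink_mac_ops foo_phylink_ops = {
	.validate	= phylink_generic_validate,
	.mac_select_pcs	= foo_select_pcs,
	/* .mac_config/.mac_link_up/.mac_link_down as usual */
};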
@@ -1721,7 +1668,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_count = priv->plat->rx_queues_to_use;
- u32 queue;
+ int queue;
int ret;
/* RX INITIALIZATION */
@@ -1748,9 +1695,6 @@ err_init_rx_buffers:
rx_q->buf_alloc_num = 0;
rx_q->xsk_pool = NULL;
- if (queue == 0)
- break;
-
queue--;
}
@@ -2262,6 +2206,23 @@ static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
stmmac_stop_tx(priv, priv->ioaddr, chan);
}
+static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
+ u32 chan;
+
+ for (chan = 0; chan < dma_csr_ch; chan++) {
+ struct stmmac_channel *ch = &priv->channel[chan];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+}
+
/**
* stmmac_start_all_dma - start all RX and TX DMA channels
* @priv: driver private structure
@@ -2904,8 +2865,10 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
/* DMA CSR Channel configuration */
- for (chan = 0; chan < dma_csr_ch; chan++)
+ for (chan = 0; chan < dma_csr_ch; chan++) {
stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ }
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_channels_count; chan++) {
@@ -3309,7 +3272,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
ret = stmmac_init_ptp(priv);
if (ret == -EOPNOTSUPP)
- netdev_warn(priv->dev, "PTP not supported by HW\n");
+ netdev_info(priv->dev, "PTP not supported by HW\n");
else if (ret)
netdev_warn(priv->dev, "PTP init failed\n");
else if (ptp_register)
@@ -3761,6 +3724,7 @@ static int stmmac_open(struct net_device *dev)
stmmac_enable_all_queues(priv);
netif_tx_start_all_queues(priv->dev);
+ stmmac_enable_all_dma_irq(priv);
return 0;
@@ -6510,8 +6474,10 @@ int stmmac_xdp_open(struct net_device *dev)
}
/* DMA CSR Channel configuration */
- for (chan = 0; chan < dma_csr_ch; chan++)
+ for (chan = 0; chan < dma_csr_ch; chan++) {
stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ }
/* Adjust Split header */
sph_en = (priv->hw->rx_csum > 0) && priv->sph;
@@ -6572,6 +6538,7 @@ int stmmac_xdp_open(struct net_device *dev)
stmmac_enable_all_queues(priv);
netif_carrier_on(dev);
netif_tx_start_all_queues(dev);
+ stmmac_enable_all_dma_irq(priv);
return 0;
@@ -7451,6 +7418,7 @@ int stmmac_resume(struct device *dev)
stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
stmmac_enable_all_queues(priv);
+ stmmac_enable_all_dma_irq(priv);
mutex_unlock(&priv->lock);
rtnl_unlock();
@@ -7467,7 +7435,7 @@ static int __init stmmac_cmdline_opt(char *str)
char *opt;
if (!str || !*str)
- return -EINVAL;
+ return 1;
while ((opt = strsep(&str, ",")) != NULL) {
if (!strncmp(opt, "debug:", 6)) {
if (kstrtoint(opt + 6, 0, &debug))
@@ -7498,11 +7466,11 @@ static int __init stmmac_cmdline_opt(char *str)
goto err;
}
}
- return 0;
+ return 1;
err:
pr_err("%s: ERROR broken module parameter conversion", __func__);
- return -EINVAL;
+ return 1;
}
__setup("stmmaceth=", stmmac_cmdline_opt);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 1c9f02f9c317..e45fb191d8e6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -39,9 +39,9 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
diff = div_u64(adj, 1000000000ULL);
addend = neg_adj ? (addend - diff) : (addend + diff);
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ write_lock_irqsave(&priv->ptp_lock, flags);
stmmac_config_addend(priv, priv->ptpaddr, addend);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ write_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
}
@@ -86,9 +86,9 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
mutex_unlock(&priv->plat->est->lock);
}
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ write_lock_irqsave(&priv->ptp_lock, flags);
stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ write_unlock_irqrestore(&priv->ptp_lock, flags);
/* Calculate new basetime and re-configure EST after PTP time adjust. */
if (est_rst) {
@@ -137,9 +137,9 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
unsigned long flags;
u64 ns = 0;
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ read_lock_irqsave(&priv->ptp_lock, flags);
stmmac_get_systime(priv, priv->ptpaddr, &ns);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
*ts = ns_to_timespec64(ns);
@@ -162,9 +162,9 @@ static int stmmac_set_time(struct ptp_clock_info *ptp,
container_of(ptp, struct stmmac_priv, ptp_clock_ops);
unsigned long flags;
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ write_lock_irqsave(&priv->ptp_lock, flags);
stmmac_init_systime(priv, priv->ptpaddr, ts->tv_sec, ts->tv_nsec);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ write_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
}
@@ -194,12 +194,12 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
cfg->period.tv_sec = rq->perout.period.sec;
cfg->period.tv_nsec = rq->perout.period.nsec;
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ write_lock_irqsave(&priv->ptp_lock, flags);
ret = stmmac_flex_pps_config(priv, priv->ioaddr,
rq->perout.index, cfg, on,
priv->sub_second_inc,
priv->systime_flags);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ write_unlock_irqrestore(&priv->ptp_lock, flags);
break;
case PTP_CLK_REQ_EXTTS:
priv->plat->ext_snapshot_en = on;
@@ -314,7 +314,7 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num;
stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n;
- spin_lock_init(&priv->ptp_lock);
+ rwlock_init(&priv->ptp_lock);
mutex_init(&priv->aux_ts_lock);
priv->ptp_clock_ops = stmmac_ptp_clock_ops;
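The spinlock-to-rwlock conversion above lets concurrent gettime() callers read the hardware clock in parallel while keeping adjustments exclusive; all paths still disable local interrupts since the lock is also taken from interrupt context. A self-contained sketch of the pattern:

    static DEFINE_RWLOCK(example_lock);
    static u64 example_time;

    static u64 example_read(void)             /* hot path, shared */
    {
            unsigned long flags;
            u64 v;

            read_lock_irqsave(&example_lock, flags);
            v = example_time;
            read_unlock_irqrestore(&example_lock, flags);
            return v;
    }

    static void example_adjust(u64 delta)     /* rare, exclusive */
    {
            unsigned long flags;

            write_lock_irqsave(&example_lock, flags);
            example_time += delta;
            write_unlock_irqrestore(&example_lock, flags);
    }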
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index be3cb63675a5..9f1759593b94 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -1777,9 +1777,9 @@ static int stmmac_test_tbs(struct stmmac_priv *priv)
if (ret)
return ret;
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ read_lock_irqsave(&priv->ptp_lock, flags);
stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
if (!curr_time) {
ret = -EOPNOTSUPP;
@@ -1799,9 +1799,9 @@ static int stmmac_test_tbs(struct stmmac_priv *priv)
goto fail_disable;
/* Check if expected time has elapsed */
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ read_lock_irqsave(&priv->ptp_lock, flags);
stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
if ((curr_time - start_time) < STMMAC_TBS_LT_OFFSET)
ret = -EINVAL;
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index dba9f12efa1c..b04a6a7bf566 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -88,6 +88,7 @@
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
+#include <linux/jiffies.h>
#define cas_page_map(x) kmap_atomic((x))
#define cas_page_unmap(x) kunmap_atomic((x))
@@ -1234,19 +1235,6 @@ static void cas_init_rx_dma(struct cas *cp)
*/
readl(cp->regs + REG_INTR_STATUS_ALIAS);
writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
- if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
- for (i = 1; i < N_RX_COMP_RINGS; i++)
- readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
-
- /* 2 is different from 3 and 4 */
- if (N_RX_COMP_RINGS > 1)
- writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
- cp->regs + REG_PLUS_ALIASN_CLEAR(1));
-
- for (i = 2; i < N_RX_COMP_RINGS; i++)
- writel(INTR_RX_DONE_ALT,
- cp->regs + REG_PLUS_ALIASN_CLEAR(i));
- }
/* set up pause thresholds */
val = CAS_BASE(RX_PAUSE_THRESH_OFF,
@@ -3508,9 +3496,6 @@ enable_rx_done:
if (N_RX_DESC_RINGS > 1)
writel(RX_DESC_RINGN_SIZE(1) - 4,
cp->regs + REG_PLUS_RX_KICK1);
-
- for (i = 1; i < N_RX_COMP_RINGS; i++)
- writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
}
}
@@ -4063,8 +4048,8 @@ static void cas_link_timer(struct timer_list *t)
if (link_transition_timeout != 0 &&
cp->link_transition_jiffies_valid &&
- ((jiffies - cp->link_transition_jiffies) >
- (link_transition_timeout))) {
+ time_is_before_jiffies(cp->link_transition_jiffies +
+ link_transition_timeout)) {
/* One-second counter so link-down workaround doesn't
* cause resets to occur so fast as to fool the switch
* into thinking the link is down.
@@ -4679,7 +4664,7 @@ static void cas_set_msglevel(struct net_device *dev, u32 value)
static int cas_get_regs_len(struct net_device *dev)
{
struct cas *cp = netdev_priv(dev);
- return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS;
+ return min_t(int, cp->casreg_len, CAS_MAX_REGS);
}
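On the cas_link_timer() change above: time_is_before_jiffies() from <linux/jiffies.h> states the deadline test directly and performs the comparison with wraparound-safe signed arithmetic rather than an open-coded delta. A sketch with a hypothetical helper:

    /* true once 'stamp + timeout' lies in the past */
    static bool example_expired(unsigned long stamp, unsigned long timeout)
    {
            return time_is_before_jiffies(stamp + timeout);
    }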
static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index ba8ad76313a9..42460c0885fc 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -7909,7 +7909,7 @@ static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
* won't get any interrupts and that's painful to debug.
*/
if (nr64(LDG_NUM(ldn)) != ldg) {
- dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
+ dev_err(np->device, "Port %u, mismatched LDG assignment for ldn %d, should be %d is %llu\n",
np->port, ldn, ldg,
(unsigned long long) nr64(LDG_NUM(ldn)));
return -EINVAL;
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index ad9029ae6848..77e5dffb558f 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -3146,7 +3146,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
if (err) {
printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
"aborting.\n");
- goto err_out_iounmap;
+ goto err_out_free_coherent;
}
pci_set_drvdata(pdev, hp);
@@ -3179,6 +3179,10 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
return 0;
+err_out_free_coherent:
+ dma_free_coherent(hp->dma_dev, PAGE_SIZE,
+ hp->happy_block, hp->hblock_dvma);
+
err_out_iounmap:
iounmap(hp->gregs);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index d45b6bb86f0b..72acdf802258 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -6,7 +6,7 @@
*/
#include <linux/net_tstamp.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -471,9 +471,7 @@ static void am65_cpsw_get_pauseparam(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- pause->autoneg = AUTONEG_DISABLE;
- pause->rx_pause = salve->rx_pause ? true : false;
- pause->tx_pause = salve->tx_pause ? true : false;
+ phylink_ethtool_get_pauseparam(salve->phylink, pause);
}
static int am65_cpsw_set_pauseparam(struct net_device *ndev,
@@ -481,18 +479,7 @@ static int am65_cpsw_set_pauseparam(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy)
- return -EINVAL;
-
- if (!phy_validate_pause(salve->phy, pause))
- return -EINVAL;
-
- salve->rx_pause = pause->rx_pause ? true : false;
- salve->tx_pause = pause->tx_pause ? true : false;
-
- phy_set_asym_pause(salve->phy, salve->rx_pause, salve->tx_pause);
-
- return 0;
+ return phylink_ethtool_set_pauseparam(salve->phylink, pause);
}
static void am65_cpsw_get_wol(struct net_device *ndev,
@@ -500,11 +487,7 @@ static void am65_cpsw_get_wol(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- wol->supported = 0;
- wol->wolopts = 0;
-
- if (salve->phy)
- phy_ethtool_get_wol(salve->phy, wol);
+ phylink_ethtool_get_wol(salve->phylink, wol);
}
static int am65_cpsw_set_wol(struct net_device *ndev,
@@ -512,10 +495,7 @@ static int am65_cpsw_set_wol(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy)
- return -EOPNOTSUPP;
-
- return phy_ethtool_set_wol(salve->phy, wol);
+ return phylink_ethtool_set_wol(salve->phylink, wol);
}
static int am65_cpsw_get_link_ksettings(struct net_device *ndev,
@@ -523,11 +503,7 @@ static int am65_cpsw_get_link_ksettings(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy)
- return -EOPNOTSUPP;
-
- phy_ethtool_ksettings_get(salve->phy, ecmd);
- return 0;
+ return phylink_ethtool_ksettings_get(salve->phylink, ecmd);
}
static int
@@ -536,40 +512,28 @@ am65_cpsw_set_link_ksettings(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy))
- return -EOPNOTSUPP;
-
- return phy_ethtool_ksettings_set(salve->phy, ecmd);
+ return phylink_ethtool_ksettings_set(salve->phylink, ecmd);
}
static int am65_cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy))
- return -EOPNOTSUPP;
-
- return phy_ethtool_get_eee(salve->phy, edata);
+ return phylink_ethtool_get_eee(salve->phylink, edata);
}
static int am65_cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy))
- return -EOPNOTSUPP;
-
- return phy_ethtool_set_eee(salve->phy, edata);
+ return phylink_ethtool_set_eee(salve->phylink, edata);
}
static int am65_cpsw_nway_reset(struct net_device *ndev)
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy))
- return -EOPNOTSUPP;
-
- return phy_restart_aneg(salve->phy);
+ return phylink_ethtool_nway_reset(salve->phylink);
}
static int am65_cpsw_get_regs_len(struct net_device *ndev)
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 8251d7eb001b..d2747e9db286 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -18,7 +18,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -159,69 +159,6 @@ static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
common->pdata.quirks);
}
-void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
-{
- struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
- struct phy_device *phy = port->slave.phy;
- u32 mac_control = 0;
-
- if (!phy)
- return;
-
- if (phy->link) {
- mac_control = CPSW_SL_CTL_GMII_EN;
-
- if (phy->speed == 1000)
- mac_control |= CPSW_SL_CTL_GIG;
- if (phy->speed == 10 && phy_interface_is_rgmii(phy))
- /* Can be used with in band mode only */
- mac_control |= CPSW_SL_CTL_EXT_EN;
- if (phy->speed == 100 && phy->interface == PHY_INTERFACE_MODE_RMII)
- mac_control |= CPSW_SL_CTL_IFCTL_A;
- if (phy->duplex)
- mac_control |= CPSW_SL_CTL_FULLDUPLEX;
-
- /* RGMII speed is 100M if !CPSW_SL_CTL_GIG*/
-
- /* rx_pause/tx_pause */
- if (port->slave.rx_pause)
- mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
-
- if (port->slave.tx_pause)
- mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
-
- cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);
-
- /* enable forwarding */
- cpsw_ale_control_set(common->ale, port->port_id,
- ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
-
- am65_cpsw_qos_link_up(ndev, phy->speed);
- netif_tx_wake_all_queues(ndev);
- } else {
- int tmo;
-
- /* disable forwarding */
- cpsw_ale_control_set(common->ale, port->port_id,
- ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
-
- cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
-
- tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
- dev_dbg(common->dev, "donw msc_sl %08x tmo %d\n",
- cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS),
- tmo);
-
- cpsw_sl_ctl_reset(port->slave.mac_sl);
-
- am65_cpsw_qos_link_down(ndev);
- netif_tx_stop_all_queues(ndev);
- }
-
- phy_print_status(phy);
-}
-
static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
__be16 proto, u16 vid)
{
@@ -589,15 +526,11 @@ static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int ret;
- if (port->slave.phy)
- phy_stop(port->slave.phy);
+ phylink_stop(port->slave.phylink);
netif_tx_stop_all_queues(ndev);
- if (port->slave.phy) {
- phy_disconnect(port->slave.phy);
- port->slave.phy = NULL;
- }
+ phylink_disconnect_phy(port->slave.phylink);
ret = am65_cpsw_nuss_common_stop(common);
if (ret)
@@ -667,25 +600,14 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
if (ret)
goto error_cleanup;
- if (port->slave.phy_node) {
- port->slave.phy = of_phy_connect(ndev,
- port->slave.phy_node,
- &am65_cpsw_nuss_adjust_link,
- 0, port->slave.phy_if);
- if (!port->slave.phy) {
- dev_err(common->dev, "phy %pOF not found on slave %d\n",
- port->slave.phy_node,
- port->port_id);
- ret = -ENODEV;
- goto error_cleanup;
- }
- }
+ ret = phylink_of_phy_connect(port->slave.phylink, port->slave.phy_node, 0);
+ if (ret)
+ goto error_cleanup;
/* restore vlan configurations */
vlan_for_each(ndev, cpsw_restore_vlans, port);
- phy_attached_info(port->slave.phy);
- phy_start(port->slave.phy);
+ phylink_start(port->slave.phylink);
return 0;
@@ -1431,10 +1353,7 @@ static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
return am65_cpsw_nuss_hwtstamp_get(ndev, req);
}
- if (!port->slave.phy)
- return -EOPNOTSUPP;
-
- return phy_mii_ioctl(port->slave.phy, req, cmd);
+ return phylink_mii_ioctl(port->slave.phylink, req, cmd);
}
static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
@@ -1494,6 +1413,81 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_get_devlink_port = am65_cpsw_ndo_get_devlink_port,
};
+static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ /* Currently not used */
+}
+
+static void am65_cpsw_nuss_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
+ phylink_config);
+ struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
+ struct am65_cpsw_common *common = port->common;
+ struct net_device *ndev = port->ndev;
+ int tmo;
+
+ /* disable forwarding */
+ cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+
+ cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
+
+ tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
+ dev_dbg(common->dev, "down msc_sl %08x tmo %d\n",
+ cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS), tmo);
+
+ cpsw_sl_ctl_reset(port->slave.mac_sl);
+
+ am65_cpsw_qos_link_down(ndev);
+ netif_tx_stop_all_queues(ndev);
+}
+
+static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
+ phylink_config);
+ struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
+ struct am65_cpsw_common *common = port->common;
+ u32 mac_control = CPSW_SL_CTL_GMII_EN;
+ struct net_device *ndev = port->ndev;
+
+ if (speed == SPEED_1000)
+ mac_control |= CPSW_SL_CTL_GIG;
+ if (speed == SPEED_10 && interface == PHY_INTERFACE_MODE_RGMII)
+ /* Can be used with in-band mode only */
+ mac_control |= CPSW_SL_CTL_EXT_EN;
+ if (speed == SPEED_100 && interface == PHY_INTERFACE_MODE_RMII)
+ mac_control |= CPSW_SL_CTL_IFCTL_A;
+ if (duplex)
+ mac_control |= CPSW_SL_CTL_FULLDUPLEX;
+
+ /* rx_pause/tx_pause */
+ if (rx_pause)
+ mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
+
+ if (tx_pause)
+ mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
+
+ cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);
+
+ /* enable forwarding */
+ cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+ am65_cpsw_qos_link_up(ndev, speed);
+ netif_tx_wake_all_queues(ndev);
+}
+
+static const struct phylink_mac_ops am65_cpsw_phylink_mac_ops = {
+ .validate = phylink_generic_validate,
+ .mac_config = am65_cpsw_nuss_mac_config,
+ .mac_link_down = am65_cpsw_nuss_mac_link_down,
+ .mac_link_up = am65_cpsw_nuss_mac_link_up,
+};
+
static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
{
struct am65_cpsw_common *common = port->common;
@@ -1890,27 +1884,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
of_property_read_bool(port_np, "ti,mac-only");
/* get phy/link info */
- if (of_phy_is_fixed_link(port_np)) {
- ret = of_phy_register_fixed_link(port_np);
- if (ret) {
- ret = dev_err_probe(dev, ret,
- "failed to register fixed-link phy %pOF\n",
- port_np);
- goto of_node_put;
- }
- port->slave.phy_node = of_node_get(port_np);
- } else {
- port->slave.phy_node =
- of_parse_phandle(port_np, "phy-handle", 0);
- }
-
- if (!port->slave.phy_node) {
- dev_err(dev,
- "slave[%d] no phy found\n", port_id);
- ret = -ENODEV;
- goto of_node_put;
- }
-
+ port->slave.phy_node = port_np;
ret = of_get_phy_mode(port_np, &port->slave.phy_if);
if (ret) {
dev_err(dev, "%pOF read phy-mode err %d\n",
@@ -1952,12 +1926,25 @@ static void am65_cpsw_pcpu_stats_free(void *data)
free_percpu(stats);
}
+static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_port *port;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ if (port->slave.phylink)
+ phylink_destroy(port->slave.phylink);
+ }
+}
+
static int
am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
{
struct am65_cpsw_ndev_priv *ndev_priv;
struct device *dev = common->dev;
struct am65_cpsw_port *port;
+ struct phylink *phylink;
int ret;
port = &common->ports[port_idx];
@@ -1995,6 +1982,20 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops;
port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;
+ /* Configuring Phylink */
+ port->slave.phylink_config.dev = &port->ndev->dev;
+ port->slave.phylink_config.type = PHYLINK_NETDEV;
+ port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+
+ phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces);
+
+ phylink = phylink_create(&port->slave.phylink_config, dev->fwnode, port->slave.phy_if,
+ &am65_cpsw_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ port->slave.phylink = phylink;
+
/* Disable TX checksum offload by default due to HW bug */
if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM)
port->ndev->features &= ~NETIF_F_HW_CSUM;
@@ -2761,15 +2762,17 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
ret = am65_cpsw_nuss_init_ndevs(common);
if (ret)
- goto err_of_clear;
+ goto err_free_phylink;
ret = am65_cpsw_nuss_register_ndevs(common);
if (ret)
- goto err_of_clear;
+ goto err_free_phylink;
pm_runtime_put(dev);
return 0;
+err_free_phylink:
+ am65_cpsw_nuss_phylink_cleanup(common);
err_of_clear:
of_platform_device_destroy(common->mdio_dev, NULL);
err_pm_clear:
@@ -2792,6 +2795,7 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
return ret;
}
+ am65_cpsw_nuss_phylink_cleanup(common);
am65_cpsw_unregister_devlink(common);
am65_cpsw_unregister_notifiers(common);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 048ed10143c1..ac945631bf2f 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -10,7 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <net/devlink.h>
@@ -30,13 +30,14 @@ struct am65_cpsw_slave_data {
bool mac_only;
struct cpsw_sl *mac_sl;
struct device_node *phy_node;
- struct phy_device *phy;
phy_interface_t phy_if;
struct phy *ifphy;
bool rx_pause;
bool tx_pause;
u8 mac_addr[ETH_ALEN];
int port_vlan;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
};
struct am65_cpsw_port {
diff --git a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
index 599708a3e81d..d4c56da98a6a 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
@@ -237,15 +237,11 @@ static int am65_cpsw_port_vlans_add(struct am65_cpsw_port *port,
{
bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct net_device *orig_dev = vlan->obj.orig_dev;
- bool cpu_port = netif_is_bridge_master(orig_dev);
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
netdev_dbg(port->ndev, "VID add: %s: vid:%u flags:%X\n",
port->ndev->name, vlan->vid, vlan->flags);
- if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
- return 0;
-
return am65_cpsw_port_vlan_add(port, untag, pvid, vlan->vid, orig_dev);
}
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index aa42141be3c0..a557a477d039 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -364,11 +364,9 @@ int cpsw_ethtool_op_begin(struct net_device *ndev)
struct cpsw_common *cpsw = priv->cpsw;
int ret;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
- pm_runtime_put_noidle(cpsw->dev);
- }
return ret;
}
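pm_runtime_resume_and_get(), used above, drops the usage count itself when the resume fails, which is why the old pm_runtime_put_noidle() error path can go away. The idiom in sketch form:

    ret = pm_runtime_resume_and_get(dev);
    if (ret < 0)
            return ret;       /* no put_noidle() needed on failure */
    /* ... use the device ... */
    pm_runtime_put(dev);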
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c
index a7d97d429e06..ce85f7610273 100644
--- a/drivers/net/ethernet/ti/cpsw_switchdev.c
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.c
@@ -252,15 +252,11 @@ static int cpsw_port_vlans_add(struct cpsw_priv *priv,
{
bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct net_device *orig_dev = vlan->obj.orig_dev;
- bool cpu_port = netif_is_bridge_master(orig_dev);
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
dev_dbg(priv->dev, "VID add: %s: vid:%u flags:%X\n",
priv->ndev->name, vlan->vid, vlan->flags);
- if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
- return 0;
-
return cpsw_port_vlan_add(priv, untag, pvid, vlan->vid, orig_dev);
}
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index dc70a6bfaa6a..92ca739fac01 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -568,7 +568,9 @@ int cpts_register(struct cpts *cpts)
for (i = 0; i < CPTS_MAX_EVENTS; i++)
list_add(&cpts->pool_data[i].list, &cpts->pool);
- clk_enable(cpts->refclk);
+ err = clk_enable(cpts->refclk);
+ if (err)
+ return err;
cpts_write32(cpts, CPTS_EN, control);
cpts_write32(cpts, TS_PEND_EN, int_enable);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 31df3267a01a..4b6aed78d392 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1604,6 +1604,7 @@ static int emac_dev_stop(struct net_device *ndev)
int irq_num;
struct emac_priv *priv = netdev_priv(ndev);
struct device *emac_dev = &ndev->dev;
+ int ret = 0;
/* inform the upper layers. */
netif_stop_queue(ndev);
@@ -1618,17 +1619,31 @@ static int emac_dev_stop(struct net_device *ndev)
phy_disconnect(ndev->phydev);
/* Free IRQ */
- while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
- for (irq_num = res->start; irq_num <= res->end; irq_num++)
- free_irq(irq_num, priv->ndev);
- i++;
+ if (dev_of_node(&priv->pdev->dev)) {
+ do {
+ ret = platform_get_irq_optional(priv->pdev, i);
+ if (ret < 0 && ret != -ENXIO)
+ break;
+ if (ret > 0) {
+ free_irq(ret, priv->ndev);
+ } else {
+ ret = 0;
+ break;
+ }
+ } while (++i);
+ } else {
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
+ for (irq_num = res->start; irq_num <= res->end; irq_num++)
+ free_irq(irq_num, priv->ndev);
+ i++;
+ }
}
if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
pm_runtime_put(&priv->pdev->dev);
- return 0;
+ return ret;
}
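The devicetree branch above walks interrupts by index because OF-probed platform devices no longer carry static IORESOURCE_IRQ resources. A compact sketch of the walk (example_free_irqs() is a hypothetical helper; -ENXIO marks the end of the list):

    static void example_free_irqs(struct platform_device *pdev, void *dev_id)
    {
            int i = 0, irq;

            while ((irq = platform_get_irq_optional(pdev, i++)) > 0)
                    free_irq(irq, dev_id);
            /* irq == -ENXIO: no more entries; other negatives: real error */
    }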
/**
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index a4efd5e35158..fce2626e34fa 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -70,7 +70,7 @@ struct davinci_mdio_regs {
#define USERACCESS_DATA (0xffff)
u32 physel;
- } user[0];
+ } user[];
};
static const struct mdio_platform_data default_pdata = {
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index b818e4579f6f..16507bff652a 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -2082,7 +2082,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
netcp->tx_pool_region_id = temp[1];
if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
- dev_err(dev, "tx-pool size too small, must be atleast(%ld)\n",
+ dev_err(dev, "tx-pool size too small, must be at least %ld\n",
MAX_SKB_FRAGS);
ret = -ENODEV;
goto quit;
diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
index 89a31783fbb4..eb39a45de012 100644
--- a/drivers/net/ethernet/vertexcom/mse102x.c
+++ b/drivers/net/ethernet/vertexcom/mse102x.c
@@ -362,7 +362,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
mse102x_dump_packet(__func__, skb->len, skb->data);
skb->protocol = eth_type_trans(skb, mse->ndev);
- netif_rx_ni(skb);
+ netif_rx(skb);
mse->ndev->stats.rx_packets++;
mse->ndev->stats.rx_bytes += rxlen;
@@ -731,7 +731,7 @@ static int mse102x_probe_spi(struct spi_device *spi)
return 0;
}
-static int mse102x_remove_spi(struct spi_device *spi)
+static void mse102x_remove_spi(struct spi_device *spi)
{
struct mse102x_net *mse = dev_get_drvdata(&spi->dev);
struct mse102x_net_spi *mses = to_mse102x_spi(mse);
@@ -741,8 +741,6 @@ static int mse102x_remove_spi(struct spi_device *spi)
mse102x_remove_device_debugfs(mses);
unregister_netdev(mse->ndev);
-
- return 0;
}
static const struct of_device_id mse102x_match_table[] = {
diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c
index 7779a36da3c8..7c52796273a4 100644
--- a/drivers/net/ethernet/wiznet/w5100-spi.c
+++ b/drivers/net/ethernet/wiznet/w5100-spi.c
@@ -461,11 +461,9 @@ static int w5100_spi_probe(struct spi_device *spi)
return w5100_probe(&spi->dev, ops, priv_size, mac, spi->irq, -EINVAL);
}
-static int w5100_spi_remove(struct spi_device *spi)
+static void w5100_spi_remove(struct spi_device *spi)
{
w5100_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id w5100_spi_ids[] = {
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index ae24d6b86803..4fd7c39e1123 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -883,7 +883,7 @@ static void w5100_rx_work(struct work_struct *work)
struct sk_buff *skb;
while ((skb = w5100_rx_skb(priv->ndev)))
- netif_rx_ni(skb);
+ netif_rx(skb);
w5100_enable_intr(priv);
}
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 911b5ef9e680..0014729b8865 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-# Xilink device configuration
+# Xilinx device configuration
#
config NET_VENDOR_XILINX
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index 4a73127e10a6..c6395c406418 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -271,7 +271,7 @@ This option defaults to enabled (set) */
#define XTE_TIE_OFFSET 0x000003A4 /* Interrupt enable */
-/** MII Mamagement Control register (MGTCR) */
+/* MII Management Control register (MGTCR) */
#define XTE_MGTDR_OFFSET 0x000003B0 /* MII data */
#define XTE_MIIMAI_OFFSET 0x000003B4 /* MII control */
@@ -283,7 +283,7 @@ This option defaults to enabled (set) */
#define STS_CTRL_APP0_ERR (1 << 31)
#define STS_CTRL_APP0_IRQONEND (1 << 30)
-/* undoccumented */
+/* undocumented */
#define STS_CTRL_APP0_STOPONEND (1 << 29)
#define STS_CTRL_APP0_CMPLT (1 << 28)
#define STS_CTRL_APP0_SOP (1 << 27)
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 64c7e26c3b75..869e362e09c1 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -361,8 +361,9 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
+ sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));
- skb = netdev_alloc_skb_ip_align(ndev,
- XTE_MAX_JUMBO_FRAME_SIZE);
+ skb = __netdev_alloc_skb_ip_align(ndev,
+ XTE_MAX_JUMBO_FRAME_SIZE,
+ GFP_KERNEL);
if (!skb)
goto out;
@@ -1008,7 +1009,7 @@ static void ll_temac_recv(struct net_device *ndev)
(skb->len > 64)) {
/* Convert from device endianness (be32) to cpu
- * endiannes, and if necessary swap the bytes
+ * endianness, and if necessary swap the bytes
* (back) for proper IP checksum byte order
* (be16).
*/
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 5b4d153b1492..0f9c88dd1a4a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -119,11 +119,11 @@
#define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
#define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */
-/* Default TX/RX Threshold and waitbound values for SGDMA mode */
+/* Default TX/RX Threshold and delay timer values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD 24
-#define XAXIDMA_DFT_TX_WAITBOUND 254
-#define XAXIDMA_DFT_RX_THRESHOLD 24
-#define XAXIDMA_DFT_RX_WAITBOUND 254
+#define XAXIDMA_DFT_TX_USEC 50
+#define XAXIDMA_DFT_RX_THRESHOLD 1
+#define XAXIDMA_DFT_RX_USEC 50
#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */
@@ -385,7 +385,9 @@ struct axidma_bd {
* @phy_node: Pointer to device node structure
* @phylink: Pointer to phylink instance
* @phylink_config: phylink configuration settings
+ * @napi: NAPI control structure
* @pcs_phy: Reference to PCS/PMA PHY if used
+ * @pcs: phylink pcs structure for PCS PHY
* @switch_x_sgmii: Whether switchable 1000BaseX/SGMII mode is enabled in the core
* @axi_clk: AXI4-Lite bus clock
* @misc_clks: Misc ethernet clocks (AXI4-Stream, Ref, MGT clocks)
@@ -394,6 +396,7 @@ struct axidma_bd {
* @regs_start: Resource start for axienet device addresses
* @regs: Base address for the axienet_local device address space
* @dma_regs: Base address for the axidma device address space
+ * @rx_dma_cr: Nominal content of RX DMA control register
* @dma_err_task: Work structure to process Axi DMA errors
* @tx_irq: Axidma TX IRQ number
* @rx_irq: Axidma RX IRQ number
@@ -422,7 +425,9 @@ struct axidma_bd {
* @csum_offload_on_tx_path: Stores the checksum selection on TX side.
* @csum_offload_on_rx_path: Stores the checksum selection on RX side.
* @coalesce_count_rx: Store the irq coalesce on RX side.
+ * @coalesce_usec_rx: IRQ coalesce delay for RX
* @coalesce_count_tx: Store the irq coalesce on TX side.
+ * @coalesce_usec_tx: IRQ coalesce delay for TX
*/
struct axienet_local {
struct net_device *ndev;
@@ -433,7 +438,10 @@ struct axienet_local {
struct phylink *phylink;
struct phylink_config phylink_config;
+ struct napi_struct napi;
+
struct mdio_device *pcs_phy;
+ struct phylink_pcs pcs;
bool switch_x_sgmii;
@@ -447,6 +455,8 @@ struct axienet_local {
void __iomem *regs;
void __iomem *dma_regs;
+ u32 rx_dma_cr;
+
struct work_struct dma_err_task;
int tx_irq;
@@ -474,7 +484,9 @@ struct axienet_local {
int csum_offload_on_rx_path;
u32 coalesce_count_rx;
+ u32 coalesce_usec_rx;
u32 coalesce_count_tx;
+ u32 coalesce_usec_tx;
};
/**
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 377c94ec2486..c7eb05e4a6bf 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -7,7 +7,7 @@
* Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
* Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
* Copyright (c) 2010 - 2011 PetaLogix
- * Copyright (c) 2019 SED Systems, a division of Calian Ltd.
+ * Copyright (c) 2019 - 2022 Calian Advanced Technologies
* Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
*
* This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
@@ -33,7 +33,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
-#include <linux/spinlock.h>
+#include <linux/math64.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
@@ -190,7 +190,7 @@ static void axienet_dma_bd_release(struct net_device *ndev)
struct axienet_local *lp = netdev_priv(ndev);
/* If we end up here, tx_bd_v must have been DMA allocated. */
- dma_free_coherent(ndev->dev.parent,
+ dma_free_coherent(lp->dev,
sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
lp->tx_bd_v,
lp->tx_bd_p);
@@ -215,18 +215,90 @@ static void axienet_dma_bd_release(struct net_device *ndev)
*/
if (lp->rx_bd_v[i].cntrl) {
phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
- dma_unmap_single(ndev->dev.parent, phys,
+ dma_unmap_single(lp->dev, phys,
lp->max_frm_size, DMA_FROM_DEVICE);
}
}
- dma_free_coherent(ndev->dev.parent,
+ dma_free_coherent(lp->dev,
sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
lp->rx_bd_v,
lp->rx_bd_p);
}
/**
+ * axienet_usec_to_timer - Calculate IRQ delay timer value
+ * @lp: Pointer to the axienet_local structure
+ * @coalesce_usec: Microseconds to convert into timer value
+ */
+static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
+{
+ u32 result;
+ u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
+
+ if (lp->axi_clk)
+ clk_rate = clk_get_rate(lp->axi_clk);
+
+ /* 1 Timeout Interval = 125 * (clock period of SG clock) */
+ result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
+ (u64)125000000);
+ if (result > 255)
+ result = 255;
+
+ return result;
+}
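Worked numbers for the conversion above: with one timeout interval equal to 125 SG-clock periods, the register value is coalesce_usec * clk_rate / 125000000, rounded to nearest and clamped to the 8-bit field:

    50 us @ 125 MHz: (50 * 125000000) / 125000000 = 50
    50 us @ 100 MHz: (50 * 100000000) / 125000000 = 40
    anything computing above 255 saturates at 255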
+
+/**
+ * axienet_dma_start - Set up DMA registers and start DMA operation
+ * @lp: Pointer to the axienet_local structure
+ */
+static void axienet_dma_start(struct axienet_local *lp)
+{
+ u32 tx_cr;
+
+ /* Start updating the Rx channel control register */
+ lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
+ XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
+ /* Only set interrupt delay timer if not generating an interrupt on
+ * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
+ */
+ if (lp->coalesce_count_rx > 1)
+ lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
+ << XAXIDMA_DELAY_SHIFT) |
+ XAXIDMA_IRQ_DELAY_MASK;
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+
+ /* Start updating the Tx channel control register */
+ tx_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
+ XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
+ /* Only set interrupt delay timer if not generating an interrupt on
+ * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
+ */
+ if (lp->coalesce_count_tx > 1)
+ tx_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
+ << XAXIDMA_DELAY_SHIFT) |
+ XAXIDMA_IRQ_DELAY_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, tx_cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.
+ */
+ axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+ lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+ axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+ (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * Tx channel is now ready to run. But only after we write to the
+ * tail pointer register that the Tx channel will start transmitting.
+ */
+ axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+ tx_cr |= XAXIDMA_CR_RUNSTOP_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, tx_cr);
+}
+
+/**
* axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
* @ndev: Pointer to the net_device structure
*
@@ -238,7 +310,6 @@ static void axienet_dma_bd_release(struct net_device *ndev)
*/
static int axienet_dma_bd_init(struct net_device *ndev)
{
- u32 cr;
int i;
struct sk_buff *skb;
struct axienet_local *lp = netdev_priv(ndev);
@@ -249,13 +320,13 @@ static int axienet_dma_bd_init(struct net_device *ndev)
lp->rx_bd_ci = 0;
/* Allocate the Tx and Rx buffer descriptors. */
- lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ lp->tx_bd_v = dma_alloc_coherent(lp->dev,
sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
&lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
return -ENOMEM;
- lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ lp->rx_bd_v = dma_alloc_coherent(lp->dev,
sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
&lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
@@ -285,9 +356,9 @@ static int axienet_dma_bd_init(struct net_device *ndev)
goto out;
lp->rx_bd_v[i].skb = skb;
- addr = dma_map_single(ndev->dev.parent, skb->data,
+ addr = dma_map_single(lp->dev, skb->data,
lp->max_frm_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(ndev->dev.parent, addr)) {
+ if (dma_mapping_error(lp->dev, addr)) {
netdev_err(ndev, "DMA mapping error\n");
goto out;
}
@@ -296,50 +367,7 @@ static int axienet_dma_bd_init(struct net_device *ndev)
lp->rx_bd_v[i].cntrl = lp->max_frm_size;
}
- /* Start updating the Rx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
- ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = ((cr & ~XAXIDMA_DELAY_MASK) |
- (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- /* Start updating the Tx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
- ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
- (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- /* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.
- */
- axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
-
- /* Write to the RS (Run-stop) bit in the Tx channel control register.
- * Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting.
- */
- axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
+ axienet_dma_start(lp);
return 0;
out:
@@ -531,13 +559,51 @@ static int __axienet_device_reset(struct axienet_local *lp)
}
/**
+ * axienet_dma_stop - Stop DMA operation
+ * @lp: Pointer to the axienet_local structure
+ */
+static void axienet_dma_stop(struct axienet_local *lp)
+{
+ int count;
+ u32 cr, sr;
+
+ cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ synchronize_irq(lp->rx_irq);
+
+ cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ synchronize_irq(lp->tx_irq);
+
+ /* Give DMAs a chance to halt gracefully */
+ sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ }
+
+ sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ }
+
+ /* Do a reset to ensure DMA is really stopped */
+ axienet_lock_mii(lp);
+ __axienet_device_reset(lp);
+ axienet_unlock_mii(lp);
+}
+
+/**
* axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
* @ndev: Pointer to the net_device structure
*
* This function is called to reset and initialize the Axi Ethernet core. This
* is typically called during initialization. It does a reset of the Axi DMA
* Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
- * areconnected to Axi Ethernet reset lines, this in turn resets the Axi
+ * are connected to Axi Ethernet reset lines, this in turn resets the Axi
* Ethernet core. No separate hardware reset is done for the Axi Ethernet
* core.
* Returns 0 on success or a negative error number otherwise.
@@ -636,7 +702,7 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
/* Ensure we see complete descriptor update */
dma_rmb();
phys = desc_get_phys_addr(lp, cur_p);
- dma_unmap_single(ndev->dev.parent, phys,
+ dma_unmap_single(lp->dev, phys,
(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
@@ -774,9 +840,9 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
}
- phys = dma_map_single(ndev->dev.parent, skb->data,
+ phys = dma_map_single(lp->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (unlikely(dma_mapping_error(lp->dev, phys))) {
if (net_ratelimit())
netdev_err(ndev, "TX DMA mapping error\n");
ndev->stats.tx_dropped++;
@@ -790,11 +856,11 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
frag = &skb_shinfo(skb)->frags[ii];
- phys = dma_map_single(ndev->dev.parent,
+ phys = dma_map_single(lp->dev,
skb_frag_address(frag),
skb_frag_size(frag),
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (unlikely(dma_mapping_error(lp->dev, phys))) {
if (net_ratelimit())
netdev_err(ndev, "TX DMA mapping error\n");
ndev->stats.tx_dropped++;
@@ -833,79 +899,84 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
/**
- * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
- * BD processing.
- * @ndev: Pointer to net_device structure.
+ * axienet_poll - Triggered by RX ISR to complete the received BD processing.
+ * @napi: Pointer to NAPI structure.
+ * @budget: Max number of packets to process.
*
- * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
- * does minimal processing and invokes "netif_rx" to complete further
- * processing.
+ * Return: Number of RX packets processed.
*/
-static void axienet_recv(struct net_device *ndev)
+static int axienet_poll(struct napi_struct *napi, int budget)
{
u32 length;
u32 csumstatus;
u32 size = 0;
- u32 packets = 0;
+ int packets = 0;
dma_addr_t tail_p = 0;
- struct axienet_local *lp = netdev_priv(ndev);
- struct sk_buff *skb, *new_skb;
struct axidma_bd *cur_p;
+ struct sk_buff *skb, *new_skb;
+ struct axienet_local *lp = container_of(napi, struct axienet_local, napi);
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
- while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+ while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
dma_addr_t phys;
- tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
-
/* Ensure we see complete descriptor update */
dma_rmb();
- phys = desc_get_phys_addr(lp, cur_p);
- dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
- DMA_FROM_DEVICE);
skb = cur_p->skb;
cur_p->skb = NULL;
- length = cur_p->app4 & 0x0000FFFF;
-
- skb_put(skb, length);
- skb->protocol = eth_type_trans(skb, ndev);
- /*skb_checksum_none_assert(skb);*/
- skb->ip_summed = CHECKSUM_NONE;
-
- /* if we're doing Rx csum offload, set it up */
- if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
- csumstatus = (cur_p->app2 &
- XAE_FULL_CSUM_STATUS_MASK) >> 3;
- if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
- (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* skb could be NULL if a previous pass already received the
+ * packet for this slot in the ring, but failed to refill it
+ * with a newly allocated buffer. In this case, don't try to
+ * receive it again.
+ */
+ if (likely(skb)) {
+ length = cur_p->app4 & 0x0000FFFF;
+
+ phys = desc_get_phys_addr(lp, cur_p);
+ dma_unmap_single(lp->dev, phys, lp->max_frm_size,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, lp->ndev);
+ /*skb_checksum_none_assert(skb);*/
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* if we're doing Rx csum offload, set it up */
+ if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
+ csumstatus = (cur_p->app2 &
+ XAE_FULL_CSUM_STATUS_MASK) >> 3;
+ if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
+ csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
+ skb->protocol == htons(ETH_P_IP) &&
+ skb->len > 64) {
+ skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
+ skb->ip_summed = CHECKSUM_COMPLETE;
}
- } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
- skb->protocol == htons(ETH_P_IP) &&
- skb->len > 64) {
- skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
- netif_rx(skb);
+ napi_gro_receive(napi, skb);
- size += length;
- packets++;
+ size += length;
+ packets++;
+ }
- new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
+ new_skb = napi_alloc_skb(napi, lp->max_frm_size);
if (!new_skb)
- return;
+ break;
- phys = dma_map_single(ndev->dev.parent, new_skb->data,
+ phys = dma_map_single(lp->dev, new_skb->data,
lp->max_frm_size,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (unlikely(dma_mapping_error(lp->dev, phys))) {
if (net_ratelimit())
- netdev_err(ndev, "RX DMA mapping error\n");
+ netdev_err(lp->ndev, "RX DMA mapping error\n");
dev_kfree_skb(new_skb);
- return;
+ break;
}
desc_set_phys_addr(lp, phys, cur_p);
@@ -913,16 +984,30 @@ static void axienet_recv(struct net_device *ndev)
cur_p->status = 0;
cur_p->skb = new_skb;
+ /* Only update tail_p to mark this slot as usable after it has
+ * been successfully refilled.
+ */
+ tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
+
if (++lp->rx_bd_ci >= lp->rx_bd_num)
lp->rx_bd_ci = 0;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
}
- ndev->stats.rx_packets += packets;
- ndev->stats.rx_bytes += size;
+ lp->ndev->stats.rx_packets += packets;
+ lp->ndev->stats.rx_bytes += size;
if (tail_p)
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+
+ if (packets < budget && napi_complete_done(napi, packets)) {
+ /* Re-enable RX completion interrupts. This should
+ * cause an immediate interrupt if any RX packets are
+ * already pending.
+ */
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+ }
+ return packets;
}
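axienet_poll() follows the standard NAPI contract, distilled below; process_rx() and reenable_rx_irq() are hypothetical stand-ins. Return at most 'budget' packets, and complete NAPI only when under budget; if napi_complete_done() returns true, re-enabling completion interrupts raises one immediately should work have arrived in the meantime:

    static int example_poll(struct napi_struct *napi, int budget)
    {
            int done = process_rx(napi, budget);      /* hypothetical */

            if (done < budget && napi_complete_done(napi, done))
                    reenable_rx_irq();                /* hypothetical */
            return done;      /* returning 'budget' keeps polling scheduled */
    }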
/**
@@ -937,41 +1022,27 @@ static void axienet_recv(struct net_device *ndev)
*/
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
- u32 cr;
unsigned int status;
struct net_device *ndev = _ndev;
struct axienet_local *lp = netdev_priv(ndev);
status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
- axienet_start_xmit_done(lp->ndev);
- goto out;
- }
+
if (!(status & XAXIDMA_IRQ_ALL_MASK))
return IRQ_NONE;
- if (status & XAXIDMA_IRQ_ERROR_MASK) {
- dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
- (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
- (lp->tx_bd_v[lp->tx_bd_ci]).phys);
-
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
+
+ if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
+ netdev_err(ndev, "DMA Tx error 0x%x\n", status);
+ netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
+ (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
+ (lp->tx_bd_v[lp->tx_bd_ci]).phys);
schedule_work(&lp->dma_err_task);
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
+ } else {
+ axienet_start_xmit_done(lp->ndev);
}
-out:
+
return IRQ_HANDLED;
}
@@ -982,46 +1053,40 @@ out:
*
* Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
*
- * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
+ * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
* processing.
*/
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
- u32 cr;
unsigned int status;
struct net_device *ndev = _ndev;
struct axienet_local *lp = netdev_priv(ndev);
status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
- axienet_recv(lp->ndev);
- goto out;
- }
+
if (!(status & XAXIDMA_IRQ_ALL_MASK))
return IRQ_NONE;
- if (status & XAXIDMA_IRQ_ERROR_MASK) {
- dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
- (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
- (lp->rx_bd_v[lp->rx_bd_ci]).phys);
-
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Finally write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
+
+ if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
+ netdev_err(ndev, "DMA Rx error 0x%x\n", status);
+ netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
+ (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
+ (lp->rx_bd_v[lp->rx_bd_ci]).phys);
schedule_work(&lp->dma_err_task);
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
+ } else {
+ /* Disable further RX completion interrupts and schedule
+ * NAPI receive.
+ */
+ u32 cr = lp->rx_dma_cr;
+
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+
+ napi_schedule(&lp->napi);
}
-out:
+
return IRQ_HANDLED;
}
@@ -1095,6 +1160,8 @@ static int axienet_open(struct net_device *ndev)
/* Enable worker thread for Axi DMA error handling */
INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
+ napi_enable(&lp->napi);
+
/* Enable interrupts for Axi DMA Tx */
ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
ndev->name, ndev);
@@ -1120,6 +1187,7 @@ err_eth_irq:
err_rx_irq:
free_irq(lp->tx_irq, ndev);
err_tx_irq:
+ napi_disable(&lp->napi);
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
cancel_work_sync(&lp->dma_err_task);
@@ -1139,46 +1207,22 @@ err_tx_irq:
*/
static int axienet_stop(struct net_device *ndev)
{
- u32 cr, sr;
- int count;
struct axienet_local *lp = netdev_priv(ndev);
dev_dbg(&ndev->dev, "axienet_close()\n");
+ napi_disable(&lp->napi);
+
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ axienet_dma_stop(lp);
axienet_iow(lp, XAE_IE_OFFSET, 0);
- /* Give DMAs a chance to halt gracefully */
- sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
- msleep(20);
- sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- }
-
- sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
- msleep(20);
- sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- }
-
- /* Do a reset to ensure DMA is really stopped */
- axienet_lock_mii(lp);
- __axienet_device_reset(lp);
- axienet_unlock_mii(lp);
-
cancel_work_sync(&lp->dma_err_task);
if (lp->eth_irq > 0)
@@ -1449,14 +1493,12 @@ axienet_ethtools_get_coalesce(struct net_device *ndev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- u32 regval = 0;
struct axienet_local *lp = netdev_priv(ndev);
- regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
- >> XAXIDMA_COALESCE_SHIFT;
- regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
- >> XAXIDMA_COALESCE_SHIFT;
+
+ ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
+ ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
+ ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
+ ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
return 0;
}
@@ -1489,8 +1531,12 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
if (ecoalesce->rx_max_coalesced_frames)
lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
+ if (ecoalesce->rx_coalesce_usecs)
+ lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
if (ecoalesce->tx_max_coalesced_frames)
lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
+ if (ecoalesce->tx_coalesce_usecs)
+ lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
return 0;
}
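Both the frame counts and the new usec delays are only cached here; the hardware is presumably reprogrammed when the DMA engine is (re)started. A hypothetical helper folding the cached values into a control word with the masks used elsewhere in this file (the usec-to-timer-ticks conversion is left abstract since it depends on the DMA clock rate):

	static u32 axienet_calc_cr(u32 count, u32 timer)
	{
		u32 cr;

		cr  = (count << XAXIDMA_COALESCE_SHIFT) & XAXIDMA_COALESCE_MASK;
		cr |= (timer << XAXIDMA_DELAY_SHIFT) & XAXIDMA_DELAY_MASK;
		cr |= XAXIDMA_IRQ_ALL_MASK;	/* IOC, delay and error interrupts */

		return cr;
	}

From userspace the new knobs surface through the standard interface, e.g. 'ethtool -C ethX rx-frames 16 rx-usecs 50'.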
@@ -1521,7 +1567,8 @@ static int axienet_ethtools_nway_reset(struct net_device *dev)
}
static const struct ethtool_ops axienet_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
+ .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS,
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
@@ -1537,78 +1584,78 @@ static const struct ethtool_ops axienet_ethtool_ops = {
.nway_reset = axienet_ethtools_nway_reset,
};
-static void axienet_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
{
- struct net_device *ndev = to_net_dev(config->dev);
- struct axienet_local *lp = netdev_priv(ndev);
+ return container_of(pcs, struct axienet_local, pcs);
+}
- switch (state->interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- phylink_mii_c22_pcs_get_state(lp->pcs_phy, state);
- break;
- default:
- break;
- }
+static void axienet_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
+
+ phylink_mii_c22_pcs_get_state(pcs_phy, state);
}
-static void axienet_mac_an_restart(struct phylink_config *config)
+static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
{
- struct net_device *ndev = to_net_dev(config->dev);
- struct axienet_local *lp = netdev_priv(ndev);
+ struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
- phylink_mii_c22_pcs_an_restart(lp->pcs_phy);
+ phylink_mii_c22_pcs_an_restart(pcs_phy);
}
-static int axienet_mac_prepare(struct phylink_config *config, unsigned int mode,
- phy_interface_t iface)
+static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
{
- struct net_device *ndev = to_net_dev(config->dev);
+ struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
+ struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
struct axienet_local *lp = netdev_priv(ndev);
int ret;
- switch (iface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- if (!lp->switch_x_sgmii)
- return 0;
-
- ret = mdiobus_write(lp->pcs_phy->bus,
- lp->pcs_phy->addr,
- XLNX_MII_STD_SELECT_REG,
- iface == PHY_INTERFACE_MODE_SGMII ?
+ if (lp->switch_x_sgmii) {
+ ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
+ interface == PHY_INTERFACE_MODE_SGMII ?
XLNX_MII_STD_SELECT_SGMII : 0);
- if (ret < 0)
- netdev_warn(ndev, "Failed to switch PHY interface: %d\n",
+ if (ret < 0) {
+ netdev_warn(ndev,
+ "Failed to switch PHY interface: %d\n",
ret);
- return ret;
- default:
- return 0;
+ return ret;
+ }
}
+
+ ret = phylink_mii_c22_pcs_config(pcs_phy, mode, interface, advertising);
+ if (ret < 0)
+ netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
+
+ return ret;
}
-static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
- const struct phylink_link_state *state)
+static const struct phylink_pcs_ops axienet_pcs_ops = {
+ .pcs_get_state = axienet_pcs_get_state,
+ .pcs_config = axienet_pcs_config,
+ .pcs_an_restart = axienet_pcs_an_restart,
+};
+
+static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
{
struct net_device *ndev = to_net_dev(config->dev);
struct axienet_local *lp = netdev_priv(ndev);
- int ret;
- switch (state->interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- ret = phylink_mii_c22_pcs_config(lp->pcs_phy, mode,
- state->interface,
- state->advertising);
- if (ret < 0)
- netdev_warn(ndev, "Failed to configure PCS: %d\n",
- ret);
- break;
+ if (interface == PHY_INTERFACE_MODE_1000BASEX ||
+ interface == PHY_INTERFACE_MODE_SGMII)
+ return &lp->pcs;
- default:
- break;
- }
+ return NULL;
+}
+
+static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ /* nothing meaningful to do */
}
static void axienet_mac_link_down(struct phylink_config *config,
@@ -1663,9 +1710,7 @@ static void axienet_mac_link_up(struct phylink_config *config,
static const struct phylink_mac_ops axienet_phylink_ops = {
.validate = phylink_generic_validate,
- .mac_pcs_get_state = axienet_mac_pcs_get_state,
- .mac_an_restart = axienet_mac_an_restart,
- .mac_prepare = axienet_mac_prepare,
+ .mac_select_pcs = axienet_mac_select_pcs,
.mac_config = axienet_mac_config,
.mac_link_down = axienet_mac_link_down,
.mac_link_up = axienet_mac_link_up,
@@ -1680,29 +1725,26 @@ static const struct phylink_mac_ops axienet_phylink_ops = {
*/
static void axienet_dma_err_handler(struct work_struct *work)
{
+ u32 i;
u32 axienet_status;
- u32 cr, i;
+ struct axidma_bd *cur_p;
struct axienet_local *lp = container_of(work, struct axienet_local,
dma_err_task);
struct net_device *ndev = lp->ndev;
- struct axidma_bd *cur_p;
+
+ napi_disable(&lp->napi);
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- /* When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. MDIO must be disabled before resetting.
- * Hold MDIO bus lock to avoid MDIO accesses during the reset.
- */
- axienet_lock_mii(lp);
- __axienet_device_reset(lp);
- axienet_unlock_mii(lp);
+
+ axienet_dma_stop(lp);
for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
if (cur_p->cntrl) {
dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
- dma_unmap_single(ndev->dev.parent, addr,
+ dma_unmap_single(lp->dev, addr,
(cur_p->cntrl &
XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
@@ -1735,50 +1777,7 @@ static void axienet_dma_err_handler(struct work_struct *work)
lp->tx_bd_tail = 0;
lp->rx_bd_ci = 0;
- /* Start updating the Rx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
- (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = ((cr & ~XAXIDMA_DELAY_MASK) |
- (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Finally write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- /* Start updating the Tx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
- (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
- (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Finally write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- /* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.
- */
- axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
-
- /* Write to the RS (Run-stop) bit in the Tx channel control register.
- * Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting
- */
- axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
+ axienet_dma_start(lp);
axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
axienet_status &= ~XAE_RCW1_RX_MASK;
@@ -1799,6 +1798,7 @@ static void axienet_dma_err_handler(struct work_struct *work)
axienet_set_mac_address(ndev, NULL);
axienet_set_multicast_list(ndev);
axienet_setoptions(ndev, lp->options);
+ napi_enable(&lp->napi);
}
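Note the bracketing: the error handler disables NAPI before tearing the rings down and re-enables it only after axienet_dma_start(), mirroring the stop/open paths above, so the poll routine can never observe half-initialised descriptors.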
/**
@@ -1847,6 +1847,8 @@ static int axienet_probe(struct platform_device *pdev)
lp->rx_bd_num = RX_BD_NUM_DEFAULT;
lp->tx_bd_num = TX_BD_NUM_DEFAULT;
+ netif_napi_add(ndev, &lp->napi, axienet_poll, NAPI_POLL_WEIGHT);
+
lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
if (!lp->axi_clk) {
/* For backward compatibility, if named AXI clock is not present,
@@ -2053,7 +2055,9 @@ static int axienet_probe(struct platform_device *pdev)
}
lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
+ lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+ lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
/* Reset core now that clocks are enabled, prior to accessing MDIO */
ret = __axienet_device_reset(lp);
@@ -2079,12 +2083,12 @@ static int axienet_probe(struct platform_device *pdev)
ret = -EPROBE_DEFER;
goto cleanup_mdio;
}
- lp->phylink_config.pcs_poll = true;
+ lp->pcs.ops = &axienet_pcs_ops;
+ lp->pcs.poll = true;
}
lp->phylink_config.dev = &ndev->dev;
lp->phylink_config.type = PHYLINK_NETDEV;
- lp->phylink_config.legacy_pre_march2020 = true;
lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
MAC_10FD | MAC_100FD | MAC_1000FD;
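The netif_napi_add() call in probe registers axienet_poll with the stock NAPI_POLL_WEIGHT budget (conventionally 64 packets per poll).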
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 519599480b15..57a24f62e353 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -498,7 +498,7 @@ static void xemaclite_update_address(struct net_local *drvdata,
* @dev: Pointer to the network device instance
* @address: Void pointer to the sockaddr structure
*
- * This function copies the HW address from the sockaddr strucutre to the
+ * This function copies the HW address from the sockaddr structure to the
* net_device structure and updates the address in HW.
*
* Return: Error if the net device is busy or 0 if the addr is set
@@ -1183,7 +1183,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
if (rc) {
dev_err(dev,
"Cannot register network device, aborting\n");
- goto error;
+ goto put_node;
}
dev_info(dev,
@@ -1191,6 +1191,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
(unsigned long __force)ndev->mem_start, lp->base_addr, ndev->irq);
return 0;
+put_node:
+ of_node_put(lp->phy_node);
error:
free_netdev(ndev);
return rc;
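The new put_node label slots into the usual inverse-order unwind: if device registration fails, the phy_node reference taken earlier in probe is now dropped before falling through to the existing free_netdev().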
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
index 0e878fa6e322..b33f64c54b0e 100644
--- a/drivers/net/ethernet/xscale/Kconfig
+++ b/drivers/net/ethernet/xscale/Kconfig
@@ -20,9 +20,9 @@ if NET_VENDOR_XSCALE
config IXP4XX_ETH
tristate "Intel IXP4xx Ethernet support"
- depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
+ depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR && OF
select PHYLIB
- select OF_MDIO if OF
+ select OF_MDIO
select NET_PTP_CLASSIFY
help
Say Y here if you want to use built-in Ethernet ports
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index df77a22d1b81..d947955621ee 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -30,7 +30,6 @@
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
-#include <linux/platform_data/eth_ixp4xx.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/slab.h>
@@ -38,6 +37,11 @@
#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>
#include <linux/soc/ixp4xx/cpu.h>
+#include <linux/types.h>
+
+#define IXP4XX_ETH_NPEA 0x00
+#define IXP4XX_ETH_NPEB 0x10
+#define IXP4XX_ETH_NPEC 0x20
#include "ixp46x_ts.h"
@@ -147,6 +151,16 @@ typedef void buffer_t;
#define free_buffer_irq kfree
#endif
+/* Information about built-in Ethernet MAC interfaces */
+struct eth_plat_info {
+ u8 phy; /* MII PHY ID, 0 - 31 */
+ u8 rxq; /* configurable, currently 0 - 31 only */
+ u8 txreadyq;
+ u8 hwaddr[6];
+ u8 npe; /* NPE instance used by this interface */
+ bool has_mdio; /* If this instance has an MDIO bus */
+};
+
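With the <linux/platform_data/eth_ixp4xx.h> include dropped above, the platform-data struct and the NPE instance defines move into the driver itself: device tree is now the only probe path (see the Kconfig change making OF mandatory), so no board file needs these definitions any more.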
struct eth_regs {
u32 tx_control[2], __res1[2]; /* 000 */
u32 rx_control[2], __res2[2]; /* 010 */
@@ -1366,7 +1380,6 @@ static const struct net_device_ops ixp4xx_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-#ifdef CONFIG_OF
static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)
{
struct device_node *np = dev->of_node;
@@ -1417,12 +1430,6 @@ static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)
return plat;
}
-#else
-static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)
-{
- return NULL;
-}
-#endif
static int ixp4xx_eth_probe(struct platform_device *pdev)
{
@@ -1434,49 +1441,9 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
struct port *port;
int err;
- if (np) {
- plat = ixp4xx_of_get_platdata(dev);
- if (!plat)
- return -ENODEV;
- } else {
- plat = dev_get_platdata(dev);
- if (!plat)
- return -ENODEV;
- plat->npe = pdev->id;
- switch (plat->npe) {
- case IXP4XX_ETH_NPEA:
- /* If the MDIO bus is not up yet, defer probe */
- break;
- case IXP4XX_ETH_NPEB:
- /* On all except IXP43x, NPE-B is used for the MDIO bus.
- * If there is no NPE-B in the feature set, bail out,
- * else we have the MDIO bus here.
- */
- if (!cpu_is_ixp43x()) {
- if (!(ixp4xx_read_feature_bits() &
- IXP4XX_FEATURE_NPEB_ETH0))
- return -ENODEV;
- /* Else register the MDIO bus on NPE-B */
- plat->has_mdio = true;
- }
- break;
- case IXP4XX_ETH_NPEC:
- /* IXP43x lacks NPE-B and uses NPE-C for the MDIO bus
- * access, if there is no NPE-C, no bus, nothing works,
- * so bail out.
- */
- if (cpu_is_ixp43x()) {
- if (!(ixp4xx_read_feature_bits() &
- IXP4XX_FEATURE_NPEC_ETH))
- return -ENODEV;
- /* Else register the MDIO bus on NPE-B */
- plat->has_mdio = true;
- }
- break;
- default:
- return -ENODEV;
- }
- }
+ plat = ixp4xx_of_get_platdata(dev);
+ if (!plat)
+ return -ENODEV;
if (!(ndev = devm_alloc_etherdev(dev, sizeof(struct port))))
return -ENOMEM;
@@ -1530,21 +1497,7 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
udelay(50);
- if (np) {
- phydev = of_phy_get_and_connect(ndev, np, ixp4xx_adjust_link);
- } else {
- phydev = mdiobus_get_phy(mdio_bus, plat->phy);
- if (!phydev) {
- err = -ENODEV;
- dev_err(dev, "could not connect phydev (%d)\n", err);
- goto err_free_mem;
- }
- err = phy_connect_direct(ndev, phydev, ixp4xx_adjust_link,
- PHY_INTERFACE_MODE_MII);
- if (err)
- goto err_free_mem;
-
- }
+ phydev = of_phy_get_and_connect(ndev, np, ixp4xx_adjust_link);
if (!phydev) {
err = -ENODEV;
dev_err(dev, "no phydev\n");
diff --git a/drivers/net/ethernet/xscale/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c
index 39234852e01b..1f382777aa5a 100644
--- a/drivers/net/ethernet/xscale/ptp_ixp46x.c
+++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c
@@ -16,7 +16,6 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/platform_device.h>
#include <linux/soc/ixp4xx/cpu.h>
-#include <mach/ixp4xx-regs.h>
#include "ixp46x_ts.h"
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index ebd287039a54..5805e4a56385 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1514,10 +1514,9 @@ acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
{
struct acpi_device *device;
bool *found = context;
- int result;
- result = acpi_bus_get_device(obj_handle, &device);
- if (result)
+ device = acpi_fetch_acpi_dev(obj_handle);
+ if (!device)
return AE_OK;
if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
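acpi_bus_get_device() returned a status and filled the pointer through an out parameter; acpi_fetch_acpi_dev() hands back the struct acpi_device (or NULL) directly, which drops the intermediate result variable and reads more naturally in the NULL check.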
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index c1fdd721a730..7db6c135ac6c 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -56,6 +56,7 @@ struct geneve_config {
bool use_udp6_rx_checksums;
bool ttl_inherit;
enum ifla_geneve_df df;
+ bool inner_proto_inherit;
};
/* Pseudo network device */
@@ -251,17 +252,24 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
}
}
- skb_reset_mac_header(skb);
- skb->protocol = eth_type_trans(skb, geneve->dev);
- skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
-
if (tun_dst)
skb_dst_set(skb, &tun_dst->dst);
- /* Ignore packet loops (and multicast echo) */
- if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
- geneve->dev->stats.rx_errors++;
- goto drop;
+ if (gnvh->proto_type == htons(ETH_P_TEB)) {
+ skb_reset_mac_header(skb);
+ skb->protocol = eth_type_trans(skb, geneve->dev);
+ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+
+ /* Ignore packet loops (and multicast echo) */
+ if (ether_addr_equal(eth_hdr(skb)->h_source,
+ geneve->dev->dev_addr)) {
+ geneve->dev->stats.rx_errors++;
+ goto drop;
+ }
+ } else {
+ skb_reset_mac_header(skb);
+ skb->dev = geneve->dev;
+ skb->pkt_type = PACKET_HOST;
}
oiph = skb_network_header(skb);
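In the new non-TEB branch there is no Ethernet header to parse: skb->protocol has already been set from the Geneve protocol_type field by iptunnel_pull_header() in the caller (which, below, now receives inner_proto instead of a hard-coded ETH_P_TEB), so only skb->dev and pkt_type need fixing up here.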
@@ -345,6 +353,7 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
struct genevehdr *geneveh;
struct geneve_dev *geneve;
struct geneve_sock *gs;
+ __be16 inner_proto;
int opts_len;
/* Need UDP and Geneve header to be present */
@@ -356,7 +365,11 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (unlikely(geneveh->ver != GENEVE_VER))
goto drop;
- if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
+ inner_proto = geneveh->proto_type;
+
+ if (unlikely((inner_proto != htons(ETH_P_TEB) &&
+ inner_proto != htons(ETH_P_IP) &&
+ inner_proto != htons(ETH_P_IPV6))))
goto drop;
gs = rcu_dereference_sk_user_data(sk);
@@ -367,9 +380,14 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (!geneve)
goto drop;
+ if (unlikely((!geneve->cfg.inner_proto_inherit &&
+ inner_proto != htons(ETH_P_TEB)))) {
+ geneve->dev->stats.rx_dropped++;
+ goto drop;
+ }
+
opts_len = geneveh->opt_len * 4;
- if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
- htons(ETH_P_TEB),
+ if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, inner_proto,
!net_eq(geneve->net, dev_net(geneve->dev)))) {
geneve->dev->stats.rx_dropped++;
goto drop;
@@ -717,7 +735,8 @@ static int geneve_stop(struct net_device *dev)
}
static void geneve_build_header(struct genevehdr *geneveh,
- const struct ip_tunnel_info *info)
+ const struct ip_tunnel_info *info,
+ __be16 inner_proto)
{
geneveh->ver = GENEVE_VER;
geneveh->opt_len = info->options_len / 4;
@@ -725,7 +744,7 @@ static void geneve_build_header(struct genevehdr *geneveh,
geneveh->critical = !!(info->key.tun_flags & TUNNEL_CRIT_OPT);
geneveh->rsvd1 = 0;
tunnel_id_to_vni(info->key.tun_id, geneveh->vni);
- geneveh->proto_type = htons(ETH_P_TEB);
+ geneveh->proto_type = inner_proto;
geneveh->rsvd2 = 0;
if (info->key.tun_flags & TUNNEL_GENEVE_OPT)
@@ -734,10 +753,12 @@ static void geneve_build_header(struct genevehdr *geneveh,
static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
const struct ip_tunnel_info *info,
- bool xnet, int ip_hdr_len)
+ bool xnet, int ip_hdr_len,
+ bool inner_proto_inherit)
{
bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
struct genevehdr *gnvh;
+ __be16 inner_proto;
int min_headroom;
int err;
@@ -755,8 +776,9 @@ static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
goto free_dst;
gnvh = __skb_push(skb, sizeof(*gnvh) + info->options_len);
- geneve_build_header(gnvh, info);
- skb_set_inner_protocol(skb, htons(ETH_P_TEB));
+ inner_proto = inner_proto_inherit ? skb->protocol : htons(ETH_P_TEB);
+ geneve_build_header(gnvh, info, inner_proto);
+ skb_set_inner_protocol(skb, inner_proto);
return 0;
free_dst:
@@ -925,7 +947,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
}
skb->protocol = eth_type_trans(skb, geneve->dev);
- netif_rx(skb);
+ __netif_rx(skb);
dst_release(&rt->dst);
return -EMSGSIZE;
}
@@ -959,7 +981,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
}
}
- err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr));
+ err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr),
+ geneve->cfg.inner_proto_inherit);
if (unlikely(err))
return err;
@@ -1021,7 +1044,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
}
skb->protocol = eth_type_trans(skb, geneve->dev);
- netif_rx(skb);
+ __netif_rx(skb);
dst_release(dst);
return -EMSGSIZE;
}
@@ -1038,7 +1061,8 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
ttl = key->ttl;
ttl = ttl ? : ip6_dst_hoplimit(dst);
}
- err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr));
+ err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr),
+ geneve->cfg.inner_proto_inherit);
if (unlikely(err))
return err;
@@ -1238,6 +1262,7 @@ static void geneve_setup(struct net_device *dev)
}
static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
+ [IFLA_GENEVE_UNSPEC] = { .strict_start_type = IFLA_GENEVE_INNER_PROTO_INHERIT },
[IFLA_GENEVE_ID] = { .type = NLA_U32 },
[IFLA_GENEVE_REMOTE] = { .len = sizeof_field(struct iphdr, daddr) },
[IFLA_GENEVE_REMOTE6] = { .len = sizeof(struct in6_addr) },
@@ -1251,6 +1276,7 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
[IFLA_GENEVE_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
[IFLA_GENEVE_TTL_INHERIT] = { .type = NLA_U8 },
[IFLA_GENEVE_DF] = { .type = NLA_U8 },
+ [IFLA_GENEVE_INNER_PROTO_INHERIT] = { .type = NLA_FLAG },
};
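The strict_start_type entry in the policy opts every attribute from IFLA_GENEVE_INNER_PROTO_INHERIT onward into strict netlink validation, so malformed uses of the new flag are rejected rather than tolerated the way older, deprecated-parsing attributes are.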
static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -1388,6 +1414,14 @@ static int geneve_configure(struct net *net, struct net_device *dev,
dst_cache_reset(&geneve->cfg.info.dst_cache);
memcpy(&geneve->cfg, cfg, sizeof(*cfg));
+ if (geneve->cfg.inner_proto_inherit) {
+ dev->header_ops = NULL;
+ dev->type = ARPHRD_NONE;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->flags = IFF_NOARP;
+ }
+
err = register_netdevice(dev);
if (err)
return err;
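Setting inner_proto_inherit thus turns the netdev into a pure L3 tunnel at registration time (ARPHRD_NONE, no header ops, no MAC address, IFF_NOARP), matching the receive path above. Consistently, the changelink path below refuses to toggle the flag on an existing device.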
@@ -1561,10 +1595,18 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
#endif
}
+ if (data[IFLA_GENEVE_INNER_PROTO_INHERIT]) {
+ if (changelink) {
+ attrtype = IFLA_GENEVE_INNER_PROTO_INHERIT;
+ goto change_notsup;
+ }
+ cfg->inner_proto_inherit = true;
+ }
+
return 0;
change_notsup:
NL_SET_ERR_MSG_ATTR(extack, data[attrtype],
- "Changing VNI, Port, endpoint IP address family, external, and UDP checksum attributes are not supported");
+ "Changing VNI, Port, endpoint IP address family, external, inner_proto_inherit, and UDP checksum attributes are not supported");
return -EOPNOTSUPP;
}
@@ -1740,6 +1782,7 @@ static size_t geneve_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_TX */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_RX */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL_INHERIT */
+ nla_total_size(0) + /* IFLA_GENEVE_INNER_PROTO_INHERIT */
0;
}
@@ -1799,6 +1842,10 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_u8(skb, IFLA_GENEVE_TTL_INHERIT, ttl_inherit))
goto nla_put_failure;
+ if (geneve->cfg.inner_proto_inherit &&
+ nla_put_flag(skb, IFLA_GENEVE_INNER_PROTO_INHERIT))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 24e5c54d06c1..a208e2b1a9af 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -66,13 +66,23 @@ struct gtp_dev {
struct sock *sk0;
struct sock *sk1u;
+ u8 sk_created;
struct net_device *dev;
+ struct net *net;
unsigned int role;
unsigned int hash_size;
struct hlist_head *tid_hash;
struct hlist_head *addr_hash;
+
+ u8 restart_count;
+};
+
+struct echo_info {
+ struct in_addr ms_addr_ip4;
+ struct in_addr peer_addr_ip4;
+ u8 gtp_version;
};
static unsigned int gtp_net_id __read_mostly;
@@ -83,6 +93,16 @@ struct gtp_net {
static u32 gtp_h_initval;
+static struct genl_family gtp_genl_family;
+
+enum gtp_multicast_groups {
+ GTP_GENL_MCGRP,
+};
+
+static const struct genl_multicast_group gtp_genl_mcgrps[] = {
+ [GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
+};
+
static void pdp_context_delete(struct pdp_ctx *pctx);
static inline u32 gtp0_hashfn(u64 tid)
@@ -207,7 +227,7 @@ static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
dev_sw_netstats_rx_add(pctx->dev, skb->len);
- netif_rx(skb);
+ __netif_rx(skb);
return 0;
err:
@@ -215,6 +235,174 @@ err:
return -1;
}
+static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
+ const struct sock *sk,
+ __be32 daddr, __be32 saddr)
+{
+ memset(fl4, 0, sizeof(*fl4));
+ fl4->flowi4_oif = sk->sk_bound_dev_if;
+ fl4->daddr = daddr;
+ fl4->saddr = saddr;
+ fl4->flowi4_tos = RT_CONN_FLAGS(sk);
+ fl4->flowi4_proto = sk->sk_protocol;
+
+ return ip_route_output_key(sock_net(sk), fl4);
+}
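The helper gains an explicit saddr argument so the echo paths can route with source and destination swapped relative to the incoming packet; the old copy (removed further down) always used the socket's inet_saddr, which only suits locally-originated T-PDUs.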
+
+/* GSM TS 09.60. 7.3
+ * In all Path Management messages:
+ * - TID: is not used and shall be set to 0.
+ * - Flow Label: is not used and shall be set to 0.
+ * In signalling messages:
+ * - number: this field is not yet used in signalling messages.
+ * It shall be set to 255 by the sender and shall be ignored
+ * by the receiver.
+ * Returns true if the echo request was correct, false otherwise.
+ */
+static bool gtp0_validate_echo_hdr(struct gtp0_header *gtp0)
+{
+ return !(gtp0->tid || (gtp0->flags ^ 0x1e) ||
+ gtp0->number != 0xff || gtp0->flow);
+}
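The XOR makes this an exact-match test: gtp0->flags ^ 0x1e is zero only when flags is exactly 0x1e, i.e. version 0 in the top three bits, protocol type 1 (GTP), the three spare bits set to 1 and the low bit clear, the fixed pattern required of v0 path management messages.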
+
+/* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
+static void gtp0_build_echo_msg(struct gtp0_header *hdr, __u8 msg_type)
+{
+ int len_pkt, len_hdr;
+
+ hdr->flags = 0x1e; /* v0, GTP-non-prime. */
+ hdr->type = msg_type;
+ /* GSM TS 09.60. 7.3 In all Path Management messages, Flow Label
+ * and TID are not used and shall be set to 0.
+ */
+ hdr->flow = 0;
+ hdr->tid = 0;
+ hdr->number = 0xff;
+ hdr->spare[0] = 0xff;
+ hdr->spare[1] = 0xff;
+ hdr->spare[2] = 0xff;
+
+ len_pkt = sizeof(struct gtp0_packet);
+ len_hdr = sizeof(struct gtp0_header);
+
+ if (msg_type == GTP_ECHO_RSP)
+ hdr->length = htons(len_pkt - len_hdr);
+ else
+ hdr->length = 0;
+}
+
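The len_pkt/len_hdr arithmetic assumes packet structures along these lines (defined outside this hunk; field names match their uses below):

	struct gtp_ie {
		__u8	tag;
		__u8	val;
	} __packed;

	struct gtp0_packet {
		struct gtp0_header	gtp0_h;
		struct gtp_ie		ie;	/* Recovery IE */
	} __packed;

so an echo response is the fixed v0 header followed by a single Recovery information element, and the GTP Length field counts only the part past the header.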
+static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
+{
+ struct gtp0_packet *gtp_pkt;
+ struct gtp0_header *gtp0;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ struct iphdr *iph;
+ __be16 seq;
+
+ gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
+
+ if (!gtp0_validate_echo_hdr(gtp0))
+ return -1;
+
+ seq = gtp0->seq;
+
+ /* pull GTP and UDP headers */
+ skb_pull_data(skb, sizeof(struct gtp0_header) + sizeof(struct udphdr));
+
+ gtp_pkt = skb_push(skb, sizeof(struct gtp0_packet));
+ memset(gtp_pkt, 0, sizeof(struct gtp0_packet));
+
+ gtp0_build_echo_msg(&gtp_pkt->gtp0_h, GTP_ECHO_RSP);
+
+ /* GSM TS 09.60. 7.3 The Sequence Number in a signalling response
+ * message shall be copied from the signalling request message
+ * that the GSN is replying to.
+ */
+ gtp_pkt->gtp0_h.seq = seq;
+
+ gtp_pkt->ie.tag = GTPIE_RECOVERY;
+ gtp_pkt->ie.val = gtp->restart_count;
+
+ iph = ip_hdr(skb);
+
+ /* Find a route to the sender; the src address becomes the dst
+ * address and vice versa.
+ */
+ rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr);
+ if (IS_ERR(rt)) {
+ netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
+ &iph->saddr);
+ return -1;
+ }
+
+ udp_tunnel_xmit_skb(rt, gtp->sk0, skb,
+ fl4.saddr, fl4.daddr,
+ iph->tos,
+ ip4_dst_hoplimit(&rt->dst),
+ 0,
+ htons(GTP0_PORT), htons(GTP0_PORT),
+ !net_eq(sock_net(gtp->sk1u),
+ dev_net(gtp->dev)),
+ false);
+ return 0;
+}
+
+static int gtp_genl_fill_echo(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
+ int flags, u32 type, struct echo_info echo)
+{
+ void *genlh;
+
+ genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
+ type);
+ if (!genlh)
+ goto failure;
+
+ if (nla_put_u32(skb, GTPA_VERSION, echo.gtp_version) ||
+ nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer_addr_ip4.s_addr) ||
+ nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms_addr_ip4.s_addr))
+ goto failure;
+
+ genlmsg_end(skb, genlh);
+ return 0;
+
+failure:
+ genlmsg_cancel(skb, genlh);
+ return -EMSGSIZE;
+}
+
+static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
+{
+ struct gtp0_header *gtp0;
+ struct echo_info echo;
+ struct sk_buff *msg;
+ struct iphdr *iph;
+ int ret;
+
+ gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
+
+ if (!gtp0_validate_echo_hdr(gtp0))
+ return -1;
+
+ iph = ip_hdr(skb);
+ echo.ms_addr_ip4.s_addr = iph->daddr;
+ echo.peer_addr_ip4.s_addr = iph->saddr;
+ echo.gtp_version = GTP_V0;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
+ }
+
+ return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
+ msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
+}
+
/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
@@ -231,6 +419,16 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
if ((gtp0->flags >> 5) != GTP_V0)
return 1;
+ /* If the sockets were created in the kernel, it means that
+ * there is no daemon running in userspace which would
+ * handle echo requests.
+ */
+ if (gtp0->type == GTP_ECHO_REQ && gtp->sk_created)
+ return gtp0_send_echo_resp(gtp, skb);
+
+ if (gtp0->type == GTP_ECHO_RSP && gtp->sk_created)
+ return gtp0_handle_echo_resp(gtp, skb);
+
if (gtp0->type != GTP_TPDU)
return 1;
@@ -243,6 +441,131 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
return gtp_rx(pctx, skb, hdrlen, gtp->role);
}
+/* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
+static void gtp1u_build_echo_msg(struct gtp1_header_long *hdr, __u8 msg_type)
+{
+ int len_pkt, len_hdr;
+
+ /* S flag must be set to 1 */
+ hdr->flags = 0x32; /* v1, GTP-non-prime. */
+ hdr->type = msg_type;
+ /* 3GPP TS 29.281 5.1 - TEID has to be set to 0 */
+ hdr->tid = 0;
+
+ /* seq, npdu and next count toward the length of the GTP packet;
+ * that's why the size of gtp1_header is subtracted,
+ * not the size of gtp1_header_long.
+ */
+
+ len_hdr = sizeof(struct gtp1_header);
+
+ if (msg_type == GTP_ECHO_RSP) {
+ len_pkt = sizeof(struct gtp1u_packet);
+ hdr->length = htons(len_pkt - len_hdr);
+ } else {
+ /* GTP_ECHO_REQ does not carry a GTP Information Element,
+ * which is why gtp1_header_long is used here.
+ */
+ len_pkt = sizeof(struct gtp1_header_long);
+ hdr->length = htons(len_pkt - len_hdr);
+ }
+}
+
+static int gtp1u_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
+{
+ struct gtp1_header_long *gtp1u;
+ struct gtp1u_packet *gtp_pkt;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ struct iphdr *iph;
+
+ gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));
+
+ /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
+ * Error Indication and Supported Extension Headers Notification
+ * messages, the S flag shall be set to 1 and TEID shall be set to 0.
+ */
+ if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
+ return -1;
+
+ /* pull GTP and UDP headers */
+ skb_pull_data(skb,
+ sizeof(struct gtp1_header_long) + sizeof(struct udphdr));
+
+ gtp_pkt = skb_push(skb, sizeof(struct gtp1u_packet));
+ memset(gtp_pkt, 0, sizeof(struct gtp1u_packet));
+
+ gtp1u_build_echo_msg(&gtp_pkt->gtp1u_h, GTP_ECHO_RSP);
+
+ /* 3GPP TS 29.281 7.7.2 - The Restart Counter value in the
+ * Recovery information element shall not be used, i.e. it shall
+ * be set to zero by the sender and shall be ignored by the receiver.
+ * The Recovery information element is mandatory due to backwards
+ * compatibility reasons.
+ */
+ gtp_pkt->ie.tag = GTPIE_RECOVERY;
+ gtp_pkt->ie.val = 0;
+
+ iph = ip_hdr(skb);
+
+ /* Find a route to the sender; the src address becomes the dst
+ * address and vice versa.
+ */
+ rt = ip4_route_output_gtp(&fl4, gtp->sk1u, iph->saddr, iph->daddr);
+ if (IS_ERR(rt)) {
+ netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
+ &iph->saddr);
+ return -1;
+ }
+
+ udp_tunnel_xmit_skb(rt, gtp->sk1u, skb,
+ fl4.saddr, fl4.daddr,
+ iph->tos,
+ ip4_dst_hoplimit(&rt->dst),
+ 0,
+ htons(GTP1U_PORT), htons(GTP1U_PORT),
+ !net_eq(sock_net(gtp->sk1u),
+ dev_net(gtp->dev)),
+ false);
+ return 0;
+}
+
+static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
+{
+ struct gtp1_header_long *gtp1u;
+ struct echo_info echo;
+ struct sk_buff *msg;
+ struct iphdr *iph;
+ int ret;
+
+ gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));
+
+ /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
+ * Error Indication and Supported Extension Headers Notification
+ * messages, the S flag shall be set to 1 and TEID shall be set to 0.
+ */
+ if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
+ return -1;
+
+ iph = ip_hdr(skb);
+ echo.ms_addr_ip4.s_addr = iph->daddr;
+ echo.peer_addr_ip4.s_addr = iph->saddr;
+ echo.gtp_version = GTP_V1;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
+ }
+
+ return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
+ msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
+}
+
static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
unsigned int hdrlen = sizeof(struct udphdr) +
@@ -258,6 +581,16 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
if ((gtp1->flags >> 5) != GTP_V1)
return 1;
+ /* If the sockets were created in the kernel, it means that
+ * there is no daemon running in userspace which would
+ * handle echo requests.
+ */
+ if (gtp1->type == GTP_ECHO_REQ && gtp->sk_created)
+ return gtp1u_send_echo_resp(gtp, skb);
+
+ if (gtp1->type == GTP_ECHO_RSP && gtp->sk_created)
+ return gtp1u_handle_echo_resp(gtp, skb);
+
if (gtp1->type != GTP_TPDU)
return 1;
@@ -320,8 +653,16 @@ static void gtp_encap_disable_sock(struct sock *sk)
static void gtp_encap_disable(struct gtp_dev *gtp)
{
- gtp_encap_disable_sock(gtp->sk0);
- gtp_encap_disable_sock(gtp->sk1u);
+ if (gtp->sk_created) {
+ udp_tunnel_sock_release(gtp->sk0->sk_socket);
+ udp_tunnel_sock_release(gtp->sk1u->sk_socket);
+ gtp->sk_created = false;
+ gtp->sk0 = NULL;
+ gtp->sk1u = NULL;
+ } else {
+ gtp_encap_disable_sock(gtp->sk0);
+ gtp_encap_disable_sock(gtp->sk1u);
+ }
}
/* UDP encapsulation receive handler. See net/ipv4/udp.c.
@@ -388,20 +729,6 @@ static void gtp_dev_uninit(struct net_device *dev)
free_percpu(dev->tstats);
}
-static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
- const struct sock *sk,
- __be32 daddr)
-{
- memset(fl4, 0, sizeof(*fl4));
- fl4->flowi4_oif = sk->sk_bound_dev_if;
- fl4->daddr = daddr;
- fl4->saddr = inet_sk(sk)->inet_saddr;
- fl4->flowi4_tos = RT_CONN_FLAGS(sk);
- fl4->flowi4_proto = sk->sk_protocol;
-
- return ip_route_output_key(sock_net(sk), fl4);
-}
-
static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
int payload_len = skb->len;
@@ -507,7 +834,8 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
}
netdev_dbg(dev, "found PDP context %p\n", pctx);
- rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr);
+ rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr,
+ inet_sk(pctx->sk)->inet_saddr);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to SSGN %pI4\n",
&pctx->peer_addr_ip4.s_addr);
@@ -656,17 +984,69 @@ static void gtp_destructor(struct net_device *dev)
kfree(gtp->tid_hash);
}
+static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp)
+{
+ struct udp_tunnel_sock_cfg tuncfg = {};
+ struct udp_port_cfg udp_conf = {
+ .local_ip.s_addr = htonl(INADDR_ANY),
+ .family = AF_INET,
+ };
+ struct net *net = gtp->net;
+ struct socket *sock;
+ int err;
+
+ if (type == UDP_ENCAP_GTP0)
+ udp_conf.local_udp_port = htons(GTP0_PORT);
+ else if (type == UDP_ENCAP_GTP1U)
+ udp_conf.local_udp_port = htons(GTP1U_PORT);
+ else
+ return ERR_PTR(-EINVAL);
+
+ err = udp_sock_create(net, &udp_conf, &sock);
+ if (err)
+ return ERR_PTR(err);
+
+ tuncfg.sk_user_data = gtp;
+ tuncfg.encap_type = type;
+ tuncfg.encap_rcv = gtp_encap_recv;
+ tuncfg.encap_destroy = NULL;
+
+ setup_udp_tunnel_sock(net, sock, &tuncfg);
+
+ return sock->sk;
+}
+
+static int gtp_create_sockets(struct gtp_dev *gtp, struct nlattr *data[])
+{
+ struct sock *sk1u = NULL;
+ struct sock *sk0 = NULL;
+
+ sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp);
+ if (IS_ERR(sk0))
+ return PTR_ERR(sk0);
+
+ sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp);
+ if (IS_ERR(sk1u)) {
+ udp_tunnel_sock_release(sk0->sk_socket);
+ return PTR_ERR(sk1u);
+ }
+
+ gtp->sk_created = true;
+ gtp->sk0 = sk0;
+ gtp->sk1u = sk1u;
+
+ return 0;
+}
+
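gtp_create_sockets() is the kernel-managed alternative to the userspace file-descriptor path: when IFLA_GTP_CREATE_SOCKETS is set, the device owns both UDP sockets (bound to GTP0_PORT and GTP1U_PORT) and, per the receive-path comments above, answers echo requests itself instead of relying on a daemon.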
static int gtp_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
+ unsigned int role = GTP_ROLE_GGSN;
struct gtp_dev *gtp;
struct gtp_net *gn;
int hashsize, err;
- if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
- return -EINVAL;
-
gtp = netdev_priv(dev);
if (!data[IFLA_GTP_PDP_HASHSIZE]) {
@@ -677,11 +1057,28 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
hashsize = 1024;
}
+ if (data[IFLA_GTP_ROLE]) {
+ role = nla_get_u32(data[IFLA_GTP_ROLE]);
+ if (role > GTP_ROLE_SGSN)
+ return -EINVAL;
+ }
+ gtp->role = role;
+
+ if (!data[IFLA_GTP_RESTART_COUNT])
+ gtp->restart_count = 0;
+ else
+ gtp->restart_count = nla_get_u8(data[IFLA_GTP_RESTART_COUNT]);
+
+ gtp->net = src_net;
+
err = gtp_hashtable_new(gtp, hashsize);
if (err < 0)
return err;
- err = gtp_encap_enable(gtp, data);
+ if (data[IFLA_GTP_CREATE_SOCKETS])
+ err = gtp_create_sockets(gtp, data);
+ else
+ err = gtp_encap_enable(gtp, data);
if (err < 0)
goto out_hashtable;
@@ -726,6 +1123,8 @@ static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
[IFLA_GTP_FD1] = { .type = NLA_U32 },
[IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 },
[IFLA_GTP_ROLE] = { .type = NLA_U32 },
+ [IFLA_GTP_CREATE_SOCKETS] = { .type = NLA_U8 },
+ [IFLA_GTP_RESTART_COUNT] = { .type = NLA_U8 },
};
static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -740,7 +1139,8 @@ static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
static size_t gtp_get_size(const struct net_device *dev)
{
return nla_total_size(sizeof(__u32)) + /* IFLA_GTP_PDP_HASHSIZE */
- nla_total_size(sizeof(__u32)); /* IFLA_GTP_ROLE */
+ nla_total_size(sizeof(__u32)) + /* IFLA_GTP_ROLE */
+ nla_total_size(sizeof(__u8)); /* IFLA_GTP_RESTART_COUNT */
}
static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
@@ -751,6 +1151,8 @@ static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_GTP_ROLE, gtp->role))
goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_GTP_RESTART_COUNT, gtp->restart_count))
+ goto nla_put_failure;
return 0;
@@ -848,7 +1250,9 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
{
struct sock *sk1u = NULL;
struct sock *sk0 = NULL;
- unsigned int role = GTP_ROLE_GGSN;
+
+ if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
+ return -EINVAL;
if (data[IFLA_GTP_FD0]) {
u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
@@ -868,18 +1272,8 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
}
}
- if (data[IFLA_GTP_ROLE]) {
- role = nla_get_u32(data[IFLA_GTP_ROLE]);
- if (role > GTP_ROLE_SGSN) {
- gtp_encap_disable_sock(sk0);
- gtp_encap_disable_sock(sk1u);
- return -EINVAL;
- }
- }
-
gtp->sk0 = sk0;
gtp->sk1u = sk1u;
- gtp->role = role;
return 0;
}
@@ -1183,16 +1577,6 @@ out_unlock:
return err;
}
-static struct genl_family gtp_genl_family;
-
-enum gtp_multicast_groups {
- GTP_GENL_MCGRP,
-};
-
-static const struct genl_multicast_group gtp_genl_mcgrps[] = {
- [GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
-};
-
static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
int flags, u32 type, struct pdp_ctx *pctx)
{
@@ -1336,6 +1720,95 @@ out:
return skb->len;
}
+static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info)
+{
+ struct sk_buff *skb_to_send;
+ __be32 src_ip, dst_ip;
+ unsigned int version;
+ struct gtp_dev *gtp;
+ struct flowi4 fl4;
+ struct rtable *rt;
+ struct sock *sk;
+ __be16 port;
+ int len;
+
+ if (!info->attrs[GTPA_VERSION] ||
+ !info->attrs[GTPA_LINK] ||
+ !info->attrs[GTPA_PEER_ADDRESS] ||
+ !info->attrs[GTPA_MS_ADDRESS])
+ return -EINVAL;
+
+ version = nla_get_u32(info->attrs[GTPA_VERSION]);
+ dst_ip = nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
+ src_ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
+
+ gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
+ if (!gtp)
+ return -ENODEV;
+
+ if (!gtp->sk_created)
+ return -EOPNOTSUPP;
+ if (!(gtp->dev->flags & IFF_UP))
+ return -ENETDOWN;
+
+ if (version == GTP_V0) {
+ struct gtp0_header *gtp0_h;
+
+ len = LL_RESERVED_SPACE(gtp->dev) + sizeof(struct gtp0_header) +
+ sizeof(struct iphdr) + sizeof(struct udphdr);
+
+ skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
+ if (!skb_to_send)
+ return -ENOMEM;
+
+ sk = gtp->sk0;
+ port = htons(GTP0_PORT);
+
+ gtp0_h = skb_push(skb_to_send, sizeof(struct gtp0_header));
+ memset(gtp0_h, 0, sizeof(struct gtp0_header));
+ gtp0_build_echo_msg(gtp0_h, GTP_ECHO_REQ);
+ } else if (version == GTP_V1) {
+ struct gtp1_header_long *gtp1u_h;
+
+ len = LL_RESERVED_SPACE(gtp->dev) +
+ sizeof(struct gtp1_header_long) +
+ sizeof(struct iphdr) + sizeof(struct udphdr);
+
+ skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
+ if (!skb_to_send)
+ return -ENOMEM;
+
+ sk = gtp->sk1u;
+ port = htons(GTP1U_PORT);
+
+ gtp1u_h = skb_push(skb_to_send,
+ sizeof(struct gtp1_header_long));
+ memset(gtp1u_h, 0, sizeof(struct gtp1_header_long));
+ gtp1u_build_echo_msg(gtp1u_h, GTP_ECHO_REQ);
+ } else {
+ return -ENODEV;
+ }
+
+ rt = ip4_route_output_gtp(&fl4, sk, dst_ip, src_ip);
+ if (IS_ERR(rt)) {
+ netdev_dbg(gtp->dev, "no route for echo request to %pI4\n",
+ &dst_ip);
+ kfree_skb(skb_to_send);
+ return -ENODEV;
+ }
+
+ udp_tunnel_xmit_skb(rt, sk, skb_to_send,
+ fl4.saddr, fl4.daddr,
+ fl4.flowi4_tos,
+ ip4_dst_hoplimit(&rt->dst),
+ 0,
+ port, port,
+ !net_eq(sock_net(sk),
+ dev_net(gtp->dev)),
+ false);
+ return 0;
+}
+
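This completes the echo round trip for kernel-created sockets: the GTP_CMD_ECHOREQ doit builds and transmits the request, the encap receive hooks recognise the matching GTP_ECHO_RSP, and gtp0/gtp1u_handle_echo_resp() multicast the peer and MS addresses back to userspace listeners on the GTP_GENL_MCGRP group.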
static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
[GTPA_LINK] = { .type = NLA_U32, },
[GTPA_VERSION] = { .type = NLA_U32, },
@@ -1368,6 +1841,12 @@ static const struct genl_small_ops gtp_genl_ops[] = {
.dumpit = gtp_genl_dump_pdp,
.flags = GENL_ADMIN_PERM,
},
+ {
+ .cmd = GTP_CMD_ECHOREQ,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = gtp_genl_send_echo_req,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static struct genl_family gtp_genl_family __ro_after_init = {
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index a03d0b474641..3e69079ed694 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -982,10 +982,10 @@ static int baycom_setmode(struct baycom_state *bc, const char *modestr)
bc->cfg.extmodem = 0;
if (strstr(modestr,"extmodem"))
bc->cfg.extmodem = 1;
- if (strstr(modestr,"noloopback"))
- bc->cfg.loopback = 0;
if (strstr(modestr,"loopback"))
bc->cfg.loopback = 1;
+ if (strstr(modestr, "noloopback"))
+ bc->cfg.loopback = 0;
if ((cp = strstr(modestr,"fclk="))) {
bc->cfg.fclk = simple_strtoul(cp+5, NULL, 0);
if (bc->cfg.fclk < 1000000)
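The reorder matters because strstr() matches substrings and "noloopback" contains "loopback": with the old order, a modestr containing "noloopback" first cleared the flag and then re-set it, ending up at 1. Checking "loopback" first and letting "noloopback" override it makes the more specific token win.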
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 7e527499d3ad..a2a12208e3ad 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -28,6 +28,7 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
+#include <linux/jiffies.h>
#include <net/ax25.h>
#include "z8530.h"
@@ -377,7 +378,7 @@ static int __init dmascc_init(void)
udelay(2000000 / TMR_0_HZ);
/* Timing loop */
- while (jiffies - time < 13) {
+ while (time_is_after_jiffies(time + 13)) {
for (i = 0; i < hw[h].num_devs; i++)
if (base[i] && counting[i]) {
/* Read back Timer 1: latch; read LSB; read MSB */
@@ -525,7 +526,7 @@ static int __init setup_adapter(int card_base, int type, int n)
/* Wait and detect IRQ */
time = jiffies;
- while (jiffies - time < 2 + HZ / TMR_0_HZ);
+ while (time_is_after_jiffies(time + 2 + HZ / TMR_0_HZ));
irq = probe_irq_off(irqs);
/* Clear pending interrupt, disable interrupts */
@@ -1353,7 +1354,7 @@ static void es_isr(struct scc_priv *priv)
/* Switch state */
write_scc(priv, R15, 0);
if (priv->tx_count &&
- (jiffies - priv->tx_start) < priv->param.txtimeout) {
+ time_is_after_jiffies(priv->tx_start + priv->param.txtimeout)) {
priv->state = TX_PAUSE;
start_timer(priv, priv->param.txpause, 0);
} else {
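time_is_after_jiffies() and friends are the idiomatic, wrap-safe way to compare against the jiffies counter. Simplified from <linux/jiffies.h> (the real macros also typecheck their arguments as unsigned long):

	#define time_after(a, b)		((long)((b) - (a)) < 0)
	#define time_is_after_jiffies(a)	time_after(a, jiffies)

The signed subtraction stays correct across counter wraparound and avoids the easy-to-invert open-coded comparisons.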
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index afa81a9480cc..9442f751ad3a 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -154,19 +154,15 @@ static void free_netvsc_device(struct rcu_head *head)
kfree(nvdev->extension);
- if (nvdev->recv_original_buf) {
- hv_unmap_memory(nvdev->recv_buf);
+ if (nvdev->recv_original_buf)
vfree(nvdev->recv_original_buf);
- } else {
+ else
vfree(nvdev->recv_buf);
- }
- if (nvdev->send_original_buf) {
- hv_unmap_memory(nvdev->send_buf);
+ if (nvdev->send_original_buf)
vfree(nvdev->send_original_buf);
- } else {
+ else
vfree(nvdev->send_buf);
- }
bitmap_free(nvdev->send_section_map);
@@ -765,6 +761,12 @@ void netvsc_device_remove(struct hv_device *device)
netvsc_teardown_send_gpadl(device, net_device, ndev);
}
+ if (net_device->recv_original_buf)
+ hv_unmap_memory(net_device->recv_buf);
+
+ if (net_device->send_original_buf)
+ hv_unmap_memory(net_device->send_buf);
+
/* Release all resources */
free_netvsc_device_rcu(net_device);
}
@@ -1628,7 +1630,6 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
case VM_PKT_DATA_USING_XFER_PAGES:
return netvsc_receive(ndev, net_device, nvchan, desc);
- break;
case VM_PKT_DATA_INBAND:
netvsc_receive_inband(ndev, net_device, desc);
@@ -1821,6 +1822,12 @@ cleanup:
netif_napi_del(&net_device->chan_table[0].napi);
cleanup2:
+ if (net_device->recv_original_buf)
+ hv_unmap_memory(net_device->recv_buf);
+
+ if (net_device->send_original_buf)
+ hv_unmap_memory(net_device->send_buf);
+
free_netvsc_device(&net_device->rcu);
return ERR_PTR(ret);
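The hv_unmap_memory() calls move out of free_netvsc_device(), an RCU callback, into netvsc_device_remove() and the probe error path, presumably because unmapping is not safe from the atomic RCU-callback context; the vfree() of the original buffers stays where it was.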
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 3646469433b1..fde1c492ca02 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1587,6 +1587,9 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
pcpu_sum = kvmalloc_array(num_possible_cpus(),
sizeof(struct netvsc_ethtool_pcpu_stats),
GFP_KERNEL);
+ if (!pcpu_sum)
+ return;
+
netvsc_get_pcpu_stats(dev, pcpu_sum);
for_each_present_cpu(cpu) {
struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 7db9cbd0f5de..6afdf1622944 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1304,7 +1304,7 @@ err_alloc_wq:
return ret;
}
-static int adf7242_remove(struct spi_device *spi)
+static void adf7242_remove(struct spi_device *spi)
{
struct adf7242_local *lp = spi_get_drvdata(spi);
@@ -1316,8 +1316,6 @@ static int adf7242_remove(struct spi_device *spi)
ieee802154_unregister_hw(lp->hw);
mutex_destroy(&lp->bmux);
ieee802154_free_hw(lp->hw);
-
- return 0;
}
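This hunk is part of the tree-wide conversion of the spi_driver remove() callback to return void: the SPI core ignored the returned value apart from emitting a warning, so the error-less signature removes the temptation to return unhandled errors. The same mechanical change repeats for at86rf230 and ca8210 below.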
static const struct of_device_id adf7242_of_match[] = {
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 4f5ef8a9a9a8..549d04b5f3d4 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -1768,7 +1768,7 @@ free_dev:
return rc;
}
-static int at86rf230_remove(struct spi_device *spi)
+static void at86rf230_remove(struct spi_device *spi)
{
struct at86rf230_local *lp = spi_get_drvdata(spi);
@@ -1778,8 +1778,6 @@ static int at86rf230_remove(struct spi_device *spi)
ieee802154_free_hw(lp->hw);
at86rf230_debugfs_remove();
dev_dbg(&spi->dev, "unregistered at86rf230\n");
-
- return 0;
}
static const struct of_device_id at86rf230_of_match[] = {
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index 2f5e7b31032a..07bafbf94680 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -74,81 +74,6 @@ struct atusb_chip_data {
int (*set_txpower)(struct ieee802154_hw*, s32);
};
-/* ----- USB commands without data ----------------------------------------- */
-
-/* To reduce the number of error checks in the code, we record the first error
- * in atusb->err and reject all subsequent requests until the error is cleared.
- */
-
-static int atusb_control_msg(struct atusb *atusb, unsigned int pipe,
- __u8 request, __u8 requesttype,
- __u16 value, __u16 index,
- void *data, __u16 size, int timeout)
-{
- struct usb_device *usb_dev = atusb->usb_dev;
- int ret;
-
- if (atusb->err)
- return atusb->err;
-
- ret = usb_control_msg(usb_dev, pipe, request, requesttype,
- value, index, data, size, timeout);
- if (ret < size) {
- ret = ret < 0 ? ret : -ENODATA;
-
- atusb->err = ret;
- dev_err(&usb_dev->dev,
- "%s: req 0x%02x val 0x%x idx 0x%x, error %d\n",
- __func__, request, value, index, ret);
- }
- return ret;
-}
-
-static int atusb_command(struct atusb *atusb, u8 cmd, u8 arg)
-{
- struct usb_device *usb_dev = atusb->usb_dev;
-
- dev_dbg(&usb_dev->dev, "%s: cmd = 0x%x\n", __func__, cmd);
- return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0),
- cmd, ATUSB_REQ_TO_DEV, arg, 0, NULL, 0, 1000);
-}
-
-static int atusb_write_reg(struct atusb *atusb, u8 reg, u8 value)
-{
- struct usb_device *usb_dev = atusb->usb_dev;
-
- dev_dbg(&usb_dev->dev, "%s: 0x%02x <- 0x%02x\n", __func__, reg, value);
- return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0),
- ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
- value, reg, NULL, 0, 1000);
-}
-
-static int atusb_read_reg(struct atusb *atusb, u8 reg)
-{
- struct usb_device *usb_dev = atusb->usb_dev;
- int ret;
- u8 *buffer;
- u8 value;
-
- buffer = kmalloc(1, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- dev_dbg(&usb_dev->dev, "%s: reg = 0x%x\n", __func__, reg);
- ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
- ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
- 0, reg, buffer, 1, 1000);
-
- if (ret >= 0) {
- value = buffer[0];
- kfree(buffer);
- return value;
- } else {
- kfree(buffer);
- return ret;
- }
-}
-
static int atusb_write_subreg(struct atusb *atusb, u8 reg, u8 mask,
u8 shift, u8 value)
{
@@ -158,7 +83,10 @@ static int atusb_write_subreg(struct atusb *atusb, u8 reg, u8 mask,
dev_dbg(&usb_dev->dev, "%s: 0x%02x <- 0x%02x\n", __func__, reg, value);
- orig = atusb_read_reg(atusb, reg);
+ ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+ 0, reg, &orig, 1, 1000, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
/* Write the value only into that part of the register which is allowed
* by the mask. All other bits stay as before.
@@ -167,7 +95,8 @@ static int atusb_write_subreg(struct atusb *atusb, u8 reg, u8 mask,
tmp |= (value << shift) & mask;
if (tmp != orig)
- ret = atusb_write_reg(atusb, reg, tmp);
+ ret = usb_control_msg_send(usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ tmp, reg, NULL, 0, 1000, GFP_KERNEL);
return ret;
}
@@ -176,12 +105,16 @@ static int atusb_read_subreg(struct atusb *lp,
unsigned int addr, unsigned int mask,
unsigned int shift)
{
- int rc;
+ u8 reg;
+ int ret;
+
+ ret = usb_control_msg_recv(lp->usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+ 0, addr, &reg, 1, 1000, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
- rc = atusb_read_reg(lp, addr);
- rc = (rc & mask) >> shift;
+ reg = (reg & mask) >> shift;
- return rc;
+ return reg;
}
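usb_control_msg_recv()/usb_control_msg_send() take over the bounce-buffer handling the dropped atusb helpers did by hand: they copy through an internally allocated DMA-safe buffer (so on-stack or embedded variables are fine) and return 0 on success or a negative errno, including -EREMOTEIO on short transfers, which is why atusb_get_and_show_revision() below now tests 'if (!ret)'. For reference, the receive side's signature:

	int usb_control_msg_recv(struct usb_device *dev, __u8 endpoint,
				 __u8 request, __u8 requesttype,
				 __u16 value, __u16 index,
				 void *driver_data, __u16 size,
				 int timeout, gfp_t memflags);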
static int atusb_get_and_clear_error(struct atusb *atusb)
@@ -419,16 +352,22 @@ static int atusb_set_hw_addr_filt(struct ieee802154_hw *hw,
u16 addr = le16_to_cpu(filt->short_addr);
dev_vdbg(dev, "%s called for saddr\n", __func__);
- atusb_write_reg(atusb, RG_SHORT_ADDR_0, addr);
- atusb_write_reg(atusb, RG_SHORT_ADDR_1, addr >> 8);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ addr, RG_SHORT_ADDR_0, NULL, 0, 1000, GFP_KERNEL);
+
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ addr >> 8, RG_SHORT_ADDR_1, NULL, 0, 1000, GFP_KERNEL);
}
if (changed & IEEE802154_AFILT_PANID_CHANGED) {
u16 pan = le16_to_cpu(filt->pan_id);
dev_vdbg(dev, "%s called for pan id\n", __func__);
- atusb_write_reg(atusb, RG_PAN_ID_0, pan);
- atusb_write_reg(atusb, RG_PAN_ID_1, pan >> 8);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ pan, RG_PAN_ID_0, NULL, 0, 1000, GFP_KERNEL);
+
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ pan >> 8, RG_PAN_ID_1, NULL, 0, 1000, GFP_KERNEL);
}
if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
@@ -437,7 +376,9 @@ static int atusb_set_hw_addr_filt(struct ieee802154_hw *hw,
memcpy(addr, &filt->ieee_addr, IEEE802154_EXTENDED_ADDR_LEN);
dev_vdbg(dev, "%s called for IEEE addr\n", __func__);
for (i = 0; i < 8; i++)
- atusb_write_reg(atusb, RG_IEEE_ADDR_0 + i, addr[i]);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ addr[i], RG_IEEE_ADDR_0 + i, NULL, 0,
+ 1000, GFP_KERNEL);
}
if (changed & IEEE802154_AFILT_PANC_CHANGED) {
@@ -459,7 +400,8 @@ static int atusb_start(struct ieee802154_hw *hw)
dev_dbg(&usb_dev->dev, "%s\n", __func__);
schedule_delayed_work(&atusb->work, 0);
- atusb_command(atusb, ATUSB_RX_MODE, 1);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_RX_MODE, ATUSB_REQ_TO_DEV, 1, 0,
+ NULL, 0, 1000, GFP_KERNEL);
ret = atusb_get_and_clear_error(atusb);
if (ret < 0)
usb_kill_anchored_urbs(&atusb->idle_urbs);
@@ -473,7 +415,8 @@ static void atusb_stop(struct ieee802154_hw *hw)
dev_dbg(&usb_dev->dev, "%s\n", __func__);
usb_kill_anchored_urbs(&atusb->idle_urbs);
- atusb_command(atusb, ATUSB_RX_MODE, 0);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_RX_MODE, ATUSB_REQ_TO_DEV, 0, 0,
+ NULL, 0, 1000, GFP_KERNEL);
atusb_get_and_clear_error(atusb);
}
@@ -580,9 +523,11 @@ atusb_set_cca_mode(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca)
static int hulusb_set_cca_ed_level(struct atusb *lp, int rssi_base_val)
{
- unsigned int cca_ed_thres;
+ int cca_ed_thres;
cca_ed_thres = atusb_read_subreg(lp, SR_CCA_ED_THRES);
+ if (cca_ed_thres < 0)
+ return cca_ed_thres;
switch (rssi_base_val) {
case -98:
@@ -799,18 +744,13 @@ static int atusb_get_and_show_revision(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
char *hw_name;
- unsigned char *buffer;
+ unsigned char buffer[3];
int ret;
- buffer = kmalloc(3, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
/* Get a couple of the ATMega Firmware values */
- ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
- ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
- buffer, 3, 1000);
- if (ret >= 0) {
+ ret = usb_control_msg_recv(atusb->usb_dev, 0, ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
+ buffer, 3, 1000, GFP_KERNEL);
+ if (!ret) {
atusb->fw_ver_maj = buffer[0];
atusb->fw_ver_min = buffer[1];
atusb->fw_hw_type = buffer[2];
@@ -849,7 +789,6 @@ static int atusb_get_and_show_revision(struct atusb *atusb)
dev_info(&usb_dev->dev, "Please update to version 0.2 or newer");
}
- kfree(buffer);
return ret;
}
@@ -863,7 +802,6 @@ static int atusb_get_and_show_build(struct atusb *atusb)
if (!build)
return -ENOMEM;
- /* We cannot call atusb_control_msg() here, since this request may read various length data */
ret = usb_control_msg(atusb->usb_dev, usb_rcvctrlpipe(usb_dev, 0), ATUSB_BUILD,
ATUSB_REQ_FROM_DEV, 0, 0, build, ATUSB_BUILD_SIZE, 1000);
if (ret >= 0) {
@@ -881,14 +819,27 @@ static int atusb_get_and_conf_chip(struct atusb *atusb)
u8 man_id_0, man_id_1, part_num, version_num;
const char *chip;
struct ieee802154_hw *hw = atusb->hw;
+ int ret;
- man_id_0 = atusb_read_reg(atusb, RG_MAN_ID_0);
- man_id_1 = atusb_read_reg(atusb, RG_MAN_ID_1);
- part_num = atusb_read_reg(atusb, RG_PART_NUM);
- version_num = atusb_read_reg(atusb, RG_VERSION_NUM);
+ ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+ 0, RG_MAN_ID_0, &man_id_0, 1, 1000, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
- if (atusb->err)
- return atusb->err;
+ ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+ 0, RG_MAN_ID_1, &man_id_1, 1, 1000, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+ 0, RG_PART_NUM, &part_num, 1, 1000, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+ 0, RG_VERSION_NUM, &version_num, 1, 1000, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS;
@@ -969,7 +920,7 @@ fail:
static int atusb_set_extended_addr(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
- unsigned char *buffer;
+ unsigned char buffer[IEEE802154_EXTENDED_ADDR_LEN];
__le64 extended_addr;
u64 addr;
int ret;
@@ -982,18 +933,12 @@ static int atusb_set_extended_addr(struct atusb *atusb)
return 0;
}
- buffer = kmalloc(IEEE802154_EXTENDED_ADDR_LEN, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
/* Firmware is new enough so we fetch the address from EEPROM */
- ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
- ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0,
- buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000);
+ ret = usb_control_msg_recv(atusb->usb_dev, 0, ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0,
+ buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000, GFP_KERNEL);
if (ret < 0) {
dev_err(&usb_dev->dev, "failed to fetch extended address, random address set\n");
ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr);
- kfree(buffer);
return ret;
}
@@ -1009,7 +954,6 @@ static int atusb_set_extended_addr(struct atusb *atusb)
&addr);
}
- kfree(buffer);
return ret;
}
@@ -1051,7 +995,8 @@ static int atusb_probe(struct usb_interface *interface,
hw->parent = &usb_dev->dev;
- atusb_command(atusb, ATUSB_RF_RESET, 0);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_RF_RESET, ATUSB_REQ_TO_DEV, 0, 0,
+ NULL, 0, 1000, GFP_KERNEL);
atusb_get_and_conf_chip(atusb);
atusb_get_and_show_revision(atusb);
atusb_get_and_show_build(atusb);
@@ -1076,7 +1021,9 @@ static int atusb_probe(struct usb_interface *interface,
* explicitly. Any resets after that will send us straight to TRX_OFF,
* making the command below redundant.
*/
- atusb_write_reg(atusb, RG_TRX_STATE, STATE_FORCE_TRX_OFF);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ STATE_FORCE_TRX_OFF, RG_TRX_STATE, NULL, 0, 1000, GFP_KERNEL);
+
msleep(1); /* reset => TRX_OFF, tTR13 = 37 us */
#if 0
@@ -1104,7 +1051,8 @@ static int atusb_probe(struct usb_interface *interface,
atusb_write_subreg(atusb, SR_RX_SAFE_MODE, 1);
#endif
- atusb_write_reg(atusb, RG_IRQ_MASK, 0xff);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ 0xff, RG_IRQ_MASK, NULL, 0, 1000, GFP_KERNEL);
ret = atusb_get_and_clear_error(atusb);
if (!ret)
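
The atusb hunks above replace the driver's open-coded atusb_control_msg() wrapper with the generic usb_control_msg_send()/usb_control_msg_recv() helpers. Unlike raw usb_control_msg(), these copy data through an internal bounce buffer (which is why on-stack buffers such as buffer[3] above become legal), treat short reads as errors, and return 0 on success or a negative errno. A hedged sketch of the read path, reusing the request constants from the diff:

	/* Sketch only: read one transceiver register over the vendor
	 * control endpoint. The (value, index) split of (0, reg) mirrors
	 * the ATUSB_REG_READ calls above; "val" may live on the stack.
	 */
	static int example_read_reg(struct usb_device *udev, u16 reg, u8 *val)
	{
		return usb_control_msg_recv(udev, 0, ATUSB_REG_READ,
					    ATUSB_REQ_FROM_DEV, 0, reg,
					    val, 1, 1000, GFP_KERNEL);
	}
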
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 2bc730fd260e..187cbc634ce8 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -831,7 +831,7 @@ static void ca8210_rx_done(struct cas_control *cas_ctl)
finish:;
}
-static int ca8210_remove(struct spi_device *spi_device);
+static void ca8210_remove(struct spi_device *spi_device);
/**
* ca8210_spi_transfer_complete() - Called when a single spi transfer has
@@ -3049,7 +3049,7 @@ static void ca8210_test_interface_clear(struct ca8210_priv *priv)
*
* Return: 0 or linux error code
*/
-static int ca8210_remove(struct spi_device *spi_device)
+static void ca8210_remove(struct spi_device *spi_device)
{
struct ca8210_priv *priv;
struct ca8210_platform_data *pdata;
@@ -3089,8 +3089,6 @@ static int ca8210_remove(struct spi_device *spi_device)
if (IS_ENABLED(CONFIG_IEEE802154_CA8210_DEBUGFS))
ca8210_test_interface_clear(priv);
}
-
- return 0;
}
/**
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index 89c046b204e0..1e1f40f628a0 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -1213,7 +1213,7 @@ err_hw_init:
return ret;
}
-static int cc2520_remove(struct spi_device *spi)
+static void cc2520_remove(struct spi_device *spi)
{
struct cc2520_private *priv = spi_get_drvdata(spi);
@@ -1222,8 +1222,6 @@ static int cc2520_remove(struct spi_device *spi)
ieee802154_unregister_hw(priv->hw);
ieee802154_free_hw(priv->hw);
-
- return 0;
}
static const struct spi_device_id cc2520_ids[] = {
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 36f1c5aa98fc..38c217bd7c82 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -791,7 +791,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
phy->idx = idx;
INIT_LIST_HEAD(&phy->edges);
- hw->flags = IEEE802154_HW_PROMISCUOUS;
+ hw->flags = IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_RX_DROP_BAD_CKSUM;
hw->parent = dev;
err = ieee802154_register_hw(hw);
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index 383231b85464..c927a5ae0d05 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -1335,7 +1335,7 @@ free_dev:
return ret;
}
-static int mcr20a_remove(struct spi_device *spi)
+static void mcr20a_remove(struct spi_device *spi)
{
struct mcr20a_local *lp = spi_get_drvdata(spi);
@@ -1343,8 +1343,6 @@ static int mcr20a_remove(struct spi_device *spi)
ieee802154_unregister_hw(lp->hw);
ieee802154_free_hw(lp->hw);
-
- return 0;
}
static const struct of_device_id mcr20a_of_match[] = {
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index ff83e00b77af..ee4cfbf2c5cc 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -1356,7 +1356,7 @@ err_ret:
return ret;
}
-static int mrf24j40_remove(struct spi_device *spi)
+static void mrf24j40_remove(struct spi_device *spi)
{
struct mrf24j40 *devrec = spi_get_drvdata(spi);
@@ -1366,8 +1366,6 @@ static int mrf24j40_remove(struct spi_device *spi)
ieee802154_free_hw(devrec->hw);
/* TODO: Will ieee802154_free_device() wait until ->xmit() is
* complete? */
-
- return 0;
}
static const struct of_device_id mrf24j40_of_match[] = {
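
The four SPI hunks above (ca8210, cc2520, mcr20a, mrf24j40) are part of the tree-wide change making spi_driver::remove return void: the SPI core always ignored the returned value, so the int return only produced dead "return 0" lines like the ones deleted here. A hedged sketch of the resulting shape, with hypothetical example_* names:

	#include <linux/spi/spi.h>
	#include <net/mac802154.h>

	struct example_priv { struct ieee802154_hw *hw; };

	/* remove() can no longer report failure; anything that goes
	 * wrong must be logged from here instead of returned.
	 */
	static void example_remove(struct spi_device *spi)
	{
		struct example_priv *priv = spi_get_drvdata(spi);

		ieee802154_unregister_hw(priv->hw);
		ieee802154_free_hw(priv->hw);
	}
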
diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig
index d037682fb7ad..6782c2cbf542 100644
--- a/drivers/net/ipa/Kconfig
+++ b/drivers/net/ipa/Kconfig
@@ -2,7 +2,9 @@ config QCOM_IPA
tristate "Qualcomm IPA support"
depends on NET && QCOM_SMEM
depends on ARCH_QCOM || COMPILE_TEST
+ depends on INTERCONNECT
depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select QCOM_MDT_LOADER if ARCH_QCOM
select QCOM_SCM
select QCOM_QMI_HELPERS
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index 1544564bc283..87e1d43c118c 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -320,6 +320,17 @@ gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
atomic_add(tre_count, &trans_info->tre_avail);
}
+/* Return true if no transactions are allocated, false otherwise */
+bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id)
+{
+ u32 tre_max = gsi_channel_tre_max(gsi, channel_id);
+ struct gsi_trans_info *trans_info;
+
+ trans_info = &gsi->channel[channel_id].trans_info;
+
+ return atomic_read(&trans_info->tre_avail) == tre_max;
+}
+
/* Allocate a GSI transaction on a channel */
struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
u32 tre_count,
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
index 17fd1822d8a9..af379b49299e 100644
--- a/drivers/net/ipa/gsi_trans.h
+++ b/drivers/net/ipa/gsi_trans.h
@@ -130,6 +130,16 @@ void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr);
void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool);
/**
+ * gsi_channel_trans_idle() - Return whether no transactions are allocated
+ * @gsi: GSI pointer
+ * @channel_id: Channel the transaction is associated with
+ *
+ * Return: True if no transactions are allocated, false otherwise
+ *
+ */
+bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id);
+
+/**
* gsi_channel_trans_alloc() - Allocate a GSI transaction on a channel
* @gsi: GSI pointer
* @channel_id: Channel the transaction is associated with
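
gsi_channel_trans_idle() works because tre_avail starts at the channel's TRE maximum and every allocated transaction reserves TREs until it completes, so equality with tre_max holds exactly when no transaction is outstanding. The replenish rework later in this patch uses it as its sole "hardware is empty" signal; a restatement of that call site as a sketch:

	/* Schedule a retry only when the channel holds no receive
	 * buffers at all; otherwise a completing transaction will
	 * trigger the next replenish for us.
	 */
	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
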
diff --git a/drivers/net/ipa/ipa_data-v3.1.c b/drivers/net/ipa/ipa_data-v3.1.c
index 06ddb85f39b2..8ff351aefd23 100644
--- a/drivers/net/ipa/ipa_data-v3.1.c
+++ b/drivers/net/ipa/ipa_data-v3.1.c
@@ -101,6 +101,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
},
},
@@ -148,6 +149,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 8192,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data-v3.5.1.c b/drivers/net/ipa/ipa_data-v3.5.1.c
index 760c22bbdf70..d1c466abddb2 100644
--- a/drivers/net/ipa/ipa_data-v3.5.1.c
+++ b/drivers/net/ipa/ipa_data-v3.5.1.c
@@ -92,6 +92,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
},
},
@@ -140,6 +141,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 8192,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data-v4.11.c b/drivers/net/ipa/ipa_data-v4.11.c
index fea91451a0c3..b1991cc6f0ca 100644
--- a/drivers/net/ipa/ipa_data-v4.11.c
+++ b/drivers/net/ipa/ipa_data-v4.11.c
@@ -86,6 +86,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
},
},
@@ -133,6 +134,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 32768,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data-v4.2.c b/drivers/net/ipa/ipa_data-v4.2.c
index 2a231e79d5e1..1190a43e8743 100644
--- a/drivers/net/ipa/ipa_data-v4.2.c
+++ b/drivers/net/ipa/ipa_data-v4.2.c
@@ -82,6 +82,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
},
},
@@ -130,6 +131,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 8192,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data-v4.5.c b/drivers/net/ipa/ipa_data-v4.5.c
index 2da2c4194f2e..944f72b0f285 100644
--- a/drivers/net/ipa/ipa_data-v4.5.c
+++ b/drivers/net/ipa/ipa_data-v4.5.c
@@ -95,6 +95,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
},
},
@@ -142,6 +143,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 8192,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data-v4.9.c b/drivers/net/ipa/ipa_data-v4.9.c
index 2421b5abb5d4..16786bff7ef8 100644
--- a/drivers/net/ipa/ipa_data-v4.9.c
+++ b/drivers/net/ipa/ipa_data-v4.9.c
@@ -87,6 +87,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
},
},
@@ -134,6 +135,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 8192,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index 6d329e9ce5d2..dbbeecf6df29 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -112,6 +112,7 @@ struct ipa_endpoint_tx_data {
/**
* struct ipa_endpoint_rx_data - configuration data for RX endpoints
+ * @buffer_size: requested receive buffer size (bytes)
* @pad_align: power-of-2 boundary to which packet payload is aligned
* @aggr_close_eof: whether aggregation closes on end-of-frame
*
@@ -125,6 +126,7 @@ struct ipa_endpoint_tx_data {
* a "frame" consisting of several transfers has ended.
*/
struct ipa_endpoint_rx_data {
+ u32 buffer_size;
u32 pad_align;
bool aggr_close_eof;
};
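
With buffer_size carried in the per-endpoint RX data, the data files above can size receive buffers per IPA generation and per endpoint (8192 bytes for most, 32768 for the v4.11 modem RX endpoint) instead of sharing one IPA_RX_BUFFER_SIZE constant. A standalone sketch of what such an entry carries, using a simplified mirror struct purely for illustration (the real type is struct ipa_endpoint_rx_data above):

	#include <linux/types.h>

	struct rx_data_example {
		u32 buffer_size;	/* bytes; replaces IPA_RX_BUFFER_SIZE */
		u32 pad_align;
		bool aggr_close_eof;
	};

	/* Values match the v4.11 modem RX entry in the diff above */
	static const struct rx_data_example modem_rx = {
		.buffer_size	= 32768,
		.aggr_close_eof	= true,
	};
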
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 68291a3efd04..888e94278a84 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -25,10 +25,8 @@
#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
-#define IPA_REPLENISH_BATCH 16
-
-/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
-#define IPA_RX_BUFFER_SIZE 8192 /* PAGE_SIZE > 4096 wastes a LOT */
+/* Hardware is told about receive buffers once a "batch" has been queued */
+#define IPA_REPLENISH_BATCH 16 /* Must be non-zero */
/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
@@ -75,6 +73,14 @@ struct ipa_status {
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
#define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16)
+static u32 aggr_byte_limit_max(enum ipa_version version)
+{
+ if (version < IPA_VERSION_4_5)
+ return field_max(aggr_byte_limit_fmask(true));
+
+ return field_max(aggr_byte_limit_fmask(false));
+}
+
static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *all_data,
const struct ipa_gsi_endpoint_data *data)
@@ -87,6 +93,9 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return true;
if (!data->toward_ipa) {
+ u32 buffer_size;
+ u32 limit;
+
if (data->endpoint.filter_support) {
dev_err(dev, "filtering not supported for "
"RX endpoint %u\n",
@@ -94,6 +103,41 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return false;
}
+ /* Nothing more to check for non-AP RX */
+ if (data->ee_id != GSI_EE_AP)
+ return true;
+
+ buffer_size = data->endpoint.config.rx.buffer_size;
+ /* The buffer size must hold an MTU plus overhead */
+ limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
+ if (buffer_size < limit) {
+ dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
+ data->endpoint_id, buffer_size, limit);
+ return false;
+ }
+
+ /* For an endpoint supporting receive aggregation, the
+ * aggregation byte limit defines the point at which an
+ * aggregation window will close. It is programmed into the
+ * IPA hardware as a number of KB. We don't use "hard byte
+ * limit" aggregation, so we need to supply enough space in
+ * a receive buffer to hold a complete MTU plus normal skb
+ * overhead *after* that aggregation byte limit has been
+ * crossed.
+ *
+ * This check just ensures the receive buffer size doesn't
+ * exceed what's representable in the aggregation limit field.
+ */
+ if (data->endpoint.config.aggregation) {
+ limit += SZ_1K * aggr_byte_limit_max(ipa->version);
+ if (buffer_size > limit) {
+ dev_err(dev, "RX buffer size too large for aggregated RX endpoint %u (%u > %u)\n",
+ data->endpoint_id, buffer_size, limit);
+
+ return false;
+ }
+ }
+
return true; /* Nothing more to check for RX */
}
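
The ceiling computed in this check is the largest buffer the aggregation byte-limit register field can usefully describe: the field is programmed in KB, and because "soft" byte-limit aggregation may accept one final packet after the limit is crossed, one MTU plus skb overhead is added on top. As a sketch (aggr_kb_max stands in for aggr_byte_limit_max(), whose value depends on the IPA version):

	/* limit = MTU + overhead + (max encodable KB) * 1K; an RX buffer
	 * larger than this could never be fully represented.
	 */
	static u32 rx_buffer_size_max(u32 aggr_kb_max)
	{
		return IPA_MTU + IPA_RX_BUFFER_OVERHEAD + aggr_kb_max * SZ_1K;
	}
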
@@ -156,21 +200,12 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return true;
}
-static u32 aggr_byte_limit_max(enum ipa_version version)
-{
- if (version < IPA_VERSION_4_5)
- return field_max(aggr_byte_limit_fmask(true));
-
- return field_max(aggr_byte_limit_fmask(false));
-}
-
static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
const struct ipa_gsi_endpoint_data *dp = data;
struct device *dev = &ipa->pdev->dev;
enum ipa_endpoint_name name;
- u32 limit;
if (count > IPA_ENDPOINT_COUNT) {
dev_err(dev, "too many endpoints specified (%u > %u)\n",
@@ -178,26 +213,6 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
return false;
}
- /* The aggregation byte limit defines the point at which an
- * aggregation window will close. It is programmed into the
- * IPA hardware as a number of KB. We don't use "hard byte
- * limit" aggregation, which means that we need to supply
- * enough space in a receive buffer to hold a complete MTU
- * plus normal skb overhead *after* that aggregation byte
- * limit has been crossed.
- *
- * This check ensures we don't define a receive buffer size
- * that would exceed what we can represent in the field that
- * is used to program its size.
- */
- limit = aggr_byte_limit_max(ipa->version) * SZ_1K;
- limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
- if (limit < IPA_RX_BUFFER_SIZE) {
- dev_err(dev, "buffer size too big for aggregation (%u > %u)\n",
- IPA_RX_BUFFER_SIZE, limit);
- return false;
- }
-
/* Make sure needed endpoints have defined data */
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
dev_err(dev, "command TX endpoint not defined\n");
@@ -723,13 +738,15 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
if (endpoint->data->aggregation) {
if (!endpoint->toward_ipa) {
+ const struct ipa_endpoint_rx_data *rx_data;
bool close_eof;
u32 limit;
+ rx_data = &endpoint->data->rx;
val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
- limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
+ limit = ipa_aggr_size_kb(rx_data->buffer_size);
val |= aggr_byte_limit_encoded(version, limit);
limit = IPA_AGGR_TIME_LIMIT;
@@ -737,7 +754,7 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
/* AGGR_PKT_LIMIT is 0 (unlimited) */
- close_eof = endpoint->data->rx.aggr_close_eof;
+ close_eof = rx_data->aggr_close_eof;
val |= aggr_sw_eof_active_encoded(version, close_eof);
/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
@@ -1020,134 +1037,98 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
iowrite32(val, ipa->reg_virt + offset);
}
-static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
+static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
{
- struct gsi_trans *trans;
- bool doorbell = false;
struct page *page;
+ u32 buffer_size;
u32 offset;
u32 len;
int ret;
- page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
+ buffer_size = endpoint->data->rx.buffer_size;
+ page = dev_alloc_pages(get_order(buffer_size));
if (!page)
return -ENOMEM;
- trans = ipa_endpoint_trans_alloc(endpoint, 1);
- if (!trans)
- goto err_free_pages;
-
/* Offset the buffer to make space for skb headroom */
offset = NET_SKB_PAD;
- len = IPA_RX_BUFFER_SIZE - offset;
+ len = buffer_size - offset;
ret = gsi_trans_page_add(trans, page, len, offset);
if (ret)
- goto err_trans_free;
- trans->data = page; /* transaction owns page now */
-
- if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
- doorbell = true;
- endpoint->replenish_ready = 0;
- }
-
- gsi_trans_commit(trans, doorbell);
-
- return 0;
-
-err_trans_free:
- gsi_trans_free(trans);
-err_free_pages:
- __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+ __free_pages(page, get_order(buffer_size));
+ else
+ trans->data = page; /* transaction owns page now */
- return -ENOMEM;
+ return ret;
}
/**
* ipa_endpoint_replenish() - Replenish endpoint receive buffers
* @endpoint: Endpoint to be replenished
- * @add_one: Whether this is replacing a just-consumed buffer
*
* The IPA hardware can hold a fixed number of receive buffers for an RX
* endpoint, based on the number of entries in the underlying channel ring
* buffer. If an endpoint's "backlog" is non-zero, it indicates how many
* more receive buffers can be supplied to the hardware. Replenishing for
- * an endpoint can be disabled, in which case requests to replenish a
- * buffer are "saved", and transferred to the backlog once it is re-enabled
- * again.
+ * an endpoint can be disabled, in which case buffers are not queued to
+ * the hardware.
*/
-static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
+static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
{
- struct gsi *gsi;
- u32 backlog;
- int delta;
+ struct gsi_trans *trans;
- if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) {
- if (add_one)
- atomic_inc(&endpoint->replenish_saved);
+ if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
return;
- }
- /* If already active, just update the backlog */
- if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) {
- if (add_one)
- atomic_inc(&endpoint->replenish_backlog);
+ /* Skip it if it's already active */
+ if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
return;
- }
- while (atomic_dec_not_zero(&endpoint->replenish_backlog))
- if (ipa_endpoint_replenish_one(endpoint))
+ while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
+ bool doorbell;
+
+ if (ipa_endpoint_replenish_one(endpoint, trans))
goto try_again_later;
- clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
- if (add_one)
- atomic_inc(&endpoint->replenish_backlog);
+ /* Ring the doorbell if we've got a full batch */
+ doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
+ gsi_trans_commit(trans, doorbell);
+ }
+
+ clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
return;
try_again_later:
+ gsi_trans_free(trans);
clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
- /* The last one didn't succeed, so fix the backlog */
- delta = add_one ? 2 : 1;
- backlog = atomic_add_return(delta, &endpoint->replenish_backlog);
-
/* Whenever a receive buffer transaction completes we'll try to
* replenish again. It's unlikely, but if we fail to supply even
* one buffer, nothing will trigger another replenish attempt.
- * Receive buffer transactions use one TRE, so schedule work to
- * try replenishing again if our backlog is *all* available TREs.
+ * If the hardware has no receive buffers queued, schedule work to
+ * try replenishing again.
*/
- gsi = &endpoint->ipa->gsi;
- if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
+ if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
schedule_delayed_work(&endpoint->replenish_work,
msecs_to_jiffies(1));
}
static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
- struct gsi *gsi = &endpoint->ipa->gsi;
- u32 max_backlog;
- u32 saved;
-
set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
- while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
- atomic_add(saved, &endpoint->replenish_backlog);
/* Start replenishing if hardware currently has no buffers */
- max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
- if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
- ipa_endpoint_replenish(endpoint, false);
+ if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
+ ipa_endpoint_replenish(endpoint);
}
static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
- u32 backlog;
-
clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
- while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
- atomic_add(backlog, &endpoint->replenish_saved);
}
static void ipa_endpoint_replenish_work(struct work_struct *work)
@@ -1157,7 +1138,7 @@ static void ipa_endpoint_replenish_work(struct work_struct *work)
endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
- ipa_endpoint_replenish(endpoint, false);
+ ipa_endpoint_replenish(endpoint);
}
static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
@@ -1183,15 +1164,16 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
struct page *page, u32 len)
{
+ u32 buffer_size = endpoint->data->rx.buffer_size;
struct sk_buff *skb;
/* Nothing to do if there's no netdev */
if (!endpoint->netdev)
return false;
- WARN_ON(len > SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE - NET_SKB_PAD));
+ WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));
- skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
+ skb = build_skb(page_address(page), buffer_size);
if (skb) {
/* Reserve the headroom and account for the data */
skb_reserve(skb, NET_SKB_PAD);
@@ -1289,8 +1271,9 @@ static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
struct page *page, u32 total_len)
{
+ u32 buffer_size = endpoint->data->rx.buffer_size;
void *data = page_address(page) + NET_SKB_PAD;
- u32 unused = IPA_RX_BUFFER_SIZE - total_len;
+ u32 unused = buffer_size - total_len;
u32 resid = total_len;
while (resid) {
@@ -1360,10 +1343,8 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
{
struct page *page;
- ipa_endpoint_replenish(endpoint, true);
-
if (trans->cancelled)
- return;
+ goto done;
/* Parse or build a socket buffer using the actual received length */
page = trans->data;
@@ -1371,6 +1352,8 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
ipa_endpoint_status_parse(endpoint, page, trans->len);
else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
trans->data = NULL; /* Pages have been consumed */
+done:
+ ipa_endpoint_replenish(endpoint);
}
void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
@@ -1398,8 +1381,11 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
} else {
struct page *page = trans->data;
- if (page)
- __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+ if (page) {
+ u32 buffer_size = endpoint->data->rx.buffer_size;
+
+ __free_pages(page, get_order(buffer_size));
+ }
}
}
@@ -1704,9 +1690,6 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
*/
clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
- atomic_set(&endpoint->replenish_saved,
- gsi_channel_tre_max(gsi, endpoint->channel_id));
- atomic_set(&endpoint->replenish_backlog, 0);
INIT_DELAYED_WORK(&endpoint->replenish_work,
ipa_endpoint_replenish_work);
}
@@ -1882,6 +1865,8 @@ u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
enum ipa_endpoint_name name;
u32 filter_map;
+ BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
+
if (!ipa_endpoint_data_valid(ipa, count, data))
return 0; /* Error */
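
The replenish rework above drops the replenish_saved/replenish_backlog atomics entirely: the transaction pool itself now bounds the loop (allocation fails once the hardware ring is full), and a free-running replenish_count batches doorbell rings. The BUILD_BUG_ON added in ipa_endpoint_init() exists because the batching relies on a modulo; a fragment-level sketch:

	/* With IPA_REPLENISH_BATCH == 0 this expression would divide by
	 * zero, so a zero batch size is rejected at compile time.
	 */
	BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
	doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
	gsi_trans_commit(trans, doorbell);
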
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 0313cdc607de..12fd5b16c18e 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -65,9 +65,7 @@ enum ipa_replenish_flag {
* @evt_ring_id: GSI event ring used by the endpoint
* @netdev: Network device pointer, if endpoint uses one
* @replenish_flags: Replenishing state flags
- * @replenish_ready: Number of replenish transactions without doorbell
- * @replenish_saved: Replenish requests held while disabled
- * @replenish_backlog: Number of buffers needed to fill hardware queue
+ * @replenish_count: Total number of replenish transactions committed
* @replenish_work: Work item used for repeated replenish failures
*/
struct ipa_endpoint {
@@ -86,9 +84,7 @@ struct ipa_endpoint {
/* Receive buffer replenishing for RX endpoints */
DECLARE_BITMAP(replenish_flags, IPA_REPLENISH_COUNT);
- u32 replenish_ready;
- atomic_t replenish_saved;
- atomic_t replenish_backlog;
+ u64 replenish_count;
struct delayed_work replenish_work; /* global wq */
};
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index f2989aac47a6..db5ac7552286 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -35,18 +35,6 @@
#define IPA_AUTOSUSPEND_DELAY 500 /* milliseconds */
/**
- * struct ipa_interconnect - IPA interconnect information
- * @path: Interconnect path
- * @average_bandwidth: Average interconnect bandwidth (KB/second)
- * @peak_bandwidth: Peak interconnect bandwidth (KB/second)
- */
-struct ipa_interconnect {
- struct icc_path *path;
- u32 average_bandwidth;
- u32 peak_bandwidth;
-};
-
-/**
* enum ipa_power_flag - IPA power flags
* @IPA_POWER_FLAG_RESUMED: Whether resume from suspend has been signaled
* @IPA_POWER_FLAG_SYSTEM: Hardware is system (not runtime) suspended
@@ -79,164 +67,78 @@ struct ipa_power {
spinlock_t spinlock; /* used with STOPPED/STARTED power flags */
DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
u32 interconnect_count;
- struct ipa_interconnect *interconnect;
+ struct icc_bulk_data interconnect[];
};
-static int ipa_interconnect_init_one(struct device *dev,
- struct ipa_interconnect *interconnect,
- const struct ipa_interconnect_data *data)
-{
- struct icc_path *path;
-
- path = of_icc_get(dev, data->name);
- if (IS_ERR(path)) {
- int ret = PTR_ERR(path);
-
- dev_err_probe(dev, ret, "error getting %s interconnect\n",
- data->name);
-
- return ret;
- }
-
- interconnect->path = path;
- interconnect->average_bandwidth = data->average_bandwidth;
- interconnect->peak_bandwidth = data->peak_bandwidth;
-
- return 0;
-}
-
-static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect)
-{
- icc_put(interconnect->path);
- memset(interconnect, 0, sizeof(*interconnect));
-}
-
/* Initialize interconnects required for IPA operation */
-static int ipa_interconnect_init(struct ipa_power *power, struct device *dev,
+static int ipa_interconnect_init(struct ipa_power *power,
const struct ipa_interconnect_data *data)
{
- struct ipa_interconnect *interconnect;
- u32 count;
- int ret;
-
- count = power->interconnect_count;
- interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL);
- if (!interconnect)
- return -ENOMEM;
- power->interconnect = interconnect;
-
- while (count--) {
- ret = ipa_interconnect_init_one(dev, interconnect, data++);
- if (ret)
- goto out_unwind;
- interconnect++;
- }
-
- return 0;
-
-out_unwind:
- while (interconnect-- > power->interconnect)
- ipa_interconnect_exit_one(interconnect);
- kfree(power->interconnect);
- power->interconnect = NULL;
-
- return ret;
-}
-
-/* Inverse of ipa_interconnect_init() */
-static void ipa_interconnect_exit(struct ipa_power *power)
-{
- struct ipa_interconnect *interconnect;
-
- interconnect = power->interconnect + power->interconnect_count;
- while (interconnect-- > power->interconnect)
- ipa_interconnect_exit_one(interconnect);
- kfree(power->interconnect);
- power->interconnect = NULL;
-}
-
-/* Currently we only use one bandwidth level, so just "enable" interconnects */
-static int ipa_interconnect_enable(struct ipa *ipa)
-{
- struct ipa_interconnect *interconnect;
- struct ipa_power *power = ipa->power;
+ struct icc_bulk_data *interconnect;
int ret;
u32 i;
- interconnect = power->interconnect;
+ /* Initialize our interconnect data array for bulk operations */
+ interconnect = &power->interconnect[0];
for (i = 0; i < power->interconnect_count; i++) {
- ret = icc_set_bw(interconnect->path,
- interconnect->average_bandwidth,
- interconnect->peak_bandwidth);
- if (ret) {
- dev_err(&ipa->pdev->dev,
- "error %d enabling %s interconnect\n",
- ret, icc_get_name(interconnect->path));
- goto out_unwind;
- }
+ /* interconnect->path is filled in by of_icc_bulk_get() */
+ interconnect->name = data->name;
+ interconnect->avg_bw = data->average_bandwidth;
+ interconnect->peak_bw = data->peak_bandwidth;
+ data++;
interconnect++;
}
- return 0;
+ ret = of_icc_bulk_get(power->dev, power->interconnect_count,
+ power->interconnect);
+ if (ret)
+ return ret;
-out_unwind:
- while (interconnect-- > power->interconnect)
- (void)icc_set_bw(interconnect->path, 0, 0);
+ /* All interconnects are initially disabled */
+ icc_bulk_disable(power->interconnect_count, power->interconnect);
+
+ /* Set the bandwidth values to be used when enabled */
+ ret = icc_bulk_set_bw(power->interconnect_count, power->interconnect);
+ if (ret)
+ icc_bulk_put(power->interconnect_count, power->interconnect);
return ret;
}
-/* To disable an interconnect, we just set its bandwidth to 0 */
-static int ipa_interconnect_disable(struct ipa *ipa)
+/* Inverse of ipa_interconnect_init() */
+static void ipa_interconnect_exit(struct ipa_power *power)
{
- struct ipa_interconnect *interconnect;
- struct ipa_power *power = ipa->power;
- struct device *dev = &ipa->pdev->dev;
- int result = 0;
- u32 count;
- int ret;
-
- count = power->interconnect_count;
- interconnect = power->interconnect + count;
- while (count--) {
- interconnect--;
- ret = icc_set_bw(interconnect->path, 0, 0);
- if (ret) {
- dev_err(dev, "error %d disabling %s interconnect\n",
- ret, icc_get_name(interconnect->path));
- /* Try to disable all; record only the first error */
- if (!result)
- result = ret;
- }
- }
-
- return result;
+ icc_bulk_put(power->interconnect_count, power->interconnect);
}
/* Enable IPA power, enabling interconnects and the core clock */
static int ipa_power_enable(struct ipa *ipa)
{
+ struct ipa_power *power = ipa->power;
int ret;
- ret = ipa_interconnect_enable(ipa);
+ ret = icc_bulk_enable(power->interconnect_count, power->interconnect);
if (ret)
return ret;
- ret = clk_prepare_enable(ipa->power->core);
+ ret = clk_prepare_enable(power->core);
if (ret) {
- dev_err(&ipa->pdev->dev, "error %d enabling core clock\n", ret);
- (void)ipa_interconnect_disable(ipa);
+ dev_err(power->dev, "error %d enabling core clock\n", ret);
+ icc_bulk_disable(power->interconnect_count,
+ power->interconnect);
}
return ret;
}
/* Inverse of ipa_power_enable() */
-static int ipa_power_disable(struct ipa *ipa)
+static void ipa_power_disable(struct ipa *ipa)
{
- clk_disable_unprepare(ipa->power->core);
+ struct ipa_power *power = ipa->power;
- return ipa_interconnect_disable(ipa);
+ clk_disable_unprepare(power->core);
+
+ icc_bulk_disable(power->interconnect_count, power->interconnect);
}
static int ipa_runtime_suspend(struct device *dev)
@@ -250,7 +152,9 @@ static int ipa_runtime_suspend(struct device *dev)
gsi_suspend(&ipa->gsi);
}
- return ipa_power_disable(ipa);
+ ipa_power_disable(ipa);
+
+ return 0;
}
static int ipa_runtime_resume(struct device *dev)
@@ -453,6 +357,7 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
{
struct ipa_power *power;
struct clk *clk;
+ size_t size;
int ret;
clk = clk_get(dev, "core");
@@ -469,7 +374,8 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
goto err_clk_put;
}
- power = kzalloc(sizeof(*power), GFP_KERNEL);
+ size = struct_size(power, interconnect, data->interconnect_count);
+ power = kzalloc(size, GFP_KERNEL);
if (!power) {
ret = -ENOMEM;
goto err_clk_put;
@@ -479,7 +385,7 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
spin_lock_init(&power->spinlock);
power->interconnect_count = data->interconnect_count;
- ret = ipa_interconnect_init(power, dev, data->interconnect_data);
+ ret = ipa_interconnect_init(power, data->interconnect_data);
if (ret)
goto err_kfree;
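
ipa_power.c now stores struct icc_bulk_data entries in a flexible array (sized with struct_size() above) and drives them through the interconnect framework's bulk helpers instead of hand-rolled loops over icc_set_bw(). A hedged sketch of that lifecycle with hypothetical example_* naming:

	#include <linux/interconnect.h>

	/* "paths" carries name/avg_bw/peak_bw for n interconnects;
	 * of_icc_bulk_get() looks each name up in DT and fills ->path.
	 */
	static int example_icc_init(struct device *dev, int n,
				    struct icc_bulk_data *paths)
	{
		int ret;

		ret = of_icc_bulk_get(dev, n, paths);
		if (ret)
			return ret;

		icc_bulk_disable(n, paths);	/* start powered down */

		ret = icc_bulk_set_bw(n, paths);	/* latch bandwidths */
		if (ret)
			icc_bulk_put(n, paths);

		return ret;
	}

At runtime, enabling and disabling collapse to icc_bulk_enable() and icc_bulk_disable() over the same array, which is what lets ipa_power_disable() become a void function above.
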
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index c613900c3811..6ffb27419e64 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -555,7 +555,7 @@ static void ipvlan_multicast_enqueue(struct ipvl_port *port,
schedule_work(&port->wq);
} else {
spin_unlock(&port->backlog.lock);
- atomic_long_inc(&skb->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(skb->dev);
kfree_skb(skb);
}
}
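
dev_core_stats_rx_dropped_inc() replaces the open-coded atomic_long_inc(&dev->rx_dropped): it bumps a lazily allocated per-CPU core-stats counter, so concurrent drop paths no longer contend on one shared atomic. The macvlan hunk below makes the same substitution. A sketch of the drop pattern:

	#include <linux/netdevice.h>

	/* Count the drop against per-CPU core stats, then free the skb */
	static void example_drop(struct sk_buff *skb)
	{
		dev_core_stats_rx_dropped_inc(skb->dev);
		kfree_skb(skb);
	}
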
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index ed0edf5884ef..720394c0639b 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -74,11 +74,11 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
skb_tx_timestamp(skb);
/* do not fool net_timestamp_check() with various clock bases */
- skb->tstamp = 0;
+ skb_clear_tstamp(skb);
skb_orphan(skb);
- /* Before queueing this packet to netif_rx(),
+ /* Before queueing this packet to __netif_rx(),
* make sure dst is refcounted.
*/
skb_dst_force(skb);
@@ -86,7 +86,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
skb->protocol = eth_type_trans(skb, dev);
len = skb->len;
- if (likely(netif_rx(skb) == NET_RX_SUCCESS))
+ if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
dev_lstats_add(dev, len);
return NETDEV_TX_OK;
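
This is part of the netif_rx() rework: netif_rx() is now callable from any context (it disables bottom halves itself), netif_rx_ni() is retired (see the macvlan hunk below), and __netif_rx() is the fast-path variant reserved for callers that already run with BH disabled, such as this ndo_start_xmit handler. A minimal sketch of the split:

	#include <linux/netdevice.h>

	/* BH is already disabled inside an ndo_start_xmit handler, so
	 * the fast-path variant is safe here; from plain process
	 * context, regular netif_rx() must be used instead.
	 */
	static netdev_tx_t example_xmit(struct sk_buff *skb,
					struct net_device *dev)
	{
		__netif_rx(skb);
		return NETDEV_TX_OK;
	}
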
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 3d0874331763..832f09ac075e 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1033,7 +1033,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
else
nskb->pkt_type = PACKET_MULTICAST;
- netif_rx(nskb);
+ __netif_rx(nskb);
}
continue;
}
@@ -1056,7 +1056,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
nskb->dev = ndev;
- if (netif_rx(nskb) == NET_RX_SUCCESS) {
+ if (__netif_rx(nskb) == NET_RX_SUCCESS) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsUntagged++;
u64_stats_update_end(&secy_stats->syncp);
@@ -1288,7 +1288,7 @@ nosci:
macsec_reset_skb(nskb, macsec->secy.netdev);
- ret = netif_rx(nskb);
+ ret = __netif_rx(nskb);
if (ret == NET_RX_SUCCESS) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsUnknownSCI++;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 6ef5f77be4d0..069e8824c264 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -285,7 +285,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
if (likely(nskb))
err = macvlan_broadcast_one(nskb, vlan, eth,
mode == MACVLAN_MODE_BRIDGE) ?:
- netif_rx_ni(nskb);
+ netif_rx(nskb);
macvlan_count_rx(vlan, skb->len + ETH_HLEN,
err == NET_RX_SUCCESS, true);
}
@@ -371,7 +371,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
free_nskb:
kfree_skb(nskb);
err:
- atomic_long_inc(&skb->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(skb->dev);
}
static void macvlan_flush_sources(struct macvlan_port *port,
@@ -410,7 +410,7 @@ static void macvlan_forward_source_one(struct sk_buff *skb,
if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, dev->dev_addr))
nskb->pkt_type = PACKET_HOST;
- ret = netif_rx(nskb);
+ ret = __netif_rx(nskb);
macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false);
}
@@ -468,7 +468,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
/* forward to original port. */
vlan = src;
ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?:
- netif_rx(skb);
+ __netif_rx(skb);
handle_res = RX_HANDLER_CONSUMED;
goto out;
}
@@ -889,7 +889,7 @@ static void macvlan_set_lockdep_class(struct net_device *dev)
static int macvlan_init(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- const struct net_device *lowerdev = vlan->lowerdev;
+ struct net_device *lowerdev = vlan->lowerdev;
struct macvlan_port *port = vlan->port;
dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
@@ -911,6 +911,9 @@ static int macvlan_init(struct net_device *dev)
port->count += 1;
+ /* Get macvlan's reference to lowerdev */
+ dev_hold_track(lowerdev, &vlan->dev_tracker, GFP_KERNEL);
+
return 0;
}
@@ -1173,6 +1176,14 @@ static const struct net_device_ops macvlan_netdev_ops = {
.ndo_features_check = passthru_features_check,
};
+static void macvlan_dev_free(struct net_device *dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+
+ /* Get rid of the macvlan's reference to lowerdev */
+ dev_put_track(vlan->lowerdev, &vlan->dev_tracker);
+}
+
void macvlan_common_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -1184,6 +1195,7 @@ void macvlan_common_setup(struct net_device *dev)
dev->priv_flags |= IFF_UNICAST_FLT | IFF_CHANGE_PROTO_DOWN;
dev->netdev_ops = &macvlan_netdev_ops;
dev->needs_free_netdev = true;
+ dev->priv_destructor = macvlan_dev_free;
dev->header_ops = &macvlan_hard_header_ops;
dev->ethtool_ops = &macvlan_ethtool_ops;
}
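
macvlan now holds a tracked reference on its lower device for the netdev's whole lifetime: dev_hold_track() in ndo_init pairs with dev_put_track() in the new priv_destructor, so a leaked reference is attributed to macvlan by the netdev reference tracker rather than surfacing as an anonymous refcount imbalance. A hedged sketch of the pairing, with a hypothetical upper-device priv struct:

	#include <linux/netdevice.h>

	/* The netdevice_tracker handle lives next to the pointer it guards */
	struct example_upper {
		struct net_device *lowerdev;
		netdevice_tracker dev_tracker;
	};

	static void example_hold(struct example_upper *up,
				 struct net_device *lower)
	{
		up->lowerdev = lower;
		dev_hold_track(lower, &up->dev_tracker, GFP_KERNEL);
	}

	static void example_release(struct example_upper *up)
	{
		dev_put_track(up->lowerdev, &up->dev_tracker);
	}
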
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 6b12902a803f..cecf8c63096c 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -133,11 +133,17 @@ static void macvtap_setup(struct net_device *dev)
dev->tx_queue_len = TUN_READQ_SIZE;
}
+static struct net *macvtap_link_net(const struct net_device *dev)
+{
+ return dev_net(macvlan_dev_real_dev(dev));
+}
+
static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
.kind = "macvtap",
.setup = macvtap_setup,
.newlink = macvtap_newlink,
.dellink = macvtap_dellink,
+ .get_link_net = macvtap_link_net,
.priv_size = sizeof(struct macvtap_dev),
};
diff --git a/drivers/net/mctp/Kconfig b/drivers/net/mctp/Kconfig
index 2929471395ae..dc71657d9184 100644
--- a/drivers/net/mctp/Kconfig
+++ b/drivers/net/mctp/Kconfig
@@ -21,6 +21,18 @@ config MCTP_SERIAL
Say y here if you need to connect to MCTP endpoints over serial. To
compile as a module, use m; the module will be called mctp-serial.
+config MCTP_TRANSPORT_I2C
+ tristate "MCTP SMBus/I2C transport"
+ # i2c-mux is optional, but we must build as a module if i2c-mux is a module
+ depends on I2C_MUX || !I2C_MUX
+ depends on I2C
+ depends on I2C_SLAVE
+ select MCTP_FLOWS
+ help
+ Provides a driver to access MCTP devices over SMBus/I2C transport,
+ from DMTF specification DSP0237. An MCTP protocol network device is
+ created for each I2C bus that has been assigned an mctp-i2c device.
+
endmenu
endif
diff --git a/drivers/net/mctp/Makefile b/drivers/net/mctp/Makefile
index d32622613ce4..1ca3e6028f77 100644
--- a/drivers/net/mctp/Makefile
+++ b/drivers/net/mctp/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_MCTP_SERIAL) += mctp-serial.o
+obj-$(CONFIG_MCTP_TRANSPORT_I2C) += mctp-i2c.o
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
new file mode 100644
index 000000000000..baf7afac7857
--- /dev/null
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -0,0 +1,1082 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Management Controller Transport Protocol (MCTP)
+ * Implements DMTF specification
+ * "DSP0237 Management Component Transport Protocol (MCTP) SMBus/I2C
+ * Transport Binding"
+ * https://www.dmtf.org/sites/default/files/standards/documents/DSP0237_1.2.0.pdf
+ *
+ * A netdev is created for each I2C bus that handles MCTP. In the case of an I2C
+ * mux topology a single I2C client is attached to the root of the mux topology,
+ * shared between all mux I2C busses underneath. For non-mux cases an I2C client
+ * is attached per netdev.
+ *
+ * mctp-i2c-controller.yml devicetree binding has further details.
+ *
+ * Copyright (c) 2022 Code Construct
+ * Copyright (c) 2022 Google
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/if_arp.h>
+#include <net/mctp.h>
+#include <net/mctpdevice.h>
+
+/* byte_count is limited to u8 */
+#define MCTP_I2C_MAXBLOCK 255
+/* One byte is taken by source_slave */
+#define MCTP_I2C_MAXMTU (MCTP_I2C_MAXBLOCK - 1)
+#define MCTP_I2C_MINMTU (64 + 4)
+/* Allow space for dest_address, command, byte_count, data, PEC */
+#define MCTP_I2C_BUFSZ (3 + MCTP_I2C_MAXBLOCK + 1)
+#define MCTP_I2C_MINLEN 8
+#define MCTP_I2C_COMMANDCODE 0x0f
+#define MCTP_I2C_TX_WORK_LEN 100
+/* Sufficient for 64kB at min mtu */
+#define MCTP_I2C_TX_QUEUE_LEN 1100
+
+#define MCTP_I2C_OF_PROP "mctp-controller"
+
+enum {
+ MCTP_I2C_FLOW_STATE_NEW = 0,
+ MCTP_I2C_FLOW_STATE_ACTIVE,
+};
+
+/* List of all struct mctp_i2c_client
+ * Lock protects driver_clients and also prevents adding/removing adapters
+ * during mctp_i2c_client probe/remove.
+ */
+static DEFINE_MUTEX(driver_clients_lock);
+static LIST_HEAD(driver_clients);
+
+struct mctp_i2c_client;
+
+/* The netdev structure. One of these per I2C adapter. */
+struct mctp_i2c_dev {
+ struct net_device *ndev;
+ struct i2c_adapter *adapter;
+ struct mctp_i2c_client *client;
+ struct list_head list; /* For mctp_i2c_client.devs */
+
+ size_t rx_pos;
+ u8 rx_buffer[MCTP_I2C_BUFSZ];
+ struct completion rx_done;
+
+ struct task_struct *tx_thread;
+ wait_queue_head_t tx_wq;
+ struct sk_buff_head tx_queue;
+ u8 tx_scratch[MCTP_I2C_BUFSZ];
+
+ /* A fake entry in our tx queue to perform an unlock operation */
+ struct sk_buff unlock_marker;
+
+ /* Spinlock protects i2c_lock_count, release_count, allow_rx */
+ spinlock_t lock;
+ int i2c_lock_count;
+ int release_count;
+ /* Indicates that the netif is ready to receive incoming packets */
+ bool allow_rx;
+};
+
+/* The i2c client structure. One per hardware i2c bus at the top of the
+ * mux tree, shared by multiple netdevs
+ */
+struct mctp_i2c_client {
+ struct i2c_client *client;
+ u8 lladdr;
+
+ struct mctp_i2c_dev *sel;
+ struct list_head devs;
+ spinlock_t sel_lock; /* Protects sel and devs */
+
+ struct list_head list; /* For driver_clients */
+};
+
+/* Header on the wire. */
+struct mctp_i2c_hdr {
+ u8 dest_slave;
+ u8 command;
+ /* Count of bytes following byte_count, excluding PEC */
+ u8 byte_count;
+ u8 source_slave;
+};
+
+static int mctp_i2c_recv(struct mctp_i2c_dev *midev);
+static int mctp_i2c_slave_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val);
+static void mctp_i2c_ndo_uninit(struct net_device *dev);
+static int mctp_i2c_ndo_open(struct net_device *dev);
+
+static struct i2c_adapter *mux_root_adapter(struct i2c_adapter *adap)
+{
+#if IS_ENABLED(CONFIG_I2C_MUX)
+ return i2c_root_adapter(&adap->dev);
+#else
+ /* In non-mux config all i2c adapters are root adapters */
+ return adap;
+#endif
+}
+
+/* Creates a new i2c slave device attached to the root adapter.
+ * Sets up the slave callback.
+ * Must be called with a client on a root adapter.
+ */
+static struct mctp_i2c_client *mctp_i2c_new_client(struct i2c_client *client)
+{
+ struct mctp_i2c_client *mcli = NULL;
+ struct i2c_adapter *root = NULL;
+ int rc;
+
+ if (client->flags & I2C_CLIENT_TEN) {
+ dev_err(&client->dev, "failed, MCTP requires a 7-bit I2C address, addr=0x%x\n",
+ client->addr);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ root = mux_root_adapter(client->adapter);
+ if (!root) {
+ dev_err(&client->dev, "failed to find root adapter\n");
+ rc = -ENOENT;
+ goto err;
+ }
+ if (root != client->adapter) {
+ dev_err(&client->dev,
+ "A mctp-i2c-controller client cannot be placed on an I2C mux adapter.\n"
+ " It should be placed on the mux tree root adapter\n"
+ " then set mctp-controller property on adapters to attach\n");
+ rc = -EINVAL;
+ goto err;
+ }
+
+ mcli = kzalloc(sizeof(*mcli), GFP_KERNEL);
+ if (!mcli) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ spin_lock_init(&mcli->sel_lock);
+ INIT_LIST_HEAD(&mcli->devs);
+ INIT_LIST_HEAD(&mcli->list);
+ mcli->lladdr = client->addr & 0xff;
+ mcli->client = client;
+ i2c_set_clientdata(client, mcli);
+
+ rc = i2c_slave_register(mcli->client, mctp_i2c_slave_cb);
+ if (rc < 0) {
+ dev_err(&client->dev, "i2c register failed %d\n", rc);
+ mcli->client = NULL;
+ i2c_set_clientdata(client, NULL);
+ goto err;
+ }
+
+ return mcli;
+err:
+ if (mcli) {
+ if (mcli->client)
+ i2c_unregister_device(mcli->client);
+ kfree(mcli);
+ }
+ return ERR_PTR(rc);
+}
+
+static void mctp_i2c_free_client(struct mctp_i2c_client *mcli)
+{
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&driver_clients_lock));
+ WARN_ON(!list_empty(&mcli->devs));
+ WARN_ON(mcli->sel); /* sanity check, no locking */
+
+ rc = i2c_slave_unregister(mcli->client);
+ /* Leak if it fails, we can't propagate errors upwards */
+ if (rc < 0)
+ dev_err(&mcli->client->dev, "i2c unregister failed %d\n", rc);
+ else
+ kfree(mcli);
+}
+
+/* Switch the mctp i2c device to receive responses.
+ * Call with sel_lock held
+ */
+static void __mctp_i2c_device_select(struct mctp_i2c_client *mcli,
+ struct mctp_i2c_dev *midev)
+{
+ assert_spin_locked(&mcli->sel_lock);
+ if (midev)
+ dev_hold(midev->ndev);
+ if (mcli->sel)
+ dev_put(mcli->sel->ndev);
+ mcli->sel = midev;
+}
+
+/* Switch the mctp i2c device to receive responses */
+static void mctp_i2c_device_select(struct mctp_i2c_client *mcli,
+ struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mcli->sel_lock, flags);
+ __mctp_i2c_device_select(mcli, midev);
+ spin_unlock_irqrestore(&mcli->sel_lock, flags);
+}
+
+static int mctp_i2c_slave_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ struct mctp_i2c_client *mcli = i2c_get_clientdata(client);
+ struct mctp_i2c_dev *midev = NULL;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&mcli->sel_lock, flags);
+ midev = mcli->sel;
+ if (midev)
+ dev_hold(midev->ndev);
+ spin_unlock_irqrestore(&mcli->sel_lock, flags);
+
+ if (!midev)
+ return 0;
+
+ switch (event) {
+ case I2C_SLAVE_WRITE_RECEIVED:
+ if (midev->rx_pos < MCTP_I2C_BUFSZ) {
+ midev->rx_buffer[midev->rx_pos] = *val;
+ midev->rx_pos++;
+ } else {
+ midev->ndev->stats.rx_over_errors++;
+ }
+
+ break;
+ case I2C_SLAVE_WRITE_REQUESTED:
+ /* dest_slave as first byte */
+ midev->rx_buffer[0] = mcli->lladdr << 1;
+ midev->rx_pos = 1;
+ break;
+ case I2C_SLAVE_STOP:
+ rc = mctp_i2c_recv(midev);
+ break;
+ default:
+ break;
+ }
+
+ dev_put(midev->ndev);
+ return rc;
+}
+
+/* Processes incoming data that has been accumulated by the slave cb */
+static int mctp_i2c_recv(struct mctp_i2c_dev *midev)
+{
+ struct net_device *ndev = midev->ndev;
+ struct mctp_i2c_hdr *hdr;
+ struct mctp_skb_cb *cb;
+ struct sk_buff *skb;
+ unsigned long flags;
+ u8 pec, calc_pec;
+ size_t recvlen;
+ int status;
+
+ /* + 1 for the PEC */
+ if (midev->rx_pos < MCTP_I2C_MINLEN + 1) {
+ ndev->stats.rx_length_errors++;
+ return -EINVAL;
+ }
+ /* recvlen excludes PEC */
+ recvlen = midev->rx_pos - 1;
+
+ hdr = (void *)midev->rx_buffer;
+ if (hdr->command != MCTP_I2C_COMMANDCODE) {
+ ndev->stats.rx_dropped++;
+ return -EINVAL;
+ }
+
+ if (hdr->byte_count + offsetof(struct mctp_i2c_hdr, source_slave) != recvlen) {
+ ndev->stats.rx_length_errors++;
+ return -EINVAL;
+ }
+
+ pec = midev->rx_buffer[midev->rx_pos - 1];
+ calc_pec = i2c_smbus_pec(0, midev->rx_buffer, recvlen);
+ if (pec != calc_pec) {
+ ndev->stats.rx_crc_errors++;
+ return -EINVAL;
+ }
+
+ skb = netdev_alloc_skb(ndev, recvlen);
+ if (!skb) {
+ ndev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ skb->protocol = htons(ETH_P_MCTP);
+ skb_put_data(skb, midev->rx_buffer, recvlen);
+ skb_reset_mac_header(skb);
+ skb_pull(skb, sizeof(struct mctp_i2c_hdr));
+ skb_reset_network_header(skb);
+
+ cb = __mctp_cb(skb);
+ cb->halen = 1;
+ cb->haddr[0] = hdr->source_slave >> 1;
+
+ /* We need to ensure that the netif is not used once netdev
+ * unregister occurs
+ */
+ spin_lock_irqsave(&midev->lock, flags);
+ if (midev->allow_rx) {
+ reinit_completion(&midev->rx_done);
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ status = netif_rx(skb);
+ complete(&midev->rx_done);
+ } else {
+ status = NET_RX_DROP;
+ spin_unlock_irqrestore(&midev->lock, flags);
+ }
+
+ if (status == NET_RX_SUCCESS) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += recvlen;
+ } else {
+ ndev->stats.rx_dropped++;
+ }
+ return 0;
+}
+
+enum mctp_i2c_flow_state {
+ MCTP_I2C_TX_FLOW_INVALID,
+ MCTP_I2C_TX_FLOW_NONE,
+ MCTP_I2C_TX_FLOW_NEW,
+ MCTP_I2C_TX_FLOW_EXISTING,
+};
+
+static enum mctp_i2c_flow_state
+mctp_i2c_get_tx_flow_state(struct mctp_i2c_dev *midev, struct sk_buff *skb)
+{
+ enum mctp_i2c_flow_state state;
+ struct mctp_sk_key *key;
+ struct mctp_flow *flow;
+ unsigned long flags;
+
+ flow = skb_ext_find(skb, SKB_EXT_MCTP);
+ if (!flow)
+ return MCTP_I2C_TX_FLOW_NONE;
+
+ key = flow->key;
+ if (!key)
+ return MCTP_I2C_TX_FLOW_NONE;
+
+ spin_lock_irqsave(&key->lock, flags);
+ /* If the key is present but invalid, we're unlikely to be able
+ * to handle the flow at all; just drop now
+ */
+ if (!key->valid) {
+ state = MCTP_I2C_TX_FLOW_INVALID;
+
+ } else if (key->dev_flow_state == MCTP_I2C_FLOW_STATE_NEW) {
+ key->dev_flow_state = MCTP_I2C_FLOW_STATE_ACTIVE;
+ state = MCTP_I2C_TX_FLOW_NEW;
+ } else {
+ state = MCTP_I2C_TX_FLOW_EXISTING;
+ }
+
+ spin_unlock_irqrestore(&key->lock, flags);
+
+ return state;
+}
+
+/* We're not contending with ourselves here; we only need to exclude other
+ * i2c clients from using the bus. refcounts are simply to prevent
+ * recursive locking.
+ */
+static void mctp_i2c_lock_nest(struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+ bool lock;
+
+ spin_lock_irqsave(&midev->lock, flags);
+ lock = midev->i2c_lock_count == 0;
+ midev->i2c_lock_count++;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ if (lock)
+ i2c_lock_bus(midev->adapter, I2C_LOCK_SEGMENT);
+}
+
+static void mctp_i2c_unlock_nest(struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+ bool unlock;
+
+ spin_lock_irqsave(&midev->lock, flags);
+ if (!WARN_ONCE(midev->i2c_lock_count == 0, "lock count underflow!"))
+ midev->i2c_lock_count--;
+ unlock = midev->i2c_lock_count == 0;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ if (unlock)
+ i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT);
+}
+
+/* Unlocks the bus if it was previously locked, used for cleanup */
+static void mctp_i2c_unlock_reset(struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+ bool unlock;
+
+ spin_lock_irqsave(&midev->lock, flags);
+ unlock = midev->i2c_lock_count > 0;
+ midev->i2c_lock_count = 0;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ if (unlock)
+ i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT);
+}
+
+static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb)
+{
+ struct net_device_stats *stats = &midev->ndev->stats;
+ enum mctp_i2c_flow_state fs;
+ struct mctp_i2c_hdr *hdr;
+ struct i2c_msg msg = {0};
+ u8 *pecp;
+ int rc;
+
+ fs = mctp_i2c_get_tx_flow_state(midev, skb);
+
+ hdr = (void *)skb_mac_header(skb);
+ /* Sanity check that packet contents match skb length,
+ * and can't exceed MCTP_I2C_BUFSZ
+ */
+ if (skb->len != hdr->byte_count + 3) {
+ dev_warn_ratelimited(&midev->adapter->dev,
+ "Bad tx length %d vs skb %u\n",
+ hdr->byte_count + 3, skb->len);
+ return;
+ }
+
+ if (skb_tailroom(skb) >= 1) {
+ /* Linear case with space, we can just append the PEC */
+ skb_put(skb, 1);
+ } else {
+ /* Otherwise need to copy the buffer */
+ skb_copy_bits(skb, 0, midev->tx_scratch, skb->len);
+ hdr = (void *)midev->tx_scratch;
+ }
+
+ pecp = (void *)&hdr->source_slave + hdr->byte_count;
+ *pecp = i2c_smbus_pec(0, (u8 *)hdr, hdr->byte_count + 3);
+ msg.buf = (void *)&hdr->command;
+ /* command, bytecount, data, pec */
+ msg.len = 2 + hdr->byte_count + 1;
+ msg.addr = hdr->dest_slave >> 1;
+
+ switch (fs) {
+ case MCTP_I2C_TX_FLOW_NONE:
+ /* no flow: full lock & unlock */
+ mctp_i2c_lock_nest(midev);
+ mctp_i2c_device_select(midev->client, midev);
+ rc = __i2c_transfer(midev->adapter, &msg, 1);
+ mctp_i2c_unlock_nest(midev);
+ break;
+
+ case MCTP_I2C_TX_FLOW_NEW:
+ /* new flow: lock, tx, but don't unlock; that will happen
+ * on flow release
+ */
+ mctp_i2c_lock_nest(midev);
+ mctp_i2c_device_select(midev->client, midev);
+ fallthrough;
+
+ case MCTP_I2C_TX_FLOW_EXISTING:
+ /* existing flow: we already have the lock; just tx */
+ rc = __i2c_transfer(midev->adapter, &msg, 1);
+ break;
+
+ case MCTP_I2C_TX_FLOW_INVALID:
+ return;
+ }
+
+ if (rc < 0) {
+ dev_warn_ratelimited(&midev->adapter->dev,
+ "__i2c_transfer failed %d\n", rc);
+ stats->tx_errors++;
+ } else {
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+ }
+}
+
+static void mctp_i2c_flow_release(struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+ bool unlock;
+
+ spin_lock_irqsave(&midev->lock, flags);
+ if (midev->release_count > midev->i2c_lock_count) {
+ WARN_ONCE(1, "release count overflow");
+ midev->release_count = midev->i2c_lock_count;
+ }
+
+ midev->i2c_lock_count -= midev->release_count;
+ unlock = midev->i2c_lock_count == 0 && midev->release_count > 0;
+ midev->release_count = 0;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ if (unlock)
+ i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT);
+}
+
+static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned int len)
+{
+ struct mctp_i2c_hdr *hdr;
+ struct mctp_hdr *mhdr;
+ u8 lldst, llsrc;
+
+ if (len > MCTP_I2C_MAXMTU)
+ return -EMSGSIZE;
+
+ lldst = *((u8 *)daddr);
+ llsrc = *((u8 *)saddr);
+
+ skb_push(skb, sizeof(struct mctp_i2c_hdr));
+ skb_reset_mac_header(skb);
+ hdr = (void *)skb_mac_header(skb);
+ mhdr = mctp_hdr(skb);
+ hdr->dest_slave = (lldst << 1) & 0xff;
+ hdr->command = MCTP_I2C_COMMANDCODE;
+ hdr->byte_count = len + 1;
+ hdr->source_slave = ((llsrc << 1) & 0xff) | 0x01;
+ mhdr->ver = 0x01;
+
+ return 0;
+}
+
+static int mctp_i2c_tx_thread(void *data)
+{
+ struct mctp_i2c_dev *midev = data;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ for (;;) {
+ if (kthread_should_stop())
+ break;
+
+ spin_lock_irqsave(&midev->tx_queue.lock, flags);
+ skb = __skb_dequeue(&midev->tx_queue);
+ if (netif_queue_stopped(midev->ndev))
+ netif_wake_queue(midev->ndev);
+ spin_unlock_irqrestore(&midev->tx_queue.lock, flags);
+
+ if (skb == &midev->unlock_marker) {
+ mctp_i2c_flow_release(midev);
+
+ } else if (skb) {
+ mctp_i2c_xmit(midev, skb);
+ kfree_skb(skb);
+
+ } else {
+ wait_event_idle(midev->tx_wq,
+ !skb_queue_empty(&midev->tx_queue) ||
+ kthread_should_stop());
+ }
+ }
+
+ return 0;
+}
+
+static netdev_tx_t mctp_i2c_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mctp_i2c_dev *midev = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&midev->tx_queue.lock, flags);
+ if (skb_queue_len(&midev->tx_queue) >= MCTP_I2C_TX_WORK_LEN) {
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&midev->tx_queue.lock, flags);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ __skb_queue_tail(&midev->tx_queue, skb);
+ if (skb_queue_len(&midev->tx_queue) == MCTP_I2C_TX_WORK_LEN)
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&midev->tx_queue.lock, flags);
+
+ wake_up(&midev->tx_wq);
+ return NETDEV_TX_OK;
+}
+
+static void mctp_i2c_release_flow(struct mctp_dev *mdev,
+ struct mctp_sk_key *key)
+{
+ struct mctp_i2c_dev *midev = netdev_priv(mdev->dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&midev->lock, flags);
+ midev->release_count++;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ /* Ensure we have a release operation queued, through the fake
+ * marker skb
+ */
+ spin_lock(&midev->tx_queue.lock);
+ if (!midev->unlock_marker.next)
+ __skb_queue_tail(&midev->tx_queue, &midev->unlock_marker);
+ spin_unlock(&midev->tx_queue.lock);
+
+ wake_up(&midev->tx_wq);
+}
+
+static const struct net_device_ops mctp_i2c_ops = {
+ .ndo_start_xmit = mctp_i2c_start_xmit,
+ .ndo_uninit = mctp_i2c_ndo_uninit,
+ .ndo_open = mctp_i2c_ndo_open,
+};
+
+static const struct header_ops mctp_i2c_headops = {
+ .create = mctp_i2c_header_create,
+};
+
+static const struct mctp_netdev_ops mctp_i2c_mctp_ops = {
+ .release_flow = mctp_i2c_release_flow,
+};
+
+static void mctp_i2c_net_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_MCTP;
+
+ dev->mtu = MCTP_I2C_MAXMTU;
+ dev->min_mtu = MCTP_I2C_MINMTU;
+ dev->max_mtu = MCTP_I2C_MAXMTU;
+ dev->tx_queue_len = MCTP_I2C_TX_QUEUE_LEN;
+
+ dev->hard_header_len = sizeof(struct mctp_i2c_hdr);
+ dev->addr_len = 1;
+
+ dev->netdev_ops = &mctp_i2c_ops;
+ dev->header_ops = &mctp_i2c_headops;
+}
+
+/* Populates the mctp_i2c_dev priv struct for a netdev.
+ * Returns an error pointer on failure.
+ */
+static struct mctp_i2c_dev *mctp_i2c_midev_init(struct net_device *dev,
+ struct mctp_i2c_client *mcli,
+ struct i2c_adapter *adap)
+{
+ struct mctp_i2c_dev *midev = netdev_priv(dev);
+ unsigned long flags;
+
+ midev->tx_thread = kthread_create(mctp_i2c_tx_thread, midev,
+ "%s/tx", dev->name);
+ if (IS_ERR(midev->tx_thread))
+ return ERR_CAST(midev->tx_thread);
+
+ midev->ndev = dev;
+ get_device(&adap->dev);
+ midev->adapter = adap;
+ get_device(&mcli->client->dev);
+ midev->client = mcli;
+ INIT_LIST_HEAD(&midev->list);
+ spin_lock_init(&midev->lock);
+ midev->i2c_lock_count = 0;
+ midev->release_count = 0;
+ init_completion(&midev->rx_done);
+ complete(&midev->rx_done);
+ init_waitqueue_head(&midev->tx_wq);
+ skb_queue_head_init(&midev->tx_queue);
+
+ /* Add to the parent mcli */
+ spin_lock_irqsave(&mcli->sel_lock, flags);
+ list_add(&midev->list, &mcli->devs);
+ /* Select a device by default */
+ if (!mcli->sel)
+ __mctp_i2c_device_select(mcli, midev);
+ spin_unlock_irqrestore(&mcli->sel_lock, flags);
+
+ /* Start the worker thread */
+ wake_up_process(midev->tx_thread);
+
+ return midev;
+}
+
+/* Counterpart of mctp_i2c_midev_init */
+static void mctp_i2c_midev_free(struct mctp_i2c_dev *midev)
+{
+ struct mctp_i2c_client *mcli = midev->client;
+ unsigned long flags;
+
+ if (midev->tx_thread) {
+ kthread_stop(midev->tx_thread);
+ midev->tx_thread = NULL;
+ }
+
+ /* Unconditionally unlock on close */
+ mctp_i2c_unlock_reset(midev);
+
+ /* Remove the netdev from the parent i2c client. */
+ spin_lock_irqsave(&mcli->sel_lock, flags);
+ list_del(&midev->list);
+ if (mcli->sel == midev) {
+ struct mctp_i2c_dev *first;
+
+ first = list_first_entry_or_null(&mcli->devs, struct mctp_i2c_dev, list);
+ __mctp_i2c_device_select(mcli, first);
+ }
+ spin_unlock_irqrestore(&mcli->sel_lock, flags);
+
+ skb_queue_purge(&midev->tx_queue);
+ put_device(&midev->adapter->dev);
+ put_device(&mcli->client->dev);
+}
+
+/* Stops, unregisters, and frees midev */
+static void mctp_i2c_unregister(struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+
+ /* Stop the tx thread prior to unregister, as it uses netif_*() functions */
+ kthread_stop(midev->tx_thread);
+ midev->tx_thread = NULL;
+
+ /* Prevent any new rx in mctp_i2c_recv(), let any pending work finish */
+ spin_lock_irqsave(&midev->lock, flags);
+ midev->allow_rx = false;
+ spin_unlock_irqrestore(&midev->lock, flags);
+ wait_for_completion(&midev->rx_done);
+
+ mctp_unregister_netdev(midev->ndev);
+ /* midev has been freed now by mctp_i2c_ndo_uninit callback */
+
+ free_netdev(midev->ndev);
+}
+
+static void mctp_i2c_ndo_uninit(struct net_device *dev)
+{
+ struct mctp_i2c_dev *midev = netdev_priv(dev);
+
+ /* Perform cleanup here to ensure that mcli->sel isn't holding
+ * a reference that would prevent unregister_netdevice()
+ * from completing.
+ */
+ mctp_i2c_midev_free(midev);
+}
+
+static int mctp_i2c_ndo_open(struct net_device *dev)
+{
+ struct mctp_i2c_dev *midev = netdev_priv(dev);
+ unsigned long flags;
+
+ /* i2c rx handler can only pass packets once the netdev is registered */
+ spin_lock_irqsave(&midev->lock, flags);
+ midev->allow_rx = true;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ return 0;
+}
+
+static int mctp_i2c_add_netdev(struct mctp_i2c_client *mcli,
+ struct i2c_adapter *adap)
+{
+ struct mctp_i2c_dev *midev = NULL;
+ struct net_device *ndev = NULL;
+ struct i2c_adapter *root;
+ unsigned long flags;
+ char namebuf[30];
+ int rc;
+
+ root = mux_root_adapter(adap);
+ if (root != mcli->client->adapter) {
+ dev_err(&mcli->client->dev,
+ "I2C adapter %s is not a child bus of %s\n",
+ mcli->client->adapter->name, root->name);
+ return -EINVAL;
+ }
+
+ WARN_ON(!mutex_is_locked(&driver_clients_lock));
+ snprintf(namebuf, sizeof(namebuf), "mctpi2c%d", adap->nr);
+ ndev = alloc_netdev(sizeof(*midev), namebuf, NET_NAME_ENUM, mctp_i2c_net_setup);
+ if (!ndev) {
+ dev_err(&mcli->client->dev, "alloc netdev failed\n");
+ rc = -ENOMEM;
+ goto err;
+ }
+ dev_net_set(ndev, current->nsproxy->net_ns);
+ SET_NETDEV_DEV(ndev, &adap->dev);
+ dev_addr_set(ndev, &mcli->lladdr);
+
+ midev = mctp_i2c_midev_init(ndev, mcli, adap);
+ if (IS_ERR(midev)) {
+ rc = PTR_ERR(midev);
+ midev = NULL;
+ goto err;
+ }
+
+ rc = mctp_register_netdev(ndev, &mctp_i2c_mctp_ops);
+ if (rc < 0) {
+ dev_err(&mcli->client->dev,
+ "register netdev \"%s\" failed %d\n",
+ ndev->name, rc);
+ goto err;
+ }
+
+ spin_lock_irqsave(&midev->lock, flags);
+ midev->allow_rx = false;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ return 0;
+err:
+ if (midev)
+ mctp_i2c_midev_free(midev);
+ if (ndev)
+ free_netdev(ndev);
+ return rc;
+}
+
+/* Removes any netdev for adap. mcli is the parent root i2c client */
+static void mctp_i2c_remove_netdev(struct mctp_i2c_client *mcli,
+ struct i2c_adapter *adap)
+{
+ struct mctp_i2c_dev *midev = NULL, *m = NULL;
+ unsigned long flags;
+
+ WARN_ON(!mutex_is_locked(&driver_clients_lock));
+ spin_lock_irqsave(&mcli->sel_lock, flags);
+ /* List size is limited by number of MCTP netdevs on a single hardware bus */
+ list_for_each_entry(m, &mcli->devs, list)
+ if (m->adapter == adap) {
+ midev = m;
+ break;
+ }
+ spin_unlock_irqrestore(&mcli->sel_lock, flags);
+
+ if (midev)
+ mctp_i2c_unregister(midev);
+}
+
+/* Determines whether a device is an i2c adapter.
+ * Optionally returns the root i2c_adapter
+ */
+static struct i2c_adapter *mctp_i2c_get_adapter(struct device *dev,
+ struct i2c_adapter **ret_root)
+{
+ struct i2c_adapter *root, *adap;
+
+ if (dev->type != &i2c_adapter_type)
+ return NULL;
+ adap = to_i2c_adapter(dev);
+ root = mux_root_adapter(adap);
+ WARN_ONCE(!root, "MCTP I2C failed to find root adapter for %s\n",
+ dev_name(dev));
+ if (!root)
+ return NULL;
+ if (ret_root)
+ *ret_root = root;
+ return adap;
+}
+
+/* Determines whether a device is an i2c adapter with the "mctp-controller"
+ * devicetree property set. If adap is not an OF node, returns match_no_of
+ */
+static bool mctp_i2c_adapter_match(struct i2c_adapter *adap, bool match_no_of)
+{
+ if (!adap->dev.of_node)
+ return match_no_of;
+ return of_property_read_bool(adap->dev.of_node, MCTP_I2C_OF_PROP);
+}
+
+/* Called for each existing i2c device (adapter or client) when a
+ * new mctp-i2c client is probed.
+ */
+static int mctp_i2c_client_try_attach(struct device *dev, void *data)
+{
+ struct i2c_adapter *adap = NULL, *root = NULL;
+ struct mctp_i2c_client *mcli = data;
+
+ adap = mctp_i2c_get_adapter(dev, &root);
+ if (!adap)
+ return 0;
+ if (mcli->client->adapter != root)
+ return 0;
+ /* Must either have mctp-controller property on the adapter, or
+ * be a root adapter if it's non-devicetree
+ */
+ if (!mctp_i2c_adapter_match(adap, adap == root))
+ return 0;
+
+ return mctp_i2c_add_netdev(mcli, adap);
+}
+
+static void mctp_i2c_notify_add(struct device *dev)
+{
+ struct mctp_i2c_client *mcli = NULL, *m = NULL;
+ struct i2c_adapter *root = NULL, *adap = NULL;
+ int rc;
+
+ adap = mctp_i2c_get_adapter(dev, &root);
+ if (!adap)
+ return;
+ /* Check for mctp-controller property on the adapter */
+ if (!mctp_i2c_adapter_match(adap, false))
+ return;
+
+ /* Find an existing mcli for adap's root */
+ mutex_lock(&driver_clients_lock);
+ list_for_each_entry(m, &driver_clients, list) {
+ if (m->client->adapter == root) {
+ mcli = m;
+ break;
+ }
+ }
+
+ if (mcli) {
+ rc = mctp_i2c_add_netdev(mcli, adap);
+ if (rc < 0)
+ dev_warn(dev, "Failed adding mctp-i2c net device\n");
+ }
+ mutex_unlock(&driver_clients_lock);
+}
+
+static void mctp_i2c_notify_del(struct device *dev)
+{
+ struct i2c_adapter *root = NULL, *adap = NULL;
+ struct mctp_i2c_client *mcli = NULL;
+
+ adap = mctp_i2c_get_adapter(dev, &root);
+ if (!adap)
+ return;
+
+ mutex_lock(&driver_clients_lock);
+ list_for_each_entry(mcli, &driver_clients, list) {
+ if (mcli->client->adapter == root) {
+ mctp_i2c_remove_netdev(mcli, adap);
+ break;
+ }
+ }
+ mutex_unlock(&driver_clients_lock);
+}
+
+static int mctp_i2c_probe(struct i2c_client *client)
+{
+ struct mctp_i2c_client *mcli = NULL;
+ int rc;
+
+ mutex_lock(&driver_clients_lock);
+ mcli = mctp_i2c_new_client(client);
+ if (IS_ERR(mcli)) {
+ rc = PTR_ERR(mcli);
+ mcli = NULL;
+ goto out;
+ } else {
+ list_add(&mcli->list, &driver_clients);
+ }
+
+ /* Add a netdev for adapters that have a 'mctp-controller' property */
+ i2c_for_each_dev(mcli, mctp_i2c_client_try_attach);
+ rc = 0;
+out:
+ mutex_unlock(&driver_clients_lock);
+ return rc;
+}
+
+static int mctp_i2c_remove(struct i2c_client *client)
+{
+ struct mctp_i2c_client *mcli = i2c_get_clientdata(client);
+ struct mctp_i2c_dev *midev = NULL, *tmp = NULL;
+
+ mutex_lock(&driver_clients_lock);
+ list_del(&mcli->list);
+ /* Remove all child adapter netdevs */
+ list_for_each_entry_safe(midev, tmp, &mcli->devs, list)
+ mctp_i2c_unregister(midev);
+
+ mctp_i2c_free_client(mcli);
+ mutex_unlock(&driver_clients_lock);
+ /* Callers ignore return code */
+ return 0;
+}
+
+/* We look for a 'mctp-controller' property on I2C busses as they are
+ * added/deleted, creating/removing netdevs as required.
+ */
+static int mctp_i2c_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ mctp_i2c_notify_add(dev);
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+ mctp_i2c_notify_del(dev);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mctp_i2c_notifier = {
+ .notifier_call = mctp_i2c_notifier_call,
+};
+
+static const struct i2c_device_id mctp_i2c_id[] = {
+ { "mctp-i2c-interface", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, mctp_i2c_id);
+
+static const struct of_device_id mctp_i2c_of_match[] = {
+ { .compatible = "mctp-i2c-controller" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mctp_i2c_of_match);
+
+static struct i2c_driver mctp_i2c_driver = {
+ .driver = {
+ .name = "mctp-i2c-interface",
+ .of_match_table = mctp_i2c_of_match,
+ },
+ .probe_new = mctp_i2c_probe,
+ .remove = mctp_i2c_remove,
+ .id_table = mctp_i2c_id,
+};
+
+static __init int mctp_i2c_mod_init(void)
+{
+ int rc;
+
+ pr_info("MCTP I2C interface driver\n");
+ rc = i2c_add_driver(&mctp_i2c_driver);
+ if (rc < 0)
+ return rc;
+ rc = bus_register_notifier(&i2c_bus_type, &mctp_i2c_notifier);
+ if (rc < 0) {
+ i2c_del_driver(&mctp_i2c_driver);
+ return rc;
+ }
+ return 0;
+}
+
+static __exit void mctp_i2c_mod_exit(void)
+{
+ int rc;
+
+ rc = bus_unregister_notifier(&i2c_bus_type, &mctp_i2c_notifier);
+ if (rc < 0)
+ pr_warn("MCTP I2C could not unregister notifier, %d\n", rc);
+ i2c_del_driver(&mctp_i2c_driver);
+}
+
+module_init(mctp_i2c_mod_init);
+module_exit(mctp_i2c_mod_exit);
+
+MODULE_DESCRIPTION("MCTP I2C device");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Matt Johnston <matt@codeconstruct.com.au>");
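For reference, a stand-alone sketch of the SMBus block-write framing built by
mctp_i2c_header_create() and mctp_i2c_xmit() above. This is not part of the
patch; smbus_pec() and mctp_frame_bytes() are illustrative names, and the
CRC-8 polynomial 0x07 is the standard SMBus PEC that i2c_smbus_pec() is
assumed to compute.

#include <stdint.h>
#include <stddef.h>

/* SMBus PEC: CRC-8, polynomial x^8 + x^2 + x + 1 (0x07), init 0. */
static uint8_t smbus_pec(uint8_t crc, const uint8_t *p, size_t n)
{
	while (n--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x80) ? (uint8_t)((crc << 1) ^ 0x07)
					   : (uint8_t)(crc << 1);
	}
	return crc;
}

/* On the wire: dest(1) cmd(1) count(1) src(1) payload(count-1) pec(1).
 * byte_count is payload + 1 to cover the source-slave byte; the PEC also
 * covers the destination address byte, hence byte_count + 3 bytes, even
 * though msg.buf starts at the command byte (msg.len = 2 + count + 1).
 */
static size_t mctp_frame_bytes(size_t payload_len)
{
	size_t byte_count = payload_len + 1;

	return 1 + 2 + byte_count + 1; /* dest + cmd/count + data + pec */
}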
diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c
index 62723a7faa2d..7cd103fd34ef 100644
--- a/drivers/net/mctp/mctp-serial.c
+++ b/drivers/net/mctp/mctp-serial.c
@@ -286,7 +286,7 @@ static void mctp_serial_rx(struct mctp_serial *dev)
cb = __mctp_cb(skb);
cb->halen = 0;
- netif_rx_ni(skb);
+ netif_rx(skb);
dev->netdev->stats.rx_packets++;
dev->netdev->stats.rx_bytes += dev->rxlen;
}
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index 7d2abaf2b2c9..c483ba67c21f 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -15,6 +15,7 @@
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#define MSCC_MIIM_REG_STATUS 0x0
@@ -36,11 +37,19 @@
#define PHY_CFG_PHY_RESET (BIT(5) | BIT(6) | BIT(7) | BIT(8))
#define MSCC_PHY_REG_PHY_STATUS 0x4
+#define LAN966X_CUPHY_COMMON_CFG 0x0
+#define CUPHY_COMMON_CFG_RESET_N BIT(0)
+
+struct mscc_miim_info {
+ unsigned int phy_reset_offset;
+ unsigned int phy_reset_bits;
+};
+
struct mscc_miim_dev {
struct regmap *regs;
int mii_status_offset;
struct regmap *phy_regs;
- int phy_reset_offset;
+ const struct mscc_miim_info *info;
};
/* When high resolution timers aren't built-in: we can't use usleep_range() as
@@ -157,27 +166,29 @@ out:
static int mscc_miim_reset(struct mii_bus *bus)
{
struct mscc_miim_dev *miim = bus->priv;
- int offset = miim->phy_reset_offset;
+ unsigned int offset, bits;
int ret;
- if (miim->phy_regs) {
- ret = regmap_write(miim->phy_regs,
- MSCC_PHY_REG_PHY_CFG + offset, 0);
- if (ret < 0) {
- WARN_ONCE(1, "mscc reset set error %d\n", ret);
- return ret;
- }
+ if (!miim->phy_regs)
+ return 0;
- ret = regmap_write(miim->phy_regs,
- MSCC_PHY_REG_PHY_CFG + offset, 0x1ff);
- if (ret < 0) {
- WARN_ONCE(1, "mscc reset clear error %d\n", ret);
- return ret;
- }
+ offset = miim->info->phy_reset_offset;
+ bits = miim->info->phy_reset_bits;
+
+ ret = regmap_update_bits(miim->phy_regs, offset, bits, 0);
+ if (ret < 0) {
+ WARN_ONCE(1, "mscc reset set error %d\n", ret);
+ return ret;
+ }
- mdelay(500);
+ ret = regmap_update_bits(miim->phy_regs, offset, bits, bits);
+ if (ret < 0) {
+ WARN_ONCE(1, "mscc reset clear error %d\n", ret);
+ return ret;
}
+ mdelay(500);
+
return 0;
}
@@ -187,6 +198,13 @@ static const struct regmap_config mscc_miim_regmap_config = {
.reg_stride = 4,
};
+static const struct regmap_config mscc_miim_phy_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .name = "phy",
+};
+
int mscc_miim_setup(struct device *dev, struct mii_bus **pbus, const char *name,
struct regmap *mii_regmap, int status_offset)
{
@@ -250,7 +268,7 @@ static int mscc_miim_probe(struct platform_device *pdev)
}
phy_regmap = devm_regmap_init_mmio(&pdev->dev, phy_regs,
- &mscc_miim_regmap_config);
+ &mscc_miim_phy_regmap_config);
if (IS_ERR(phy_regmap)) {
dev_err(&pdev->dev, "Unable to create phy register regmap\n");
return PTR_ERR(phy_regmap);
@@ -265,7 +283,10 @@ static int mscc_miim_probe(struct platform_device *pdev)
miim = bus->priv;
miim->phy_regs = phy_regmap;
- miim->phy_reset_offset = 0;
+
+ miim->info = device_get_match_data(&pdev->dev);
+ if (!miim->info)
+ return -EINVAL;
ret = of_mdiobus_register(bus, pdev->dev.of_node);
if (ret < 0) {
@@ -287,8 +308,25 @@ static int mscc_miim_remove(struct platform_device *pdev)
return 0;
}
+static const struct mscc_miim_info mscc_ocelot_miim_info = {
+ .phy_reset_offset = MSCC_PHY_REG_PHY_CFG,
+ .phy_reset_bits = PHY_CFG_PHY_ENA | PHY_CFG_PHY_COMMON_RESET |
+ PHY_CFG_PHY_RESET,
+};
+
+static const struct mscc_miim_info microchip_lan966x_miim_info = {
+ .phy_reset_offset = LAN966X_CUPHY_COMMON_CFG,
+ .phy_reset_bits = CUPHY_COMMON_CFG_RESET_N,
+};
+
static const struct of_device_id mscc_miim_match[] = {
- { .compatible = "mscc,ocelot-miim" },
+ {
+ .compatible = "mscc,ocelot-miim",
+ .data = &mscc_ocelot_miim_info
+ }, {
+ .compatible = "microchip,lan966x-miim",
+ .data = &microchip_lan966x_miim_info
+ },
{ }
};
MODULE_DEVICE_TABLE(of, mscc_miim_match);
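The mscc-miim conversion above follows the usual pairing of of_device_id
.data with device_get_match_data(); a minimal sketch of that pattern, with
purely illustrative demo_* names:

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct demo_info {
	unsigned int reset_offset;	/* per-SoC register layout */
};

static const struct demo_info demo_soc_a_info = { .reset_offset = 0x4 };

static const struct of_device_id demo_match[] = {
	{ .compatible = "vendor,demo-soc-a", .data = &demo_soc_a_info },
	{ }
};

static int demo_probe(struct platform_device *pdev)
{
	/* Returns the .data of the matched of_device_id entry */
	const struct demo_info *info = device_get_match_data(&pdev->dev);

	if (!info)
		return -EINVAL;
	return 0;
}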
diff --git a/drivers/net/mdio/mdio-mux.c b/drivers/net/mdio/mdio-mux.c
index ebd001f0eece..a881e3523328 100644
--- a/drivers/net/mdio/mdio-mux.c
+++ b/drivers/net/mdio/mdio-mux.c
@@ -168,8 +168,8 @@ int mdio_mux_init(struct device *dev,
cb->mii_bus->priv = cb;
cb->mii_bus->name = "mdio_mux";
- snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x",
- pb->parent_id, v);
+ snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x.%x",
+ cb->mii_bus->name, pb->parent_id, v);
cb->mii_bus->parent = dev;
cb->mii_bus->read = mdio_mux_read;
cb->mii_bus->write = mdio_mux_write;
diff --git a/drivers/net/mdio/mdio-xgene.c b/drivers/net/mdio/mdio-xgene.c
index 7ab4e26db08c..7aafc221b5cf 100644
--- a/drivers/net/mdio/mdio-xgene.c
+++ b/drivers/net/mdio/mdio-xgene.c
@@ -285,7 +285,8 @@ static acpi_status acpi_register_phy(acpi_handle handle, u32 lvl,
const union acpi_object *obj;
u32 phy_addr;
- if (acpi_bus_get_device(handle, &adev))
+ adev = acpi_fetch_acpi_dev(handle);
+ if (!adev)
return AE_OK;
if (acpi_dev_get_property(adev, "phy-channel", ACPI_TYPE_INTEGER, &obj))
diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
index aaa628f859fd..0b1b6f650104 100644
--- a/drivers/net/mhi_net.c
+++ b/drivers/net/mhi_net.c
@@ -225,7 +225,7 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
u64_stats_inc(&mhi_netdev->stats.rx_packets);
u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
- netif_rx(skb);
+ __netif_rx(skb);
}
/* Refill if RX buffers queue becomes low */
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 86ec5aae4289..21a0435c02de 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -89,7 +89,7 @@ static int net_failover_close(struct net_device *dev)
static netdev_tx_t net_failover_drop_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
index a1cbfa44a1e1..5735e5b1a2cb 100644
--- a/drivers/net/netdevsim/Makefile
+++ b/drivers/net/netdevsim/Makefile
@@ -3,7 +3,7 @@
obj-$(CONFIG_NETDEVSIM) += netdevsim.o
netdevsim-objs := \
- netdev.o dev.o ethtool.o fib.o bus.o health.o udp_tunnels.o
+ netdev.o dev.o ethtool.o fib.o bus.o health.o hwstats.o udp_tunnels.o
ifeq ($(CONFIG_BPF_SYSCALL),y)
netdevsim-objs += \
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 08d7b465a0de..57a3ac893792 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -59,7 +59,7 @@ static struct dentry *nsim_dev_ddir;
unsigned int nsim_dev_get_vfs(struct nsim_dev *nsim_dev)
{
WARN_ON(!lockdep_rtnl_is_held() &&
- !lockdep_is_held(&nsim_dev->vfs_lock));
+ !devl_lock_is_held(priv_to_devlink(nsim_dev)));
return nsim_dev->nsim_bus_dev->num_vfs;
}
@@ -275,7 +275,7 @@ static ssize_t nsim_bus_dev_max_vfs_write(struct file *file,
return -ENOMEM;
nsim_dev = file->private_data;
- mutex_lock(&nsim_dev->vfs_lock);
+ devl_lock(priv_to_devlink(nsim_dev));
/* Reject if VFs are configured */
if (nsim_dev_get_vfs(nsim_dev)) {
ret = -EBUSY;
@@ -285,7 +285,7 @@ static ssize_t nsim_bus_dev_max_vfs_write(struct file *file,
*ppos += count;
ret = count;
}
- mutex_unlock(&nsim_dev->vfs_lock);
+ devl_unlock(priv_to_devlink(nsim_dev));
kfree(vfconfigs);
return ret;
@@ -339,6 +339,7 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
debugfs_create_bool("fail_trap_policer_counter_get", 0600,
nsim_dev->ddir,
&nsim_dev->fail_trap_policer_counter_get);
+ /* caution, dev_max_vfs write takes devlink lock */
debugfs_create_file("max_vfs", 0600, nsim_dev->ddir,
nsim_dev, &nsim_dev_max_vfs_fops);
@@ -567,6 +568,9 @@ static void nsim_dev_dummy_region_exit(struct nsim_dev *nsim_dev)
devlink_region_destroy(nsim_dev->dummy_region);
}
+static int
+__nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_type type,
+ unsigned int port_index);
static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port);
static int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev,
@@ -575,12 +579,10 @@ static int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev,
struct devlink *devlink = priv_to_devlink(nsim_dev);
struct nsim_dev_port *nsim_dev_port, *tmp;
- devlink_rate_nodes_destroy(devlink);
- mutex_lock(&nsim_dev->port_list_lock);
+ devl_rate_nodes_destroy(devlink);
list_for_each_entry_safe(nsim_dev_port, tmp, &nsim_dev->port_list, list)
if (nsim_dev_port_is_vf(nsim_dev_port))
__nsim_dev_port_del(nsim_dev_port);
- mutex_unlock(&nsim_dev->port_list_lock);
nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY;
return 0;
}
@@ -588,11 +590,11 @@ static int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev,
static int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev,
struct netlink_ext_ack *extack)
{
- struct nsim_bus_dev *nsim_bus_dev = nsim_dev->nsim_bus_dev;
+ struct nsim_dev_port *nsim_dev_port, *tmp;
int i, err;
for (i = 0; i < nsim_dev_get_vfs(nsim_dev); i++) {
- err = nsim_drv_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_VF, i);
+ err = __nsim_dev_port_add(nsim_dev, NSIM_DEV_PORT_TYPE_VF, i);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to initialize VFs' netdevsim ports");
pr_err("Failed to initialize VF id=%d. %d.\n", i, err);
@@ -603,8 +605,9 @@ static int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev,
return 0;
err_port_add_vfs:
- for (i--; i >= 0; i--)
- nsim_drv_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_VF, i);
+ list_for_each_entry_safe(nsim_dev_port, tmp, &nsim_dev->port_list, list)
+ if (nsim_dev_port_is_vf(nsim_dev_port))
+ __nsim_dev_port_del(nsim_dev_port);
return err;
}
@@ -612,22 +615,16 @@ static int nsim_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
- int err = 0;
- mutex_lock(&nsim_dev->vfs_lock);
if (mode == nsim_dev->esw_mode)
- goto unlock;
+ return 0;
if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
- err = nsim_esw_legacy_enable(nsim_dev, extack);
- else if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
- err = nsim_esw_switchdev_enable(nsim_dev, extack);
- else
- err = -EINVAL;
+ return nsim_esw_legacy_enable(nsim_dev, extack);
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return nsim_esw_switchdev_enable(nsim_dev, extack);
-unlock:
- mutex_unlock(&nsim_dev->vfs_lock);
- return err;
+ return -EINVAL;
}
static int nsim_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
@@ -835,14 +832,14 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
/* For each running port and enabled packet trap, generate a UDP
* packet with a random 5-tuple and report it.
*/
- mutex_lock(&nsim_dev->port_list_lock);
+ devl_lock(priv_to_devlink(nsim_dev));
list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list) {
if (!netif_running(nsim_dev_port->ns->netdev))
continue;
nsim_dev_trap_report(nsim_dev_port);
}
- mutex_unlock(&nsim_dev->port_list_lock);
+ devl_unlock(priv_to_devlink(nsim_dev));
schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
@@ -924,6 +921,7 @@ static void nsim_dev_traps_exit(struct devlink *devlink)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ /* caution, trap work takes devlink lock */
cancel_delayed_work_sync(&nsim_dev->trap_data->trap_report_dw);
devlink_traps_unregister(devlink, nsim_traps_arr,
ARRAY_SIZE(nsim_traps_arr));
@@ -1380,8 +1378,8 @@ static int __nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_typ
memcpy(attrs.switch_id.id, nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
attrs.switch_id.id_len = nsim_dev->switch_id.id_len;
devlink_port_attrs_set(devlink_port, &attrs);
- err = devlink_port_register(priv_to_devlink(nsim_dev), devlink_port,
- nsim_dev_port->port_index);
+ err = devl_port_register(priv_to_devlink(nsim_dev), devlink_port,
+ nsim_dev_port->port_index);
if (err)
goto err_port_free;
@@ -1396,8 +1394,8 @@ static int __nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_typ
}
if (nsim_dev_port_is_vf(nsim_dev_port)) {
- err = devlink_rate_leaf_create(&nsim_dev_port->devlink_port,
- nsim_dev_port);
+ err = devl_rate_leaf_create(&nsim_dev_port->devlink_port,
+ nsim_dev_port);
if (err)
goto err_nsim_destroy;
}
@@ -1412,7 +1410,7 @@ err_nsim_destroy:
err_port_debugfs_exit:
nsim_dev_port_debugfs_exit(nsim_dev_port);
err_dl_port_unregister:
- devlink_port_unregister(devlink_port);
+ devl_port_unregister(devlink_port);
err_port_free:
kfree(nsim_dev_port);
return err;
@@ -1424,11 +1422,11 @@ static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port)
list_del(&nsim_dev_port->list);
if (nsim_dev_port_is_vf(nsim_dev_port))
- devlink_rate_leaf_destroy(&nsim_dev_port->devlink_port);
+ devl_rate_leaf_destroy(&nsim_dev_port->devlink_port);
devlink_port_type_clear(devlink_port);
nsim_destroy(nsim_dev_port->ns);
nsim_dev_port_debugfs_exit(nsim_dev_port);
- devlink_port_unregister(devlink_port);
+ devl_port_unregister(devlink_port);
kfree(nsim_dev_port);
}
@@ -1436,11 +1434,11 @@ static void nsim_dev_port_del_all(struct nsim_dev *nsim_dev)
{
struct nsim_dev_port *nsim_dev_port, *tmp;
- mutex_lock(&nsim_dev->port_list_lock);
+ devl_lock(priv_to_devlink(nsim_dev));
list_for_each_entry_safe(nsim_dev_port, tmp,
&nsim_dev->port_list, list)
__nsim_dev_port_del(nsim_dev_port);
- mutex_unlock(&nsim_dev->port_list_lock);
+ devl_unlock(priv_to_devlink(nsim_dev));
}
static int nsim_dev_port_add_all(struct nsim_dev *nsim_dev,
@@ -1449,7 +1447,9 @@ static int nsim_dev_port_add_all(struct nsim_dev *nsim_dev,
int i, err;
for (i = 0; i < port_count; i++) {
+ devl_lock(priv_to_devlink(nsim_dev));
err = __nsim_dev_port_add(nsim_dev, NSIM_DEV_PORT_TYPE_PF, i);
+ devl_unlock(priv_to_devlink(nsim_dev));
if (err)
goto err_port_del_all;
}
@@ -1470,7 +1470,6 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
devlink = priv_to_devlink(nsim_dev);
nsim_dev = devlink_priv(devlink);
INIT_LIST_HEAD(&nsim_dev->port_list);
- mutex_init(&nsim_dev->port_list_lock);
nsim_dev->fw_update_status = true;
nsim_dev->fw_update_overwrite_mask = 0;
@@ -1498,10 +1497,14 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
if (err)
goto err_health_exit;
- err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ err = nsim_dev_hwstats_init(nsim_dev);
if (err)
goto err_psample_exit;
+ err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ if (err)
+ goto err_hwstats_exit;
+
nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
0200,
nsim_dev->ddir,
@@ -1509,6 +1512,8 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
&nsim_dev_take_snapshot_fops);
return 0;
+err_hwstats_exit:
+ nsim_dev_hwstats_exit(nsim_dev);
err_psample_exit:
nsim_dev_psample_exit(nsim_dev);
err_health_exit:
@@ -1537,8 +1542,6 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
nsim_dev->switch_id.id_len = sizeof(nsim_dev->switch_id.id);
get_random_bytes(nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
INIT_LIST_HEAD(&nsim_dev->port_list);
- mutex_init(&nsim_dev->vfs_lock);
- mutex_init(&nsim_dev->port_list_lock);
nsim_dev->fw_update_status = true;
nsim_dev->fw_update_overwrite_mask = 0;
nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT;
@@ -1595,15 +1598,21 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
if (err)
goto err_bpf_dev_exit;
- err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ err = nsim_dev_hwstats_init(nsim_dev);
if (err)
goto err_psample_exit;
+ err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ if (err)
+ goto err_hwstats_exit;
+
nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY;
devlink_set_features(devlink, DEVLINK_F_RELOAD);
devlink_register(devlink);
return 0;
+err_hwstats_exit:
+ nsim_dev_hwstats_exit(nsim_dev);
err_psample_exit:
nsim_dev_psample_exit(nsim_dev);
err_bpf_dev_exit:
@@ -1639,21 +1648,21 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
return;
debugfs_remove(nsim_dev->take_snapshot);
- mutex_lock(&nsim_dev->vfs_lock);
+ devl_lock(devlink);
if (nsim_dev_get_vfs(nsim_dev)) {
nsim_bus_dev_set_vfs(nsim_dev->nsim_bus_dev, 0);
if (nsim_esw_mode_is_switchdev(nsim_dev))
nsim_esw_legacy_enable(nsim_dev, NULL);
}
- mutex_unlock(&nsim_dev->vfs_lock);
+ devl_unlock(devlink);
nsim_dev_port_del_all(nsim_dev);
+ nsim_dev_hwstats_exit(nsim_dev);
nsim_dev_psample_exit(nsim_dev);
nsim_dev_health_exit(nsim_dev);
nsim_fib_destroy(devlink, nsim_dev->fib_data);
nsim_dev_traps_exit(devlink);
nsim_dev_dummy_region_exit(nsim_dev);
- mutex_destroy(&nsim_dev->port_list_lock);
}
void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev)
@@ -1693,12 +1702,12 @@ int nsim_drv_port_add(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
int err;
- mutex_lock(&nsim_dev->port_list_lock);
+ devl_lock(priv_to_devlink(nsim_dev));
if (__nsim_dev_port_lookup(nsim_dev, type, port_index))
err = -EEXIST;
else
err = __nsim_dev_port_add(nsim_dev, type, port_index);
- mutex_unlock(&nsim_dev->port_list_lock);
+ devl_unlock(priv_to_devlink(nsim_dev));
return err;
}
@@ -1709,13 +1718,13 @@ int nsim_drv_port_del(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type
struct nsim_dev_port *nsim_dev_port;
int err = 0;
- mutex_lock(&nsim_dev->port_list_lock);
+ devl_lock(priv_to_devlink(nsim_dev));
nsim_dev_port = __nsim_dev_port_lookup(nsim_dev, type, port_index);
if (!nsim_dev_port)
err = -ENOENT;
else
__nsim_dev_port_del(nsim_dev_port);
- mutex_unlock(&nsim_dev->port_list_lock);
+ devl_unlock(priv_to_devlink(nsim_dev));
return err;
}
@@ -1723,9 +1732,10 @@ int nsim_drv_configure_vfs(struct nsim_bus_dev *nsim_bus_dev,
unsigned int num_vfs)
{
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
+ struct devlink *devlink = priv_to_devlink(nsim_dev);
int ret = 0;
- mutex_lock(&nsim_dev->vfs_lock);
+ devl_lock(devlink);
if (nsim_bus_dev->num_vfs == num_vfs)
goto exit_unlock;
if (nsim_bus_dev->num_vfs && num_vfs) {
@@ -1751,7 +1761,7 @@ int nsim_drv_configure_vfs(struct nsim_bus_dev *nsim_bus_dev,
}
exit_unlock:
- mutex_unlock(&nsim_dev->vfs_lock);
+ devl_unlock(devlink);
return ret;
}
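The netdevsim changes above replace the driver's private vfs/port-list
mutexes with the devlink instance lock. A hedged sketch of the resulting
pattern, assuming the devl_* locked-variant API this series uses (the
devl_* helpers assert the lock rather than taking it); demo_add_port is an
illustrative name:

#include <net/devlink.h>

static int demo_add_port(struct devlink *devlink,
			 struct devlink_port *port, unsigned int index)
{
	int err;

	/* Take the instance lock once, then use the devl_* variants */
	devl_lock(devlink);
	err = devl_port_register(devlink, port, index);
	devl_unlock(devlink);
	return err;
}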
diff --git a/drivers/net/netdevsim/hwstats.c b/drivers/net/netdevsim/hwstats.c
new file mode 100644
index 000000000000..605a38e16db0
--- /dev/null
+++ b/drivers/net/netdevsim/hwstats.c
@@ -0,0 +1,486 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/debugfs.h>
+
+#include "netdevsim.h"
+
+#define NSIM_DEV_HWSTATS_TRAFFIC_MS 100
+
+static struct list_head *
+nsim_dev_hwstats_get_list_head(struct nsim_dev_hwstats *hwstats,
+ enum netdev_offload_xstats_type type)
+{
+ switch (type) {
+ case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
+ return &hwstats->l3_list;
+ }
+
+ WARN_ON_ONCE(1);
+ return NULL;
+}
+
+static void nsim_dev_hwstats_traffic_bump(struct nsim_dev_hwstats *hwstats,
+ enum netdev_offload_xstats_type type)
+{
+ struct nsim_dev_hwstats_netdev *hwsdev;
+ struct list_head *hwsdev_list;
+
+ hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, type);
+ if (WARN_ON(!hwsdev_list))
+ return;
+
+ list_for_each_entry(hwsdev, hwsdev_list, list) {
+ if (hwsdev->enabled) {
+ hwsdev->stats.rx_packets += 1;
+ hwsdev->stats.tx_packets += 2;
+ hwsdev->stats.rx_bytes += 100;
+ hwsdev->stats.tx_bytes += 300;
+ }
+ }
+}
+
+static void nsim_dev_hwstats_traffic_work(struct work_struct *work)
+{
+ struct nsim_dev_hwstats *hwstats;
+
+ hwstats = container_of(work, struct nsim_dev_hwstats, traffic_dw.work);
+ mutex_lock(&hwstats->hwsdev_list_lock);
+ nsim_dev_hwstats_traffic_bump(hwstats, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+
+ schedule_delayed_work(&hwstats->traffic_dw,
+ msecs_to_jiffies(NSIM_DEV_HWSTATS_TRAFFIC_MS));
+}
+
+static struct nsim_dev_hwstats_netdev *
+nsim_dev_hwslist_find_hwsdev(struct list_head *hwsdev_list,
+ int ifindex)
+{
+ struct nsim_dev_hwstats_netdev *hwsdev;
+
+ list_for_each_entry(hwsdev, hwsdev_list, list) {
+ if (hwsdev->netdev->ifindex == ifindex)
+ return hwsdev;
+ }
+
+ return NULL;
+}
+
+static int nsim_dev_hwsdev_enable(struct nsim_dev_hwstats_netdev *hwsdev,
+ struct netlink_ext_ack *extack)
+{
+ if (hwsdev->fail_enable) {
+ hwsdev->fail_enable = false;
+ NL_SET_ERR_MSG_MOD(extack, "Stats enablement set to fail");
+ return -ECANCELED;
+ }
+
+ hwsdev->enabled = true;
+ return 0;
+}
+
+static void nsim_dev_hwsdev_disable(struct nsim_dev_hwstats_netdev *hwsdev)
+{
+ hwsdev->enabled = false;
+ memset(&hwsdev->stats, 0, sizeof(hwsdev->stats));
+}
+
+static int
+nsim_dev_hwsdev_report_delta(struct nsim_dev_hwstats_netdev *hwsdev,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ netdev_offload_xstats_report_delta(info->report_delta, &hwsdev->stats);
+ memset(&hwsdev->stats, 0, sizeof(hwsdev->stats));
+ return 0;
+}
+
+static void
+nsim_dev_hwsdev_report_used(struct nsim_dev_hwstats_netdev *hwsdev,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ if (hwsdev->enabled)
+ netdev_offload_xstats_report_used(info->report_used);
+}
+
+static int nsim_dev_hwstats_event_off_xstats(struct nsim_dev_hwstats *hwstats,
+ struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ struct netdev_notifier_offload_xstats_info *info;
+ struct nsim_dev_hwstats_netdev *hwsdev;
+ struct list_head *hwsdev_list;
+ int err = 0;
+
+ info = ptr;
+ hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, info->type);
+ if (!hwsdev_list)
+ return 0;
+
+ mutex_lock(&hwstats->hwsdev_list_lock);
+
+ hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, dev->ifindex);
+ if (!hwsdev)
+ goto out;
+
+ switch (event) {
+ case NETDEV_OFFLOAD_XSTATS_ENABLE:
+ err = nsim_dev_hwsdev_enable(hwsdev, info->info.extack);
+ break;
+ case NETDEV_OFFLOAD_XSTATS_DISABLE:
+ nsim_dev_hwsdev_disable(hwsdev);
+ break;
+ case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
+ nsim_dev_hwsdev_report_used(hwsdev, info);
+ break;
+ case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
+ err = nsim_dev_hwsdev_report_delta(hwsdev, info);
+ break;
+ }
+
+out:
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+ return err;
+}
+
+static void nsim_dev_hwsdev_fini(struct nsim_dev_hwstats_netdev *hwsdev)
+{
+ dev_put(hwsdev->netdev);
+ kfree(hwsdev);
+}
+
+static void
+__nsim_dev_hwstats_event_unregister(struct nsim_dev_hwstats *hwstats,
+ struct net_device *dev,
+ enum netdev_offload_xstats_type type)
+{
+ struct nsim_dev_hwstats_netdev *hwsdev;
+ struct list_head *hwsdev_list;
+
+ hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, type);
+ if (WARN_ON(!hwsdev_list))
+ return;
+
+ hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, dev->ifindex);
+ if (!hwsdev)
+ return;
+
+ list_del(&hwsdev->list);
+ nsim_dev_hwsdev_fini(hwsdev);
+}
+
+static void nsim_dev_hwstats_event_unregister(struct nsim_dev_hwstats *hwstats,
+ struct net_device *dev)
+{
+ mutex_lock(&hwstats->hwsdev_list_lock);
+ __nsim_dev_hwstats_event_unregister(hwstats, dev,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+}
+
+static int nsim_dev_hwstats_event(struct nsim_dev_hwstats *hwstats,
+ struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ switch (event) {
+ case NETDEV_OFFLOAD_XSTATS_ENABLE:
+ case NETDEV_OFFLOAD_XSTATS_DISABLE:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
+ return nsim_dev_hwstats_event_off_xstats(hwstats, dev,
+ event, ptr);
+ case NETDEV_UNREGISTER:
+ nsim_dev_hwstats_event_unregister(hwstats, dev);
+ break;
+ }
+
+ return 0;
+}
+
+static int nsim_dev_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct nsim_dev_hwstats *hwstats;
+ int err = 0;
+
+ hwstats = container_of(nb, struct nsim_dev_hwstats, netdevice_nb);
+ err = nsim_dev_hwstats_event(hwstats, dev, event, ptr);
+ if (err)
+ return notifier_from_errno(err);
+
+ return NOTIFY_OK;
+}
+
+static int
+nsim_dev_hwstats_enable_ifindex(struct nsim_dev_hwstats *hwstats,
+ int ifindex,
+ enum netdev_offload_xstats_type type,
+ struct list_head *hwsdev_list)
+{
+ struct nsim_dev_hwstats_netdev *hwsdev;
+ struct nsim_dev *nsim_dev;
+ struct net_device *netdev;
+ bool notify = false;
+ struct net *net;
+ int err = 0;
+
+ nsim_dev = container_of(hwstats, struct nsim_dev, hwstats);
+ net = nsim_dev_net(nsim_dev);
+
+ rtnl_lock();
+ mutex_lock(&hwstats->hwsdev_list_lock);
+ hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, ifindex);
+ if (hwsdev)
+ goto out_unlock_list;
+
+ netdev = dev_get_by_index(net, ifindex);
+ if (!netdev) {
+ err = -ENODEV;
+ goto out_unlock_list;
+ }
+
+ hwsdev = kzalloc(sizeof(*hwsdev), GFP_KERNEL);
+ if (!hwsdev) {
+ err = -ENOMEM;
+ goto out_put_netdev;
+ }
+
+ hwsdev->netdev = netdev;
+ list_add_tail(&hwsdev->list, hwsdev_list);
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+
+ if (netdev_offload_xstats_enabled(netdev, type)) {
+ nsim_dev_hwsdev_enable(hwsdev, NULL);
+ notify = true;
+ }
+
+ if (notify)
+ rtnl_offload_xstats_notify(netdev);
+ rtnl_unlock();
+ return err;
+
+out_put_netdev:
+ dev_put(netdev);
+out_unlock_list:
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+ rtnl_unlock();
+ return err;
+}
+
+static int
+nsim_dev_hwstats_disable_ifindex(struct nsim_dev_hwstats *hwstats,
+ int ifindex,
+ enum netdev_offload_xstats_type type,
+ struct list_head *hwsdev_list)
+{
+ struct nsim_dev_hwstats_netdev *hwsdev;
+ int err = 0;
+
+ rtnl_lock();
+ mutex_lock(&hwstats->hwsdev_list_lock);
+ hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, ifindex);
+ if (hwsdev)
+ list_del(&hwsdev->list);
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+
+ if (!hwsdev) {
+ err = -ENOENT;
+ goto unlock_out;
+ }
+
+ if (netdev_offload_xstats_enabled(hwsdev->netdev, type)) {
+ netdev_offload_xstats_push_delta(hwsdev->netdev, type,
+ &hwsdev->stats);
+ rtnl_offload_xstats_notify(hwsdev->netdev);
+ }
+ nsim_dev_hwsdev_fini(hwsdev);
+
+unlock_out:
+ rtnl_unlock();
+ return err;
+}
+
+static int
+nsim_dev_hwstats_fail_ifindex(struct nsim_dev_hwstats *hwstats,
+ int ifindex,
+ enum netdev_offload_xstats_type type,
+ struct list_head *hwsdev_list)
+{
+ struct nsim_dev_hwstats_netdev *hwsdev;
+ int err = 0;
+
+ mutex_lock(&hwstats->hwsdev_list_lock);
+
+ hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, ifindex);
+ if (!hwsdev) {
+ err = -ENOENT;
+ goto err_hwsdev_list_unlock;
+ }
+
+ hwsdev->fail_enable = true;
+
+err_hwsdev_list_unlock:
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+ return err;
+}
+
+enum nsim_dev_hwstats_do {
+ NSIM_DEV_HWSTATS_DO_DISABLE,
+ NSIM_DEV_HWSTATS_DO_ENABLE,
+ NSIM_DEV_HWSTATS_DO_FAIL,
+};
+
+struct nsim_dev_hwstats_fops {
+ const struct file_operations fops;
+ enum nsim_dev_hwstats_do action;
+ enum netdev_offload_xstats_type type;
+};
+
+static ssize_t
+nsim_dev_hwstats_do_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_dev_hwstats *hwstats = file->private_data;
+ struct nsim_dev_hwstats_fops *hwsfops;
+ struct list_head *hwsdev_list;
+ int ifindex;
+ int err;
+
+ hwsfops = container_of(debugfs_real_fops(file),
+ struct nsim_dev_hwstats_fops, fops);
+
+ err = kstrtoint_from_user(data, count, 0, &ifindex);
+ if (err)
+ return err;
+
+ hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, hwsfops->type);
+ if (WARN_ON(!hwsdev_list))
+ return -EINVAL;
+
+ switch (hwsfops->action) {
+ case NSIM_DEV_HWSTATS_DO_DISABLE:
+ err = nsim_dev_hwstats_disable_ifindex(hwstats, ifindex,
+ hwsfops->type,
+ hwsdev_list);
+ break;
+ case NSIM_DEV_HWSTATS_DO_ENABLE:
+ err = nsim_dev_hwstats_enable_ifindex(hwstats, ifindex,
+ hwsfops->type,
+ hwsdev_list);
+ break;
+ case NSIM_DEV_HWSTATS_DO_FAIL:
+ err = nsim_dev_hwstats_fail_ifindex(hwstats, ifindex,
+ hwsfops->type,
+ hwsdev_list);
+ break;
+ }
+ if (err)
+ return err;
+
+ return count;
+}
+
+#define NSIM_DEV_HWSTATS_FOPS(ACTION, TYPE) \
+ { \
+ .fops = { \
+ .open = simple_open, \
+ .write = nsim_dev_hwstats_do_write, \
+ .llseek = generic_file_llseek, \
+ .owner = THIS_MODULE, \
+ }, \
+ .action = ACTION, \
+ .type = TYPE, \
+ }
+
+static const struct nsim_dev_hwstats_fops nsim_dev_hwstats_l3_disable_fops =
+ NSIM_DEV_HWSTATS_FOPS(NSIM_DEV_HWSTATS_DO_DISABLE,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+
+static const struct nsim_dev_hwstats_fops nsim_dev_hwstats_l3_enable_fops =
+ NSIM_DEV_HWSTATS_FOPS(NSIM_DEV_HWSTATS_DO_ENABLE,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+
+static const struct nsim_dev_hwstats_fops nsim_dev_hwstats_l3_fail_fops =
+ NSIM_DEV_HWSTATS_FOPS(NSIM_DEV_HWSTATS_DO_FAIL,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+
+#undef NSIM_DEV_HWSTATS_FOPS
+
+int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev)
+{
+ struct nsim_dev_hwstats *hwstats = &nsim_dev->hwstats;
+ struct net *net = nsim_dev_net(nsim_dev);
+ int err;
+
+ mutex_init(&hwstats->hwsdev_list_lock);
+ INIT_LIST_HEAD(&hwstats->l3_list);
+
+ hwstats->netdevice_nb.notifier_call = nsim_dev_netdevice_event;
+ err = register_netdevice_notifier_net(net, &hwstats->netdevice_nb);
+ if (err)
+ goto err_mutex_destroy;
+
+ hwstats->ddir = debugfs_create_dir("hwstats", nsim_dev->ddir);
+ if (IS_ERR(hwstats->ddir)) {
+ err = PTR_ERR(hwstats->ddir);
+ goto err_unregister_notifier;
+ }
+
+ hwstats->l3_ddir = debugfs_create_dir("l3", hwstats->ddir);
+ if (IS_ERR(hwstats->l3_ddir)) {
+ err = PTR_ERR(hwstats->l3_ddir);
+ goto err_remove_hwstats_recursive;
+ }
+
+ debugfs_create_file("enable_ifindex", 0600, hwstats->l3_ddir, hwstats,
+ &nsim_dev_hwstats_l3_enable_fops.fops);
+ debugfs_create_file("disable_ifindex", 0600, hwstats->l3_ddir, hwstats,
+ &nsim_dev_hwstats_l3_disable_fops.fops);
+ debugfs_create_file("fail_next_enable", 0600, hwstats->l3_ddir, hwstats,
+ &nsim_dev_hwstats_l3_fail_fops.fops);
+
+ INIT_DELAYED_WORK(&hwstats->traffic_dw,
+ &nsim_dev_hwstats_traffic_work);
+ schedule_delayed_work(&hwstats->traffic_dw,
+ msecs_to_jiffies(NSIM_DEV_HWSTATS_TRAFFIC_MS));
+ return 0;
+
+err_remove_hwstats_recursive:
+ debugfs_remove_recursive(hwstats->ddir);
+err_unregister_notifier:
+ unregister_netdevice_notifier_net(net, &hwstats->netdevice_nb);
+err_mutex_destroy:
+ mutex_destroy(&hwstats->hwsdev_list_lock);
+ return err;
+}
+
+static void nsim_dev_hwsdev_list_wipe(struct nsim_dev_hwstats *hwstats,
+ enum netdev_offload_xstats_type type)
+{
+ struct nsim_dev_hwstats_netdev *hwsdev, *tmp;
+ struct list_head *hwsdev_list;
+
+ hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, type);
+ if (WARN_ON(!hwsdev_list))
+ return;
+
+ mutex_lock(&hwstats->hwsdev_list_lock);
+ list_for_each_entry_safe(hwsdev, tmp, hwsdev_list, list) {
+ list_del(&hwsdev->list);
+ nsim_dev_hwsdev_fini(hwsdev);
+ }
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+}
+
+void nsim_dev_hwstats_exit(struct nsim_dev *nsim_dev)
+{
+ struct nsim_dev_hwstats *hwstats = &nsim_dev->hwstats;
+ struct net *net = nsim_dev_net(nsim_dev);
+
+ cancel_delayed_work_sync(&hwstats->traffic_dw);
+ debugfs_remove_recursive(hwstats->ddir);
+ unregister_netdevice_notifier_net(net, &hwstats->netdevice_nb);
+ nsim_dev_hwsdev_list_wipe(hwstats, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+ mutex_destroy(&hwstats->hwsdev_list_lock);
+}
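The hwstats debugfs write handlers above recover their per-file parameters
by embedding struct file_operations in a larger struct and walking back with
container_of() via debugfs_real_fops(). A minimal sketch of that pattern
(demo_* names are illustrative, not from the patch); from userspace the
resulting knobs are driven by writing an ifindex to files such as
hwstats/l3/enable_ifindex:

#include <linux/container_of.h>
#include <linux/debugfs.h>
#include <linux/fs.h>

struct demo_fops {
	struct file_operations fops;	/* embedded, not a pointer */
	int action;			/* per-file parameter */
};

static ssize_t demo_write(struct file *file, const char __user *data,
			  size_t count, loff_t *ppos)
{
	const struct demo_fops *df = container_of(debugfs_real_fops(file),
						  struct demo_fops, fops);

	/* dispatch on df->action; parse 'data' with kstrtoint_from_user() */
	return count;
}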
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index c49771f27f17..0b122872b2c9 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -184,6 +184,28 @@ struct nsim_dev_health {
int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink);
void nsim_dev_health_exit(struct nsim_dev *nsim_dev);
+struct nsim_dev_hwstats_netdev {
+ struct list_head list;
+ struct net_device *netdev;
+ struct rtnl_hw_stats64 stats;
+ bool enabled;
+ bool fail_enable;
+};
+
+struct nsim_dev_hwstats {
+ struct dentry *ddir;
+ struct dentry *l3_ddir;
+
+ struct mutex hwsdev_list_lock; /* protects hwsdev list(s) */
+ struct list_head l3_list;
+
+ struct notifier_block netdevice_nb;
+ struct delayed_work traffic_dw;
+};
+
+int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev);
+void nsim_dev_hwstats_exit(struct nsim_dev *nsim_dev);
+
#if IS_ENABLED(CONFIG_PSAMPLE)
int nsim_dev_psample_init(struct nsim_dev *nsim_dev);
void nsim_dev_psample_exit(struct nsim_dev *nsim_dev);
@@ -239,7 +261,6 @@ struct nsim_dev {
struct dentry *take_snapshot;
struct dentry *nodes_ddir;
- struct mutex vfs_lock; /* Protects vfconfigs */
struct nsim_vf_config *vfconfigs;
struct bpf_offload_dev *bpf_dev;
@@ -252,7 +273,6 @@ struct nsim_dev {
struct list_head bpf_bound_maps;
struct netdev_phys_item_id switch_id;
struct list_head port_list;
- struct mutex port_list_lock; /* protects port list */
bool fw_update_status;
u32 fw_update_overwrite_mask;
u32 max_macs;
@@ -261,6 +281,7 @@ struct nsim_dev {
bool fail_reload;
struct devlink_region *dummy_region;
struct nsim_dev_health health;
+ struct nsim_dev_hwstats hwstats;
struct flow_action_cookie *fa_cookie;
spinlock_t fa_cookie_lock; /* protects fa_cookie */
bool fail_trap_group_set;
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 98ca6b18415e..80bdc07f2cd3 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -119,7 +119,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
skb->protocol = eth_type_trans(skb, ndev);
skb->ip_summed = CHECKSUM_NONE;
- if (netif_rx(skb) == NET_RX_DROP) {
+ if (__netif_rx(skb) == NET_RX_DROP) {
ndev->stats.rx_errors++;
ndev->stats.rx_dropped++;
} else {
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index cd6742e6ba8b..61418d4dc0cd 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -632,35 +632,43 @@ static void xpcs_resolve_pma(struct dw_xpcs *xpcs,
}
}
-void xpcs_validate(struct dw_xpcs *xpcs, unsigned long *supported,
- struct phylink_link_state *state)
+static int xpcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
+ const struct phylink_link_state *state)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(xpcs_supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(xpcs_supported) = { 0, };
const struct xpcs_compat *compat;
+ struct dw_xpcs *xpcs;
int i;
- /* phylink expects us to report all supported modes with
- * PHY_INTERFACE_MODE_NA, just don't limit the supported and
- * advertising masks and exit.
- */
- if (state->interface == PHY_INTERFACE_MODE_NA)
- return;
-
- linkmode_zero(xpcs_supported);
-
+ xpcs = phylink_pcs_to_xpcs(pcs);
compat = xpcs_find_compat(xpcs->id, state->interface);
- /* Populate the supported link modes for this
- * PHY interface type
+ /* Populate the supported link modes for this PHY interface type.
+ * FIXME: what about the port modes and autoneg bit? This masks
+ * all those away.
*/
if (compat)
for (i = 0; compat->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
set_bit(compat->supported[i], xpcs_supported);
linkmode_and(supported, supported, xpcs_supported);
- linkmode_and(state->advertising, state->advertising, xpcs_supported);
+
+ return 0;
+}
+
+void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces)
+{
+ int i, j;
+
+ for (i = 0; i < DW_XPCS_INTERFACE_MAX; i++) {
+ const struct xpcs_compat *compat = &xpcs->id->compat[i];
+
+ for (j = 0; j < compat->num_interfaces; j++)
+ if (compat->interface[j] < PHY_INTERFACE_MODE_MAX)
+ __set_bit(compat->interface[j], interfaces);
+ }
}
-EXPORT_SYMBOL_GPL(xpcs_validate);
+EXPORT_SYMBOL_GPL(xpcs_get_interfaces);
int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable)
{
@@ -1106,6 +1114,7 @@ static const struct xpcs_id xpcs_id_list[] = {
};
static const struct phylink_pcs_ops xpcs_phylink_ops = {
+ .pcs_validate = xpcs_validate,
.pcs_config = xpcs_config,
.pcs_get_state = xpcs_get_state,
.pcs_link_up = xpcs_link_up,
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 902495afcb38..ea7571a2b39b 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -220,6 +220,7 @@ config MEDIATEK_GE_PHY
config MICREL_PHY
tristate "Micrel PHYs"
+ depends on PTP_1588_CLOCK_OPTIONAL
help
Supports the KSZ9021, VSC8201, KS8001 PHYs.
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 968dd43a2b1e..a8db1a19011b 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -533,9 +533,7 @@ static int aqcs109_config_init(struct phy_device *phydev)
* PMA speed ability bits are the same for all members of the family,
* AQCS109 however supports speeds up to 2.5G only.
*/
- ret = phy_set_max_speed(phydev, SPEED_2500);
- if (ret)
- return ret;
+ phy_set_max_speed(phydev, SPEED_2500);
return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
}
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 29aa811af430..73926006d319 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -19,6 +19,8 @@
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/consumer.h>
+#include <linux/phylink.h>
+#include <linux/sfp.h>
#include <dt-bindings/net/qca-ar803x.h>
#define AT803X_SPECIFIC_FUNCTION_CONTROL 0x10
@@ -51,6 +53,8 @@
#define AT803X_INTR_ENABLE_PAGE_RECEIVED BIT(12)
#define AT803X_INTR_ENABLE_LINK_FAIL BIT(11)
#define AT803X_INTR_ENABLE_LINK_SUCCESS BIT(10)
+#define AT803X_INTR_ENABLE_LINK_FAIL_BX BIT(8)
+#define AT803X_INTR_ENABLE_LINK_SUCCESS_BX BIT(7)
#define AT803X_INTR_ENABLE_WIRESPEED_DOWNGRADE BIT(5)
#define AT803X_INTR_ENABLE_POLARITY_CHANGED BIT(1)
#define AT803X_INTR_ENABLE_WOL BIT(0)
@@ -85,7 +89,17 @@
#define AT803X_DEBUG_DATA 0x1E
#define AT803X_MODE_CFG_MASK 0x0F
-#define AT803X_MODE_CFG_SGMII 0x01
+#define AT803X_MODE_CFG_BASET_RGMII 0x00
+#define AT803X_MODE_CFG_BASET_SGMII 0x01
+#define AT803X_MODE_CFG_BX1000_RGMII_50OHM 0x02
+#define AT803X_MODE_CFG_BX1000_RGMII_75OHM 0x03
+#define AT803X_MODE_CFG_BX1000_CONV_50OHM 0x04
+#define AT803X_MODE_CFG_BX1000_CONV_75OHM 0x05
+#define AT803X_MODE_CFG_FX100_RGMII_50OHM 0x06
+#define AT803X_MODE_CFG_FX100_CONV_50OHM 0x07
+#define AT803X_MODE_CFG_RGMII_AUTO_MDET 0x0B
+#define AT803X_MODE_CFG_FX100_RGMII_75OHM 0x0E
+#define AT803X_MODE_CFG_FX100_CONV_75OHM 0x0F
#define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/
#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
@@ -283,6 +297,8 @@ struct at803x_priv {
u16 clk_25m_mask;
u8 smarteee_lpi_tw_1g;
u8 smarteee_lpi_tw_100m;
+ bool is_fiber;
+ bool is_1000basex;
struct regulator_dev *vddio_rdev;
struct regulator_dev *vddh_rdev;
struct regulator *vddio;
@@ -650,6 +666,55 @@ static int at8031_register_regulators(struct phy_device *phydev)
return 0;
}
+static int at803x_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+{
+ struct phy_device *phydev = upstream;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+ phy_interface_t iface;
+
+ linkmode_zero(phy_support);
+ phylink_set(phy_support, 1000baseX_Full);
+ phylink_set(phy_support, 1000baseT_Full);
+ phylink_set(phy_support, Autoneg);
+ phylink_set(phy_support, Pause);
+ phylink_set(phy_support, Asym_Pause);
+
+ linkmode_zero(sfp_support);
+ sfp_parse_support(phydev->sfp_bus, id, sfp_support);
+ /* Some modules support 10G modes as well as others we support.
+ * Mask out non-supported modes so the correct interface is picked.
+ */
+ linkmode_and(sfp_support, phy_support, sfp_support);
+
+ if (linkmode_empty(sfp_support)) {
+ dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
+ return -EINVAL;
+ }
+
+ iface = sfp_select_interface(phydev->sfp_bus, sfp_support);
+
+ /* Only 1000Base-X is supported by AR8031/8033 as the downstream SerDes
+ * interface for use with SFP modules.
+ * However, some copper modules detected as having a preferred SGMII
+ * interface do default to and function in 1000Base-X mode, so just
+ * print a warning and allow such modules, as they may have some chance
+ * of working.
+ */
+ if (iface == PHY_INTERFACE_MODE_SGMII)
+ dev_warn(&phydev->mdio.dev, "module may not function if 1000Base-X not supported\n");
+ else if (iface != PHY_INTERFACE_MODE_1000BASEX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct sfp_upstream_ops at803x_sfp_ops = {
+ .attach = phy_sfp_attach,
+ .detach = phy_sfp_detach,
+ .module_insert = at803x_sfp_insert,
+};
+
static int at803x_parse_dt(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
@@ -757,6 +822,11 @@ static int at803x_parse_dt(struct phy_device *phydev)
phydev_err(phydev, "failed to get VDDIO regulator\n");
return PTR_ERR(priv->vddio);
}
+
+ /* Only AR8031/8033 support 1000Base-X for SFP modules */
+ ret = phy_sfp_probe(phydev, &at803x_sfp_ops);
+ if (ret < 0)
+ return ret;
}
return 0;
@@ -784,16 +854,24 @@ static int at803x_probe(struct phy_device *phydev)
return ret;
}
- /* Some bootloaders leave the fiber page selected.
- * Switch to the copper page, as otherwise we read
- * the PHY capabilities from the fiber side.
- */
if (phydev->drv->phy_id == ATH8031_PHY_ID) {
- phy_lock_mdio_bus(phydev);
- ret = at803x_write_page(phydev, AT803X_PAGE_COPPER);
- phy_unlock_mdio_bus(phydev);
- if (ret)
+ int ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+ int mode_cfg;
+
+ if (ccr < 0)
goto err;
+ mode_cfg = ccr & AT803X_MODE_CFG_MASK;
+
+ switch (mode_cfg) {
+ case AT803X_MODE_CFG_BX1000_RGMII_50OHM:
+ case AT803X_MODE_CFG_BX1000_RGMII_75OHM:
+ priv->is_1000basex = true;
+ fallthrough;
+ case AT803X_MODE_CFG_FX100_RGMII_50OHM:
+ case AT803X_MODE_CFG_FX100_RGMII_75OHM:
+ priv->is_fiber = true;
+ break;
+ }
}
return 0;
@@ -815,6 +893,7 @@ static void at803x_remove(struct phy_device *phydev)
static int at803x_get_features(struct phy_device *phydev)
{
+ struct at803x_priv *priv = phydev->priv;
int err;
err = genphy_read_abilities(phydev);
@@ -841,12 +920,13 @@ static int at803x_get_features(struct phy_device *phydev)
* As a result of that, ESTATUS_1000_XFULL is set
* to 1 even when operating in copper TP mode.
*
- * Remove this mode from the supported link modes,
- * as this driver currently only supports copper
- * operation.
+ * Remove this mode from the supported link modes
+ * when not operating in 1000BaseX mode.
*/
- linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
- phydev->supported);
+ if (!priv->is_1000basex)
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ phydev->supported);
+
return 0;
}
@@ -910,8 +990,27 @@ static int at8031_pll_config(struct phy_device *phydev)
static int at803x_config_init(struct phy_device *phydev)
{
+ struct at803x_priv *priv = phydev->priv;
int ret;
+ if (phydev->drv->phy_id == ATH8031_PHY_ID) {
+ /* Some bootloaders leave the fiber page selected.
+ * Switch to the appropriate page (fiber or copper), as otherwise we
+ * read the PHY capabilities from the wrong page.
+ */
+ phy_lock_mdio_bus(phydev);
+ ret = at803x_write_page(phydev,
+ priv->is_fiber ? AT803X_PAGE_FIBER :
+ AT803X_PAGE_COPPER);
+ phy_unlock_mdio_bus(phydev);
+ if (ret)
+ return ret;
+
+ ret = at8031_pll_config(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
/* The RX and TX delay default is:
* after HW reset: RX delay enabled and TX delay disabled
* after SW reset: RX delay enabled, while TX delay retains the
@@ -941,12 +1040,6 @@ static int at803x_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
- if (phydev->drv->phy_id == ATH8031_PHY_ID) {
- ret = at8031_pll_config(phydev);
- if (ret < 0)
- return ret;
- }
-
/* Ar803x extended next page bit is enabled by default. Cisco
* multigig switches read this bit and attempt to negotiate 10Gbps
* rates even if the next page bit is disabled. This is incorrect
@@ -967,6 +1060,7 @@ static int at803x_ack_interrupt(struct phy_device *phydev)
static int at803x_config_intr(struct phy_device *phydev)
{
+ struct at803x_priv *priv = phydev->priv;
int err;
int value;
@@ -983,6 +1077,10 @@ static int at803x_config_intr(struct phy_device *phydev)
value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
value |= AT803X_INTR_ENABLE_LINK_FAIL;
value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
+ if (priv->is_fiber) {
+ value |= AT803X_INTR_ENABLE_LINK_FAIL_BX;
+ value |= AT803X_INTR_ENABLE_LINK_SUCCESS_BX;
+ }
err = phy_write(phydev, AT803X_INTR_ENABLE, value);
} else {
@@ -1115,8 +1213,12 @@ static int at803x_read_specific_status(struct phy_device *phydev)
static int at803x_read_status(struct phy_device *phydev)
{
+ struct at803x_priv *priv = phydev->priv;
int err, old_link = phydev->link;
+ if (priv->is_1000basex)
+ return genphy_c37_read_status(phydev);
+
/* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
if (err)
@@ -1170,6 +1272,7 @@ static int at803x_config_mdix(struct phy_device *phydev, u8 ctrl)
static int at803x_config_aneg(struct phy_device *phydev)
{
+ struct at803x_priv *priv = phydev->priv;
int ret;
ret = at803x_config_mdix(phydev, phydev->mdix_ctrl);
@@ -1186,6 +1289,9 @@ static int at803x_config_aneg(struct phy_device *phydev)
return ret;
}
+ if (priv->is_1000basex)
+ return genphy_c37_config_aneg(phydev);
+
/* Default ret to 0 so that auto-negotiation is not restarted
* when calling __genphy_config_aneg later.
*/
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index c2d1a85ec559..ef8b14135133 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -886,7 +886,7 @@ out:
spin_unlock_irqrestore(&dp83640->rx_lock, flags);
if (shhwtstamps)
- netif_rx_ni(skb);
+ netif_rx(skb);
}
static void decode_txts(struct dp83640_private *dp83640,
@@ -970,17 +970,6 @@ static void decode_status_frame(struct dp83640_private *dp83640,
}
}
-static int is_sync(struct sk_buff *skb, int type)
-{
- struct ptp_header *hdr;
-
- hdr = ptp_parse_header(skb, type);
- if (!hdr)
- return 0;
-
- return ptp_get_msgtype(hdr, type) == PTP_MSGTYPE_SYNC;
-}
-
static void dp83640_free_clocks(void)
{
struct dp83640_clock *clock;
@@ -1329,7 +1318,7 @@ static void rx_timestamp_work(struct work_struct *work)
break;
}
- netif_rx_ni(skb);
+ netif_rx(skb);
}
if (!skb_queue_empty(&dp83640->rx_queue))
@@ -1380,7 +1369,7 @@ static bool dp83640_rxtstamp(struct mii_timestamper *mii_ts,
skb_queue_tail(&dp83640->rx_queue, skb);
schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
} else {
- netif_rx_ni(skb);
+ netif_rx(skb);
}
return true;
@@ -1396,7 +1385,7 @@ static void dp83640_txtstamp(struct mii_timestamper *mii_ts,
switch (dp83640->hwts_tx_en) {
case HWTSTAMP_TX_ONESTEP_SYNC:
- if (is_sync(skb, type)) {
+ if (ptp_msg_is_sync(skb, type)) {
kfree_skb(skb);
return;
}
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 211b5476a6f5..ce17b2af3218 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -274,7 +274,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
if (err < 0)
return err;
- err = phy_write(phydev, MII_DP83822_MISR1, 0);
+ err = phy_write(phydev, MII_DP83822_MISR2, 0);
if (err < 0)
return err;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 2429db614b59..2702faf7b0f6 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1687,8 +1687,8 @@ static int marvell_suspend(struct phy_device *phydev)
int err;
/* Suspend the fiber mode first */
- if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
- phydev->supported)) {
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported)) {
err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
@@ -1722,8 +1722,8 @@ static int marvell_resume(struct phy_device *phydev)
int err;
/* Resume the fiber mode first */
- if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
- phydev->supported)) {
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported)) {
err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index 7e7904fee1d9..73f7962a37d3 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -30,8 +30,12 @@
#define INTSRC_LINK_DOWN BIT(4)
#define INTSRC_REMOTE_FAULT BIT(5)
#define INTSRC_ANEG_COMPLETE BIT(6)
+#define INTSRC_ENERGY_DETECT BIT(7)
#define INTSRC_MASK 30
+#define INT_SOURCES (INTSRC_LINK_DOWN | INTSRC_ANEG_COMPLETE | \
+ INTSRC_ENERGY_DETECT)
+
#define BANK_ANALOG_DSP 0
#define BANK_WOL 1
#define BANK_BIST 3
@@ -200,7 +204,6 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev)
static int meson_gxl_config_intr(struct phy_device *phydev)
{
- u16 val;
int ret;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
@@ -209,16 +212,9 @@ static int meson_gxl_config_intr(struct phy_device *phydev)
if (ret)
return ret;
- val = INTSRC_ANEG_PR
- | INTSRC_PARALLEL_FAULT
- | INTSRC_ANEG_LP_ACK
- | INTSRC_LINK_DOWN
- | INTSRC_REMOTE_FAULT
- | INTSRC_ANEG_COMPLETE;
- ret = phy_write(phydev, INTSRC_MASK, val);
+ ret = phy_write(phydev, INTSRC_MASK, INT_SOURCES);
} else {
- val = 0;
- ret = phy_write(phydev, INTSRC_MASK, val);
+ ret = phy_write(phydev, INTSRC_MASK, 0);
/* Ack any pending IRQ */
ret = meson_gxl_ack_interrupt(phydev);
@@ -237,10 +233,23 @@ static irqreturn_t meson_gxl_handle_interrupt(struct phy_device *phydev)
return IRQ_NONE;
}
+ irq_status &= INT_SOURCES;
+
if (irq_status == 0)
return IRQ_NONE;
- phy_trigger_machine(phydev);
+ /* The aneg-complete interrupt is used for link-up detection,
+ * so an energy-detect-only event needs no further handling here.
+ */
+ if (phydev->autoneg == AUTONEG_ENABLE &&
+ irq_status == INTSRC_ENERGY_DETECT)
+ return IRQ_HANDLED;
+
+ /* Give the PHY some time before the MAC starts sending data. This
+ * works around an issue where the network doesn't come up properly.
+ */
+ if (!(irq_status & INTSRC_LINK_DOWN))
+ phy_queue_state_machine(phydev, msecs_to_jiffies(100));
+ else
+ phy_trigger_machine(phydev);
return IRQ_HANDLED;
}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index a7ebcdab415b..19b11e896460 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -28,6 +28,10 @@
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_clock.h>
+#include <linux/ptp_classify.h>
+#include <linux/net_tstamp.h>
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
@@ -79,6 +83,119 @@
#define LAN8814_INTR_CTRL_REG_POLARITY BIT(1)
#define LAN8814_INTR_CTRL_REG_INTR_ENABLE BIT(0)
+/* Represents a 1ppm adjustment in 2^32 format, where
+ * each nsec is made up of 4 clock cycles.
+ * The value is calculated as follows: (1/1000000)/((2^-32)/4)
+ */
+#define LAN8814_1PPM_FORMAT 17179
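As a quick sanity check of this constant (a stand-alone user-space sketch, not driver code; the granularity of (2^-32)/4 per nanosecond is taken from the comment above):

#include <stdio.h>

int main(void)
{
	/* One register LSB is (2^-32)/4 = 2^-34 per nanosecond */
	double lsb = 1.0 / (double)(1ULL << 34);
	double one_ppm = 1e-6;

	/* 1e-6 / 2^-34 ~= 17179.87, truncated to 17179 */
	printf("%u\n", (unsigned int)(one_ppm / lsb));
	return 0;
}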
+
+#define PTP_RX_MOD 0x024F
+#define PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_ BIT(3)
+#define PTP_RX_TIMESTAMP_EN 0x024D
+#define PTP_TX_TIMESTAMP_EN 0x028D
+
+#define PTP_TIMESTAMP_EN_SYNC_ BIT(0)
+#define PTP_TIMESTAMP_EN_DREQ_ BIT(1)
+#define PTP_TIMESTAMP_EN_PDREQ_ BIT(2)
+#define PTP_TIMESTAMP_EN_PDRES_ BIT(3)
+
+#define PTP_RX_LATENCY_1000 0x0224
+#define PTP_TX_LATENCY_1000 0x0225
+
+#define PTP_RX_LATENCY_100 0x0222
+#define PTP_TX_LATENCY_100 0x0223
+
+#define PTP_RX_LATENCY_10 0x0220
+#define PTP_TX_LATENCY_10 0x0221
+
+#define PTP_TX_PARSE_L2_ADDR_EN 0x0284
+#define PTP_RX_PARSE_L2_ADDR_EN 0x0244
+
+#define PTP_TX_PARSE_IP_ADDR_EN 0x0285
+#define PTP_RX_PARSE_IP_ADDR_EN 0x0245
+#define LTC_HARD_RESET 0x023F
+#define LTC_HARD_RESET_ BIT(0)
+
+#define TSU_HARD_RESET 0x02C1
+#define TSU_HARD_RESET_ BIT(0)
+
+#define PTP_CMD_CTL 0x0200
+#define PTP_CMD_CTL_PTP_DISABLE_ BIT(0)
+#define PTP_CMD_CTL_PTP_ENABLE_ BIT(1)
+#define PTP_CMD_CTL_PTP_CLOCK_READ_ BIT(3)
+#define PTP_CMD_CTL_PTP_CLOCK_LOAD_ BIT(4)
+#define PTP_CMD_CTL_PTP_LTC_STEP_SEC_ BIT(5)
+#define PTP_CMD_CTL_PTP_LTC_STEP_NSEC_ BIT(6)
+
+#define PTP_CLOCK_SET_SEC_MID 0x0206
+#define PTP_CLOCK_SET_SEC_LO 0x0207
+#define PTP_CLOCK_SET_NS_HI 0x0208
+#define PTP_CLOCK_SET_NS_LO 0x0209
+
+#define PTP_CLOCK_READ_SEC_MID 0x022A
+#define PTP_CLOCK_READ_SEC_LO 0x022B
+#define PTP_CLOCK_READ_NS_HI 0x022C
+#define PTP_CLOCK_READ_NS_LO 0x022D
+
+#define PTP_OPERATING_MODE 0x0241
+#define PTP_OPERATING_MODE_STANDALONE_ BIT(0)
+
+#define PTP_TX_MOD 0x028F
+#define PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_ BIT(12)
+#define PTP_TX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_ BIT(3)
+
+#define PTP_RX_PARSE_CONFIG 0x0242
+#define PTP_RX_PARSE_CONFIG_LAYER2_EN_ BIT(0)
+#define PTP_RX_PARSE_CONFIG_IPV4_EN_ BIT(1)
+#define PTP_RX_PARSE_CONFIG_IPV6_EN_ BIT(2)
+
+#define PTP_TX_PARSE_CONFIG 0x0282
+#define PTP_TX_PARSE_CONFIG_LAYER2_EN_ BIT(0)
+#define PTP_TX_PARSE_CONFIG_IPV4_EN_ BIT(1)
+#define PTP_TX_PARSE_CONFIG_IPV6_EN_ BIT(2)
+
+#define PTP_CLOCK_RATE_ADJ_HI 0x020C
+#define PTP_CLOCK_RATE_ADJ_LO 0x020D
+#define PTP_CLOCK_RATE_ADJ_DIR_ BIT(15)
+
+#define PTP_LTC_STEP_ADJ_HI 0x0212
+#define PTP_LTC_STEP_ADJ_LO 0x0213
+#define PTP_LTC_STEP_ADJ_DIR_ BIT(15)
+
+#define LAN8814_INTR_STS_REG 0x0033
+#define LAN8814_INTR_STS_REG_1588_TSU0_ BIT(0)
+#define LAN8814_INTR_STS_REG_1588_TSU1_ BIT(1)
+#define LAN8814_INTR_STS_REG_1588_TSU2_ BIT(2)
+#define LAN8814_INTR_STS_REG_1588_TSU3_ BIT(3)
+
+#define PTP_CAP_INFO 0x022A
+#define PTP_CAP_INFO_TX_TS_CNT_GET_(reg_val) (((reg_val) & 0x0f00) >> 8)
+#define PTP_CAP_INFO_RX_TS_CNT_GET_(reg_val) ((reg_val) & 0x000f)
+
+#define PTP_TX_EGRESS_SEC_HI 0x0296
+#define PTP_TX_EGRESS_SEC_LO 0x0297
+#define PTP_TX_EGRESS_NS_HI 0x0294
+#define PTP_TX_EGRESS_NS_LO 0x0295
+#define PTP_TX_MSG_HEADER2 0x0299
+
+#define PTP_RX_INGRESS_SEC_HI 0x0256
+#define PTP_RX_INGRESS_SEC_LO 0x0257
+#define PTP_RX_INGRESS_NS_HI 0x0254
+#define PTP_RX_INGRESS_NS_LO 0x0255
+#define PTP_RX_MSG_HEADER2 0x0259
+
+#define PTP_TSU_INT_EN 0x0200
+#define PTP_TSU_INT_EN_PTP_TX_TS_OVRFL_EN_ BIT(3)
+#define PTP_TSU_INT_EN_PTP_TX_TS_EN_ BIT(2)
+#define PTP_TSU_INT_EN_PTP_RX_TS_OVRFL_EN_ BIT(1)
+#define PTP_TSU_INT_EN_PTP_RX_TS_EN_ BIT(0)
+
+#define PTP_TSU_INT_STS 0x0201
+#define PTP_TSU_INT_STS_PTP_TX_TS_OVRFL_INT_ BIT(3)
+#define PTP_TSU_INT_STS_PTP_TX_TS_EN_ BIT(2)
+#define PTP_TSU_INT_STS_PTP_RX_TS_OVRFL_INT_ BIT(1)
+#define PTP_TSU_INT_STS_PTP_RX_TS_EN_ BIT(0)
+
/* PHY Control 1 */
#define MII_KSZPHY_CTRL_1 0x1e
#define KSZ8081_CTRL1_MDIX_STAT BIT(4)
@@ -108,6 +225,7 @@
#define MII_KSZPHY_TX_DATA_PAD_SKEW 0x106
#define PS_TO_REG 200
+#define FIFO_SIZE 8
struct kszphy_hw_stat {
const char *string;
@@ -128,7 +246,57 @@ struct kszphy_type {
bool has_rmii_ref_clk_sel;
};
+/* Shared structure between the PHYs of the same package. */
+struct lan8814_shared_priv {
+ struct phy_device *phydev;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+
+ /* Reference count of how many ports in the package have
+ * timestamping enabled
+ */
+ u8 ref;
+
+ /* Lock for ptp_clock and ref */
+ struct mutex shared_lock;
+};
+
+struct lan8814_ptp_rx_ts {
+ struct list_head list;
+ u32 seconds;
+ u32 nsec;
+ u16 seq_id;
+};
+
+struct kszphy_latencies {
+ u16 rx_10;
+ u16 tx_10;
+ u16 rx_100;
+ u16 tx_100;
+ u16 rx_1000;
+ u16 tx_1000;
+};
+
+struct kszphy_ptp_priv {
+ struct mii_timestamper mii_ts;
+ struct phy_device *phydev;
+
+ struct sk_buff_head tx_queue;
+ struct sk_buff_head rx_queue;
+
+ struct list_head rx_ts_list;
+ /* Lock for Rx ts fifo */
+ spinlock_t rx_ts_lock;
+
+ int hwts_tx_type;
+ enum hwtstamp_rx_filters rx_filter;
+ int layer;
+ int version;
+};
+
struct kszphy_priv {
+ struct kszphy_ptp_priv ptp_priv;
+ struct kszphy_latencies latencies;
const struct kszphy_type *type;
int led_mode;
bool rmii_ref_clk_sel;
@@ -136,6 +304,14 @@ struct kszphy_priv {
u64 stats[ARRAY_SIZE(kszphy_hw_stats)];
};
+static struct kszphy_latencies lan8814_latencies = {
+ .rx_10 = 0x22AA,
+ .tx_10 = 0x2E4A,
+ .rx_100 = 0x092A,
+ .tx_100 = 0x02C1,
+ .rx_1000 = 0x01AD,
+ .tx_1000 = 0x00C9,
+};
static const struct kszphy_type ksz8021_type = {
.led_mode_reg = MII_KSZPHY_CTRL_2,
.has_broadcast_disable = true,
@@ -1596,11 +1772,13 @@ static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr)
{
u32 data;
- phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
- phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
- phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
- (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
- data = phy_read(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA);
+ phy_lock_mdio_bus(phydev);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
+ (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
+ data = __phy_read(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA);
+ phy_unlock_mdio_bus(phydev);
return data;
}
@@ -1608,43 +1786,670 @@ static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr)
static int lanphy_write_page_reg(struct phy_device *phydev, int page, u16 addr,
u16 val)
{
- phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
- phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
- phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
- (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
+ phy_lock_mdio_bus(phydev);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
+ page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC);
- val = phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, val);
- if (val) {
+ val = __phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, val);
+ if (val != 0)
phydev_err(phydev, "Error: phy_write has returned error %d\n",
val);
- return val;
+ phy_unlock_mdio_bus(phydev);
+ return val;
+}
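For illustration, a hypothetical read-modify-write helper built on top of these two accessors (a sketch assuming the driver context above; the lanphy_* calls are the driver's own, but the helper itself is not part of this patch):

static int lanphy_set_page_bits(struct phy_device *phydev, int page,
				u16 addr, u16 bits)
{
	int val;

	/* Both accessors serialize on the MDIO bus lock internally */
	val = lanphy_read_page_reg(phydev, page, addr);
	if (val < 0)
		return val;

	return lanphy_write_page_reg(phydev, page, addr, val | bits);
}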
+
+static int lan8814_config_ts_intr(struct phy_device *phydev, bool enable)
+{
+ u16 val = 0;
+
+ if (enable)
+ val = PTP_TSU_INT_EN_PTP_TX_TS_EN_ |
+ PTP_TSU_INT_EN_PTP_TX_TS_OVRFL_EN_ |
+ PTP_TSU_INT_EN_PTP_RX_TS_EN_ |
+ PTP_TSU_INT_EN_PTP_RX_TS_OVRFL_EN_;
+
+ return lanphy_write_page_reg(phydev, 5, PTP_TSU_INT_EN, val);
+}
+
+static void lan8814_ptp_rx_ts_get(struct phy_device *phydev,
+ u32 *seconds, u32 *nano_seconds, u16 *seq_id)
+{
+ *seconds = lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_SEC_HI);
+ *seconds = (*seconds << 16) |
+ lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_SEC_LO);
+
+ *nano_seconds = lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_NS_HI);
+ *nano_seconds = ((*nano_seconds & 0x3fff) << 16) |
+ lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_NS_LO);
+
+ *seq_id = lanphy_read_page_reg(phydev, 5, PTP_RX_MSG_HEADER2);
+}
+
+static void lan8814_ptp_tx_ts_get(struct phy_device *phydev,
+ u32 *seconds, u32 *nano_seconds, u16 *seq_id)
+{
+ *seconds = lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_SEC_HI);
+ *seconds = *seconds << 16 |
+ lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_SEC_LO);
+
+ *nano_seconds = lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_NS_HI);
+ *nano_seconds = ((*nano_seconds & 0x3fff) << 16) |
+ lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_NS_LO);
+
+ *seq_id = lanphy_read_page_reg(phydev, 5, PTP_TX_MSG_HEADER2);
+}
+
+static int lan8814_ts_info(struct mii_timestamper *mii_ts, struct ethtool_ts_info *info)
+{
+ struct kszphy_ptp_priv *ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
+ struct phy_device *phydev = ptp_priv->phydev;
+ struct lan8814_shared_priv *shared = phydev->shared->priv;
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = ptp_clock_index(shared->ptp_clock);
+
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON) |
+ (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+static void lan8814_flush_fifo(struct phy_device *phydev, bool egress)
+{
+ int i;
+
+ for (i = 0; i < FIFO_SIZE; ++i)
+ lanphy_read_page_reg(phydev, 5,
+ egress ? PTP_TX_MSG_HEADER2 : PTP_RX_MSG_HEADER2);
+
+ /* Read to clear overflow status bit */
+ lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS);
+}
+
+static int lan8814_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
+{
+ struct kszphy_ptp_priv *ptp_priv =
+ container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
+ struct phy_device *phydev = ptp_priv->phydev;
+ struct lan8814_shared_priv *shared = phydev->shared->priv;
+ struct lan8814_ptp_rx_ts *rx_ts, *tmp;
+ struct hwtstamp_config config;
+ int txcfg = 0, rxcfg = 0;
+ int pkt_ts_enable;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ ptp_priv->hwts_tx_type = config.tx_type;
+ ptp_priv->rx_filter = config.rx_filter;
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ ptp_priv->layer = 0;
+ ptp_priv->version = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ ptp_priv->layer = PTP_CLASS_L4;
+ ptp_priv->version = PTP_CLASS_V2;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ ptp_priv->layer = PTP_CLASS_L2;
+ ptp_priv->version = PTP_CLASS_V2;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ ptp_priv->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
+ ptp_priv->version = PTP_CLASS_V2;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (ptp_priv->layer & PTP_CLASS_L2) {
+ rxcfg = PTP_RX_PARSE_CONFIG_LAYER2_EN_;
+ txcfg = PTP_TX_PARSE_CONFIG_LAYER2_EN_;
+ } else if (ptp_priv->layer & PTP_CLASS_L4) {
+ rxcfg |= PTP_RX_PARSE_CONFIG_IPV4_EN_ | PTP_RX_PARSE_CONFIG_IPV6_EN_;
+ txcfg |= PTP_TX_PARSE_CONFIG_IPV4_EN_ | PTP_TX_PARSE_CONFIG_IPV6_EN_;
}
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_PARSE_CONFIG, rxcfg);
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_PARSE_CONFIG, txcfg);
+
+ pkt_ts_enable = PTP_TIMESTAMP_EN_SYNC_ | PTP_TIMESTAMP_EN_DREQ_ |
+ PTP_TIMESTAMP_EN_PDREQ_ | PTP_TIMESTAMP_EN_PDRES_;
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_TIMESTAMP_EN, pkt_ts_enable);
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_TIMESTAMP_EN, pkt_ts_enable);
+
+ if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
+ PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+
+ if (config.rx_filter != HWTSTAMP_FILTER_NONE)
+ lan8814_config_ts_intr(ptp_priv->phydev, true);
+ else
+ lan8814_config_ts_intr(ptp_priv->phydev, false);
+
+ mutex_lock(&shared->shared_lock);
+ if (config.rx_filter != HWTSTAMP_FILTER_NONE)
+ shared->ref++;
+ else
+ shared->ref--;
+
+ if (shared->ref)
+ lanphy_write_page_reg(ptp_priv->phydev, 4, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_ENABLE_);
+ else
+ lanphy_write_page_reg(ptp_priv->phydev, 4, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_DISABLE_);
+ mutex_unlock(&shared->shared_lock);
+
+ /* In case of multiple starts and stops, these need to be cleared */
+ list_for_each_entry_safe(rx_ts, tmp, &ptp_priv->rx_ts_list, list) {
+ list_del(&rx_ts->list);
+ kfree(rx_ts);
+ }
+ skb_queue_purge(&ptp_priv->rx_queue);
+ skb_queue_purge(&ptp_priv->tx_queue);
+
+ lan8814_flush_fifo(ptp_priv->phydev, false);
+ lan8814_flush_fifo(ptp_priv->phydev, true);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
+}
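For context, configuration requests reach lan8814_hwtstamp() through the standard SIOCSHWTSTAMP ioctl. A minimal user-space sketch (interface name and filter choice are arbitrary examples; error handling elided):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int enable_ptp_timestamping(int sock)	/* e.g. socket(AF_INET, SOCK_DGRAM, 0) */
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* The kernel forwards this to the PHY's mii_ts->hwtstamp op */
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}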
+
+static void lan8814_txtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct kszphy_ptp_priv *ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
+
+ switch (ptp_priv->hwts_tx_type) {
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (ptp_msg_is_sync(skb, type)) {
+ kfree_skb(skb);
+ return;
+ }
+ fallthrough;
+ case HWTSTAMP_TX_ON:
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_queue_tail(&ptp_priv->tx_queue, skb);
+ break;
+ case HWTSTAMP_TX_OFF:
+ default:
+ kfree_skb(skb);
+ break;
+ }
+}
+
+static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
+{
+ struct ptp_header *ptp_header;
+ u32 type;
+
+ skb_push(skb, ETH_HLEN);
+ type = ptp_classify_raw(skb);
+ ptp_header = ptp_parse_header(skb, type);
+ skb_pull_inline(skb, ETH_HLEN);
+
+ *sig = (__force u16)(ntohs(ptp_header->sequence_id));
+}
+
+static bool lan8814_match_rx_ts(struct kszphy_ptp_priv *ptp_priv,
+ struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct lan8814_ptp_rx_ts *rx_ts, *tmp;
+ unsigned long flags;
+ bool ret = false;
+ u16 skb_sig;
+
+ lan8814_get_sig_rx(skb, &skb_sig);
+
+ /* Iterate over all RX timestamps and match them against the received skb */
+ spin_lock_irqsave(&ptp_priv->rx_ts_lock, flags);
+ list_for_each_entry_safe(rx_ts, tmp, &ptp_priv->rx_ts_list, list) {
+ /* Check if we found the signature we were looking for. */
+ if (memcmp(&skb_sig, &rx_ts->seq_id, sizeof(rx_ts->seq_id)))
+ continue;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds,
+ rx_ts->nsec);
+ list_del(&rx_ts->list);
+ kfree(rx_ts);
+
+ ret = true;
+ break;
+ }
+ spin_unlock_irqrestore(&ptp_priv->rx_ts_lock, flags);
+
+ if (ret)
+ netif_rx(skb);
+ return ret;
+}
+
+static bool lan8814_rxtstamp(struct mii_timestamper *mii_ts, struct sk_buff *skb, int type)
+{
+ struct kszphy_ptp_priv *ptp_priv =
+ container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
+
+ if (ptp_priv->rx_filter == HWTSTAMP_FILTER_NONE ||
+ type == PTP_CLASS_NONE)
+ return false;
+
+ if ((type & ptp_priv->version) == 0 || (type & ptp_priv->layer) == 0)
+ return false;
+
+ /* If we failed to match it, add the skb to the queue to wait for
+ * the timestamp to arrive
+ */
+ if (!lan8814_match_rx_ts(ptp_priv, skb))
+ skb_queue_tail(&ptp_priv->rx_queue, skb);
+
+ return true;
+}
+
+static void lan8814_ptp_clock_set(struct phy_device *phydev,
+ u32 seconds, u32 nano_seconds)
+{
+ u32 sec_low, sec_high, nsec_low, nsec_high;
+
+ sec_low = seconds & 0xffff;
+ sec_high = (seconds >> 16) & 0xffff;
+ nsec_low = nano_seconds & 0xffff;
+ nsec_high = (nano_seconds >> 16) & 0x3fff;
+
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_LO, sec_low);
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_MID, sec_high);
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_LO, nsec_low);
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_HI, nsec_high);
+
+ lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_LOAD_);
+}
+
+static void lan8814_ptp_clock_get(struct phy_device *phydev,
+ u32 *seconds, u32 *nano_seconds)
+{
+ lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_);
+
+ *seconds = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_MID);
+ *seconds = (*seconds << 16) |
+ lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_LO);
+
+ *nano_seconds = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_HI);
+ *nano_seconds = ((*nano_seconds & 0x3fff) << 16) |
+ lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_LO);
+}
+
+static int lan8814_ptpci_gettime64(struct ptp_clock_info *ptpci,
+ struct timespec64 *ts)
+{
+ struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = shared->phydev;
+ u32 nano_seconds;
+ u32 seconds;
+
+ mutex_lock(&shared->shared_lock);
+ lan8814_ptp_clock_get(phydev, &seconds, &nano_seconds);
+ mutex_unlock(&shared->shared_lock);
+ ts->tv_sec = seconds;
+ ts->tv_nsec = nano_seconds;
+
return 0;
}
-static int lan8814_config_init(struct phy_device *phydev)
+static int lan8814_ptpci_settime64(struct ptp_clock_info *ptpci,
+ const struct timespec64 *ts)
{
- int val;
+ struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = shared->phydev;
- /* Reset the PHY */
- val = lanphy_read_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET);
- val |= LAN8814_QSGMII_SOFT_RESET_BIT;
- lanphy_write_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET, val);
+ mutex_lock(&shared->shared_lock);
+ lan8814_ptp_clock_set(phydev, ts->tv_sec, ts->tv_nsec);
+ mutex_unlock(&shared->shared_lock);
- /* Disable ANEG with QSGMII PCS Host side */
- val = lanphy_read_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG);
- val &= ~LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA;
- lanphy_write_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG, val);
+ return 0;
+}
- /* MDI-X setting for swap A,B transmit */
- val = lanphy_read_page_reg(phydev, 2, LAN8814_ALIGN_SWAP);
- val &= ~LAN8814_ALIGN_TX_A_B_SWAP_MASK;
- val |= LAN8814_ALIGN_TX_A_B_SWAP;
- lanphy_write_page_reg(phydev, 2, LAN8814_ALIGN_SWAP, val);
+static void lan8814_ptp_clock_step(struct phy_device *phydev,
+ s64 time_step_ns)
+{
+ u32 nano_seconds_step;
+ u64 abs_time_step_ns;
+ u32 unsigned_seconds;
+ u32 nano_seconds;
+ u32 remainder;
+ s32 seconds;
+
+ if (time_step_ns > 15000000000LL) {
+ /* convert to clock set */
+ lan8814_ptp_clock_get(phydev, &unsigned_seconds, &nano_seconds);
+ unsigned_seconds += div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
+ nano_seconds += remainder;
+ if (nano_seconds >= 1000000000) {
+ unsigned_seconds++;
+ nano_seconds -= 1000000000;
+ }
+ lan8814_ptp_clock_set(phydev, unsigned_seconds, nano_seconds);
+ return;
+ } else if (time_step_ns < -15000000000LL) {
+ /* convert to clock set */
+ time_step_ns = -time_step_ns;
+
+ lan8814_ptp_clock_get(phydev, &unsigned_seconds, &nano_seconds);
+ unsigned_seconds -= div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
+ nano_seconds_step = remainder;
+ if (nano_seconds < nano_seconds_step) {
+ unsigned_seconds--;
+ nano_seconds += 1000000000;
+ }
+ nano_seconds -= nano_seconds_step;
+ lan8814_ptp_clock_set(phydev, unsigned_seconds,
+ nano_seconds);
+ return;
+ }
+
+ /* do clock step */
+ if (time_step_ns >= 0) {
+ abs_time_step_ns = (u64)time_step_ns;
+ seconds = (s32)div_u64_rem(abs_time_step_ns, 1000000000,
+ &remainder);
+ nano_seconds = remainder;
+ } else {
+ abs_time_step_ns = (u64)(-time_step_ns);
+ seconds = -((s32)div_u64_rem(abs_time_step_ns, 1000000000,
+ &remainder));
+ nano_seconds = remainder;
+ if (nano_seconds > 0) {
+ /* Subtracting nanoseconds is not allowed;
+ * convert to subtracting from the seconds
+ * and adding to the nanoseconds
+ */
+ seconds--;
+ nano_seconds = (1000000000 - nano_seconds);
+ }
+ }
+
+ if (nano_seconds > 0) {
+ /* add 8 ns to cover the likely normal increment */
+ nano_seconds += 8;
+ }
+
+ if (nano_seconds >= 1000000000) {
+ /* carry into seconds */
+ seconds++;
+ nano_seconds -= 1000000000;
+ }
+
+ while (seconds) {
+ if (seconds > 0) {
+ u32 adjustment_value = (u32)seconds;
+ u16 adjustment_value_lo, adjustment_value_hi;
+
+ if (adjustment_value > 0xF)
+ adjustment_value = 0xF;
+
+ adjustment_value_lo = adjustment_value & 0xffff;
+ adjustment_value_hi = (adjustment_value >> 16) & 0x3fff;
+
+ lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO,
+ adjustment_value_lo);
+ lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
+ PTP_LTC_STEP_ADJ_DIR_ |
+ adjustment_value_hi);
+ seconds -= ((s32)adjustment_value);
+ } else {
+ u32 adjustment_value = (u32)(-seconds);
+ u16 adjustment_value_lo, adjustment_value_hi;
+
+ if (adjustment_value > 0xF)
+ adjustment_value = 0xF;
+
+ adjustment_value_lo = adjustment_value & 0xffff;
+ adjustment_value_hi = (adjustment_value >> 16) & 0x3fff;
+
+ lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO,
+ adjustment_value_lo);
+ lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
+ adjustment_value_hi);
+ seconds += ((s32)adjustment_value);
+ }
+ lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_LTC_STEP_SEC_);
+ }
+ if (nano_seconds) {
+ u16 nano_seconds_lo;
+ u16 nano_seconds_hi;
+
+ nano_seconds_lo = nano_seconds & 0xffff;
+ nano_seconds_hi = (nano_seconds >> 16) & 0x3fff;
+
+ lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO,
+ nano_seconds_lo);
+ lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
+ PTP_LTC_STEP_ADJ_DIR_ |
+ nano_seconds_hi);
+ lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_LTC_STEP_NSEC_);
+ }
+}
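To make the sign handling above concrete, a small user-space check of the borrow path for a -1.2 s step (the same arithmetic as the seconds/nanoseconds split in lan8814_ptp_clock_step(); values chosen for illustration):

#include <stdio.h>

int main(void)
{
	long long step_ns = -1200000000LL;		/* step back by 1.2 s */
	long seconds = -(-step_ns / 1000000000);	/* -1 */
	long nsec = -step_ns % 1000000000;		/* 200000000 */

	if (nsec > 0) {
		/* Subtracting nanoseconds is not allowed, so borrow one
		 * second and add the complement instead.
		 */
		seconds--;				/* -2 */
		nsec = 1000000000 - nsec;		/* 800000000 */
	}
	printf("step = %ld s + %ld ns\n", seconds, nsec);
	return 0;
}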
+
+static int lan8814_ptpci_adjtime(struct ptp_clock_info *ptpci, s64 delta)
+{
+ struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = shared->phydev;
+
+ mutex_lock(&shared->shared_lock);
+ lan8814_ptp_clock_step(phydev, delta);
+ mutex_unlock(&shared->shared_lock);
+
+ return 0;
+}
+
+static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
+{
+ struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = shared->phydev;
+ u16 kszphy_rate_adj_lo, kszphy_rate_adj_hi;
+ bool positive = true;
+ u32 kszphy_rate_adj;
+
+ if (scaled_ppm < 0) {
+ scaled_ppm = -scaled_ppm;
+ positive = false;
+ }
+
+ kszphy_rate_adj = LAN8814_1PPM_FORMAT * (scaled_ppm >> 16);
+ kszphy_rate_adj += (LAN8814_1PPM_FORMAT * (0xffff & scaled_ppm)) >> 16;
+
+ kszphy_rate_adj_lo = kszphy_rate_adj & 0xffff;
+ kszphy_rate_adj_hi = (kszphy_rate_adj >> 16) & 0x3fff;
+
+ if (positive)
+ kszphy_rate_adj_hi |= PTP_CLOCK_RATE_ADJ_DIR_;
+
+ mutex_lock(&shared->shared_lock);
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_RATE_ADJ_HI, kszphy_rate_adj_hi);
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_RATE_ADJ_LO, kszphy_rate_adj_lo);
+ mutex_unlock(&shared->shared_lock);
return 0;
}
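The scaled_ppm argument is ppm in 16.16 fixed point; splitting the multiplication as above keeps the intermediate products within 32 bits. A quick user-space check for 5.5 ppm (expected result is roughly 5.5 * 17179):

#include <stdio.h>

#define LAN8814_1PPM_FORMAT 17179

int main(void)
{
	long scaled_ppm = (5L << 16) | 0x8000;	/* 5.5 ppm in 16.16 */
	unsigned int adj;

	adj = LAN8814_1PPM_FORMAT * (scaled_ppm >> 16);
	adj += (LAN8814_1PPM_FORMAT * (scaled_ppm & 0xffff)) >> 16;
	printf("%u\n", adj);	/* prints 94484 ~= 5.5 * 17179 */
	return 0;
}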
+static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
+{
+ struct ptp_header *ptp_header;
+ u32 type;
+
+ type = ptp_classify_raw(skb);
+ ptp_header = ptp_parse_header(skb, type);
+
+ *sig = (__force u16)(ntohs(ptp_header->sequence_id));
+}
+
+static void lan8814_dequeue_tx_skb(struct kszphy_ptp_priv *ptp_priv)
+{
+ struct phy_device *phydev = ptp_priv->phydev;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+ u32 seconds, nsec;
+ bool ret = false;
+ u16 skb_sig;
+ u16 seq_id;
+
+ lan8814_ptp_tx_ts_get(phydev, &seconds, &nsec, &seq_id);
+
+ spin_lock_irqsave(&ptp_priv->tx_queue.lock, flags);
+ skb_queue_walk_safe(&ptp_priv->tx_queue, skb, skb_tmp) {
+ lan8814_get_sig_tx(skb, &skb_sig);
+
+ if (memcmp(&skb_sig, &seq_id, sizeof(seq_id)))
+ continue;
+
+ __skb_unlink(skb, &ptp_priv->tx_queue);
+ ret = true;
+ break;
+ }
+ spin_unlock_irqrestore(&ptp_priv->tx_queue.lock, flags);
+
+ if (ret) {
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ktime_set(seconds, nsec);
+ skb_complete_tx_timestamp(skb, &shhwtstamps);
+ }
+}
+
+static void lan8814_get_tx_ts(struct kszphy_ptp_priv *ptp_priv)
+{
+ struct phy_device *phydev = ptp_priv->phydev;
+ u32 reg;
+
+ do {
+ lan8814_dequeue_tx_skb(ptp_priv);
+
+ /* If other timestamps are available in the FIFO,
+ * process them.
+ */
+ reg = lanphy_read_page_reg(phydev, 5, PTP_CAP_INFO);
+ } while (PTP_CAP_INFO_TX_TS_CNT_GET_(reg) > 0);
+}
+
+static bool lan8814_match_skb(struct kszphy_ptp_priv *ptp_priv,
+ struct lan8814_ptp_rx_ts *rx_ts)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+ bool ret = false;
+ u16 skb_sig;
+
+ spin_lock_irqsave(&ptp_priv->rx_queue.lock, flags);
+ skb_queue_walk_safe(&ptp_priv->rx_queue, skb, skb_tmp) {
+ lan8814_get_sig_rx(skb, &skb_sig);
+
+ if (memcmp(&skb_sig, &rx_ts->seq_id, sizeof(rx_ts->seq_id)))
+ continue;
+
+ __skb_unlink(skb, &ptp_priv->rx_queue);
+
+ ret = true;
+ break;
+ }
+ spin_unlock_irqrestore(&ptp_priv->rx_queue.lock, flags);
+
+ if (ret) {
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
+ netif_rx(skb);
+ }
+
+ return ret;
+}
+
+static void lan8814_get_rx_ts(struct kszphy_ptp_priv *ptp_priv)
+{
+ struct phy_device *phydev = ptp_priv->phydev;
+ struct lan8814_ptp_rx_ts *rx_ts;
+ unsigned long flags;
+ u32 reg;
+
+ do {
+ rx_ts = kzalloc(sizeof(*rx_ts), GFP_KERNEL);
+ if (!rx_ts)
+ return;
+
+ lan8814_ptp_rx_ts_get(phydev, &rx_ts->seconds, &rx_ts->nsec,
+ &rx_ts->seq_id);
+
+ /* If we failed to match an skb, add the timestamp to the list
+ * to wait for the frame to arrive
+ */
+ if (!lan8814_match_skb(ptp_priv, rx_ts)) {
+ spin_lock_irqsave(&ptp_priv->rx_ts_lock, flags);
+ list_add(&rx_ts->list, &ptp_priv->rx_ts_list);
+ spin_unlock_irqrestore(&ptp_priv->rx_ts_lock, flags);
+ } else {
+ kfree(rx_ts);
+ }
+
+ /* If other timestamps are available in the FIFO,
+ * process them.
+ */
+ reg = lanphy_read_page_reg(phydev, 5, PTP_CAP_INFO);
+ } while (PTP_CAP_INFO_RX_TS_CNT_GET_(reg) > 0);
+}
+
+static void lan8814_handle_ptp_interrupt(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+ struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
+ u16 status;
+
+ status = lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS);
+ if (status & PTP_TSU_INT_STS_PTP_TX_TS_EN_)
+ lan8814_get_tx_ts(ptp_priv);
+
+ if (status & PTP_TSU_INT_STS_PTP_RX_TS_EN_)
+ lan8814_get_rx_ts(ptp_priv);
+
+ if (status & PTP_TSU_INT_STS_PTP_TX_TS_OVRFL_INT_) {
+ lan8814_flush_fifo(phydev, true);
+ skb_queue_purge(&ptp_priv->tx_queue);
+ }
+
+ if (status & PTP_TSU_INT_STS_PTP_RX_TS_OVRFL_INT_) {
+ lan8814_flush_fifo(phydev, false);
+ skb_queue_purge(&ptp_priv->rx_queue);
+ }
+}
+
static int lan8804_config_init(struct phy_device *phydev)
{
int val;
@@ -1666,17 +2471,31 @@ static int lan8804_config_init(struct phy_device *phydev)
static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
{
+ u16 tsu_irq_status;
int irq_status;
irq_status = phy_read(phydev, LAN8814_INTS);
- if (irq_status < 0)
- return IRQ_NONE;
+ if (irq_status > 0 && (irq_status & LAN8814_INT_LINK))
+ phy_trigger_machine(phydev);
- if (!(irq_status & LAN8814_INT_LINK))
+ if (irq_status < 0) {
+ phy_error(phydev);
return IRQ_NONE;
+ }
- phy_trigger_machine(phydev);
+ while (1) {
+ tsu_irq_status = lanphy_read_page_reg(phydev, 4,
+ LAN8814_INTR_STS_REG);
+ if (tsu_irq_status > 0 &&
+ (tsu_irq_status & (LAN8814_INTR_STS_REG_1588_TSU0_ |
+ LAN8814_INTR_STS_REG_1588_TSU1_ |
+ LAN8814_INTR_STS_REG_1588_TSU2_ |
+ LAN8814_INTR_STS_REG_1588_TSU3_)))
+ lan8814_handle_ptp_interrupt(phydev);
+ else
+ break;
+ }
return IRQ_HANDLED;
}
@@ -1716,6 +2535,223 @@ static int lan8814_config_intr(struct phy_device *phydev)
return err;
}
+static void lan8814_ptp_init(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+ struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
+ u32 temp;
+
+ lanphy_write_page_reg(phydev, 5, TSU_HARD_RESET, TSU_HARD_RESET_);
+
+ temp = lanphy_read_page_reg(phydev, 5, PTP_TX_MOD);
+ temp |= PTP_TX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_;
+ lanphy_write_page_reg(phydev, 5, PTP_TX_MOD, temp);
+
+ temp = lanphy_read_page_reg(phydev, 5, PTP_RX_MOD);
+ temp |= PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_;
+ lanphy_write_page_reg(phydev, 5, PTP_RX_MOD, temp);
+
+ lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_CONFIG, 0);
+ lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_CONFIG, 0);
+
+ /* Remove the default register configs related to L2 and IP */
+ lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_L2_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_L2_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_IP_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_IP_ADDR_EN, 0);
+
+ skb_queue_head_init(&ptp_priv->tx_queue);
+ skb_queue_head_init(&ptp_priv->rx_queue);
+ INIT_LIST_HEAD(&ptp_priv->rx_ts_list);
+ spin_lock_init(&ptp_priv->rx_ts_lock);
+
+ ptp_priv->phydev = phydev;
+
+ ptp_priv->mii_ts.rxtstamp = lan8814_rxtstamp;
+ ptp_priv->mii_ts.txtstamp = lan8814_txtstamp;
+ ptp_priv->mii_ts.hwtstamp = lan8814_hwtstamp;
+ ptp_priv->mii_ts.ts_info = lan8814_ts_info;
+
+ phydev->mii_ts = &ptp_priv->mii_ts;
+}
+
+static int lan8814_ptp_probe_once(struct phy_device *phydev)
+{
+ struct lan8814_shared_priv *shared = phydev->shared->priv;
+
+ /* Initialise the shared lock for the clock */
+ mutex_init(&shared->shared_lock);
+
+ shared->ptp_clock_info.owner = THIS_MODULE;
+ snprintf(shared->ptp_clock_info.name, 30, "%s", phydev->drv->name);
+ shared->ptp_clock_info.max_adj = 31249999;
+ shared->ptp_clock_info.n_alarm = 0;
+ shared->ptp_clock_info.n_ext_ts = 0;
+ shared->ptp_clock_info.n_pins = 0;
+ shared->ptp_clock_info.pps = 0;
+ shared->ptp_clock_info.pin_config = NULL;
+ shared->ptp_clock_info.adjfine = lan8814_ptpci_adjfine;
+ shared->ptp_clock_info.adjtime = lan8814_ptpci_adjtime;
+ shared->ptp_clock_info.gettime64 = lan8814_ptpci_gettime64;
+ shared->ptp_clock_info.settime64 = lan8814_ptpci_settime64;
+ shared->ptp_clock_info.getcrosststamp = NULL;
+
+ shared->ptp_clock = ptp_clock_register(&shared->ptp_clock_info,
+ &phydev->mdio.dev);
+ if (IS_ERR_OR_NULL(shared->ptp_clock)) {
+ phydev_err(phydev, "ptp_clock_register failed %lu\n",
+ PTR_ERR(shared->ptp_clock));
+ return -EINVAL;
+ }
+
+ phydev_dbg(phydev, "successfully registered ptp clock\n");
+
+ shared->phydev = phydev;
+
+ /* EP.4 is shared between all the PHYs in the package and can
+ * be accessed by any of them
+ */
+ lanphy_write_page_reg(phydev, 4, LTC_HARD_RESET, LTC_HARD_RESET_);
+ lanphy_write_page_reg(phydev, 4, PTP_OPERATING_MODE,
+ PTP_OPERATING_MODE_STANDALONE_);
+
+ return 0;
+}
+
+static int lan8814_read_status(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+ struct kszphy_latencies *latencies = &priv->latencies;
+ int err;
+ int regval;
+
+ err = genphy_read_status(phydev);
+ if (err)
+ return err;
+
+ switch (phydev->speed) {
+ case SPEED_1000:
+ lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_1000,
+ latencies->rx_1000);
+ lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_1000,
+ latencies->tx_1000);
+ break;
+ case SPEED_100:
+ lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_100,
+ latencies->rx_100);
+ lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_100,
+ latencies->tx_100);
+ break;
+ case SPEED_10:
+ lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_10,
+ latencies->rx_10);
+ lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_10,
+ latencies->tx_10);
+ break;
+ default:
+ break;
+ }
+
+ /* Make sure the PHY is not broken. Read idle error count,
+ * and reset the PHY if it is maxed out.
+ */
+ regval = phy_read(phydev, MII_STAT1000);
+ if ((regval & 0xFF) == 0xFF) {
+ phy_init_hw(phydev);
+ phydev->link = 0;
+ if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
+ phydev->drv->config_intr(phydev);
+ return genphy_config_aneg(phydev);
+ }
+
+ return 0;
+}
+
+static int lan8814_config_init(struct phy_device *phydev)
+{
+ int val;
+
+ /* Reset the PHY */
+ val = lanphy_read_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET);
+ val |= LAN8814_QSGMII_SOFT_RESET_BIT;
+ lanphy_write_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET, val);
+
+ /* Disable ANEG with QSGMII PCS Host side */
+ val = lanphy_read_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG);
+ val &= ~LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA;
+ lanphy_write_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG, val);
+
+ /* MDI-X setting for swap A,B transmit */
+ val = lanphy_read_page_reg(phydev, 2, LAN8814_ALIGN_SWAP);
+ val &= ~LAN8814_ALIGN_TX_A_B_SWAP_MASK;
+ val |= LAN8814_ALIGN_TX_A_B_SWAP;
+ lanphy_write_page_reg(phydev, 2, LAN8814_ALIGN_SWAP, val);
+
+ return 0;
+}
+
+static void lan8814_parse_latency(struct phy_device *phydev)
+{
+ const struct device_node *np = phydev->mdio.dev.of_node;
+ struct kszphy_priv *priv = phydev->priv;
+ struct kszphy_latencies *latency = &priv->latencies;
+ u32 val;
+
+ if (!of_property_read_u32(np, "lan8814,latency_rx_10", &val))
+ latency->rx_10 = val;
+ if (!of_property_read_u32(np, "lan8814,latency_tx_10", &val))
+ latency->tx_10 = val;
+ if (!of_property_read_u32(np, "lan8814,latency_rx_100", &val))
+ latency->rx_100 = val;
+ if (!of_property_read_u32(np, "lan8814,latency_tx_100", &val))
+ latency->tx_100 = val;
+ if (!of_property_read_u32(np, "lan8814,latency_rx_1000", &val))
+ latency->rx_1000 = val;
+ if (!of_property_read_u32(np, "lan8814,latency_tx_1000", &val))
+ latency->tx_1000 = val;
+}
+
+static int lan8814_probe(struct phy_device *phydev)
+{
+ const struct device_node *np = phydev->mdio.dev.of_node;
+ struct kszphy_priv *priv;
+ u16 addr;
+ int err;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->led_mode = -1;
+
+ priv->latencies = lan8814_latencies;
+
+ phydev->priv = priv;
+
+ if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) ||
+ !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING) ||
+ of_property_read_bool(np, "lan8814,ignore-ts"))
+ return 0;
+
+ /* The register read below returns the strap-in value for the
+ * starting PHY address of the package
+ */
+ addr = lanphy_read_page_reg(phydev, 4, 0) & 0x1F;
+ devm_phy_package_join(&phydev->mdio.dev, phydev,
+ addr, sizeof(struct lan8814_shared_priv));
+
+ if (phy_package_init_once(phydev)) {
+ err = lan8814_ptp_probe_once(phydev);
+ if (err)
+ return err;
+ }
+
+ lan8814_parse_latency(phydev);
+ lan8814_ptp_init(phydev);
+
+ return 0;
+}
+
static struct phy_driver ksphy_driver[] = {
{
.phy_id = PHY_ID_KS8737,
@@ -1890,10 +2926,9 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip INDY Gigabit Quad PHY",
.config_init = lan8814_config_init,
- .driver_data = &ksz9021_type,
- .probe = kszphy_probe,
+ .probe = lan8814_probe,
.soft_reset = genphy_soft_reset,
- .read_status = ksz9031_read_status,
+ .read_status = lan8814_read_status,
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index bc50224d43dd..389df3f4293c 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -8,11 +8,17 @@
#include <linux/phy.h>
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
+#include <linux/bitfield.h>
+
+#define PHY_ID_LAN87XX 0x0007c150
+#define PHY_ID_LAN937X 0x0007c180
/* External Register Control Register */
#define LAN87XX_EXT_REG_CTL (0x14)
#define LAN87XX_EXT_REG_CTL_RD_CTL (0x1000)
#define LAN87XX_EXT_REG_CTL_WR_CTL (0x0800)
+#define LAN87XX_REG_BANK_SEL_MASK GENMASK(10, 8)
+#define LAN87XX_REG_ADDR_MASK GENMASK(7, 0)
/* External Register Read Data Register */
#define LAN87XX_EXT_REG_RD_DATA (0x15)
@@ -37,6 +43,7 @@
#define PHYACC_ATTR_MODE_READ 0
#define PHYACC_ATTR_MODE_WRITE 1
#define PHYACC_ATTR_MODE_MODIFY 2
+#define PHYACC_ATTR_MODE_POLL 3
#define PHYACC_ATTR_BANK_SMI 0
#define PHYACC_ATTR_BANK_MISC 1
@@ -50,8 +57,33 @@
#define LAN87XX_CABLE_TEST_OPEN 1
#define LAN87XX_CABLE_TEST_SAME_SHORT 2
+/* T1 Registers */
+#define T1_AFE_PORT_CFG1_REG 0x0B
+#define T1_POWER_DOWN_CONTROL_REG 0x1A
+#define T1_SLV_FD_MULT_CFG_REG 0x18
+#define T1_CDR_CFG_PRE_LOCK_REG 0x05
+#define T1_CDR_CFG_POST_LOCK_REG 0x06
+#define T1_LCK_STG2_MUFACT_CFG_REG 0x1A
+#define T1_LCK_STG3_MUFACT_CFG_REG 0x1B
+#define T1_POST_LCK_MUFACT_CFG_REG 0x1C
+#define T1_TX_RX_FIFO_CFG_REG 0x02
+#define T1_TX_LPF_FIR_CFG_REG 0x55
+#define T1_SQI_CONFIG_REG 0x2E
+#define T1_MDIO_CONTROL2_REG 0x10
+#define T1_INTERRUPT_SOURCE_REG 0x18
+#define T1_INTERRUPT2_SOURCE_REG 0x08
+#define T1_EQ_FD_STG1_FRZ_CFG 0x69
+#define T1_EQ_FD_STG2_FRZ_CFG 0x6A
+#define T1_EQ_FD_STG3_FRZ_CFG 0x6B
+#define T1_EQ_FD_STG4_FRZ_CFG 0x6C
+#define T1_EQ_WT_FD_LCK_FRZ_CFG 0x6D
+#define T1_PST_EQ_LCK_STG1_FRZ_CFG 0x6E
+
+#define T1_MODE_STAT_REG 0x11
+#define T1_LINK_UP_MSK BIT(0)
+
#define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>"
-#define DRIVER_DESC "Microchip LAN87XX T1 PHY driver"
+#define DRIVER_DESC "Microchip LAN87XX/LAN937x T1 PHY driver"
struct access_ereg_val {
u8 mode;
@@ -61,6 +93,37 @@ struct access_ereg_val {
u16 mask;
};
+static int lan937x_dsp_workaround(struct phy_device *phydev, u16 ereg, u8 bank)
+{
+ u8 prev_bank;
+ int rc = 0;
+ u16 val;
+
+ mutex_lock(&phydev->lock);
+ /* Read previous selected bank */
+ rc = phy_read(phydev, LAN87XX_EXT_REG_CTL);
+ if (rc < 0)
+ goto out_unlock;
+
+ /* store the prev_bank */
+ prev_bank = FIELD_GET(LAN87XX_REG_BANK_SEL_MASK, rc);
+
+ if (bank != prev_bank && bank == PHYACC_ATTR_BANK_DSP) {
+ val = ereg & ~LAN87XX_REG_ADDR_MASK;
+
+ val &= ~LAN87XX_EXT_REG_CTL_WR_CTL;
+ val |= LAN87XX_EXT_REG_CTL_RD_CTL;
+
+ /* Access twice for DSP bank change; the first is a dummy access */
+ rc = phy_write(phydev, LAN87XX_EXT_REG_CTL, val);
+ }
+
+out_unlock:
+ mutex_unlock(&phydev->lock);
+
+ return rc;
+}
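To illustrate the LAN87XX_EXT_REG_CTL layout this workaround decodes (bits 10:8 select the bank, bits 7:0 the in-bank offset, with the read/write strobes above them), a stand-alone sketch with made-up example values:

#include <stdio.h>

#define RD_CTL	0x1000		/* LAN87XX_EXT_REG_CTL_RD_CTL */
#define BANK(b)	((b) << 8)	/* LAN87XX_REG_BANK_SEL_MASK field */

int main(void)
{
	/* e.g. read offset 0x18 of the MISC bank (bank 1) */
	unsigned short ereg = RD_CTL | BANK(1) | 0x18;

	printf("0x%04x\n", ereg);	/* prints 0x1118 */
	return 0;
}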
+
static int access_ereg(struct phy_device *phydev, u8 mode, u8 bank,
u8 offset, u16 val)
{
@@ -89,6 +152,13 @@ static int access_ereg(struct phy_device *phydev, u8 mode, u8 bank,
ereg |= (bank << 8) | offset;
+ /* DSP bank access workaround for lan937x */
+ if (phydev->phy_id == PHY_ID_LAN937X) {
+ rc = lan937x_dsp_workaround(phydev, ereg, bank);
+ if (rc < 0)
+ return rc;
+ }
+
rc = phy_write(phydev, LAN87XX_EXT_REG_CTL, ereg);
if (rc < 0)
return rc;
@@ -117,6 +187,15 @@ static int access_ereg_modify_changed(struct phy_device *phydev,
return rc;
}
+static int access_smi_poll_timeout(struct phy_device *phydev,
+ u8 offset, u16 mask, u16 clr)
+{
+ int val;
+
+ return phy_read_poll_timeout(phydev, offset, val, (val & mask) == clr,
+ 150, 30000, true);
+}
+
static int lan87xx_config_rgmii_delay(struct phy_device *phydev)
{
int rc;
@@ -157,68 +236,159 @@ static int lan87xx_config_rgmii_delay(struct phy_device *phydev)
static int lan87xx_phy_init(struct phy_device *phydev)
{
static const struct access_ereg_val init[] = {
- /* TX Amplitude = 5 */
- {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_AFE, 0x0B,
- 0x000A, 0x001E},
- /* Clear SMI interrupts */
- {PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_SMI, 0x18,
- 0, 0},
- /* Clear MISC interrupts */
- {PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_MISC, 0x08,
- 0, 0},
- /* Turn on TC10 Ring Oscillator (ROSC) */
- {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_MISC, 0x20,
- 0x0020, 0x0020},
- /* WUR Detect Length to 1.2uS, LPC Detect Length to 1.09uS */
- {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_PCS, 0x20,
- 0x283C, 0},
- /* Wake_In Debounce Length to 39uS, Wake_Out Length to 79uS */
- {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x21,
- 0x274F, 0},
- /* Enable Auto Wake Forward to Wake_Out, ROSC on, Sleep,
- * and Wake_In to wake PHY
- */
- {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x20,
- 0x80A7, 0},
- /* Enable WUP Auto Fwd, Enable Wake on MDI, Wakeup Debouncer
- * to 128 uS
- */
- {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x24,
- 0xF110, 0},
- /* Enable HW Init */
- {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_SMI, 0x1A,
- 0x0100, 0x0100},
+ /* TXPD/TXAMP6 Configs */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_AFE,
+ T1_AFE_PORT_CFG1_REG, 0x002D, 0 },
+ /* HW_Init Hi and Force_ED */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
+ T1_POWER_DOWN_CONTROL_REG, 0x0308, 0 },
+ /* Equalizer Full Duplex Freeze - T1 Slave */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_EQ_FD_STG1_FRZ_CFG, 0x0002, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_EQ_FD_STG2_FRZ_CFG, 0x0002, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_EQ_FD_STG3_FRZ_CFG, 0x0002, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_EQ_FD_STG4_FRZ_CFG, 0x0002, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_EQ_WT_FD_LCK_FRZ_CFG, 0x0002, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_PST_EQ_LCK_STG1_FRZ_CFG, 0x0002, 0 },
+ /* Slave Full Duplex Multi Configs */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_SLV_FD_MULT_CFG_REG, 0x0D53, 0 },
+ /* CDR Pre and Post Lock Configs */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_CDR_CFG_PRE_LOCK_REG, 0x0AB2, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_CDR_CFG_POST_LOCK_REG, 0x0AB3, 0 },
+ /* Lock Stage 2-3 Multi Factor Config */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_LCK_STG2_MUFACT_CFG_REG, 0x0AEA, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_LCK_STG3_MUFACT_CFG_REG, 0x0AEB, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_POST_LCK_MUFACT_CFG_REG, 0x0AEB, 0 },
+ /* Pointer delay */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_RX_FIFO_CFG_REG, 0x1C00, 0 },
+ /* Tx FIR edits */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1000, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1861, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1061, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1922, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1122, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1983, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1183, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1944, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1144, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x18c5, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x10c5, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1846, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1046, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1807, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1007, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1808, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1008, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1809, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1009, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x180A, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x100A, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x180B, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x100B, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x180C, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x100C, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x180D, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x100D, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x180E, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x100E, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x180F, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x100F, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1810, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1010, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1811, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1011, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1000, 0 },
+ /* SQI enable */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_SQI_CONFIG_REG, 0x9572, 0 },
+ /* Flag LPS and WUR as idle errors */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
+ T1_MDIO_CONTROL2_REG, 0x0014, 0 },
+ /* HW_Init toggle, undo force ED, TXPD off */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
+ T1_POWER_DOWN_CONTROL_REG, 0x0200, 0 },
+ /* Reset PCS to trigger hardware initialization */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
+ T1_MDIO_CONTROL2_REG, 0x0094, 0 },
+ /* Poll till Hardware is initialized */
+ { PHYACC_ATTR_MODE_POLL, PHYACC_ATTR_BANK_SMI,
+ T1_MDIO_CONTROL2_REG, 0x0080, 0 },
+ /* Tx AMP - 0x06 */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_AFE,
+ T1_AFE_PORT_CFG1_REG, 0x000C, 0 },
+ /* Read INTERRUPT_SOURCE Register */
+ { PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_SMI,
+ T1_INTERRUPT_SOURCE_REG, 0, 0 },
+ /* Read INTERRUPT_SOURCE Register */
+ { PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_MISC,
+ T1_INTERRUPT2_SOURCE_REG, 0, 0 },
+ /* HW_Init Hi */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
+ T1_POWER_DOWN_CONTROL_REG, 0x0300, 0 },
};
int rc, i;
- /* Start manual initialization procedures in Managed Mode */
- rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI,
- 0x1a, 0x0000, 0x0100);
+ /* PHY soft reset */
+ rc = genphy_soft_reset(phydev);
if (rc < 0)
return rc;
- /* Soft Reset the SMI block */
- rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI,
- 0x00, 0x8000, 0x8000);
- if (rc < 0)
- return rc;
-
- /* Check to see if the self-clearing bit is cleared */
- usleep_range(1000, 2000);
- rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
- PHYACC_ATTR_BANK_SMI, 0x00, 0);
- if (rc < 0)
- return rc;
- if ((rc & 0x8000) != 0)
- return -ETIMEDOUT;
-
/* PHY Initialization */
for (i = 0; i < ARRAY_SIZE(init); i++) {
- if (init[i].mode == PHYACC_ATTR_MODE_MODIFY) {
- rc = access_ereg_modify_changed(phydev, init[i].bank,
- init[i].offset,
- init[i].val,
- init[i].mask);
+ if (init[i].mode == PHYACC_ATTR_MODE_POLL &&
+ init[i].bank == PHYACC_ATTR_BANK_SMI) {
+ rc = access_smi_poll_timeout(phydev,
+ init[i].offset,
+ init[i].val,
+ init[i].mask);
} else {
rc = access_ereg(phydev, init[i].mode, init[i].bank,
init[i].offset, init[i].val);
@@ -504,22 +674,86 @@ static int lan87xx_cable_test_get_status(struct phy_device *phydev,
return 0;
}
+static int lan87xx_read_status(struct phy_device *phydev)
+{
+ int rc = 0;
+
+ rc = phy_read(phydev, T1_MODE_STAT_REG);
+ if (rc < 0)
+ return rc;
+
+ if (rc & T1_LINK_UP_MSK)
+ phydev->link = 1;
+ else
+ phydev->link = 0;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ rc = genphy_read_master_slave(phydev);
+ if (rc < 0)
+ return rc;
+
+ rc = genphy_read_status_fixed(phydev);
+ if (rc < 0)
+ return rc;
+
+ return rc;
+}
+
+static int lan87xx_config_aneg(struct phy_device *phydev)
+{
+ u16 ctl = 0;
+ int rc;
+
+ switch (phydev->master_slave_set) {
+ case MASTER_SLAVE_CFG_MASTER_FORCE:
+ ctl |= CTL1000_AS_MASTER;
+ break;
+ case MASTER_SLAVE_CFG_SLAVE_FORCE:
+ break;
+ case MASTER_SLAVE_CFG_UNKNOWN:
+ case MASTER_SLAVE_CFG_UNSUPPORTED:
+ return 0;
+ default:
+ phydev_warn(phydev, "Unsupported Master/Slave mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ rc = phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
+ if (rc == 1)
+ rc = genphy_soft_reset(phydev);
+
+ return rc;
+}
+
static struct phy_driver microchip_t1_phy_driver[] = {
{
- .phy_id = 0x0007c150,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX),
.name = "Microchip LAN87xx T1",
.flags = PHY_POLL_CABLE_TEST,
-
.features = PHY_BASIC_T1_FEATURES,
-
.config_init = lan87xx_config_init,
-
.config_intr = lan87xx_phy_config_intr,
.handle_interrupt = lan87xx_handle_interrupt,
-
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .config_aneg = lan87xx_config_aneg,
+ .read_status = lan87xx_read_status,
+ .cable_test_start = lan87xx_cable_test_start,
+ .cable_test_get_status = lan87xx_cable_test_get_status,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN937X),
+ .name = "Microchip LAN937x T1",
+ .features = PHY_BASIC_T1_FEATURES,
+ .config_init = lan87xx_config_init,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = lan87xx_config_aneg,
+ .read_status = lan87xx_read_status,
.cable_test_start = lan87xx_cable_test_start,
.cable_test_get_status = lan87xx_cable_test_get_status,
}
@@ -528,7 +762,8 @@ static struct phy_driver microchip_t1_phy_driver[] = {
module_phy_driver(microchip_t1_phy_driver);
static struct mdio_device_id __maybe_unused microchip_t1_tbl[] = {
- { 0x0007c150, 0xfffffff0 },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN937X) },
{ }
};
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index ebfeeb3c67c1..7e3017e7a1c0 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -2685,3 +2685,6 @@ MODULE_DEVICE_TABLE(mdio, vsc85xx_tbl);
MODULE_DESCRIPTION("Microsemi VSC85xx PHY driver");
MODULE_AUTHOR("Nagaraju Lakkaraju");
MODULE_LICENSE("Dual MIT/GPL");
+
+MODULE_FIRMWARE(MSCC_VSC8584_REVB_INT8051_FW);
+MODULE_FIRMWARE(MSCC_VSC8574_REVB_INT8051_FW);
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
index 34f829845d06..cf728bfd83e2 100644
--- a/drivers/net/phy/mscc/mscc_ptp.c
+++ b/drivers/net/phy/mscc/mscc_ptp.c
@@ -1212,7 +1212,7 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
ts.tv_sec--;
shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
- netif_rx_ni(skb);
+ netif_rx(skb);
return true;
}
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 06fdbae509a7..047c581457e3 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -478,7 +478,7 @@ static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
shhwtstamps_rx = skb_hwtstamps(skb);
shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
- netif_rx_ni(skb);
+ netif_rx(skb);
}
if (priv->extts) {
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 271fc01f7f7f..2001f3329133 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -243,7 +243,7 @@ size_t phy_speeds(unsigned int *speeds, size_t size,
return count;
}
-static int __set_linkmode_max_speed(u32 max_speed, unsigned long *addr)
+static void __set_linkmode_max_speed(u32 max_speed, unsigned long *addr)
{
const struct phy_setting *p;
int i;
@@ -254,13 +254,11 @@ static int __set_linkmode_max_speed(u32 max_speed, unsigned long *addr)
else
break;
}
-
- return 0;
}
-static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
+static void __set_phy_supported(struct phy_device *phydev, u32 max_speed)
{
- return __set_linkmode_max_speed(max_speed, phydev->supported);
+ __set_linkmode_max_speed(max_speed, phydev->supported);
}
/**
@@ -273,17 +271,11 @@ static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
* is connected to a 1G PHY. This function allows the MAC to indicate its
* maximum speed, and so limit what the PHY will advertise.
*/
-int phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
+void phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
{
- int err;
-
- err = __set_phy_supported(phydev, max_speed);
- if (err)
- return err;
+ __set_phy_supported(phydev, max_speed);
phy_advertise_supported(phydev);
-
- return 0;
}
EXPORT_SYMBOL(phy_set_max_speed);
@@ -440,7 +432,9 @@ int phy_speed_down_core(struct phy_device *phydev)
if (min_common_speed == SPEED_UNKNOWN)
return -EINVAL;
- return __set_linkmode_max_speed(min_common_speed, phydev->advertising);
+ __set_linkmode_max_speed(min_common_speed, phydev->advertising);
+
+ return 0;
}
static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad,
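
Since phy_set_max_speed() can no longer fail after this change, MAC drivers may drop their error handling around it. A hedged usage sketch; example_mac_attach, example_adjust_link and the RMII interface mode are illustrative assumptions, not taken from this patch:

static void example_adjust_link(struct net_device *ndev)
{
	/* hypothetical link-change handler */
}

static int example_mac_attach(struct net_device *ndev,
			      struct phy_device *phydev)
{
	/* MAC tops out at 100 Mb/s even if the PHY could do 1G */
	phy_set_max_speed(phydev, SPEED_100);

	return phy_connect_direct(ndev, phydev, example_adjust_link,
				  PHY_INTERFACE_MODE_RMII);
}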
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ce0bb5951b81..8406ac739def 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -2051,17 +2051,11 @@ static int genphy_setup_master_slave(struct phy_device *phydev)
CTL1000_PREFER_MASTER), ctl);
}
-static int genphy_read_master_slave(struct phy_device *phydev)
+int genphy_read_master_slave(struct phy_device *phydev)
{
int cfg, state;
int val;
- if (!phydev->is_gigabit_capable) {
- phydev->master_slave_get = MASTER_SLAVE_CFG_UNSUPPORTED;
- phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED;
- return 0;
- }
-
phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
@@ -2102,6 +2096,7 @@ static int genphy_read_master_slave(struct phy_device *phydev)
return 0;
}
+EXPORT_SYMBOL(genphy_read_master_slave);
/**
* genphy_restart_aneg - Enable and Restart Autonegotiation
@@ -2396,14 +2391,18 @@ int genphy_read_status(struct phy_device *phydev)
if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
return 0;
+ phydev->master_slave_get = MASTER_SLAVE_CFG_UNSUPPORTED;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED;
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
phydev->pause = 0;
phydev->asym_pause = 0;
- err = genphy_read_master_slave(phydev);
- if (err < 0)
- return err;
+ if (phydev->is_gigabit_capable) {
+ err = genphy_read_master_slave(phydev);
+ if (err < 0)
+ return err;
+ }
err = genphy_read_lpa(phydev);
if (err < 0)
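
With the is_gigabit_capable check moved into genphy_read_status(), the newly exported genphy_read_master_slave() becomes callable from drivers for PHYs that are not gigabit-capable, such as the T1 devices earlier in this patch. A minimal read_status() sketch under that assumption:

static int example_t1_read_status(struct phy_device *phydev)
{
	int err;

	err = genphy_update_link(phydev);
	if (err)
		return err;

	/* Safe now even though the PHY is not gigabit-capable */
	return genphy_read_master_slave(phydev);
}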
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 420201858564..06943889d747 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -74,6 +74,7 @@ struct phylink {
struct work_struct resolve;
bool mac_link_dropped;
+ bool using_mac_select_pcs;
struct sfp_bus *sfp_bus;
bool sfp_may_have_phy;
@@ -132,17 +133,6 @@ void phylink_set_port_modes(unsigned long *mask)
}
EXPORT_SYMBOL_GPL(phylink_set_port_modes);
-void phylink_set_10g_modes(unsigned long *mask)
-{
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseER_Full);
-}
-EXPORT_SYMBOL_GPL(phylink_set_10g_modes);
-
static int phylink_is_empty_linkmode(const unsigned long *linkmode)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = { 0, };
@@ -427,7 +417,7 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl,
int ret;
/* Get the PCS for this interface mode */
- if (pl->mac_ops->mac_select_pcs) {
+ if (pl->using_mac_select_pcs) {
pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface);
if (IS_ERR(pcs))
return PTR_ERR(pcs);
@@ -802,7 +792,7 @@ static void phylink_major_config(struct phylink *pl, bool restart,
phylink_dbg(pl, "major config %s\n", phy_modes(state->interface));
- if (pl->mac_ops->mac_select_pcs) {
+ if (pl->using_mac_select_pcs) {
pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface);
if (IS_ERR(pcs)) {
phylink_err(pl,
@@ -825,8 +815,18 @@ static void phylink_major_config(struct phylink *pl, bool restart,
/* If we have a new PCS, switch to the new PCS after preparing the MAC
* for the change.
*/
- if (pcs)
- phylink_set_pcs(pl, pcs);
+ if (pcs) {
+ pl->pcs = pcs;
+ pl->pcs_ops = pcs->ops;
+
+ if (!pl->phylink_disable_state &&
+ pl->cfg_link_an_mode == MLO_AN_INBAND) {
+ if (pcs->poll)
+ mod_timer(&pl->link_poll, jiffies + HZ);
+ else
+ del_timer(&pl->link_poll);
+ }
+ }
phylink_mac_config(pl, state);
@@ -1182,9 +1182,8 @@ static int phylink_register_sfp(struct phylink *pl,
bus = sfp_bus_find_fwnode(fwnode);
if (IS_ERR(bus)) {
- ret = PTR_ERR(bus);
- phylink_err(pl, "unable to attach SFP bus: %d\n", ret);
- return ret;
+ phylink_err(pl, "unable to attach SFP bus: %pe\n", bus);
+ return PTR_ERR(bus);
}
pl->sfp_bus = bus;
@@ -1216,11 +1215,17 @@ struct phylink *phylink_create(struct phylink_config *config,
phy_interface_t iface,
const struct phylink_mac_ops *mac_ops)
{
+ bool using_mac_select_pcs = false;
struct phylink *pl;
int ret;
- /* Validate the supplied configuration */
if (mac_ops->mac_select_pcs &&
+ mac_ops->mac_select_pcs(config, PHY_INTERFACE_MODE_NA) !=
+ ERR_PTR(-EOPNOTSUPP))
+ using_mac_select_pcs = true;
+
+ /* Validate the supplied configuration */
+ if (using_mac_select_pcs &&
phy_interface_empty(config->supported_interfaces)) {
dev_err(config->dev,
"phylink: error: empty supported_interfaces but mac_select_pcs() method present\n");
@@ -1244,6 +1249,7 @@ struct phylink *phylink_create(struct phylink_config *config,
return ERR_PTR(-EINVAL);
}
+ pl->using_mac_select_pcs = using_mac_select_pcs;
pl->phy_state.interface = iface;
pl->link_interface = iface;
if (iface == PHY_INTERFACE_MODE_MOCA)
@@ -1290,36 +1296,6 @@ struct phylink *phylink_create(struct phylink_config *config,
EXPORT_SYMBOL_GPL(phylink_create);
/**
- * phylink_set_pcs() - set the current PCS for phylink to use
- * @pl: a pointer to a &struct phylink returned from phylink_create()
- * @pcs: a pointer to the &struct phylink_pcs
- *
- * Bind the MAC PCS to phylink. This may be called after phylink_create().
- * If it is desired to dynamically change the PCS, then the preferred method
- * is to use mac_select_pcs(), but it may also be called in mac_prepare()
- * or mac_config().
- *
- * Please note that there are behavioural changes with the mac_config()
- * callback if a PCS is present (denoting a newer setup) so removing a PCS
- * is not supported, and if a PCS is going to be used, it must be registered
- * by calling phylink_set_pcs() at the latest in the first mac_config() call.
- */
-void phylink_set_pcs(struct phylink *pl, struct phylink_pcs *pcs)
-{
- pl->pcs = pcs;
- pl->pcs_ops = pcs->ops;
-
- if (!pl->phylink_disable_state &&
- pl->cfg_link_an_mode == MLO_AN_INBAND) {
- if (pl->config->pcs_poll || pcs->poll)
- mod_timer(&pl->link_poll, jiffies + HZ);
- else
- del_timer(&pl->link_poll);
- }
-}
-EXPORT_SYMBOL_GPL(phylink_set_pcs);
-
-/**
* phylink_destroy() - cleanup and destroy the phylink instance
* @pl: a pointer to a &struct phylink returned from phylink_create()
*
@@ -1403,11 +1379,11 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
ret = phylink_validate(pl, supported, &config);
if (ret) {
- phylink_warn(pl, "validation of %s with support %*pb and advertisement %*pb failed: %d\n",
+ phylink_warn(pl, "validation of %s with support %*pb and advertisement %*pb failed: %pe\n",
phy_modes(config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, phy->supported,
__ETHTOOL_LINK_MODE_MASK_NBITS, config.advertising,
- ret);
+ ERR_PTR(ret));
return ret;
}
@@ -1684,7 +1660,6 @@ void phylink_start(struct phylink *pl)
poll |= pl->config->poll_fixed_state;
break;
case MLO_AN_INBAND:
- poll |= pl->config->pcs_poll;
if (pl->pcs)
poll |= pl->pcs->poll;
break;
@@ -2607,8 +2582,9 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode,
/* Ignore errors if we're expecting a PHY to attach later */
ret = phylink_validate(pl, support, &config);
if (ret) {
- phylink_err(pl, "validation with support %*pb failed: %d\n",
- __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
+ phylink_err(pl, "validation with support %*pb failed: %pe\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support,
+ ERR_PTR(ret));
return ret;
}
@@ -2624,10 +2600,12 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode,
linkmode_copy(support1, support);
ret = phylink_validate(pl, support1, &config);
if (ret) {
- phylink_err(pl, "validation of %s/%s with support %*pb failed: %d\n",
+ phylink_err(pl,
+ "validation of %s/%s with support %*pb failed: %pe\n",
phylink_an_mode_str(mode),
phy_modes(config.interface),
- __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support,
+ ERR_PTR(ret));
return ret;
}
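
The phylink_create() hunk above probes mac_select_pcs() once with PHY_INTERFACE_MODE_NA and treats ERR_PTR(-EOPNOTSUPP) as "this MAC does not do PCS selection". A hedged sketch of a conforming callback; struct example_priv and its embedded PCS are invented for illustration:

struct example_priv {
	struct phylink_config phylink_config;
	struct phylink_pcs pcs;		/* hypothetical embedded PCS */
};

static struct phylink_pcs *
example_mac_select_pcs(struct phylink_config *config, phy_interface_t interface)
{
	struct example_priv *priv =
		container_of(config, struct example_priv, phylink_config);

	switch (interface) {
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		return &priv->pcs;
	default:
		return NULL;	/* no PCS needed for this mode */
	}
}

Returning anything other than ERR_PTR(-EOPNOTSUPP) for PHY_INTERFACE_MODE_NA (including NULL, as here) sets using_mac_select_pcs, after which all PCS selection goes through this callback instead of the removed phylink_set_pcs().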
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index c1512c9925a6..15aa5ac1ff49 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -75,6 +75,12 @@ static const struct sfp_quirk sfp_quirks[] = {
.part = "MA5671A",
.modes = sfp_quirk_2500basex,
}, {
+ // Lantech 8330-262D-E can operate at 2500base-X, but
+ // incorrectly reports 2500MBd NRZ in its EEPROM
+ .vendor = "Lantech",
+ .part = "8330-262D-E",
+ .modes = sfp_quirk_2500basex,
+ }, {
.vendor = "UBNT",
.part = "UF-INSTANT",
.modes = sfp_quirk_ubnt_uf_instant,
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 4720b24ca51b..4dfb79807823 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -471,8 +471,8 @@ static unsigned int sfp_soft_get_state(struct sfp *sfp)
state |= SFP_F_TX_FAULT;
} else {
dev_err_ratelimited(sfp->dev,
- "failed to read SFP soft status: %d\n",
- ret);
+ "failed to read SFP soft status: %pe\n",
+ ERR_PTR(ret));
/* Preserve the current state */
state = sfp->state;
}
@@ -1311,7 +1311,8 @@ static void sfp_hwmon_probe(struct work_struct *work)
mod_delayed_work(system_wq, &sfp->hwmon_probe,
T_PROBE_RETRY_SLOW);
} else {
- dev_warn(sfp->dev, "hwmon probe failed: %d\n", err);
+ dev_warn(sfp->dev, "hwmon probe failed: %pe\n",
+ ERR_PTR(err));
}
return;
}
@@ -1516,14 +1517,15 @@ static int sfp_sm_probe_phy(struct sfp *sfp, bool is_c45)
if (phy == ERR_PTR(-ENODEV))
return PTR_ERR(phy);
if (IS_ERR(phy)) {
- dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy));
+ dev_err(sfp->dev, "mdiobus scan returned %pe\n", phy);
return PTR_ERR(phy);
}
err = phy_device_register(phy);
if (err) {
phy_device_free(phy);
- dev_err(sfp->dev, "phy_device_register failed: %d\n", err);
+ dev_err(sfp->dev, "phy_device_register failed: %pe\n",
+ ERR_PTR(err));
return err;
}
@@ -1531,7 +1533,7 @@ static int sfp_sm_probe_phy(struct sfp *sfp, bool is_c45)
if (err) {
phy_device_remove(phy);
phy_device_free(phy);
- dev_err(sfp->dev, "sfp_add_phy failed: %d\n", err);
+ dev_err(sfp->dev, "sfp_add_phy failed: %pe\n", ERR_PTR(err));
return err;
}
@@ -1708,7 +1710,7 @@ static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable)
err = sfp_read(sfp, true, SFP_EXT_STATUS, &val, sizeof(val));
if (err != sizeof(val)) {
- dev_err(sfp->dev, "Failed to read EEPROM: %d\n", err);
+ dev_err(sfp->dev, "Failed to read EEPROM: %pe\n", ERR_PTR(err));
return -EAGAIN;
}
@@ -1726,7 +1728,8 @@ static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable)
err = sfp_write(sfp, true, SFP_EXT_STATUS, &val, sizeof(val));
if (err != sizeof(val)) {
- dev_err(sfp->dev, "Failed to write EEPROM: %d\n", err);
+ dev_err(sfp->dev, "Failed to write EEPROM: %pe\n",
+ ERR_PTR(err));
return -EAGAIN;
}
@@ -1778,7 +1781,9 @@ static int sfp_cotsworks_fixup_check(struct sfp *sfp, struct sfp_eeprom_id *id)
id->base.connector = SFF8024_CONNECTOR_LC;
err = sfp_write(sfp, false, SFP_PHYS_ID, &id->base, 3);
if (err != 3) {
- dev_err(sfp->dev, "Failed to rewrite module EEPROM: %d\n", err);
+ dev_err(sfp->dev,
+ "Failed to rewrite module EEPROM: %pe\n",
+ ERR_PTR(err));
return err;
}
@@ -1789,7 +1794,9 @@ static int sfp_cotsworks_fixup_check(struct sfp *sfp, struct sfp_eeprom_id *id)
check = sfp_check(&id->base, sizeof(id->base) - 1);
err = sfp_write(sfp, false, SFP_CC_BASE, &check, 1);
if (err != 1) {
- dev_err(sfp->dev, "Failed to update base structure checksum in fiber module EEPROM: %d\n", err);
+ dev_err(sfp->dev,
+ "Failed to update base structure checksum in fiber module EEPROM: %pe\n",
+ ERR_PTR(err));
return err;
}
}
@@ -1814,12 +1821,13 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
if (ret < 0) {
if (report)
- dev_err(sfp->dev, "failed to read EEPROM: %d\n", ret);
+ dev_err(sfp->dev, "failed to read EEPROM: %pe\n",
+ ERR_PTR(ret));
return -EAGAIN;
}
if (ret != sizeof(id.base)) {
- dev_err(sfp->dev, "EEPROM short read: %d\n", ret);
+ dev_err(sfp->dev, "EEPROM short read: %pe\n", ERR_PTR(ret));
return -EAGAIN;
}
@@ -1839,13 +1847,15 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
if (ret < 0) {
if (report)
- dev_err(sfp->dev, "failed to read EEPROM: %d\n",
- ret);
+ dev_err(sfp->dev,
+ "failed to read EEPROM: %pe\n",
+ ERR_PTR(ret));
return -EAGAIN;
}
if (ret != sizeof(id.base)) {
- dev_err(sfp->dev, "EEPROM short read: %d\n", ret);
+ dev_err(sfp->dev, "EEPROM short read: %pe\n",
+ ERR_PTR(ret));
return -EAGAIN;
}
}
@@ -1887,12 +1897,13 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
ret = sfp_read(sfp, false, SFP_CC_BASE + 1, &id.ext, sizeof(id.ext));
if (ret < 0) {
if (report)
- dev_err(sfp->dev, "failed to read EEPROM: %d\n", ret);
+ dev_err(sfp->dev, "failed to read EEPROM: %pe\n",
+ ERR_PTR(ret));
return -EAGAIN;
}
if (ret != sizeof(id.ext)) {
- dev_err(sfp->dev, "EEPROM short read: %d\n", ret);
+ dev_err(sfp->dev, "EEPROM short read: %pe\n", ERR_PTR(ret));
return -EAGAIN;
}
@@ -2046,7 +2057,8 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
err = sfp_hwmon_insert(sfp);
if (err)
- dev_warn(sfp->dev, "hwmon probe failed: %d\n", err);
+ dev_warn(sfp->dev, "hwmon probe failed: %pe\n",
+ ERR_PTR(err));
sfp_sm_mod_next(sfp, SFP_MOD_WAITDEV, 0);
fallthrough;
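
All the conversions in this file follow one %pe idiom: wrap the negative errno in ERR_PTR() so printk renders it symbolically (e.g. "-EIO") rather than as a bare number. A minimal sketch:

static void example_report(struct device *dev, int err)
{
	if (err < 0)
		dev_err(dev, "operation failed: %pe\n", ERR_PTR(err));
}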
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 8b5445a724ce..ff37f8ba6758 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -517,7 +517,7 @@ static int ks8995_probe(struct spi_device *spi)
return 0;
}
-static int ks8995_remove(struct spi_device *spi)
+static void ks8995_remove(struct spi_device *spi)
{
struct ks8995_switch *ks = spi_get_drvdata(spi);
@@ -526,8 +526,6 @@ static int ks8995_remove(struct spi_device *spi)
/* assert reset */
if (ks->pdata && gpio_is_valid(ks->pdata->reset_gpio))
gpiod_set_value(gpio_to_desc(ks->pdata->reset_gpio), 1);
-
- return 0;
}
/* ------------------------------------------------------------------------ */
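
The ks8995 change above follows the tree-wide conversion of spi_driver::remove to return void: cleanup callbacks can only log problems, not propagate them. A sketch under that assumption; struct foo, foo_probe and foo_shutdown_hw are placeholders:

static void foo_remove(struct spi_device *spi)
{
	struct foo *priv = spi_get_drvdata(spi);

	foo_shutdown_hw(priv);		/* hypothetical cleanup helper */
}

static struct spi_driver foo_driver = {
	.driver	= { .name = "foo" },
	.probe	= foo_probe,		/* hypothetical */
	.remove	= foo_remove,		/* returns void now */
};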
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 0d491b4d6667..dafd3e9ebbf8 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -676,7 +676,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl,
case PLIP_PK_DONE:
/* Inform the upper layer of the arrival of a packet. */
rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
- netif_rx_ni(rcv->skb);
+ netif_rx(rcv->skb);
dev->stats.rx_bytes += rcv->length.h;
dev->stats.rx_packets++;
rcv->skb = NULL;
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 1a95f3beb784..39e61e07e489 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -109,7 +109,7 @@ static int rionet_rx_clean(struct net_device *ndev)
skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
rnet->rx_skb[i]->protocol =
eth_type_trans(rnet->rx_skb[i], ndev);
- error = netif_rx(rnet->rx_skb[i]);
+ error = __netif_rx(rnet->rx_skb[i]);
if (error == NET_RX_DROP) {
ndev->stats.rx_dropped++;
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index 57a6d598467b..c3f8020571ad 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -872,7 +872,7 @@ printk("cm0: IP identification: %02x%02x fragment offset: %02x%02x\n", buffer[3
/* datagram completed: send to upper level */
skb_trim(skb, dlen);
- netif_rx(skb);
+ __netif_rx(skb);
stats->rx_bytes+=dlen;
stats->rx_packets++;
lp->rx_skb[ns] = NULL;
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 98f586f910fb..88396ff99f03 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -368,7 +368,7 @@ static void sl_bump(struct slip *sl)
skb_put_data(skb, sl->rbuff, count);
skb_reset_mac_header(skb);
skb->protocol = htons(ETH_P_IP);
- netif_rx_ni(skb);
+ netif_rx(skb);
dev->stats.rx_packets++;
}
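
A note on the recurring netif_rx_ni() -> netif_rx() and netif_rx() -> __netif_rx() conversions in this series: as I understand the new contract, netif_rx() works from any context and disables bottom halves itself when needed (making netif_rx_ni() redundant), while __netif_rx() is the cheaper variant reserved for callers that already run with bottom halves disabled, e.g. loopback-style transmit paths such as veth and vrf below. A sketch of the rule:

static void example_deliver(struct sk_buff *skb, bool bh_disabled)
{
	if (bh_disabled)
		__netif_rx(skb);	/* caller holds local_bh_disable() */
	else
		netif_rx(skb);		/* safe from any context */
}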
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 8e3a28ba6b28..c3d42062559d 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -322,6 +322,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
struct tap_dev *tap;
struct tap_queue *q;
netdev_features_t features = TAP_FEATURES;
+ enum skb_drop_reason drop_reason;
tap = tap_dev_get_rcu(dev);
if (!tap)
@@ -343,12 +344,16 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
struct sk_buff *segs = __skb_gso_segment(skb, features, false);
struct sk_buff *next;
- if (IS_ERR(segs))
+ if (IS_ERR(segs)) {
+ drop_reason = SKB_DROP_REASON_SKB_GSO_SEG;
goto drop;
+ }
if (!segs) {
- if (ptr_ring_produce(&q->ring, skb))
+ if (ptr_ring_produce(&q->ring, skb)) {
+ drop_reason = SKB_DROP_REASON_FULL_RING;
goto drop;
+ }
goto wake_up;
}
@@ -356,8 +361,9 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
skb_list_walk_safe(segs, skb, next) {
skb_mark_not_on_list(skb);
if (ptr_ring_produce(&q->ring, skb)) {
- kfree_skb(skb);
- kfree_skb_list(next);
+ drop_reason = SKB_DROP_REASON_FULL_RING;
+ kfree_skb_reason(skb, drop_reason);
+ kfree_skb_list_reason(next, drop_reason);
break;
}
}
@@ -369,10 +375,14 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
*/
if (skb->ip_summed == CHECKSUM_PARTIAL &&
!(features & NETIF_F_CSUM_MASK) &&
- skb_checksum_help(skb))
+ skb_checksum_help(skb)) {
+ drop_reason = SKB_DROP_REASON_SKB_CSUM;
goto drop;
- if (ptr_ring_produce(&q->ring, skb))
+ }
+ if (ptr_ring_produce(&q->ring, skb)) {
+ drop_reason = SKB_DROP_REASON_FULL_RING;
goto drop;
+ }
}
wake_up:
@@ -383,7 +393,7 @@ drop:
/* Count errors/drops only here, thus don't care about args. */
if (tap->count_rx_dropped)
tap->count_rx_dropped(tap);
- kfree_skb(skb);
+ kfree_skb_reason(skb, drop_reason);
return RX_HANDLER_CONSUMED;
}
EXPORT_SYMBOL_GPL(tap_handle_frame);
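
The drop-reason plumbing added above follows one pattern throughout tap and tun: record an skb_drop_reason on every failure path, then free through kfree_skb_reason() at a shared label so the cause reaches the kfree_skb tracepoint. A condensed sketch:

static int example_enqueue(struct ptr_ring *ring, struct sk_buff *skb)
{
	enum skb_drop_reason drop_reason;

	if (ptr_ring_produce(ring, skb)) {
		drop_reason = SKB_DROP_REASON_FULL_RING;
		goto drop;
	}
	return 0;

drop:
	kfree_skb_reason(skb, drop_reason);	/* reason visible to tracing */
	return -ENOBUFS;
}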
@@ -632,6 +642,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
int depth;
bool zerocopy = false;
size_t linear;
+ enum skb_drop_reason drop_reason;
if (q->flags & IFF_VNET_HDR) {
vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
@@ -696,8 +707,10 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
else
err = skb_copy_datagram_from_iter(skb, 0, from, len);
- if (err)
+ if (err) {
+ drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
goto err_kfree;
+ }
skb_set_network_header(skb, ETH_HLEN);
skb_reset_mac_header(skb);
@@ -706,8 +719,10 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
if (vnet_hdr_len) {
err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
tap_is_little_endian(q));
- if (err)
+ if (err) {
+ drop_reason = SKB_DROP_REASON_DEV_HDR;
goto err_kfree;
+ }
}
skb_probe_transport_header(skb);
@@ -738,7 +753,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
return total_len;
err_kfree:
- kfree_skb(skb);
+ kfree_skb_reason(skb, drop_reason);
err:
rcu_read_lock();
@@ -1198,7 +1213,8 @@ static int tap_sendmsg(struct socket *sock, struct msghdr *m,
struct xdp_buff *xdp;
int i;
- if (ctl && (ctl->type == TUN_MSG_PTR)) {
+ if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
+ ctl && ctl->type == TUN_MSG_PTR) {
for (i = 0; i < ctl->num; i++) {
xdp = &((struct xdp_buff *)ctl->ptr)[i];
tap_get_user_xdp(q, xdp);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 8b2adc56b92a..b07dde6f0abf 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -734,6 +734,11 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
port = team_port_get_rcu(skb->dev);
team = port->team;
if (!team_port_enabled(port)) {
+ if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+ /* link-local packets are mostly useful when the stack receives
+ * them on the link they arrived on.
+ */
+ return RX_HANDLER_PASS;
/* allow exact match delivery for disabled ports */
res = RX_HANDLER_EXACT;
} else {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index fed85447701a..276a0e42ca8e 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1058,6 +1058,7 @@ static unsigned int run_ebpf_filter(struct tun_struct *tun,
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
+ enum skb_drop_reason drop_reason;
int txq = skb->queue_mapping;
struct netdev_queue *queue;
struct tun_file *tfile;
@@ -1067,8 +1068,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
tfile = rcu_dereference(tun->tfiles[txq]);
/* Drop packet if interface is not attached */
- if (!tfile)
+ if (!tfile) {
+ drop_reason = SKB_DROP_REASON_DEV_READY;
goto drop;
+ }
if (!rcu_dereference(tun->steering_prog))
tun_automq_xmit(tun, skb);
@@ -1078,19 +1081,32 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* Drop if the filter does not like it.
* This is a noop if the filter is disabled.
* Filter can be enabled only for TAP devices. */
- if (!check_filter(&tun->txflt, skb))
+ if (!check_filter(&tun->txflt, skb)) {
+ drop_reason = SKB_DROP_REASON_TAP_TXFILTER;
goto drop;
+ }
if (tfile->socket.sk->sk_filter &&
- sk_filter(tfile->socket.sk, skb))
+ sk_filter(tfile->socket.sk, skb)) {
+ drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
goto drop;
+ }
len = run_ebpf_filter(tun, skb, len);
- if (len == 0 || pskb_trim(skb, len))
+ if (len == 0) {
+ drop_reason = SKB_DROP_REASON_TAP_FILTER;
goto drop;
+ }
+
+ if (pskb_trim(skb, len)) {
+ drop_reason = SKB_DROP_REASON_NOMEM;
+ goto drop;
+ }
- if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
+ if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) {
+ drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
goto drop;
+ }
skb_tx_timestamp(skb);
@@ -1101,8 +1117,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
nf_reset_ct(skb);
- if (ptr_ring_produce(&tfile->tx_ring, skb))
+ if (ptr_ring_produce(&tfile->tx_ring, skb)) {
+ drop_reason = SKB_DROP_REASON_FULL_RING;
goto drop;
+ }
/* NETIF_F_LLTX requires to do our own update of trans_start */
queue = netdev_get_tx_queue(dev, txq);
@@ -1117,9 +1135,9 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
drop:
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
skb_tx_error(skb);
- kfree_skb(skb);
+ kfree_skb_reason(skb, drop_reason);
rcu_read_unlock();
return NET_XMIT_DROP;
}
@@ -1273,7 +1291,7 @@ resample:
void *frame = tun_xdp_to_ptr(xdp);
if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
break;
}
nxmit++;
@@ -1608,7 +1626,7 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
trace_xdp_exception(tun->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
- atomic_long_inc(&tun->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(tun->dev);
break;
}
@@ -1717,6 +1735,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
u32 rxhash = 0;
int skb_xdp = 1;
bool frags = tun_napi_frags_enabled(tfile);
+ enum skb_drop_reason drop_reason;
if (!(tun->flags & IFF_NO_PI)) {
if (len < sizeof(pi))
@@ -1778,7 +1797,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
*/
skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
if (IS_ERR(skb)) {
- atomic_long_inc(&tun->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(tun->dev);
return PTR_ERR(skb);
}
if (!skb)
@@ -1807,7 +1826,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EAGAIN)
- atomic_long_inc(&tun->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(tun->dev);
if (frags)
mutex_unlock(&tfile->napi_mutex);
return PTR_ERR(skb);
@@ -1820,9 +1839,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (err) {
err = -EFAULT;
+ drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
drop:
- atomic_long_inc(&tun->dev->rx_dropped);
- kfree_skb(skb);
+ dev_core_stats_rx_dropped_inc(tun->dev);
+ kfree_skb_reason(skb, drop_reason);
if (frags) {
tfile->napi.skb = NULL;
mutex_unlock(&tfile->napi_mutex);
@@ -1856,7 +1876,7 @@ drop:
pi.proto = htons(ETH_P_IPV6);
break;
default:
- atomic_long_inc(&tun->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(tun->dev);
kfree_skb(skb);
return -EINVAL;
}
@@ -1869,6 +1889,7 @@ drop:
case IFF_TAP:
if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
err = -ENOMEM;
+ drop_reason = SKB_DROP_REASON_HDR_TRUNC;
goto drop;
}
skb->protocol = eth_type_trans(skb, tun->dev);
@@ -1922,6 +1943,7 @@ drop:
if (unlikely(!(tun->dev->flags & IFF_UP))) {
err = -EIO;
rcu_read_unlock();
+ drop_reason = SKB_DROP_REASON_DEV_READY;
goto drop;
}
@@ -1934,7 +1956,7 @@ drop:
skb_headlen(skb));
if (unlikely(headlen > skb_headlen(skb))) {
- atomic_long_inc(&tun->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(tun->dev);
napi_free_frags(&tfile->napi);
rcu_read_unlock();
mutex_unlock(&tfile->napi_mutex);
@@ -1962,7 +1984,7 @@ drop:
} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
tun_rx_batched(tun, tfile, skb, more);
} else {
- netif_rx_ni(skb);
+ netif_rx(skb);
}
rcu_read_unlock();
@@ -2388,9 +2410,10 @@ static int tun_xdp_one(struct tun_struct *tun,
struct virtio_net_hdr *gso = &hdr->gso;
struct bpf_prog *xdp_prog;
struct sk_buff *skb = NULL;
+ struct sk_buff_head *queue;
u32 rxhash = 0, act;
int buflen = hdr->buflen;
- int err = 0;
+ int ret = 0;
bool skb_xdp = false;
struct page *page;
@@ -2405,13 +2428,13 @@ static int tun_xdp_one(struct tun_struct *tun,
xdp_set_data_meta_invalid(xdp);
act = bpf_prog_run_xdp(xdp_prog, xdp);
- err = tun_xdp_act(tun, xdp_prog, xdp, act);
- if (err < 0) {
+ ret = tun_xdp_act(tun, xdp_prog, xdp, act);
+ if (ret < 0) {
put_page(virt_to_head_page(xdp->data));
- return err;
+ return ret;
}
- switch (err) {
+ switch (ret) {
case XDP_REDIRECT:
*flush = true;
fallthrough;
@@ -2435,7 +2458,7 @@ static int tun_xdp_one(struct tun_struct *tun,
build:
skb = build_skb(xdp->data_hard_start, buflen);
if (!skb) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
@@ -2445,7 +2468,7 @@ build:
if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
atomic_long_inc(&tun->rx_frame_errors);
kfree_skb(skb);
- err = -EINVAL;
+ ret = -EINVAL;
goto out;
}
@@ -2455,16 +2478,27 @@ build:
skb_record_rx_queue(skb, tfile->queue_index);
if (skb_xdp) {
- err = do_xdp_generic(xdp_prog, skb);
- if (err != XDP_PASS)
+ ret = do_xdp_generic(xdp_prog, skb);
+ if (ret != XDP_PASS) {
+ ret = 0;
goto out;
+ }
}
if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
!tfile->detached)
rxhash = __skb_get_hash_symmetric(skb);
- netif_receive_skb(skb);
+ if (tfile->napi_enabled) {
+ queue = &tfile->sk.sk_write_queue;
+ spin_lock(&queue->lock);
+ __skb_queue_tail(queue, skb);
+ spin_unlock(&queue->lock);
+ ret = 1;
+ } else {
+ netif_receive_skb(skb);
+ ret = 0;
+ }
/* No need to disable preemption here since this function is
* always called with bh disabled
@@ -2475,7 +2509,7 @@ build:
tun_flow_update(tun, rxhash, tfile);
out:
- return err;
+ return ret;
}
static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
@@ -2489,10 +2523,11 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
if (!tun)
return -EBADFD;
- if (ctl && (ctl->type == TUN_MSG_PTR)) {
+ if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
+ ctl && ctl->type == TUN_MSG_PTR) {
struct tun_page tpage;
int n = ctl->num;
- int flush = 0;
+ int flush = 0, queued = 0;
memset(&tpage, 0, sizeof(tpage));
@@ -2501,12 +2536,17 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
for (i = 0; i < n; i++) {
xdp = &((struct xdp_buff *)ctl->ptr)[i];
- tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
+ ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
+ if (ret > 0)
+ queued += ret;
}
if (flush)
xdp_do_flush();
+ if (tfile->napi_enabled && queued > 0)
+ napi_schedule(&tfile->napi);
+
rcu_read_unlock();
local_bh_enable();
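
Piecing the tun hunks together: tun_xdp_one() now returns 1 when it parked the resulting skb on sk_write_queue instead of delivering it inline, and tun_sendmsg() schedules NAPI once per batch rather than per packet. A hedged sketch of the caller side, mirroring the diff (tun_put_page() handling omitted for brevity):

static void example_xdp_batch(struct tun_struct *tun, struct tun_file *tfile,
			      struct xdp_buff *xdp, int n)
{
	struct tun_page tpage = {};
	int i, ret, flush = 0, queued = 0;

	for (i = 0; i < n; i++) {
		ret = tun_xdp_one(tun, tfile, &xdp[i], &flush, &tpage);
		if (ret > 0)
			queued += ret;	/* skbs parked on sk_write_queue */
	}

	if (flush)
		xdp_do_flush();
	if (tfile->napi_enabled && queued > 0)
		napi_schedule(&tfile->napi);	/* one wakeup per batch */
}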
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index b554054a7560..e62fc4f2aee0 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -358,6 +358,7 @@ config USB_NET_SMSC95XX
select BITREVERSE
select CRC16
select CRC32
+ imply NET_SELFTESTS
help
This option adds support for SMSC LAN95XX based USB 2.0
10/100 Ethernet adapters.
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index 2a1e31defe71..2c81236c6c7c 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -158,6 +158,8 @@
#define AX_EEPROM_MAGIC 0xdeadbeef
#define AX_EEPROM_LEN 0x200
+#define AX_EMBD_PHY_ADDR 0x10
+
/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
struct asix_data {
u8 multi_filter[AX_MCAST_FILTER_SIZE];
@@ -177,14 +179,16 @@ struct asix_rx_fixup_info {
struct asix_common_private {
void (*resume)(struct usbnet *dev);
void (*suspend)(struct usbnet *dev);
+ int (*reset)(struct usbnet *dev, int in_pm);
u16 presvd_phy_advertise;
u16 presvd_phy_bmcr;
struct asix_rx_fixup_info rx_fixup_info;
struct mii_bus *mdio;
struct phy_device *phydev;
+ struct phy_device *phydev_int;
u16 phy_addr;
- char phy_name[20];
bool embd_phy;
+ u8 chipcode;
};
extern const struct driver_info ax88172a_info;
@@ -192,8 +196,8 @@ extern const struct driver_info ax88172a_info;
/* ASIX specific flags */
#define FLAG_EEPROM_MAC (1UL << 0) /* init device MAC from eeprom */
-int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data, int in_pm);
+int __must_check asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data, int in_pm);
int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
u16 size, void *data, int in_pm);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 71682970be58..632fa6c1d5e3 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -11,8 +11,8 @@
#define AX_HOST_EN_RETRIES 30
-int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data, int in_pm)
+int __must_check asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data, int in_pm)
{
int ret;
int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
@@ -27,9 +27,12 @@ int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
ret = fn(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, data, size);
- if (unlikely(ret < 0))
+ if (unlikely(ret < size)) {
+ ret = ret < 0 ? ret : -ENODATA;
+
netdev_warn(dev->net, "Failed to read reg index 0x%04x: %d\n",
index, ret);
+ }
return ret;
}
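
After this change a short transfer is reported as -ENODATA, so callers only need the usual negative-errno check, which is what the asix_check_host_enable() hunk below switches to. A small caller sketch; the register is just an example taken from elsewhere in this patch:

static int example_read_chipcode(struct usbnet *dev, u8 *chipcode)
{
	int ret;

	ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1,
			    chipcode, 0);

	return ret < 0 ? ret : 0;	/* short read already mapped to -ENODATA */
}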
@@ -79,7 +82,7 @@ static int asix_check_host_enable(struct usbnet *dev, int in_pm)
0, 0, 1, &smsr, in_pm);
if (ret == -ENODEV)
break;
- else if (ret < sizeof(smsr))
+ else if (ret < 0)
continue;
else if (smsr & AX_HOST_EN)
break;
@@ -488,7 +491,8 @@ void asix_set_multicast(struct net_device *net)
asix_write_cmd_async(dev, AX_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
}
-int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
+static int __asix_mdio_read(struct net_device *netdev, int phy_id, int loc,
+ bool in_pm)
{
struct usbnet *dev = netdev_priv(netdev);
__le16 res;
@@ -496,18 +500,18 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
mutex_lock(&dev->phy_mutex);
- ret = asix_check_host_enable(dev, 0);
+ ret = asix_check_host_enable(dev, in_pm);
if (ret == -ENODEV || ret == -ETIMEDOUT) {
mutex_unlock(&dev->phy_mutex);
return ret;
}
ret = asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id, (__u16)loc, 2,
- &res, 0);
+ &res, in_pm);
if (ret < 0)
goto out;
- ret = asix_set_hw_mii(dev, 0);
+ ret = asix_set_hw_mii(dev, in_pm);
out:
mutex_unlock(&dev->phy_mutex);
@@ -517,8 +521,13 @@ out:
return ret < 0 ? ret : le16_to_cpu(res);
}
+int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
+{
+ return __asix_mdio_read(netdev, phy_id, loc, false);
+}
+
static int __asix_mdio_write(struct net_device *netdev, int phy_id, int loc,
- int val)
+ int val, bool in_pm)
{
struct usbnet *dev = netdev_priv(netdev);
__le16 res = cpu_to_le16(val);
@@ -529,16 +538,16 @@ static int __asix_mdio_write(struct net_device *netdev, int phy_id, int loc,
mutex_lock(&dev->phy_mutex);
- ret = asix_check_host_enable(dev, 0);
+ ret = asix_check_host_enable(dev, in_pm);
if (ret == -ENODEV)
goto out;
ret = asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2,
- &res, 0);
+ &res, in_pm);
if (ret < 0)
goto out;
- ret = asix_set_hw_mii(dev, 0);
+ ret = asix_set_hw_mii(dev, in_pm);
out:
mutex_unlock(&dev->phy_mutex);
@@ -547,7 +556,7 @@ out:
void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
{
- __asix_mdio_write(netdev, phy_id, loc, val);
+ __asix_mdio_write(netdev, phy_id, loc, val, false);
}
/* MDIO read and write wrappers for phylib */
@@ -555,63 +564,25 @@ int asix_mdio_bus_read(struct mii_bus *bus, int phy_id, int regnum)
{
struct usbnet *priv = bus->priv;
- return asix_mdio_read(priv->net, phy_id, regnum);
+ return __asix_mdio_read(priv->net, phy_id, regnum, false);
}
int asix_mdio_bus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val)
{
struct usbnet *priv = bus->priv;
- return __asix_mdio_write(priv->net, phy_id, regnum, val);
+ return __asix_mdio_write(priv->net, phy_id, regnum, val, false);
}
int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc)
{
- struct usbnet *dev = netdev_priv(netdev);
- __le16 res;
- int ret;
-
- mutex_lock(&dev->phy_mutex);
-
- ret = asix_check_host_enable(dev, 1);
- if (ret == -ENODEV || ret == -ETIMEDOUT) {
- mutex_unlock(&dev->phy_mutex);
- return ret;
- }
-
- asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id,
- (__u16)loc, 2, &res, 1);
- asix_set_hw_mii(dev, 1);
- mutex_unlock(&dev->phy_mutex);
-
- netdev_dbg(dev->net, "asix_mdio_read_nopm() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
- phy_id, loc, le16_to_cpu(res));
-
- return le16_to_cpu(res);
+ return __asix_mdio_read(netdev, phy_id, loc, true);
}
void
asix_mdio_write_nopm(struct net_device *netdev, int phy_id, int loc, int val)
{
- struct usbnet *dev = netdev_priv(netdev);
- __le16 res = cpu_to_le16(val);
- int ret;
-
- netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
- phy_id, loc, val);
-
- mutex_lock(&dev->phy_mutex);
-
- ret = asix_check_host_enable(dev, 1);
- if (ret == -ENODEV) {
- mutex_unlock(&dev->phy_mutex);
- return;
- }
-
- asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id,
- (__u16)loc, 2, &res, 1);
- asix_set_hw_mii(dev, 1);
- mutex_unlock(&dev->phy_mutex);
+ __asix_mdio_write(netdev, phy_id, loc, val, true);
}
void asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 4514d35ef4c4..38e47a93fb83 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -450,7 +450,6 @@ static int ax88772a_hw_reset(struct usbnet *dev, int in_pm)
struct asix_data *data = (struct asix_data *)&dev->data;
struct asix_common_private *priv = dev->driver_priv;
u16 rx_ctl, phy14h, phy15h, phy16h;
- u8 chipcode = 0;
int ret;
ret = asix_write_gpio(dev, AX_GPIO_RSE, 5, in_pm);
@@ -493,12 +492,7 @@ static int ax88772a_hw_reset(struct usbnet *dev, int in_pm)
goto out;
}
- ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0,
- 0, 1, &chipcode, in_pm);
- if (ret < 0)
- goto out;
-
- if ((chipcode & AX_CHIPCODE_MASK) == AX_AX88772B_CHIPCODE) {
+ if (priv->chipcode == AX_AX88772B_CHIPCODE) {
ret = asix_write_cmd(dev, AX_QCTCTRL, 0x8000, 0x8001,
0, NULL, in_pm);
if (ret < 0) {
@@ -506,7 +500,7 @@ static int ax88772a_hw_reset(struct usbnet *dev, int in_pm)
ret);
goto out;
}
- } else if ((chipcode & AX_CHIPCODE_MASK) == AX_AX88772A_CHIPCODE) {
+ } else if (priv->chipcode == AX_AX88772A_CHIPCODE) {
/* Check if the PHY registers have default settings */
phy14h = asix_mdio_read_nopm(dev->net, dev->mii.phy_id,
AX88772A_PHY14H);
@@ -625,27 +619,13 @@ static void ax88772_resume(struct usbnet *dev)
int i;
for (i = 0; i < 3; i++)
- if (!ax88772_hw_reset(dev, 1))
+ if (!priv->reset(dev, 1))
break;
if (netif_running(dev->net))
phy_start(priv->phydev);
}
-static void ax88772a_resume(struct usbnet *dev)
-{
- struct asix_common_private *priv = dev->driver_priv;
- int i;
-
- for (i = 0; i < 3; i++) {
- if (!ax88772a_hw_reset(dev, 1))
- break;
- }
-
- if (netif_running(dev->net))
- phy_start(priv->phydev);
-}
-
static int asix_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
@@ -681,15 +661,16 @@ static int ax88772_init_phy(struct usbnet *dev)
struct asix_common_private *priv = dev->driver_priv;
int ret;
- snprintf(priv->phy_name, sizeof(priv->phy_name), PHY_ID_FMT,
- priv->mdio->id, priv->phy_addr);
+ priv->phydev = mdiobus_get_phy(priv->mdio, priv->phy_addr);
+ if (!priv->phydev) {
+ netdev_err(dev->net, "Could not find PHY\n");
+ return -ENODEV;
+ }
- priv->phydev = phy_connect(dev->net, priv->phy_name, &asix_adjust_link,
- PHY_INTERFACE_MODE_INTERNAL);
- if (IS_ERR(priv->phydev)) {
- netdev_err(dev->net, "Could not connect to PHY device %s\n",
- priv->phy_name);
- ret = PTR_ERR(priv->phydev);
+ ret = phy_connect_direct(dev->net, priv->phydev, &asix_adjust_link,
+ PHY_INTERFACE_MODE_INTERNAL);
+ if (ret) {
+ netdev_err(dev->net, "Could not connect PHY\n");
return ret;
}
@@ -698,13 +679,29 @@ static int ax88772_init_phy(struct usbnet *dev)
phy_attached_info(priv->phydev);
+ if (priv->embd_phy)
+ return 0;
+
+ /* In case the main PHY is not the embedded PHY and the MAC is the
+ * RMII clock provider, we need to suspend the embedded PHY while
+ * keeping its PLL enabled (AX_SWRESET_IPPD == 0).
+ */
+ priv->phydev_int = mdiobus_get_phy(priv->mdio, AX_EMBD_PHY_ADDR);
+ if (!priv->phydev_int) {
+ netdev_err(dev->net, "Could not find internal PHY\n");
+ return -ENODEV;
+ }
+
+ priv->phydev_int->mac_managed_pm = 1;
+ phy_suspend(priv->phydev_int);
+
return 0;
}
static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
{
- u8 buf[ETH_ALEN] = {0}, chipcode = 0;
struct asix_common_private *priv;
+ u8 buf[ETH_ALEN] = {0};
int ret, i;
priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL);
@@ -753,14 +750,25 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
return ret;
priv->phy_addr = ret;
- priv->embd_phy = ((priv->phy_addr & 0x1f) == 0x10);
+ priv->embd_phy = ((priv->phy_addr & 0x1f) == AX_EMBD_PHY_ADDR);
- asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0);
- chipcode &= AX_CHIPCODE_MASK;
+ ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1,
+ &priv->chipcode, 0);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Failed to read STATMNGSTS_REG: %d\n", ret);
+ return ret;
+ }
+
+ priv->chipcode &= AX_CHIPCODE_MASK;
- ret = (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) :
- ax88772a_hw_reset(dev, 0);
+ priv->resume = ax88772_resume;
+ priv->suspend = ax88772_suspend;
+ if (priv->chipcode == AX_AX88772_CHIPCODE)
+ priv->reset = ax88772_hw_reset;
+ else
+ priv->reset = ax88772a_hw_reset;
+ ret = priv->reset(dev, 0);
if (ret < 0) {
netdev_dbg(dev->net, "Failed to reset AX88772: %d\n", ret);
return ret;
@@ -775,13 +783,6 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
priv->presvd_phy_bmcr = 0;
priv->presvd_phy_advertise = 0;
- if (chipcode == AX_AX88772_CHIPCODE) {
- priv->resume = ax88772_resume;
- priv->suspend = ax88772_suspend;
- } else {
- priv->resume = ax88772a_resume;
- priv->suspend = ax88772_suspend;
- }
ret = ax88772_init_mdio(dev);
if (ret)
@@ -858,7 +859,6 @@ static int marvell_phy_init(struct usbnet *dev)
reg = asix_mdio_read(dev->net, dev->mii.phy_id,
MII_MARVELL_LED_CTRL);
netdev_dbg(dev->net, "MII_MARVELL_LED_CTRL (2) = 0x%04x\n", reg);
- reg &= 0xfc0f;
}
return 0;
@@ -920,11 +920,21 @@ static int ax88178_reset(struct usbnet *dev)
int gpio0 = 0;
u32 phyid;
- asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status, 0);
+ ret = asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status, 0);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Failed to read GPIOS: %d\n", ret);
+ return ret;
+ }
+
netdev_dbg(dev->net, "GPIO Status: 0x%04x\n", status);
asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL, 0);
- asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom, 0);
+ ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom, 0);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Failed to read EEPROM: %d\n", ret);
+ return ret;
+ }
+
asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL, 0);
netdev_dbg(dev->net, "EEPROM index 0x17 is 0x%04x\n", eeprom);
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index c0b8b4aa78f3..c89639381eca 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -21,6 +21,7 @@
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/ipv6_stubs.h>
+#include <net/ndisc.h>
/* alternative VLAN for IP session 0 if not untagged */
#define MBIM_IPS0_VID 4094
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index 13a9a83b8538..46af78caf457 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -56,7 +56,7 @@
struct gl_packet {
__le32 packet_length;
- char packet_data [1];
+ char packet_data[];
};
struct gl_header {
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index f97813a4e8d1..f8221a7acf62 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2319,7 +2319,7 @@ static struct hso_device *hso_create_device(struct usb_interface *intf,
{
struct hso_device *hso_dev;
- hso_dev = kzalloc(sizeof(*hso_dev), GFP_ATOMIC);
+ hso_dev = kzalloc(sizeof(*hso_dev), GFP_KERNEL);
if (!hso_dev)
return NULL;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index b8e20a3f2b84..415f16662f88 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1537,11 +1537,8 @@ static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
- if (dev->domain_data.phyirq > 0) {
- local_irq_disable();
- generic_handle_irq(dev->domain_data.phyirq);
- local_irq_enable();
- }
+ if (dev->domain_data.phyirq > 0)
+ generic_handle_irq_safe(dev->domain_data.phyirq);
} else {
netdev_warn(dev->net,
"unexpected interrupt: 0x%08x\n", intdata);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index bc1e3dd67c04..4ef61f6b85df 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -20,6 +20,8 @@
#include <linux/of_net.h>
#include <linux/mdio.h>
#include <linux/phy.h>
+#include <net/selftests.h>
+
#include "smsc95xx.h"
#define SMSC_CHIPNAME "smsc95xx"
@@ -84,9 +86,10 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
- if (unlikely(ret < 0)) {
- netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
- index, ret);
+ if (ret < 0) {
+ if (ret != -ENODEV)
+ netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
+ index, ret);
return ret;
}
@@ -116,7 +119,7 @@ static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
- if (unlikely(ret < 0))
+ if (ret < 0 && ret != -ENODEV)
netdev_warn(dev->net, "Failed to write reg index 0x%08x: %d\n",
index, ret);
@@ -159,6 +162,9 @@ static int __must_check __smsc95xx_phy_wait_not_busy(struct usbnet *dev,
do {
ret = __smsc95xx_read_reg(dev, MII_ADDR, &val, in_pm);
if (ret < 0) {
+ /* Ignore -ENODEV error during disconnect() */
+ if (ret == -ENODEV)
+ return 0;
netdev_warn(dev->net, "Error reading MII_ACCESS\n");
return ret;
}
@@ -194,7 +200,8 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
addr = mii_address_cmd(phy_id, idx, MII_READ_ | MII_BUSY_);
ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
if (ret < 0) {
- netdev_warn(dev->net, "Error writing MII_ADDR\n");
+ if (ret != -ENODEV)
+ netdev_warn(dev->net, "Error writing MII_ADDR\n");
goto done;
}
@@ -206,7 +213,8 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
ret = __smsc95xx_read_reg(dev, MII_DATA, &val, in_pm);
if (ret < 0) {
- netdev_warn(dev->net, "Error reading MII_DATA\n");
+ if (ret != -ENODEV)
+ netdev_warn(dev->net, "Error reading MII_DATA\n");
goto done;
}
@@ -214,6 +222,10 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
done:
mutex_unlock(&dev->phy_mutex);
+
+ /* Ignore -ENODEV error during disconnect() */
+ if (ret == -ENODEV)
+ return 0;
return ret;
}
@@ -235,7 +247,8 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id,
val = regval;
ret = __smsc95xx_write_reg(dev, MII_DATA, val, in_pm);
if (ret < 0) {
- netdev_warn(dev->net, "Error writing MII_DATA\n");
+ if (ret != -ENODEV)
+ netdev_warn(dev->net, "Error writing MII_DATA\n");
goto done;
}
@@ -243,7 +256,8 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id,
addr = mii_address_cmd(phy_id, idx, MII_WRITE_ | MII_BUSY_);
ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
if (ret < 0) {
- netdev_warn(dev->net, "Error writing MII_ADDR\n");
+ if (ret != -ENODEV)
+ netdev_warn(dev->net, "Error writing MII_ADDR\n");
goto done;
}
@@ -727,6 +741,26 @@ static u32 smsc95xx_get_link(struct net_device *net)
return net->phydev->link;
}
+static void smsc95xx_ethtool_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ net_selftest_get_strings(data);
+ break;
+ }
+}
+
+static int smsc95xx_ethtool_get_sset_count(struct net_device *ndev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return net_selftest_get_count();
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_link = smsc95xx_get_link,
.nway_reset = phy_ethtool_nway_reset,
@@ -743,6 +777,9 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_ts_info = ethtool_op_get_ts_info,
+ .self_test = net_selftest,
+ .get_strings = smsc95xx_ethtool_get_strings,
+ .get_sset_count = smsc95xx_ethtool_get_sset_count,
};
static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
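
With these hooks in place, the shared selftests from net/core/selftests.c (hence the new "imply NET_SELFTESTS" in the Kconfig hunk above) become reachable through the standard ethtool self-test interface: something like "ethtool -t eth0" should run them, with the get_strings/get_sset_count pair letting ethtool enumerate the test names.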
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index d29fb9759cc9..1b5714926d81 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -287,7 +287,7 @@ static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
{
return __dev_forward_skb(dev, skb) ?: xdp ?
veth_xdp_rx(rq, skb) :
- netif_rx(skb);
+ __netif_rx(skb);
}
/* return true if the specified skb has chances of GRO aggregation
@@ -433,21 +433,6 @@ static void veth_set_multicast_list(struct net_device *dev)
{
}
-static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
- int buflen)
-{
- struct sk_buff *skb;
-
- skb = build_skb(head, buflen);
- if (!skb)
- return NULL;
-
- skb_reserve(skb, headroom);
- skb_put(skb, len);
-
- return skb;
-}
-
static int veth_select_rxq(struct net_device *dev)
{
return smp_processor_id() % dev->real_num_rx_queues;
@@ -494,7 +479,7 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
struct xdp_frame *frame = frames[i];
void *ptr = veth_xdp_to_ptr(frame);
- if (unlikely(frame->len > max_len ||
+ if (unlikely(xdp_get_frame_len(frame) > max_len ||
__ptr_ring_produce(&rq->xdp_ring, ptr)))
break;
nxmit++;
@@ -695,72 +680,143 @@ static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
}
}
-static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
- struct sk_buff *skb,
- struct veth_xdp_tx_bq *bq,
- struct veth_stats *stats)
+static void veth_xdp_get(struct xdp_buff *xdp)
{
- u32 pktlen, headroom, act, metalen, frame_sz;
- void *orig_data, *orig_data_end;
- struct bpf_prog *xdp_prog;
- int mac_len, delta, off;
- struct xdp_buff xdp;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ int i;
- skb_prepare_for_gro(skb);
+ get_page(virt_to_page(xdp->data));
+ if (likely(!xdp_buff_has_frags(xdp)))
+ return;
- rcu_read_lock();
- xdp_prog = rcu_dereference(rq->xdp_prog);
- if (unlikely(!xdp_prog)) {
- rcu_read_unlock();
- goto out;
- }
+ for (i = 0; i < sinfo->nr_frags; i++)
+ __skb_frag_ref(&sinfo->frags[i]);
+}
- mac_len = skb->data - skb_mac_header(skb);
- pktlen = skb->len + mac_len;
- headroom = skb_headroom(skb) - mac_len;
+static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
+ struct xdp_buff *xdp,
+ struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ u32 frame_sz;
if (skb_shared(skb) || skb_head_is_locked(skb) ||
- skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
+ skb_shinfo(skb)->nr_frags) {
+ u32 size, len, max_head_size, off;
struct sk_buff *nskb;
- int size, head_off;
- void *head, *start;
struct page *page;
+ int i, head_off;
- size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- if (size > PAGE_SIZE)
+ /* We need a private copy of the skb and its data buffers since
+ * the eBPF program can modify them. We segment the original skb
+ * into order-0 pages without linearizing it.
+ *
+ * Make sure we have enough space for the linear and paged areas.
+ */
+ max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE -
+ VETH_XDP_HEADROOM);
+ if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size)
goto drop;
+ /* Allocate skb head */
page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
if (!page)
goto drop;
- head = page_address(page);
- start = head + VETH_XDP_HEADROOM;
- if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
- page_frag_free(head);
+ nskb = build_skb(page_address(page), PAGE_SIZE);
+ if (!nskb) {
+ put_page(page);
goto drop;
}
- nskb = veth_build_skb(head, VETH_XDP_HEADROOM + mac_len,
- skb->len, PAGE_SIZE);
- if (!nskb) {
- page_frag_free(head);
+ skb_reserve(nskb, VETH_XDP_HEADROOM);
+ size = min_t(u32, skb->len, max_head_size);
+ if (skb_copy_bits(skb, 0, nskb->data, size)) {
+ consume_skb(nskb);
goto drop;
}
+ skb_put(nskb, size);
skb_copy_header(nskb, skb);
head_off = skb_headroom(nskb) - skb_headroom(skb);
skb_headers_offset_update(nskb, head_off);
+
+ /* Allocate paged area of new skb */
+ off = size;
+ len = skb->len - off;
+
+ for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
+ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (!page) {
+ consume_skb(nskb);
+ goto drop;
+ }
+
+ size = min_t(u32, len, PAGE_SIZE);
+ skb_add_rx_frag(nskb, i, page, 0, size, PAGE_SIZE);
+ if (skb_copy_bits(skb, off, page_address(page),
+ size)) {
+ consume_skb(nskb);
+ goto drop;
+ }
+
+ len -= size;
+ off += size;
+ }
+
consume_skb(skb);
skb = nskb;
+ } else if (skb_headroom(skb) < XDP_PACKET_HEADROOM &&
+ pskb_expand_head(skb, VETH_XDP_HEADROOM, 0, GFP_ATOMIC)) {
+ goto drop;
}
/* SKB "head" area always has tailroom for skb_shared_info */
frame_sz = skb_end_pointer(skb) - skb->head;
frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq);
- xdp_prepare_buff(&xdp, skb->head, skb->mac_header, pktlen, true);
+ xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
+ xdp_prepare_buff(xdp, skb->head, skb_headroom(skb),
+ skb_headlen(skb), true);
+
+ if (skb_is_nonlinear(skb)) {
+ skb_shinfo(skb)->xdp_frags_size = skb->data_len;
+ xdp_buff_set_frags_flag(xdp);
+ } else {
+ xdp_buff_clear_frags_flag(xdp);
+ }
+ *pskb = skb;
+
+ return 0;
+drop:
+ consume_skb(skb);
+ *pskb = NULL;
+
+ return -ENOMEM;
+}
+
+static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
+ struct sk_buff *skb,
+ struct veth_xdp_tx_bq *bq,
+ struct veth_stats *stats)
+{
+ void *orig_data, *orig_data_end;
+ struct bpf_prog *xdp_prog;
+ struct xdp_buff xdp;
+ u32 act, metalen;
+ int off;
+
+ skb_prepare_for_gro(skb);
+
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(rq->xdp_prog);
+ if (unlikely(!xdp_prog)) {
+ rcu_read_unlock();
+ goto out;
+ }
+
+ __skb_push(skb, skb->data - skb_mac_header(skb));
+ if (veth_convert_skb_to_xdp_buff(rq, &xdp, &skb))
+ goto drop;
orig_data = xdp.data;
orig_data_end = xdp.data_end;
@@ -771,7 +827,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
case XDP_PASS:
break;
case XDP_TX:
- get_page(virt_to_page(xdp.data));
+ veth_xdp_get(&xdp);
consume_skb(skb);
xdp.rxq->mem = rq->xdp_mem;
if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
@@ -783,7 +839,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
- get_page(virt_to_page(xdp.data));
+ veth_xdp_get(&xdp);
consume_skb(skb);
xdp.rxq->mem = rq->xdp_mem;
if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
@@ -806,18 +862,27 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
rcu_read_unlock();
/* check if bpf_xdp_adjust_head was used */
- delta = orig_data - xdp.data;
- off = mac_len + delta;
+ off = orig_data - xdp.data;
if (off > 0)
__skb_push(skb, off);
else if (off < 0)
__skb_pull(skb, -off);
- skb->mac_header -= delta;
+
+ skb_reset_mac_header(skb);
/* check if bpf_xdp_adjust_tail was used */
off = xdp.data_end - orig_data_end;
if (off != 0)
__skb_put(skb, off); /* positive on grow, negative on shrink */
+
+ /* XDP frag metadata (e.g. nr_frags) is updated by eBPF helpers
+ * (e.g. bpf_xdp_adjust_tail), so we need to update data_len here.
+ */
+ if (xdp_buff_has_frags(&xdp))
+ skb->data_len = skb_shinfo(skb)->xdp_frags_size;
+ else
+ skb->data_len = 0;
+
skb->protocol = eth_type_trans(skb, rq->dev);
metalen = xdp.data - xdp.data_meta;
@@ -833,7 +898,7 @@ xdp_drop:
return NULL;
err_xdp:
rcu_read_unlock();
- page_frag_free(xdp.data);
+ xdp_return_buff(&xdp);
xdp_xmit:
return NULL;
}
@@ -855,7 +920,7 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
/* ndo_xdp_xmit */
struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
- stats->xdp_bytes += frame->len;
+ stats->xdp_bytes += xdp_get_frame_len(frame);
frame = veth_xdp_rcv_one(rq, frame, bq, stats);
if (frame) {
/* XDP_PASS */
@@ -1463,9 +1528,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
goto err;
}
- max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
- peer->hard_header_len -
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ max_mtu = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) -
+ peer->hard_header_len;
+ /* Allow increasing the max_mtu if the program supports
+ * XDP fragments.
+ */
+ if (prog->aux->xdp_has_frags)
+ max_mtu += PAGE_SIZE * MAX_SKB_FRAGS;
+
if (peer->mtu > max_mtu) {
NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
err = -ERANGE;
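
Both veth hunks that switched from frame->len to xdp_get_frame_len() account for multi-buffer frames, where the total length includes bytes held in fragments. A sketch of the semantics this relies on, modelled on the contemporaneous helpers rather than copied from this patch:

static unsigned int example_frame_len(struct xdp_frame *frame)
{
	unsigned int len = frame->len;	/* linear part */

	if (unlikely(xdp_frame_has_frags(frame)))
		len += xdp_get_shared_info_from_frame(frame)->xdp_frags_size;

	return len;
}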
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a801ea40908f..11f26b00a226 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -3449,8 +3449,7 @@ static __init int virtio_net_driver_init(void)
NULL, virtnet_cpu_dead);
if (ret)
goto err_dead;
-
- ret = register_virtio_driver(&virtio_net_driver);
+ ret = register_virtio_driver(&virtio_net_driver);
if (ret)
goto err_virtio;
return 0;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index e0b1ab99a359..85e362461d71 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -418,7 +418,7 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
skb->protocol = eth_type_trans(skb, dev);
- if (likely(netif_rx(skb) == NET_RX_SUCCESS))
+ if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
vrf_rx_stats(dev, len);
else
this_cpu_inc(dev->dstats->rx_drps);
@@ -472,14 +472,13 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
memset(&fl6, 0, sizeof(fl6));
/* needed to match OIF rule */
- fl6.flowi6_oif = dev->ifindex;
+ fl6.flowi6_l3mdev = dev->ifindex;
fl6.flowi6_iif = LOOPBACK_IFINDEX;
fl6.daddr = iph->daddr;
fl6.saddr = iph->saddr;
fl6.flowlabel = ip6_flowinfo(iph);
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = iph->nexthdr;
- fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL);
if (IS_ERR(dst) || dst == dst_null)
@@ -551,10 +550,10 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
memset(&fl4, 0, sizeof(fl4));
/* needed to match OIF rule */
- fl4.flowi4_oif = vrf_dev->ifindex;
+ fl4.flowi4_l3mdev = vrf_dev->ifindex;
fl4.flowi4_iif = LOOPBACK_IFINDEX;
fl4.flowi4_tos = RT_TOS(ip4h->tos);
- fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF;
+ fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
fl4.flowi4_proto = ip4h->protocol;
fl4.daddr = ip4h->daddr;
fl4.saddr = ip4h->saddr;
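Both VRF hunks swap the output-interface pin (plus FLOWI_FLAG_SKIP_NH_OIF) for the dedicated flowi{4,6}_l3mdev key: the l3mdev FIB rule can now scope the lookup to the VRF table without constraining which device the nexthop uses. A minimal IPv4 sketch of the resulting pattern (hypothetical helper; error handling elided):

/* Route daddr within the VRF identified by vrf_dev, without forcing
 * the result out of vrf_dev itself.
 */
static struct rtable *vrf_route_lookup_sketch(struct net *net,
					      struct net_device *vrf_dev,
					      __be32 daddr, __be32 saddr)
{
	struct flowi4 fl4 = {
		.flowi4_l3mdev = vrf_dev->ifindex, /* match the l3mdev rule */
		.flowi4_iif    = LOOPBACK_IFINDEX,
		.flowi4_flags  = FLOWI_FLAG_ANYSRC,
		.daddr         = daddr,
		.saddr         = saddr,
	};

	return ip_route_output_flow(net, &fl4, NULL);
}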
diff --git a/drivers/net/vxlan/Makefile b/drivers/net/vxlan/Makefile
new file mode 100644
index 000000000000..d4c255499b72
--- /dev/null
+++ b/drivers/net/vxlan/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the vxlan driver
+#
+
+obj-$(CONFIG_VXLAN) += vxlan.o
+
+vxlan-objs := vxlan_core.o vxlan_multicast.o vxlan_vnifilter.o
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan/vxlan_core.c
index 359d16780dbb..de97ff98d36e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -34,10 +34,10 @@
#include <net/ip6_checksum.h>
#endif
+#include "vxlan_private.h"
+
#define VXLAN_VERSION "0.1"
-#define PORT_HASH_BITS 8
-#define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */
@@ -53,41 +53,15 @@ static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
-static unsigned int vxlan_net_id;
-static struct rtnl_link_ops vxlan_link_ops;
+unsigned int vxlan_net_id;
-static const u8 all_zeros_mac[ETH_ALEN + 2];
+const u8 all_zeros_mac[ETH_ALEN + 2];
+static struct rtnl_link_ops vxlan_link_ops;
static int vxlan_sock_add(struct vxlan_dev *vxlan);
static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
-/* per-network namespace private data for this module */
-struct vxlan_net {
- struct list_head vxlan_list;
- struct hlist_head sock_list[PORT_HASH_SIZE];
- spinlock_t sock_lock;
- struct notifier_block nexthop_notifier_block;
-};
-
-/* Forwarding table entry */
-struct vxlan_fdb {
- struct hlist_node hlist; /* linked list of entries */
- struct rcu_head rcu;
- unsigned long updated; /* jiffies */
- unsigned long used;
- struct list_head remotes;
- u8 eth_addr[ETH_ALEN];
- u16 state; /* see ndm_state */
- __be32 vni;
- u16 flags; /* see ndm_flags and below */
- struct list_head nh_list;
- struct nexthop __rcu *nh;
- struct vxlan_dev __rcu *vdev;
-};
-
-#define NTF_VXLAN_ADDED_BY_USER 0x100
-
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
@@ -98,17 +72,6 @@ static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
}
#if IS_ENABLED(CONFIG_IPV6)
-static inline
-bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
-{
- if (a->sa.sa_family != b->sa.sa_family)
- return false;
- if (a->sa.sa_family == AF_INET6)
- return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
- else
- return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
-}
-
static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
if (nla_len(nla) >= sizeof(struct in6_addr)) {
@@ -135,12 +98,6 @@ static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
#else /* !CONFIG_IPV6 */
-static inline
-bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
-{
- return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
-}
-
static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
if (nla_len(nla) >= sizeof(struct in6_addr)) {
@@ -161,37 +118,6 @@ static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
}
#endif
-/* Virtual Network hash table head */
-static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
-{
- return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
-}
-
-/* Socket hash table head */
-static inline struct hlist_head *vs_head(struct net *net, __be16 port)
-{
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
-
- return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
-}
-
-/* First remote destination for a forwarding entry.
- * Guaranteed to be non-NULL because remotes are never deleted.
- */
-static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
-{
- if (rcu_access_pointer(fdb->nh))
- return NULL;
- return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
-}
-
-static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
-{
- if (rcu_access_pointer(fdb->nh))
- return NULL;
- return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
-}
-
/* Find VXLAN socket based on network namespace, address family, UDP port,
* enabled unshareable flags and socket device binding (see l3mdev with
* non-default VRF).
@@ -213,18 +139,29 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
return NULL;
}
-static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, int ifindex,
- __be32 vni)
+static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs,
+ int ifindex, __be32 vni,
+ struct vxlan_vni_node **vninode)
{
+ struct vxlan_vni_node *vnode;
struct vxlan_dev_node *node;
/* For flow based devices, map all packets to VNI 0 */
- if (vs->flags & VXLAN_F_COLLECT_METADATA)
+ if (vs->flags & VXLAN_F_COLLECT_METADATA &&
+ !(vs->flags & VXLAN_F_VNIFILTER))
vni = 0;
hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) {
- if (node->vxlan->default_dst.remote_vni != vni)
+ if (!node->vxlan)
+ continue;
+ vnode = NULL;
+ if (node->vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
+ vnode = vxlan_vnifilter_lookup(node->vxlan, vni);
+ if (!vnode)
+ continue;
+ } else if (node->vxlan->default_dst.remote_vni != vni) {
continue;
+ }
if (IS_ENABLED(CONFIG_IPV6)) {
const struct vxlan_config *cfg = &node->vxlan->cfg;
@@ -234,6 +171,8 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, int ifindex,
continue;
}
+ if (vninode)
+ *vninode = vnode;
return node->vxlan;
}
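The demux now has two match modes: a VXLAN_F_VNIFILTER device claims a packet when the incoming VNI is present in its per-device set, while a classic device still compares its single configured VNI; metadata collection also stops forcing VNI 0 once the socket is vnifiltering. The per-device decision, distilled into a hypothetical helper (not part of the patch):

static bool vxlan_dev_claims_vni(struct vxlan_dev *v, __be32 vni)
{
	if (v->cfg.flags & VXLAN_F_VNIFILTER)
		return vxlan_vnifilter_lookup(v, vni) != NULL;
	return v->default_dst.remote_vni == vni;
}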
@@ -251,7 +190,7 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, int ifindex,
if (!vs)
return NULL;
- return vxlan_vs_find_vni(vs, ifindex, vni);
+ return vxlan_vs_find_vni(vs, ifindex, vni, NULL);
}
/* Fill in neighbour message in skbuff. */
@@ -493,7 +432,7 @@ static u32 eth_hash(const unsigned char *addr)
return hash_64(value, FDB_HASH_BITS);
}
-static u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
+u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
{
/* use 1 byte of OUI and 3 bytes of NIC */
u32 key = get_unaligned((u32 *)(addr + 2));
@@ -501,7 +440,7 @@ static u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1);
}
-static u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni)
+u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni)
{
if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
return eth_vni_hash(mac, vni);
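eth_vni_hash(), now exported to the other vxlan files, keys on the unaligned 32-bit word at offset 2 of the MAC (one OUI byte plus the three NIC bytes) mixed with the VNI and a boot-time salt. A userspace illustration of the keying (jhash_2words() stubbed out; FDB_HASH_SIZE assumed to be 256):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t jhash_2words_stub(uint32_t a, uint32_t b, uint32_t initval)
{
	return a ^ b ^ initval;	/* placeholder, NOT the kernel's jhash */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t key, vni = 100, salt = 0xdeadbeef;

	memcpy(&key, mac + 2, sizeof(key));	/* bytes 22:33:44:55 */
	printf("bucket = %u\n",
	       jhash_2words_stub(key, vni, salt) & (256 - 1));
	return 0;
}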
@@ -872,37 +811,35 @@ static int vxlan_fdb_nh_update(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
goto err_inval;
}
- if (nh) {
- if (!nexthop_get(nh)) {
- NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
- nh = NULL;
- goto err_inval;
- }
- if (!nexthop_is_fdb(nh)) {
- NL_SET_ERR_MSG(extack, "Nexthop is not a fdb nexthop");
- goto err_inval;
- }
+ if (!nexthop_get(nh)) {
+ NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
+ nh = NULL;
+ goto err_inval;
+ }
+ if (!nexthop_is_fdb(nh)) {
+ NL_SET_ERR_MSG(extack, "Nexthop is not a fdb nexthop");
+ goto err_inval;
+ }
+
+ if (!nexthop_is_multipath(nh)) {
+ NL_SET_ERR_MSG(extack, "Nexthop is not a multipath group");
+ goto err_inval;
+ }
- if (!nexthop_is_multipath(nh)) {
- NL_SET_ERR_MSG(extack, "Nexthop is not a multipath group");
+ /* check nexthop group family */
+ switch (vxlan->default_dst.remote_ip.sa.sa_family) {
+ case AF_INET:
+ if (!nexthop_has_v4(nh)) {
+ err = -EAFNOSUPPORT;
+ NL_SET_ERR_MSG(extack, "Nexthop group family not supported");
goto err_inval;
}
-
- /* check nexthop group family */
- switch (vxlan->default_dst.remote_ip.sa.sa_family) {
- case AF_INET:
- if (!nexthop_has_v4(nh)) {
- err = -EAFNOSUPPORT;
- NL_SET_ERR_MSG(extack, "Nexthop group family not supported");
- goto err_inval;
- }
- break;
- case AF_INET6:
- if (nexthop_has_v4(nh)) {
- err = -EAFNOSUPPORT;
- NL_SET_ERR_MSG(extack, "Nexthop group family not supported");
- goto err_inval;
- }
+ break;
+ case AF_INET6:
+ if (nexthop_has_v4(nh)) {
+ err = -EAFNOSUPPORT;
+ NL_SET_ERR_MSG(extack, "Nexthop group family not supported");
+ goto err_inval;
}
}
@@ -920,12 +857,12 @@ err_inval:
return err;
}
-static int vxlan_fdb_create(struct vxlan_dev *vxlan,
- const u8 *mac, union vxlan_addr *ip,
- __u16 state, __be16 port, __be32 src_vni,
- __be32 vni, __u32 ifindex, __u16 ndm_flags,
- u32 nhid, struct vxlan_fdb **fdb,
- struct netlink_ext_ack *extack)
+int vxlan_fdb_create(struct vxlan_dev *vxlan,
+ const u8 *mac, union vxlan_addr *ip,
+ __u16 state, __be16 port, __be32 src_vni,
+ __be32 vni, __u32 ifindex, __u16 ndm_flags,
+ u32 nhid, struct vxlan_fdb **fdb,
+ struct netlink_ext_ack *extack)
{
struct vxlan_rdst *rd = NULL;
struct vxlan_fdb *f;
@@ -1150,13 +1087,13 @@ err_notify:
}
/* Add new entry to forwarding table -- assumes lock held */
-static int vxlan_fdb_update(struct vxlan_dev *vxlan,
- const u8 *mac, union vxlan_addr *ip,
- __u16 state, __u16 flags,
- __be16 port, __be32 src_vni, __be32 vni,
- __u32 ifindex, __u16 ndm_flags, u32 nhid,
- bool swdev_notify,
- struct netlink_ext_ack *extack)
+int vxlan_fdb_update(struct vxlan_dev *vxlan,
+ const u8 *mac, union vxlan_addr *ip,
+ __u16 state, __u16 flags,
+ __be16 port, __be32 src_vni, __be32 vni,
+ __u32 ifindex, __u16 ndm_flags, u32 nhid,
+ bool swdev_notify,
+ struct netlink_ext_ack *extack)
{
struct vxlan_fdb *f;
@@ -1307,10 +1244,10 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
}
-static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
- const unsigned char *addr, union vxlan_addr ip,
- __be16 port, __be32 src_vni, __be32 vni,
- u32 ifindex, bool swdev_notify)
+int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
+ const unsigned char *addr, union vxlan_addr ip,
+ __be16 port, __be32 src_vni, __be32 vni,
+ u32 ifindex, bool swdev_notify)
{
struct vxlan_rdst *rd = NULL;
struct vxlan_fdb *f;
@@ -1519,56 +1456,6 @@ static bool vxlan_snoop(struct net_device *dev,
return false;
}
-/* See if multicast group is already in use by other ID */
-static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
-{
- struct vxlan_dev *vxlan;
- struct vxlan_sock *sock4;
-#if IS_ENABLED(CONFIG_IPV6)
- struct vxlan_sock *sock6;
-#endif
- unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
-
- sock4 = rtnl_dereference(dev->vn4_sock);
-
- /* The vxlan_sock is only used by dev, leaving group has
- * no effect on other vxlan devices.
- */
- if (family == AF_INET && sock4 && refcount_read(&sock4->refcnt) == 1)
- return false;
-#if IS_ENABLED(CONFIG_IPV6)
- sock6 = rtnl_dereference(dev->vn6_sock);
- if (family == AF_INET6 && sock6 && refcount_read(&sock6->refcnt) == 1)
- return false;
-#endif
-
- list_for_each_entry(vxlan, &vn->vxlan_list, next) {
- if (!netif_running(vxlan->dev) || vxlan == dev)
- continue;
-
- if (family == AF_INET &&
- rtnl_dereference(vxlan->vn4_sock) != sock4)
- continue;
-#if IS_ENABLED(CONFIG_IPV6)
- if (family == AF_INET6 &&
- rtnl_dereference(vxlan->vn6_sock) != sock6)
- continue;
-#endif
-
- if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
- &dev->default_dst.remote_ip))
- continue;
-
- if (vxlan->default_dst.remote_ifindex !=
- dev->default_dst.remote_ifindex)
- continue;
-
- return true;
- }
-
- return false;
-}
-
static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
{
struct vxlan_net *vn;
@@ -1602,7 +1489,10 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan)
RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
synchronize_net();
- vxlan_vs_del_dev(vxlan);
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
+ vxlan_vs_del_vnigrp(vxlan);
+ else
+ vxlan_vs_del_dev(vxlan);
if (__vxlan_sock_release_prep(sock4)) {
udp_tunnel_sock_release(sock4->sock);
@@ -1617,76 +1507,6 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan)
#endif
}
-/* Update multicast group membership when first VNI on
- * multicast address is brought up
- */
-static int vxlan_igmp_join(struct vxlan_dev *vxlan)
-{
- struct sock *sk;
- union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
- int ifindex = vxlan->default_dst.remote_ifindex;
- int ret = -EINVAL;
-
- if (ip->sa.sa_family == AF_INET) {
- struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
- struct ip_mreqn mreq = {
- .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
- .imr_ifindex = ifindex,
- };
-
- sk = sock4->sock->sk;
- lock_sock(sk);
- ret = ip_mc_join_group(sk, &mreq);
- release_sock(sk);
-#if IS_ENABLED(CONFIG_IPV6)
- } else {
- struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
-
- sk = sock6->sock->sk;
- lock_sock(sk);
- ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
- &ip->sin6.sin6_addr);
- release_sock(sk);
-#endif
- }
-
- return ret;
-}
-
-/* Inverse of vxlan_igmp_join when last VNI is brought down */
-static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
-{
- struct sock *sk;
- union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
- int ifindex = vxlan->default_dst.remote_ifindex;
- int ret = -EINVAL;
-
- if (ip->sa.sa_family == AF_INET) {
- struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
- struct ip_mreqn mreq = {
- .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
- .imr_ifindex = ifindex,
- };
-
- sk = sock4->sock->sk;
- lock_sock(sk);
- ret = ip_mc_leave_group(sk, &mreq);
- release_sock(sk);
-#if IS_ENABLED(CONFIG_IPV6)
- } else {
- struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
-
- sk = sock6->sock->sk;
- lock_sock(sk);
- ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
- &ip->sin6.sin6_addr);
- release_sock(sk);
-#endif
- }
-
- return ret;
-}
-
static bool vxlan_remcsum(struct vxlanhdr *unparsed,
struct sk_buff *skb, u32 vxflags)
{
@@ -1828,6 +1648,7 @@ static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
+ struct vxlan_vni_node *vninode = NULL;
struct vxlan_dev *vxlan;
struct vxlan_sock *vs;
struct vxlanhdr unparsed;
@@ -1860,7 +1681,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
- vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
+ vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, &vninode);
if (!vxlan)
goto drop;
@@ -1930,6 +1751,8 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
++vxlan->dev->stats.rx_frame_errors;
++vxlan->dev->stats.rx_errors;
+ vxlan_vnifilter_count(vxlan, vni, vninode,
+ VXLAN_VNI_STATS_RX_ERRORS, 0);
goto drop;
}
@@ -1937,11 +1760,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
rcu_read_unlock();
- atomic_long_inc(&vxlan->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(vxlan->dev);
+ vxlan_vnifilter_count(vxlan, vni, vninode,
+ VXLAN_VNI_STATS_RX_DROPS, 0);
goto drop;
}
dev_sw_netstats_rx_add(vxlan->dev, skb->len);
+ vxlan_vnifilter_count(vxlan, vni, vninode, VXLAN_VNI_STATS_RX, skb->len);
gro_cells_receive(&vxlan->gro_cells, skb);
rcu_read_unlock();
@@ -1975,7 +1801,7 @@ static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb)
return -ENOENT;
vni = vxlan_vni(hdr->vx_vni);
- vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
+ vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, NULL);
if (!vxlan)
return -ENOENT;
@@ -2049,8 +1875,12 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
reply->ip_summed = CHECKSUM_UNNECESSARY;
reply->pkt_type = PACKET_HOST;
- if (netif_rx_ni(reply) == NET_RX_DROP)
+ if (netif_rx(reply) == NET_RX_DROP) {
dev->stats.rx_dropped++;
+ vxlan_vnifilter_count(vxlan, vni, NULL,
+ VXLAN_VNI_STATS_RX_DROPS, 0);
+ }
+
} else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin.sin_addr.s_addr = tip,
@@ -2204,9 +2034,11 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
if (reply == NULL)
goto out;
- if (netif_rx_ni(reply) == NET_RX_DROP)
+ if (netif_rx(reply) == NET_RX_DROP) {
dev->stats.rx_dropped++;
-
+ vxlan_vnifilter_count(vxlan, vni, NULL,
+ VXLAN_VNI_STATS_RX_DROPS, 0);
+ }
} else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin6.sin6_addr = msg->target,
@@ -2540,15 +2372,20 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
tx_stats->tx_packets++;
tx_stats->tx_bytes += len;
u64_stats_update_end(&tx_stats->syncp);
+ vxlan_vnifilter_count(src_vxlan, vni, NULL, VXLAN_VNI_STATS_TX, len);
- if (netif_rx(skb) == NET_RX_SUCCESS) {
+ if (__netif_rx(skb) == NET_RX_SUCCESS) {
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->rx_packets++;
rx_stats->rx_bytes += len;
u64_stats_update_end(&rx_stats->syncp);
+ vxlan_vnifilter_count(dst_vxlan, vni, NULL, VXLAN_VNI_STATS_RX,
+ len);
} else {
drop:
dev->stats.rx_dropped++;
+ vxlan_vnifilter_count(dst_vxlan, vni, NULL,
+ VXLAN_VNI_STATS_RX_DROPS, 0);
}
rcu_read_unlock();
}
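These conversions track the 5.18-era netif_rx() rework: netif_rx() became callable from any context (netif_rx_ni() is gone), and __netif_rx() is the lighter variant for callers that already run with bottom halves disabled, as the xmit-side bypass paths here do. The assumed contract, as a sketch (hypothetical helper):

static void inject_skb(struct sk_buff *skb, bool bh_already_disabled)
{
	if (bh_already_disabled)
		__netif_rx(skb);	/* skips the redundant local_bh_disable() */
	else
		netif_rx(skb);		/* safe from process context too */
}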
@@ -2578,6 +2415,8 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
vxlan->cfg.flags);
if (!dst_vxlan) {
dev->stats.tx_errors++;
+ vxlan_vnifilter_count(vxlan, vni, NULL,
+ VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
return -ENOENT;
@@ -2601,15 +2440,19 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
union vxlan_addr remote_ip, local_ip;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
+ unsigned int pkt_len = skb->len;
__be16 src_port = 0, dst_port;
struct dst_entry *ndst = NULL;
- __be32 vni, label;
__u8 tos, ttl;
int ifindex;
int err;
u32 flags = vxlan->cfg.flags;
bool udp_sum = false;
bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
+ __be32 vni = 0;
+#if IS_ENABLED(CONFIG_IPV6)
+ __be32 label;
+#endif
info = skb_tunnel_info(skb);
@@ -2647,7 +2490,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX);
else
udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
+#if IS_ENABLED(CONFIG_IPV6)
label = vxlan->cfg.label;
+#endif
} else {
if (!info) {
WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
@@ -2674,7 +2519,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
ttl = info->key.ttl;
tos = info->key.tos;
+#if IS_ENABLED(CONFIG_IPV6)
label = info->key.label;
+#endif
udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
}
src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
@@ -2821,12 +2668,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
label, src_port, dst_port, !udp_sum);
#endif
}
+ vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX, pkt_len);
out_unlock:
rcu_read_unlock();
return;
drop:
dev->stats.tx_dropped++;
+ vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
return;
@@ -2838,6 +2687,7 @@ tx_error:
dev->stats.tx_carrier_errors++;
dst_release(ndst);
dev->stats.tx_errors++;
+ vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
}
@@ -2870,6 +2720,8 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
drop:
dev->stats.tx_dropped++;
+ vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
+ VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
}
@@ -2944,6 +2796,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
vxlan_fdb_miss(vxlan, eth->h_dest);
dev->stats.tx_dropped++;
+ vxlan_vnifilter_count(vxlan, vni, NULL,
+ VXLAN_VNI_STATS_TX_DROPS, 0);
kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -3044,6 +2898,9 @@ static int vxlan_init(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
int err;
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
+ vxlan_vnigroup_init(vxlan);
+
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
@@ -3073,6 +2930,9 @@ static void vxlan_uninit(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
+ vxlan_vnigroup_uninit(vxlan);
+
gro_cells_destroy(&vxlan->gro_cells);
vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
@@ -3090,14 +2950,10 @@ static int vxlan_open(struct net_device *dev)
if (ret < 0)
return ret;
- if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
- ret = vxlan_igmp_join(vxlan);
- if (ret == -EADDRINUSE)
- ret = 0;
- if (ret) {
- vxlan_sock_release(vxlan);
- return ret;
- }
+ ret = vxlan_multicast_join(vxlan);
+ if (ret) {
+ vxlan_sock_release(vxlan);
+ return ret;
}
if (vxlan->cfg.age_interval)
@@ -3134,19 +2990,15 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
static int vxlan_stop(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
- int ret = 0;
- if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
- !vxlan_group_used(vn, vxlan))
- ret = vxlan_igmp_leave(vxlan);
+ vxlan_multicast_leave(vxlan);
del_timer_sync(&vxlan->age_timer);
vxlan_flush(vxlan, false);
vxlan_sock_release(vxlan);
- return ret;
+ return 0;
}
/* Stub, nothing needs to be done. */
@@ -3369,6 +3221,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG },
[IFLA_VXLAN_TTL_INHERIT] = { .type = NLA_FLAG },
[IFLA_VXLAN_DF] = { .type = NLA_U8 },
+ [IFLA_VXLAN_VNIFILTER] = { .type = NLA_U8 },
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -3554,6 +3407,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA;
struct vxlan_sock *vs = NULL;
struct vxlan_dev_node *node;
int l3mdev_index = 0;
@@ -3589,7 +3443,12 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
rcu_assign_pointer(vxlan->vn4_sock, vs);
node = &vxlan->hlist4;
}
- vxlan_vs_add_dev(vs, vxlan, node);
+
+ if (metadata && (vxlan->cfg.flags & VXLAN_F_VNIFILTER))
+ vxlan_vs_add_vnigrp(vxlan, vs, ipv6);
+ else
+ vxlan_vs_add_dev(vs, vxlan, node);
+
return 0;
}
@@ -3616,13 +3475,42 @@ static int vxlan_sock_add(struct vxlan_dev *vxlan)
return ret;
}
+int vxlan_vni_in_use(struct net *src_net, struct vxlan_dev *vxlan,
+ struct vxlan_config *conf, __be32 vni)
+{
+ struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
+ struct vxlan_dev *tmp;
+
+ list_for_each_entry(tmp, &vn->vxlan_list, next) {
+ if (tmp == vxlan)
+ continue;
+ if (tmp->cfg.flags & VXLAN_F_VNIFILTER) {
+ if (!vxlan_vnifilter_lookup(tmp, vni))
+ continue;
+ } else if (tmp->cfg.vni != vni) {
+ continue;
+ }
+ if (tmp->cfg.dst_port != conf->dst_port)
+ continue;
+ if ((tmp->cfg.flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)) !=
+ (conf->flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)))
+ continue;
+
+ if ((conf->flags & VXLAN_F_IPV6_LINKLOCAL) &&
+ tmp->cfg.remote_ifindex != conf->remote_ifindex)
+ continue;
+
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
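Hoisting the duplicate check into vxlan_vni_in_use() lets device validation (below) and the per-VNI add path share it, and makes it filter-aware: a vnifilter device occupies every VNI in its set, not just cfg.vni. Caller shape, mirroring the vnifilter add path added later in the series (fragment):

	err = vxlan_vni_in_use(vxlan->net, vxlan, &vxlan->cfg,
			       cpu_to_be32(100));	/* example VNI */
	if (err) {		/* -EEXIST on a collision */
		NL_SET_ERR_MSG(extack, "VNI in use");
		return err;
	}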
static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf,
struct net_device **lower,
struct vxlan_dev *old,
struct netlink_ext_ack *extack)
{
- struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
- struct vxlan_dev *tmp;
bool use_ipv6 = false;
if (conf->flags & VXLAN_F_GPE) {
@@ -3755,22 +3643,7 @@ static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf,
if (!conf->age_interval)
conf->age_interval = FDB_AGE_DEFAULT;
- list_for_each_entry(tmp, &vn->vxlan_list, next) {
- if (tmp == old)
- continue;
-
- if (tmp->cfg.vni != conf->vni)
- continue;
- if (tmp->cfg.dst_port != conf->dst_port)
- continue;
- if ((tmp->cfg.flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)) !=
- (conf->flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)))
- continue;
-
- if ((conf->flags & VXLAN_F_IPV6_LINKLOCAL) &&
- tmp->cfg.remote_ifindex != conf->remote_ifindex)
- continue;
-
+ if (vxlan_vni_in_use(src_net, old, conf, conf->vni)) {
NL_SET_ERR_MSG(extack,
"A VXLAN device with the specified VNI already exists");
return -EEXIST;
@@ -4226,6 +4099,21 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
if (data[IFLA_VXLAN_DF])
conf->df = nla_get_u8(data[IFLA_VXLAN_DF]);
+ if (data[IFLA_VXLAN_VNIFILTER]) {
+ err = vxlan_nl2flag(conf, data, IFLA_VXLAN_VNIFILTER,
+ VXLAN_F_VNIFILTER, changelink, false,
+ extack);
+ if (err)
+ return err;
+
+ if ((conf->flags & VXLAN_F_VNIFILTER) &&
+ !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_VNIFILTER],
+ "vxlan vnifilter only valid in collect metadata mode");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -4301,6 +4189,19 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
dst->remote_ifindex,
true);
spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+
+ /* If vni filtering device, also update fdb entries of
+ * all vnis that were using default remote ip
+ */
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
+ err = vxlan_vnilist_update_group(vxlan, &dst->remote_ip,
+ &conf.remote_ip, extack);
+ if (err) {
+ netdev_adjacent_change_abort(dst->remote_dev,
+ lowerdev, dev);
+ return err;
+ }
+ }
}
if (conf.age_interval != vxlan->cfg.age_interval)
@@ -4446,6 +4347,11 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
goto nla_put_failure;
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER &&
+ nla_put_u8(skb, IFLA_VXLAN_VNIFILTER,
+ !!(vxlan->cfg.flags & VXLAN_F_VNIFILTER)))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
@@ -4805,6 +4711,8 @@ static int __init vxlan_init_module(void)
if (rc)
goto out4;
+ vxlan_vnifilter_init();
+
return 0;
out4:
unregister_switchdev_notifier(&vxlan_switchdev_notifier_block);
@@ -4819,6 +4727,7 @@ late_initcall(vxlan_init_module);
static void __exit vxlan_cleanup_module(void)
{
+ vxlan_vnifilter_uninit();
rtnl_link_unregister(&vxlan_link_ops);
unregister_switchdev_notifier(&vxlan_switchdev_notifier_block);
unregister_netdevice_notifier(&vxlan_notifier_block);
diff --git a/drivers/net/vxlan/vxlan_multicast.c b/drivers/net/vxlan/vxlan_multicast.c
new file mode 100644
index 000000000000..a7f2d67dc61b
--- /dev/null
+++ b/drivers/net/vxlan/vxlan_multicast.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Vxlan multicast group handling
+ *
+ */
+#include <linux/kernel.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <linux/igmp.h>
+#include <net/vxlan.h>
+
+#include "vxlan_private.h"
+
+/* Update multicast group membership when the first VNI on
+ * a multicast address is brought up
+ */
+int vxlan_igmp_join(struct vxlan_dev *vxlan, union vxlan_addr *rip,
+ int rifindex)
+{
+ union vxlan_addr *ip = (rip ? : &vxlan->default_dst.remote_ip);
+ int ifindex = (rifindex ? : vxlan->default_dst.remote_ifindex);
+ int ret = -EINVAL;
+ struct sock *sk;
+
+ if (ip->sa.sa_family == AF_INET) {
+ struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
+ struct ip_mreqn mreq = {
+ .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
+ .imr_ifindex = ifindex,
+ };
+
+ sk = sock4->sock->sk;
+ lock_sock(sk);
+ ret = ip_mc_join_group(sk, &mreq);
+ release_sock(sk);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
+
+ sk = sock6->sock->sk;
+ lock_sock(sk);
+ ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
+ &ip->sin6.sin6_addr);
+ release_sock(sk);
+#endif
+ }
+
+ return ret;
+}
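The (rip ?: &vxlan->default_dst.remote_ip) form is the GNU ?: extension the kernel builds with: it yields the first operand when that is non-NULL/nonzero, evaluating it only once, and the second operand otherwise. A tiny userspace demo (gcc/clang):

#include <stdio.h>

int main(void)
{
	const char *arg = NULL;

	printf("%s\n", arg ?: "default");	/* prints "default" */
	return 0;
}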
+
+int vxlan_igmp_leave(struct vxlan_dev *vxlan, union vxlan_addr *rip,
+ int rifindex)
+{
+ union vxlan_addr *ip = (rip ? : &vxlan->default_dst.remote_ip);
+ int ifindex = (rifindex ? : vxlan->default_dst.remote_ifindex);
+ int ret = -EINVAL;
+ struct sock *sk;
+
+ if (ip->sa.sa_family == AF_INET) {
+ struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
+ struct ip_mreqn mreq = {
+ .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
+ .imr_ifindex = ifindex,
+ };
+
+ sk = sock4->sock->sk;
+ lock_sock(sk);
+ ret = ip_mc_leave_group(sk, &mreq);
+ release_sock(sk);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
+
+ sk = sock6->sock->sk;
+ lock_sock(sk);
+ ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
+ &ip->sin6.sin6_addr);
+ release_sock(sk);
+#endif
+ }
+
+ return ret;
+}
+
+static bool vxlan_group_used_match(union vxlan_addr *ip, int ifindex,
+ union vxlan_addr *rip, int rifindex)
+{
+ if (!vxlan_addr_multicast(rip))
+ return false;
+
+ if (!vxlan_addr_equal(rip, ip))
+ return false;
+
+ if (rifindex != ifindex)
+ return false;
+
+ return true;
+}
+
+static bool vxlan_group_used_by_vnifilter(struct vxlan_dev *vxlan,
+ union vxlan_addr *ip, int ifindex)
+{
+ struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
+ struct vxlan_vni_node *v, *tmp;
+
+ if (vxlan_group_used_match(ip, ifindex,
+ &vxlan->default_dst.remote_ip,
+ vxlan->default_dst.remote_ifindex))
+ return true;
+
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+ if (!vxlan_addr_multicast(&v->remote_ip))
+ continue;
+
+ if (vxlan_group_used_match(ip, ifindex,
+ &v->remote_ip,
+ vxlan->default_dst.remote_ifindex))
+ return true;
+ }
+
+ return false;
+}
+
+/* See if multicast group is already in use by other ID */
+bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev,
+ __be32 vni, union vxlan_addr *rip, int rifindex)
+{
+ union vxlan_addr *ip = (rip ? : &dev->default_dst.remote_ip);
+ int ifindex = (rifindex ? : dev->default_dst.remote_ifindex);
+ struct vxlan_dev *vxlan;
+ struct vxlan_sock *sock4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct vxlan_sock *sock6;
+#endif
+ unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
+
+ sock4 = rtnl_dereference(dev->vn4_sock);
+
+ /* The vxlan_sock is only used by dev, leaving group has
+ * no effect on other vxlan devices.
+ */
+ if (family == AF_INET && sock4 && refcount_read(&sock4->refcnt) == 1)
+ return false;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ sock6 = rtnl_dereference(dev->vn6_sock);
+ if (family == AF_INET6 && sock6 && refcount_read(&sock6->refcnt) == 1)
+ return false;
+#endif
+
+ list_for_each_entry(vxlan, &vn->vxlan_list, next) {
+ if (!netif_running(vxlan->dev) || vxlan == dev)
+ continue;
+
+ if (family == AF_INET &&
+ rtnl_dereference(vxlan->vn4_sock) != sock4)
+ continue;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (family == AF_INET6 &&
+ rtnl_dereference(vxlan->vn6_sock) != sock6)
+ continue;
+#endif
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
+ if (!vxlan_group_used_by_vnifilter(vxlan, ip, ifindex))
+ continue;
+ } else {
+ if (!vxlan_group_used_match(ip, ifindex,
+ &vxlan->default_dst.remote_ip,
+ vxlan->default_dst.remote_ifindex))
+ continue;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+static int vxlan_multicast_join_vnigrp(struct vxlan_dev *vxlan)
+{
+ struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
+ struct vxlan_vni_node *v, *tmp, *vgood = NULL;
+ int ret = 0;
+
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+ if (!vxlan_addr_multicast(&v->remote_ip))
+ continue;
+ /* skip if address is same as default address */
+ if (vxlan_addr_equal(&v->remote_ip,
+ &vxlan->default_dst.remote_ip))
+ continue;
+ ret = vxlan_igmp_join(vxlan, &v->remote_ip, 0);
+ if (ret == -EADDRINUSE)
+ ret = 0;
+ if (ret)
+ goto out;
+ vgood = v;
+ }
+out:
+ if (ret) {
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+ if (!vxlan_addr_multicast(&v->remote_ip))
+ continue;
+ if (vxlan_addr_equal(&v->remote_ip,
+ &vxlan->default_dst.remote_ip))
+ continue;
+ vxlan_igmp_leave(vxlan, &v->remote_ip, 0);
+ if (v == vgood)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int vxlan_multicast_leave_vnigrp(struct vxlan_dev *vxlan)
+{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
+ struct vxlan_vni_node *v, *tmp;
+ int last_err = 0, ret;
+
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+ if (vxlan_addr_multicast(&v->remote_ip) &&
+ !vxlan_group_used(vn, vxlan, v->vni, &v->remote_ip,
+ 0)) {
+ ret = vxlan_igmp_leave(vxlan, &v->remote_ip, 0);
+ if (ret)
+ last_err = ret;
+ }
+ }
+
+ return last_err;
+}
+
+int vxlan_multicast_join(struct vxlan_dev *vxlan)
+{
+ int ret = 0;
+
+ if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
+ ret = vxlan_igmp_join(vxlan, &vxlan->default_dst.remote_ip,
+ vxlan->default_dst.remote_ifindex);
+ if (ret == -EADDRINUSE)
+ ret = 0;
+ if (ret)
+ return ret;
+ }
+
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
+ return vxlan_multicast_join_vnigrp(vxlan);
+
+ return 0;
+}
+
+int vxlan_multicast_leave(struct vxlan_dev *vxlan)
+{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ int ret = 0;
+
+ if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
+ !vxlan_group_used(vn, vxlan, 0, NULL, 0)) {
+ ret = vxlan_igmp_leave(vxlan, &vxlan->default_dst.remote_ip,
+ vxlan->default_dst.remote_ifindex);
+ if (ret)
+ return ret;
+ }
+
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
+ return vxlan_multicast_leave_vnigrp(vxlan);
+
+ return 0;
+}
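vxlan_multicast_join_vnigrp() above uses a classic partial-failure unwind: it records the last node that joined successfully (vgood) and, on error, walks the list again leaving groups up to and including that node; -EADDRINUSE counts as success because another VNI may already hold the group. The generic shape, with hypothetical item_join()/item_leave() helpers:

struct item;
int item_join(struct item *it);		/* assumed */
void item_leave(struct item *it);	/* assumed */

static int join_all_or_unwind(struct item **items, int n)
{
	int i, last_ok = -1, ret = 0;

	for (i = 0; i < n; i++) {
		ret = item_join(items[i]);
		if (ret)
			break;
		last_ok = i;
	}
	if (ret)
		for (i = 0; i <= last_ok; i++)
			item_leave(items[i]);
	return ret;
}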
diff --git a/drivers/net/vxlan/vxlan_private.h b/drivers/net/vxlan/vxlan_private.h
new file mode 100644
index 000000000000..599c3b4fdd5e
--- /dev/null
+++ b/drivers/net/vxlan/vxlan_private.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Vxlan private header file
+ *
+ */
+
+#ifndef _VXLAN_PRIVATE_H
+#define _VXLAN_PRIVATE_H
+
+#include <linux/rhashtable.h>
+
+extern unsigned int vxlan_net_id;
+extern const u8 all_zeros_mac[ETH_ALEN + 2];
+extern const struct rhashtable_params vxlan_vni_rht_params;
+
+#define PORT_HASH_BITS 8
+#define PORT_HASH_SIZE (1 << PORT_HASH_BITS)
+
+/* per-network namespace private data for this module */
+struct vxlan_net {
+ struct list_head vxlan_list;
+ struct hlist_head sock_list[PORT_HASH_SIZE];
+ spinlock_t sock_lock;
+ struct notifier_block nexthop_notifier_block;
+};
+
+/* Forwarding table entry */
+struct vxlan_fdb {
+ struct hlist_node hlist; /* linked list of entries */
+ struct rcu_head rcu;
+ unsigned long updated; /* jiffies */
+ unsigned long used;
+ struct list_head remotes;
+ u8 eth_addr[ETH_ALEN];
+ u16 state; /* see ndm_state */
+ __be32 vni;
+ u16 flags; /* see ndm_flags and below */
+ struct list_head nh_list;
+ struct nexthop __rcu *nh;
+ struct vxlan_dev __rcu *vdev;
+};
+
+#define NTF_VXLAN_ADDED_BY_USER 0x100
+
+/* Virtual Network hash table head */
+static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
+{
+ return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
+}
+
+/* Socket hash table head */
+static inline struct hlist_head *vs_head(struct net *net, __be16 port)
+{
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+
+ return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
+}
+
+/* First remote destination for a forwarding entry.
+ * Guaranteed to be non-NULL because remotes are never deleted.
+ */
+static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
+{
+ if (rcu_access_pointer(fdb->nh))
+ return NULL;
+ return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
+}
+
+static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
+{
+ if (rcu_access_pointer(fdb->nh))
+ return NULL;
+ return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline
+bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
+{
+ if (a->sa.sa_family != b->sa.sa_family)
+ return false;
+ if (a->sa.sa_family == AF_INET6)
+ return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
+ else
+ return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
+}
+
+#else /* !CONFIG_IPV6 */
+
+static inline
+bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
+{
+ return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
+}
+
+#endif
+
+static inline struct vxlan_vni_node *
+vxlan_vnifilter_lookup(struct vxlan_dev *vxlan, __be32 vni)
+{
+ struct vxlan_vni_group *vg;
+
+ vg = rcu_dereference_rtnl(vxlan->vnigrp);
+ if (!vg)
+ return NULL;
+
+ return rhashtable_lookup_fast(&vg->vni_hash, &vni,
+ vxlan_vni_rht_params);
+}
+
+/* vxlan_core.c */
+int vxlan_fdb_create(struct vxlan_dev *vxlan,
+ const u8 *mac, union vxlan_addr *ip,
+ __u16 state, __be16 port, __be32 src_vni,
+ __be32 vni, __u32 ifindex, __u16 ndm_flags,
+ u32 nhid, struct vxlan_fdb **fdb,
+ struct netlink_ext_ack *extack);
+int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
+ const unsigned char *addr, union vxlan_addr ip,
+ __be16 port, __be32 src_vni, __be32 vni,
+ u32 ifindex, bool swdev_notify);
+u32 eth_vni_hash(const unsigned char *addr, __be32 vni);
+u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni);
+int vxlan_fdb_update(struct vxlan_dev *vxlan,
+ const u8 *mac, union vxlan_addr *ip,
+ __u16 state, __u16 flags,
+ __be16 port, __be32 src_vni, __be32 vni,
+ __u32 ifindex, __u16 ndm_flags, u32 nhid,
+ bool swdev_notify, struct netlink_ext_ack *extack);
+int vxlan_vni_in_use(struct net *src_net, struct vxlan_dev *vxlan,
+ struct vxlan_config *conf, __be32 vni);
+
+/* vxlan_vnifilter.c */
+int vxlan_vnigroup_init(struct vxlan_dev *vxlan);
+void vxlan_vnigroup_uninit(struct vxlan_dev *vxlan);
+
+void vxlan_vnifilter_init(void);
+void vxlan_vnifilter_uninit(void);
+void vxlan_vnifilter_count(struct vxlan_dev *vxlan, __be32 vni,
+ struct vxlan_vni_node *vninode,
+ int type, unsigned int len);
+
+void vxlan_vs_add_vnigrp(struct vxlan_dev *vxlan,
+ struct vxlan_sock *vs,
+ bool ipv6);
+void vxlan_vs_del_vnigrp(struct vxlan_dev *vxlan);
+int vxlan_vnilist_update_group(struct vxlan_dev *vxlan,
+ union vxlan_addr *old_remote_ip,
+ union vxlan_addr *new_remote_ip,
+ struct netlink_ext_ack *extack);
+
+/* vxlan_multicast.c */
+int vxlan_multicast_join(struct vxlan_dev *vxlan);
+int vxlan_multicast_leave(struct vxlan_dev *vxlan);
+bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev,
+ __be32 vni, union vxlan_addr *rip, int rifindex);
+int vxlan_igmp_join(struct vxlan_dev *vxlan, union vxlan_addr *rip,
+ int rifindex);
+int vxlan_igmp_leave(struct vxlan_dev *vxlan, union vxlan_addr *rip,
+ int rifindex);
+#endif
diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
new file mode 100644
index 000000000000..9f28d0b6a6b2
--- /dev/null
+++ b/drivers/net/vxlan/vxlan_vnifilter.c
@@ -0,0 +1,999 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Vxlan vni filter for collect metadata mode
+ *
+ * Authors: Roopa Prabhu <roopa@nvidia.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/etherdevice.h>
+#include <linux/rhashtable.h>
+#include <net/rtnetlink.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <net/vxlan.h>
+
+#include "vxlan_private.h"
+
+static inline int vxlan_vni_cmp(struct rhashtable_compare_arg *arg,
+ const void *ptr)
+{
+ const struct vxlan_vni_node *vnode = ptr;
+ __be32 vni = *(__be32 *)arg->key;
+
+ return vnode->vni != vni;
+}
+
+const struct rhashtable_params vxlan_vni_rht_params = {
+ .head_offset = offsetof(struct vxlan_vni_node, vnode),
+ .key_offset = offsetof(struct vxlan_vni_node, vni),
+ .key_len = sizeof(__be32),
+ .nelem_hint = 3,
+ .max_size = VXLAN_N_VID,
+ .obj_cmpfn = vxlan_vni_cmp,
+ .automatic_shrinking = true,
+};
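Per-VNI state lives in an rhashtable keyed directly on the __be32 VNI. Note that obj_cmpfn follows memcmp semantics (0 means match), which is why vxlan_vni_cmp() can simply return vnode->vni != vni. The lookup shape used throughout this file (fragment):

	struct vxlan_vni_node *v;
	__be32 vni = cpu_to_be32(100);	/* example VNI */

	v = rhashtable_lookup_fast(&vg->vni_hash, &vni, vxlan_vni_rht_params);
	/* v is NULL when the VNI is not in this device's filter */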
+
+static void vxlan_vs_add_del_vninode(struct vxlan_dev *vxlan,
+ struct vxlan_vni_node *v,
+ bool del)
+{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ struct vxlan_dev_node *node;
+ struct vxlan_sock *vs;
+
+ spin_lock(&vn->sock_lock);
+ if (del) {
+ if (!hlist_unhashed(&v->hlist4.hlist))
+ hlist_del_init_rcu(&v->hlist4.hlist);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (!hlist_unhashed(&v->hlist6.hlist))
+ hlist_del_init_rcu(&v->hlist6.hlist);
+#endif
+ goto out;
+ }
+
+#if IS_ENABLED(CONFIG_IPV6)
+ vs = rtnl_dereference(vxlan->vn6_sock);
+ if (vs && v) {
+ node = &v->hlist6;
+ hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
+ }
+#endif
+ vs = rtnl_dereference(vxlan->vn4_sock);
+ if (vs && v) {
+ node = &v->hlist4;
+ hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
+ }
+out:
+ spin_unlock(&vn->sock_lock);
+}
+
+void vxlan_vs_add_vnigrp(struct vxlan_dev *vxlan,
+ struct vxlan_sock *vs,
+ bool ipv6)
+{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
+ struct vxlan_vni_node *v, *tmp;
+ struct vxlan_dev_node *node;
+
+ if (!vg)
+ return;
+
+ spin_lock(&vn->sock_lock);
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+#if IS_ENABLED(CONFIG_IPV6)
+ if (ipv6)
+ node = &v->hlist6;
+ else
+#endif
+ node = &v->hlist4;
+ node->vxlan = vxlan;
+ hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
+ }
+ spin_unlock(&vn->sock_lock);
+}
+
+void vxlan_vs_del_vnigrp(struct vxlan_dev *vxlan)
+{
+ struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ struct vxlan_vni_node *v, *tmp;
+
+ if (!vg)
+ return;
+
+ spin_lock(&vn->sock_lock);
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+ hlist_del_init_rcu(&v->hlist4.hlist);
+#if IS_ENABLED(CONFIG_IPV6)
+ hlist_del_init_rcu(&v->hlist6.hlist);
+#endif
+ }
+ spin_unlock(&vn->sock_lock);
+}
+
+static void vxlan_vnifilter_stats_get(const struct vxlan_vni_node *vninode,
+ struct vxlan_vni_stats *dest)
+{
+ int i;
+
+ memset(dest, 0, sizeof(*dest));
+ for_each_possible_cpu(i) {
+ struct vxlan_vni_stats_pcpu *pstats;
+ struct vxlan_vni_stats temp;
+ unsigned int start;
+
+ pstats = per_cpu_ptr(vninode->stats, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&pstats->syncp);
+ memcpy(&temp, &pstats->stats, sizeof(temp));
+ } while (u64_stats_fetch_retry_irq(&pstats->syncp, start));
+
+ dest->rx_packets += temp.rx_packets;
+ dest->rx_bytes += temp.rx_bytes;
+ dest->rx_drops += temp.rx_drops;
+ dest->rx_errors += temp.rx_errors;
+ dest->tx_packets += temp.tx_packets;
+ dest->tx_bytes += temp.tx_bytes;
+ dest->tx_drops += temp.tx_drops;
+ dest->tx_errors += temp.tx_errors;
+ }
+}
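The fetch/retry loop is the standard u64_stats reader: on 32-bit SMP the seqcount can force a retry, so the counters are struct-copied into a local inside the loop and only a consistent snapshot is accumulated. Distilled reader side (fragment; variables as in the function above):

	do {
		start = u64_stats_fetch_begin_irq(&pstats->syncp);
		temp = pstats->stats;	/* may be torn, then retried */
	} while (u64_stats_fetch_retry_irq(&pstats->syncp, start));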
+
+static void vxlan_vnifilter_stats_add(struct vxlan_vni_node *vninode,
+ int type, unsigned int len)
+{
+ struct vxlan_vni_stats_pcpu *pstats = this_cpu_ptr(vninode->stats);
+
+ u64_stats_update_begin(&pstats->syncp);
+ switch (type) {
+ case VXLAN_VNI_STATS_RX:
+ pstats->stats.rx_bytes += len;
+ pstats->stats.rx_packets++;
+ break;
+ case VXLAN_VNI_STATS_RX_DROPS:
+ pstats->stats.rx_drops++;
+ break;
+ case VXLAN_VNI_STATS_RX_ERRORS:
+ pstats->stats.rx_errors++;
+ break;
+ case VXLAN_VNI_STATS_TX:
+ pstats->stats.tx_bytes += len;
+ pstats->stats.tx_packets++;
+ break;
+ case VXLAN_VNI_STATS_TX_DROPS:
+ pstats->stats.tx_drops++;
+ break;
+ case VXLAN_VNI_STATS_TX_ERRORS:
+ pstats->stats.tx_errors++;
+ break;
+ }
+ u64_stats_update_end(&pstats->syncp);
+}
+
+void vxlan_vnifilter_count(struct vxlan_dev *vxlan, __be32 vni,
+ struct vxlan_vni_node *vninode,
+ int type, unsigned int len)
+{
+ struct vxlan_vni_node *vnode;
+
+ if (!(vxlan->cfg.flags & VXLAN_F_VNIFILTER))
+ return;
+
+ if (vninode) {
+ vnode = vninode;
+ } else {
+ vnode = vxlan_vnifilter_lookup(vxlan, vni);
+ if (!vnode)
+ return;
+ }
+
+ vxlan_vnifilter_stats_add(vnode, type, len);
+}
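The NULL-tolerant third argument keeps call sites terse: the receive fast path hands in the vxlan_vni_node it already resolved during demux, while error and transmit paths pass NULL and pay for the lookup (or bail out immediately on non-vnifilter devices). Typical call shapes (fragments):

	/* rx fast path: vninode already looked up */
	vxlan_vnifilter_count(vxlan, vni, vninode, VXLAN_VNI_STATS_RX, skb->len);

	/* tx/error paths: let the helper resolve the VNI */
	vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0);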
+
+static u32 vnirange(struct vxlan_vni_node *vbegin,
+ struct vxlan_vni_node *vend)
+{
+ return (be32_to_cpu(vend->vni) - be32_to_cpu(vbegin->vni));
+}
+
+static size_t vxlan_vnifilter_entry_nlmsg_size(void)
+{
+ return NLMSG_ALIGN(sizeof(struct tunnel_msg))
+ + nla_total_size(0) /* VXLAN_VNIFILTER_ENTRY */
+ + nla_total_size(sizeof(u32)) /* VXLAN_VNIFILTER_ENTRY_START */
+ + nla_total_size(sizeof(u32)) /* VXLAN_VNIFILTER_ENTRY_END */
+ nla_total_size(sizeof(struct in6_addr)); /* VXLAN_VNIFILTER_ENTRY_GROUP{6} */
+}
+
+static int __vnifilter_entry_fill_stats(struct sk_buff *skb,
+ const struct vxlan_vni_node *vbegin)
+{
+ struct vxlan_vni_stats vstats;
+ struct nlattr *vstats_attr;
+
+ vstats_attr = nla_nest_start(skb, VXLAN_VNIFILTER_ENTRY_STATS);
+ if (!vstats_attr)
+ goto out_stats_err;
+
+ vxlan_vnifilter_stats_get(vbegin, &vstats);
+ if (nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_BYTES,
+ vstats.rx_bytes, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_PKTS,
+ vstats.rx_packets, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_DROPS,
+ vstats.rx_drops, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_ERRORS,
+ vstats.rx_errors, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_BYTES,
+ vstats.tx_bytes, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_PKTS,
+ vstats.tx_packets, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_DROPS,
+ vstats.tx_drops, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_ERRORS,
+ vstats.tx_errors, VNIFILTER_ENTRY_STATS_PAD))
+ goto out_stats_err;
+
+ nla_nest_end(skb, vstats_attr);
+
+ return 0;
+
+out_stats_err:
+ nla_nest_cancel(skb, vstats_attr);
+ return -EMSGSIZE;
+}
+
+static bool vxlan_fill_vni_filter_entry(struct sk_buff *skb,
+ struct vxlan_vni_node *vbegin,
+ struct vxlan_vni_node *vend,
+ bool fill_stats)
+{
+ struct nlattr *ventry;
+ u32 vs = be32_to_cpu(vbegin->vni);
+ u32 ve = 0;
+
+ if (vbegin != vend)
+ ve = be32_to_cpu(vend->vni);
+
+ ventry = nla_nest_start(skb, VXLAN_VNIFILTER_ENTRY);
+ if (!ventry)
+ return false;
+
+ if (nla_put_u32(skb, VXLAN_VNIFILTER_ENTRY_START, vs))
+ goto out_err;
+
+ if (ve && nla_put_u32(skb, VXLAN_VNIFILTER_ENTRY_END, ve))
+ goto out_err;
+
+ if (!vxlan_addr_any(&vbegin->remote_ip)) {
+ if (vbegin->remote_ip.sa.sa_family == AF_INET) {
+ if (nla_put_in_addr(skb, VXLAN_VNIFILTER_ENTRY_GROUP,
+ vbegin->remote_ip.sin.sin_addr.s_addr))
+ goto out_err;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ if (nla_put_in6_addr(skb, VXLAN_VNIFILTER_ENTRY_GROUP6,
+ &vbegin->remote_ip.sin6.sin6_addr))
+ goto out_err;
+#endif
+ }
+ }
+
+ if (fill_stats && __vnifilter_entry_fill_stats(skb, vbegin))
+ goto out_err;
+
+ nla_nest_end(skb, ventry);
+
+ return true;
+
+out_err:
+ nla_nest_cancel(skb, ventry);
+
+ return false;
+}
+
+static void vxlan_vnifilter_notify(const struct vxlan_dev *vxlan,
+ struct vxlan_vni_node *vninode, int cmd)
+{
+ struct tunnel_msg *tmsg;
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ struct net *net = dev_net(vxlan->dev);
+ int err = -ENOBUFS;
+
+ skb = nlmsg_new(vxlan_vnifilter_entry_nlmsg_size(), GFP_KERNEL);
+ if (!skb)
+ goto out_err;
+
+ err = -EMSGSIZE;
+ nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*tmsg), 0);
+ if (!nlh)
+ goto out_err;
+ tmsg = nlmsg_data(nlh);
+ memset(tmsg, 0, sizeof(*tmsg));
+ tmsg->family = AF_BRIDGE;
+ tmsg->ifindex = vxlan->dev->ifindex;
+
+ if (!vxlan_fill_vni_filter_entry(skb, vninode, vninode, false))
+ goto out_err;
+
+ nlmsg_end(skb, nlh);
+ rtnl_notify(skb, net, 0, RTNLGRP_TUNNEL, NULL, GFP_KERNEL);
+
+ return;
+
+out_err:
+ rtnl_set_sk_err(net, RTNLGRP_TUNNEL, err);
+
+ kfree_skb(skb);
+}
+
+static int vxlan_vnifilter_dump_dev(const struct net_device *dev,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct vxlan_vni_node *tmp, *v, *vbegin = NULL, *vend = NULL;
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct tunnel_msg *new_tmsg, *tmsg;
+ int idx = 0, s_idx = cb->args[1];
+ struct vxlan_vni_group *vg;
+ struct nlmsghdr *nlh;
+ bool dump_stats;
+ int err = 0;
+
+ if (!(vxlan->cfg.flags & VXLAN_F_VNIFILTER))
+ return -EINVAL;
+
+ /* RCU needed because of the vni locking rules (rcu || rtnl) */
+ vg = rcu_dereference(vxlan->vnigrp);
+ if (!vg || !vg->num_vnis)
+ return 0;
+
+ tmsg = nlmsg_data(cb->nlh);
+ dump_stats = !!(tmsg->flags & TUNNEL_MSG_FLAG_STATS);
+
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ RTM_NEWTUNNEL, sizeof(*new_tmsg), NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+ new_tmsg = nlmsg_data(nlh);
+ memset(new_tmsg, 0, sizeof(*new_tmsg));
+ new_tmsg->family = PF_BRIDGE;
+ new_tmsg->ifindex = dev->ifindex;
+
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+ if (idx < s_idx) {
+ idx++;
+ continue;
+ }
+ if (!vbegin) {
+ vbegin = v;
+ vend = v;
+ continue;
+ }
+ if (!dump_stats && vnirange(vend, v) == 1 &&
+ vxlan_addr_equal(&v->remote_ip, &vend->remote_ip)) {
+ goto update_end;
+ } else {
+ if (!vxlan_fill_vni_filter_entry(skb, vbegin, vend,
+ dump_stats)) {
+ err = -EMSGSIZE;
+ break;
+ }
+ idx += vnirange(vbegin, vend) + 1;
+ vbegin = v;
+ }
+update_end:
+ vend = v;
+ }
+
+ if (!err && vbegin) {
+ if (!vxlan_fill_vni_filter_entry(skb, vbegin, vend, dump_stats))
+ err = -EMSGSIZE;
+ }
+
+ cb->args[1] = err ? idx : 0;
+
+ nlmsg_end(skb, nlh);
+
+ return err;
+}
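The dump compresses runs of consecutive VNIs sharing a remote group into one {start,end} entry, and deliberately disables merging when TUNNEL_MSG_FLAG_STATS is set because counters are per-VNI. The merge in miniature (userspace; the grouping key is omitted):

#include <stdio.h>

int main(void)
{
	int vnis[] = { 100, 101, 102, 200 }, n = 4;
	int i, begin = vnis[0], end = vnis[0];

	for (i = 1; i <= n; i++) {
		if (i < n && vnis[i] == end + 1) {
			end = vnis[i];	/* extend the current run */
			continue;
		}
		printf("entry: start=%d end=%d\n", begin, end);
		if (i < n)
			begin = end = vnis[i];
	}
	return 0;	/* prints 100-102, then 200-200 */
}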
+
+static int vxlan_vnifilter_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int idx = 0, err = 0, s_idx = cb->args[0];
+ struct net *net = sock_net(skb->sk);
+ struct tunnel_msg *tmsg;
+ struct net_device *dev;
+
+ tmsg = nlmsg_data(cb->nlh);
+
+ if (tmsg->flags & ~TUNNEL_MSG_VALID_USER_FLAGS) {
+ NL_SET_ERR_MSG(cb->extack, "Invalid tunnelmsg flags in ancillary header");
+ return -EINVAL;
+ }
+
+ rcu_read_lock();
+ if (tmsg->ifindex) {
+ dev = dev_get_by_index_rcu(net, tmsg->ifindex);
+ if (!dev) {
+ err = -ENODEV;
+ goto out_err;
+ }
+ err = vxlan_vnifilter_dump_dev(dev, skb, cb);
+ /* if the dump completed without an error we return 0 here */
+ if (err != -EMSGSIZE)
+ goto out_err;
+ } else {
+ for_each_netdev_rcu(net, dev) {
+ if (!netif_is_vxlan(dev))
+ continue;
+ if (idx < s_idx)
+ goto skip;
+ err = vxlan_vnifilter_dump_dev(dev, skb, cb);
+ if (err == -EMSGSIZE)
+ break;
+skip:
+ idx++;
+ }
+ }
+ cb->args[0] = idx;
+ rcu_read_unlock();
+
+ return skb->len;
+
+out_err:
+ rcu_read_unlock();
+
+ return err;
+}
+
+static const struct nla_policy vni_filter_entry_policy[VXLAN_VNIFILTER_ENTRY_MAX + 1] = {
+ [VXLAN_VNIFILTER_ENTRY_START] = { .type = NLA_U32 },
+ [VXLAN_VNIFILTER_ENTRY_END] = { .type = NLA_U32 },
+ [VXLAN_VNIFILTER_ENTRY_GROUP] = { .type = NLA_BINARY,
+ .len = sizeof_field(struct iphdr, daddr) },
+ [VXLAN_VNIFILTER_ENTRY_GROUP6] = { .type = NLA_BINARY,
+ .len = sizeof(struct in6_addr) },
+};
+
+static const struct nla_policy vni_filter_policy[VXLAN_VNIFILTER_MAX + 1] = {
+ [VXLAN_VNIFILTER_ENTRY] = { .type = NLA_NESTED },
+};
+
+static int vxlan_update_default_fdb_entry(struct vxlan_dev *vxlan, __be32 vni,
+ union vxlan_addr *old_remote_ip,
+ union vxlan_addr *remote_ip,
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+ u32 hash_index;
+ int err = 0;
+
+ hash_index = fdb_head_index(vxlan, all_zeros_mac, vni);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ if (remote_ip && !vxlan_addr_any(remote_ip)) {
+ err = vxlan_fdb_update(vxlan, all_zeros_mac,
+ remote_ip,
+ NUD_REACHABLE | NUD_PERMANENT,
+ NLM_F_APPEND | NLM_F_CREATE,
+ vxlan->cfg.dst_port,
+ vni,
+ vni,
+ dst->remote_ifindex,
+ NTF_SELF, 0, true, extack);
+ if (err) {
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ return err;
+ }
+ }
+
+ if (old_remote_ip && !vxlan_addr_any(old_remote_ip)) {
+ __vxlan_fdb_delete(vxlan, all_zeros_mac,
+ *old_remote_ip,
+ vxlan->cfg.dst_port,
+ vni, vni,
+ dst->remote_ifindex,
+ true);
+ }
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+
+ return err;
+}
+
+static int vxlan_vni_update_group(struct vxlan_dev *vxlan,
+ struct vxlan_vni_node *vninode,
+ union vxlan_addr *group,
+ bool create, bool *changed,
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+ union vxlan_addr *newrip = NULL, *oldrip = NULL;
+ union vxlan_addr old_remote_ip;
+ int ret = 0;
+
+ memcpy(&old_remote_ip, &vninode->remote_ip, sizeof(old_remote_ip));
+
+ /* if a per-vni remote ip is not present, use the vxlan dev's
+ * default dst remote ip for the fdb entry
+ */
+ if (group && !vxlan_addr_any(group)) {
+ newrip = group;
+ } else {
+ if (!vxlan_addr_any(&dst->remote_ip))
+ newrip = &dst->remote_ip;
+ }
+
+ /* if an old rip exists and there is no newrip,
+ * explicitly delete the old rip
+ */
+ if (!newrip && !vxlan_addr_any(&old_remote_ip))
+ oldrip = &old_remote_ip;
+
+ if (!newrip && !oldrip)
+ return 0;
+
+ if (!create && oldrip && newrip && vxlan_addr_equal(oldrip, newrip))
+ return 0;
+
+ ret = vxlan_update_default_fdb_entry(vxlan, vninode->vni,
+ oldrip, newrip,
+ extack);
+ if (ret)
+ goto out;
+
+ if (group)
+ memcpy(&vninode->remote_ip, group, sizeof(vninode->remote_ip));
+
+ if (vxlan->dev->flags & IFF_UP) {
+ if (vxlan_addr_multicast(&old_remote_ip) &&
+ !vxlan_group_used(vn, vxlan, vninode->vni,
+ &old_remote_ip,
+ vxlan->default_dst.remote_ifindex)) {
+ ret = vxlan_igmp_leave(vxlan, &old_remote_ip,
+ 0);
+ if (ret)
+ goto out;
+ }
+
+ if (vxlan_addr_multicast(&vninode->remote_ip)) {
+ ret = vxlan_igmp_join(vxlan, &vninode->remote_ip, 0);
+ if (ret == -EADDRINUSE)
+ ret = 0;
+ if (ret)
+ goto out;
+ }
+ }
+
+ *changed = true;
+
+ return 0;
+out:
+ return ret;
+}
+
+int vxlan_vnilist_update_group(struct vxlan_dev *vxlan,
+ union vxlan_addr *old_remote_ip,
+ union vxlan_addr *new_remote_ip,
+ struct netlink_ext_ack *extack)
+{
+ struct list_head *headp, *hpos;
+ struct vxlan_vni_group *vg;
+ struct vxlan_vni_node *vent;
+ int ret;
+
+ vg = rtnl_dereference(vxlan->vnigrp);
+
+ headp = &vg->vni_list;
+ list_for_each_prev(hpos, headp) {
+ vent = list_entry(hpos, struct vxlan_vni_node, vlist);
+ if (vxlan_addr_any(&vent->remote_ip)) {
+ ret = vxlan_update_default_fdb_entry(vxlan, vent->vni,
+ old_remote_ip,
+ new_remote_ip,
+ extack);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
+ struct vxlan_vni_node *vninode)
+{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+
+ /* if per vni remote_ip not present, delete the
+ * default dst remote_ip previously added for this vni
+ */
+ if (!vxlan_addr_any(&vninode->remote_ip) ||
+ !vxlan_addr_any(&dst->remote_ip))
+ __vxlan_fdb_delete(vxlan, all_zeros_mac,
+ (vxlan_addr_any(&vninode->remote_ip) ?
+ dst->remote_ip : vninode->remote_ip),
+ vxlan->cfg.dst_port,
+ vninode->vni, vninode->vni,
+ dst->remote_ifindex,
+ true);
+
+ if (vxlan->dev->flags & IFF_UP) {
+ if (vxlan_addr_multicast(&vninode->remote_ip) &&
+ !vxlan_group_used(vn, vxlan, vninode->vni,
+ &vninode->remote_ip,
+ dst->remote_ifindex)) {
+ vxlan_igmp_leave(vxlan, &vninode->remote_ip, 0);
+ }
+ }
+}
+
+static int vxlan_vni_update(struct vxlan_dev *vxlan,
+ struct vxlan_vni_group *vg,
+ __be32 vni, union vxlan_addr *group,
+ bool *changed,
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_vni_node *vninode;
+ int ret;
+
+ vninode = rhashtable_lookup_fast(&vg->vni_hash, &vni,
+ vxlan_vni_rht_params);
+ if (!vninode)
+ return 0;
+
+ ret = vxlan_vni_update_group(vxlan, vninode, group, false, changed,
+ extack);
+ if (ret)
+ return ret;
+
+ if (*changed)
+ vxlan_vnifilter_notify(vxlan, vninode, RTM_NEWTUNNEL);
+
+ return 0;
+}
+
+static void __vxlan_vni_add_list(struct vxlan_vni_group *vg,
+ struct vxlan_vni_node *v)
+{
+ struct list_head *headp, *hpos;
+ struct vxlan_vni_node *vent;
+
+ headp = &vg->vni_list;
+ list_for_each_prev(hpos, headp) {
+ vent = list_entry(hpos, struct vxlan_vni_node, vlist);
+ if (be32_to_cpu(v->vni) < be32_to_cpu(vent->vni))
+ continue;
+ else
+ break;
+ }
+ list_add_rcu(&v->vlist, hpos);
+ vg->num_vnis++;
+}
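The tail-first walk keeps vni_list sorted in ascending VNI order, which the dump's range merge relies on, and makes the common in-order add O(1). A worked trace:

/* Insert VNI 150 into the ascending list [100, 200]:
 *   vent = 200: 150 < 200  -> continue toward the head
 *   vent = 100: 150 >= 100 -> break
 * list_add_rcu(&v->vlist, hpos) then links the new node right after
 * 100, giving [100, 150, 200].
 */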
+
+static void __vxlan_vni_del_list(struct vxlan_vni_group *vg,
+ struct vxlan_vni_node *v)
+{
+ list_del_rcu(&v->vlist);
+ vg->num_vnis--;
+}
+
+static struct vxlan_vni_node *vxlan_vni_alloc(struct vxlan_dev *vxlan,
+ __be32 vni)
+{
+ struct vxlan_vni_node *vninode;
+
+ vninode = kzalloc(sizeof(*vninode), GFP_ATOMIC);
+ if (!vninode)
+ return NULL;
+ vninode->stats = netdev_alloc_pcpu_stats(struct vxlan_vni_stats_pcpu);
+ if (!vninode->stats) {
+ kfree(vninode);
+ return NULL;
+ }
+ vninode->vni = vni;
+ vninode->hlist4.vxlan = vxlan;
+#if IS_ENABLED(CONFIG_IPV6)
+ vninode->hlist6.vxlan = vxlan;
+#endif
+
+ return vninode;
+}
+
+static int vxlan_vni_add(struct vxlan_dev *vxlan,
+ struct vxlan_vni_group *vg,
+ u32 vni, union vxlan_addr *group,
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_vni_node *vninode;
+ __be32 v = cpu_to_be32(vni);
+ bool changed = false;
+ int err = 0;
+
+ if (vxlan_vnifilter_lookup(vxlan, v))
+ return vxlan_vni_update(vxlan, vg, v, group, &changed, extack);
+
+ err = vxlan_vni_in_use(vxlan->net, vxlan, &vxlan->cfg, v);
+ if (err) {
+ NL_SET_ERR_MSG(extack, "VNI in use");
+ return err;
+ }
+
+ vninode = vxlan_vni_alloc(vxlan, v);
+ if (!vninode)
+ return -ENOMEM;
+
+ err = rhashtable_lookup_insert_fast(&vg->vni_hash,
+ &vninode->vnode,
+ vxlan_vni_rht_params);
+ if (err) {
+ kfree(vninode);
+ return err;
+ }
+
+ __vxlan_vni_add_list(vg, vninode);
+
+ if (vxlan->dev->flags & IFF_UP)
+ vxlan_vs_add_del_vninode(vxlan, vninode, false);
+
+ err = vxlan_vni_update_group(vxlan, vninode, group, true, &changed,
+ extack);
+
+ if (changed)
+ vxlan_vnifilter_notify(vxlan, vninode, RTM_NEWTUNNEL);
+
+ return err;
+}
+
+static void vxlan_vni_node_rcu_free(struct rcu_head *rcu)
+{
+ struct vxlan_vni_node *v;
+
+ v = container_of(rcu, struct vxlan_vni_node, rcu);
+ free_percpu(v->stats);
+ kfree(v);
+}
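+
+/* Deferred-free sketch (illustrative, not part of the patch): the writer
+ * unlinks the node under RTNL while lockless readers may still be walking
+ * it inside an RCU read-side critical section, so reclaim must wait for a
+ * grace period:
+ *
+ *	list_del_rcu(&v->vlist);                      unlink from new readers
+ *	call_rcu(&v->rcu, vxlan_vni_node_rcu_free);   free once readers drain
+ */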
+
+static int vxlan_vni_del(struct vxlan_dev *vxlan,
+ struct vxlan_vni_group *vg,
+ u32 vni, struct netlink_ext_ack *extack)
+{
+ struct vxlan_vni_node *vninode;
+ __be32 v = cpu_to_be32(vni);
+ int err = 0;
+
+ vg = rtnl_dereference(vxlan->vnigrp);
+
+ vninode = rhashtable_lookup_fast(&vg->vni_hash, &v,
+ vxlan_vni_rht_params);
+ if (!vninode) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ vxlan_vni_delete_group(vxlan, vninode);
+
+ err = rhashtable_remove_fast(&vg->vni_hash,
+ &vninode->vnode,
+ vxlan_vni_rht_params);
+ if (err)
+ goto out;
+
+ __vxlan_vni_del_list(vg, vninode);
+
+ vxlan_vnifilter_notify(vxlan, vninode, RTM_DELTUNNEL);
+
+ if (vxlan->dev->flags & IFF_UP)
+ vxlan_vs_add_del_vninode(vxlan, vninode, true);
+
+ call_rcu(&vninode->rcu, vxlan_vni_node_rcu_free);
+
+ return 0;
+out:
+ return err;
+}
+
+static int vxlan_vni_add_del(struct vxlan_dev *vxlan, __u32 start_vni,
+ __u32 end_vni, union vxlan_addr *group,
+ int cmd, struct netlink_ext_ack *extack)
+{
+ struct vxlan_vni_group *vg;
+ int v, err = 0;
+
+ vg = rtnl_dereference(vxlan->vnigrp);
+
+ for (v = start_vni; v <= end_vni; v++) {
+ switch (cmd) {
+ case RTM_NEWTUNNEL:
+ err = vxlan_vni_add(vxlan, vg, v, group, extack);
+ break;
+ case RTM_DELTUNNEL:
+ err = vxlan_vni_del(vxlan, vg, v, extack);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ if (err)
+ goto out;
+ }
+
+ return 0;
+out:
+ return err;
+}
+
+static int vxlan_process_vni_filter(struct vxlan_dev *vxlan,
+ struct nlattr *nlvnifilter,
+ int cmd, struct netlink_ext_ack *extack)
+{
+ struct nlattr *vattrs[VXLAN_VNIFILTER_ENTRY_MAX + 1];
+ u32 vni_start = 0, vni_end = 0;
+ union vxlan_addr group;
+ int err;
+
+ err = nla_parse_nested(vattrs,
+ VXLAN_VNIFILTER_ENTRY_MAX,
+ nlvnifilter, vni_filter_entry_policy,
+ extack);
+ if (err)
+ return err;
+
+ if (vattrs[VXLAN_VNIFILTER_ENTRY_START]) {
+ vni_start = nla_get_u32(vattrs[VXLAN_VNIFILTER_ENTRY_START]);
+ vni_end = vni_start;
+ }
+
+ if (vattrs[VXLAN_VNIFILTER_ENTRY_END])
+ vni_end = nla_get_u32(vattrs[VXLAN_VNIFILTER_ENTRY_END]);
+
+ if (!vni_start && !vni_end) {
+ NL_SET_ERR_MSG_ATTR(extack, nlvnifilter,
+ "neither vni start nor end found in vni entry");
+ return -EINVAL;
+ }
+
+ if (vattrs[VXLAN_VNIFILTER_ENTRY_GROUP]) {
+ group.sin.sin_addr.s_addr =
+ nla_get_in_addr(vattrs[VXLAN_VNIFILTER_ENTRY_GROUP]);
+ group.sa.sa_family = AF_INET;
+ } else if (vattrs[VXLAN_VNIFILTER_ENTRY_GROUP6]) {
+ group.sin6.sin6_addr =
+ nla_get_in6_addr(vattrs[VXLAN_VNIFILTER_ENTRY_GROUP6]);
+ group.sa.sa_family = AF_INET6;
+ } else {
+ memset(&group, 0, sizeof(group));
+ }
+
+ if (vxlan_addr_multicast(&group) && !vxlan->default_dst.remote_ifindex) {
+ NL_SET_ERR_MSG(extack,
+ "Local interface required for multicast remote group");
+
+ return -EINVAL;
+ }
+
+ err = vxlan_vni_add_del(vxlan, vni_start, vni_end, &group, cmd,
+ extack);
+ if (err)
+ return err;
+
+ return 0;
+}
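+
+/* Sketch of the nested attribute layout consumed above (values invented
+ * for illustration):
+ *
+ *	RTM_NEWTUNNEL, struct tunnel_msg { .ifindex = <vxlan device> }
+ *	  VXLAN_VNIFILTER_ENTRY (nested)
+ *	    VXLAN_VNIFILTER_ENTRY_START = 100
+ *	    VXLAN_VNIFILTER_ENTRY_END   = 200
+ *	    VXLAN_VNIFILTER_ENTRY_GROUP = 239.1.1.1
+ *
+ * START alone selects a single VNI, START plus END an inclusive range;
+ * GROUP/GROUP6 optionally set a per-range remote/multicast address.
+ */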
+
+void vxlan_vnigroup_uninit(struct vxlan_dev *vxlan)
+{
+ struct vxlan_vni_node *v, *tmp;
+ struct vxlan_vni_group *vg;
+
+ vg = rtnl_dereference(vxlan->vnigrp);
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+ rhashtable_remove_fast(&vg->vni_hash, &v->vnode,
+ vxlan_vni_rht_params);
+ hlist_del_init_rcu(&v->hlist4.hlist);
+#if IS_ENABLED(CONFIG_IPV6)
+ hlist_del_init_rcu(&v->hlist6.hlist);
+#endif
+ __vxlan_vni_del_list(vg, v);
+ vxlan_vnifilter_notify(vxlan, v, RTM_DELTUNNEL);
+ call_rcu(&v->rcu, vxlan_vni_node_rcu_free);
+ }
+ rhashtable_destroy(&vg->vni_hash);
+ kfree(vg);
+}
+
+int vxlan_vnigroup_init(struct vxlan_dev *vxlan)
+{
+ struct vxlan_vni_group *vg;
+ int ret;
+
+ vg = kzalloc(sizeof(*vg), GFP_KERNEL);
+ if (!vg)
+ return -ENOMEM;
+ ret = rhashtable_init(&vg->vni_hash, &vxlan_vni_rht_params);
+ if (ret) {
+ kfree(vg);
+ return ret;
+ }
+ INIT_LIST_HEAD(&vg->vni_list);
+ rcu_assign_pointer(vxlan->vnigrp, vg);
+
+ return 0;
+}
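+
+/* Publication sketch (illustrative): the rcu_assign_pointer() above pairs
+ * with RCU dereferences on the reader side, e.g. in the data path:
+ *
+ *	rcu_read_lock();
+ *	vg = rcu_dereference(vxlan->vnigrp);	sees a fully initialized
+ *	...					group, or NULL
+ *	rcu_read_unlock();
+ *
+ * while control-path code running under RTNL uses rtnl_dereference()
+ * instead, as in vxlan_vni_add_del() above.
+ */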
+
+static int vxlan_vnifilter_process(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = sock_net(skb->sk);
+ struct tunnel_msg *tmsg;
+ struct vxlan_dev *vxlan;
+ struct net_device *dev;
+ struct nlattr *attr;
+ int err, vnis = 0;
+ int rem;
+
+ /* this should validate the header and check for remaining bytes */
+ err = nlmsg_parse(nlh, sizeof(*tmsg), NULL, VXLAN_VNIFILTER_MAX,
+ vni_filter_policy, extack);
+ if (err < 0)
+ return err;
+
+ tmsg = nlmsg_data(nlh);
+ dev = __dev_get_by_index(net, tmsg->ifindex);
+ if (!dev)
+ return -ENODEV;
+
+ if (!netif_is_vxlan(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "The device is not a vxlan device");
+ return -EINVAL;
+ }
+
+ vxlan = netdev_priv(dev);
+
+ if (!(vxlan->cfg.flags & VXLAN_F_VNIFILTER))
+ return -EOPNOTSUPP;
+
+ nlmsg_for_each_attr(attr, nlh, sizeof(*tmsg), rem) {
+ switch (nla_type(attr)) {
+ case VXLAN_VNIFILTER_ENTRY:
+ err = vxlan_process_vni_filter(vxlan, attr,
+ nlh->nlmsg_type, extack);
+ break;
+ default:
+ continue;
+ }
+ vnis++;
+ if (err)
+ break;
+ }
+
+ if (!vnis) {
+ NL_SET_ERR_MSG_MOD(extack, "No vnis found to process");
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+void vxlan_vnifilter_init(void)
+{
+ rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETTUNNEL, NULL,
+ vxlan_vnifilter_dump, 0);
+ rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWTUNNEL,
+ vxlan_vnifilter_process, NULL, 0);
+ rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELTUNNEL,
+ vxlan_vnifilter_process, NULL, 0);
+}
+
+void vxlan_vnifilter_uninit(void)
+{
+ rtnl_unregister(PF_BRIDGE, RTM_GETTUNNEL);
+ rtnl_unregister(PF_BRIDGE, RTM_NEWTUNNEL);
+ rtnl_unregister(PF_BRIDGE, RTM_DELTUNNEL);
+}
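Note the handler split in vxlan_vnifilter_init(): rtnl_register_module() takes
both a doit and a dumpit callback, and RTM_GETTUNNEL is registered dump-only
(vxlan_vnifilter_dump) while RTM_NEWTUNNEL and RTM_DELTUNNEL are doit-only
(vxlan_vnifilter_process), much like the bridge RTM_*VLAN handlers.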
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 592a8389fc5a..140780ac1745 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -293,7 +293,8 @@ config SLIC_DS26522
config IXP4XX_HSS
tristate "Intel IXP4xx HSS (synchronous serial port) support"
depends on HDLC && IXP4XX_NPE && IXP4XX_QMGR
- depends on ARCH_IXP4XX
+ depends on ARCH_IXP4XX && OF
+ select MFD_SYSCON
help
Say Y here if you want to use built-in HSS ports
on IXP4xx processor.
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 0b7d9f2f2b8b..863c3e34e136 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -16,8 +16,10 @@
#include <linux/hdlc.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
@@ -1389,9 +1391,28 @@ static int ixp4xx_hss_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct net_device *ndev;
struct device_node *np;
+ struct regmap *rmap;
struct port *port;
hdlc_device *hdlc;
int err;
+ u32 val;
+
+ /*
+ * Look up the syscon and check that the HSS and HDLC features are
+ * available, else this will not work.
+ */
+ rmap = syscon_regmap_lookup_by_compatible("syscon");
+ if (IS_ERR(rmap))
+ return dev_err_probe(dev, PTR_ERR(rmap),
+ "failed to look up syscon\n");
+
+ val = cpu_ixp4xx_features(rmap);
+
+ if ((val & (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) !=
+ (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) {
+ dev_err(dev, "HDLC and HSS features unavailable in platform\n");
+ return -ENODEV;
+ }
np = dev->of_node;
@@ -1516,25 +1537,9 @@ static struct platform_driver ixp4xx_hss_driver = {
.probe = ixp4xx_hss_probe,
.remove = ixp4xx_hss_remove,
};
-
-static int __init hss_init_module(void)
-{
- if ((ixp4xx_read_feature_bits() &
- (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) !=
- (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS))
- return -ENODEV;
-
- return platform_driver_register(&ixp4xx_hss_driver);
-}
-
-static void __exit hss_cleanup_module(void)
-{
- platform_driver_unregister(&ixp4xx_hss_driver);
-}
+module_platform_driver(ixp4xx_hss_driver);
MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx HSS driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_hss");
-module_init(hss_init_module);
-module_exit(hss_cleanup_module);
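With the feature check moved into probe(), the hand-rolled init/exit pair
becomes pure boilerplate. Roughly, module_platform_driver(ixp4xx_hss_driver)
expands (via the module_driver() helper macro, simplified) to:

	static int __init ixp4xx_hss_driver_init(void)
	{
		return platform_driver_register(&ixp4xx_hss_driver);
	}
	module_init(ixp4xx_hss_driver_init);

	static void __exit ixp4xx_hss_driver_exit(void)
	{
		platform_driver_unregister(&ixp4xx_hss_driver);
	}
	module_exit(ixp4xx_hss_driver_exit);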
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 6a142dc85c37..76c6b4f89890 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -57,6 +57,7 @@
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>
+#include <linux/jiffies.h>
//#include <asm/spinlock.h>
#define DRIVER_MAJOR_VERSION 1
@@ -1968,7 +1969,7 @@ static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue)
printk("%s: Xmitter busy|\n", dev->name);
sc->extra_stats.tx_tbusy_calls++;
- if (jiffies - dev_trans_start(dev) < TX_TIMEOUT)
+ if (time_is_before_jiffies(dev_trans_start(dev) + TX_TIMEOUT))
goto bug_out;
/*
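For reference, the jiffies helpers involved are defined along these lines
(paraphrased from include/linux/jiffies.h, typechecks omitted):

	#define time_after(a, b)		((long)((b) - (a)) < 0)
	#define time_is_before_jiffies(a)	time_after(jiffies, (a))

So time_is_before_jiffies(dev_trans_start(dev) + TX_TIMEOUT) evaluates true
once the last transmit timestamp plus TX_TIMEOUT lies in the past, with the
signed-difference trick keeping the comparison valid across a jiffies
wraparound.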
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c
index 8e3b1c717c10..6063552cea9b 100644
--- a/drivers/net/wan/slic_ds26522.c
+++ b/drivers/net/wan/slic_ds26522.c
@@ -194,10 +194,9 @@ static int slic_ds26522_init_configure(struct spi_device *spi)
return 0;
}
-static int slic_ds26522_remove(struct spi_device *spi)
+static void slic_ds26522_remove(struct spi_device *spi)
{
pr_info("DS26522 module uninstalled\n");
- return 0;
}
static int slic_ds26522_probe(struct spi_device *spi)
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index a46067c38bf5..0fad1331303c 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -59,9 +59,7 @@ out:
return ret;
}
-#ifdef CONFIG_PM_SLEEP
-static int wg_pm_notification(struct notifier_block *nb, unsigned long action,
- void *data)
+static int wg_pm_notification(struct notifier_block *nb, unsigned long action, void *data)
{
struct wg_device *wg;
struct wg_peer *peer;
@@ -92,7 +90,24 @@ static int wg_pm_notification(struct notifier_block *nb, unsigned long action,
}
static struct notifier_block pm_notifier = { .notifier_call = wg_pm_notification };
-#endif
+
+static int wg_vm_notification(struct notifier_block *nb, unsigned long action, void *data)
+{
+ struct wg_device *wg;
+ struct wg_peer *peer;
+
+ rtnl_lock();
+ list_for_each_entry(wg, &device_list, device_list) {
+ mutex_lock(&wg->device_update_lock);
+ list_for_each_entry(peer, &wg->peer_list, peer_list)
+ wg_noise_expire_current_peer_keypairs(peer);
+ mutex_unlock(&wg->device_update_lock);
+ }
+ rtnl_unlock();
+ return 0;
+}
+
+static struct notifier_block vm_notifier = { .notifier_call = wg_vm_notification };
static int wg_stop(struct net_device *dev)
{
@@ -424,16 +439,18 @@ int __init wg_device_init(void)
{
int ret;
-#ifdef CONFIG_PM_SLEEP
ret = register_pm_notifier(&pm_notifier);
if (ret)
return ret;
-#endif
- ret = register_pernet_device(&pernet_ops);
+ ret = register_random_vmfork_notifier(&vm_notifier);
if (ret)
goto error_pm;
+ ret = register_pernet_device(&pernet_ops);
+ if (ret)
+ goto error_vm;
+
ret = rtnl_link_register(&link_ops);
if (ret)
goto error_pernet;
@@ -442,10 +459,10 @@ int __init wg_device_init(void)
error_pernet:
unregister_pernet_device(&pernet_ops);
+error_vm:
+ unregister_random_vmfork_notifier(&vm_notifier);
error_pm:
-#ifdef CONFIG_PM_SLEEP
unregister_pm_notifier(&pm_notifier);
-#endif
return ret;
}
@@ -453,8 +470,7 @@ void wg_device_uninit(void)
{
rtnl_link_unregister(&link_ops);
unregister_pernet_device(&pernet_ops);
-#ifdef CONFIG_PM_SLEEP
+ unregister_random_vmfork_notifier(&vm_notifier);
unregister_pm_notifier(&pm_notifier);
-#endif
rcu_barrier();
}
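The vmfork notifier API used here comes from <linux/random.h> and follows the
usual notifier_block pattern. A minimal sketch for any driver that must
discard state which may never repeat across VM clones (the callback body is a
placeholder assumption):

	#include <linux/init.h>
	#include <linux/notifier.h>
	#include <linux/random.h>

	static int my_vmfork_event(struct notifier_block *nb,
				   unsigned long action, void *data)
	{
		/* hypothetical: invalidate session keys, nonces, ... */
		return 0;
	}

	static struct notifier_block my_vmfork_nb = {
		.notifier_call = my_vmfork_event,
	};

	static int __init my_init(void)
	{
		return register_random_vmfork_notifier(&my_vmfork_nb);
	}
	module_init(my_init);

	static void __exit my_exit(void)
	{
		unregister_random_vmfork_notifier(&my_vmfork_nb);
	}
	module_exit(my_exit);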
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 141c1b5a7b1f..9cabd342d156 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -104,7 +104,7 @@ static void ar5523_cmd_rx_cb(struct urb *urb)
}
if (urb->actual_length < sizeof(struct ar5523_cmd_hdr)) {
- ar5523_err(ar, "RX USB to short.\n");
+ ar5523_err(ar, "RX USB too short.\n");
goto skip;
}
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 8f5b8eb368fa..9e1f483e1362 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -75,6 +75,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA988X_BOARD_DATA_SZ,
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
@@ -111,6 +112,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA988X_BOARD_DATA_SZ,
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
@@ -148,6 +150,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9887_BOARD_DATA_SZ,
.board_ext_size = QCA9887_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
@@ -184,6 +187,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca6174_sdio_ops,
.hw_clk = qca6174_clk,
.target_cpu_freq = 176000000,
@@ -216,6 +220,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
@@ -252,6 +257,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
@@ -288,6 +294,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
@@ -325,6 +332,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca6174_ops,
.hw_clk = qca6174_clk,
.target_cpu_freq = 176000000,
@@ -370,6 +378,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &qca99x0_rx_desc_ops,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 4,
@@ -415,6 +424,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.ext_board_size = QCA99X0_EXT_BOARD_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &qca99x0_rx_desc_ops,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 12,
@@ -461,6 +471,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &qca99x0_rx_desc_ops,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 12,
@@ -501,6 +512,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
@@ -537,6 +549,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca6174_ops,
.hw_clk = qca6174_clk,
.target_cpu_freq = 176000000,
@@ -575,6 +588,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca6174_ops,
.hw_clk = qca6174_clk,
.target_cpu_freq = 176000000,
@@ -611,6 +625,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA4019_BOARD_EXT_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &qca99x0_rx_desc_ops,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 4,
@@ -643,6 +658,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dir = WCN3990_HW_1_0_FW_DIR,
},
.sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &wcn3990_rx_desc_ops,
.hw_ops = &wcn3990_ops,
.decap_align_bytes = 1,
.num_peers = TARGET_HL_TLV_NUM_PEERS,
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 127b4e4980ef..907e1e13871a 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -131,6 +131,159 @@ static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = {
HTT_T2H_MSG_TYPE_PEER_STATS,
};
+const struct ath10k_htt_rx_desc_ops qca988x_rx_desc_ops = {
+ .rx_desc_size = sizeof(struct htt_rx_desc_v1),
+ .rx_desc_msdu_payload_offset = offsetof(struct htt_rx_desc_v1, msdu_payload)
+};
+
+static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc = container_of(rxd,
+ struct htt_rx_desc_v1,
+ base);
+
+ return MS(__le32_to_cpu(rx_desc->msdu_end.qca99x0.info1),
+ RX_MSDU_END_INFO1_L3_HDR_PAD);
+}
+
+static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc = container_of(rxd,
+ struct htt_rx_desc_v1,
+ base);
+
+ return !!(rx_desc->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_MSDU_LIMIT_ERR));
+}
+
+const struct ath10k_htt_rx_desc_ops qca99x0_rx_desc_ops = {
+ .rx_desc_size = sizeof(struct htt_rx_desc_v1),
+ .rx_desc_msdu_payload_offset = offsetof(struct htt_rx_desc_v1, msdu_payload),
+
+ .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_msdu_limit_error = ath10k_qca99x0_rx_desc_msdu_limit_error,
+};
+
+static void ath10k_rx_desc_wcn3990_get_offsets(struct htt_rx_ring_rx_desc_offsets *off)
+{
+#define desc_offset(x) (offsetof(struct htt_rx_desc_v2, x) / 4)
+ off->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+ off->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+ off->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+ off->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+ off->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+ off->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+ off->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+ off->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+ off->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+ off->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+}
+
+static struct htt_rx_desc *
+ath10k_rx_desc_wcn3990_from_raw_buffer(void *buff)
+{
+ return &((struct htt_rx_desc_v2 *)buff)->base;
+}
+
+static struct rx_attention *
+ath10k_rx_desc_wcn3990_get_attention(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->attention;
+}
+
+static struct rx_frag_info_common *
+ath10k_rx_desc_wcn3990_get_frag_info(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->frag_info.common;
+}
+
+static struct rx_mpdu_start *
+ath10k_rx_desc_wcn3990_get_mpdu_start(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->mpdu_start;
+}
+
+static struct rx_mpdu_end *
+ath10k_rx_desc_wcn3990_get_mpdu_end(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->mpdu_end;
+}
+
+static struct rx_msdu_start_common *
+ath10k_rx_desc_wcn3990_get_msdu_start(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->msdu_start.common;
+}
+
+static struct rx_msdu_end_common *
+ath10k_rx_desc_wcn3990_get_msdu_end(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->msdu_end.common;
+}
+
+static struct rx_ppdu_start *
+ath10k_rx_desc_wcn3990_get_ppdu_start(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->ppdu_start;
+}
+
+static struct rx_ppdu_end_common *
+ath10k_rx_desc_wcn3990_get_ppdu_end(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->ppdu_end.common;
+}
+
+static u8 *
+ath10k_rx_desc_wcn3990_get_rx_hdr_status(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return rx_desc->rx_hdr_status;
+}
+
+static u8 *
+ath10k_rx_desc_wcn3990_get_msdu_payload(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return rx_desc->msdu_payload;
+}
+
+const struct ath10k_htt_rx_desc_ops wcn3990_rx_desc_ops = {
+ .rx_desc_size = sizeof(struct htt_rx_desc_v2),
+ .rx_desc_msdu_payload_offset = offsetof(struct htt_rx_desc_v2, msdu_payload),
+
+ .rx_desc_from_raw_buffer = ath10k_rx_desc_wcn3990_from_raw_buffer,
+ .rx_desc_get_offsets = ath10k_rx_desc_wcn3990_get_offsets,
+ .rx_desc_get_attention = ath10k_rx_desc_wcn3990_get_attention,
+ .rx_desc_get_frag_info = ath10k_rx_desc_wcn3990_get_frag_info,
+ .rx_desc_get_mpdu_start = ath10k_rx_desc_wcn3990_get_mpdu_start,
+ .rx_desc_get_mpdu_end = ath10k_rx_desc_wcn3990_get_mpdu_end,
+ .rx_desc_get_msdu_start = ath10k_rx_desc_wcn3990_get_msdu_start,
+ .rx_desc_get_msdu_end = ath10k_rx_desc_wcn3990_get_msdu_end,
+ .rx_desc_get_ppdu_start = ath10k_rx_desc_wcn3990_get_ppdu_start,
+ .rx_desc_get_ppdu_end = ath10k_rx_desc_wcn3990_get_ppdu_end,
+ .rx_desc_get_rx_hdr_status = ath10k_rx_desc_wcn3990_get_rx_hdr_status,
+ .rx_desc_get_msdu_payload = ath10k_rx_desc_wcn3990_get_msdu_payload,
+};
+
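+/* Note the asymmetry between the ops tables above: qca988x_rx_desc_ops and
+ * qca99x0_rx_desc_ops leave most getters NULL, so the inline wrappers in
+ * htt.h fall back to the htt_rx_desc_v1 layout, whereas wcn3990_rx_desc_ops
+ * overrides every getter to route accesses through htt_rx_desc_v2.
+ */
+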
int ath10k_htt_connect(struct ath10k_htt *htt)
{
struct ath10k_htc_svc_conn_req conn_req;
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 9a3a8907389b..f06cf39204e2 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -240,14 +240,7 @@ enum htt_rx_ring_flags {
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1)
-struct htt_rx_ring_setup_ring32 {
- __le32 fw_idx_shadow_reg_paddr;
- __le32 rx_ring_base_paddr;
- __le16 rx_ring_len; /* in 4-byte words */
- __le16 rx_ring_bufsize; /* rx skb size - in bytes */
- __le16 flags; /* %HTT_RX_RING_FLAGS_ */
- __le16 fw_idx_init_val;
-
+struct htt_rx_ring_rx_desc_offsets {
/* the following offsets are in 4-byte units */
__le16 mac80211_hdr_offset;
__le16 msdu_payload_offset;
@@ -261,6 +254,17 @@ struct htt_rx_ring_setup_ring32 {
__le16 frag_info_offset;
} __packed;
+struct htt_rx_ring_setup_ring32 {
+ __le32 fw_idx_shadow_reg_paddr;
+ __le32 rx_ring_base_paddr;
+ __le16 rx_ring_len; /* in 4-byte words */
+ __le16 rx_ring_bufsize; /* rx skb size - in bytes */
+ __le16 flags; /* %HTT_RX_RING_FLAGS_ */
+ __le16 fw_idx_init_val;
+
+ struct htt_rx_ring_rx_desc_offsets offsets;
+} __packed;
+
struct htt_rx_ring_setup_ring64 {
__le64 fw_idx_shadow_reg_paddr;
__le64 rx_ring_base_paddr;
@@ -269,17 +273,7 @@ struct htt_rx_ring_setup_ring64 {
__le16 flags; /* %HTT_RX_RING_FLAGS_ */
__le16 fw_idx_init_val;
- /* the following offsets are in 4-byte units */
- __le16 mac80211_hdr_offset;
- __le16 msdu_payload_offset;
- __le16 ppdu_start_offset;
- __le16 ppdu_end_offset;
- __le16 mpdu_start_offset;
- __le16 mpdu_end_offset;
- __le16 msdu_start_offset;
- __le16 msdu_end_offset;
- __le16 rx_attention_offset;
- __le16 frag_info_offset;
+ struct htt_rx_ring_rx_desc_offsets offsets;
} __packed;
struct htt_rx_ring_setup_hdr {
@@ -2075,12 +2069,22 @@ static inline bool ath10k_htt_rx_proc_rx_frag_ind(struct ath10k_htt *htt,
return htt->rx_ops->htt_rx_proc_rx_frag_ind(htt, rx, skb);
}
+/* the driver strongly assumes that the rx header status is 64 bytes long,
+ * so all possible rx_desc structures must respect this assumption.
+ */
#define RX_HTT_HDR_STATUS_LEN 64
-/* This structure layout is programmed via rx ring setup
+/* The rx descriptor structure layout is programmed via rx ring setup
* so that FW knows how to transfer the rx descriptor to the host.
- * Buffers like this are placed on the rx ring.
+ * Unfortunately, though, QCA6174's firmware doesn't currently behave correctly
+ * when the rx descriptor's structure layout is modified beyond what it expects
+ * (even if the new layout is correctly programmed during the rx ring setup).
+ * Therefore we must keep two different memory layouts, abstract the rx
+ * descriptor representation, and use ath10k_htt_rx_desc_ops to access
+ * rx descriptor data correctly.
*/
+
+/* base struct used for abstracting the rx descriptor representation */
struct htt_rx_desc {
union {
/* This field is filled on the host using the msdu buffer
@@ -2089,6 +2093,13 @@ struct htt_rx_desc {
struct fw_rx_desc_base fw_desc;
u32 pad;
} __packed;
+} __packed;
+
+/* rx descriptor for wcn3990, possibly extensible to newer cards.
+ * Buffers like this are placed on the rx ring.
+ */
+struct htt_rx_desc_v2 {
+ struct htt_rx_desc base;
struct {
struct rx_attention attention;
struct rx_frag_info frag_info;
@@ -2103,6 +2114,240 @@ struct htt_rx_desc {
u8 msdu_payload[];
};
+/* Dedicated rx descriptor for QCA6174, QCA988x and QCA99x0 to make sure their
+ * firmware works correctly. We keep a single rx descriptor for all three
+ * families of cards because, from testing, it seems to be the most stable
+ * solution; e.g. a dedicated rx descriptor for QCA6174 alone occasionally
+ * caused firmware crashes during some tests.
+ * Buffers like this are placed on the rx ring.
+ */
+struct htt_rx_desc_v1 {
+ struct htt_rx_desc base;
+ struct {
+ struct rx_attention attention;
+ struct rx_frag_info_v1 frag_info;
+ struct rx_mpdu_start mpdu_start;
+ struct rx_msdu_start_v1 msdu_start;
+ struct rx_msdu_end_v1 msdu_end;
+ struct rx_mpdu_end mpdu_end;
+ struct rx_ppdu_start ppdu_start;
+ struct rx_ppdu_end_v1 ppdu_end;
+ } __packed;
+ u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
+ u8 msdu_payload[];
+};
+
+/* rx_desc abstraction */
+struct ath10k_htt_rx_desc_ops {
+ /* These fields are mandatory, they must be specified in any instance */
+
+ /* sizeof() of the rx_desc structure used by this hw */
+ size_t rx_desc_size;
+
+ /* offset of msdu_payload inside the rx_desc structure used by this hw */
+ size_t rx_desc_msdu_payload_offset;
+
+ /* These fields are optional.
+ * When a field is not provided, the default implementation gets used
+ * (see the ath10k_htt_rx_desc_* helpers below for more info about the defaults).
+ */
+ bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd);
+ int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
+
+ /* Safely cast from a void* buffer containing an rx descriptor
+ * to the proper rx_desc structure
+ */
+ struct htt_rx_desc *(*rx_desc_from_raw_buffer)(void *buff);
+
+ void (*rx_desc_get_offsets)(struct htt_rx_ring_rx_desc_offsets *offs);
+ struct rx_attention *(*rx_desc_get_attention)(struct htt_rx_desc *rxd);
+ struct rx_frag_info_common *(*rx_desc_get_frag_info)(struct htt_rx_desc *rxd);
+ struct rx_mpdu_start *(*rx_desc_get_mpdu_start)(struct htt_rx_desc *rxd);
+ struct rx_mpdu_end *(*rx_desc_get_mpdu_end)(struct htt_rx_desc *rxd);
+ struct rx_msdu_start_common *(*rx_desc_get_msdu_start)(struct htt_rx_desc *rxd);
+ struct rx_msdu_end_common *(*rx_desc_get_msdu_end)(struct htt_rx_desc *rxd);
+ struct rx_ppdu_start *(*rx_desc_get_ppdu_start)(struct htt_rx_desc *rxd);
+ struct rx_ppdu_end_common *(*rx_desc_get_ppdu_end)(struct htt_rx_desc *rxd);
+ u8 *(*rx_desc_get_rx_hdr_status)(struct htt_rx_desc *rxd);
+ u8 *(*rx_desc_get_msdu_payload)(struct htt_rx_desc *rxd);
+};
+
+extern const struct ath10k_htt_rx_desc_ops qca988x_rx_desc_ops;
+extern const struct ath10k_htt_rx_desc_ops qca99x0_rx_desc_ops;
+extern const struct ath10k_htt_rx_desc_ops wcn3990_rx_desc_ops;
+
+static inline int
+ath10k_htt_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ if (hw->rx_desc_ops->rx_desc_get_l3_pad_bytes)
+ return hw->rx_desc_ops->rx_desc_get_l3_pad_bytes(rxd);
+ return 0;
+}
+
+static inline bool
+ath10k_htt_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ if (hw->rx_desc_ops->rx_desc_get_msdu_limit_error)
+ return hw->rx_desc_ops->rx_desc_get_msdu_limit_error(rxd);
+ return false;
+}
+
+/* The default implementations of all these getters use the old rx_desc
+ * layout (htt_rx_desc_v1), which makes it easier to define the
+ * ath10k_htt_rx_desc_ops instances. If new wireless cards must be
+ * supported, though, it would probably be better to switch the defaults
+ * to the new rx_desc, since that would make extension easier.
+ */
+static inline struct htt_rx_desc *
+ath10k_htt_rx_desc_from_raw_buffer(struct ath10k_hw_params *hw, void *buff)
+{
+ if (hw->rx_desc_ops->rx_desc_from_raw_buffer)
+ return hw->rx_desc_ops->rx_desc_from_raw_buffer(buff);
+ return &((struct htt_rx_desc_v1 *)buff)->base;
+}
+
+static inline void
+ath10k_htt_rx_desc_get_offsets(struct ath10k_hw_params *hw,
+ struct htt_rx_ring_rx_desc_offsets *off)
+{
+ if (hw->rx_desc_ops->rx_desc_get_offsets) {
+ hw->rx_desc_ops->rx_desc_get_offsets(off);
+ } else {
+#define desc_offset(x) (offsetof(struct htt_rx_desc_v1, x) / 4)
+ off->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+ off->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+ off->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+ off->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+ off->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+ off->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+ off->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+ off->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+ off->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+ off->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+ }
+}
+
+static inline struct rx_attention *
+ath10k_htt_rx_desc_get_attention(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_attention)
+ return hw->rx_desc_ops->rx_desc_get_attention(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->attention;
+}
+
+static inline struct rx_frag_info_common *
+ath10k_htt_rx_desc_get_frag_info(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_frag_info)
+ return hw->rx_desc_ops->rx_desc_get_frag_info(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->frag_info.common;
+}
+
+static inline struct rx_mpdu_start *
+ath10k_htt_rx_desc_get_mpdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_mpdu_start)
+ return hw->rx_desc_ops->rx_desc_get_mpdu_start(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->mpdu_start;
+}
+
+static inline struct rx_mpdu_end *
+ath10k_htt_rx_desc_get_mpdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_mpdu_end)
+ return hw->rx_desc_ops->rx_desc_get_mpdu_end(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->mpdu_end;
+}
+
+static inline struct rx_msdu_start_common *
+ath10k_htt_rx_desc_get_msdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_msdu_start)
+ return hw->rx_desc_ops->rx_desc_get_msdu_start(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->msdu_start.common;
+}
+
+static inline struct rx_msdu_end_common *
+ath10k_htt_rx_desc_get_msdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_msdu_end)
+ return hw->rx_desc_ops->rx_desc_get_msdu_end(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->msdu_end.common;
+}
+
+static inline struct rx_ppdu_start *
+ath10k_htt_rx_desc_get_ppdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_ppdu_start)
+ return hw->rx_desc_ops->rx_desc_get_ppdu_start(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->ppdu_start;
+}
+
+static inline struct rx_ppdu_end_common *
+ath10k_htt_rx_desc_get_ppdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_ppdu_end)
+ return hw->rx_desc_ops->rx_desc_get_ppdu_end(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->ppdu_end.common;
+}
+
+static inline u8 *
+ath10k_htt_rx_desc_get_rx_hdr_status(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_rx_hdr_status)
+ return hw->rx_desc_ops->rx_desc_get_rx_hdr_status(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return rx_desc->rx_hdr_status;
+}
+
+static inline u8 *
+ath10k_htt_rx_desc_get_msdu_payload(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_msdu_payload)
+ return hw->rx_desc_ops->rx_desc_get_msdu_payload(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return rx_desc->msdu_payload;
+}
+
#define HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK 0x00000fff
#define HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB 0
#define HTT_RX_DESC_HL_INFO_ENCRYPTED_MASK 0x00001000
@@ -2136,7 +2381,14 @@ struct htt_rx_chan_info {
* rounded up to a cache line size.
*/
#define HTT_RX_BUF_SIZE 2048
-#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
+
+/* HTT_RX_MSDU_SIZE can't be computed statically anymore,
+ * because it depends on the underlying device's rx_desc representation.
+ */
+static inline int ath10k_htt_rx_msdu_size(struct ath10k_hw_params *hw)
+{
+ return HTT_RX_BUF_SIZE - (int)hw->rx_desc_ops->rx_desc_size;
+}
/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
* aggregated traffic more nicely.
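Extending this scheme to a further descriptor layout would then come down to
one more ops instance. A hypothetical sketch (struct htt_rx_desc_v3 and its
getter are invented for illustration):

	static struct rx_attention *
	ath10k_rx_desc_v3_get_attention(struct htt_rx_desc *rxd)
	{
		struct htt_rx_desc_v3 *rx_desc =
			container_of(rxd, struct htt_rx_desc_v3, base);

		return &rx_desc->attention;
	}

	const struct ath10k_htt_rx_desc_ops new_chip_rx_desc_ops = {
		/* mandatory fields first */
		.rx_desc_size = sizeof(struct htt_rx_desc_v3),
		.rx_desc_msdu_payload_offset =
			offsetof(struct htt_rx_desc_v3, msdu_payload),
		/* then only the optional getters that differ from the
		 * htt_rx_desc_v1 defaults
		 */
		.rx_desc_get_attention = ath10k_rx_desc_v3_get_attention,
	};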
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index adbaeb67eedf..771252dd6d4e 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -21,7 +21,10 @@
#define HTT_RX_RING_REFILL_RESCHED_MS 5
-static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
+/* shortcut to interpret a raw memory buffer as a rx descriptor */
+#define HTT_RX_BUF_TO_RX_DESC(hw, buf) ath10k_htt_rx_desc_from_raw_buffer(hw, buf)
+
+static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb);
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
@@ -128,6 +131,7 @@ static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
+ struct ath10k_hw_params *hw = &htt->ar->hw_params;
struct htt_rx_desc *rx_desc;
struct ath10k_skb_rxcb *rxcb;
struct sk_buff *skb;
@@ -163,8 +167,8 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
skb->data);
/* Clear rx_desc attention word before posting to Rx ring */
- rx_desc = (struct htt_rx_desc *)skb->data;
- rx_desc->attention.flags = __cpu_to_le32(0);
+ rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, skb->data);
+ ath10k_htt_rx_desc_get_attention(hw, rx_desc)->flags = __cpu_to_le32(0);
paddr = dma_map_single(htt->ar->dev, skb->data,
skb->len + skb_tailroom(skb),
@@ -343,9 +347,14 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
struct sk_buff_head *amsdu)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
int msdu_len, msdu_chaining = 0;
struct sk_buff *msdu;
struct htt_rx_desc *rx_desc;
+ struct rx_attention *rx_desc_attention;
+ struct rx_frag_info_common *rx_desc_frag_info_common;
+ struct rx_msdu_start_common *rx_desc_msdu_start_common;
+ struct rx_msdu_end_common *rx_desc_msdu_end_common;
lockdep_assert_held(&htt->rx_ring.lock);
@@ -360,13 +369,18 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
__skb_queue_tail(amsdu, msdu);
- rx_desc = (struct htt_rx_desc *)msdu->data;
+ rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ rx_desc_attention = ath10k_htt_rx_desc_get_attention(hw, rx_desc);
+ rx_desc_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw,
+ rx_desc);
+ rx_desc_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rx_desc);
+ rx_desc_frag_info_common = ath10k_htt_rx_desc_get_frag_info(hw, rx_desc);
/* FIXME: we must report msdu payload since this is what caller
* expects now
*/
- skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
- skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
/*
* Sanity check - confirm the HW is finished filling in the
@@ -376,24 +390,24 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
* To prevent the case that we handle a stale Rx descriptor,
* just assert for now until we have a way to recover.
*/
- if (!(__le32_to_cpu(rx_desc->attention.flags)
+ if (!(__le32_to_cpu(rx_desc_attention->flags)
& RX_ATTENTION_FLAGS_MSDU_DONE)) {
__skb_queue_purge(amsdu);
return -EIO;
}
- msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
+ msdu_len_invalid = !!(__le32_to_cpu(rx_desc_attention->flags)
& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
- msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
+ msdu_len = MS(__le32_to_cpu(rx_desc_msdu_start_common->info0),
RX_MSDU_START_INFO0_MSDU_LENGTH);
- msdu_chained = rx_desc->frag_info.ring2_more_count;
+ msdu_chained = rx_desc_frag_info_common->ring2_more_count;
if (msdu_len_invalid)
msdu_len = 0;
skb_trim(msdu, 0);
- skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
+ skb_put(msdu, min(msdu_len, ath10k_htt_rx_msdu_size(hw)));
msdu_len -= msdu->len;
/* Note: Chained buffers do not contain rx descriptor */
@@ -411,11 +425,12 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu_chaining = 1;
}
- last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
+ last_msdu = __le32_to_cpu(rx_desc_msdu_end_common->info0) &
RX_MSDU_END_INFO0_LAST_MSDU;
- trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
- sizeof(*rx_desc) - sizeof(u32));
+ /* FIXME: why are we skipping the first part of the rx_desc? */
+ trace_ath10k_htt_rx_desc(ar, (void *)rx_desc + sizeof(u32),
+ hw->rx_desc_ops->rx_desc_size - sizeof(u32));
if (last_msdu)
break;
@@ -480,6 +495,7 @@ static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
struct htt_rx_in_ord_msdu_desc **msdu_desc)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
u32 paddr;
struct sk_buff *frag_buf;
struct sk_buff *prev_frag_buf;
@@ -488,12 +504,12 @@ static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
struct htt_rx_desc *rxd;
int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
- rxd = (void *)msdu->data;
- trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
- skb_put(msdu, sizeof(struct htt_rx_desc));
- skb_pull(msdu, sizeof(struct htt_rx_desc));
- skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
amsdu_len -= msdu->len;
last_frag = ind_desc->reserved;
@@ -556,6 +572,7 @@ ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
u64 paddr;
struct sk_buff *frag_buf;
struct sk_buff *prev_frag_buf;
@@ -564,12 +581,12 @@ ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
struct htt_rx_desc *rxd;
int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
- rxd = (void *)msdu->data;
- trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
- skb_put(msdu, sizeof(struct htt_rx_desc));
- skb_pull(msdu, sizeof(struct htt_rx_desc));
- skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
amsdu_len -= msdu->len;
last_frag = ind_desc->reserved;
@@ -631,8 +648,10 @@ static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
struct sk_buff_head *list)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
struct sk_buff *msdu;
int msdu_count, ret;
bool is_offload;
@@ -667,15 +686,16 @@ static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
__skb_queue_tail(list, msdu);
if (!is_offload) {
- rxd = (void *)msdu->data;
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
- trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+ trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
- skb_put(msdu, sizeof(*rxd));
- skb_pull(msdu, sizeof(*rxd));
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
- if (!(__le32_to_cpu(rxd->attention.flags) &
+ if (!(__le32_to_cpu(rxd_attention->flags) &
RX_ATTENTION_FLAGS_MSDU_DONE)) {
ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
return -EIO;
@@ -693,8 +713,10 @@ static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
struct sk_buff_head *list)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
struct sk_buff *msdu;
int msdu_count, ret;
bool is_offload;
@@ -728,15 +750,16 @@ static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
__skb_queue_tail(list, msdu);
if (!is_offload) {
- rxd = (void *)msdu->data;
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
- trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+ trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
- skb_put(msdu, sizeof(*rxd));
- skb_pull(msdu, sizeof(*rxd));
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
- if (!(__le32_to_cpu(rxd->attention.flags) &
+ if (!(__le32_to_cpu(rxd_attention->flags) &
RX_ATTENTION_FLAGS_MSDU_DONE)) {
ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
return -EIO;
@@ -944,16 +967,32 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct rx_attention *rxd_attention;
+ struct rx_mpdu_start *rxd_mpdu_start;
+ struct rx_mpdu_end *rxd_mpdu_end;
+ struct rx_msdu_start_common *rxd_msdu_start_common;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ struct rx_ppdu_start *rxd_ppdu_start;
struct ieee80211_supported_band *sband;
u8 cck, rate, bw, sgi, mcs, nss;
+ u8 *rxd_msdu_payload;
u8 preamble = 0;
u8 group_id;
u32 info1, info2, info3;
u32 stbc, nsts_su;
- info1 = __le32_to_cpu(rxd->ppdu_start.info1);
- info2 = __le32_to_cpu(rxd->ppdu_start.info2);
- info3 = __le32_to_cpu(rxd->ppdu_start.info3);
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
+ rxd_mpdu_end = ath10k_htt_rx_desc_get_mpdu_end(hw, rxd);
+ rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
+ rxd_msdu_payload = ath10k_htt_rx_desc_get_msdu_payload(hw, rxd);
+
+ info1 = __le32_to_cpu(rxd_ppdu_start->info1);
+ info2 = __le32_to_cpu(rxd_ppdu_start->info2);
+ info3 = __le32_to_cpu(rxd_ppdu_start->info3);
preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
@@ -1022,24 +1061,24 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
if (mcs > 0x09) {
ath10k_warn(ar, "invalid MCS received %u\n", mcs);
ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
- __le32_to_cpu(rxd->attention.flags),
- __le32_to_cpu(rxd->mpdu_start.info0),
- __le32_to_cpu(rxd->mpdu_start.info1),
- __le32_to_cpu(rxd->msdu_start.common.info0),
- __le32_to_cpu(rxd->msdu_start.common.info1),
- rxd->ppdu_start.info0,
- __le32_to_cpu(rxd->ppdu_start.info1),
- __le32_to_cpu(rxd->ppdu_start.info2),
- __le32_to_cpu(rxd->ppdu_start.info3),
- __le32_to_cpu(rxd->ppdu_start.info4));
+ __le32_to_cpu(rxd_attention->flags),
+ __le32_to_cpu(rxd_mpdu_start->info0),
+ __le32_to_cpu(rxd_mpdu_start->info1),
+ __le32_to_cpu(rxd_msdu_start_common->info0),
+ __le32_to_cpu(rxd_msdu_start_common->info1),
+ rxd_ppdu_start->info0,
+ __le32_to_cpu(rxd_ppdu_start->info1),
+ __le32_to_cpu(rxd_ppdu_start->info2),
+ __le32_to_cpu(rxd_ppdu_start->info3),
+ __le32_to_cpu(rxd_ppdu_start->info4));
ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
- __le32_to_cpu(rxd->msdu_end.common.info0),
- __le32_to_cpu(rxd->mpdu_end.info0));
+ __le32_to_cpu(rxd_msdu_end_common->info0),
+ __le32_to_cpu(rxd_mpdu_end->info0));
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
"rx desc msdu payload: ",
- rxd->msdu_payload, 50);
+ rxd_msdu_payload, 50);
}
status->rate_idx = mcs;
@@ -1059,6 +1098,10 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct rx_attention *rxd_attention;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ struct rx_mpdu_start *rxd_mpdu_start;
struct ath10k_peer *peer;
struct ath10k_vif *arvif;
struct cfg80211_chan_def def;
@@ -1069,15 +1112,19 @@ ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
if (!rxd)
return NULL;
- if (rxd->attention.flags &
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
+
+ if (rxd_attention->flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
return NULL;
- if (!(rxd->msdu_end.common.info0 &
+ if (!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
return NULL;
- peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ peer_id = MS(__le32_to_cpu(rxd_mpdu_start->info0),
RX_MPDU_START_INFO0_PEER_IDX);
peer = ath10k_peer_find_by_id(ar, peer_id);
@@ -1167,14 +1214,16 @@ static void ath10k_htt_rx_h_signal(struct ath10k *ar,
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct rx_ppdu_start *rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
int i;
for (i = 0; i < IEEE80211_MAX_CHAINS ; i++) {
status->chains &= ~BIT(i);
- if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
+ if (rxd_ppdu_start->rssi_chains[i].pri20_mhz != 0x80) {
status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
- rxd->ppdu_start.rssi_chains[i].pri20_mhz;
+ rxd_ppdu_start->rssi_chains[i].pri20_mhz;
status->chains |= BIT(i);
}
@@ -1182,7 +1231,7 @@ static void ath10k_htt_rx_h_signal(struct ath10k *ar,
/* FIXME: Get real NF */
status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
- rxd->ppdu_start.rssi_comb;
+ rxd_ppdu_start->rssi_comb;
status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}
@@ -1190,13 +1239,18 @@ static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct rx_ppdu_end_common *rxd_ppdu_end_common;
+
+ rxd_ppdu_end_common = ath10k_htt_rx_desc_get_ppdu_end(hw, rxd);
+
/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
* means all prior MSDUs in a PPDU are reported to mac80211 without the
* TSF. Is it worth holding frames until end of PPDU is known?
*
* FIXME: Can we get/compute 64bit TSF?
*/
- status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
+ status->mactime = __le32_to_cpu(rxd_ppdu_end_common->tsf_timestamp);
status->flag |= RX_FLAG_MACTIME_END;
}
@@ -1206,7 +1260,9 @@ static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
u32 vdev_id)
{
struct sk_buff *first;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
bool is_first_ppdu;
bool is_last_ppdu;
@@ -1214,11 +1270,14 @@ static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
return;
first = skb_peek(amsdu);
- rxd = (void *)first->data - sizeof(*rxd);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)first->data - hw->rx_desc_ops->rx_desc_size);
- is_first_ppdu = !!(rxd->attention.flags &
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+
+ is_first_ppdu = !!(rxd_attention->flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
- is_last_ppdu = !!(rxd->attention.flags &
+ is_last_ppdu = !!(rxd_attention->flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
if (is_first_ppdu) {
@@ -1357,7 +1416,9 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
const u8 first_hdr[64])
{
struct ieee80211_hdr *hdr;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
size_t hdr_len;
size_t crypto_len;
bool is_first;
@@ -1366,10 +1427,13 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
int bytes_aligned = ar->hw_params.decap_align_bytes;
u8 *qos;
- rxd = (void *)msdu->data - sizeof(*rxd);
- is_first = !!(rxd->msdu_end.common.info0 &
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ is_first = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
- is_last = !!(rxd->msdu_end.common.info0 &
+ is_last = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
/* Delivered decapped frame:
@@ -1387,7 +1451,7 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
* error packets. If limit exceeds, hw sends all remaining MSDUs as
* a single last MSDU with this msdu limit error set.
*/
- msdu_limit_err = ath10k_rx_desc_msdu_limit_error(&ar->hw_params, rxd);
+ msdu_limit_err = ath10k_htt_rx_desc_msdu_limit_error(hw, rxd);
/* If MSDU limit error happens, then don't warn on, the partial raw MSDU
* without first MSDU is expected in that case, and handled later here.
@@ -1479,6 +1543,7 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
const u8 first_hdr[64],
enum htt_rx_mpdu_encrypt_type enctype)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct ieee80211_hdr *hdr;
struct htt_rx_desc *rxd;
size_t hdr_len;
@@ -1499,9 +1564,10 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
*/
/* pull decapped header and copy SA & DA */
- rxd = (void *)msdu->data - sizeof(*rxd);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, (void *)msdu->data -
+ hw->rx_desc_ops->rx_desc_size);
- l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
skb_put(msdu, l3_pad_bytes);
hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
@@ -1537,18 +1603,25 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type enctype)
{
struct ieee80211_hdr *hdr;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ u8 *rxd_rx_hdr_status;
size_t hdr_len, crypto_len;
void *rfc1042;
bool is_first, is_last, is_amsdu;
int bytes_aligned = ar->hw_params.decap_align_bytes;
- rxd = (void *)msdu->data - sizeof(*rxd);
- hdr = (void *)rxd->rx_hdr_status;
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ rxd_rx_hdr_status = ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
+ hdr = (void *)rxd_rx_hdr_status;
- is_first = !!(rxd->msdu_end.common.info0 &
+ is_first = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
- is_last = !!(rxd->msdu_end.common.info0 &
+ is_last = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
is_amsdu = !(is_first && is_last);
@@ -1574,6 +1647,7 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
const u8 first_hdr[64],
enum htt_rx_mpdu_encrypt_type enctype)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct ieee80211_hdr *hdr;
struct ethhdr *eth;
size_t hdr_len;
@@ -1593,8 +1667,10 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
if (WARN_ON_ONCE(!rfc1042))
return;
- rxd = (void *)msdu->data - sizeof(*rxd);
- l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
skb_put(msdu, l3_pad_bytes);
skb_pull(msdu, l3_pad_bytes);
@@ -1635,6 +1711,7 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
const u8 first_hdr[64],
enum htt_rx_mpdu_encrypt_type enctype)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct ieee80211_hdr *hdr;
size_t hdr_len;
int l3_pad_bytes;
@@ -1647,8 +1724,10 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
* [payload]
*/
- rxd = (void *)msdu->data - sizeof(*rxd);
- l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
skb_put(msdu, l3_pad_bytes);
skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
@@ -1673,7 +1752,9 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type enctype,
bool is_decrypted)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_msdu_start_common *rxd_msdu_start_common;
enum rx_msdu_decap_format decap;
/* First msdu's decapped header:
@@ -1687,8 +1768,11 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
* [rfc1042/llc]
*/
- rxd = (void *)msdu->data - sizeof(*rxd);
- decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
+ decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
switch (decap) {
@@ -1710,17 +1794,23 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
}
}
-static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
+static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb)
{
struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
+ struct rx_msdu_start_common *rxd_msdu_start_common;
u32 flags, info;
bool is_ip4, is_ip6;
bool is_tcp, is_udp;
bool ip_csum_ok, tcpudp_csum_ok;
- rxd = (void *)skb->data - sizeof(*rxd);
- flags = __le32_to_cpu(rxd->attention.flags);
- info = __le32_to_cpu(rxd->msdu_start.common.info1);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)skb->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
+ flags = __le32_to_cpu(rxd_attention->flags);
+ info = __le32_to_cpu(rxd_msdu_start_common->info1);
is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
@@ -1741,9 +1831,10 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
return CHECKSUM_UNNECESSARY;
}
-static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
+static void ath10k_htt_rx_h_csum_offload(struct ath10k_hw_params *hw,
+ struct sk_buff *msdu)
{
- msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
+ msdu->ip_summed = ath10k_htt_rx_get_csum_state(hw, msdu);
}
static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
@@ -1835,7 +1926,11 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
struct sk_buff *first;
struct sk_buff *last;
struct sk_buff *msdu, *temp;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
+ struct rx_mpdu_start *rxd_mpdu_start;
+
struct ieee80211_hdr *hdr;
enum htt_rx_mpdu_encrypt_type enctype;
u8 first_hdr[64];
@@ -1853,18 +1948,22 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
return;
first = skb_peek(amsdu);
- rxd = (void *)first->data - sizeof(*rxd);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)first->data - hw->rx_desc_ops->rx_desc_size);
- is_mgmt = !!(rxd->attention.flags &
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
+
+ is_mgmt = !!(rxd_attention->flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
- enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
* decapped header. It'll be used for undecapping of each MSDU.
*/
- hdr = (void *)rxd->rx_hdr_status;
+ hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
if (rx_hdr)
@@ -1882,8 +1981,11 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
/* Some attention flags are valid only in the last MSDU. */
last = skb_peek_tail(amsdu);
- rxd = (void *)last->data - sizeof(*rxd);
- attention = __le32_to_cpu(rxd->attention.flags);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)last->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ attention = __le32_to_cpu(rxd_attention->flags);
has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
@@ -1971,7 +2073,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
continue;
}
- ath10k_htt_rx_h_csum_offload(msdu);
+ ath10k_htt_rx_h_csum_offload(&ar->hw_params, msdu);
if (frag && !fill_crypt_header &&
enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
@@ -2083,12 +2185,19 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
unsigned long *unchain_cnt)
{
struct sk_buff *first;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_msdu_start_common *rxd_msdu_start_common;
+ struct rx_frag_info_common *rxd_frag_info;
enum rx_msdu_decap_format decap;
first = skb_peek(amsdu);
- rxd = (void *)first->data - sizeof(*rxd);
- decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)first->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
+ rxd_frag_info = ath10k_htt_rx_desc_get_frag_info(hw, rxd);
+ decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
/* FIXME: Current unchaining logic can only handle simple case of raw
@@ -2097,7 +2206,7 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
* try re-constructing such frames - it'll be pretty much garbage.
*/
if (decap != RX_MSDU_DECAP_RAW ||
- skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
+ skb_queue_len(amsdu) != 1 + rxd_frag_info->ring2_more_count) {
*drop_cnt += skb_queue_len(amsdu);
__skb_queue_purge(amsdu);
return;
@@ -2112,7 +2221,10 @@ static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
u8 *subframe_hdr;
struct sk_buff *first;
bool is_first, is_last;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ struct rx_mpdu_start *rxd_mpdu_start;
struct ieee80211_hdr *hdr;
size_t hdr_len, crypto_len;
enum htt_rx_mpdu_encrypt_type enctype;
@@ -2120,12 +2232,16 @@ static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
first = skb_peek(amsdu);
- rxd = (void *)first->data - sizeof(*rxd);
- hdr = (void *)rxd->rx_hdr_status;
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)first->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
+ hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
- is_first = !!(rxd->msdu_end.common.info0 &
+ is_first = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
- is_last = !!(rxd->msdu_end.common.info0 &
+ is_last = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
/* Return in case of non-aggregated msdu */
@@ -2136,7 +2252,7 @@ static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
if (!is_first)
return false;
- enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
hdr_len = ieee80211_hdrlen(hdr->frame_control);
@@ -3028,11 +3144,13 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
spin_unlock_bh(&ar->data_lock);
}
-static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
+static int ath10k_htt_rx_extract_amsdu(struct ath10k_hw_params *hw,
+ struct sk_buff_head *list,
struct sk_buff_head *amsdu)
{
struct sk_buff *msdu;
struct htt_rx_desc *rxd;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
if (skb_queue_empty(list))
return -ENOBUFS;
@@ -3043,15 +3161,22 @@ static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
while ((msdu = __skb_dequeue(list))) {
__skb_queue_tail(amsdu, msdu);
- rxd = (void *)msdu->data - sizeof(*rxd);
- if (rxd->msdu_end.common.info0 &
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data -
+ hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ if (rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
break;
}
msdu = skb_peek_tail(amsdu);
- rxd = (void *)msdu->data - sizeof(*rxd);
- if (!(rxd->msdu_end.common.info0 &
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ if (!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
skb_queue_splice_init(amsdu, list);
return -EAGAIN;
@@ -3194,7 +3319,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
while (!skb_queue_empty(&list)) {
__skb_queue_head_init(&amsdu);
- ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
+ ret = ath10k_htt_rx_extract_amsdu(&ar->hw_params, &list, &amsdu);
switch (ret) {
case 0:
/* Note: The in-order indication may report interleaved
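
The htt_rx.c hunks above all follow one pattern: the back-step from msdu->data to the descriptor start uses the per-hardware hw->rx_desc_ops->rx_desc_size instead of a compile-time sizeof(*rxd), and direct field reads become accessor calls, so a single code path can serve descriptor layouts of different sizes. A minimal standalone sketch of that idiom (names, offsets and sizes below are illustrative, not the driver's):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct rx_desc_ops {
        size_t rx_desc_size;                    /* set per chip family */
        uint32_t (*get_info0)(const void *rxd); /* layout-aware accessor */
    };

    static uint32_t demo_get_info0(const void *rxd)
    {
        return *(const uint32_t *)rxd;          /* demo layout: info0 at 0 */
    }

    static const struct rx_desc_ops demo_ops = {
        .rx_desc_size = 128,
        .get_info0 = demo_get_info0,
    };

    int main(void)
    {
        uint8_t buf[256] = { 42 };
        const struct rx_desc_ops *ops = &demo_ops;
        uint8_t *data = buf + ops->rx_desc_size; /* like msdu->data */
        /* step back by the runtime size, not sizeof(*rxd) */
        const void *rxd = data - ops->rx_desc_size;

        printf("info0 = %u\n", ops->get_info0(rxd));
        return 0;
    }
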
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index b793eac2cfac..9842a4b2f78f 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -796,47 +796,26 @@ static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
return 0;
}
-static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring)
+static void ath10k_htt_fill_rx_desc_offset_32(struct ath10k_hw_params *hw, void *rx_ring)
{
struct htt_rx_ring_setup_ring32 *ring =
(struct htt_rx_ring_setup_ring32 *)rx_ring;
-#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
- ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
- ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
- ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
- ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
- ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
- ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
- ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
- ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
- ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
- ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
-#undef desc_offset
+ ath10k_htt_rx_desc_get_offsets(hw, &ring->offsets);
}
-static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring)
+static void ath10k_htt_fill_rx_desc_offset_64(struct ath10k_hw_params *hw, void *rx_ring)
{
struct htt_rx_ring_setup_ring64 *ring =
(struct htt_rx_ring_setup_ring64 *)rx_ring;
-#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
- ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
- ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
- ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
- ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
- ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
- ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
- ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
- ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
- ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
- ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
-#undef desc_offset
+ ath10k_htt_rx_desc_get_offsets(hw, &ring->offsets);
}
static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct sk_buff *skb;
struct htt_cmd *cmd;
struct htt_rx_ring_setup_ring32 *ring;
@@ -896,7 +875,7 @@ static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
ring->flags = __cpu_to_le16(flags);
ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
- ath10k_htt_fill_rx_desc_offset_32(ring);
+ ath10k_htt_fill_rx_desc_offset_32(hw, ring);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
@@ -909,6 +888,7 @@ static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct sk_buff *skb;
struct htt_cmd *cmd;
struct htt_rx_ring_setup_ring64 *ring;
@@ -965,7 +945,7 @@ static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
ring->flags = __cpu_to_le16(flags);
ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
- ath10k_htt_fill_rx_desc_offset_64(ring);
+ ath10k_htt_fill_rx_desc_offset_64(hw, ring);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
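
Both deleted desc_offset() blocks computed 32-bit-word offsets of descriptor fields with offsetof(); the patch collapses them into one per-hardware ath10k_htt_rx_desc_get_offsets() call. The underlying idiom, sketched standalone with a demo layout (not the real descriptor):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct demo_rx_desc {
        uint32_t attention[4];
        uint32_t mpdu_start[8];
        uint32_t msdu_start[8];
    };

    /* Firmware expects offsets in 32-bit words, hence the divide by 4. */
    #define DESC_OFFSET(x) ((uint16_t)(offsetof(struct demo_rx_desc, x) / 4))

    int main(void)
    {
        printf("mpdu_start at word %u\n", DESC_OFFSET(mpdu_start)); /* 4 */
        printf("msdu_start at word %u\n", DESC_OFFSET(msdu_start)); /* 12 */
        return 0;
    }
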
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 57c58af64a57..e52e41a70321 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -11,6 +11,7 @@
#include "hif.h"
#include "wmi-ops.h"
#include "bmi.h"
+#include "rx_desc.h"
const struct ath10k_hw_regs qca988x_regs = {
.rtc_soc_base_address = 0x00004000,
@@ -1134,21 +1135,7 @@ const struct ath10k_hw_ops qca988x_ops = {
.is_rssi_enable = ath10k_htt_tx_rssi_enable,
};
-static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
-{
- return MS(__le32_to_cpu(rxd->msdu_end.qca99x0.info1),
- RX_MSDU_END_INFO1_L3_HDR_PAD);
-}
-
-static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd)
-{
- return !!(rxd->msdu_end.common.info0 &
- __cpu_to_le32(RX_MSDU_END_INFO0_MSDU_LIMIT_ERR));
-}
-
const struct ath10k_hw_ops qca99x0_ops = {
- .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
- .rx_desc_get_msdu_limit_error = ath10k_qca99x0_rx_desc_msdu_limit_error,
.is_rssi_enable = ath10k_htt_tx_rssi_enable,
};
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 591ef7416b61..5215a6816d71 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -510,6 +510,8 @@ struct ath10k_hw_clk_params {
u32 outdiv;
};
+struct htt_rx_desc_ops;
+
struct ath10k_hw_params {
u32 id;
u16 dev_id;
@@ -562,6 +564,9 @@ struct ath10k_hw_params {
*/
bool sw_decrypt_mcast_mgmt;
+ /* Rx descriptor abstraction */
+ const struct ath10k_htt_rx_desc_ops *rx_desc_ops;
+
const struct ath10k_hw_ops *hw_ops;
/* Number of bytes used for alignment in rx_hdr_status of rx desc. */
@@ -630,16 +635,14 @@ struct ath10k_hw_params {
bool dynamic_sar_support;
};
-struct htt_rx_desc;
struct htt_resp;
struct htt_data_tx_completion_ext;
+struct htt_rx_ring_rx_desc_offsets;
/* Defines needed for Rx descriptor abstraction */
struct ath10k_hw_ops {
- int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
void (*set_coverage_class)(struct ath10k *ar, s16 value);
int (*enable_pll_clk)(struct ath10k *ar);
- bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd);
int (*tx_data_rssi_pad_bytes)(struct htt_resp *htt);
int (*is_rssi_enable)(struct htt_resp *resp);
};
@@ -653,24 +656,6 @@ extern const struct ath10k_hw_ops wcn3990_ops;
extern const struct ath10k_hw_clk_params qca6174_clk[];
static inline int
-ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
- struct htt_rx_desc *rxd)
-{
- if (hw->hw_ops->rx_desc_get_l3_pad_bytes)
- return hw->hw_ops->rx_desc_get_l3_pad_bytes(rxd);
- return 0;
-}
-
-static inline bool
-ath10k_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw,
- struct htt_rx_desc *rxd)
-{
- if (hw->hw_ops->rx_desc_get_msdu_limit_error)
- return hw->hw_ops->rx_desc_get_msdu_limit_error(rxd);
- return false;
-}
-
-static inline int
ath10k_tx_data_rssi_get_pad_bytes(struct ath10k_hw_params *hw,
struct htt_resp *htt)
{
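
The inline wrappers deleted from hw.h used the usual optional-op-with-default idiom: call the per-hardware hook when present, otherwise return a safe default. That dispatch now lives behind the rx_desc_ops table instead. A standalone sketch of the removed idiom:

    #include <stdio.h>

    struct hw_ops {
        int (*get_l3_pad_bytes)(const void *rxd); /* optional, may be NULL */
    };

    static inline int get_l3_pad_bytes(const struct hw_ops *ops, const void *rxd)
    {
        if (ops->get_l3_pad_bytes)
            return ops->get_l3_pad_bytes(rxd);
        return 0; /* default: no L3 padding on this chip */
    }

    static int demo_pad(const void *rxd) { (void)rxd; return 2; }

    int main(void)
    {
        struct hw_ops with_hook = { .get_l3_pad_bytes = demo_pad };
        struct hw_ops no_hook = { 0 };

        printf("%d %d\n", get_l3_pad_bytes(&with_hook, NULL),
               get_l3_pad_bytes(&no_hook, NULL));
        return 0;
    }
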
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index 705b6295e466..6ce2a8b1060d 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -196,17 +196,31 @@ struct rx_attention {
* descriptor.
*/
-struct rx_frag_info {
+struct rx_frag_info_common {
u8 ring0_more_count;
u8 ring1_more_count;
u8 ring2_more_count;
u8 ring3_more_count;
+} __packed;
+
+struct rx_frag_info_wcn3990 {
u8 ring4_more_count;
u8 ring5_more_count;
u8 ring6_more_count;
u8 ring7_more_count;
} __packed;
+struct rx_frag_info {
+ struct rx_frag_info_common common;
+ union {
+ struct rx_frag_info_wcn3990 wcn3990;
+ } __packed;
+} __packed;
+
+struct rx_frag_info_v1 {
+ struct rx_frag_info_common common;
+} __packed;
+
/*
* ring0_more_count
* Indicates the number of more buffers associated with RX DMA
@@ -474,11 +488,17 @@ struct rx_msdu_start_wcn3990 {
struct rx_msdu_start {
struct rx_msdu_start_common common;
union {
- struct rx_msdu_start_qca99x0 qca99x0;
struct rx_msdu_start_wcn3990 wcn3990;
} __packed;
} __packed;
+struct rx_msdu_start_v1 {
+ struct rx_msdu_start_common common;
+ union {
+ struct rx_msdu_start_qca99x0 qca99x0;
+ } __packed;
+} __packed;
+
/*
* msdu_length
* MSDU length in bytes after decapsulation. This field is
@@ -612,11 +632,17 @@ struct rx_msdu_end_wcn3990 {
struct rx_msdu_end {
struct rx_msdu_end_common common;
union {
- struct rx_msdu_end_qca99x0 qca99x0;
struct rx_msdu_end_wcn3990 wcn3990;
} __packed;
} __packed;
+struct rx_msdu_end_v1 {
+ struct rx_msdu_end_common common;
+ union {
+ struct rx_msdu_end_qca99x0 qca99x0;
+ } __packed;
+} __packed;
+
/*
*ip_hdr_chksum
* This can include the IP header checksum or the pseudo header
@@ -1136,11 +1162,17 @@ struct rx_ppdu_end_wcn3990 {
struct rx_ppdu_end {
struct rx_ppdu_end_common common;
union {
+ struct rx_ppdu_end_wcn3990 wcn3990;
+ } __packed;
+} __packed;
+
+struct rx_ppdu_end_v1 {
+ struct rx_ppdu_end_common common;
+ union {
struct rx_ppdu_end_qca988x qca988x;
struct rx_ppdu_end_qca6174 qca6174;
struct rx_ppdu_end_qca99x0 qca99x0;
struct rx_ppdu_end_qca9984 qca9984;
- struct rx_ppdu_end_wcn3990 wcn3990;
} __packed;
} __packed;
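
The rx_desc.h reshuffle moves each vendor-specific tail out of the shared struct and into a *_v1 variant, so the wcn3990-era layout and the qca99x0-era layout become distinct types over the same common prefix; accessors keyed off rx_desc_ops can then return the common part regardless of total size. The shape, sketched standalone:

    #include <stdint.h>
    #include <stdio.h>

    struct frag_info_common { uint8_t r0, r1, r2, r3; };
    struct frag_info_wcn3990 { uint8_t r4, r5, r6, r7; };

    struct frag_info {                 /* wcn3990-capable layout */
        struct frag_info_common common;
        union {
            struct frag_info_wcn3990 wcn3990;
        } u;
    };

    struct frag_info_v1 {              /* common fields only */
        struct frag_info_common common;
    };

    int main(void)
    {
        printf("v1 = %zu bytes, wcn3990 = %zu bytes\n",
               sizeof(struct frag_info_v1), sizeof(struct frag_info));
        return 0;
    }
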
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 9513ab696fff..8328966a0471 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -1306,13 +1306,10 @@ static int ath10k_snoc_resource_init(struct ath10k *ar)
}
for (i = 0; i < CE_COUNT; i++) {
- res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
- if (!res) {
- ath10k_err(ar, "failed to get IRQ%d\n", i);
- ret = -ENODEV;
- goto out;
- }
- ar_snoc->ce_irqs[i].irq_line = res->start;
+ ret = platform_get_irq(ar_snoc->dev, i);
+ if (ret < 0)
+ return ret;
+ ar_snoc->ce_irqs[i].irq_line = ret;
}
ret = device_property_read_u32(&pdev->dev, "qcom,xo-cal-data",
@@ -1323,10 +1320,8 @@ static int ath10k_snoc_resource_init(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_SNOC, "xo cal data %x\n",
ar_snoc->xo_cal_data);
}
- ret = 0;
-out:
- return ret;
+ return 0;
}
static void ath10k_snoc_quirks_init(struct ath10k *ar)
@@ -1556,11 +1551,11 @@ static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (node) {
ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
if (ret) {
dev_err(dev, "failed to resolve msa fixed region\n");
return ret;
}
- of_node_put(node);
ar->msa.paddr = r.start;
ar->msa.mem_size = resource_size(&r);
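
Two independent fixes sit in the snoc.c hunks: platform_get_irq(), the preferred kernel API, replaces the open-coded platform_get_resource(IORESOURCE_IRQ) lookup, and of_node_put() moves before the error check so the node reference is also dropped on the failure path. The return convention that makes the first fix compact — a non-negative IRQ number or a negative errno in a single int — sketched with a stand-in stub (platform_get_irq_stub is not the kernel API):

    #include <stdio.h>

    #define CE_COUNT 4

    /* Stand-in mimicking platform_get_irq(): >= 0 is the IRQ number,
     * < 0 is a negative errno.
     */
    static int platform_get_irq_stub(int index)
    {
        return index < 3 ? 100 + index : -6 /* -ENXIO */;
    }

    int main(void)
    {
        int irq_line[CE_COUNT];

        for (int i = 0; i < CE_COUNT; i++) {
            int ret = platform_get_irq_stub(i);

            if (ret < 0)
                return 1; /* the driver propagates ret directly */
            irq_line[i] = ret;
            printf("CE%d -> irq %d\n", i, irq_line[i]);
        }
        return 0;
    }
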
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
index 25e0ad36ddb1..b4733b5ded34 100644
--- a/drivers/net/wireless/ath/ath10k/swap.h
+++ b/drivers/net/wireless/ath/ath10k/swap.h
@@ -17,7 +17,7 @@ struct ath10k_fw_file;
struct ath10k_swap_code_seg_tlv {
__le32 address;
__le32 length;
- u8 data[0];
+ u8 data[];
} __packed;
struct ath10k_swap_code_seg_tail {
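
u8 data[0] is the old GNU zero-length-array idiom; u8 data[] is the C99 flexible array member. The layout is identical, but only the flexible form lets the compiler and fortify-source checks reason about the trailing bounds (the same one-line conversion appears again below in ath11k's ce.h, core.h and dp.h). Allocating and using such a struct, standalone:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct seg_tlv {
        uint32_t address;
        uint32_t length;
        uint8_t data[];   /* flexible array member, sized at alloc time */
    };

    int main(void)
    {
        const char payload[] = "segment";
        struct seg_tlv *tlv = malloc(sizeof(*tlv) + sizeof(payload));

        if (!tlv)
            return 1;
        tlv->length = sizeof(payload);
        memcpy(tlv->data, payload, sizeof(payload));
        printf("%u bytes: %s\n", (unsigned)tlv->length, (char *)tlv->data);
        free(tlv);
        return 0;
    }
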
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 6f8b64218894..10123974c3da 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -125,7 +125,7 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
tx_done->ack_rssi != ATH10K_INVALID_RSSI) {
info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
tx_done->ack_rssi;
- info->status.is_valid_ack_signal = true;
+ info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
}
ieee80211_tx_status(htt->ar->hw, msdu);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 62c453a21e49..cd438f76f284 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -2427,7 +2427,7 @@ wmi_process_mgmt_tx_comp(struct ath10k *ar, struct mgmt_tx_compl_params *param)
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
param->ack_rssi;
- info->status.is_valid_ack_signal = true;
+ info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
}
ieee80211_tx_status_irqsafe(ar->hw, msdu);
@@ -2611,36 +2611,9 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_mac_handle_beacon(ar, skb);
if (ieee80211_is_beacon(hdr->frame_control) ||
- ieee80211_is_probe_resp(hdr->frame_control)) {
- struct ieee80211_mgmt *mgmt = (void *)skb->data;
- enum cfg80211_bss_frame_type ftype;
- u8 *ies;
- int ies_ch;
-
+ ieee80211_is_probe_resp(hdr->frame_control))
status->boottime_ns = ktime_get_boottime_ns();
- if (!ar->scan_channel)
- goto drop;
-
- ies = mgmt->u.beacon.variable;
-
- if (ieee80211_is_beacon(mgmt->frame_control))
- ftype = CFG80211_BSS_FTYPE_BEACON;
- else
- ftype = CFG80211_BSS_FTYPE_PRESP;
-
- ies_ch = cfg80211_get_ies_channel_number(mgmt->u.beacon.variable,
- skb_tail_pointer(skb) - ies,
- sband->band, ftype);
-
- if (ies_ch > 0 && ies_ch != channel) {
- ath10k_dbg(ar, ATH10K_DBG_MGMT,
- "channel mismatched ds channel %d scan channel %d\n",
- ies_ch, channel);
- goto drop;
- }
- }
-
ath10k_dbg(ar, ATH10K_DBG_MGMT,
"event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
skb, skb->len,
@@ -2654,10 +2627,6 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ieee80211_rx_ni(ar->hw, skb);
return 0;
-
-drop:
- dev_kfree_skb(skb);
- return 0;
}
static int freq_to_idx(struct ath10k *ar, int freq)
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index 7d65c115669f..20b9aa8ddf7d 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -337,14 +337,15 @@ static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
if (patterns[i].mask[j / 8] & BIT(j % 8))
bitmask[j] = 0xff;
old_pattern.mask = bitmask;
- new_pattern = old_pattern;
if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) {
- if (patterns[i].pkt_offset < ETH_HLEN)
+ if (patterns[i].pkt_offset < ETH_HLEN) {
ath10k_wow_convert_8023_to_80211(&new_pattern,
&old_pattern);
- else
+ } else {
+ new_pattern = old_pattern;
new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
+ }
}
if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
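
The wow.c fix drops the unconditional new_pattern = old_pattern copy: when the pattern starts inside the Ethernet header, ath10k_wow_convert_8023_to_80211() builds new_pattern itself, and only patterns starting at or beyond ETH_HLEN take the plain copy plus an offset shift. The re-basing arithmetic, sketched standalone (the WOW_HDR_LEN value is an assumed illustration of the larger 802.11 + LLC header):

    #include <stdio.h>

    #define ETH_HLEN    14  /* Ethernet header, as in <linux/if_ether.h> */
    #define WOW_HDR_LEN 32  /* assumed: 802.11 3-addr hdr + rfc1042 */

    int main(void)
    {
        unsigned int pkt_offset = 20; /* offset within the 802.3 frame */

        /* Payload bytes keep their position relative to the (larger)
         * 802.11 header, so shift the offset by the header growth.
         */
        if (pkt_offset >= ETH_HLEN)
            pkt_offset += WOW_HDR_LEN - ETH_HLEN;
        printf("adjusted offset = %u\n", pkt_offset); /* 38 */
        return 0;
    }
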
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index 3fb0aa000825..f407d4af2074 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -391,6 +391,8 @@ static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
for (j = 0; j < irq_grp->num_irq; j++)
free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
+
+ netif_napi_del(&irq_grp->napi);
}
}
@@ -466,7 +468,7 @@ static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static int ath11k_ahb_ext_irq_config(struct ath11k_base *ab)
+static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
{
struct ath11k_hw_params *hw = &ab->hw_params;
int i, j;
@@ -574,7 +576,7 @@ static int ath11k_ahb_config_irq(struct ath11k_base *ab)
}
/* Configure external interrupts */
- ret = ath11k_ahb_ext_irq_config(ab);
+ ret = ath11k_ahb_config_ext_irq(ab);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h
index 8255b6cfab0c..9644ff909502 100644
--- a/drivers/net/wireless/ath/ath11k/ce.h
+++ b/drivers/net/wireless/ath/ath11k/ce.h
@@ -145,7 +145,7 @@ struct ath11k_ce_ring {
u32 hal_ring_id;
/* keep last */
- struct sk_buff *skb[0];
+ struct sk_buff *skb[];
};
struct ath11k_ce_pipe {
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 293563b3f784..71eb7d04c3bf 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -97,6 +98,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.wakeup_mhi = false,
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
+ .current_cc_support = false,
+ .dbr_debug_support = true,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -161,6 +164,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.wakeup_mhi = false,
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
+ .current_cc_support = false,
+ .dbr_debug_support = true,
},
{
.name = "qca6390 hw2.0",
@@ -224,6 +229,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.wakeup_mhi = true,
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
},
{
.name = "qcn9074 hw1.0",
@@ -287,6 +294,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.wakeup_mhi = false,
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
+ .current_cc_support = false,
+ .dbr_debug_support = true,
},
{
.name = "wcn6855 hw2.0",
@@ -350,6 +359,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.wakeup_mhi = true,
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
},
{
.name = "wcn6855 hw2.1",
@@ -412,6 +423,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.wakeup_mhi = true,
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
},
};
@@ -1404,6 +1417,8 @@ EXPORT_SYMBOL(ath11k_core_deinit);
void ath11k_core_free(struct ath11k_base *ab)
{
+ destroy_workqueue(ab->workqueue);
+
kfree(ab);
}
EXPORT_SYMBOL(ath11k_core_free);
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 9e88ccca5ca7..c0228e91a596 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -263,6 +263,9 @@ struct ath11k_vif {
bool bcca_zero_sent;
bool do_not_send_tmpl;
struct ieee80211_chanctx_conf chanctx;
+#ifdef CONFIG_ATH11K_DEBUGFS
+ struct dentry *debugfs_twt;
+#endif /* CONFIG_ATH11K_DEBUGFS */
};
struct ath11k_vif_iter {
@@ -441,6 +444,8 @@ struct ath11k_dbg_htt_stats {
spinlock_t lock;
};
+#define MAX_MODULE_ID_BITMAP_WORDS 16
+
struct ath11k_debug {
struct dentry *debugfs_pdev;
struct ath11k_dbg_htt_stats htt_stats;
@@ -454,6 +459,9 @@ struct ath11k_debug {
u32 pktlog_peer_valid;
u8 pktlog_peer_addr[ETH_ALEN];
u32 rx_filter;
+ u32 mem_offset;
+ u32 module_id_bitmap[MAX_MODULE_ID_BITMAP_WORDS];
+ struct ath11k_debug_dbr *dbr_debug[WMI_DIRECT_BUF_MAX];
};
struct ath11k_per_peer_tx_stats {
@@ -603,6 +611,8 @@ struct ath11k {
struct completion finish_11d_ch_list;
bool pending_11d;
bool regdom_set_by_user;
+ int hw_rate_code;
+ u8 twt_enabled;
};
struct ath11k_band_cap {
@@ -806,7 +816,7 @@ struct ath11k_base {
} id;
/* must be last */
- u8 drv_priv[0] __aligned(sizeof(void *));
+ u8 drv_priv[] __aligned(sizeof(void *));
};
struct ath11k_fw_stats_pdev {
diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c
index eda67ebfc4c2..2107ec05d14f 100644
--- a/drivers/net/wireless/ath/ath11k/dbring.c
+++ b/drivers/net/wireless/ath/ath11k/dbring.c
@@ -37,7 +37,8 @@ static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
struct ath11k_dbring *ring,
- struct ath11k_dbring_element *buff)
+ struct ath11k_dbring_element *buff,
+ enum wmi_direct_buffer_module id)
{
struct ath11k_base *ab = ar->ab;
struct hal_srng *srng;
@@ -84,6 +85,7 @@ static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);
+ ath11k_debugfs_add_dbring_entry(ar, id, ATH11K_DBG_DBR_EVENT_REPLENISH, srng);
ath11k_hal_srng_access_end(ab, srng);
return 0;
@@ -101,7 +103,8 @@ err:
}
static int ath11k_dbring_fill_bufs(struct ath11k *ar,
- struct ath11k_dbring *ring)
+ struct ath11k_dbring *ring,
+ enum wmi_direct_buffer_module id)
{
struct ath11k_dbring_element *buff;
struct hal_srng *srng;
@@ -129,7 +132,7 @@ static int ath11k_dbring_fill_bufs(struct ath11k *ar,
kfree(buff);
break;
}
- ret = ath11k_dbring_bufs_replenish(ar, ring, buff);
+ ret = ath11k_dbring_bufs_replenish(ar, ring, buff, id);
if (ret) {
ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
num_remain, req_entries);
@@ -210,7 +213,7 @@ int ath11k_dbring_buf_setup(struct ath11k *ar,
ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);
- ret = ath11k_dbring_fill_bufs(ar, ring);
+ ret = ath11k_dbring_fill_bufs(ar, ring, db_cap->id);
return ret;
}
@@ -270,7 +273,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
struct ath11k_buffer_addr desc;
u8 *vaddr_unalign;
u32 num_entry, num_buff_reaped;
- u8 pdev_idx, rbm;
+ u8 pdev_idx, rbm, module_id;
u32 cookie;
int buf_id;
int size;
@@ -278,6 +281,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
int ret = 0;
pdev_idx = ev->fixed.pdev_id;
+ module_id = ev->fixed.module_id;
if (pdev_idx >= ab->num_radios) {
ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
@@ -346,6 +350,9 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
DMA_FROM_DEVICE);
+ ath11k_debugfs_add_dbring_entry(ar, module_id,
+ ATH11K_DBG_DBR_EVENT_RX, srng);
+
if (ring->handler) {
vaddr_unalign = buff->payload;
handler_data.data = PTR_ALIGN(vaddr_unalign,
@@ -357,7 +364,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
buff->paddr = 0;
memset(buff->payload, 0, size);
- ath11k_dbring_bufs_replenish(ar, ring, buff);
+ ath11k_dbring_bufs_replenish(ar, ring, buff, module_id);
}
spin_unlock_bh(&srng->lock);
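
The dbring.c changes exist only to thread the wmi_direct_buffer_module id through the fill/replenish call chain so the new debugfs hook can attribute ring events to their producer (spectral vs CFR). The shape of that plumbing, sketched standalone with illustrative names:

    #include <stdio.h>

    enum buf_module { MOD_SPECTRAL, MOD_CFR, MOD_MAX };

    static void debugfs_add_dbring_entry(enum buf_module id, const char *event)
    {
        static const char * const name[MOD_MAX] = { "spectral", "CFR" };

        if (id >= MOD_MAX) /* mirror the hook's bounds check */
            return;
        printf("[%s] %s\n", name[id], event);
    }

    static void dbring_bufs_replenish(enum buf_module id)
    {
        /* ...descriptor and SRNG work elided... */
        debugfs_add_dbring_entry(id, "replenish");
    }

    int main(void)
    {
        dbring_bufs_replenish(MOD_SPECTRAL);
        dbring_bufs_replenish(MOD_CFR);
        return 0;
    }
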
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index 198ade90b725..a82266c8befc 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -52,6 +52,45 @@ static const char *htt_bp_lmac_ring[HTT_SW_LMAC_RING_IDX_MAX] = {
"MONITOR_DEST_RING",
};
+void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+ enum wmi_direct_buffer_module id,
+ enum ath11k_dbg_dbr_event event,
+ struct hal_srng *srng)
+{
+ struct ath11k_debug_dbr *dbr_debug;
+ struct ath11k_dbg_dbr_data *dbr_data;
+ struct ath11k_dbg_dbr_entry *entry;
+
+ if (id >= WMI_DIRECT_BUF_MAX || event >= ATH11K_DBG_DBR_EVENT_MAX)
+ return;
+
+ dbr_debug = ar->debug.dbr_debug[id];
+ if (!dbr_debug)
+ return;
+
+ if (!dbr_debug->dbr_debug_enabled)
+ return;
+
+ dbr_data = &dbr_debug->dbr_dbg_data;
+
+ spin_lock_bh(&dbr_data->lock);
+
+ if (dbr_data->entries) {
+ entry = &dbr_data->entries[dbr_data->dbr_debug_idx];
+ entry->hp = srng->u.src_ring.hp;
+ entry->tp = *srng->u.src_ring.tp_addr;
+ entry->timestamp = jiffies;
+ entry->event = event;
+
+ dbr_data->dbr_debug_idx++;
+ if (dbr_data->dbr_debug_idx ==
+ dbr_data->num_ring_debug_entries)
+ dbr_data->dbr_debug_idx = 0;
+ }
+
+ spin_unlock_bh(&dbr_data->lock);
+}
+
static void ath11k_fw_stats_pdevs_free(struct list_head *head)
{
struct ath11k_fw_stats_pdev *i, *tmp;
@@ -666,6 +705,12 @@ static ssize_t ath11k_write_extd_rx_stats(struct file *file,
goto exit;
}
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) {
+ ar->debug.extd_rx_stats = enable;
+ ret = count;
+ goto exit;
+ }
+
if (enable) {
rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START;
rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START;
@@ -870,6 +915,69 @@ static const struct file_operations fops_soc_dp_stats = {
.llseek = default_llseek,
};
+static ssize_t ath11k_write_fw_dbglog(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[128] = {0};
+ struct ath11k_fw_dbglog dbglog;
+ unsigned int param, mod_id_index, is_end;
+ u64 value;
+ int ret, num;
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ num = sscanf(buf, "%u %llx %u %u", &param, &value, &mod_id_index, &is_end);
+
+ if (num < 2)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if (param == WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP ||
+ param == WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP) {
+ if (num != 4 || mod_id_index > (MAX_MODULE_ID_BITMAP_WORDS - 1)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ar->debug.module_id_bitmap[mod_id_index] = upper_32_bits(value);
+ if (!is_end) {
+ ret = count;
+ goto out;
+ }
+ } else {
+ if (num != 2) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ dbglog.param = param;
+ dbglog.value = lower_32_bits(value);
+ ret = ath11k_wmi_fw_dbglog_cfg(ar, ar->debug.module_id_bitmap, &dbglog);
+ if (ret) {
+ ath11k_warn(ar->ab, "fw dbglog config failed from debugfs: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_fw_dbglog = {
+ .write = ath11k_write_fw_dbglog,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
{
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
@@ -1107,6 +1215,169 @@ static const struct file_operations fops_simulate_radar = {
.open = simple_open
};
+static ssize_t ath11k_debug_dump_dbr_entries(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_dbg_dbr_data *dbr_dbg_data = file->private_data;
+ static const char * const event_id_to_string[] = {"empty", "Rx", "Replenish"};
+ int size = ATH11K_DEBUG_DBR_ENTRIES_MAX * 100;
+ char *buf;
+ int i, ret;
+ int len = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, size - len,
+ "-----------------------------------------\n");
+ len += scnprintf(buf + len, size - len,
+ "| idx | hp | tp | timestamp | event |\n");
+ len += scnprintf(buf + len, size - len,
+ "-----------------------------------------\n");
+
+ spin_lock_bh(&dbr_dbg_data->lock);
+
+ for (i = 0; i < dbr_dbg_data->num_ring_debug_entries; i++) {
+ len += scnprintf(buf + len, size - len,
+ "|%4u|%8u|%8u|%11llu|%8s|\n", i,
+ dbr_dbg_data->entries[i].hp,
+ dbr_dbg_data->entries[i].tp,
+ dbr_dbg_data->entries[i].timestamp,
+ event_id_to_string[dbr_dbg_data->entries[i].event]);
+ }
+
+ spin_unlock_bh(&dbr_dbg_data->lock);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return ret;
+}
+
+static const struct file_operations fops_debug_dump_dbr_entries = {
+ .read = ath11k_debug_dump_dbr_entries,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath11k_debugfs_dbr_dbg_destroy(struct ath11k *ar, int dbr_id)
+{
+ struct ath11k_debug_dbr *dbr_debug;
+ struct ath11k_dbg_dbr_data *dbr_dbg_data;
+
+ if (!ar->debug.dbr_debug[dbr_id])
+ return;
+
+ dbr_debug = ar->debug.dbr_debug[dbr_id];
+ dbr_dbg_data = &dbr_debug->dbr_dbg_data;
+
+ debugfs_remove_recursive(dbr_debug->dbr_debugfs);
+ kfree(dbr_dbg_data->entries);
+ kfree(dbr_debug);
+ ar->debug.dbr_debug[dbr_id] = NULL;
+}
+
+static int ath11k_debugfs_dbr_dbg_init(struct ath11k *ar, int dbr_id)
+{
+ struct ath11k_debug_dbr *dbr_debug;
+ struct ath11k_dbg_dbr_data *dbr_dbg_data;
+ static const char * const dbr_id_to_str[] = {"spectral", "CFR"};
+
+ if (ar->debug.dbr_debug[dbr_id])
+ return 0;
+
+ ar->debug.dbr_debug[dbr_id] = kzalloc(sizeof(*dbr_debug),
+ GFP_KERNEL);
+
+ if (!ar->debug.dbr_debug[dbr_id])
+ return -ENOMEM;
+
+ dbr_debug = ar->debug.dbr_debug[dbr_id];
+ dbr_dbg_data = &dbr_debug->dbr_dbg_data;
+
+ if (dbr_debug->dbr_debugfs)
+ return 0;
+
+ dbr_debug->dbr_debugfs = debugfs_create_dir(dbr_id_to_str[dbr_id],
+ ar->debug.debugfs_pdev);
+ if (IS_ERR_OR_NULL(dbr_debug->dbr_debugfs)) {
+ if (IS_ERR(dbr_debug->dbr_debugfs))
+ return PTR_ERR(dbr_debug->dbr_debugfs);
+ return -ENOMEM;
+ }
+
+ dbr_debug->dbr_debug_enabled = true;
+ dbr_dbg_data->num_ring_debug_entries = ATH11K_DEBUG_DBR_ENTRIES_MAX;
+ dbr_dbg_data->dbr_debug_idx = 0;
+ dbr_dbg_data->entries = kcalloc(ATH11K_DEBUG_DBR_ENTRIES_MAX,
+ sizeof(struct ath11k_dbg_dbr_entry),
+ GFP_KERNEL);
+ if (!dbr_dbg_data->entries)
+ return -ENOMEM;
+
+ spin_lock_init(&dbr_dbg_data->lock);
+
+ debugfs_create_file("dump_dbr_debug", 0444, dbr_debug->dbr_debugfs,
+ dbr_dbg_data, &fops_debug_dump_dbr_entries);
+
+ return 0;
+}
+
+static ssize_t ath11k_debugfs_write_enable_dbr_dbg(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32] = {0};
+ u32 dbr_id, enable;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ goto out;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf, "%u %u", &dbr_id, &enable);
+ if (ret != 2 || dbr_id > 1 || enable > 1) {
+ ret = -EINVAL;
+ ath11k_warn(ar->ab, "usage: echo <dbr_id> <val> dbr_id:0-Spectral 1-CFR val:0-disable 1-enable\n");
+ goto out;
+ }
+
+ if (enable) {
+ ret = ath11k_debugfs_dbr_dbg_init(ar, dbr_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "db ring module debugfs init failed: %d\n",
+ ret);
+ goto out;
+ }
+ } else {
+ ath11k_debugfs_dbr_dbg_destroy(ar, dbr_id);
+ }
+
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_dbr_debug = {
+ .write = ath11k_debugfs_write_enable_dbr_dbg,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath11k_debugfs_register(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
@@ -1136,6 +1407,9 @@ int ath11k_debugfs_register(struct ath11k *ar)
debugfs_create_file("pktlog_filter", 0644,
ar->debug.debugfs_pdev, ar,
&fops_pktlog_filter);
+ debugfs_create_file("fw_dbglog_config", 0600,
+ ar->debug.debugfs_pdev, ar,
+ &fops_fw_dbglog);
if (ar->hw->wiphy->bands[NL80211_BAND_5GHZ]) {
debugfs_create_file("dfs_simulate_radar", 0200,
@@ -1146,9 +1420,250 @@ int ath11k_debugfs_register(struct ath11k *ar)
&ar->dfs_block_radar_events);
}
+ if (ab->hw_params.dbr_debug_support)
+ debugfs_create_file("enable_dbr_debug", 0200, ar->debug.debugfs_pdev,
+ ar, &fops_dbr_debug);
+
return 0;
}
void ath11k_debugfs_unregister(struct ath11k *ar)
{
+ struct ath11k_debug_dbr *dbr_debug;
+ struct ath11k_dbg_dbr_data *dbr_dbg_data;
+ int i;
+
+ for (i = 0; i < WMI_DIRECT_BUF_MAX; i++) {
+ dbr_debug = ar->debug.dbr_debug[i];
+ if (!dbr_debug)
+ continue;
+
+ dbr_dbg_data = &dbr_debug->dbr_dbg_data;
+ kfree(dbr_dbg_data->entries);
+ debugfs_remove_recursive(dbr_debug->dbr_debugfs);
+ kfree(dbr_debug);
+ ar->debug.dbr_debug[i] = NULL;
+ }
+}
+
+static ssize_t ath11k_write_twt_add_dialog(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_vif *arvif = file->private_data;
+ struct wmi_twt_add_dialog_params params = { 0 };
+ u8 buf[128] = {0};
+ int ret;
+
+ if (arvif->ar->twt_enabled == 0) {
+ ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf,
+ "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u %u %u %u %u %hhu %hhu %hhu %hhu %hhu",
+ &params.peer_macaddr[0],
+ &params.peer_macaddr[1],
+ &params.peer_macaddr[2],
+ &params.peer_macaddr[3],
+ &params.peer_macaddr[4],
+ &params.peer_macaddr[5],
+ &params.dialog_id,
+ &params.wake_intvl_us,
+ &params.wake_intvl_mantis,
+ &params.wake_dura_us,
+ &params.sp_offset_us,
+ &params.twt_cmd,
+ &params.flag_bcast,
+ &params.flag_trigger,
+ &params.flag_flow_type,
+ &params.flag_protection);
+ if (ret != 16)
+ return -EINVAL;
+
+ params.vdev_id = arvif->vdev_id;
+
+ ret = ath11k_wmi_send_twt_add_dialog_cmd(arvif->ar, &params);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t ath11k_write_twt_del_dialog(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_vif *arvif = file->private_data;
+ struct wmi_twt_del_dialog_params params = { 0 };
+ u8 buf[64] = {0};
+ int ret;
+
+ if (arvif->ar->twt_enabled == 0) {
+ ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u",
+ &params.peer_macaddr[0],
+ &params.peer_macaddr[1],
+ &params.peer_macaddr[2],
+ &params.peer_macaddr[3],
+ &params.peer_macaddr[4],
+ &params.peer_macaddr[5],
+ &params.dialog_id);
+ if (ret != 7)
+ return -EINVAL;
+
+ params.vdev_id = arvif->vdev_id;
+
+ ret = ath11k_wmi_send_twt_del_dialog_cmd(arvif->ar, &params);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t ath11k_write_twt_pause_dialog(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_vif *arvif = file->private_data;
+ struct wmi_twt_pause_dialog_params params = { 0 };
+ u8 buf[64] = {0};
+ int ret;
+
+ if (arvif->ar->twt_enabled == 0) {
+ ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u",
+ &params.peer_macaddr[0],
+ &params.peer_macaddr[1],
+ &params.peer_macaddr[2],
+ &params.peer_macaddr[3],
+ &params.peer_macaddr[4],
+ &params.peer_macaddr[5],
+ &params.dialog_id);
+ if (ret != 7)
+ return -EINVAL;
+
+ params.vdev_id = arvif->vdev_id;
+
+ ret = ath11k_wmi_send_twt_pause_dialog_cmd(arvif->ar, &params);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t ath11k_write_twt_resume_dialog(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_vif *arvif = file->private_data;
+ struct wmi_twt_resume_dialog_params params = { 0 };
+ u8 buf[64] = {0};
+ int ret;
+
+ if (arvif->ar->twt_enabled == 0) {
+ ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u %u %u",
+ &params.peer_macaddr[0],
+ &params.peer_macaddr[1],
+ &params.peer_macaddr[2],
+ &params.peer_macaddr[3],
+ &params.peer_macaddr[4],
+ &params.peer_macaddr[5],
+ &params.dialog_id,
+ &params.sp_offset_us,
+ &params.next_twt_size);
+ if (ret != 9)
+ return -EINVAL;
+
+ params.vdev_id = arvif->vdev_id;
+
+ ret = ath11k_wmi_send_twt_resume_dialog_cmd(arvif->ar, &params);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations ath11k_fops_twt_add_dialog = {
+ .write = ath11k_write_twt_add_dialog,
+ .open = simple_open
+};
+
+static const struct file_operations ath11k_fops_twt_del_dialog = {
+ .write = ath11k_write_twt_del_dialog,
+ .open = simple_open
+};
+
+static const struct file_operations ath11k_fops_twt_pause_dialog = {
+ .write = ath11k_write_twt_pause_dialog,
+ .open = simple_open
+};
+
+static const struct file_operations ath11k_fops_twt_resume_dialog = {
+ .write = ath11k_write_twt_resume_dialog,
+ .open = simple_open
+};
+
+int ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
+{
+ if (arvif->vif->type == NL80211_IFTYPE_AP && !arvif->debugfs_twt) {
+ arvif->debugfs_twt = debugfs_create_dir("twt",
+ arvif->vif->debugfs_dir);
+ if (!arvif->debugfs_twt || IS_ERR(arvif->debugfs_twt)) {
+ ath11k_warn(arvif->ar->ab,
+ "failed to create directory %p\n",
+ arvif->debugfs_twt);
+ arvif->debugfs_twt = NULL;
+ return -1;
+ }
+
+ debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_add_dialog);
+
+ debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_del_dialog);
+
+ debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_pause_dialog);
+
+ debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_resume_dialog);
+ }
+ return 0;
+}
+
+void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
+{
+ debugfs_remove_recursive(arvif->debugfs_twt);
+ arvif->debugfs_twt = NULL;
}
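
The new twt/add_dialog debugfs file parses a peer MAC followed by ten numeric TWT parameters — sixteen sscanf() conversions in total, rejected unless all match. Parsing that input format, sketched standalone (the example values are illustrative only):

    #include <stdio.h>

    int main(void)
    {
        unsigned char mac[6];
        unsigned int dialog_id, wake_intvl_us, wake_intvl_mantis;
        unsigned int wake_dura_us, sp_offset_us;
        unsigned char twt_cmd, bcast, trigger, flow_type, protection;
        const char *line =
            "00:11:22:33:44:55 1 102400 100 51200 0 4 0 1 0 0";
        int n = sscanf(line,
                   "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u %u %u %u %u %hhu %hhu %hhu %hhu %hhu",
                   &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5],
                   &dialog_id, &wake_intvl_us, &wake_intvl_mantis,
                   &wake_dura_us, &sp_offset_us, &twt_cmd, &bcast,
                   &trigger, &flow_type, &protection);

        printf("parsed %d of 16 fields, dialog_id=%u\n", n, dialog_id);
        return n == 16 ? 0 : 1;
    }
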
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
index 4c0740394c95..30c00cb28311 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs.h
@@ -47,6 +47,36 @@ enum ath11k_dbg_htt_ext_stats_type {
ATH11K_DBG_HTT_NUM_EXT_STATS,
};
+#define ATH11K_DEBUG_DBR_ENTRIES_MAX 512
+
+enum ath11k_dbg_dbr_event {
+ ATH11K_DBG_DBR_EVENT_INVALID,
+ ATH11K_DBG_DBR_EVENT_RX,
+ ATH11K_DBG_DBR_EVENT_REPLENISH,
+ ATH11K_DBG_DBR_EVENT_MAX,
+};
+
+struct ath11k_dbg_dbr_entry {
+ u32 hp;
+ u32 tp;
+ u64 timestamp;
+ enum ath11k_dbg_dbr_event event;
+};
+
+struct ath11k_dbg_dbr_data {
+ /* protects ath11k_db_ring_debug data */
+ spinlock_t lock;
+ struct ath11k_dbg_dbr_entry *entries;
+ u32 dbr_debug_idx;
+ u32 num_ring_debug_entries;
+};
+
+struct ath11k_debug_dbr {
+ struct ath11k_dbg_dbr_data dbr_dbg_data;
+ struct dentry *dbr_debugfs;
+ bool dbr_debug_enabled;
+};
+
struct debug_htt_stats_req {
bool done;
u8 pdev_id;
@@ -88,6 +118,7 @@ enum ath11k_pktlog_mode {
};
enum ath11k_pktlog_enum {
+ ATH11K_PKTLOG_TYPE_INVALID = 0,
ATH11K_PKTLOG_TYPE_TX_CTRL = 1,
ATH11K_PKTLOG_TYPE_TX_STAT = 2,
ATH11K_PKTLOG_TYPE_TX_MSDU_ID = 3,
@@ -107,6 +138,130 @@ enum ath11k_dbg_aggr_mode {
ATH11K_DBG_AGGR_MODE_MAX,
};
+enum fw_dbglog_wlan_module_id {
+ WLAN_MODULE_ID_MIN = 0,
+ WLAN_MODULE_INF = WLAN_MODULE_ID_MIN,
+ WLAN_MODULE_WMI,
+ WLAN_MODULE_STA_PWRSAVE,
+ WLAN_MODULE_WHAL,
+ WLAN_MODULE_COEX,
+ WLAN_MODULE_ROAM,
+ WLAN_MODULE_RESMGR_CHAN_MANAGER,
+ WLAN_MODULE_RESMGR,
+ WLAN_MODULE_VDEV_MGR,
+ WLAN_MODULE_SCAN,
+ WLAN_MODULE_RATECTRL,
+ WLAN_MODULE_AP_PWRSAVE,
+ WLAN_MODULE_BLOCKACK,
+ WLAN_MODULE_MGMT_TXRX,
+ WLAN_MODULE_DATA_TXRX,
+ WLAN_MODULE_HTT,
+ WLAN_MODULE_HOST,
+ WLAN_MODULE_BEACON,
+ WLAN_MODULE_OFFLOAD,
+ WLAN_MODULE_WAL,
+ WLAN_WAL_MODULE_DE,
+ WLAN_MODULE_PCIELP,
+ WLAN_MODULE_RTT,
+ WLAN_MODULE_RESOURCE,
+ WLAN_MODULE_DCS,
+ WLAN_MODULE_CACHEMGR,
+ WLAN_MODULE_ANI,
+ WLAN_MODULE_P2P,
+ WLAN_MODULE_CSA,
+ WLAN_MODULE_NLO,
+ WLAN_MODULE_CHATTER,
+ WLAN_MODULE_WOW,
+ WLAN_MODULE_WAL_VDEV,
+ WLAN_MODULE_WAL_PDEV,
+ WLAN_MODULE_TEST,
+ WLAN_MODULE_STA_SMPS,
+ WLAN_MODULE_SWBMISS,
+ WLAN_MODULE_WMMAC,
+ WLAN_MODULE_TDLS,
+ WLAN_MODULE_HB,
+ WLAN_MODULE_TXBF,
+ WLAN_MODULE_BATCH_SCAN,
+ WLAN_MODULE_THERMAL_MGR,
+ WLAN_MODULE_PHYERR_DFS,
+ WLAN_MODULE_RMC,
+ WLAN_MODULE_STATS,
+ WLAN_MODULE_NAN,
+ WLAN_MODULE_IBSS_PWRSAVE,
+ WLAN_MODULE_HIF_UART,
+ WLAN_MODULE_LPI,
+ WLAN_MODULE_EXTSCAN,
+ WLAN_MODULE_UNIT_TEST,
+ WLAN_MODULE_MLME,
+ WLAN_MODULE_SUPPL,
+ WLAN_MODULE_ERE,
+ WLAN_MODULE_OCB,
+ WLAN_MODULE_RSSI_MONITOR,
+ WLAN_MODULE_WPM,
+ WLAN_MODULE_CSS,
+ WLAN_MODULE_PPS,
+ WLAN_MODULE_SCAN_CH_PREDICT,
+ WLAN_MODULE_MAWC,
+ WLAN_MODULE_CMC_QMIC,
+ WLAN_MODULE_EGAP,
+ WLAN_MODULE_NAN20,
+ WLAN_MODULE_QBOOST,
+ WLAN_MODULE_P2P_LISTEN_OFFLOAD,
+ WLAN_MODULE_HALPHY,
+ WLAN_WAL_MODULE_ENQ,
+ WLAN_MODULE_GNSS,
+ WLAN_MODULE_WAL_MEM,
+ WLAN_MODULE_SCHED_ALGO,
+ WLAN_MODULE_TX,
+ WLAN_MODULE_RX,
+ WLAN_MODULE_WLM,
+ WLAN_MODULE_RU_ALLOCATOR,
+ WLAN_MODULE_11K_OFFLOAD,
+ WLAN_MODULE_STA_TWT,
+ WLAN_MODULE_AP_TWT,
+ WLAN_MODULE_UL_OFDMA,
+ WLAN_MODULE_HPCS_PULSE,
+ WLAN_MODULE_DTF,
+ WLAN_MODULE_QUIET_IE,
+ WLAN_MODULE_SHMEM_MGR,
+ WLAN_MODULE_CFIR,
+ WLAN_MODULE_CODE_COVER,
+ WLAN_MODULE_SHO,
+ WLAN_MODULE_MLO_MGR,
+ WLAN_MODULE_PEER_INIT,
+ WLAN_MODULE_STA_MLO_PS,
+
+ WLAN_MODULE_ID_MAX,
+ WLAN_MODULE_ID_INVALID = WLAN_MODULE_ID_MAX,
+};
+
+enum fw_dbglog_log_level {
+ ATH11K_FW_DBGLOG_ML = 0,
+ ATH11K_FW_DBGLOG_VERBOSE = 0,
+ ATH11K_FW_DBGLOG_INFO,
+ ATH11K_FW_DBGLOG_INFO_LVL_1,
+ ATH11K_FW_DBGLOG_INFO_LVL_2,
+ ATH11K_FW_DBGLOG_WARN,
+ ATH11K_FW_DBGLOG_ERR,
+ ATH11K_FW_DBGLOG_LVL_MAX
+};
+
+struct ath11k_fw_dbglog {
+ enum wmi_debug_log_param param;
+ union {
+ struct {
+ /* log_level values are given in enum fw_dbglog_log_level */
+ u16 log_level;
+ /* module_id values are given in enum fw_dbglog_wlan_module_id */
+ u16 module_id;
+ };
+ /* value carries log_level+module_id, vdev_id, vdev_id_bitmap, or
+ * log_level alone, depending on param
+ */
+ u32 value;
+ };
+};
+
#ifdef CONFIG_ATH11K_DEBUGFS
int ath11k_debugfs_soc_create(struct ath11k_base *ab);
void ath11k_debugfs_soc_destroy(struct ath11k_base *ab);
@@ -151,6 +306,13 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
return ar->debug.rx_filter;
}
+int ath11k_debugfs_add_interface(struct ath11k_vif *arvif);
+void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif);
+void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+ enum wmi_direct_buffer_module id,
+ enum ath11k_dbg_dbr_event event,
+ struct hal_srng *srng);
+
#else
static inline int ath11k_debugfs_soc_create(struct ath11k_base *ab)
{
@@ -224,6 +386,22 @@ static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
return 0;
}
-#endif /* CONFIG_MAC80211_DEBUGFS*/
+static inline int ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
+{
+ return 0;
+}
+
+static inline void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
+{
+}
+
+static inline void
+ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+ enum wmi_direct_buffer_module id,
+ enum ath11k_dbg_dbr_event event,
+ struct hal_srng *srng)
+{
+}
+#endif /* CONFIG_ATH11K_DEBUGFS*/
#endif /* _ATH11K_DEBUGFS_H_ */
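
One subtlety in the fw_dbglog_config write handler added above: the 64-bit value read from the user is split, with the upper 32 bits feeding one module_id_bitmap word and the lower 32 bits becoming the WMI parameter value. The split, sketched standalone:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }
    static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }

    int main(void)
    {
        /* e.g. bitmap word 0xffffffff with parameter value 0x5 */
        uint64_t value = 0xffffffff00000005ULL;

        printf("bitmap word = 0x%x, param value = 0x%x\n",
               upper_32_bits(value), lower_32_bits(value));
        return 0;
    }
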
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index 409d6cc5a1d5..e9dfa209098b 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -115,6 +115,8 @@ struct ath11k_pdev_mon_stats {
u32 dest_mpdu_drop;
u32 dup_mon_linkdesc_cnt;
u32 dup_mon_buf_cnt;
+ u32 dest_mon_stuck;
+ u32 dest_mon_not_reaped;
};
struct dp_full_mon_mpdu {
@@ -167,6 +169,7 @@ struct ath11k_mon_data {
struct ath11k_pdev_dp {
u32 mac_id;
+ u32 mon_dest_ring_stuck_cnt;
atomic_t num_tx_pending;
wait_queue_head_t tx_empty_waitq;
struct dp_rxdma_ring rx_refill_buf_ring;
@@ -1170,12 +1173,12 @@ struct ath11k_htt_ppdu_stats_msg {
u32 ppdu_id;
u32 timestamp;
u32 rsvd;
- u8 data[0];
+ u8 data[];
} __packed;
struct htt_tlv {
u32 header;
- u8 value[0];
+ u8 value[];
} __packed;
#define HTT_TLV_TAG GENMASK(11, 0)
@@ -1362,7 +1365,7 @@ struct htt_ppdu_stats_usr_cmn_array {
* tx_ppdu_stats_info is variable length, with length =
* number_of_ppdu_stats * sizeof (struct htt_tx_ppdu_stats_info)
*/
- struct htt_tx_ppdu_stats_info tx_ppdu_info[0];
+ struct htt_tx_ppdu_stats_info tx_ppdu_info[];
} __packed;
struct htt_ppdu_user_stats {
@@ -1424,7 +1427,7 @@ struct htt_ppdu_stats_info {
*/
struct htt_pktlog_msg {
u32 hdr;
- u8 payload[0];
+ u8 payload[];
};
/**
@@ -1645,7 +1648,7 @@ struct ath11k_htt_extd_stats_msg {
u32 info0;
u64 cookie;
u32 info1;
- u8 data[0];
+ u8 data[];
} __packed;
#define HTT_MAC_ADDR_L32_0 GENMASK(7, 0)
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index c212a789421e..049774cc158c 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -43,6 +43,13 @@ static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
}
static inline
+bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
+}
+
+static inline
u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
@@ -2313,7 +2320,7 @@ static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
u8 bw;
u8 rate_mcs, nss;
u8 sgi;
- bool is_cck;
+ bool is_cck, is_ldpc;
pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
@@ -2355,6 +2362,9 @@ static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
if (sgi)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc);
+ if (is_ldpc)
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
break;
case RX_MSDU_START_PKT_TYPE_11AX:
rx_status->rate_idx = rate_mcs;
@@ -2642,9 +2652,9 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
spin_lock_bh(&srng->lock);
+try_again:
ath11k_hal_srng_access_begin(ab, srng);
-try_again:
while (likely(desc =
(struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
srng))) {
@@ -3080,79 +3090,6 @@ move_next:
return num_buffs_reaped;
}
-int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
- struct napi_struct *napi, int budget)
-{
- struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
- enum hal_rx_mon_status hal_status;
- struct sk_buff *skb;
- struct sk_buff_head skb_list;
- struct hal_rx_mon_ppdu_info ppdu_info;
- struct ath11k_peer *peer;
- struct ath11k_sta *arsta;
- int num_buffs_reaped = 0;
- u32 rx_buf_sz;
- u16 log_type = 0;
-
- __skb_queue_head_init(&skb_list);
-
- num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
- &skb_list);
- if (!num_buffs_reaped)
- goto exit;
-
- memset(&ppdu_info, 0, sizeof(ppdu_info));
- ppdu_info.peer_id = HAL_INVALID_PEERID;
-
- while ((skb = __skb_dequeue(&skb_list))) {
- if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
- log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
- rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
- } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
- log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
- rx_buf_sz = DP_RX_BUFFER_SIZE;
- }
-
- if (log_type)
- trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
-
- hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);
-
- if (ppdu_info.peer_id == HAL_INVALID_PEERID ||
- hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
- dev_kfree_skb_any(skb);
- continue;
- }
-
- rcu_read_lock();
- spin_lock_bh(&ab->base_lock);
- peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id);
-
- if (!peer || !peer->sta) {
- ath11k_dbg(ab, ATH11K_DBG_DATA,
- "failed to find the peer with peer_id %d\n",
- ppdu_info.peer_id);
- goto next_skb;
- }
-
- arsta = (struct ath11k_sta *)peer->sta->drv_priv;
- ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);
-
- if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
- trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
-
-next_skb:
- spin_unlock_bh(&ab->base_lock);
- rcu_read_unlock();
-
- dev_kfree_skb_any(skb);
- memset(&ppdu_info, 0, sizeof(ppdu_info));
- ppdu_info.peer_id = HAL_INVALID_PEERID;
- }
-exit:
- return num_buffs_reaped;
-}
-
static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
{
struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
@@ -4870,7 +4807,6 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
{
struct ath11k_base *ab = ar->ab;
struct sk_buff *msdu, *prev_buf;
- u32 wifi_hdr_len;
struct hal_rx_desc *rx_desc;
char *hdr_desc;
u8 *dest, decap_format;
@@ -4912,38 +4848,27 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
- __le16 qos_field;
u8 qos_pkt = 0;
rx_desc = (struct hal_rx_desc *)head_msdu->data;
hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
/* Base size */
- wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr);
wh = (struct ieee80211_hdr_3addr *)hdr_desc;
- if (ieee80211_is_data_qos(wh->frame_control)) {
- struct ieee80211_qos_hdr *qwh =
- (struct ieee80211_qos_hdr *)hdr_desc;
-
- qos_field = qwh->qos_ctrl;
+ if (ieee80211_is_data_qos(wh->frame_control))
qos_pkt = 1;
- }
+
msdu = head_msdu;
while (msdu) {
- rx_desc = (struct hal_rx_desc *)msdu->data;
- hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
-
+ ath11k_dp_rx_msdus_set_payload(ar, msdu);
if (qos_pkt) {
dest = skb_push(msdu, sizeof(__le16));
if (!dest)
goto err_merge_fail;
- memcpy(dest, hdr_desc, wifi_hdr_len);
- memcpy(dest + wifi_hdr_len,
- (u8 *)&qos_field, sizeof(__le16));
+ memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
}
- ath11k_dp_rx_msdus_set_payload(ar, msdu);
prev_buf = msdu;
msdu = msdu->next;
}
@@ -4967,8 +4892,98 @@ err_merge_fail:
return NULL;
}
+static void
+ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
+ u8 *rtap_buf)
+{
+ u32 rtap_len = 0;
+
+ put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
+}
+
+static void
+ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
+ u8 *rtap_buf)
+{
+ u32 rtap_len = 0;
+
+ put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[0];
+ rtap_len += 1;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[1];
+ rtap_len += 1;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[2];
+ rtap_len += 1;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[3];
+}
+
+static void ath11k_update_radiotap(struct ath11k *ar,
+ struct hal_rx_mon_ppdu_info *ppduinfo,
+ struct sk_buff *mon_skb,
+ struct ieee80211_rx_status *rxs)
+{
+ struct ieee80211_supported_band *sband;
+ u8 *ptr = NULL;
+
+ rxs->flag |= RX_FLAG_MACTIME_START;
+ rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
+
+ if (ppduinfo->nss)
+ rxs->nss = ppduinfo->nss;
+
+ if (ppduinfo->he_mu_flags) {
+ rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
+ rxs->encoding = RX_ENC_HE;
+ ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
+ ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr);
+ } else if (ppduinfo->he_flags) {
+ rxs->flag |= RX_FLAG_RADIOTAP_HE;
+ rxs->encoding = RX_ENC_HE;
+ ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
+ ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr);
+ rxs->rate_idx = ppduinfo->rate;
+ } else if (ppduinfo->vht_flags) {
+ rxs->encoding = RX_ENC_VHT;
+ rxs->rate_idx = ppduinfo->rate;
+ } else if (ppduinfo->ht_flags) {
+ rxs->encoding = RX_ENC_HT;
+ rxs->rate_idx = ppduinfo->rate;
+ } else {
+ rxs->encoding = RX_ENC_LEGACY;
+ sband = &ar->mac.sbands[rxs->band];
+ rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
+ ppduinfo->cck_flag);
+ }
+
+ rxs->mactime = ppduinfo->tsft;
+}
+
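A quick sanity sketch, not part of the patch, of the layout assumption the three helpers above rely on: the bytes pushed in front of the frame must line up field for field with the radiotap structs that mac80211 strips from the head of the skb.

	/* Layout assumptions behind the radiotap push helpers above. */
	#include <linux/build_bug.h>
	#include <linux/stddef.h>
	#include <net/ieee80211_radiotap.h>

	static inline void ath11k_radiotap_layout_check(void)
	{
		/* he_data1..he_data6 are six consecutive little-endian 16-bit words */
		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 6 * sizeof(__le16));
		/* flags1 and flags2 come first; the channel-1 RU bytes start at offset 4 */
		BUILD_BUG_ON(offsetof(struct ieee80211_radiotap_he_mu, ru_ch1) != 4);
	}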
static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
struct sk_buff *head_msdu,
+ struct hal_rx_mon_ppdu_info *ppduinfo,
struct sk_buff *tail_msdu,
struct napi_struct *napi)
{
@@ -5003,7 +5018,7 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
} else {
rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
}
- rxs->flag |= RX_FLAG_ONLY_MONITOR;
+ ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);
ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
mon_skb = skb_next;
@@ -5022,6 +5037,12 @@ mon_deliver_fail:
return -EINVAL;
}
+/* The destination ring processing is considered stuck if the destination
+ * ring does not move while the status ring advances by 16 PPDUs. As a
+ * workaround, skip the current destination ring PPDU.
+ */
+#define MON_DEST_RING_STUCK_MAX_CNT 16
+
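The open-coded logic below can be read as the following stand-alone sketch (hypothetical helper name): the counter resets whenever the destination ring yields buffers and trips once the status ring has advanced MON_DEST_RING_STUCK_MAX_CNT PPDUs without it.

	/* Hypothetical equivalent of the per-pdev stuck detection below. */
	static bool ath11k_mon_dest_ring_stuck(u32 *stuck_cnt, u32 bufs_reaped)
	{
		if (bufs_reaped) {
			*stuck_cnt = 0;	/* destination ring moved, reset */
			return false;
		}
		return ++(*stuck_cnt) > MON_DEST_RING_STUCK_MAX_CNT;
	}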
static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
u32 quota, struct napi_struct *napi)
{
@@ -5035,6 +5056,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
u32 ring_id;
struct ath11k_pdev_mon_stats *rx_mon_stats;
u32 npackets = 0;
+ u32 mpdu_rx_bufs_used;
if (ar->ab->hw_params.rxdma1_enable)
ring_id = dp->rxdma_mon_dst_ring.ring_id;
@@ -5064,20 +5086,44 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
head_msdu = NULL;
tail_msdu = NULL;
- rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
- &head_msdu,
- &tail_msdu,
- &npackets, &ppdu_id);
+ mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
+ &head_msdu,
+ &tail_msdu,
+ &npackets, &ppdu_id);
+
+ rx_bufs_used += mpdu_rx_bufs_used;
+
+ if (mpdu_rx_bufs_used) {
+ dp->mon_dest_ring_stuck_cnt = 0;
+ } else {
+ dp->mon_dest_ring_stuck_cnt++;
+ rx_mon_stats->dest_mon_not_reaped++;
+ }
+
+ if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
+ rx_mon_stats->dest_mon_stuck++;
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
+ pmon->mon_ppdu_info.ppdu_id, ppdu_id,
+ dp->mon_dest_ring_stuck_cnt,
+ rx_mon_stats->dest_mon_not_reaped,
+ rx_mon_stats->dest_mon_stuck);
+ pmon->mon_ppdu_info.ppdu_id = ppdu_id;
+ continue;
+ }
if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
- "dest_rx: new ppdu_id %x != status ppdu_id %x",
- ppdu_id, pmon->mon_ppdu_info.ppdu_id);
+ "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
+ ppdu_id, pmon->mon_ppdu_info.ppdu_id,
+ rx_mon_stats->dest_mon_not_reaped,
+ rx_mon_stats->dest_mon_stuck);
break;
}
if (head_msdu && tail_msdu) {
ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
+ &pmon->mon_ppdu_info,
tail_msdu, napi);
rx_mon_stats->dest_mpdu_done++;
}
@@ -5106,36 +5152,92 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
}
}
-static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
- int mac_id, u32 quota,
- struct napi_struct *napi)
+int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
{
- struct ath11k_pdev_dp *dp = &ar->dp;
- struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
- struct hal_rx_mon_ppdu_info *ppdu_info;
- struct sk_buff *status_skb;
- u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
- struct ath11k_pdev_mon_stats *rx_mon_stats;
+ struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
+ enum hal_rx_mon_status hal_status;
+ struct sk_buff *skb;
+ struct sk_buff_head skb_list;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ int num_buffs_reaped = 0;
+ u32 rx_buf_sz;
+ u16 log_type;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
+ struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
+ struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
- ppdu_info = &pmon->mon_ppdu_info;
- rx_mon_stats = &pmon->rx_mon_stats;
+ __skb_queue_head_init(&skb_list);
- if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
- return;
+ num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
+ &skb_list);
+ if (!num_buffs_reaped)
+ goto exit;
- while (!skb_queue_empty(&pmon->rx_status_q)) {
- status_skb = skb_dequeue(&pmon->rx_status_q);
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
- tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
- status_skb);
- if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
+ while ((skb = __skb_dequeue(&skb_list))) {
+ if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
+ log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
+ rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
+ } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
+ log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
+ rx_buf_sz = DP_RX_BUFFER_SIZE;
+ } else {
+ log_type = ATH11K_PKTLOG_TYPE_INVALID;
+ rx_buf_sz = 0;
+ }
+
+ if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
+ trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
+
+ memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info));
+ hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
+
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
+ pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
+ hal_status == HAL_TLV_STATUS_PPDU_DONE) {
rx_mon_stats->status_ppdu_done++;
pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
- ath11k_dp_rx_mon_dest_process(ar, mac_id, quota, napi);
+ ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
}
- dev_kfree_skb_any(status_skb);
+
+ if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
+ hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);
+
+ if (!peer || !peer->sta) {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "failed to find the peer with peer_id %d\n",
+ ppdu_info->peer_id);
+ goto next_skb;
+ }
+
+ arsta = (struct ath11k_sta *)peer->sta->drv_priv;
+ ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);
+
+ if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
+ trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
+
+next_skb:
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+
+ dev_kfree_skb_any(skb);
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
}
+exit:
+ return num_buffs_reaped;
}
static u32
@@ -5352,6 +5454,7 @@ static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
tail_msdu = mon_mpdu->tail;
if (head_msdu && tail_msdu) {
ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
+ &pmon->mon_ppdu_info,
tail_msdu, napi);
rx_mon_stats->dest_mpdu_done++;
ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");
@@ -5489,22 +5592,6 @@ reap_status_ring:
return quota;
}
-static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
- struct napi_struct *napi, int budget)
-{
- struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
- struct ath11k_pdev_dp *dp = &ar->dp;
- struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
- int num_buffs_reaped = 0;
-
- num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, mac_id, &budget,
- &pmon->rx_status_q);
- if (num_buffs_reaped)
- ath11k_dp_rx_mon_status_process_tlv(ar, mac_id, budget, napi);
-
- return num_buffs_reaped;
-}
-
int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, int budget)
{
@@ -5514,8 +5601,6 @@ int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
ab->hw_params.full_monitor_mode)
ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
- else if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
- ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
else
ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 91d6244b6543..00a45819907e 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -351,7 +351,8 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
ts->ack_rssi;
- info->status.is_valid_ack_signal = true;
+ info->status.flags |=
+ IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
} else {
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
}
@@ -426,7 +427,7 @@ void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts)
struct ath11k_sta *arsta;
struct ieee80211_sta *sta;
u16 rate, ru_tones;
- u8 mcs, rate_idx, ofdma;
+ u8 mcs, rate_idx = 0, ofdma;
int ret;
spin_lock_bh(&ab->base_lock);
@@ -518,9 +519,13 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
struct sk_buff *msdu,
struct hal_tx_status *ts)
{
+ struct ieee80211_tx_status status = { 0 };
struct ath11k_base *ab = ar->ab;
struct ieee80211_tx_info *info;
struct ath11k_skb_cb *skb_cb;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ struct rate_info rate;
if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
/* Must not happen */
@@ -552,7 +557,7 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
ts->ack_rssi;
- info->status.is_valid_ack_signal = true;
+ info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
}
if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
@@ -583,12 +588,26 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
ath11k_dp_tx_cache_peer_stats(ar, msdu, ts);
}
- /* NOTE: Tx rate status reporting. Tx completion status does not have
- * necessary information (for example nss) to build the tx rate.
- * Might end up reporting it out-of-band from HTT stats.
- */
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ts->peer_id);
+ if (!peer || !peer->sta) {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "dp_tx: failed to find the peer with peer_id %d\n",
+ ts->peer_id);
+ spin_unlock_bh(&ab->base_lock);
+ dev_kfree_skb_any(msdu);
+ return;
+ }
+ arsta = (struct ath11k_sta *)peer->sta->drv_priv;
+ status.sta = peer->sta;
+ status.skb = msdu;
+ status.info = info;
+ rate = arsta->last_txrate;
+ status.rate = &rate;
- ieee80211_tx_status(ar->hw, msdu);
+ spin_unlock_bh(&ab->base_lock);
+
+ ieee80211_tx_status_ext(ar->hw, &status);
}
static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
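Note that the rate is copied into a stack-local struct rate_info before the lock is dropped: arsta->last_txrate is only stable under ab->base_lock, while ieee80211_tx_status_ext() must be called unlocked. A reduced sketch of that pattern, with a hypothetical helper name:

	#include <net/mac80211.h>

	/* Snapshot the live rate under the lock, then report outside it. */
	static void ath11k_report_tx_status(struct ieee80211_hw *hw,
					    struct ieee80211_tx_status *status,
					    const struct rate_info *live,
					    spinlock_t *lock)
	{
		struct rate_info snapshot;

		spin_lock_bh(lock);
		snapshot = *live;	/* copy while the peer entry is valid */
		spin_unlock_bh(lock);

		status->rate = &snapshot;
		ieee80211_tx_status_ext(hw, status);
	}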
diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h
index 406767672844..24e72e75a8c7 100644
--- a/drivers/net/wireless/ath/ath11k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath11k/hal_desc.h
@@ -474,6 +474,7 @@ enum hal_tlv_tag {
#define HAL_TLV_HDR_TAG GENMASK(9, 1)
#define HAL_TLV_HDR_LEN GENMASK(25, 10)
+#define HAL_TLV_USR_ID GENMASK(31, 26)
#define HAL_TLV_ALIGN 4
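The new field completes the TLV header word: tag in bits 9:1, length in bits 25:10, and now the user id in bits 31:26. An illustrative decode (hypothetical helper; the driver open-codes the FIELD_GETs):

	#include <linux/bitfield.h>

	static void hal_tlv_hdr_decode(u32 tl, u16 *tag, u16 *len, u8 *userid)
	{
		*tag = FIELD_GET(HAL_TLV_HDR_TAG, tl);		/* bits 9:1 */
		*len = FIELD_GET(HAL_TLV_HDR_LEN, tl);		/* bits 25:10 */
		*userid = FIELD_GET(HAL_TLV_USR_ID, tl);	/* bits 31:26 */
	}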
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index a3b353a4b5f7..4bb1fbaed0c9 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -453,10 +453,12 @@ void ath11k_hal_reo_status_queue_stats(struct ath11k_base *ab, u32 *reo_desc,
desc->info0));
ath11k_dbg(ab, ATH11k_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n",
desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]);
- ath11k_dbg(ab, ATH11K_DBG_HAL, "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
+ ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
desc->last_rx_enqueue_timestamp,
desc->last_rx_dequeue_timestamp);
- ath11k_dbg(ab, ATH11K_DBG_HAL, "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
+ ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2],
desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5],
desc->rx_bitmap[6], desc->rx_bitmap[7]);
@@ -802,12 +804,75 @@ void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
}
}
+#define HAL_MAX_UL_MU_USERS 37
+static inline void
+ath11k_hal_rx_handle_ofdma_info(void *rx_tlv,
+ struct hal_rx_user_status *rx_user_status)
+{
+ struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
+ (struct hal_rx_ppdu_end_user_stats *)rx_tlv;
+
+ rx_user_status->ul_ofdma_user_v0_word0 = __le32_to_cpu(ppdu_end_user->info6);
+
+ rx_user_status->ul_ofdma_user_v0_word1 = __le32_to_cpu(ppdu_end_user->rsvd2[10]);
+}
+
+static inline void
+ath11k_hal_rx_populate_byte_count(void *rx_tlv, void *ppduinfo,
+ struct hal_rx_user_status *rx_user_status)
+{
+ struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
+ (struct hal_rx_ppdu_end_user_stats *)rx_tlv;
+
+ rx_user_status->mpdu_ok_byte_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_RSVD2_6_MPDU_OK_BYTE_COUNT,
+ __le32_to_cpu(ppdu_end_user->rsvd2[6]));
+ rx_user_status->mpdu_err_byte_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_RSVD2_8_MPDU_ERR_BYTE_COUNT,
+ __le32_to_cpu(ppdu_end_user->rsvd2[8]));
+}
+
+static inline void
+ath11k_hal_rx_populate_mu_user_info(void *rx_tlv, struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct hal_rx_user_status *rx_user_status)
+{
+ rx_user_status->ast_index = ppdu_info->ast_index;
+ rx_user_status->tid = ppdu_info->tid;
+ rx_user_status->tcp_msdu_count =
+ ppdu_info->tcp_msdu_count;
+ rx_user_status->udp_msdu_count =
+ ppdu_info->udp_msdu_count;
+ rx_user_status->other_msdu_count =
+ ppdu_info->other_msdu_count;
+ rx_user_status->frame_control = ppdu_info->frame_control;
+ rx_user_status->frame_control_info_valid =
+ ppdu_info->frame_control_info_valid;
+ rx_user_status->data_sequence_control_info_valid =
+ ppdu_info->data_sequence_control_info_valid;
+ rx_user_status->first_data_seq_ctrl =
+ ppdu_info->first_data_seq_ctrl;
+ rx_user_status->preamble_type = ppdu_info->preamble_type;
+ rx_user_status->ht_flags = ppdu_info->ht_flags;
+ rx_user_status->vht_flags = ppdu_info->vht_flags;
+ rx_user_status->he_flags = ppdu_info->he_flags;
+ rx_user_status->rs_flags = ppdu_info->rs_flags;
+
+ rx_user_status->mpdu_cnt_fcs_ok =
+ ppdu_info->num_mpdu_fcs_ok;
+ rx_user_status->mpdu_cnt_fcs_err =
+ ppdu_info->num_mpdu_fcs_err;
+
+ ath11k_hal_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status);
+}
+
static enum hal_rx_mon_status
ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
struct hal_rx_mon_ppdu_info *ppdu_info,
- u32 tlv_tag, u8 *tlv_data)
+ u32 tlv_tag, u8 *tlv_data, u32 userid)
{
- u32 info0, info1;
+ u32 info0, info1, value;
+ u8 he_dcm = 0, he_stbc = 0;
+ u16 he_gi = 0, he_ltf = 0;
switch (tlv_tag) {
case HAL_RX_PPDU_START: {
@@ -828,6 +893,9 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
info0 = __le32_to_cpu(eu_stats->info0);
info1 = __le32_to_cpu(eu_stats->info1);
+ ppdu_info->ast_index =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX,
+ __le32_to_cpu(eu_stats->info2));
ppdu_info->tid =
ffs(FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP,
__le32_to_cpu(eu_stats->info6))) - 1;
@@ -851,6 +919,44 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
ppdu_info->num_mpdu_fcs_err =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR,
info0);
+ switch (ppdu_info->preamble_type) {
+ case HAL_RX_PREAMBLE_11N:
+ ppdu_info->ht_flags = 1;
+ break;
+ case HAL_RX_PREAMBLE_11AC:
+ ppdu_info->vht_flags = 1;
+ break;
+ case HAL_RX_PREAMBLE_11AX:
+ ppdu_info->he_flags = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (userid < HAL_MAX_UL_MU_USERS) {
+ struct hal_rx_user_status *rxuser_stats =
+ &ppdu_info->userstats;
+
+ ath11k_hal_rx_handle_ofdma_info(tlv_data, rxuser_stats);
+ ath11k_hal_rx_populate_mu_user_info(tlv_data, ppdu_info,
+ rxuser_stats);
+ }
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[0] =
+ __le32_to_cpu(eu_stats->rsvd1[0]);
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[1] =
+ __le32_to_cpu(eu_stats->rsvd1[1]);
+
+ break;
+ }
+ case HAL_RX_PPDU_END_USER_STATS_EXT: {
+ struct hal_rx_ppdu_end_user_stats_ext *eu_stats =
+ (struct hal_rx_ppdu_end_user_stats_ext *)tlv_data;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[2] = eu_stats->info1;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[3] = eu_stats->info2;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[4] = eu_stats->info3;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[5] = eu_stats->info4;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[6] = eu_stats->info5;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[7] = eu_stats->info6;
break;
}
case HAL_PHYRX_HT_SIG: {
@@ -949,50 +1055,151 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
else
ppdu_info->reception_type =
HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ ppdu_info->vht_flag_values5 = group_id;
+ ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) |
+ ppdu_info->nss);
+ ppdu_info->vht_flag_values2 = ppdu_info->bw;
+ ppdu_info->vht_flag_values4 =
+ FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING, info1);
break;
}
case HAL_PHYRX_HE_SIG_A_SU: {
struct hal_rx_he_sig_a_su_info *he_sig_a =
(struct hal_rx_he_sig_a_su_info *)tlv_data;
- u32 nsts, cp_ltf, dcm;
+ ppdu_info->he_flags = 1;
info0 = __le32_to_cpu(he_sig_a->info0);
info1 = __le32_to_cpu(he_sig_a->info1);
- ppdu_info->mcs =
- FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS,
- info0);
- ppdu_info->bw =
- FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW,
- info0);
- ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING, info0);
- ppdu_info->is_stbc = info1 &
- HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC;
- ppdu_info->beamformed = info1 &
- HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF;
- dcm = info0 & HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM;
- cp_ltf = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE,
- info0);
- nsts = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND, info0);
- switch (cp_ltf) {
+ if (value == 0)
+ ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG;
+ else
+ ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU;
+
+ ppdu_info->he_data1 |=
+ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN;
+
+ ppdu_info->he_data2 |=
+ IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN;
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR, info0);
+ ppdu_info->he_data3 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE, info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG, info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_UL_DL, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS, info0);
+ ppdu_info->mcs = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, value);
+
+ he_dcm = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM, info0);
+ ppdu_info->dcm = he_dcm;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM, he_dcm);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING, info1);
+ ppdu_info->ldpc = (value == HAL_RX_SU_MU_CODING_LDPC) ? 1 : 0;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA, info1);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG, value);
+ he_stbc = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC, info1);
+ ppdu_info->is_stbc = he_stbc;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_STBC, he_stbc);
+
+ /* data4 */
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE, info0);
+ ppdu_info->he_data4 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE, value);
+
+ /* data5 */
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW, info0);
+ ppdu_info->bw = value;
+ ppdu_info->he_data5 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE, info0);
+ switch (value) {
case 0:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_1_X;
+ break;
case 1:
- ppdu_info->gi = HAL_RX_GI_0_8_US;
- break;
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_2_X;
+ break;
case 2:
- ppdu_info->gi = HAL_RX_GI_1_6_US;
- break;
+ he_gi = HE_GI_1_6;
+ he_ltf = HE_LTF_2_X;
+ break;
case 3:
- if (dcm && ppdu_info->is_stbc)
- ppdu_info->gi = HAL_RX_GI_0_8_US;
- else
- ppdu_info->gi = HAL_RX_GI_3_2_US;
- break;
+ if (he_dcm && he_stbc) {
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_4_X;
+ } else {
+ he_gi = HE_GI_3_2;
+ he_ltf = HE_LTF_4_X;
+ }
+ break;
}
+ ppdu_info->gi = he_gi;
+ he_gi = (he_gi != 0) ? he_gi - 1 : 0;
+ ppdu_info->he_data5 |= FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_GI, he_gi);
+ ppdu_info->ltf_size = he_ltf;
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE,
+ (he_ltf == HE_LTF_4_X) ? he_ltf - 1 : he_ltf);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR, info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF, info1);
+ ppdu_info->beamformed = value;
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_TXBF, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM, info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG, value);
+
+ /* data6 */
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
+ value++;
+ ppdu_info->nss = value;
+ ppdu_info->he_data6 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_NSTS, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND, info1);
+ ppdu_info->he_data6 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_DOPPLER, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION, info1);
+ ppdu_info->he_data6 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_TXOP, value);
- ppdu_info->nss = nsts + 1;
- ppdu_info->dcm = dcm;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
break;
}
@@ -1000,29 +1207,142 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl =
(struct hal_rx_he_sig_a_mu_dl_info *)tlv_data;
- u32 cp_ltf;
-
info0 = __le32_to_cpu(he_sig_a_mu_dl->info0);
info1 = __le32_to_cpu(he_sig_a_mu_dl->info1);
- ppdu_info->bw =
- FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW,
- info0);
- cp_ltf = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE,
- info0);
-
- switch (cp_ltf) {
+ ppdu_info->he_mu_flags = 1;
+
+ ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU;
+ ppdu_info->he_data1 |=
+ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN;
+
+ ppdu_info->he_data2 =
+ IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN;
+
+ /*data3*/
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_BSS_COLOR, info0);
+ ppdu_info->he_data3 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_UL_FLAG, info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_UL_DL, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_LDPC_EXTRA, info1);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC, info1);
+ he_stbc = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_STBC, value);
+
+ /*data4*/
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_SPATIAL_REUSE, info0);
+ ppdu_info->he_data4 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE, value);
+
+ /*data5*/
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW, info0);
+ ppdu_info->bw = value;
+ ppdu_info->he_data5 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE, info0);
+ switch (value) {
case 0:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_4_X;
+ break;
case 1:
- ppdu_info->gi = HAL_RX_GI_0_8_US;
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_2_X;
break;
case 2:
- ppdu_info->gi = HAL_RX_GI_1_6_US;
+ he_gi = HE_GI_1_6;
+ he_ltf = HE_LTF_2_X;
break;
case 3:
- ppdu_info->gi = HAL_RX_GI_3_2_US;
+ he_gi = HE_GI_3_2;
+ he_ltf = HE_LTF_4_X;
break;
}
+ ppdu_info->gi = he_gi;
+ he_gi = (he_gi != 0) ? he_gi - 1 : 0;
+ ppdu_info->he_data5 |= FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_GI, he_gi);
+ ppdu_info->ltf_size = he_ltf;
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE,
+ (he_ltf == HE_LTF_4_X) ? he_ltf - 1 : he_ltf);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_NUM_LTF_SYMB, info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_FACTOR,
+ info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_PE_DISAM,
+ info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG, value);
+
+ /*data6*/
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DOPPLER_INDICATION,
+ info0);
+ ppdu_info->he_data6 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_DOPPLER, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXOP_DURATION, info1);
+ ppdu_info->he_data6 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_TXOP, value);
+
+ /* HE-MU Flags */
+ /* HE-MU-flags1 */
+ ppdu_info->he_flags1 =
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN;
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_MCS_OF_SIGB, info0);
+ ppdu_info->he_flags1 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN,
+ value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DCM_OF_SIGB, info0);
+ ppdu_info->he_flags1 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN,
+ value);
+
+ /* HE-MU-flags2 */
+ ppdu_info->he_flags2 =
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN;
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW, info0);
+ ppdu_info->he_flags2 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW,
+ value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_COMP_MODE_SIGB, info0);
+ ppdu_info->he_flags2 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_NUM_SIGB_SYMB, info0);
+ value = value - 1;
+ ppdu_info->he_flags2 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS,
+ value);
ppdu_info->is_stbc = info1 &
HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC;
@@ -1040,7 +1360,7 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
info0);
ppdu_info->ru_alloc =
ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(ru_tones);
-
+ ppdu_info->he_RU[0] = ru_tones;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
break;
}
@@ -1050,14 +1370,25 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
info0 = __le32_to_cpu(he_sig_b2_mu->info0);
+ ppdu_info->he_data1 |= IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN;
+
ppdu_info->mcs =
- FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS,
- info0);
+ FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS, info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, ppdu_info->mcs);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING, info0);
+ ppdu_info->ldpc = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID, info0);
+ ppdu_info->he_data4 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID, value);
+
ppdu_info->nss =
- FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS,
- info0) + 1;
- ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING,
- info0);
+ FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS, info0) + 1;
break;
}
case HAL_PHYRX_HE_SIG_B2_OFDMA: {
@@ -1066,17 +1397,40 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
info0 = __le32_to_cpu(he_sig_b2_ofdma->info0);
+ ppdu_info->he_data1 |=
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN;
+
+ /* HE-data2 */
+ ppdu_info->he_data2 |= IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN;
+
ppdu_info->mcs =
FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS,
info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, ppdu_info->mcs);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM, info0);
+ he_dcm = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING, info0);
+ ppdu_info->ldpc = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
+
+ /* HE-data4 */
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID, info0);
+ ppdu_info->he_data4 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID, value);
+
ppdu_info->nss =
FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS,
info0) + 1;
ppdu_info->beamformed =
- info0 &
- HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF;
- ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING,
- info0);
+ info0 & HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
break;
}
@@ -1118,6 +1472,9 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
ppdu_info->rx_duration =
FIELD_GET(HAL_RX_PPDU_END_DURATION,
__le32_to_cpu(ppdu_rx_duration->info0));
+ ppdu_info->tsft = __le32_to_cpu(ppdu_rx_duration->rsvd0[1]);
+ ppdu_info->tsft = (ppdu_info->tsft << 32) |
+ __le32_to_cpu(ppdu_rx_duration->rsvd0[0]);
break;
}
case HAL_DUMMY:
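The TSF read above combines two little-endian 32-bit words, rsvd0[0] (low) and rsvd0[1] (high), into one 64-bit timestamp. A worked sketch: lo = 0x89abcdef with hi = 0x01234567 yields 0x0123456789abcdef.

	/* Equivalent of the two-step assembly above. */
	static u64 ath11k_hal_rx_assemble_tsft(u32 lo, u32 hi)
	{
		return ((u64)hi << 32) | lo;
	}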
@@ -1141,12 +1498,14 @@ ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
u16 tlv_tag;
u16 tlv_len;
+ u32 tlv_userid = 0;
u8 *ptr = skb->data;
do {
tlv = (struct hal_tlv_hdr *)ptr;
tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl);
tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
+ tlv_userid = FIELD_GET(HAL_TLV_USR_ID, tlv->tl);
ptr += sizeof(*tlv);
/* The actual length of PPDU_END is the combined length of many PHY
@@ -1158,7 +1517,7 @@ ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
hal_status = ath11k_hal_rx_parse_mon_status_tlv(ab, ppdu_info,
- tlv_tag, ptr);
+ tlv_tag, ptr, tlv_userid);
ptr += tlv_len;
ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h
index 571054c6d7f8..f6bae07abfd3 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.h
@@ -65,10 +65,6 @@ enum hal_rx_reception_type {
HAL_RX_RECEPTION_TYPE_MAX,
};
-#define HAL_TLV_STATUS_PPDU_NOT_DONE 0
-#define HAL_TLV_STATUS_PPDU_DONE 1
-#define HAL_TLV_STATUS_BUF_DONE 2
-#define HAL_TLV_STATUS_PPDU_NON_STD_DONE 3
#define HAL_RX_FCS_LEN 4
enum hal_rx_mon_status {
@@ -77,6 +73,40 @@ enum hal_rx_mon_status {
HAL_RX_MON_STATUS_BUF_DONE,
};
+struct hal_rx_user_status {
+ u32 mcs:4,
+ nss:3,
+ ofdma_info_valid:1,
+ dl_ofdma_ru_start_index:7,
+ dl_ofdma_ru_width:7,
+ dl_ofdma_ru_size:8;
+ u32 ul_ofdma_user_v0_word0;
+ u32 ul_ofdma_user_v0_word1;
+ u32 ast_index;
+ u32 tid;
+ u16 tcp_msdu_count;
+ u16 udp_msdu_count;
+ u16 other_msdu_count;
+ u16 frame_control;
+ u8 frame_control_info_valid;
+ u8 data_sequence_control_info_valid;
+ u16 first_data_seq_ctrl;
+ u32 preamble_type;
+ u16 ht_flags;
+ u16 vht_flags;
+ u16 he_flags;
+ u8 rs_flags;
+ u32 mpdu_cnt_fcs_ok;
+ u32 mpdu_cnt_fcs_err;
+ u32 mpdu_fcs_ok_bitmap[8];
+ u32 mpdu_ok_byte_count;
+ u32 mpdu_err_byte_count;
+};
+
+#define HAL_TLV_STATUS_PPDU_NOT_DONE HAL_RX_MON_STATUS_PPDU_NOT_DONE
+#define HAL_TLV_STATUS_PPDU_DONE HAL_RX_MON_STATUS_PPDU_DONE
+#define HAL_TLV_STATUS_BUF_DONE HAL_RX_MON_STATUS_BUF_DONE
+
struct hal_sw_mon_ring_entries {
dma_addr_t mon_dst_paddr;
dma_addr_t mon_status_paddr;
@@ -107,6 +137,12 @@ struct hal_rx_mon_ppdu_info {
u8 mcs;
u8 nss;
u8 bw;
+ u8 vht_flag_values1;
+ u8 vht_flag_values2;
+ u8 vht_flag_values3[4];
+ u8 vht_flag_values4;
+ u8 vht_flag_values5;
+ u16 vht_flag_values6;
u8 is_stbc;
u8 gi;
u8 ldpc;
@@ -114,10 +150,46 @@ struct hal_rx_mon_ppdu_info {
u8 rssi_comb;
u8 rssi_chain_pri20[HAL_RX_MAX_NSS];
u8 tid;
+ u16 ht_flags;
+ u16 vht_flags;
+ u16 he_flags;
+ u16 he_mu_flags;
u8 dcm;
u8 ru_alloc;
u8 reception_type;
+ u64 tsft;
u64 rx_duration;
+ u16 frame_control;
+ u32 ast_index;
+ u8 rs_fcs_err;
+ u8 rs_flags;
+ u8 cck_flag;
+ u8 ofdm_flag;
+ u8 ulofdma_flag;
+ u8 frame_control_info_valid;
+ u16 he_per_user_1;
+ u16 he_per_user_2;
+ u8 he_per_user_position;
+ u8 he_per_user_known;
+ u16 he_flags1;
+ u16 he_flags2;
+ u8 he_RU[4];
+ u16 he_data1;
+ u16 he_data2;
+ u16 he_data3;
+ u16 he_data4;
+ u16 he_data5;
+ u16 he_data6;
+ u32 ppdu_len;
+ u32 prev_ppdu_id;
+ u32 device_id;
+ u16 first_data_seq_ctrl;
+ u8 monitor_direct_used;
+ u8 data_sequence_control_info_valid;
+ u8 ltf_size;
+ u8 rxpcu_filter_pass;
+ char rssi_chain[8][8];
+ struct hal_rx_user_status userstats;
};
#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0)
@@ -150,6 +222,9 @@ struct hal_rx_ppdu_start {
#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP GENMASK(15, 0)
#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_EOSP_BITMAP GENMASK(31, 16)
+#define HAL_RX_PPDU_END_USER_STATS_RSVD2_6_MPDU_OK_BYTE_COUNT GENMASK(24, 0)
+#define HAL_RX_PPDU_END_USER_STATS_RSVD2_8_MPDU_ERR_BYTE_COUNT GENMASK(24, 0)
+
struct hal_rx_ppdu_end_user_stats {
__le32 rsvd0[2];
__le32 info0;
@@ -164,6 +239,16 @@ struct hal_rx_ppdu_end_user_stats {
__le32 rsvd2[11];
} __packed;
+struct hal_rx_ppdu_end_user_stats_ext {
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 info3;
+ u32 info4;
+ u32 info5;
+ u32 info6;
+} __packed;
+
#define HAL_RX_HT_SIG_INFO_INFO0_MCS GENMASK(6, 0)
#define HAL_RX_HT_SIG_INFO_INFO0_BW BIT(7)
@@ -212,25 +297,62 @@ enum hal_rx_vht_sig_a_gi_setting {
HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY = 3,
};
+#define HAL_RX_SU_MU_CODING_LDPC 0x01
+
+#define HE_GI_0_8 0
+#define HE_GI_0_4 1
+#define HE_GI_1_6 2
+#define HE_GI_3_2 3
+
+#define HE_LTF_1_X 0
+#define HE_LTF_2_X 1
+#define HE_LTF_4_X 2
+#define HE_LTF_UNKNOWN 3
+
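These are the codes the SIG-A parsing above maps the 2-bit cp_ltf_size field onto. The SU mapping, written out as a hypothetical lookup helper (for cp_ltf_size == 3 the GI depends on DCM together with STBC):

	static void he_su_cp_ltf_to_gi_ltf(u32 cp_ltf, bool dcm_and_stbc,
					   u16 *gi, u16 *ltf)
	{
		switch (cp_ltf) {
		case 0:
			*gi = HE_GI_0_8;
			*ltf = HE_LTF_1_X;
			break;
		case 1:
			*gi = HE_GI_0_8;
			*ltf = HE_LTF_2_X;
			break;
		case 2:
			*gi = HE_GI_1_6;
			*ltf = HE_LTF_2_X;
			break;
		case 3:
			*gi = dcm_and_stbc ? HE_GI_0_8 : HE_GI_3_2;
			*ltf = HE_LTF_4_X;
			break;
		}
	}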
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS GENMASK(6, 3)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM BIT(7)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW GENMASK(20, 19)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE GENMASK(22, 21)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS GENMASK(25, 23)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR GENMASK(13, 8)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE GENMASK(18, 15)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND BIT(0)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE BIT(1)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG BIT(2)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION GENMASK(6, 0)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING BIT(7)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA BIT(8)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC BIT(9)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF BIT(10)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR GENMASK(12, 11)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM BIT(13)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND BIT(15)
struct hal_rx_he_sig_a_su_info {
__le32 info0;
__le32 info1;
} __packed;
-#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW GENMASK(17, 15)
-#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE GENMASK(24, 23)
-
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_UL_FLAG BIT(1)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_MCS_OF_SIGB GENMASK(3, 1)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DCM_OF_SIGB BIT(4)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_BSS_COLOR GENMASK(10, 5)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_SPATIAL_REUSE GENMASK(14, 11)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW GENMASK(17, 15)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_NUM_SIGB_SYMB GENMASK(21, 18)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_COMP_MODE_SIGB BIT(22)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE GENMASK(24, 23)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DOPPLER_INDICATION BIT(25)
+
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXOP_DURATION GENMASK(6, 0)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_CODING BIT(7)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_NUM_LTF_SYMB GENMASK(10, 8)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_LDPC_EXTRA BIT(11)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC BIT(12)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXBF BIT(10)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_FACTOR GENMASK(14, 13)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_PE_DISAM BIT(15)
struct hal_rx_he_sig_a_mu_dl_info {
__le32 info0;
@@ -243,6 +365,7 @@ struct hal_rx_he_sig_b1_mu_info {
__le32 info0;
} __packed;
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID GENMASK(10, 0)
#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS GENMASK(18, 15)
#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING BIT(20)
#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS GENMASK(31, 29)
@@ -251,6 +374,7 @@ struct hal_rx_he_sig_b2_mu_info {
__le32 info0;
} __packed;
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID GENMASK(10, 0)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS GENMASK(13, 11)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(19)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS GENMASK(18, 15)
@@ -279,11 +403,14 @@ struct hal_rx_phyrx_rssi_legacy_info {
#define HAL_RX_MPDU_INFO_INFO0_PEERID GENMASK(31, 16)
#define HAL_RX_MPDU_INFO_INFO0_PEERID_WCN6855 GENMASK(15, 0)
+#define HAL_RX_MPDU_INFO_INFO1_MPDU_LEN GENMASK(13, 0)
struct hal_rx_mpdu_info {
__le32 rsvd0;
__le32 info0;
- __le32 rsvd1[21];
+ __le32 rsvd1[11];
+ __le32 info1;
+ __le32 rsvd2[9];
} __packed;
struct hal_rx_mpdu_info_wcn6855 {
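The reshaped struct keeps the descriptor size unchanged: rsvd1[21] is split into rsvd1[11] + info1 + rsvd2[9], still 21 dwords between info0 and the end. A compile-time check one could add (an assumption, not part of the patch):

	static_assert(sizeof(struct hal_rx_mpdu_info) == 23 * sizeof(__le32),
		      "layout change must not resize the descriptor");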
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index 3b0fdc1a6b3f..d1b0e76d9ec2 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -273,6 +273,12 @@ static u8 ath11k_hw_ipq8074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
__le32_to_cpu(desc->u.ipq8074.msdu_start.info2));
}
+static bool ath11k_hw_ipq8074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info2));
+}
+
static bool ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
@@ -444,6 +450,12 @@ static u8 ath11k_hw_qcn9074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
__le32_to_cpu(desc->u.qcn9074.msdu_start.info2));
}
+static bool ath11k_hw_qcn9074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info2));
+}
+
static bool ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_CTRL_VALID,
@@ -801,6 +813,12 @@ static u16 ath11k_hw_wcn6855_mpdu_info_get_peerid(u8 *tlv_data)
return peer_id;
}
+static bool ath11k_hw_wcn6855_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info2));
+}
+
const struct ath11k_hw_ops ipq8074_ops = {
.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
.wmi_init_config = ath11k_init_wmi_config_ipq8074,
@@ -815,6 +833,7 @@ const struct ath11k_hw_ops ipq8074_ops = {
.rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
@@ -853,6 +872,7 @@ const struct ath11k_hw_ops ipq6018_ops = {
.rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
@@ -891,6 +911,7 @@ const struct ath11k_hw_ops qca6390_ops = {
.rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
@@ -929,6 +950,7 @@ const struct ath11k_hw_ops qcn9074_ops = {
.rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
@@ -967,6 +989,7 @@ const struct ath11k_hw_ops wcn6855_ops = {
.rx_desc_get_encrypt_type = ath11k_hw_wcn6855_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_wcn6855_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_wcn6855_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_wcn6855_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_wcn6855_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_seq_no,
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 29934b36c14e..27ca4a9c20fc 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -192,6 +192,8 @@ struct ath11k_hw_params {
bool wakeup_mhi;
bool supports_rssi_stats;
bool fw_wmi_diag_event;
+ bool current_cc_support;
+ bool dbr_debug_support;
};
struct ath11k_hw_ops {
@@ -210,6 +212,7 @@ struct ath11k_hw_ops {
u32 (*rx_desc_get_encrypt_type)(struct hal_rx_desc *desc);
u8 (*rx_desc_get_decap_type)(struct hal_rx_desc *desc);
u8 (*rx_desc_get_mesh_ctl)(struct hal_rx_desc *desc);
+ bool (*rx_desc_get_ldpc_support)(struct hal_rx_desc *desc);
bool (*rx_desc_get_mpdu_seq_ctl_vld)(struct hal_rx_desc *desc);
bool (*rx_desc_get_mpdu_fc_valid)(struct hal_rx_desc *desc);
u16 (*rx_desc_get_mpdu_start_seq_no)(struct hal_rx_desc *desc);
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 07f499d5ec92..d5b83f90d27a 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -2319,6 +2319,9 @@ static void ath11k_peer_assoc_h_he_6ghz(struct ath11k *ar,
if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->he_6ghz_capa.capa)
return;
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ arg->bw_40 = true;
+
if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
arg->bw_80 = true;
@@ -2862,6 +2865,11 @@ static void ath11k_recalculate_mgmt_rate(struct ath11k *ar,
if (ret)
ath11k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
+ /* For WCN6855, firmware will clear this param when vdev starts, hence
+ * cache it here so that we can reconfigure it once vdev starts.
+ */
+ ar->hw_rate_code = hw_rate_code;
+
vdev_param = WMI_VDEV_PARAM_BEACON_RATE;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
hw_rate_code);
@@ -4504,24 +4512,30 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
sta->addr, arvif->vdev_id);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
- ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
+ bool skip_peer_delete = ar->ab->hw_params.vdev_start_delay &&
+ vif->type == NL80211_IFTYPE_STATION;
- if (ar->ab->hw_params.vdev_start_delay &&
- vif->type == NL80211_IFTYPE_STATION)
- goto free;
+ ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
- ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
- if (ret)
- ath11k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- else
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
+ if (!skip_peer_delete) {
+ ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "Failed to delete peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab,
+ ATH11K_DBG_MAC,
+ "Removed peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ }
ath11k_mac_dec_num_stations(arvif, sta);
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer && peer->sta == sta) {
+ if (skip_peer_delete && peer) {
+ peer->sta = NULL;
+ } else if (peer && peer->sta == sta) {
ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
vif->addr, arvif->vdev_id);
peer->sta = NULL;
@@ -4531,7 +4545,6 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
}
spin_unlock_bh(&ar->ab->base_lock);
-free:
kfree(arsta->tx_stats);
arsta->tx_stats = NULL;
@@ -5566,7 +5579,7 @@ static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb,
skb_queue_tail(q, skb);
atomic_inc(&ar->num_pending_mgmt_tx);
- ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+ queue_work(ar->ab->workqueue, &ar->wmi_mgmt_tx_work);
return 0;
}
@@ -6341,6 +6354,10 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
}
}
+ ret = ath11k_debugfs_add_interface(arvif);
+ if (ret)
+ goto err_peer_del;
+
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -6375,6 +6392,7 @@ err_vdev_del:
spin_unlock_bh(&ar->data_lock);
err:
+ ath11k_debugfs_remove_interface(arvif);
mutex_unlock(&ar->conf_mutex);
return ret;
@@ -6473,6 +6491,8 @@ err_vdev_del:
/* Recalc txpower for remaining vdev */
ath11k_mac_txpower_recalc(ar);
+ ath11k_debugfs_remove_interface(arvif);
+
/* TODO: recal traffic pause state based on the available vdevs */
mutex_unlock(&ar->conf_mutex);
@@ -6610,12 +6630,13 @@ static void ath11k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
static int
ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
- const struct cfg80211_chan_def *chandef,
+ struct ieee80211_chanctx_conf *ctx,
bool restart)
{
struct ath11k *ar = arvif->ar;
struct ath11k_base *ab = ar->ab;
struct wmi_vdev_start_req_arg arg = {};
+ const struct cfg80211_chan_def *chandef = &ctx->def;
int he_support = arvif->vif->bss_conf.he_support;
int ret = 0;
@@ -6650,8 +6671,7 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
arg.channel.chan_radar =
!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
- arg.channel.freq2_radar =
- !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+ arg.channel.freq2_radar = ctx->radar_enabled;
arg.channel.passive = arg.channel.chan_radar;
@@ -6761,15 +6781,15 @@ err:
}
static int ath11k_mac_vdev_start(struct ath11k_vif *arvif,
- const struct cfg80211_chan_def *chandef)
+ struct ieee80211_chanctx_conf *ctx)
{
- return ath11k_mac_vdev_start_restart(arvif, chandef, false);
+ return ath11k_mac_vdev_start_restart(arvif, ctx, false);
}
static int ath11k_mac_vdev_restart(struct ath11k_vif *arvif,
- const struct cfg80211_chan_def *chandef)
+ struct ieee80211_chanctx_conf *ctx)
{
- return ath11k_mac_vdev_start_restart(arvif, chandef, true);
+ return ath11k_mac_vdev_start_restart(arvif, ctx, true);
}
struct ath11k_mac_change_chanctx_arg {
@@ -6836,13 +6856,33 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
if (WARN_ON(!arvif->is_started))
continue;
- if (WARN_ON(!arvif->is_up))
- continue;
+ /* change_chanctx can be called even before vdev_up from
+ * ieee80211_start_ap->ieee80211_vif_use_channel->
+ * ieee80211_recalc_radar_chanctx.
+ *
+ * Firmware expects vdev_restart only if the vdev is up.
+ * If the vdev is down, it expects vdev_stop->vdev_start.
+ */
+ if (arvif->is_up) {
+ ret = ath11k_mac_vdev_restart(arvif, vifs[i].new_ctx);
+ if (ret) {
+ ath11k_warn(ab, "failed to restart vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ } else {
+ ret = ath11k_mac_vdev_stop(arvif);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+
+ ret = ath11k_mac_vdev_start(arvif, vifs[i].new_ctx);
+ if (ret)
+ ath11k_warn(ab, "failed to start vdev %d: %d\n",
+ arvif->vdev_id, ret);
- ret = ath11k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def);
- if (ret) {
- ath11k_warn(ab, "failed to restart vdev %d: %d\n",
- arvif->vdev_id, ret);
continue;
}
@@ -6927,7 +6967,8 @@ static void ath11k_mac_op_change_chanctx(struct ieee80211_hw *hw,
if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
goto unlock;
- if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH)
+ if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH ||
+ changed & IEEE80211_CHANCTX_CHANGE_RADAR)
ath11k_mac_update_active_vif_chan(ar, ctx);
/* TODO: Recalc radar detection */
@@ -6947,7 +6988,7 @@ static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
if (WARN_ON(arvif->is_started))
return -EBUSY;
- ret = ath11k_mac_vdev_start(arvif, &arvif->chanctx.def);
+ ret = ath11k_mac_vdev_start(arvif, &arvif->chanctx);
if (ret) {
ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
arvif->vdev_id, vif->addr,
@@ -6955,6 +6996,19 @@ static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
return ret;
}
+ /* Reconfigure hardware rate code since it is cleared by firmware. */
+ if (ar->hw_rate_code > 0) {
+ u32 vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ ar->hw_rate_code);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
+ return ret;
+ }
+ }
+
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, 0, ar->mac_addr);
if (ret) {
@@ -7028,7 +7082,7 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- ret = ath11k_mac_vdev_start(arvif, &ctx->def);
+ ret = ath11k_mac_vdev_start(arvif, ctx);
if (ret) {
ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
arvif->vdev_id, vif->addr,
@@ -8422,7 +8476,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
ar->hw->queues = ATH11K_HW_MAX_QUEUES;
ar->hw->wiphy->tx_queue_len = ATH11K_QUEUE_LEN;
ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1;
- ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
ar->hw->vif_data_size = sizeof(struct ath11k_vif);
ar->hw->sta_data_size = sizeof(struct ath11k_sta);
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index e4250ba8dfee..fc3524e83e52 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -13,6 +13,7 @@
#include "pci.h"
#define MHI_TIMEOUT_DEFAULT_MS 90000
+#define RDDM_DUMP_SIZE 0x420000
static struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
{
@@ -332,6 +333,7 @@ static int ath11k_mhi_read_addr_from_dt(struct mhi_controller *mhi_ctrl)
return -ENOENT;
ret = of_address_to_resource(np, 0, &res);
+ of_node_put(np);
if (ret)
return ret;
@@ -381,6 +383,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
mhi_ctrl->iova_stop = 0xFFFFFFFF;
}
+ mhi_ctrl->rddm_size = RDDM_DUMP_SIZE;
mhi_ctrl->sbl_size = SZ_512K;
mhi_ctrl->seg_len = SZ_512K;
mhi_ctrl->fbc_download = true;
@@ -560,7 +563,7 @@ static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci,
ret = 0;
break;
case ATH11K_MHI_POWER_ON:
- ret = mhi_async_power_up(ab_pci->mhi_ctrl);
+ ret = mhi_sync_power_up(ab_pci->mhi_ctrl);
break;
case ATH11K_MHI_POWER_OFF:
mhi_power_down(ab_pci->mhi_ctrl, true);
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index de71ad594f34..903758751c99 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -1571,6 +1571,11 @@ static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
struct ath11k_base *ab = dev_get_drvdata(dev);
int ret;
+ if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci suspend as qmi is not initialised\n");
+ return 0;
+ }
+
ret = ath11k_core_suspend(ab);
if (ret)
ath11k_warn(ab, "failed to suspend core: %d\n", ret);
@@ -1583,6 +1588,11 @@ static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
struct ath11k_base *ab = dev_get_drvdata(dev);
int ret;
+ if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci resume as qmi is not initialised\n");
+ return 0;
+ }
+
ret = ath11k_core_resume(ab);
if (ret)
ath11k_warn(ab, "failed to resume core: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index 85471f8b3563..332886bc6b33 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -252,7 +252,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
{
struct ath11k_peer *peer;
struct ath11k_sta *arsta;
- int ret;
+ int ret, fbret;
lockdep_assert_held(&ar->conf_mutex);
@@ -291,22 +291,8 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
param->peer_addr, param->vdev_id);
- reinit_completion(&ar->peer_delete_done);
-
- ret = ath11k_wmi_send_peer_delete_cmd(ar, param->peer_addr,
- param->vdev_id);
- if (ret) {
- ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
- param->vdev_id, param->peer_addr);
- return ret;
- }
-
- ret = ath11k_wait_for_peer_delete_done(ar, param->vdev_id,
- param->peer_addr);
- if (ret)
- return ret;
-
- return -ENOENT;
+ ret = -ENOENT;
+ goto cleanup;
}
peer->pdev_idx = ar->pdev_idx;
@@ -335,4 +321,24 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
spin_unlock_bh(&ar->ab->base_lock);
return 0;
+
+cleanup:
+ reinit_completion(&ar->peer_delete_done);
+
+ fbret = ath11k_wmi_send_peer_delete_cmd(ar, param->peer_addr,
+ param->vdev_id);
+ if (fbret) {
+ ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
+ param->vdev_id, param->peer_addr);
+ goto exit;
+ }
+
+ fbret = ath11k_wait_for_peer_delete_done(ar, param->vdev_id,
+ param->peer_addr);
+ if (fbret)
+ ath11k_warn(ar->ab, "failed wait for peer %pM delete done id %d fallback ret %d\n",
+ param->peer_addr, param->vdev_id, fbret);
+
+exit:
+ return ret;
}
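The restructuring above is the usual "preserve the primary error" pattern: the reason peer creation failed stays in ret, while the best-effort peer teardown reports through fbret and is only logged, never returned in its place. A minimal generic sketch of the pattern (create_resource() and destroy_resource() are hypothetical helpers):

	int do_thing(void)
	{
		int ret, fbret;

		ret = create_resource();
		if (ret)
			goto cleanup;
		return 0;

	cleanup:
		fbret = destroy_resource();	/* best effort */
		if (fbret)
			pr_warn("cleanup failed: %d\n", fbret);
		return ret;	/* report the original failure */
	}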
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 65d3c6ba35ae..04e966830c18 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1932,10 +1932,11 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
if (!hremote_node) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"qmi fail to get hremote_node\n");
- return ret;
+ return -ENODEV;
}
ret = of_address_to_resource(hremote_node, 0, &res);
+ of_node_put(hremote_node);
if (ret) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"qmi fail to get reg from hremote\n");
@@ -2341,6 +2342,7 @@ static void ath11k_qmi_m3_free(struct ath11k_base *ab)
dma_free_coherent(ab->dev, m3_mem->size,
m3_mem->vaddr, m3_mem->paddr);
m3_mem->vaddr = NULL;
+ m3_mem->size = 0;
}
static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
@@ -2958,7 +2960,11 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
clear_bit(ATH11K_FLAG_CRASH_FLUSH,
&ab->dev_flags);
clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
- ath11k_core_qmi_firmware_ready(ab);
+ ret = ath11k_core_qmi_firmware_ready(ab);
+ if (ret) {
+ set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
+ }
set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags);
}
@@ -3024,3 +3030,8 @@ void ath11k_qmi_deinit_service(struct ath11k_base *ab)
}
EXPORT_SYMBOL(ath11k_qmi_deinit_service);
+void ath11k_qmi_free_resource(struct ath11k_base *ab)
+{
+ ath11k_qmi_free_target_mem_chunk(ab);
+ ath11k_qmi_m3_free(ab);
+}
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index ba2eff4d59cb..61678de56ac7 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -492,5 +492,6 @@ void ath11k_qmi_event_work(struct work_struct *work);
void ath11k_qmi_msg_recv_work(struct work_struct *work);
void ath11k_qmi_deinit_service(struct ath11k_base *ab);
int ath11k_qmi_init_service(struct ath11k_base *ab);
+void ath11k_qmi_free_resource(struct ath11k_base *ab);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index d6575feca5a2..81e11cde31d7 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -48,6 +48,7 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct wmi_init_country_params init_country_param;
+ struct wmi_set_current_country_params set_current_param = {};
struct ath11k *ar = hw->priv;
int ret;
@@ -76,18 +77,26 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
return;
}
- /* Set the country code to the firmware and wait for
+ /* Set the country code to the firmware, after which we receive
* the WMI_REG_CHAN_LIST_CC EVENT for updating the
* reg info
*/
- init_country_param.flags = ALPHA_IS_SET;
- memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2);
- init_country_param.cc_info.alpha2[2] = 0;
+ if (ar->ab->hw_params.current_cc_support) {
+ memcpy(&set_current_param.alpha2, request->alpha2, 2);
+ ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed set current country code: %d\n", ret);
+ } else {
+ init_country_param.flags = ALPHA_IS_SET;
+ memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2);
+ init_country_param.cc_info.alpha2[2] = 0;
- ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param);
- if (ret)
- ath11k_warn(ar->ab,
- "INIT Country code set to fw failed : %d\n", ret);
+ ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "INIT Country code set to fw failed : %d\n", ret);
+ }
ath11k_mac_11d_scan_stop(ar);
ar->regdom_set_by_user = true;
diff --git a/drivers/net/wireless/ath/ath11k/rx_desc.h b/drivers/net/wireless/ath/ath11k/rx_desc.h
index 79c50804d7dc..26ecc1bcd9d5 100644
--- a/drivers/net/wireless/ath/ath11k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath11k/rx_desc.h
@@ -1445,7 +1445,7 @@ struct hal_rx_desc_ipq8074 {
__le32 hdr_status_tag;
__le32 phy_ppdu_id;
u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
- u8 msdu_payload[0];
+ u8 msdu_payload[];
} __packed;
struct hal_rx_desc_qcn9074 {
@@ -1464,7 +1464,7 @@ struct hal_rx_desc_qcn9074 {
__le32 hdr_status_tag;
__le32 phy_ppdu_id;
u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
- u8 msdu_payload[0];
+ u8 msdu_payload[];
} __packed;
struct hal_rx_desc_wcn6855 {
@@ -1483,7 +1483,7 @@ struct hal_rx_desc_wcn6855 {
__le32 hdr_status_tag;
__le32 phy_ppdu_id;
u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
- u8 msdu_payload[0];
+ u8 msdu_payload[];
} __packed;
struct hal_rx_desc {
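The repeated [0] -> [] conversions in this header (and in spectral.c below) replace GNU zero-length arrays with C99 flexible array members, which the compiler and FORTIFY_SOURCE can bounds-check and which struct_size() sizes with overflow checking. A short allocation sketch, with payload_len as an assumed variable:

	struct hal_rx_desc_ipq8074 *desc;

	/* sizeof(*desc) + payload_len * sizeof(u8), overflow-checked */
	desc = kzalloc(struct_size(desc, msdu_payload, payload_len),
		       GFP_KERNEL);
	if (!desc)
		return -ENOMEM;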
diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
index 4100cc1449a2..2b18871d5f7c 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.c
+++ b/drivers/net/wireless/ath/ath11k/spectral.c
@@ -107,7 +107,7 @@ struct spectral_search_fft_report {
__le32 info1;
__le32 info2;
__le32 reserve0;
- u8 bins[0];
+ u8 bins[];
} __packed;
struct ath11k_spectral_search_report {
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 6b68ccf65e39..b4f86c45d81f 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -144,6 +144,8 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_11d_new_cc_ev) },
[WMI_TAG_PER_CHAIN_RSSI_STATS] = {
.min_len = sizeof(struct wmi_per_chain_rssi_stats) },
+ [WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = {
+ .min_len = sizeof(struct wmi_twt_add_dialog_event) },
};
#define PRIMAP(_hw_mode_) \
@@ -3085,11 +3087,12 @@ ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id)
/* TODO add MBSSID support */
cmd->mbss_support = 0;
- ret = ath11k_wmi_cmd_send(wmi, skb,
- WMI_TWT_ENABLE_CMDID);
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ENABLE_CMDID);
if (ret) {
ath11k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
dev_kfree_skb(skb);
+ } else {
+ ar->twt_enabled = 1;
}
return ret;
}
@@ -3114,11 +3117,181 @@ ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id)
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = pdev_id;
- ret = ath11k_wmi_cmd_send(wmi, skb,
- WMI_TWT_DISABLE_CMDID);
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DISABLE_CMDID);
if (ret) {
ath11k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
dev_kfree_skb(skb);
+ } else {
+ ar->twt_enabled = 0;
+ }
+ return ret;
+}
+
+int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_add_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_add_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_add_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ADD_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+ cmd->wake_intvl_us = params->wake_intvl_us;
+ cmd->wake_intvl_mantis = params->wake_intvl_mantis;
+ cmd->wake_dura_us = params->wake_dura_us;
+ cmd->sp_offset_us = params->sp_offset_us;
+ cmd->flags = params->twt_cmd;
+ if (params->flag_bcast)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_BCAST;
+ if (params->flag_trigger)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_TRIGGER;
+ if (params->flag_flow_type)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE;
+ if (params->flag_protection)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_PROTECTION;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi add twt dialog vdev %u dialog id %u wake interval %u mantissa %u wake duration %u service period offset %u flags 0x%x\n",
+ cmd->vdev_id, cmd->dialog_id, cmd->wake_intvl_us,
+ cmd->wake_intvl_mantis, cmd->wake_dura_us, cmd->sp_offset_us,
+ cmd->flags);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ADD_DIALOG_CMDID);
+
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to add twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_del_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_del_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_del_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DEL_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi delete twt dialog vdev %u dialog id %u\n",
+ cmd->vdev_id, cmd->dialog_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DEL_DIALOG_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to delete twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_pause_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_pause_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_pause_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_TWT_PAUSE_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi pause twt dialog vdev %u dialog id %u\n",
+ cmd->vdev_id, cmd->dialog_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_PAUSE_DIALOG_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to pause twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_resume_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_resume_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_resume_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_TWT_RESUME_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+ cmd->sp_offset_us = params->sp_offset_us;
+ cmd->next_twt_size = params->next_twt_size;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi resume twt dialog vdev %u dialog id %u service period offset %u next twt subfield size %u\n",
+ cmd->vdev_id, cmd->dialog_id, cmd->sp_offset_us,
+ cmd->next_twt_size);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_RESUME_DIALOG_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to resume twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
}
return ret;
}
@@ -7532,6 +7705,66 @@ ath11k_wmi_diag_event(struct ath11k_base *ab,
trace_ath11k_wmi_diag(ab, skb->data, skb->len);
}
+static const char *ath11k_wmi_twt_add_dialog_event_status(u32 status)
+{
+ switch (status) {
+ case WMI_ADD_TWT_STATUS_OK:
+ return "ok";
+ case WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED:
+ return "twt disabled";
+ case WMI_ADD_TWT_STATUS_USED_DIALOG_ID:
+ return "dialog id in use";
+ case WMI_ADD_TWT_STATUS_INVALID_PARAM:
+ return "invalid parameters";
+ case WMI_ADD_TWT_STATUS_NOT_READY:
+ return "not ready";
+ case WMI_ADD_TWT_STATUS_NO_RESOURCE:
+ return "resource unavailable";
+ case WMI_ADD_TWT_STATUS_NO_ACK:
+ return "no ack";
+ case WMI_ADD_TWT_STATUS_NO_RESPONSE:
+ return "no response";
+ case WMI_ADD_TWT_STATUS_DENIED:
+ return "denied";
+ case WMI_ADD_TWT_STATUS_UNKNOWN_ERROR:
+ fallthrough;
+ default:
+ return "unknown error";
+ }
+}
+
+static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_twt_add_dialog_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab,
+ "failed to parse wmi twt add dialog status event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch twt add dialog wmi event\n");
+ goto exit;
+ }
+
+ if (ev->status)
+ ath11k_warn(ab,
+ "wmi add twt dialog event vdev %d dialog id %d status %s\n",
+ ev->vdev_id, ev->dialog_id,
+ ath11k_wmi_twt_add_dialog_event_status(ev->status));
+
+exit:
+ kfree(tb);
+}
+
static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -7629,11 +7862,17 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
ath11k_wmi_obss_color_collision_event(ab, skb);
break;
+ case WMI_TWT_ADD_DIALOG_EVENTID:
+ ath11k_wmi_twt_add_dialog_event(ab, skb);
+ break;
/* add Unsupported events here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
case WMI_TWT_ENABLE_EVENTID:
case WMI_TWT_DISABLE_EVENTID:
+ case WMI_TWT_DEL_DIALOG_EVENTID:
+ case WMI_TWT_PAUSE_DIALOG_EVENTID:
+ case WMI_TWT_RESUME_DIALOG_EVENTID:
case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
case WMI_PEER_CREATE_CONF_EVENTID:
ath11k_dbg(ab, ATH11K_DBG_WMI,
@@ -7798,6 +8037,59 @@ int ath11k_wmi_simulate_radar(struct ath11k *ar)
return ath11k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
}
+int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap,
+ struct ath11k_fw_dbglog *dbglog)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_debug_log_config_cmd_fixed_param *cmd;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ int ret, len;
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + (MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_debug_log_config_cmd_fixed_param *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DEBUG_LOG_CONFIG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->dbg_log_param = dbglog->param;
+
+ tlv = (struct wmi_tlv *)((u8 *)cmd + sizeof(*cmd));
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+
+ switch (dbglog->param) {
+ case WMI_DEBUG_LOG_PARAM_LOG_LEVEL:
+ case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE:
+ case WMI_DEBUG_LOG_PARAM_VDEV_DISABLE:
+ case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP:
+ cmd->value = dbglog->value;
+ break;
+ case WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP:
+ case WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP:
+ cmd->value = dbglog->value;
+ memcpy(tlv->value, module_id_bitmap,
+ MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+ /* clear current config to be used for next user config */
+ memset(module_id_bitmap, 0,
+ MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+ break;
+ default:
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DBGLOG_CFG_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_DBGLOG_CFG_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
int ath11k_wmi_connect(struct ath11k_base *ab)
{
u32 i;
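A hedged usage sketch for the new dbglog hook (the param/value fields are the ones the function dereferences; the level value is illustrative). For the *_MOD_ENABLE_BITMAP parameters the caller-owned bitmap is copied into the WMI_TAG_ARRAY_UINT32 TLV and then cleared; for the other parameters only cmd->value is consumed:

	struct ath11k_fw_dbglog dbglog = {
		.param = WMI_DEBUG_LOG_PARAM_LOG_LEVEL,
		.value = 1,			/* illustrative log level */
	};
	u32 module_id_bitmap[MAX_MODULE_ID_BITMAP_WORDS] = {};
	int ret;

	ret = ath11k_wmi_fw_dbglog_cfg(ar, module_id_bitmap, &dbglog);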
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 2f26ec1a8aa3..587f42307250 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -12,6 +12,7 @@
struct ath11k_base;
struct ath11k;
struct ath11k_fw_stats;
+struct ath11k_fw_dbglog;
#define PSOC_HOST_MAX_NUM_SS (8)
@@ -4952,6 +4953,112 @@ struct wmi_twt_disable_params_cmd {
u32 pdev_id;
} __packed;
+enum WMI_HOST_TWT_COMMAND {
+ WMI_HOST_TWT_COMMAND_REQUEST_TWT = 0,
+ WMI_HOST_TWT_COMMAND_SUGGEST_TWT,
+ WMI_HOST_TWT_COMMAND_DEMAND_TWT,
+ WMI_HOST_TWT_COMMAND_TWT_GROUPING,
+ WMI_HOST_TWT_COMMAND_ACCEPT_TWT,
+ WMI_HOST_TWT_COMMAND_ALTERNATE_TWT,
+ WMI_HOST_TWT_COMMAND_DICTATE_TWT,
+ WMI_HOST_TWT_COMMAND_REJECT_TWT,
+};
+
+#define WMI_TWT_ADD_DIALOG_FLAG_BCAST BIT(8)
+#define WMI_TWT_ADD_DIALOG_FLAG_TRIGGER BIT(9)
+#define WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE BIT(10)
+#define WMI_TWT_ADD_DIALOG_FLAG_PROTECTION BIT(11)
+
+struct wmi_twt_add_dialog_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+ u32 wake_intvl_us;
+ u32 wake_intvl_mantis;
+ u32 wake_dura_us;
+ u32 sp_offset_us;
+ u32 flags;
+} __packed;
+
+struct wmi_twt_add_dialog_params {
+ u32 vdev_id;
+ u8 peer_macaddr[ETH_ALEN];
+ u32 dialog_id;
+ u32 wake_intvl_us;
+ u32 wake_intvl_mantis;
+ u32 wake_dura_us;
+ u32 sp_offset_us;
+ u8 twt_cmd;
+ u8 flag_bcast;
+ u8 flag_trigger;
+ u8 flag_flow_type;
+ u8 flag_protection;
+} __packed;
+
+enum wmi_twt_add_dialog_status {
+ WMI_ADD_TWT_STATUS_OK,
+ WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED,
+ WMI_ADD_TWT_STATUS_USED_DIALOG_ID,
+ WMI_ADD_TWT_STATUS_INVALID_PARAM,
+ WMI_ADD_TWT_STATUS_NOT_READY,
+ WMI_ADD_TWT_STATUS_NO_RESOURCE,
+ WMI_ADD_TWT_STATUS_NO_ACK,
+ WMI_ADD_TWT_STATUS_NO_RESPONSE,
+ WMI_ADD_TWT_STATUS_DENIED,
+ WMI_ADD_TWT_STATUS_UNKNOWN_ERROR,
+};
+
+struct wmi_twt_add_dialog_event {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+ u32 status;
+} __packed;
+
+struct wmi_twt_del_dialog_params {
+ u32 vdev_id;
+ u8 peer_macaddr[ETH_ALEN];
+ u32 dialog_id;
+} __packed;
+
+struct wmi_twt_del_dialog_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+} __packed;
+
+struct wmi_twt_pause_dialog_params {
+ u32 vdev_id;
+ u8 peer_macaddr[ETH_ALEN];
+ u32 dialog_id;
+} __packed;
+
+struct wmi_twt_pause_dialog_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+} __packed;
+
+struct wmi_twt_resume_dialog_params {
+ u32 vdev_id;
+ u8 peer_macaddr[ETH_ALEN];
+ u32 dialog_id;
+ u32 sp_offset_us;
+ u32 next_twt_size;
+} __packed;
+
+struct wmi_twt_resume_dialog_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+ u32 sp_offset_us;
+ u32 next_twt_size;
+} __packed;
+
struct wmi_obss_spatial_reuse_params_cmd {
u32 tlv_header;
u32 pdev_id;
@@ -5240,6 +5347,21 @@ struct wmi_rfkill_state_change_ev {
u32 radio_state;
} __packed;
+enum wmi_debug_log_param {
+ WMI_DEBUG_LOG_PARAM_LOG_LEVEL = 0x1,
+ WMI_DEBUG_LOG_PARAM_VDEV_ENABLE,
+ WMI_DEBUG_LOG_PARAM_VDEV_DISABLE,
+ WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP,
+ WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP,
+ WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP,
+};
+
+struct wmi_debug_log_config_cmd_fixed_param {
+ u32 tlv_header;
+ u32 dbg_log_param;
+ u32 value;
+} __packed;
+
#define WMI_MAX_MEM_REQS 32
#define MAX_RADIOS 3
@@ -5546,6 +5668,14 @@ void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
int ath11k_wmi_simulate_radar(struct ath11k *ar);
int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id);
int ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id);
+int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_add_dialog_params *params);
+int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_del_dialog_params *params);
+int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_pause_dialog_params *params);
+int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_resume_dialog_params *params);
int ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
struct ieee80211_he_obss_pd *he_obss_pd);
int ath11k_wmi_pdev_set_srg_bss_color_bitmap(struct ath11k *ar, u32 *bitmap);
@@ -5582,4 +5712,6 @@ int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar);
int ath11k_wmi_wow_enable(struct ath11k *ar);
int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
const u8 mac_addr[ETH_ALEN]);
+int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap,
+ struct ath11k_fw_dbglog *dbglog);
#endif
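A hedged usage sketch of the new TWT API (all values illustrative; ar, arvif and sta are assumed to be in scope as elsewhere in the driver): fill the host-side params struct declared above, copy in the peer address, and hand it to the send helper. Completion is reported asynchronously through WMI_TWT_ADD_DIALOG_EVENTID, handled in wmi.c above.

	struct wmi_twt_add_dialog_params params = {
		.vdev_id = arvif->vdev_id,
		.dialog_id = 1,
		.wake_intvl_us = 65536,		/* illustrative */
		.wake_intvl_mantis = 64,	/* illustrative */
		.wake_dura_us = 8192,
		.twt_cmd = WMI_HOST_TWT_COMMAND_REQUEST_TWT,
		.flag_trigger = 1,
	};
	int ret;

	ether_addr_copy(params.peer_macaddr, sta->addr);
	ret = ath11k_wmi_send_twt_add_dialog_cmd(ar, &params);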
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 234ea939d316..f595204f493d 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1395,10 +1395,6 @@ struct ath5k_hw {
u32 ah_txq_imr_nofrm;
u32 ah_txq_isr_txok_all;
- u32 ah_txq_isr_txurn;
- u32 ah_txq_isr_qcborn;
- u32 ah_txq_isr_qcburn;
- u32 ah_txq_isr_qtrig;
u32 *ah_rf_banks;
size_t ah_rf_banks_size;
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index e6c52f7c26e7..d9e376eb040e 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -650,6 +650,7 @@ ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
*/
*interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;
+ ah->ah_txq_isr_txok_all = 0;
/* We treat TXOK,TXDESC, TXERR and TXEOL
* the same way (schedule the tx tasklet)
@@ -670,13 +671,6 @@ ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
AR5K_SISR1_QCU_TXEOL);
- /* Currently this is not much useful since we treat
- * all queues the same way if we get a TXURN (update
- * tx trigger level) but we might need it later on*/
- if (pisr & AR5K_ISR_TXURN)
- ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2,
- AR5K_SISR2_QCU_TXURN);
-
/* Misc Beacon related interrupts */
/* For AR5211 */
@@ -709,25 +703,16 @@ ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
*interrupt_mask |= AR5K_INT_BNR;
/* A queue got CBR overrun */
- if (unlikely(pisr & (AR5K_ISR_QCBRORN))) {
+ if (unlikely(pisr & (AR5K_ISR_QCBRORN)))
*interrupt_mask |= AR5K_INT_QCBRORN;
- ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3,
- AR5K_SISR3_QCBRORN);
- }
/* A queue got CBR underrun */
- if (unlikely(pisr & (AR5K_ISR_QCBRURN))) {
+ if (unlikely(pisr & (AR5K_ISR_QCBRURN)))
*interrupt_mask |= AR5K_INT_QCBRURN;
- ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3,
- AR5K_SISR3_QCBRURN);
- }
/* A queue got triggered */
- if (unlikely(pisr & (AR5K_ISR_QTRIG))) {
+ if (unlikely(pisr & (AR5K_ISR_QTRIG)))
*interrupt_mask |= AR5K_INT_QTRIG;
- ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4,
- AR5K_SISR4_QTRIG);
- }
data = pisr;
}
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 1fbc2c19848f..d444b3d70ba2 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -746,6 +746,9 @@ ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode,
}
}
+ if (idx == AR5K_EEPROM_N_PD_CURVES)
+ goto err_out;
+
ee->ee_pd_gains[mode] = 1;
pd = &chinfo[pier].pd_curves[idx];
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index b22ed499f7ba..a56fab6232a9 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -839,7 +839,7 @@ static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
skb->protocol = eth_type_trans(skb, skb->dev);
- netif_rx_ni(skb);
+ netif_rx(skb);
}
static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
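This and the matching netif_rx_ni() removals in wil6210 and brcmfmac below are mechanical: since the 5.18 rework, netif_rx() disables bottom halves itself when invoked from process context, so it is safe from any context and netif_rx_ni() is obsolete. The RX handoff shrinks to:

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_rx(skb);	/* valid from process, softirq or hardirq context */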
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index aba70f35e574..65e683effdcb 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -1217,6 +1217,7 @@ static int ath6kl_usb_pm_resume(struct usb_interface *interface)
static const struct usb_device_id ath6kl_usb_ids[] = {
{USB_DEVICE(0x0cf3, 0x9375)},
{USB_DEVICE(0x0cf3, 0x9374)},
+ {USB_DEVICE(0x04da, 0x390d)},
{ /* Terminating entry */ },
};
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index bd1ef6334997..3787b9fb0075 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -1750,7 +1750,6 @@ static int ath6kl_wmi_snr_threshold_event_rx(struct wmi *wmi, u8 *datap,
static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len)
{
- u16 ap_info_entry_size;
struct wmi_aplist_event *ev = (struct wmi_aplist_event *) datap;
struct wmi_ap_info_v1 *ap_info_v1;
u8 index;
@@ -1759,14 +1758,12 @@ static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len)
ev->ap_list_ver != APLIST_VER1)
return -EINVAL;
- ap_info_entry_size = sizeof(struct wmi_ap_info_v1);
ap_info_v1 = (struct wmi_ap_info_v1 *) ev->ap_list;
ath6kl_dbg(ATH6KL_DBG_WMI,
"number of APs in aplist event: %d\n", ev->num_ap);
- if (len < (int) (sizeof(struct wmi_aplist_event) +
- (ev->num_ap - 1) * ap_info_entry_size))
+ if (len < struct_size(ev, ap_list, ev->num_ap))
return -EINVAL;
/* AP list version 1 contents */
@@ -1959,21 +1956,15 @@ static int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
{
struct sk_buff *skb;
struct wmi_start_scan_cmd *sc;
- s8 size;
int i, ret;
- size = sizeof(struct wmi_start_scan_cmd);
-
if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
return -EINVAL;
if (num_chan > WMI_MAX_CHANNELS)
return -EINVAL;
- if (num_chan)
- size += sizeof(u16) * (num_chan - 1);
-
- skb = ath6kl_wmi_get_new_buf(size);
+ skb = ath6kl_wmi_get_new_buf(struct_size(sc, ch_list, num_chan));
if (!skb)
return -ENOMEM;
@@ -2008,7 +1999,7 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
struct ieee80211_supported_band *sband;
struct sk_buff *skb;
struct wmi_begin_scan_cmd *sc;
- s8 size, *supp_rates;
+ s8 *supp_rates;
int i, band, ret;
struct ath6kl *ar = wmi->parent_dev;
int num_rates;
@@ -2023,18 +2014,13 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
num_chan, ch_list);
}
- size = sizeof(struct wmi_begin_scan_cmd);
-
if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
return -EINVAL;
if (num_chan > WMI_MAX_CHANNELS)
return -EINVAL;
- if (num_chan)
- size += sizeof(u16) * (num_chan - 1);
-
- skb = ath6kl_wmi_get_new_buf(size);
+ skb = ath6kl_wmi_get_new_buf(struct_size(sc, ch_list, num_chan));
if (!skb)
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 784940ba4c90..672014973cee 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -863,7 +863,7 @@ struct wmi_begin_scan_cmd {
u8 num_ch;
/* channels in Mhz */
- __le16 ch_list[1];
+ __le16 ch_list[];
} __packed;
/* wmi_start_scan_cmd is to be deprecated. Use
@@ -889,7 +889,7 @@ struct wmi_start_scan_cmd {
u8 num_ch;
/* channels in Mhz */
- __le16 ch_list[1];
+ __le16 ch_list[];
} __packed;
/*
@@ -1373,7 +1373,7 @@ struct wmi_channel_list_reply {
u8 num_ch;
/* channel in Mhz */
- __le16 ch_list[1];
+ __le16 ch_list[];
} __packed;
/* List of Events (target to host) */
@@ -1545,7 +1545,7 @@ struct wmi_connect_event {
u8 beacon_ie_len;
u8 assoc_req_len;
u8 assoc_resp_len;
- u8 assoc_info[1];
+ u8 assoc_info[];
} __packed;
/* Disconnect Event */
@@ -1596,7 +1596,7 @@ struct wmi_disconnect_event {
u8 disconn_reason;
u8 assoc_resp_len;
- u8 assoc_info[1];
+ u8 assoc_info[];
} __packed;
/*
@@ -1637,7 +1637,7 @@ struct bss_bias {
struct bss_bias_info {
u8 num_bss;
- struct bss_bias bss_bias[0];
+ struct bss_bias bss_bias[];
} __packed;
struct low_rssi_scan_params {
@@ -1720,7 +1720,7 @@ struct wmi_neighbor_info {
struct wmi_neighbor_report_event {
u8 num_neighbors;
- struct wmi_neighbor_info neighbor[0];
+ struct wmi_neighbor_info neighbor[];
} __packed;
/* TKIP MIC Error Event */
@@ -1957,7 +1957,7 @@ union wmi_ap_info {
struct wmi_aplist_event {
u8 ap_list_ver;
u8 num_ap;
- union wmi_ap_info ap_list[1];
+ union wmi_ap_info ap_list[];
} __packed;
/* Developer Commands */
@@ -2051,7 +2051,7 @@ struct wmi_get_keepalive_cmd {
struct wmi_set_appie_cmd {
u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */
u8 ie_len;
- u8 ie_info[0];
+ u8 ie_info[];
} __packed;
struct wmi_set_ie_cmd {
@@ -2059,7 +2059,7 @@ struct wmi_set_ie_cmd {
u8 ie_field; /* enum wmi_ie_field_type */
u8 ie_len;
u8 reserved;
- u8 ie_info[0];
+ u8 ie_info[];
} __packed;
/* Notify the WSC registration status to the target */
@@ -2127,7 +2127,7 @@ struct wmi_add_wow_pattern_cmd {
u8 filter_list_id;
u8 filter_size;
u8 filter_offset;
- u8 filter[0];
+ u8 filter[];
} __packed;
struct wmi_del_wow_pattern_cmd {
@@ -2360,7 +2360,7 @@ struct wmi_send_action_cmd {
__le32 freq;
__le32 wait;
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct wmi_send_mgmt_cmd {
@@ -2369,7 +2369,7 @@ struct wmi_send_mgmt_cmd {
__le32 wait;
__le32 no_cck;
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct wmi_tx_status_event {
@@ -2389,7 +2389,7 @@ struct wmi_set_appie_extended_cmd {
u8 role_id;
u8 mgmt_frm_type;
u8 ie_len;
- u8 ie_info[0];
+ u8 ie_info[];
} __packed;
struct wmi_remain_on_chnl_event {
@@ -2406,18 +2406,18 @@ struct wmi_cancel_remain_on_chnl_event {
struct wmi_rx_action_event {
__le32 freq;
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct wmi_p2p_capabilities_event {
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct wmi_p2p_rx_probe_req_event {
__le32 freq;
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
#define P2P_FLAG_CAPABILITIES_REQ (0x00000001)
@@ -2431,7 +2431,7 @@ struct wmi_get_p2p_info {
struct wmi_p2p_info_event {
__le32 info_req_flags;
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct wmi_p2p_capabilities {
@@ -2450,7 +2450,7 @@ struct wmi_p2p_probe_response_cmd {
__le32 freq;
u8 destination_addr[ETH_ALEN];
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
/* Extended WMI (WMIX)
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index ef6f5ea06c1f..3ccf8cfc6b63 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -1071,8 +1071,9 @@ struct ath_softc {
#endif
#ifdef CONFIG_ATH9K_HWRNG
+ struct hwrng rng_ops;
u32 rng_last;
- struct task_struct *rng_task;
+ char rng_name[sizeof("ath9k_65535")];
#endif
};
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index e6b3cd49ea18..efb7889142d4 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -670,8 +670,6 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
int ath9k_hw_eeprom_init(struct ath_hw *ah)
{
- int status;
-
if (AR_SREV_9300_20_OR_LATER(ah))
ah->eep_ops = &eep_ar9300_ops;
else if (AR_SREV_9287(ah)) {
@@ -685,7 +683,5 @@ int ath9k_hw_eeprom_init(struct ath_hw *ah)
if (!ah->eep_ops->fill_eeprom(ah))
return -EIO;
- status = ah->eep_ops->check_eeprom(ah);
-
- return status;
+ return ah->eep_ops->check_eeprom(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 510e61e97dbc..994ec48b2f66 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -30,6 +30,7 @@ static int htc_issue_send(struct htc_target *target, struct sk_buff* skb,
hdr->endpoint_id = epid;
hdr->flags = flags;
hdr->payload_len = cpu_to_be16(len);
+ memset(hdr->control, 0, sizeof(hdr->control));
status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb);
@@ -272,6 +273,10 @@ int htc_connect_service(struct htc_target *target,
conn_msg->dl_pipeid = endpoint->dl_pipeid;
conn_msg->ul_pipeid = endpoint->ul_pipeid;
+ /* To prevent infoleak */
+ conn_msg->svc_meta_len = 0;
+ conn_msg->pad = 0;
+
ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0);
if (ret)
goto err;
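Both htc_hst.c hunks plug infoleaks: hdr->control and the connect message's svc_meta_len/pad were handed to the device uninitialized, so they could carry stale kernel memory. A generic sketch of the defensive pattern (the struct layout and message ID are illustrative):

	struct wire_msg {		/* illustrative wire format */
		__be16 msg_id;
		u8 svc_meta_len;
		u8 pad;
	} __packed;
	struct wire_msg *msg;

	msg = (struct wire_msg *)skb_put(skb, sizeof(*msg));
	memset(msg, 0, sizeof(*msg));	/* nothing stale reaches the bus */
	msg->msg_id = cpu_to_be16(0x0001);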
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 39d46c203f6b..039bf0c35fbe 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -43,7 +43,7 @@ static bool ath_mci_add_profile(struct ath_common *common,
struct ath_mci_profile_info *info)
{
struct ath_mci_profile_info *entry;
- u8 voice_priority[] = { 110, 110, 110, 112, 110, 110, 114, 116, 118 };
+ static const u8 voice_priority[] = { 110, 110, 110, 112, 110, 110, 114, 116, 118 };
if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) &&
(info->type == MCI_GPM_COEX_PROFILE_VOICE))
diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c
index f9d3d6eedd3c..cb5414265a9b 100644
--- a/drivers/net/wireless/ath/ath9k/rng.c
+++ b/drivers/net/wireless/ath/ath9k/rng.c
@@ -21,11 +21,6 @@
#include "hw.h"
#include "ar9003_phy.h"
-#define ATH9K_RNG_BUF_SIZE 320
-#define ATH9K_RNG_ENTROPY(x) (((x) * 8 * 10) >> 5) /* quality: 10/32 */
-
-static DECLARE_WAIT_QUEUE_HEAD(rng_queue);
-
static int ath9k_rng_data_read(struct ath_softc *sc, u32 *buf, u32 buf_size)
{
int i, j;
@@ -71,61 +66,56 @@ static u32 ath9k_rng_delay_get(u32 fail_stats)
return delay;
}
-static int ath9k_rng_kthread(void *data)
+static int ath9k_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
- int bytes_read;
- struct ath_softc *sc = data;
- u32 *rng_buf;
- u32 delay, fail_stats = 0;
-
- rng_buf = kmalloc_array(ATH9K_RNG_BUF_SIZE, sizeof(u32), GFP_KERNEL);
- if (!rng_buf)
- goto out;
-
- while (!kthread_should_stop()) {
- bytes_read = ath9k_rng_data_read(sc, rng_buf,
- ATH9K_RNG_BUF_SIZE);
- if (unlikely(!bytes_read)) {
- delay = ath9k_rng_delay_get(++fail_stats);
- wait_event_interruptible_timeout(rng_queue,
- kthread_should_stop(),
- msecs_to_jiffies(delay));
- continue;
+ struct ath_softc *sc = container_of(rng, struct ath_softc, rng_ops);
+ u32 fail_stats = 0, word;
+ int bytes_read = 0;
+
+ for (;;) {
+ if (max & ~3UL)
+ bytes_read = ath9k_rng_data_read(sc, buf, max >> 2);
+ if ((max & 3UL) && ath9k_rng_data_read(sc, &word, 1)) {
+ memcpy(buf + bytes_read, &word, max & 3UL);
+ bytes_read += max & 3UL;
+ memzero_explicit(&word, sizeof(word));
}
+ if (!wait || !max || likely(bytes_read) || fail_stats > 110)
+ break;
- fail_stats = 0;
-
- /* sleep until entropy bits under write_wakeup_threshold */
- add_hwgenerator_randomness((void *)rng_buf, bytes_read,
- ATH9K_RNG_ENTROPY(bytes_read));
+ msleep_interruptible(ath9k_rng_delay_get(++fail_stats));
}
- kfree(rng_buf);
-out:
- sc->rng_task = NULL;
-
- return 0;
+ if (wait && !bytes_read && max)
+ bytes_read = -EIO;
+ return bytes_read;
}
void ath9k_rng_start(struct ath_softc *sc)
{
+ static atomic_t serial = ATOMIC_INIT(0);
struct ath_hw *ah = sc->sc_ah;
- if (sc->rng_task)
+ if (sc->rng_ops.read)
return;
if (!AR_SREV_9300_20_OR_LATER(ah))
return;
- sc->rng_task = kthread_run(ath9k_rng_kthread, sc, "ath9k-hwrng");
- if (IS_ERR(sc->rng_task))
- sc->rng_task = NULL;
+ snprintf(sc->rng_name, sizeof(sc->rng_name), "ath9k_%u",
+ (atomic_inc_return(&serial) - 1) & U16_MAX);
+ sc->rng_ops.name = sc->rng_name;
+ sc->rng_ops.read = ath9k_rng_read;
+ sc->rng_ops.quality = 320;
+
+ if (devm_hwrng_register(sc->dev, &sc->rng_ops))
+ sc->rng_ops.read = NULL;
}
void ath9k_rng_stop(struct ath_softc *sc)
{
- if (sc->rng_task) {
- kthread_stop(sc->rng_task);
- sc->rng_task = NULL;
+ if (sc->rng_ops.read) {
+ devm_hwrng_unregister(sc->dev, &sc->rng_ops);
+ sc->rng_ops.read = NULL;
}
}
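The rng.c rework retires the driver's private kthread (and its add_hwgenerator_randomness() feed) in favour of a standard struct hwrng provider, so the hw_random core owns polling, throttling and teardown. A minimal provider sketch of the same shape (all my_* names hypothetical; .quality is entropy per 1024 bits of output, 320 matching the old 10/32 estimate):

	static int my_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
	{
		/* return bytes written, 0 if !wait and nothing ready,
		 * or -errno on failure */
		return my_read_hw_entropy(buf, max);	/* hypothetical */
	}

	static int my_rng_attach(struct device *dev, struct my_priv *priv)
	{
		priv->rng_ops.name = "my_rng";
		priv->rng_ops.read = my_rng_read;
		priv->rng_ops.quality = 320;
		return devm_hwrng_register(dev, &priv->rng_ops);
	}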
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 84a8ce0784b1..ba29b4aebe9f 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -458,7 +458,6 @@ struct ar9170 {
# define CARL9170_HWRNG_CACHE_SIZE CARL9170_MAX_CMD_PAYLOAD_LEN
struct {
struct hwrng rng;
- bool initialized;
char name[30 + 1];
u16 cache[CARL9170_HWRNG_CACHE_SIZE / sizeof(u16)];
unsigned int cache_idx;
diff --git a/drivers/net/wireless/ath/carl9170/fwdesc.h b/drivers/net/wireless/ath/carl9170/fwdesc.h
index 503b21abbba5..10acb6ad30d0 100644
--- a/drivers/net/wireless/ath/carl9170/fwdesc.h
+++ b/drivers/net/wireless/ath/carl9170/fwdesc.h
@@ -149,7 +149,7 @@ struct carl9170fw_fix_entry {
struct carl9170fw_fix_desc {
struct carl9170fw_desc_head head;
- struct carl9170fw_fix_entry data[0];
+ struct carl9170fw_fix_entry data[];
} __packed;
#define CARL9170FW_FIX_DESC_SIZE \
(sizeof(struct carl9170fw_fix_desc))
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 49f7ee1c912b..76e84adf57c1 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1412,7 +1412,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
return -EOPNOTSUPP;
tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!tid_info)
return -ENOMEM;
@@ -1494,7 +1494,7 @@ static int carl9170_register_wps_button(struct ar9170 *ar)
if (!(ar->features & CARL9170_WPS_BUTTON))
return 0;
- input = input_allocate_device();
+ input = devm_input_allocate_device(&ar->udev->dev);
if (!input)
return -ENOMEM;
@@ -1512,10 +1512,8 @@ static int carl9170_register_wps_button(struct ar9170 *ar)
input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
err = input_register_device(input);
- if (err) {
- input_free_device(input);
+ if (err)
return err;
- }
ar->wps.pbc = input;
return 0;
@@ -1539,7 +1537,7 @@ static int carl9170_rng_get(struct ar9170 *ar)
BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);
- if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
+ if (!IS_ACCEPTING_CMD(ar))
return -EAGAIN;
count = ARRAY_SIZE(ar->rng.cache);
@@ -1585,14 +1583,6 @@ static int carl9170_rng_read(struct hwrng *rng, u32 *data)
return sizeof(u16);
}
-static void carl9170_unregister_hwrng(struct ar9170 *ar)
-{
- if (ar->rng.initialized) {
- hwrng_unregister(&ar->rng.rng);
- ar->rng.initialized = false;
- }
-}
-
static int carl9170_register_hwrng(struct ar9170 *ar)
{
int err;
@@ -1603,25 +1593,14 @@ static int carl9170_register_hwrng(struct ar9170 *ar)
ar->rng.rng.data_read = carl9170_rng_read;
ar->rng.rng.priv = (unsigned long)ar;
- if (WARN_ON(ar->rng.initialized))
- return -EALREADY;
-
- err = hwrng_register(&ar->rng.rng);
+ err = devm_hwrng_register(&ar->udev->dev, &ar->rng.rng);
if (err) {
dev_err(&ar->udev->dev, "Failed to register the random "
"number generator (%d)\n", err);
return err;
}
- ar->rng.initialized = true;
-
- err = carl9170_rng_get(ar);
- if (err) {
- carl9170_unregister_hwrng(ar);
- return err;
- }
-
- return 0;
+ return carl9170_rng_get(ar);
}
#endif /* CONFIG_CARL9170_HWRNG */
@@ -1914,7 +1893,7 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
WARN_ON(!(tx_streams >= 1 && tx_streams <=
IEEE80211_HT_MCS_TX_MAX_STREAMS));
- tx_params = (tx_streams - 1) <<
+ tx_params |= (tx_streams - 1) <<
IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
@@ -1937,7 +1916,8 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
if (!bands)
return -EINVAL;
- ar->survey = kcalloc(chans, sizeof(struct survey_info), GFP_KERNEL);
+ ar->survey = devm_kcalloc(&ar->udev->dev, chans,
+ sizeof(struct survey_info), GFP_KERNEL);
if (!ar->survey)
return -ENOMEM;
ar->num_channels = chans;
@@ -1964,11 +1944,7 @@ int carl9170_register(struct ar9170 *ar)
struct ath_regulatory *regulatory = &ar->common.regulatory;
int err = 0, i;
- if (WARN_ON(ar->mem_bitmap))
- return -EINVAL;
-
- ar->mem_bitmap = bitmap_zalloc(ar->fw.mem_blocks, GFP_KERNEL);
-
+ ar->mem_bitmap = devm_bitmap_zalloc(&ar->udev->dev, ar->fw.mem_blocks, GFP_KERNEL);
if (!ar->mem_bitmap)
return -ENOMEM;
@@ -2057,17 +2033,6 @@ void carl9170_unregister(struct ar9170 *ar)
carl9170_debugfs_unregister(ar);
#endif /* CONFIG_CARL9170_DEBUGFS */
-#ifdef CONFIG_CARL9170_WPC
- if (ar->wps.pbc) {
- input_unregister_device(ar->wps.pbc);
- ar->wps.pbc = NULL;
- }
-#endif /* CONFIG_CARL9170_WPC */
-
-#ifdef CONFIG_CARL9170_HWRNG
- carl9170_unregister_hwrng(ar);
-#endif /* CONFIG_CARL9170_HWRNG */
-
carl9170_cancel_worker(ar);
cancel_work_sync(&ar->restart_work);
@@ -2082,12 +2047,6 @@ void carl9170_free(struct ar9170 *ar)
kfree_skb(ar->rx_failover);
ar->rx_failover = NULL;
- bitmap_free(ar->mem_bitmap);
- ar->mem_bitmap = NULL;
-
- kfree(ar->survey);
- ar->survey = NULL;
-
mutex_destroy(&ar->mutex);
ieee80211_free_hw(ar->hw);
diff --git a/drivers/net/wireless/ath/carl9170/wlan.h b/drivers/net/wireless/ath/carl9170/wlan.h
index bb73553fd7c2..0a4e42e806b9 100644
--- a/drivers/net/wireless/ath/carl9170/wlan.h
+++ b/drivers/net/wireless/ath/carl9170/wlan.h
@@ -327,7 +327,7 @@ struct _carl9170_tx_superdesc {
struct _carl9170_tx_superframe {
struct _carl9170_tx_superdesc s;
struct _ar9170_tx_hwdesc f;
- u8 frame_data[0];
+ u8 frame_data[];
} __packed __aligned(4);
#define CARL9170_TX_SUPERDESC_LEN 24
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index 75cb53a3ec15..27f4d74a41c8 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -197,7 +197,7 @@ static void channel_detector_exit(struct dfs_pattern_detector *dpd,
static struct channel_detector *
channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
{
- u32 sz, i;
+ u32 i;
struct channel_detector *cd;
cd = kmalloc(sizeof(*cd), GFP_ATOMIC);
@@ -206,8 +206,8 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
INIT_LIST_HEAD(&cd->head);
cd->freq = freq;
- sz = sizeof(cd->detectors) * dpd->num_radar_types;
- cd->detectors = kzalloc(sz, GFP_ATOMIC);
+ cd->detectors = kmalloc_array(dpd->num_radar_types,
+ sizeof(*cd->detectors), GFP_ATOMIC);
if (cd->detectors == NULL)
goto fail;
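The allocation change is more than style. In the old code sizeof(cd->detectors) measured the pointer itself and only matched the element size by accident, and the u32 multiplication could in principle wrap and allocate a short buffer. kmalloc_array() fails cleanly on overflow and names the element type directly:

	/* before: size correct only by accident, silent wrap possible */
	sz = sizeof(cd->detectors) * dpd->num_radar_types;
	cd->detectors = kzalloc(sz, GFP_ATOMIC);

	/* after: NULL on overflow, element size stated explicitly */
	cd->detectors = kmalloc_array(dpd->num_radar_types,
				      sizeof(*cd->detectors), GFP_ATOMIC);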
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index b2400e2417a5..f15e7bd690b5 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -667,14 +667,14 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
/*
* Some users have reported their EEPROM programmed with
- * 0x8000 or 0x0 set, this is not a supported regulatory
- * domain but since we have more than one user with it we
- * need a solution for them. We default to 0x64, which is
- * the default Atheros world regulatory domain.
+ * 0x8000 set, this is not a supported regulatory domain
+ * but since we have more than one user with it we need
+ * a solution for them. We default to 0x64, which is the
+ * default Atheros world regulatory domain.
*/
static void ath_regd_sanitize(struct ath_regulatory *reg)
{
- if (reg->current_rd != COUNTRY_ERD_FLAG && reg->current_rd != 0)
+ if (reg->current_rd != COUNTRY_ERD_FLAG)
return;
printk(KERN_DEBUG "ath: EEPROM regdomain sanitized\n");
reg->current_rd = 0x64;
diff --git a/drivers/net/wireless/ath/spectral_common.h b/drivers/net/wireless/ath/spectral_common.h
index e14f374f97d4..fe187c1fbeb0 100644
--- a/drivers/net/wireless/ath/spectral_common.h
+++ b/drivers/net/wireless/ath/spectral_common.h
@@ -108,7 +108,7 @@ struct fft_sample_ath10k {
u8 avgpwr_db;
u8 max_exp;
- u8 data[0];
+ u8 data[];
} __packed;
struct fft_sample_ath11k {
@@ -123,7 +123,7 @@ struct fft_sample_ath11k {
__be32 tsf;
__be32 noise;
- u8 data[0];
+ u8 data[];
} __packed;
#endif /* SPECTRAL_COMMON_H */
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 9575d7373bf2..95ea7d040d8c 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -331,6 +331,7 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
INIT_LIST_HEAD(&wcn->vif_list);
spin_lock_init(&wcn->dxe_lock);
+ spin_lock_init(&wcn->survey_lock);
return 0;
@@ -392,11 +393,39 @@ static void wcn36xx_change_opchannel(struct wcn36xx *wcn, int ch)
{
struct ieee80211_vif *vif = NULL;
struct wcn36xx_vif *tmp;
+ struct ieee80211_supported_band *band;
+ struct ieee80211_channel *channel = NULL;
+ unsigned long flags;
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(wcn->hw->wiphy->bands); i++) {
+ band = wcn->hw->wiphy->bands[i];
+ if (!band)
+ break;
+ for (j = 0; j < band->n_channels; j++) {
+ if (HW_VALUE_CHANNEL(band->channels[j].hw_value) == ch) {
+ channel = &band->channels[j];
+ break;
+ }
+ }
+ if (channel)
+ break;
+ }
+
+ if (!channel) {
+ wcn36xx_err("Cannot tune to channel %d\n", ch);
+ return;
+ }
+
+ spin_lock_irqsave(&wcn->survey_lock, flags);
+ wcn->band = band;
+ wcn->channel = channel;
+ spin_unlock_irqrestore(&wcn->survey_lock, flags);
list_for_each_entry(tmp, &wcn->vif_list, list) {
vif = wcn36xx_priv_to_vif(tmp);
wcn36xx_smd_switch_channel(wcn, vif, ch);
}
}
static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
@@ -1326,6 +1357,49 @@ static void wcn36xx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
}
+static int wcn36xx_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct ieee80211_supported_band *sband;
+ struct wcn36xx_chan_survey *chan_survey;
+ int band_idx;
+ unsigned long flags;
+
+ sband = wcn->hw->wiphy->bands[NL80211_BAND_2GHZ];
+ band_idx = idx;
+ if (band_idx >= sband->n_channels) {
+ band_idx -= sband->n_channels;
+ sband = wcn->hw->wiphy->bands[NL80211_BAND_5GHZ];
+ }
+
+ if (!sband || band_idx >= sband->n_channels)
+ return -ENOENT;
+
+ spin_lock_irqsave(&wcn->survey_lock, flags);
+
+ chan_survey = &wcn->chan_survey[idx];
+ survey->channel = &sband->channels[band_idx];
+ survey->noise = chan_survey->rssi - chan_survey->snr;
+ survey->filled = 0;
+
+ if (chan_survey->rssi > -100 && chan_survey->rssi < 0)
+ survey->filled |= SURVEY_INFO_NOISE_DBM;
+
+ if (survey->channel == wcn->channel)
+ survey->filled |= SURVEY_INFO_IN_USE;
+
+ spin_unlock_irqrestore(&wcn->survey_lock, flags);
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "ch %d rssi %d snr %d noise %d filled %x freq %d\n",
+ HW_VALUE_CHANNEL(survey->channel->hw_value),
+ chan_survey->rssi, chan_survey->snr, survey->noise,
+ survey->filled, survey->channel->center_freq);
+
+ return 0;
+}
+
static const struct ieee80211_ops wcn36xx_ops = {
.start = wcn36xx_start,
.stop = wcn36xx_stop,
@@ -1354,6 +1428,7 @@ static const struct ieee80211_ops wcn36xx_ops = {
.ipv6_addr_change = wcn36xx_ipv6_addr_change,
#endif
.flush = wcn36xx_flush,
+ .get_survey = wcn36xx_get_survey,
CFG80211_TESTMODE_CMD(wcn36xx_tm_cmd)
};
@@ -1446,25 +1521,20 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
{
struct device_node *mmio_node;
struct device_node *iris_node;
- struct resource *res;
int index;
int ret;
/* Set TX IRQ */
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "tx");
- if (!res) {
- wcn36xx_err("failed to get tx_irq\n");
- return -ENOENT;
- }
- wcn->tx_irq = res->start;
+ ret = platform_get_irq_byname(pdev, "tx");
+ if (ret < 0)
+ return ret;
+ wcn->tx_irq = ret;
/* Set RX IRQ */
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "rx");
- if (!res) {
- wcn36xx_err("failed to get rx_irq\n");
- return -ENOENT;
- }
- wcn->rx_irq = res->start;
+ ret = platform_get_irq_byname(pdev, "rx");
+ if (ret < 0)
+ return ret;
+ wcn->rx_irq = ret;
/* Acquire SMSM tx enable handle */
wcn->tx_enable_state = qcom_smem_state_get(&pdev->dev,
@@ -1513,6 +1583,9 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
if (iris_node) {
if (of_device_is_compatible(iris_node, "qcom,wcn3620"))
wcn->rf_id = RF_IRIS_WCN3620;
+ if (of_device_is_compatible(iris_node, "qcom,wcn3660") ||
+ of_device_is_compatible(iris_node, "qcom,wcn3660b"))
+ wcn->rf_id = RF_IRIS_WCN3660;
if (of_device_is_compatible(iris_node, "qcom,wcn3680"))
wcn->rf_id = RF_IRIS_WCN3680;
of_node_put(iris_node);
@@ -1535,6 +1608,7 @@ static int wcn36xx_probe(struct platform_device *pdev)
void *wcnss;
int ret;
const u8 *addr;
+ int n_channels;
wcn36xx_dbg(WCN36XX_DBG_MAC, "platform probe\n");
@@ -1562,6 +1636,15 @@
goto out_wq;
}
+ n_channels = wcn_band_2ghz.n_channels + wcn_band_5ghz.n_channels;
+ wcn->chan_survey = devm_kcalloc(wcn->dev, n_channels,
+ sizeof(struct wcn36xx_chan_survey),
+ GFP_KERNEL);
+ if (!wcn->chan_survey) {
+ ret = -ENOMEM;
+ goto out_wq;
+ }
+
ret = dma_set_mask_and_coherent(wcn->dev, DMA_BIT_MASK(32));
if (ret < 0) {
wcn36xx_err("failed to set DMA mask: %d\n", ret);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index caeb68901326..59ad332156ae 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -3347,7 +3347,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
case WCN36XX_HAL_PRINT_REG_INFO_IND:
case WCN36XX_HAL_SCAN_OFFLOAD_IND:
- msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_ATOMIC);
+ msg_ind = kmalloc(struct_size(msg_ind, msg, len), GFP_ATOMIC);
if (!msg_ind) {
wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
msg_header->msg_type);
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index dd58dde8c836..df749b114568 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -23,6 +23,11 @@ static inline int get_rssi0(struct wcn36xx_rx_bd *bd)
return 100 - ((bd->phy_stat0 >> 24) & 0xff);
}
+static inline int get_snr(struct wcn36xx_rx_bd *bd)
+{
+ return ((bd->phy_stat1 >> 24) & 0xff);
+}
+
struct wcn36xx_rate {
u16 bitrate;
u16 mcs_or_legacy_index;
@@ -266,6 +271,34 @@ static void __skb_queue_purge_irq(struct sk_buff_head *list)
dev_kfree_skb_irq(skb);
}
+static void wcn36xx_update_survey(struct wcn36xx *wcn, int rssi, int snr,
+ int band, int freq)
+{
+ struct ieee80211_channel *channel;
+ struct ieee80211_supported_band *sband;
+ int idx;
+ int i;
+
+ idx = 0;
+ if (band == NL80211_BAND_5GHZ)
+ idx = wcn->hw->wiphy->bands[NL80211_BAND_2GHZ]->n_channels;
+
+ sband = wcn->hw->wiphy->bands[band];
+ channel = sband->channels;
+
+ for (i = 0; i < sband->n_channels; i++, channel++) {
+ if (channel->center_freq == freq) {
+ idx += i;
+ break;
+ }
+ }
+
+ spin_lock(&wcn->survey_lock);
+ wcn->chan_survey[idx].rssi = rssi;
+ wcn->chan_survey[idx].snr = snr;
+ spin_unlock(&wcn->survey_lock);
+}
+
int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
{
struct ieee80211_rx_status status;
@@ -343,6 +376,9 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
status.freq = WCN36XX_CENTER_FREQ(wcn);
}
+ wcn36xx_update_survey(wcn, status.signal, get_snr(bd),
+ status.band, status.freq);
+
if (bd->rate_id < ARRAY_SIZE(wcn36xx_rate_table)) {
rate = &wcn36xx_rate_table[bd->rate_id];
status.encoding = rate->encoding;
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index fbd0558c2c19..9aa08b636d08 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -97,6 +97,7 @@ enum wcn36xx_ampdu_state {
#define RF_UNKNOWN 0x0000
#define RF_IRIS_WCN3620 0x3620
+#define RF_IRIS_WCN3660 0x3660
#define RF_IRIS_WCN3680 0x3680
static inline void buff_to_be(u32 *buf, size_t len)
@@ -194,7 +195,14 @@ struct wcn36xx_sta {
enum wcn36xx_ampdu_state ampdu_state[16];
int non_agg_frame_ct;
};
+
struct wcn36xx_dxe_ch;
+
+struct wcn36xx_chan_survey {
+ s8 rssi;
+ u8 snr;
+};
+
struct wcn36xx {
struct ieee80211_hw *hw;
struct device *dev;
@@ -281,6 +289,12 @@ struct wcn36xx {
/* Debug file system entry */
struct wcn36xx_dfs_entry dfs;
#endif /* CONFIG_WCN36XX_DEBUGFS */
+
+ struct ieee80211_supported_band *band;
+ struct ieee80211_channel *channel;
+
+ spinlock_t survey_lock; /* protects chan_survey */
+ struct wcn36xx_chan_survey *chan_survey;
};
static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
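The survey bookkeeping relies on one flat array indexed 2.4 GHz channels first, then 5 GHz, which is why the RX-path writer in txrx.c and wcn36xx_get_survey() in main.c compute the index the same way. A helper-shaped sketch of the scheme (hypothetical function; assumes both bands are registered):

	static int wcn36xx_chan_survey_index(struct wcn36xx *wcn,
					     enum nl80211_band band,
					     int chan_idx)
	{
		int idx = chan_idx;

		if (band == NL80211_BAND_5GHZ)
			idx += wcn->hw->wiphy->bands[NL80211_BAND_2GHZ]->n_channels;
		return idx;
	}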
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index cc830c795b33..5704defd7be1 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -958,7 +958,7 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
if (gro)
napi_gro_receive(&wil->napi_rx, skb);
else
- netif_rx_ni(skb);
+ netif_rx(skb);
}
ndev->stats.rx_packets++;
stats->rx_packets++;
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index dd8abbb28849..98b4c189eecc 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1199,7 +1199,7 @@ static void wmi_evt_eapol_rx(struct wil6210_vif *vif, int id, void *d, int len)
eth->h_proto = cpu_to_be16(ETH_P_PAE);
skb_put_data(skb, evt->eapol, eapol_len);
skb->protocol = eth_type_trans(skb, ndev);
- if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
+ if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += sz;
if (stats) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 3984fd7d918e..2c95a08a5871 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -397,9 +397,9 @@ brcmf_proto_bcdc_add_tdls_peer(struct brcmf_pub *drvr, int ifidx,
}
static void brcmf_proto_bcdc_rxreorder(struct brcmf_if *ifp,
- struct sk_buff *skb, bool inirq)
+ struct sk_buff *skb)
{
- brcmf_fws_rxreorder(ifp, skb, inirq);
+ brcmf_fws_rxreorder(ifp, skb);
}
static void
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index ba52318615ae..f0ad1e23f3c8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -16,6 +16,7 @@
#include <brcmu_utils.h>
#include <defs.h>
#include <brcmu_wifi.h>
+#include <brcm_hw_ids.h>
#include "core.h"
#include "debug.h"
#include "tracepoint.h"
@@ -4622,7 +4623,7 @@ exit:
s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif)
{
- s32 pktflags[] = {
+ static const s32 pktflags[] = {
BRCMF_VNDR_IE_PRBREQ_FLAG,
BRCMF_VNDR_IE_PRBRSP_FLAG,
BRCMF_VNDR_IE_BEACON_FLAG
@@ -7476,6 +7477,16 @@ int brcmf_cfg80211_wait_vif_event(struct brcmf_cfg80211_info *cfg,
vif_event_equals(event, action), timeout);
}
+static bool brcmf_use_iso3166_ccode_fallback(struct brcmf_pub *drvr)
+{
+ switch (drvr->bus_if->chip) {
+ case BRCM_CC_4345_CHIP_ID:
+ return true;
+ default:
+ return false;
+ }
+}
+
static s32 brcmf_translate_country_code(struct brcmf_pub *drvr, char alpha2[2],
struct brcmf_fil_country_le *ccreq)
{
@@ -7484,18 +7495,28 @@ static s32 brcmf_translate_country_code(struct brcmf_pub *drvr, char alpha2[2],
s32 found_index;
int i;
- country_codes = drvr->settings->country_codes;
- if (!country_codes) {
- brcmf_dbg(TRACE, "No country codes configured for device\n");
- return -EINVAL;
- }
-
if ((alpha2[0] == ccreq->country_abbrev[0]) &&
(alpha2[1] == ccreq->country_abbrev[1])) {
brcmf_dbg(TRACE, "Country code already set\n");
return -EAGAIN;
}
+ country_codes = drvr->settings->country_codes;
+ if (!country_codes) {
+ if (brcmf_use_iso3166_ccode_fallback(drvr)) {
+ brcmf_dbg(TRACE, "No country codes configured for device, using ISO3166 code and 0 rev\n");
+ memset(ccreq, 0, sizeof(*ccreq));
+ ccreq->country_abbrev[0] = alpha2[0];
+ ccreq->country_abbrev[1] = alpha2[1];
+ ccreq->ccode[0] = alpha2[0];
+ ccreq->ccode[1] = alpha2[1];
+ return 0;
+ }
+
+ brcmf_dbg(TRACE, "No country codes configured for device\n");
+ return -EINVAL;
+ }
+
found_index = -1;
for (i = 0; i < country_codes->table_size; i++) {
cc = &country_codes->table[i];
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 1ee49f9e325d..4ec7773b6906 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -704,6 +704,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
{
switch (ci->pub.chip) {
case BRCM_CC_4345_CHIP_ID:
+ case BRCM_CC_43454_CHIP_ID:
return 0x198000;
case BRCM_CC_4335_CHIP_ID:
case BRCM_CC_4339_CHIP_ID:
@@ -1401,6 +1402,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
case BRCM_CC_4354_CHIP_ID:
case BRCM_CC_4356_CHIP_ID:
case BRCM_CC_4345_CHIP_ID:
+ case BRCM_CC_43454_CHIP_ID:
/* explicitly check SR engine enable bit */
pmu_cc3_mask = BIT(2);
fallthrough;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index fed9cd5f29a2..26fab4bee22c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -400,7 +400,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}
-void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
/* Most of Broadcom's firmwares send 802.11f ADD frame every time a new
* STA connects to the AP interface. This is an obsoleted standard most
@@ -423,15 +423,7 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
ifp->ndev->stats.rx_packets++;
brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
- if (inirq) {
- netif_rx(skb);
- } else {
- /* If the receive is not processed inside an ISR,
- * the softirqd must be woken explicitly to service
- * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
- */
- netif_rx_ni(skb);
- }
+ netif_rx(skb);
}
void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
@@ -480,7 +472,7 @@ void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = htons(ETH_P_802_2);
- brcmf_netif_rx(ifp, skb, false);
+ brcmf_netif_rx(ifp, skb);
}
static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
@@ -515,7 +507,7 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event,
return;
if (brcmf_proto_is_reorder_skb(skb)) {
- brcmf_proto_rxreorder(ifp, skb, inirq);
+ brcmf_proto_rxreorder(ifp, skb);
} else {
/* Process special event packets */
if (handle_event) {
@@ -524,7 +516,7 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event,
brcmf_fweh_process_skb(ifp->drvr, skb,
BCMILCP_SUBTYPE_VENDOR_LONG, gfp);
}
- brcmf_netif_rx(ifp, skb, inirq);
+ brcmf_netif_rx(ifp, skb);
}
}
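The inirq flag existed only to choose between netif_rx() and netif_rx_ni();
since v5.18 netif_rx() disables bottom halves itself and may be called from
any context, so a single call serves everywhere and the plumbing can go. A
minimal sketch of the resulting delivery step (the helper name is
illustrative):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* Sketch: deliver a received frame to the stack; no IRQ/non-IRQ split
 * is needed anymore, netif_rx() is safe from any context. */
static void rx_deliver_sketch(struct net_device *ndev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, ndev);
	netif_rx(skb);
}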
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 8212c9de14f1..340346c122d3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -208,7 +208,7 @@ void brcmf_remove_interface(struct brcmf_if *ifp, bool locked);
void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state);
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
-void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_net_detach(struct net_device *ndev, bool locked);
int brcmf_net_mon_attach(struct brcmf_if *ifp);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 7c68d9849324..d2ac844e1e9f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -248,7 +248,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
brcmf_feat_firmware_capabilities(ifp);
memset(&gscan_cfg, 0, sizeof(gscan_cfg));
if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID &&
- drvr->bus_if->chip != BRCM_CC_4345_CHIP_ID)
+ drvr->bus_if->chip != BRCM_CC_4345_CHIP_ID &&
+ drvr->bus_if->chip != BRCM_CC_43454_CHIP_ID)
brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN,
"pfn_gscan_cfg",
&gscan_cfg, sizeof(gscan_cfg));
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index d99140960a82..dcbe55b56e43 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -207,6 +207,8 @@ static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
size = BRCMF_FW_MAX_NVRAM_SIZE;
else
size = data_len;
+ /* Add space for properties we may add */
+ size += strlen(BRCMF_FW_DEFAULT_BOARDREV) + 1;
/* Alloc for extra 0 byte + roundup by 4 + length field */
size += 1 + 3 + sizeof(u32);
nvp->nvram = kzalloc(size, GFP_KERNEL);
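The buffer has to be sized before parsing because a default boardrev property
may be appended later. A sketch of the sizing arithmetic, assuming
BRCMF_FW_DEFAULT_BOARDREV is the driver's default-property string constant:

#include <linux/string.h>
#include <linux/types.h>

/* Sketch: worst-case NVRAM buffer size, reserving room for an appended
 * default boardrev property plus terminator, padding and length field. */
static size_t nvram_buf_size_sketch(size_t data_len)
{
	size_t size = data_len;

	size += strlen(BRCMF_FW_DEFAULT_BOARDREV) + 1; /* appended default */
	size += 1 + 3 + sizeof(u32); /* extra NUL + round up to 4 + length */
	return size;
}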
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
index d5578ca681bb..72fe8bce6eaf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
@@ -192,7 +192,7 @@ brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
}
static u32
-brcmf_create_iovar(char *name, const char *data, u32 datalen,
+brcmf_create_iovar(const char *name, const char *data, u32 datalen,
char *buf, u32 buflen)
{
u32 len;
@@ -213,7 +213,7 @@ brcmf_create_iovar(char *name, const char *data, u32 datalen,
s32
-brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
+brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *data,
u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -241,7 +241,7 @@ brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
}
s32
-brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
+brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -272,7 +272,7 @@ brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
}
s32
-brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data)
+brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data)
{
__le32 data_le = cpu_to_le32(data);
@@ -280,7 +280,7 @@ brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data)
}
s32
-brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data)
+brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
{
__le32 data_le = cpu_to_le32(*data);
s32 err;
@@ -292,7 +292,7 @@ brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data)
}
static u32
-brcmf_create_bsscfg(s32 bsscfgidx, char *name, char *data, u32 datalen,
+brcmf_create_bsscfg(s32 bsscfgidx, const char *name, char *data, u32 datalen,
char *buf, u32 buflen)
{
const s8 *prefix = "bsscfg:";
@@ -337,7 +337,7 @@ brcmf_create_bsscfg(s32 bsscfgidx, char *name, char *data, u32 datalen,
}
s32
-brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name,
+brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name,
void *data, u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -366,7 +366,7 @@ brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name,
}
s32
-brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name,
+brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
void *data, u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -396,7 +396,7 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name,
}
s32
-brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data)
+brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data)
{
__le32 data_le = cpu_to_le32(data);
@@ -405,7 +405,7 @@ brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data)
}
s32
-brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data)
+brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
{
__le32 data_le = cpu_to_le32(*data);
s32 err;
@@ -417,7 +417,7 @@ brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data)
return err;
}
-static u32 brcmf_create_xtlv(char *name, u16 id, char *data, u32 len,
+static u32 brcmf_create_xtlv(const char *name, u16 id, char *data, u32 len,
char *buf, u32 buflen)
{
u32 iolen;
@@ -438,7 +438,7 @@ static u32 brcmf_create_xtlv(char *name, u16 id, char *data, u32 len,
return iolen;
}
-s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, char *name, u16 id,
+s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -466,7 +466,7 @@ s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, char *name, u16 id,
return err;
}
-s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, char *name, u16 id,
+s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -495,7 +495,7 @@ s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, char *name, u16 id,
return err;
}
-s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, char *name, u16 id, u32 data)
+s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data)
{
__le32 data_le = cpu_to_le32(data);
@@ -503,7 +503,7 @@ s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, char *name, u16 id, u32 data)
sizeof(data_le));
}
-s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, char *name, u16 id, u32 *data)
+s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data)
{
__le32 data_le = cpu_to_le32(*data);
s32 err;
@@ -514,12 +514,12 @@ s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, char *name, u16 id, u32 *data)
return err;
}
-s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, char *name, u16 id, u8 *data)
+s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data)
{
return brcmf_fil_xtlv_data_get(ifp, name, id, data, sizeof(*data));
}
-s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, char *name, u16 id, u16 *data)
+s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data)
{
__le16 data_le = cpu_to_le16(*data);
s32 err;
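Const-qualifying the iovar name parameters documents that the helpers never
modify the string and lets callers pass literals cleanly. A sketch of a
typical call, using "mpc", an iovar the driver already uses elsewhere (the
wrapper function itself is illustrative):

#include "core.h"	/* struct brcmf_if */
#include "fwil.h"	/* brcmf_fil_iovar_int_set() */

/* Sketch: with const char *name, a string literal needs no cast and the
 * compiler rejects accidental writes through the parameter. */
static s32 disable_mpc_sketch(struct brcmf_if *ifp)
{
	return brcmf_fil_iovar_int_set(ifp, "mpc", 0);
}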
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
index cb26f8c59c21..bc693157c4b1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
@@ -84,26 +84,26 @@ s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
-s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
+s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *data,
u32 len);
-s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
+s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
u32 len);
-s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data);
-s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data);
+s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data);
+s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data);
-s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name, void *data,
+s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name, void *data,
u32 len);
-s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name, void *data,
+s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name, void *data,
u32 len);
-s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data);
-s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data);
-s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, char *name, u16 id,
+s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data);
+s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data);
+s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len);
-s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, char *name, u16 id,
+s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len);
-s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, char *name, u16 id, u32 data);
-s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, char *name, u16 id, u32 *data);
-s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, char *name, u16 id, u8 *data);
-s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, char *name, u16 id, u16 *data);
+s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data);
+s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data);
+s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data);
+s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data);
#endif /* _fwil_h_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index e69d1e56996f..c87b829adb0d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -1068,7 +1068,7 @@ struct brcmf_mkeep_alive_pkt_le {
__le32 period_msec;
__le16 len_bytes;
u8 keep_alive_id;
- u8 data[0];
+ u8 data[];
} __packed;
#endif /* FWIL_TYPES_H_ */
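Replacing the one-element placeholder with a C99 flexible array member means
sizeof() now covers only the fixed header, and allocations pair naturally
with struct_size(). A sketch under that assumption (the struct is a
simplified stand-in, not the driver's):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct pkt_sketch {
	__le16 len_bytes;
	u8 data[];	/* flexible array member, excluded from sizeof() */
};

/* struct_size() computes sizeof(struct pkt_sketch) + n bytes of data,
 * with integer-overflow checking built in. */
static struct pkt_sketch *pkt_alloc_sketch(size_t n)
{
	struct pkt_sketch *p = kzalloc(struct_size(p, data, n), GFP_KERNEL);

	if (p)
		p->len_bytes = cpu_to_le16((u16)n);
	return p;
}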
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 19b0f318f93e..d58525ebe618 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -1664,7 +1664,7 @@ static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
rfi->pend_pkts -= skb_queue_len(skb_list);
}
-void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
{
struct brcmf_pub *drvr = ifp->drvr;
u8 *reorder_data;
@@ -1682,7 +1682,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
/* validate flags and flow id */
if (flags == 0xFF) {
bphy_err(drvr, "invalid flags...so ignore this packet\n");
- brcmf_netif_rx(ifp, pkt, inirq);
+ brcmf_netif_rx(ifp, pkt);
return;
}
@@ -1694,7 +1694,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
if (rfi == NULL) {
brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
flow_id);
- brcmf_netif_rx(ifp, pkt, inirq);
+ brcmf_netif_rx(ifp, pkt);
return;
}
@@ -1719,7 +1719,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
rfi = kzalloc(buf_size, GFP_ATOMIC);
if (rfi == NULL) {
bphy_err(drvr, "failed to alloc buffer\n");
- brcmf_netif_rx(ifp, pkt, inirq);
+ brcmf_netif_rx(ifp, pkt);
return;
}
@@ -1833,7 +1833,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
netif_rx:
skb_queue_walk_safe(&reorder_list, pkt, pnext) {
__skb_unlink(pkt, &reorder_list);
- brcmf_netif_rx(ifp, pkt, inirq);
+ brcmf_netif_rx(ifp, pkt);
}
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
index 50e424b5880d..b16a9d1c0508 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
@@ -42,6 +42,6 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp);
void brcmf_fws_del_interface(struct brcmf_if *ifp);
void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked);
-void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb);
#endif /* FWSIGNAL_H_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index 7c8e08ee8f0f..b2d0f7570aa9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -536,8 +536,7 @@ static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
return -ENODEV;
}
-static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb,
- bool inirq)
+static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
{
}
@@ -1191,7 +1190,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
}
skb->protocol = eth_type_trans(skb, ifp->ndev);
- brcmf_netif_rx(ifp, skb, false);
+ brcmf_netif_rx(ifp, skb);
}
static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index 513c7e6421b2..8623bde5eb70 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -71,16 +71,18 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
/* Set board-type to the first string of the machine compatible prop */
root = of_find_node_by_path("/");
if (root) {
- int i, len;
+ int i;
char *board_type;
const char *tmp;
of_property_read_string_index(root, "compatible", 0, &tmp);
/* get rid of '/' in the compatible string to be able to find the FW */
- len = strlen(tmp) + 1;
- board_type = devm_kzalloc(dev, len, GFP_KERNEL);
- strscpy(board_type, tmp, len);
+ board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
+ if (!board_type) {
+ of_node_put(root);
+ return;
+ }
	for (i = 0; board_type[i]; i++) {
if (board_type[i] == '/')
board_type[i] = '-';
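devm_kstrdup() collapses the strlen/alloc/copy sequence and ties the string's
lifetime to the device; the patch also adds the allocation-failure check the
old code lacked. A sketch of the combined result (names are illustrative, and
the loop condition is written as plainly intended):

#include <linux/device.h>
#include <linux/slab.h>

/* Sketch: duplicate a DT "compatible" string with device-managed
 * lifetime, then replace '/' so it can form a firmware file name. */
static char *dup_board_type_sketch(struct device *dev, const char *compat)
{
	char *board_type = devm_kstrdup(dev, compat, GFP_KERNEL);
	int i;

	if (!board_type)
		return NULL;	/* allocation failure must be handled */

	for (i = 0; board_type[i]; i++)
		if (board_type[i] == '/')
			board_type[i] = '-';
	return board_type;
}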
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 4735063e4c03..479041f070f9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -90,8 +90,8 @@
#define P2PSD_ACTION_CATEGORY 0x04 /* Public action frame */
#define P2PSD_ACTION_ID_GAS_IREQ 0x0a /* GAS Initial Request AF */
#define P2PSD_ACTION_ID_GAS_IRESP 0x0b /* GAS Initial Response AF */
-#define P2PSD_ACTION_ID_GAS_CREQ 0x0c /* GAS Comback Request AF */
-#define P2PSD_ACTION_ID_GAS_CRESP 0x0d /* GAS Comback Response AF */
+#define P2PSD_ACTION_ID_GAS_CREQ 0x0c /* GAS Comeback Request AF */
+#define P2PSD_ACTION_ID_GAS_CRESP 0x0d /* GAS Comeback Response AF */
#define BRCMF_P2P_DISABLE_TIMEOUT msecs_to_jiffies(500)
@@ -158,7 +158,7 @@ struct brcmf_p2p_pub_act_frame {
u8 oui_type;
u8 subtype;
u8 dialog_token;
- u8 elts[1];
+ u8 elts[];
};
/**
@@ -177,7 +177,7 @@ struct brcmf_p2p_action_frame {
u8 type;
u8 subtype;
u8 dialog_token;
- u8 elts[1];
+ u8 elts[];
};
/**
@@ -192,7 +192,7 @@ struct brcmf_p2psd_gas_pub_act_frame {
u8 category;
u8 action;
u8 dialog_token;
- u8 query_data[1];
+ u8 query_data[];
};
/**
@@ -225,7 +225,7 @@ static bool brcmf_p2p_is_pub_action(void *frame, u32 frame_len)
return false;
pact_frm = (struct brcmf_p2p_pub_act_frame *)frame;
- if (frame_len < sizeof(struct brcmf_p2p_pub_act_frame) - 1)
+ if (frame_len < sizeof(*pact_frm))
return false;
if (pact_frm->category == P2P_PUB_AF_CATEGORY &&
@@ -253,7 +253,7 @@ static bool brcmf_p2p_is_p2p_action(void *frame, u32 frame_len)
return false;
act_frm = (struct brcmf_p2p_action_frame *)frame;
- if (frame_len < sizeof(struct brcmf_p2p_action_frame) - 1)
+ if (frame_len < sizeof(*act_frm))
return false;
if (act_frm->category == P2P_AF_CATEGORY &&
@@ -280,7 +280,7 @@ static bool brcmf_p2p_is_gas_action(void *frame, u32 frame_len)
return false;
sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame;
- if (frame_len < sizeof(struct brcmf_p2psd_gas_pub_act_frame) - 1)
+ if (frame_len < sizeof(*sd_act_frm))
return false;
if (sd_act_frm->category != P2PSD_ACTION_CATEGORY)
@@ -396,11 +396,11 @@ static void brcmf_p2p_print_actframe(bool tx, void *frame, u32 frame_len)
(tx) ? "TX" : "RX");
break;
case P2PSD_ACTION_ID_GAS_CREQ:
- brcmf_dbg(TRACE, "%s P2P GAS Comback Request\n",
+ brcmf_dbg(TRACE, "%s P2P GAS Comeback Request\n",
(tx) ? "TX" : "RX");
break;
case P2PSD_ACTION_ID_GAS_CRESP:
- brcmf_dbg(TRACE, "%s P2P GAS Comback Response\n",
+ brcmf_dbg(TRACE, "%s P2P GAS Comeback Response\n",
(tx) ? "TX" : "RX");
break;
default:
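With elts[1], sizeof(struct ...) counted the placeholder byte, hence the
"- 1" in every length check; with elts[], sizeof covers only the fixed
fields and the correction disappears. A small userspace demonstration of
the sizeof difference (field layout is illustrative):

#include <stdio.h>

struct old_hdr { unsigned char category, action, oui[3], token, elts[1]; };
struct new_hdr { unsigned char category, action, oui[3], token, elts[]; };

int main(void)
{
	/* The old form includes the 1-byte placeholder (plus padding, if
	 * any); the flexible-array form counts the fixed fields only. */
	printf("old=%zu new=%zu\n", sizeof(struct old_hdr),
	       sizeof(struct new_hdr));
	return 0;
}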
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 8b149996fc00..97f0f13dfe50 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/bcma/bcma.h>
#include <linux/sched.h>
+#include <linux/io.h>
#include <asm/unaligned.h>
#include <soc.h>
@@ -59,6 +60,13 @@ BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");
+/* firmware config files */
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.txt");
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txt");
+
+/* per-board firmware binaries */
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.bin");
+
static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
BRCMF_FW_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C),
@@ -448,47 +456,6 @@ brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
static void
-brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
- void *srcaddr, u32 len)
-{
- void __iomem *address = devinfo->tcm + mem_offset;
- __le32 *src32;
- __le16 *src16;
- u8 *src8;
-
- if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
- if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
- src8 = (u8 *)srcaddr;
- while (len) {
- iowrite8(*src8, address);
- address++;
- src8++;
- len--;
- }
- } else {
- len = len / 2;
- src16 = (__le16 *)srcaddr;
- while (len) {
- iowrite16(le16_to_cpu(*src16), address);
- address += 2;
- src16++;
- len--;
- }
- }
- } else {
- len = len / 4;
- src32 = (__le32 *)srcaddr;
- while (len) {
- iowrite32(le32_to_cpu(*src32), address);
- address += 4;
- src32++;
- len--;
- }
- }
-}
-
-
-static void
brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
void *dstaddr, u32 len)
{
@@ -777,6 +744,8 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo,
return;
console = &devinfo->shared.console;
+ if (!console->base_addr)
+ return;
addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
newidx = brcmf_pcie_read_tcm32(devinfo, addr);
while (newidx != console->read_idx) {
@@ -1348,6 +1317,18 @@ static void brcmf_pcie_down(struct device *dev)
{
}
+static int brcmf_pcie_preinit(struct device *dev)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+
+ brcmf_dbg(PCIE, "Enter\n");
+
+ brcmf_pcie_intr_enable(buspub->devinfo);
+ brcmf_pcie_hostready(buspub->devinfo);
+
+ return 0;
+}
static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
{
@@ -1456,6 +1437,7 @@ static int brcmf_pcie_reset(struct device *dev)
}
static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
+ .preinit = brcmf_pcie_preinit,
.txdata = brcmf_pcie_tx,
.stop = brcmf_pcie_down,
.txctl = brcmf_pcie_tx_ctlpkt,
@@ -1540,6 +1522,7 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
shared->max_rxbufpost, shared->rx_dataoffset);
brcmf_pcie_bus_console_init(devinfo);
+ brcmf_pcie_bus_console_read(devinfo, false);
return 0;
}
@@ -1563,8 +1546,8 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
return err;
brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
- brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
- (void *)fw->data, fw->size);
+ memcpy_toio(devinfo->tcm + devinfo->ci->rambase,
+ (void *)fw->data, fw->size);
resetintr = get_unaligned_le32(fw->data);
release_firmware(fw);
@@ -1578,7 +1561,7 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
address = devinfo->ci->rambase + devinfo->ci->ramsize -
nvram_len;
- brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
+ memcpy_toio(devinfo->tcm + address, nvram, nvram_len);
brcmf_fw_nvram_free(nvram);
} else {
brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
@@ -1777,6 +1760,8 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
ret = brcmf_chip_get_raminfo(devinfo->ci);
if (ret) {
brcmf_err(bus, "Failed to get RAM info\n");
+ release_firmware(fw);
+ brcmf_fw_nvram_free(nvram);
goto fail;
}
@@ -1826,9 +1811,6 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
init_waitqueue_head(&devinfo->mbdata_resp_wait);
- brcmf_pcie_intr_enable(devinfo);
- brcmf_pcie_hostready(devinfo);
-
ret = brcmf_attach(&devinfo->pdev->dev);
if (ret)
goto fail;
@@ -1980,6 +1962,7 @@ brcmf_pcie_remove(struct pci_dev *pdev)
return;
devinfo = bus->bus_priv.pcie->devinfo;
+ brcmf_pcie_bus_console_read(devinfo, false);
devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
if (devinfo->ci)
@@ -2106,6 +2089,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_RAW_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4359_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
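memcpy_toio() already handles safe copying into memory-mapped I/O, so the
~40-line width-dispatching helper above can be deleted outright. A sketch of
the firmware download using it (names are illustrative, not the driver's):

#include <linux/io.h>
#include <linux/types.h>

/* Sketch: copy a firmware image into device TCM through the mapped BAR.
 * 'tcm' is the __iomem mapping, 'rambase' the chip's load offset. */
static void fw_download_sketch(void __iomem *tcm, u32 rambase,
			       const void *data, size_t size)
{
	memcpy_toio(tcm + rambase, data, size);
}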
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
index f4a79e217da5..bd08d3aaa8f4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
@@ -32,7 +32,7 @@ struct brcmf_proto {
u8 peer[ETH_ALEN]);
void (*add_tdls_peer)(struct brcmf_pub *drvr, int ifidx,
u8 peer[ETH_ALEN]);
- void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
+ void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb);
void (*add_if)(struct brcmf_if *ifp);
void (*del_if)(struct brcmf_if *ifp);
void (*reset_if)(struct brcmf_if *ifp);
@@ -109,9 +109,9 @@ static inline bool brcmf_proto_is_reorder_skb(struct sk_buff *skb)
}
static inline void
-brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
+brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
{
- ifp->drvr->proto->rxreorder(ifp, skb, inirq);
+ ifp->drvr->proto->rxreorder(ifp, skb);
}
static inline void
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 8effeb7a7269..ba3c159111d3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -629,7 +629,6 @@ BRCMF_FW_CLM_DEF(43752, "brcmfmac43752-sdio");
/* firmware config files */
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-sdio.*.txt");
-MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txt");
/* per-board firmware binaries */
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-sdio.*.bin");
@@ -652,6 +651,7 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFC, 43430B0),
BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0x00000200, 43456),
BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFDC0, 43455),
+ BRCMF_FW_ENTRY(BRCM_CC_43454_CHIP_ID, 0x00000040, 43455),
BRCMF_FW_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354),
BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.h
index e1930ce1b642..b2c7ae8966a1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.h
@@ -15,7 +15,7 @@
struct brcmf_xtlv {
u16 id;
u16 len;
- u8 data[0];
+ u8 data[];
};
enum brcmf_xtlv_option {
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index 9d81320164ce..ed0b707f0cdf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -32,6 +32,7 @@
#define BRCM_CC_4339_CHIP_ID 0x4339
#define BRCM_CC_43430_CHIP_ID 43430
#define BRCM_CC_4345_CHIP_ID 0x4345
+#define BRCM_CC_43454_CHIP_ID 43454
#define BRCM_CC_43465_CHIP_ID 43465
#define BRCM_CC_4350_CHIP_ID 0x4350
#define BRCM_CC_43525_CHIP_ID 43525
@@ -71,6 +72,7 @@
#define BRCM_PCIE_4356_DEVICE_ID 0x43ec
#define BRCM_PCIE_43567_DEVICE_ID 0x43d3
#define BRCM_PCIE_43570_DEVICE_ID 0x43d9
+#define BRCM_PCIE_43570_RAW_DEVICE_ID 0xaa31
#define BRCM_PCIE_4358_DEVICE_ID 0x43e9
#define BRCM_PCIE_4359_DEVICE_ID 0x43ef
#define BRCM_PCIE_43602_DEVICE_ID 0x43ba
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 452d08545d31..10daef81c355 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -545,7 +545,7 @@ struct ConfigRid {
#define MODE_CFG_MASK cpu_to_le16(0xff)
#define MODE_ETHERNET_HOST cpu_to_le16(0<<8) /* rx payloads converted */
#define MODE_LLC_HOST cpu_to_le16(1<<8) /* rx payloads left as is */
-#define MODE_AIRONET_EXTEND cpu_to_le16(1<<9) /* enable Aironet extenstions */
+#define MODE_AIRONET_EXTEND cpu_to_le16(1<<9) /* enable Aironet extensions */
#define MODE_AP_INTERFACE cpu_to_le16(1<<10) /* enable ap interface extensions */
#define MODE_ANTENNA_ALIGN cpu_to_le16(1<<11) /* enable antenna alignment */
#define MODE_ETHER_LLC cpu_to_le16(1<<12) /* enable ethernet LLC */
diff --git a/drivers/net/wireless/intel/Makefile b/drivers/net/wireless/intel/Makefile
index 1364b0014488..208e73a16051 100644
--- a/drivers/net/wireless/intel/Makefile
+++ b/drivers/net/wireless/intel/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_IPW2200) += ipw2x00/
obj-$(CONFIG_IWLEGACY) += iwlegacy/
obj-$(CONFIG_IWLWIFI) += iwlwifi/
+obj-$(CONFIG_IWLMEI) += iwlwifi/
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 85e704283755..a647a406b87b 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -139,6 +139,7 @@ config IWLMEI
tristate "Intel Management Engine communication over WLAN"
depends on INTEL_MEI
depends on PM
+ depends on CFG80211
help
Enables the iwlmei kernel module.
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 330ef04ca51a..8ff967edc8f0 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -1,15 +1,16 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
#include "iwl-prph.h"
+#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 69
+#define IWL_22000_UCODE_API_MAX 72
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -39,6 +40,7 @@
#define IWL_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-"
#define IWL_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-"
#define IWL_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
+#define IWL_SO_A_MR_A_FW_PRE "iwlwifi-so-a0-mr-a0-"
#define IWL_SNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
#define IWL_SNJ_A_GF_A_FW_PRE "iwlwifi-SoSnj-a0-gf-a0-"
#define IWL_SNJ_A_HR_B_FW_PRE "iwlwifi-SoSnj-a0-hr-b0-"
@@ -119,8 +121,6 @@
IWL_BZ_A_FM_A_FW_PRE __stringify(api) ".ucode"
#define IWL_GL_A_FM_A_MODULE_FIRMWARE(api) \
IWL_GL_A_FM_A_FW_PRE __stringify(api) ".ucode"
-#define IWL_BZ_Z_GF_A_MODULE_FIRMWARE(api) \
- IWL_BZ_Z_GF_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_FM_A_MODULE_FIRMWARE(api) \
IWL_BNJ_A_FM_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(api) \
@@ -224,7 +224,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.trans.base_params = &iwl_ax210_base_params, \
.min_txq_size = 128, \
.gp2_reg_addr = 0xd02c68, \
- .min_256_ba_txq_size = 1024, \
+ .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_HE, \
.mon_dram_regs = { \
.write_ptr = { \
.addr = DBGC_CUR_DBGBUF_STATUS, \
@@ -285,7 +285,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.trans.base_params = &iwl_ax210_base_params, \
.min_txq_size = 128, \
.gp2_reg_addr = 0xd02c68, \
- .min_256_ba_txq_size = 1024, \
+ .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \
.mon_dram_regs = { \
.write_ptr = { \
.addr = DBGC_CUR_DBGBUF_STATUS, \
@@ -299,6 +299,12 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.addr = DBGC_CUR_DBGBUF_STATUS, \
.mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \
}, \
+ }, \
+ .mon_dbgi_regs = { \
+ .write_ptr = { \
+ .addr = DBGI_SRAM_FIFO_POINTERS, \
+ .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \
+ }, \
}
const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = {
@@ -385,6 +391,21 @@ const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
+const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg = {
+ .mq_rx_supported = true,
+ .use_tfh = true,
+ .rf_id = true,
+ .gen2 = true,
+ .device_family = IWL_DEVICE_FAMILY_AX210,
+ .base_params = &iwl_ax210_base_params,
+ .umac_prph_offset = 0x300000,
+ .integrated = true,
+ .low_latency_xtal = true,
+ .xtal_latency = 12000,
+ .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
+ .imr_enabled = true,
+};
+
/*
* If the device doesn't support HE, no need to have that many buffers.
* 22000 devices can split multiple frames into a single RB, so fewer are
@@ -476,6 +497,7 @@ const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
+const char iwl_ax204_name[] = "Intel(R) Wi-Fi 6 AX204 160MHz";
const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz";
const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz";
@@ -816,6 +838,20 @@ const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+const struct iwl_cfg iwl_cfg_ma_a0_ms_a0 = {
+ .fw_name_pre = IWL_MA_A_MR_A_FW_PRE,
+ .uhb_supported = false,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_so_a0_ms_a0 = {
+ .fw_name_pre = IWL_SO_A_MR_A_FW_PRE,
+ .uhb_supported = false,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
const struct iwl_cfg iwl_cfg_ma_a0_fm_a0 = {
.fw_name_pre = IWL_MA_A_FM_A_FW_PRE,
.uhb_supported = true,
@@ -830,6 +866,13 @@ const struct iwl_cfg iwl_cfg_snj_a0_mr_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+const struct iwl_cfg iwl_cfg_snj_a0_ms_a0 = {
+ .fw_name_pre = IWL_SNJ_A_MR_A_FW_PRE,
+ .uhb_supported = false,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = {
.fw_name_pre = IWL_SO_A_HR_B_FW_PRE,
IWL_DEVICE_AX210,
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 754876cd27ce..e8bd4f0e3d2d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -299,7 +299,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw)
priv->is_open = 1;
IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
+ return ret;
}
static void iwlagn_mac_stop(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 90b9becd1673..caf452922dbd 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -48,6 +48,7 @@
#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IWLWIFI);
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search.
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
index db0c41bbeb0e..e9d2717362cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -3,7 +3,7 @@
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
- * Copyright(c) 2018, 2020 Intel Corporation
+ * Copyright(c) 2018, 2020-2021 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -915,7 +915,7 @@ static void iwlagn_rx_noa_notification(struct iwl_priv *priv,
len += 1 + 2;
copylen += 1 + 2;
- new_data = kmalloc(sizeof(*new_data) + len, GFP_ATOMIC);
+ new_data = kmalloc(struct_size(new_data, data, len), GFP_ATOMIC);
if (new_data) {
new_data->length = len;
new_data->data[0] = WLAN_EID_VENDOR_SPECIFIC;
@@ -1015,8 +1015,7 @@ void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
/* No handling needed */
IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
iwl_get_cmd_string(priv->trans,
- iwl_cmd_id(pkt->hdr.cmd,
- 0, 0)),
+ WIDE_ID(0, pkt->hdr.cmd)),
pkt->hdr.cmd);
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index c17ab53fcd8f..33aae639ad37 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -4,6 +4,7 @@
* Copyright (C) 2019-2022 Intel Corporation
*/
#include <linux/uuid.h>
+#include <linux/dmi.h>
#include "iwl-drv.h"
#include "iwl-debug.h"
#include "acpi.h"
@@ -19,6 +20,30 @@ const guid_t iwl_rfi_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29,
0xDD, 0x26, 0xB5, 0xFD);
IWL_EXPORT_SYMBOL(iwl_rfi_guid);
+static const struct dmi_system_id dmi_ppag_approved_list[] = {
+ { .ident = "HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ },
+ },
+ { .ident = "SAMSUNG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+ },
+ },
+ { .ident = "MSFT",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ },
+ },
+ { .ident = "ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."),
+ },
+ },
+ {}
+};
+
static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
acpi_handle *ret_handle)
{
@@ -242,7 +267,7 @@ found:
IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg_range);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- struct iwl_tas_config_cmd_v3 *cmd)
+ union iwl_tas_config_cmd *cmd, int fw_ver)
{
union acpi_object *wifi_pkg, *data;
int ret, tbl_rev, i, block_list_size, enabled;
@@ -268,10 +293,18 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
(tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >> ACPI_WTAS_OVERRIDE_IEC_POS;
u16 enabled_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >>
ACPI_WTAS_ENABLE_IEC_POS;
+ u8 usa_tas_uhb = (tas_selection & ACPI_WTAS_USA_UHB_MSK) >> ACPI_WTAS_USA_UHB_POS;
+
enabled = tas_selection & ACPI_WTAS_ENABLED_MSK;
- cmd->override_tas_iec = cpu_to_le16(override_iec);
- cmd->enable_tas_iec = cpu_to_le16(enabled_iec);
+ if (fw_ver <= 3) {
+ cmd->v3.override_tas_iec = cpu_to_le16(override_iec);
+ cmd->v3.enable_tas_iec = cpu_to_le16(enabled_iec);
+ } else {
+ cmd->v4.usa_tas_uhb_allowed = usa_tas_uhb;
+ cmd->v4.override_tas_iec = (u8)override_iec;
+ cmd->v4.enable_tas_iec = (u8)enabled_iec;
+ }
} else if (tbl_rev == 0 &&
wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) {
@@ -297,7 +330,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
goto out_free;
}
block_list_size = wifi_pkg->package.elements[2].integer.value;
- cmd->block_list_size = cpu_to_le32(block_list_size);
+ cmd->v4.block_list_size = cpu_to_le32(block_list_size);
IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size);
if (block_list_size > APCI_WTAS_BLACK_LIST_MAX) {
@@ -319,7 +352,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
}
country = wifi_pkg->package.elements[3 + i].integer.value;
- cmd->block_list_array[i] = cpu_to_le32(country);
+ cmd->v4.block_list_array[i] = cpu_to_le32(country);
IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country);
}
@@ -529,8 +562,8 @@ IWL_EXPORT_SYMBOL(iwl_sar_select_profile);
int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *table, *data;
- bool enabled;
int ret, tbl_rev;
+ u32 flags;
u8 num_chains, num_sub_bands;
data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDS_METHOD);
@@ -596,7 +629,8 @@ read_table:
IWL_DEBUG_RADIO(fwrt, "Reading WRDS tbl_rev=%d\n", tbl_rev);
- enabled = !!(wifi_pkg->package.elements[1].integer.value);
+ flags = wifi_pkg->package.elements[1].integer.value;
+ fwrt->reduced_power_flags = flags >> IWL_REDUCE_POWER_FLAGS_POS;
/* position of the actual table */
table = &wifi_pkg->package.elements[2];
@@ -604,7 +638,8 @@ read_table:
/* The profile from WRDS is officially profile 1, but goes
* into sar_profiles[0] (because we don't have a profile 0).
*/
- ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0], enabled,
+ ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0],
+ flags & IWL_SAR_ENABLE_MSK,
num_chains, num_sub_bands);
out_free:
kfree(data);
@@ -962,3 +997,181 @@ __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
return config_bitmap;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_lari_config_bitmap);
+
+int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+{
+ union acpi_object *wifi_pkg, *data, *flags;
+ int i, j, ret, tbl_rev, num_sub_bands = 0;
+ int idx = 2;
+
+ fwrt->ppag_flags = 0;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_PPAG_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ /* try to read ppag table rev 2 or 1 (both have the same data size) */
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
+
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev == 1 || tbl_rev == 2) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ IWL_DEBUG_RADIO(fwrt,
+ "Reading PPAG table v2 (tbl_rev=%d)\n",
+ tbl_rev);
+ goto read_table;
+ } else {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+
+ /* try to read ppag table revision 0 */
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_PPAG_WIFI_DATA_SIZE_V1, &tbl_rev);
+
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev != 0) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ num_sub_bands = IWL_NUM_SUB_BANDS_V1;
+ IWL_DEBUG_RADIO(fwrt, "Reading PPAG table v1 (tbl_rev=0)\n");
+ goto read_table;
+	}
+
+	ret = PTR_ERR(wifi_pkg);
+	goto out_free;
+
+read_table:
+ fwrt->ppag_ver = tbl_rev;
+ flags = &wifi_pkg->package.elements[1];
+
+ if (flags->type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ fwrt->ppag_flags = flags->integer.value & ACPI_PPAG_MASK;
+
+ if (!fwrt->ppag_flags) {
+ ret = 0;
+ goto out_free;
+ }
+
+ /*
+ * read, verify gain values and save them into the PPAG table.
+ * first sub-band (j=0) corresponds to Low-Band (2.4GHz), and the
+ * following sub-bands to High-Band (5GHz).
+ */
+ for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+ for (j = 0; j < num_sub_bands; j++) {
+ union acpi_object *ent;
+
+ ent = &wifi_pkg->package.elements[idx++];
+ if (ent->type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ fwrt->ppag_chains[i].subbands[j] = ent->integer.value;
+
+ if ((j == 0 &&
+ (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB ||
+ fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) ||
+ (j != 0 &&
+ (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB ||
+ fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) {
+ fwrt->ppag_flags = 0;
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+ }
+
+ ret = 0;
+
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_ppag_table);
+
+int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
+ int *cmd_size)
+{
+ u8 cmd_ver;
+ int i, j, num_sub_bands;
+ s8 *gain;
+
+ if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG capability not supported by FW, command not sent.\n");
+ return -EINVAL;
+ }
+ if (!fwrt->ppag_flags) {
+ IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n");
+ return -EINVAL;
+ }
+
+ /* The 'flags' field is the same in v1 and in v2 so we can just
+ * use v1 to access it.
+ */
+ cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
+ cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
+ WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD),
+ IWL_FW_CMD_VER_UNKNOWN);
+ if (cmd_ver == 1) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V1;
+ gain = cmd->v1.gain[0];
+ *cmd_size = sizeof(cmd->v1);
+ if (fwrt->ppag_ver == 1 || fwrt->ppag_ver == 2) {
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table rev is %d but FW supports v1, sending truncated table\n",
+ fwrt->ppag_ver);
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
+ }
+ } else if (cmd_ver == 2 || cmd_ver == 3) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ gain = cmd->v2.gain[0];
+ *cmd_size = sizeof(cmd->v2);
+ if (fwrt->ppag_ver == 0) {
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table is v1 but FW supports v2, sending padded table\n");
+ } else if (cmd_ver == 2 && fwrt->ppag_ver == 2) {
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table is v3 but FW supports v2, sending partial bitmap.\n");
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
+ }
+ } else {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+ for (j = 0; j < num_sub_bands; j++) {
+ gain[i * num_sub_bands + j] =
+ fwrt->ppag_chains[i].subbands[j];
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table: chain[%d] band[%d]: gain = %d\n",
+ i, j, gain[i * num_sub_bands + j]);
+ }
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_read_ppag_table);
+
+bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
+{
+ if (!dmi_check_system(dmi_ppag_approved_list)) {
+ IWL_DEBUG_RADIO(fwrt,
+ "System vendor '%s' is not in the approved list, disabling PPAG.\n",
+ dmi_get_system_info(DMI_SYS_VENDOR));
+ fwrt->ppag_flags = 0;
+ return false;
+ }
+
+ return true;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_is_ppag_approved);
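dmi_check_system() walks a NULL-terminated match table and returns the number
of entries that match the running system, so the PPAG gate reduces to one
call. A minimal sketch with placeholder vendor strings (not the driver's
actual allowlist):

#include <linux/dmi.h>
#include <linux/types.h>

static const struct dmi_system_id example_allowlist[] = {
	{
		.ident = "ExampleVendor",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor Inc."),
		},
	},
	{}	/* terminating empty entry */
};

/* Sketch: true when the system's DMI vendor matches any entry;
 * dmi_check_system() returns the match count. */
static bool vendor_approved_sketch(void)
{
	return dmi_check_system(example_allowlist) != 0;
}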
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 22b3c665f91a..6f361c59106f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __iwl_fw_acpi__
#define __iwl_fw_acpi__
@@ -77,6 +77,8 @@
#define ACPI_WTAS_ENABLE_IEC_MSK 0x4
#define ACPI_WTAS_OVERRIDE_IEC_POS 0x1
#define ACPI_WTAS_ENABLE_IEC_POS 0x2
+#define ACPI_WTAS_USA_UHB_MSK BIT(16)
+#define ACPI_WTAS_USA_UHB_POS 16
#define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \
@@ -89,6 +91,11 @@
#define ACPI_PPAG_MAX_LB 24
#define ACPI_PPAG_MIN_HB -16
#define ACPI_PPAG_MAX_HB 40
+#define ACPI_PPAG_MASK 3
+#define IWL_PPAG_ETSI_MASK BIT(0)
+
+#define IWL_SAR_ENABLE_MSK BIT(0)
+#define IWL_REDUCE_POWER_FLAGS_POS 1
/*
* The profile for revision 2 is a superset of revision 1, which is in
@@ -126,7 +133,8 @@ enum iwl_dsm_funcs_rev_0 {
DSM_FUNC_ENABLE_6E = 3,
DSM_FUNC_11AX_ENABLEMENT = 6,
DSM_FUNC_ENABLE_UNII4_CHAN = 7,
- DSM_FUNC_ACTIVATE_CHANNEL = 8
+ DSM_FUNC_ACTIVATE_CHANNEL = 8,
+ DSM_FUNC_FORCE_DISABLE_CHANNELS = 9
};
enum iwl_dsm_values_srd {
@@ -213,10 +221,17 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
u32 n_bands, u32 n_profiles);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- struct iwl_tas_config_cmd_v3 *cmd);
+ union iwl_tas_config_cmd *cmd, int fw_ver);
__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
+ int *cmd_size);
+
+bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt);
+
#else /* CONFIG_ACPI */
static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
@@ -294,7 +309,7 @@ static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
}
static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- struct iwl_tas_config_cmd_v3 *cmd)
+ union iwl_tas_config_cmd *cmd, int fw_ver)
{
return -ENOENT;
}
@@ -304,6 +319,22 @@ static inline __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt
return 0;
}
+static inline int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt,
+ union iwl_ppag_table_cmd *cmd, int *cmd_size)
+{
+ return -ENOENT;
+}
+
+static inline bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
+{
+ return false;
+}
+
#endif /* CONFIG_ACPI */
static inline union acpi_object *
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 35b8856e511f..c78d2f1c722c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -323,14 +323,6 @@ enum iwl_legacy_cmds {
REPLY_THERMAL_MNG_BACKOFF = 0x7e,
/**
- * @DC2DC_CONFIG_CMD:
- * Set/Get DC2DC frequency tune
- * Command is &struct iwl_dc2dc_config_cmd,
- * response is &struct iwl_dc2dc_config_resp
- */
- DC2DC_CONFIG_CMD = 0x83,
-
- /**
* @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd
*/
NVM_ACCESS_CMD = 0x88,
@@ -608,6 +600,11 @@ enum iwl_system_subcmd_ids {
* @SYSTEM_FEATURES_CONTROL_CMD: &struct iwl_system_features_control_cmd
*/
SYSTEM_FEATURES_CONTROL_CMD = 0xd,
+
+ /**
+ * @RFI_DEACTIVATE_NOTIF: &struct iwl_rfi_deactivate_notif
+ */
+ RFI_DEACTIVATE_NOTIF = 0xff,
};
#endif /* __iwl_fw_api_commands_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
index 1ab92f62c414..087354b3c308 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
@@ -114,37 +114,4 @@ enum iwl_dc2dc_config_id {
DCDC_FREQ_TUNE_SET = 0x2,
}; /* MARKER_ID_API_E_VER_1 */
-/**
- * struct iwl_dc2dc_config_cmd - configure dc2dc values
- *
- * (DC2DC_CONFIG_CMD = 0x83)
- *
- * Set/Get & configure dc2dc values.
- * The command always returns the current dc2dc values.
- *
- * @flags: set/get dc2dc
- * @enable_low_power_mode: not used.
- * @dc2dc_freq_tune0: frequency divider - digital domain
- * @dc2dc_freq_tune1: frequency divider - analog domain
- */
-struct iwl_dc2dc_config_cmd {
- __le32 flags;
- __le32 enable_low_power_mode; /* not used */
- __le32 dc2dc_freq_tune0;
- __le32 dc2dc_freq_tune1;
-} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */
-
-/**
- * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd
- *
- * Current dc2dc values returned by the FW.
- *
- * @dc2dc_freq_tune0: frequency divider - digital domain
- * @dc2dc_freq_tune1: frequency divider - analog domain
- */
-struct iwl_dc2dc_config_resp {
- __le32 dc2dc_freq_tune0;
- __le32 dc2dc_freq_tune1;
-} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */
-
#endif /* __iwl_fw_api_config_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 89236f42c5a4..43619acc29fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -42,7 +42,7 @@ enum iwl_data_path_subcmd_ids {
RFH_QUEUE_CONFIG_CMD = 0xD,
/**
- * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
+ * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd_v4
*/
TLC_MNG_CONFIG_CMD = 0xF,
@@ -58,6 +58,20 @@ enum iwl_data_path_subcmd_ids {
CHEST_COLLECTOR_FILTER_CONFIG_CMD = 0x14,
/**
+ * @RX_BAID_ALLOCATION_CONFIG_CMD: Allocate/deallocate a BAID for an RX
+ * blockack session, uses &struct iwl_rx_baid_cfg_cmd for the
+ * command, and &struct iwl_rx_baid_cfg_resp as a response.
+ */
+ RX_BAID_ALLOCATION_CONFIG_CMD = 0x16,
+
+ /**
+ * @SCD_QUEUE_CONFIG_CMD: new scheduler queue allocation/config/removal
+ * command, uses &struct iwl_scd_queue_cfg_cmd and the response
+ * is (same as before) &struct iwl_tx_queue_cfg_rsp.
+ */
+ SCD_QUEUE_CONFIG_CMD = 0x17,
+
+ /**
* @MONITOR_NOTIF: Datapath monitoring notification, using
* &struct iwl_datapath_monitor_notif
*/
@@ -257,4 +271,136 @@ struct iwl_rlc_config_cmd {
u8 reserved[3];
} __packed; /* RLC_CONFIG_CMD_API_S_VER_2 */
+#define IWL_MAX_BAID_OLD 16 /* MAX_IMMEDIATE_BA_API_D_VER_2 */
+#define IWL_MAX_BAID 32 /* MAX_IMMEDIATE_BA_API_D_VER_3 */
+
+/**
+ * enum iwl_rx_baid_action - BAID allocation/config action
+ * @IWL_RX_BAID_ACTION_ADD: add a new BAID session
+ * @IWL_RX_BAID_ACTION_MODIFY: modify the BAID session
+ * @IWL_RX_BAID_ACTION_REMOVE: remove the BAID session
+ */
+enum iwl_rx_baid_action {
+ IWL_RX_BAID_ACTION_ADD,
+ IWL_RX_BAID_ACTION_MODIFY,
+ IWL_RX_BAID_ACTION_REMOVE,
+}; /* RX_BAID_ALLOCATION_ACTION_E_VER_1 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_alloc - BAID allocation data
+ * @sta_id_mask: station ID mask
+ * @tid: the TID for this session
+ * @reserved: reserved
+ * @ssn: the starting sequence number
+ * @win_size: RX BA session window size
+ */
+struct iwl_rx_baid_cfg_cmd_alloc {
+ __le32 sta_id_mask;
+ u8 tid;
+ u8 reserved[3];
+ __le16 ssn;
+ __le16 win_size;
+} __packed; /* RX_BAID_ALLOCATION_ADD_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_modify - BAID modification data
+ * @old_sta_id_mask: old station ID mask
+ * @new_sta_id_mask: new station ID mask
+ * @tid: TID of the BAID
+ */
+struct iwl_rx_baid_cfg_cmd_modify {
+ __le32 old_sta_id_mask;
+ __le32 new_sta_id_mask;
+ __le32 tid;
+} __packed; /* RX_BAID_ALLOCATION_MODIFY_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_remove_v1 - BAID removal data
+ * @baid: the BAID to remove
+ */
+struct iwl_rx_baid_cfg_cmd_remove_v1 {
+ __le32 baid;
+} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_remove - BAID removal data
+ * @sta_id_mask: the station mask of the BAID to remove
+ * @tid: the TID of the BAID to remove
+ */
+struct iwl_rx_baid_cfg_cmd_remove {
+ __le32 sta_id_mask;
+ __le32 tid;
+} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd - BAID allocation/config command
+ * @action: the action, from &enum iwl_rx_baid_action
+ */
+struct iwl_rx_baid_cfg_cmd {
+ __le32 action;
+ union {
+ struct iwl_rx_baid_cfg_cmd_alloc alloc;
+ struct iwl_rx_baid_cfg_cmd_modify modify;
+ struct iwl_rx_baid_cfg_cmd_remove_v1 remove_v1;
+ struct iwl_rx_baid_cfg_cmd_remove remove;
+ }; /* RX_BAID_ALLOCATION_OPERATION_API_U_VER_2 */
+} __packed; /* RX_BAID_ALLOCATION_CONFIG_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_rx_baid_cfg_resp - BAID allocation response
+ * @baid: the allocated BAID
+ */
+struct iwl_rx_baid_cfg_resp {
+ __le32 baid;
+}; /* RX_BAID_ALLOCATION_RESPONSE_API_S_VER_1 */
+
+/**
+ * enum iwl_scd_queue_cfg_operation - scheduler queue operation
+ * @IWL_SCD_QUEUE_ADD: allocate a new queue
+ * @IWL_SCD_QUEUE_REMOVE: remove a queue
+ * @IWL_SCD_QUEUE_MODIFY: modify a queue
+ */
+enum iwl_scd_queue_cfg_operation {
+ IWL_SCD_QUEUE_ADD = 0,
+ IWL_SCD_QUEUE_REMOVE = 1,
+ IWL_SCD_QUEUE_MODIFY = 2,
+};
+
+/**
+ * struct iwl_scd_queue_cfg_cmd - scheduler queue allocation command
+ * @operation: the operation, see &enum iwl_scd_queue_cfg_operation
+ * @u.add.sta_mask: station mask
+ * @u.add.tid: TID
+ * @u.add.reserved: reserved
+ * @u.add.flags: flags from &enum iwl_tx_queue_cfg_actions, except
+ * %TX_QUEUE_CFG_ENABLE_QUEUE is not valid
+ * @u.add.cb_size: size code
+ * @u.add.bc_dram_addr: byte-count table IOVA
+ * @u.add.tfdq_dram_addr: TFD queue IOVA
+ * @u.remove.queue: queue ID for removal
+ * @u.modify.sta_mask: new station mask for modify
+ * @u.modify.queue: queue ID to modify
+ */
+struct iwl_scd_queue_cfg_cmd {
+ __le32 operation;
+ union {
+ struct {
+ __le32 sta_mask;
+ u8 tid;
+ u8 reserved[3];
+ __le32 flags;
+ __le32 cb_size;
+ __le64 bc_dram_addr;
+ __le64 tfdq_dram_addr;
+ } __packed add; /* TX_QUEUE_CFG_CMD_ADD_API_S_VER_1 */
+ struct {
+ __le32 queue;
+ } __packed remove; /* TX_QUEUE_CFG_CMD_REMOVE_API_S_VER_1 */
+ struct {
+ __le32 sta_mask;
+ __le32 queue;
+ } __packed modify; /* TX_QUEUE_CFG_CMD_MODIFY_API_S_VER_1 */
+ } __packed u; /* TX_QUEUE_CFG_CMD_OPERATION_API_U_VER_1 */
+} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_3 */
+
#endif /* __iwl_fw_api_datapath_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 456b7eaac570..52bf96585fc6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __iwl_fw_dbg_tlv_h__
#define __iwl_fw_dbg_tlv_h__
@@ -11,7 +11,8 @@
#define IWL_FW_INI_MAX_NAME 32
#define IWL_FW_INI_MAX_CFG_NAME 64
#define IWL_FW_INI_DOMAIN_ALWAYS_ON 0
-#define IWL_FW_INI_REGION_V2_MASK 0x0000FFFF
+#define IWL_FW_INI_REGION_ID_MASK GENMASK(15, 0)
+#define IWL_FW_INI_REGION_DUMP_POLICY_MASK GENMASK(31, 16)
/**
* struct iwl_fw_ini_hcmd
@@ -249,11 +250,10 @@ struct iwl_fw_ini_hcmd_tlv {
} __packed; /* FW_TLV_DEBUG_HCMD_API_S_VER_1 */
/**
-* struct iwl_fw_ini_conf_tlv - preset configuration TLV
+* struct iwl_fw_ini_addr_val - Address and value to set it to
*
* @address: the base address
* @value: value to set at address
-
*/
struct iwl_fw_ini_addr_val {
__le32 address;
@@ -475,6 +475,7 @@ enum iwl_fw_ini_time_point {
* @IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG: override trigger configuration
* @IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA: override trigger data.
* Append otherwise
+ * @IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD: send cmd once dump collected
*/
enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT = BIT(0),
@@ -482,6 +483,7 @@ enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS = BIT(8),
IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9),
IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10),
+ IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD = BIT(16),
};
/**
@@ -496,4 +498,31 @@ enum iwl_fw_ini_trigger_reset_fw_policy {
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY,
IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW
};
+
+/**
+ * enum iwl_fw_ini_dump_policy - Determines how to handle the dump based on the enabled flags
+ *
+ * @IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT: the OS has no limit on the dump size
+ * @IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB: mini dump, limited to 600KB of region dumps
+ * @IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB: mini dump, limited to a 5MB dump
+ */
+enum iwl_fw_ini_dump_policy {
+ IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT = BIT(0),
+ IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB = BIT(1),
+ IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB = BIT(2),
+};
+
+/**
+ * enum iwl_fw_ini_dump_type - Determines the dump type based on the size defined by the FW.
+ *
+ * @IWL_FW_INI_DUMP_BRIEF: only dump the most important regions
+ * @IWL_FW_INI_DUMP_MEDIUM: dump more regions than "brief", but not all regions
+ * @IWL_FW_INI_DUMP_VERBOSE: dump all regions
+ */
+enum iwl_fw_ini_dump_type {
+ IWL_FW_INI_DUMP_BRIEF,
+ IWL_FW_INI_DUMP_MEDIUM,
+ IWL_FW_INI_DUMP_VERBOSE,
+};
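
For illustration only (not part of the patch): how a chosen dump type maps onto the policy bit a region must advertise for it to be collected. This mirrors the checks added to iwl_dump_ini_mem() in the dbg.c hunk below; the helper itself is hypothetical.

static u32 example_required_policy(enum iwl_fw_ini_dump_type type)
{
	switch (type) {
	case IWL_FW_INI_DUMP_BRIEF:
		return IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB;
	case IWL_FW_INI_DUMP_MEDIUM:
		return IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB;
	case IWL_FW_INI_DUMP_VERBOSE:
	default:
		return IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT;
	}
}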
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index 029ae64bf2b2..6255257ddebe 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -43,6 +43,12 @@ enum iwl_debug_cmds {
*/
BUFFER_ALLOCATION = 0x8,
/**
+ * @FW_DUMP_COMPLETE_CMD:
+ * sends command to fw once dump collection completed
+ * &struct iwl_dbg_dump_complete_cmd
+ */
+ FW_DUMP_COMPLETE_CMD = 0xB,
+ /**
* @MFU_ASSERT_DUMP_NTF:
* &struct iwl_mfu_assert_dump_notif
*/
@@ -404,4 +410,15 @@ struct iwl_dbg_host_event_cfg_cmd {
__le32 enabled_severities;
} __packed; /* DEBUG_HOST_EVENT_CFG_CMD_API_S_VER_1 */
+/**
+ * struct iwl_dbg_dump_complete_cmd - dump complete cmd
+ *
+ * @tp: timepoint whose dump has completed
+ * @tp_data: timepoint data
+ */
+struct iwl_dbg_dump_complete_cmd {
+ __le32 tp;
+ __le32 tp_data;
+} __packed; /* FW_DUMP_COMPLETE_CMD_API_S_VER_1 */
+
#endif /* __iwl_fw_api_debug_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index d088c820b1a9..712532f17630 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -27,6 +27,10 @@ enum iwl_mac_conf_subcmd_ids {
* @SESSION_PROTECTION_CMD: &struct iwl_mvm_session_prot_cmd
*/
SESSION_PROTECTION_CMD = 0x5,
+ /**
+ * @CANCEL_CHANNEL_SWITCH_CMD: &struct iwl_cancel_channel_switch_cmd
+ */
+ CANCEL_CHANNEL_SWITCH_CMD = 0x6,
/**
* @SESSION_PROTECTION_NOTIF: &struct iwl_mvm_session_prot_notif
@@ -42,6 +46,11 @@ enum iwl_mac_conf_subcmd_ids {
* @CHANNEL_SWITCH_START_NOTIF: &struct iwl_channel_switch_start_notif
*/
CHANNEL_SWITCH_START_NOTIF = 0xFF,
+
+ /**
+	 * @CHANNEL_SWITCH_ERROR_NOTIF: &struct iwl_channel_switch_error_notif
+ */
+ CHANNEL_SWITCH_ERROR_NOTIF = 0xF9,
};
#define IWL_P2P_NOA_DESC_COUNT (2)
@@ -110,6 +119,31 @@ struct iwl_channel_switch_start_notif {
__le32 id_and_color;
} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
+#define CS_ERR_COUNT_ERROR BIT(0)
+#define CS_ERR_LONG_DELAY_AFTER_CS BIT(1)
+#define CS_ERR_LONG_TX_BLOCK BIT(2)
+#define CS_ERR_TX_BLOCK_TIMER_EXPIRED BIT(3)
+
+/**
+ * struct iwl_channel_switch_error_notif - Channel switch error notification
+ *
+ * @mac_id: the mac for which the ucode sends the notification
+ * @csa_err_mask: mask of channel switch errors that can occur
+ */
+struct iwl_channel_switch_error_notif {
+ __le32 mac_id;
+ __le32 csa_err_mask;
+} __packed; /* CHANNEL_SWITCH_ERROR_NTFY_API_S_VER_1 */
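
For illustration only (not part of the patch): decoding the error mask on the host side. Which errors are treated as fatal is a hypothetical policy, not something this patch defines.

static bool
example_csa_must_abort(const struct iwl_channel_switch_error_notif *notif)
{
	u32 mask = le32_to_cpu(notif->csa_err_mask);

	/* e.g. treat count errors and expired TX-block timers as fatal */
	return mask & (CS_ERR_COUNT_ERROR | CS_ERR_TX_BLOCK_TIMER_EXPIRED);
}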
+
+/**
+ * struct iwl_cancel_channel_switch_cmd - Cancel Channel Switch command
+ *
+ * @mac_id: the mac that should cancel the channel switch
+ */
+struct iwl_cancel_channel_switch_cmd {
+ __le32 mac_id;
+} __packed; /* MAC_CANCEL_CHANNEL_SWITCH_S_VER_1 */
+
/**
* struct iwl_chan_switch_te_cmd - Channel Switch Time Event command
*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 11f0bd283e49..9b7caf968346 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -413,10 +413,11 @@ enum iwl_he_pkt_ext_constellations {
};
#define MAX_HE_SUPP_NSS 2
-#define MAX_HE_CHANNEL_BW_INDX 4
+#define MAX_CHANNEL_BW_INDX_API_D_VER_2 4
+#define MAX_CHANNEL_BW_INDX_API_D_VER_3 5
/**
- * struct iwl_he_pkt_ext - QAM thresholds
+ * struct iwl_he_pkt_ext_v1 - QAM thresholds
* The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
* The IE is organized in the following way:
* Support for Nss x BW (or RU) matrix:
@@ -435,9 +436,34 @@ enum iwl_he_pkt_ext_constellations {
* Nss (0-siso, 1-mimo2) x BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) x
* (0-low_th, 1-high_th)
*/
-struct iwl_he_pkt_ext {
- u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_HE_CHANNEL_BW_INDX][2];
-} __packed; /* PKT_EXT_DOT11AX_API_S */
+struct iwl_he_pkt_ext_v1 {
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2];
+} __packed; /* PKT_EXT_DOT11AX_API_S_VER_1 */
+
+/**
+ * struct iwl_he_pkt_ext_v2 - QAM thresholds
+ * The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
+ * The IE is organized in the following way:
+ * Support for Nss x BW (or RU) matrix:
+ * (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
+ * Each entry contains 2 QAM thresholds for 8us and 16us:
+ * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6=RES, 7=NONE
+ *	i.e. QAM_th1 < QAM_th2, such that if TX uses QAM_tx:
+ * QAM_tx < QAM_th1 --> PPE=0us
+ * QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
+ * QAM_th2 <= QAM_tx --> PPE=16us
+ * @pkt_ext_qam_th: QAM thresholds
+ *	For each Nss/Bw define 2 QAM thresholds (0..5)
+ *	For rates below the low_th, no need for PPE
+ *	For rates between low_th and high_th, need 8us PPE
+ *	For rates equal to or higher than the high_th, need 16us PPE
+ * Nss (0-siso, 1-mimo2) x
+ * BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz, 4-320MHz) x
+ * (0-low_th, 1-high_th)
+ */
+struct iwl_he_pkt_ext_v2 {
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_3][2];
+} __packed; /* PKT_EXT_DOT11AX_API_S_VER_2 */
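
For illustration only (not part of the patch): the threshold scheme described above as code. qam_tx uses the same 0..7 constellation encoding as the thresholds, and the helper is hypothetical.

static u8 example_ppe_us(const struct iwl_he_pkt_ext_v2 *pkt_ext,
			 int nss, int bw, u8 qam_tx)
{
	u8 low_th = pkt_ext->pkt_ext_qam_th[nss][bw][0];
	u8 high_th = pkt_ext->pkt_ext_qam_th[nss][bw][1];

	if (qam_tx < low_th)
		return 0;	/* no PPE needed */
	return qam_tx < high_th ? 8 : 16;
}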
/**
* enum iwl_he_sta_ctxt_flags - HE STA context flags
@@ -464,6 +490,11 @@ struct iwl_he_pkt_ext {
* @STA_CTXT_HE_RU_2MHZ_BLOCK: indicates that 26-tone RU OFDMA transmission are
* not allowed (as there are OBSS that might classify such transmissions as
* radar pulses).
+ * @STA_CTXT_HE_NDP_FEEDBACK_ENABLED: mark support for NDP feedback and change
+ * of threshold
+ * @STA_CTXT_EHT_PUNCTURE_MASK_VALID: indicates the puncture_mask field is valid
+ * @STA_CTXT_EHT_LONG_PPE_ENABLED: indicates the PPE requirement should be
+ * extended to 20us for BW > 160Mhz or for MCS w/ 4096-QAM.
*/
enum iwl_he_sta_ctxt_flags {
STA_CTXT_HE_REF_BSSID_VALID = BIT(4),
@@ -477,6 +508,9 @@ enum iwl_he_sta_ctxt_flags {
STA_CTXT_HE_MU_EDCA_CW = BIT(12),
STA_CTXT_HE_NIC_NOT_ACK_ENABLED = BIT(13),
STA_CTXT_HE_RU_2MHZ_BLOCK = BIT(14),
+ STA_CTXT_HE_NDP_FEEDBACK_ENABLED = BIT(15),
+ STA_CTXT_EHT_PUNCTURE_MASK_VALID = BIT(16),
+ STA_CTXT_EHT_LONG_PPE_ENABLED = BIT(17),
};
/**
@@ -551,7 +585,7 @@ struct iwl_he_sta_context_cmd_v1 {
u8 frag_min_size;
/* The below fields are set via PPE thresholds element */
- struct iwl_he_pkt_ext pkt_ext;
+ struct iwl_he_pkt_ext_v1 pkt_ext;
/* The below fields are set via HE-Operation IE */
u8 bss_color;
@@ -568,7 +602,7 @@ struct iwl_he_sta_context_cmd_v1 {
} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_1 */
/**
- * struct iwl_he_sta_context_cmd - configure FW to work with HE AP
+ * struct iwl_he_sta_context_cmd_v2 - configure FW to work with HE AP
* @sta_id: STA id
* @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
* 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
@@ -599,7 +633,7 @@ struct iwl_he_sta_context_cmd_v1 {
* @bssid_count: actual number of VAPs in the MultiBSS Set
* @reserved4: alignment
*/
-struct iwl_he_sta_context_cmd {
+struct iwl_he_sta_context_cmd_v2 {
u8 sta_id;
u8 tid_limit;
u8 reserved1;
@@ -619,7 +653,7 @@ struct iwl_he_sta_context_cmd {
u8 frag_min_size;
/* The below fields are set via PPE thresholds element */
- struct iwl_he_pkt_ext pkt_ext;
+ struct iwl_he_pkt_ext_v1 pkt_ext;
/* The below fields are set via HE-Operation IE */
u8 bss_color;
@@ -643,6 +677,81 @@ struct iwl_he_sta_context_cmd {
} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */
/**
+ * struct iwl_he_sta_context_cmd_v3 - configure FW to work with HE AP
+ * @sta_id: STA id
+ * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
+ * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
+ * @reserved1: reserved byte for future use
+ * @reserved2: reserved byte for future use
+ * @flags: see &enum iwl_he_sta_ctxt_flags
+ * @ref_bssid_addr: reference BSSID used by the AP
+ * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes
+ * @htc_flags: which features are supported in HTC
+ * @frag_flags: frag support in A-MSDU
+ * @frag_level: frag support level
+ * @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2)
+ * @frag_min_size: min frag size (except last frag)
+ * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa
+ * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
+ * @htc_trig_based_pkt_ext: default PE in 4us units
+ * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
+ * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1
+ * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
+ * @puncture_mask: puncture mask for EHT
+ * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
+ * @max_bssid_indicator: indicator of the max bssid supported on the associated
+ * bss
+ * @bssid_index: index of the associated VAP
+ * @ema_ap: AP supports enhanced Multi BSSID advertisement
+ * @profile_periodicity: number of Beacon periods that are needed to receive the
+ * complete VAPs info
+ * @bssid_count: actual number of VAPs in the MultiBSS Set
+ * @reserved4: alignment
+ */
+struct iwl_he_sta_context_cmd_v3 {
+ u8 sta_id;
+ u8 tid_limit;
+ u8 reserved1;
+ u8 reserved2;
+ __le32 flags;
+
+ /* The below fields are set via Multiple BSSID IE */
+ u8 ref_bssid_addr[6];
+ __le16 reserved0;
+
+ /* The below fields are set via HE-capabilities IE */
+ __le32 htc_flags;
+
+ u8 frag_flags;
+ u8 frag_level;
+ u8 frag_max_num;
+ u8 frag_min_size;
+
+ /* The below fields are set via PPE thresholds element */
+ struct iwl_he_pkt_ext_v2 pkt_ext;
+
+ /* The below fields are set via HE-Operation IE */
+ u8 bss_color;
+ u8 htc_trig_based_pkt_ext;
+ __le16 frame_time_rts_th;
+
+ /* Random access parameter set (i.e. RAPS) */
+ u8 rand_alloc_ecwmin;
+ u8 rand_alloc_ecwmax;
+ __le16 puncture_mask;
+
+ /* The below fields are set via MU EDCA parameter set element */
+ struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
+
+ u8 max_bssid_indicator;
+ u8 bssid_index;
+ u8 ema_ap;
+ u8 profile_periodicity;
+ u8 bssid_count;
+ u8 reserved4[3];
+} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */
+
+/**
* struct iwl_he_monitor_cmd - configure air sniffer for HE
* @bssid: the BSSID to sniff for
* @reserved1: reserved for dword alignment
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 4949fcf85257..91bfde6d5367 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -420,6 +420,30 @@ struct iwl_tas_config_cmd_v3 {
} __packed; /* TAS_CONFIG_CMD_API_S_VER_3 */
/**
+ * struct iwl_tas_config_cmd_v4 - configures the TAS
+ * @block_list_size: size of relevant field in block_list_array
+ * @block_list_array: list of countries where TAS must be disabled
+ * @override_tas_iec: indicates whether to override the default IEC regulatory
+ *	value
+ * @enable_tas_iec: in case override_tas_iec is set -
+ *	indicates whether IEC regulatory is enabled or disabled
+ * @usa_tas_uhb_allowed: if set, allow TAS UHB in the USA
+ * @reserved: reserved
+ */
+struct iwl_tas_config_cmd_v4 {
+ __le32 block_list_size;
+ __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
+ u8 override_tas_iec;
+ u8 enable_tas_iec;
+ u8 usa_tas_uhb_allowed;
+ u8 reserved;
+} __packed; /* TAS_CONFIG_CMD_API_S_VER_4 */
+
+union iwl_tas_config_cmd {
+ struct iwl_tas_config_cmd_v2 v2;
+ struct iwl_tas_config_cmd_v3 v3;
+ struct iwl_tas_config_cmd_v4 v4;
+};
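
For illustration only (not part of the patch): the union lets a caller size the host command by the TAS_CONFIG version the firmware advertises, using the cmd_id-based iwl_fw_lookup_cmd_ver() introduced later in this patch. The helper itself is hypothetical.

static int example_tas_cmd_size(const struct iwl_fw *fw)
{
	switch (iwl_fw_lookup_cmd_ver(fw,
				      WIDE_ID(REGULATORY_AND_NVM_GROUP,
					      TAS_CONFIG),
				      IWL_FW_CMD_VER_UNKNOWN)) {
	case 4:
		return sizeof(struct iwl_tas_config_cmd_v4);
	case 3:
		return sizeof(struct iwl_tas_config_cmd_v3);
	default:
		return sizeof(struct iwl_tas_config_cmd_v2);
	}
}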
+/**
* enum iwl_lari_configs - bit masks for the various LARI config operations
* @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine
* @LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK: ETSI 5.8GHz SRD passive scan
@@ -515,6 +539,32 @@ struct iwl_lari_config_change_cmd_v5 {
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_5 */
/**
+ * struct iwl_lari_config_change_cmd_v6 - change LARI configuration
+ * @config_bitmap: Bitmap of the config commands. Each bit will trigger a
+ * different predefined FW config operation.
+ * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
+ * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate the value to use.
+ * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate allow/disallow unii4 channels.
+ * @chan_state_active_bitmap: Bitmap for overriding channel state to active.
+ * Each bit represents a country or region to activate, according to the BIOS
+ * definitions.
+ * @force_disable_channels_bitmap: Bitmap of disabled bands/channels.
+ *	Each bit represents a set of channels in a specific band that should be disabled.
+ */
+struct iwl_lari_config_change_cmd_v6 {
+ __le32 config_bitmap;
+ __le32 oem_uhb_allow_bitmap;
+ __le32 oem_11ax_allow_bitmap;
+ __le32 oem_unii4_allow_bitmap;
+ __le32 chan_state_active_bitmap;
+ __le32 force_disable_channels_bitmap;
+} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_6 */
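
For illustration only (not part of the patch): the "two bits per country" encoding described above — an override bit paired with a value bit. The bit positions here are hypothetical; the real assignments come from the BIOS interface.

#define EXAMPLE_UNII4_OVERRIDE	BIT(0)	/* hypothetical: override default */
#define EXAMPLE_UNII4_ALLOW	BIT(1)	/* hypothetical: allow unii4 */

	struct iwl_lari_config_change_cmd_v6 cmd = {};

	cmd.oem_unii4_allow_bitmap = cpu_to_le32(EXAMPLE_UNII4_OVERRIDE |
						 EXAMPLE_UNII4_ALLOW);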
+
+/**
* struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
* @status: PNVM image loading status
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
index c04f2521fcb3..b1b9c29859c1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
@@ -166,14 +166,24 @@ struct iwl_dts_measurement_resp {
/**
* struct ct_kill_notif - CT-kill entry notification
+ * This structure represents both versions of this notification.
*
* @temperature: the current temperature in celsius
- * @reserved: reserved
+ * @dts: only in v2: bitmap of the DTS sensors that triggered the CT-kill:
+ *	bit 0: ToP master
+ *	bit 1: PA chain A master
+ *	bit 2: PA chain B master
+ *	bit 3: ToP slave
+ *	bit 4: PA chain A slave
+ *	bit 5: PA chain B slave
+ *	bits 6,7: reserved (set to 0)
+ * @scheme: only in v2: scheme that triggered the CT-kill (0-SW, 1-HW)
*/
struct ct_kill_notif {
__le16 temperature;
- __le16 reserved;
-} __packed; /* GRP_PHY_CT_KILL_NTF */
+ u8 dts;
+ u8 scheme;
+} __packed; /* CT_KILL_NOTIFICATION_API_S_VER_1, CT_KILL_NOTIFICATION_API_S_VER_2 */
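
For illustration only (not part of the patch): since the layout is shared, a handler can key off the notification version it looked up for this command; the helper is hypothetical.

static bool example_ct_kill_from_hw(const struct ct_kill_notif *notif,
				    u8 notif_ver)
{
	/* dts/scheme are only meaningful from v2 on; scheme 1 means HW */
	return notif_ver >= 2 && notif->scheme == 1;
}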
/**
* enum ctdp_cmd_operation - CTDP command operations
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 81318208f2f6..f92cac1da764 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -340,7 +340,7 @@ struct iwl_dev_tx_power_cmd_v5 {
} __packed; /* TX_REDUCED_POWER_API_S_VER_5 */
/**
- * struct iwl_dev_tx_power_cmd_v5 - TX power reduction command version 5
+ * struct iwl_dev_tx_power_cmd_v6 - TX power reduction command version 6
* @per_chain: per chain restrictions
* @enable_ack_reduction: enable or disable close range ack TX power
* reduction.
@@ -361,6 +361,28 @@ struct iwl_dev_tx_power_cmd_v6 {
} __packed; /* TX_REDUCED_POWER_API_S_VER_6 */
/**
+ * struct iwl_dev_tx_power_cmd_v7 - TX power reduction command version 7
+ * @per_chain: per chain restrictions
+ * @enable_ack_reduction: enable or disable close range ack TX power
+ * reduction.
+ * @per_chain_restriction_changed: whether per_chain_restriction has changed
+ *	since the last command. Used if set_mode is
+ *	IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ *	Note: if not changed, the command is used for keep-alive only.
+ * @reserved: reserved (padding)
+ * @timer_period: timer in milliseconds. If it expires, the FW will revert to
+ *	the default BIOS values. Relevant if set_mode is
+ *	IWL_TX_POWER_MODE_SET_SAR_TIMER
+ * @flags: reduce power flags.
+ */
+struct iwl_dev_tx_power_cmd_v7 {
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
+ u8 enable_ack_reduction;
+ u8 per_chain_restriction_changed;
+ u8 reserved[2];
+ __le32 timer_period;
+ __le32 flags;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_7 */
+
+/**
* struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion)
* @common: common part of the command
* @v3: version 3 part of the command
@@ -375,6 +397,7 @@ struct iwl_dev_tx_power_cmd {
struct iwl_dev_tx_power_cmd_v4 v4;
struct iwl_dev_tx_power_cmd_v5 v5;
struct iwl_dev_tx_power_cmd_v6 v6;
+ struct iwl_dev_tx_power_cmd_v7 v7;
};
};
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h
index c678b9aa9b55..1a84a4081e7c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2020 Intel Corporation
+ * Copyright (C) 2020-2021 Intel Corporation
*/
#ifndef __iwl_fw_api_rfi_h__
#define __iwl_fw_api_rfi_h__
@@ -57,4 +57,12 @@ struct iwl_rfi_freq_table_resp_cmd {
__le32 status;
} __packed; /* RFI_CONFIG_CMD_API_S_VER_1 */
+/**
+ * struct iwl_rfi_deactivate_notif - notification that the FW disabled RFIm
+ *
+ * @reason: used only for a log message
+ */
+struct iwl_rfi_deactivate_notif {
+ __le32 reason;
+} __packed; /* RFI_DEACTIVATE_NTF_S_VER_1 */
#endif /* __iwl_fw_api_rfi_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 4a7723eb8c1d..687f804c46b7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_rs_h__
@@ -133,7 +133,7 @@ enum IWL_TLC_MCS_PER_BW {
};
/**
- * struct tlc_config_cmd - TLC configuration
+ * struct iwl_tlc_config_cmd_v3 - TLC configuration
* @sta_id: station id
* @reserved1: reserved
* @max_ch_width: max supported channel width from @enum iwl_tlc_mng_cfg_cw
@@ -168,7 +168,7 @@ struct iwl_tlc_config_cmd_v3 {
} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_3 */
/**
- * struct tlc_config_cmd - TLC configuration
+ * struct iwl_tlc_config_cmd_v4 - TLC configuration
* @sta_id: station id
* @reserved1: reserved
* @max_ch_width: max supported channel width from &enum iwl_tlc_mng_cfg_cw
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index e73cc7380a26..ecc6706f66ed 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_tx_h__
@@ -296,8 +296,7 @@ struct iwl_tx_cmd_gen2 {
* @dram_info: FW internal DRAM storage
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
* cleared. Combination of RATE_MCS_*
- * @ttl: time to live - packet lifetime limit. The FW should drop if
- * passed.
+ * @reserved: reserved
* @hdr: 802.11 header
*/
struct iwl_tx_cmd_gen3 {
@@ -306,7 +305,7 @@ struct iwl_tx_cmd_gen3 {
__le32 offload_assist;
struct iwl_dram_sec_info dram_info;
__le32 rate_n_flags;
- __le64 ttl;
+ u8 reserved[8];
struct ieee80211_hdr hdr[];
} __packed; /* TX_CMD_API_S_VER_8,
TX_CMD_API_S_VER_10 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
index 8b3a00df41da..e018946310d1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2019-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2019-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -76,6 +76,8 @@ enum iwl_tx_queue_cfg_actions {
TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
};
+#define IWL_DEFAULT_QUEUE_SIZE_EHT (1024 * 4)
+#define IWL_DEFAULT_QUEUE_SIZE_HE 1024
#define IWL_DEFAULT_QUEUE_SIZE 256
#define IWL_MGMT_QUEUE_SIZE 16
#define IWL_CMD_QUEUE_SIZE 32
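
For illustration only (not part of the patch): how the new defines could select a default TFD queue size by the highest supported PHY generation. The predicate arguments are hypothetical stand-ins for the driver's capability checks.

static int example_default_queue_size(bool eht, bool he)
{
	if (eht)
		return IWL_DEFAULT_QUEUE_SIZE_EHT;
	if (he)
		return IWL_DEFAULT_QUEUE_SIZE_HE;
	return IWL_DEFAULT_QUEUE_SIZE;
}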
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 7ad9cee925da..abf49022edbe 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -12,7 +12,7 @@
#include "iwl-io.h"
#include "iwl-prph.h"
#include "iwl-csr.h"
-
+#include "iwl-fh.h"
/**
* struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump
*
@@ -303,9 +303,6 @@ static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt,
iwl_trans_release_nic_access(fwrt->trans);
}
-#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */
-#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */
-
struct iwl_prph_range {
u32 start, end;
};
@@ -1027,7 +1024,7 @@ struct iwl_dump_ini_region_data {
static int
iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1052,7 +1049,7 @@ iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1102,7 +1099,7 @@ iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1121,7 +1118,7 @@ static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_trans *trans = fwrt->trans;
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
@@ -1153,7 +1150,7 @@ static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1175,7 +1172,7 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
}
static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct page *page = fwrt->fw_paging_db[idx].fw_paging_block;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1195,7 +1192,7 @@ static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_error_dump_range *range;
u32 page_size;
@@ -1204,7 +1201,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
idx++;
if (!fwrt->trans->trans_cfg->gen2)
- return _iwl_dump_ini_paging_iter(fwrt, range_ptr, idx);
+ return _iwl_dump_ini_paging_iter(fwrt, range_ptr, range_len, idx);
range = range_ptr;
page_size = fwrt->trans->init_dram.paging[idx].size;
@@ -1220,7 +1217,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1239,7 +1236,7 @@ iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_mon_smem_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1307,7 +1304,7 @@ static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1442,7 +1439,7 @@ static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1509,7 +1506,7 @@ out:
static int
iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_region_err_table *err_table = &reg->err_table;
@@ -1528,7 +1525,7 @@ iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_region_special_device_memory *special_mem =
@@ -1549,7 +1546,7 @@ iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1561,8 +1558,6 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
return -EBUSY;
range->range_data_size = reg->dev_addr.size;
- iwl_write_prph_no_grab(fwrt->trans, DBGI_SRAM_TARGET_ACCESS_CFG,
- DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK);
for (i = 0; i < (le32_to_cpu(reg->dev_addr.size) / 4); i++) {
prph_data = iwl_read_prph_no_grab(fwrt->trans, (i % 2) ?
DBGI_SRAM_TARGET_ACCESS_RDATA_MSB :
@@ -1579,7 +1574,7 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_error_dump_range *range = range_ptr;
struct iwl_rx_packet *pkt = reg_data->dump_data->fw_pkt;
@@ -1598,10 +1593,37 @@ static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt,
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
+static int iwl_dump_ini_imr_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ /* read the IMR memory and DMA it to SRAM */
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ u64 imr_curr_addr = fwrt->trans->dbg.imr_data.imr_curr_addr;
+ u32 imr_rem_bytes = fwrt->trans->dbg.imr_data.imr2sram_remainbyte;
+ u32 sram_addr = fwrt->trans->dbg.imr_data.sram_addr;
+ u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
+ u32 size_to_dump = (imr_rem_bytes > sram_size) ? sram_size : imr_rem_bytes;
+
+ range->range_data_size = cpu_to_le32(size_to_dump);
+ if (iwl_trans_write_imr_mem(fwrt->trans, sram_addr,
+ imr_curr_addr, size_to_dump)) {
+ IWL_ERR(fwrt, "WRT_DEBUG: IMR Memory transfer failed\n");
+ return -1;
+ }
+
+ fwrt->trans->dbg.imr_data.imr_curr_addr = imr_curr_addr + size_to_dump;
+ fwrt->trans->dbg.imr_data.imr2sram_remainbyte -= size_to_dump;
+
+ iwl_trans_read_mem_bytes(fwrt->trans, sram_addr, range->data,
+ size_to_dump);
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
static void *
iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *data)
+ void *data, u32 data_len)
{
struct iwl_fw_ini_error_dump *dump = data;
@@ -1677,7 +1699,7 @@ iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
static void *
iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *data)
+ void *data, u32 data_len)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
@@ -1688,7 +1710,7 @@ iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
static void *
iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *data)
+ void *data, u32 data_len)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
@@ -1697,9 +1719,20 @@ iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
}
static void *
+iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data, u32 data_len)
+{
+ struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
+
+ return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
+ &fwrt->trans->cfg->mon_dbgi_regs);
+}
+
+static void *
iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *data)
+ void *data, u32 data_len)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_err_table_dump *dump = data;
@@ -1713,7 +1746,7 @@ iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt,
static void *
iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *data)
+ void *data, u32 data_len)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_special_device_memory *dump = data;
@@ -1725,6 +1758,18 @@ iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt,
return dump->data;
}
+static void *
+iwl_dump_ini_imr_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data, u32 data_len)
+{
+ struct iwl_fw_ini_error_dump *dump = data;
+
+ dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
+
+ return dump->data;
+}
+
static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
@@ -1784,6 +1829,26 @@ static u32 iwl_dump_ini_single_range(struct iwl_fw_runtime *fwrt,
return 1;
}
+static u32 iwl_dump_ini_imr_ranges(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+	/* range is the total number of pages that need to be copied from
+	 * IMR memory to SRAM and later from SRAM to DRAM
+	 */
+ u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable;
+ u32 imr_size = fwrt->trans->dbg.imr_data.imr_size;
+ u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
+
+ if (imr_enable == 0 || imr_size == 0 || sram_size == 0) {
+ IWL_DEBUG_INFO(fwrt,
+ "WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n",
+ imr_enable, imr_size, sram_size);
+ return 0;
+ }
+
+	return DIV_ROUND_UP(imr_size, sram_size);
+}
+
static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
@@ -1861,6 +1926,20 @@ iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt,
return size;
}
+static u32 iwl_dump_ini_mon_dbgi_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 size = le32_to_cpu(reg->dev_addr.size);
+ u32 ranges = iwl_dump_ini_mem_ranges(fwrt, reg_data);
+
+ if (!size || !ranges)
+ return 0;
+
+ return sizeof(struct iwl_fw_ini_monitor_dump) + ranges *
+ (size + sizeof(struct iwl_fw_ini_error_dump_range));
+}
+
static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
@@ -1948,6 +2027,33 @@ iwl_dump_ini_fw_pkt_get_size(struct iwl_fw_runtime *fwrt,
return size;
}
+static u32
+iwl_dump_ini_imr_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ u32 size = 0;
+ u32 ranges = 0;
+ u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable;
+ u32 imr_size = fwrt->trans->dbg.imr_data.imr_size;
+ u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
+
+ if (imr_enable == 0 || imr_size == 0 || sram_size == 0) {
+ IWL_DEBUG_INFO(fwrt,
+ "WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n",
+ imr_enable, imr_size, sram_size);
+ return size;
+ }
+ size = imr_size;
+ ranges = iwl_dump_ini_imr_ranges(fwrt, reg_data);
+	if (!size || !ranges) {
+		IWL_ERR(fwrt, "WRT: imr_size=%d, ranges=%d\n", size, ranges);
+ return 0;
+ }
+ size += sizeof(struct iwl_fw_ini_error_dump) +
+ ranges * sizeof(struct iwl_fw_ini_error_dump_range);
+ return size;
+}
+
/**
* struct iwl_dump_ini_mem_ops - ini memory dump operations
* @get_num_of_ranges: returns the number of memory ranges in the region.
@@ -1964,10 +2070,10 @@ struct iwl_dump_ini_mem_ops {
struct iwl_dump_ini_region_data *reg_data);
void *(*fill_mem_hdr)(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *data);
+ void *data, u32 data_len);
int (*fill_range)(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range, int idx);
+ void *range, u32 range_len, int idx);
};
/**
@@ -1990,24 +2096,53 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
struct iwl_fw_ini_error_dump_data *tlv;
struct iwl_fw_ini_error_dump_header *header;
u32 type = reg->type;
- u32 id = le32_to_cpu(reg->id);
+ u32 id = le32_get_bits(reg->id, IWL_FW_INI_REGION_ID_MASK);
u32 num_of_ranges, i, size;
- void *range;
-
- /*
- * The higher part of the ID from 2 is irrelevant for
- * us, so mask it out.
- */
- if (le32_to_cpu(reg->hdr.version) >= 2)
- id &= IWL_FW_INI_REGION_V2_MASK;
+ u8 *range;
+ u32 free_size;
+ u64 header_size;
+ u32 dump_policy = IWL_FW_INI_DUMP_VERBOSE;
+
+ IWL_DEBUG_FW(fwrt, "WRT: Collecting region: dump type=%d, id=%d, type=%d\n",
+ dump_policy, id, type);
+
+ if (le32_to_cpu(reg->hdr.version) >= 2) {
+ u32 dp = le32_get_bits(reg->id,
+ IWL_FW_INI_REGION_DUMP_POLICY_MASK);
+
+ if (dump_policy == IWL_FW_INI_DUMP_VERBOSE &&
+ !(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT)) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: no dump - type %d and policy mismatch=%d\n",
+ dump_policy, dp);
+ return 0;
+ } else if (dump_policy == IWL_FW_INI_DUMP_MEDIUM &&
+ !(dp & IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB)) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: no dump - type %d and policy mismatch=%d\n",
+ dump_policy, dp);
+ return 0;
+ } else if (dump_policy == IWL_FW_INI_DUMP_BRIEF &&
+ !(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB)) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: no dump - type %d and policy mismatch=%d\n",
+ dump_policy, dp);
+ return 0;
+ }
+ }
if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr ||
- !ops->fill_range)
+ !ops->fill_range) {
+ IWL_DEBUG_FW(fwrt, "WRT: no ops for collecting data\n");
return 0;
+ }
size = ops->get_size(fwrt, reg_data);
- if (!size)
+
+ if (size < sizeof(*header)) {
+ IWL_DEBUG_FW(fwrt, "WRT: size didn't include space for header\n");
return 0;
+ }
entry = vzalloc(sizeof(*entry) + sizeof(*tlv) + size);
if (!entry)
@@ -2022,9 +2157,6 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
tlv->reserved = reg->reserved;
tlv->len = cpu_to_le32(size);
- IWL_DEBUG_FW(fwrt, "WRT: Collecting region: id=%d, type=%d\n", id,
- type);
-
num_of_ranges = ops->get_num_of_ranges(fwrt, reg_data);
header = (void *)tlv->data;
@@ -2033,7 +2165,8 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
header->name_len = cpu_to_le32(IWL_FW_INI_MAX_NAME);
memcpy(header->name, reg->name, IWL_FW_INI_MAX_NAME);
- range = ops->fill_mem_hdr(fwrt, reg_data, header);
+ free_size = size;
+ range = ops->fill_mem_hdr(fwrt, reg_data, header, free_size);
if (!range) {
IWL_ERR(fwrt,
"WRT: Failed to fill region header: id=%d, type=%d\n",
@@ -2041,8 +2174,21 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
goto out_err;
}
+ header_size = range - (u8 *)header;
+
+ if (WARN(header_size > free_size,
+ "header size %llu > free_size %d",
+ header_size, free_size)) {
+ IWL_ERR(fwrt,
+ "WRT: fill_mem_hdr used more than given free_size\n");
+ goto out_err;
+ }
+
+ free_size -= header_size;
+
for (i = 0; i < num_of_ranges; i++) {
- int range_size = ops->fill_range(fwrt, reg_data, range, i);
+ int range_size = ops->fill_range(fwrt, reg_data, range,
+ free_size, i);
if (range_size < 0) {
IWL_ERR(fwrt,
@@ -2050,6 +2196,15 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
id, type);
goto out_err;
}
+
+ if (WARN(range_size > free_size, "range_size %d > free_size %d",
+ range_size, free_size)) {
+ IWL_ERR(fwrt,
+				"WRT: fill_range used more than given free_size\n");
+ goto out_err;
+ }
+
+ free_size -= range_size;
range = range + range_size;
}
@@ -2240,7 +2395,12 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
.fill_range = iwl_dump_ini_csr_iter,
},
- [IWL_FW_INI_REGION_DRAM_IMR] = {},
+ [IWL_FW_INI_REGION_DRAM_IMR] = {
+ .get_num_of_ranges = iwl_dump_ini_imr_ranges,
+ .get_size = iwl_dump_ini_imr_get_size,
+ .fill_mem_hdr = iwl_dump_ini_imr_fill_header,
+ .fill_range = iwl_dump_ini_imr_iter,
+ },
[IWL_FW_INI_REGION_PCI_IOSF_CONFIG] = {
.get_num_of_ranges = iwl_dump_ini_mem_ranges,
.get_size = iwl_dump_ini_mem_get_size,
@@ -2255,8 +2415,8 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
},
[IWL_FW_INI_REGION_DBGI_SRAM] = {
.get_num_of_ranges = iwl_dump_ini_mem_ranges,
- .get_size = iwl_dump_ini_mem_get_size,
- .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+ .get_size = iwl_dump_ini_mon_dbgi_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mon_dbgi_fill_header,
.fill_range = iwl_dump_ini_dbgi_sram_iter,
},
};
@@ -2270,6 +2430,9 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data reg_data = {
.dump_data = dump_data,
};
+ struct iwl_dump_ini_region_data imr_reg_data = {
+ .dump_data = dump_data,
+ };
int i;
u32 size = 0;
u64 regions_mask = le64_to_cpu(trigger->regions_mask) &
@@ -2305,10 +2468,32 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
tp_id);
continue;
}
+ /*
+	 * DRAM_IMR can be collected only at the FW/HW error time point,
+	 * when the fw is not alive. In addition, it must be collected
+	 * last, as it overwrites SRAM that may still contain debug data
+	 * which also needs to be collected.
+ */
+ if (reg_type == IWL_FW_INI_REGION_DRAM_IMR) {
+ if (tp_id == IWL_FW_INI_TIME_POINT_FW_ASSERT ||
+ tp_id == IWL_FW_INI_TIME_POINT_FW_HW_ERROR)
+ imr_reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i];
+ else
+ IWL_INFO(fwrt,
+ "WRT: trying to collect DRAM_IMR at time point: %d, skipping\n",
+ tp_id);
+ /* continue to next region */
+ continue;
+ }
+
size += iwl_dump_ini_mem(fwrt, list, &reg_data,
&iwl_dump_ini_region_ops[reg_type]);
}
+	/* collect the DRAM_IMR region last */
+	if (imr_reg_data.reg_tlv)
+		size += iwl_dump_ini_mem(fwrt, list, &imr_reg_data,
+					 &iwl_dump_ini_region_ops[IWL_FW_INI_REGION_DRAM_IMR]);
if (size)
size += iwl_dump_ini_info(fwrt, trigger, list);
@@ -2444,7 +2629,7 @@ static void iwl_fw_error_dump_data_free(struct iwl_fwrt_dump_data *dump_data)
static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_dump_data *dump_data)
{
- struct list_head dump_list = LIST_HEAD_INIT(dump_list);
+ LIST_HEAD(dump_list);
struct scatterlist *sg_dump_data;
u32 file_len = iwl_dump_ini_file_gen(fwrt, dump_data, &dump_list);
@@ -2589,7 +2774,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
delay = le32_to_cpu(trigger->stop_delay) * USEC_PER_MSEC;
}
- desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
+ desc = kzalloc(struct_size(desc, trig_desc.data, len), GFP_ATOMIC);
if (!desc)
return -ENOMEM;
@@ -2685,6 +2870,28 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
}
IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
+void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
+ u32 timepoint,
+ u32 timepoint_data)
+{
+ struct iwl_dbg_dump_complete_cmd hcmd_data;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DEBUG_GROUP, FW_DUMP_COMPLETE_CMD),
+ .data[0] = &hcmd_data,
+ .len[0] = sizeof(hcmd_data),
+ };
+
+ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
+ return;
+
+ if (fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT)) {
+ hcmd_data.tp = cpu_to_le32(timepoint);
+ hcmd_data.tp_data = cpu_to_le32(timepoint_data);
+ iwl_trans_send_cmd(fwrt->trans, &hcmd);
+ }
+}
+
/* this function assumes dump_start was called beforehand and dump_end will be
* called afterwards
*/
@@ -2693,10 +2900,16 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
struct iwl_fw_dbg_params params = {0};
struct iwl_fwrt_dump_data *dump_data =
&fwrt->dump.wks[wk_idx].dump_data;
-
+ u32 policy;
+	u32 time_point;
+
if (!test_bit(wk_idx, &fwrt->dump.active_wks))
return;
+ if (!dump_data->trig) {
+ IWL_ERR(fwrt, "dump trigger data is not set\n");
+ goto out;
+ }
+
if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) {
IWL_ERR(fwrt, "Device is not enabled - cannot dump error\n");
goto out;
@@ -2719,6 +2932,13 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
+ policy = le32_to_cpu(dump_data->trig->apply_policy);
+ time_point = le32_to_cpu(dump_data->trig->time_point);
+
+ if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
+ IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
+ iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
+ }
if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
iwl_force_nmi(fwrt->trans);
@@ -2777,10 +2997,10 @@ int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
"WRT: Collecting data: ini trigger %d fired (delay=%dms).\n",
tp_id, (u32)(delay / USEC_PER_MSEC));
- schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));
-
if (sync)
iwl_fw_dbg_collect_sync(fwrt, idx);
+ else
+ schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));
return 0;
}
@@ -2795,9 +3015,8 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
/* assumes the op mode mutex is locked in dump_start since
* iwl_fw_dbg_collect_sync can't run in parallel
*/
- if (fwrt->ops && fwrt->ops->dump_start &&
- fwrt->ops->dump_start(fwrt->ops_ctx))
- return;
+ if (fwrt->ops && fwrt->ops->dump_start)
+ fwrt->ops->dump_start(fwrt->ops_ctx);
iwl_fw_dbg_collect_sync(fwrt, wks->idx);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 8c3c890066b0..be7806407de8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2019, 2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2019, 2021-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -324,4 +324,7 @@ static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
}
void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt);
+void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
+ u32 timepoint,
+ u32 timepoint_data);
#endif /* __iwl_fw_dbg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index a152ce306475..43e997283db0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -150,7 +150,7 @@ static int iwl_dbgfs_enabled_severities_write(struct iwl_fw_runtime *fwrt,
{
struct iwl_dbg_host_event_cfg_cmd event_cfg;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(HOST_EVENT_CFG, DEBUG_GROUP, 0),
+ .id = WIDE_ID(DEBUG_GROUP, HOST_EVENT_CFG),
.flags = CMD_ASYNC,
.data[0] = &event_cfg,
.len[0] = sizeof(event_cfg),
@@ -358,7 +358,7 @@ static int iwl_dbgfs_fw_info_seq_show(struct seq_file *seq, void *v)
ver = &fw->ucode_capa.cmd_versions[state->pos];
- cmd_id = iwl_cmd_id(ver->cmd, ver->group, 0);
+ cmd_id = WIDE_ID(ver->group, ver->cmd);
seq_printf(seq, " 0x%04x:\n", cmd_id);
seq_printf(seq, " name: %s\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index efc6540d31af..5679a78758be 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -119,7 +119,7 @@ enum iwl_ucode_tlv_type {
struct iwl_ucode_tlv {
__le32 type; /* see above */
__le32 length; /* not including type/length fields */
- u8 data[0];
+ u8 data[];
};
#define IWL_TLV_UCODE_MAGIC 0x0a4c5749
@@ -310,7 +310,6 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
* @IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image
* @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
- * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
* @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
* @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
* @IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD: supports U-APSD on p2p interface when it
@@ -368,6 +367,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* reset flow
* @IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN: Support for passive scan on 6GHz PSC
* channels even when these are not enabled.
+ * @IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT: Support for indicating dump collection
+ * complete to FW.
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -386,7 +387,6 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG = (__force iwl_ucode_tlv_capa_t)17,
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18,
- IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19,
IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22,
IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD = (__force iwl_ucode_tlv_capa_t)26,
@@ -419,6 +419,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_BROADCAST_TWT = (__force iwl_ucode_tlv_capa_t)60,
IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO = (__force iwl_ucode_tlv_capa_t)61,
IWL_UCODE_TLV_CAPA_RFIM_SUPPORT = (__force iwl_ucode_tlv_capa_t)62,
+ IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT = (__force iwl_ucode_tlv_capa_t)63,
/* set 2 */
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
@@ -453,6 +454,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100,
IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT = (__force iwl_ucode_tlv_capa_t)104,
+ IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT = (__force iwl_ucode_tlv_capa_t)105,
#ifdef __CHECKER__
/* sparse says it cannot increment the previous enum member */
@@ -514,34 +516,6 @@ enum iwl_fw_phy_cfg {
FW_PHY_CFG_SHARED_CLK = BIT(31),
};
-#define IWL_UCODE_MAX_CS 1
-
-/**
- * struct iwl_fw_cipher_scheme - a cipher scheme supported by FW.
- * @cipher: a cipher suite selector
- * @flags: cipher scheme flags (currently reserved for a future use)
- * @hdr_len: a size of MPDU security header
- * @pn_len: a size of PN
- * @pn_off: an offset of pn from the beginning of the security header
- * @key_idx_off: an offset of key index byte in the security header
- * @key_idx_mask: a bit mask of key_idx bits
- * @key_idx_shift: bit shift needed to get key_idx
- * @mic_len: mic length in bytes
- * @hw_cipher: a HW cipher index used in host commands
- */
-struct iwl_fw_cipher_scheme {
- __le32 cipher;
- u8 flags;
- u8 hdr_len;
- u8 pn_len;
- u8 pn_off;
- u8 key_idx_off;
- u8 key_idx_mask;
- u8 key_idx_shift;
- u8 mic_len;
- u8 hw_cipher;
-} __packed;
-
enum iwl_fw_dbg_reg_operator {
CSR_ASSIGN,
CSR_SETBIT,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.c b/drivers/net/wireless/intel/iwlwifi/fw/img.c
index 530674a35eeb..b7deca05a953 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.c
@@ -2,13 +2,16 @@
/*
* Copyright(c) 2019 - 2021 Intel Corporation
*/
-
+#include <fw/api/commands.h>
#include "img.h"
-u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def)
+u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def)
{
const struct iwl_fw_cmd_version *entry;
unsigned int i;
+ /* prior to LONG_GROUP, we never used this CMD version API */
+ u8 grp = iwl_cmd_groupid(cmd_id) ?: LONG_GROUP;
+ u8 cmd = iwl_cmd_opcode(cmd_id);
if (!fw->ucode_capa.cmd_versions ||
!fw->ucode_capa.n_cmd_versions)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index fa7b1780064c..f878ac508801 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -134,16 +134,6 @@ struct iwl_fw_paging {
};
/**
- * struct iwl_fw_cscheme_list - a cipher scheme list
- * @size: a number of entries
- * @cs: cipher scheme entries
- */
-struct iwl_fw_cscheme_list {
- u8 size;
- struct iwl_fw_cipher_scheme cs[];
-} __packed;
-
-/**
* enum iwl_fw_type - iwlwifi firmware type
* @IWL_FW_DVM: DVM firmware
* @IWL_FW_MVM: MVM firmware
@@ -197,7 +187,6 @@ struct iwl_dump_exclude {
* @inst_evtlog_size: event log size for runtime ucode.
* @inst_errlog_ptr: error log offfset for runtime ucode.
* @type: firmware type (&enum iwl_fw_type)
- * @cipher_scheme: optional external cipher scheme.
* @human_readable: human readable version
* we get the ALIVE from the uCode
* @phy_integration_ver: PHY integration version string
@@ -228,7 +217,6 @@ struct iwl_fw {
enum iwl_fw_type type;
- struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS];
u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
struct iwl_fw_dbg dbg;
@@ -275,7 +263,7 @@ iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
return &fw->img[ucode_type];
}
-u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
+u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def);
u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
const char *iwl_fw_lookup_assert_desc(u32 num);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index 139ece879fab..135bd48bfe9f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -58,7 +58,7 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
{
struct iwl_soc_configuration_cmd cmd = {};
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(SOC_CONFIGURATION_CMD, SYSTEM_GROUP, 0),
+ .id = WIDE_ID(SYSTEM_GROUP, SOC_CONFIGURATION_CMD),
.data[0] = &cmd,
.len[0] = sizeof(cmd),
};
@@ -87,8 +87,7 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
cmd.flags |= le32_encode_bits(fwrt->trans->trans_cfg->ltr_delay,
SOC_FLAGS_LTR_APPLY_DELAY_MASK);
- if (iwl_fw_lookup_cmd_ver(fwrt->fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC,
+ if (iwl_fw_lookup_cmd_ver(fwrt->fw, SCAN_REQ_UMAC,
IWL_FW_CMD_VER_UNKNOWN) >= 2 &&
fwrt->trans->trans_cfg->low_latency_xtal)
cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
index 58ca3849d1f3..945bc4160cc9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
@@ -197,7 +197,7 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
}
memcpy(page_address(block->fw_paging_block),
- image->sec[sec_idx].data + offset, len);
+ (const u8 *)image->sec[sec_idx].data + offset, len);
block->fw_offs = image->sec[sec_idx].offset + offset;
dma_sync_single_for_device(fwrt->trans->dev,
block->fw_paging_phys,
@@ -243,7 +243,7 @@ static int iwl_send_paging_cmd(struct iwl_fw_runtime *fwrt,
.block_num = cpu_to_le32(fwrt->num_of_paging_blk),
};
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(FW_PAGING_BLOCK_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+ .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, FW_PAGING_BLOCK_CMD),
.len = { sizeof(paging_cmd), },
.data = { &paging_cmd, },
};
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index 7d4aa398729a..b6d3ac6ed440 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -33,7 +33,7 @@ static bool iwl_pnvm_complete_fn(struct iwl_notif_wait_data *notif_wait,
static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
size_t len)
{
- struct iwl_ucode_tlv *tlv;
+ const struct iwl_ucode_tlv *tlv;
u32 sha1 = 0;
u16 mac_type = 0, rf_id = 0;
u8 *pnvm_data = NULL, *tmp;
@@ -47,7 +47,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, tlv_type;
len -= sizeof(*tlv);
- tlv = (void *)data;
+ tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
@@ -70,7 +70,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
break;
}
- sha1 = le32_to_cpup((__le32 *)data);
+ sha1 = le32_to_cpup((const __le32 *)data);
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_PNVM_VERSION %0x\n",
@@ -87,8 +87,8 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
if (hw_match)
break;
- mac_type = le16_to_cpup((__le16 *)data);
- rf_id = le16_to_cpup((__le16 *)(data + sizeof(__le16)));
+ mac_type = le16_to_cpup((const __le16 *)data);
+ rf_id = le16_to_cpup((const __le16 *)(data + sizeof(__le16)));
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n",
@@ -99,7 +99,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
hw_match = true;
break;
case IWL_UCODE_TLV_SEC_RT: {
- struct iwl_pnvm_section *section = (void *)data;
+ const struct iwl_pnvm_section *section = (const void *)data;
u32 data_len = tlv_len - sizeof(*section);
IWL_DEBUG_FW(trans,
@@ -107,7 +107,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
tlv_len);
/* TODO: remove, this is a deprecated separator */
- if (le32_to_cpup((__le32 *)data) == 0xddddeeee) {
+ if (le32_to_cpup((const __le32 *)data) == 0xddddeeee) {
IWL_DEBUG_FW(trans, "Ignoring separator.\n");
break;
}
@@ -173,7 +173,7 @@ out:
static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
size_t len)
{
- struct iwl_ucode_tlv *tlv;
+ const struct iwl_ucode_tlv *tlv;
IWL_DEBUG_FW(trans, "Parsing PNVM file\n");
@@ -181,7 +181,7 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, tlv_type;
len -= sizeof(*tlv);
- tlv = (void *)data;
+ tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
@@ -193,8 +193,8 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
}
if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
- struct iwl_sku_id *sku_id =
- (void *)(data + sizeof(*tlv));
+ const struct iwl_sku_id *sku_id =
+ (const void *)(data + sizeof(*tlv));
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 3cb0ddbe3ab2..d3cb1ae68a96 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __iwl_fw_runtime_h__
#define __iwl_fw_runtime_h__
@@ -16,7 +16,7 @@
#include "fw/acpi.h"
struct iwl_fw_runtime_ops {
- int (*dump_start)(void *ctx);
+ void (*dump_start)(void *ctx);
void (*dump_end)(void *ctx);
bool (*fw_running)(void *ctx);
int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
@@ -163,6 +163,7 @@ struct iwl_fw_runtime {
u32 ppag_ver;
struct iwl_sar_offset_mapping_cmd sgom_table;
bool sgom_enabled;
+ u8 reduced_power_flags;
#endif
};
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
index f2f1789f470d..3f1272014daf 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -89,7 +89,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
if (fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
- cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
+ cmd.id = WIDE_ID(SYSTEM_GROUP, SHARED_MEM_CFG_CMD);
else
cmd.id = SHARED_MEM_CFG;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index bd82c24811c8..23b1d689ba7b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -69,7 +69,7 @@ out:
static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
const u8 *data, size_t len)
{
- struct iwl_ucode_tlv *tlv;
+ const struct iwl_ucode_tlv *tlv;
u8 *reduce_power_data = NULL, *tmp;
u32 size = 0;
@@ -79,7 +79,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
u32 tlv_len, tlv_type;
len -= sizeof(*tlv);
- tlv = (void *)data;
+ tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
@@ -154,7 +154,7 @@ out:
static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
const u8 *data, size_t len)
{
- struct iwl_ucode_tlv *tlv;
+ const struct iwl_ucode_tlv *tlv;
void *sec_data;
IWL_DEBUG_FW(trans, "Parsing REDUCE_POWER data\n");
@@ -163,7 +163,7 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
u32 tlv_len, tlv_type;
len -= sizeof(*tlv);
- tlv = (void *)data;
+ tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
@@ -175,8 +175,8 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
}
if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
- struct iwl_sku_id *sku_id =
- (void *)(data + sizeof(*tlv));
+ const struct iwl_sku_id *sku_id =
+ (const void *)(data + sizeof(*tlv));
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index e122b8b4e1fc..f5b556a103e8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -260,6 +260,7 @@ enum iwl_cfg_trans_ltr_delay {
* @integrated: discrete or integrated
* @low_latency_xtal: use the low latency xtal if supported
* @ltr_delay: LTR delay parameter, &enum iwl_cfg_trans_ltr_delay.
+ * @imr_enabled: use the IMR if supported.
*/
struct iwl_cfg_trans_params {
const struct iwl_base_params *base_params;
@@ -274,7 +275,8 @@ struct iwl_cfg_trans_params {
integrated:1,
low_latency_xtal:1,
bisr_workaround:1,
- ltr_delay:2;
+ ltr_delay:2,
+ imr_enabled:1;
};
/**
@@ -343,8 +345,8 @@ struct iwl_fw_mon_regs {
* @bisr_workaround: BISR hardware workaround (for 22260 series devices)
* @min_txq_size: minimum number of slots required in a TX queue
* @uhb_supported: ultra high band channels supported
- * @min_256_ba_txq_size: minimum number of slots required in a TX queue which
- * supports 256 BA aggregation
+ * @min_ba_txq_size: minimum number of slots required in a TX queue,
+ * based on the hardware's BA support (HE - 256, EHT - 1K).
* @num_rbds: number of receive buffer descriptors to use
* (only used for multi-queue capable devices)
* @mac_addr_csr_base: CSR base register for MAC address access, if not set
@@ -405,9 +407,10 @@ struct iwl_cfg {
u32 d3_debug_data_length;
u32 min_txq_size;
u32 gp2_reg_addr;
- u32 min_256_ba_txq_size;
+ u32 min_ba_txq_size;
const struct iwl_fw_mon_regs mon_dram_regs;
const struct iwl_fw_mon_regs mon_smem_regs;
+ const struct iwl_fw_mon_regs mon_dbgi_regs;
};
#define IWL_CFG_ANY (~0)
@@ -433,6 +436,7 @@ struct iwl_cfg {
#define IWL_CFG_RF_TYPE_HR1 0x10C
#define IWL_CFG_RF_TYPE_GF 0x10D
#define IWL_CFG_RF_TYPE_MR 0x110
+#define IWL_CFG_RF_TYPE_MS 0x111
#define IWL_CFG_RF_TYPE_FM 0x112
#define IWL_CFG_RF_ID_TH 0x1
@@ -489,6 +493,7 @@ extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_snj_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_so_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg;
extern const char iwl9162_name[];
@@ -509,6 +514,7 @@ extern const char iwl9560_killer_1550i_name[];
extern const char iwl9560_killer_1550s_name[];
extern const char iwl_ax200_name[];
extern const char iwl_ax203_name[];
+extern const char iwl_ax204_name[];
extern const char iwl_ax201_name[];
extern const char iwl_ax101_name[];
extern const char iwl_ax200_killer_1650w_name[];
@@ -631,9 +637,12 @@ extern const struct iwl_cfg iwl_cfg_ma_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0;
+extern const struct iwl_cfg iwl_cfg_ma_a0_ms_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_fm_a0;
extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0;
+extern const struct iwl_cfg iwl_cfg_snj_a0_ms_a0;
extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
+extern const struct iwl_cfg iwl_cfg_so_a0_ms_a0;
extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_bz_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_bz_a0_gf_a0;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
index 5adf485db38e..b84884034c74 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018, 2020-2021 Intel Corporation
+ * Copyright (C) 2018, 2020-2022 Intel Corporation
*/
#ifndef __iwl_context_info_file_gen3_h__
#define __iwl_context_info_file_gen3_h__
@@ -34,6 +34,7 @@ enum iwl_prph_scratch_mtr_format {
/**
* enum iwl_prph_scratch_flags - PRPH scratch control flags
+ * @IWL_PRPH_SCRATCH_IMR_DEBUG_EN: IMR support for debug
* @IWL_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf
* @IWL_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated
* in hwm config.
@@ -55,6 +56,7 @@ enum iwl_prph_scratch_mtr_format {
* @IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K: 16kB RB size
*/
enum iwl_prph_scratch_flags {
+ IWL_PRPH_SCRATCH_IMR_DEBUG_EN = BIT(1),
IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4),
IWL_PRPH_SCRATCH_EDBG_DEST_DRAM = BIT(8),
IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL = BIT(9),
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 8e10ba88afb3..3e1f011e93aa 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -534,6 +534,9 @@ enum {
* 11-8: queue selector
*/
#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
+/* This register is common for Tx and Rx; Rx queues start from 512 */
+#define HBUS_TARG_WRPTR_Q_SHIFT (16)
+#define HBUS_TARG_WRPTR_RX_Q(q) (((q) + 512) << HBUS_TARG_WRPTR_Q_SHIFT)
/**********************************************************
* CSR values
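For illustration, the new helper offsets the queue index into the Rx range and shifts it into the selector field; with a hypothetical Rx queue 2:

	/* HBUS_TARG_WRPTR_RX_Q(2) == (2 + 512) << 16 == 0x02020000 */
	iwl_write32(trans, HBUS_TARG_WRPTR,
		    HBUS_TARG_WRPTR_RX_Q(2) | wrptr);	/* wrptr: new index value */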
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index c73672d61356..866a33f49915 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include <linux/firmware.h>
#include "iwl-drv.h"
@@ -74,7 +74,8 @@ static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
if (!node)
return -ENOMEM;
- memcpy(&node->tlv, tlv, sizeof(node->tlv) + len);
+ memcpy(&node->tlv, tlv, sizeof(node->tlv));
+ memcpy(node->tlv.data, tlv->data, len);
list_add_tail(&node->list, list);
return 0;
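The copy is split because the destination is a fixed-size TLV header followed by a flexible array; a single memcpy() of header-plus-payload size writes past the declared header member and trips fortified memcpy bounds checks. A sketch of the shape involved (field layout assumed):

	struct iwl_dbg_tlv_node_sketch {	/* assumed: mirrors iwl_dbg_tlv_node */
		struct list_head list;
		struct iwl_ucode_tlv tlv;	/* header, then tlv.data[] payload */
	};

	/* Two copies, each bounded by the member it writes into: */
	memcpy(&node->tlv, tlv, sizeof(node->tlv));	/* header only */
	memcpy(node->tlv.data, tlv->data, len);		/* flex-array payload */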
@@ -181,11 +182,11 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);
/*
- * The higher part of the ID in from version 2 is irrelevant for
- * us, so mask it out.
+ * From version 2 on, the higher part of the ID carries the debug
+ * policy. The ID itself is only the 16 LSBs, so mask the rest out.
*/
if (le32_to_cpu(reg->hdr.version) >= 2)
- id &= IWL_FW_INI_REGION_V2_MASK;
+ id &= IWL_FW_INI_REGION_ID_MASK;
if (le32_to_cpu(tlv->length) < sizeof(*reg))
return -EINVAL;
@@ -211,6 +212,14 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
return -EOPNOTSUPP;
}
+ if (type == IWL_FW_INI_REGION_INTERNAL_BUFFER) {
+ trans->dbg.imr_data.sram_addr =
+ le32_to_cpu(reg->internal_buffer.base_addr);
+ trans->dbg.imr_data.sram_size =
+ le32_to_cpu(reg->internal_buffer.size);
+ }
+
+
active_reg = &trans->dbg.active_regions[id];
if (*active_reg) {
IWL_WARN(trans, "WRT: Overriding region id %u\n", id);
@@ -271,7 +280,7 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
const struct iwl_ucode_tlv *tlv)
{
- struct iwl_fw_ini_conf_set_tlv *conf_set = (void *)tlv->data;
+ const struct iwl_fw_ini_conf_set_tlv *conf_set = (const void *)tlv->data;
u32 tp = le32_to_cpu(conf_set->time_point);
u32 type = le32_to_cpu(conf_set->set_type);
@@ -460,7 +469,7 @@ static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
while (len >= sizeof(*tlv)) {
len -= sizeof(*tlv);
- tlv = (void *)data;
+ tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
@@ -577,8 +586,7 @@ static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
return 0;
num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
- if (!fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP)) {
+ if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
return -EIO;
num_frags = 1;
@@ -762,33 +770,40 @@ static int iwl_dbg_tlv_update_dram(struct iwl_fw_runtime *fwrt,
static void iwl_dbg_tlv_update_drams(struct iwl_fw_runtime *fwrt)
{
- int ret, i, dram_alloc = 0;
- struct iwl_dram_info dram_info;
+ int ret, i;
+ bool dram_alloc = false;
struct iwl_dram_data *frags =
&fwrt->trans->dbg.fw_mon_ini[IWL_FW_INI_ALLOCATION_ID_DBGC1].frags[0];
+ struct iwl_dram_info *dram_info;
+
+ if (!frags || !frags->block)
+ return;
+
+ dram_info = frags->block;
if (!fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT))
return;
- dram_info.first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
- dram_info.second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);
+ dram_info->first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
+ dram_info->second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);
for (i = IWL_FW_INI_ALLOCATION_ID_DBGC1;
i <= IWL_FW_INI_ALLOCATION_ID_DBGC3; i++) {
- ret = iwl_dbg_tlv_update_dram(fwrt, i, &dram_info);
+ ret = iwl_dbg_tlv_update_dram(fwrt, i, dram_info);
if (!ret)
- dram_alloc++;
+ dram_alloc = true;
else
IWL_WARN(fwrt,
"WRT: Failed to set DRAM buffer for alloc id %d, ret=%d\n",
i, ret);
}
- if (dram_alloc) {
- memcpy(frags->block, &dram_info, sizeof(dram_info));
- IWL_DEBUG_FW(fwrt, "block data after %016x\n",
- *((int *)fwrt->trans->dbg.fw_mon_ini[1].frags[0].block));
- }
+
+ if (dram_alloc)
+ IWL_DEBUG_FW(fwrt, "block data after %08x\n",
+ dram_info->first_word);
+ else
+ memset(frags->block, 0, sizeof(*dram_info));
}
static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
@@ -811,11 +826,11 @@ static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
}
static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
- struct list_head *config_list)
+ struct list_head *conf_list)
{
struct iwl_dbg_tlv_node *node;
- list_for_each_entry(node, config_list, list) {
+ list_for_each_entry(node, conf_list, list) {
struct iwl_fw_ini_conf_set_tlv *config_list = (void *)node->tlv.data;
u32 count, address, value;
u32 len = (le32_to_cpu(node->tlv.length) - sizeof(*config_list)) / 8;
@@ -861,11 +876,18 @@ static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
case IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: {
struct iwl_dbgc1_info dram_info = {};
struct iwl_dram_data *frags = &fwrt->trans->dbg.fw_mon_ini[1].frags[0];
- __le64 dram_base_addr = cpu_to_le64(frags->physical);
- __le32 dram_size = cpu_to_le32(frags->size);
- u64 dram_addr = le64_to_cpu(dram_base_addr);
+ __le64 dram_base_addr;
+ __le32 dram_size;
+ u64 dram_addr;
u32 ret;
+ if (!frags)
+ break;
+
+ dram_base_addr = cpu_to_le64(frags->physical);
+ dram_size = cpu_to_le32(frags->size);
+ dram_addr = le64_to_cpu(dram_base_addr);
+
IWL_DEBUG_FW(fwrt, "WRT: dram_base_addr 0x%016llx, dram_size 0x%x\n",
dram_base_addr, dram_size);
IWL_DEBUG_FW(fwrt, "WRT: config_list->addr_offset: %u\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
index 79287708bd6e..128059ca77e6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __iwl_dbg_tlv_h__
#define __iwl_dbg_tlv_h__
@@ -10,6 +10,8 @@
#include <fw/file.h>
#include <fw/api/dbg-tlv.h>
+#define IWL_DBG_TLV_MAX_PRESET 15
+
/**
* struct iwl_dbg_tlv_node - debug TLV node
* @list: list of &struct iwl_dbg_tlv_node
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 6651e78b39ec..a2203f661321 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -243,14 +243,14 @@ struct iwl_firmware_pieces {
/* FW debug data parsed for driver usage */
bool dbg_dest_tlv_init;
- u8 *dbg_dest_ver;
+ const u8 *dbg_dest_ver;
union {
- struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
- struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
+ const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+ const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
};
- struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
+ const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
- struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
+ const struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
size_t n_mem_tlv;
@@ -324,29 +324,6 @@ static void set_sec_offset(struct iwl_firmware_pieces *pieces,
pieces->img[type].sec[sec].offset = offset;
}
-static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
-{
- int i, j;
- struct iwl_fw_cscheme_list *l = (struct iwl_fw_cscheme_list *)data;
- struct iwl_fw_cipher_scheme *fwcs;
-
- if (len < sizeof(*l) ||
- len < sizeof(l->size) + l->size * sizeof(l->cs[0]))
- return -EINVAL;
-
- for (i = 0, j = 0; i < IWL_UCODE_MAX_CS && i < l->size; i++) {
- fwcs = &l->cs[j];
-
- /* we skip schemes with zero cipher suite selector */
- if (!fwcs->cipher)
- continue;
-
- fw->cs[j++] = *fwcs;
- }
-
- return 0;
-}
-
/*
* Gets uCode section from tlv.
*/
@@ -356,13 +333,13 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
{
struct fw_img_parsing *img;
struct fw_sec *sec;
- struct fw_sec_parsing *sec_parse;
+ const struct fw_sec_parsing *sec_parse;
size_t alloc_size;
if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX))
return -1;
- sec_parse = (struct fw_sec_parsing *)data;
+ sec_parse = (const struct fw_sec_parsing *)data;
img = &pieces->img[type];
@@ -385,8 +362,8 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
{
- struct iwl_tlv_calib_data *def_calib =
- (struct iwl_tlv_calib_data *)data;
+ const struct iwl_tlv_calib_data *def_calib =
+ (const struct iwl_tlv_calib_data *)data;
u32 ucode_type = le32_to_cpu(def_calib->ucode_type);
if (ucode_type >= IWL_UCODE_TYPE_MAX) {
IWL_ERR(drv, "Wrong ucode_type %u for default calibration.\n",
@@ -404,7 +381,7 @@ static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
struct iwl_ucode_capabilities *capa)
{
- const struct iwl_ucode_api *ucode_api = (void *)data;
+ const struct iwl_ucode_api *ucode_api = (const void *)data;
u32 api_index = le32_to_cpu(ucode_api->api_index);
u32 api_flags = le32_to_cpu(ucode_api->api_flags);
int i;
@@ -425,7 +402,7 @@ static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
static void iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
struct iwl_ucode_capabilities *capa)
{
- const struct iwl_ucode_capa *ucode_capa = (void *)data;
+ const struct iwl_ucode_capa *ucode_capa = (const void *)data;
u32 api_index = le32_to_cpu(ucode_capa->api_index);
u32 api_flags = le32_to_cpu(ucode_capa->api_capa);
int i;
@@ -457,7 +434,7 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
const struct firmware *ucode_raw,
struct iwl_firmware_pieces *pieces)
{
- struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
+ const struct iwl_ucode_header *ucode = (const void *)ucode_raw->data;
u32 api_ver, hdr_size, build;
char buildstr[25];
const u8 *src;
@@ -600,7 +577,7 @@ static void iwl_parse_dbg_tlv_assert_tables(struct iwl_drv *drv,
sizeof(region->special_mem))
return;
- region = (void *)tlv->data;
+ region = (const void *)tlv->data;
addr = le32_to_cpu(region->special_mem.base_addr);
addr += le32_to_cpu(region->special_mem.offset);
addr &= ~FW_ADDR_CACHE_CONTROL;
@@ -655,7 +632,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
struct iwl_ucode_capabilities *capa,
bool *usniffer_images)
{
- struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
+ const struct iwl_tlv_ucode_header *ucode = (const void *)ucode_raw->data;
const struct iwl_ucode_tlv *tlv;
size_t len = ucode_raw->size;
const u8 *data;
@@ -704,8 +681,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
while (len >= sizeof(*tlv)) {
len -= sizeof(*tlv);
- tlv = (void *)data;
+ tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
tlv_data = tlv->data;
@@ -762,7 +739,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
capa->max_probe_length =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_PAN:
if (tlv_len)
@@ -783,7 +760,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
* will not work with the new firmware, or
* it'll not take advantage of new features.
*/
- capa->flags = le32_to_cpup((__le32 *)tlv_data);
+ capa->flags = le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_API_CHANGES_SET:
if (tlv_len != sizeof(struct iwl_ucode_api))
@@ -799,37 +776,37 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->init_evtlog_ptr =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->init_evtlog_size =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->init_errlog_ptr =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->inst_evtlog_ptr =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->inst_evtlog_size =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->inst_errlog_ptr =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_ENHANCE_SENS_TBL:
if (tlv_len)
@@ -858,7 +835,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
capa->standard_phy_calibration_size =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_SEC_RT:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
@@ -884,7 +861,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_PHY_SKU:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
- drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data);
+ drv->fw.phy_config = le32_to_cpup((const __le32 *)tlv_data);
drv->fw.valid_tx_ant = (drv->fw.phy_config &
FW_PHY_CFG_TX_CHAIN) >>
FW_PHY_CFG_TX_CHAIN_POS;
@@ -911,7 +888,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
num_of_cpus =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
if (num_of_cpus == 2) {
drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus =
@@ -925,18 +902,14 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -EINVAL;
}
break;
- case IWL_UCODE_TLV_CSCHEME:
- if (iwl_store_cscheme(&drv->fw, tlv_data, tlv_len))
- goto invalid_tlv_len;
- break;
case IWL_UCODE_TLV_N_SCAN_CHANNELS:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
capa->n_scan_channels =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_FW_VERSION: {
- __le32 *ptr = (void *)tlv_data;
+ const __le32 *ptr = (const void *)tlv_data;
u32 major, minor;
u8 local_comp;
@@ -960,15 +933,15 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
case IWL_UCODE_TLV_FW_DBG_DEST: {
- struct iwl_fw_dbg_dest_tlv *dest = NULL;
- struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
+ const struct iwl_fw_dbg_dest_tlv *dest = NULL;
+ const struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
u8 mon_mode;
- pieces->dbg_dest_ver = (u8 *)tlv_data;
+ pieces->dbg_dest_ver = (const u8 *)tlv_data;
if (*pieces->dbg_dest_ver == 1) {
- dest = (void *)tlv_data;
+ dest = (const void *)tlv_data;
} else if (*pieces->dbg_dest_ver == 0) {
- dest_v1 = (void *)tlv_data;
+ dest_v1 = (const void *)tlv_data;
} else {
IWL_ERR(drv,
"The version is %d, and it is invalid\n",
@@ -1009,7 +982,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
case IWL_UCODE_TLV_FW_DBG_CONF: {
- struct iwl_fw_dbg_conf_tlv *conf = (void *)tlv_data;
+ const struct iwl_fw_dbg_conf_tlv *conf =
+ (const void *)tlv_data;
if (!pieces->dbg_dest_tlv_init) {
IWL_ERR(drv,
@@ -1043,8 +1017,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
case IWL_UCODE_TLV_FW_DBG_TRIGGER: {
- struct iwl_fw_dbg_trigger_tlv *trigger =
- (void *)tlv_data;
+ const struct iwl_fw_dbg_trigger_tlv *trigger =
+ (const void *)tlv_data;
u32 trigger_id = le32_to_cpu(trigger->id);
if (trigger_id >= ARRAY_SIZE(drv->fw.dbg.trigger_tlv)) {
@@ -1075,7 +1049,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
}
drv->fw.dbg.dump_mask =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
}
case IWL_UCODE_TLV_SEC_RT_USNIFFER:
@@ -1087,7 +1061,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_PAGING:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
- paging_mem_size = le32_to_cpup((__le32 *)tlv_data);
+ paging_mem_size = le32_to_cpup((const __le32 *)tlv_data);
IWL_DEBUG_FW(drv,
"Paging: paging enabled (size = %u bytes)\n",
@@ -1117,8 +1091,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
/* ignored */
break;
case IWL_UCODE_TLV_FW_MEM_SEG: {
- struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
- (void *)tlv_data;
+ const struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
+ (const void *)tlv_data;
size_t size;
struct iwl_fw_dbg_mem_seg_tlv *n;
@@ -1146,10 +1120,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
case IWL_UCODE_TLV_FW_RECOVERY_INFO: {
- struct {
+ const struct {
__le32 buf_addr;
__le32 buf_size;
- } *recov_info = (void *)tlv_data;
+ } *recov_info = (const void *)tlv_data;
if (tlv_len != sizeof(*recov_info))
goto invalid_tlv_len;
@@ -1160,10 +1134,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
}
break;
case IWL_UCODE_TLV_FW_FSEQ_VERSION: {
- struct {
+ const struct {
u8 version[32];
u8 sha1[20];
- } *fseq_ver = (void *)tlv_data;
+ } *fseq_ver = (const void *)tlv_data;
if (tlv_len != sizeof(*fseq_ver))
goto invalid_tlv_len;
@@ -1174,19 +1148,19 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_FW_NUM_STATIONS:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
- if (le32_to_cpup((__le32 *)tlv_data) >
+ if (le32_to_cpup((const __le32 *)tlv_data) >
IWL_MVM_STATION_COUNT_MAX) {
IWL_ERR(drv,
"%d is an invalid number of station\n",
- le32_to_cpup((__le32 *)tlv_data));
+ le32_to_cpup((const __le32 *)tlv_data));
goto tlv_error;
}
capa->num_stations =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_UMAC_DEBUG_ADDRS: {
- struct iwl_umac_debug_addrs *dbg_ptrs =
- (void *)tlv_data;
+ const struct iwl_umac_debug_addrs *dbg_ptrs =
+ (const void *)tlv_data;
if (tlv_len != sizeof(*dbg_ptrs))
goto invalid_tlv_len;
@@ -1201,8 +1175,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
case IWL_UCODE_TLV_LMAC_DEBUG_ADDRS: {
- struct iwl_lmac_debug_addrs *dbg_ptrs =
- (void *)tlv_data;
+ const struct iwl_lmac_debug_addrs *dbg_ptrs =
+ (const void *)tlv_data;
if (tlv_len != sizeof(*dbg_ptrs))
goto invalid_tlv_len;
@@ -1277,7 +1251,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (len) {
IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len);
- iwl_print_hex_dump(drv, IWL_DL_FW, (u8 *)data, len);
+ iwl_print_hex_dump(drv, IWL_DL_FW, data, len);
return -EINVAL;
}
@@ -1418,7 +1392,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
{
struct iwl_drv *drv = context;
struct iwl_fw *fw = &drv->fw;
- struct iwl_ucode_header *ucode;
+ const struct iwl_ucode_header *ucode;
struct iwlwifi_opmode_table *op;
int err;
struct iwl_firmware_pieces *pieces;
@@ -1456,7 +1430,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
}
/* Data from ucode file: header followed by uCode images */
- ucode = (struct iwl_ucode_header *)ucode_raw->data;
+ ucode = (const struct iwl_ucode_header *)ucode_raw->data;
if (ucode->ver)
err = iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces);
@@ -1645,6 +1619,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
/* We have our copies now, allow OS release its copies */
release_firmware(ucode_raw);
+ iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
+
mutex_lock(&iwlwifi_opmode_table_mtx);
switch (fw->type) {
case IWL_FW_DVM:
@@ -1661,8 +1637,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
drv->fw.fw_version, op->name);
- iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
-
/* add this device to the list of devices using this op_mode */
list_add_tail(&drv->list, &op->drv);
@@ -1796,6 +1770,7 @@ void iwl_drv_stop(struct iwl_drv *drv)
kfree(drv);
}
+#define ENABLE_INI (IWL_DBG_TLV_MAX_PRESET + 1)
/* shared module parameters */
struct iwl_mod_params iwlwifi_mod_params = {
@@ -1803,7 +1778,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
.bt_coex_active = true,
.power_level = IWL_POWER_INDEX_1,
.uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT,
- .enable_ini = true,
+ .enable_ini = ENABLE_INI,
/* the rest are 0 by default */
};
IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
@@ -1915,10 +1890,42 @@ MODULE_PARM_DESC(nvm_file, "NVM file name");
module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
MODULE_PARM_DESC(uapsd_disable,
"disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
-module_param_named(enable_ini, iwlwifi_mod_params.enable_ini,
- bool, S_IRUGO | S_IWUSR);
+
+static int enable_ini_set(const char *arg, const struct kernel_param *kp)
+{
+ int ret = 0;
+ bool res;
+ __u32 new_enable_ini;
+
+ /* in case the argument type is a number */
+ ret = kstrtou32(arg, 0, &new_enable_ini);
+ if (!ret) {
+ if (new_enable_ini > ENABLE_INI) {
+ pr_err("enable_ini cannot be %d, in range 0-16\n", new_enable_ini);
+ return -EINVAL;
+ }
+ goto out;
+ }
+
+ /* in case the argument type is boolean */
+ ret = kstrtobool(arg, &res);
+ if (ret)
+ return ret;
+ new_enable_ini = (res ? ENABLE_INI : 0);
+
+out:
+ iwlwifi_mod_params.enable_ini = new_enable_ini;
+ return 0;
+}
+
+static const struct kernel_param_ops enable_ini_ops = {
+ .set = enable_ini_set
+};
+
+module_param_cb(enable_ini, &enable_ini_ops, &iwlwifi_mod_params.enable_ini, 0644);
MODULE_PARM_DESC(enable_ini,
- "Enable debug INI TLV FW debug infrastructure (default: true");
+ "0:disable, 1-15:FW_DBG_PRESET Values, 16:enabled without preset value defined,"
+ "Debug INI TLV FW debug infrastructure (default: 16)");
/*
* set bt_coex_active to true, uCode will do kill/defer
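For reference, a summary of the values the new setter accepts (semantics as implemented above; the boolean forms come from the kstrtobool() fallback):

	/* enable_ini=0		INI debug off
	 * enable_ini=1..15	use that FW_DBG_PRESET value
	 * enable_ini=16	enabled without a preset (ENABLE_INI, the default)
	 * enable_ini=Y / =N	mapped to 16 / 0 via the kstrtobool() fallback
	 */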
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
index 0fd009e6d685..80073f973334 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
@@ -84,7 +84,7 @@ void iwl_drv_stop(struct iwl_drv *drv);
* everything is built-in, then we can avoid that.
*/
#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
-#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_GPL(sym)
+#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_NS_GPL(sym, IWLWIFI)
#else
#define IWL_EXPORT_SYMBOL(sym)
#endif
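Since the exports now live in the IWLWIFI symbol namespace, any module consuming them has to import it; a one-line sketch of what a consumer (e.g. an opmode module) needs:

	MODULE_IMPORT_NS(IWLWIFI);	/* required to link against EXPORT_SYMBOL_NS_GPL(sym, IWLWIFI) */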
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
index b9e86bf972e5..5f386bb1a353 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
@@ -23,26 +23,22 @@
*/
#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
-#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
-#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-
-
/*
* The device's EEPROM semaphore prevents conflicts between driver and uCode
* when accessing the EEPROM; each access is a series of pulses to/from the
* EEPROM chip, not a single event, so even reads could conflict if they
* weren't arbitrated by the semaphore.
*/
+#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
+#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
-#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
{
u16 count;
int ret;
- for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
+ for (count = 0; count < IWL_EEPROM_SEM_RETRY_LIMIT; count++) {
/* Request semaphore */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
@@ -51,7 +47,7 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- EEPROM_SEM_TIMEOUT);
+ IWL_EEPROM_SEM_TIMEOUT);
if (ret >= 0) {
IWL_DEBUG_EEPROM(trans->dev,
"Acquired semaphore after %d tries.\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index e6fd4941a4cb..bedd78a47f67 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fh_h__
@@ -590,11 +590,31 @@ struct iwl_rb_status {
#define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3)
#define TFD_QUEUE_SIZE_BC_DUP (64)
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
-#define TFD_QUEUE_BC_SIZE_GEN3 1024
+#define TFD_QUEUE_BC_SIZE_GEN3_AX210 1024
+#define TFD_QUEUE_BC_SIZE_GEN3_BZ (1024 * 4)
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS 20
#define IWL_TFH_NUM_TBS 25
+/* IMR DMA registers */
+#define IMR_TFH_SRV_DMA_CHNL0_CTRL 0x00a0a51c
+#define IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR 0x00a0a520
+#define IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB 0x00a0a524
+#define IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB 0x00a0a528
+#define IMR_TFH_SRV_DMA_CHNL0_BC 0x00a0a52c
+#define TFH_SRV_DMA_CHNL0_LEFT_BC 0x00a0a530
+
+/* RFH S2D DMA registers */
+#define IMR_RFH_GEN_CFG_SERVICE_DMA_RS_MSK 0x0000000c
+#define IMR_RFH_GEN_CFG_SERVICE_DMA_SNOOP_MSK 0x00000002
+
+/* TFH D2S DMA registers */
+#define IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK 0x80000000
+#define IMR_UREG_CHICK 0x00d05c00
+#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS 0x00800000
+#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK 0x00000030
+#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS 0x80000000
+
static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
{
return (sizeof(addr) > sizeof(u32) ? upper_32_bits(addr) : 0) & 0xF;
@@ -707,14 +727,14 @@ struct iwlagn_scd_bc_tbl {
} __packed;
/**
- * struct iwl_gen3_bc_tbl scheduler byte count table gen3
+ * struct iwl_gen3_bc_tbl_entry - scheduler byte count table entry gen3
* For AX210 and on:
* @tfd_offset: 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
*/
-struct iwl_gen3_bc_tbl {
- __le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3];
+struct iwl_gen3_bc_tbl_entry {
+ __le16 tfd_offset;
} __packed;
#endif /* !__iwl_fh_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 253eac4cbf59..396f2c997da6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -65,14 +65,14 @@ IWL_EXPORT_SYMBOL(iwl_poll_bit);
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
{
- u32 value = 0x5a5a5a5a;
-
if (iwl_trans_grab_nic_access(trans)) {
- value = iwl_read32(trans, reg);
+ u32 value = iwl_read32(trans, reg);
+
iwl_trans_release_nic_access(trans);
+ return value;
}
- return value;
+ return 0x5a5a5a5a;
}
IWL_EXPORT_SYMBOL(iwl_read_direct32);
@@ -135,13 +135,15 @@ IWL_EXPORT_SYMBOL(iwl_write_prph64_no_grab);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
- u32 val = 0x5a5a5a5a;
-
if (iwl_trans_grab_nic_access(trans)) {
- val = iwl_read_prph_no_grab(trans, ofs);
+ u32 val = iwl_read_prph_no_grab(trans, ofs);
+
iwl_trans_release_nic_access(trans);
+
+ return val;
}
- return val;
+
+ return 0x5a5a5a5a;
}
IWL_EXPORT_SYMBOL(iwl_read_prph);
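Both helpers now follow the same grab/read/release shape, returning from inside the guarded block and falling through to the 0x5a5a5a5a poison value only when NIC access cannot be grabbed. The pattern in isolation:

	u32 read_with_access(struct iwl_trans *trans, u32 reg)
	{
		if (iwl_trans_grab_nic_access(trans)) {
			u32 val = iwl_read32(trans, reg);

			iwl_trans_release_nic_access(trans);
			return val;		/* real register contents */
		}
		return 0x5a5a5a5a;		/* poison: access not granted */
	}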
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index 004ebdac4535..d0b4d02bdab9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
*/
#ifndef __iwl_modparams_h__
#define __iwl_modparams_h__
@@ -83,7 +83,8 @@ struct iwl_mod_params {
*/
bool disable_11ax;
bool remove_when_gone;
- bool enable_ini;
+ u32 enable_ini;
+ bool disable_11be;
};
static inline bool iwl_enable_rx_ampdu(void)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index dd58c8f9aa11..9040da3dcce3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -375,10 +375,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
if (v4)
ch_flags =
- __le32_to_cpup((__le32 *)nvm_ch_flags + ch_idx);
+ __le32_to_cpup((const __le32 *)nvm_ch_flags + ch_idx);
else
ch_flags =
- __le16_to_cpup((__le16 *)nvm_ch_flags + ch_idx);
+ __le16_to_cpup((const __le16 *)nvm_ch_flags + ch_idx);
if (band == NL80211_BAND_5GHZ &&
!data->sku_cap_band_52ghz_enable)
@@ -553,8 +553,7 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
.has_he = true,
.he_cap_elem = {
.mac_cap_info[0] =
- IEEE80211_HE_MAC_CAP0_HTC_HE |
- IEEE80211_HE_MAC_CAP0_TWT_REQ,
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
.mac_cap_info[1] =
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
@@ -584,9 +583,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ,
.phy_cap_info[3] =
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
.phy_cap_info[4] =
IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
@@ -654,9 +653,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US,
.phy_cap_info[3] =
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
.phy_cap_info[6] =
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
@@ -732,7 +731,7 @@ static void iwl_init_he_6ghz_capa(struct iwl_trans *trans,
IWL_DEBUG_EEPROM(trans->dev, "he_6ghz_capa=0x%x\n", he_6ghz_capa);
/* we know it's writable - we set it before ourselves */
- iftype_data = (void *)sband->iftype_data;
+ iftype_data = (void *)(uintptr_t)sband->iftype_data;
for (i = 0; i < sband->n_iftype_data; i++)
iftype_data[i].he_6ghz_capa.capa = cpu_to_le16(he_6ghz_capa);
}
@@ -784,6 +783,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
case IWL_CFG_RF_TYPE_GF:
case IWL_CFG_RF_TYPE_MR:
+ case IWL_CFG_RF_TYPE_MS:
iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |=
IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
if (!is_ap)
@@ -912,7 +912,7 @@ static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + SKU);
- return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
+ return le32_to_cpup((const __le32 *)(phy_sku + SKU_FAMILY_8000));
}
static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
@@ -920,8 +920,8 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + NVM_VERSION);
else
- return le32_to_cpup((__le32 *)(nvm_sw +
- NVM_VERSION_EXT_NVM));
+ return le32_to_cpup((const __le32 *)(nvm_sw +
+ NVM_VERSION_EXT_NVM));
}
static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
@@ -930,7 +930,7 @@ static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + RADIO_CFG);
- return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
+ return le32_to_cpup((const __le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
}
@@ -941,7 +941,7 @@ static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + N_HW_ADDRS);
- n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
+ n_hw_addr = le32_to_cpup((const __le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
return n_hw_addr & N_HW_ADDR_MASK;
}
@@ -1080,7 +1080,9 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
return -EINVAL;
}
- IWL_INFO(trans, "base HW address: %pM\n", data->hw_addr);
+ if (!trans->csme_own)
+ IWL_INFO(trans, "base HW address: %pM, OTP minor version: 0x%x\n",
+ data->hw_addr, iwl_read_prph(trans, REG_OTP_MINOR));
return 0;
}
@@ -1385,8 +1387,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
nvm_chan = iwl_nvm_channels;
}
- if (WARN_ON(num_of_ch > max_num_ch))
+ if (num_of_ch > max_num_ch) {
+ IWL_DEBUG_DEV(dev, IWL_DL_LAR,
+ "Num of channels (%d) is greater than expected. Truncating to %d\n",
+ num_of_ch, max_num_ch);
num_of_ch = max_num_ch;
+ }
if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
return ERR_PTR(-EINVAL);
@@ -1592,7 +1598,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
}
eof = fw_entry->data + fw_entry->size;
- dword_buff = (__le32 *)fw_entry->data;
+ dword_buff = (const __le32 *)fw_entry->data;
/* some NVM files contain a header.
* The header is identified by 2 dwords, as follows:
@@ -1604,7 +1610,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
if (fw_entry->size > NVM_HEADER_SIZE &&
dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
- file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE);
+ file_sec = (const void *)(fw_entry->data + NVM_HEADER_SIZE);
IWL_INFO(trans, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
IWL_INFO(trans, "NVM Manufacturing date %08X\n",
le32_to_cpu(dword_buff[3]));
@@ -1617,7 +1623,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
goto out;
}
} else {
- file_sec = (void *)fw_entry->data;
+ file_sec = (const void *)fw_entry->data;
}
while (true) {
@@ -1685,7 +1691,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
nvm_sections[section_id].length = section_size;
/* advance to the next section */
- file_sec = (void *)(file_sec->data + section_size);
+ file_sec = (const void *)(file_sec->data + section_size);
}
out:
release_firmware(fw_entry);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
index 5378315d0179..0a93ac769f66 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2020-2021 Intel Corporation
* Copyright (C) 2016 Intel Deutschland GmbH
*/
#include <linux/slab.h>
@@ -13,8 +13,6 @@
#include "iwl-op-mode.h"
#include "iwl-trans.h"
-#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
-
struct iwl_phy_db_entry {
u16 size;
u8 *data;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 95b3dae7b504..a22788a68168 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -354,10 +354,10 @@
#define WFPM_GP2 0xA030B4
/* DBGI SRAM Register details */
-#define DBGI_SRAM_TARGET_ACCESS_CFG 0x00A2E14C
-#define DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK 0x10000
#define DBGI_SRAM_TARGET_ACCESS_RDATA_LSB 0x00A2E154
#define DBGI_SRAM_TARGET_ACCESS_RDATA_MSB 0x00A2E158
+#define DBGI_SRAM_FIFO_POINTERS 0x00A2E148
+#define DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK 0x00000FFF
enum {
ENABLE_WFPM = BIT(31),
@@ -386,6 +386,11 @@ enum {
#define UREG_LMAC1_CURRENT_PC 0xa05c1c
#define UREG_LMAC2_CURRENT_PC 0xa05c20
+#define WFPM_LMAC1_PD_NOTIFICATION 0xa0338c
+#define WFPM_ARC1_PD_NOTIFICATION 0xa03044
+#define HPM_SECONDARY_DEVICE_STATE 0xa03404
+
+
/* For UMAG_GEN_HW_STATUS reg check */
enum {
UMAG_GEN_HW_IS_FPGA = BIT(1),
@@ -491,4 +496,6 @@ enum {
#define HBUS_TIMEOUT 0xA5A5A5A1
#define WFPM_DPHY_OFF 0xDF10FF
+#define REG_OTP_MINOR 0xA0333C
+
#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 9236f9106826..b1af9359cea5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -78,8 +78,12 @@ int iwl_trans_init(struct iwl_trans *trans)
if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
return -EINVAL;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- trans->txqs.bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ trans->txqs.bc_tbl_size =
+ sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ;
+ else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ trans->txqs.bc_tbl_size =
+ sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210;
else
trans->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
/*
@@ -203,10 +207,10 @@ IWL_EXPORT_SYMBOL(iwl_trans_send_cmd);
static int iwl_hcmd_names_cmp(const void *key, const void *elt)
{
const struct iwl_hcmd_names *name = elt;
- u8 cmd1 = *(u8 *)key;
+ const u8 *cmd1 = key;
u8 cmd2 = name->cmd_id;
- return (cmd1 - cmd2);
+ return (*cmd1 - cmd2);
}
const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id)
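With the table declared per entry, the allocation size becomes an explicit entry-size-times-depth product; the resulting sizes (the entry is a single __le16, depths from the iwl-fh.h defines above):

	/* bc_tbl_size as computed in iwl_trans_init():
	 *   BZ:     sizeof(__le16) * 4096 == 8 KiB (TFD_QUEUE_BC_SIZE_GEN3_BZ)
	 *   AX210:  sizeof(__le16) * 1024 == 2 KiB (TFD_QUEUE_BC_SIZE_GEN3_AX210)
	 *   older:  sizeof(struct iwlagn_scd_bc_tbl)
	 */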
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 1bcaa3598785..d659ccd065f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -406,6 +406,9 @@ struct iwl_dump_sanitize_ops {
* @cb_data_offs: offset inside skb->cb to store transport data at, must have
* space for at least two pointers
* @fw_reset_handshake: firmware supports reset flow handshake
+ * @queue_alloc_cmd_ver: queue allocation command version; set to 0
+ * to use the older SCD_QUEUE_CFG command, or to the version of
+ * SCD_QUEUE_CONFIG_CMD otherwise.
*/
struct iwl_trans_config {
struct iwl_op_mode *op_mode;
@@ -424,6 +427,7 @@ struct iwl_trans_config {
u8 cb_data_offs;
bool fw_reset_handshake;
+ u8 queue_alloc_cmd_ver;
};
struct iwl_trans_dump_data {
@@ -569,10 +573,9 @@ struct iwl_trans_ops {
void (*txq_disable)(struct iwl_trans *trans, int queue,
bool configure_scd);
/* 22000 functions */
- int (*txq_alloc)(struct iwl_trans *trans,
- __le16 flags, u8 sta_id, u8 tid,
- int cmd_id, int size,
- unsigned int queue_wdg_timeout);
+ int (*txq_alloc)(struct iwl_trans *trans, u32 flags,
+ u32 sta_mask, u8 tid,
+ int size, unsigned int queue_wdg_timeout);
void (*txq_free)(struct iwl_trans *trans, int queue);
int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
struct iwl_trans_rxq_dma_data *data);
@@ -615,6 +618,10 @@ struct iwl_trans_ops {
int (*set_reduce_power)(struct iwl_trans *trans,
const void *data, u32 len);
void (*interrupts)(struct iwl_trans *trans, bool enable);
+ int (*imr_dma_data)(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr,
+ u32 byte_cnt);
+
};
/**
@@ -722,6 +729,26 @@ struct iwl_self_init_dram {
};
/**
+ * struct iwl_imr_data - imr dram data used during debug process
+ * @imr_enable: imr enable status received from fw
+ * @imr_size: imr dram size received from fw
+ * @sram_addr: sram address from debug tlv
+ * @sram_size: sram size from debug tlv
+ * @imr2sram_remainbyte: size remaining after each DMA transfer
+ * @imr_curr_addr: current dst address used during dma transfer
+ * @imr_base_addr: imr address received from fw
+ */
+struct iwl_imr_data {
+ u32 imr_enable;
+ u32 imr_size;
+ u32 sram_addr;
+ u32 sram_size;
+ u32 imr2sram_remainbyte;
+ u64 imr_curr_addr;
+ __le64 imr_base_addr;
+};
+
+/**
* struct iwl_trans_debug - transport debug related data
*
* @n_dest_reg: num of reg_ops in %dbg_dest_tlv
@@ -785,6 +812,7 @@ struct iwl_trans_debug {
u32 ucode_preset;
bool restart_required;
u32 last_tp_resetfw;
+ struct iwl_imr_data imr_data;
};
struct iwl_dma_ptr {
@@ -904,6 +932,7 @@ struct iwl_txq {
* @queue_used - bit mask of used queues
* @queue_stopped - bit mask of stopped queues
* @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
+ * @queue_alloc_cmd_ver: queue allocation command version
*/
struct iwl_trans_txqs {
unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
@@ -929,6 +958,8 @@ struct iwl_trans_txqs {
} tfd;
struct iwl_dma_ptr scd_bc_tbls;
+
+ u8 queue_alloc_cmd_ver;
};
/**
@@ -1220,9 +1251,8 @@ iwl_trans_txq_free(struct iwl_trans *trans, int queue)
static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
- __le16 flags, u8 sta_id, u8 tid,
- int cmd_id, int size,
- unsigned int wdg_timeout)
+ u32 flags, u32 sta_mask, u8 tid,
+ int size, unsigned int wdg_timeout)
{
might_sleep();
@@ -1234,8 +1264,8 @@ iwl_trans_txq_alloc(struct iwl_trans *trans,
return -EIO;
}
- return trans->ops->txq_alloc(trans, flags, sta_id, tid,
- cmd_id, size, wdg_timeout);
+ return trans->ops->txq_alloc(trans, flags, sta_mask, tid,
+ size, wdg_timeout);
}
static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
@@ -1368,6 +1398,15 @@ static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
} while (0)
+static inline int iwl_trans_write_imr_mem(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr,
+ u32 byte_cnt)
+{
+ if (trans->ops->imr_dma_data)
+ return trans->ops->imr_dma_data(trans, dst_addr, src_addr, byte_cnt);
+ return 0;
+}
+
static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
u32 value;
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c
index 2f7f0f994ca3..b4f45234cfc8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c
@@ -312,7 +312,7 @@ static ssize_t iwl_mei_write_cyclic_buf(struct mei_cl_device *cldev,
memcpy(q_head + wr, hdr, tx_sz);
} else {
memcpy(q_head + wr, hdr, q_sz - wr);
- memcpy(q_head, (u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr));
+ memcpy(q_head, (const u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr));
}
WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));
@@ -432,7 +432,7 @@ void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx)
u32 q_sz;
u32 rd;
u32 wr;
- void *q_head;
+ u8 *q_head;
if (!iwl_mei_global_cldev)
return;
@@ -2003,7 +2003,11 @@ static void iwl_mei_remove(struct mei_cl_device *cldev)
}
static const struct mei_cl_device_id iwl_mei_tbl[] = {
- { KBUILD_MODNAME, MEI_WLAN_UUID, MEI_CL_VERSION_ANY},
+ {
+ .name = KBUILD_MODNAME,
+ .uuid = MEI_WLAN_UUID,
+ .version = MEI_CL_VERSION_ANY,
+ },
/* required last entry */
{ }
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/net.c b/drivers/net/wireless/intel/iwlwifi/mei/net.c
index 468102a95e1b..3472167c8370 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/net.c
+++ b/drivers/net/wireless/intel/iwlwifi/mei/net.c
@@ -102,8 +102,8 @@ static bool iwl_mei_rx_filter_arp(struct sk_buff *skb,
* src IP address - 4 bytes
* target MAC address - 6 bytes
*/
- target_ip = (void *)((u8 *)(arp + 1) +
- ETH_ALEN + sizeof(__be32) + ETH_ALEN);
+ target_ip = (const void *)((const u8 *)(arp + 1) +
+ ETH_ALEN + sizeof(__be32) + ETH_ALEN);
/*
* ARP request is forwarded to ME only if IP address match in the
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index b400867e94f0..a995bba0ba81 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -31,7 +31,7 @@ void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
memcpy(mvmvif->rekey_data.kck, data->kck, data->kck_len);
mvmvif->rekey_data.akm = data->akm & 0xFF;
mvmvif->rekey_data.replay_ctr =
- cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
+ cpu_to_le64(be64_to_cpup((const __be64 *)data->replay_ctr));
mvmvif->rekey_data.valid = true;
mutex_unlock(&mvm->mutex);
@@ -453,8 +453,7 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- WOWLAN_TSC_RSC_PARAM,
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TSC_RSC_PARAM,
IWL_FW_CMD_VER_UNKNOWN);
int ret;
@@ -672,8 +671,7 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
};
int i, err;
- int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- WOWLAN_PATTERNS,
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
IWL_FW_CMD_VER_UNKNOWN);
if (!wowlan->n_patterns)
@@ -921,8 +919,7 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- WOWLAN_CONFIGURATION, 0) < 6) {
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 6) {
/* Query the last used seqno and set it */
int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
@@ -1017,8 +1014,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
- int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- WOWLAN_TKIP_PARAM,
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TKIP_PARAM,
IWL_FW_CMD_VER_UNKNOWN);
struct wowlan_key_tkip_data tkip_data = {};
int size;
@@ -1058,7 +1054,6 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
};
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
- IWL_ALWAYS_LONG_GROUP,
WOWLAN_KEK_KCK_MATERIAL,
IWL_FW_CMD_VER_UNKNOWN);
if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 && cmd_ver != 4 &&
@@ -1089,7 +1084,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
/* skip the sta_id at the beginning */
_kek_kck_cmd = (void *)
- ((u8 *)_kek_kck_cmd) + sizeof(kek_kck_cmd.sta_id);
+ ((u8 *)_kek_kck_cmd + sizeof(kek_kck_cmd.sta_id));
}
IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n",
@@ -1489,7 +1484,7 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
int pktsize = status->wake_packet_bufsize;
int pktlen = status->wake_packet_length;
const u8 *pktdata = status->wake_packet;
- struct ieee80211_hdr *hdr = (void *)pktdata;
+ const struct ieee80211_hdr *hdr = (const void *)pktdata;
int truncated = pktlen - pktsize;
/* this would be a firmware bug */
@@ -2074,8 +2069,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
};
int ret, len;
u8 notif_ver;
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- WOWLAN_GET_STATUSES,
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN)
@@ -2182,8 +2176,7 @@ out_free_resp:
static struct iwl_wowlan_status_data *
iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, u8 sta_id)
{
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- OFFLOADS_QUERY_CMD,
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, OFFLOADS_QUERY_CMD,
IWL_FW_CMD_VER_UNKNOWN);
__le32 station_id = cpu_to_le32(sta_id);
u32 cmd_size = cmd_ver != IWL_FW_CMD_VER_UNKNOWN ? sizeof(station_id) : 0;
@@ -2704,7 +2697,9 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
/* start pseudo D3 */
rtnl_lock();
+ wiphy_lock(mvm->hw->wiphy);
err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
+ wiphy_unlock(mvm->hw->wiphy);
rtnl_unlock();
if (err > 0)
err = -EINVAL;
@@ -2760,7 +2755,9 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
rtnl_lock();
+ wiphy_lock(mvm->hw->wiphy);
__iwl_mvm_resume(mvm, true);
+ wiphy_unlock(mvm->hw->wiphy);
rtnl_unlock();
iwl_mvm_resume_tcm(mvm);
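Both pseudo-D3 debugfs hunks above add wiphy_lock() around the suspend/resume calls. The order matters: the wiphy mutex nests inside the RTNL, matching what cfg80211/mac80211 expect, so the pattern in both paths is:

	rtnl_lock();
	wiphy_lock(mvm->hw->wiphy);	/* wiphy mutex nests inside the RTNL */
	err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
	wiphy_unlock(mvm->hw->wiphy);
	rtnl_unlock();			/* release in reverse order */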
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 63432c24eb59..49898fd99594 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -5,6 +5,7 @@
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#include <linux/vmalloc.h>
+#include <linux/err.h>
#include <linux/ieee80211.h>
#include <linux/netdevice.h>
@@ -425,8 +426,7 @@ static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta,
return -EINVAL;
/* only change from debug set <-> debug unset */
- if ((amsdu_len && mvmsta->orig_amsdu_len) ||
- (!!amsdu_len && mvmsta->orig_amsdu_len))
+ if (amsdu_len && mvmsta->orig_amsdu_len)
return -EBUSY;
if (amsdu_len) {
@@ -1478,7 +1478,7 @@ iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf,
.mvm = mvm,
};
u16 wait_cmds[] = {
- iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD, DATA_PATH_GROUP, 0),
+ WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD),
};
u32 aid;
int ret;
@@ -1513,8 +1513,9 @@ iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf,
wait_cmds, ARRAY_SIZE(wait_cmds),
iwl_mvm_sniffer_apply, &apply);
- ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD,
- DATA_PATH_GROUP, 0), 0,
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD),
+ 0,
sizeof(he_mon_cmd), &he_mon_cmd);
/* no need to really wait, we already did anyway */
@@ -1726,8 +1727,7 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
if (!iwl_mvm_firmware_running(mvm))
return -EIO;
- hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
- DEBUG_GROUP, 0);
+ hcmd.id = WIDE_ID(DEBUG_GROUP, *ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR);
cmd.op = cpu_to_le32(DEBUG_MEM_OP_READ);
/* Take care of alignment of both the position and the length */
@@ -1757,7 +1757,7 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
goto out;
}
- ret = len - copy_to_user(user_buf, (void *)rsp->data + delta, len);
+ ret = len - copy_to_user(user_buf, (u8 *)rsp->data + delta, len);
*ppos += ret;
out:
@@ -1781,8 +1781,7 @@ static ssize_t iwl_dbgfs_mem_write(struct file *file,
if (!iwl_mvm_firmware_running(mvm))
return -EIO;
- hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
- DEBUG_GROUP, 0);
+ hcmd.id = WIDE_ID(DEBUG_GROUP, *ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR);
if (*ppos & 0x3 || count < 4) {
op = DEBUG_MEM_OP_WRITE_BYTES;
@@ -1857,7 +1856,6 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
{
struct dentry *bcast_dir __maybe_unused;
- char buf[100];
spin_lock_init(&mvm->drv_stats_lock);
@@ -1939,6 +1937,11 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
* Create a symlink with mac80211. It will be removed when mac80211
* exits (before the opmode exits, which removes the target).
*/
- snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent);
- debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf);
+ if (!IS_ERR(mvm->debugfs_dir)) {
+ char buf[100];
+
+ snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent);
+ debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir,
+ buf);
+ }
}
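The symlink hunk above pairs with the new linux/err.h include: debugfs directory creation can hand back an ERR_PTR-encoded dentry rather than NULL, so mvm->debugfs_dir must pass an IS_ERR() check before %pd2 dereferences its d_parent. A minimal sketch of the guard, with hypothetical directory and link names:

	#include <linux/err.h>
	#include <linux/debugfs.h>

	struct dentry *dir = debugfs_create_dir("example", NULL);

	/* debugfs may return an ERR_PTR; never dereference it */
	if (!IS_ERR(dir)) {
		char buf[100];

		snprintf(buf, sizeof(buf), "../../%pd2", dir->d_parent);
		debugfs_create_symlink("link", NULL, buf);
	}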
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 628aee634b2a..430044bc4755 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -346,8 +346,8 @@ iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
*format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
break;
case NL80211_CHAN_WIDTH_160:
- cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RANGE_REQ_CMD,
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver >= 13) {
@@ -548,7 +548,7 @@ static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_tof_range_req_cmd_v5 cmd_v5;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd_v5,
.len[0] = sizeof(cmd_v5),
@@ -574,7 +574,7 @@ static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_tof_range_req_cmd_v7 cmd_v7;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd_v7,
.len[0] = sizeof(cmd_v7),
@@ -604,7 +604,7 @@ static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_tof_range_req_cmd_v8 cmd;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
@@ -630,7 +630,7 @@ static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_tof_range_req_cmd_v9 cmd;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
@@ -728,7 +728,7 @@ static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
{
struct iwl_tof_range_req_cmd_v11 cmd;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
@@ -799,7 +799,7 @@ static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
{
struct iwl_tof_range_req_cmd_v12 cmd;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
@@ -827,7 +827,7 @@ static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
{
struct iwl_tof_range_req_cmd_v13 cmd;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
@@ -877,8 +877,8 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EBUSY;
if (new_api) {
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RANGE_REQ_CMD,
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
IWL_FW_CMD_VER_UNKNOWN);
switch (cmd_ver) {
@@ -927,8 +927,7 @@ void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
iwl_mvm_ftm_reset(mvm);
- if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD,
- LOCATION_GROUP, 0),
+ if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD),
0, sizeof(cmd), &cmd))
IWL_ERR(mvm, "failed to abort FTM process\n");
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index bda6da7d988e..9729680476fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -106,6 +106,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef)
{
+ u32 cmd_id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_CONFIG_CMD);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
/*
* The command structure is the same for versions 6, 7 and 8 (only the
@@ -120,8 +121,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
IWL_TOF_RESPONDER_CMD_VALID_STA_ID),
.sta_id = mvmvif->bcast_sta.sta_id,
};
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RESPONDER_CONFIG_CMD, 6);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 6);
int err;
int cmd_size;
@@ -161,9 +161,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
memcpy(cmd.bssid, vif->addr, ETH_ALEN);
- return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RESPONDER_CONFIG_CMD,
- LOCATION_GROUP, 0),
- 0, cmd_size, &cmd);
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
}
static int
@@ -177,8 +175,7 @@ iwl_mvm_ftm_responder_dyn_cfg_v2(struct iwl_mvm *mvm,
};
u8 data[IWL_LCI_CIVIC_IE_MAX_SIZE] = {0};
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RESPONDER_DYN_CONFIG_CMD,
- LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
.data[0] = &cmd,
.len[0] = sizeof(cmd),
.data[1] = &data,
@@ -220,8 +217,7 @@ iwl_mvm_ftm_responder_dyn_cfg_v3(struct iwl_mvm *mvm,
{
struct iwl_tof_responder_dyn_config_cmd cmd;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RESPONDER_DYN_CONFIG_CMD,
- LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
.data[0] = &cmd,
.len[0] = sizeof(cmd),
/* may not be able to DMA from stack */
@@ -278,8 +274,9 @@ iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm,
struct ieee80211_ftm_responder_params *params)
{
int ret;
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RESPONDER_DYN_CONFIG_CMD, 2);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
+ 2);
switch (cmd_ver) {
case 2:
@@ -320,8 +317,9 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
.addr = addr,
.hltk = hltk,
};
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RESPONDER_DYN_CONFIG_CMD, 2);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
+ 2);
lockdep_assert_held(&mvm->mutex);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index ae589b3b8c46..e842816134f1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -25,11 +25,6 @@
#define MVM_UCODE_ALIVE_TIMEOUT (HZ)
#define MVM_UCODE_CALIB_TIMEOUT (2 * HZ)
-#define UCODE_VALID_OK cpu_to_le32(0x1)
-
-#define IWL_PPAG_MASK 3
-#define IWL_PPAG_ETSI_MASK BIT(0)
-
#define IWL_TAS_US_MCC 0x5553
#define IWL_TAS_CANADA_MCC 0x4341
@@ -79,7 +74,7 @@ static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
struct iwl_dqa_enable_cmd dqa_cmd = {
.cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
};
- u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, DQA_ENABLE_CMD);
int ret;
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
@@ -126,13 +121,54 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
u32 lmac_error_event_table, umac_error_table;
u32 version = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
UCODE_ALIVE_NTFY, 0);
+ u32 i;
- /*
- * For v5 and above, we can check the version, for older
- * versions we need to check the size.
- */
- if (version == 5 || version == 6) {
- /* v5 and v6 are compatible (only IMR addition) */
+ if (version == 6) {
+ struct iwl_alive_ntf_v6 *palive;
+
+ if (pkt_len < sizeof(*palive))
+ return false;
+
+ palive = (void *)pkt->data;
+ mvm->trans->dbg.imr_data.imr_enable =
+ le32_to_cpu(palive->imr.enabled);
+ mvm->trans->dbg.imr_data.imr_size =
+ le32_to_cpu(palive->imr.size);
+ mvm->trans->dbg.imr_data.imr2sram_remainbyte =
+ mvm->trans->dbg.imr_data.imr_size;
+ mvm->trans->dbg.imr_data.imr_base_addr =
+ palive->imr.base_addr;
+ mvm->trans->dbg.imr_data.imr_curr_addr =
+ le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr);
+ IWL_DEBUG_FW(mvm, "IMR Enabled: 0x%x size 0x%x Address 0x%016llx\n",
+ mvm->trans->dbg.imr_data.imr_enable,
+ mvm->trans->dbg.imr_data.imr_size,
+ le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr));
+
+ if (!mvm->trans->dbg.imr_data.imr_enable) {
+ for (i = 0; i < ARRAY_SIZE(mvm->trans->dbg.active_regions); i++) {
+ struct iwl_ucode_tlv *reg_tlv;
+ struct iwl_fw_ini_region_tlv *reg;
+
+ reg_tlv = mvm->trans->dbg.active_regions[i];
+ if (!reg_tlv)
+ continue;
+
+ reg = (void *)reg_tlv->data;
+ /*
+ * We have only one DRAM IMR region, so we
+ * can break as soon as we find the first
+ * one.
+ */
+ if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) {
+ mvm->trans->dbg.unsupported_region_msk |= BIT(i);
+ break;
+ }
+ }
+ }
+ }
+
+ if (version >= 5) {
struct iwl_alive_ntf_v5 *palive;
if (pkt_len < sizeof(*palive))
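In the reworked alive handler above, a v6 notification is parsed first for the IMR fields it adds, and execution then falls through into the v5 path, since v6 only appends fields to the v5 layout. A sketch of that layered pattern, reusing the hunk's own pkt/pkt_len variables (parse_imr() and parse_common() are hypothetical helpers):

	if (version == 6) {
		const struct iwl_alive_ntf_v6 *v6 = (const void *)pkt->data;

		if (pkt_len < sizeof(*v6))
			return false;
		parse_imr(&v6->imr);		/* v6-only tail */
	}
	if (version >= 5) {
		const struct iwl_alive_ntf_v5 *v5 = (const void *)pkt->data;

		if (pkt_len < sizeof(*v5))
			return false;
		parse_common(v5);		/* shared v5 prefix */
	}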
@@ -249,6 +285,26 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
return false;
}
+static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm)
+{
+ struct iwl_trans *trans = mvm->trans;
+ enum iwl_device_family device_family = trans->trans_cfg->device_family;
+
+ if (device_family < IWL_DEVICE_FAMILY_8000)
+ return;
+
+ if (device_family <= IWL_DEVICE_FAMILY_9000)
+ IWL_ERR(mvm, "WFPM_ARC1_PD_NOTIFICATION: 0x%x\n",
+ iwl_read_umac_prph(trans, WFPM_ARC1_PD_NOTIFICATION));
+ else
+ IWL_ERR(mvm, "WFPM_LMAC1_PD_NOTIFICATION: 0x%x\n",
+ iwl_read_umac_prph(trans, WFPM_LMAC1_PD_NOTIFICATION));
+
+ IWL_ERR(mvm, "HPM_SECONDARY_DEVICE_STATE: 0x%x\n",
+ iwl_read_umac_prph(trans, HPM_SECONDARY_DEVICE_STATE));
+}
+
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
enum iwl_ucode_type ucode_type)
{
@@ -314,6 +370,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
iwl_read_prph(trans, SB_CPU_2_STATUS));
}
+ iwl_mvm_print_pd_notification(mvm);
+
/* LMAC/UMAC PC info */
if (trans->trans_cfg->device_family >=
IWL_DEVICE_FAMILY_9000) {
@@ -546,8 +604,7 @@ static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
return 0;
}
- cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, REGULATORY_AND_NVM_GROUP,
- SAR_OFFSET_MAPPING_TABLE_CMD,
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver != 2) {
@@ -572,6 +629,7 @@ static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
+ u32 cmd_id = PHY_CONFIGURATION_CMD;
struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd;
enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
struct iwl_phy_specific_cfg phy_filters = {};
@@ -603,8 +661,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
phy_cfg_cmd.calib_control.flow_trigger =
mvm->fw->default_calib[ucode_type].flow_trigger;
- cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- PHY_CONFIGURATION_CMD,
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver == 3) {
iwl_mvm_phy_filter_init(mvm, &phy_filters);
@@ -616,8 +673,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
phy_cfg_cmd.phy_cfg);
cmd_size = (cmd_ver == 3) ? sizeof(struct iwl_phy_cfg_cmd_v3) :
sizeof(struct iwl_phy_cfg_cmd_v1);
- return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
- cmd_size, &phy_cfg_cmd);
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &phy_cfg_cmd);
}
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm)
@@ -737,7 +793,7 @@ out:
mvm->nvm_data->bands[0].n_channels = 1;
mvm->nvm_data->bands[0].n_bitrates = 1;
mvm->nvm_data->bands[0].bitrates =
- (void *)mvm->nvm_data->channels + 1;
+ (void *)((u8 *)mvm->nvm_data->channels + 1);
mvm->nvm_data->bands[0].bitrates->hw_value = 10;
}
@@ -760,6 +816,7 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
#ifdef CONFIG_ACPI
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
+ u32 cmd_id = REDUCE_TX_POWER_CMD;
struct iwl_dev_tx_power_cmd cmd = {
.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
};
@@ -767,11 +824,14 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
int ret;
u16 len = 0;
u32 n_subbands;
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- REDUCE_TX_POWER_CMD,
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
-
- if (cmd_ver == 6) {
+ if (cmd_ver == 7) {
+ len = sizeof(cmd.v7);
+ n_subbands = IWL_NUM_SUB_BANDS_V2;
+ per_chain = cmd.v7.per_chain[0][0];
+ cmd.v7.flags = cpu_to_le32(mvm->fwrt.reduced_power_flags);
+ } else if (cmd_ver == 6) {
len = sizeof(cmd.v6);
n_subbands = IWL_NUM_SUB_BANDS_V2;
per_chain = cmd.v6.per_chain[0][0];
@@ -805,7 +865,7 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
iwl_mei_set_power_limit(per_chain);
IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
- return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
@@ -814,9 +874,12 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
struct iwl_geo_tx_power_profiles_resp *resp;
u16 len;
int ret;
- struct iwl_host_cmd cmd;
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
- PER_CHAIN_LIMIT_OFFSET_CMD,
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD),
+ .flags = CMD_WANT_SKB,
+ .data = { &geo_tx_cmd },
+ };
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
IWL_FW_CMD_VER_UNKNOWN);
/* the ops field is at the same spot for all versions, so set in v1 */
@@ -838,12 +901,7 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
if (!iwl_sar_geo_support(&mvm->fwrt))
return -EOPNOTSUPP;
- cmd = (struct iwl_host_cmd){
- .id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD),
- .len = { len, },
- .flags = CMD_WANT_SKB,
- .data = { &geo_tx_cmd },
- };
+ cmd.len[0] = len;
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
@@ -863,14 +921,14 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
+ u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD);
union iwl_geo_tx_power_profiles_cmd cmd;
u16 len;
u32 n_bands;
u32 n_profiles;
u32 sk = 0;
int ret;
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
- PER_CHAIN_LIMIT_OFFSET_CMD,
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, ops) !=
@@ -948,167 +1006,19 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
IWL_UCODE_TLV_API_SAR_TABLE_VER))
cmd.v2.table_revision = cpu_to_le32(sk);
- return iwl_mvm_send_cmd_pdu(mvm,
- WIDE_ID(PHY_OPS_GROUP,
- PER_CHAIN_LIMIT_OFFSET_CMD),
- 0, len, &cmd);
-}
-
-static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
-{
- union acpi_object *wifi_pkg, *data, *flags;
- int i, j, ret, tbl_rev, num_sub_bands;
- int idx = 2;
-
- mvm->fwrt.ppag_flags = 0;
-
- data = iwl_acpi_get_object(mvm->dev, ACPI_PPAG_METHOD);
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- /* try to read ppag table rev 2 or 1 (both have the same data size) */
- wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
- ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
- if (!IS_ERR(wifi_pkg)) {
- if (tbl_rev == 1 || tbl_rev == 2) {
- num_sub_bands = IWL_NUM_SUB_BANDS_V2;
- IWL_DEBUG_RADIO(mvm,
- "Reading PPAG table v2 (tbl_rev=%d)\n",
- tbl_rev);
- goto read_table;
- } else {
- ret = -EINVAL;
- goto out_free;
- }
- }
-
- /* try to read ppag table revision 0 */
- wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
- ACPI_PPAG_WIFI_DATA_SIZE_V1, &tbl_rev);
- if (!IS_ERR(wifi_pkg)) {
- if (tbl_rev != 0) {
- ret = -EINVAL;
- goto out_free;
- }
- num_sub_bands = IWL_NUM_SUB_BANDS_V1;
- IWL_DEBUG_RADIO(mvm, "Reading PPAG table v1 (tbl_rev=0)\n");
- goto read_table;
- }
- ret = PTR_ERR(wifi_pkg);
- goto out_free;
-
-read_table:
- mvm->fwrt.ppag_ver = tbl_rev;
- flags = &wifi_pkg->package.elements[1];
-
- if (flags->type != ACPI_TYPE_INTEGER) {
- ret = -EINVAL;
- goto out_free;
- }
-
- mvm->fwrt.ppag_flags = flags->integer.value & IWL_PPAG_MASK;
-
- if (!mvm->fwrt.ppag_flags) {
- ret = 0;
- goto out_free;
- }
-
- /*
- * read, verify gain values and save them into the PPAG table.
- * first sub-band (j=0) corresponds to Low-Band (2.4GHz), and the
- * following sub-bands to High-Band (5GHz).
- */
- for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
- for (j = 0; j < num_sub_bands; j++) {
- union acpi_object *ent;
-
- ent = &wifi_pkg->package.elements[idx++];
- if (ent->type != ACPI_TYPE_INTEGER) {
- ret = -EINVAL;
- goto out_free;
- }
-
- mvm->fwrt.ppag_chains[i].subbands[j] = ent->integer.value;
-
- if ((j == 0 &&
- (mvm->fwrt.ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB ||
- mvm->fwrt.ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) ||
- (j != 0 &&
- (mvm->fwrt.ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB ||
- mvm->fwrt.ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) {
- mvm->fwrt.ppag_flags = 0;
- ret = -EINVAL;
- goto out_free;
- }
- }
- }
-
- ret = 0;
-out_free:
- kfree(data);
- return ret;
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}
int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
{
union iwl_ppag_table_cmd cmd;
- u8 cmd_ver;
- int i, j, ret, num_sub_bands, cmd_size;
- s8 *gain;
+ int ret, cmd_size;
- if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
- IWL_DEBUG_RADIO(mvm,
- "PPAG capability not supported by FW, command not sent.\n");
- return 0;
- }
- if (!mvm->fwrt.ppag_flags) {
- IWL_DEBUG_RADIO(mvm, "PPAG not enabled, command not sent.\n");
+ ret = iwl_read_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
+ /* Not supporting PPAG table is a valid scenario */
+ if (ret < 0)
return 0;
- }
- /* The 'flags' field is the same in v1 and in v2 so we can just
- * use v1 to access it.
- */
- cmd.v1.flags = cpu_to_le32(mvm->fwrt.ppag_flags);
- cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
- PER_PLATFORM_ANT_GAIN_CMD,
- IWL_FW_CMD_VER_UNKNOWN);
- if (cmd_ver == 1) {
- num_sub_bands = IWL_NUM_SUB_BANDS_V1;
- gain = cmd.v1.gain[0];
- cmd_size = sizeof(cmd.v1);
- if (mvm->fwrt.ppag_ver == 1 || mvm->fwrt.ppag_ver == 2) {
- IWL_DEBUG_RADIO(mvm,
- "PPAG table rev is %d but FW supports v1, sending truncated table\n",
- mvm->fwrt.ppag_ver);
- cmd.v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
- }
- } else if (cmd_ver == 2 || cmd_ver == 3) {
- num_sub_bands = IWL_NUM_SUB_BANDS_V2;
- gain = cmd.v2.gain[0];
- cmd_size = sizeof(cmd.v2);
- if (mvm->fwrt.ppag_ver == 0) {
- IWL_DEBUG_RADIO(mvm,
- "PPAG table is v1 but FW supports v2, sending padded table\n");
- } else if (cmd_ver == 2 && mvm->fwrt.ppag_ver == 2) {
- IWL_DEBUG_RADIO(mvm,
- "PPAG table is v3 but FW supports v2, sending partial bitmap.\n");
- cmd.v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
- }
- } else {
- IWL_DEBUG_RADIO(mvm, "Unsupported PPAG command version\n");
- return 0;
- }
-
- for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
- for (j = 0; j < num_sub_bands; j++) {
- gain[i * num_sub_bands + j] =
- mvm->fwrt.ppag_chains[i].subbands[j];
- IWL_DEBUG_RADIO(mvm,
- "PPAG table: chain[%d] band[%d]: gain = %d\n",
- i, j, gain[i * num_sub_bands + j]);
- }
- }
IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
PER_PLATFORM_ANT_GAIN_CMD),
@@ -1120,40 +1030,11 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
return ret;
}
-static const struct dmi_system_id dmi_ppag_approved_list[] = {
- { .ident = "HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- },
- },
- { .ident = "SAMSUNG",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
- },
- },
- { .ident = "MSFT",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- },
- },
- { .ident = "ASUS",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."),
- },
- },
- {}
-};
-
static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
/* no need to read the table, done in INIT stage */
- if (!dmi_check_system(dmi_ppag_approved_list)) {
- IWL_DEBUG_RADIO(mvm,
- "System vendor '%s' is not in the approved list, disabling PPAG.\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
- mvm->fwrt.ppag_flags = 0;
+ if (!(iwl_acpi_is_ppag_approved(&mvm->fwrt)))
return 0;
- }
return iwl_mvm_ppag_send_cmd(mvm);
}
@@ -1205,11 +1086,12 @@ static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigne
static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
+ u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
int ret;
- struct iwl_tas_config_cmd_v3 cmd = {};
- int cmd_size;
+ union iwl_tas_config_cmd cmd = {};
+ int cmd_size, fw_ver;
- BUILD_BUG_ON(ARRAY_SIZE(cmd.block_list_array) <
+ BUILD_BUG_ON(ARRAY_SIZE(cmd.v3.block_list_array) <
APCI_WTAS_BLACK_LIST_MAX);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
@@ -1217,7 +1099,10 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
return;
}
- ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd);
+ fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd, fw_ver);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"TAS table invalid or unavailable. (%d)\n",
@@ -1232,25 +1117,24 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm,
"System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
dmi_get_system_info(DMI_SYS_VENDOR));
- if ((!iwl_mvm_add_to_tas_block_list(cmd.block_list_array,
- &cmd.block_list_size, IWL_TAS_US_MCC)) ||
- (!iwl_mvm_add_to_tas_block_list(cmd.block_list_array,
- &cmd.block_list_size, IWL_TAS_CANADA_MCC))) {
+ if ((!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
+ &cmd.v4.block_list_size,
+ IWL_TAS_US_MCC)) ||
+ (!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
+ &cmd.v4.block_list_size,
+ IWL_TAS_CANADA_MCC))) {
IWL_DEBUG_RADIO(mvm,
"Unable to add US/Canada to TAS block list, disabling TAS\n");
return;
}
}
- cmd_size = iwl_fw_lookup_cmd_ver(mvm->fw, REGULATORY_AND_NVM_GROUP,
- TAS_CONFIG,
- IWL_FW_CMD_VER_UNKNOWN) < 3 ?
+ /* v4 is the same size as v3, so no need to differentiate here */
+ cmd_size = fw_ver < 3 ?
sizeof(struct iwl_tas_config_cmd_v2) :
sizeof(struct iwl_tas_config_cmd_v3);
- ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
- TAS_CONFIG),
- 0, cmd_size, &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
if (ret < 0)
IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}
@@ -1283,7 +1167,7 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
int ret;
u32 value;
- struct iwl_lari_config_change_cmd_v5 cmd = {};
+ struct iwl_lari_config_change_cmd_v6 cmd = {};
cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt);
@@ -1310,25 +1194,43 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
if (!ret)
cmd.oem_uhb_allow_bitmap = cpu_to_le32(value);
+ ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
+ DSM_FUNC_FORCE_DISABLE_CHANNELS,
+ &iwl_guid, &value);
+ if (!ret)
+ cmd.force_disable_channels_bitmap = cpu_to_le32(value);
+
if (cmd.config_bitmap ||
cmd.oem_uhb_allow_bitmap ||
cmd.oem_11ax_allow_bitmap ||
cmd.oem_unii4_allow_bitmap ||
- cmd.chan_state_active_bitmap) {
+ cmd.chan_state_active_bitmap ||
+ cmd.force_disable_channels_bitmap) {
size_t cmd_size;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
- REGULATORY_AND_NVM_GROUP,
- LARI_CONFIG_CHANGE, 1);
- if (cmd_ver == 5)
+ WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ LARI_CONFIG_CHANGE),
+ 1);
+ switch (cmd_ver) {
+ case 6:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6);
+ break;
+ case 5:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5);
- else if (cmd_ver == 4)
+ break;
+ case 4:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
- else if (cmd_ver == 3)
+ break;
+ case 3:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
- else if (cmd_ver == 2)
+ break;
+ case 2:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
- else
+ break;
+ default:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
+ break;
+ }
IWL_DEBUG_RADIO(mvm,
"sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
@@ -1340,8 +1242,9 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
le32_to_cpu(cmd.chan_state_active_bitmap),
cmd_ver);
IWL_DEBUG_RADIO(mvm,
- "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x\n",
- le32_to_cpu(cmd.oem_uhb_allow_bitmap));
+ "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
+ le32_to_cpu(cmd.oem_uhb_allow_bitmap),
+ le32_to_cpu(cmd.force_disable_channels_bitmap));
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE),
@@ -1358,7 +1261,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
int ret;
/* read PPAG table */
- ret = iwl_mvm_get_ppag_table(mvm);
+ ret = iwl_acpi_get_ppag_table(&mvm->fwrt);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"PPAG BIOS table invalid or unavailable. (%d)\n",
@@ -1641,9 +1544,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
* internal aux station for all aux activities that don't
* requires a dedicated data queue.
*/
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA,
- 0) < 12) {
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
/*
* In old version the aux station uses mac id like other
* station and not lmac id
@@ -1658,8 +1559,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
while (!sband && i < NUM_NL80211_BANDS)
sband = mvm->hw->wiphy->bands[i++];
- if (WARN_ON_ONCE(!sband))
+ if (WARN_ON_ONCE(!sband)) {
+ ret = -ENODEV;
goto error;
+ }
chan = &sband->channels[0];
@@ -1800,9 +1703,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++)
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA,
- 0) < 12) {
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
/*
* Add auxiliary station for scanning.
* Newer versions of this command implies that the fw uses
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index fd7d4abfb454..5aa4520b70ac 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -821,10 +821,7 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx)
{
u16 flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx);
- bool is_new_rate = iwl_fw_lookup_cmd_ver(fw,
- LONG_GROUP,
- BEACON_TEMPLATE_CMD,
- 0) > 10;
+ bool is_new_rate = iwl_fw_lookup_cmd_ver(fw, BEACON_TEMPLATE_CMD, 0) > 10;
if (rate_idx <= IWL_FIRST_CCK_RATE)
flags |= is_new_rate ? IWL_MAC_BEACON_CCK
@@ -960,8 +957,7 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
WARN_ON(channel == 0);
if (cfg80211_channel_is_psc(ctx->def.chan) &&
!IWL_MVM_DISABLE_AP_FILS) {
- flags |= iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- BEACON_TEMPLATE_CMD,
+ flags |= iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD,
0) > 10 ?
IWL_MAC_BEACON_FILS :
IWL_MAC_BEACON_FILS_V1;
@@ -1458,8 +1454,9 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
struct sk_buff *skb;
u8 *data;
u32 size = le32_to_cpu(sb->byte_count);
- int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PROT_OFFLOAD_GROUP,
- STORED_BEACON_NTF, 0);
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF),
+ 0);
if (size == 0)
return;
@@ -1602,6 +1599,18 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
RCU_INIT_POINTER(mvm->csa_vif, NULL);
return;
case NL80211_IFTYPE_STATION:
+ /*
+ * if we don't know about an ongoing channel switch,
+ * make sure FW cancels it
+ */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ CHANNEL_SWITCH_ERROR_NOTIF,
+ 0) && !vif->csa_active) {
+ IWL_DEBUG_INFO(mvm, "Channel Switch was canceled\n");
+ iwl_mvm_cancel_channel_switch(mvm, vif, mac_id);
+ break;
+ }
+
iwl_mvm_csa_client_absent(mvm, vif);
cancel_delayed_work(&mvmvif->csa_work);
ieee80211_chswitch_done(vif, true);
@@ -1615,6 +1624,31 @@ out_unlock:
rcu_read_unlock();
}
+void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_channel_switch_error_notif *notif = (void *)pkt->data;
+ struct ieee80211_vif *vif;
+ u32 id = le32_to_cpu(notif->mac_id);
+ u32 csa_err_mask = le32_to_cpu(notif->csa_err_mask);
+
+ rcu_read_lock();
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
+ if (!vif) {
+ rcu_read_unlock();
+ return;
+ }
+
+ IWL_DEBUG_INFO(mvm, "FW reports CSA error: mac_id=%u, csa_err_mask=%u\n",
+ id, csa_err_mask);
+ if (csa_err_mask & (CS_ERR_COUNT_ERROR |
+ CS_ERR_LONG_DELAY_AFTER_CS |
+ CS_ERR_TX_BLOCK_TIMER_EXPIRED))
+ ieee80211_channel_switch_disconnect(vif, true);
+ rcu_read_unlock();
+}
+
void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 4ac599f6ad22..784d91281c02 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -226,7 +226,6 @@ static const u8 he_if_types_ext_capa_sta[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
[2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
- [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
};
static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
@@ -375,28 +374,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->n_cipher_suites++;
}
- /* currently FW API supports only one optional cipher scheme */
- if (mvm->fw->cs[0].cipher) {
- const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0];
- struct ieee80211_cipher_scheme *cs = &mvm->cs[0];
-
- mvm->hw->n_cipher_schemes = 1;
-
- cs->cipher = le32_to_cpu(fwcs->cipher);
- cs->iftype = BIT(NL80211_IFTYPE_STATION);
- cs->hdr_len = fwcs->hdr_len;
- cs->pn_len = fwcs->pn_len;
- cs->pn_off = fwcs->pn_off;
- cs->key_idx_off = fwcs->key_idx_off;
- cs->key_idx_mask = fwcs->key_idx_mask;
- cs->key_idx_shift = fwcs->key_idx_shift;
- cs->mic_len = fwcs->mic_len;
-
- mvm->hw->cipher_schemes = mvm->cs;
- mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher;
- hw->wiphy->n_cipher_suites++;
- }
-
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_FTM_CALIBRATED)) {
wiphy_ext_feature_set(hw->wiphy,
@@ -554,8 +531,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
- if (iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- WOWLAN_KEK_KCK_MATERIAL,
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL,
IWL_FW_CMD_VER_UNKNOWN) == 3)
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
@@ -568,9 +544,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
if (iwl_mvm_is_oce_supported(mvm)) {
- u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
- IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC, 0);
+ u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC, 0);
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP);
@@ -1155,7 +1129,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
/* async_handlers_wk is now blocked */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA, 0) < 12)
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12)
iwl_mvm_rm_aux_sta(mvm);
iwl_mvm_stop_device(mvm);
@@ -1247,6 +1221,7 @@ static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
s16 tx_power)
{
+ u32 cmd_id = REDUCE_TX_POWER_CMD;
int len;
struct iwl_dev_tx_power_cmd cmd = {
.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
@@ -1254,14 +1229,15 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
.common.pwr_restriction = cpu_to_le16(8 * tx_power),
};
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- REDUCE_TX_POWER_CMD,
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
- if (cmd_ver == 6)
+ if (cmd_ver == 7)
+ len = sizeof(cmd.v7);
+ else if (cmd_ver == 6)
len = sizeof(cmd.v6);
else if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_REDUCE_TX_POWER))
@@ -1275,7 +1251,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/* all structs have the same common part, add it */
len += sizeof(cmd.common);
- return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}
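Here, and in iwl_mvm_sar_select_profile() above, adding v7 support follows the same ladder: the command version selects the size of the version-specific body, and the header shared by all versions is added exactly once at the end. In sketch form (the oldest fallback branch is hypothetical, stand-in for the API-flag cases in the hunk):

	if (cmd_ver == 7)
		len = sizeof(cmd.v7);
	else if (cmd_ver == 6)
		len = sizeof(cmd.v6);
	else
		len = sizeof(cmd.v3);	/* hypothetical oldest fallback */

	/* all versions share the same leading common block */
	len += sizeof(cmd.common);
	return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);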
static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
@@ -1336,6 +1312,15 @@ static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
};
+ /*
+ * In the new flow, since FW is in charge of the timing,
+ * if the driver has canceled the channel switch, it will receive the
+ * CHANNEL_SWITCH_START_NOTIF notification from FW and then cancel it
+ */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ CHANNEL_SWITCH_ERROR_NOTIF, 0))
+ return;
+
IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id);
mutex_lock(&mvm->mutex);
@@ -1846,11 +1831,108 @@ static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
return res;
}
+static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm,
+ struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss,
+ u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit)
+{
+ int i;
+
+ /*
+ * FW currently supports only nss == MAX_HE_SUPP_NSS
+ *
+ * If nss > MAX: we can ignore values we don't support
+ * If nss < MAX: we can set zeros in other streams
+ */
+ if (nss > MAX_HE_SUPP_NSS) {
+ IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
+ MAX_HE_SUPP_NSS);
+ nss = MAX_HE_SUPP_NSS;
+ }
+
+ for (i = 0; i < nss; i++) {
+ u8 ru_index_tmp = ru_index_bitmap << 1;
+ u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE;
+ u8 bw;
+
+ for (bw = 0;
+ bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+ bw++) {
+ ru_index_tmp >>= 1;
+
+ if (!(ru_index_tmp & 1))
+ continue;
+
+ high_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit);
+ ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+ low_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit);
+ ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+
+ pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
+ pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
+ }
+ }
+}
+
+static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_he_pkt_ext_v2 *pkt_ext)
+{
+ u8 nss = (sta->he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1;
+ u8 *ppe = &sta->he_cap.ppe_thres[0];
+ u8 ru_index_bitmap =
+ u8_get_bits(*ppe,
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ /* Starting after PPE header */
+ u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE;
+
+ iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit);
+}
+
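A worked example of the header decode feeding iwl_mvm_set_pkt_ext_from_he_ppe() above, with the mask values assumed from ieee80211.h (NSS in bits 0-2, RU index bitmap in bits 3-6, threshold info starting at bit 7):

	u8 hdr = sta->he_cap.ppe_thres[0];	/* e.g. 0x19 = 0b0011001 */
	u8 nss = (hdr & IEEE80211_PPE_THRES_NSS_MASK) + 1;	/* -> 2 */
	u8 ru_index_bitmap =
		u8_get_bits(hdr, IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
						/* -> 0x3: RU indexes 0, 1 */
	u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE; /* bit 7 */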
+static void iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext,
+ u8 nominal_padding,
+ u32 *flags)
+{
+ int low_th = -1;
+ int high_th = -1;
+ int i;
+
+ switch (nominal_padding) {
+ case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US:
+ low_th = IWL_HE_PKT_EXT_NONE;
+ high_th = IWL_HE_PKT_EXT_NONE;
+ break;
+ case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US:
+ low_th = IWL_HE_PKT_EXT_BPSK;
+ high_th = IWL_HE_PKT_EXT_NONE;
+ break;
+ case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US:
+ low_th = IWL_HE_PKT_EXT_NONE;
+ high_th = IWL_HE_PKT_EXT_BPSK;
+ break;
+ }
+
+ /* Set the PPE thresholds accordingly */
+ if (low_th >= 0 && high_th >= 0) {
+ for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
+ u8 bw;
+
+ for (bw = 0;
+ bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+ bw++) {
+ pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
+ pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
+ }
+ }
+
+ *flags |= STA_CTXT_HE_PACKET_EXT;
+ }
+}
+
static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, u8 sta_id)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_he_sta_context_cmd sta_ctxt_cmd = {
+ struct iwl_he_sta_context_cmd_v3 sta_ctxt_cmd = {
.sta_id = sta_id,
.tid_limit = IWL_MAX_TID_COUNT,
.bss_color = vif->bss_conf.he_bss_color.color,
@@ -1858,16 +1940,39 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
.frame_time_rts_th =
cpu_to_le16(vif->bss_conf.frame_time_rts_th),
};
- int size = fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_MBSSID_HE) ?
- sizeof(sta_ctxt_cmd) :
- sizeof(struct iwl_he_sta_context_cmd_v1);
+ struct iwl_he_sta_context_cmd_v2 sta_ctxt_cmd_v2 = {};
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, STA_HE_CTXT_CMD);
+ u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 2);
+ int size;
struct ieee80211_sta *sta;
u32 flags;
int i;
const struct ieee80211_sta_he_cap *own_he_cap = NULL;
struct ieee80211_chanctx_conf *chanctx_conf;
const struct ieee80211_supported_band *sband;
+ void *cmd;
+
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_MBSSID_HE))
+ ver = 1;
+
+ switch (ver) {
+ case 1:
+ /* same layout as v2 except some data at the end */
+ cmd = &sta_ctxt_cmd_v2;
+ size = sizeof(struct iwl_he_sta_context_cmd_v1);
+ break;
+ case 2:
+ cmd = &sta_ctxt_cmd_v2;
+ size = sizeof(struct iwl_he_sta_context_cmd_v2);
+ break;
+ case 3:
+ cmd = &sta_ctxt_cmd;
+ size = sizeof(struct iwl_he_sta_context_cmd_v3);
+ break;
+ default:
+ IWL_ERR(mvm, "bad STA_HE_CTXT_CMD version %d\n", ver);
+ return;
+ }
rcu_read_lock();
@@ -1932,97 +2037,25 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
* Initialize the PPE thresholds to "None" (7), as described in Table
* 9-262ac of 802.11ax/D3.0.
*/
- memset(&sta_ctxt_cmd.pkt_ext, 7, sizeof(sta_ctxt_cmd.pkt_ext));
+ memset(&sta_ctxt_cmd.pkt_ext, IWL_HE_PKT_EXT_NONE,
+ sizeof(sta_ctxt_cmd.pkt_ext));
/* If PPE Thresholds exist, parse them into a FW-familiar format. */
if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
- IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
- u8 nss = (sta->he_cap.ppe_thres[0] &
- IEEE80211_PPE_THRES_NSS_MASK) + 1;
- u8 ru_index_bitmap =
- (sta->he_cap.ppe_thres[0] &
- IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
- IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
- u8 *ppe = &sta->he_cap.ppe_thres[0];
- u8 ppe_pos_bit = 7; /* Starting after PPE header */
-
- /*
- * FW currently supports only nss == MAX_HE_SUPP_NSS
- *
- * If nss > MAX: we can ignore values we don't support
- * If nss < MAX: we can set zeros in other streams
- */
- if (nss > MAX_HE_SUPP_NSS) {
- IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
- MAX_HE_SUPP_NSS);
- nss = MAX_HE_SUPP_NSS;
- }
-
- for (i = 0; i < nss; i++) {
- u8 ru_index_tmp = ru_index_bitmap << 1;
- u8 bw;
-
- for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) {
- ru_index_tmp >>= 1;
- if (!(ru_index_tmp & 1))
- continue;
-
- sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] =
- iwl_mvm_he_get_ppe_val(ppe,
- ppe_pos_bit);
- ppe_pos_bit +=
- IEEE80211_PPE_THRES_INFO_PPET_SIZE;
- sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] =
- iwl_mvm_he_get_ppe_val(ppe,
- ppe_pos_bit);
- ppe_pos_bit +=
- IEEE80211_PPE_THRES_INFO_PPET_SIZE;
- }
- }
-
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta,
+ &sta_ctxt_cmd.pkt_ext);
flags |= STA_CTXT_HE_PACKET_EXT;
- } else if (u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9],
- IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK)
- != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) {
- int low_th = -1;
- int high_th = -1;
-
- /* Take the PPE thresholds from the nominal padding info */
- switch (u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9],
- IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK)) {
- case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US:
- low_th = IWL_HE_PKT_EXT_NONE;
- high_th = IWL_HE_PKT_EXT_NONE;
- break;
- case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US:
- low_th = IWL_HE_PKT_EXT_BPSK;
- high_th = IWL_HE_PKT_EXT_NONE;
- break;
- case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US:
- low_th = IWL_HE_PKT_EXT_NONE;
- high_th = IWL_HE_PKT_EXT_BPSK;
- break;
- }
-
- /* Set the PPE thresholds accordingly */
- if (low_th >= 0 && high_th >= 0) {
- struct iwl_he_pkt_ext *pkt_ext =
- (struct iwl_he_pkt_ext *)&sta_ctxt_cmd.pkt_ext;
-
- for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
- u8 bw;
-
- for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX;
- bw++) {
- pkt_ext->pkt_ext_qam_th[i][bw][0] =
- low_th;
- pkt_ext->pkt_ext_qam_th[i][bw][1] =
- high_th;
- }
- }
-
- flags |= STA_CTXT_HE_PACKET_EXT;
- }
+ /* PPE Thresholds don't exist - set the API PPE values
+ * according to the Common Nominal Packet Padding field.
+ */
+ } else {
+ u8 nominal_padding =
+ u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9],
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
+ if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED)
+ iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext,
+ nominal_padding,
+ &flags);
}
if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
@@ -2085,9 +2118,46 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
sta_ctxt_cmd.flags = cpu_to_le32(flags);
- if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD,
- DATA_PATH_GROUP, 0),
- 0, size, &sta_ctxt_cmd))
+ if (ver < 3) {
+ /* fields before pkt_ext */
+ BUILD_BUG_ON(offsetof(typeof(sta_ctxt_cmd), pkt_ext) !=
+ offsetof(typeof(sta_ctxt_cmd_v2), pkt_ext));
+ memcpy(&sta_ctxt_cmd_v2, &sta_ctxt_cmd,
+ offsetof(typeof(sta_ctxt_cmd), pkt_ext));
+
+ /* pkt_ext */
+ for (i = 0;
+ i < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th);
+ i++) {
+ u8 bw;
+
+ for (bw = 0;
+ bw < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i]);
+ bw++) {
+ BUILD_BUG_ON(sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]) !=
+ sizeof(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw]));
+
+ memcpy(&sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw],
+ &sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw],
+ sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]));
+ }
+ }
+
+ /* fields after pkt_ext */
+ BUILD_BUG_ON(sizeof(sta_ctxt_cmd) -
+ offsetofend(typeof(sta_ctxt_cmd), pkt_ext) !=
+ sizeof(sta_ctxt_cmd_v2) -
+ offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext));
+ memcpy((u8 *)&sta_ctxt_cmd_v2 +
+ offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext),
+ (u8 *)&sta_ctxt_cmd +
+ offsetofend(typeof(sta_ctxt_cmd), pkt_ext),
+ sizeof(sta_ctxt_cmd) -
+ offsetofend(typeof(sta_ctxt_cmd), pkt_ext));
+ sta_ctxt_cmd_v2.reserved3 = 0;
+ }
+
+ if (iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, size, cmd))
IWL_ERR(mvm, "Failed to config FW to work HE!\n");
}
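When the firmware only understands v1/v2 of STA_HE_CTXT_CMD, the block above repacks the freshly built v3 command: the prefix before pkt_ext is byte-identical, the pkt_ext thresholds are copied per supported row, and the tail after pkt_ext is moved as one block, with BUILD_BUG_ON() pinning every layout assumption at compile time. A standalone sketch of the technique, with hypothetical struct layouts:

	#include <stddef.h>
	#include <string.h>

	/* offsetofend() as defined in the kernel's linux/stddef.h */
	#define offsetofend(TYPE, MEMBER) \
		(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

	struct cmd_v2 { int a; char ext[4]; int tail; };  /* hypothetical */
	struct cmd_v3 { int a; char ext[8]; int tail; };  /* hypothetical */

	static void repack_v3_to_v2(struct cmd_v2 *dst, const struct cmd_v3 *src)
	{
		/* prefix: layouts are identical up to 'ext' */
		memcpy(dst, src, offsetof(struct cmd_v3, ext));
		/* middle: copy only as much of 'ext' as v2 can hold */
		memcpy(dst->ext, src->ext, sizeof(dst->ext));
		/* tail: everything after 'ext', moved as one block */
		memcpy((char *)dst + offsetofend(struct cmd_v2, ext),
		       (const char *)src + offsetofend(struct cmd_v3, ext),
		       sizeof(*src) - offsetofend(struct cmd_v3, ext));
	}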
@@ -2302,11 +2372,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/*
* We received a beacon from the associated AP so
* remove the session protection.
- * A firmware with the new API will remove it automatically.
*/
- if (!fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
- iwl_mvm_stop_session_protection(mvm, vif);
+ iwl_mvm_stop_session_protection(mvm, vif);
iwl_mvm_sf_update(mvm, vif, false);
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
@@ -2950,7 +3017,7 @@ static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm,
if (he_cap) {
/* we know that ours is writable */
- struct ieee80211_sta_he_cap *he = (void *)he_cap;
+ struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;
he->he_cap_elem.phy_cap_info[0] |=
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
@@ -3414,12 +3481,7 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
/* support HW crypto on TX */
return 0;
default:
- /* currently FW supports only one optional cipher scheme */
- if (hw->n_cipher_schemes &&
- hw->cipher_schemes->cipher == key->cipher)
- key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
- else
- return -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
switch (cmd) {
@@ -3802,8 +3864,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
/* Use aux roc framework (HS20) */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA, 0) >= 12) {
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12) {
u32 lmac_id;
lmac_id = iwl_mvm_get_lmac_id(mvm->fw,
@@ -4607,6 +4668,15 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_STATION:
/*
+ * In the new flow, FW is in charge of timing the switch, so there
+ * is no need for all of this
+ */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ CHANNEL_SWITCH_ERROR_NOTIF,
+ 0))
+ break;
+
+ /*
* We haven't configured the firmware to be associated yet since
* we don't know the dtim period. In this case, the firmware can't
* track the beacons.
@@ -4677,6 +4747,14 @@ static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
.cs_mode = chsw->block_tx,
};
+ /*
+ * In the new flow, FW is in charge of timing the switch, so there
+ * is no need for all of this
+ */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ CHANNEL_SWITCH_ERROR_NOTIF, 0))
+ return;
+
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY))
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index d78f40730594..c6bc85d4600a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -1064,7 +1064,6 @@ struct iwl_mvm {
u32 ciphers[IWL_MVM_NUM_CIPHERS];
- struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
struct cfg80211_ftm_responder_stats ftm_resp_stats;
struct {
@@ -1086,7 +1085,6 @@ struct iwl_mvm {
} cmd_ver;
struct ieee80211_vif *nan_vif;
-#define IWL_MAX_BAID 32
struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID];
/*
@@ -1106,6 +1104,8 @@ struct iwl_mvm {
unsigned long last_6ghz_passive_scan_jiffies;
unsigned long last_reset_or_resume_time_jiffies;
+
+ bool sta_remove_requires_queue_remove;
};
/* Extract MVM priv from op_mode and _hw */
@@ -1671,6 +1671,8 @@ void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
/* Bindings */
int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -1932,10 +1934,6 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
void iwl_mvm_stop_device(struct iwl_mvm *mvm);
-/* Re-configure the SCD for a queue that has already been configured */
-int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
- int tid, int frame_limit, u16 ssn);
-
/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
@@ -2085,6 +2083,8 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm,
struct iwl_rfi_lut_entry *rfi_table);
struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm);
+void iwl_rfi_deactivate_notif_handler(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band)
{
@@ -2159,8 +2159,7 @@ iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
static inline int iwl_umac_scan_get_max_profiles(const struct iwl_fw *fw)
{
- u8 ver = iwl_fw_lookup_cmd_ver(fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+ u8 ver = iwl_fw_lookup_cmd_ver(fw, SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
IWL_FW_CMD_VER_UNKNOWN);
return (ver == IWL_FW_CMD_VER_UNKNOWN || ver < 3) ?
IWL_SCAN_MAX_PROFILES : IWL_SCAN_MAX_PROFILES_V2;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
index 41880517e8bb..c7dabc6b3765 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
@@ -47,8 +47,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct iwl_proto_offload_cmd_common *common;
u32 enabled = 0, size;
u32 capa_flags = mvm->fw->ucode_capa.flags;
- int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- PROT_OFFLOAD_CONFIG_CMD, 0);
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 0);
#if IS_ENABLED(CONFIG_IPV6)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 1f8b97995b94..b2f33ebdf485 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -24,6 +24,7 @@
#include "iwl-prph.h"
#include "rs.h"
#include "fw/api/scan.h"
+#include "fw/api/rfi.h"
#include "time-event.h"
#include "fw-api.h"
#include "fw/acpi.h"
@@ -32,6 +33,7 @@
#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IWLWIFI);
static const struct iwl_op_mode_ops iwl_mvm_ops;
static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
@@ -191,7 +193,7 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
if (he_cap) {
/* we know that ours is writable */
- struct ieee80211_sta_he_cap *he = (void *)he_cap;
+ struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;
WARN_ON(!he->has_he);
WARN_ON(!(he->he_cap_elem.phy_cap_info[0] &
@@ -235,7 +237,8 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
*/
mvm->fw_static_smps_request =
req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE);
- ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ ieee80211_iterate_interfaces(mvm->hw,
+ IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
iwl_mvm_intf_dual_chain_req, NULL);
}
@@ -382,6 +385,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF,
iwl_mvm_channel_switch_start_notif,
RX_HANDLER_SYNC, struct iwl_channel_switch_start_notif),
+ RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF,
+ iwl_mvm_channel_switch_error_notif,
+ RX_HANDLER_ASYNC_UNLOCKED,
+ struct iwl_channel_switch_error_notif),
RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
struct iwl_datapath_monitor_notif),
@@ -390,6 +397,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_rx_thermal_dual_chain_req,
RX_HANDLER_ASYNC_LOCKED,
struct iwl_thermal_dual_chain_request),
+
+ RX_HANDLER_GRP(SYSTEM_GROUP, RFI_DEACTIVATE_NOTIF,
+ iwl_rfi_deactivate_notif_handler, RX_HANDLER_ASYNC_UNLOCKED,
+ struct iwl_rfi_deactivate_notif),
};
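Each new entry above registers a notification handler. RX_HANDLER_GRP binds a (group, command) pair to a handler function, a dispatch context (RX_HANDLER_ASYNC_UNLOCKED runs from the async worker without the mvm mutex held), and the minimum payload size to accept. Its expansion in ops.c is roughly the following (simplified here, shown as an assumption from context):

	#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context, _struct)	\
		{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn,		\
		  .context = _context, .min_size = sizeof(_struct), }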
#undef RX_HANDLER
#undef RX_HANDLER_GRP
@@ -443,7 +454,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(POWER_TABLE_CMD),
HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
- HCMD_NAME(DC2DC_CONFIG_CMD),
HCMD_NAME(NVM_ACCESS_CMD),
HCMD_NAME(BEACON_NOTIFICATION),
HCMD_NAME(BEACON_TEMPLATE_CMD),
@@ -499,6 +509,7 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
HCMD_NAME(RFI_CONFIG_CMD),
HCMD_NAME(RFI_GET_FREQ_TABLE_CMD),
HCMD_NAME(SYSTEM_FEATURES_CONTROL_CMD),
+ HCMD_NAME(RFI_DEACTIVATE_NOTIF),
};
/* Please keep this array *SORTED* by hex value.
@@ -535,6 +546,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
HCMD_NAME(TLC_MNG_CONFIG_CMD),
HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
+ HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
HCMD_NAME(MONITOR_NOTIF),
HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
HCMD_NAME(STA_PM_NOTIF),
@@ -633,13 +645,11 @@ unlock:
mutex_unlock(&mvm->mutex);
}
-static int iwl_mvm_fwrt_dump_start(void *ctx)
+static void iwl_mvm_fwrt_dump_start(void *ctx)
{
struct iwl_mvm *mvm = ctx;
mutex_lock(&mvm->mutex);
-
- return 0;
}
static void iwl_mvm_fwrt_dump_end(void *ctx)
@@ -1076,12 +1086,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (!hw)
return NULL;
- hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
if (cfg->max_tx_agg_size)
hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
else
- hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
op_mode = hw->priv;
@@ -1244,6 +1254,14 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE);
+ trans_cfg.queue_alloc_cmd_ver =
+ iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(DATA_PATH_GROUP,
+ SCD_QUEUE_CONFIG_CMD),
+ 0);
+ mvm->sta_remove_requires_queue_remove =
+ trans_cfg.queue_alloc_cmd_ver > 0;
+
/* Configure transport layer */
iwl_trans_configure(mvm->trans, &trans_cfg);
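The RX_HANDLER_GRP entries added above register a handler together with the notification's expected payload struct, so size validation happens before dispatch. A compressed sketch of that table-driven pattern, with hypothetical sketch_* types standing in for the driver's:

#include <stdint.h>
#include <stddef.h>

struct sketch_rx_handler {
	uint8_t group, cmd;   /* which notification this entry matches */
	size_t min_size;      /* minimum payload size, checked before dispatch */
	void (*fn)(void *ctx, const void *data, size_t len);
};

/* Dispatch one notification against a handler table. */
static void sketch_dispatch(const struct sketch_rx_handler *tbl, size_t n,
			    uint8_t group, uint8_t cmd, void *ctx,
			    const void *data, size_t len)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (tbl[i].group != group || tbl[i].cmd != cmd)
			continue;
		if (len < tbl[i].min_size)	/* reject truncated payloads */
			return;
		tbl[i].fn(ctx, data, len);
		return;
	}
}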
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 9af40b0fa37a..a3cefbc43e80 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
*/
@@ -158,8 +158,7 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
/* we only support RLC command version 2 */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
- RLC_CONFIG_CMD, 0) < 2)
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) < 2)
iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info,
chains_static, chains_dynamic);
}
@@ -172,8 +171,7 @@ static int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm,
.phy_id = cpu_to_le32(ctxt->id),
};
- if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
- RLC_CONFIG_CMD, 0) < 2)
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) < 2)
return 0;
BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_DRIVER_FORCE !=
@@ -209,8 +207,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
u32 action)
{
int ret;
- int ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- PHY_CONTEXT_CMD, 1);
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1);
if (ver == 3 || ver == 4) {
struct iwl_phy_context_cmd cmd = {};
@@ -301,8 +298,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
lockdep_assert_held(&mvm->mutex);
- if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
- RLC_CONFIG_CMD, 0) >= 2 &&
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) >= 2 &&
ctxt->channel == chandef->chan &&
ctxt->width == chandef->width &&
ctxt->center_freq1 == chandef->center_freq1)
@@ -349,18 +345,31 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
* otherwise we might not be able to reuse this phy.
*/
if (ctxt->ref == 0) {
- struct ieee80211_channel *chan;
+ struct ieee80211_channel *chan = NULL;
struct cfg80211_chan_def chandef;
- struct ieee80211_supported_band *sband = NULL;
- enum nl80211_band band = NL80211_BAND_2GHZ;
+ struct ieee80211_supported_band *sband;
+ enum nl80211_band band;
+ int channel;
- while (!sband && band < NUM_NL80211_BANDS)
- sband = mvm->hw->wiphy->bands[band++];
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
+ sband = mvm->hw->wiphy->bands[band];
- if (WARN_ON(!sband))
- return;
+ if (!sband)
+ continue;
+
+ for (channel = 0; channel < sband->n_channels; channel++)
+ if (!(sband->channels[channel].flags &
+ IEEE80211_CHAN_DISABLED)) {
+ chan = &sband->channels[channel];
+ break;
+ }
- chan = &sband->channels[0];
+ if (chan)
+ break;
+ }
+
+ if (WARN_ON(!chan))
+ return;
cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1);
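The rewritten unref path no longer assumes the first channel of the first populated band is usable; it walks every band for the first channel not flagged disabled. The same search, reduced to a standalone helper under simplified types (the sketch_* names are illustrative):

struct sketch_channel { unsigned int flags; };
struct sketch_band { struct sketch_channel *channels; int n_channels; };

#define SKETCH_CHAN_DISABLED 0x1u

static struct sketch_channel *
sketch_first_enabled_channel(struct sketch_band *const *bands, int n_bands)
{
	int b, c;

	for (b = 0; b < n_bands; b++) {
		if (!bands[b])		/* band not supported by the device */
			continue;
		for (c = 0; c < bands[b]->n_channels; c++)
			if (!(bands[b]->channels[c].flags & SKETCH_CHAN_DISABLED))
				return &bands[b]->channels[c];
	}
	return NULL;	/* caller must handle "no usable channel" */
}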
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
index 3d0166df2002..c862bd243b55 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
index f054ce76bed5..bb77bc9aa821 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
@@ -125,12 +125,19 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm)
if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != resp_size))
return ERR_PTR(-EIO);
- resp = kzalloc(resp_size, GFP_KERNEL);
+ resp = kmemdup(cmd.resp_pkt->data, resp_size, GFP_KERNEL);
if (!resp)
return ERR_PTR(-ENOMEM);
- memcpy(resp, cmd.resp_pkt->data, resp_size);
-
iwl_free_resp(&cmd);
return resp;
}
+
+void iwl_rfi_deactivate_notif_handler(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rfi_deactivate_notif *notif = (void *)pkt->data;
+
+ IWL_INFO(mvm, "RFIm is deactivated, reason = %d\n", notif->reason);
+}
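kmemdup() folds the kzalloc()+memcpy() pair above into one call and drops the pointless zeroing of memory that is immediately overwritten. For reference, a userspace analogue of what it does; the kernel version additionally takes gfp flags:

#include <stdlib.h>
#include <string.h>

/* Userspace analogue of kmemdup(): allocate and copy in one step. */
static void *sketch_memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;	/* caller owns the buffer, as with kmemdup() */
}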
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 66808c55aa0e..9830d2663689 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include "rs.h"
#include "fw-api.h"
@@ -97,7 +97,10 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
if (he_cap->has_he &&
(he_cap->he_cap_elem.phy_cap_info[3] &
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK))
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK &&
+ sband->iftype_data &&
+ sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[3] &
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK))
flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;
return flags;
@@ -420,7 +423,7 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct ieee80211_hw *hw = mvm->hw;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
- u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, TLC_MNG_CONFIG_CMD);
struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
struct iwl_tlc_config_cmd_v4 cfg_cmd = {
@@ -449,8 +452,22 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
*/
sta->max_amsdu_len = max_amsdu_len;
- cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
- TLC_MNG_CONFIG_CMD, 0);
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(DATA_PATH_GROUP,
+ TLC_MNG_CONFIG_CMD),
+ 0);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n",
+ cfg_cmd.sta_id, cfg_cmd.max_ch_width, cfg_cmd.mode);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, chains=0x%X, ch_wid_supp=%d, flags=0x%X\n",
+ cfg_cmd.chains, cfg_cmd.sgi_ch_width_supp, cfg_cmd.flags);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, mpdu_len=%d, no_ht_rate=0x%X, tx_op=%d\n",
+ cfg_cmd.max_mpdu_len, cfg_cmd.non_ht_rates, cfg_cmd.max_tx_op);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][0]=0x%X, ht_rate[1][0]=0x%X\n",
+ cfg_cmd.ht_rates[0][0], cfg_cmd.ht_rates[1][0]);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][1]=0x%X, ht_rate[1][1]=0x%X\n",
+ cfg_cmd.ht_rates[0][1], cfg_cmd.ht_rates[1][1]);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][2]=0x%X, ht_rate[1][2]=0x%X\n",
+ cfg_cmd.ht_rates[0][2], cfg_cmd.ht_rates[1][2]);
if (cmd_ver == 4) {
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC,
sizeof(cfg_cmd), &cfg_cmd);
@@ -474,8 +491,9 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
u16 cmd_size = sizeof(cfg_cmd_v3);
/* In old versions of the API the struct is 4 bytes smaller */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
- TLC_MNG_CONFIG_CMD, 0) < 3)
+ if (iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(DATA_PATH_GROUP,
+ TLC_MNG_CONFIG_CMD), 0) < 3)
cmd_size -= 4;
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index f4d02f9fe16d..62114616317c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -454,8 +454,6 @@ static const u16 expected_tpt_mimo2_160MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 971, 0, 1925, 2861, 3779, 5574, 7304, 8147, 8976, 10592, 11640},
};
-#define MCS_INDEX_PER_STREAM (8)
-
static const char *rs_pretty_lq_type(enum iwl_table_type type)
{
static const char * const lq_types[] = {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 64446a11ef98..78198da7e55b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -83,8 +83,8 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
fraglen = len - hdrlen;
if (fraglen) {
- int offset = (void *)hdr + hdrlen -
- rxb_addr(rxb) + rxb_offset(rxb);
+ int offset = (u8 *)hdr + hdrlen -
+ (u8 *)rxb_addr(rxb) + rxb_offset(rxb);
skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
fraglen, rxb->truesize);
@@ -640,7 +640,7 @@ static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
u16 vif_id = mvmvif->id;
- if (WARN_ONCE(vif_id > MAC_INDEX_AUX, "invalid vif id: %d", vif_id))
+ if (WARN_ONCE(vif_id >= MAC_INDEX_AUX, "invalid vif id: %d", vif_id))
return;
if (vif->type != NL80211_IFTYPE_STATION)
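The offset computation is switched from void * arithmetic, a GNU extension, to explicit u8 * casts that are valid standard C. A minimal sketch of the byte-granular distance being computed, under hypothetical names:

#include <stddef.h>
#include <stdint.h>

/* Byte distance from the receive buffer start to the payload that
 * follows a header of hdrlen bytes, plus the buffer's own offset. */
static ptrdiff_t sketch_frag_offset(const void *hdr, size_t hdrlen,
				    const void *buf_start, size_t buf_off)
{
	return ((const uint8_t *)hdr + hdrlen) -
	       (const uint8_t *)buf_start + (ptrdiff_t)buf_off;
}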
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 295629c5c035..2c43a9989783 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -209,13 +209,16 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
shdr->type != htons(ETH_P_PAE) &&
shdr->type != htons(ETH_P_TDLS))))
skb->ip_summed = CHECKSUM_NONE;
+ else
+ /* mac80211 assumes full CSUM including SNAP header */
+ skb_postpush_rcsum(skb, shdr, sizeof(*shdr));
}
fraglen = len - headlen;
if (fraglen) {
- int offset = (void *)hdr + headlen + pad_len -
- rxb_addr(rxb) + rxb_offset(rxb);
+ int offset = (u8 *)hdr + headlen + pad_len -
+ (u8 *)rxb_addr(rxb) + rxb_offset(rxb);
skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
fraglen, rxb->truesize);
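When the hardware checksum is kept (the new else branch), the SNAP header bytes that mac80211 expects to be covered must be folded into skb->csum, which is what skb_postpush_rcsum() does. The ones'-complement accumulate underneath, as a hedged sketch; the kernel's csum_partial() is the real, optimized implementation:

#include <stddef.h>
#include <stdint.h>

/* Accumulate len bytes into a 16-bit ones'-complement sum. */
static uint32_t sketch_csum_add(uint32_t csum, const uint8_t *data, size_t len)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		csum += (uint32_t)data[i] << 8 | data[i + 1];
	if (len & 1)				/* trailing odd byte */
		csum += (uint32_t)data[len - 1] << 8;
	while (csum >> 16)			/* fold carries back in */
		csum = (csum & 0xffff) + (csum >> 16);
	return csum;
}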
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 5f92a09db374..a4077053e374 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -20,7 +20,6 @@
#define IWL_SCAN_DWELL_FRAGMENTED 44
#define IWL_SCAN_DWELL_EXTENDED 90
#define IWL_SCAN_NUM_OF_FRAGS 3
-#define IWL_SCAN_LAST_2_4_CHN 14
/* adaptive dwell max budget time [TU] for full scan */
#define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
@@ -98,6 +97,7 @@ struct iwl_mvm_scan_params {
u32 n_6ghz_params;
bool scan_6ghz;
bool enable_6ghz_passive;
+ bool respect_p2p_go, respect_p2p_go_hb;
};
static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
@@ -169,17 +169,6 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
}
-static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int *global_cnt = data;
-
- if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
- mvmvif->phy_ctxt->id < NUM_PHY_CTX)
- *global_cnt += 1;
-}
-
static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
{
return mvm->tcm.result.global_load;
@@ -191,26 +180,31 @@ iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
return mvm->tcm.result.band_load[band];
}
-struct iwl_is_dcm_with_go_iterator_data {
+struct iwl_mvm_scan_iter_data {
+ u32 global_cnt;
struct ieee80211_vif *current_vif;
bool is_dcm_with_p2p_go;
};
-static void iwl_mvm_is_dcm_with_go_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
+static void iwl_mvm_scan_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
{
- struct iwl_is_dcm_with_go_iterator_data *data = _data;
- struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm_vif *curr_mvmvif =
- iwl_mvm_vif_from_mac80211(data->current_vif);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_scan_iter_data *data = _data;
+ struct iwl_mvm_vif *curr_mvmvif;
- /* exclude the given vif */
- if (vif == data->current_vif)
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
+ mvmvif->phy_ctxt->id < NUM_PHY_CTX)
+ data->global_cnt += 1;
+
+ if (!data->current_vif || vif == data->current_vif)
return;
+ curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif);
+
if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
- other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
- other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)
+ mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
+ mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)
data->is_dcm_with_p2p_go = true;
}
@@ -220,13 +214,18 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
enum iwl_mvm_traffic_load load,
bool low_latency)
{
- int global_cnt = 0;
+ struct iwl_mvm_scan_iter_data data = {
+ .current_vif = vif,
+ .is_dcm_with_p2p_go = false,
+ .global_cnt = 0,
+ };
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_scan_condition_iterator,
- &global_cnt);
- if (!global_cnt)
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_scan_iterator,
+ &data);
+
+ if (!data.global_cnt)
return IWL_SCAN_TYPE_UNASSOC;
if (fw_has_api(&mvm->fw->ucode_capa,
@@ -235,23 +234,14 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
(!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE))
return IWL_SCAN_TYPE_FRAGMENTED;
- /* in case of DCM with GO where BSS DTIM interval < 220msec
+ /*
+ * in case of DCM with GO where BSS DTIM interval < 220msec
* set all scan requests as fast-balance scan
- * */
+ */
if (vif && vif->type == NL80211_IFTYPE_STATION &&
- vif->bss_conf.dtim_period < 220) {
- struct iwl_is_dcm_with_go_iterator_data data = {
- .current_vif = vif,
- .is_dcm_with_p2p_go = false,
- };
-
- ieee80211_iterate_active_interfaces_atomic(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_is_dcm_with_go_iterator,
- &data);
- if (data.is_dcm_with_p2p_go)
- return IWL_SCAN_TYPE_FAST_BALANCE;
- }
+ vif->bss_conf.dtim_period < 220 &&
+ data.is_dcm_with_p2p_go)
+ return IWL_SCAN_TYPE_FAST_BALANCE;
}
if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
@@ -651,9 +641,7 @@ static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
NL80211_BAND_2GHZ,
no_cck);
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA,
- 0) < 12) {
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
@@ -1090,8 +1078,7 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
/* This function should not be called when using ADD_STA ver >=12 */
- WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA, 0) >= 12);
+ WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12);
cfg->bcast_sta_id = mvm->aux_sta.sta_id;
cfg->channel_flags = channel_flags;
@@ -1142,8 +1129,7 @@ static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config,
memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
/* This function should not be called when using ADD_STA ver >=12 */
- WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA, 0) >= 12);
+ WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12);
cfg->bcast_sta_id = mvm->aux_sta.sta_id;
cfg->channel_flags = channel_flags;
@@ -1156,7 +1142,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
void *cfg;
int ret, cmd_size;
struct iwl_host_cmd cmd = {
- .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+ .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD),
};
enum iwl_mvm_scan_type type;
enum iwl_mvm_scan_type hb_type = IWL_SCAN_TYPE_NOT_SET;
@@ -1247,7 +1233,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
{
struct iwl_scan_config cfg;
struct iwl_host_cmd cmd = {
- .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+ .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD),
.len[0] = sizeof(cfg),
.data[0] = &cfg,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
@@ -1258,11 +1244,9 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
memset(&cfg, 0, sizeof(cfg));
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA, 0) < 12) {
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
cfg.bcast_sta_id = mvm->aux_sta.sta_id;
- } else if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- SCAN_CFG_CMD, 0) < 5) {
+ } else if (iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_CFG_CMD, 0) < 5) {
/*
* Fw doesn't use this sta anymore. Deprecated on SCAN_CFG_CMD
* version 5.
@@ -1662,7 +1646,7 @@ iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm,
}
}
-static int
+static void
iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct iwl_scan_probe_params_v4 *pp)
@@ -1731,31 +1715,40 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
pp->short_ssid_num = idex_s;
pp->bssid_num = idex_b;
- return 0;
}
/* TODO: this function can be merged with iwl_mvm_scan_umac_fill_ch_p_v6 */
-static void
-iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
+static u32
+iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
u32 n_channels,
struct iwl_scan_probe_params_v4 *pp,
struct iwl_scan_channel_params_v6 *cp,
enum nl80211_iftype vif_type)
{
- struct iwl_scan_channel_cfg_umac *channel_cfg = cp->channel_config;
int i;
struct cfg80211_scan_6ghz_params *scan_6ghz_params =
params->scan_6ghz_params;
+ u32 ch_cnt;
- for (i = 0; i < params->n_channels; i++) {
+ for (i = 0, ch_cnt = 0; i < params->n_channels; i++) {
struct iwl_scan_channel_cfg_umac *cfg =
- &cp->channel_config[i];
+ &cp->channel_config[ch_cnt];
u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0;
u8 j, k, s_max = 0, b_max = 0, n_used_bssid_entries;
bool force_passive, found = false, allow_passive = true,
unsolicited_probe_on_chan = false, psc_no_listen = false;
+ /*
+ * Avoid performing passive scan on non PSC channels unless the
+ * scan is specifically a passive scan, i.e., no SSIDs
+ * configured in the scan command.
+ */
+ if (!cfg80211_channel_is_psc(params->channels[i]) &&
+ !params->n_6ghz_params && params->n_ssids)
+ continue;
+
cfg->v1.channel_num = params->channels[i]->hw_value;
cfg->v2.band = 2;
cfg->v2.iter_count = 1;
@@ -1875,8 +1868,16 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
else
flags |= bssid_bitmap | (s_ssid_bitmap << 16);
- channel_cfg[i].flags |= cpu_to_le32(flags);
+ cfg->flags |= cpu_to_le32(flags);
+ ch_cnt++;
}
+
+ if (params->n_channels > ch_cnt)
+ IWL_DEBUG_SCAN(mvm,
+ "6GHz: reducing number channels: (%u->%u)\n",
+ params->n_channels, ch_cnt);
+
+ return ch_cnt;
}
static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
@@ -1893,9 +1894,25 @@ static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
/* set fragmented ebs for fragmented scan on HB channels */
- if (iwl_mvm_is_scan_fragmented(params->hb_type))
+ if ((!iwl_mvm_is_cdb_supported(mvm) &&
+ iwl_mvm_is_scan_fragmented(params->type)) ||
+ (iwl_mvm_is_cdb_supported(mvm) &&
+ iwl_mvm_is_scan_fragmented(params->hb_type)))
flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
+ /*
+ * force EBS in case the scan is fragmented and P2P GO operation
+ * needs to be taken into consideration during the scan.
+ */
+ if ((!iwl_mvm_is_cdb_supported(mvm) &&
+ iwl_mvm_is_scan_fragmented(params->type) && params->respect_p2p_go) ||
+ (iwl_mvm_is_cdb_supported(mvm) &&
+ iwl_mvm_is_scan_fragmented(params->hb_type) &&
+ params->respect_p2p_go_hb)) {
+ IWL_DEBUG_SCAN(mvm, "Respect P2P GO. Force EBS\n");
+ flags |= IWL_SCAN_CHANNEL_FLAG_FORCE_EBS;
+ }
+
return flags;
}
@@ -2046,6 +2063,26 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
return flags;
}
+static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif, int type)
+{
+ u8 flags = 0;
+
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ if (params->respect_p2p_go)
+ flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB;
+ if (params->respect_p2p_go_hb)
+ flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
+ } else {
+ if (params->respect_p2p_go)
+ flags = IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB |
+ IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
+ }
+
+ return flags;
+}
+
static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif)
@@ -2164,7 +2201,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
struct iwl_scan_umac_chan_param *chan_param;
void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
- void *sec_part = cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) *
+ void *sec_part = (u8 *)cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
struct iwl_scan_req_umac_tail_v2 *tail_v2 =
(struct iwl_scan_req_umac_tail_v2 *)sec_part;
@@ -2248,13 +2285,17 @@ iwl_mvm_scan_umac_fill_general_p_v11(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif,
struct iwl_scan_general_params_v11 *gp,
- u16 gen_flags)
+ u16 gen_flags, u8 gen_flags2)
{
struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
iwl_mvm_scan_umac_dwell_v11(mvm, gp, params);
+ IWL_DEBUG_SCAN(mvm, "Gerenal: flags=0x%x, flags2=0x%x\n",
+ gen_flags, gen_flags2);
+
gp->flags = cpu_to_le16(gen_flags);
+ gp->flags2 = gen_flags2;
if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
@@ -2358,7 +2399,7 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif,
&scan_p->general_params,
- gen_flags);
+ gen_flags, 0);
ret = iwl_mvm_fill_scan_sched_params(params,
scan_p->periodic_params.schedule,
@@ -2384,6 +2425,7 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
struct iwl_scan_probe_params_v4 *pb = &scan_p->probe_params;
int ret;
u16 gen_flags;
+ u8 gen_flags2;
u32 bitmap_ssid = 0;
mvm->scan_uid_status[uid] = type;
@@ -2392,9 +2434,15 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
cmd->uid = cpu_to_le32(uid);
gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
+
+ if (version >= 15)
+ gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type);
+ else
+ gen_flags2 = 0;
+
iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif,
&scan_p->general_params,
- gen_flags);
+ gen_flags, gen_flags2);
ret = iwl_mvm_fill_scan_sched_params(params,
scan_p->periodic_params.schedule,
@@ -2417,14 +2465,16 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
- ret = iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb);
- if (ret)
- return ret;
+ iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb);
+
+ cp->count = iwl_mvm_umac_scan_cfg_channels_v6_6g(mvm, params,
+ params->n_channels,
+ pb, cp, vif->type);
+ if (!cp->count) {
+ mvm->scan_uid_status[uid] = 0;
+ return -EINVAL;
+ }
- iwl_mvm_umac_scan_cfg_channels_v6_6g(params,
- params->n_channels,
- pb, cp, vif->type);
- cp->count = params->n_channels;
if (!params->n_ssids ||
(params->n_ssids == 1 && !params->ssids[0].ssid_len))
cp->flags |= IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER;
@@ -2588,10 +2638,9 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
if (uid < 0)
return uid;
- hcmd->id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
+ hcmd->id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_REQ_UMAC);
- scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC,
+ scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
IWL_FW_CMD_VER_UNKNOWN);
for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) {
@@ -2611,6 +2660,85 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
return uid;
}
+struct iwl_mvm_scan_respect_p2p_go_iter_data {
+ struct ieee80211_vif *current_vif;
+ bool p2p_go;
+ enum nl80211_band band;
+};
+
+static void iwl_mvm_scan_respect_p2p_go_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_scan_respect_p2p_go_iter_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ /* exclude the given vif */
+ if (vif == data->current_vif)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
+ mvmvif->phy_ctxt->id < NUM_PHY_CTX &&
+ (data->band == NUM_NL80211_BANDS ||
+ mvmvif->phy_ctxt->channel->band == data->band))
+ data->p2p_go = true;
+}
+
+static bool _iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool low_latency,
+ enum nl80211_band band)
+{
+ struct iwl_mvm_scan_respect_p2p_go_iter_data data = {
+ .current_vif = vif,
+ .p2p_go = false,
+ .band = band,
+ };
+
+ if (!low_latency)
+ return false;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_scan_respect_p2p_go_iter,
+ &data);
+
+ return data.p2p_go;
+}
+
+static bool iwl_mvm_get_respect_p2p_go_band(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum nl80211_band band)
+{
+ bool low_latency = iwl_mvm_low_latency_band(mvm, band);
+
+ return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency, band);
+}
+
+static bool iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ bool low_latency = iwl_mvm_low_latency(mvm);
+
+ return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency,
+ NUM_NL80211_BANDS);
+}
+
+static void iwl_mvm_fill_respect_p2p_go(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif)
+{
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ params->respect_p2p_go =
+ iwl_mvm_get_respect_p2p_go_band(mvm, vif,
+ NL80211_BAND_2GHZ);
+ params->respect_p2p_go_hb =
+ iwl_mvm_get_respect_p2p_go_band(mvm, vif,
+ NL80211_BAND_5GHZ);
+ } else {
+ params->respect_p2p_go = iwl_mvm_get_respect_p2p_go(mvm, vif);
+ }
+}
+
int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies)
@@ -2662,6 +2790,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
params.scan_6ghz_params = req->scan_6ghz_params;
params.scan_6ghz = req->scan_6ghz;
iwl_mvm_fill_scan_type(mvm, &params, vif);
+ iwl_mvm_fill_respect_p2p_go(mvm, &params, vif);
if (req->duration)
params.iter_notif = true;
@@ -2753,6 +2882,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
params.scan_plans = req->scan_plans;
iwl_mvm_fill_scan_type(mvm, &params, vif);
+ iwl_mvm_fill_respect_p2p_go(mvm, &params, vif);
/* In theory, LMAC scans can handle a 32-bit delay, but since
* waiting for over 18 hours to start the scan is a bit silly
@@ -2922,8 +3052,7 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
ret = iwl_mvm_send_cmd_pdu(mvm,
- iwl_cmd_id(SCAN_ABORT_UMAC,
- IWL_ALWAYS_LONG_GROUP, 0),
+ WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
0, sizeof(cmd), &cmd);
if (!ret)
mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
@@ -2978,8 +3107,7 @@ static int iwl_scan_req_umac_get_size(u8 scan_ver)
int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
int base_size, tail_size;
- u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC,
+ u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
IWL_FW_CMD_VER_UNKNOWN);
base_size = iwl_scan_req_umac_get_size(scan_ver);
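The scan changes above merge two interface walks (counting active interfaces and detecting a DCM P2P GO) into a single iterator that fills one result struct, halving the atomic iterations per scan request. The shape of that pattern, reduced to plain C with illustrative sketch_* names:

struct sketch_scan_iter_data {
	int global_cnt;          /* interfaces that hold a PHY context */
	int dcm_p2p_go;          /* a P2P GO runs on a different PHY ctx */
	const void *current_vif; /* excluded from the GO check */
};

static void sketch_scan_iter(void *_data, const void *vif,
			     int has_phy_ctxt, int is_p2p_go_elsewhere)
{
	struct sketch_scan_iter_data *data = _data;

	if (has_phy_ctxt)
		data->global_cnt++;

	if (vif == data->current_vif)
		return;		/* skip the vif requesting the scan */

	if (is_p2p_go_elsewhere)
		data->dcm_p2p_go = 1;
}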
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index feab0bfcd7a2..c7f9d3870f21 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2015, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2015, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -87,6 +87,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_320:
case IEEE80211_STA_RX_BW_160:
add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
fallthrough;
@@ -316,7 +317,7 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
}
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- u16 *queueptr, u8 tid, u8 flags)
+ u16 *queueptr, u8 tid)
{
int queue = *queueptr;
struct iwl_scd_txq_cfg_cmd cmd = {
@@ -325,11 +326,28 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
};
int ret;
+ lockdep_assert_held(&mvm->mutex);
+
if (iwl_mvm_has_new_tx_api(mvm)) {
+ if (mvm->sta_remove_requires_queue_remove) {
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
+ SCD_QUEUE_CONFIG_CMD);
+ struct iwl_scd_queue_cfg_cmd remove_cmd = {
+ .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
+ .u.remove.queue = cpu_to_le32(queue),
+ };
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
+ sizeof(remove_cmd),
+ &remove_cmd);
+ } else {
+ ret = 0;
+ }
+
iwl_trans_txq_free(mvm->trans, queue);
*queueptr = IWL_MVM_INVALID_QUEUE;
- return 0;
+ return ret;
}
if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
@@ -373,7 +391,7 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
mvm->queue_info[queue].reserved = false;
iwl_trans_txq_disable(mvm->trans, queue, false);
- ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
if (ret)
@@ -512,7 +530,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
iwl_mvm_invalidate_sta_queue(mvm, queue,
disable_agg_tids, false);
- ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid, 0);
+ ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid);
if (ret) {
IWL_ERR(mvm,
"Failed to free inactive queue %d (ret=%d)\n",
@@ -596,6 +614,39 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
return queue;
}
+/* Re-configure the SCD for a queue that has already been configured */
+static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo,
+ int sta_id, int tid, int frame_limit, u16 ssn)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_ENABLE_QUEUE,
+ .window = frame_limit,
+ .sta_id = sta_id,
+ .ssn = cpu_to_le16(ssn),
+ .tx_fifo = fifo,
+ .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+ queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
+ .tid = tid,
+ };
+ int ret;
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
+ if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
+ "Trying to reconfig unallocated queue %d\n", queue))
+ return -ENXIO;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+ WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
+ queue, fifo, ret);
+
+ return ret;
+}
+
/*
* If a given queue has a higher AC than the TID stream that is being compared
* to, the queue needs to be redirected to the lower AC. This function does that
@@ -716,21 +767,40 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
u8 sta_id, u8 tid, unsigned int timeout)
{
- int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
- mvm->trans->cfg->min_256_ba_txq_size);
+ int queue, size;
if (tid == IWL_MAX_TID_COUNT) {
tid = IWL_MGMT_TID;
size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
mvm->trans->cfg->min_txq_size);
+ } else {
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ /* this queue isn't used for traffic (cab_queue) */
+ if (IS_ERR_OR_NULL(sta)) {
+ size = IWL_MGMT_QUEUE_SIZE;
+ } else if (sta->he_cap.has_he) {
+ /* support for 256 ba size */
+ size = IWL_DEFAULT_QUEUE_SIZE_HE;
+ } else {
+ size = IWL_DEFAULT_QUEUE_SIZE;
+ }
+
+ rcu_read_unlock();
}
- do {
- __le16 enable = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE);
+ /* take the min with bc tbl entries allowed */
+ size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16));
- queue = iwl_trans_txq_alloc(mvm->trans, enable,
- sta_id, tid, SCD_QUEUE_CFG,
- size, timeout);
+ /* size needs to be a power of 2 for calculating read/write pointers */
+ size = rounddown_pow_of_two(size);
+
+ do {
+ queue = iwl_trans_txq_alloc(mvm->trans, 0, BIT(sta_id),
+ tid, size, timeout);
if (queue < 0)
IWL_DEBUG_TX_QUEUES(mvm,
@@ -1019,12 +1089,12 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
* Remove the ones that did.
*/
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
- u16 tid_bitmap;
+ u16 q_tid_bitmap;
mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
- tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;
/*
* We need to take into account a situation in which a TXQ was
@@ -1037,7 +1107,7 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
* Mark this queue in the right bitmap, we'll send the command
* to the firmware later.
*/
- if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
+ if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
set_bit(queue, changetid_queues);
IWL_DEBUG_TX_QUEUES(mvm,
@@ -1337,7 +1407,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
out_err:
queue_tmp = queue;
- iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid, 0);
+ iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid);
return ret;
}
@@ -1516,8 +1586,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
memset(&cmd, 0, sizeof(cmd));
cmd.sta_id = sta->sta_id;
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA,
- 0) >= 12 &&
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12 &&
sta->type == IWL_STA_AUX_ACTIVITY)
cmd.mac_id_n_color = cpu_to_le32(mac_id);
else
@@ -1784,8 +1853,7 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
continue;
- iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i,
- 0);
+ iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i);
mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
}
@@ -1993,7 +2061,7 @@ static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
if (ret) {
if (!iwl_mvm_has_new_tx_api(mvm))
iwl_mvm_disable_txq(mvm, NULL, queue,
- IWL_MAX_TID_COUNT, 0);
+ IWL_MAX_TID_COUNT);
return ret;
}
@@ -2065,7 +2133,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
return -EINVAL;
- iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
+ iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT);
ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
if (ret)
IWL_WARN(mvm, "Failed sending remove station\n");
@@ -2082,7 +2150,7 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
return -EINVAL;
- iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
+ iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT);
ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
if (ret)
IWL_WARN(mvm, "Failed sending remove station\n");
@@ -2199,7 +2267,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
}
queue = *queueptr;
- iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT, 0);
+ iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT);
if (iwl_mvm_has_new_tx_api(mvm))
return;
@@ -2434,7 +2502,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);
- iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0, 0);
+ iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
if (ret)
@@ -2443,8 +2511,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return ret;
}
-#define IWL_MAX_RX_BA_SESSIONS 16
-
static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
struct iwl_mvm_delba_data notif = {
@@ -2526,18 +2592,126 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
}
}
+static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta,
+ bool start, int tid, u16 ssn,
+ u16 buf_size)
+{
+ struct iwl_mvm_add_sta_cmd cmd = {
+ .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
+ .sta_id = mvm_sta->sta_id,
+ .add_modify = STA_MODE_MODIFY,
+ };
+ u32 status;
+ int ret;
+
+ if (start) {
+ cmd.add_immediate_ba_tid = tid;
+ cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+ cmd.rx_ba_window = cpu_to_le16(buf_size);
+ cmd.modify_mask = STA_MODIFY_ADD_BA_TID;
+ } else {
+ cmd.remove_immediate_ba_tid = tid;
+ cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID;
+ }
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
+ &cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status & IWL_ADD_STA_STATUS_MASK) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
+ start ? "start" : "stopp");
+ if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) &&
+ !(status & IWL_ADD_STA_BAID_VALID_MASK)))
+ return -EINVAL;
+ return u32_get_bits(status, IWL_ADD_STA_BAID_MASK);
+ case ADD_STA_IMMEDIATE_BA_FAILURE:
+ IWL_WARN(mvm, "RX BA Session refused by fw\n");
+ return -ENOSPC;
+ default:
+ IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
+ start ? "start" : "stopp", status);
+ return -EIO;
+ }
+}
+
+static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta,
+ bool start, int tid, u16 ssn,
+ u16 buf_size, int baid)
+{
+ struct iwl_rx_baid_cfg_cmd cmd = {
+ .action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
+ cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
+ };
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
+ int ret;
+
+ BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
+
+ if (start) {
+ cmd.alloc.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
+ cmd.alloc.tid = tid;
+ cmd.alloc.ssn = cpu_to_le16(ssn);
+ cmd.alloc.win_size = cpu_to_le16(buf_size);
+ baid = -EIO;
+ } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
+ cmd.remove_v1.baid = cpu_to_le32(baid);
+ BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
+ } else {
+ cmd.remove.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
+ cmd.remove.tid = cpu_to_le32(tid);
+ }
+
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
+ &cmd, &baid);
+ if (ret)
+ return ret;
+
+ if (!start) {
+ /* ignore firmware baid on remove */
+ baid = 0;
+ }
+
+ IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
+ start ? "start" : "stopp");
+
+ if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map))
+ return -EINVAL;
+
+ return baid;
+}
+
+static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta,
+ bool start, int tid, u16 ssn, u16 buf_size,
+ int baid)
+{
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT))
+ return iwl_mvm_fw_baid_op_cmd(mvm, mvm_sta, start,
+ tid, ssn, buf_size, baid);
+
+ return iwl_mvm_fw_baid_op_sta(mvm, mvm_sta, start,
+ tid, ssn, buf_size);
+}
+
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm_add_sta_cmd cmd = {};
struct iwl_mvm_baid_data *baid_data = NULL;
- int ret;
- u32 status;
+ int ret, baid;
+ u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID :
+ IWL_MAX_BAID_OLD;
lockdep_assert_held(&mvm->mutex);
- if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
+ if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) {
IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
return -ENOSPC;
}
@@ -2583,59 +2757,29 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
reorder_buf_size / sizeof(baid_data->entries[0]);
}
- cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
- cmd.sta_id = mvm_sta->sta_id;
- cmd.add_modify = STA_MODE_MODIFY;
- if (start) {
- cmd.add_immediate_ba_tid = (u8) tid;
- cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
- cmd.rx_ba_window = cpu_to_le16(buf_size);
+ if (iwl_mvm_has_new_rx_api(mvm) && !start) {
+ baid = mvm_sta->tid_to_baid[tid];
} else {
- cmd.remove_immediate_ba_tid = (u8) tid;
+ /* we don't really need it in this case */
+ baid = -1;
}
- cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
- STA_MODIFY_REMOVE_BA_TID;
- status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
- iwl_mvm_add_sta_cmd_size(mvm),
- &cmd, &status);
- if (ret)
- goto out_free;
+ /* Don't send command to remove (start=0) BAID during restart */
+ if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ baid = iwl_mvm_fw_baid_op(mvm, mvm_sta, start, tid, ssn, buf_size,
+ baid);
- switch (status & IWL_ADD_STA_STATUS_MASK) {
- case ADD_STA_SUCCESS:
- IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
- start ? "start" : "stopp");
- break;
- case ADD_STA_IMMEDIATE_BA_FAILURE:
- IWL_WARN(mvm, "RX BA Session refused by fw\n");
- ret = -ENOSPC;
- break;
- default:
- ret = -EIO;
- IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
- start ? "start" : "stopp", status);
- break;
- }
-
- if (ret)
+ if (baid < 0) {
+ ret = baid;
goto out_free;
+ }
if (start) {
- u8 baid;
-
mvm->rx_ba_sessions++;
if (!iwl_mvm_has_new_rx_api(mvm))
return 0;
- if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
- ret = -EINVAL;
- goto out_free;
- }
- baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
- IWL_ADD_STA_BAID_SHIFT);
baid_data->baid = baid;
baid_data->timeout = timeout;
baid_data->last_rx = jiffies;
@@ -2663,7 +2807,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
rcu_assign_pointer(mvm->baid_map[baid], baid_data);
} else {
- u8 baid = mvm_sta->tid_to_baid[tid];
+ baid = mvm_sta->tid_to_baid[tid];
if (mvm->rx_ba_sessions > 0)
/* check that restart flow didn't zero the counter */
@@ -3238,8 +3382,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
int i, size;
bool new_api = fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
- int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA_KEY,
+ int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY,
new_api ? 2 : 1);
if (sta_id == IWL_MVM_INVALID_STA)
@@ -3939,7 +4082,7 @@ void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
- if (!WARN_ON(!mvmsta))
+ if (mvmsta)
iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
rcu_read_unlock();
@@ -3998,3 +4141,21 @@ out:
iwl_mvm_dealloc_int_sta(mvm, sta);
return ret;
}
+
+void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 mac_id)
+{
+ struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = {
+ .mac_id = cpu_to_le32(mac_id),
+ };
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),
+ CMD_ASYNC,
+ sizeof(cancel_channel_switch_cmd),
+ &cancel_channel_switch_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to cancel the channel switch\n");
+}
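Queue sizing above clamps against the byte-count table and then rounds down to a power of two, since the read/write pointer math masks rather than divides. rounddown_pow_of_two() is a kernel helper; a portable sketch of the same operation:

#include <stdint.h>

/* Round v (> 0) down to the nearest power of two by clearing the
 * lowest set bit until only the highest remains. */
static uint32_t sketch_rounddown_pow_of_two(uint32_t v)
{
	while (v & (v - 1))
		v &= v - 1;
	return v;
}

For example, sketch_rounddown_pow_of_two(300) yields 256, the kind of masking-friendly ring size the TX path expects.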
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index e34b82b2a288..f1a4fc3e4038 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -548,4 +548,7 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
u8 *key, u32 key_len);
+void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 mac_id);
#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index ab06dcda1462..6edf2b79db43 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -97,8 +97,7 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
/* In newer versions of this command an aux station is added only
 * in the case of a dedicated TX queue, and it needs to be removed
 * at the end of use */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA, 0) >= 12)
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12)
iwl_mvm_rm_aux_sta(mvm);
}
@@ -658,8 +657,8 @@ static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
};
int ret;
- ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
- MAC_CONF_GROUP, 0),
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
0, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(mvm,
@@ -923,8 +922,8 @@ iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
}
cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);
- return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
- MAC_CONF_GROUP, 0),
+ return iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
0, sizeof(cmd), &cmd);
}
@@ -1162,8 +1161,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
- const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF,
- MAC_CONF_GROUP, 0) };
+ const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) };
struct iwl_notification_wait wait_notif;
struct iwl_mvm_session_prot_cmd cmd = {
.id_and_color =
@@ -1201,8 +1199,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
if (!wait_for_notif) {
if (iwl_mvm_send_cmd_pdu(mvm,
- iwl_cmd_id(SESSION_PROTECTION_CMD,
- MAC_CONF_GROUP, 0),
+ WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
0, sizeof(cmd), &cmd)) {
IWL_ERR(mvm,
"Couldn't send the SESSION_PROTECTION_CMD\n");
@@ -1219,8 +1216,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
iwl_mvm_session_prot_notif, NULL);
if (iwl_mvm_send_cmd_pdu(mvm,
- iwl_cmd_id(SESSION_PROTECTION_CMD,
- MAC_CONF_GROUP, 0),
+ WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
0, sizeof(cmd), &cmd)) {
IWL_ERR(mvm,
"Couldn't send the SESSION_PROTECTION_CMD\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 398390c59344..69cf3a372759 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2019-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2019-2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
@@ -160,6 +160,11 @@ void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
notif = (struct ct_kill_notif *)pkt->data;
IWL_DEBUG_TEMP(mvm, "CT Kill notification temperature = %d\n",
notif->temperature);
+ if (iwl_fw_lookup_notif_ver(mvm->fw, PHY_OPS_GROUP,
+ CT_KILL_NOTIFICATION, 0) > 1)
+ IWL_DEBUG_TEMP(mvm,
+ "CT kill notification DTS bitmap = 0x%x, Scheme = %d\n",
+ notif->dts, notif->scheme);
iwl_mvm_enter_ctkill(mvm);
}
@@ -240,8 +245,8 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
* a response. For older versions we send the command and wait for a
* notification (no command TLV for previous versions).
*/
- cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
- CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(PHY_OPS_GROUP, CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver == 1)
return iwl_mvm_send_temp_cmd(mvm, true, temp);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 9213f8518f10..7763037b93ed 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -318,15 +318,14 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
/* info->control is only relevant for non HW rate control */
if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) {
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
/* HT rate doesn't make sense for a non data frame */
WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS &&
!ieee80211_is_data(fc),
"Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n",
info->control.rates[0].flags,
info->control.rates[0].idx,
- le16_to_cpu(fc), sta ? mvmsta->sta_state : -1);
+ le16_to_cpu(fc),
+ sta ? iwl_mvm_sta_from_mac80211(sta)->sta_state : -1);
rate_idx = info->control.rates[0].idx;
}
@@ -351,7 +350,7 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE);
/* Set CCK or OFDM flag */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 8) {
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) {
if (!is_cck)
rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;
else
@@ -654,7 +653,8 @@ static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt;
struct iwl_probe_resp_data *resp_data;
- u8 *ie, *pos;
+ const u8 *ie;
+ u8 *pos;
u8 match[] = {
(WLAN_OUI_WFA >> 16) & 0xff,
(WLAN_OUI_WFA >> 8) & 0xff,
@@ -671,10 +671,10 @@ static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
if (!resp_data->notif.noa_active)
goto out;
- ie = (u8 *)cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
- mgmt->u.probe_resp.variable,
- skb->len - base_len,
- match, 4, 2);
+ ie = cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
+ mgmt->u.probe_resp.variable,
+ skb->len - base_len,
+ match, 4, 2);
if (!ie) {
IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n");
goto out;
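The hunk above keeps the pointer returned by cfg80211_find_ie_match() const instead of casting the qualifier away; only the write cursor pos stays mutable. The general const-in/const-out search shape, as a self-contained sketch (sketch_find_pattern is hypothetical, not a cfg80211 API):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* The result aliases read-only input, so it is returned const
 * rather than cast to a writable type. */
static const uint8_t *sketch_find_pattern(const uint8_t *buf, size_t len,
					  const uint8_t *pat, size_t patlen)
{
	size_t i;

	if (!patlen || patlen > len)
		return NULL;
	for (i = 0; i + patlen <= len; i++)
		if (!memcmp(buf + i, pat, patlen))
			return buf + i;
	return NULL;
}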
@@ -1602,8 +1602,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
seq_ctl = le16_to_cpu(hdr->seq_ctrl);
if (unlikely(!seq_ctl)) {
- struct ieee80211_hdr *hdr = (void *)skb->data;
-
/*
* If it is an NDP, we can't update next_reclaim since
* its sequence control is 0. Note that for that same
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 1f3e90e5dbd4..bc947733d982 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -169,8 +169,7 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx)
{
- if (iwl_fw_lookup_cmd_ver(fw, LONG_GROUP,
- TX_CMD, 0) > 8)
+ if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8)
/* In the new rate legacy rates are indexed:
* 0 - 3 for CCK and 0 - 7 for OFDM.
*/
@@ -241,38 +240,6 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
return last_idx;
}
-int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
- int tid, int frame_limit, u16 ssn)
-{
- struct iwl_scd_txq_cfg_cmd cmd = {
- .scd_queue = queue,
- .action = SCD_CFG_ENABLE_QUEUE,
- .window = frame_limit,
- .sta_id = sta_id,
- .ssn = cpu_to_le16(ssn),
- .tx_fifo = fifo,
- .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
- queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
- .tid = tid,
- };
- int ret;
-
- if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
- return -EINVAL;
-
- if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
- "Trying to reconfig unallocated queue %d\n", queue))
- return -ENXIO;
-
- IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
-
- ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
- WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
- queue, fifo, ret);
-
- return ret;
-}
-
/**
* iwl_mvm_send_lq_cmd() - Send link quality command
* @mvm: Driver data.
@@ -480,8 +447,7 @@ void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
cmd.low_latency_tx = 1;
}
- if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(LOW_LATENCY_CMD,
- MAC_CONF_GROUP, 0),
+ if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD),
0, sizeof(cmd), &cmd))
IWL_ERR(mvm, "Failed to send low latency command\n");
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
index 78450366312b..080a1587caa5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
@@ -71,12 +71,13 @@ static int iwl_mvm_vendor_host_get_ownership(struct wiphy *wiphy,
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
mutex_lock(&mvm->mutex);
- iwl_mvm_mei_get_ownership(mvm);
+ ret = iwl_mvm_mei_get_ownership(mvm);
mutex_unlock(&mvm->mutex);
- return 0;
+ return ret;
}
static const struct wiphy_vendor_command iwl_mvm_vendor_commands[] = {
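The vendor command now propagates the status of iwl_mvm_mei_get_ownership() instead of discarding it and returning 0. The pattern of capturing a status while a lock is held, sketched with pthreads (the stub callee is hypothetical):

#include <pthread.h>

static pthread_mutex_t sketch_lock = PTHREAD_MUTEX_INITIALIZER;

static int sketch_do_get_ownership(void)
{
	return 0;	/* stub standing in for the real operation */
}

static int sketch_locked_get_ownership(void)
{
	int ret;

	pthread_mutex_lock(&sketch_lock);
	ret = sketch_do_get_ownership();	/* capture while locked */
	pthread_mutex_unlock(&sketch_lock);
	return ret;				/* previously dropped as 0 */
}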
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 85a6da70ca78..75fd386b048e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-fh.h"
@@ -125,6 +125,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT;
+ if (trans->trans_cfg->imr_enabled)
+ control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN;
+
/* initialize RX default queue */
prph_sc_ctrl->rbd_cfg.free_rbd_addr =
cpu_to_le64(trans_pcie->rxq->bd_dma);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 5178e852c5d3..b16d4ae182d1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -491,18 +491,21 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* So devices */
{IWL_PCI_DEVICE(0x2725, PCI_ANY_ID, iwl_so_trans_cfg)},
{IWL_PCI_DEVICE(0x2726, PCI_ANY_ID, iwl_snj_trans_cfg)},
- {IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)},
{IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)},
{IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)},
{IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)},
/* Ma devices */
{IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)},
{IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
- {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_ma_trans_cfg)},
/* Bz devices */
{IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -668,8 +671,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x2726, 0x1652, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name),
IWL_DEV_INFO(0x2726, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name),
IWL_DEV_INFO(0x2726, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name),
- IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name),
- IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name),
+ IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
+ IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
/* SO with GF2 */
IWL_DEV_INFO(0x2726, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
@@ -682,6 +685,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x7A70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
IWL_DEV_INFO(0x7AF0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
IWL_DEV_INFO(0x7AF0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
+ IWL_DEV_INFO(0x7F70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
+ IWL_DEV_INFO(0x7F70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
/* MA with GF2 */
IWL_DEV_INFO(0x7E40, 0x1671, iwl_cfg_ma_a0_gf_a0, iwl_ax211_killer_1675s_name),
@@ -1301,7 +1306,30 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
- iwlax210_2ax_cfg_so_jf_b0, iwl9462_name)
+ iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
+
+/* MsP */
+/* For now we use the same FW as MR, but this will change in the future. */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+ iwl_cfg_so_a0_ms_a0, iwl_ax204_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+ iwl_cfg_so_a0_ms_a0, iwl_ax204_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+ iwl_cfg_ma_a0_ms_a0, iwl_ax204_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+ iwl_cfg_snj_a0_ms_a0, iwl_ax204_name)
#endif /* CONFIG_IWLMVM */
};
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index a43e56c7689f..f7e4f868363d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2003-2015, 2018-2021 Intel Corporation
+ * Copyright (C) 2003-2015, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -104,6 +104,18 @@ struct iwl_rx_completion_desc {
} __packed;
/**
+ * struct iwl_rx_completion_desc_bz - Bz completion descriptor
+ * @rbid: unique tag of the received buffer
+ * @flags: flags (bit 0: fragmented, all other bits: reserved)
+ * @reserved: reserved
+ */
+struct iwl_rx_completion_desc_bz {
+ __le16 rbid;
+ u8 flags;
+ u8 reserved[1];
+} __packed;
+
+/**
* struct iwl_rxq - Rx queue
* @id: queue index
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
@@ -133,11 +145,7 @@ struct iwl_rxq {
int id;
void *bd;
dma_addr_t bd_dma;
- union {
- void *used_bd;
- __le32 *bd_32;
- struct iwl_rx_completion_desc *cd;
- };
+ void *used_bd;
dma_addr_t used_bd_dma;
u32 read;
u32 write;
@@ -262,6 +270,20 @@ enum iwl_pcie_fw_reset_state {
};
/**
+ * enum iwl_pcie_imr_status - IMR DMA transfer state
+ * @IMR_D2S_IDLE: default (idle) state of the DMA transfer
+ * @IMR_D2S_REQUESTED: DMA transfer requested
+ * @IMR_D2S_COMPLETED: DMA transfer completed
+ * @IMR_D2S_ERROR: DMA transfer error
+ */
+enum iwl_pcie_imr_status {
+ IMR_D2S_IDLE,
+ IMR_D2S_REQUESTED,
+ IMR_D2S_COMPLETED,
+ IMR_D2S_ERROR,
+};
+
+/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
* @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
@@ -319,6 +341,8 @@ enum iwl_pcie_fw_reset_state {
* @alloc_page_lock: spinlock for the page allocator
* @alloc_page: allocated page to still use parts of
* @alloc_page_used: how much of the allocated page was already used (bytes)
+ * @imr_status: IMR DMA state machine
+ * @imr_waitq: IMR wait queue for DMA completion
* @rf_name: name/version of the CRF, if any
*/
struct iwl_trans_pcie {
@@ -363,7 +387,7 @@ struct iwl_trans_pcie {
/* PCI bus related data */
struct pci_dev *pci_dev;
- void __iomem *hw_base;
+ u8 __iomem *hw_base;
bool ucode_write_complete;
bool sx_complete;
@@ -414,7 +438,8 @@ struct iwl_trans_pcie {
bool fw_reset_handshake;
enum iwl_pcie_fw_reset_state fw_reset_state;
wait_queue_head_t fw_reset_waitq;
-
+ enum iwl_pcie_imr_status imr_status;
+ wait_queue_head_t imr_waitq;
char rf_name[32];
};
@@ -809,4 +834,9 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
+void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr, u32 byte_cnt);
+int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr, u32 byte_cnt);
+
#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 8247014278f3..68a4572cee53 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2003-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -190,11 +190,14 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
}
rxq->write_actual = round_down(rxq->write, 8);
- if (trans->trans_cfg->mq_rx_supported)
+ if (!trans->trans_cfg->mq_rx_supported)
+ iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+ else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |
+ HBUS_TARG_WRPTR_RX_Q(rxq->id));
+ else
iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
rxq->write_actual);
- else
- iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}
static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
@@ -652,23 +655,30 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data)
iwl_pcie_rx_allocator(trans_pcie->trans);
}
-static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
+static int iwl_pcie_free_bd_size(struct iwl_trans *trans)
{
- struct iwl_rx_transfer_desc *rx_td;
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return sizeof(struct iwl_rx_transfer_desc);
- if (use_rx_td)
- return sizeof(*rx_td);
- else
- return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) :
- sizeof(__le32);
+ return trans->trans_cfg->mq_rx_supported ?
+ sizeof(__le64) : sizeof(__le32);
+}
+
+static int iwl_pcie_used_bd_size(struct iwl_trans *trans)
+{
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ return sizeof(struct iwl_rx_completion_desc_bz);
+
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return sizeof(struct iwl_rx_completion_desc);
+
+ return sizeof(__le32);
}
static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
- bool use_rx_td = (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_AX210);
- int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+ int free_size = iwl_pcie_free_bd_size(trans);
if (rxq->bd)
dma_free_coherent(trans->dev,
@@ -682,8 +692,8 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
if (rxq->used_bd)
dma_free_coherent(trans->dev,
- (use_rx_td ? sizeof(*rxq->cd) :
- sizeof(__le32)) * rxq->queue_size,
+ iwl_pcie_used_bd_size(trans) *
+ rxq->queue_size,
rxq->used_bd, rxq->used_bd_dma);
rxq->used_bd_dma = 0;
rxq->used_bd = NULL;
@@ -707,7 +717,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
else
rxq->queue_size = RX_QUEUE_SIZE;
- free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+ free_size = iwl_pcie_free_bd_size(trans);
/*
* Allocate the circular buffer of Read Buffer Descriptors
@@ -720,14 +730,15 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
if (trans->trans_cfg->mq_rx_supported) {
rxq->used_bd = dma_alloc_coherent(dev,
- (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
+ iwl_pcie_used_bd_size(trans) *
+ rxq->queue_size,
&rxq->used_bd_dma,
GFP_KERNEL);
if (!rxq->used_bd)
goto err;
}
- rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
+ rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
rxq->rb_stts_dma =
trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
@@ -1307,9 +1318,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
"Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
rxq->id, offset,
iwl_get_cmd_string(trans,
- iwl_cmd_id(pkt->hdr.cmd,
- pkt->hdr.group_id,
- 0)),
+ WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)),
pkt->hdr.group_id, pkt->hdr.cmd,
le16_to_cpu(pkt->hdr.sequence));
@@ -1319,7 +1328,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
/* check that what the device tells us made sense */
- if (offset > max_len)
+ if (len < sizeof(*pkt) || offset > max_len)
break;
trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
@@ -1419,6 +1428,7 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
u16 vid;
BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
+ BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4);
if (!trans->trans_cfg->mq_rx_supported) {
rxb = rxq->queue[i];
@@ -1426,11 +1436,20 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
return rxb;
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- vid = le16_to_cpu(rxq->cd[i].rbid);
- *join = rxq->cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;
+
+ vid = le16_to_cpu(cd[i].rbid);
+ *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
+ } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_rx_completion_desc *cd = rxq->used_bd;
+
+ vid = le16_to_cpu(cd[i].rbid);
+ *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
} else {
- vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
+ __le32 *cd = rxq->used_bd;
+
+ vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */
}
if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
@@ -1608,10 +1627,13 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
if (WARN_ON(entry->entry >= trans->num_rx_queues))
return IRQ_NONE;
- if (WARN_ONCE(!rxq,
- "[%d] Got MSI-X interrupt before we have Rx queues",
- entry->entry))
+ if (!rxq) {
+ if (net_ratelimit())
+ IWL_ERR(trans,
+ "[%d] Got MSI-X interrupt before we have Rx queues\n",
+ entry->entry);
return IRQ_NONE;
+ }
lock_map_acquire(&trans->sync_cmd_lockdep_map);
IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);
@@ -1954,7 +1976,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
CSR_INT, CSR_INT_BIT_RX_PERIODIC);
}
/* Sending RX interrupt require many steps to be done in the
- * the device:
+ * device:
* 1- write interrupt to current index in ICT table.
* 2- dma RX frame.
* 3- update RX shared data to indicate last write index.
@@ -1998,6 +2020,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
/* Wake up uCode load routine, now that load is complete */
trans_pcie->ucode_write_complete = true;
wake_up(&trans_pcie->ucode_write_waitq);
+ /* Wake up IMR write routine, now that write to SRAM is complete */
+ if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+ trans_pcie->imr_status = IMR_D2S_COMPLETED;
+ wake_up(&trans_pcie->ucode_write_waitq);
+ }
}
if (inta & ~handled) {
@@ -2211,7 +2238,17 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}
/* This "Tx" DMA channel is used only for loading uCode */
- if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
+ if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM &&
+ trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+ IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n");
+ isr_stats->tx++;
+
+ /* Wake up IMR routine once write to SRAM is complete */
+ if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+ trans_pcie->imr_status = IMR_D2S_COMPLETED;
+ wake_up(&trans_pcie->ucode_write_waitq);
+ }
+ } else if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
isr_stats->tx++;
/*
@@ -2220,6 +2257,12 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
*/
trans_pcie->ucode_write_complete = true;
wake_up(&trans_pcie->ucode_write_waitq);
+
+ /* Wake up IMR routine once write to SRAM is complete */
+ if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+ trans_pcie->imr_status = IMR_D2S_COMPLETED;
+ wake_up(&trans_pcie->ucode_write_waitq);
+ }
}
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
@@ -2234,7 +2277,10 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
inta_fh);
isr_stats->sw++;
/* during FW reset flow report errors from there */
- if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
+ if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+ trans_pcie->imr_status = IMR_D2S_ERROR;
+ wake_up(&trans_pcie->imr_waitq);
+ } else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
trans_pcie->fw_reset_state = FW_RESET_ERROR;
wake_up(&trans_pcie->fw_reset_waitq);
} else {
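
For orientation, the used-BD ("completion") entry formats that the RX path
above now selects between, with the sizes enforced by the new BUILD_BUG_ON
checks:

	/* entry type returned by iwl_pcie_used_bd_size():
	 *   Bz and later:          struct iwl_rx_completion_desc_bz  (4 bytes)
	 *   AX210 up to Bz:        struct iwl_rx_completion_desc    (32 bytes)
	 *   older multi-queue HW:  __le32, 12-bit VID in bits 0..11  (4 bytes)
	 * rxq->used_bd is now a bare void * cast at its single use site in
	 * iwl_pcie_get_rxb(), replacing the old anonymous union in struct iwl_rxq.
	 */
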
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index ef14584fc0a1..8be3c3c8c68b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2007-2015, 2018-2020 Intel Corporation
+ * Copyright (C) 2007-2015, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -745,7 +745,7 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
iwl_set_bits_prph(trans, LMPM_CHICK,
LMPM_CHICK_EXTENDED_ADDR_SPACE);
- memcpy(v_addr, (u8 *)section->data + offset, copy_size);
+ memcpy(v_addr, (const u8 *)section->data + offset, copy_size);
ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
copy_size);
@@ -1112,7 +1112,7 @@ static const struct iwl_causes_list causes_list_pre_bz[] = {
};
static const struct iwl_causes_list causes_list_bz[] = {
- {MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ, CSR_MSIX_HW_INT_MASK_AD, 0x29},
+ {MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ, CSR_MSIX_HW_INT_MASK_AD, 0x15},
};
static void iwl_pcie_map_list(struct iwl_trans *trans,
@@ -1948,6 +1948,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
trans->txqs.page_offs = trans_cfg->cb_data_offs;
trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
+ trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
trans_pcie->n_no_reclaim_cmds = 0;
@@ -2863,7 +2864,7 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
+ u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
struct cont_rec *data = &trans_pcie->fw_mon_data;
u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
ssize_t size, bytes_copied = 0;
@@ -3468,7 +3469,8 @@ static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
.d3_suspend = iwl_trans_pcie_d3_suspend, \
.d3_resume = iwl_trans_pcie_d3_resume, \
.interrupts = iwl_trans_pci_interrupts, \
- .sync_nmi = iwl_trans_pcie_sync_nmi \
+ .sync_nmi = iwl_trans_pcie_sync_nmi, \
+ .imr_dma_data = iwl_trans_pcie_copy_imr \
static const struct iwl_trans_ops trans_ops_pcie = {
IWL_TRANS_COMMON_OPS,
@@ -3553,6 +3555,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
mutex_init(&trans_pcie->mutex);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
init_waitqueue_head(&trans_pcie->fw_reset_waitq);
+ init_waitqueue_head(&trans_pcie->imr_waitq);
trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
WQ_HIGHPRI | WQ_UNBOUND, 1);
@@ -3681,3 +3684,41 @@ out_free_trans:
iwl_trans_free(trans);
return ERR_PTR(ret);
}
+
+void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr, u32 byte_cnt)
+{
+ iwl_write_prph(trans, IMR_UREG_CHICK,
+ iwl_read_prph(trans, IMR_UREG_CHICK) |
+ IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK);
+ iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr);
+ iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB,
+ (u32)(src_addr & 0xFFFFFFFF));
+ iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB,
+ iwl_get_dma_hi_addr(src_addr));
+ iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt);
+ iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_CTRL,
+ IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS |
+ IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS |
+ IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK);
+}
+
+int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr, u32 byte_cnt)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret = -1;
+
+ trans_pcie->imr_status = IMR_D2S_REQUESTED;
+ iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt);
+ ret = wait_event_timeout(trans_pcie->imr_waitq,
+ trans_pcie->imr_status !=
+ IMR_D2S_REQUESTED, 5 * HZ);
+ if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) {
+ IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n");
+ iwl_trans_pcie_dump_regs(trans);
+ return -ETIMEDOUT;
+ }
+ trans_pcie->imr_status = IMR_D2S_IDLE;
+ return 0;
+}
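
The error check above relies on wait_event_timeout() returning 0 on timeout
and a positive value otherwise. A minimal sketch of the wait/wake pairing
with the interrupt-handler changes earlier in this series (names are from
this patch; the sequencing comments are mine):

	trans_pcie->imr_status = IMR_D2S_REQUESTED;	  /* arm the state machine */
	iwl_trans_pcie_copy_imr_fh(trans, dst, src, len); /* kick the DMA */
	ret = wait_event_timeout(trans_pcie->imr_waitq,
				 trans_pcie->imr_status != IMR_D2S_REQUESTED,
				 5 * HZ);
	/* ret == 0: still IMR_D2S_REQUESTED after 5 s -> -ETIMEDOUT path
	 * ret  > 0: the ISR moved the state machine to IMR_D2S_COMPLETED
	 *           (success) or IMR_D2S_ERROR (SW error interrupt) first
	 */
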
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 34bde8c87324..c72a84d8bb4f 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -213,7 +213,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
/* map the remaining (adjusted) nocopy/dup fragments */
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
- const void *data = cmddata[i];
+ void *data = (void *)(uintptr_t)cmddata[i];
if (!cmdlen[i])
continue;
@@ -222,7 +222,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
continue;
if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
data = dup_buf;
- phys_addr = dma_map_single(trans->dev, (void *)data,
+ phys_addr = dma_map_single(trans->dev, data,
cmdlen[i], DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
idx = -ENOMEM;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 4f6c187eed69..3546c5269c3b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -154,7 +154,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
void *tfd;
u32 num_tbs;
- tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
+ tfd = (u8 *)txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
if (reset)
memset(tfd, 0, trans->txqs.tfd.size);
@@ -540,7 +540,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
trans->cfg->min_txq_size);
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
- trans->cfg->min_256_ba_txq_size);
+ trans->cfg->min_ba_txq_size);
trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
cmd_queue);
@@ -594,7 +594,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
trans->cfg->min_txq_size);
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
- trans->cfg->min_256_ba_txq_size);
+ trans->cfg->min_ba_txq_size);
ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
cmd_queue);
if (ret) {
@@ -877,7 +877,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
if (configure_scd) {
iwl_scd_txq_set_inactive(trans, txq_id);
- iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
+ iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val,
ARRAY_SIZE(zero_val));
}
@@ -1114,7 +1114,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
/* map the remaining (adjusted) nocopy/dup fragments */
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
- const void *data = cmddata[i];
+ void *data = (void *)(uintptr_t)cmddata[i];
if (!cmdlen[i])
continue;
@@ -1123,7 +1123,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
continue;
if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
data = dup_buf;
- phys_addr = dma_map_single(trans->dev, (void *)data,
+ phys_addr = dma_map_single(trans->dev, data,
cmdlen[i], DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
@@ -1201,7 +1201,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
cmd = txq->entries[cmd_index].cmd;
meta = &txq->entries[cmd_index].meta;
group_id = cmd->hdr.group_id;
- cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
+ cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);
iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
index 0730657d54bf..726185d6fab8 100644
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
@@ -1,13 +1,15 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2020-2021 Intel Corporation
+ * Copyright (C) 2020-2022 Intel Corporation
*/
#include <net/tso.h>
#include <linux/tcp.h>
#include "iwl-debug.h"
#include "iwl-io.h"
+#include "fw/api/commands.h"
#include "fw/api/tx.h"
+#include "fw/api/datapath.h"
#include "queue/tx.h"
#include "iwl-fh.h"
#include "iwl-scd.h"
@@ -41,13 +43,13 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
+ struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
/* Starting from AX210, the HW expects bytes */
WARN_ON(trans->txqs.bc_table_dword);
WARN_ON(len > 0x3FFF);
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
- scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
+ scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
} else {
struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
@@ -189,7 +191,7 @@ static struct page *get_workaround_page(struct iwl_trans *trans,
return NULL;
/* set the chaining pointer to the previous page if there */
- *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+ *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
*page_ptr = ret;
return ret;
@@ -314,7 +316,7 @@ alloc:
return NULL;
p->pos = page_address(p->page);
/* set the chaining pointer to NULL */
- *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+ *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
out:
*page_ptr = p->page;
get_page(p->page);
@@ -963,7 +965,7 @@ void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
while (next) {
struct page *tmp = next;
- next = *(void **)(page_address(next) + PAGE_SIZE -
+ next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
sizeof(void *));
__free_page(tmp);
}
@@ -1083,9 +1085,8 @@ error:
return -ENOMEM;
}
-static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
- struct iwl_txq **intxq, int size,
- unsigned int timeout)
+static struct iwl_txq *
+iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
{
size_t bc_tbl_size, bc_tbl_entries;
struct iwl_txq *txq;
@@ -1097,18 +1098,18 @@ static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
bc_tbl_entries = bc_tbl_size / sizeof(u16);
if (WARN_ON(size > bc_tbl_entries))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
txq = kzalloc(sizeof(*txq), GFP_KERNEL);
if (!txq)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
&txq->bc_tbl.dma);
if (!txq->bc_tbl.addr) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
kfree(txq);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
ret = iwl_txq_alloc(trans, txq, size, false);
@@ -1124,12 +1125,11 @@ static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
txq->wd_timeout = msecs_to_jiffies(timeout);
- *intxq = txq;
- return 0;
+ return txq;
error:
iwl_txq_gen2_free_memory(trans, txq);
- return ret;
+ return ERR_PTR(ret);
}
static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
@@ -1186,30 +1186,61 @@ error_free_resp:
return ret;
}
-int iwl_txq_dyn_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid,
- int cmd_id, int size, unsigned int timeout)
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
+ u8 tid, int size, unsigned int timeout)
{
- struct iwl_txq *txq = NULL;
- struct iwl_tx_queue_cfg_cmd cmd = {
- .flags = flags,
- .sta_id = sta_id,
- .tid = tid,
- };
+ struct iwl_txq *txq;
+ union {
+ struct iwl_tx_queue_cfg_cmd old;
+ struct iwl_scd_queue_cfg_cmd new;
+ } cmd;
struct iwl_host_cmd hcmd = {
- .id = cmd_id,
- .len = { sizeof(cmd) },
- .data = { &cmd, },
.flags = CMD_WANT_SKB,
};
int ret;
- ret = iwl_txq_dyn_alloc_dma(trans, &txq, size, timeout);
- if (ret)
- return ret;
+ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
+ trans->hw_rev_step == SILICON_A_STEP)
+ size = 4096;
+
+ txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
+ if (IS_ERR(txq))
+ return PTR_ERR(txq);
- cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
- cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
- cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+ if (trans->txqs.queue_alloc_cmd_ver == 0) {
+ memset(&cmd.old, 0, sizeof(cmd.old));
+ cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
+ cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
+ cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+ cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
+ cmd.old.tid = tid;
+
+ if (hweight32(sta_mask) != 1) {
+ ret = -EINVAL;
+ goto error;
+ }
+ cmd.old.sta_id = ffs(sta_mask) - 1;
+
+ hcmd.id = SCD_QUEUE_CFG;
+ hcmd.len[0] = sizeof(cmd.old);
+ hcmd.data[0] = &cmd.old;
+ } else if (trans->txqs.queue_alloc_cmd_ver == 3) {
+ memset(&cmd.new, 0, sizeof(cmd.new));
+ cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
+ cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
+ cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
+ cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+ cmd.new.u.add.flags = cpu_to_le32(flags);
+ cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
+ cmd.new.u.add.tid = tid;
+
+ hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
+ hcmd.len[0] = sizeof(cmd.new);
+ hcmd.data[0] = &cmd.new;
+ } else {
+ ret = -EOPNOTSUPP;
+ goto error;
+ }
ret = iwl_trans_send_cmd(trans, &hcmd);
if (ret)
@@ -1307,10 +1338,10 @@ static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
dma_addr_t hi_len;
if (trans->trans_cfg->use_tfh) {
- struct iwl_tfh_tfd *tfd = _tfd;
- struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+ struct iwl_tfh_tfd *tfh_tfd = _tfd;
+ struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
- return (dma_addr_t)(le64_to_cpu(tb->addr));
+ return (dma_addr_t)(le64_to_cpu(tfh_tb->addr));
}
tfd = _tfd;
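
One detail of the reworked iwl_txq_dyn_alloc() above: callers now always pass
a station mask, so when the old (version 0) command is used the single
station id has to be recovered from it. A worked example of that conversion:

	u32 sta_mask = BIT(5);		/* exactly one station: id 5 */
	u8 sta_id;

	if (hweight32(sta_mask) != 1)	/* the old command addresses one STA */
		return -EINVAL;
	sta_id = ffs(sta_mask) - 1;	/* ffs(0x20) == 6 -> sta_id == 5 */
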
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
index 20efc62acf13..eca53bfd326d 100644
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2020-2021 Intel Corporation
+ * Copyright (C) 2020-2022 Intel Corporation
*/
#ifndef __iwl_trans_queue_tx_h__
#define __iwl_trans_queue_tx_h__
@@ -41,7 +41,7 @@ static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
if (trans->trans_cfg->use_tfh)
idx = iwl_txq_get_cmd_index(txq, idx);
- return txq->tfds + trans->txqs.tfd.size * idx;
+ return (u8 *)txq->tfds + trans->txqs.tfd.size * idx;
}
int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
@@ -112,10 +112,9 @@ void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
struct iwl_cmd_meta *meta,
struct iwl_tfh_tfd *tfd);
-int iwl_txq_dyn_alloc(struct iwl_trans *trans,
- __le16 flags, u8 sta_id, u8 tid,
- int cmd_id, int size,
- unsigned int timeout);
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
+ u32 sta_mask, u8 tid,
+ int size, unsigned int timeout);
int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int txq_id);
@@ -137,9 +136,9 @@ static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
struct iwl_tfd *tfd;
if (trans->trans_cfg->use_tfh) {
- struct iwl_tfh_tfd *tfd = _tfd;
+ struct iwl_tfh_tfd *tfh_tfd = _tfd;
- return le16_to_cpu(tfd->num_tbs) & 0x1f;
+ return le16_to_cpu(tfh_tfd->num_tbs) & 0x1f;
}
tfd = (struct iwl_tfd *)_tfd;
@@ -153,10 +152,10 @@ static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
struct iwl_tfd_tb *tb;
if (trans->trans_cfg->use_tfh) {
- struct iwl_tfh_tfd *tfd = _tfd;
- struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+ struct iwl_tfh_tfd *tfh_tfd = _tfd;
+ struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
- return le16_to_cpu(tb->tb_len);
+ return le16_to_cpu(tfh_tb->tb_len);
}
tfd = (struct iwl_tfd *)_tfd;
diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c
index ab0fe8565851..f99b7ba69fc3 100644
--- a/drivers/net/wireless/intersil/p54/p54spi.c
+++ b/drivers/net/wireless/intersil/p54/p54spi.c
@@ -669,7 +669,7 @@ err_free:
return ret;
}
-static int p54spi_remove(struct spi_device *spi)
+static void p54spi_remove(struct spi_device *spi)
{
struct p54s_priv *priv = spi_get_drvdata(spi);
@@ -684,8 +684,6 @@ static int p54spi_remove(struct spi_device *spi)
mutex_destroy(&priv->mutex);
p54_free_common(priv->hw);
-
- return 0;
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index fc5725f6daee..28bfa7b7b73c 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -4,7 +4,7 @@
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2020 Intel Corporation
+ * Copyright (C) 2018 - 2022 Intel Corporation
*/
/*
@@ -173,9 +173,23 @@ static const struct ieee80211_regdomain hwsim_world_regdom_custom_02 = {
}
};
+static const struct ieee80211_regdomain hwsim_world_regdom_custom_03 = {
+ .n_reg_rules = 6,
+ .alpha2 = "99",
+ .reg_rules = {
+ REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0),
+ REG_RULE(2484 - 10, 2484 + 10, 40, 0, 20, 0),
+ REG_RULE(5150 - 10, 5240 + 10, 40, 0, 30, 0),
+ REG_RULE(5745 - 10, 5825 + 10, 40, 0, 30, 0),
+ REG_RULE(5855 - 10, 5925 + 10, 40, 0, 33, 0),
+ REG_RULE(5955 - 10, 7125 + 10, 320, 0, 33, 0),
+ }
+};
+
static const struct ieee80211_regdomain *hwsim_world_regdom_custom[] = {
&hwsim_world_regdom_custom_01,
&hwsim_world_regdom_custom_02,
+ &hwsim_world_regdom_custom_03,
};
struct hwsim_vif_priv {
@@ -475,16 +489,16 @@ static const struct ieee80211_sta_s1g_cap hwsim_s1g_cap = {
0 },
};
-static void hwsim_init_s1g_channels(struct ieee80211_channel *channels)
+static void hwsim_init_s1g_channels(struct ieee80211_channel *chans)
{
int ch, freq;
for (ch = 0; ch < NUM_S1G_CHANS_US; ch++) {
freq = 902000 + (ch + 1) * 500;
- channels[ch].band = NL80211_BAND_S1GHZ;
- channels[ch].center_freq = KHZ_TO_MHZ(freq);
- channels[ch].freq_offset = freq % 1000;
- channels[ch].hw_value = ch + 1;
+ chans[ch].band = NL80211_BAND_S1GHZ;
+ chans[ch].center_freq = KHZ_TO_MHZ(freq);
+ chans[ch].freq_offset = freq % 1000;
+ chans[ch].hw_value = ch + 1;
}
}
@@ -503,6 +517,8 @@ static const struct ieee80211_rate hwsim_rates[] = {
{ .bitrate = 540 }
};
+#define DEFAULT_RX_RSSI -50
+
static const u32 hwsim_ciphers[] = {
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
@@ -652,6 +668,7 @@ struct mac80211_hwsim_data {
ARRAY_SIZE(hwsim_channels_6ghz)];
struct ieee80211_channel *channel;
+ enum nl80211_chan_width bw;
u64 beacon_int /* beacon interval in us */;
unsigned int rx_filter;
bool started, idle, scanning;
@@ -690,6 +707,9 @@ struct mac80211_hwsim_data {
u64 rx_bytes;
u64 tx_dropped;
u64 tx_failed;
+
+ /* RSSI in rx status of the receiver */
+ int rx_rssi;
};
static const struct rhashtable_params hwsim_rht_params = {
@@ -803,6 +823,40 @@ extern int hwsim_tx_virtio(struct mac80211_hwsim_data *data,
#define hwsim_virtio_enabled false
#endif
+static int hwsim_get_chanwidth(enum nl80211_chan_width bw)
+{
+ switch (bw) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ return 20;
+ case NL80211_CHAN_WIDTH_40:
+ return 40;
+ case NL80211_CHAN_WIDTH_80:
+ return 80;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ return 160;
+ case NL80211_CHAN_WIDTH_320:
+ return 320;
+ case NL80211_CHAN_WIDTH_5:
+ return 5;
+ case NL80211_CHAN_WIDTH_10:
+ return 10;
+ case NL80211_CHAN_WIDTH_1:
+ return 1;
+ case NL80211_CHAN_WIDTH_2:
+ return 2;
+ case NL80211_CHAN_WIDTH_4:
+ return 4;
+ case NL80211_CHAN_WIDTH_8:
+ return 8;
+ case NL80211_CHAN_WIDTH_16:
+ return 16;
+ }
+
+ return INT_MAX;
+}
+
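
hwsim_get_chanwidth() reduces a channel-width enum to MHz so widths can be
compared numerically; mac80211_hwsim_tx() below uses it to reject frames
whose rate flags claim more bandwidth than the configured channel. A small
worked case (drop_frame() is a stand-in for the early return in the TX path):

	enum nl80211_chan_width confbw = NL80211_CHAN_WIDTH_40;	/* channel */
	enum nl80211_chan_width bw = NL80211_CHAN_WIDTH_80;	/* from rate flags */

	if (WARN_ON(hwsim_get_chanwidth(bw) > hwsim_get_chanwidth(confbw)))
		drop_frame();	/* 80 MHz > 40 MHz: malformed rate entry */
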
static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct ieee80211_channel *chan);
@@ -964,6 +1018,29 @@ DEFINE_DEBUGFS_ATTRIBUTE(hwsim_fops_group,
hwsim_fops_group_read, hwsim_fops_group_write,
"%llx\n");
+static int hwsim_fops_rx_rssi_read(void *dat, u64 *val)
+{
+ struct mac80211_hwsim_data *data = dat;
+ *val = data->rx_rssi;
+ return 0;
+}
+
+static int hwsim_fops_rx_rssi_write(void *dat, u64 val)
+{
+ struct mac80211_hwsim_data *data = dat;
+ int rssi = (int)val;
+
+ if (rssi >= 0 || rssi < -100)
+ return -EINVAL;
+
+ data->rx_rssi = rssi;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(hwsim_fops_rx_rssi,
+ hwsim_fops_rx_rssi_read, hwsim_fops_rx_rssi_write,
+ "%lld\n");
+
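
Together with DEFAULT_RX_RSSI above, this gives each simulated radio a
per-device debugfs knob: reads report the current simulated RSSI, and writes
accept only values in the -100..-1 dBm range (anything else returns -EINVAL).
As the TX-path change below shows, the receiver's reported signal becomes
this value plus the transmitting vif's configured txpower.
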
static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -1482,8 +1559,8 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
rx_status.bw = RATE_INFO_BW_20;
if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI;
- /* TODO: simulate real signal strength (and optional packet loss) */
- rx_status.signal = -50;
+ /* TODO: simulate optional packet loss */
+ rx_status.signal = data->rx_rssi;
if (info->control.vif)
rx_status.signal += info->control.vif->bss_conf.txpower;
@@ -1595,7 +1672,8 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_channel *channel;
bool ack;
- u32 _portid;
+ enum nl80211_chan_width confbw = NL80211_CHAN_WIDTH_20_NOHT;
+ u32 _portid, i;
if (WARN_ON(skb->len < 10)) {
/* Should not happen; just a sanity check for addr1 use */
@@ -1605,14 +1683,17 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
if (!data->use_chanctx) {
channel = data->channel;
+ confbw = data->bw;
} else if (txi->hw_queue == 4) {
channel = data->tmp_chan;
} else {
chanctx_conf = rcu_dereference(txi->control.vif->chanctx_conf);
- if (chanctx_conf)
+ if (chanctx_conf) {
channel = chanctx_conf->def.chan;
- else
+ confbw = chanctx_conf->def.width;
+ } else {
channel = NULL;
+ }
}
if (WARN(!channel, "TX w/o channel - queue = %d\n", txi->hw_queue)) {
@@ -1636,6 +1717,25 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
txi->control.rates,
ARRAY_SIZE(txi->control.rates));
+ for (i = 0; i < ARRAY_SIZE(txi->control.rates); i++) {
+ u16 rflags = txi->control.rates[i].flags;
+ /* initialize to data->bw for 5/10 MHz handling */
+ enum nl80211_chan_width bw = data->bw;
+
+ if (txi->control.rates[i].idx == -1)
+ break;
+
+ if (rflags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = NL80211_CHAN_WIDTH_40;
+ else if (rflags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ bw = NL80211_CHAN_WIDTH_80;
+ else if (rflags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+ bw = NL80211_CHAN_WIDTH_160;
+
+ if (WARN_ON(hwsim_get_chanwidth(bw) > hwsim_get_chanwidth(confbw)))
+ return;
+ }
+
if (skb->len >= 24 + 8 &&
ieee80211_is_probe_resp(hdr->frame_control)) {
/* fake header transmission time */
@@ -1935,6 +2035,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
}
data->channel = conf->chandef.chan;
+ data->bw = conf->chandef.width;
for (idx = 0; idx < ARRAY_SIZE(data->survey_data); idx++) {
if (data->survey_data[idx].channel &&
@@ -1946,6 +2047,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
}
} else {
data->channel = conf->chandef.chan;
+ data->bw = conf->chandef.width;
}
mutex_unlock(&data->mutex);
@@ -2077,12 +2179,49 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
wiphy_dbg(hw->wiphy, " TX Power: %d dBm\n", info->txpower);
}
+static void
+mac80211_hwsim_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+ struct mac80211_hwsim_data *data = hw->priv;
+ u32 bw = U32_MAX;
+ enum nl80211_chan_width confbw = NL80211_CHAN_WIDTH_20_NOHT;
+
+ switch (sta->bandwidth) {
+#define C(_bw) case IEEE80211_STA_RX_BW_##_bw: bw = _bw; break
+ C(20);
+ C(40);
+ C(80);
+ C(160);
+ C(320);
+#undef C
+ }
+
+ if (!data->use_chanctx) {
+ confbw = data->bw;
+ } else {
+ struct ieee80211_chanctx_conf *chanctx_conf =
+ rcu_dereference(vif->chanctx_conf);
+
+ if (!WARN_ON(!chanctx_conf))
+ confbw = chanctx_conf->def.width;
+ }
+
+ WARN(bw > hwsim_get_chanwidth(confbw),
+ "intf %pM: bad STA %pM bandwidth %d MHz (%d) > channel config %d MHz (%d)\n",
+ vif->addr, sta->addr, bw, sta->bandwidth,
+ hwsim_get_chanwidth(data->bw), data->bw);
+}
+
static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
hwsim_check_magic(vif);
hwsim_set_sta_magic(sta);
+ mac80211_hwsim_sta_rc_update(hw, vif, sta, 0);
return 0;
}
@@ -2658,6 +2797,7 @@ static int mac80211_hwsim_tx_last_beacon(struct ieee80211_hw *hw)
.sta_add = mac80211_hwsim_sta_add, \
.sta_remove = mac80211_hwsim_sta_remove, \
.sta_notify = mac80211_hwsim_sta_notify, \
+ .sta_rc_update = mac80211_hwsim_sta_rc_update, \
.set_tim = mac80211_hwsim_set_tim, \
.conf_tx = mac80211_hwsim_conf_tx, \
.get_survey = mac80211_hwsim_get_survey, \
@@ -2809,7 +2949,7 @@ out_err:
nlmsg_free(mcast_skb);
}
-static const struct ieee80211_sband_iftype_data he_capa_2ghz[] = {
+static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = {
{
.types_mask = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
@@ -2855,6 +2995,66 @@ static const struct ieee80211_sband_iftype_data he_capa_2ghz[] = {
.tx_mcs_80p80 = cpu_to_le16(0xffff),
},
},
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_NSEP_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE,
+ .phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+ .phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+ .phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+ IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+ .phy_cap_info[6] =
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+ IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK,
+ .phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW,
+ },
+
+ /* For all MCS and bandwidth, set 8 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /*
+ * Since B0, B1, B2 and B3 are not set in
+ * the supported channel width set field in
+ * the HE PHY capabilities information field,
+ * the device is a 20 MHz-only device on the
+ * 2.4 GHz band.
+ */
+ .only_20mhz = {
+ .rx_tx_mcs7_max_nss = 0x88,
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
},
#ifdef CONFIG_MAC80211_MESH
{
@@ -2897,7 +3097,7 @@ static const struct ieee80211_sband_iftype_data he_capa_2ghz[] = {
#endif
};
-static const struct ieee80211_sband_iftype_data he_capa_5ghz[] = {
+static const struct ieee80211_sband_iftype_data sband_capa_5ghz[] = {
{
/* TODO: should we support other types, e.g., P2P?*/
.types_mask = BIT(NL80211_IFTYPE_STATION) |
@@ -2948,6 +3148,81 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz[] = {
.tx_mcs_80p80 = cpu_to_le16(0xfffa),
},
},
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_NSEP_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE |
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK,
+ .phy_cap_info[1] =
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK,
+ .phy_cap_info[2] =
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK,
+ .phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+ .phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+ .phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+ IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+ .phy_cap_info[6] =
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+ IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK,
+ .phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ,
+ },
+
+ /* For all MCS and bandwidth, set 8 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /*
+ * As B1 and B2 are set in the supported
+ * channel width set field in the HE PHY
+ * capabilities information field, include all
+ * of the following MCS/NSS.
+ */
+ .bw._80 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._160 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
},
#ifdef CONFIG_MAC80211_MESH
{
@@ -2995,7 +3270,7 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz[] = {
#endif
};
-static const struct ieee80211_sband_iftype_data he_capa_6ghz[] = {
+static const struct ieee80211_sband_iftype_data sband_capa_6ghz[] = {
{
/* TODO: should we support other types, e.g., P2P?*/
.types_mask = BIT(NL80211_IFTYPE_STATION) |
@@ -3055,6 +3330,93 @@ static const struct ieee80211_sband_iftype_data he_capa_6ghz[] = {
.tx_mcs_80p80 = cpu_to_le16(0xfffa),
},
},
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_NSEP_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ |
+ IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE |
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK,
+ .phy_cap_info[1] =
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK,
+ .phy_cap_info[2] =
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK,
+ .phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+ .phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+ .phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+ IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+ .phy_cap_info[6] =
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+ IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK |
+ IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP,
+ .phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ,
+ },
+
+ /* For all MCS and bandwidth, set 8 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /*
+ * As B1 and B2 are set in the supported
+ * channel width set field in the HE PHY
+ * capabilities information field, and 320 MHz
+ * in 6 GHz is supported, include all of the
+ * following MCS/NSS.
+ */
+ .bw._80 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._160 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._320 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
},
#ifdef CONFIG_MAC80211_MESH
{
@@ -3111,22 +3473,22 @@ static const struct ieee80211_sband_iftype_data he_capa_6ghz[] = {
#endif
};
-static void mac80211_hwsim_he_capab(struct ieee80211_supported_band *sband)
+static void mac80211_hwsim_sband_capab(struct ieee80211_supported_band *sband)
{
u16 n_iftype_data;
if (sband->band == NL80211_BAND_2GHZ) {
- n_iftype_data = ARRAY_SIZE(he_capa_2ghz);
+ n_iftype_data = ARRAY_SIZE(sband_capa_2ghz);
sband->iftype_data =
- (struct ieee80211_sband_iftype_data *)he_capa_2ghz;
+ (struct ieee80211_sband_iftype_data *)sband_capa_2ghz;
} else if (sband->band == NL80211_BAND_5GHZ) {
- n_iftype_data = ARRAY_SIZE(he_capa_5ghz);
+ n_iftype_data = ARRAY_SIZE(sband_capa_5ghz);
sband->iftype_data =
- (struct ieee80211_sband_iftype_data *)he_capa_5ghz;
+ (struct ieee80211_sband_iftype_data *)sband_capa_5ghz;
} else if (sband->band == NL80211_BAND_6GHZ) {
- n_iftype_data = ARRAY_SIZE(he_capa_6ghz);
+ n_iftype_data = ARRAY_SIZE(sband_capa_6ghz);
sband->iftype_data =
- (struct ieee80211_sband_iftype_data *)he_capa_6ghz;
+ (struct ieee80211_sband_iftype_data *)sband_capa_6ghz;
} else {
return;
}
@@ -3318,6 +3680,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
hw->wiphy->n_cipher_suites = param->n_ciphers;
}
+ data->rx_rssi = DEFAULT_RX_RSSI;
+
INIT_DELAYED_WORK(&data->roc_start, hw_roc_start);
INIT_DELAYED_WORK(&data->roc_done, hw_roc_done);
INIT_DELAYED_WORK(&data->hw_scan, hw_scan_work);
@@ -3449,7 +3813,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
}
- mac80211_hwsim_he_capab(sband);
+ mac80211_hwsim_sband_capab(sband);
hw->wiphy->bands[band] = sband;
}
@@ -3509,6 +3873,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
debugfs_create_file("ps", 0666, data->debugfs, data, &hwsim_fops_ps);
debugfs_create_file("group", 0666, data->debugfs, data,
&hwsim_fops_group);
+ debugfs_create_file("rx_rssi", 0666, data->debugfs, data,
+ &hwsim_fops_rx_rssi);
if (!data->use_chanctx)
debugfs_create_file("dfs_simulate_radar", 0222,
data->debugfs,
diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c
index cd9f8ecf171f..ff1c7ec8c450 100644
--- a/drivers/net/wireless/marvell/libertas/if_spi.c
+++ b/drivers/net/wireless/marvell/libertas/if_spi.c
@@ -1195,7 +1195,7 @@ out:
return err;
}
-static int libertas_spi_remove(struct spi_device *spi)
+static void libertas_spi_remove(struct spi_device *spi)
{
struct if_spi_card *card = spi_get_drvdata(spi);
struct lbs_private *priv = card->priv;
@@ -1212,8 +1212,6 @@ static int libertas_spi_remove(struct spi_device *spi)
if (card->pdata->teardown)
card->pdata->teardown(spi);
free_if_spi_card(card);
-
- return 0;
}
static int if_spi_suspend(struct device *dev)
diff --git a/drivers/net/wireless/marvell/libertas/rx.c b/drivers/net/wireless/marvell/libertas/rx.c
index 9f24b0760e1f..c34d30f7cbe0 100644
--- a/drivers/net/wireless/marvell/libertas/rx.c
+++ b/drivers/net/wireless/marvell/libertas/rx.c
@@ -147,7 +147,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
dev->stats.rx_packets++;
skb->protocol = eth_type_trans(skb, dev);
- netif_rx_any_context(skb);
+ netif_rx(skb);
ret = 0;
done:
@@ -262,7 +262,7 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
dev->stats.rx_packets++;
skb->protocol = eth_type_trans(skb, priv->dev);
- netif_rx_any_context(skb);
+ netif_rx(skb);
ret = 0;
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index 18e89777b784..630e1679c3f9 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -389,7 +389,7 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
{
const u8 *vendor_ie;
const u8 *wmm_ie;
- u8 wmm_oui[] = {0x00, 0x50, 0xf2, 0x02};
+ static const u8 wmm_oui[] = {0x00, 0x50, 0xf2, 0x02};
vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WMM,
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 245ff644f81e..4e49ed21c5ce 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -350,7 +350,7 @@ int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
/* Forward multicast/broadcast packet to upper layer*/
- netif_rx_any_context(skb);
+ netif_rx(skb);
return 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index d583fa600a29..d5edb1e89f5b 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -488,7 +488,7 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
(skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
- netif_rx_any_context(skb);
+ netif_rx(skb);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 3a9af8931c35..02daeefb0761 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -93,7 +93,7 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
int i;
- if (!q)
+ if (!q || !q->ndesc)
return;
/* clear descriptors */
@@ -233,7 +233,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
struct mt76_queue_entry entry;
int last;
- if (!q)
+ if (!q || !q->ndesc)
return;
spin_lock_bh(&q->cleanup_lock);
@@ -448,6 +448,9 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
int len = SKB_WITH_OVERHEAD(q->buf_size);
int offset = q->buf_offset;
+ if (!q->ndesc)
+ return 0;
+
spin_lock_bh(&q->lock);
while (q->queued < q->ndesc - 1) {
@@ -465,6 +468,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
qbuf.addr = addr + offset;
qbuf.len = len - offset;
+ qbuf.skip_unmap = false;
mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
frames++;
}
@@ -484,6 +488,9 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
void *buf;
bool more;
+ if (!q->ndesc)
+ return;
+
spin_lock_bh(&q->lock);
do {
buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
@@ -508,6 +515,9 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
struct mt76_queue *q = &dev->q_rx[qid];
int i;
+ if (!q->ndesc)
+ return;
+
for (i = 0; i < q->ndesc; i++)
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 8bb1c7ab5b50..5b53d008eb66 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -180,7 +180,7 @@ static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
{ .start_freq = 5725, .end_freq = 5950, },
};
-const struct cfg80211_sar_capa mt76_sar_capa = {
+static const struct cfg80211_sar_capa mt76_sar_capa = {
.type = NL80211_SAR_TYPE_POWER,
.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
.freq_ranges = &mt76_sar_freq_ranges[0],
@@ -823,6 +823,10 @@ void mt76_set_channel(struct mt76_phy *phy)
wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
mt76_update_survey(phy);
+ if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
+ phy->chandef.width != chandef->width)
+ phy->dfs_state = MT_DFS_STATE_UNKNOWN;
+
phy->chandef = *chandef;
phy->chan_state = mt76_channel_state(phy, chandef->chan);
@@ -928,6 +932,36 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
EXPORT_SYMBOL(mt76_wcid_key_setup);
+static int
+mt76_rx_signal(struct mt76_rx_status *status)
+{
+ s8 *chain_signal = status->chain_signal;
+ int signal = -128;
+ u8 chains;
+
+ for (chains = status->chains; chains; chains >>= 1, chain_signal++) {
+ int cur, diff;
+
+ cur = *chain_signal;
+ if (!(chains & BIT(0)) ||
+ cur > 0)
+ continue;
+
+ if (cur > signal)
+ swap(cur, signal);
+
+ diff = signal - cur;
+ if (diff == 0)
+ signal += 3;
+ else if (diff <= 2)
+ signal += 2;
+ else if (diff <= 6)
+ signal += 1;
+ }
+
+ return signal;
+}
+
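
mt76_rx_signal() above folds the per-chain RSSI values into a single estimate: it keeps the strongest chain and grants a small bonus for each additional chain depending on how close it is (+3 dB when equal, +2 dB within 2 dB, +1 dB within 6 dB), an integer approximation of summing powers in the log domain. A hypothetical worked example:

	/* chains at -60 dBm and -62 dBm: diff = 2, so combined = -60 + 2 = -58 dBm,
	 * close to the exact 10 * log10(10^-6.0 + 10^-6.2) ~= -57.9 dBm
	 */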
static void
mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_hw **hw,
@@ -956,6 +990,9 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
status->ampdu_reference = mstat.ampdu_ref;
status->device_timestamp = mstat.timestamp;
status->mactime = mstat.timestamp;
+ status->signal = mt76_rx_signal(&mstat);
+ if (status->signal <= -128)
+ status->flag |= RX_FLAG_NO_SIGNAL_VAL;
if (ieee80211_is_beacon(hdr->frame_control) ||
ieee80211_is_probe_resp(hdr->frame_control))
@@ -1604,3 +1641,27 @@ void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
+
+enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
+{
+ struct ieee80211_hw *hw = phy->hw;
+ struct mt76_dev *dev = phy->dev;
+
+ if (dev->region == NL80211_DFS_UNSET ||
+ test_bit(MT76_SCANNING, &phy->state))
+ return MT_DFS_STATE_DISABLED;
+
+ if (!hw->conf.radar_enabled) {
+ if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
+ (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
+ return MT_DFS_STATE_ACTIVE;
+
+ return MT_DFS_STATE_DISABLED;
+ }
+
+ if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
+ return MT_DFS_STATE_CAC;
+
+ return MT_DFS_STATE_ACTIVE;
+}
+EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
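
mt76_phy_dfs_state() derives the radar-detector state purely from mac80211/cfg80211 state: DISABLED while scanning or without a DFS region, ACTIVE when monitoring a radar channel, CAC while cfg80211 would still refuse beaconing on the current chandef, otherwise ACTIVE. Drivers compare the result against the cached phy->dfs_state to pick the RDD firmware command, roughly (see the reworked mt7615_dfs_init_radar_detector() below):

	/* DISABLED -> RDD_NORMAL_START (stop the detector)
	 * CAC      -> RDD_CAC_START   (begin channel availability check)
	 * ACTIVE   -> RDD_CAC_END     (switch to in-service monitoring)
	 */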
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 404c3d1a70d6..882fb5d2517f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -19,7 +19,7 @@
#define MT_MCU_RING_SIZE 32
#define MT_RX_BUF_SIZE 2048
-#define MT_SKB_HEAD_LEN 128
+#define MT_SKB_HEAD_LEN 256
#define MT_MAX_NON_AQL_PKT 16
#define MT_TXQ_FREE_THR 32
@@ -85,6 +85,7 @@ enum mt76_rxq_id {
MT_RXQ_MCU_WA,
MT_RXQ_EXT,
MT_RXQ_EXT_WA,
+ MT_RXQ_MAIN_WA,
__MT_RXQ_MAX
};
@@ -104,6 +105,13 @@ enum mt76_cipher_type {
MT_CIPHER_GCMP_256,
};
+enum mt76_dfs_state {
+ MT_DFS_STATE_UNKNOWN,
+ MT_DFS_STATE_DISABLED,
+ MT_DFS_STATE_CAC,
+ MT_DFS_STATE_ACTIVE,
+};
+
struct mt76_queue_buf {
dma_addr_t addr;
u16 len;
@@ -224,7 +232,7 @@ enum mt76_wcid_flags {
MT_WCID_FLAG_HDR_TRANS,
};
-#define MT76_N_WCIDS 288
+#define MT76_N_WCIDS 544
/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_EXT_PHY BIT(3)
@@ -496,7 +504,7 @@ struct mt76_usb {
} mcu;
};
-#define MT76S_XMIT_BUF_SZ (16 * PAGE_SIZE)
+#define MT76S_XMIT_BUF_SZ 0x3fe00
#define MT76S_NUM_TX_ENTRIES 256
#define MT76S_NUM_RX_ENTRIES 512
struct mt76_sdio {
@@ -506,7 +514,8 @@ struct mt76_sdio {
struct work_struct stat_work;
- u8 *xmit_buf[IEEE80211_NUM_ACS + 2];
+ u8 *xmit_buf;
+ u32 xmit_buf_sz;
struct sdio_func *func;
void *intr_data;
@@ -621,6 +630,7 @@ struct mt76_vif {
u8 band_idx;
u8 wmm_idx;
u8 scan_seq_num;
+ u8 cipher;
};
struct mt76_phy {
@@ -636,6 +646,7 @@ struct mt76_phy {
struct ieee80211_channel *main_chan;
struct mt76_channel_state *chan_state;
+ enum mt76_dfs_state dfs_state;
ktime_t survey_time;
struct mt76_hw_cap cap;
@@ -897,8 +908,8 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
#define mt76_queue_reset(dev, ...) (dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)
#define mt76_for_each_q_rx(dev, i) \
- for (i = 0; i < ARRAY_SIZE((dev)->q_rx) && \
- (dev)->q_rx[i].ndesc; i++)
+ for (i = 0; i < ARRAY_SIZE((dev)->q_rx); i++) \
+ if ((dev)->q_rx[i].ndesc)
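
mt76_for_each_q_rx() used to stop at the first queue with no descriptors; with sparse RX queue layouts (e.g. the new MT_RXQ_MAIN_WA entry) it now skips unallocated queues instead of terminating early. Usage is unchanged, e.g. (sketch):

	int i;

	mt76_for_each_q_rx(dev, i)
		napi_disable(&dev->napi[i]);

Note that the macro now ends in an if, so a loop body followed by an else would bind to that if; callers keep using a single statement or braces as before.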
struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
const struct ieee80211_ops *ops,
@@ -1181,6 +1192,7 @@ void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
+enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy);
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
@@ -1262,13 +1274,21 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
struct mt76_sta_stats *stats);
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
+int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
+ u16 val, u16 offset, void *buf, size_t len);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
u8 req_type, u16 val, u16 offset,
void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
const u16 offset, const u32 val);
-int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
- bool ext);
+void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
+ void *data, int len);
+u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr);
+void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
+ u32 addr, u32 val);
+int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
+ struct mt76_bus_ops *ops);
+int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index 415ea17b9be6..37b092e3ea51 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -76,7 +76,7 @@ void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
__le32 *end = (__le32 *)&skb->data[skb->len];
enum rx_pkt_type type;
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
if (q == MT_RXQ_MCU) {
if (type == PKT_TYPE_RX_EVENT)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index a272d64808c3..17713c821d80 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -643,11 +643,6 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
status->chain_signal[1] = FIELD_GET(MT_RXV4_IB_RSSI1, rxdg3) +
dev->rssi_offset[1];
- status->signal = status->chain_signal[0];
- if (status->chains & BIT(1))
- status->signal = max(status->signal,
- status->chain_signal[1]);
-
if (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0) == 1)
status->bw = RATE_INFO_BW_40;
@@ -1135,7 +1130,7 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
}
rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
- rs_idx = !((u32)(FIELD_GET(MT_TXS1_F0_TIMESTAMP, le32_to_cpu(txs_data[1])) -
+ rs_idx = !((u32)(le32_get_bits(txs_data[1], MT_TXS1_F0_TIMESTAMP) -
rate_set_tsf) < 1000000);
rs_idx ^= rate_set_tsf & BIT(0);
rs = &sta->rateset[rs_idx];
@@ -1249,14 +1244,11 @@ void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
struct mt7603_sta *msta = NULL;
struct mt76_wcid *wcid;
__le32 *txs_data = data;
- u32 txs;
u8 wcidx;
u8 pid;
- txs = le32_to_cpu(txs_data[4]);
- pid = FIELD_GET(MT_TXS4_PID, txs);
- txs = le32_to_cpu(txs_data[3]);
- wcidx = FIELD_GET(MT_TXS3_WCID, txs);
+ pid = le32_get_bits(txs_data[4], MT_TXS4_PID);
+ wcidx = le32_get_bits(txs_data[3], MT_TXS3_WCID);
if (pid == MT_PACKET_ID_NO_ACK)
return;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 2b546bc05d82..83c5eec5b163 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -641,6 +641,9 @@ mt7603_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
int i;
+ if (!sta_rates)
+ return;
+
spin_lock_bh(&dev->mt76.lock);
for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
msta->rates[i].idx = sta_rates->rate[i].idx;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
index b53528014fbc..c26b45a09923 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@ -105,10 +105,10 @@ mt7615_pm_set(void *data, u64 val)
if (!mt7615_firmware_offload(dev) || mt76_is_usb(&dev->mt76))
return -EOPNOTSUPP;
- if (val == pm->enable)
- return 0;
+ mutex_lock(&dev->mt76.mutex);
- mt7615_mutex_acquire(dev);
+ if (val == pm->enable)
+ goto out;
if (dev->phy.n_beacon_vif) {
ret = -EBUSY;
@@ -119,9 +119,16 @@ mt7615_pm_set(void *data, u64 val)
pm->stats.last_wake_event = jiffies;
pm->stats.last_doze_event = jiffies;
}
+ /* make sure the chip is awake here and ps_work is scheduled
+ * just at the end of this routine.
+ */
+ pm->enable = false;
+ mt76_connac_pm_wake(&dev->mphy, pm);
+
pm->enable = val;
+ mt76_connac_power_save_sched(&dev->mphy, pm);
out:
- mt7615_mutex_release(dev);
+ mutex_unlock(&dev->mt76.mutex);
return ret;
}
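
The handler now takes dev->mt76.mutex directly and forces a wake cycle before flipping pm->enable: clearing the flag first makes mt76_connac_pm_wake() resume the chip unconditionally, and power save is rescheduled only once the new value is visible, so ps_work runs against a consistent setting. In outline:

	pm->enable = false;			/* make the wake unconditional */
	mt76_connac_pm_wake(&dev->mphy, pm);	/* chip is awake from here on */
	pm->enable = val;
	mt76_connac_power_save_sched(&dev->mphy, pm);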
@@ -436,11 +443,16 @@ mt7615_ext_mac_addr_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct mt7615_dev *dev = file->private_data;
- char buf[32 * ((ETH_ALEN * 3) + 4) + 1];
+ u32 len = 32 * ((ETH_ALEN * 3) + 4) + 1;
u8 addr[ETH_ALEN];
+ char *buf;
int ofs = 0;
int i;
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
for (i = 0; i < 32; i++) {
if (!(dev->muar_mask & BIT(i)))
continue;
@@ -451,10 +463,13 @@ mt7615_ext_mac_addr_read(struct file *file, char __user *userbuf,
put_unaligned_le32(mt76_rr(dev, MT_WF_RMAC_MAR0), addr);
put_unaligned_le16((mt76_rr(dev, MT_WF_RMAC_MAR1) &
MT_WF_RMAC_MAR1_ADDR), addr + 4);
- ofs += snprintf(buf + ofs, sizeof(buf) - ofs, "%d=%pM\n", i, addr);
+ ofs += snprintf(buf + ofs, len - ofs, "%d=%pM\n", i, addr);
}
- return simple_read_from_buffer(userbuf, count, ppos, buf, ofs);
+ ofs = simple_read_from_buffer(userbuf, count, ppos, buf, ofs);
+
+ kfree(buf);
+ return ofs;
}
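
The buffer moves off the stack because 32 entries of "%d=%pM\n" plus slack come to 32 * ((ETH_ALEN * 3) + 4) + 1 = 32 * 22 + 1 = 705 bytes, too large for a kernel stack frame; it is now kzalloc'd and freed once simple_read_from_buffer() has copied it out.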
static ssize_t
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index a753c7476d31..a06dcbb8c673 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -552,7 +552,6 @@ void mt7615_init_device(struct mt7615_dev *dev)
dev->pm.stats.last_wake_event = jiffies;
dev->pm.stats.last_doze_event = jiffies;
mt7615_cap_dbdc_disable(dev);
- dev->phy.dfs_state = -1;
#ifdef CONFIG_NL80211_TESTMODE
dev->mt76.test_ops = &mt7615_testmode_ops;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index ec25e5a95d44..bd687f7de628 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -253,15 +253,15 @@ static void mt7615_mac_fill_tm_rx(struct mt7615_phy *phy, __le32 *rxv)
static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
struct mt7615_sta *msta = (struct mt7615_sta *)status->wcid;
+ __le32 *rxd = (__le32 *)skb->data;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct ieee80211_hdr hdr;
- struct ethhdr eth_hdr;
- __le32 *rxd = (__le32 *)skb->data;
- __le32 qos_ctrl, ht_ctrl;
+ u16 frame_control;
- if (FIELD_GET(MT_RXD1_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[1])) !=
+ if (le32_get_bits(rxd[1], MT_RXD1_NORMAL_ADDR_TYPE) !=
MT_RXD1_NORMAL_U2M)
return -EINVAL;
@@ -275,47 +275,53 @@ static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
/* store the info from RXD and ethhdr to avoid being overridden */
- memcpy(&eth_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
- hdr.frame_control = FIELD_GET(MT_RXD4_FRAME_CONTROL, rxd[4]);
- hdr.seq_ctrl = FIELD_GET(MT_RXD6_SEQ_CTRL, rxd[6]);
- qos_ctrl = FIELD_GET(MT_RXD6_QOS_CTL, rxd[6]);
- ht_ctrl = FIELD_GET(MT_RXD7_HT_CONTROL, rxd[7]);
-
+ frame_control = le32_get_bits(rxd[4], MT_RXD4_FRAME_CONTROL);
+ hdr.frame_control = cpu_to_le16(frame_control);
+ hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[6], MT_RXD6_SEQ_CTRL));
hdr.duration_id = 0;
+
ether_addr_copy(hdr.addr1, vif->addr);
ether_addr_copy(hdr.addr2, sta->addr);
- switch (le16_to_cpu(hdr.frame_control) &
- (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
+ switch (frame_control & (IEEE80211_FCTL_TODS |
+ IEEE80211_FCTL_FROMDS)) {
case 0:
ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
break;
case IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_source);
break;
case IEEE80211_FCTL_TODS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
break;
case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
- ether_addr_copy(hdr.addr4, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
+ ether_addr_copy(hdr.addr4, eth_hdr->h_source);
break;
default:
break;
}
skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
- if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
- eth_hdr.h_proto == htons(ETH_P_IPX))
+ if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
+ eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
- else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
+ else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
else
skb_pull(skb, 2);
if (ieee80211_has_order(hdr.frame_control))
- memcpy(skb_push(skb, 2), &ht_ctrl, 2);
- if (ieee80211_is_data_qos(hdr.frame_control))
- memcpy(skb_push(skb, 2), &qos_ctrl, 2);
+ memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[7],
+ IEEE80211_HT_CTL_LEN);
+
+ if (ieee80211_is_data_qos(hdr.frame_control)) {
+ __le16 qos_ctrl;
+
+ qos_ctrl = cpu_to_le16(le32_get_bits(rxd[6], MT_RXD6_QOS_CTL));
+ memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
+ IEEE80211_QOS_CTL_LEN);
+ }
+
if (ieee80211_has_a4(hdr.frame_control))
memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
else
@@ -570,15 +576,6 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);
- status->signal = status->chain_signal[0];
-
- for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
- if (!(status->chains & BIT(i)))
- continue;
-
- status->signal = max(status->signal,
- status->chain_signal[i]);
- }
mt7615_mac_fill_tm_rx(mphy->priv, rxd);
@@ -1430,7 +1427,7 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
}
rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
- rs_idx = !((u32)(FIELD_GET(MT_TXS4_F0_TIMESTAMP, le32_to_cpu(txs_data[4])) -
+ rs_idx = !((u32)(le32_get_bits(txs_data[4], MT_TXS4_F0_TIMESTAMP) -
rate_set_tsf) < 1000000);
rs_idx ^= rate_set_tsf & BIT(0);
rs = &sta->rateset[rs_idx];
@@ -1561,14 +1558,11 @@ static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
struct mt76_wcid *wcid;
struct mt76_phy *mphy = &dev->mt76.phy;
__le32 *txs_data = data;
- u32 txs;
u8 wcidx;
u8 pid;
- txs = le32_to_cpu(txs_data[0]);
- pid = FIELD_GET(MT_TXS0_PID, txs);
- txs = le32_to_cpu(txs_data[2]);
- wcidx = FIELD_GET(MT_TXS2_WCID, txs);
+ pid = le32_get_bits(txs_data[0], MT_TXS0_PID);
+ wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
if (pid == MT_PACKET_ID_NO_ACK)
return;
@@ -1642,9 +1636,10 @@ mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
mt7615_txwi_free(dev, txwi);
}
-static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
+static void mt7615_mac_tx_free(struct mt7615_dev *dev, void *data, int len)
{
- struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
+ struct mt7615_tx_free *free = (struct mt7615_tx_free *)data;
+ void *end = data + len;
u8 i, count;
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
@@ -1655,21 +1650,25 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
}
- count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
+ count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_ID_CNT);
if (is_mt7615(&dev->mt76)) {
__le16 *token = &free->token[0];
+ if (WARN_ON_ONCE((void *)&token[count] > end))
+ return;
+
for (i = 0; i < count; i++)
mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i]));
} else {
__le32 *token = (__le32 *)&free->token[0];
+ if (WARN_ON_ONCE((void *)&token[count] > end))
+ return;
+
for (i = 0; i < count; i++)
mt7615_mac_tx_free_token(dev, le32_to_cpu(token[i]));
}
- dev_kfree_skb(skb);
-
rcu_read_lock();
mt7615_mac_sta_poll(dev);
rcu_read_unlock();
@@ -1677,6 +1676,29 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
mt76_worker_schedule(&dev->mt76.tx_worker);
}
+bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ __le32 *rxd = (__le32 *)data;
+ __le32 *end = (__le32 *)&rxd[len / 4];
+ enum rx_pkt_type type;
+
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+
+ switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7615_mac_tx_free(dev, data, len);
+ return false;
+ case PKT_TYPE_TXS:
+ for (rxd++; rxd + 7 <= end; rxd += 7)
+ mt7615_mac_add_txs(dev, rxd);
+ return false;
+ default:
+ return true;
+ }
+}
+EXPORT_SYMBOL_GPL(mt7615_rx_check);
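
mt7615_rx_check() gives the DMA layer an early look at an RX buffer before any skb is built: TXS records and TXRX_NOTIFY (tx-free) events are consumed in place and false is returned so the buffer can be recycled immediately, while everything else returns true and flows down the usual mt7615_queue_rx_skb() path. The mmio probe below wires it up as the .rx_check driver op. A simplified, hypothetical sketch of the calling side:

	/* in the generic RX path, before allocating an skb */
	if (dev->drv->rx_check && !dev->drv->rx_check(dev, data, len))
		goto recycle;	/* consumed in place, no skb needed */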
+
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
{
@@ -1686,8 +1708,8 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
enum rx_pkt_type type;
u16 flag;
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
- flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+ flag = le32_get_bits(rxd[0], MT_RXD0_PKT_FLAG);
if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
type = PKT_TYPE_NORMAL_MCU;
@@ -1698,7 +1720,8 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
dev_kfree_skb(skb);
break;
case PKT_TYPE_TXRX_NOTIFY:
- mt7615_mac_tx_free(dev, skb);
+ mt7615_mac_tx_free(dev, skb->data, skb->len);
+ dev_kfree_skb(skb);
break;
case PKT_TYPE_RX_EVENT:
mt7615_mcu_rx_event(dev, skb);
@@ -1835,7 +1858,7 @@ mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy,
struct mt7615_dev *dev = phy->dev;
int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck;
bool ext_phy = phy != &dev->phy;
- u16 def_th = ofdm ? -98 : -110;
+ s16 def_th = ofdm ? -98 : -110;
bool update = false;
s8 *sensitivity;
int signal;
@@ -2068,6 +2091,7 @@ void mt7615_pm_wake_work(struct work_struct *work)
int i;
if (mt76_is_sdio(mdev)) {
+ mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
mt76_worker_schedule(&mdev->sdio.txrx_worker);
} else {
mt76_for_each_q_rx(mdev, i)
@@ -2103,6 +2127,14 @@ void mt7615_pm_power_save_work(struct work_struct *work)
test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
goto out;
+ if (mutex_is_locked(&dev->mt76.mutex))
+ /* if the mt76 mutex is held we should not put the device
+ * to sleep since we are currently accessing the device
+ * register map. We need to wait for the next power_save
+ * trigger.
+ */
+ goto out;
+
if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
delta = dev->pm.last_activity + delta - jiffies;
goto out;
@@ -2160,21 +2192,24 @@ static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy)
struct mt7615_dev *dev = phy->dev;
if (phy->rdd_state & BIT(0))
- mt7615_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
+ MT_RX_SEL0, 0);
if (phy->rdd_state & BIT(1))
- mt7615_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
+ MT_RX_SEL0, 0);
}
static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
{
int err;
- err = mt7615_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
+ MT_RX_SEL0, 0);
if (err < 0)
return err;
- return mt7615_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
- MT_RX_SEL0, 1);
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
+ MT_RX_SEL0, 1);
}
static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
@@ -2185,7 +2220,8 @@ static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
int err;
/* start CAC */
- err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, ext_phy,
+ MT_RX_SEL0, 0);
if (err < 0)
return err;
@@ -2246,50 +2282,60 @@ mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
{
- struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
struct mt7615_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
+ enum mt76_dfs_state dfs_state, prev_state;
int err;
if (is_mt7663(&dev->mt76))
return 0;
- if (dev->mt76.region == NL80211_DFS_UNSET) {
- phy->dfs_state = -1;
- if (phy->rdd_state)
- goto stop;
+ prev_state = phy->mt76->dfs_state;
+ dfs_state = mt76_phy_dfs_state(phy->mt76);
+ if (prev_state == dfs_state)
return 0;
- }
- if (test_bit(MT76_SCANNING, &phy->mt76->state))
- return 0;
+ if (prev_state == MT_DFS_STATE_UNKNOWN)
+ mt7615_dfs_stop_radar_detector(phy);
- if (phy->dfs_state == chandef->chan->dfs_state)
- return 0;
-
- err = mt7615_dfs_init_radar_specs(phy);
- if (err < 0) {
- phy->dfs_state = -1;
+ if (dfs_state == MT_DFS_STATE_DISABLED)
goto stop;
- }
- phy->dfs_state = chandef->chan->dfs_state;
+ if (prev_state <= MT_DFS_STATE_DISABLED) {
+ err = mt7615_dfs_init_radar_specs(phy);
+ if (err < 0)
+ return err;
+
+ err = mt7615_dfs_start_radar_detector(phy);
+ if (err < 0)
+ return err;
+
+ phy->mt76->dfs_state = MT_DFS_STATE_CAC;
+ }
- if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
- if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
- return mt7615_dfs_start_radar_detector(phy);
+ if (dfs_state == MT_DFS_STATE_CAC)
+ return 0;
- return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
- MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
+ ext_phy, MT_RX_SEL0, 0);
+ if (err < 0) {
+ phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
+ return err;
}
+ phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
+ return 0;
+
stop:
- err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START, ext_phy,
+ MT_RX_SEL0, 0);
if (err < 0)
return err;
mt7615_dfs_stop_radar_detector(phy);
+ phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;
+
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 82d625a16a62..d79cbdbd5a05 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -291,7 +291,8 @@ static void mt7615_init_dfs_state(struct mt7615_phy *phy)
if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
return;
- if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
+ if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+ !(mphy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
return;
if (mphy->chandef.chan->center_freq == chandef->chan->center_freq &&
@@ -365,6 +366,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
&mvif->sta;
@@ -403,6 +405,11 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mt7615_mutex_acquire(dev);
+ if (cmd == SET_KEY && !sta && !mvif->mt76.cipher) {
+ mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ mt7615_mcu_add_bss_info(phy, vif, NULL, true);
+ }
+
if (cmd == SET_KEY)
*wcid_keyidx = idx;
else if (idx == *wcid_keyidx)
@@ -424,6 +431,29 @@ out:
return err;
}
+static int mt7615_set_sar_specs(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar)
+{
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ int err;
+
+ if (!cfg80211_chandef_valid(&phy->mt76->chandef))
+ return -EINVAL;
+
+ err = mt76_init_sar_power(hw, sar);
+ if (err)
+ return err;
+
+ if (mt7615_firmware_offload(phy->dev))
+ return mt76_connac_mcu_set_rate_txpower(phy->mt76);
+
+ ieee80211_stop_queues(hw);
+ err = mt7615_set_channel(phy);
+ ieee80211_wake_queues(hw);
+
+ return err;
+}
+
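
mt7615_set_sar_specs() records the user-supplied SAR limits via mt76_init_sar_power() and then re-applies TX power: firmware-offload devices are updated directly with mt76_connac_mcu_set_rate_txpower(), otherwise the channel is re-set with the queues stopped so the new limits take effect. mt7615_mcu_set_txpower_sku() below clamps power in the matching order (sketch, half-dB units):

	/* tx_power = 2 * hw->conf.power_level;
	 * tx_power = mt76_get_sar_power(...);	clamp to the SAR limit
	 * tx_power -= nss delta; then apply the per-rate power limits
	 */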
static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
@@ -683,6 +713,9 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
int i;
+ if (!sta_rates)
+ return;
+
spin_lock_bh(&dev->mt76.lock);
for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
msta->rates[i].idx = sta_rates->rate[i].idx;
@@ -1323,6 +1356,7 @@ const struct ieee80211_ops mt7615_ops = {
.set_wakeup = mt7615_set_wakeup,
.set_rekey_data = mt7615_set_rekey_data,
#endif /* CONFIG_PM */
+ .set_sar_specs = mt7615_set_sar_specs,
};
EXPORT_SYMBOL_GPL(mt7615_ops);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index 759dcf0e6783..97e2a85cb728 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -71,19 +71,6 @@ struct mt7663_fw_buf {
#define IMG_CRC_LEN 4
-#define FW_FEATURE_SET_ENCRYPT BIT(0)
-#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
-
-#define DL_MODE_ENCRYPT BIT(0)
-#define DL_MODE_KEY_IDX GENMASK(2, 1)
-#define DL_MODE_RESET_SEC_IV BIT(3)
-#define DL_MODE_WORKING_PDA_CR4 BIT(4)
-#define DL_MODE_VALID_RAM_ENTRY BIT(5)
-#define DL_MODE_NEED_RSP BIT(31)
-
-#define FW_START_OVERRIDE BIT(0)
-#define FW_START_WORKING_PDA_CR4 BIT(2)
-
void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
int cmd, int *wait_seq)
{
@@ -756,145 +743,7 @@ out:
static int
mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
{
-#define ENTER_PM_STATE 1
-#define EXIT_PM_STATE 2
- struct {
- u8 pm_number;
- u8 pm_state;
- u8 bssid[ETH_ALEN];
- u8 dtim_period;
- u8 wlan_idx;
- __le16 bcn_interval;
- __le32 aid;
- __le32 rx_filter;
- u8 band_idx;
- u8 rsv[3];
- __le32 feature;
- u8 omac_idx;
- u8 wmm_idx;
- u8 bcn_loss_cnt;
- u8 bcn_sp_duration;
- } __packed req = {
- .pm_number = 5,
- .pm_state = state ? ENTER_PM_STATE : EXIT_PM_STATE,
- .band_idx = band,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(PM_STATE_CTRL),
- &req, sizeof(req), true);
-}
-
-static int
-mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, struct mt7615_phy *phy,
- bool enable)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- u32 type = vif->p2p ? NETWORK_P2P : NETWORK_INFRA;
- struct bss_info_basic *bss;
- u8 wlan_idx = mvif->sta.wcid.idx;
- struct tlv *tlv;
-
- tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
-
- switch (vif->type) {
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_MONITOR:
- break;
- case NL80211_IFTYPE_STATION:
- /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
- if (enable && sta) {
- struct mt7615_sta *msta;
-
- msta = (struct mt7615_sta *)sta->drv_priv;
- wlan_idx = msta->wcid.idx;
- }
- break;
- case NL80211_IFTYPE_ADHOC:
- type = NETWORK_IBSS;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- bss = (struct bss_info_basic *)tlv;
- bss->network_type = cpu_to_le32(type);
- bss->bmc_wcid_lo = wlan_idx;
- bss->wmm_idx = mvif->mt76.wmm_idx;
- bss->active = enable;
-
- if (vif->type != NL80211_IFTYPE_MONITOR) {
- memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
- bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
- bss->dtim_period = vif->bss_conf.dtim_period;
- } else {
- memcpy(bss->bssid, phy->mt76->macaddr, ETH_ALEN);
- }
-
- return 0;
-}
-
-static void
-mt7615_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- u8 omac_idx = mvif->mt76.omac_idx;
- struct bss_info_omac *omac;
- struct tlv *tlv;
- u32 type = 0;
-
- tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
-
- switch (vif->type) {
- case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- if (vif->p2p)
- type = CONNECTION_P2P_GO;
- else
- type = CONNECTION_INFRA_AP;
- break;
- case NL80211_IFTYPE_STATION:
- if (vif->p2p)
- type = CONNECTION_P2P_GC;
- else
- type = CONNECTION_INFRA_STA;
- break;
- case NL80211_IFTYPE_ADHOC:
- type = CONNECTION_IBSS_ADHOC;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- omac = (struct bss_info_omac *)tlv;
- omac->conn_type = cpu_to_le32(type);
- omac->omac_idx = mvif->mt76.omac_idx;
- omac->band_idx = mvif->mt76.band_idx;
- omac->hw_bss_idx = omac_idx > EXT_BSSID_START ? HW_BSSID_0 : omac_idx;
-}
-
-/* SIFS 20us + 512 byte beacon tranmitted by 1Mbps (3906us) */
-#define BCN_TX_ESTIMATE_TIME (4096 + 20)
-static void
-mt7615_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt7615_vif *mvif)
-{
- struct bss_info_ext_bss *ext;
- int ext_bss_idx, tsf_offset;
- struct tlv *tlv;
-
- ext_bss_idx = mvif->mt76.omac_idx - EXT_BSSID_START;
- if (ext_bss_idx < 0)
- return;
-
- tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
-
- ext = (struct bss_info_ext_bss *)tlv;
- tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
- ext->mbss_tsf_offset = cpu_to_le32(tsf_offset);
+ return mt76_connac_mcu_set_pm(&dev->mt76, band, state);
}
static int
@@ -913,13 +762,14 @@ mt7615_mcu_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
return PTR_ERR(skb);
if (enable)
- mt7615_mcu_bss_omac_tlv(skb, vif);
+ mt76_connac_mcu_bss_omac_tlv(skb, vif);
- mt7615_mcu_bss_basic_tlv(skb, vif, sta, phy, enable);
+ mt76_connac_mcu_bss_basic_tlv(skb, vif, sta, phy->mt76,
+ mvif->sta.wcid.idx, enable);
if (enable && mvif->mt76.omac_idx >= EXT_BSSID_START &&
mvif->mt76.omac_idx < REPEATER_BSSID_START)
- mt7615_mcu_bss_ext_tlv(skb, mvif);
+ mt76_connac_mcu_bss_ext_tlv(skb, &mvif->mt76);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(BSS_INFO_UPDATE), true);
@@ -1030,7 +880,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
NULL, wtbl_hdr);
if (sta)
mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, wskb, sta,
- NULL, wtbl_hdr);
+ NULL, wtbl_hdr, true, true);
mt76_connac_mcu_wtbl_hdr_trans_tlv(wskb, vif, &msta->wcid,
NULL, wtbl_hdr);
}
@@ -1057,19 +907,7 @@ mt7615_mcu_wtbl_update_hdr_trans(struct mt7615_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
- struct wtbl_req_hdr *wtbl_hdr;
- struct sk_buff *skb = NULL;
-
- wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
- WTBL_SET, NULL, &skb);
- if (IS_ERR(wtbl_hdr))
- return PTR_ERR(wtbl_hdr);
-
- mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, &msta->wcid, NULL,
- wtbl_hdr);
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(WTBL_UPDATE), true);
+ return mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
static const struct mt7615_mcu_ops wtbl_update_ops = {
@@ -1303,7 +1141,8 @@ mt7615_mcu_uni_tx_ba(struct mt7615_dev *dev,
struct mt7615_sta *sta = (struct mt7615_sta *)params->sta->drv_priv;
return mt76_connac_mcu_sta_ba(&dev->mt76, &sta->vif->mt76, params,
- enable, true);
+ MCU_UNI_CMD(STA_REC_UPDATE), enable,
+ true);
}
static int
@@ -1451,20 +1290,6 @@ release_fw:
return ret;
}
-static u32 mt7615_mcu_gen_dl_mode(u8 feature_set, bool is_cr4)
-{
- u32 ret = 0;
-
- ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
- (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
- ret |= FIELD_PREP(DL_MODE_KEY_IDX,
- FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
- ret |= DL_MODE_NEED_RSP;
- ret |= is_cr4 ? DL_MODE_WORKING_PDA_CR4 : 0;
-
- return ret;
-}
-
static int
mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
const struct mt7615_fw_trailer *hdr,
@@ -1475,7 +1300,8 @@ mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
u32 len, addr, mode;
for (i = 0; i < n_region; i++) {
- mode = mt7615_mcu_gen_dl_mode(hdr[i].feature_set, is_cr4);
+ mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
+ hdr[i].feature_set, is_cr4);
len = le32_to_cpu(hdr[i].len) + IMG_CRC_LEN;
addr = le32_to_cpu(hdr[i].addr);
@@ -1723,7 +1549,8 @@ static int mt7663_load_n9(struct mt7615_dev *dev, const char *name)
dev_info(dev->mt76.dev, "Parsing tailer Region: %d\n", i);
buf = (const struct mt7663_fw_buf *)(base_addr - shift);
- mode = mt7615_mcu_gen_dl_mode(buf->feature_set, false);
+ mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
+ buf->feature_set, false);
addr = le32_to_cpu(buf->img_dest_addr);
len = le32_to_cpu(buf->img_size);
@@ -2064,27 +1891,6 @@ int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
&req, sizeof(req), true);
}
-int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
- enum mt7615_rdd_cmd cmd, u8 index,
- u8 rx_sel, u8 val)
-{
- struct {
- u8 ctrl;
- u8 rdd_idx;
- u8 rdd_rx_sel;
- u8 val;
- u8 rsv[4];
- } req = {
- .ctrl = cmd,
- .rdd_idx = index,
- .rdd_rx_sel = rx_sel,
- .val = val,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_CTRL),
- &req, sizeof(req), true);
-}
-
int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val)
{
struct {
@@ -2214,7 +2020,7 @@ static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku)
struct mt76_power_limits limits;
s8 *limits_array = (s8 *)&limits;
int n_chains = hweight8(mphy->antenna_mask);
- int tx_power;
+ int tx_power = hw->conf.power_level * 2;
int i;
static const u8 sku_mapping[] = {
#define SKU_FIELD(_type, _field) \
@@ -2271,9 +2077,8 @@ static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku)
#undef SKU_FIELD
};
- tx_power = hw->conf.power_level * 2 -
- mt76_tx_power_nss_delta(n_chains);
-
+ tx_power = mt76_get_sar_power(mphy, mphy->chandef.chan, tx_power);
+ tx_power -= mt76_tx_power_nss_delta(n_chains);
tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
&limits, tx_power);
mphy->txpower_cur = tx_power;
@@ -2346,10 +2151,13 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
.center_chan2 = ieee80211_frequency_to_channel(freq2),
};
- if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
+ dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
+ req.switch_reason = CH_SWITCH_NORMAL;
+ else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
- else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
- chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+ else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
+ NL80211_IFTYPE_AP))
req.switch_reason = CH_SWITCH_DFS;
else
req.switch_reason = CH_SWITCH_NORMAL;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index 33f72f3657d0..ce45c3bfc443 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -194,6 +194,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
.token_size = MT7615_TOKEN_SIZE,
.tx_prepare_skb = mt7615_tx_prepare_skb,
.tx_complete_skb = mt7615_tx_complete_skb,
+ .rx_check = mt7615_rx_check,
.rx_skb = mt7615_queue_rx_skb,
.rx_poll_complete = mt7615_rx_poll_complete,
.sta_ps = mt7615_sta_ps,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 6ff6d5800918..2e91f6a27d0f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -403,30 +403,9 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd);
int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
const struct ieee80211_tx_queue_params *params);
void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb);
-int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
- enum mt7615_rdd_cmd cmd, u8 index,
- u8 rx_sel, u8 val);
int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev);
int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl);
-static inline bool is_mt7622(struct mt76_dev *dev)
-{
- if (!IS_ENABLED(CONFIG_MT7622_WMAC))
- return false;
-
- return mt76_chip(dev) == 0x7622;
-}
-
-static inline bool is_mt7615(struct mt76_dev *dev)
-{
- return mt76_chip(dev) == 0x7615 || mt76_chip(dev) == 0x7611;
-}
-
-static inline bool is_mt7611(struct mt76_dev *dev)
-{
- return mt76_chip(dev) == 0x7611;
-}
-
static inline void mt7615_irq_enable(struct mt7615_dev *dev, u32 mask)
{
mt76_set_irq_mask(&dev->mt76, 0, 0, mask);
@@ -530,6 +509,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
void mt7615_tx_worker(struct mt76_worker *w);
void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7615_tx_token_put(struct mt7615_dev *dev);
+bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
@@ -579,6 +559,7 @@ void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
struct mt76_queue_entry *e);
int mt7663_usb_sdio_register_device(struct mt7615_dev *dev);
int mt7663u_mcu_init(struct mt7615_dev *dev);
+int mt7663u_mcu_power_on(struct mt7615_dev *dev);
/* sdio */
int mt7663s_mcu_init(struct mt7615_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
index 31c4a76b7f91..49ab3a1f3b9b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
@@ -56,7 +56,10 @@ static int mt7663s_parse_intr(struct mt76_dev *dev, struct mt76s_intr *intr)
struct mt7663s_intr *irq_data = sdio->intr_data;
int i, err;
+ sdio_claim_host(sdio->func);
err = sdio_readsb(sdio->func, irq_data, MCR_WHISR, sizeof(*irq_data));
+ sdio_release_host(sdio->func);
+
if (err)
return err;
@@ -98,7 +101,7 @@ static int mt7663s_probe(struct sdio_func *func,
struct ieee80211_ops *ops;
struct mt7615_dev *dev;
struct mt76_dev *mdev;
- int i, ret;
+ int ret;
ops = devm_kmemdup(&func->dev, &mt7615_ops, sizeof(mt7615_ops),
GFP_KERNEL);
@@ -137,16 +140,6 @@ static int mt7663s_probe(struct sdio_func *func,
goto error;
}
- for (i = 0; i < ARRAY_SIZE(mdev->sdio.xmit_buf); i++) {
- mdev->sdio.xmit_buf[i] = devm_kmalloc(mdev->dev,
- MT76S_XMIT_BUF_SZ,
- GFP_KERNEL);
- if (!mdev->sdio.xmit_buf[i]) {
- ret = -ENOMEM;
- goto error;
- }
- }
-
ret = mt76s_alloc_rx_queue(mdev, MT_RXQ_MAIN);
if (ret)
goto error;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
index 0396ad532ba6..967641aebf5f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -17,9 +17,68 @@
static const struct usb_device_id mt7615_device_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7663, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x043e, 0x310c, 0xff, 0xff, 0xff) },
{ },
};
+static u32 mt7663u_rr(struct mt76_dev *dev, u32 addr)
+{
+ u32 ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = ___mt76u_rr(dev, MT_VEND_READ_EXT,
+ USB_DIR_IN | USB_TYPE_VENDOR, addr);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+
+static void mt7663u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | USB_TYPE_VENDOR, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static u32 mt7663u_rmw(struct mt76_dev *dev, u32 addr,
+ u32 mask, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ val |= ___mt76u_rr(dev, MT_VEND_READ_EXT,
+ USB_DIR_IN | USB_TYPE_VENDOR, addr) & ~mask;
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | USB_TYPE_VENDOR, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return val;
+}
+
+static void mt7663u_copy(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ int ret, i = 0, batch_len;
+ const u8 *val = data;
+
+ len = round_up(len, 4);
+
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (i < len) {
+ batch_len = min_t(int, usb->data_len, len - i);
+ memcpy(usb->data, val + i, batch_len);
+ ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ (offset + i) >> 16, offset + i,
+ usb->data, batch_len);
+ if (ret < 0)
+ break;
+
+ i += batch_len;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
static void mt7663u_stop(struct ieee80211_hw *hw)
{
struct mt7615_phy *phy = mt7615_hw_phy(hw);
@@ -65,6 +124,14 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
.sta_remove = mt7615_mac_sta_remove,
.update_survey = mt7615_update_channel,
};
+ static struct mt76_bus_ops bus_ops = {
+ .rr = mt7663u_rr,
+ .wr = mt7663u_wr,
+ .rmw = mt7663u_rmw,
+ .read_copy = mt76u_read_copy,
+ .write_copy = mt7663u_copy,
+ .type = MT76_BUS_USB,
+ };
struct usb_device *udev = interface_to_usbdev(usb_intf);
struct ieee80211_ops *ops;
struct mt7615_dev *dev;
@@ -91,7 +158,7 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
INIT_WORK(&dev->mcu_work, mt7663u_init_work);
dev->reg_map = mt7663_usb_sdio_reg_map;
dev->ops = ops;
- ret = mt76u_init(mdev, usb_intf, true);
+ ret = __mt76u_init(mdev, usb_intf, &bus_ops);
if (ret < 0)
goto error;
@@ -99,27 +166,15 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
- if (mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
- FW_STATE_PWR_ON << 1, 500)) {
- dev_dbg(dev->mt76.dev, "Usb device already powered on\n");
- set_bit(MT76_STATE_POWER_OFF, &dev->mphy.state);
- goto alloc_queues;
- }
-
- ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
- USB_DIR_OUT | USB_TYPE_VENDOR,
- 0x0, 0x1, NULL, 0);
- if (ret)
- goto error;
-
if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
FW_STATE_PWR_ON << 1, 500)) {
- dev_err(dev->mt76.dev, "Timeout for power on\n");
- ret = -EIO;
- goto error;
+ ret = mt7663u_mcu_power_on(dev);
+ if (ret)
+ goto error;
+ } else {
+ set_bit(MT76_STATE_POWER_OFF, &dev->mphy.state);
}
-alloc_queues:
ret = mt76u_alloc_mcu_queue(&dev->mt76);
if (ret)
goto error;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
index 0ebb4c3c336a..98bf2f6ae936 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
@@ -42,6 +42,26 @@ out:
return ret;
}
+int mt7663u_mcu_power_on(struct mt7615_dev *dev)
+{
+ int ret;
+
+ ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x0, 0x1, NULL, 0);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
+ MT_TOP_MISC2_FW_PWR_ON,
+ FW_STATE_PWR_ON << 1, 500)) {
+ dev_err(dev->mt76.dev, "Timeout for power on\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
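
mt7663u_mcu_power_on() factors the vendor power-on request and the firmware power-on poll out of the probe and MCU-init paths, so both callers share one implementation and a poll timeout propagates as -EIO.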
int mt7663u_mcu_init(struct mt7615_dev *dev)
{
static const struct mt76_mcu_ops mt7663u_mcu_ops = {
@@ -57,23 +77,17 @@ int mt7663u_mcu_init(struct mt7615_dev *dev)
mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
if (test_and_clear_bit(MT76_STATE_POWER_OFF, &dev->mphy.state)) {
- mt7615_mcu_restart(&dev->mt76);
+ ret = mt7615_mcu_restart(&dev->mt76);
+ if (ret)
+ return ret;
+
if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
MT_TOP_MISC2_FW_PWR_ON, 0, 500))
return -EIO;
- ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
- USB_DIR_OUT | USB_TYPE_VENDOR,
- 0x0, 0x1, NULL, 0);
+ ret = mt7663u_mcu_power_on(dev);
if (ret)
return ret;
-
- if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
- MT_TOP_MISC2_FW_PWR_ON,
- FW_STATE_PWR_ON << 1, 500)) {
- dev_err(dev->mt76.dev, "Timeout for power on\n");
- return -EIO;
- }
}
ret = __mt7663_load_firmware(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index e7f01c2978a2..400ba514460e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -45,9 +45,11 @@ enum {
};
struct mt76_connac_pm {
- bool enable;
- bool ds_enable;
- bool suspended;
+ bool enable:1;
+ bool enable_user:1;
+ bool ds_enable:1;
+ bool ds_enable_user:1;
+ bool suspended:1;
spinlock_t txq_lock;
struct {
@@ -83,6 +85,11 @@ struct mt76_connac_coredump {
unsigned long last_activity;
};
+struct mt76_connac_sta_key_conf {
+ s8 keyidx;
+ u8 key[16];
+};
+
extern const struct wiphy_wowlan_support mt76_connac_wowlan_support;
static inline bool is_mt7922(struct mt76_dev *dev)
@@ -100,6 +107,69 @@ static inline bool is_mt7663(struct mt76_dev *dev)
return mt76_chip(dev) == 0x7663;
}
+static inline bool is_mt7915(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7915;
+}
+
+static inline bool is_mt7916(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7906;
+}
+
+static inline bool is_mt7986(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7986;
+}
+
+static inline bool is_mt7622(struct mt76_dev *dev)
+{
+ if (!IS_ENABLED(CONFIG_MT7622_WMAC))
+ return false;
+
+ return mt76_chip(dev) == 0x7622;
+}
+
+static inline bool is_mt7615(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7615 || mt76_chip(dev) == 0x7611;
+}
+
+static inline bool is_mt7611(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7611;
+}
+
+static inline bool is_connac_v1(struct mt76_dev *dev)
+{
+ return is_mt7615(dev) || is_mt7663(dev) || is_mt7622(dev);
+}
+
+static inline u8 mt76_connac_chan_bw(struct cfg80211_chan_def *chandef)
+{
+ static const u8 width_to_bw[] = {
+ [NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ,
+ [NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ,
+ [NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ,
+ [NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ,
+ [NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ,
+ [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
+ [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
+ [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
+ };
+
+ if (chandef->width >= ARRAY_SIZE(width_to_bw))
+ return 0;
+
+ return width_to_bw[chandef->width];
+}
+
+static inline u8 mt76_connac_lmac_mapping(u8 ac)
+{
+ /* LMAC uses the reverse order of mac80211 AC indexes */
+ return 3 - ac;
+}
+
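
mt76_connac_lmac_mapping() encodes that the LMAC hardware queues are numbered in the opposite order to the mac80211 AC enum, so the mapping is a simple reflection:

	/* mac80211: IEEE80211_AC_VO = 0, VI = 1, BE = 2, BK = 3
	 * LMAC:     VO = 3,             VI = 2, BE = 1, BK = 0
	 * e.g. mt76_connac_lmac_mapping(IEEE80211_AC_BE) == 1
	 */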
int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm);
void mt76_connac_power_save_sched(struct mt76_phy *phy,
struct mt76_connac_pm *pm);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index f79e3d5084f3..7cb17bf40e35 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -62,8 +62,8 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
};
int cmd;
- if (is_mt7921(dev) &&
- (req.addr == cpu_to_le32(MCU_PATCH_ADDRESS) || addr == 0x900000))
+ if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) ||
+ (is_mt7921(dev) && addr == 0x900000))
cmd = MCU_CMD(PATCH_START_REQ);
else
cmd = MCU_CMD(TARGET_ADDRESS_LEN_REQ);
@@ -266,8 +266,8 @@ mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_nested_tlv);
struct sk_buff *
-mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
- struct mt76_wcid *wcid)
+__mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
+ struct mt76_wcid *wcid, int len)
{
struct sta_req_hdr hdr = {
.bss_idx = mvif->idx,
@@ -278,7 +278,7 @@ mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
&hdr.wlan_idx_hi);
- skb = mt76_mcu_msg_alloc(dev, NULL, MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+ skb = mt76_mcu_msg_alloc(dev, NULL, len);
if (!skb)
return ERR_PTR(-ENOMEM);
@@ -286,7 +286,7 @@ mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
return skb;
}
-EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_sta_req);
+EXPORT_SYMBOL_GPL(__mt76_connac_mcu_alloc_sta_req);
struct wtbl_req_hdr *
mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid,
@@ -310,12 +310,54 @@ mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
if (sta_hdr)
- sta_hdr->len = cpu_to_le16(sizeof(hdr));
+ le16_add_cpu(&sta_hdr->len, sizeof(hdr));
return skb_put_data(nskb, &hdr, sizeof(hdr));
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_wtbl_req);
+void mt76_connac_mcu_bss_omac_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ u8 omac_idx = mvif->omac_idx;
+ struct bss_info_omac *omac;
+ struct tlv *tlv;
+ u32 type = 0;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ if (vif->p2p)
+ type = CONNECTION_P2P_GO;
+ else
+ type = CONNECTION_INFRA_AP;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (vif->p2p)
+ type = CONNECTION_P2P_GC;
+ else
+ type = CONNECTION_INFRA_STA;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ type = CONNECTION_IBSS_ADHOC;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
+
+ omac = (struct bss_info_omac *)tlv;
+ omac->conn_type = cpu_to_le32(type);
+ omac->omac_idx = mvif->omac_idx;
+ omac->band_idx = mvif->band_idx;
+ omac->hw_bss_idx = omac_idx > EXT_BSSID_START ? HW_BSSID_0 : omac_idx;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_omac_tlv);
+
void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -376,9 +418,8 @@ void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_basic_tlv);
-static void
-mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+void mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct sta_rec_uapsd *uapsd;
struct tlv *tlv;
@@ -407,6 +448,7 @@ mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
}
uapsd->max_sp = sta->max_sp;
}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_uapsd);
void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
@@ -420,13 +462,17 @@ void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
sizeof(*htr),
wtbl_tlv, sta_wtbl);
htr = (struct wtbl_hdr_trans *)tlv;
- htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);
+ htr->no_rx_trans = true;
if (vif->type == NL80211_IFTYPE_STATION)
htr->to_ds = true;
else
htr->from_ds = true;
+ if (!wcid)
+ return;
+
+ htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);
if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) {
htr->to_ds = true;
htr->from_ds = true;
@@ -461,6 +507,25 @@ int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_update_hdr_trans);
+int mt76_connac_mcu_wtbl_update_hdr_trans(struct mt76_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct sk_buff *skb = NULL;
+
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid, WTBL_SET, NULL,
+ &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, wcid, NULL, wtbl_hdr);
+
+ return mt76_mcu_skb_send_msg(dev, skb, MCU_EXT_CMD(WTBL_UPDATE), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_update_hdr_trans);
+
void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
struct sk_buff *skb,
struct ieee80211_vif *vif,
@@ -488,8 +553,7 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
generic->muar_idx = mvif->omac_idx;
generic->qos = sta->wme;
} else {
- if (is_mt7921(dev) &&
- vif->type == NL80211_IFTYPE_STATION)
+ if (!is_connac_v1(dev) && vif->type == NL80211_IFTYPE_STATION)
memcpy(generic->peer_addr, vif->bss_conf.bssid,
ETH_ALEN);
else
@@ -506,7 +570,7 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
rx->rca2 = 1;
rx->rv = 1;
- if (is_mt7921(dev))
+ if (!is_connac_v1(dev))
return;
tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_SPE, sizeof(*spe),
@@ -819,9 +883,9 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_tlv);
-static void
-mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
- void *sta_wtbl, void *wtbl_tlv)
+void mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ void *sta_wtbl, void *wtbl_tlv)
{
struct wtbl_smps *smps;
struct tlv *tlv;
@@ -829,30 +893,39 @@ mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
wtbl_tlv, sta_wtbl);
smps = (struct wtbl_smps *)tlv;
-
- if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
- smps->smps = true;
+ smps->smps = (sta->smps_mode == IEEE80211_SMPS_DYNAMIC);
}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_smps_tlv);
void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_sta *sta, void *sta_wtbl,
- void *wtbl_tlv)
+ void *wtbl_tlv, bool ht_ldpc, bool vht_ldpc)
{
struct wtbl_ht *ht = NULL;
struct tlv *tlv;
u32 flags = 0;
- if (sta->ht_cap.ht_supported) {
+ if (sta->ht_cap.ht_supported || sta->he_6ghz_capa.capa) {
tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
wtbl_tlv, sta_wtbl);
ht = (struct wtbl_ht *)tlv;
- ht->ldpc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
- ht->af = sta->ht_cap.ampdu_factor;
- ht->mm = sta->ht_cap.ampdu_density;
+ ht->ldpc = ht_ldpc &&
+ !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
+
+ if (sta->ht_cap.ht_supported) {
+ ht->af = sta->ht_cap.ampdu_factor;
+ ht->mm = sta->ht_cap.ampdu_density;
+ } else {
+ ht->af = le16_get_bits(sta->he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
+ ht->mm = le16_get_bits(sta->he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
+ }
+
ht->ht = true;
}
- if (sta->vht_cap.vht_supported) {
+ if (sta->vht_cap.vht_supported || sta->he_6ghz_capa.capa) {
struct wtbl_vht *vht;
u8 af;
@@ -860,7 +933,8 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
sizeof(*vht), wtbl_tlv,
sta_wtbl);
vht = (struct wtbl_vht *)tlv;
- vht->ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
+ vht->ldpc = vht_ldpc &&
+ !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
vht->vht = true;
af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
@@ -871,7 +945,7 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
mt76_connac_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_tlv);
- if (!is_mt7921(dev) && sta->ht_cap.ht_supported) {
+ if (is_connac_v1(dev) && sta->ht_cap.ht_supported) {
/* sgi */
u32 msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 |
MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160;
@@ -939,7 +1013,8 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
sta_wtbl, wtbl_hdr);
if (info->sta)
mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta,
- sta_wtbl, wtbl_hdr);
+ sta_wtbl, wtbl_hdr,
+ true, true);
}
return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
@@ -973,13 +1048,13 @@ void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
ba->rst_ba_sb = 1;
}
- if (is_mt7921(dev)) {
+ if (!is_connac_v1(dev)) {
ba->ba_winsize = enable ? cpu_to_le16(params->buf_size) : 0;
return;
}
if (enable && tx) {
- u8 ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
+ static const u8 ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
int i;
for (i = 7; i > 0; i--) {
@@ -1106,7 +1181,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba_tlv);
int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
struct ieee80211_ampdu_params *params,
- bool enable, bool tx)
+ int cmd, bool enable, bool tx)
{
struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
struct wtbl_req_hdr *wtbl_hdr;
@@ -1129,8 +1204,7 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
mt76_connac_mcu_wtbl_ba_tlv(dev, skb, params, enable, tx, sta_wtbl,
wtbl_hdr);
- ret = mt76_mcu_skb_send_msg(dev, skb,
- MCU_UNI_CMD(STA_REC_UPDATE), true);
+ ret = mt76_mcu_skb_send_msg(dev, skb, cmd, true);
if (ret)
return ret;
@@ -1140,15 +1214,12 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
mt76_connac_mcu_sta_ba_tlv(skb, params, enable, tx);
- return mt76_mcu_skb_send_msg(dev, skb,
- MCU_UNI_CMD(STA_REC_UPDATE), true);
+ return mt76_mcu_skb_send_msg(dev, skb, cmd, true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba);
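
The extra cmd argument lets each caller pick the MCU command instead of
the previously hard-coded MCU_UNI_CMD(STA_REC_UPDATE). A hedged sketch of
an ampdu_action call site; the surrounding driver variables are assumed,
not part of this diff:

	/* same behaviour as before this change: route the BA update
	 * through the unified STA_REC_UPDATE command
	 */
	ret = mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
				     MCU_UNI_CMD(STA_REC_UPDATE),
				     enable, tx);
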
-static u8
-mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
- enum nl80211_band band,
- struct ieee80211_sta *sta)
+u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ enum nl80211_band band, struct ieee80211_sta *sta)
{
struct mt76_dev *dev = phy->dev;
const struct ieee80211_sta_he_cap *he_cap;
@@ -1156,7 +1227,7 @@ mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta_ht_cap *ht_cap;
u8 mode = 0;
- if (!is_mt7921(dev))
+ if (is_connac_v1(dev))
return 0x38;
if (sta) {
@@ -1180,7 +1251,7 @@ mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
if (he_cap && he_cap->has_he)
mode |= PHY_MODE_AX_24G;
- } else if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) {
+ } else if (band == NL80211_BAND_5GHZ) {
mode |= PHY_MODE_A;
if (ht_cap->ht_supported)
@@ -1189,14 +1260,18 @@ mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
if (vht_cap->vht_supported)
mode |= PHY_MODE_AC;
- if (he_cap && he_cap->has_he && band == NL80211_BAND_5GHZ)
+ if (he_cap && he_cap->has_he)
mode |= PHY_MODE_AX_5G;
+ } else if (band == NL80211_BAND_6GHZ) {
+ mode |= PHY_MODE_A | PHY_MODE_AN |
+ PHY_MODE_AC | PHY_MODE_AX_5G;
}
return mode;
}
+EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode);
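
On 6 GHz the helper now reports the full legacy-through-HE set in a single
branch, since 6 GHz stations are HE-only. A minimal standalone sketch of
the resulting bitmap; the PHY_MODE_* bit positions are assumptions
mirroring the driver's defines:

#include <stdio.h>

#define PHY_MODE_A	(1 << 0)	/* assumed bit positions */
#define PHY_MODE_AN	(1 << 4)
#define PHY_MODE_AC	(1 << 5)
#define PHY_MODE_AX_5G	(1 << 7)

int main(void)
{
	unsigned int mode = PHY_MODE_A | PHY_MODE_AN |
			    PHY_MODE_AC | PHY_MODE_AX_5G;

	printf("6 GHz phy_mode bitmap: 0x%02x\n", mode);
	return 0;
}
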
-static const struct ieee80211_sta_he_cap *
+const struct ieee80211_sta_he_cap *
mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
{
enum nl80211_band band = phy->chandef.chan->band;
@@ -1206,6 +1281,7 @@ mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
return ieee80211_get_he_iftype_cap(sband, vif->type);
}
+EXPORT_SYMBOL_GPL(mt76_connac_get_he_phy_cap);
#define DEFAULT_HE_PE_DURATION 4
#define DEFAULT_HE_DURATION_RTS_THRES 1023
@@ -1438,7 +1514,6 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
int ext_channels_num = max_t(int, sreq->n_channels - 32, 0);
struct ieee80211_channel **scan_list = sreq->channels;
struct mt76_dev *mdev = phy->dev;
- bool ext_phy = phy == mdev->phy2;
struct mt76_connac_mcu_scan_channel *chan;
struct mt76_connac_hw_scan_req *req;
struct sk_buff *skb;
@@ -1452,7 +1527,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
req = (struct mt76_connac_hw_scan_req *)skb_put(skb, sizeof(*req));
- req->seq_num = mvif->scan_seq_num | ext_phy << 7;
+ req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
req->bss_idx = mvif->idx;
req->scan_type = sreq->n_ssids ? 1 : 0;
req->probe_req_num = sreq->n_ssids ? 2 : 0;
@@ -1560,7 +1635,6 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
struct mt76_connac_mcu_scan_channel *chan;
struct mt76_connac_sched_scan_req *req;
struct mt76_dev *mdev = phy->dev;
- bool ext_phy = phy == mdev->phy2;
struct cfg80211_match_set *match;
struct cfg80211_ssid *ssid;
struct sk_buff *skb;
@@ -1574,7 +1648,7 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
req = (struct mt76_connac_sched_scan_req *)skb_put(skb, sizeof(*req));
req->version = 1;
- req->seq_num = mvif->scan_seq_num | ext_phy << 7;
+ req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
u8 *addr = is_mt7663(phy->dev) ? req->mt7663.random_mac
@@ -2482,5 +2556,257 @@ void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val)
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_reg_wr);
+static int
+mt76_connac_mcu_sta_key_tlv(struct mt76_connac_sta_key_conf *sta_key_conf,
+ struct sk_buff *skb,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd)
+{
+ struct sta_rec_sec *sec;
+ u32 len = sizeof(*sec);
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
+ sec = (struct sta_rec_sec *)tlv;
+ sec->add = cmd;
+
+ if (cmd == SET_KEY) {
+ struct sec_key *sec_key;
+ u8 cipher;
+
+ cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ if (cipher == MCU_CIPHER_NONE)
+ return -EOPNOTSUPP;
+
+ sec_key = &sec->key[0];
+ sec_key->cipher_len = sizeof(*sec_key);
+
+ if (cipher == MCU_CIPHER_BIP_CMAC_128) {
+ sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
+ sec_key->key_id = sta_key_conf->keyidx;
+ sec_key->key_len = 16;
+ memcpy(sec_key->key, sta_key_conf->key, 16);
+
+ sec_key = &sec->key[1];
+ sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
+ sec_key->cipher_len = sizeof(*sec_key);
+ sec_key->key_len = 16;
+ memcpy(sec_key->key, key->key, 16);
+ sec->n_cipher = 2;
+ } else {
+ sec_key->cipher_id = cipher;
+ sec_key->key_id = key->keyidx;
+ sec_key->key_len = key->keylen;
+ memcpy(sec_key->key, key->key, key->keylen);
+
+ if (cipher == MCU_CIPHER_TKIP) {
+ /* Rx/Tx MIC keys are swapped */
+ memcpy(sec_key->key + 16, key->key + 24, 8);
+ memcpy(sec_key->key + 24, key->key + 16, 8);
+ }
+
+ /* store key_conf for BIP batch update */
+ if (cipher == MCU_CIPHER_AES_CCMP) {
+ memcpy(sta_key_conf->key, key->key, key->keylen);
+ sta_key_conf->keyidx = key->keyidx;
+ }
+
+ len -= sizeof(*sec_key);
+ sec->n_cipher = 1;
+ }
+ } else {
+ len -= sizeof(sec->key);
+ sec->n_cipher = 0;
+ }
+ sec->len = cpu_to_le16(len);
+
+ return 0;
+}
+
+int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct mt76_connac_sta_key_conf *sta_key_conf,
+ struct ieee80211_key_conf *key, int mcu_cmd,
+ struct mt76_wcid *wcid, enum set_key_cmd cmd)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ ret = mt76_connac_mcu_sta_key_tlv(sta_key_conf, skb, key, cmd);
+ if (ret)
+ return ret;
+
+ return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_key);
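
The TKIP branch above swaps the Rx/Tx MIC halves before the key reaches
the firmware. A standalone sketch of that byte layout, assuming the usual
mac80211 TKIP format (16-byte TK, then 8-byte Tx MIC, then 8-byte Rx MIC):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char key[32], fw_key[32];
	int i;

	for (i = 0; i < 32; i++)
		key[i] = i;

	/* firmware wants the MIC halves in the opposite order */
	memcpy(fw_key, key, 16);
	memcpy(fw_key + 16, key + 24, 8);	/* Rx MIC first */
	memcpy(fw_key + 24, key + 16, 8);	/* then Tx MIC */

	for (i = 16; i < 32; i++)
		printf("%02x%c", fw_key[i], i == 31 ? '\n' : ' ');
	return 0;
}
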
+
+/* SIFS 20us + 512 byte beacon transmitted at 1Mbps (4096us) */
+#define BCN_TX_ESTIMATE_TIME (4096 + 20)
+void mt76_connac_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt76_vif *mvif)
+{
+ struct bss_info_ext_bss *ext;
+ int ext_bss_idx, tsf_offset;
+ struct tlv *tlv;
+
+ ext_bss_idx = mvif->omac_idx - EXT_BSSID_START;
+ if (ext_bss_idx < 0)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
+
+ ext = (struct bss_info_ext_bss *)tlv;
+ tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
+ ext->mbss_tsf_offset = cpu_to_le32(tsf_offset);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_ext_tlv);
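
Each extended BSS staggers its beacon TSF by one estimated beacon airtime
so MBSS beacons do not collide. A standalone sketch of the offsets;
EXT_BSSID_START = 0x10 is an assumption about the driver's define:

#include <stdio.h>

#define BCN_TX_ESTIMATE_TIME	(4096 + 20)
#define EXT_BSSID_START		0x10	/* assumed */

int main(void)
{
	int omac_idx;

	for (omac_idx = EXT_BSSID_START; omac_idx < EXT_BSSID_START + 4;
	     omac_idx++)
		printf("omac %#x -> tsf offset %d us\n", omac_idx,
		       (omac_idx - EXT_BSSID_START) * BCN_TX_ESTIMATE_TIME);
	return 0;
}
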
+
+int mt76_connac_mcu_bss_basic_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct mt76_phy *phy, u16 wlan_idx,
+ bool enable)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ u32 type = vif->p2p ? NETWORK_P2P : NETWORK_INFRA;
+ struct bss_info_basic *bss;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
+ bss = (struct bss_info_basic *)tlv;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_MONITOR:
+ break;
+ case NL80211_IFTYPE_AP:
+ if (ieee80211_hw_check(phy->hw, SUPPORTS_MULTI_BSSID)) {
+ u8 bssid_id = vif->bss_conf.bssid_indicator;
+ struct wiphy *wiphy = phy->hw->wiphy;
+
+ if (bssid_id > ilog2(wiphy->mbssid_max_interfaces))
+ return -EINVAL;
+
+ bss->non_tx_bssid = vif->bss_conf.bssid_index;
+ bss->max_bssid = bssid_id;
+ }
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (enable) {
+ rcu_read_lock();
+ if (!sta)
+ sta = ieee80211_find_sta(vif,
+ vif->bss_conf.bssid);
+ /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
+ if (sta) {
+ struct mt76_wcid *wcid;
+
+ wcid = (struct mt76_wcid *)sta->drv_priv;
+ wlan_idx = wcid->idx;
+ }
+ rcu_read_unlock();
+ }
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ type = NETWORK_IBSS;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ bss->network_type = cpu_to_le32(type);
+ bss->bmc_wcid_lo = to_wcid_lo(wlan_idx);
+ bss->bmc_wcid_hi = to_wcid_hi(wlan_idx);
+ bss->wmm_idx = mvif->wmm_idx;
+ bss->active = enable;
+ bss->cipher = mvif->cipher;
+
+ if (vif->type != NL80211_IFTYPE_MONITOR) {
+ struct cfg80211_chan_def *chandef = &phy->chandef;
+
+ memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
+ bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
+ bss->dtim_period = vif->bss_conf.dtim_period;
+ bss->phy_mode = mt76_connac_get_phy_mode(phy, vif,
+ chandef->chan->band, NULL);
+ } else {
+ memcpy(bss->bssid, phy->macaddr, ETH_ALEN);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_basic_tlv);
+
+#define ENTER_PM_STATE 1
+#define EXIT_PM_STATE 2
+int mt76_connac_mcu_set_pm(struct mt76_dev *dev, int band, int enter)
+{
+ struct {
+ u8 pm_number;
+ u8 pm_state;
+ u8 bssid[ETH_ALEN];
+ u8 dtim_period;
+ u8 wlan_idx_lo;
+ __le16 bcn_interval;
+ __le32 aid;
+ __le32 rx_filter;
+ u8 band_idx;
+ u8 wlan_idx_hi;
+ u8 rsv[2];
+ __le32 feature;
+ u8 omac_idx;
+ u8 wmm_idx;
+ u8 bcn_loss_cnt;
+ u8 bcn_sp_duration;
+ } __packed req = {
+ .pm_number = 5,
+ .pm_state = enter ? ENTER_PM_STATE : EXIT_PM_STATE,
+ .band_idx = band,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_EXT_CMD(PM_STATE_CTRL), &req,
+ sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_pm);
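
A hedged call-site sketch for the PM helper; the suspend hook that would
host this call is an assumption, not part of this diff:

	/* suspend path: ask the firmware to enter power saving on
	 * band 0; the resume path passes enter = false, which maps
	 * to EXIT_PM_STATE inside the helper
	 */
	err = mt76_connac_mcu_set_pm(&dev->mt76, 0, true);
	if (err)
		return err;
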
+
+int mt76_connac_mcu_restart(struct mt76_dev *dev)
+{
+ struct {
+ u8 power_mode;
+ u8 rsv[3];
+ } req = {
+ .power_mode = 1,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_CMD(NIC_POWER_CTRL), &req,
+ sizeof(req), false);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_restart);
+
+int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
+ u8 rx_sel, u8 val)
+{
+ struct {
+ u8 ctrl;
+ u8 rdd_idx;
+ u8 rdd_rx_sel;
+ u8 val;
+ u8 rsv[4];
+ } __packed req = {
+ .ctrl = cmd,
+ .rdd_idx = index,
+ .rdd_rx_sel = rx_sel,
+ .val = val,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_EXT_CMD(SET_RDD_CTRL), &req,
+ sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_rdd_cmd);
+
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index 5baf8370b7bd..c3c93338d56a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -6,6 +6,26 @@
#include "mt76_connac.h"
+#define FW_FEATURE_SET_ENCRYPT BIT(0)
+#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
+#define FW_FEATURE_ENCRY_MODE BIT(4)
+#define FW_FEATURE_OVERRIDE_ADDR BIT(5)
+
+#define DL_MODE_ENCRYPT BIT(0)
+#define DL_MODE_KEY_IDX GENMASK(2, 1)
+#define DL_MODE_RESET_SEC_IV BIT(3)
+#define DL_MODE_WORKING_PDA_CR4 BIT(4)
+#define DL_MODE_VALID_RAM_ENTRY BIT(5)
+#define DL_CONFIG_ENCRY_MODE_SEL BIT(6)
+#define DL_MODE_NEED_RSP BIT(31)
+
+#define FW_START_OVERRIDE BIT(0)
+#define FW_START_WORKING_PDA_CR4 BIT(2)
+
+#define PATCH_SEC_NOT_SUPPORT GENMASK(31, 0)
+#define PATCH_SEC_TYPE_MASK GENMASK(15, 0)
+#define PATCH_SEC_TYPE_INFO 0x2
+
struct tlv {
__le16 tag;
__le16 len;
@@ -570,6 +590,7 @@ struct wtbl_raw {
sizeof(struct sta_rec_muru) + \
sizeof(struct sta_rec_bfee) + \
sizeof(struct sta_rec_ra) + \
+ sizeof(struct sta_rec_sec) + \
sizeof(struct sta_rec_ra_fixed) + \
sizeof(struct sta_rec_he_6g_capa) + \
sizeof(struct tlv) + \
@@ -956,6 +977,7 @@ enum {
MCU_EXT_CMD_SCS_CTRL = 0x82,
MCU_EXT_CMD_TWT_AGRT_UPDATE = 0x94,
MCU_EXT_CMD_FW_DBG_CTRL = 0x95,
+ MCU_EXT_CMD_OFFCH_SCAN_CTRL = 0x9a,
MCU_EXT_CMD_SET_RDD_TH = 0x9d,
MCU_EXT_CMD_MURU_CTRL = 0x9f,
MCU_EXT_CMD_SET_SPR = 0xa8,
@@ -971,6 +993,7 @@ enum {
MCU_UNI_CMD_SUSPEND = 0x05,
MCU_UNI_CMD_OFFLOAD = 0x06,
MCU_UNI_CMD_HIF_CTRL = 0x07,
+ MCU_UNI_CMD_SNIFFER = 0x24,
};
enum {
@@ -996,7 +1019,8 @@ enum {
MCU_CE_CMD_SET_BSS_CONNECTED = 0x16,
MCU_CE_CMD_SET_BSS_ABORT = 0x17,
MCU_CE_CMD_CANCEL_HW_SCAN = 0x1b,
- MCU_CE_CMD_SET_ROC = 0x1d,
+ MCU_CE_CMD_SET_ROC = 0x1c,
+ MCU_CE_CMD_SET_EDCA_PARMS = 0x1d,
MCU_CE_CMD_SET_P2P_OPPPS = 0x33,
MCU_CE_CMD_SET_RATE_TX_POWER = 0x5d,
MCU_CE_CMD_SCHED_SCAN_ENABLE = 0x61,
@@ -1427,6 +1451,51 @@ struct mt76_connac_config {
u8 data[320];
} __packed;
+static inline enum mcu_cipher_type
+mt76_connac_mcu_get_cipher(int cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MCU_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MCU_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MCU_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ return MCU_CIPHER_BIP_CMAC_128;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MCU_CIPHER_AES_CCMP;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return MCU_CIPHER_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return MCU_CIPHER_GCMP;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return MCU_CIPHER_GCMP_256;
+ case WLAN_CIPHER_SUITE_SMS4:
+ return MCU_CIPHER_WAPI;
+ default:
+ return MCU_CIPHER_NONE;
+ }
+}
+
+static inline u32
+mt76_connac_mcu_gen_dl_mode(struct mt76_dev *dev, u8 feature_set, bool is_wa)
+{
+ u32 ret = 0;
+
+ ret |= feature_set & FW_FEATURE_SET_ENCRYPT ?
+ DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV : 0;
+ if (is_mt7921(dev))
+ ret |= feature_set & FW_FEATURE_ENCRY_MODE ?
+ DL_CONFIG_ENCRY_MODE_SEL : 0;
+ ret |= FIELD_PREP(DL_MODE_KEY_IDX,
+ FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
+ ret |= DL_MODE_NEED_RSP;
+ ret |= is_wa ? DL_MODE_WORKING_PDA_CR4 : 0;
+
+ return ret;
+}
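
How the firmware feature bits map onto the download-mode word, as a
standalone sketch; the macro values copy the defines added in this
header, with FIELD_PREP/FIELD_GET reduced to plain shifts for the 2-bit
key-index field:

#include <stdio.h>

#define FW_FEATURE_SET_ENCRYPT	(1u << 0)
#define FW_FEATURE_SET_KEY_IDX	(3u << 1)
#define DL_MODE_ENCRYPT		(1u << 0)
#define DL_MODE_KEY_IDX		(3u << 1)
#define DL_MODE_RESET_SEC_IV	(1u << 3)
#define DL_MODE_NEED_RSP	(1u << 31)

int main(void)
{
	unsigned int feature_set = FW_FEATURE_SET_ENCRYPT | (2u << 1);
	unsigned int mode = 0;

	if (feature_set & FW_FEATURE_SET_ENCRYPT)
		mode |= DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV;
	/* move the key index from the feature field to the DL field */
	mode |= ((feature_set & FW_FEATURE_SET_KEY_IDX) >> 1) << 1;
	mode |= DL_MODE_NEED_RSP;

	printf("dl_mode = 0x%08x\n", mode);
	return 0;
}
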
+
#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
@@ -1436,7 +1505,7 @@ mt76_connac_mcu_get_wlan_idx(struct mt76_dev *dev, struct mt76_wcid *wcid,
{
*wlan_idx_hi = 0;
- if (is_mt7921(dev)) {
+ if (!is_connac_v1(dev)) {
*wlan_idx_lo = wcid ? to_wcid_lo(wcid->idx) : 0;
*wlan_idx_hi = wcid ? to_wcid_hi(wcid->idx) : 0;
} else {
@@ -1445,8 +1514,16 @@ mt76_connac_mcu_get_wlan_idx(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
struct sk_buff *
+__mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
+ struct mt76_wcid *wcid, int len);
+static inline struct sk_buff *
mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
- struct mt76_wcid *wcid);
+ struct mt76_wcid *wcid)
+{
+ return __mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid,
+ MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+}
+
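
The old allocator becomes a thin inline over a new length-taking variant,
so existing callers keep their signature while new code can size the skb
itself. A hedged example of a caller passing a custom length (the size
expression is hypothetical):

	skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid,
					      sizeof(struct sta_req_hdr) +
					      sizeof(struct sta_rec_basic));
	if (IS_ERR(skb))
		return PTR_ERR(skb);
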
struct wtbl_req_hdr *
mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid,
int cmd, void *sta_wtbl, struct sk_buff **skb);
@@ -1476,13 +1553,16 @@ void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid, int cmd);
+int mt76_connac_mcu_wtbl_update_hdr_trans(struct mt76_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
struct ieee80211_sta *sta,
struct ieee80211_vif *vif,
u8 rcpi, u8 state);
void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_sta *sta, void *sta_wtbl,
- void *wtbl_tlv);
+ void *wtbl_tlv, bool ht_ldpc, bool vht_ldpc);
void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_ampdu_params *params,
bool enable, bool tx, void *sta_wtbl,
@@ -1496,7 +1576,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
bool enable);
int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
struct ieee80211_ampdu_params *params,
- bool enable, bool tx);
+ int cmd, bool enable, bool tx);
int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid,
@@ -1546,4 +1626,32 @@ int mt76_connac_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
u32 mt76_connac_mcu_reg_rr(struct mt76_dev *dev, u32 offset);
void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val);
+
+const struct ieee80211_sta_he_cap *
+mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
+u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ enum nl80211_band band, struct ieee80211_sta *sta);
+
+int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct mt76_connac_sta_key_conf *sta_key_conf,
+ struct ieee80211_key_conf *key, int mcu_cmd,
+ struct mt76_wcid *wcid, enum set_key_cmd cmd);
+
+void mt76_connac_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt76_vif *mvif);
+void mt76_connac_mcu_bss_omac_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif);
+int mt76_connac_mcu_bss_basic_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct mt76_phy *phy, u16 wlan_idx,
+ bool enable);
+void mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ void *sta_wtbl, void *wtbl_tlv);
+int mt76_connac_mcu_set_pm(struct mt76_dev *dev, int band, int enter);
+int mt76_connac_mcu_restart(struct mt76_dev *dev);
+int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
+ u8 rx_sel, u8 val);
#endif /* __MT76_CONNAC_MCU_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 436daf6d6d86..0422c332354a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -245,7 +245,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
usb_set_intfdata(usb_intf, dev);
mt76x02u_init_mcu(mdev);
- ret = mt76u_init(mdev, usb_intf, false);
+ ret = mt76u_init(mdev, usb_intf);
if (ret)
goto err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 44d1a92d9a90..f76fd22ee035 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -103,7 +103,8 @@ struct mt76x02_dev {
u8 tbtt_count;
u32 tx_hang_reset;
- u8 tx_hang_check;
+ u8 tx_hang_check[4];
+ u8 beacon_hang_check;
u8 mcu_timeout;
struct mt76x02_calibration cal;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
index a601350531cd..024a5c0a5a57 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -823,10 +823,7 @@ EXPORT_SYMBOL_GPL(mt76x02_phy_dfs_adjust_agc);
void mt76x02_dfs_init_params(struct mt76x02_dev *dev)
{
- struct cfg80211_chan_def *chandef = &dev->mphy.chandef;
-
- if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
- dev->mt76.region != NL80211_DFS_UNSET) {
+ if (mt76_phy_dfs_state(&dev->mphy) > MT_DFS_STATE_DISABLED) {
mt76x02_dfs_init_sw_detector(dev);
mt76x02_dfs_set_bbp_params(dev);
/* enable debug mode */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index a404fd7ea968..2afad8c76ca6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -860,9 +860,7 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
status->chain_signal[1] = mt76x02_mac_get_rssi(dev,
rxwi->rssi[1],
1);
- signal = max_t(s8, signal, status->chain_signal[1]);
}
- status->signal = signal;
status->freq = dev->mphy.chandef.chan->center_freq;
status->band = dev->mphy.chandef.chan->band;
@@ -1040,12 +1038,26 @@ EXPORT_SYMBOL_GPL(mt76x02_update_channel);
static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
{
- u32 val = mt76_rr(dev, 0x10f4);
+ if (dev->mt76.beacon_mask) {
+ if (mt76_rr(dev, MT_TX_STA_0) & MT_TX_STA_0_BEACONS) {
+ dev->beacon_hang_check = 0;
+ return;
+ }
- if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
- return;
+ if (++dev->beacon_hang_check < 10)
+ return;
+
+ dev->beacon_hang_check = 0;
+ } else {
+ u32 val = mt76_rr(dev, 0x10f4);
+ if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
+ return;
+ }
+
+ dev_err(dev->mt76.dev, "MAC error detected\n");
- dev_err(dev->mt76.dev, "mac specific condition occurred\n");
+ mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+ mt76x02_wait_for_txrx_idle(&dev->mt76);
mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
udelay(10);
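
When beaconing, the watchdog now requires ten consecutive polls without a
transmitted beacon before forcing a MAC reset, instead of firing on a
single register read. A toy standalone model of that hysteresis:

#include <stdio.h>
#include <stdbool.h>

static unsigned char beacon_hang_check;

static bool beacon_watchdog(bool beacons_seen)
{
	if (beacons_seen) {
		beacon_hang_check = 0;
		return false;
	}
	if (++beacon_hang_check < 10)
		return false;
	beacon_hang_check = 0;	/* cleared before triggering, as above */
	return true;
}

int main(void)
{
	int i;

	for (i = 1; i <= 12; i++)
		printf("poll %2d -> reset=%d\n", i, beacon_watchdog(false));
	return 0;
}
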
@@ -1178,8 +1190,7 @@ void mt76x02_mac_work(struct work_struct *work)
dev->mt76.aggr_stats[idx++] += val >> 16;
}
- if (!dev->mt76.beacon_mask)
- mt76x02_check_mac_err(dev);
+ mt76x02_check_mac_err(dev);
if (dev->ed_monitor)
mt76x02_edcca_check(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index ec0de691129a..8bcd8afa0d3a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -348,18 +348,20 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
for (i = 0; i < 4; i++) {
q = dev->mphy.q_tx[i];
- if (!q->queued)
- continue;
-
prev_dma_idx = dev->mt76.tx_dma_idx[i];
dma_idx = readl(&q->regs->dma_idx);
dev->mt76.tx_dma_idx[i] = dma_idx;
- if (prev_dma_idx == dma_idx)
- break;
+ if (!q->queued || prev_dma_idx != dma_idx) {
+ dev->tx_hang_check[i] = 0;
+ continue;
+ }
+
+ if (++dev->tx_hang_check[i] >= MT_TX_HANG_TH)
+ return true;
}
- return i < 4;
+ return false;
}
static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -530,23 +532,13 @@ static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
if (test_bit(MT76_RESTART, &dev->mphy.state))
return;
- if (mt76x02_tx_hang(dev)) {
- if (++dev->tx_hang_check >= MT_TX_HANG_TH)
- goto restart;
- } else {
- dev->tx_hang_check = 0;
- }
-
- if (dev->mcu_timeout)
- goto restart;
-
- return;
+ if (!mt76x02_tx_hang(dev) && !dev->mcu_timeout)
+ return;
-restart:
mt76x02_watchdog_reset(dev);
dev->tx_hang_reset++;
- dev->tx_hang_check = 0;
+ memset(dev->tx_hang_check, 0, sizeof(dev->tx_hang_check));
memset(dev->mt76.tx_dma_idx, 0xff,
sizeof(dev->mt76.tx_dma_idx));
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
index fa7872ac22bf..fe0c5e3298bc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
@@ -571,6 +571,8 @@
#define MT_RX_STAT_2_OVERFLOW_ERRORS GENMASK(31, 16)
#define MT_TX_STA_0 0x170c
+#define MT_TX_STA_0_BEACONS GENMASK(31, 16)
+
#define MT_TX_STA_1 0x1710
#define MT_TX_STA_2 0x1714
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index 2575369e44e2..55068f3252ef 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -57,7 +57,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
usb_set_intfdata(intf, dev);
mt76x02u_init_mcu(mdev);
- err = mt76u_init(mdev, intf, false);
+ err = mt76u_init(mdev, intf);
if (err < 0)
goto err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
index d98225da694c..f21282cea845 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
@@ -1,9 +1,10 @@
# SPDX-License-Identifier: ISC
config MT7915E
tristate "MediaTek MT7915E (PCIe) support"
- select MT76_CORE
+ select MT76_CONNAC_LIB
depends on MAC80211
depends on PCI
+ select RELAY
help
This adds support for MT7915-based wireless PCIe devices,
which support concurrent dual-band operation at both 5GHz
@@ -11,3 +12,13 @@ config MT7915E
OFDMA, spatial reuse and dual carrier modulation.
To compile this driver as a module, choose M here.
+
+config MT7986_WMAC
+ bool "MT7986 (SoC) WMAC support"
+ depends on MT7915E
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select REGMAP
+ help
+	  This adds support for the built-in WMAC on the MT7986 SoC,
+	  which has the same feature set as MT7915 but additionally
+	  enables Wi-Fi 6E support.
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
index 80e49244348e..b794ceb79c37 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
@@ -6,3 +6,4 @@ mt7915e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \
debugfs.o mmio.o
mt7915e-$(CONFIG_NL80211_TESTMODE) += testmode.o
mt7915e-$(CONFIG_MT7986_WMAC) += soc.o
\ No newline at end of file
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index e96d1c31dd36..4e1ecaec8f4f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -1,9 +1,13 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */
+#include <linux/relay.h>
#include "mt7915.h"
#include "eeprom.h"
#include "mcu.h"
+#include "mac.h"
+
+#define FW_BIN_LOG_MAGIC 0x44e98caf
/** global debugfs **/
@@ -75,7 +79,11 @@ mt7915_radar_trigger(void *data, u64 val)
{
struct mt7915_dev *dev = data;
- return mt7915_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE, 1, 0, 0);
+ if (val > MT_RX_SEL2)
+ return -EINVAL;
+
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_RADAR_EMULATE,
+ val, 0, 0);
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL,
@@ -301,6 +309,53 @@ exit:
DEFINE_SHOW_ATTRIBUTE(mt7915_muru_stats);
static int
+mt7915_rdd_monitor(struct seq_file *s, void *data)
+{
+ struct mt7915_dev *dev = dev_get_drvdata(s->private);
+ struct cfg80211_chan_def *chandef = &dev->rdd2_chandef;
+ const char *bw;
+ int ret = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (!cfg80211_chandef_valid(chandef)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!dev->rdd2_phy) {
+ seq_puts(s, "not running\n");
+ goto out;
+ }
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ bw = "40";
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ bw = "80";
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ bw = "160";
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ bw = "80P80";
+ break;
+ default:
+ bw = "20";
+ break;
+ }
+
+ seq_printf(s, "channel %d (%d MHz) width %s MHz center1: %d MHz\n",
+ chandef->chan->hw_value, chandef->chan->center_freq,
+ bw, chandef->center_freq1);
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static int
mt7915_fw_debug_wm_set(void *data, u64 val)
{
struct mt7915_dev *dev = data;
@@ -311,16 +366,31 @@ mt7915_fw_debug_wm_set(void *data, u64 val)
DEBUG_SPL,
DEBUG_RPT_RX,
} debug;
+ bool tx, rx, en;
int ret;
dev->fw_debug_wm = val ? MCU_FW_LOG_TO_HOST : 0;
- ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, dev->fw_debug_wm);
+ if (dev->fw_debug_bin)
+ val = 16;
+ else
+ val = dev->fw_debug_wm;
+
+ tx = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(1));
+ rx = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(2));
+ en = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(0));
+
+ ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, val);
if (ret)
return ret;
for (debug = DEBUG_TXCMD; debug <= DEBUG_RPT_RX; debug++) {
- ret = mt7915_mcu_fw_dbg_ctrl(dev, debug, !!dev->fw_debug_wm);
+ if (debug == DEBUG_RPT_RX)
+ val = en && rx;
+ else
+ val = en && tx;
+
+ ret = mt7915_mcu_fw_dbg_ctrl(dev, debug, val);
if (ret)
return ret;
}
@@ -376,6 +446,65 @@ mt7915_fw_debug_wa_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug_wa, mt7915_fw_debug_wa_get,
mt7915_fw_debug_wa_set, "%lld\n");
+static struct dentry *
+create_buf_file_cb(const char *filename, struct dentry *parent, umode_t mode,
+ struct rchan_buf *buf, int *is_global)
+{
+ struct dentry *f;
+
+ f = debugfs_create_file("fwlog_data", mode, parent, buf,
+ &relay_file_operations);
+ if (IS_ERR(f))
+ return NULL;
+
+ *is_global = 1;
+
+ return f;
+}
+
+static int
+remove_buf_file_cb(struct dentry *f)
+{
+ debugfs_remove(f);
+
+ return 0;
+}
+
+static int
+mt7915_fw_debug_bin_set(void *data, u64 val)
+{
+ static struct rchan_callbacks relay_cb = {
+ .create_buf_file = create_buf_file_cb,
+ .remove_buf_file = remove_buf_file_cb,
+ };
+ struct mt7915_dev *dev = data;
+
+ if (!dev->relay_fwlog)
+ dev->relay_fwlog = relay_open("fwlog_data", dev->debugfs_dir,
+ 1500, 512, &relay_cb, NULL);
+ if (!dev->relay_fwlog)
+ return -ENOMEM;
+
+ dev->fw_debug_bin = val;
+
+ relay_reset(dev->relay_fwlog);
+
+ return mt7915_fw_debug_wm_set(dev, dev->fw_debug_wm);
+}
+
+static int
+mt7915_fw_debug_bin_get(void *data, u64 *val)
+{
+ struct mt7915_dev *dev = data;
+
+ *val = dev->fw_debug_bin;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug_bin, mt7915_fw_debug_bin_get,
+ mt7915_fw_debug_bin_set, "%lld\n");
+
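
The binary firmware log surfaces as a relay file named fwlog_data under
the device's debugfs directory. A minimal userspace reader sketch; the
path is an assumption that depends on the debugfs mount point and which
phy the device was assigned:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/ieee80211/phy0/mt76/fwlog_data",
			"rb");
	unsigned char buf[1500];
	size_t n;

	if (!f) {
		perror("fopen");
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);
	return 0;
}
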
static int
mt7915_fw_util_wm_show(struct seq_file *file, void *data)
{
@@ -419,12 +548,12 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
/* Tx ampdu stat */
for (i = 0; i < ARRAY_SIZE(range); i++)
- range[i] = mt76_rr(dev, MT_MIB_ARNG(ext_phy, i));
+ range[i] = mt76_rr(dev, MT_MIB_ARNG(phy->band_idx, i));
for (i = 0; i < ARRAY_SIZE(bound); i++)
bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
- seq_printf(file, "\nPhy %d\n", ext_phy);
+ seq_printf(file, "\nPhy %d, Phy band %d\n", ext_phy, phy->band_idx);
seq_printf(file, "Length: %8d | ", bound[0]);
for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
@@ -432,7 +561,7 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
bound[i] + 1, bound[i + 1]);
seq_puts(file, "\nCount: ");
- n = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ n = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0; i < ARRAY_SIZE(bound); i++)
seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i + n]);
seq_puts(file, "\n");
@@ -521,14 +650,14 @@ mt7915_tx_stats_show(struct seq_file *file, void *data)
DEFINE_SHOW_ATTRIBUTE(mt7915_tx_stats);
static void
-mt7915_hw_queue_read(struct seq_file *s, u32 base, u32 size,
+mt7915_hw_queue_read(struct seq_file *s, u32 size,
const struct hw_queue_map *map)
{
struct mt7915_phy *phy = s->private;
struct mt7915_dev *dev = phy->dev;
u32 i, val;
- val = mt76_rr(dev, base + MT_FL_Q_EMPTY);
+ val = mt76_rr(dev, MT_FL_Q_EMPTY);
for (i = 0; i < size; i++) {
u32 ctrl, head, tail, queued;
@@ -536,13 +665,13 @@ mt7915_hw_queue_read(struct seq_file *s, u32 base, u32 size,
continue;
ctrl = BIT(31) | (map[i].pid << 10) | (map[i].qid << 24);
- mt76_wr(dev, base + MT_FL_Q0_CTRL, ctrl);
+ mt76_wr(dev, MT_FL_Q0_CTRL, ctrl);
- head = mt76_get_field(dev, base + MT_FL_Q2_CTRL,
+ head = mt76_get_field(dev, MT_FL_Q2_CTRL,
GENMASK(11, 0));
- tail = mt76_get_field(dev, base + MT_FL_Q2_CTRL,
+ tail = mt76_get_field(dev, MT_FL_Q2_CTRL,
GENMASK(27, 16));
- queued = mt76_get_field(dev, base + MT_FL_Q3_CTRL,
+ queued = mt76_get_field(dev, MT_FL_Q3_CTRL,
GENMASK(11, 0));
seq_printf(s, "\t%s: ", map[i].name);
@@ -570,8 +699,8 @@ mt7915_sta_hw_queue_read(void *data, struct ieee80211_sta *sta)
if (val & BIT(offs))
continue;
- mt76_wr(dev, MT_PLE_BASE + MT_FL_Q0_CTRL, ctrl | msta->wcid.idx);
- qlen = mt76_get_field(dev, MT_PLE_BASE + MT_FL_Q3_CTRL,
+ mt76_wr(dev, MT_FL_Q0_CTRL, ctrl | msta->wcid.idx);
+ qlen = mt76_get_field(dev, MT_FL_Q3_CTRL,
GENMASK(11, 0));
seq_printf(s, "\tSTA %pM wcid %d: AC%d%d queued:%d\n",
sta->addr, msta->wcid.idx,
@@ -633,7 +762,7 @@ mt7915_hw_queues_show(struct seq_file *file, void *data)
val, head, tail);
seq_puts(file, "PLE non-empty queue info:\n");
- mt7915_hw_queue_read(file, MT_PLE_BASE, ARRAY_SIZE(ple_queue_map),
+ mt7915_hw_queue_read(file, ARRAY_SIZE(ple_queue_map),
&ple_queue_map[0]);
/* iterate per-sta ple queue */
@@ -641,7 +770,7 @@ mt7915_hw_queues_show(struct seq_file *file, void *data)
mt7915_sta_hw_queue_read, file);
/* pse queue */
seq_puts(file, "PSE non-empty queue info:\n");
- mt7915_hw_queue_read(file, MT_PSE_BASE, ARRAY_SIZE(pse_queue_map),
+ mt7915_hw_queue_read(file, ARRAY_SIZE(pse_queue_map),
&pse_queue_map[0]);
return 0;
@@ -757,6 +886,7 @@ int mt7915_init_debugfs(struct mt7915_phy *phy)
debugfs_create_file("tx_stats", 0400, dir, phy, &mt7915_tx_stats_fops);
debugfs_create_file("fw_debug_wm", 0600, dir, dev, &fops_fw_debug_wm);
debugfs_create_file("fw_debug_wa", 0600, dir, dev, &fops_fw_debug_wa);
+ debugfs_create_file("fw_debug_bin", 0600, dir, dev, &fops_fw_debug_bin);
debugfs_create_file("fw_util_wm", 0400, dir, dev,
&mt7915_fw_util_wm_fops);
debugfs_create_file("fw_util_wa", 0400, dir, dev,
@@ -768,16 +898,77 @@ int mt7915_init_debugfs(struct mt7915_phy *phy)
debugfs_create_devm_seqfile(dev->mt76.dev, "twt_stats", dir,
mt7915_twt_stats);
debugfs_create_file("ser_trigger", 0200, dir, dev, &fops_ser_trigger);
- if (!dev->dbdc_support || ext_phy) {
+ if (!dev->dbdc_support || phy->band_idx) {
debugfs_create_u32("dfs_hw_pattern", 0400, dir,
&dev->hw_pattern);
debugfs_create_file("radar_trigger", 0200, dir, dev,
&fops_radar_trigger);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "rdd_monitor", dir,
+ mt7915_rdd_monitor);
}
+ if (!ext_phy)
+ dev->debugfs_dir = dir;
+
return 0;
}
+static void
+mt7915_debugfs_write_fwlog(struct mt7915_dev *dev, const void *hdr, int hdrlen,
+ const void *data, int len)
+{
+ static DEFINE_SPINLOCK(lock);
+ unsigned long flags;
+ void *dest;
+
+ spin_lock_irqsave(&lock, flags);
+ dest = relay_reserve(dev->relay_fwlog, hdrlen + len + 4);
+ if (dest) {
+ *(u32 *)dest = hdrlen + len;
+ dest += 4;
+
+ if (hdrlen) {
+ memcpy(dest, hdr, hdrlen);
+ dest += hdrlen;
+ }
+
+ memcpy(dest, data, len);
+ relay_flush(dev->relay_fwlog);
+ }
+ spin_unlock_irqrestore(&lock, flags);
+}
+
+void mt7915_debugfs_rx_fw_monitor(struct mt7915_dev *dev, const void *data, int len)
+{
+ struct {
+ __le32 magic;
+ __le32 timestamp;
+ __le16 msg_type;
+ __le16 len;
+ } hdr = {
+ .magic = cpu_to_le32(FW_BIN_LOG_MAGIC),
+ .msg_type = cpu_to_le16(PKT_TYPE_RX_FW_MONITOR),
+ };
+
+ if (!dev->relay_fwlog)
+ return;
+
+ hdr.timestamp = cpu_to_le32(mt76_rr(dev, MT_LPON_FRCR(0)));
+ hdr.len = *(__le16 *)data;
+ mt7915_debugfs_write_fwlog(dev, &hdr, sizeof(hdr), data, len);
+}
+
+bool mt7915_debugfs_rx_log(struct mt7915_dev *dev, const void *data, int len)
+{
+ if (get_unaligned_le32(data) != FW_BIN_LOG_MAGIC)
+ return false;
+
+ if (dev->relay_fwlog)
+ mt7915_debugfs_write_fwlog(dev, NULL, 0, data, len);
+
+ return true;
+}
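
mt7915_debugfs_rx_log() keys purely on the first four bytes of the
payload. A standalone sketch of the same alignment-safe little-endian
magic check that get_unaligned_le32() performs:

#include <stdio.h>
#include <stdint.h>

#define FW_BIN_LOG_MAGIC 0x44e98caf

static uint32_t le32_load(const uint8_t *p)
{
	return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	const uint8_t pkt[] = { 0xaf, 0x8c, 0xe9, 0x44, 0x00 };

	printf("is fw binary log: %d\n", le32_load(pkt) == FW_BIN_LOG_MAGIC);
	return 0;
}
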
+
#ifdef CONFIG_MAC80211_DEBUGFS
/** per-station debugfs **/
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
index 9182568f95c7..49b4d8ade16b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -5,11 +5,11 @@
#include "../dma.h"
#include "mac.h"
-int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc)
+int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
{
int i, err;
- err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE);
+ err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, ring_base);
if (err < 0)
return err;
@@ -40,140 +40,392 @@ static int mt7915_poll_tx(struct napi_struct *napi, int budget)
return 0;
}
+static void mt7915_dma_config(struct mt7915_dev *dev)
+{
+#define Q_CONFIG(q, wfdma, int, id) do { \
+ if (wfdma) \
+ dev->wfdma_mask |= (1 << (q)); \
+ dev->q_int_mask[(q)] = int; \
+ dev->q_id[(q)] = id; \
+ } while (0)
+
+#define MCUQ_CONFIG(q, wfdma, int, id) Q_CONFIG(q, (wfdma), (int), (id))
+#define RXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
+#define TXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__TXQ(q), (wfdma), (int), (id))
+
+ if (is_mt7915(&dev->mt76)) {
+ RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7915_RXQ_BAND0);
+ RXQ_CONFIG(MT_RXQ_MCU, WFDMA1, MT_INT_RX_DONE_WM, MT7915_RXQ_MCU_WM);
+ RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA1, MT_INT_RX_DONE_WA, MT7915_RXQ_MCU_WA);
+ RXQ_CONFIG(MT_RXQ_EXT, WFDMA0, MT_INT_RX_DONE_BAND1, MT7915_RXQ_BAND1);
+ RXQ_CONFIG(MT_RXQ_EXT_WA, WFDMA1, MT_INT_RX_DONE_WA_EXT, MT7915_RXQ_MCU_WA_EXT);
+ RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA1, MT_INT_RX_DONE_WA_MAIN, MT7915_RXQ_MCU_WA);
+ TXQ_CONFIG(0, WFDMA1, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA1, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
+ MCUQ_CONFIG(MT_MCUQ_WM, WFDMA1, MT_INT_TX_DONE_MCU_WM, MT7915_TXQ_MCU_WM);
+ MCUQ_CONFIG(MT_MCUQ_WA, WFDMA1, MT_INT_TX_DONE_MCU_WA, MT7915_TXQ_MCU_WA);
+ MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA1, MT_INT_TX_DONE_FWDL, MT7915_TXQ_FWDL);
+ } else {
+ RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0_MT7916, MT7916_RXQ_BAND0);
+ RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7916_RXQ_MCU_WM);
+ RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7916_RXQ_MCU_WA);
+ RXQ_CONFIG(MT_RXQ_EXT, WFDMA0, MT_INT_RX_DONE_BAND1_MT7916, MT7916_RXQ_BAND1);
+ RXQ_CONFIG(MT_RXQ_EXT_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT_MT7916, MT7916_RXQ_MCU_WA_EXT);
+ RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN_MT7916, MT7916_RXQ_MCU_WA_MAIN);
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
+ MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7915_TXQ_MCU_WM);
+ MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA_MT7916, MT7915_TXQ_MCU_WA);
+ MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7915_TXQ_FWDL);
+ }
+}
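
Queue wiring is now table-driven: probe fills per-queue ring ids and
interrupt masks once, and helpers such as MT_RXQ_ID()/MT_RXQ_RING_BASE()
(defined in the regs header, not shown in this hunk) presumably index
those tables instead of branching on the chip everywhere. A toy model
with hypothetical ring numbers and masks:

#include <stdio.h>

enum { Q_RX_MAIN, Q_RX_MCU, Q_MAX };

static unsigned int q_id[Q_MAX];
static unsigned int q_int_mask[Q_MAX];

static void dma_config(int is_mt7915)
{
	/* hypothetical values; the real ones live in the tables above */
	q_id[Q_RX_MAIN] = is_mt7915 ? 0 : 4;
	q_int_mask[Q_RX_MAIN] = is_mt7915 ? 1u << 0 : 1u << 22;
}

int main(void)
{
	dma_config(0);
	printf("rx main: ring=%u irq mask=0x%x\n",
	       q_id[Q_RX_MAIN], q_int_mask[Q_RX_MAIN]);
	return 0;
}
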
+
static void __mt7915_dma_prefetch(struct mt7915_dev *dev, u32 ofs)
{
-#define PREFETCH(base, depth) ((base) << 16 | (depth))
-
- mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL + ofs, PREFETCH(0x0, 0x4));
- mt76_wr(dev, MT_WFDMA0_RX_RING1_EXT_CTRL + ofs, PREFETCH(0x40, 0x4));
- mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL + ofs, PREFETCH(0x80, 0x0));
-
- mt76_wr(dev, MT_WFDMA1_TX_RING0_EXT_CTRL + ofs, PREFETCH(0x80, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING1_EXT_CTRL + ofs, PREFETCH(0xc0, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING2_EXT_CTRL + ofs, PREFETCH(0x100, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING3_EXT_CTRL + ofs, PREFETCH(0x140, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING4_EXT_CTRL + ofs, PREFETCH(0x180, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING5_EXT_CTRL + ofs, PREFETCH(0x1c0, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING6_EXT_CTRL + ofs, PREFETCH(0x200, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING7_EXT_CTRL + ofs, PREFETCH(0x240, 0x4));
-
- mt76_wr(dev, MT_WFDMA1_TX_RING16_EXT_CTRL + ofs, PREFETCH(0x280, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING17_EXT_CTRL + ofs, PREFETCH(0x2c0, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING18_EXT_CTRL + ofs, PREFETCH(0x300, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING19_EXT_CTRL + ofs, PREFETCH(0x340, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING20_EXT_CTRL + ofs, PREFETCH(0x380, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING21_EXT_CTRL + ofs, PREFETCH(0x3c0, 0x0));
-
- mt76_wr(dev, MT_WFDMA1_RX_RING0_EXT_CTRL + ofs, PREFETCH(0x3c0, 0x4));
- mt76_wr(dev, MT_WFDMA1_RX_RING1_EXT_CTRL + ofs, PREFETCH(0x400, 0x4));
- mt76_wr(dev, MT_WFDMA1_RX_RING2_EXT_CTRL + ofs, PREFETCH(0x440, 0x4));
- mt76_wr(dev, MT_WFDMA1_RX_RING3_EXT_CTRL + ofs, PREFETCH(0x480, 0x0));
+#define PREFETCH(_base, _depth) ((_base) << 16 | (_depth))
+ u32 base = 0;
+
+ /* prefetch SRAM wrapping boundary for tx/rx ring. */
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x0, 0x4));
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x40, 0x4));
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x80, 0x4));
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0xc0, 0x4));
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x100, 0x4));
+
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(0x140, 0x4));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(0x180, 0x4));
+ if (!is_mt7915(&dev->mt76)) {
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(0x1c0, 0x4));
+ base = 0x40;
+ }
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT_WA) + ofs, PREFETCH(0x1c0 + base, 0x4));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x200 + base, 0x4));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT) + ofs, PREFETCH(0x240 + base, 0x4));
+
+	/* for mt7915, the ring following the last used
+	 * ring must also be initialized.
+ */
+ if (is_mt7915(&dev->mt76)) {
+ ofs += 0x4;
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x140, 0x0));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT_WA) + ofs, PREFETCH(0x200 + base, 0x0));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT) + ofs, PREFETCH(0x280 + base, 0x0));
+ }
}
void mt7915_dma_prefetch(struct mt7915_dev *dev)
{
__mt7915_dma_prefetch(dev, 0);
if (dev->hif2)
- __mt7915_dma_prefetch(dev, MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE);
+ __mt7915_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
}
-int mt7915_dma_init(struct mt7915_dev *dev)
+static void mt7915_dma_disable(struct mt7915_dev *dev, bool rst)
{
+ struct mt76_dev *mdev = &dev->mt76;
u32 hif1_ofs = 0;
- int ret;
-
- mt76_dma_attach(&dev->mt76);
if (dev->hif2)
- hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ /* reset */
+ if (rst) {
+ mt76_clear(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ if (is_mt7915(mdev)) {
+ mt76_clear(dev, MT_WFDMA1_RST,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA1_RST,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+ }
+
+ if (dev->hif2) {
+ mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ if (is_mt7915(mdev)) {
+ mt76_clear(dev, MT_WFDMA1_RST + hif1_ofs,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA1_RST + hif1_ofs,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+ }
+ }
+ }
+
+ /* disable */
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (is_mt7915(mdev))
+ mt76_clear(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (dev->hif2) {
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (is_mt7915(mdev))
+ mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);
+ }
+}
- /* configure global setting */
- mt76_set(dev, MT_WFDMA1_GLO_CFG,
- MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
- MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
+static int mt7915_dma_enable(struct mt7915_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ u32 hif1_ofs = 0;
+ u32 irq_mask;
+
+ if (dev->hif2)
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
/* reset dma idx */
mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
- mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR, ~0);
+ if (is_mt7915(mdev))
+ mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR, ~0);
+ if (dev->hif2) {
+ mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);
+ if (is_mt7915(mdev))
+ mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR + hif1_ofs, ~0);
+ }
- /* configure delay interrupt */
+	/* disable delay interrupts */
mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
- mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
+ if (is_mt7915(mdev)) {
+ mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
+ } else {
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);
+ }
if (dev->hif2) {
- mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
+ if (is_mt7915(mdev)) {
+ mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0 +
+ hif1_ofs, 0);
+ } else {
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 +
+ hif1_ofs, 0);
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 +
+ hif1_ofs, 0);
+ }
+ }
+
+	/* configure prefetch settings */
+ mt7915_dma_prefetch(dev);
+
+ /* hif wait WFDMA idle */
+ mt76_set(dev, MT_WFDMA0_BUSY_ENA,
+ MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA0_BUSY_ENA_RX_FIFO);
+
+ if (is_mt7915(mdev))
+ mt76_set(dev, MT_WFDMA1_BUSY_ENA,
+ MT_WFDMA1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA1_BUSY_ENA_RX_FIFO);
+
+ if (dev->hif2) {
+ mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
+ MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);
+
+ if (is_mt7915(mdev))
+ mt76_set(dev, MT_WFDMA1_BUSY_ENA + hif1_ofs,
+ MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO);
+ }
+
+ mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
+ MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);
+
+ /* set WFDMA Tx/Rx */
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (is_mt7915(mdev))
+ mt76_set(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
- mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);
- mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR + hif1_ofs, ~0);
+ if (dev->hif2) {
+ mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (is_mt7915(mdev))
+ mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
- mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
- mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_PDMA_BAND);
}
- /* configure perfetch settings */
- mt7915_dma_prefetch(dev);
+ /* enable interrupts for TX/RX rings */
+ irq_mask = MT_INT_RX_DONE_MCU |
+ MT_INT_TX_DONE_MCU |
+ MT_INT_MCU_CMD;
+
+ if (!dev->phy.band_idx)
+ irq_mask |= MT_INT_BAND0_RX_DONE;
+
+ if (dev->dbdc_support || dev->phy.band_idx)
+ irq_mask |= MT_INT_BAND1_RX_DONE;
+
+ mt7915_irq_enable(dev, irq_mask);
+
+ return 0;
+}
+
+int mt7915_dma_init(struct mt7915_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ u32 hif1_ofs = 0;
+ int ret;
+
+ mt7915_dma_config(dev);
+
+ mt76_dma_attach(&dev->mt76);
+
+ if (dev->hif2)
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ mt7915_dma_disable(dev, true);
/* init tx queue */
- ret = mt7915_init_tx_queues(&dev->phy, MT7915_TXQ_BAND0,
- MT7915_TX_RING_SIZE);
+ ret = mt7915_init_tx_queues(&dev->phy,
+ MT_TXQ_ID(dev->phy.band_idx),
+ MT7915_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(0));
if (ret)
return ret;
/* command to WM */
- ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7915_TXQ_MCU_WM,
- MT7915_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
+ MT_MCUQ_ID(MT_MCUQ_WM),
+ MT7915_TX_MCU_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_WM));
if (ret)
return ret;
/* command to WA */
- ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA, MT7915_TXQ_MCU_WA,
- MT7915_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
+ MT_MCUQ_ID(MT_MCUQ_WA),
+ MT7915_TX_MCU_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_WA));
if (ret)
return ret;
/* firmware download */
- ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7915_TXQ_FWDL,
- MT7915_TX_FWDL_RING_SIZE, MT_TX_RING_BASE);
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
+ MT_MCUQ_ID(MT_MCUQ_FWDL),
+ MT7915_TX_FWDL_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
if (ret)
return ret;
/* event from WM */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
- MT7915_RXQ_MCU_WM, MT7915_RX_MCU_RING_SIZE,
- MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
+ MT_RXQ_ID(MT_RXQ_MCU),
+ MT7915_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MCU));
if (ret)
return ret;
/* event from WA */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
- MT7915_RXQ_MCU_WA, MT7915_RX_MCU_RING_SIZE,
- MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
+ MT_RXQ_ID(MT_RXQ_MCU_WA),
+ MT7915_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
if (ret)
return ret;
- /* rx data queue */
- ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
- MT7915_RXQ_BAND0, MT7915_RX_RING_SIZE,
- MT_RX_BUF_SIZE, MT_RX_DATA_RING_BASE);
- if (ret)
- return ret;
+ /* rx data queue for band0 */
+ if (!dev->phy.band_idx) {
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
+ MT_RXQ_ID(MT_RXQ_MAIN),
+ MT7915_RX_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MAIN));
+ if (ret)
+ return ret;
+ }
- if (dev->dbdc_support) {
+ /* tx free notify event from WA for band0 */
+ if (!is_mt7915(mdev)) {
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
+ MT_RXQ_ID(MT_RXQ_MAIN_WA),
+ MT7915_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
+ if (ret)
+ return ret;
+ }
+
+ if (dev->dbdc_support || dev->phy.band_idx) {
+ /* rx data queue for band1 */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT],
- MT7915_RXQ_BAND1, MT7915_RX_RING_SIZE,
+ MT_RXQ_ID(MT_RXQ_EXT),
+ MT7915_RX_RING_SIZE,
MT_RX_BUF_SIZE,
- MT_RX_DATA_RING_BASE + hif1_ofs);
+ MT_RXQ_RING_BASE(MT_RXQ_EXT) + hif1_ofs);
if (ret)
return ret;
- /* event from WA */
+ /* tx free notify event from WA for band1 */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT_WA],
- MT7915_RXQ_MCU_WA_EXT,
+ MT_RXQ_ID(MT_RXQ_EXT_WA),
MT7915_RX_MCU_RING_SIZE,
MT_RX_BUF_SIZE,
- MT_RX_EVENT_RING_BASE + hif1_ofs);
+ MT_RXQ_RING_BASE(MT_RXQ_EXT_WA) + hif1_ofs);
if (ret)
return ret;
}
@@ -186,80 +438,14 @@ int mt7915_dma_init(struct mt7915_dev *dev)
mt7915_poll_tx, NAPI_POLL_WEIGHT);
napi_enable(&dev->mt76.tx_napi);
- /* hif wait WFDMA idle */
- mt76_set(dev, MT_WFDMA0_BUSY_ENA,
- MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
- MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
- MT_WFDMA0_BUSY_ENA_RX_FIFO);
-
- mt76_set(dev, MT_WFDMA1_BUSY_ENA,
- MT_WFDMA1_BUSY_ENA_TX_FIFO0 |
- MT_WFDMA1_BUSY_ENA_TX_FIFO1 |
- MT_WFDMA1_BUSY_ENA_RX_FIFO);
-
- mt76_set(dev, MT_WFDMA0_PCIE1_BUSY_ENA,
- MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
- MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
- MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);
-
- mt76_set(dev, MT_WFDMA1_PCIE1_BUSY_ENA,
- MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 |
- MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 |
- MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO);
-
- mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
- MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);
-
- /* set WFDMA Tx/Rx */
- mt76_set(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
- mt76_set(dev, MT_WFDMA1_GLO_CFG,
- MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
-
- if (dev->hif2) {
- mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
- (MT_WFDMA0_GLO_CFG_TX_DMA_EN |
- MT_WFDMA0_GLO_CFG_RX_DMA_EN));
- mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
- (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
- MT_WFDMA1_GLO_CFG_RX_DMA_EN));
- mt76_set(dev, MT_WFDMA_HOST_CONFIG,
- MT_WFDMA_HOST_CONFIG_PDMA_BAND);
- }
-
- /* enable interrupts for TX/RX rings */
- mt7915_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_MCU |
- MT_INT_MCU_CMD);
+ mt7915_dma_enable(dev);
return 0;
}
void mt7915_dma_cleanup(struct mt7915_dev *dev)
{
- /* disable */
- mt76_clear(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN |
- MT_WFDMA0_GLO_CFG_RX_DMA_EN);
- mt76_clear(dev, MT_WFDMA1_GLO_CFG,
- MT_WFDMA1_GLO_CFG_TX_DMA_EN |
- MT_WFDMA1_GLO_CFG_RX_DMA_EN);
-
- /* reset */
- mt76_clear(dev, MT_WFDMA1_RST,
- MT_WFDMA1_RST_DMASHDL_ALL_RST |
- MT_WFDMA1_RST_LOGIC_RST);
-
- mt76_set(dev, MT_WFDMA1_RST,
- MT_WFDMA1_RST_DMASHDL_ALL_RST |
- MT_WFDMA1_RST_LOGIC_RST);
-
- mt76_clear(dev, MT_WFDMA0_RST,
- MT_WFDMA0_RST_DMASHDL_ALL_RST |
- MT_WFDMA0_RST_LOGIC_RST);
-
- mt76_set(dev, MT_WFDMA0_RST,
- MT_WFDMA0_RST_DMASHDL_ALL_RST |
- MT_WFDMA0_RST_LOGIC_RST);
+ mt7915_dma_disable(dev, true);
mt76_dma_cleanup(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
index edd74d0de157..5b133bcdab17 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -10,6 +10,7 @@ static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
struct mt76_dev *mdev = &dev->mt76;
u8 *eeprom = mdev->eeprom.data;
u32 val = eeprom[MT_EE_DO_PRE_CAL];
+ u32 offs;
if (!dev->flash_mode)
return 0;
@@ -22,7 +23,9 @@ static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
if (!dev->cal)
return -ENOMEM;
- return mt76_get_of_eeprom(mdev, dev->cal, MT_EE_PRECAL, val);
+ offs = is_mt7915(&dev->mt76) ? MT_EE_PRECAL : MT_EE_PRECAL_V2;
+
+ return mt76_get_of_eeprom(mdev, dev->cal, offs, val);
}
static int mt7915_check_eeprom(struct mt7915_dev *dev)
@@ -32,24 +35,49 @@ static int mt7915_check_eeprom(struct mt7915_dev *dev)
switch (val) {
case 0x7915:
+ case 0x7916:
+ case 0x7986:
return 0;
default:
return -EINVAL;
}
}
+static char *mt7915_eeprom_name(struct mt7915_dev *dev)
+{
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7915:
+ return dev->dbdc_support ?
+ MT7915_EEPROM_DEFAULT_DBDC : MT7915_EEPROM_DEFAULT;
+ case 0x7986:
+ switch (mt7915_check_adie(dev, true)) {
+ case MT7976_ONE_ADIE_DBDC:
+ return MT7986_EEPROM_MT7976_DEFAULT_DBDC;
+ case MT7975_ONE_ADIE:
+ return MT7986_EEPROM_MT7975_DEFAULT;
+ case MT7976_ONE_ADIE:
+ return MT7986_EEPROM_MT7976_DEFAULT;
+ case MT7975_DUAL_ADIE:
+ return MT7986_EEPROM_MT7975_DUAL_DEFAULT;
+ case MT7976_DUAL_ADIE:
+ return MT7986_EEPROM_MT7976_DUAL_DEFAULT;
+ default:
+ break;
+ }
+ return NULL;
+ default:
+ return MT7916_EEPROM_DEFAULT;
+ }
+}
+
static int
mt7915_eeprom_load_default(struct mt7915_dev *dev)
{
- char *default_bin = MT7915_EEPROM_DEFAULT;
u8 *eeprom = dev->mt76.eeprom.data;
const struct firmware *fw = NULL;
int ret;
- if (dev->dbdc_support)
- default_bin = MT7915_EEPROM_DEFAULT_DBDC;
-
- ret = request_firmware(&fw, default_bin, dev->mt76.dev);
+ ret = request_firmware(&fw, mt7915_eeprom_name(dev), dev->mt76.dev);
if (ret)
return ret;
@@ -59,7 +87,7 @@ mt7915_eeprom_load_default(struct mt7915_dev *dev)
goto out;
}
- memcpy(eeprom, fw->data, MT7915_EEPROM_SIZE);
+ memcpy(eeprom, fw->data, mt7915_eeprom_size(dev));
dev->flash_mode = true;
out:
@@ -71,8 +99,9 @@ out:
static int mt7915_eeprom_load(struct mt7915_dev *dev)
{
int ret;
+ u16 eeprom_size = mt7915_eeprom_size(dev);
- ret = mt76_eeprom_init(&dev->mt76, MT7915_EEPROM_SIZE);
+ ret = mt76_eeprom_init(&dev->mt76, eeprom_size);
if (ret < 0)
return ret;
@@ -88,7 +117,7 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
return -EINVAL;
/* read eeprom data from efuse */
- block_num = DIV_ROUND_UP(MT7915_EEPROM_SIZE,
+ block_num = DIV_ROUND_UP(eeprom_size,
MT7915_EEPROM_BLOCK_SIZE);
for (i = 0; i < block_num; i++)
mt7915_mcu_get_eeprom(dev,
@@ -98,17 +127,32 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
return mt7915_check_eeprom(dev);
}
-void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
+static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
u8 *eeprom = dev->mt76.eeprom.data;
u32 val;
- val = eeprom[MT_EE_WIFI_CONF + ext_phy];
+ val = eeprom[MT_EE_WIFI_CONF + phy->band_idx];
val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val);
- if (val == MT_EE_BAND_SEL_DEFAULT && dev->dbdc_support)
- val = ext_phy ? MT_EE_BAND_SEL_5GHZ : MT_EE_BAND_SEL_2GHZ;
+
+ if (!is_mt7915(&dev->mt76)) {
+ switch (val) {
+ case MT_EE_V2_BAND_SEL_5GHZ:
+ phy->mt76->cap.has_5ghz = true;
+ return;
+ case MT_EE_V2_BAND_SEL_6GHZ:
+ phy->mt76->cap.has_6ghz = true;
+ return;
+ case MT_EE_V2_BAND_SEL_5GHZ_6GHZ:
+ phy->mt76->cap.has_5ghz = true;
+ phy->mt76->cap.has_6ghz = true;
+ return;
+ default:
+ phy->mt76->cap.has_2ghz = true;
+ return;
+ }
+ }
switch (val) {
case MT_EE_BAND_SEL_5GHZ:
@@ -124,32 +168,65 @@ void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
}
}
-static void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev)
+void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev,
+ struct mt7915_phy *phy)
{
- u8 nss, nss_band, *eeprom = dev->mt76.eeprom.data;
+ u8 nss, nss_band, nss_band_max, *eeprom = dev->mt76.eeprom.data;
+ struct mt76_phy *mphy = phy->mt76;
+ bool ext_phy = phy != &dev->phy;
+
+ mt7915_eeprom_parse_band_config(phy);
- mt7915_eeprom_parse_band_config(&dev->phy);
+ /* read tx/rx mask from eeprom */
+ if (is_mt7915(&dev->mt76)) {
+ nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
+ eeprom[MT_EE_WIFI_CONF]);
+ } else {
+ nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
+ eeprom[MT_EE_WIFI_CONF + phy->band_idx]);
+ }
- /* read tx mask from eeprom */
- nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH, eeprom[MT_EE_WIFI_CONF]);
if (!nss || nss > 4)
nss = 4;
+ /* read tx/rx stream */
nss_band = nss;
if (dev->dbdc_support) {
- nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B0,
- eeprom[MT_EE_WIFI_CONF + 3]);
- if (!nss_band || nss_band > 2)
- nss_band = 2;
+ if (is_mt7915(&dev->mt76)) {
+ nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B0,
+ eeprom[MT_EE_WIFI_CONF + 3]);
+ if (phy->band_idx)
+ nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B1,
+ eeprom[MT_EE_WIFI_CONF + 3]);
+ } else {
+ nss_band = FIELD_GET(MT_EE_WIFI_CONF_STREAM_NUM,
+ eeprom[MT_EE_WIFI_CONF + 2 + phy->band_idx]);
+ }
+
+ nss_band_max = is_mt7986(&dev->mt76) ?
+ MT_EE_NSS_MAX_DBDC_MA7986 : MT_EE_NSS_MAX_DBDC_MA7915;
+ } else {
+ nss_band_max = is_mt7986(&dev->mt76) ?
+ MT_EE_NSS_MAX_MA7986 : MT_EE_NSS_MAX_MA7915;
+ }
- if (nss_band >= nss)
- nss = 4;
+ if (!nss_band || nss_band > nss_band_max)
+ nss_band = nss_band_max;
+
+ if (nss_band > nss) {
+ dev_warn(dev->mt76.dev,
+ "nss mismatch, nss(%d) nss_band(%d) band(%d) ext_phy(%d)\n",
+ nss, nss_band, phy->band_idx, ext_phy);
+ nss = nss_band;
}
- dev->chainmask = BIT(nss) - 1;
- dev->mphy.antenna_mask = BIT(nss_band) - 1;
- dev->mphy.chainmask = dev->mphy.antenna_mask;
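+ /* ext phy chains are mapped above the main phy's, hence the chainshift */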
+ mphy->chainmask = BIT(nss) - 1;
+ if (ext_phy)
+ mphy->chainmask <<= dev->chainshift;
+ mphy->antenna_mask = BIT(nss_band) - 1;
+ dev->chainmask |= mphy->chainmask;
+ dev->chainshift = hweight8(dev->mphy.chainmask);
}
int mt7915_eeprom_init(struct mt7915_dev *dev)
@@ -171,7 +248,7 @@ int mt7915_eeprom_init(struct mt7915_dev *dev)
if (ret)
return ret;
- mt7915_eeprom_parse_hw_cap(dev);
+ mt7915_eeprom_parse_hw_cap(dev, &dev->phy);
memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
ETH_ALEN);
@@ -186,27 +263,43 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
{
u8 *eeprom = dev->mt76.eeprom.data;
int index, target_power;
- bool tssi_on;
+ bool tssi_on, is_7976;
if (chain_idx > 3)
return -EINVAL;
tssi_on = mt7915_tssi_enabled(dev, chan->band);
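+ /* the mt7976 a-die and mt7916 use the v2 eeprom power layout */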
+ is_7976 = mt7915_check_adie(dev, false) || is_mt7916(&dev->mt76);
if (chan->band == NL80211_BAND_2GHZ) {
- index = MT_EE_TX0_POWER_2G + chain_idx * 3;
- target_power = eeprom[index];
-
- if (!tssi_on)
- target_power += eeprom[index + 1];
+ if (is_7976) {
+ index = MT_EE_TX0_POWER_2G_V2 + chain_idx;
+ target_power = eeprom[index];
+ } else {
+ index = MT_EE_TX0_POWER_2G + chain_idx * 3;
+ target_power = eeprom[index];
+
+ if (!tssi_on)
+ target_power += eeprom[index + 1];
+ }
+ } else if (chan->band == NL80211_BAND_5GHZ) {
+ int group = mt7915_get_channel_group_5g(chan->hw_value, is_7976);
+
+ if (is_7976) {
+ index = MT_EE_TX0_POWER_5G_V2 + chain_idx * 5;
+ target_power = eeprom[index + group];
+ } else {
+ index = MT_EE_TX0_POWER_5G + chain_idx * 12;
+ target_power = eeprom[index + group];
+
+ if (!tssi_on)
+ target_power += eeprom[index + 8];
+ }
} else {
- int group = mt7915_get_channel_group(chan->hw_value);
-
- index = MT_EE_TX0_POWER_5G + chain_idx * 12;
- target_power = eeprom[index + group];
+ int group = mt7915_get_channel_group_6g(chan->hw_value);
- if (!tssi_on)
- target_power += eeprom[index + 8];
+ index = MT_EE_TX0_POWER_6G_V2 + chain_idx * 8;
+ target_power = is_7976 ? eeprom[index + group] : 0;
}
return target_power;
@@ -215,15 +308,20 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band)
{
u8 *eeprom = dev->mt76.eeprom.data;
- u32 val;
+ u32 val, offs;
s8 delta;
+ bool is_7976 = mt7915_check_adie(dev, false) || is_mt7916(&dev->mt76);
if (band == NL80211_BAND_2GHZ)
- val = eeprom[MT_EE_RATE_DELTA_2G];
+ offs = is_7976 ? MT_EE_RATE_DELTA_2G_V2 : MT_EE_RATE_DELTA_2G;
+ else if (band == NL80211_BAND_5GHZ)
+ offs = is_7976 ? MT_EE_RATE_DELTA_5G_V2 : MT_EE_RATE_DELTA_5G;
else
- val = eeprom[MT_EE_RATE_DELTA_5G];
+ offs = is_7976 ? MT_EE_RATE_DELTA_6G_V2 : 0;
+
+ val = eeprom[offs];
- if (!(val & MT_EE_RATE_DELTA_EN))
+ if (!offs || !(val & MT_EE_RATE_DELTA_EN))
return 0;
delta = FIELD_GET(MT_EE_RATE_DELTA_MASK, val);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
index a43389a41800..7578ac6d0be6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
@@ -23,11 +23,19 @@ enum mt7915_eeprom_field {
MT_EE_RATE_DELTA_5G = 0x29d,
MT_EE_TX0_POWER_2G = 0x2fc,
MT_EE_TX0_POWER_5G = 0x34b,
+ MT_EE_RATE_DELTA_2G_V2 = 0x7d3,
+ MT_EE_RATE_DELTA_5G_V2 = 0x81e,
+ MT_EE_RATE_DELTA_6G_V2 = 0x884, /* 6g fields only appear in eeprom v2 */
+ MT_EE_TX0_POWER_2G_V2 = 0x441,
+ MT_EE_TX0_POWER_5G_V2 = 0x445,
+ MT_EE_TX0_POWER_6G_V2 = 0x465,
MT_EE_ADIE_FT_VERSION = 0x9a0,
__MT_EE_MAX = 0xe00,
+ __MT_EE_MAX_V2 = 0x1000,
/* 0xe10 ~ 0x5780 used to save group cal data */
- MT_EE_PRECAL = 0xe10
+ MT_EE_PRECAL = 0xe10,
+ MT_EE_PRECAL_V2 = 0x1010
};
#define MT_EE_WIFI_CAL_GROUP BIT(0)
@@ -39,6 +47,7 @@ enum mt7915_eeprom_field {
#define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0)
#define MT_EE_WIFI_CONF0_BAND_SEL GENMASK(7, 6)
#define MT_EE_WIFI_CONF1_BAND_SEL GENMASK(7, 6)
+#define MT_EE_WIFI_CONF_STREAM_NUM GENMASK(7, 5)
#define MT_EE_WIFI_CONF3_TX_PATH_B0 GENMASK(1, 0)
#define MT_EE_WIFI_CONF3_TX_PATH_B1 GENMASK(5, 4)
#define MT_EE_WIFI_CONF7_TSSI0_2G BIT(0)
@@ -49,6 +58,19 @@ enum mt7915_eeprom_field {
#define MT_EE_RATE_DELTA_SIGN BIT(6)
#define MT_EE_RATE_DELTA_EN BIT(7)
+#define MT_EE_NSS_MAX_MA7915 4
+#define MT_EE_NSS_MAX_DBDC_MA7915 2
+#define MT_EE_NSS_MAX_MA7986 4
+#define MT_EE_NSS_MAX_DBDC_MA7986 4
+
+enum mt7915_adie_sku {
+ MT7976_ONE_ADIE_DBDC = 0x7,
+ MT7975_ONE_ADIE = 0x8,
+ MT7976_ONE_ADIE = 0xa,
+ MT7975_DUAL_ADIE = 0xd,
+ MT7976_DUAL_ADIE = 0xf,
+};
+
enum mt7915_eeprom_band {
MT_EE_BAND_SEL_DEFAULT,
MT_EE_BAND_SEL_5GHZ,
@@ -56,6 +78,13 @@ enum mt7915_eeprom_band {
MT_EE_BAND_SEL_DUAL,
};
+enum {
+ MT_EE_V2_BAND_SEL_2GHZ,
+ MT_EE_V2_BAND_SEL_5GHZ,
+ MT_EE_V2_BAND_SEL_6GHZ,
+ MT_EE_V2_BAND_SEL_5GHZ_6GHZ,
+};
+
enum mt7915_sku_rate_group {
SKU_CCK,
SKU_OFDM,
@@ -76,8 +105,20 @@ enum mt7915_sku_rate_group {
};
static inline int
-mt7915_get_channel_group(int channel)
+mt7915_get_channel_group_5g(int channel, bool is_7976)
{
+ if (is_7976) {
+ if (channel <= 64)
+ return 0;
+ if (channel <= 96)
+ return 1;
+ if (channel <= 128)
+ return 2;
+ if (channel <= 144)
+ return 3;
+ return 4;
+ }
+
if (channel >= 184 && channel <= 196)
return 0;
if (channel <= 48)
@@ -95,6 +136,15 @@ mt7915_get_channel_group(int channel)
return 7;
}
+static inline int
+mt7915_get_channel_group_6g(int channel)
+{
+ if (channel <= 29)
+ return 0;
+
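+ /* groups of 32 channels above ch 29, e.g. 33..61 -> 1, 65..93 -> 2 */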
+ return DIV_ROUND_UP(channel - 29, 32);
+}
+
static inline bool
mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index d054cdecd5f7..6d29366c5139 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -50,15 +50,22 @@ static ssize_t mt7915_thermal_temp_show(struct device *dev,
int i = to_sensor_dev_attr(attr)->index;
int temperature;
- if (i)
- return sprintf(buf, "%u\n", phy->throttle_temp[i - 1] * 1000);
-
- temperature = mt7915_mcu_get_temperature(phy);
- if (temperature < 0)
- return temperature;
-
- /* display in millidegree celcius */
- return sprintf(buf, "%u\n", temperature * 1000);
+ switch (i) {
+ case 0:
+ temperature = mt7915_mcu_get_temperature(phy);
+ if (temperature < 0)
+ return temperature;
+ /* display in millidegree celsius */
+ return sprintf(buf, "%u\n", temperature * 1000);
+ case 1:
+ case 2:
+ return sprintf(buf, "%u\n",
+ phy->throttle_temp[i - 1] * 1000);
+ case 3:
+ return sprintf(buf, "%hhu\n", phy->throttle_state);
+ default:
+ return -EINVAL;
+ }
}
static ssize_t mt7915_thermal_temp_store(struct device *dev,
@@ -84,11 +91,13 @@ static ssize_t mt7915_thermal_temp_store(struct device *dev,
static SENSOR_DEVICE_ATTR_RO(temp1_input, mt7915_thermal_temp, 0);
static SENSOR_DEVICE_ATTR_RW(temp1_crit, mt7915_thermal_temp, 1);
static SENSOR_DEVICE_ATTR_RW(temp1_max, mt7915_thermal_temp, 2);
+static SENSOR_DEVICE_ATTR_RO(throttle1, mt7915_thermal_temp, 3);
static struct attribute *mt7915_hwmon_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_throttle1.dev_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(mt7915_hwmon);
@@ -97,7 +106,7 @@ static int
mt7915_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
- *state = MT7915_THERMAL_THROTTLE_MAX;
+ *state = MT7915_CDEV_THROTTLE_MAX;
return 0;
}
@@ -108,7 +117,7 @@ mt7915_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
{
struct mt7915_phy *phy = cdev->devdata;
- *state = phy->throttle_state;
+ *state = phy->cdev_state;
return 0;
}
@@ -118,22 +127,27 @@ mt7915_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
struct mt7915_phy *phy = cdev->devdata;
+ u8 throttling = MT7915_THERMAL_THROTTLE_MAX - state;
int ret;
- if (state > MT7915_THERMAL_THROTTLE_MAX)
+ if (state > MT7915_CDEV_THROTTLE_MAX)
return -EINVAL;
if (phy->throttle_temp[0] > phy->throttle_temp[1])
return 0;
- if (state == phy->throttle_state)
+ if (state == phy->cdev_state)
return 0;
- ret = mt7915_mcu_set_thermal_throttling(phy, state);
+ /*
+ * cooling_device convention: 0 = no cooling, more = more cooling
+ * mcu convention: 1 = max cooling, more = less cooling
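+ * hence throttling = MT7915_THERMAL_THROTTLE_MAX - state maps one onto the other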
+ */
+ ret = mt7915_mcu_set_thermal_throttling(phy, throttling);
if (ret)
return ret;
- phy->throttle_state = state;
+ phy->cdev_state = state;
return 0;
}
@@ -186,7 +200,8 @@ static int mt7915_thermal_init(struct mt7915_phy *phy)
phy->throttle_temp[0] = 110;
phy->throttle_temp[1] = 120;
- return 0;
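+ /* program the default (un-throttled) level into the mcu at init time */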
+ return mt7915_mcu_set_thermal_throttling(phy,
+ MT7915_THERMAL_THROTTLE_MAX);
}
static void mt7915_led_set_config(struct led_classdev *led_cdev,
@@ -288,17 +303,18 @@ mt7915_regd_notifier(struct wiphy *wiphy,
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt76_phy *mphy = hw->priv;
struct mt7915_phy *phy = mphy->priv;
- struct cfg80211_chan_def *chandef = &mphy->chandef;
memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
dev->mt76.region = request->dfs_region;
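+ /* with no DFS region set, disable background radar detection (NULL chandef) */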
+ if (dev->mt76.region == NL80211_DFS_UNSET)
+ mt7915_mcu_rdd_background_enable(phy, NULL);
+
mt7915_init_txpower(dev, &mphy->sband_2g.sband);
mt7915_init_txpower(dev, &mphy->sband_5g.sband);
+ mt7915_init_txpower(dev, &mphy->sband_6g.sband);
- if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
- return;
-
+ mphy->dfs_state = MT_DFS_STATE_UNKNOWN;
mt7915_dfs_init_radar_detector(phy);
}
@@ -306,11 +322,13 @@ static void
mt7915_init_wiphy(struct ieee80211_hw *hw)
{
struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt76_dev *mdev = &phy->dev->mt76;
struct wiphy *wiphy = hw->wiphy;
+ struct mt7915_dev *dev = phy->dev;
hw->queues = 4;
- hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
- hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+ hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
hw->netdev_features = NETIF_F_RXCSUM;
hw->radiotap_timestamp.units_pos =
@@ -325,6 +343,7 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
wiphy->reg_notifier = mt7915_regd_notifier;
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ wiphy->mbssid_max_interfaces = 16;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BSS_COLOR);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
@@ -333,9 +352,16 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE);
+ if (!mdev->dev->of_node ||
+ !of_property_read_bool(mdev->dev->of_node,
+ "mediatek,disable-radar-background"))
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_RADAR_BACKGROUND);
+
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD);
ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
+ ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
hw->max_tx_fragments = 4;
@@ -349,14 +375,34 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
phy->mt76->sband_5g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING |
IEEE80211_HT_CAP_MAX_AMSDU;
- phy->mt76->sband_5g.sband.vht_cap.cap |=
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
- IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+
+ if (is_mt7915(&dev->mt76)) {
+ phy->mt76->sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+
+ if (!dev->dbdc_support)
+ phy->mt76->sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
+ } else {
+ phy->mt76->sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+
+ /* mt7916 dbdc: 2x2 bw40 on 2 GHz and 2x2 bw160 (contiguous) on 5 GHz */
+ phy->mt76->sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+ }
}
mt76_set_stream_caps(phy->mt76, true);
mt7915_set_stream_vht_txbf_caps(phy);
mt7915_set_stream_he_caps(phy);
+
+ wiphy->available_antennas_rx = phy->mt76->antenna_mask;
+ wiphy->available_antennas_tx = phy->mt76->antenna_mask;
}
static void
@@ -387,19 +433,27 @@ mt7915_mac_init_band(struct mt7915_dev *dev, u8 band)
mt76_rmw(dev, MT_MDP_BNRCFR1(band), mask, set);
mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 0x680);
- /* disable rx rate report by default due to hw issues */
+
+ /* mt7915: disable rx rate report by default due to hw issues */
mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
}
static void mt7915_mac_init(struct mt7915_dev *dev)
{
int i;
+ u32 rx_len = is_mt7915(&dev->mt76) ? 0x400 : 0x680;
+
+ /* config pse qid6 wfdma port selection */
+ if (!is_mt7915(&dev->mt76) && dev->hif2)
+ mt76_rmw(dev, MT_WF_PP_TOP_RXQ_WFDMA_CF_5, 0,
+ MT_WF_PP_TOP_RXQ_QID6_WFDMA_HIF_SEL_MASK);
+
+ mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, rx_len);
- mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 0x400);
/* enable hardware de-agg */
mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
- for (i = 0; i < MT7915_WTBL_SIZE; i++)
+ for (i = 0; i < mt7915_wtbl_size(dev); i++)
mt7915_mac_wtbl_update(dev, i,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
for (i = 0; i < 2; i++)
@@ -449,20 +503,32 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev)
phy = mphy->priv;
phy->dev = dev;
phy->mt76 = mphy;
- mphy->chainmask = dev->chainmask & ~dev->mphy.chainmask;
- mphy->antenna_mask = BIT(hweight8(mphy->chainmask)) - 1;
+
+ /* Bind main phy to band0 and ext_phy to band1 for dbdc case */
+ phy->band_idx = 1;
INIT_DELAYED_WORK(&mphy->mac_work, mt7915_mac_work);
- mt7915_eeprom_parse_band_config(phy);
- mt7915_init_wiphy(mphy->hw);
+ mt7915_eeprom_parse_hw_cap(dev, phy);
memcpy(mphy->macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR2,
ETH_ALEN);
+ /* Make the secondary PHY MAC address local without overlapping with
+ * the usual MAC address allocation scheme on multiple virtual interfaces
+ */
+ if (!is_valid_ether_addr(mphy->macaddr)) {
+ memcpy(mphy->macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+ ETH_ALEN);
+ mphy->macaddr[0] |= 2;
+ mphy->macaddr[0] ^= BIT(7);
+ }
mt76_eeprom_override(mphy);
- ret = mt7915_init_tx_queues(phy, MT7915_TXQ_BAND1,
- MT7915_TX_RING_SIZE);
+ /* init wiphy according to mphy and phy */
+ mt7915_init_wiphy(mphy->hw);
+ ret = mt7915_init_tx_queues(phy, MT_TXQ_ID(phy->band_idx),
+ MT7915_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(1));
if (ret)
goto error;
@@ -495,46 +561,88 @@ static void mt7915_init_work(struct work_struct *work)
mt7915_mac_init(dev);
mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
+ mt7915_init_txpower(dev, &dev->mphy.sband_6g.sband);
mt7915_txbf_init(dev);
}
static void mt7915_wfsys_reset(struct mt7915_dev *dev)
{
- u32 val = MT_TOP_PWR_KEY | MT_TOP_PWR_SW_PWR_ON | MT_TOP_PWR_PWR_ON;
-
#define MT_MCU_DUMMY_RANDOM GENMASK(15, 0)
#define MT_MCU_DUMMY_DEFAULT GENMASK(31, 16)
- mt76_wr(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_RANDOM);
+ if (is_mt7915(&dev->mt76)) {
+ u32 val = MT_TOP_PWR_KEY | MT_TOP_PWR_SW_PWR_ON | MT_TOP_PWR_PWR_ON;
- /* change to software control */
- val |= MT_TOP_PWR_SW_RST;
- mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+ mt76_wr(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_RANDOM);
- /* reset wfsys */
- val &= ~MT_TOP_PWR_SW_RST;
- mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+ /* change to software control */
+ val |= MT_TOP_PWR_SW_RST;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
- /* release wfsys then mcu re-excutes romcode */
- val |= MT_TOP_PWR_SW_RST;
- mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+ /* reset wfsys */
+ val &= ~MT_TOP_PWR_SW_RST;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
- /* switch to hw control */
- val &= ~MT_TOP_PWR_SW_RST;
- val |= MT_TOP_PWR_HW_CTRL;
- mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+ /* release wfsys then mcu re-executes romcode */
+ val |= MT_TOP_PWR_SW_RST;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
- /* check whether mcu resets to default */
- if (!mt76_poll_msec(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_DEFAULT,
- MT_MCU_DUMMY_DEFAULT, 1000)) {
- dev_err(dev->mt76.dev, "wifi subsystem reset failure\n");
- return;
+ /* switch to hw control */
+ val &= ~MT_TOP_PWR_SW_RST;
+ val |= MT_TOP_PWR_HW_CTRL;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+
+ /* check whether mcu resets to default */
+ if (!mt76_poll_msec(dev, MT_MCU_WFDMA0_DUMMY_CR,
+ MT_MCU_DUMMY_DEFAULT, MT_MCU_DUMMY_DEFAULT,
+ 1000)) {
+ dev_err(dev->mt76.dev, "wifi subsystem reset failure\n");
+ return;
+ }
+
+ /* wfsys reset won't clear host registers */
+ mt76_clear(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE);
+
+ msleep(100);
+ } else if (is_mt7986(&dev->mt76)) {
+ mt7986_wmac_disable(dev);
+ msleep(20);
+
+ mt7986_wmac_enable(dev);
+ msleep(20);
+ } else {
+ mt76_set(dev, MT_WF_SUBSYS_RST, 0x1);
+ msleep(20);
+
+ mt76_clear(dev, MT_WF_SUBSYS_RST, 0x1);
+ msleep(20);
}
+}
- /* wfsys reset won't clear host registers */
- mt76_clear(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE);
+static bool mt7915_band_config(struct mt7915_dev *dev)
+{
+ bool ret = true;
+
+ dev->phy.band_idx = 0;
+
+ if (is_mt7986(&dev->mt76)) {
+ u32 sku = mt7915_check_adie(dev, true);
- msleep(100);
+ /*
+ * for mt7986, dbdc support is determined by the number
+ * of adie chips and the main phy is bound to band1 when
+ * dbdc is disabled.
+ */
+ if (sku == MT7975_ONE_ADIE || sku == MT7976_ONE_ADIE) {
+ dev->phy.band_idx = 1;
+ ret = false;
+ }
+ } else {
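+ /* mt7915 straps dbdc in hardware (MT_HW_BOUND bit 5); mt7916 is always dbdc */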
+ ret = is_mt7915(&dev->mt76) ?
+ !!(mt76_rr(dev, MT_HW_BOUND) & BIT(5)) : true;
+ }
+
+ return ret;
}
static int mt7915_init_hardware(struct mt7915_dev *dev)
@@ -544,7 +652,8 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
INIT_WORK(&dev->init_work, mt7915_init_work);
- dev->dbdc_support = !!(mt76_rr(dev, MT_HW_BOUND) & BIT(5));
+
+ dev->dbdc_support = mt7915_band_config(dev);
/* If MCU was already running, it is likely in a bad state */
if (mt76_get_field(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE) >
@@ -557,12 +666,6 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
- /*
- * force firmware operation mode into normal state,
- * which should be set before firmware download stage.
- */
- mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
-
ret = mt7915_mcu_init(dev);
if (ret) {
/* Reset and try again */
@@ -577,7 +680,6 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
if (ret < 0)
return ret;
-
if (dev->flash_mode) {
ret = mt7915_mcu_apply_group_cal(dev);
if (ret)
@@ -626,11 +728,18 @@ void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy)
}
static void
-mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
+mt7915_set_stream_he_txbf_caps(struct mt7915_dev *dev,
+ struct ieee80211_sta_he_cap *he_cap,
int vif, int nss)
{
struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
- u8 c;
+ u8 c, nss_160;
+
+ /* mt7915 can only do 1/2 of the NSS streams in 160 MHz mode */
+ if (is_mt7915(&dev->mt76) && !dev->dbdc_support)
+ nss_160 = nss / 2;
+ else
+ nss_160 = nss;
#ifdef CONFIG_MAC80211_MESH
if (vif == NL80211_IFTYPE_MESH_POINT)
@@ -684,13 +793,21 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
/* num_snd_dim
* for mt7915, max supported nss is 2 for bw > 80MHz
*/
- c = (nss - 1) |
- IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2;
+ c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
+ nss - 1) |
+ FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
+ nss_160 - 1);
elem->phy_cap_info[5] |= c;
c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
elem->phy_cap_info[6] |= c;
+
+ if (!is_mt7915(&dev->mt76)) {
+ c = IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
+ elem->phy_cap_info[7] |= c;
+ }
}
static void
@@ -718,9 +835,17 @@ static int
mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
struct ieee80211_sband_iftype_data *data)
{
+ struct mt7915_dev *dev = phy->dev;
int i, idx = 0, nss = hweight8(phy->mt76->chainmask);
u16 mcs_map = 0;
u16 mcs_map_160 = 0;
+ u8 nss_160;
+
+ /* mt7915 can only do 1/2 of the NSS streams in 160 MHz mode */
+ if (is_mt7915(&dev->mt76) && !dev->dbdc_support)
+ nss_160 = nss / 2;
+ else
+ nss_160 = nss;
for (i = 0; i < 8; i++) {
if (i < nss)
@@ -728,8 +853,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
else
mcs_map |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
- /* Can do 1/2 of NSS streams in 160Mhz mode. */
- if (i < nss / 2)
+ if (i < nss_160)
mcs_map_160 |= (IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2));
else
mcs_map_160 |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
@@ -767,7 +891,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
if (band == NL80211_BAND_2GHZ)
he_cap_elem->phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
- else if (band == NL80211_BAND_5GHZ)
+ else
he_cap_elem->phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
@@ -806,7 +930,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
if (band == NL80211_BAND_2GHZ)
he_cap_elem->phy_cap_info[0] |=
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G;
- else if (band == NL80211_BAND_5GHZ)
+ else
he_cap_elem->phy_cap_info[0] |=
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G;
@@ -845,7 +969,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map_160);
he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map_160);
- mt7915_set_stream_he_txbf_caps(he_cap, i, nss);
+ mt7915_set_stream_he_txbf_caps(dev, he_cap, i, nss);
memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
if (he_cap_elem->phy_cap_info[6] &
@@ -856,6 +980,21 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
}
+
+ if (band == NL80211_BAND_6GHZ) {
+ u16 cap = IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS |
+ IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
+
+ cap |= u16_encode_bits(IEEE80211_HT_MPDU_DENSITY_8,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
+ u16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
+ u16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+ IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
+
+ data[idx].he_6ghz_capa.capa = cpu_to_le16(cap);
+ }
+
idx++;
}
@@ -885,6 +1024,15 @@ void mt7915_set_stream_he_caps(struct mt7915_phy *phy)
band->iftype_data = data;
band->n_iftype_data = n;
}
+
+ if (phy->mt76->cap.has_6ghz) {
+ data = phy->iftype[NL80211_BAND_6GHZ];
+ n = mt7915_init_he_caps(phy, NL80211_BAND_6GHZ, data);
+
+ band = &phy->mt76->sband_6g.sband;
+ band->iftype_data = data;
+ band->n_iftype_data = n;
+ }
}
static void mt7915_unregister_ext_phy(struct mt7915_dev *dev)
@@ -924,15 +1072,6 @@ int mt7915_register_device(struct mt7915_dev *dev)
mt7915_init_wiphy(hw);
- if (!dev->dbdc_support)
- dev->mphy.sband_5g.sband.vht_cap.cap |=
- IEEE80211_VHT_CAP_SHORT_GI_160 |
- IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
-
- dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask;
- dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask;
- dev->phy.dfs_state = -1;
-
#ifdef CONFIG_NL80211_TESTMODE
dev->mt76.test_ops = &mt7915_testmode_ops;
#endif
@@ -971,5 +1110,8 @@ void mt7915_unregister_device(struct mt7915_dev *dev)
mt7915_dma_cleanup(dev);
tasklet_disable(&dev->irq_tasklet);
+ if (is_mt7986(&dev->mt76))
+ mt7986_wmac_disable(dev);
+
mt76_free_device(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 48f115502282..e9e7efbf350d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -165,7 +165,7 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
sta = container_of((void *)msta, struct ieee80211_sta,
drv_priv);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- u8 q = mt7915_lmac_mapping(dev, i);
+ u8 q = mt76_connac_lmac_mapping(i);
u32 tx_cur = tx_time[q];
u32 rx_cur = rx_time[q];
u8 tid = ac_to_tid[i];
@@ -226,8 +226,8 @@ mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
u32 ru_h, ru_l;
u8 ru, offs = 0;
- ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0]));
- ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1]));
+ ru_l = le32_get_bits(rxv[0], MT_PRXV_HE_RU_ALLOC_L);
+ ru_h = le32_get_bits(rxv[1], MT_PRXV_HE_RU_ALLOC_H);
ru = (u8)(ru_l | ru_h << 4);
status->bw = RATE_INFO_BW_HE_RU;
@@ -349,14 +349,16 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode)
case MT_PHY_TYPE_HE_SU:
he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
HE_BITS(DATA1_UL_DL_KNOWN) |
- HE_BITS(DATA1_BEAM_CHANGE_KNOWN);
+ HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
+ HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
break;
case MT_PHY_TYPE_HE_EXT_SU:
he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
- HE_BITS(DATA1_UL_DL_KNOWN);
+ HE_BITS(DATA1_UL_DL_KNOWN) |
+ HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
break;
@@ -376,7 +378,8 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode)
HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
- he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
+ he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
+ HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);
@@ -391,15 +394,15 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode)
static int mt7915_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
struct mt7915_sta *msta = (struct mt7915_sta *)status->wcid;
+ __le32 *rxd = (__le32 *)skb->data;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct ieee80211_hdr hdr;
- struct ethhdr eth_hdr;
- __le32 *rxd = (__le32 *)skb->data;
- __le32 qos_ctrl, ht_ctrl;
+ u16 frame_control;
- if (FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[3])) !=
+ if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
MT_RXD3_NORMAL_U2M)
return -EINVAL;
@@ -413,47 +416,52 @@ static int mt7915_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
/* store the info from RXD and ethhdr to avoid being overridden */
- memcpy(&eth_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
- hdr.frame_control = FIELD_GET(MT_RXD6_FRAME_CONTROL, rxd[6]);
- hdr.seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, rxd[8]);
- qos_ctrl = FIELD_GET(MT_RXD8_QOS_CTL, rxd[8]);
- ht_ctrl = FIELD_GET(MT_RXD9_HT_CONTROL, rxd[9]);
-
+ frame_control = le32_get_bits(rxd[6], MT_RXD6_FRAME_CONTROL);
+ hdr.frame_control = cpu_to_le16(frame_control);
+ hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_SEQ_CTRL));
hdr.duration_id = 0;
+
ether_addr_copy(hdr.addr1, vif->addr);
ether_addr_copy(hdr.addr2, sta->addr);
- switch (le16_to_cpu(hdr.frame_control) &
- (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
+ switch (frame_control & (IEEE80211_FCTL_TODS |
+ IEEE80211_FCTL_FROMDS)) {
case 0:
ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
break;
case IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_source);
break;
case IEEE80211_FCTL_TODS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
break;
case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
- ether_addr_copy(hdr.addr4, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
+ ether_addr_copy(hdr.addr4, eth_hdr->h_source);
break;
default:
break;
}
skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
- if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
- eth_hdr.h_proto == htons(ETH_P_IPX))
+ if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
+ eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
- else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
+ else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
else
skb_pull(skb, 2);
if (ieee80211_has_order(hdr.frame_control))
- memcpy(skb_push(skb, 2), &ht_ctrl, 2);
- if (ieee80211_is_data_qos(hdr.frame_control))
- memcpy(skb_push(skb, 2), &qos_ctrl, 2);
+ memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[9],
+ IEEE80211_HT_CTL_LEN);
+ if (ieee80211_is_data_qos(hdr.frame_control)) {
+ __le16 qos_ctrl;
+
+ qos_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_QOS_CTL));
+ memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
+ IEEE80211_QOS_CTL_LEN);
+ }
+
if (ieee80211_has_a4(hdr.frame_control))
memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
else
@@ -463,6 +471,108 @@ static int mt7915_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
}
static int
+mt7915_mac_fill_rx_rate(struct mt7915_dev *dev,
+ struct mt76_rx_status *status,
+ struct ieee80211_supported_band *sband,
+ __le32 *rxv)
+{
+ u32 v0, v2;
+ u8 stbc, gi, bw, dcm, mode, nss;
+ int i, idx;
+ bool cck = false;
+
+ v0 = le32_to_cpu(rxv[0]);
+ v2 = le32_to_cpu(rxv[2]);
+
+ idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
+ nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;
+
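+ /* newer chips report the rate info in P-RXV (v0); mt7915 keeps it in C-RXV (v2) */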
+ if (!is_mt7915(&dev->mt76)) {
+ stbc = FIELD_GET(MT_PRXV_HT_STBC, v0);
+ gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v0);
+ mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
+ dcm = FIELD_GET(MT_PRXV_DCM, v0);
+ bw = FIELD_GET(MT_PRXV_FRAME_MODE, v0);
+ } else {
+ stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
+ gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
+ mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
+ dcm = !!(idx & GENMASK(3, 0) & MT_PRXV_TX_DCM);
+ bw = FIELD_GET(MT_CRXV_FRAME_MODE, v2);
+ }
+
+ switch (mode) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ fallthrough;
+ case MT_PHY_TYPE_OFDM:
+ i = mt76_get_rate(&dev->mt76, sband, i, cck);
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ if (gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (i > 31)
+ return -EINVAL;
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->nss = nss;
+ status->encoding = RX_ENC_VHT;
+ if (gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (i > 9)
+ return -EINVAL;
+ break;
+ case MT_PHY_TYPE_HE_MU:
+ case MT_PHY_TYPE_HE_SU:
+ case MT_PHY_TYPE_HE_EXT_SU:
+ case MT_PHY_TYPE_HE_TB:
+ status->nss = nss;
+ status->encoding = RX_ENC_HE;
+ i &= GENMASK(3, 0);
+
+ if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
+ status->he_gi = gi;
+
+ status->he_dcm = dcm;
+ break;
+ default:
+ return -EINVAL;
+ }
+ status->rate_idx = i;
+
+ switch (bw) {
+ case IEEE80211_STA_RX_BW_20:
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ if (mode & MT_PHY_TYPE_HE_EXT_SU &&
+ (idx & MT_PRXV_TX_ER_SU_106T)) {
+ status->bw = RATE_INFO_BW_HE_RU;
+ status->he_ru =
+ NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ } else {
+ status->bw = RATE_INFO_BW_40;
+ }
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ status->bw = RATE_INFO_BW_160;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
+ if (mode < MT_PHY_TYPE_HE_SU && gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ return 0;
+}
+
+static int
mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
@@ -485,11 +595,11 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
u16 seq_ctrl = 0;
u8 qos_ctl = 0;
__le16 fc = 0;
- int i, idx;
+ int idx;
memset(status, 0, sizeof(*status));
- if (rxd1 & MT_RXD1_NORMAL_BAND_IDX) {
+ if ((rxd1 & MT_RXD1_NORMAL_BAND_IDX) && !phy->band_idx) {
mphy = dev->mt76.phy2;
if (!mphy)
return -EINVAL;
@@ -530,6 +640,8 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
status->band = mphy->chandef.chan->band;
if (status->band == NL80211_BAND_5GHZ)
sband = &mphy->sband_5g.sband;
+ else if (status->band == NL80211_BAND_6GHZ)
+ sband = &mphy->sband_6g.sband;
else
sband = &mphy->sband_2g.sband;
@@ -626,7 +738,8 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
/* RXD Group 3 - P-RXV */
if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
- u32 v0, v1, v2;
+ u32 v0, v1;
+ int ret;
rxv = rxd;
rxd += 2;
@@ -635,7 +748,6 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
v0 = le32_to_cpu(rxv[0]);
v1 = le32_to_cpu(rxv[1]);
- v2 = le32_to_cpu(rxv[2]);
if (v0 & MT_PRXV_HT_AD_CODE)
status->enc_flags |= RX_ENC_FLAG_LDPC;
@@ -645,94 +757,18 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
- status->signal = status->chain_signal[0];
-
- for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
- if (!(status->chains & BIT(i)))
- continue;
-
- status->signal = max(status->signal,
- status->chain_signal[i]);
- }
/* RXD Group 5 - C-RXV */
if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
- u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
- u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
- bool cck = false;
-
rxd += 18;
if ((u8 *)rxd - skb->data >= skb->len)
return -EINVAL;
+ }
- idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
- mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
-
- switch (mode) {
- case MT_PHY_TYPE_CCK:
- cck = true;
- fallthrough;
- case MT_PHY_TYPE_OFDM:
- i = mt76_get_rate(&dev->mt76, sband, i, cck);
- break;
- case MT_PHY_TYPE_HT_GF:
- case MT_PHY_TYPE_HT:
- status->encoding = RX_ENC_HT;
- if (i > 31)
- return -EINVAL;
- break;
- case MT_PHY_TYPE_VHT:
- status->nss =
- FIELD_GET(MT_PRXV_NSTS, v0) + 1;
- status->encoding = RX_ENC_VHT;
- if (i > 9)
- return -EINVAL;
- break;
- case MT_PHY_TYPE_HE_MU:
- case MT_PHY_TYPE_HE_SU:
- case MT_PHY_TYPE_HE_EXT_SU:
- case MT_PHY_TYPE_HE_TB:
- status->nss =
- FIELD_GET(MT_PRXV_NSTS, v0) + 1;
- status->encoding = RX_ENC_HE;
- i &= GENMASK(3, 0);
-
- if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
- status->he_gi = gi;
-
- status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
- break;
- default:
- return -EINVAL;
- }
- status->rate_idx = i;
-
- switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) {
- case IEEE80211_STA_RX_BW_20:
- break;
- case IEEE80211_STA_RX_BW_40:
- if (mode & MT_PHY_TYPE_HE_EXT_SU &&
- (idx & MT_PRXV_TX_ER_SU_106T)) {
- status->bw = RATE_INFO_BW_HE_RU;
- status->he_ru =
- NL80211_RATE_INFO_HE_RU_ALLOC_106;
- } else {
- status->bw = RATE_INFO_BW_40;
- }
- break;
- case IEEE80211_STA_RX_BW_80:
- status->bw = RATE_INFO_BW_80;
- break;
- case IEEE80211_STA_RX_BW_160:
- status->bw = RATE_INFO_BW_160;
- break;
- default:
- return -EINVAL;
- }
-
- status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
- if (mode < MT_PHY_TYPE_HE_SU && gi)
- status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (!is_mt7915(&dev->mt76) || (rxd1 & MT_RXD1_NORMAL_GROUP_5)) {
+ ret = mt7915_mac_fill_rx_rate(dev, status, sband, rxv);
+ if (ret < 0)
+ return ret;
}
}
@@ -801,6 +837,10 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
if (!status->wcid || !ieee80211_is_data_qos(fc))
return 0;
+ /* drop null function (no data) frames */
+ if (fc & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))
+ return -EINVAL;
+
status->aggr = unicast &&
!ieee80211_is_qos_nullfunc(fc);
status->qos_ctl = qos_ctl;
@@ -818,13 +858,13 @@ mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
__le32 *rxv_hdr = rxd + 2;
__le32 *rxv = rxd + 4;
u32 rcpi, ib_rssi, wb_rssi, v20, v21;
- bool ext_phy;
+ u8 band_idx;
s32 foe;
u8 snr;
int i;
- ext_phy = FIELD_GET(MT_RXV_HDR_BAND_IDX, le32_to_cpu(rxv_hdr[1]));
- if (ext_phy)
+ band_idx = le32_get_bits(rxv_hdr[1], MT_RXV_HDR_BAND_IDX);
+ if (band_idx && !phy->band_idx)
phy = mt7915_ext_phy(dev);
rcpi = le32_to_cpu(rxv[6]);
@@ -1065,6 +1105,7 @@ mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi,
if (ieee80211_is_beacon(fc)) {
txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
+ txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX, 0x18));
}
if (info->flags & IEEE80211_TX_CTL_INJECTED) {
@@ -1080,6 +1121,7 @@ mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi,
val = MT_TXD3_SN_VALID |
FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
txwi[3] |= cpu_to_le32(val);
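+ /* frames with a driver-assigned seqno must not be hw-amsdu aggregated */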
+ txwi[7] &= ~cpu_to_le32(MT_TXD7_HW_AMSDU);
}
val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
@@ -1140,7 +1182,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
struct ieee80211_vif *vif = info->control.vif;
struct mt76_phy *mphy = &dev->mphy;
bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
- u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
+ u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0, band_idx = 0;
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
bool mcast = false;
u16 tx_count = 15;
@@ -1151,6 +1193,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
omac_idx = mvif->mt76.omac_idx;
wmm_idx = mvif->mt76.wmm_idx;
+ band_idx = mvif->mt76.band_idx;
}
if (ext_phy && dev->mt76.phy2)
@@ -1165,7 +1208,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
} else {
p_fmt = MT_TX_TYPE_CT;
q_idx = wmm_idx * MT7915_MAX_WMM_SETS +
- mt7915_lmac_mapping(dev, skb_get_queue_mapping(skb));
+ mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
}
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
@@ -1177,7 +1220,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
- if (ext_phy && q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
+ if (ext_phy || band_idx)
val |= MT_TXD1_TGID;
txwi[1] = cpu_to_le32(val);
@@ -1311,10 +1354,10 @@ mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
u16 fc, tid;
u32 val;
- if (!sta || !sta->ht_cap.ht_supported)
+ if (!sta || !(sta->ht_cap.ht_supported || sta->he_cap.has_he))
return;
- tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
+ tid = le32_get_bits(txwi[1], MT_TXD1_TID);
if (tid >= 6) /* skip VO queue */
return;
@@ -1362,7 +1405,7 @@ mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
mt7915_tx_check_aggr(sta, txwi);
} else {
- wcid_idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1]));
+ wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
}
__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
@@ -1383,8 +1426,10 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
LIST_HEAD(free_list);
struct sk_buff *skb, *tmp;
void *end = data + len;
- u8 i, count;
- bool wake = false;
+ bool v3, wake = false;
+ u16 total, count = 0;
+ u32 txd = le32_to_cpu(free->txd);
+ __le32 *cur_info;
/* clean DMA queues and unmap buffers first */
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
@@ -1394,17 +1439,14 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
}
- /*
- * TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE,
- * to the time ack is received or dropped by hw (air + hw queue time).
- * Should avoid accessing WTBL to get Tx airtime, and use it instead.
- */
- count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
- if (WARN_ON_ONCE((void *)&free->info[count] > end))
+ total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
+ v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);
+ if (WARN_ON_ONCE((void *)&free->info[total >> v3] > end))
return;
- for (i = 0; i < count; i++) {
- u32 msdu, info = le32_to_cpu(free->info[i]);
+ for (cur_info = &free->info[0]; count < total; cur_info++) {
+ u32 msdu, info = le32_to_cpu(*cur_info);
+ u8 i;
/*
* 1'b1: new wcid pair.
@@ -1415,7 +1457,6 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
struct mt76_wcid *wcid;
u16 idx;
- count++;
idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
wcid = rcu_dereference(dev->mt76.wcid[idx]);
sta = wcid_to_sta(wcid);
@@ -1430,12 +1471,24 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
continue;
}
- msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
- txwi = mt76_token_release(mdev, msdu, &wake);
- if (!txwi)
+ if (v3 && (info & MT_TX_FREE_MPDU_HEADER))
continue;
- mt7915_txwi_free(dev, txwi, sta, &free_list);
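+ /* the v3 format packs two 15-bit msdu ids into each info word */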
+ for (i = 0; i < 1 + v3; i++) {
+ if (v3) {
+ msdu = (info >> (15 * i)) & MT_TX_FREE_MSDU_ID_V3;
+ if (msdu == MT_TX_FREE_MSDU_ID_V3)
+ continue;
+ } else {
+ msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
+ }
+ count++;
+ txwi = mt76_token_release(mdev, msdu, &wake);
+ if (!txwi)
+ continue;
+
+ mt7915_txwi_free(dev, txwi, sta, &free_list);
+ }
}
mt7915_mac_sta_poll(dev);
@@ -1504,6 +1557,8 @@ mt7915_mac_add_txs_skb(struct mt7915_dev *dev, struct mt76_wcid *wcid, int pid,
if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
sband = &mphy->sband_5g.sband;
+ else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
+ sband = &mphy->sband_6g.sband;
else
sband = &mphy->sband_2g.sband;
@@ -1512,7 +1567,6 @@ mt7915_mac_add_txs_skb(struct mt7915_dev *dev, struct mt76_wcid *wcid, int pid,
break;
case MT_PHY_TYPE_HT:
case MT_PHY_TYPE_HT_GF:
- rate.mcs += (rate.nss - 1) * 8;
if (rate.mcs > 31)
goto out;
@@ -1578,23 +1632,18 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
struct mt76_wcid *wcid;
__le32 *txs_data = data;
u16 wcidx;
- u32 txs;
u8 pid;
- txs = le32_to_cpu(txs_data[0]);
- if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1)
+ if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
return;
- txs = le32_to_cpu(txs_data[2]);
- wcidx = FIELD_GET(MT_TXS2_WCID, txs);
-
- txs = le32_to_cpu(txs_data[3]);
- pid = FIELD_GET(MT_TXS3_PID, txs);
+ wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
+ pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
if (pid < MT_PACKET_ID_FIRST)
return;
- if (wcidx >= MT7915_WTBL_SIZE)
+ if (wcidx >= mt7915_wtbl_size(dev))
return;
rcu_read_lock();
@@ -1626,7 +1675,8 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
__le32 *end = (__le32 *)&rxd[len / 4];
enum rx_pkt_type type;
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+
switch (type) {
case PKT_TYPE_TXRX_NOTIFY:
mt7915_mac_tx_free(dev, data, len);
@@ -1635,6 +1685,9 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
for (rxd += 2; rxd + 8 <= end; rxd += 8)
mt7915_mac_add_txs(dev, rxd);
return false;
+ case PKT_TYPE_RX_FW_MONITOR:
+ mt7915_debugfs_rx_fw_monitor(dev, data, len);
+ return false;
default:
return true;
}
@@ -1648,7 +1701,7 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
__le32 *end = (__le32 *)&skb->data[skb->len];
enum rx_pkt_type type;
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
switch (type) {
case PKT_TYPE_TXRX_NOTIFY:
@@ -1666,6 +1719,10 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
mt7915_mac_add_txs(dev, rxd);
dev_kfree_skb(skb);
break;
+ case PKT_TYPE_RX_FW_MONITOR:
+ mt7915_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
+ dev_kfree_skb(skb);
+ break;
case PKT_TYPE_NORMAL:
if (!mt7915_mac_fill_rx(dev, skb)) {
mt76_rx(&dev->mt76, q, skb);
@@ -1702,8 +1759,7 @@ void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
- u32 reg = MT_WF_PHY_RX_CTRL1(ext_phy);
+ u32 reg = MT_WF_PHY_RX_CTRL1(phy->band_idx);
mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
mt76_set(dev, reg, BIT(11) | BIT(9));
@@ -1712,25 +1768,22 @@ void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
void mt7915_mac_reset_counters(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
int i;
for (i = 0; i < 4; i++) {
- mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
- mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
+ mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
+ mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i));
}
- if (ext_phy) {
- dev->mt76.phy2->survey_time = ktime_get_boottime();
+ i = 0;
+ phy->mt76->survey_time = ktime_get_boottime();
+ if (phy->band_idx)
i = ARRAY_SIZE(dev->mt76.aggr_stats) / 2;
- } else {
- dev->mt76.phy.survey_time = ktime_get_boottime();
- i = 0;
- }
+
memset(&dev->mt76.aggr_stats[i], 0, sizeof(dev->mt76.aggr_stats) / 2);
/* reset airtime counters */
- mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(ext_phy),
+ mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(phy->band_idx),
MT_WF_RMAC_MIB_RXTIME_CLR);
mt7915_mcu_get_chan_mib_info(phy, true);
@@ -1740,29 +1793,23 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy)
{
s16 coverage_class = phy->coverage_class;
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
+ struct mt7915_phy *ext_phy = mt7915_ext_phy(dev);
u32 val, reg_offset;
u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
int offset;
- bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;
+ bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);
if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
return;
- if (ext_phy) {
+ if (ext_phy)
coverage_class = max_t(s16, dev->phy.coverage_class,
- coverage_class);
- } else {
- struct mt7915_phy *phy_ext = mt7915_ext_phy(dev);
+ ext_phy->coverage_class);
- if (phy_ext)
- coverage_class = max_t(s16, phy_ext->coverage_class,
- coverage_class);
- }
- mt76_set(dev, MT_ARB_SCR(ext_phy),
+ mt76_set(dev, MT_ARB_SCR(phy->band_idx),
MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
udelay(1);
@@ -1770,35 +1817,40 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy)
reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
- mt76_wr(dev, MT_TMAC_CDTR(ext_phy), cck + reg_offset);
- mt76_wr(dev, MT_TMAC_ODTR(ext_phy), ofdm + reg_offset);
- mt76_wr(dev, MT_TMAC_ICR0(ext_phy),
- FIELD_PREP(MT_IFS_EIFS_OFDM, is_5ghz ? 84 : 78) |
+ mt76_wr(dev, MT_TMAC_CDTR(phy->band_idx), cck + reg_offset);
+ mt76_wr(dev, MT_TMAC_ODTR(phy->band_idx), ofdm + reg_offset);
+ mt76_wr(dev, MT_TMAC_ICR0(phy->band_idx),
+ FIELD_PREP(MT_IFS_EIFS_OFDM, a_band ? 84 : 78) |
FIELD_PREP(MT_IFS_RIFS, 2) |
FIELD_PREP(MT_IFS_SIFS, 10) |
FIELD_PREP(MT_IFS_SLOT, phy->slottime));
- mt76_wr(dev, MT_TMAC_ICR1(ext_phy),
+ mt76_wr(dev, MT_TMAC_ICR1(phy->band_idx),
FIELD_PREP(MT_IFS_EIFS_CCK, 314));
- if (phy->slottime < 20 || is_5ghz)
+ if (phy->slottime < 20 || a_band)
val = MT7915_CFEND_RATE_DEFAULT;
else
val = MT7915_CFEND_RATE_11B;
- mt76_rmw_field(dev, MT_AGG_ACR0(ext_phy), MT_AGG_ACR_CFEND_RATE, val);
- mt76_clear(dev, MT_ARB_SCR(ext_phy),
+ mt76_rmw_field(dev, MT_AGG_ACR0(phy->band_idx), MT_AGG_ACR_CFEND_RATE, val);
+ mt76_clear(dev, MT_ARB_SCR(phy->band_idx),
MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}
void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy)
{
- mt76_set(dev, MT_WF_PHY_RXTD12(ext_phy),
+ u32 reg;
+
+ reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(ext_phy) :
+ MT_WF_PHY_RXTD12_MT7916(ext_phy);
+ mt76_set(dev, reg,
MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY |
MT_WF_PHY_RXTD12_IRPI_SW_CLR);
- mt76_set(dev, MT_WF_PHY_RX_CTRL1(ext_phy),
- FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
+ reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(ext_phy) :
+ MT_WF_PHY_RX_CTRL1_MT7916(ext_phy);
+ mt76_set(dev, reg, FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
}
static u8
@@ -1810,7 +1862,9 @@ mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
int nss, i;
for (nss = 0; nss < hweight8(phy->mt76->chainmask); nss++) {
- u32 reg = MT_WF_IRPI(nss + (idx << dev->dbdc_support));
+ u32 reg = is_mt7915(&dev->mt76) ?
+ MT_WF_IRPI_NSS(0, nss + (idx << dev->dbdc_support)) :
+ MT_WF_IRPI_NSS_MT7916(idx, nss);
for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
val = mt76_rr(dev, reg);
@@ -1829,12 +1883,11 @@ void mt7915_update_channel(struct mt76_phy *mphy)
{
struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv;
struct mt76_channel_state *state = mphy->chan_state;
- bool ext_phy = phy != &phy->dev->phy;
int nf;
mt7915_mcu_get_chan_mib_info(phy, false);
- nf = mt7915_phy_get_nf(phy, ext_phy);
+ nf = mt7915_phy_get_nf(phy, phy->band_idx);
if (!phy->noise)
phy->noise = nf << 4;
else if (nf)
@@ -1891,20 +1944,26 @@ static void
mt7915_dma_reset(struct mt7915_dev *dev)
{
struct mt76_phy *mphy_ext = dev->mt76.phy2;
- u32 hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
+ u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
int i;
mt76_clear(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
- mt76_clear(dev, MT_WFDMA1_GLO_CFG,
- MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ if (is_mt7915(&dev->mt76))
+ mt76_clear(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN);
if (dev->hif2) {
mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
- (MT_WFDMA0_GLO_CFG_TX_DMA_EN |
- MT_WFDMA0_GLO_CFG_RX_DMA_EN));
- mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
- (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
- MT_WFDMA1_GLO_CFG_RX_DMA_EN));
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ if (is_mt7915(&dev->mt76))
+ mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN);
}
usleep_range(1000, 2000);
@@ -1928,19 +1987,23 @@ mt7915_dma_reset(struct mt7915_dev *dev)
mt76_set(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
- mt76_set(dev, MT_WFDMA1_GLO_CFG,
- MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN |
- MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
- MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
- if (dev->hif2) {
- mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
- (MT_WFDMA0_GLO_CFG_TX_DMA_EN |
- MT_WFDMA0_GLO_CFG_RX_DMA_EN));
- mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
- (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ if (is_mt7915(&dev->mt76))
+ mt76_set(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
MT_WFDMA1_GLO_CFG_RX_DMA_EN |
MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
- MT_WFDMA1_GLO_CFG_OMIT_RX_INFO));
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
+ if (dev->hif2) {
+ mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ if (is_mt7915(&dev->mt76))
+ mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
}
}
@@ -2050,106 +2113,96 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
struct mib_stats *mib = &phy->mib;
- bool ext_phy = phy != &dev->phy;
int i, aggr0, aggr1, cnt;
+ u32 val;
- mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
- MT_MIB_SDR3_FCS_ERR_MASK);
+ cnt = mt76_rr(dev, MT_MIB_SDR3(phy->band_idx));
+ mib->fcs_err_cnt += is_mt7915(&dev->mt76) ? FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR4(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR4(phy->band_idx));
mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR5(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR5(phy->band_idx));
mib->rx_mpdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR6(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR6(phy->band_idx));
mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR7(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR7(phy->band_idx));
mib->rx_vector_mismatch_cnt += FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR8(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR8(phy->band_idx));
mib->rx_delimiter_fail_cnt += FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR11(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR11(phy->band_idx));
mib->rx_len_mismatch_cnt += FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR12(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR12(phy->band_idx));
mib->tx_ampdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR13(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR13(phy->band_idx));
mib->tx_stop_q_empty_cnt += FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR14(ext_phy));
- mib->tx_mpdu_attempts_cnt += FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDR14(phy->band_idx));
+ mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR15(ext_phy));
- mib->tx_mpdu_success_cnt += FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDR15(phy->band_idx));
+ mib->tx_mpdu_success_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR22(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR22(phy->band_idx));
mib->rx_ampdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR23(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR23(phy->band_idx));
mib->rx_ampdu_bytes_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR24(ext_phy));
- mib->rx_ampdu_valid_subframe_cnt += FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDR24(phy->band_idx));
+ mib->rx_ampdu_valid_subframe_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR25(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR25(phy->band_idx));
mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR27(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR27(phy->band_idx));
mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR28(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR28(phy->band_idx));
mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR29(ext_phy));
- mib->rx_pfdrop_cnt += FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDR29(phy->band_idx));
+ mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR30(ext_phy));
- mib->rx_vec_queue_overflow_drop_cnt +=
- FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDRVEC(phy->band_idx));
+ mib->rx_vec_queue_overflow_drop_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR31(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR31(phy->band_idx));
mib->rx_ba_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR32(ext_phy));
- mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT_MASK, cnt);
-
- cnt = mt76_rr(dev, MT_MIB_SDR33(ext_phy));
- mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT_MASK, cnt);
-
- cnt = mt76_rr(dev, MT_MIB_SDR34(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDRMUBF(phy->band_idx));
mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt);
- cnt = mt76_rr(dev, MT_MIB_DR8(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_DR8(phy->band_idx));
mib->tx_mu_mpdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_DR9(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_DR9(phy->band_idx));
mib->tx_mu_acked_mpdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_DR11(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_DR11(phy->band_idx));
mib->tx_su_acked_mpdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(ext_phy));
- mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt);
- mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt);
-
- cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(ext_phy));
- mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt);
- mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt);
- mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt);
- mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt);
-
- cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(ext_phy));
- mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
- mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
- mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);
-
- cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(ext_phy));
- mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt);
- mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt);
+ cnt = mt76_rr(dev, MT_ETBF_PAR_RPT0(phy->band_idx));
+ mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_PAR_RPT0_FB_BW, cnt);
+ mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NC, cnt);
+ mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NR, cnt);
for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
@@ -2157,27 +2210,97 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
mib->tx_amsdu_cnt += cnt;
}
- aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
- for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
- u32 val;
+ aggr0 = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ if (is_mt7915(&dev->mt76)) {
+ for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
+ val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 4)));
+ mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
+ mib->ack_fail_cnt +=
+ FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
+
+ val = mt76_rr(dev, MT_MIB_MB_SDR0(phy->band_idx, (i << 4)));
+ mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
+ mib->rts_retries_cnt +=
+ FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
- val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
- mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
- mib->ack_fail_cnt +=
- FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
+ val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
+ dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
+ dev->mt76.aggr_stats[aggr0++] += val >> 16;
- val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
- mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
- mib->rts_retries_cnt +=
- FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
+ val = mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i));
+ dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
+ dev->mt76.aggr_stats[aggr1++] += val >> 16;
+ }
+
+ cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx));
+ mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR33(phy->band_idx));
+ mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT, cnt);
- val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
- dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
- dev->mt76.aggr_stats[aggr0++] += val >> 16;
+ cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(phy->band_idx));
+ mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt);
+ mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt);
- val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
- dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
- dev->mt76.aggr_stats[aggr1++] += val >> 16;
+ cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(phy->band_idx));
+ mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt);
+ mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt);
+
+ cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(phy->band_idx));
+ mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt);
+ mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt);
+ mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt);
+ mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt);
+ } else {
+ for (i = 0; i < 2; i++) {
+ /* rts count */
+ val = mt76_rr(dev, MT_MIB_MB_SDR0(phy->band_idx, (i << 2)));
+ mib->rts_cnt += FIELD_GET(GENMASK(15, 0), val);
+ mib->rts_cnt += FIELD_GET(GENMASK(31, 16), val);
+
+ /* rts retry count */
+ val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 2)));
+ mib->rts_retries_cnt += FIELD_GET(GENMASK(15, 0), val);
+ mib->rts_retries_cnt += FIELD_GET(GENMASK(31, 16), val);
+
+ /* ba miss count */
+ val = mt76_rr(dev, MT_MIB_MB_SDR2(phy->band_idx, (i << 2)));
+ mib->ba_miss_cnt += FIELD_GET(GENMASK(15, 0), val);
+ mib->ba_miss_cnt += FIELD_GET(GENMASK(31, 16), val);
+
+ /* ack fail count */
+ val = mt76_rr(dev, MT_MIB_MB_BFTF(phy->band_idx, (i << 2)));
+ mib->ack_fail_cnt += FIELD_GET(GENMASK(15, 0), val);
+ mib->ack_fail_cnt += FIELD_GET(GENMASK(31, 16), val);
+ }
+
+ for (i = 0; i < 8; i++) {
+ val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
+ dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val);
+ dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val);
+ }
+
+ cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx));
+ mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
+ mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
+ mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
+ mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_BFCR7(phy->band_idx));
+ mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_MIB_BFCR7_BFEE_TX_FB_CPL, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_BFCR2(phy->band_idx));
+ mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_MIB_BFCR2_BFEE_TX_FB_TRIG, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_BFCR0(phy->band_idx));
+ mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
+ mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
+ mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
+ mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_BFCR1(phy->band_idx));
+ mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
+ mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
}
}
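
The hunk above repeats one idiom throughout: read a shared MIB register, then extract the counter with a field mask that differs between chip generations. A minimal standalone model of that idiom, with made-up register contents and masks (the real masks live in the driver's regs.h):

#include <stdio.h>
#include <stdint.h>

/* userspace stand-ins for the kernel's GENMASK()/FIELD_GET() */
#define GENMASK(h, l)   (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(m, v) (((v) & (m)) / ((m) & ~((m) << 1)))

#define CNT_MASK_MT7915 GENMASK(15, 0)	/* assumed field layout */
#define CNT_MASK_MT7916 GENMASK(23, 0)	/* assumed field layout */

int main(void)
{
	uint32_t reg = 0x00a1b2c3;	/* pretend mt76_rr() result */
	int is_mt7915 = 0;		/* chip generation flag */
	uint32_t cnt = is_mt7915 ? FIELD_GET(CNT_MASK_MT7915, reg)
				 : FIELD_GET(CNT_MASK_MT7916, reg);

	printf("counter += %u\n", cnt);
	return 0;
}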
@@ -2248,39 +2371,59 @@ static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
struct mt7915_dev *dev = phy->dev;
if (phy->rdd_state & BIT(0))
- mt7915_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
+ MT_RX_SEL0, 0);
if (phy->rdd_state & BIT(1))
- mt7915_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
+ MT_RX_SEL0, 0);
}
static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
{
- int err;
+ int err, region;
- err = mt7915_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
+ switch (dev->mt76.region) {
+ case NL80211_DFS_ETSI:
+ region = 0;
+ break;
+ case NL80211_DFS_JP:
+ region = 2;
+ break;
+ case NL80211_DFS_FCC:
+ default:
+ region = 1;
+ break;
+ }
+
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
+ MT_RX_SEL0, region);
if (err < 0)
return err;
- return mt7915_mcu_rdd_cmd(dev, RDD_DET_MODE, chain, MT_RX_SEL0, 1);
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
+ MT_RX_SEL0, 1);
}
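
For reference, the switch above collapses the NL80211 DFS regions onto the firmware's numeric region codes, with FCC doubling as the fallback. A freestanding sketch of the same mapping (the enum is a stand-in for the kernel's nl80211 DFS regions; the codes 0/1/2 come straight from the hunk):

enum dfs_region { DFS_UNSET, DFS_FCC, DFS_ETSI, DFS_JP };

static int mcu_rdd_region(enum dfs_region region)
{
	switch (region) {
	case DFS_ETSI:
		return 0;
	case DFS_JP:
		return 2;
	case DFS_FCC:
	default:
		return 1;	/* FCC rules double as the default */
	}
}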
static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
{
struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
int err;
/* start CAC */
- err = mt7915_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, phy->band_idx,
+ MT_RX_SEL0, 0);
if (err < 0)
return err;
- err = mt7915_dfs_start_rdd(dev, ext_phy);
+ err = mt7915_dfs_start_rdd(dev, phy->band_idx);
if (err < 0)
return err;
- phy->rdd_state |= BIT(ext_phy);
+ phy->rdd_state |= BIT(phy->band_idx);
+
+ if (!is_mt7915(&dev->mt76))
+ return 0;
if (chandef->width == NL80211_CHAN_WIDTH_160 ||
chandef->width == NL80211_CHAN_WIDTH_80P80) {
@@ -2330,48 +2473,56 @@ mt7915_dfs_init_radar_specs(struct mt7915_phy *phy)
int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
{
- struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
+ enum mt76_dfs_state dfs_state, prev_state;
int err;
- if (dev->mt76.region == NL80211_DFS_UNSET) {
- phy->dfs_state = -1;
- if (phy->rdd_state)
- goto stop;
+ prev_state = phy->mt76->dfs_state;
+ dfs_state = mt76_phy_dfs_state(phy->mt76);
+ if (prev_state == dfs_state)
return 0;
- }
- if (test_bit(MT76_SCANNING, &phy->mt76->state))
- return 0;
-
- if (phy->dfs_state == chandef->chan->dfs_state)
- return 0;
+ if (prev_state == MT_DFS_STATE_UNKNOWN)
+ mt7915_dfs_stop_radar_detector(phy);
- err = mt7915_dfs_init_radar_specs(phy);
- if (err < 0) {
- phy->dfs_state = -1;
+ if (dfs_state == MT_DFS_STATE_DISABLED)
goto stop;
- }
- phy->dfs_state = chandef->chan->dfs_state;
+ if (prev_state <= MT_DFS_STATE_DISABLED) {
+ err = mt7915_dfs_init_radar_specs(phy);
+ if (err < 0)
+ return err;
+
+ err = mt7915_dfs_start_radar_detector(phy);
+ if (err < 0)
+ return err;
- if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
- if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
- return mt7915_dfs_start_radar_detector(phy);
+ phy->mt76->dfs_state = MT_DFS_STATE_CAC;
+ }
+
+ if (dfs_state == MT_DFS_STATE_CAC)
+ return 0;
- return mt7915_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
- MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
+ phy->band_idx, MT_RX_SEL0, 0);
+ if (err < 0) {
+ phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
+ return err;
}
+ phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
+ return 0;
+
stop:
- err = mt7915_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy,
- MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START,
+ phy->band_idx, MT_RX_SEL0, 0);
if (err < 0)
return err;
mt7915_dfs_stop_radar_detector(phy);
+ phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;
+
return 0;
}
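
The rework above replaces the old per-chandef bookkeeping with an explicit DFS state machine. A hedged distillation of the transitions (placeholder names, not driver API): nothing happens when the state is unchanged, leaving UNKNOWN first stops any stale detector, DISABLED returns the radio to normal RX via RDD_NORMAL_START, anything past DISABLED loads the radar specs and starts CAC, and RDD_CAC_END promotes CAC to ACTIVE.

enum dfs_state { DFS_UNKNOWN, DFS_DISABLED, DFS_CAC, DFS_ACTIVE };

static enum dfs_state dfs_step(enum dfs_state prev, enum dfs_state want)
{
	if (prev == want)
		return prev;		/* nothing to do */
	if (want == DFS_DISABLED)
		return DFS_DISABLED;	/* RDD_NORMAL_START, stop detector */
	if (prev <= DFS_DISABLED)
		prev = DFS_CAC;		/* load radar specs, RDD_CAC_START */
	if (want == DFS_CAC)
		return DFS_CAC;
	return DFS_ACTIVE;		/* RDD_CAC_END completes the CAC */
}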
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
index 7a2c740d1464..5add1dd36dbe 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
@@ -23,6 +23,7 @@ enum rx_pkt_type {
PKT_TYPE_RETRIEVE,
PKT_TYPE_TXRX_NOTIFY,
PKT_TYPE_RX_EVENT,
+ PKT_TYPE_RX_FW_MONITOR = 0x0c,
};
/* RXD DW1 */
@@ -125,6 +126,12 @@ enum rx_pkt_type {
#define MT_PRXV_RCPI2 GENMASK(23, 16)
#define MT_PRXV_RCPI1 GENMASK(15, 8)
#define MT_PRXV_RCPI0 GENMASK(7, 0)
+#define MT_PRXV_HT_SHORT_GI GENMASK(16, 15)
+#define MT_PRXV_HT_STBC GENMASK(23, 22)
+#define MT_PRXV_TX_MODE GENMASK(27, 24)
+#define MT_PRXV_FRAME_MODE GENMASK(14, 12)
+#define MT_PRXV_DCM BIT(17)
+#define MT_PRXV_NUM_RX GENMASK(20, 18)
/* C-RXV */
#define MT_CRXV_HT_STBC GENMASK(1, 0)
@@ -298,18 +305,20 @@ struct mt7915_txp {
struct mt7915_tx_free {
__le16 rx_byte_cnt;
__le16 ctrl;
- u8 txd_cnt;
- u8 rsv[3];
+ __le32 txd;
__le32 info[];
} __packed __aligned(4);
+#define MT_TX_FREE_VER GENMASK(18, 16)
#define MT_TX_FREE_MSDU_CNT GENMASK(9, 0)
#define MT_TX_FREE_WLAN_ID GENMASK(23, 14)
#define MT_TX_FREE_LATENCY GENMASK(12, 0)
/* 0: success, others: dropped */
-#define MT_TX_FREE_STATUS GENMASK(14, 13)
#define MT_TX_FREE_MSDU_ID GENMASK(30, 16)
#define MT_TX_FREE_PAIR BIT(31)
+#define MT_TX_FREE_MPDU_HEADER BIT(30)
+#define MT_TX_FREE_MSDU_ID_V3 GENMASK(14, 0)
+
/* will support this field in further revision */
#define MT_TX_FREE_RATE GENMASK(13, 0)
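
The struct change above trades the old txd_cnt byte for a full dword so the descriptor can carry a format version in bits 18..16 (MT_TX_FREE_VER); newer descriptors then use the narrower 15-bit MSDU ID. A runnable model of that dispatch, with fabricated dwords and an assumed version threshold (the driver compares against the exact version value):

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)   (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(m, v) (((v) & (m)) / ((m) & ~((m) << 1)))

#define TX_FREE_VER        GENMASK(18, 16)
#define TX_FREE_MSDU_ID    GENMASK(30, 16)	/* legacy layout */
#define TX_FREE_MSDU_ID_V3 GENMASK(14, 0)	/* new layout */

int main(void)
{
	uint32_t txd = 0x00030000;	/* fabricated: version field = 3 */
	uint32_t info = 0x12345678;	/* fabricated info dword */
	uint32_t ver = FIELD_GET(TX_FREE_VER, txd);
	uint32_t msdu = ver >= 3 ?	/* assumed cut-over point */
			FIELD_GET(TX_FREE_MSDU_ID_V3, info) :
			FIELD_GET(TX_FREE_MSDU_ID, info);

	printf("ver=%u msdu=%u\n", ver, msdu);
	return 0;
}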
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index 8ac6f59af174..c3f44d801e7f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -34,7 +34,7 @@ static int mt7915_start(struct ieee80211_hw *hw)
running = mt7915_dev_running(dev);
if (!running) {
- ret = mt7915_mcu_set_pm(dev, 0, 0);
+ ret = mt76_connac_mcu_set_pm(&dev->mt76, 0, 0);
if (ret)
goto out;
@@ -49,8 +49,8 @@ static int mt7915_start(struct ieee80211_hw *hw)
mt7915_mac_enable_nf(dev, 0);
}
- if (phy != &dev->phy) {
- ret = mt7915_mcu_set_pm(dev, 1, 0);
+ if (phy != &dev->phy || phy->band_idx) {
+ ret = mt76_connac_mcu_set_pm(&dev->mt76, 1, 0);
if (ret)
goto out;
@@ -65,7 +65,8 @@ static int mt7915_start(struct ieee80211_hw *hw)
mt7915_mac_enable_nf(dev, 1);
}
- ret = mt7915_mcu_set_rts_thresh(phy, 0x92b);
+ ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b,
+ phy != &dev->phy);
if (ret)
goto out;
@@ -106,12 +107,12 @@ static void mt7915_stop(struct ieee80211_hw *hw)
clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
if (phy != &dev->phy) {
- mt7915_mcu_set_pm(dev, 1, 1);
+ mt76_connac_mcu_set_pm(&dev->mt76, 1, 1);
mt7915_mcu_set_mac(dev, 1, false, false);
}
if (!mt7915_dev_running(dev)) {
- mt7915_mcu_set_pm(dev, 0, 1);
+ mt76_connac_mcu_set_pm(&dev->mt76, 0, 1);
mt7915_mcu_set_mac(dev, 0, false, false);
}
@@ -216,7 +217,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
}
mvif->mt76.omac_idx = idx;
mvif->phy = phy;
- mvif->mt76.band_idx = ext_phy;
+ mvif->mt76.band_idx = phy->band_idx;
mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
if (ext_phy)
@@ -234,7 +235,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
INIT_LIST_HEAD(&mvif->sta.rc_list);
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.ext_phy = mvif->mt76.band_idx;
+ mvif->sta.wcid.ext_phy = ext_phy;
mvif->sta.wcid.hw_key_idx = -1;
mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
mt76_packet_id_init(&mvif->sta.wcid);
@@ -256,6 +257,9 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
mt7915_init_bitrate_mask(vif);
memset(&mvif->cap, -1, sizeof(mvif->cap));
+ mt7915_mcu_add_bss_info(phy, vif, true);
+ mt7915_mcu_add_sta(dev, vif, NULL, true);
+
out:
mutex_unlock(&dev->mt76.mutex);
@@ -298,25 +302,6 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
mt76_packet_id_flush(&dev->mt76, &msta->wcid);
}
-static void mt7915_init_dfs_state(struct mt7915_phy *phy)
-{
- struct mt76_phy *mphy = phy->mt76;
- struct ieee80211_hw *hw = mphy->hw;
- struct cfg80211_chan_def *chandef = &hw->conf.chandef;
-
- if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
- return;
-
- if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
- return;
-
- if (mphy->chandef.chan->center_freq == chandef->chan->center_freq &&
- mphy->chandef.width == chandef->width)
- return;
-
- phy->dfs_state = -1;
-}
-
int mt7915_set_channel(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
@@ -327,7 +312,6 @@ int mt7915_set_channel(struct mt7915_phy *phy)
mutex_lock(&dev->mt76.mutex);
set_bit(MT76_RESET, &phy->mt76->state);
- mt7915_init_dfs_state(phy);
mt76_set_channel(phy->mt76);
if (dev->flash_mode) {
@@ -366,6 +350,7 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_sta *msta = sta ? (struct mt7915_sta *)sta->drv_priv :
&mvif->sta;
@@ -405,6 +390,11 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mutex_lock(&dev->mt76.mutex);
+ if (cmd == SET_KEY && !sta && !mvif->mt76.cipher) {
+ mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ mt7915_mcu_add_bss_info(phy, vif, true);
+ }
+
if (cmd == SET_KEY)
*wcid_keyidx = idx;
else if (idx == *wcid_keyidx)
@@ -415,8 +405,9 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mt76_wcid_key_setup(&dev->mt76, wcid,
cmd == SET_KEY ? key : NULL);
- err = mt7915_mcu_add_key(dev, vif, msta, key, cmd);
-
+ err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
+ key, MCU_EXT_CMD(STA_REC_UPDATE),
+ &msta->wcid, cmd);
out:
mutex_unlock(&dev->mt76.mutex);
@@ -498,11 +489,10 @@ static int
mt7915_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
/* no need to update right away, we'll get BSS_CHANGED_QOS */
- queue = mt7915_lmac_mapping(dev, queue);
+ queue = mt76_connac_lmac_mapping(queue);
mvif->queue_params[queue] = *params;
return 0;
@@ -664,6 +654,7 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ bool ext_phy = mvif->phy != &dev->phy;
int ret, idx;
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
@@ -675,7 +666,7 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->vif = mvif;
msta->wcid.sta = 1;
msta->wcid.idx = idx;
- msta->wcid.ext_phy = mvif->mt76.band_idx;
+ msta->wcid.ext_phy = ext_phy;
msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
msta->jiffies = jiffies;
@@ -746,7 +737,7 @@ static int mt7915_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
int ret;
mutex_lock(&dev->mt76.mutex);
- ret = mt7915_mcu_set_rts_thresh(phy, val);
+ ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, phy != &dev->phy);
mutex_unlock(&dev->mt76.mutex);
return ret;
@@ -861,8 +852,12 @@ u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif)
n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
: mvif->mt76.omac_idx;
/* TSF software read */
- mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
- MT_LPON_TCR_SW_READ);
+ if (is_mt7915(&dev->mt76))
+ mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_READ);
+ else
+ mt76_rmw(dev, MT_LPON_TCR_MT7916(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_READ);
tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(band));
tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(band));
@@ -904,8 +899,12 @@ mt7915_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
/* TSF software overwrite */
- mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
- MT_LPON_TCR_SW_WRITE);
+ if (is_mt7915(&dev->mt76))
+ mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_WRITE);
+ else
+ mt76_rmw(dev, MT_LPON_TCR_MT7916(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_WRITE);
mutex_unlock(&dev->mt76.mutex);
}
@@ -931,8 +930,12 @@ mt7915_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
/* TSF software adjust */
- mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
- MT_LPON_TCR_SW_ADJUST);
+ if (is_mt7915(&dev->mt76))
+ mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_ADJUST);
+ else
+ mt76_rmw(dev, MT_LPON_TCR_MT7916(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_ADJUST);
mutex_unlock(&dev->mt76.mutex);
}
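
The three TSF paths above (software read, overwrite, adjust) now share one shape: pick the generation-specific TCR register, then issue the same mode bits. One possible consolidation, sketched from identifiers already visible in this patch (the helper name is hypothetical, not part of the patch):

static void mt7915_tsf_rmw(struct mt7915_dev *dev, u8 band, u8 n, u32 mode)
{
	u32 reg = is_mt7915(&dev->mt76) ? MT_LPON_TCR(band, n)
					: MT_LPON_TCR_MT7916(band, n);

	mt76_rmw(dev, reg, MT_LPON_TCR_SW_MODE, mode);
}

Callers would pass MT_LPON_TCR_SW_READ, MT_LPON_TCR_SW_WRITE or MT_LPON_TCR_SW_ADJUST.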
@@ -967,12 +970,9 @@ mt7915_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
phy->mt76->antenna_mask = tx_ant;
- if (ext_phy) {
- if (dev->chainmask == 0xf)
- tx_ant <<= 2;
- else
- tx_ant <<= 1;
- }
+ if (ext_phy)
+ tx_ant <<= dev->chainshift;
+
phy->mt76->chainmask = tx_ant;
mt76_set_stream_caps(phy->mt76, true);
@@ -994,7 +994,8 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
struct rate_info *txrate = &msta->wcid.rate;
struct rate_info rxrate = {};
- if (!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
+ if (is_mt7915(&phy->dev->mt76) &&
+ !mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
sinfo->rxrate = rxrate;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
}
@@ -1079,7 +1080,7 @@ static void mt7915_sta_set_4addr(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags);
- mt7915_mcu_sta_update_hdr_trans(dev, vif, sta);
+ mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw,
@@ -1095,7 +1096,7 @@ static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
- mt7915_mcu_sta_update_hdr_trans(dev, vif, sta);
+ mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
static const char mt7915_gstrings_stats[][ETH_GSTRING_LEN] = {
@@ -1238,7 +1239,6 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
};
struct mib_stats *mib = &phy->mib;
/* See mt7915_ampdu_stat_read_phy, etc */
- bool ext_phy = phy != &dev->phy;
int i, n, ei = 0;
mutex_lock(&dev->mt76.mutex);
@@ -1255,7 +1255,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
data[ei++] = mib->tx_pkt_ibf_cnt;
/* Tx ampdu stat */
- n = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ n = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0; i < 15 /*ARRAY_SIZE(bound)*/; i++)
data[ei++] = dev->mt76.aggr_stats[i + n];
@@ -1332,6 +1332,55 @@ mt7915_twt_teardown_request(struct ieee80211_hw *hw,
mutex_unlock(&dev->mt76.mutex);
}
+static int
+mt7915_set_radar_background(struct ieee80211_hw *hw,
+ struct cfg80211_chan_def *chandef)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = phy->dev;
+ int ret = -EINVAL;
+ bool running;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (dev->mt76.region == NL80211_DFS_UNSET)
+ goto out;
+
+ if (dev->rdd2_phy && dev->rdd2_phy != phy) {
+ /* rdd2 is already locked */
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* rdd2 already configured on a radar channel */
+ running = dev->rdd2_phy &&
+ cfg80211_chandef_valid(&dev->rdd2_chandef) &&
+ !!(dev->rdd2_chandef.chan->flags & IEEE80211_CHAN_RADAR);
+
+ if (!chandef || running ||
+ !(chandef->chan->flags & IEEE80211_CHAN_RADAR)) {
+ ret = mt7915_mcu_rdd_background_enable(phy, NULL);
+ if (ret)
+ goto out;
+
+ if (!running)
+ goto update_phy;
+ }
+
+ ret = mt7915_mcu_rdd_background_enable(phy, chandef);
+ if (ret)
+ goto out;
+
+update_phy:
+ dev->rdd2_phy = chandef ? phy : NULL;
+ if (chandef)
+ dev->rdd2_chandef = *chandef;
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
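
The teardown/re-arm interplay in the branch above is easy to misread, so here is a hedged distillation as a pure decision function (names illustrative, not driver API): a NULL or non-radar chandef only tears rdd2 down, a new chandef while rdd2 is already live tears down and then re-arms, and a fresh radar chandef arms directly.

enum rdd2_action { RDD2_DISABLE, RDD2_DISABLE_THEN_ARM, RDD2_ARM };

static enum rdd2_action rdd2_decide(int have_chandef, int is_radar_chan,
				    int running)
{
	if (!have_chandef || running || !is_radar_chan) {
		if (!running)
			return RDD2_DISABLE;	/* plain teardown */
		return RDD2_DISABLE_THEN_ARM;	/* replace the chandef */
	}
	return RDD2_ARM;			/* first-time arm */
}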
+
const struct ieee80211_ops mt7915_ops = {
.tx = mt7915_tx,
.start = mt7915_start,
@@ -1378,4 +1427,5 @@ const struct ieee80211_ops mt7915_ops = {
#ifdef CONFIG_MAC80211_DEBUGFS
.sta_add_debugfs = mt7915_sta_add_debugfs,
#endif
+ .set_radar_background = mt7915_set_radar_background,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 0911b6f973b5..e7a6f80e7755 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -64,136 +64,31 @@ struct mt7915_fw_region {
u8 reserved1[15];
} __packed;
-#define MCU_PATCH_ADDRESS 0x200000
-
-#define FW_FEATURE_SET_ENCRYPT BIT(0)
-#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
-#define FW_FEATURE_OVERRIDE_ADDR BIT(5)
-
-#define DL_MODE_ENCRYPT BIT(0)
-#define DL_MODE_KEY_IDX GENMASK(2, 1)
-#define DL_MODE_RESET_SEC_IV BIT(3)
-#define DL_MODE_WORKING_PDA_CR4 BIT(4)
-#define DL_MODE_NEED_RSP BIT(31)
-
-#define FW_START_OVERRIDE BIT(0)
-#define FW_START_WORKING_PDA_CR4 BIT(2)
-
-#define PATCH_SEC_TYPE_MASK GENMASK(15, 0)
-#define PATCH_SEC_TYPE_INFO 0x2
+#define fw_name(_dev, name, ...) ({ \
+ char *_fw; \
+ switch (mt76_chip(&(_dev)->mt76)) { \
+ case 0x7915: \
+ _fw = MT7915_##name; \
+ break; \
+ case 0x7986: \
+ _fw = MT7986_##name##__VA_ARGS__; \
+ break; \
+ default: \
+ _fw = MT7916_##name; \
+ break; \
+ } \
+ _fw; \
+})
+
+#define fw_name_var(_dev, name) (mt7915_check_adie(dev, false) ? \
+ fw_name(_dev, name) : \
+ fw_name(_dev, name, _MT7975))
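
To make the token pasting concrete, a few assumed evaluations of the selector (the MT7915_/MT7916_/MT7986_ firmware-name macros are defined elsewhere in the driver; which branch runs depends on mt76_chip() at runtime):

/* chip 0x7915 */ fw_name(dev, FIRMWARE_WM);		/* MT7915_FIRMWARE_WM */
/* chip 0x7986 */ fw_name(dev, FIRMWARE_WM, _MT7975);	/* MT7986_FIRMWARE_WM_MT7975 */
/* other chips */ fw_name(dev, FIRMWARE_WM);		/* MT7916_FIRMWARE_WM */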
-#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
-#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
+#define MCU_PATCH_ADDRESS 0x200000
#define HE_PHY(p, c) u8_get_bits(c, IEEE80211_HE_PHY_##p)
#define HE_MAC(m, c) u8_get_bits(c, IEEE80211_HE_MAC_##m)
-static enum mcu_cipher_type
-mt7915_mcu_get_cipher(int cipher)
-{
- switch (cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- return MCU_CIPHER_WEP40;
- case WLAN_CIPHER_SUITE_WEP104:
- return MCU_CIPHER_WEP104;
- case WLAN_CIPHER_SUITE_TKIP:
- return MCU_CIPHER_TKIP;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- return MCU_CIPHER_BIP_CMAC_128;
- case WLAN_CIPHER_SUITE_CCMP:
- return MCU_CIPHER_AES_CCMP;
- case WLAN_CIPHER_SUITE_CCMP_256:
- return MCU_CIPHER_CCMP_256;
- case WLAN_CIPHER_SUITE_GCMP:
- return MCU_CIPHER_GCMP;
- case WLAN_CIPHER_SUITE_GCMP_256:
- return MCU_CIPHER_GCMP_256;
- case WLAN_CIPHER_SUITE_SMS4:
- return MCU_CIPHER_WAPI;
- default:
- return MCU_CIPHER_NONE;
- }
-}
-
-static u8 mt7915_mcu_chan_bw(struct cfg80211_chan_def *chandef)
-{
- static const u8 width_to_bw[] = {
- [NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ,
- [NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ,
- [NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ,
- [NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ,
- [NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ,
- [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
- [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
- [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
- };
-
- if (chandef->width >= ARRAY_SIZE(width_to_bw))
- return 0;
-
- return width_to_bw[chandef->width];
-}
-
-static const struct ieee80211_sta_he_cap *
-mt7915_get_he_phy_cap(struct mt7915_phy *phy, struct ieee80211_vif *vif)
-{
- struct ieee80211_supported_band *sband;
- enum nl80211_band band;
-
- band = phy->mt76->chandef.chan->band;
- sband = phy->mt76->hw->wiphy->bands[band];
-
- return ieee80211_get_he_iftype_cap(sband, vif->type);
-}
-
-static u8
-mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- enum nl80211_band band = mvif->phy->mt76->chandef.chan->band;
- struct ieee80211_sta_ht_cap *ht_cap;
- struct ieee80211_sta_vht_cap *vht_cap;
- const struct ieee80211_sta_he_cap *he_cap;
- u8 mode = 0;
-
- if (sta) {
- ht_cap = &sta->ht_cap;
- vht_cap = &sta->vht_cap;
- he_cap = &sta->he_cap;
- } else {
- struct ieee80211_supported_band *sband;
-
- sband = mvif->phy->mt76->hw->wiphy->bands[band];
-
- ht_cap = &sband->ht_cap;
- vht_cap = &sband->vht_cap;
- he_cap = ieee80211_get_he_iftype_cap(sband, vif->type);
- }
-
- if (band == NL80211_BAND_2GHZ) {
- mode |= PHY_MODE_B | PHY_MODE_G;
-
- if (ht_cap->ht_supported)
- mode |= PHY_MODE_GN;
-
- if (he_cap && he_cap->has_he)
- mode |= PHY_MODE_AX_24G;
- } else if (band == NL80211_BAND_5GHZ) {
- mode |= PHY_MODE_A;
-
- if (ht_cap->ht_supported)
- mode |= PHY_MODE_AN;
-
- if (vht_cap->vht_supported)
- mode |= PHY_MODE_AC;
-
- if (he_cap && he_cap->has_he)
- mode |= PHY_MODE_AX_5G;
- }
-
- return mode;
-}
-
static u8
mt7915_mcu_get_sta_nss(u16 mcs_map)
{
@@ -211,24 +106,13 @@ mt7915_mcu_get_sta_nss(u16 mcs_map)
static void
mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
- const u16 *mask)
+ u16 mcs_map)
{
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- struct cfg80211_chan_def *chandef = &msta->vif->phy->mt76->chandef;
+ struct mt7915_dev *dev = msta->vif->phy->dev;
+ enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
+ const u16 *mask = msta->vif->bitrate_mask.control[band].he_mcs;
int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
- u16 mcs_map;
-
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_80P80:
- mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80p80);
- break;
- case NL80211_CHAN_WIDTH_160:
- mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160);
- break;
- default:
- mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80);
- break;
- }
for (nss = 0; nss < max_nss; nss++) {
int mcs;
@@ -266,8 +150,9 @@ mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
mcs_map &= ~(0x3 << (nss * 2));
mcs_map |= mcs << (nss * 2);
- /* only support 2ss on 160MHz */
- if (nss > 1 && (sta->bandwidth == IEEE80211_STA_RX_BW_160))
+ /* only support 2ss on 160MHz for mt7915 */
+ if (is_mt7915(&dev->mt76) && nss > 1 &&
+ sta->bandwidth == IEEE80211_STA_RX_BW_160)
break;
}
@@ -278,6 +163,8 @@ static void
mt7915_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
const u16 *mask)
{
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_dev *dev = msta->vif->phy->dev;
u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
u16 mcs;
@@ -299,8 +186,9 @@ mt7915_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
vht_mcs[nss] = cpu_to_le16(mcs & mask[nss]);
- /* only support 2ss on 160MHz */
- if (nss > 1 && (sta->bandwidth == IEEE80211_STA_RX_BW_160))
+ /* only support 2ss on 160MHz for mt7915 */
+ if (is_mt7915(&dev->mt76) && nss > 1 &&
+ sta->bandwidth == IEEE80211_STA_RX_BW_160)
break;
}
}
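
For orientation, the HE MCS map manipulated above packs one two-bit support value per spatial stream (0 = MCS 0-7, 1 = MCS 0-9, 2 = MCS 0-11, 3 = unsupported). A toy round-trip of that packing with a fabricated map; the real clamping against the per-stream bitrate mask is elided:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t mcs_map = 0xfffa;	/* fabricated rx_mcs_80: 2 streams */
	int nss;

	for (nss = 0; nss < 8; nss++) {
		int mcs = (mcs_map >> (nss * 2)) & 0x3;

		/* ... cap mcs against the user's per-nss mask here ... */

		mcs_map &= ~(0x3 << (nss * 2));
		mcs_map |= mcs << (nss * 2);
	}
	printf("0x%04x\n", mcs_map);
	return 0;
}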
@@ -446,7 +334,7 @@ mt7915_mcu_rx_csa_notify(struct mt7915_dev *dev, struct sk_buff *skb)
c = (struct mt7915_mcu_csa_notify *)skb->data;
- if (c->band_idx && dev->mt76.phy2)
+ if ((c->band_idx && !dev->phy.band_idx) && dev->mt76.phy2)
mphy = dev->mt76.phy2;
ieee80211_iterate_active_interfaces_atomic(mphy->hw,
@@ -465,7 +353,7 @@ mt7915_mcu_rx_thermal_notify(struct mt7915_dev *dev, struct sk_buff *skb)
if (t->ctrl.ctrl_id != THERMAL_PROTECT_ENABLE)
return;
- if (t->ctrl.band_idx && dev->mt76.phy2)
+ if ((t->ctrl.band_idx && !dev->phy.band_idx) && dev->mt76.phy2)
mphy = dev->mt76.phy2;
phy = (struct mt7915_phy *)mphy->priv;
@@ -480,10 +368,15 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
r = (struct mt7915_mcu_rdd_report *)skb->data;
- if (r->band_idx && dev->mt76.phy2)
+ if ((r->band_idx && !dev->phy.band_idx) && dev->mt76.phy2)
mphy = dev->mt76.phy2;
- ieee80211_radar_detected(mphy->hw);
+ if (r->band_idx == MT_RX_SEL2)
+ cfg80211_background_radar_event(mphy->hw->wiphy,
+ &dev->rdd2_chandef,
+ GFP_ATOMIC);
+ else
+ ieee80211_radar_detected(mphy->hw);
dev->hw_pattern++;
}
@@ -493,9 +386,13 @@ mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb)
struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
const char *data = (char *)&rxd[1];
const char *type;
+ int len = skb->len - sizeof(*rxd);
switch (rxd->s2d_index) {
case 0:
+ if (mt7915_debugfs_rx_log(dev, data, len))
+ return;
+
type = "WM";
break;
case 2:
@@ -506,8 +403,7 @@ mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb)
break;
}
- wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type,
- (int)(skb->len - sizeof(*rxd)), data);
+ wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type, len, data);
}
static void
@@ -520,6 +416,22 @@ mt7915_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
}
static void
+mt7915_mcu_rx_bcc_notify(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7915_mcu_bcc_notify *b;
+
+ b = (struct mt7915_mcu_bcc_notify *)skb->data;
+
+ if ((b->band_idx && !dev->phy.band_idx) && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ ieee80211_iterate_active_interfaces_atomic(mphy->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7915_mcu_cca_finish, mphy->hw);
+}
+
+static void
mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
{
struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
@@ -538,9 +450,7 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
mt7915_mcu_rx_log_message(dev, skb);
break;
case MCU_EXT_EVENT_BCC_NOTIFY:
- ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw,
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7915_mcu_cca_finish, dev);
+ mt7915_mcu_rx_bcc_notify(dev, skb);
break;
default:
break;
@@ -577,88 +487,6 @@ void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb)
mt76_mcu_rx_event(&dev->mt76, skb);
}
-static struct sk_buff *
-mt7915_mcu_alloc_sta_req(struct mt7915_dev *dev, struct mt7915_vif *mvif,
- struct mt7915_sta *msta, int len)
-{
- struct sta_req_hdr hdr = {
- .bss_idx = mvif->mt76.idx,
- .wlan_idx_lo = msta ? to_wcid_lo(msta->wcid.idx) : 0,
- .wlan_idx_hi = msta ? to_wcid_hi(msta->wcid.idx) : 0,
- .muar_idx = msta && msta->wcid.sta ? mvif->mt76.omac_idx : 0xe,
- .is_tlv_append = 1,
- };
- struct sk_buff *skb;
-
- skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, len);
- if (!skb)
- return ERR_PTR(-ENOMEM);
-
- skb_put_data(skb, &hdr, sizeof(hdr));
-
- return skb;
-}
-
-static struct wtbl_req_hdr *
-mt7915_mcu_alloc_wtbl_req(struct mt7915_dev *dev, struct mt7915_sta *msta,
- int cmd, void *sta_wtbl, struct sk_buff **skb)
-{
- struct tlv *sta_hdr = sta_wtbl;
- struct wtbl_req_hdr hdr = {
- .wlan_idx_lo = to_wcid_lo(msta->wcid.idx),
- .wlan_idx_hi = to_wcid_hi(msta->wcid.idx),
- .operation = cmd,
- };
- struct sk_buff *nskb = *skb;
-
- if (!nskb) {
- nskb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
- MT76_CONNAC_WTBL_UPDATE_MAX_SIZE);
- if (!nskb)
- return ERR_PTR(-ENOMEM);
-
- *skb = nskb;
- }
-
- if (sta_hdr)
- le16_add_cpu(&sta_hdr->len, sizeof(hdr));
-
- return skb_put_data(nskb, &hdr, sizeof(hdr));
-}
-
-static struct tlv *
-mt7915_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
- void *sta_ntlv, void *sta_wtbl)
-{
- struct sta_ntlv_hdr *ntlv_hdr = sta_ntlv;
- struct tlv *sta_hdr = sta_wtbl;
- struct tlv *ptlv, tlv = {
- .tag = cpu_to_le16(tag),
- .len = cpu_to_le16(len),
- };
- u16 ntlv;
-
- ptlv = skb_put(skb, len);
- memcpy(ptlv, &tlv, sizeof(tlv));
-
- ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
- ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1);
-
- if (sta_hdr) {
- u16 size = le16_to_cpu(sta_hdr->len);
-
- sta_hdr->len = cpu_to_le16(size + len);
- }
-
- return ptlv;
-}
-
-static struct tlv *
-mt7915_mcu_add_tlv(struct sk_buff *skb, int tag, int len)
-{
- return mt7915_mcu_add_nested_tlv(skb, tag, len, skb->data, NULL);
-}
-
static struct tlv *
mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len,
__le16 *sub_ntlv, __le16 *len)
@@ -678,105 +506,6 @@ mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len,
}
/** bss info **/
-static int
-mt7915_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct mt7915_phy *phy, bool enable)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct bss_info_basic *bss;
- u16 wlan_idx = mvif->sta.wcid.idx;
- u32 type = NETWORK_INFRA;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
-
- switch (vif->type) {
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_MONITOR:
- break;
- case NL80211_IFTYPE_STATION:
- /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
- if (enable) {
- struct ieee80211_sta *sta;
- struct mt7915_sta *msta;
-
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
- if (!sta) {
- rcu_read_unlock();
- return -EINVAL;
- }
-
- msta = (struct mt7915_sta *)sta->drv_priv;
- wlan_idx = msta->wcid.idx;
- rcu_read_unlock();
- }
- break;
- case NL80211_IFTYPE_ADHOC:
- type = NETWORK_IBSS;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- bss = (struct bss_info_basic *)tlv;
- bss->network_type = cpu_to_le32(type);
- bss->bmc_wcid_lo = to_wcid_lo(wlan_idx);
- bss->bmc_wcid_hi = to_wcid_hi(wlan_idx);
- bss->wmm_idx = mvif->mt76.wmm_idx;
- bss->active = enable;
-
- if (vif->type != NL80211_IFTYPE_MONITOR) {
- memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
- bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
- bss->dtim_period = vif->bss_conf.dtim_period;
- bss->phy_mode = mt7915_get_phy_mode(vif, NULL);
- } else {
- memcpy(bss->bssid, phy->mt76->macaddr, ETH_ALEN);
- }
-
- return 0;
-}
-
-static void
-mt7915_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct bss_info_omac *omac;
- struct tlv *tlv;
- u32 type = 0;
- u8 idx;
-
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
-
- switch (vif->type) {
- case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- type = CONNECTION_INFRA_AP;
- break;
- case NL80211_IFTYPE_STATION:
- type = CONNECTION_INFRA_STA;
- break;
- case NL80211_IFTYPE_ADHOC:
- type = CONNECTION_IBSS_ADHOC;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- omac = (struct bss_info_omac *)tlv;
- idx = mvif->mt76.omac_idx > EXT_BSSID_START ? HW_BSSID_0
- : mvif->mt76.omac_idx;
- omac->conn_type = cpu_to_le32(type);
- omac->omac_idx = mvif->mt76.omac_idx;
- omac->band_idx = mvif->mt76.band_idx;
- omac->hw_bss_idx = idx;
-}
-
struct mt7915_he_obss_narrow_bw_ru_data {
bool tolerated;
};
@@ -829,12 +558,12 @@ mt7915_mcu_bss_rfch_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct tlv *tlv;
int freq1 = chandef->center_freq1;
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_RF_CH, sizeof(*ch));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_RF_CH, sizeof(*ch));
ch = (struct bss_info_rf_ch *)tlv;
ch->pri_ch = chandef->chan->hw_value;
ch->center_ch0 = ieee80211_frequency_to_channel(freq1);
- ch->bw = mt7915_mcu_chan_bw(chandef);
+ ch->bw = mt76_connac_chan_bw(chandef);
if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
int freq2 = chandef->center_freq2;
@@ -843,12 +572,7 @@ mt7915_mcu_bss_rfch_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
}
if (vif->bss_conf.he_support && vif->type == NL80211_IFTYPE_STATION) {
- struct mt7915_dev *dev = phy->dev;
- struct mt76_phy *mphy = &dev->mt76.phy;
- bool ext_phy = phy != &dev->phy;
-
- if (ext_phy && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ struct mt76_phy *mphy = phy->mt76;
ch->he_ru26_block =
mt7915_check_he_obss_narrow_bw_ru(mphy->hw, vif);
@@ -866,7 +590,7 @@ mt7915_mcu_bss_ra_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct bss_info_ra *ra;
struct tlv *tlv;
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_RA, sizeof(*ra));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_RA, sizeof(*ra));
ra = (struct bss_info_ra *)tlv;
ra->op_mode = vif->type == NL80211_IFTYPE_AP;
@@ -894,9 +618,9 @@ mt7915_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct bss_info_he *he;
struct tlv *tlv;
- cap = mt7915_get_he_phy_cap(phy, vif);
+ cap = mt76_connac_get_he_phy_cap(phy->mt76, vif);
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_HE_BASIC, sizeof(*he));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_HE_BASIC, sizeof(*he));
he = (struct bss_info_he *)tlv;
he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext;
@@ -920,7 +644,7 @@ mt7915_mcu_bss_hw_amsdu_tlv(struct sk_buff *skb)
struct bss_info_hw_amsdu *amsdu;
struct tlv *tlv;
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_HW_AMSDU, sizeof(*amsdu));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_HW_AMSDU, sizeof(*amsdu));
amsdu = (struct bss_info_hw_amsdu *)tlv;
amsdu->cmp_bitmap_0 = cpu_to_le32(TXD_CMP_MAP1);
@@ -930,26 +654,6 @@ mt7915_mcu_bss_hw_amsdu_tlv(struct sk_buff *skb)
}
static void
-mt7915_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt7915_vif *mvif)
-{
-/* SIFS 20us + 512 byte beacon transmitted by 1Mbps (3906us) */
-#define BCN_TX_ESTIMATE_TIME (4096 + 20)
- struct bss_info_ext_bss *ext;
- int ext_bss_idx, tsf_offset;
- struct tlv *tlv;
-
- ext_bss_idx = mvif->mt76.omac_idx - EXT_BSSID_START;
- if (ext_bss_idx < 0)
- return;
-
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
-
- ext = (struct bss_info_ext_bss *)tlv;
- tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
- ext->mbss_tsf_offset = cpu_to_le32(tsf_offset);
-}
-
-static void
mt7915_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt7915_phy *phy)
{
struct bss_info_bmc_rate *bmc;
@@ -957,7 +661,7 @@ mt7915_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt7915_phy *phy)
enum nl80211_band band = chandef->chan->band;
struct tlv *tlv;
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_BMC_RATE, sizeof(*bmc));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BMC_RATE, sizeof(*bmc));
bmc = (struct bss_info_bmc_rate *)tlv;
if (band == NL80211_BAND_2GHZ) {
@@ -1010,6 +714,7 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
struct ieee80211_vif *vif, int enable)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_dev *dev = phy->dev;
struct sk_buff *skb;
if (mvif->mt76.omac_idx >= REPEATER_BSSID_START) {
@@ -1017,16 +722,17 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
mt7915_mcu_muar_config(phy, vif, true, enable);
}
- skb = mt7915_mcu_alloc_sta_req(phy->dev, mvif, NULL,
- MT7915_BSS_UPDATE_MAX_SIZE);
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, NULL,
+ MT7915_BSS_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
/* bss_omac must be first */
if (enable)
- mt7915_mcu_bss_omac_tlv(skb, vif);
+ mt76_connac_mcu_bss_omac_tlv(skb, vif);
- mt7915_mcu_bss_basic_tlv(skb, vif, phy, enable);
+ mt76_connac_mcu_bss_basic_tlv(skb, vif, NULL, phy->mt76,
+ mvif->sta.wcid.idx, enable);
if (vif->type == NL80211_IFTYPE_MONITOR)
goto out;
@@ -1042,309 +748,48 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
if (mvif->mt76.omac_idx >= EXT_BSSID_START &&
mvif->mt76.omac_idx < REPEATER_BSSID_START)
- mt7915_mcu_bss_ext_tlv(skb, mvif);
+ mt76_connac_mcu_bss_ext_tlv(skb, &mvif->mt76);
}
out:
- return mt76_mcu_skb_send_msg(&phy->dev->mt76, skb,
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(BSS_INFO_UPDATE), true);
}
/** starec & wtbl **/
-static int
-mt7915_mcu_sta_key_tlv(struct mt7915_sta *msta, struct sk_buff *skb,
- struct ieee80211_key_conf *key, enum set_key_cmd cmd)
-{
- struct mt7915_sta_key_conf *bip = &msta->bip;
- struct sta_rec_sec *sec;
- struct tlv *tlv;
- u32 len = sizeof(*sec);
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
-
- sec = (struct sta_rec_sec *)tlv;
- sec->add = cmd;
-
- if (cmd == SET_KEY) {
- struct sec_key *sec_key;
- u8 cipher;
-
- cipher = mt7915_mcu_get_cipher(key->cipher);
- if (cipher == MCU_CIPHER_NONE)
- return -EOPNOTSUPP;
-
- sec_key = &sec->key[0];
- sec_key->cipher_len = sizeof(*sec_key);
-
- if (cipher == MCU_CIPHER_BIP_CMAC_128) {
- sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
- sec_key->key_id = bip->keyidx;
- sec_key->key_len = 16;
- memcpy(sec_key->key, bip->key, 16);
-
- sec_key = &sec->key[1];
- sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
- sec_key->cipher_len = sizeof(*sec_key);
- sec_key->key_len = 16;
- memcpy(sec_key->key, key->key, 16);
-
- sec->n_cipher = 2;
- } else {
- sec_key->cipher_id = cipher;
- sec_key->key_id = key->keyidx;
- sec_key->key_len = key->keylen;
- memcpy(sec_key->key, key->key, key->keylen);
-
- if (cipher == MCU_CIPHER_TKIP) {
- /* Rx/Tx MIC keys are swapped */
- memcpy(sec_key->key + 16, key->key + 24, 8);
- memcpy(sec_key->key + 24, key->key + 16, 8);
- }
-
- /* store key_conf for BIP batch update */
- if (cipher == MCU_CIPHER_AES_CCMP) {
- memcpy(bip->key, key->key, key->keylen);
- bip->keyidx = key->keyidx;
- }
-
- len -= sizeof(*sec_key);
- sec->n_cipher = 1;
- }
- } else {
- len -= sizeof(sec->key);
- sec->n_cipher = 0;
- }
- sec->len = cpu_to_le16(len);
-
- return 0;
-}
-
-int mt7915_mcu_add_key(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct mt7915_sta *msta, struct ieee80211_key_conf *key,
- enum set_key_cmd cmd)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct sk_buff *skb;
- int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_sec);
- int ret;
-
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- ret = mt7915_mcu_sta_key_tlv(msta, skb, key, cmd);
- if (ret)
- return ret;
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(STA_REC_UPDATE), true);
-}
-
-static void
-mt7915_mcu_sta_ba_tlv(struct sk_buff *skb,
- struct ieee80211_ampdu_params *params,
- bool enable, bool tx)
-{
- struct sta_rec_ba *ba;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_BA, sizeof(*ba));
-
- ba = (struct sta_rec_ba *)tlv;
- ba->ba_type = tx ? MT_BA_TYPE_ORIGINATOR : MT_BA_TYPE_RECIPIENT;
- ba->winsize = cpu_to_le16(params->buf_size);
- ba->ssn = cpu_to_le16(params->ssn);
- ba->ba_en = enable << params->tid;
- ba->amsdu = params->amsdu;
- ba->tid = params->tid;
-}
-
-static void
-mt7915_mcu_wtbl_ba_tlv(struct sk_buff *skb,
- struct ieee80211_ampdu_params *params,
- bool enable, bool tx, void *sta_wtbl,
- void *wtbl_tlv)
-{
- struct wtbl_ba *ba;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_BA, sizeof(*ba),
- wtbl_tlv, sta_wtbl);
-
- ba = (struct wtbl_ba *)tlv;
- ba->tid = params->tid;
-
- if (tx) {
- ba->ba_type = MT_BA_TYPE_ORIGINATOR;
- ba->sn = enable ? cpu_to_le16(params->ssn) : 0;
- ba->ba_en = enable;
- } else {
- memcpy(ba->peer_addr, params->sta->addr, ETH_ALEN);
- ba->ba_type = MT_BA_TYPE_RECIPIENT;
- ba->rst_ba_tid = params->tid;
- ba->rst_ba_sel = RST_BA_MAC_TID_MATCH;
- ba->rst_ba_sb = 1;
- }
-
- if (enable)
- ba->ba_winsize = cpu_to_le16(params->buf_size);
-}
-
-static int
-mt7915_mcu_sta_ba(struct mt7915_dev *dev,
- struct ieee80211_ampdu_params *params,
- bool enable, bool tx)
+int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
{
struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv;
struct mt7915_vif *mvif = msta->vif;
- struct wtbl_req_hdr *wtbl_hdr;
- struct tlv *sta_wtbl;
- struct sk_buff *skb;
- int ret;
- if (enable && tx && !params->amsdu)
+ if (enable && !params->amsdu)
msta->wcid.amsdu = false;
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT76_CONNAC_STA_UPDATE_MAX_SIZE);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
-
- wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
- &skb);
- if (IS_ERR(wtbl_hdr))
- return PTR_ERR(wtbl_hdr);
-
- mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
-
- ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(STA_REC_UPDATE), true);
- if (ret)
- return ret;
-
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT76_CONNAC_STA_UPDATE_MAX_SIZE);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- mt7915_mcu_sta_ba_tlv(skb, params, enable, tx);
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(STA_REC_UPDATE), true);
-}
-
-int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
- struct ieee80211_ampdu_params *params,
- bool enable)
-{
- return mt7915_mcu_sta_ba(dev, params, enable, true);
+ return mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
+ MCU_EXT_CMD(STA_REC_UPDATE),
+ enable, true);
}
int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
struct ieee80211_ampdu_params *params,
bool enable)
{
- return mt7915_mcu_sta_ba(dev, params, enable, false);
-}
-
-static void
-mt7915_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, void *sta_wtbl,
- void *wtbl_tlv)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct wtbl_generic *generic;
- struct wtbl_rx *rx;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_GENERIC, sizeof(*generic),
- wtbl_tlv, sta_wtbl);
-
- generic = (struct wtbl_generic *)tlv;
-
- if (sta) {
- memcpy(generic->peer_addr, sta->addr, ETH_ALEN);
- generic->partial_aid = cpu_to_le16(sta->aid);
- generic->muar_idx = mvif->mt76.omac_idx;
- generic->qos = sta->wme;
- } else {
- /* use BSSID in station mode */
- if (vif->type == NL80211_IFTYPE_STATION)
- memcpy(generic->peer_addr, vif->bss_conf.bssid,
- ETH_ALEN);
- else
- eth_broadcast_addr(generic->peer_addr);
-
- generic->muar_idx = 0xe;
- }
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_RX, sizeof(*rx),
- wtbl_tlv, sta_wtbl);
-
- rx = (struct wtbl_rx *)tlv;
- rx->rca1 = sta ? vif->type != NL80211_IFTYPE_AP : 1;
- rx->rca2 = 1;
- rx->rv = 1;
-}
-
-static void
-mt7915_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable)
-{
-#define EXTRA_INFO_VER BIT(0)
-#define EXTRA_INFO_NEW BIT(1)
- struct sta_rec_basic *basic;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_BASIC, sizeof(*basic));
-
- basic = (struct sta_rec_basic *)tlv;
- basic->extra_info = cpu_to_le16(EXTRA_INFO_VER);
-
- if (enable) {
- basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
- basic->conn_state = CONN_STATE_PORT_SECURE;
- } else {
- basic->conn_state = CONN_STATE_DISCONNECT;
- }
-
- if (!sta) {
- basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
- eth_broadcast_addr(basic->peer_addr);
- return;
- }
-
- switch (vif->type) {
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- basic->conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
- break;
- case NL80211_IFTYPE_STATION:
- basic->conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
- break;
- case NL80211_IFTYPE_ADHOC:
- basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
- break;
- default:
- WARN_ON(1);
- break;
- }
+ struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv;
+ struct mt7915_vif *mvif = msta->vif;
- memcpy(basic->peer_addr, sta->addr, ETH_ALEN);
- basic->aid = cpu_to_le16(sta->aid);
- basic->qos = sta->wme;
+ return mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
+ MCU_EXT_CMD(STA_REC_UPDATE),
+ enable, false);
}
static void
mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
struct ieee80211_vif *vif)
{
- struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct ieee80211_he_cap_elem *elem = &sta->he_cap.he_cap_elem;
- enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
- const u16 *mcs_mask = msta->vif->bitrate_mask.control[band].he_mcs;
+ struct ieee80211_he_mcs_nss_supp mcs_map;
struct sta_rec_he *he;
struct tlv *tlv;
u32 cap = 0;
@@ -1352,7 +797,7 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
if (!sta->he_cap.has_he)
return;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he));
he = (struct sta_rec_he *)tlv;
@@ -1376,8 +821,9 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G))
cap |= STA_REC_HE_CAP_BW20_RU242_SUPPORT;
- if (mvif->cap.ldpc && (elem->phy_cap_info[1] &
- IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
+ if (mvif->cap.he_ldpc &&
+ (elem->phy_cap_info[1] &
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
cap |= STA_REC_HE_CAP_LDPC;
if (elem->phy_cap_info[1] &
@@ -1434,22 +880,23 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
he->he_cap = cpu_to_le32(cap);
+ mcs_map = sta->he_cap.he_mcs_nss_supp;
switch (sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (elem->phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
mt7915_mcu_set_sta_he_mcs(sta,
&he->max_nss_mcs[CMD_HE_MCS_BW8080],
- mcs_mask);
+ le16_to_cpu(mcs_map.rx_mcs_80p80));
mt7915_mcu_set_sta_he_mcs(sta,
&he->max_nss_mcs[CMD_HE_MCS_BW160],
- mcs_mask);
+ le16_to_cpu(mcs_map.rx_mcs_160));
fallthrough;
default:
mt7915_mcu_set_sta_he_mcs(sta,
&he->max_nss_mcs[CMD_HE_MCS_BW80],
- mcs_mask);
+ le16_to_cpu(mcs_map.rx_mcs_80));
break;
}
@@ -1480,38 +927,6 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
}
static void
-mt7915_mcu_sta_uapsd_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
- struct ieee80211_vif *vif)
-{
- struct sta_rec_uapsd *uapsd;
- struct tlv *tlv;
-
- if (vif->type != NL80211_IFTYPE_AP || !sta->wme)
- return;
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_APPS, sizeof(*uapsd));
- uapsd = (struct sta_rec_uapsd *)tlv;
-
- if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) {
- uapsd->dac_map |= BIT(3);
- uapsd->tac_map |= BIT(3);
- }
- if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) {
- uapsd->dac_map |= BIT(2);
- uapsd->tac_map |= BIT(2);
- }
- if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) {
- uapsd->dac_map |= BIT(1);
- uapsd->tac_map |= BIT(1);
- }
- if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) {
- uapsd->dac_map |= BIT(0);
- uapsd->tac_map |= BIT(0);
- }
- uapsd->max_sp = sta->max_sp;
-}
-
-static void
mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
struct ieee80211_vif *vif)
{
@@ -1524,19 +939,19 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
vif->type != NL80211_IFTYPE_AP)
return;
- if (!sta->vht_cap.vht_supported)
- return;
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru));
muru = (struct sta_rec_muru *)tlv;
muru->cfg.mimo_dl_en = mvif->cap.he_mu_ebfer ||
mvif->cap.vht_mu_ebfer ||
mvif->cap.vht_mu_ebfee;
+ muru->cfg.mimo_ul_en = true;
+ muru->cfg.ofdma_dl_en = true;
- muru->mimo_dl.vht_mu_bfee =
- !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+ if (sta->vht_cap.vht_supported)
+ muru->mimo_dl.vht_mu_bfee =
+ !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
if (!sta->he_cap.has_he)
return;
@@ -1544,13 +959,11 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
muru->mimo_dl.partial_bw_dl_mimo =
HE_PHY(CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO, elem->phy_cap_info[6]);
- muru->cfg.mimo_ul_en = true;
muru->mimo_ul.full_ul_mimo =
HE_PHY(CAP2_UL_MU_FULL_MU_MIMO, elem->phy_cap_info[2]);
muru->mimo_ul.partial_ul_mimo =
HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]);
- muru->cfg.ofdma_dl_en = true;
muru->ofdma_dl.punc_pream_rx =
HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]);
muru->ofdma_dl.he_20m_in_40m_2g =
@@ -1574,7 +987,10 @@ mt7915_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
struct sta_rec_ht *ht;
struct tlv *tlv;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
+ if (!sta->ht_cap.ht_supported)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
ht = (struct sta_rec_ht *)tlv;
ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
@@ -1589,7 +1005,7 @@ mt7915_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
if (!sta->vht_cap.vht_supported)
return;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
vht = (struct sta_rec_vht *)tlv;
vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
@@ -1598,8 +1014,8 @@ mt7915_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
}
static void
-mt7915_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+mt7915_mcu_sta_amsdu_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct sta_rec_amsdu *amsdu;
@@ -1612,96 +1028,27 @@ mt7915_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
if (!sta->max_amsdu_len)
return;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
amsdu = (struct sta_rec_amsdu *)tlv;
amsdu->max_amsdu_num = 8;
amsdu->amsdu_en = true;
- amsdu->max_mpdu_size = sta->max_amsdu_len >=
- IEEE80211_MAX_MPDU_LEN_VHT_7991;
msta->wcid.amsdu = true;
-}
-
-static void
-mt7915_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
- void *sta_wtbl, void *wtbl_tlv)
-{
- struct wtbl_smps *smps;
- struct tlv *tlv;
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
- wtbl_tlv, sta_wtbl);
- smps = (struct wtbl_smps *)tlv;
- smps->smps = (sta->smps_mode == IEEE80211_SMPS_DYNAMIC);
-}
-
-static void
-mt7915_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, void *sta_wtbl,
- void *wtbl_tlv)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct wtbl_ht *ht = NULL;
- struct tlv *tlv;
-
- /* wtbl ht */
- if (sta->ht_cap.ht_supported) {
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
- wtbl_tlv, sta_wtbl);
- ht = (struct wtbl_ht *)tlv;
- ht->ldpc = mvif->cap.ldpc &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
- ht->af = sta->ht_cap.ampdu_factor;
- ht->mm = sta->ht_cap.ampdu_density;
- ht->ht = true;
- }
-
- /* wtbl vht */
- if (sta->vht_cap.vht_supported) {
- struct wtbl_vht *vht;
- u8 af;
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_VHT, sizeof(*vht),
- wtbl_tlv, sta_wtbl);
- vht = (struct wtbl_vht *)tlv;
- vht->ldpc = mvif->cap.ldpc &&
- (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
- vht->vht = true;
-
- af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
- sta->vht_cap.cap);
- if (ht)
- ht->af = max_t(u8, ht->af, af);
- }
-
- mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_tlv);
-}
-
-static void
-mt7915_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- void *sta_wtbl, void *wtbl_tlv)
-{
- struct mt7915_sta *msta;
- struct wtbl_hdr_trans *htr = NULL;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_HDR_TRANS, sizeof(*htr),
- wtbl_tlv, sta_wtbl);
- htr = (struct wtbl_hdr_trans *)tlv;
- htr->no_rx_trans = true;
- if (vif->type == NL80211_IFTYPE_STATION)
- htr->to_ds = true;
- else
- htr->from_ds = true;
-
- if (!sta)
+ switch (sta->max_amsdu_len) {
+ case IEEE80211_MAX_MPDU_LEN_VHT_11454:
+ if (!is_mt7915(&dev->mt76)) {
+ amsdu->max_mpdu_size =
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+ return;
+ }
+ fallthrough;
+ case IEEE80211_MAX_MPDU_LEN_HT_7935:
+ case IEEE80211_MAX_MPDU_LEN_VHT_7991:
+ amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
+ return;
+ default:
+ amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
return;
-
- msta = (struct mt7915_sta *)sta->drv_priv;
- htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
- if (test_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags)) {
- htr->to_ds = true;
- htr->from_ds = true;
}
}
@@ -1712,48 +1059,30 @@ mt7915_mcu_sta_wtbl_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_sta *msta;
struct wtbl_req_hdr *wtbl_hdr;
+ struct mt76_wcid *wcid;
struct tlv *tlv;
msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta;
+ wcid = sta ? &msta->wcid : NULL;
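+ /* the per-vif entry carries no station, so hdr_trans gets a NULL wcid */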
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
- wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
- tlv, &skb);
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
+ WTBL_RESET_AND_SET, tlv,
+ &skb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
- mt7915_mcu_wtbl_generic_tlv(skb, vif, sta, tlv, wtbl_hdr);
- mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, tlv, wtbl_hdr);
-
+ mt76_connac_mcu_wtbl_generic_tlv(&dev->mt76, skb, vif, sta, tlv,
+ wtbl_hdr);
+ mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, wcid, tlv, wtbl_hdr);
if (sta)
- mt7915_mcu_wtbl_ht_tlv(skb, vif, sta, tlv, wtbl_hdr);
+ mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, skb, sta, tlv,
+ wtbl_hdr, mvif->cap.ht_ldpc,
+ mvif->cap.vht_ldpc);
return 0;
}
-int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- struct wtbl_req_hdr *wtbl_hdr;
- struct sk_buff *skb;
-
- skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
- MT76_CONNAC_WTBL_UPDATE_MAX_SIZE);
- if (!skb)
- return -ENOMEM;
-
- wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb);
- if (IS_ERR(wtbl_hdr))
- return PTR_ERR(wtbl_hdr);
-
- mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, NULL, wtbl_hdr);
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD(WTBL_UPDATE),
- true);
-}
-
static inline bool
mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool bfee)
@@ -1870,7 +1199,8 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
{
struct ieee80211_sta_he_cap *pc = &sta->he_cap;
struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
- const struct ieee80211_sta_he_cap *vc = mt7915_get_he_phy_cap(phy, vif);
+ const struct ieee80211_sta_he_cap *vc =
+ mt76_connac_get_he_phy_cap(phy->mt76, vif);
const struct ieee80211_he_cap_elem *ve = &vc->he_cap_elem;
u16 mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80);
u8 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
@@ -1928,8 +1258,7 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct mt7915_phy *phy =
- mvif->mt76.band_idx ? mt7915_ext_phy(dev) : &dev->phy;
+ struct mt7915_phy *phy = mvif->phy;
int tx_ant = hweight8(phy->mt76->chainmask) - 1;
struct sta_rec_bf *bf;
struct tlv *tlv;
@@ -1941,11 +1270,14 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
};
bool ebf;
+ if (!(sta->ht_cap.ht_supported || sta->he_cap.has_he))
+ return;
+
ebf = mt7915_is_ebf_supported(phy, vif, sta, false);
if (!ebf && !dev->ibf)
return;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
bf = (struct sta_rec_bf *)tlv;
/* he: eBF only, in accordance with spec
@@ -1995,17 +1327,19 @@ mt7915_mcu_sta_bfee_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct mt7915_phy *phy =
- mvif->mt76.band_idx ? mt7915_ext_phy(dev) : &dev->phy;
+ struct mt7915_phy *phy = mvif->phy;
int tx_ant = hweight8(phy->mt76->chainmask) - 1;
struct sta_rec_bfee *bfee;
struct tlv *tlv;
u8 nrow = 0;
+ if (!(sta->vht_cap.vht_supported || sta->he_cap.has_he))
+ return;
+
if (!mt7915_is_ebf_supported(phy, vif, sta, true))
return;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee));
bfee = (struct sta_rec_bfee *)tlv;
if (sta->he_cap.has_he) {
@@ -2050,13 +1384,13 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
struct sta_rec_ra_fixed *ra;
struct sk_buff *skb;
struct tlv *tlv;
- int len = sizeof(struct sta_req_hdr) + sizeof(*ra);
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra));
ra = (struct sta_rec_ra_fixed *)tlv;
switch (field) {
@@ -2091,19 +1425,19 @@ int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct sk_buff *skb;
int ret;
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
- sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
-
- wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
- &skb);
+ sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
+ sizeof(struct tlv));
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
+ WTBL_SET, sta_wtbl, &skb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
- mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr);
+ mt76_connac_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr);
ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(STA_REC_UPDATE), true);
@@ -2134,9 +1468,12 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
phy.sgi |= gi << (i << (_he)); \
phy.he_ltf |= mask->control[band].he_ltf << (i << (_he));\
} \
- for (i = 0; i < ARRAY_SIZE(mask->control[band]._mcs); i++) \
- nrates += hweight16(mask->control[band]._mcs[i]); \
- phy.mcs = ffs(mask->control[band]._mcs[0]) - 1; \
+ for (i = 0; i < ARRAY_SIZE(mask->control[band]._mcs); i++) { \
+ if (!mask->control[band]._mcs[i]) \
+ continue; \
+ nrates += hweight16(mask->control[band]._mcs[i]); \
+ phy.mcs = ffs(mask->control[band]._mcs[i]) - 1; \
+ } \
} while (0)
if (sta->he_cap.has_he) {
@@ -2204,7 +1541,8 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
+ struct mt76_phy *mphy = mvif->phy->mt76;
+ struct cfg80211_chan_def *chandef = &mphy->chandef;
struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask;
enum nl80211_band band = chandef->chan->band;
struct sta_rec_ra *ra;
@@ -2212,15 +1550,16 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
u32 supp_rate = sta->supp_rates[band];
u32 cap = sta->wme ? STA_CAP_WMM : 0;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra));
ra = (struct sta_rec_ra *)tlv;
ra->valid = true;
ra->auto_rate = true;
- ra->phy_mode = mt7915_get_phy_mode(vif, sta);
+ ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, sta);
ra->channel = chandef->chan->hw_value;
ra->bw = sta->bandwidth;
ra->phy.bw = sta->bandwidth;
+ ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->smps_mode);
if (supp_rate) {
supp_rate &= mask->control[band].legacy;
@@ -2254,7 +1593,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
cap |= STA_CAP_TX_STBC;
if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
cap |= STA_CAP_RX_STBC;
- if (mvif->cap.ldpc &&
+ if (mvif->cap.ht_ldpc &&
(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
cap |= STA_CAP_LDPC;
@@ -2280,7 +1619,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
cap |= STA_CAP_VHT_TX_STBC;
if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
cap |= STA_CAP_VHT_RX_STBC;
- if (mvif->cap.ldpc &&
+ if (mvif->cap.vht_ldpc &&
(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
cap |= STA_CAP_VHT_LDPC;
@@ -2291,6 +1630,10 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
if (sta->he_cap.has_he) {
ra->supp_mode |= MODE_HE;
cap |= STA_CAP_HE;
+
+ if (sta->he_6ghz_capa.capa)
+ ra->af = le16_get_bits(sta->he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
}
ra->sta_cap = cpu_to_le32(cap);
@@ -2304,8 +1647,8 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct sk_buff *skb;
int ret;
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -2313,7 +1656,7 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
* once dev->rc_work changes the settings, the driver should also
* update sta_rec_he here.
*/
- if (sta->he_cap.has_he && changed)
+ if (changed)
mt7915_mcu_sta_he_tlv(skb, sta, vif);
/* sta_rec_ra accommodates BW, NSS and only MCS range format
@@ -2371,18 +1714,18 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta;
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
/* starec basic */
- mt7915_mcu_sta_basic_tlv(skb, vif, sta, enable);
+ mt76_connac_mcu_sta_basic_tlv(skb, vif, sta, enable, true);
if (!enable)
goto out;
/* tag order is in accordance with firmware dependency. */
- if (sta && sta->ht_cap.ht_supported) {
+ if (sta) {
/* starec bfer */
mt7915_mcu_sta_bfer_tlv(dev, skb, vif, sta);
/* starec ht */
@@ -2390,16 +1733,18 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
/* starec vht */
mt7915_mcu_sta_vht_tlv(skb, sta);
/* starec uapsd */
- mt7915_mcu_sta_uapsd_tlv(skb, sta, vif);
+ mt76_connac_mcu_sta_uapsd(skb, vif, sta);
}
ret = mt7915_mcu_sta_wtbl_tlv(dev, skb, vif, sta);
- if (ret)
+ if (ret) {
+ dev_kfree_skb(skb);
return ret;
+ }
- if (sta && sta->ht_cap.ht_supported) {
+ if (sta) {
/* starec amsdu */
- mt7915_mcu_sta_amsdu_tlv(skb, vif, sta);
+ mt7915_mcu_sta_amsdu_tlv(dev, skb, vif, sta);
/* starec he */
mt7915_mcu_sta_he_tlv(skb, sta, vif);
/* starec muru */
@@ -2409,8 +1754,10 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
}
ret = mt7915_mcu_add_group(dev, vif, sta);
- if (ret)
+ if (ret) {
+ dev_kfree_skb(skb);
return ret;
+ }
out:
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(STA_REC_UPDATE), true);
@@ -2479,6 +1826,55 @@ mt7915_mcu_beacon_cntdwn(struct ieee80211_vif *vif, struct sk_buff *rskb,
}
static void
+mt7915_mcu_beacon_mbss(struct sk_buff *rskb, struct sk_buff *skb,
+ struct ieee80211_vif *vif, struct bss_info_bcn *bcn,
+ struct ieee80211_mutable_offsets *offs)
+{
+ struct bss_info_bcn_mbss *mbss;
+ const struct element *elem;
+ struct tlv *tlv;
+
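+ /* nothing to do unless the interface advertises multiple BSSIDs */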
+ if (!vif->bss_conf.bssid_indicator)
+ return;
+
+ tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_MBSSID,
+ sizeof(*mbss), &bcn->sub_ntlv,
+ &bcn->len);
+
+ mbss = (struct bss_info_bcn_mbss *)tlv;
+ mbss->offset[0] = cpu_to_le16(offs->tim_offset);
+ mbss->bitmap = cpu_to_le32(1);
+
+ for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID,
+ &skb->data[offs->mbssid_off],
+ skb->len - offs->mbssid_off) {
+ const struct element *sub_elem;
+
+ if (elem->datalen < 2)
+ continue;
+
+ for_each_element(sub_elem, elem->data + 1, elem->datalen - 1) {
+ const u8 *data;
+
+ if (sub_elem->id || sub_elem->datalen < 4)
+ continue; /* not a valid BSS profile */
+
+ /* Find WLAN_EID_MULTI_BSSID_IDX
+ * in the merged nontransmitted profile
+ */
+ data = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX,
+ sub_elem->data,
+ sub_elem->datalen);
+ if (!data || data[1] < 1 || !data[2])
+ continue;
+
+ mbss->offset[data[2]] = cpu_to_le16(data - skb->data);
+ mbss->bitmap |= cpu_to_le32(BIT(data[2]));
+ }
+ }
+}
+
+static void
mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct sk_buff *rskb, struct sk_buff *skb,
struct bss_info_bcn *bcn,
@@ -2540,8 +1936,8 @@ mt7915_mcu_beacon_check_caps(struct mt7915_phy *phy, struct ieee80211_vif *vif,
len);
if (ie && ie[1] >= sizeof(*ht)) {
ht = (void *)(ie + 2);
- vc->ldpc |= !!(le16_to_cpu(ht->cap_info) &
- IEEE80211_HT_CAP_LDPC_CODING);
+ vc->ht_ldpc = !!(le16_to_cpu(ht->cap_info) &
+ IEEE80211_HT_CAP_LDPC_CODING);
}
ie = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, mgmt->u.beacon.variable,
@@ -2552,7 +1948,7 @@ mt7915_mcu_beacon_check_caps(struct mt7915_phy *phy, struct ieee80211_vif *vif,
vht = (void *)(ie + 2);
bc = le32_to_cpu(vht->vht_cap_info);
- vc->ldpc |= !!(bc & IEEE80211_VHT_CAP_RXLDPC);
+ vc->vht_ldpc = !!(bc & IEEE80211_VHT_CAP_RXLDPC);
vc->vht_su_ebfer =
(bc & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) &&
(pc & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
@@ -2571,11 +1967,13 @@ mt7915_mcu_beacon_check_caps(struct mt7915_phy *phy, struct ieee80211_vif *vif,
mgmt->u.beacon.variable, len);
if (ie && ie[1] >= sizeof(*he) + 1) {
const struct ieee80211_sta_he_cap *pc =
- mt7915_get_he_phy_cap(phy, vif);
+ mt76_connac_get_he_phy_cap(phy->mt76, vif);
const struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
he = (void *)(ie + 3);
+ vc->he_ldpc =
+ HE_PHY(CAP1_LDPC_CODING_IN_PAYLOAD, pe->phy_cap_info[1]);
vc->he_su_ebfer =
HE_PHY(CAP3_SU_BEAMFORMER, he->phy_cap_info[3]) &&
HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]);
@@ -2601,12 +1999,17 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
struct tlv *tlv;
struct bss_info_bcn *bcn;
int len = MT7915_BEACON_UPDATE_SIZE + MAX_BEACON_SIZE;
+ bool ext_phy = phy != &dev->phy;
+
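+ /* nontransmitted MBSSID interfaces are carried inside the owner's beacon */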
+ if (vif->bss_conf.nontransmitted)
+ return 0;
- rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
+ rskb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ NULL, len);
if (IS_ERR(rskb))
return PTR_ERR(rskb);
- tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
+ tlv = mt76_connac_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
bcn = (struct bss_info_bcn *)tlv;
bcn->enable = en;
@@ -2623,15 +2026,15 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
return -EINVAL;
}
- if (mvif->mt76.band_idx) {
+ if (ext_phy) {
info = IEEE80211_SKB_CB(skb);
info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
}
mt7915_mcu_beacon_check_caps(phy, vif, skb);
- /* TODO: subtag - 11v MBSSID */
mt7915_mcu_beacon_cntdwn(vif, rskb, skb, bcn, &offs);
+ mt7915_mcu_beacon_mbss(rskb, skb, vif, bcn, &offs);
mt7915_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs);
dev_kfree_skb(skb);
@@ -2640,91 +2043,20 @@ out:
MCU_EXT_CMD(BSS_INFO_UPDATE), true);
}
-static int mt7915_mcu_start_firmware(struct mt7915_dev *dev, u32 addr,
- u32 option)
-{
- struct {
- __le32 option;
- __le32 addr;
- } req = {
- .option = cpu_to_le32(option),
- .addr = cpu_to_le32(addr),
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD(FW_START_REQ), &req,
- sizeof(req), true);
-}
-
-static int mt7915_mcu_restart(struct mt76_dev *dev)
-{
- struct {
- u8 power_mode;
- u8 rsv[3];
- } req = {
- .power_mode = 1,
- };
-
- return mt76_mcu_send_msg(dev, MCU_CMD(NIC_POWER_CTRL), &req,
- sizeof(req), false);
-}
-
-static int mt7915_mcu_patch_sem_ctrl(struct mt7915_dev *dev, bool get)
+static int mt7915_driver_own(struct mt7915_dev *dev, u8 band)
{
- struct {
- __le32 op;
- } req = {
- .op = cpu_to_le32(get ? PATCH_SEM_GET : PATCH_SEM_RELEASE),
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD(PATCH_SEM_CONTROL), &req,
- sizeof(req), true);
-}
-
-static int mt7915_mcu_start_patch(struct mt7915_dev *dev)
-{
- struct {
- u8 check_crc;
- u8 reserved[3];
- } req = {
- .check_crc = 0,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD(PATCH_FINISH_REQ), &req,
- sizeof(req), true);
-}
-
-static int mt7915_driver_own(struct mt7915_dev *dev)
-{
- mt76_wr(dev, MT_TOP_LPCR_HOST_BAND0, MT_TOP_LPCR_HOST_DRV_OWN);
- if (!mt76_poll_msec(dev, MT_TOP_LPCR_HOST_BAND0,
- MT_TOP_LPCR_HOST_FW_OWN, 0, 500)) {
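+ /* claim driver ownership and wait for the fw own bit to clear */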
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(band), MT_TOP_LPCR_HOST_DRV_OWN);
+ if (!mt76_poll_msec(dev, MT_TOP_LPCR_HOST_BAND(band),
+ MT_TOP_LPCR_HOST_FW_OWN_STAT, 0, 500)) {
dev_err(dev->mt76.dev, "Timeout for driver own\n");
return -EIO;
}
- return 0;
-}
+ /* clear the irq once driver ownership is acquired */
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND_IRQ_STAT(band),
+ MT_TOP_LPCR_HOST_BAND_STAT);
-static int mt7915_mcu_init_download(struct mt7915_dev *dev, u32 addr,
- u32 len, u32 mode)
-{
- struct {
- __le32 addr;
- __le32 len;
- __le32 mode;
- } req = {
- .addr = cpu_to_le32(addr),
- .len = cpu_to_le32(len),
- .mode = cpu_to_le32(mode),
- };
- int attr;
-
- if (req.addr == cpu_to_le32(MCU_PATCH_ADDRESS))
- attr = MCU_CMD(PATCH_START_REQ);
- else
- attr = MCU_CMD(TARGET_ADDRESS_LEN_REQ);
-
- return mt76_mcu_send_msg(&dev->mt76, attr, &req, sizeof(req), true);
+ return 0;
}
static int mt7915_load_patch(struct mt7915_dev *dev)
@@ -2733,7 +2065,7 @@ static int mt7915_load_patch(struct mt7915_dev *dev)
const struct firmware *fw = NULL;
int i, ret, sem;
- sem = mt7915_mcu_patch_sem_ctrl(dev, 1);
+ sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, 1);
switch (sem) {
case PATCH_IS_DL:
return 0;
@@ -2744,7 +2076,8 @@ static int mt7915_load_patch(struct mt7915_dev *dev)
return -EAGAIN;
}
- ret = request_firmware(&fw, MT7915_ROM_PATCH, dev->mt76.dev);
+ ret = request_firmware(&fw, fw_name_var(dev, ROM_PATCH),
+ dev->mt76.dev);
if (ret)
goto out;
@@ -2776,8 +2109,8 @@ static int mt7915_load_patch(struct mt7915_dev *dev)
len = be32_to_cpu(sec->info.len);
dl = fw->data + be32_to_cpu(sec->offs);
- ret = mt7915_mcu_init_download(dev, addr, len,
- DL_MODE_NEED_RSP);
+ ret = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
+ DL_MODE_NEED_RSP);
if (ret) {
dev_err(dev->mt76.dev, "Download request failed\n");
goto out;
@@ -2791,12 +2124,12 @@ static int mt7915_load_patch(struct mt7915_dev *dev)
}
}
- ret = mt7915_mcu_start_patch(dev);
+ ret = mt76_connac_mcu_start_patch(&dev->mt76);
if (ret)
dev_err(dev->mt76.dev, "Failed to start patch\n");
out:
- sem = mt7915_mcu_patch_sem_ctrl(dev, 0);
+ sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, 0);
switch (sem) {
case PATCH_REL_SEM_SUCCESS:
break;
@@ -2810,20 +2143,6 @@ out:
return ret;
}
-static u32 mt7915_mcu_gen_dl_mode(u8 feature_set, bool is_wa)
-{
- u32 ret = 0;
-
- ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
- (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
- ret |= FIELD_PREP(DL_MODE_KEY_IDX,
- FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
- ret |= DL_MODE_NEED_RSP;
- ret |= is_wa ? DL_MODE_WORKING_PDA_CR4 : 0;
-
- return ret;
-}
-
static int
mt7915_mcu_send_ram_firmware(struct mt7915_dev *dev,
const struct mt7915_fw_trailer *hdr,
@@ -2839,14 +2158,16 @@ mt7915_mcu_send_ram_firmware(struct mt7915_dev *dev,
region = (const struct mt7915_fw_region *)((const u8 *)hdr -
(hdr->n_region - i) * sizeof(*region));
- mode = mt7915_mcu_gen_dl_mode(region->feature_set, is_wa);
+ mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
+ region->feature_set, is_wa);
len = le32_to_cpu(region->len);
addr = le32_to_cpu(region->addr);
if (region->feature_set & FW_FEATURE_OVERRIDE_ADDR)
override = addr;
- err = mt7915_mcu_init_download(dev, addr, len, mode);
+ err = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
+ mode);
if (err) {
dev_err(dev->mt76.dev, "Download request failed\n");
return err;
@@ -2868,7 +2189,7 @@ mt7915_mcu_send_ram_firmware(struct mt7915_dev *dev,
if (is_wa)
option |= FW_START_WORKING_PDA_CR4;
- return mt7915_mcu_start_firmware(dev, override, option);
+ return mt76_connac_mcu_start_firmware(&dev->mt76, override, option);
}
static int mt7915_load_ram(struct mt7915_dev *dev)
@@ -2877,7 +2198,8 @@ static int mt7915_load_ram(struct mt7915_dev *dev)
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, MT7915_FIRMWARE_WM, dev->mt76.dev);
+ ret = request_firmware(&fw, fw_name_var(dev, FIRMWARE_WM),
+ dev->mt76.dev);
if (ret)
return ret;
@@ -2901,7 +2223,8 @@ static int mt7915_load_ram(struct mt7915_dev *dev)
release_firmware(fw);
- ret = request_firmware(&fw, MT7915_FIRMWARE_WA, dev->mt76.dev);
+ ret = request_firmware(&fw, fw_name(dev, FIRMWARE_WA),
+ dev->mt76.dev);
if (ret)
return ret;
@@ -2933,10 +2256,36 @@ out:
return ret;
}
+static int
+mt7915_firmware_state(struct mt7915_dev *dev, bool wa)
+{
+ u32 state = FIELD_PREP(MT_TOP_MISC_FW_STATE,
+ wa ? FW_STATE_RDY : FW_STATE_FW_DOWNLOAD);
+
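+ /* wait up to 1s for the MCU to report the expected state */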
+ if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE,
+ state, 1000)) {
+ dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
+ return -EIO;
+ }
+ return 0;
+}
+
static int mt7915_load_firmware(struct mt7915_dev *dev)
{
int ret;
+ /* make sure the fw is in the download state */
+ if (mt7915_firmware_state(dev, false)) {
+ /* restart firmware once */
+ __mt76_mcu_restart(&dev->mt76);
+ ret = mt7915_firmware_state(dev, false);
+ if (ret) {
+ dev_err(dev->mt76.dev,
+ "Firmware is not ready for download\n");
+ return ret;
+ }
+ }
+
ret = mt7915_load_patch(dev);
if (ret)
return ret;
@@ -2945,12 +2294,9 @@ static int mt7915_load_firmware(struct mt7915_dev *dev)
if (ret)
return ret;
- if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE,
- FIELD_PREP(MT_TOP_MISC_FW_STATE,
- FW_STATE_RDY), 1000)) {
- dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
- return -EIO;
- }
+ ret = mt7915_firmware_state(dev, true);
+ if (ret)
+ return ret;
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
@@ -3021,7 +2367,7 @@ int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms)
u8 band_idx;
} req = {
.cmd = cpu_to_le32(MURU_GET_TXC_TX_STATS),
- .band_idx = phy != &dev->phy,
+ .band_idx = phy->band_idx,
};
ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL),
@@ -3110,15 +2456,29 @@ int mt7915_mcu_init(struct mt7915_dev *dev)
.headroom = sizeof(struct mt7915_mcu_txd),
.mcu_skb_send_msg = mt7915_mcu_send_message,
.mcu_parse_response = mt7915_mcu_parse_response,
- .mcu_restart = mt7915_mcu_restart,
+ .mcu_restart = mt76_connac_mcu_restart,
};
int ret;
dev->mt76.mcu_ops = &mt7915_mcu_ops;
- ret = mt7915_driver_own(dev);
+ /* force firmware operation mode into normal state,
+ * which should be set before the firmware download stage.
+ */
+ if (is_mt7915(&dev->mt76))
+ mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
+ else
+ mt76_wr(dev, MT_SWDEF_MODE_MT7916, MT_SWDEF_NORMAL_MODE);
+
+ ret = mt7915_driver_own(dev, 0);
if (ret)
return ret;
+ /* set driver own for band1 when two hifs exist */
+ if (dev->hif2) {
+ ret = mt7915_driver_own(dev, 1);
+ if (ret)
+ return ret;
+ }
ret = mt7915_load_firmware(dev);
if (ret)
@@ -3153,14 +2513,15 @@ int mt7915_mcu_init(struct mt7915_dev *dev)
void mt7915_mcu_exit(struct mt7915_dev *dev)
{
__mt76_mcu_restart(&dev->mt76);
- if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE,
- FIELD_PREP(MT_TOP_MISC_FW_STATE,
- FW_STATE_FW_DOWNLOAD), 1000)) {
+ if (mt7915_firmware_state(dev, false)) {
dev_err(dev->mt76.dev, "Failed to exit mcu\n");
return;
}
- mt76_wr(dev, MT_TOP_LPCR_HOST_BAND0, MT_TOP_LPCR_HOST_FW_OWN);
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(0), MT_TOP_LPCR_HOST_FW_OWN);
+ if (dev->hif2)
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(1),
+ MT_TOP_LPCR_HOST_FW_OWN);
skb_queue_purge(&dev->mt76.mcu.res_q);
}
@@ -3238,26 +2599,6 @@ int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable)
sizeof(req), false);
}
-int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val)
-{
- struct mt7915_dev *dev = phy->dev;
- struct {
- u8 prot_idx;
- u8 band;
- u8 rsv[2];
- __le32 len_thresh;
- __le32 pkt_thresh;
- } __packed req = {
- .prot_idx = 1,
- .band = phy != &dev->phy,
- .len_thresh = cpu_to_le32(val),
- .pkt_thresh = cpu_to_le32(0x2),
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(PROTECT_CTRL), &req,
- sizeof(req), true);
-}
-
int mt7915_mcu_update_edca(struct mt7915_dev *dev, void *param)
{
struct mt7915_mcu_tx *req = (struct mt7915_mcu_tx *)param;
@@ -3303,58 +2644,6 @@ int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif)
return mt7915_mcu_update_edca(dev, &req);
}
-int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter)
-{
-#define ENTER_PM_STATE 1
-#define EXIT_PM_STATE 2
- struct {
- u8 pm_number;
- u8 pm_state;
- u8 bssid[ETH_ALEN];
- u8 dtim_period;
- u8 wlan_idx_lo;
- __le16 bcn_interval;
- __le32 aid;
- __le32 rx_filter;
- u8 band_idx;
- u8 wlan_idx_hi;
- u8 rsv[2];
- __le32 feature;
- u8 omac_idx;
- u8 wmm_idx;
- u8 bcn_loss_cnt;
- u8 bcn_sp_duration;
- } __packed req = {
- .pm_number = 5,
- .pm_state = (enter) ? ENTER_PM_STATE : EXIT_PM_STATE,
- .band_idx = band,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(PM_STATE_CTRL), &req,
- sizeof(req), true);
-}
-
-int mt7915_mcu_rdd_cmd(struct mt7915_dev *dev,
- enum mt7915_rdd_cmd cmd, u8 index,
- u8 rx_sel, u8 val)
-{
- struct {
- u8 ctrl;
- u8 rdd_idx;
- u8 rdd_rx_sel;
- u8 val;
- u8 rsv[4];
- } __packed req = {
- .ctrl = cmd,
- .rdd_idx = index,
- .rdd_rx_sel = rx_sel,
- .val = val,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_CTRL), &req,
- sizeof(req), true);
-}
-
int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val)
{
struct {
@@ -3453,12 +2742,109 @@ int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index,
sizeof(req), true);
}
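+/* program the dedicated background (offchain) rx chain: start, retune or
+ * stop the off-channel monitor used for background radar detection
+ */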
+static int
+mt7915_mcu_background_chain_ctrl(struct mt7915_phy *phy,
+ struct cfg80211_chan_def *chandef,
+ int cmd)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_channel *chan = mphy->chandef.chan;
+ int freq = mphy->chandef.center_freq1;
+ struct mt7915_mcu_background_chain_ctrl req = {
+ .monitor_scan_type = 2, /* simple rx */
+ };
+
+ if (!chandef && cmd != CH_SWITCH_BACKGROUND_SCAN_STOP)
+ return -EINVAL;
+
+ if (!cfg80211_chandef_valid(&mphy->chandef))
+ return -EINVAL;
+
+ switch (cmd) {
+ case CH_SWITCH_BACKGROUND_SCAN_START: {
+ req.chan = chan->hw_value;
+ req.central_chan = ieee80211_frequency_to_channel(freq);
+ req.bw = mt76_connac_chan_bw(&mphy->chandef);
+ req.monitor_chan = chandef->chan->hw_value;
+ req.monitor_central_chan =
+ ieee80211_frequency_to_channel(chandef->center_freq1);
+ req.monitor_bw = mt76_connac_chan_bw(chandef);
+ req.band_idx = phy != &dev->phy;
+ req.scan_mode = 1;
+ break;
+ }
+ case CH_SWITCH_BACKGROUND_SCAN_RUNNING:
+ req.monitor_chan = chandef->chan->hw_value;
+ req.monitor_central_chan =
+ ieee80211_frequency_to_channel(chandef->center_freq1);
+ req.band_idx = phy != &dev->phy;
+ req.scan_mode = 2;
+ break;
+ case CH_SWITCH_BACKGROUND_SCAN_STOP:
+ req.chan = chan->hw_value;
+ req.central_chan = ieee80211_frequency_to_channel(freq);
+ req.bw = mt76_connac_chan_bw(&mphy->chandef);
+ req.tx_stream = hweight8(mphy->antenna_mask);
+ req.rx_stream = mphy->antenna_mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+ req.band = chandef ? chandef->chan->band == NL80211_BAND_5GHZ : 1;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(OFFCH_SCAN_CTRL),
+ &req, sizeof(req), false);
+}
+
+int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
+ struct cfg80211_chan_def *chandef)
+{
+ struct mt7915_dev *dev = phy->dev;
+ int err, region;
+
+ if (!chandef) { /* disable offchain */
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, MT_RX_SEL2,
+ 0, 0);
+ if (err)
+ return err;
+
+ return mt7915_mcu_background_chain_ctrl(phy, NULL,
+ CH_SWITCH_BACKGROUND_SCAN_STOP);
+ }
+
+ err = mt7915_mcu_background_chain_ctrl(phy, chandef,
+ CH_SWITCH_BACKGROUND_SCAN_START);
+ if (err)
+ return err;
+
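+ /* map the nl80211 DFS region onto the firmware RDD region code */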
+ switch (dev->mt76.region) {
+ case NL80211_DFS_ETSI:
+ region = 0;
+ break;
+ case NL80211_DFS_JP:
+ region = 2;
+ break;
+ case NL80211_DFS_FCC:
+ default:
+ region = 1;
+ break;
+ }
+
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, MT_RX_SEL2,
+ 0, region);
+}
+
int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
{
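+ /* firmware band codes (0/1/2) differ from the nl80211 band enum */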
+ static const u8 ch_band[] = {
+ [NL80211_BAND_2GHZ] = 0,
+ [NL80211_BAND_5GHZ] = 1,
+ [NL80211_BAND_6GHZ] = 2,
+ };
struct mt7915_dev *dev = phy->dev;
struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
int freq1 = chandef->center_freq1;
- bool ext_phy = phy != &dev->phy;
struct {
u8 control_ch;
u8 center_ch;
@@ -3479,11 +2865,11 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
} __packed req = {
.control_ch = chandef->chan->hw_value,
.center_ch = ieee80211_frequency_to_channel(freq1),
- .bw = mt7915_mcu_chan_bw(chandef),
+ .bw = mt76_connac_chan_bw(chandef),
.tx_streams_num = hweight8(phy->mt76->antenna_mask),
.rx_streams = phy->mt76->antenna_mask,
- .band_idx = ext_phy,
- .channel_band = chandef->chan->band,
+ .band_idx = phy->band_idx,
+ .channel_band = ch_band[chandef->chan->band],
};
#ifdef CONFIG_NL80211_TESTMODE
@@ -3494,17 +2880,18 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
req.tx_streams_num = fls(phy->mt76->test.tx_antenna_mask);
req.rx_streams = phy->mt76->test.tx_antenna_mask;
- if (ext_phy) {
- req.tx_streams_num = 2;
- req.rx_streams >>= 2;
- }
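+ /* band1 chains sit above the band0 chains in the antenna mask */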
+ if (phy != &dev->phy)
+ req.rx_streams >>= dev->chainshift;
}
#endif
- if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
+ dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
+ req.switch_reason = CH_SWITCH_NORMAL;
+ else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
- else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
- chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+ else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
+ NL80211_IFTYPE_AP))
req.switch_reason = CH_SWITCH_DFS;
else
req.switch_reason = CH_SWITCH_NORMAL;
@@ -3527,7 +2914,8 @@ static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev)
#define PAGE_IDX_MASK GENMASK(4, 2)
#define PER_PAGE_SIZE 0x400
struct mt7915_mcu_eeprom req = { .buffer_mode = EE_MODE_BUFFER };
- u8 total = DIV_ROUND_UP(MT7915_EEPROM_SIZE, PER_PAGE_SIZE);
+ u16 eeprom_size = mt7915_eeprom_size(dev);
+ u8 total = DIV_ROUND_UP(eeprom_size, PER_PAGE_SIZE);
u8 *eep = (u8 *)dev->mt76.eeprom.data;
int eep_len;
int i;
@@ -3536,8 +2924,8 @@ static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev)
struct sk_buff *skb;
int ret;
- if (i == total - 1 && !!(MT7915_EEPROM_SIZE % PER_PAGE_SIZE))
- eep_len = MT7915_EEPROM_SIZE % PER_PAGE_SIZE;
+ if (i == total - 1 && !!(eeprom_size % PER_PAGE_SIZE))
+ eep_len = eeprom_size % PER_PAGE_SIZE;
else
eep_len = PER_PAGE_SIZE;
@@ -3770,19 +3158,26 @@ int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy)
int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
{
/* strict order */
- static const enum mt7915_chan_mib_offs offs[] = {
- MIB_BUSY_TIME, MIB_TX_TIME, MIB_RX_TIME, MIB_OBSS_AIRTIME
+ static const u32 offs[] = {
+ MIB_BUSY_TIME, MIB_TX_TIME, MIB_RX_TIME, MIB_OBSS_AIRTIME,
+ MIB_BUSY_TIME_V2, MIB_TX_TIME_V2, MIB_RX_TIME_V2,
+ MIB_OBSS_AIRTIME_V2
};
struct mt76_channel_state *state = phy->mt76->chan_state;
struct mt76_channel_state *state_ts = &phy->state_ts;
struct mt7915_dev *dev = phy->dev;
struct mt7915_mcu_mib *res, req[4];
struct sk_buff *skb;
- int i, ret;
+ int i, ret, start = 0, ofs = 20;
+
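+ /* non-mt7915 chips use the v2 MIB offsets and omit the 20-byte prefix */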
+ if (!is_mt7915(&dev->mt76)) {
+ start = 4;
+ ofs = 0;
+ }
for (i = 0; i < 4; i++) {
req[i].band = cpu_to_le32(phy != &dev->phy);
- req[i].offs = cpu_to_le32(offs[i]);
+ req[i].offs = cpu_to_le32(offs[i + start]);
}
ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(GET_MIB_INFO),
@@ -3790,7 +3185,7 @@ int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
if (ret)
return ret;
- res = (struct mt7915_mcu_mib *)(skb->data + 20);
+ res = (struct mt7915_mcu_mib *)(skb->data + ofs);
if (chan_switch)
goto out;
@@ -3842,7 +3237,7 @@ int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
u8 rsv[2];
} __packed req = {
.ctrl = {
- .band_idx = phy != &dev->phy,
+ .band_idx = phy->band_idx,
},
};
int level;
@@ -4137,6 +3532,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
case MT_PHY_TYPE_OFDM:
if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
sband = &mphy->sband_5g.sband;
+ else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
+ sband = &mphy->sband_6g.sband;
else
sband = &mphy->sband_2g.sband;
@@ -4210,11 +3607,13 @@ int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vi
struct sk_buff *skb;
struct tlv *tlv;
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ NULL, len);
if (IS_ERR(skb))
return PTR_ERR(skb);
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_BSS_COLOR, sizeof(*bss_color));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BSS_COLOR,
+ sizeof(*bss_color));
bss_color = (struct bss_info_color *)tlv;
bss_color->disable = !he_bss_color->enabled;
bss_color->color = he_bss_color->color;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index 92268e696931..960072a44222 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -79,6 +79,15 @@ struct mt7915_mcu_csa_notify {
u8 rsv;
} __packed;
+struct mt7915_mcu_bcc_notify {
+ struct mt7915_mcu_rxd rxd;
+
+ u8 band_idx;
+ u8 omac_idx;
+ u8 cca_count;
+ u8 rsv;
+} __packed;
+
struct mt7915_mcu_rdd_report {
struct mt7915_mcu_rxd rxd;
@@ -131,6 +140,29 @@ struct mt7915_mcu_rdd_report {
} hw_pulse[32];
} __packed;
+struct mt7915_mcu_background_chain_ctrl {
+ u8 chan; /* primary channel */
+ u8 central_chan; /* central channel */
+ u8 bw;
+ u8 tx_stream;
+ u8 rx_stream;
+
+ u8 monitor_chan; /* monitor channel */
+ u8 monitor_central_chan;/* monitor central channel */
+ u8 monitor_bw;
+ u8 monitor_tx_stream;
+ u8 monitor_rx_stream;
+
+ u8 scan_mode; /* 0: ScanStop
+ * 1: ScanStart
+ * 2: ScanRunning
+ */
+ u8 band_idx; /* DBDC */
+ u8 monitor_scan_type;
+ u8 band; /* 0: 2.4GHz, 1: 5GHz */
+ u8 rsv[2];
+} __packed;
+
struct mt7915_mcu_eeprom {
u8 buffer_mode;
u8 format;
@@ -161,10 +193,16 @@ struct mt7915_mcu_mib {
} __packed;
enum mt7915_chan_mib_offs {
+ /* mt7915 */
MIB_BUSY_TIME = 14,
MIB_TX_TIME = 81,
MIB_RX_TIME,
- MIB_OBSS_AIRTIME = 86
+ MIB_OBSS_AIRTIME = 86,
+ /* mt7916 */
+ MIB_BUSY_TIME_V2 = 0,
+ MIB_TX_TIME_V2 = 6,
+ MIB_RX_TIME_V2 = 8,
+ MIB_OBSS_AIRTIME_V2 = 490
};
struct edca {
@@ -266,29 +304,6 @@ enum mcu_mmps_mode {
MCU_MMPS_DISABLE,
};
-#define STA_TYPE_STA BIT(0)
-#define STA_TYPE_AP BIT(1)
-#define STA_TYPE_ADHOC BIT(2)
-#define STA_TYPE_WDS BIT(4)
-#define STA_TYPE_BC BIT(5)
-
-#define NETWORK_INFRA BIT(16)
-#define NETWORK_P2P BIT(17)
-#define NETWORK_IBSS BIT(18)
-#define NETWORK_WDS BIT(21)
-
-#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA)
-#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA)
-#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P)
-#define CONNECTION_P2P_GO (STA_TYPE_AP | NETWORK_P2P)
-#define CONNECTION_IBSS_ADHOC (STA_TYPE_ADHOC | NETWORK_IBSS)
-#define CONNECTION_WDS (STA_TYPE_WDS | NETWORK_WDS)
-#define CONNECTION_INFRA_BC (STA_TYPE_BC | NETWORK_INFRA)
-
-#define CONN_STATE_DISCONNECT 0
-#define CONN_STATE_CONNECT 1
-#define CONN_STATE_PORT_SECURE 2
-
enum {
SCS_SEND_DATA,
SCS_SET_MANUAL_PD_TH,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index 1f6ba306c850..5062e0d8cae4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -1,102 +1,441 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+
#include "mt7915.h"
+#include "mac.h"
+#include "../trace.h"
+
+static const u32 mt7915_reg[] = {
+ [INT_SOURCE_CSR] = 0xd7010,
+ [INT_MASK_CSR] = 0xd7014,
+ [INT1_SOURCE_CSR] = 0xd7088,
+ [INT1_MASK_CSR] = 0xd708c,
+ [INT_MCU_CMD_SOURCE] = 0xd51f0,
+ [INT_MCU_CMD_EVENT] = 0x3108,
+ [WFDMA0_ADDR] = 0xd4000,
+ [WFDMA0_PCIE1_ADDR] = 0xd8000,
+ [WFDMA_EXT_CSR_ADDR] = 0xd7000,
+ [CBTOP1_PHY_END] = 0x77ffffff,
+ [INFRA_MCU_ADDR_END] = 0x7c3fffff,
+};
+
+static const u32 mt7916_reg[] = {
+ [INT_SOURCE_CSR] = 0xd4200,
+ [INT_MASK_CSR] = 0xd4204,
+ [INT1_SOURCE_CSR] = 0xd8200,
+ [INT1_MASK_CSR] = 0xd8204,
+ [INT_MCU_CMD_SOURCE] = 0xd41f0,
+ [INT_MCU_CMD_EVENT] = 0x2108,
+ [WFDMA0_ADDR] = 0xd4000,
+ [WFDMA0_PCIE1_ADDR] = 0xd8000,
+ [WFDMA_EXT_CSR_ADDR] = 0xd7000,
+ [CBTOP1_PHY_END] = 0x7fffffff,
+ [INFRA_MCU_ADDR_END] = 0x7c085fff,
+};
+
+static const u32 mt7986_reg[] = {
+ [INT_SOURCE_CSR] = 0x24200,
+ [INT_MASK_CSR] = 0x24204,
+ [INT1_SOURCE_CSR] = 0x28200,
+ [INT1_MASK_CSR] = 0x28204,
+ [INT_MCU_CMD_SOURCE] = 0x241f0,
+ [INT_MCU_CMD_EVENT] = 0x54000108,
+ [WFDMA0_ADDR] = 0x24000,
+ [WFDMA0_PCIE1_ADDR] = 0x28000,
+ [WFDMA_EXT_CSR_ADDR] = 0x27000,
+ [CBTOP1_PHY_END] = 0x7fffffff,
+ [INFRA_MCU_ADDR_END] = 0x7c085fff,
+};
+
+static const u32 mt7915_offs[] = {
+ [TMAC_CDTR] = 0x090,
+ [TMAC_ODTR] = 0x094,
+ [TMAC_ATCR] = 0x098,
+ [TMAC_TRCR0] = 0x09c,
+ [TMAC_ICR0] = 0x0a4,
+ [TMAC_ICR1] = 0x0b4,
+ [TMAC_CTCR0] = 0x0f4,
+ [TMAC_TFCR0] = 0x1e0,
+ [MDP_BNRCFR0] = 0x070,
+ [MDP_BNRCFR1] = 0x074,
+ [ARB_DRNGR0] = 0x194,
+ [ARB_SCR] = 0x080,
+ [RMAC_MIB_AIRTIME14] = 0x3b8,
+ [AGG_AWSCR0] = 0x05c,
+ [AGG_PCR0] = 0x06c,
+ [AGG_ACR0] = 0x084,
+ [AGG_MRCR] = 0x098,
+ [AGG_ATCR1] = 0x0f0,
+ [AGG_ATCR3] = 0x0f4,
+ [LPON_UTTR0] = 0x080,
+ [LPON_UTTR1] = 0x084,
+ [LPON_FRCR] = 0x314,
+ [MIB_SDR3] = 0x014,
+ [MIB_SDR4] = 0x018,
+ [MIB_SDR5] = 0x01c,
+ [MIB_SDR7] = 0x024,
+ [MIB_SDR8] = 0x028,
+ [MIB_SDR9] = 0x02c,
+ [MIB_SDR10] = 0x030,
+ [MIB_SDR11] = 0x034,
+ [MIB_SDR12] = 0x038,
+ [MIB_SDR13] = 0x03c,
+ [MIB_SDR14] = 0x040,
+ [MIB_SDR15] = 0x044,
+ [MIB_SDR16] = 0x048,
+ [MIB_SDR17] = 0x04c,
+ [MIB_SDR18] = 0x050,
+ [MIB_SDR19] = 0x054,
+ [MIB_SDR20] = 0x058,
+ [MIB_SDR21] = 0x05c,
+ [MIB_SDR22] = 0x060,
+ [MIB_SDR23] = 0x064,
+ [MIB_SDR24] = 0x068,
+ [MIB_SDR25] = 0x06c,
+ [MIB_SDR27] = 0x074,
+ [MIB_SDR28] = 0x078,
+ [MIB_SDR29] = 0x07c,
+ [MIB_SDRVEC] = 0x080,
+ [MIB_SDR31] = 0x084,
+ [MIB_SDR32] = 0x088,
+ [MIB_SDRMUBF] = 0x090,
+ [MIB_DR8] = 0x0c0,
+ [MIB_DR9] = 0x0c4,
+ [MIB_DR11] = 0x0cc,
+ [MIB_MB_SDR0] = 0x100,
+ [MIB_MB_SDR1] = 0x104,
+ [TX_AGG_CNT] = 0x0a8,
+ [TX_AGG_CNT2] = 0x164,
+ [MIB_ARNG] = 0x4b8,
+ [WTBLON_TOP_WDUCR] = 0x0,
+ [WTBL_UPDATE] = 0x030,
+ [PLE_FL_Q_EMPTY] = 0x0b0,
+ [PLE_FL_Q_CTRL] = 0x1b0,
+ [PLE_AC_QEMPTY] = 0x500,
+ [PLE_FREEPG_CNT] = 0x100,
+ [PLE_FREEPG_HEAD_TAIL] = 0x104,
+ [PLE_PG_HIF_GROUP] = 0x110,
+ [PLE_HIF_PG_INFO] = 0x114,
+ [AC_OFFSET] = 0x040,
+ [ETBF_PAR_RPT0] = 0x068,
+};
+
+static const u32 mt7916_offs[] = {
+ [TMAC_CDTR] = 0x0c8,
+ [TMAC_ODTR] = 0x0cc,
+ [TMAC_ATCR] = 0x00c,
+ [TMAC_TRCR0] = 0x010,
+ [TMAC_ICR0] = 0x014,
+ [TMAC_ICR1] = 0x018,
+ [TMAC_CTCR0] = 0x114,
+ [TMAC_TFCR0] = 0x0e4,
+ [MDP_BNRCFR0] = 0x090,
+ [MDP_BNRCFR1] = 0x094,
+ [ARB_DRNGR0] = 0x1e0,
+ [ARB_SCR] = 0x000,
+ [RMAC_MIB_AIRTIME14] = 0x0398,
+ [AGG_AWSCR0] = 0x030,
+ [AGG_PCR0] = 0x040,
+ [AGG_ACR0] = 0x054,
+ [AGG_MRCR] = 0x068,
+ [AGG_ATCR1] = 0x1a8,
+ [AGG_ATCR3] = 0x080,
+ [LPON_UTTR0] = 0x360,
+ [LPON_UTTR1] = 0x364,
+ [LPON_FRCR] = 0x37c,
+ [MIB_SDR3] = 0x698,
+ [MIB_SDR4] = 0x788,
+ [MIB_SDR5] = 0x780,
+ [MIB_SDR7] = 0x5a8,
+ [MIB_SDR8] = 0x78c,
+ [MIB_SDR9] = 0x024,
+ [MIB_SDR10] = 0x76c,
+ [MIB_SDR11] = 0x790,
+ [MIB_SDR12] = 0x558,
+ [MIB_SDR13] = 0x560,
+ [MIB_SDR14] = 0x564,
+ [MIB_SDR15] = 0x568,
+ [MIB_SDR16] = 0x7fc,
+ [MIB_SDR17] = 0x800,
+ [MIB_SDR18] = 0x030,
+ [MIB_SDR19] = 0x5ac,
+ [MIB_SDR20] = 0x5b0,
+ [MIB_SDR21] = 0x5b4,
+ [MIB_SDR22] = 0x770,
+ [MIB_SDR23] = 0x774,
+ [MIB_SDR24] = 0x778,
+ [MIB_SDR25] = 0x77c,
+ [MIB_SDR27] = 0x080,
+ [MIB_SDR28] = 0x084,
+ [MIB_SDR29] = 0x650,
+ [MIB_SDRVEC] = 0x5a8,
+ [MIB_SDR31] = 0x55c,
+ [MIB_SDR32] = 0x7a8,
+ [MIB_SDRMUBF] = 0x7ac,
+ [MIB_DR8] = 0x56c,
+ [MIB_DR9] = 0x570,
+ [MIB_DR11] = 0x574,
+ [MIB_MB_SDR0] = 0x688,
+ [MIB_MB_SDR1] = 0x690,
+ [TX_AGG_CNT] = 0x7dc,
+ [TX_AGG_CNT2] = 0x7ec,
+ [MIB_ARNG] = 0x0b0,
+ [WTBLON_TOP_WDUCR] = 0x200,
+ [WTBL_UPDATE] = 0x230,
+ [PLE_FL_Q_EMPTY] = 0x360,
+ [PLE_FL_Q_CTRL] = 0x3e0,
+ [PLE_AC_QEMPTY] = 0x600,
+ [PLE_FREEPG_CNT] = 0x380,
+ [PLE_FREEPG_HEAD_TAIL] = 0x384,
+ [PLE_PG_HIF_GROUP] = 0x00c,
+ [PLE_HIF_PG_INFO] = 0x388,
+ [AC_OFFSET] = 0x080,
+ [ETBF_PAR_RPT0] = 0x100,
+};
+
+static const struct __map mt7915_reg_map[] = {
+ { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure regs) */
+ { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
+ { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
+ { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
+ { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
+ { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
+ { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
+ { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
+ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x0, 0x0, 0x0 }, /* implies end of search */
+};
+
+static const struct __map mt7916_reg_map[] = {
+ { 0x54000000, 0x02000, 0x1000 }, /* WFDMA_0 (PCIE0 MCU DMA0) */
+ { 0x55000000, 0x03000, 0x1000 }, /* WFDMA_1 (PCIE0 MCU DMA1) */
+ { 0x56000000, 0x04000, 0x1000 }, /* WFDMA_2 (Reserved) */
+ { 0x57000000, 0x05000, 0x1000 }, /* WFDMA_3 (MCU wrap CR) */
+ { 0x58000000, 0x06000, 0x1000 }, /* WFDMA_4 (PCIE1 MCU DMA0) */
+ { 0x59000000, 0x07000, 0x1000 }, /* WFDMA_5 (PCIE1 MCU DMA1) */
+ { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820ca000, 0x26000, 0x2000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
+ { 0x820d0000, 0x30000, 0x10000}, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x00400000, 0x80000, 0x10000}, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x90000, 0x10000}, /* WF_MCU_SYSRAM (configure cr) */
+ { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x820c4000, 0xa8000, 0x1000 }, /* WF_LMAC_TOP (WF_UWTBL) */
+ { 0x820b0000, 0xae000, 0x1000 }, /* [APB2] WFSYS_ON */
+ { 0x80020000, 0xb0000, 0x10000}, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000}, /* WF_TOP_MISC_ON */
+ { 0x0, 0x0, 0x0 }, /* implies end of search */
+};
+
+static const struct __map mt7986_reg_map[] = {
+ { 0x54000000, 0x402000, 0x1000 }, /* WFDMA_0 (PCIE0 MCU DMA0) */
+ { 0x55000000, 0x403000, 0x1000 }, /* WFDMA_1 (PCIE0 MCU DMA1) */
+ { 0x56000000, 0x404000, 0x1000 }, /* WFDMA_2 (Reserved) */
+ { 0x57000000, 0x405000, 0x1000 }, /* WFDMA_3 (MCU wrap CR) */
+ { 0x58000000, 0x406000, 0x1000 }, /* WFDMA_4 (PCIE1 MCU DMA0) */
+ { 0x59000000, 0x407000, 0x1000 }, /* WFDMA_5 (PCIE1 MCU DMA1) */
+ { 0x820c0000, 0x408000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x40c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x40e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820e0000, 0x420000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x420400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x420800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x420c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x421000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x421400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820ce000, 0x421c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820e7000, 0x421e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820cf000, 0x422000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820e9000, 0x423400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x424000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x424200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x424600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x424800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820ca000, 0x426000, 0x2000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
+ { 0x820d0000, 0x430000, 0x10000}, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x00400000, 0x480000, 0x10000}, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x490000, 0x10000}, /* WF_MCU_SYSRAM */
+ { 0x820f0000, 0x4a0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0x4a0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0x4a0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0x4a0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0x4a1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0x4a1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0x4a1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0x4a3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0x4a4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0x4a4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0x4a4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0x4a4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x820c4000, 0x4a8000, 0x1000 }, /* WF_LMAC_TOP (WF_UWTBL) */
+ { 0x820b0000, 0x4ae000, 0x1000 }, /* [APB2] WFSYS_ON */
+ { 0x80020000, 0x4b0000, 0x10000}, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0x4c0000, 0x10000}, /* WF_TOP_MISC_ON */
+ { 0x89000000, 0x4d0000, 0x1000 }, /* WF_MCU_CFG_ON */
+ { 0x89010000, 0x4d1000, 0x1000 }, /* WF_MCU_CIRQ */
+ { 0x89020000, 0x4d2000, 0x1000 }, /* WF_MCU_GPT */
+ { 0x89030000, 0x4d3000, 0x1000 }, /* WF_MCU_WDT */
+ { 0x80010000, 0x4d4000, 0x1000 }, /* WF_AXIDMA */
+ { 0x0, 0x0, 0x0 }, /* implies end of search */
+};
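+
+/* addresses outside the fixed map go through two remap windows: layer 1
+ * for conn_infra/wfsys/cbtop ranges, layer 2 for everything else
+ */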
static u32 mt7915_reg_map_l1(struct mt7915_dev *dev, u32 addr)
{
u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
+ u32 l1_remap;
+
+ if (is_mt7986(&dev->mt76))
+ return MT_CONN_INFRA_OFFSET(addr);
- mt76_rmw_field(dev, MT_HIF_REMAP_L1, MT_HIF_REMAP_L1_MASK, base);
+ l1_remap = is_mt7915(&dev->mt76) ?
+ MT_HIF_REMAP_L1 : MT_HIF_REMAP_L1_MT7916;
+
+ dev->bus_ops->rmw(&dev->mt76, l1_remap,
+ MT_HIF_REMAP_L1_MASK,
+ FIELD_PREP(MT_HIF_REMAP_L1_MASK, base));
/* use read to push write */
- mt76_rr(dev, MT_HIF_REMAP_L1);
+ dev->bus_ops->rr(&dev->mt76, l1_remap);
return MT_HIF_REMAP_BASE_L1 + offset;
}
static u32 mt7915_reg_map_l2(struct mt7915_dev *dev, u32 addr)
{
- u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
- u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
+ u32 offset, base;
- mt76_rmw_field(dev, MT_HIF_REMAP_L2, MT_HIF_REMAP_L2_MASK, base);
- /* use read to push write */
- mt76_rr(dev, MT_HIF_REMAP_L2);
+ if (is_mt7915(&dev->mt76)) {
+ offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
+ base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
+
+ dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2,
+ MT_HIF_REMAP_L2_MASK,
+ FIELD_PREP(MT_HIF_REMAP_L2_MASK, base));
- return MT_HIF_REMAP_BASE_L2 + offset;
+ /* use read to push write */
+ dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2);
+ } else {
+ u32 ofs = is_mt7986(&dev->mt76) ? 0x400000 : 0;
+
+ offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET_MT7916, addr);
+ base = FIELD_GET(MT_HIF_REMAP_L2_BASE_MT7916, addr);
+
+ dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2_MT7916 + ofs,
+ MT_HIF_REMAP_L2_MASK_MT7916,
+ FIELD_PREP(MT_HIF_REMAP_L2_MASK_MT7916, base));
+
+ /* use read to push write */
+ dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2_MT7916 + ofs);
+
+ offset += (MT_HIF_REMAP_BASE_L2_MT7916 + ofs);
+ }
+
+ return offset;
}
static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
{
- static const struct {
- u32 phys;
- u32 mapped;
- u32 size;
- } fixed_map[] = {
- { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
- { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure regs) */
- { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
- { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
- { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
- { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
- { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
- { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
- { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
- { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
- { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
- { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
- { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
- { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
- { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
- { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
- { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
- { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
- { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
- { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
- { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
- { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
- { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
- { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
- { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
- { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
- { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
- { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
- { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
- { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
- { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
- { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
- { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
- { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
- { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
- { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
- { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
- { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
- { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
- { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
- { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
- };
int i;
if (addr < 0x100000)
return addr;
- for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
+ if (!dev->reg.map) {
+ dev_err(dev->mt76.dev, "err: reg_map is null\n");
+ return addr;
+ }
+
+ for (i = 0; i < dev->reg.map_size; i++) {
u32 ofs;
- if (addr < fixed_map[i].phys)
+ if (addr < dev->reg.map[i].phys)
continue;
- ofs = addr - fixed_map[i].phys;
- if (ofs > fixed_map[i].size)
+ ofs = addr - dev->reg.map[i].phys;
+ if (ofs > dev->reg.map[i].size)
continue;
- return fixed_map[i].mapped + ofs;
+ return dev->reg.map[i].maps + ofs;
}
- if ((addr >= 0x18000000 && addr < 0x18c00000) ||
- (addr >= 0x70000000 && addr < 0x78000000))
+ if ((addr >= MT_INFRA_BASE && addr < MT_WFSYS0_PHY_START) ||
+ (addr >= MT_WFSYS0_PHY_START && addr < MT_WFSYS1_PHY_START) ||
+ (addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END))
+ return mt7915_reg_map_l1(dev, addr);
+
+ if (dev_is_pci(dev->mt76.dev) &&
+ ((addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) ||
+ (addr >= MT_CBTOP2_PHY_START && addr <= MT_CBTOP2_PHY_END)))
+ return mt7915_reg_map_l1(dev, addr);
+
+ /* CONN_INFRA: convert to physical addr and use layer 1 remap */
+ if (addr >= MT_INFRA_MCU_START && addr <= MT_INFRA_MCU_END) {
+ addr = addr - MT_INFRA_MCU_START + MT_INFRA_BASE;
return mt7915_reg_map_l1(dev, addr);
+ }
return mt7915_reg_map_l2(dev, addr);
}
@@ -125,7 +464,9 @@ static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
return dev->bus_ops->rmw(mdev, addr, mask, val);
}
-int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq)
+static int mt7915_mmio_init(struct mt76_dev *mdev,
+ void __iomem *mem_base,
+ u32 device_id)
{
struct mt76_bus_ops *bus_ops;
struct mt7915_dev *dev;
@@ -133,6 +474,29 @@ int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq)
dev = container_of(mdev, struct mt7915_dev, mt76);
mt76_mmio_init(&dev->mt76, mem_base);
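+ /* select the per-chip register bases, offsets and fixed address map */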
+ switch (device_id) {
+ case 0x7915:
+ dev->reg.reg_rev = mt7915_reg;
+ dev->reg.offs_rev = mt7915_offs;
+ dev->reg.map = mt7915_reg_map;
+ dev->reg.map_size = ARRAY_SIZE(mt7915_reg_map);
+ break;
+ case 0x7906:
+ dev->reg.reg_rev = mt7916_reg;
+ dev->reg.offs_rev = mt7916_offs;
+ dev->reg.map = mt7916_reg_map;
+ dev->reg.map_size = ARRAY_SIZE(mt7916_reg_map);
+ break;
+ case 0x7986:
+ dev->reg.reg_rev = mt7986_reg;
+ dev->reg.offs_rev = mt7916_offs;
+ dev->reg.map = mt7986_reg_map;
+ dev->reg.map_size = ARRAY_SIZE(mt7986_reg_map);
+ break;
+ default:
+ return -EINVAL;
+ }
+
dev->bus_ops = dev->mt76.bus;
bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
GFP_KERNEL);
@@ -144,11 +508,210 @@ int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq)
bus_ops->rmw = mt7915_rmw;
dev->mt76.bus = bus_ops;
- mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ mdev->rev = (device_id << 16) |
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+ return 0;
+}
+
+void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev,
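+/* update the cached irq mask and optionally mirror it to both hif registers */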
+ bool write_reg,
+ u32 clear, u32 set)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->mmio.irq_lock, flags);
+
+ mdev->mmio.irqmask &= ~clear;
+ mdev->mmio.irqmask |= set;
+
+ if (write_reg) {
+ mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
+ mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
+ }
+
+ spin_unlock_irqrestore(&mdev->mmio.irq_lock, flags);
+}
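
The matching mt7915_irq_enable()/mt7915_irq_disable() helpers live in mt7915.h; a minimal sketch of the enable side as the driver wires it, assuming mt76_set_irq_mask() with addr 0 only updates the cached mask and the tasklet performs the actual register write:

	static inline void mt7915_irq_enable(struct mt7915_dev *dev, u32 mask)
	{
		if (dev->hif2)
			mt7915_dual_hif_set_irq_mask(dev, false, 0, mask);
		else
			mt76_set_irq_mask(&dev->mt76, 0, 0, mask);

		tasklet_schedule(&dev->irq_tasklet);
	}
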
+
+static void mt7915_rx_poll_complete(struct mt76_dev *mdev,
+ enum mt76_rxq_id q)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+
+ mt7915_irq_enable(dev, MT_INT_RX(q));
+}
+
+/* TODO: support 2/4/6/8 MSI-X vectors */
+static void mt7915_irq_tasklet(struct tasklet_struct *t)
+{
+ struct mt7915_dev *dev = from_tasklet(dev, t, irq_tasklet);
+ u32 intr, intr1, mask;
+
mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ intr &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+ if (dev->hif2) {
+ intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
+ intr1 &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT1_SOURCE_CSR, intr1);
+
+ intr |= intr1;
+ }
+
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
+
+ mask = intr & MT_INT_RX_DONE_ALL;
+ if (intr & MT_INT_TX_DONE_MCU)
+ mask |= MT_INT_TX_DONE_MCU;
+
+ mt7915_irq_disable(dev, mask);
+
+ if (intr & MT_INT_TX_DONE_MCU)
+ napi_schedule(&dev->mt76.tx_napi);
+
+ if (intr & MT_INT_RX(MT_RXQ_MAIN))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
+
+ if (intr & MT_INT_RX(MT_RXQ_EXT))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_EXT]);
+
+ if (intr & MT_INT_RX(MT_RXQ_MCU))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]);
+
+ if (intr & MT_INT_RX(MT_RXQ_MCU_WA))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]);
+
+ if (!is_mt7915(&dev->mt76) &&
+ (intr & MT_INT_RX(MT_RXQ_MAIN_WA)))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN_WA]);
+
+ if (intr & MT_INT_RX(MT_RXQ_EXT_WA))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_EXT_WA]);
+
+ if (intr & MT_INT_MCU_CMD) {
+ u32 val = mt76_rr(dev, MT_MCU_CMD);
+
+ mt76_wr(dev, MT_MCU_CMD, val);
+ if (val & MT_MCU_CMD_ERROR_MASK) {
+ dev->reset_state = val;
+ ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
+ wake_up(&dev->reset_wait);
+ }
+ }
+}
+
+irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
+{
+ struct mt7915_dev *dev = dev_instance;
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return IRQ_NONE;
+
+ tasklet_schedule(&dev->irq_tasklet);
+
+ return IRQ_HANDLED;
+}
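
Putting the two halves together, the interrupt flow is (sketch):

	/* hard IRQ:   mask all interrupts, schedule the tasklet
	 * tasklet:    ack the sources, disable only the bits it will
	 *             service, kick the per-queue NAPI contexts
	 * NAPI done:  mt7915_rx_poll_complete() re-enables the queue's
	 *             bit via mt7915_irq_enable(dev, MT_INT_RX(q))
	 */
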
+
+struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
+ void __iomem *mem_base, u32 device_id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ /* txwi_size = txd size + txp size */
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt7915_txp),
+ .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
+ .survey_flags = SURVEY_INFO_TIME_TX |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BSS_RX,
+ .token_size = MT7915_TOKEN_SIZE,
+ .tx_prepare_skb = mt7915_tx_prepare_skb,
+ .tx_complete_skb = mt7915_tx_complete_skb,
+ .rx_skb = mt7915_queue_rx_skb,
+ .rx_check = mt7915_rx_check,
+ .rx_poll_complete = mt7915_rx_poll_complete,
+ .sta_ps = mt7915_sta_ps,
+ .sta_add = mt7915_mac_sta_add,
+ .sta_remove = mt7915_mac_sta_remove,
+ .update_survey = mt7915_update_channel,
+ };
+ struct ieee80211_ops *ops;
+ struct mt7915_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ ops = devm_kmemdup(pdev, &mt7915_ops, sizeof(mt7915_ops), GFP_KERNEL);
+ if (!ops)
+ return ERR_PTR(-ENOMEM);
+
+ mdev = mt76_alloc_device(pdev, sizeof(*dev), ops, &drv_ops);
+ if (!mdev)
+ return ERR_PTR(-ENOMEM);
+
+ dev = container_of(mdev, struct mt7915_dev, mt76);
+
+ ret = mt7915_mmio_init(mdev, mem_base, device_id);
+ if (ret)
+ goto error;
+
+ tasklet_setup(&dev->irq_tasklet, mt7915_irq_tasklet);
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+ return dev;
+
+error:
+ mt76_free_device(&dev->mt76);
+
+ return ERR_PTR(ret);
+}
+
+static int __init mt7915_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&mt7915_hif_driver);
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&mt7915_pci_driver);
+ if (ret)
+ goto error_pci;
+
+ if (IS_ENABLED(CONFIG_MT7986_WMAC)) {
+ ret = platform_driver_register(&mt7986_wmac_driver);
+ if (ret)
+ goto error_wmac;
+ }
return 0;
+
+error_wmac:
+ pci_unregister_driver(&mt7915_pci_driver);
+error_pci:
+ pci_unregister_driver(&mt7915_hif_driver);
+
+ return ret;
+}
+
+static void __exit mt7915_exit(void)
+{
+ if (IS_ENABLED(CONFIG_MT7986_WMAC))
+ platform_driver_unregister(&mt7986_wmac_driver);
+
+ pci_unregister_driver(&mt7915_pci_driver);
+ pci_unregister_driver(&mt7915_hif_driver);
}
+
+module_init(mt7915_init);
+module_exit(mt7915_exit);
+MODULE_LICENSE("Dual BSD/GPL");
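
Note that IS_ENABLED(CONFIG_MT7986_WMAC) folds to a constant 0/1 at compile time, so the platform-driver branch above is discarded entirely when the symbol is off; the extern declaration in mt7915.h keeps the reference compiling either way. In miniature:

	if (IS_ENABLED(CONFIG_MT7986_WMAC))	/* constant-folded branch */
		ret = platform_driver_register(&mt7986_wmac_driver);
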
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index 42d887383e8d..6efa0a2e2345 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -6,13 +6,14 @@
#include <linux/interrupt.h>
#include <linux/ktime.h>
-#include "../mt76.h"
+#include "../mt76_connac.h"
#include "regs.h"
#define MT7915_MAX_INTERFACES 19
#define MT7915_MAX_WMM_SETS 4
#define MT7915_WTBL_SIZE 288
-#define MT7915_WTBL_RESERVED (MT7915_WTBL_SIZE - 1)
+#define MT7916_WTBL_SIZE 544
+#define MT7915_WTBL_RESERVED (mt7915_wtbl_size(dev) - 1)
#define MT7915_WTBL_STA (MT7915_WTBL_RESERVED - \
MT7915_MAX_INTERFACES)
@@ -30,10 +31,28 @@
#define MT7915_FIRMWARE_WM "mediatek/mt7915_wm.bin"
#define MT7915_ROM_PATCH "mediatek/mt7915_rom_patch.bin"
+#define MT7916_FIRMWARE_WA "mediatek/mt7916_wa.bin"
+#define MT7916_FIRMWARE_WM "mediatek/mt7916_wm.bin"
+#define MT7916_ROM_PATCH "mediatek/mt7916_rom_patch.bin"
+
+#define MT7986_FIRMWARE_WA "mediatek/mt7986_wa.bin"
+#define MT7986_FIRMWARE_WM "mediatek/mt7986_wm.bin"
+#define MT7986_FIRMWARE_WM_MT7975 "mediatek/mt7986_wm_mt7975.bin"
+#define MT7986_ROM_PATCH "mediatek/mt7986_rom_patch.bin"
+#define MT7986_ROM_PATCH_MT7975 "mediatek/mt7986_rom_patch_mt7975.bin"
+
#define MT7915_EEPROM_DEFAULT "mediatek/mt7915_eeprom.bin"
#define MT7915_EEPROM_DEFAULT_DBDC "mediatek/mt7915_eeprom_dbdc.bin"
+#define MT7916_EEPROM_DEFAULT "mediatek/mt7916_eeprom.bin"
+#define MT7986_EEPROM_MT7975_DEFAULT "mediatek/mt7986_eeprom_mt7975.bin"
+#define MT7986_EEPROM_MT7975_DUAL_DEFAULT "mediatek/mt7986_eeprom_mt7975_dual.bin"
+#define MT7986_EEPROM_MT7976_DEFAULT "mediatek/mt7986_eeprom_mt7976.bin"
+#define MT7986_EEPROM_MT7976_DEFAULT_DBDC "mediatek/mt7986_eeprom_mt7976_dbdc.bin"
+#define MT7986_EEPROM_MT7976_DUAL_DEFAULT "mediatek/mt7986_eeprom_mt7976_dual.bin"
#define MT7915_EEPROM_SIZE 3584
+#define MT7916_EEPROM_SIZE 4096
+
#define MT7915_EEPROM_BLOCK_SIZE 16
#define MT7915_TOKEN_SIZE 8192
@@ -41,11 +60,13 @@
#define MT7915_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
#define MT7915_THERMAL_THROTTLE_MAX 100
+#define MT7915_CDEV_THROTTLE_MAX 99
#define MT7915_SKU_RATE_NUM 161
#define MT7915_MAX_TWT_AGRT 16
#define MT7915_MAX_STA_TWT_AGRT 8
+#define MT7915_MAX_QUEUE (__MT_RXQ_MAX + __MT_MCUQ_MAX + 2)
struct mt7915_vif;
struct mt7915_sta;
@@ -68,9 +89,13 @@ enum mt7915_rxq_id {
MT7915_RXQ_MCU_WA_EXT,
};
-struct mt7915_sta_key_conf {
- s8 keyidx;
- u8 key[16];
+enum mt7916_rxq_id {
+ MT7916_RXQ_MCU_WM = 0,
+ MT7916_RXQ_MCU_WA,
+ MT7916_RXQ_MCU_WA_MAIN,
+ MT7916_RXQ_MCU_WA_EXT,
+ MT7916_RXQ_BAND0,
+ MT7916_RXQ_BAND1,
};
struct mt7915_twt_flow {
@@ -104,7 +129,7 @@ struct mt7915_sta {
struct mt76_sta_stats stats;
- struct mt7915_sta_key_conf bip;
+ struct mt76_connac_sta_key_conf bip;
struct {
u8 flowid_mask;
@@ -113,7 +138,9 @@ struct mt7915_sta {
};
struct mt7915_vif_cap {
- bool ldpc:1;
+ bool ht_ldpc:1;
+ bool vht_ldpc:1;
+ bool he_ldpc:1;
bool vht_su_ebfer:1;
bool vht_su_ebfee:1;
bool vht_mu_ebfer:1;
@@ -200,16 +227,18 @@ struct mt7915_phy {
struct mt76_phy *mt76;
struct mt7915_dev *dev;
- struct ieee80211_sband_iftype_data iftype[2][NUM_NL80211_IFTYPES];
+ struct ieee80211_sband_iftype_data iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES];
struct ieee80211_vif *monitor_vif;
struct thermal_cooling_device *cdev;
+ u8 cdev_state;
u8 throttle_state;
u32 throttle_temp[2]; /* 0: critical high, 1: maximum */
u32 rxfilter;
u64 omac_mask;
+ u8 band_idx;
u16 noise;
@@ -217,7 +246,6 @@ struct mt7915_phy {
u8 slottime;
u8 rdd_state;
- int dfs_state;
u32 rx_ampdu_ts;
u32 ampdu_ref;
@@ -247,12 +275,21 @@ struct mt7915_dev {
};
struct mt7915_hif *hif2;
+ struct mt7915_reg_desc reg;
+ u8 q_id[MT7915_MAX_QUEUE];
+ u32 q_int_mask[MT7915_MAX_QUEUE];
+ u32 wfdma_mask;
const struct mt76_bus_ops *bus_ops;
struct tasklet_struct irq_tasklet;
struct mt7915_phy phy;
+ /* monitor rx chain configured channel */
+ struct cfg80211_chan_def rdd2_chandef;
+ struct mt7915_phy *rdd2_phy;
+
u16 chainmask;
+ u16 chainshift;
u32 hif_idx;
struct work_struct init_work;
@@ -274,6 +311,10 @@ struct mt7915_dev {
bool ibf;
u8 fw_debug_wm;
u8 fw_debug_wa;
+ u8 fw_debug_bin;
+
+ struct dentry *debugfs_dir;
+ struct rchan *relay_fwlog;
void *cal;
@@ -281,6 +322,17 @@ struct mt7915_dev {
u8 table_mask;
u8 n_agrt;
} twt;
+
+ struct reset_control *rstc;
+ void __iomem *dcm;
+ void __iomem *sku;
+};
+
+enum {
+ WFDMA0 = 0x0,
+ WFDMA1,
+ WFDMA_EXT,
+ __MT_WFDMA_MAX,
};
enum {
@@ -300,6 +352,7 @@ enum {
enum {
MT_RX_SEL0,
MT_RX_SEL1,
+ MT_RX_SEL2, /* monitor chain */
};
enum mt7915_rdd_cmd {
@@ -345,21 +398,44 @@ mt7915_ext_phy(struct mt7915_dev *dev)
return phy->priv;
}
-static inline u8 mt7915_lmac_mapping(struct mt7915_dev *dev, u8 ac)
+static inline u32 mt7915_check_adie(struct mt7915_dev *dev, bool sku)
{
- /* LMAC uses the reverse order of mac80211 AC indexes */
- return 3 - ac;
+ u32 mask = sku ? MT_CONNINFRA_SKU_MASK : MT_ADIE_TYPE_MASK;
+
+ if (!is_mt7986(&dev->mt76))
+ return 0;
+
+ return mt76_rr(dev, MT_CONNINFRA_SKU_DEC_ADDR) & mask;
}
extern const struct ieee80211_ops mt7915_ops;
extern const struct mt76_testmode_ops mt7915_testmode_ops;
+extern struct pci_driver mt7915_pci_driver;
+extern struct pci_driver mt7915_hif_driver;
+extern struct platform_driver mt7986_wmac_driver;
+
+#ifdef CONFIG_MT7986_WMAC
+int mt7986_wmac_enable(struct mt7915_dev *dev);
+void mt7986_wmac_disable(struct mt7915_dev *dev);
+#else
+static inline int mt7986_wmac_enable(struct mt7915_dev *dev)
+{
+ return 0;
+}
-u32 mt7915_reg_map(struct mt7915_dev *dev, u32 addr);
+static inline void mt7986_wmac_disable(struct mt7915_dev *dev)
+{
+}
+#endif
+struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
+ void __iomem *mem_base, u32 device_id);
+irqreturn_t mt7915_irq_handler(int irq, void *dev_instance);
u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif);
int mt7915_register_device(struct mt7915_dev *dev);
void mt7915_unregister_device(struct mt7915_dev *dev);
int mt7915_eeprom_init(struct mt7915_dev *dev);
-void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy);
+void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev,
+ struct mt7915_phy *phy);
int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
struct ieee80211_channel *chan,
u8 chain_idx);
@@ -378,18 +454,12 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
struct ieee80211_vif *vif, int enable);
int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable);
-int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
struct ieee80211_ampdu_params *params,
bool add);
int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
struct ieee80211_ampdu_params *params,
bool add);
-int mt7915_mcu_add_key(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct mt7915_sta *msta, struct ieee80211_key_conf *key,
- enum set_key_cmd cmd);
int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct cfg80211_he_bss_color *he_bss_color);
int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -417,8 +487,6 @@ int mt7915_mcu_set_test_param(struct mt7915_dev *dev, u8 param, bool test_mode,
u8 en);
int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable);
int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band);
-int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val);
-int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter);
int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable);
int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy);
int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len);
@@ -436,17 +504,22 @@ int mt7915_mcu_get_temperature(struct mt7915_phy *phy);
int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state);
int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate);
-int mt7915_mcu_rdd_cmd(struct mt7915_dev *dev, enum mt7915_rdd_cmd cmd,
- u8 index, u8 rx_sel, u8 val);
+int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
+ struct cfg80211_chan_def *chandef);
int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3);
int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl);
int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level);
void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb);
void mt7915_mcu_exit(struct mt7915_dev *dev);
-static inline bool is_mt7915(struct mt76_dev *dev)
+static inline u16 mt7915_wtbl_size(struct mt7915_dev *dev)
+{
+ return is_mt7915(&dev->mt76) ? MT7915_WTBL_SIZE : MT7916_WTBL_SIZE;
+}
+
+static inline u16 mt7915_eeprom_size(struct mt7915_dev *dev)
{
- return mt76_chip(dev) == 0x7915;
+ return is_mt7915(&dev->mt76) ? MT7915_EEPROM_SIZE : MT7916_EEPROM_SIZE;
}
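
These helpers keep generation-dependent sizing out of the callers; e.g. a buffer sized per chip (allocation site is illustrative):

	u8 *eeprom = devm_kzalloc(dev->mt76.dev, mt7915_eeprom_size(dev),
				  GFP_KERNEL);
	if (!eeprom)
		return -ENOMEM;
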
void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev, bool write_reg,
@@ -487,7 +560,6 @@ void mt7915_mac_work(struct work_struct *work);
void mt7915_mac_reset_work(struct work_struct *work);
void mt7915_mac_sta_rc_work(struct work_struct *work);
void mt7915_mac_update_stats(struct mt7915_phy *phy);
-int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq);
void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev,
struct mt7915_sta *msta,
u8 flowid);
@@ -500,7 +572,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt76_tx_info *tx_info);
void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7915_tx_token_put(struct mt7915_dev *dev);
-int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc);
+int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base);
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len);
@@ -514,6 +586,8 @@ void mt7915_update_channel(struct mt76_phy *mphy);
int mt7915_mcu_muru_debug_set(struct mt7915_dev *dev, bool enable);
int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms);
int mt7915_init_debugfs(struct mt7915_phy *phy);
+void mt7915_debugfs_rx_fw_monitor(struct mt7915_dev *dev, const void *data, int len);
+bool mt7915_debugfs_rx_log(struct mt7915_dev *dev, const void *data, int len);
#ifdef CONFIG_MAC80211_DEBUGFS
void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
index 8130ea43971f..6f819c41a4c4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
@@ -18,35 +18,17 @@ static u32 hif_idx;
static const struct pci_device_id mt7915_pci_device_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7915) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7906) },
{ },
};
static const struct pci_device_id mt7915_hif_device_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7916) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x790a) },
{ },
};
-void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev, bool write_reg,
- u32 clear, u32 set)
-{
- struct mt76_dev *mdev = &dev->mt76;
- unsigned long flags;
-
- spin_lock_irqsave(&mdev->mmio.irq_lock, flags);
-
- mdev->mmio.irqmask &= ~clear;
- mdev->mmio.irqmask |= set;
-
- if (write_reg) {
- mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
- mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
- }
-
- spin_unlock_irqrestore(&mdev->mmio.irq_lock, flags);
-}
-
-static struct mt7915_hif *
-mt7915_pci_get_hif2(struct mt7915_dev *dev)
+static struct mt7915_hif *mt7915_pci_get_hif2(u32 idx)
{
struct mt7915_hif *hif;
u32 val;
@@ -56,7 +38,7 @@ mt7915_pci_get_hif2(struct mt7915_dev *dev)
list_for_each_entry(hif, &hif_list, list) {
val = readl(hif->regs + MT_PCIE_RECOG_ID);
val &= MT_PCIE_RECOG_ID_MASK;
- if (val != dev->hif_idx)
+ if (val != idx)
continue;
get_device(hif->dev);
@@ -78,123 +60,17 @@ static void mt7915_put_hif2(struct mt7915_hif *hif)
put_device(hif->dev);
}
-static void
-mt7915_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-{
- struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
- static const u32 rx_irq_mask[] = {
- [MT_RXQ_MAIN] = MT_INT_RX_DONE_DATA0,
- [MT_RXQ_EXT] = MT_INT_RX_DONE_DATA1,
- [MT_RXQ_MCU] = MT_INT_RX_DONE_WM,
- [MT_RXQ_MCU_WA] = MT_INT_RX_DONE_WA,
- [MT_RXQ_EXT_WA] = MT_INT_RX_DONE_WA_EXT,
- };
-
- mt7915_irq_enable(dev, rx_irq_mask[q]);
-}
-
-/* TODO: support 2/4/6/8 MSI-X vectors */
-static void mt7915_irq_tasklet(struct tasklet_struct *t)
+static struct mt7915_hif *mt7915_pci_init_hif2(struct pci_dev *pdev)
{
- struct mt7915_dev *dev = from_tasklet(dev, t, irq_tasklet);
- u32 intr, intr1, mask;
-
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
- if (dev->hif2)
- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
-
- intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
- intr &= dev->mt76.mmio.irqmask;
- mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
-
- if (dev->hif2) {
- intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
- intr1 &= dev->mt76.mmio.irqmask;
- mt76_wr(dev, MT_INT1_SOURCE_CSR, intr1);
-
- intr |= intr1;
- }
-
- trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
-
- mask = intr & MT_INT_RX_DONE_ALL;
- if (intr & MT_INT_TX_DONE_MCU)
- mask |= MT_INT_TX_DONE_MCU;
+ hif_idx++;
+ if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7916, NULL) &&
+ !pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x790a, NULL))
+ return NULL;
- mt7915_irq_disable(dev, mask);
+ writel(hif_idx | MT_PCIE_RECOG_ID_SEM,
+ pcim_iomap_table(pdev)[0] + MT_PCIE_RECOG_ID);
- if (intr & MT_INT_TX_DONE_MCU)
- napi_schedule(&dev->mt76.tx_napi);
-
- if (intr & MT_INT_RX_DONE_DATA0)
- napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
-
- if (intr & MT_INT_RX_DONE_DATA1)
- napi_schedule(&dev->mt76.napi[MT_RXQ_EXT]);
-
- if (intr & MT_INT_RX_DONE_WM)
- napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]);
-
- if (intr & MT_INT_RX_DONE_WA)
- napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]);
-
- if (intr & MT_INT_RX_DONE_WA_EXT)
- napi_schedule(&dev->mt76.napi[MT_RXQ_EXT_WA]);
-
- if (intr & MT_INT_MCU_CMD) {
- u32 val = mt76_rr(dev, MT_MCU_CMD);
-
- mt76_wr(dev, MT_MCU_CMD, val);
- if (val & MT_MCU_CMD_ERROR_MASK) {
- dev->reset_state = val;
- ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
- wake_up(&dev->reset_wait);
- }
- }
-}
-
-static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
-{
- struct mt7915_dev *dev = dev_instance;
-
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
- if (dev->hif2)
- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
-
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
- return IRQ_NONE;
-
- tasklet_schedule(&dev->irq_tasklet);
-
- return IRQ_HANDLED;
-}
-
-static void mt7915_pci_init_hif2(struct mt7915_dev *dev)
-{
- struct mt7915_hif *hif;
-
- dev->hif_idx = ++hif_idx;
- if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7916, NULL))
- return;
-
- mt76_wr(dev, MT_PCIE_RECOG_ID, dev->hif_idx | MT_PCIE_RECOG_ID_SEM);
-
- hif = mt7915_pci_get_hif2(dev);
- if (!hif)
- return;
-
- dev->hif2 = hif;
-
- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
-
- if (devm_request_irq(dev->mt76.dev, hif->irq, mt7915_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME "-hif", dev)) {
- mt7915_put_hif2(hif);
- hif = NULL;
- }
-
- /* master switch of PCIe tnterrupt enable */
- mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
+ return mt7915_pci_get_hif2(hif_idx);
}
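
How the two PCIe endpoints pair up, as implemented above and in mt7915_pci_get_hif2() (sketch; probe ordering is not guaranteed):

	/* 1. the hif2 endpoint (0x7916/0x790a) parks its regs/irq on the
	 *    global hif_list when it probes.
	 * 2. the main device writes its index, tagged with
	 *    MT_PCIE_RECOG_ID_SEM, to MT_PCIE_RECOG_ID through its own BAR.
	 * 3. mt7915_pci_get_hif2() scans hif_list for the endpoint whose
	 *    MT_PCIE_RECOG_ID readback matches and takes a get_device()
	 *    reference on it.
	 */
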
static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
@@ -219,26 +95,10 @@ static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
static int mt7915_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- static const struct mt76_driver_ops drv_ops = {
- /* txwi_size = txd size + txp size */
- .txwi_size = MT_TXD_SIZE + sizeof(struct mt7915_txp),
- .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
- .survey_flags = SURVEY_INFO_TIME_TX |
- SURVEY_INFO_TIME_RX |
- SURVEY_INFO_TIME_BSS_RX,
- .token_size = MT7915_TOKEN_SIZE,
- .tx_prepare_skb = mt7915_tx_prepare_skb,
- .tx_complete_skb = mt7915_tx_complete_skb,
- .rx_skb = mt7915_queue_rx_skb,
- .rx_check = mt7915_rx_check,
- .rx_poll_complete = mt7915_rx_poll_complete,
- .sta_ps = mt7915_sta_ps,
- .sta_add = mt7915_mac_sta_add,
- .sta_remove = mt7915_mac_sta_remove,
- .update_survey = mt7915_update_channel,
- };
struct mt7915_dev *dev;
struct mt76_dev *mdev;
+ struct mt7915_hif *hif2;
+ int irq;
int ret;
ret = pcim_enable_device(pdev);
@@ -257,48 +117,65 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
mt76_pci_disable_aspm(pdev);
- if (id->device == 0x7916)
+ if (id->device == 0x7916 || id->device == 0x790a)
return mt7915_pci_hif2_probe(pdev);
- mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7915_ops,
- &drv_ops);
- if (!mdev)
- return -ENOMEM;
+ dev = mt7915_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0],
+ id->device);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
- dev = container_of(mdev, struct mt7915_dev, mt76);
+ mdev = &dev->mt76;
+ hif2 = mt7915_pci_init_hif2(pdev);
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
- goto free;
+ goto free_device;
- ret = mt7915_mmio_init(mdev, pcim_iomap_table(pdev)[0], pdev->irq);
+ irq = pdev->irq;
+ ret = devm_request_irq(mdev->dev, irq, mt7915_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
- goto error;
-
- tasklet_setup(&dev->irq_tasklet, mt7915_irq_tasklet);
+ goto free_irq_vector;
mt76_wr(dev, MT_INT_MASK_CSR, 0);
/* master switch of PCIe interrupt enable */
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
- ret = devm_request_irq(mdev->dev, pdev->irq, mt7915_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME, dev);
- if (ret)
- goto error;
+ if (hif2) {
+ dev->hif2 = hif2;
- mt7915_pci_init_hif2(dev);
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+ /* master switch of PCIe interrupt enable */
+ if (is_mt7915(mdev))
+ mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
+ else
+ mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE_MT7916, 0xff);
+
+ ret = devm_request_irq(mdev->dev, dev->hif2->irq,
+ mt7915_irq_handler, IRQF_SHARED,
+ KBUILD_MODNAME "-hif", dev);
+ if (ret)
+ goto free_hif2;
+ }
ret = mt7915_register_device(dev);
if (ret)
- goto free_irq;
+ goto free_hif2_irq;
return 0;
-free_irq:
- devm_free_irq(mdev->dev, pdev->irq, dev);
-error:
+
+free_hif2_irq:
+ if (dev->hif2)
+ devm_free_irq(mdev->dev, dev->hif2->irq, dev);
+free_hif2:
+ if (dev->hif2)
+ put_device(dev->hif2->dev);
+ devm_free_irq(mdev->dev, irq, dev);
+free_irq_vector:
pci_free_irq_vectors(pdev);
-free:
+free_device:
mt76_free_device(&dev->mt76);
return ret;
@@ -322,47 +199,25 @@ static void mt7915_pci_remove(struct pci_dev *pdev)
mt7915_unregister_device(dev);
}
-static struct pci_driver mt7915_hif_driver = {
+struct pci_driver mt7915_hif_driver = {
.name = KBUILD_MODNAME "_hif",
.id_table = mt7915_hif_device_table,
.probe = mt7915_pci_probe,
.remove = mt7915_hif_remove,
};
-static struct pci_driver mt7915_pci_driver = {
+struct pci_driver mt7915_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = mt7915_pci_device_table,
.probe = mt7915_pci_probe,
.remove = mt7915_pci_remove,
};
-static int __init mt7915_init(void)
-{
- int ret;
-
- ret = pci_register_driver(&mt7915_hif_driver);
- if (ret)
- return ret;
-
- ret = pci_register_driver(&mt7915_pci_driver);
- if (ret)
- pci_unregister_driver(&mt7915_hif_driver);
-
- return ret;
-}
-
-static void __exit mt7915_exit(void)
-{
- pci_unregister_driver(&mt7915_pci_driver);
- pci_unregister_driver(&mt7915_hif_driver);
-}
-
-module_init(mt7915_init);
-module_exit(mt7915_exit);
-
MODULE_DEVICE_TABLE(pci, mt7915_pci_device_table);
MODULE_DEVICE_TABLE(pci, mt7915_hif_device_table);
MODULE_FIRMWARE(MT7915_FIRMWARE_WA);
MODULE_FIRMWARE(MT7915_FIRMWARE_WM);
MODULE_FIRMWARE(MT7915_ROM_PATCH);
-MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(MT7916_FIRMWARE_WA);
+MODULE_FIRMWARE(MT7916_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7916_ROM_PATCH);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
index 59693535b098..e5f93c40591c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
@@ -4,41 +4,152 @@
#ifndef __MT7915_REGS_H
#define __MT7915_REGS_H
+struct __map {
+ u32 phys;
+ u32 maps;
+ u32 size;
+};
+
+/* used to differentiate between generations */
+struct mt7915_reg_desc {
+ const u32 *reg_rev;
+ const u32 *offs_rev;
+ const struct __map *map;
+ u32 map_size;
+};
+
+enum reg_rev {
+ INT_SOURCE_CSR,
+ INT_MASK_CSR,
+ INT1_SOURCE_CSR,
+ INT1_MASK_CSR,
+ INT_MCU_CMD_SOURCE,
+ INT_MCU_CMD_EVENT,
+ WFDMA0_ADDR,
+ WFDMA0_PCIE1_ADDR,
+ WFDMA_EXT_CSR_ADDR,
+ CBTOP1_PHY_END,
+ INFRA_MCU_ADDR_END,
+ __MT_REG_MAX,
+};
+
+enum offs_rev {
+ TMAC_CDTR,
+ TMAC_ODTR,
+ TMAC_ATCR,
+ TMAC_TRCR0,
+ TMAC_ICR0,
+ TMAC_ICR1,
+ TMAC_CTCR0,
+ TMAC_TFCR0,
+ MDP_BNRCFR0,
+ MDP_BNRCFR1,
+ ARB_DRNGR0,
+ ARB_SCR,
+ RMAC_MIB_AIRTIME14,
+ AGG_AWSCR0,
+ AGG_PCR0,
+ AGG_ACR0,
+ AGG_MRCR,
+ AGG_ATCR1,
+ AGG_ATCR3,
+ LPON_UTTR0,
+ LPON_UTTR1,
+ LPON_FRCR,
+ MIB_SDR3,
+ MIB_SDR4,
+ MIB_SDR5,
+ MIB_SDR7,
+ MIB_SDR8,
+ MIB_SDR9,
+ MIB_SDR10,
+ MIB_SDR11,
+ MIB_SDR12,
+ MIB_SDR13,
+ MIB_SDR14,
+ MIB_SDR15,
+ MIB_SDR16,
+ MIB_SDR17,
+ MIB_SDR18,
+ MIB_SDR19,
+ MIB_SDR20,
+ MIB_SDR21,
+ MIB_SDR22,
+ MIB_SDR23,
+ MIB_SDR24,
+ MIB_SDR25,
+ MIB_SDR27,
+ MIB_SDR28,
+ MIB_SDR29,
+ MIB_SDRVEC,
+ MIB_SDR31,
+ MIB_SDR32,
+ MIB_SDRMUBF,
+ MIB_DR8,
+ MIB_DR9,
+ MIB_DR11,
+ MIB_MB_SDR0,
+ MIB_MB_SDR1,
+ TX_AGG_CNT,
+ TX_AGG_CNT2,
+ MIB_ARNG,
+ WTBLON_TOP_WDUCR,
+ WTBL_UPDATE,
+ PLE_FL_Q_EMPTY,
+ PLE_FL_Q_CTRL,
+ PLE_AC_QEMPTY,
+ PLE_FREEPG_CNT,
+ PLE_FREEPG_HEAD_TAIL,
+ PLE_PG_HIF_GROUP,
+ PLE_HIF_PG_INFO,
+ AC_OFFSET,
+ ETBF_PAR_RPT0,
+ __MT_OFFS_MAX,
+};
+
+#define __REG(id) (dev->reg.reg_rev[(id)])
+#define __OFFS(id) (dev->reg.offs_rev[(id)])
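
These two accessors turn every generation difference into data; a sketch of how a per-chip table plugs in (offsets shown are the mt7915 values visible elsewhere in this diff, table name is illustrative):

	static const u32 example_offs[] = {
		[MIB_SDR3] = 0x014,	/* mt7915 layout */
		[TMAC_CDTR] = 0x090,
		/* a mt7916 table carries its own offsets at the same indexes */
	};
	/* with dev->reg.offs_rev = example_offs, __OFFS(MIB_SDR3) == 0x014 */
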
+
/* MCU WFDMA0 */
#define MT_MCU_WFDMA0_BASE 0x2000
#define MT_MCU_WFDMA0(ofs) (MT_MCU_WFDMA0_BASE + (ofs))
+
#define MT_MCU_WFDMA0_DUMMY_CR MT_MCU_WFDMA0(0x120)
/* MCU WFDMA1 */
#define MT_MCU_WFDMA1_BASE 0x3000
#define MT_MCU_WFDMA1(ofs) (MT_MCU_WFDMA1_BASE + (ofs))
-#define MT_MCU_INT_EVENT MT_MCU_WFDMA1(0x108)
+#define MT_MCU_INT_EVENT __REG(INT_MCU_CMD_EVENT)
#define MT_MCU_INT_EVENT_DMA_STOPPED BIT(0)
#define MT_MCU_INT_EVENT_DMA_INIT BIT(1)
#define MT_MCU_INT_EVENT_SER_TRIGGER BIT(2)
#define MT_MCU_INT_EVENT_RESET_DONE BIT(3)
-#define MT_PLE_BASE 0x8000
+/* PLE */
+#define MT_PLE_BASE 0x820c0000
#define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
-#define MT_FL_Q_EMPTY 0x0b0
-#define MT_FL_Q0_CTRL 0x1b0
-#define MT_FL_Q2_CTRL 0x1b8
-#define MT_FL_Q3_CTRL 0x1bc
-
-#define MT_PLE_FREEPG_CNT MT_PLE(0x100)
-#define MT_PLE_FREEPG_HEAD_TAIL MT_PLE(0x104)
-#define MT_PLE_PG_HIF_GROUP MT_PLE(0x110)
-#define MT_PLE_HIF_PG_INFO MT_PLE(0x114)
-#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x500 + 0x40 * (ac) + \
- ((n) << 2))
+#define MT_FL_Q_EMPTY MT_PLE(__OFFS(PLE_FL_Q_EMPTY))
+#define MT_FL_Q0_CTRL MT_PLE(__OFFS(PLE_FL_Q_CTRL))
+#define MT_FL_Q2_CTRL MT_PLE(__OFFS(PLE_FL_Q_CTRL) + 0x8)
+#define MT_FL_Q3_CTRL MT_PLE(__OFFS(PLE_FL_Q_CTRL) + 0xc)
+
+#define MT_PLE_FREEPG_CNT MT_PLE(__OFFS(PLE_FREEPG_CNT))
+#define MT_PLE_FREEPG_HEAD_TAIL MT_PLE(__OFFS(PLE_FREEPG_HEAD_TAIL))
+#define MT_PLE_PG_HIF_GROUP MT_PLE(__OFFS(PLE_PG_HIF_GROUP))
+#define MT_PLE_HIF_PG_INFO MT_PLE(__OFFS(PLE_HIF_PG_INFO))
+
+#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(__OFFS(PLE_AC_QEMPTY) + \
+ __OFFS(AC_OFFSET) * \
+ (ac) + ((n) << 2))
#define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2))
-#define MT_PSE_BASE 0xc000
+#define MT_PSE_BASE 0x820c8000
#define MT_PSE(ofs) (MT_PSE_BASE + (ofs))
-#define MT_MDP_BASE 0xf000
+/* WF MDP TOP */
+#define MT_MDP_BASE 0x820cd000
#define MT_MDP(ofs) (MT_MDP_BASE + (ofs))
#define MT_MDP_DCR0 MT_MDP(0x000)
@@ -47,73 +158,76 @@
#define MT_MDP_DCR1 MT_MDP(0x004)
#define MT_MDP_DCR1_MAX_RX_LEN GENMASK(15, 3)
-#define MT_MDP_BNRCFR0(_band) MT_MDP(0x070 + ((_band) << 8))
+#define MT_MDP_BNRCFR0(_band) MT_MDP(__OFFS(MDP_BNRCFR0) + \
+ ((_band) << 8))
#define MT_MDP_RCFR0_MCU_RX_MGMT GENMASK(5, 4)
#define MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR GENMASK(7, 6)
#define MT_MDP_RCFR0_MCU_RX_CTL_BAR GENMASK(9, 8)
-#define MT_MDP_BNRCFR1(_band) MT_MDP(0x074 + ((_band) << 8))
+#define MT_MDP_BNRCFR1(_band) MT_MDP(__OFFS(MDP_BNRCFR1) + \
+ ((_band) << 8))
#define MT_MDP_RCFR1_MCU_RX_BYPASS GENMASK(23, 22)
#define MT_MDP_RCFR1_RX_DROPPED_UCAST GENMASK(28, 27)
#define MT_MDP_RCFR1_RX_DROPPED_MCAST GENMASK(30, 29)
#define MT_MDP_TO_HIF 0
#define MT_MDP_TO_WM 1
-/* TMAC: band 0(0x21000), band 1(0xa1000) */
-#define MT_WF_TMAC_BASE(_band) ((_band) ? 0xa1000 : 0x21000)
+/* TMAC: band 0(0x820e4000), band 1(0x820f4000) */
+#define MT_WF_TMAC_BASE(_band) ((_band) ? 0x820f4000 : 0x820e4000)
#define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs))
#define MT_TMAC_TCR0(_band) MT_WF_TMAC(_band, 0)
#define MT_TMAC_TCR0_TX_BLINK GENMASK(7, 6)
#define MT_TMAC_TCR0_TBTT_STOP_CTRL BIT(25)
-#define MT_TMAC_CDTR(_band) MT_WF_TMAC(_band, 0x090)
-#define MT_TMAC_ODTR(_band) MT_WF_TMAC(_band, 0x094)
+#define MT_TMAC_CDTR(_band) MT_WF_TMAC(_band, __OFFS(TMAC_CDTR))
+#define MT_TMAC_ODTR(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ODTR))
#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0)
#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16)
-#define MT_TMAC_ATCR(_band) MT_WF_TMAC(_band, 0x098)
+#define MT_TMAC_ATCR(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ATCR))
#define MT_TMAC_ATCR_TXV_TOUT GENMASK(7, 0)
-#define MT_TMAC_TRCR0(_band) MT_WF_TMAC(_band, 0x09c)
+#define MT_TMAC_TRCR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_TRCR0))
#define MT_TMAC_TRCR0_TR2T_CHK GENMASK(8, 0)
#define MT_TMAC_TRCR0_I2T_CHK GENMASK(24, 16)
-#define MT_TMAC_ICR0(_band) MT_WF_TMAC(_band, 0x0a4)
-#define MT_IFS_EIFS_OFDM GENMASK(8, 0)
+#define MT_TMAC_ICR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ICR0))
+#define MT_IFS_EIFS_OFDM GENMASK(8, 0)
#define MT_IFS_RIFS GENMASK(14, 10)
#define MT_IFS_SIFS GENMASK(22, 16)
#define MT_IFS_SLOT GENMASK(30, 24)
-#define MT_TMAC_ICR1(_band) MT_WF_TMAC(_band, 0x0b4)
+#define MT_TMAC_ICR1(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ICR1))
#define MT_IFS_EIFS_CCK GENMASK(8, 0)
-#define MT_TMAC_CTCR0(_band) MT_WF_TMAC(_band, 0x0f4)
+#define MT_TMAC_CTCR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_CTCR0))
#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0)
#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17)
#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18)
-#define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, 0x1e0)
+#define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_TFCR0))
-#define MT_WF_DMA_BASE(_band) ((_band) ? 0xa1e00 : 0x21e00)
+/* WF DMA TOP: band 0(0x820e7000),band 1(0x820f7000) */
+#define MT_WF_DMA_BASE(_band) ((_band) ? 0x820f7000 : 0x820e7000)
#define MT_WF_DMA(_band, ofs) (MT_WF_DMA_BASE(_band) + (ofs))
#define MT_DMA_DCR0(_band) MT_WF_DMA(_band, 0x000)
#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 3)
#define MT_DMA_DCR0_RXD_G5_EN BIT(23)
-/* ETBF: band 0(0x24000), band 1(0xa4000) */
-#define MT_WF_ETBF_BASE(_band) ((_band) ? 0xa4000 : 0x24000)
+/* ETBF: band 0(0x820ea000), band 1(0x820fa000) */
+#define MT_WF_ETBF_BASE(_band) ((_band) ? 0x820fa000 : 0x820ea000)
#define MT_WF_ETBF(_band, ofs) (MT_WF_ETBF_BASE(_band) + (ofs))
#define MT_ETBF_TX_NDP_BFRP(_band) MT_WF_ETBF(_band, 0x040)
#define MT_ETBF_TX_FB_CPL GENMASK(31, 16)
#define MT_ETBF_TX_FB_TRI GENMASK(15, 0)
-#define MT_ETBF_RX_FB_CONT(_band) MT_WF_ETBF(_band, 0x068)
-#define MT_ETBF_RX_FB_BW GENMASK(7, 6)
-#define MT_ETBF_RX_FB_NC GENMASK(5, 3)
-#define MT_ETBF_RX_FB_NR GENMASK(2, 0)
+#define MT_ETBF_PAR_RPT0(_band) MT_WF_ETBF(_band, __OFFS(ETBF_PAR_RPT0))
+#define MT_ETBF_PAR_RPT0_FB_BW GENMASK(7, 6)
+#define MT_ETBF_PAR_RPT0_FB_NC GENMASK(5, 3)
+#define MT_ETBF_PAR_RPT0_FB_NR GENMASK(2, 0)
#define MT_ETBF_TX_APP_CNT(_band) MT_WF_ETBF(_band, 0x0f0)
#define MT_ETBF_TX_IBF_CNT GENMASK(31, 16)
@@ -125,174 +239,209 @@
#define MT_ETBF_RX_FB_VHT GENMASK(15, 8)
#define MT_ETBF_RX_FB_HT GENMASK(7, 0)
-/* LPON: band 0(0x24200), band 1(0xa4200) */
-#define MT_WF_LPON_BASE(_band) ((_band) ? 0xa4200 : 0x24200)
+/* LPON: band 0(0x820eb000), band 1(0x820fb000) */
+#define MT_WF_LPON_BASE(_band) ((_band) ? 0x820fb000 : 0x820eb000)
#define MT_WF_LPON(_band, ofs) (MT_WF_LPON_BASE(_band) + (ofs))
-#define MT_LPON_UTTR0(_band) MT_WF_LPON(_band, 0x080)
-#define MT_LPON_UTTR1(_band) MT_WF_LPON(_band, 0x084)
+#define MT_LPON_UTTR0(_band) MT_WF_LPON(_band, __OFFS(LPON_UTTR0))
+#define MT_LPON_UTTR1(_band) MT_WF_LPON(_band, __OFFS(LPON_UTTR1))
+#define MT_LPON_FRCR(_band) MT_WF_LPON(_band, __OFFS(LPON_FRCR))
-#define MT_LPON_TCR(_band, n) MT_WF_LPON(_band, 0x0a8 + (n) * 4)
+#define MT_LPON_TCR(_band, n) MT_WF_LPON(_band, 0x0a8 + \
+ (((n) * 4) << 1))
+#define MT_LPON_TCR_MT7916(_band, n) MT_WF_LPON(_band, 0x0a8 + \
+ (((n) * 4) << 4))
#define MT_LPON_TCR_SW_MODE GENMASK(1, 0)
#define MT_LPON_TCR_SW_WRITE BIT(0)
#define MT_LPON_TCR_SW_ADJUST BIT(1)
#define MT_LPON_TCR_SW_READ GENMASK(1, 0)
-/* MIB: band 0(0x24800), band 1(0xa4800) */
+/* MIB: band 0(0x820ed000), band 1(0x820fd000) */
/* These counters are (mostly?) clear-on-read. So, some should not
* be read at all in case firmware is already reading them. These
* are commented with 'DNR' below. The DNR stats will be read by querying
* the firmware API for the appropriate message. For counters the driver
* does read, the driver should accumulate the counters.
*/
-#define MT_WF_MIB_BASE(_band) ((_band) ? 0xa4800 : 0x24800)
+#define MT_WF_MIB_BASE(_band) ((_band) ? 0x820fd000 : 0x820ed000)
#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE(_band) + (ofs))
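
A sketch of the accumulate-on-read pattern the comment above calls for, using the per-generation FCS-error masks added below (mib stands in for the phy's stats accumulator):

	u32 cnt = mt76_rr(dev, MT_MIB_SDR3(phy->band_idx));

	mib->fcs_err_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
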
#define MT_MIB_SDR0(_band) MT_WF_MIB(_band, 0x010)
#define MT_MIB_SDR0_BERACON_TX_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x014)
+#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR3))
#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0)
+#define MT_MIB_SDR3_FCS_ERR_MASK_MT7916 GENMASK(31, 16)
-#define MT_MIB_SDR4(_band) MT_WF_MIB(_band, 0x018)
+#define MT_MIB_SDR4(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR4))
#define MT_MIB_SDR4_RX_FIFO_FULL_MASK GENMASK(15, 0)
/* rx mpdu counter, full 32 bits */
-#define MT_MIB_SDR5(_band) MT_WF_MIB(_band, 0x01c)
+#define MT_MIB_SDR5(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR5))
#define MT_MIB_SDR6(_band) MT_WF_MIB(_band, 0x020)
#define MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR7(_band) MT_WF_MIB(_band, 0x024)
+#define MT_MIB_SDR7(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR7))
#define MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR8(_band) MT_WF_MIB(_band, 0x028)
+#define MT_MIB_SDR8(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR8))
#define MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK GENMASK(15, 0)
/* aka CCA_NAV_TX_TIME */
-#define MT_MIB_SDR9_DNR(_band) MT_WF_MIB(_band, 0x02c)
-#define MT_MIB_SDR9_CCA_BUSY_TIME_MASK GENMASK(23, 0)
+#define MT_MIB_SDR9_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR9))
+#define MT_MIB_SDR9_CCA_BUSY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR10_DNR(_band) MT_WF_MIB(_band, 0x030)
-#define MT_MIB_SDR10_MRDY_COUNT_MASK GENMASK(25, 0)
+#define MT_MIB_SDR10_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR10))
+#define MT_MIB_SDR10_MRDY_COUNT_MASK GENMASK(25, 0)
+#define MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916 GENMASK(31, 0)
-#define MT_MIB_SDR11(_band) MT_WF_MIB(_band, 0x034)
+#define MT_MIB_SDR11(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR11))
#define MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK GENMASK(15, 0)
/* tx ampdu cnt, full 32 bits */
-#define MT_MIB_SDR12(_band) MT_WF_MIB(_band, 0x038)
+#define MT_MIB_SDR12(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR12))
-#define MT_MIB_SDR13(_band) MT_WF_MIB(_band, 0x03c)
+#define MT_MIB_SDR13(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR13))
#define MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK GENMASK(15, 0)
/* counts all mpdus in ampdu, regardless of success */
-#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, 0x040)
+#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR14))
#define MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK GENMASK(23, 0)
+#define MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916 GENMASK(31, 0)
/* counts all successfully tx'd mpdus in ampdu */
-#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, 0x044)
+#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR15))
#define MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK GENMASK(23, 0)
+#define MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916 GENMASK(31, 0)
/* in units of 'us' */
-#define MT_MIB_SDR16_DNR(_band) MT_WF_MIB(_band, 0x048)
+#define MT_MIB_SDR16_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR16))
#define MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR17_DNR(_band) MT_WF_MIB(_band, 0x04c)
+#define MT_MIB_SDR17_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR17))
#define MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR18(_band) MT_WF_MIB(_band, 0x050)
+#define MT_MIB_SDR18(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR18))
#define MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK GENMASK(23, 0)
/* units are us */
-#define MT_MIB_SDR19_DNR(_band) MT_WF_MIB(_band, 0x054)
+#define MT_MIB_SDR19_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR19))
#define MT_MIB_SDR19_CCK_MDRDY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR20_DNR(_band) MT_WF_MIB(_band, 0x058)
+#define MT_MIB_SDR20_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR20))
#define MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR21_DNR(_band) MT_WF_MIB(_band, 0x05c)
+#define MT_MIB_SDR21_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR21))
#define MT_MIB_SDR20_GREEN_MDRDY_TIME_MASK GENMASK(23, 0)
/* rx ampdu count, 32-bit */
-#define MT_MIB_SDR22(_band) MT_WF_MIB(_band, 0x060)
+#define MT_MIB_SDR22(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR22))
/* rx ampdu bytes count, 32-bit */
-#define MT_MIB_SDR23(_band) MT_WF_MIB(_band, 0x064)
+#define MT_MIB_SDR23(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR23))
/* rx ampdu valid subframe count */
-#define MT_MIB_SDR24(_band) MT_WF_MIB(_band, 0x068)
+#define MT_MIB_SDR24(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR24))
#define MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK GENMASK(23, 0)
+#define MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916 GENMASK(31, 0)
/* rx ampdu valid subframe bytes count, 32bits */
-#define MT_MIB_SDR25(_band) MT_WF_MIB(_band, 0x06c)
+#define MT_MIB_SDR25(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR25))
/* remaining windows protected stats */
-#define MT_MIB_SDR27(_band) MT_WF_MIB(_band, 0x074)
+#define MT_MIB_SDR27(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR27))
#define MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR28(_band) MT_WF_MIB(_band, 0x078)
+#define MT_MIB_SDR28(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR28))
#define MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR29(_band) MT_WF_MIB(_band, 0x07c)
-#define MT_MIB_SDR29_RX_PFDROP_CNT_MASK GENMASK(7, 0)
+#define MT_MIB_SDR29(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR29))
+#define MT_MIB_SDR29_RX_PFDROP_CNT_MASK GENMASK(7, 0)
+#define MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916 GENMASK(15, 0)
-#define MT_MIB_SDR30(_band) MT_WF_MIB(_band, 0x080)
+#define MT_MIB_SDRVEC(_band) MT_WF_MIB(_band, __OFFS(MIB_SDRVEC))
#define MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK GENMASK(15, 0)
+#define MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916 GENMASK(31, 16)
/* rx blockack count, 32 bits */
-#define MT_MIB_SDR31(_band) MT_WF_MIB(_band, 0x084)
+#define MT_MIB_SDR31(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR31))
-#define MT_MIB_SDR32(_band) MT_WF_MIB(_band, 0x088)
-#define MT_MIB_SDR32_TX_PKT_EBF_CNT_MASK GENMASK(15, 0)
+#define MT_MIB_SDR32(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR32))
+#define MT_MIB_SDR32_TX_PKT_EBF_CNT GENMASK(15, 0)
+#define MT_MIB_SDR32_TX_PKT_IBF_CNT GENMASK(31, 16)
-#define MT_MIB_SDR33(_band) MT_WF_MIB(_band, 0x08c)
-#define MT_MIB_SDR33_TX_PKT_IBF_CNT_MASK GENMASK(15, 0)
+#define MT_MIB_SDR33(_band) MT_WF_MIB(_band, 0x088)
+#define MT_MIB_SDR33_TX_PKT_IBF_CNT GENMASK(15, 0)
-#define MT_MIB_SDR34(_band) MT_WF_MIB(_band, 0x090)
+#define MT_MIB_SDRMUBF(_band) MT_WF_MIB(_band, __OFFS(MIB_SDRMUBF))
#define MT_MIB_MU_BF_TX_CNT GENMASK(15, 0)
/* 36, 37 both DNR */
-#define MT_MIB_DR8(_band) MT_WF_MIB(_band, 0x0c0)
-#define MT_MIB_DR9(_band) MT_WF_MIB(_band, 0x0c4)
-#define MT_MIB_DR11(_band) MT_WF_MIB(_band, 0x0cc)
+#define MT_MIB_DR8(_band) MT_WF_MIB(_band, __OFFS(MIB_DR8))
+#define MT_MIB_DR9(_band) MT_WF_MIB(_band, __OFFS(MIB_DR9))
+#define MT_MIB_DR11(_band) MT_WF_MIB(_band, __OFFS(MIB_DR11))
-#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, 0x100 + ((n) << 4))
+#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, __OFFS(MIB_MB_SDR0) + (n))
#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16)
#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
-#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, 0x104 + ((n) << 4))
+#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, __OFFS(MIB_MB_SDR1) + (n))
#define MT_MIB_BA_MISS_COUNT_MASK GENMASK(15, 0)
#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16)
-#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0x0a8 + ((n) << 2))
-#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, 0x164 + ((n) << 2))
-#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x4b8 + ((n) << 2))
+#define MT_MIB_MB_SDR2(_band, n) MT_WF_MIB(_band, 0x518 + (n))
+#define MT_MIB_MB_BFTF(_band, n) MT_WF_MIB(_band, 0x510 + (n))
+
+#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, __OFFS(TX_AGG_CNT) + \
+ ((n) << 2))
+#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, __OFFS(TX_AGG_CNT2) + \
+ ((n) << 2))
+#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, __OFFS(MIB_ARNG) + \
+ ((n) << 2))
#define MT_MIB_ARNCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(7, 0))
-#define MT_WTBLON_TOP_BASE 0x34000
+#define MT_MIB_BFCR0(_band) MT_WF_MIB(_band, 0x7b0)
+#define MT_MIB_BFCR0_RX_FB_HT GENMASK(15, 0)
+#define MT_MIB_BFCR0_RX_FB_VHT GENMASK(31, 16)
+
+#define MT_MIB_BFCR1(_band) MT_WF_MIB(_band, 0x7b4)
+#define MT_MIB_BFCR1_RX_FB_HE GENMASK(15, 0)
+
+#define MT_MIB_BFCR2(_band) MT_WF_MIB(_band, 0x7b8)
+#define MT_MIB_BFCR2_BFEE_TX_FB_TRIG GENMASK(15, 0)
+
+#define MT_MIB_BFCR7(_band) MT_WF_MIB(_band, 0x7cc)
+#define MT_MIB_BFCR7_BFEE_TX_FB_CPL GENMASK(15, 0)
+
+/* WTBLON TOP */
+#define MT_WTBLON_TOP_BASE 0x820d4000
#define MT_WTBLON_TOP(ofs) (MT_WTBLON_TOP_BASE + (ofs))
-#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x0)
+#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(__OFFS(WTBLON_TOP_WDUCR))
#define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(2, 0)
-#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x030)
+#define MT_WTBL_UPDATE MT_WTBLON_TOP(__OFFS(WTBL_UPDATE))
#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(9, 0)
#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12)
#define MT_WTBL_UPDATE_BUSY BIT(31)
-#define MT_WTBL_BASE 0x38000
+/* WTBL */
+#define MT_WTBL_BASE 0x820d8000
#define MT_WTBL_LMAC_ID GENMASK(14, 8)
#define MT_WTBL_LMAC_DW GENMASK(7, 2)
#define MT_WTBL_LMAC_OFFS(_id, _dw) (MT_WTBL_BASE | \
- FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \
- FIELD_PREP(MT_WTBL_LMAC_DW, _dw))
+ FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \
+ FIELD_PREP(MT_WTBL_LMAC_DW, _dw))
-/* AGG: band 0(0x20800), band 1(0xa0800) */
-#define MT_WF_AGG_BASE(_band) ((_band) ? 0xa0800 : 0x20800)
+/* AGG: band 0(0x820e2000), band 1(0x820f2000) */
+#define MT_WF_AGG_BASE(_band) ((_band) ? 0x820f2000 : 0x820e2000)
#define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs))
-#define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, 0x05c + (_n) * 4)
-#define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, 0x06c + (_n) * 4)
+#define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AWSCR0) + \
+ (_n) * 4))
+#define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_PCR0) + \
+ (_n) * 4))
#define MT_AGG_PCR0_MM_PROT BIT(0)
#define MT_AGG_PCR0_GF_PROT BIT(1)
#define MT_AGG_PCR0_BW20_PROT BIT(2)
@@ -305,31 +454,32 @@
#define MT_AGG_PCR1_RTS0_NUM_THRES GENMASK(31, 23)
#define MT_AGG_PCR1_RTS0_LEN_THRES GENMASK(19, 0)
-#define MT_AGG_ACR0(_band) MT_WF_AGG(_band, 0x084)
+#define MT_AGG_ACR0(_band) MT_WF_AGG(_band, __OFFS(AGG_ACR0))
#define MT_AGG_ACR_CFEND_RATE GENMASK(13, 0)
#define MT_AGG_ACR_BAR_RATE GENMASK(29, 16)
-#define MT_AGG_MRCR(_band) MT_WF_AGG(_band, 0x098)
-#define MT_AGG_MRCR_BAR_CNT_LIMIT GENMASK(15, 12)
-#define MT_AGG_MRCR_LAST_RTS_CTS_RN BIT(6)
-#define MT_AGG_MRCR_RTS_FAIL_LIMIT GENMASK(11, 7)
+#define MT_AGG_MRCR(_band) MT_WF_AGG(_band, __OFFS(AGG_MRCR))
+#define MT_AGG_MRCR_BAR_CNT_LIMIT GENMASK(15, 12)
+#define MT_AGG_MRCR_LAST_RTS_CTS_RN BIT(6)
+#define MT_AGG_MRCR_RTS_FAIL_LIMIT GENMASK(11, 7)
#define MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT GENMASK(28, 24)
-#define MT_AGG_ATCR1(_band) MT_WF_AGG(_band, 0x0f0)
-#define MT_AGG_ATCR3(_band) MT_WF_AGG(_band, 0x0f4)
+#define MT_AGG_ATCR1(_band) MT_WF_AGG(_band, __OFFS(AGG_ATCR1))
+#define MT_AGG_ATCR3(_band) MT_WF_AGG(_band, __OFFS(AGG_ATCR3))
-/* ARB: band 0(0x20c00), band 1(0xa0c00) */
-#define MT_WF_ARB_BASE(_band) ((_band) ? 0xa0c00 : 0x20c00)
+/* ARB: band 0(0x820e3000), band 1(0x820f3000) */
+#define MT_WF_ARB_BASE(_band) ((_band) ? 0x820f3000 : 0x820e3000)
#define MT_WF_ARB(_band, ofs) (MT_WF_ARB_BASE(_band) + (ofs))
-#define MT_ARB_SCR(_band) MT_WF_ARB(_band, 0x080)
+#define MT_ARB_SCR(_band) MT_WF_ARB(_band, __OFFS(ARB_SCR))
#define MT_ARB_SCR_TX_DISABLE BIT(8)
#define MT_ARB_SCR_RX_DISABLE BIT(9)
-#define MT_ARB_DRNGR0(_band, _n) MT_WF_ARB(_band, 0x194 + (_n) * 4)
+#define MT_ARB_DRNGR0(_band, _n) MT_WF_ARB(_band, (__OFFS(ARB_DRNGR0) + \
+ (_n) * 4))
-/* RMAC: band 0(0x21400), band 1(0xa1400) */
-#define MT_WF_RMAC_BASE(_band) ((_band) ? 0xa1400 : 0x21400)
+/* RMAC: band 0(0x820e5000), band 1(0x820f5000) */
+#define MT_WF_RMAC_BASE(_band) ((_band) ? 0x820f5000 : 0x820e5000)
#define MT_WF_RMAC(_band, ofs) (MT_WF_RMAC_BASE(_band) + (ofs))
#define MT_WF_RFCR(_band) MT_WF_RMAC(_band, 0x000)
@@ -366,7 +516,7 @@
#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31)
/* WFDMA0 */
-#define MT_WFDMA0_BASE 0xd4000
+#define MT_WFDMA0_BASE __REG(WFDMA0_ADDR)
#define MT_WFDMA0(ofs) (MT_WFDMA0_BASE + (ofs))
#define MT_WFDMA0_RST MT_WFDMA0(0x100)
@@ -381,15 +531,14 @@
#define MT_WFDMA0_GLO_CFG MT_WFDMA0(0x208)
#define MT_WFDMA0_GLO_CFG_TX_DMA_EN BIT(0)
#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WFDMA0_GLO_CFG_OMIT_TX_INFO BIT(28)
+#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
#define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c)
#define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0)
-
-#define MT_RX_DATA_RING_BASE MT_WFDMA0(0x500)
-
-#define MT_WFDMA0_RX_RING0_EXT_CTRL MT_WFDMA0(0x680)
-#define MT_WFDMA0_RX_RING1_EXT_CTRL MT_WFDMA0(0x684)
-#define MT_WFDMA0_RX_RING2_EXT_CTRL MT_WFDMA0(0x688)
+#define MT_WFDMA0_PRI_DLY_INT_CFG1 MT_WFDMA0(0x2f4)
+#define MT_WFDMA0_PRI_DLY_INT_CFG2 MT_WFDMA0(0x2f8)
/* WFDMA1 */
#define MT_WFDMA1_BASE 0xd5000
@@ -404,129 +553,371 @@
#define MT_WFDMA1_BUSY_ENA_TX_FIFO1 BIT(1)
#define MT_WFDMA1_BUSY_ENA_RX_FIFO BIT(2)
-#define MT_MCU_CMD MT_WFDMA1(0x1f0)
-#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1)
-#define MT_MCU_CMD_STOP_DMA BIT(2)
-#define MT_MCU_CMD_RESET_DONE BIT(3)
-#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
-#define MT_MCU_CMD_NORMAL_STATE BIT(5)
-#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1)
-
#define MT_WFDMA1_GLO_CFG MT_WFDMA1(0x208)
#define MT_WFDMA1_GLO_CFG_TX_DMA_EN BIT(0)
#define MT_WFDMA1_GLO_CFG_RX_DMA_EN BIT(2)
#define MT_WFDMA1_GLO_CFG_OMIT_TX_INFO BIT(28)
#define MT_WFDMA1_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
#define MT_WFDMA1_RST_DTX_PTR MT_WFDMA1(0x20c)
#define MT_WFDMA1_PRI_DLY_INT_CFG0 MT_WFDMA1(0x2f0)
-#define MT_TX_RING_BASE MT_WFDMA1(0x300)
-#define MT_RX_EVENT_RING_BASE MT_WFDMA1(0x500)
-
-#define MT_WFDMA1_TX_RING0_EXT_CTRL MT_WFDMA1(0x600)
-#define MT_WFDMA1_TX_RING1_EXT_CTRL MT_WFDMA1(0x604)
-#define MT_WFDMA1_TX_RING2_EXT_CTRL MT_WFDMA1(0x608)
-#define MT_WFDMA1_TX_RING3_EXT_CTRL MT_WFDMA1(0x60c)
-#define MT_WFDMA1_TX_RING4_EXT_CTRL MT_WFDMA1(0x610)
-#define MT_WFDMA1_TX_RING5_EXT_CTRL MT_WFDMA1(0x614)
-#define MT_WFDMA1_TX_RING6_EXT_CTRL MT_WFDMA1(0x618)
-#define MT_WFDMA1_TX_RING7_EXT_CTRL MT_WFDMA1(0x61c)
-
-#define MT_WFDMA1_TX_RING16_EXT_CTRL MT_WFDMA1(0x640)
-#define MT_WFDMA1_TX_RING17_EXT_CTRL MT_WFDMA1(0x644)
-#define MT_WFDMA1_TX_RING18_EXT_CTRL MT_WFDMA1(0x648)
-#define MT_WFDMA1_TX_RING19_EXT_CTRL MT_WFDMA1(0x64c)
-#define MT_WFDMA1_TX_RING20_EXT_CTRL MT_WFDMA1(0x650)
-#define MT_WFDMA1_TX_RING21_EXT_CTRL MT_WFDMA1(0x654)
-#define MT_WFDMA1_TX_RING22_EXT_CTRL MT_WFDMA1(0x658)
-#define MT_WFDMA1_TX_RING23_EXT_CTRL MT_WFDMA1(0x65c)
-
-#define MT_WFDMA1_RX_RING0_EXT_CTRL MT_WFDMA1(0x680)
-#define MT_WFDMA1_RX_RING1_EXT_CTRL MT_WFDMA1(0x684)
-#define MT_WFDMA1_RX_RING2_EXT_CTRL MT_WFDMA1(0x688)
-#define MT_WFDMA1_RX_RING3_EXT_CTRL MT_WFDMA1(0x68c)
-
/* WFDMA CSR */
-#define MT_WFDMA_EXT_CSR_BASE 0xd7000
+#define MT_WFDMA_EXT_CSR_BASE __REG(WFDMA_EXT_CSR_ADDR)
#define MT_WFDMA_EXT_CSR(ofs) (MT_WFDMA_EXT_CSR_BASE + (ofs))
-#define MT_INT_SOURCE_CSR MT_WFDMA_EXT_CSR(0x10)
-#define MT_INT_MASK_CSR MT_WFDMA_EXT_CSR(0x14)
-#define MT_INT_RX_DONE_DATA0 BIT(16)
-#define MT_INT_RX_DONE_DATA1 BIT(17)
-#define MT_INT_RX_DONE_WM BIT(0)
-#define MT_INT_RX_DONE_WA BIT(1)
-#define MT_INT_RX_DONE_WA_EXT BIT(2)
-#define MT_INT_RX_DONE_ALL (GENMASK(2, 0) | GENMASK(17, 16))
-#define MT_INT_TX_DONE_MCU_WA BIT(15)
-#define MT_INT_TX_DONE_FWDL BIT(26)
-#define MT_INT_TX_DONE_MCU_WM BIT(27)
-#define MT_INT_TX_DONE_BAND0 BIT(30)
-#define MT_INT_TX_DONE_BAND1 BIT(31)
-
-#define MT_INT_BAND1_MASK (MT_INT_RX_DONE_WA_EXT | \
- MT_INT_TX_DONE_BAND1)
-
-#define MT_INT_MCU_CMD BIT(29)
-
-#define MT_INT_TX_DONE_MCU (MT_INT_TX_DONE_MCU_WA | \
- MT_INT_TX_DONE_MCU_WM | \
- MT_INT_TX_DONE_FWDL)
-
#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR(0x30)
#define MT_WFDMA_HOST_CONFIG_PDMA_BAND BIT(0)
#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44)
#define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0)
-#define MT_INT1_SOURCE_CSR MT_WFDMA_EXT_CSR(0x88)
-#define MT_INT1_MASK_CSR MT_WFDMA_EXT_CSR(0x8c)
-
-#define MT_PCIE_RECOG_ID MT_WFDMA_EXT_CSR(0x90)
+#define MT_PCIE_RECOG_ID 0xd7090
#define MT_PCIE_RECOG_ID_MASK GENMASK(30, 0)
#define MT_PCIE_RECOG_ID_SEM BIT(31)
/* WFDMA0 PCIE1 */
-#define MT_WFDMA0_PCIE1_BASE 0xd8000
-#define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
+#define MT_WFDMA0_PCIE1_BASE __REG(WFDMA0_PCIE1_ADDR)
+#define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
-#define MT_WFDMA0_PCIE1_BUSY_ENA MT_WFDMA0_PCIE1(0x13c)
+#define MT_WFDMA0_PCIE1_BUSY_ENA MT_WFDMA0_PCIE1(0x13c)
#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0)
#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1)
#define MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO BIT(2)
/* WFDMA1 PCIE1 */
-#define MT_WFDMA1_PCIE1_BASE 0xd9000
-#define MT_WFDMA1_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
+#define MT_WFDMA1_PCIE1_BASE 0xd9000
+#define MT_WFDMA1_PCIE1(ofs) (MT_WFDMA1_PCIE1_BASE + (ofs))
-#define MT_WFDMA1_PCIE1_BUSY_ENA MT_WFDMA1_PCIE1(0x13c)
+#define MT_WFDMA1_PCIE1_BUSY_ENA MT_WFDMA1_PCIE1(0x13c)
#define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0)
#define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1)
#define MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO BIT(2)
-#define MT_TOP_RGU_BASE 0xf0000
-#define MT_TOP_PWR_CTRL (MT_TOP_RGU_BASE + (0x0))
-#define MT_TOP_PWR_KEY (0x5746 << 16)
-#define MT_TOP_PWR_SW_RST BIT(0)
-#define MT_TOP_PWR_SW_PWR_ON GENMASK(3, 2)
-#define MT_TOP_PWR_HW_CTRL BIT(4)
-#define MT_TOP_PWR_PWR_ON BIT(7)
+/* WFDMA COMMON */
+#define __RXQ(q) ((q) + __MT_MCUQ_MAX)
+#define __TXQ(q) (__RXQ(q) + __MT_RXQ_MAX)
+
+#define MT_Q_ID(q) (dev->q_id[(q)])
+#define MT_Q_BASE(q) ((dev->wfdma_mask >> (q)) & 0x1 ? \
+ MT_WFDMA1_BASE : MT_WFDMA0_BASE)
+
+#define MT_MCUQ_ID(q) MT_Q_ID(q)
+#define MT_TXQ_ID(q) MT_Q_ID(__TXQ(q))
+#define MT_RXQ_ID(q) MT_Q_ID(__RXQ(q))
+
+#define MT_MCUQ_RING_BASE(q) (MT_Q_BASE(q) + 0x300)
+#define MT_TXQ_RING_BASE(q) (MT_Q_BASE(__TXQ(q)) + 0x300)
+#define MT_RXQ_RING_BASE(q) (MT_Q_BASE(__RXQ(q)) + 0x500)
+
+#define MT_MCUQ_EXT_CTRL(q) (MT_Q_BASE(q) + 0x600 + \
+ MT_MCUQ_ID(q) * 0x4)
+#define MT_RXQ_EXT_CTRL(q) (MT_Q_BASE(__RXQ(q)) + 0x680 + \
+ MT_RXQ_ID(q) * 0x4)
+#define MT_TXQ_EXT_CTRL(q) (MT_Q_BASE(__TXQ(q)) + 0x600 + \
+ MT_TXQ_ID(q) * 0x4)
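
MT_Q_BASE() picks WFDMA0 or WFDMA1 from a per-queue bit in dev->wfdma_mask, and MT_Q_ID() maps logical mt76 queues to hardware ring numbers; a sketch of the init-time wiring this assumes (assignment is hypothetical, MT7916_RXQ_BAND0 is defined earlier in this diff):

	dev->q_id[__RXQ(MT_RXQ_MAIN)] = MT7916_RXQ_BAND0;
	/* wfdma_mask bit clear for this queue -> ring lives on WFDMA0 */
	u32 base = MT_RXQ_RING_BASE(MT_RXQ_MAIN);	/* MT_WFDMA0_BASE + 0x500 */
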
+
+#define MT_INT_SOURCE_CSR __REG(INT_SOURCE_CSR)
+#define MT_INT_MASK_CSR __REG(INT_MASK_CSR)
+
+#define MT_INT1_SOURCE_CSR __REG(INT1_SOURCE_CSR)
+#define MT_INT1_MASK_CSR __REG(INT1_MASK_CSR)
+
+#define MT_INT_RX_DONE_BAND0 BIT(16)
+#define MT_INT_RX_DONE_BAND1 BIT(17)
+#define MT_INT_RX_DONE_WM BIT(0)
+#define MT_INT_RX_DONE_WA BIT(1)
+#define MT_INT_RX_DONE_WA_MAIN BIT(1)
+#define MT_INT_RX_DONE_WA_EXT BIT(2)
+#define MT_INT_MCU_CMD BIT(29)
+#define MT_INT_RX_DONE_BAND0_MT7916 BIT(22)
+#define MT_INT_RX_DONE_BAND1_MT7916 BIT(23)
+#define MT_INT_RX_DONE_WA_MAIN_MT7916 BIT(2)
+#define MT_INT_RX_DONE_WA_EXT_MT7916 BIT(3)
+
+#define MT_INT_RX(q) (dev->q_int_mask[__RXQ(q)])
+#define MT_INT_TX_MCU(q) (dev->q_int_mask[(q)])
+
+#define MT_INT_RX_DONE_MCU (MT_INT_RX(MT_RXQ_MCU) | \
+ MT_INT_RX(MT_RXQ_MCU_WA))
+
+#define MT_INT_BAND0_RX_DONE (MT_INT_RX(MT_RXQ_MAIN) | \
+ MT_INT_RX(MT_RXQ_MAIN_WA))
+
+#define MT_INT_BAND1_RX_DONE (MT_INT_RX(MT_RXQ_EXT) | \
+ MT_INT_RX(MT_RXQ_EXT_WA) | \
+ MT_INT_RX(MT_RXQ_MAIN_WA))
+
+#define MT_INT_RX_DONE_ALL (MT_INT_RX_DONE_MCU | \
+ MT_INT_BAND0_RX_DONE | \
+ MT_INT_BAND1_RX_DONE)
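
The same indirection covers interrupt bits: q_int_mask[] is filled once per generation, so the tasklet's "intr & MT_INT_RX(q)" tests stay chip-agnostic. A hypothetical mt7916 assignment:

	dev->q_int_mask[__RXQ(MT_RXQ_MAIN)] = MT_INT_RX_DONE_BAND0_MT7916;
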
-#define MT_INFRA_CFG_BASE 0xf1000
-#define MT_INFRA(ofs) (MT_INFRA_CFG_BASE + (ofs))
+#define MT_INT_TX_DONE_FWDL BIT(26)
+#define MT_INT_TX_DONE_MCU_WM BIT(27)
+#define MT_INT_TX_DONE_MCU_WA BIT(15)
+#define MT_INT_TX_DONE_BAND0 BIT(30)
+#define MT_INT_TX_DONE_BAND1 BIT(31)
+#define MT_INT_TX_DONE_MCU_WA_MT7916 BIT(25)
+
+#define MT_INT_TX_DONE_MCU (MT_INT_TX_MCU(MT_MCUQ_WA) | \
+ MT_INT_TX_MCU(MT_MCUQ_WM) | \
+ MT_INT_TX_MCU(MT_MCUQ_FWDL))
+
+#define MT_MCU_CMD __REG(INT_MCU_CMD_SOURCE)
+#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1)
+#define MT_MCU_CMD_STOP_DMA BIT(2)
+#define MT_MCU_CMD_RESET_DONE BIT(3)
+#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
+#define MT_MCU_CMD_NORMAL_STATE BIT(5)
+#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1)
-#define MT_HIF_REMAP_L1 MT_INFRA(0x1ac)
+/* TOP RGU */
+#define MT_TOP_RGU_BASE 0x18000000
+#define MT_TOP_PWR_CTRL (MT_TOP_RGU_BASE + (0x0))
+#define MT_TOP_PWR_KEY (0x5746 << 16)
+#define MT_TOP_PWR_SW_RST BIT(0)
+#define MT_TOP_PWR_SW_PWR_ON GENMASK(3, 2)
+#define MT_TOP_PWR_HW_CTRL BIT(4)
+#define MT_TOP_PWR_PWR_ON BIT(7)
+
+#define MT_TOP_RGU_SYSRAM_PDN (MT_TOP_RGU_BASE + 0x050)
+#define MT_TOP_RGU_SYSRAM_SLP (MT_TOP_RGU_BASE + 0x054)
+#define MT_TOP_WFSYS_PWR (MT_TOP_RGU_BASE + 0x010)
+#define MT_TOP_PWR_EN_MASK BIT(7)
+#define MT_TOP_PWR_ACK_MASK BIT(6)
+#define MT_TOP_PWR_KEY_MASK GENMASK(31, 16)
+
+#define MT7986_TOP_WM_RESET (MT_TOP_RGU_BASE + 0x120)
+#define MT7986_TOP_WM_RESET_MASK BIT(0)
+
+/* l1/l2 remap */
+#define MT_HIF_REMAP_L1 0xf11ac
+#define MT_HIF_REMAP_L1_MT7916 0xfe260
#define MT_HIF_REMAP_L1_MASK GENMASK(15, 0)
#define MT_HIF_REMAP_L1_OFFSET GENMASK(15, 0)
#define MT_HIF_REMAP_L1_BASE GENMASK(31, 16)
#define MT_HIF_REMAP_BASE_L1 0xe0000
-#define MT_HIF_REMAP_L2 MT_INFRA(0x1b0)
+#define MT_HIF_REMAP_L2 0xf11b0
#define MT_HIF_REMAP_L2_MASK GENMASK(19, 0)
#define MT_HIF_REMAP_L2_OFFSET GENMASK(11, 0)
#define MT_HIF_REMAP_L2_BASE GENMASK(31, 12)
-#define MT_HIF_REMAP_BASE_L2 0x00000
+#define MT_HIF_REMAP_L2_MT7916 0x1b8
+#define MT_HIF_REMAP_L2_MASK_MT7916 GENMASK(31, 16)
+#define MT_HIF_REMAP_L2_OFFSET_MT7916 GENMASK(15, 0)
+#define MT_HIF_REMAP_L2_BASE_MT7916 GENMASK(31, 16)
+#define MT_HIF_REMAP_BASE_L2_MT7916 0x40000
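
A sketch of how the L1 window is expected to be used, mirroring mt7915_reg_map_l1() earlier in this diff: split the target address into base/offset, program the base into the remap CR, then access the fixed 0xe0000 window (details illustrative):

	static u32 sketch_reg_map_l1(struct mt7915_dev *dev, u32 addr)
	{
		u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
		u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
		u32 cr = is_mt7915(&dev->mt76) ?
			 MT_HIF_REMAP_L1 : MT_HIF_REMAP_L1_MT7916;

		dev->bus_ops->rmw(&dev->mt76, cr, MT_HIF_REMAP_L1_MASK,
				  FIELD_PREP(MT_HIF_REMAP_L1_MASK, base));
		/* readback commits the remap CR before the windowed access */
		dev->bus_ops->rr(&dev->mt76, cr);

		return MT_HIF_REMAP_BASE_L1 + offset;
	}
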
+
+#define MT_INFRA_BASE 0x18000000
+#define MT_WFSYS0_PHY_START 0x18400000
+#define MT_WFSYS1_PHY_START 0x18800000
+#define MT_WFSYS1_PHY_END 0x18bfffff
+#define MT_CBTOP1_PHY_START 0x70000000
+#define MT_CBTOP1_PHY_END __REG(CBTOP1_PHY_END)
+#define MT_CBTOP2_PHY_START 0xf0000000
+#define MT_CBTOP2_PHY_END 0xffffffff
+#define MT_INFRA_MCU_START 0x7c000000
+#define MT_INFRA_MCU_END __REG(INFRA_MCU_ADDR_END)
+#define MT_CONN_INFRA_OFFSET(p) ((p) - MT_INFRA_BASE)
+
+/* CONN INFRA CFG */
+#define MT_CONN_INFRA_BASE 0x18001000
+#define MT_CONN_INFRA(ofs) (MT_CONN_INFRA_BASE + (ofs))
+
+#define MT_CONN_INFRA_EFUSE MT_CONN_INFRA(0x020)
+
+#define MT_CONN_INFRA_ADIE_RESET MT_CONN_INFRA(0x030)
+#define MT_CONN_INFRA_ADIE1_RESET_MASK BIT(0)
+#define MT_CONN_INFRA_ADIE2_RESET_MASK BIT(2)
+
+#define MT_CONN_INFRA_OSC_RC_EN MT_CONN_INFRA(0x380)
+
+#define MT_CONN_INFRA_OSC_CTRL MT_CONN_INFRA(0x300)
+#define MT_CONN_INFRA_OSC_RC_EN_MASK BIT(7)
+#define MT_CONN_INFRA_OSC_STB_TIME_MASK GENMASK(23, 0)
+
+#define MT_CONN_INFRA_HW_CTRL MT_CONN_INFRA(0x200)
+#define MT_CONN_INFRA_HW_CTRL_MASK BIT(0)
+
+#define MT_CONN_INFRA_WF_SLP_PROT MT_CONN_INFRA(0x540)
+#define MT_CONN_INFRA_WF_SLP_PROT_MASK BIT(0)
+
+#define MT_CONN_INFRA_WF_SLP_PROT_RDY MT_CONN_INFRA(0x544)
+#define MT_CONN_INFRA_CONN_WF_MASK (BIT(29) | BIT(31))
+#define MT_CONN_INFRA_CONN (BIT(25) | BIT(29) | BIT(31))
+
+#define MT_CONN_INFRA_EMI_REQ MT_CONN_INFRA(0x414)
+#define MT_CONN_INFRA_EMI_REQ_MASK BIT(0)
+#define MT_CONN_INFRA_INFRA_REQ_MASK BIT(5)
+
+/* AFE */
+#define MT_AFE_CTRL_BASE(_band) (0x18003000 + ((_band) << 19))
+#define MT_AFE_CTRL(_band, ofs) (MT_AFE_CTRL_BASE(_band) + (ofs))
+
+#define MT_AFE_DIG_EN_01(_band) MT_AFE_CTRL(_band, 0x00)
+#define MT_AFE_DIG_EN_02(_band) MT_AFE_CTRL(_band, 0x04)
+#define MT_AFE_DIG_EN_03(_band) MT_AFE_CTRL(_band, 0x08)
+#define MT_AFE_DIG_TOP_01(_band) MT_AFE_CTRL(_band, 0x0c)
+
+#define MT_AFE_PLL_STB_TIME(_band) MT_AFE_CTRL(_band, 0xf4)
+#define MT_AFE_PLL_STB_TIME_MASK (GENMASK(30, 16) | GENMASK(14, 0))
+#define MT_AFE_PLL_STB_TIME_VAL (FIELD_PREP(GENMASK(30, 16), 0x4bc) | \
+ FIELD_PREP(GENMASK(14, 0), 0x7e4))
+#define MT_AFE_BPLL_CFG_MASK GENMASK(7, 6)
+#define MT_AFE_WPLL_CFG_MASK GENMASK(1, 0)
+#define MT_AFE_MCU_WPLL_CFG_MASK GENMASK(3, 2)
+#define MT_AFE_MCU_BPLL_CFG_MASK GENMASK(17, 16)
+#define MT_AFE_PLL_CFG_MASK (MT_AFE_BPLL_CFG_MASK | \
+ MT_AFE_WPLL_CFG_MASK | \
+ MT_AFE_MCU_WPLL_CFG_MASK | \
+ MT_AFE_MCU_BPLL_CFG_MASK)
+#define MT_AFE_PLL_CFG_VAL (FIELD_PREP(MT_AFE_BPLL_CFG_MASK, 0x1) | \
+ FIELD_PREP(MT_AFE_WPLL_CFG_MASK, 0x2) | \
+ FIELD_PREP(MT_AFE_MCU_WPLL_CFG_MASK, 0x1) | \
+ FIELD_PREP(MT_AFE_MCU_BPLL_CFG_MASK, 0x2))
+
+#define MT_AFE_DIG_TOP_01_MASK GENMASK(18, 15)
+#define MT_AFE_DIG_TOP_01_VAL FIELD_PREP(MT_AFE_DIG_TOP_01_MASK, 0x9)
+
+#define MT_AFE_RG_WBG_EN_RCK_MASK BIT(0)
+#define MT_AFE_RG_WBG_EN_BPLL_UP_MASK BIT(21)
+#define MT_AFE_RG_WBG_EN_WPLL_UP_MASK BIT(20)
+#define MT_AFE_RG_WBG_EN_PLL_UP_MASK (MT_AFE_RG_WBG_EN_BPLL_UP_MASK | \
+ MT_AFE_RG_WBG_EN_WPLL_UP_MASK)
+#define MT_AFE_RG_WBG_EN_TXCAL_MASK GENMASK(21, 17)
+
+#define MT_ADIE_SLP_CTRL_BASE(_band) (0x18005000 + ((_band) << 19))
+#define MT_ADIE_SLP_CTRL(_band, ofs) (MT_ADIE_SLP_CTRL_BASE(_band) + (ofs))
+
+#define MT_ADIE_SLP_CTRL_CK0(_band) MT_ADIE_SLP_CTRL(_band, 0x120)
+
+/* ADIE */
+#define MT_ADIE_CHIP_ID 0x02c
+#define MT_ADIE_CHIP_ID_MASK GENMASK(31, 16)
+#define MT_ADIE_IDX0 GENMASK(15, 0)
+#define MT_ADIE_IDX1 GENMASK(31, 16)
+
+#define MT_ADIE_RG_TOP_THADC_BG 0x034
+#define MT_ADIE_VRPI_SEL_CR_MASK GENMASK(15, 12)
+#define MT_ADIE_VRPI_SEL_EFUSE_MASK GENMASK(6, 3)
+
+#define MT_ADIE_RG_TOP_THADC 0x038
+#define MT_ADIE_PGA_GAIN_MASK GENMASK(25, 23)
+#define MT_ADIE_PGA_GAIN_EFUSE_MASK GENMASK(2, 0)
+#define MT_ADIE_LDO_CTRL_MASK GENMASK(27, 26)
+#define MT_ADIE_LDO_CTRL_EFUSE_MASK GENMASK(6, 5)
+
+#define MT_AFE_RG_ENCAL_WBTAC_IF_SW 0x070
+#define MT_ADIE_EFUSE_RDATA0 0x130
+
+#define MT_ADIE_EFUSE2_CTRL 0x148
+#define MT_ADIE_EFUSE_CTRL_MASK BIT(1)
+
+#define MT_ADIE_EFUSE_CFG 0x144
+#define MT_ADIE_EFUSE_MODE_MASK GENMASK(7, 6)
+#define MT_ADIE_EFUSE_ADDR_MASK GENMASK(25, 16)
+#define MT_ADIE_EFUSE_VALID_MASK BIT(29)
+#define MT_ADIE_EFUSE_KICK_MASK BIT(30)
+
+#define MT_ADIE_THADC_ANALOG 0x3a6
+
+#define MT_ADIE_THADC_SLOP 0x3a7
+#define MT_ADIE_ANA_EN_MASK BIT(7)
+
+#define MT_ADIE_7975_XTAL_CAL 0x3a1
+#define MT_ADIE_TRIM_MASK GENMASK(6, 0)
+#define MT_ADIE_EFUSE_TRIM_MASK GENMASK(5, 0)
+#define MT_ADIE_XO_TRIM_EN_MASK BIT(7)
+#define MT_ADIE_XTAL_DECREASE_MASK BIT(6)
+
+#define MT_ADIE_7975_XO_TRIM2 0x3a2
+#define MT_ADIE_7975_XO_TRIM3 0x3a3
+#define MT_ADIE_7975_XO_TRIM4 0x3a4
+#define MT_ADIE_7975_XTAL_EN 0x3a5
+
+#define MT_ADIE_XO_TRIM_FLOW 0x3ac
+#define MT_ADIE_XTAL_AXM_80M_OSC 0x390
+#define MT_ADIE_XTAL_AXM_40M_OSC 0x391
+#define MT_ADIE_XTAL_TRIM1_80M_OSC 0x398
+#define MT_ADIE_XTAL_TRIM1_40M_OSC 0x399
+#define MT_ADIE_WRI_CK_SEL 0x4ac
+#define MT_ADIE_RG_STRAP_PIN_IN 0x4fc
+#define MT_ADIE_XTAL_C1 0x654
+#define MT_ADIE_XTAL_C2 0x658
+#define MT_ADIE_RG_XO_01 0x65c
+#define MT_ADIE_RG_XO_03 0x664
+
+#define MT_ADIE_CLK_EN 0xa00
+
+#define MT_ADIE_7975_XTAL 0xa18
+#define MT_ADIE_7975_XTAL_EN_MASK BIT(29)
+
+#define MT_ADIE_7975_COCLK 0xa1c
+#define MT_ADIE_7975_XO_2 0xa84
+#define MT_ADIE_7975_XO_2_FIX_EN BIT(31)
+
+#define MT_ADIE_7975_XO_CTRL2 0xa94
+#define MT_ADIE_7975_XO_CTRL2_C1_MASK GENMASK(26, 20)
+#define MT_ADIE_7975_XO_CTRL2_C2_MASK GENMASK(18, 12)
+#define MT_ADIE_7975_XO_CTRL2_MASK (MT_ADIE_7975_XO_CTRL2_C1_MASK | \
+ MT_ADIE_7975_XO_CTRL2_C2_MASK)
+
+#define MT_ADIE_7975_XO_CTRL6 0xaa4
+#define MT_ADIE_7975_XO_CTRL6_MASK BIT(16)
+
+/* TOP SPI */
+#define MT_TOP_SPI_ADIE_BASE(_band) (0x18004000 + ((_band) << 19))
+#define MT_TOP_SPI_ADIE(_band, ofs) (MT_TOP_SPI_ADIE_BASE(_band) + (ofs))
+
+#define MT_TOP_SPI_BUSY_CR(_band) MT_TOP_SPI_ADIE(_band, 0)
+#define MT_TOP_SPI_POLLING_BIT BIT(5)
+
+#define MT_TOP_SPI_ADDR_CR(_band) MT_TOP_SPI_ADIE(_band, 0x50)
+#define MT_TOP_SPI_READ_ADDR_FORMAT (BIT(12) | BIT(13) | BIT(15))
+#define MT_TOP_SPI_WRITE_ADDR_FORMAT (BIT(13) | BIT(15))
+
+#define MT_TOP_SPI_WRITE_DATA_CR(_band) MT_TOP_SPI_ADIE(_band, 0x54)
+#define MT_TOP_SPI_READ_DATA_CR(_band) MT_TOP_SPI_ADIE(_band, 0x58)
+
+/* CONN INFRA CKGEN */
+#define MT_INFRA_CKGEN_BASE 0x18009000
+#define MT_INFRA_CKGEN(ofs) (MT_INFRA_CKGEN_BASE + (ofs))
+
+#define MT_INFRA_CKGEN_BUS MT_INFRA_CKGEN(0xa00)
+#define MT_INFRA_CKGEN_BUS_CLK_SEL_MASK BIT(23)
+#define MT_INFRA_CKGEN_BUS_RDY_SEL_MASK BIT(29)
+
+#define MT_INFRA_CKGEN_BUS_WPLL_DIV_1 MT_INFRA_CKGEN(0x008)
+#define MT_INFRA_CKGEN_BUS_WPLL_DIV_2 MT_INFRA_CKGEN(0x00c)
+
+#define MT_INFRA_CKGEN_RFSPI_WPLL_DIV MT_INFRA_CKGEN(0x040)
+#define MT_INFRA_CKGEN_DIV_SEL_MASK GENMASK(7, 2)
+#define MT_INFRA_CKGEN_DIV_EN_MASK BIT(0)
+
+/* CONN INFRA BUS */
+#define MT_INFRA_BUS_BASE 0x1800e000
+#define MT_INFRA_BUS(ofs) (MT_INFRA_BUS_BASE + (ofs))
+
+#define MT_INFRA_BUS_OFF_TIMEOUT MT_INFRA_BUS(0x300)
+#define MT_INFRA_BUS_TIMEOUT_LIMIT_MASK GENMASK(14, 7)
+#define MT_INFRA_BUS_TIMEOUT_EN_MASK GENMASK(3, 0)
+
+#define MT_INFRA_BUS_ON_TIMEOUT MT_INFRA_BUS(0x31c)
+#define MT_INFRA_BUS_EMI_START MT_INFRA_BUS(0x360)
+#define MT_INFRA_BUS_EMI_END MT_INFRA_BUS(0x364)
+
+/* CONN_INFRA_SKU */
+#define MT_CONNINFRA_SKU_DEC_ADDR 0x18050000
+#define MT_CONNINFRA_SKU_MASK GENMASK(15, 0)
+#define MT_ADIE_TYPE_MASK BIT(1)
+
+/* FW MODE SYNC */
+#define MT_SWDEF_MODE 0x41f23c
+#define MT_SWDEF_MODE_MT7916 0x41143c
+#define MT_SWDEF_NORMAL_MODE 0
+#define MT_SWDEF_ICAP_MODE 1
+#define MT_SWDEF_SPECTRUM_MODE 2
#define MT_DIC_CMD_REG_BASE 0x41f000
#define MT_DIC_CMD_REG(ofs) (MT_DIC_CMD_REG_BASE + (ofs))
@@ -540,13 +931,7 @@
#define MT_CPU_UTIL_PEAK_IDLE_CNT MT_CPU_UTIL(0x0c)
#define MT_CPU_UTIL_CTRL MT_CPU_UTIL(0x1c)
-#define MT_SWDEF_BASE 0x41f200
-#define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs))
-#define MT_SWDEF_MODE MT_SWDEF(0x3c)
-#define MT_SWDEF_NORMAL_MODE 0
-#define MT_SWDEF_ICAP_MODE 1
-#define MT_SWDEF_SPECTRUM_MODE 2
-
+/* LED */
#define MT_LED_TOP_BASE 0x18013000
#define MT_LED_PHYS(_n) (MT_LED_TOP_BASE + (_n))
@@ -561,46 +946,124 @@
#define MT_LED_EN(_n) MT_LED_PHYS(0x40 + ((_n) * 4))
+#define MT_LED_GPIO_MUX2 0x70005058 /* GPIO 18 */
+#define MT_LED_GPIO_MUX3 0x7000505C /* GPIO 26 */
+#define MT_LED_GPIO_SEL_MASK GENMASK(11, 8)
+
+/* MT TOP */
#define MT_TOP_BASE 0x18060000
#define MT_TOP(ofs) (MT_TOP_BASE + (ofs))
-#define MT_TOP_LPCR_HOST_BAND0 MT_TOP(0x10)
+#define MT_TOP_LPCR_HOST_BAND(_band) MT_TOP(0x10 + ((_band) * 0x10))
#define MT_TOP_LPCR_HOST_FW_OWN BIT(0)
#define MT_TOP_LPCR_HOST_DRV_OWN BIT(1)
+#define MT_TOP_LPCR_HOST_FW_OWN_STAT BIT(2)
+
+#define MT_TOP_LPCR_HOST_BAND_IRQ_STAT(_band) MT_TOP(0x14 + ((_band) * 0x10))
+#define MT_TOP_LPCR_HOST_BAND_STAT BIT(0)
#define MT_TOP_MISC MT_TOP(0xf0)
#define MT_TOP_MISC_FW_STATE GENMASK(2, 0)
-#define MT_LED_GPIO_MUX2 0x70005058 /* GPIO 18 */
-#define MT_LED_GPIO_MUX3 0x7000505C /* GPIO 26 */
-#define MT_LED_GPIO_SEL_MASK GENMASK(11, 8)
-
#define MT_HW_BOUND 0x70010020
-#define MT_HW_CHIPID 0x70010200
#define MT_HW_REV 0x70010204
+#define MT_WF_SUBSYS_RST 0x70002600
+
+#define MT_TOP_WFSYS_WAKEUP MT_TOP(0x1a4)
+#define MT_TOP_WFSYS_WAKEUP_MASK BIT(0)
+
+#define MT_TOP_MCU_EMI_BASE MT_TOP(0x1c4)
+#define MT_TOP_MCU_EMI_BASE_MASK GENMASK(19, 0)
+
+#define MT_TOP_CONN_INFRA_WAKEUP MT_TOP(0x1a0)
+#define MT_TOP_CONN_INFRA_WAKEUP_MASK BIT(0)
+
+#define MT_TOP_WFSYS_RESET_STATUS MT_TOP(0x2cc)
+#define MT_TOP_WFSYS_RESET_STATUS_MASK BIT(30)
+
+/* SEMA */
+#define MT_SEMA_BASE 0x18070000
+#define MT_SEMA(ofs) (MT_SEMA_BASE + (ofs))
+
+#define MT_SEMA_RFSPI_STATUS (MT_SEMA(0x2000) + (11 * 4))
+#define MT_SEMA_RFSPI_RELEASE (MT_SEMA(0x2200) + (11 * 4))
+#define MT_SEMA_RFSPI_STATUS_MASK BIT(1)
+
+/* MCU BUS */
+#define MT_MCU_BUS_BASE 0x18400000
+#define MT_MCU_BUS(ofs) (MT_MCU_BUS_BASE + (ofs))
+
+#define MT_MCU_BUS_TIMEOUT MT_MCU_BUS(0xf0440)
+#define MT_MCU_BUS_TIMEOUT_SET_MASK GENMASK(7, 0)
+#define MT_MCU_BUS_TIMEOUT_CG_EN_MASK BIT(28)
+#define MT_MCU_BUS_TIMEOUT_EN_MASK BIT(31)
-#define MT_PCIE1_MAC_BASE 0x74020000
-#define MT_PCIE1_MAC(ofs) (MT_PCIE1_MAC_BASE + (ofs))
-#define MT_PCIE1_MAC_INT_ENABLE MT_PCIE1_MAC(0x188)
+#define MT_MCU_BUS_REMAP MT_MCU_BUS(0x120)
+/* TOP CFG */
+#define MT_TOP_CFG_BASE 0x184b0000
+#define MT_TOP_CFG(ofs) (MT_TOP_CFG_BASE + (ofs))
+
+#define MT_TOP_CFG_IP_VERSION_ADDR MT_TOP_CFG(0x010)
+
+/* TOP CFG ON */
+#define MT_TOP_CFG_ON_BASE 0x184c1000
+#define MT_TOP_CFG_ON(ofs) (MT_TOP_CFG_ON_BASE + (ofs))
+
+#define MT_TOP_CFG_ON_ROM_IDX MT_TOP_CFG_ON(0x604)
+
+/* SLP CTRL */
+#define MT_SLP_BASE 0x184c3000
+#define MT_SLP(ofs) (MT_SLP_BASE + (ofs))
+
+#define MT_SLP_STATUS MT_SLP(0x00c)
+#define MT_SLP_WFDMA2CONN_MASK (BIT(21) | BIT(23))
+#define MT_SLP_CTRL_EN_MASK BIT(0)
+#define MT_SLP_CTRL_BSY_MASK BIT(1)
+
+/* MCU BUS DBG */
+#define MT_MCU_BUS_DBG_BASE 0x18500000
+#define MT_MCU_BUS_DBG(ofs) (MT_MCU_BUS_DBG_BASE + (ofs))
+
+#define MT_MCU_BUS_DBG_TIMEOUT MT_MCU_BUS_DBG(0x0)
+#define MT_MCU_BUS_DBG_TIMEOUT_SET_MASK GENMASK(31, 16)
+#define MT_MCU_BUS_DBG_TIMEOUT_CK_EN_MASK BIT(3)
+#define MT_MCU_BUS_DBG_TIMEOUT_EN_MASK BIT(2)
+
+/* PCIE MAC */
#define MT_PCIE_MAC_BASE 0x74030000
#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
-#define MT_WF_IRPI_BASE 0x83006000
-#define MT_WF_IRPI(ofs) (MT_WF_IRPI_BASE + ((ofs) << 16))
+#define MT_PCIE1_MAC_INT_ENABLE 0x74020188
+#define MT_PCIE1_MAC_INT_ENABLE_MT7916 0x74090188
+
+/* PP TOP */
+#define MT_WF_PP_TOP_BASE 0x820cc000
+#define MT_WF_PP_TOP(ofs) (MT_WF_PP_TOP_BASE + (ofs))
+
+#define MT_WF_PP_TOP_RXQ_WFDMA_CF_5 MT_WF_PP_TOP(0x0e8)
+#define MT_WF_PP_TOP_RXQ_QID6_WFDMA_HIF_SEL_MASK BIT(6)
+
+#define MT_WF_IRPI_BASE 0x83000000
+#define MT_WF_IRPI(ofs) (MT_WF_IRPI_BASE + (ofs))
+
+#define MT_WF_IRPI_NSS(phy, nss) MT_WF_IRPI(0x6000 + ((phy) << 20) + ((nss) << 16))
+#define MT_WF_IRPI_NSS_MT7916(phy, nss) MT_WF_IRPI(0x1000 + ((phy) << 20) + ((nss) << 16))
-/* PHY: band 0(0x83080000), band 1(0x83090000) */
+/* PHY */
#define MT_WF_PHY_BASE 0x83080000
#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs))
#define MT_WF_PHY_RX_CTRL1(_phy) MT_WF_PHY(0x2004 + ((_phy) << 16))
+#define MT_WF_PHY_RX_CTRL1_MT7916(_phy) MT_WF_PHY(0x2004 + ((_phy) << 20))
#define MT_WF_PHY_RX_CTRL1_IPI_EN GENMASK(2, 0)
#define MT_WF_PHY_RX_CTRL1_STSCNT_EN GENMASK(11, 9)
#define MT_WF_PHY_RXTD12(_phy) MT_WF_PHY(0x8230 + ((_phy) << 16))
+#define MT_WF_PHY_RXTD12_MT7916(_phy) MT_WF_PHY(0x8230 + ((_phy) << 20))
#define MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY BIT(18)
-#define MT_WF_PHY_RXTD12_IRPI_SW_CLR BIT(29)
+#define MT_WF_PHY_RXTD12_IRPI_SW_CLR BIT(29)
#define MT_MCU_WM_CIRQ_BASE 0x89010000
#define MT_MCU_WM_CIRQ(ofs) (MT_MCU_WM_CIRQ_BASE + (ofs))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
new file mode 100644
index 000000000000..3028c02cb840
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
@@ -0,0 +1,1212 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2022 MediaTek Inc. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/of_gpio.h>
+#include <linux/iopoll.h>
+#include <linux/reset.h>
+#include <linux/of_net.h>
+
+#include "mt7915.h"
+
+/* INFRACFG */
+#define MT_INFRACFG_CONN2AP_SLPPROT 0x0d0
+#define MT_INFRACFG_AP2CONN_SLPPROT 0x0d4
+
+#define MT_INFRACFG_RX_EN_MASK BIT(16)
+#define MT_INFRACFG_TX_RDY_MASK BIT(4)
+#define MT_INFRACFG_TX_EN_MASK BIT(0)
+
+/* TOP POS */
+#define MT_TOP_POS_FAST_CTRL 0x114
+#define MT_TOP_POS_FAST_EN_MASK BIT(3)
+
+#define MT_TOP_POS_SKU 0x21c
+#define MT_TOP_POS_SKU_MASK GENMASK(31, 28)
+#define MT_TOP_POS_SKU_ADIE_DBDC_MASK BIT(2)
+
+enum {
+ ADIE_SB,
+ ADIE_DBDC
+};
+
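+/*
+ * The MT7975/MT7976 a-dies are not memory mapped: they are accessed
+ * indirectly through the TOP SPI master. Every transfer first polls
+ * the busy bit, programs the address register (with the read or write
+ * format bits set) and then moves data through the dedicated
+ * read/write data registers.
+ */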
+static int
+mt76_wmac_spi_read(struct mt7915_dev *dev, u8 adie, u32 addr, u32 *val)
+{
+ int ret;
+ u32 cur;
+
+ ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_SPI_BUSY_CR(adie));
+ if (ret)
+ return ret;
+
+ mt76_wr(dev, MT_TOP_SPI_ADDR_CR(adie),
+ MT_TOP_SPI_READ_ADDR_FORMAT | addr);
+ mt76_wr(dev, MT_TOP_SPI_WRITE_DATA_CR(adie), 0);
+
+ ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_SPI_BUSY_CR(adie));
+ if (ret)
+ return ret;
+
+ *val = mt76_rr(dev, MT_TOP_SPI_READ_DATA_CR(adie));
+
+ return 0;
+}
+
+static int
+mt76_wmac_spi_write(struct mt7915_dev *dev, u8 adie, u32 addr, u32 val)
+{
+ int ret;
+ u32 cur;
+
+ ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_SPI_BUSY_CR(adie));
+ if (ret)
+ return ret;
+
+ mt76_wr(dev, MT_TOP_SPI_ADDR_CR(adie),
+ MT_TOP_SPI_WRITE_ADDR_FORMAT | addr);
+ mt76_wr(dev, MT_TOP_SPI_WRITE_DATA_CR(adie), val);
+
+ return read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_SPI_BUSY_CR(adie));
+}
+
+static int
+mt76_wmac_spi_rmw(struct mt7915_dev *dev, u8 adie,
+ u32 addr, u32 mask, u32 val)
+{
+ int ret;
+ u32 cur;
+
+ ret = mt76_wmac_spi_read(dev, adie, addr, &cur);
+ if (ret)
+ return ret;
+
+ cur &= ~mask;
+ cur |= val;
+
+ return mt76_wmac_spi_write(dev, adie, addr, cur);
+}
+
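+/*
+ * Read one word from the a-die efuse: program the target address,
+ * kick the efuse controller, poll until the kick bit clears and then
+ * fetch the word from the read-data register if the valid bit is set.
+ */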
+static int
+mt7986_wmac_adie_efuse_read(struct mt7915_dev *dev, u8 adie,
+ u32 addr, u32 *data)
+{
+ int ret, temp;
+ u32 val, mask;
+
+ ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_EFUSE_CFG,
+ MT_ADIE_EFUSE_CTRL_MASK);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_EFUSE2_CTRL,
+ MT_ADIE_EFUSE_KICK_MASK, 0x0);
+ if (ret)
+ return ret;
+
+ mask = (MT_ADIE_EFUSE_MODE_MASK | MT_ADIE_EFUSE_ADDR_MASK |
+ MT_ADIE_EFUSE_KICK_MASK);
+ val = FIELD_PREP(MT_ADIE_EFUSE_MODE_MASK, 0) |
+ FIELD_PREP(MT_ADIE_EFUSE_ADDR_MASK, addr) |
+ FIELD_PREP(MT_ADIE_EFUSE_KICK_MASK, 1);
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_EFUSE2_CTRL, mask, val);
+ if (ret)
+ return ret;
+
+ ret = read_poll_timeout(mt76_wmac_spi_read, temp,
+ !temp && !FIELD_GET(MT_ADIE_EFUSE_KICK_MASK, val),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, adie, MT_ADIE_EFUSE2_CTRL, &val);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_EFUSE2_CTRL, &val);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(MT_ADIE_EFUSE_VALID_MASK, val) == 1)
+ ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_EFUSE_RDATA0,
+ data);
+
+ return ret;
+}
+
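+/*
+ * The RFSPI bus is shared with the firmware, so serialize access
+ * through the hardware semaphore: spin on the status register to
+ * acquire it and write the release register to drop it.
+ */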
+static inline void mt76_wmac_spi_lock(struct mt7915_dev *dev)
+{
+ u32 cur;
+
+ read_poll_timeout(mt76_rr, cur,
+ FIELD_GET(MT_SEMA_RFSPI_STATUS_MASK, cur),
+ 1000, 1000 * MSEC_PER_SEC, false, dev,
+ MT_SEMA_RFSPI_STATUS);
+}
+
+static inline void mt76_wmac_spi_unlock(struct mt7915_dev *dev)
+{
+ mt76_wr(dev, MT_SEMA_RFSPI_RELEASE, 1);
+}
+
+static u32 mt76_wmac_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
+{
+ val |= readl(base + offset) & ~mask;
+ writel(val, base + offset);
+
+ return val;
+}
+
+static u8 mt7986_wmac_check_adie_type(struct mt7915_dev *dev)
+{
+ u32 val;
+
+ val = readl(dev->sku + MT_TOP_POS_SKU);
+
+ return FIELD_GET(MT_TOP_POS_SKU_ADIE_DBDC_MASK, val);
+}
+
+static int mt7986_wmac_consys_reset(struct mt7915_dev *dev, bool enable)
+{
+ if (!enable)
+ return reset_control_assert(dev->rstc);
+
+ mt76_wmac_rmw(dev->sku, MT_TOP_POS_FAST_CTRL,
+ MT_TOP_POS_FAST_EN_MASK,
+ FIELD_PREP(MT_TOP_POS_FAST_EN_MASK, 0x1));
+
+ return reset_control_deassert(dev->rstc);
+}
+
+static int mt7986_wmac_gpio_setup(struct mt7915_dev *dev)
+{
+ struct pinctrl_state *state;
+ struct pinctrl *pinctrl;
+ int ret;
+ u8 type;
+
+ type = mt7986_wmac_check_adie_type(dev);
+ pinctrl = devm_pinctrl_get(dev->mt76.dev);
+ if (IS_ERR(pinctrl))
+ return PTR_ERR(pinctrl);
+
+ switch (type) {
+ case ADIE_SB:
+ state = pinctrl_lookup_state(pinctrl, "default");
+ if (IS_ERR_OR_NULL(state))
+ return -EINVAL;
+ break;
+ case ADIE_DBDC:
+ state = pinctrl_lookup_state(pinctrl, "dbdc");
+ if (IS_ERR_OR_NULL(state))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = pinctrl_select_state(pinctrl, state);
+ if (ret)
+ return ret;
+
+ usleep_range(500, 1000);
+
+ return 0;
+}
+
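+/*
+ * Engage or release the sleep protection between the AP and the
+ * connectivity subsystem: update the RX/TX protect bits on the
+ * AP->CONN path, waiting for the hardware handshake after each step,
+ * then mirror the setting on the CONN->AP path.
+ */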
+static int mt7986_wmac_consys_lockup(struct mt7915_dev *dev, bool enable)
+{
+ int ret;
+ u32 cur;
+
+ mt76_wmac_rmw(dev->dcm, MT_INFRACFG_AP2CONN_SLPPROT,
+ MT_INFRACFG_RX_EN_MASK,
+ FIELD_PREP(MT_INFRACFG_RX_EN_MASK, enable));
+ ret = read_poll_timeout(readl, cur, !(cur & MT_INFRACFG_RX_EN_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev->dcm + MT_INFRACFG_AP2CONN_SLPPROT);
+ if (ret)
+ return ret;
+
+ mt76_wmac_rmw(dev->dcm, MT_INFRACFG_AP2CONN_SLPPROT,
+ MT_INFRACFG_TX_EN_MASK,
+ FIELD_PREP(MT_INFRACFG_TX_EN_MASK, enable));
+ ret = read_poll_timeout(readl, cur, !(cur & MT_INFRACFG_TX_RDY_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev->dcm + MT_INFRACFG_AP2CONN_SLPPROT);
+ if (ret)
+ return ret;
+
+ mt76_wmac_rmw(dev->dcm, MT_INFRACFG_CONN2AP_SLPPROT,
+ MT_INFRACFG_RX_EN_MASK,
+ FIELD_PREP(MT_INFRACFG_RX_EN_MASK, enable));
+ mt76_wmac_rmw(dev->dcm, MT_INFRACFG_CONN2AP_SLPPROT,
+ MT_INFRACFG_TX_EN_MASK,
+ FIELD_PREP(MT_INFRACFG_TX_EN_MASK, enable));
+
+ return 0;
+}
+
+static int mt7986_wmac_coninfra_check(struct mt7915_dev *dev)
+{
+ u32 cur;
+
+ return read_poll_timeout(mt76_rr, cur, (cur == 0x02070000),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC,
+ false, dev, MT_CONN_INFRA_BASE);
+}
+
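+/*
+ * Program the EMI region reserved for the WM firmware: look up the
+ * "memory-region" phandle, write its base and size into the conninfra
+ * bus registers and power up the conninfra sysram.
+ */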
+static int mt7986_wmac_coninfra_setup(struct mt7915_dev *dev)
+{
+ struct device *pdev = dev->mt76.dev;
+ struct reserved_mem *rmem;
+ struct device_node *np;
+ u32 val;
+
+ np = of_parse_phandle(pdev->of_node, "memory-region", 0);
+ if (!np)
+ return -EINVAL;
+
+ rmem = of_reserved_mem_lookup(np);
+ of_node_put(np);
+ if (!rmem)
+ return -EINVAL;
+
+ val = (rmem->base >> 16) & MT_TOP_MCU_EMI_BASE_MASK;
+
+ /* Set conninfra subsys PLL check */
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS,
+ MT_INFRA_CKGEN_BUS_RDY_SEL_MASK, 0x1);
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS,
+ MT_INFRA_CKGEN_BUS_RDY_SEL_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_TOP_MCU_EMI_BASE,
+ MT_TOP_MCU_EMI_BASE_MASK, val);
+
+ mt76_wr(dev, MT_INFRA_BUS_EMI_START, rmem->base);
+ mt76_wr(dev, MT_INFRA_BUS_EMI_END, rmem->size);
+
+ mt76_rr(dev, MT_CONN_INFRA_EFUSE);
+
+ /* Set conninfra sysram */
+ mt76_wr(dev, MT_TOP_RGU_SYSRAM_PDN, 0);
+ mt76_wr(dev, MT_TOP_RGU_SYSRAM_SLP, 1);
+
+ return 0;
+}
+
+static int mt7986_wmac_sku_setup(struct mt7915_dev *dev, u32 *adie_type)
+{
+ int ret;
+ u32 adie_main, adie_ext;
+
+ mt76_rmw_field(dev, MT_CONN_INFRA_ADIE_RESET,
+ MT_CONN_INFRA_ADIE1_RESET_MASK, 0x1);
+ mt76_rmw_field(dev, MT_CONN_INFRA_ADIE_RESET,
+ MT_CONN_INFRA_ADIE2_RESET_MASK, 0x1);
+
+ mt76_wmac_spi_lock(dev);
+
+ ret = mt76_wmac_spi_read(dev, 0, MT_ADIE_CHIP_ID, &adie_main);
+ if (ret)
+ goto out;
+
+ ret = mt76_wmac_spi_read(dev, 1, MT_ADIE_CHIP_ID, &adie_ext);
+ if (ret)
+ goto out;
+
+ *adie_type = FIELD_GET(MT_ADIE_CHIP_ID_MASK, adie_main) |
+ (MT_ADIE_CHIP_ID_MASK & adie_ext);
+
+out:
+ mt76_wmac_spi_unlock(dev);
+
+ return ret;
+}
+
+static inline u16 mt7986_adie_idx(u8 adie, u32 adie_type)
+{
+ if (adie == 0)
+ return u32_get_bits(adie_type, MT_ADIE_IDX0);
+ else
+ return u32_get_bits(adie_type, MT_ADIE_IDX1);
+}
+
+static inline bool is_7975(struct mt7915_dev *dev, u8 adie, u32 adie_type)
+{
+ return mt7986_adie_idx(adie, adie_type) == 0x7975;
+}
+
+static inline bool is_7976(struct mt7915_dev *dev, u8 adie, u32 adie_type)
+{
+ return mt7986_adie_idx(adie, adie_type) == 0x7976;
+}
+
+static int mt7986_wmac_adie_thermal_cal(struct mt7915_dev *dev, u8 adie)
+{
+ int ret;
+ u32 data, val;
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_THADC_ANALOG,
+ &data);
+ if (ret || FIELD_GET(MT_ADIE_ANA_EN_MASK, data)) {
+ val = FIELD_GET(MT_ADIE_VRPI_SEL_EFUSE_MASK, data);
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_RG_TOP_THADC_BG,
+ MT_ADIE_VRPI_SEL_CR_MASK,
+ FIELD_PREP(MT_ADIE_VRPI_SEL_CR_MASK, val));
+ if (ret)
+ return ret;
+
+ val = FIELD_GET(MT_ADIE_PGA_GAIN_EFUSE_MASK, data);
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_RG_TOP_THADC,
+ MT_ADIE_PGA_GAIN_MASK,
+ FIELD_PREP(MT_ADIE_PGA_GAIN_MASK, val));
+ if (ret)
+ return ret;
+ }
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_THADC_SLOP,
+ &data);
+ if (ret || FIELD_GET(MT_ADIE_ANA_EN_MASK, data)) {
+ val = FIELD_GET(MT_ADIE_LDO_CTRL_EFUSE_MASK, data);
+
+ return mt76_wmac_spi_rmw(dev, adie, MT_ADIE_RG_TOP_THADC,
+ MT_ADIE_LDO_CTRL_MASK,
+ FIELD_PREP(MT_ADIE_LDO_CTRL_MASK, val));
+ }
+
+ return 0;
+}
+
+static int
+mt7986_read_efuse_xo_trim_7976(struct mt7915_dev *dev, u8 adie,
+ bool is_40m, int *result)
+{
+ int ret;
+ u32 data, addr;
+
+ addr = is_40m ? MT_ADIE_XTAL_AXM_40M_OSC : MT_ADIE_XTAL_AXM_80M_OSC;
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, addr, &data);
+ if (ret)
+ return ret;
+
+ if (!FIELD_GET(MT_ADIE_XO_TRIM_EN_MASK, data)) {
+ *result = 64;
+ } else {
+ *result = FIELD_GET(MT_ADIE_TRIM_MASK, data);
+ addr = is_40m ? MT_ADIE_XTAL_TRIM1_40M_OSC :
+ MT_ADIE_XTAL_TRIM1_80M_OSC;
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, addr, &data);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(MT_ADIE_XO_TRIM_EN_MASK, data) &&
+ FIELD_GET(MT_ADIE_XTAL_DECREASE_MASK, data))
+ *result -= FIELD_GET(MT_ADIE_EFUSE_TRIM_MASK, data);
+ else if (FIELD_GET(MT_ADIE_XO_TRIM_EN_MASK, data))
+ *result += FIELD_GET(MT_ADIE_EFUSE_TRIM_MASK, data);
+
+ *result = max(0, min(127, *result));
+ }
+
+ return 0;
+}
+
+static int mt7986_wmac_adie_xtal_trim_7976(struct mt7915_dev *dev, u8 adie)
+{
+ int ret, trim_80m, trim_40m;
+ u32 data, val, mode;
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_XO_TRIM_FLOW,
+ &data);
+ if (ret || !FIELD_GET(BIT(1), data))
+ return 0;
+
+ ret = mt7986_read_efuse_xo_trim_7976(dev, adie, false, &trim_80m);
+ if (ret)
+ return ret;
+
+ ret = mt7986_read_efuse_xo_trim_7976(dev, adie, true, &trim_40m);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_RG_STRAP_PIN_IN, &val);
+ if (ret)
+ return ret;
+
+ mode = FIELD_GET(GENMASK(6, 4), val);
+ if (!mode || mode == 0x2) {
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_XTAL_C1,
+ GENMASK(31, 24),
+ FIELD_PREP(GENMASK(31, 24), trim_80m));
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_XTAL_C2,
+ GENMASK(31, 24),
+ FIELD_PREP(GENMASK(31, 24), trim_80m));
+ } else if (mode == 0x3 || mode == 0x4 || mode == 0x6) {
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_XTAL_C1,
+ GENMASK(23, 16),
+ FIELD_PREP(GENMASK(23, 16), trim_40m));
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_XTAL_C2,
+ GENMASK(23, 16),
+ FIELD_PREP(GENMASK(23, 16), trim_40m));
+ }
+
+ return ret;
+}
+
+static int mt7986_wmac_adie_patch_7976(struct mt7915_dev *dev, u8 adie)
+{
+ int ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_TOP_THADC, 0x4a563b00);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_01, 0x1d59080f);
+ if (ret)
+ return ret;
+
+ return mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_03, 0x34c00fe0);
+}
+
+static int
+mt7986_read_efuse_xo_trim_7975(struct mt7915_dev *dev, u8 adie,
+ u32 addr, u32 *result)
+{
+ int ret;
+ u32 data;
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, addr, &data);
+ if (ret)
+ return ret;
+
+ if ((data & MT_ADIE_XO_TRIM_EN_MASK)) {
+ if ((data & MT_ADIE_XTAL_DECREASE_MASK))
+ *result -= (data & MT_ADIE_EFUSE_TRIM_MASK);
+ else
+ *result += (data & MT_ADIE_EFUSE_TRIM_MASK);
+
+ *result = (*result & MT_ADIE_TRIM_MASK);
+ }
+
+ return 0;
+}
+
+static int mt7986_wmac_adie_xtal_trim_7975(struct mt7915_dev *dev, u8 adie)
+{
+ int ret;
+ u32 data, result = 0, value;
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_7975_XTAL_EN,
+ &data);
+ if (ret || !(data & BIT(1)))
+ return 0;
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_7975_XTAL_CAL,
+ &data);
+ if (ret)
+ return ret;
+
+ if (data & MT_ADIE_XO_TRIM_EN_MASK)
+ result = (data & MT_ADIE_TRIM_MASK);
+
+ ret = mt7986_read_efuse_xo_trim_7975(dev, adie, MT_ADIE_7975_XO_TRIM2,
+ &result);
+ if (ret)
+ return ret;
+
+ ret = mt7986_read_efuse_xo_trim_7975(dev, adie, MT_ADIE_7975_XO_TRIM3,
+ &result);
+ if (ret)
+ return ret;
+
+ ret = mt7986_read_efuse_xo_trim_7975(dev, adie, MT_ADIE_7975_XO_TRIM4,
+ &result);
+ if (ret)
+ return ret;
+
+ /* Update trim value to C1 and C2 */
+ value = FIELD_PREP(MT_ADIE_7975_XO_CTRL2_C1_MASK, result) |
+ FIELD_PREP(MT_ADIE_7975_XO_CTRL2_C2_MASK, result);
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_7975_XO_CTRL2,
+ MT_ADIE_7975_XO_CTRL2_MASK, value);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_7975_XTAL, &value);
+ if (ret)
+ return ret;
+
+ if (value & MT_ADIE_7975_XTAL_EN_MASK) {
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_7975_XO_2,
+ MT_ADIE_7975_XO_2_FIX_EN, 0x0);
+ if (ret)
+ return ret;
+ }
+
+ return mt76_wmac_spi_rmw(dev, adie, MT_ADIE_7975_XO_CTRL6,
+ MT_ADIE_7975_XO_CTRL6_MASK, 0x1);
+}
+
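+/*
+ * Apply the analog front-end patch for the MT7975 a-die. The raw
+ * register offsets and values below have no symbolic definitions;
+ * each write is annotated with its purpose instead.
+ */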
+static int mt7986_wmac_adie_patch_7975(struct mt7915_dev *dev, u8 adie)
+{
+ int ret;
+
+ /* disable CAL LDO and fine tune RFDIG LDO */
+ ret = mt76_wmac_spi_write(dev, adie, 0x348, 0x00000002);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x378, 0x00000002);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x3a8, 0x00000002);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x3d8, 0x00000002);
+ if (ret)
+ return ret;
+
+ /* set CKA driving and filter */
+ ret = mt76_wmac_spi_write(dev, adie, 0xa1c, 0x30000aaa);
+ if (ret)
+ return ret;
+
+ /* set CKB LDO to 1.4V */
+ ret = mt76_wmac_spi_write(dev, adie, 0xa84, 0x8470008a);
+ if (ret)
+ return ret;
+
+ /* turn on SX0 LTBUF */
+ ret = mt76_wmac_spi_write(dev, adie, 0x074, 0x00000002);
+ if (ret)
+ return ret;
+
+ /* CK_BUF_SW_EN = 1 (all bufs in manual mode) */
+ ret = mt76_wmac_spi_write(dev, adie, 0xaa4, 0x01001fc0);
+ if (ret)
+ return ret;
+
+ /* BT mode/WF normal mode 00000005 */
+ ret = mt76_wmac_spi_write(dev, adie, 0x070, 0x00000005);
+ if (ret)
+ return ret;
+
+ /* BG thermal sensor offset update */
+ ret = mt76_wmac_spi_write(dev, adie, 0x344, 0x00000088);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x374, 0x00000088);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x3a4, 0x00000088);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x3d4, 0x00000088);
+ if (ret)
+ return ret;
+
+ /* set WCON VDD IPTAT to "0000" */
+ ret = mt76_wmac_spi_write(dev, adie, 0xa80, 0x44d07000);
+ if (ret)
+ return ret;
+
+ /* change LTBUF SX3 driving back to the default value */
+ ret = mt76_wmac_spi_write(dev, adie, 0xa88, 0x3900aaaa);
+ if (ret)
+ return ret;
+
+ /* SM input cap off */
+ ret = mt76_wmac_spi_write(dev, adie, 0x2c4, 0x00000000);
+ if (ret)
+ return ret;
+
+ /* set CKB driving and filter */
+ return mt76_wmac_spi_write(dev, adie, 0x2c8, 0x00000072);
+}
+
+static int mt7986_wmac_adie_cfg(struct mt7915_dev *dev, u8 adie, u32 adie_type)
+{
+ int ret;
+
+ mt76_wmac_spi_lock(dev);
+ ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_CLK_EN, ~0);
+ if (ret)
+ goto out;
+
+ if (is_7975(dev, adie, adie_type)) {
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_7975_COCLK,
+ BIT(1), 0x1);
+ if (ret)
+ goto out;
+
+ ret = mt7986_wmac_adie_thermal_cal(dev, adie);
+ if (ret)
+ goto out;
+
+ ret = mt7986_wmac_adie_xtal_trim_7975(dev, adie);
+ if (ret)
+ goto out;
+
+ ret = mt7986_wmac_adie_patch_7975(dev, adie);
+ } else if (is_7976(dev, adie, adie_type)) {
+ if (mt7986_wmac_check_adie_type(dev) == ADIE_DBDC) {
+ ret = mt76_wmac_spi_write(dev, adie,
+ MT_ADIE_WRI_CK_SEL, 0x1c);
+ if (ret)
+ goto out;
+ }
+
+ ret = mt7986_wmac_adie_thermal_cal(dev, adie);
+ if (ret)
+ goto out;
+
+ ret = mt7986_wmac_adie_xtal_trim_7976(dev, adie);
+ if (ret)
+ goto out;
+
+ ret = mt7986_wmac_adie_patch_7976(dev, adie);
+ }
+out:
+ mt76_wmac_spi_unlock(dev);
+
+ return ret;
+}
+
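+/*
+ * Run the AFE calibration for one a-die: enable the WBTAC interface,
+ * pulse the RCK, bring up the BPLL/WPLL, run TX calibration and then
+ * shut the calibration paths down again.
+ */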
+static int
+mt7986_wmac_afe_cal(struct mt7915_dev *dev, u8 adie, bool dbdc, u32 adie_type)
+{
+ int ret;
+ u8 idx;
+
+ mt76_wmac_spi_lock(dev);
+ if (is_7975(dev, adie, adie_type))
+ ret = mt76_wmac_spi_write(dev, adie,
+ MT_AFE_RG_ENCAL_WBTAC_IF_SW,
+ 0x80000000);
+ else
+ ret = mt76_wmac_spi_write(dev, adie,
+ MT_AFE_RG_ENCAL_WBTAC_IF_SW,
+ 0x88888005);
+ if (ret)
+ goto out;
+
+ idx = dbdc ? ADIE_DBDC : adie;
+
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_01(idx),
+ MT_AFE_RG_WBG_EN_RCK_MASK, 0x1);
+ usleep_range(60, 100);
+
+ mt76_rmw(dev, MT_AFE_DIG_EN_01(idx),
+ MT_AFE_RG_WBG_EN_RCK_MASK, 0x0);
+
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_03(idx),
+ MT_AFE_RG_WBG_EN_BPLL_UP_MASK, 0x1);
+ usleep_range(30, 100);
+
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_03(idx),
+ MT_AFE_RG_WBG_EN_WPLL_UP_MASK, 0x1);
+ usleep_range(60, 100);
+
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_01(idx),
+ MT_AFE_RG_WBG_EN_TXCAL_MASK, 0x1f);
+ usleep_range(800, 1000);
+
+ mt76_rmw(dev, MT_AFE_DIG_EN_01(idx),
+ MT_AFE_RG_WBG_EN_TXCAL_MASK, 0x0);
+ mt76_rmw(dev, MT_AFE_DIG_EN_03(idx),
+ MT_AFE_RG_WBG_EN_PLL_UP_MASK, 0x0);
+
+ ret = mt76_wmac_spi_write(dev, adie, MT_AFE_RG_ENCAL_WBTAC_IF_SW,
+ 0x5);
+
+out:
+ mt76_wmac_spi_unlock(dev);
+
+ return ret;
+}
+
+static void mt7986_wmac_subsys_pll_initial(struct mt7915_dev *dev, u8 band)
+{
+ mt76_rmw(dev, MT_AFE_PLL_STB_TIME(band),
+ MT_AFE_PLL_STB_TIME_MASK, MT_AFE_PLL_STB_TIME_VAL);
+
+ mt76_rmw(dev, MT_AFE_DIG_EN_02(band),
+ MT_AFE_PLL_CFG_MASK, MT_AFE_PLL_CFG_VAL);
+
+ mt76_rmw(dev, MT_AFE_DIG_TOP_01(band),
+ MT_AFE_DIG_TOP_01_MASK, MT_AFE_DIG_TOP_01_VAL);
+}
+
+static void mt7986_wmac_subsys_setting(struct mt7915_dev *dev)
+{
+ /* Subsys pll init */
+ mt7986_wmac_subsys_pll_initial(dev, 0);
+ mt7986_wmac_subsys_pll_initial(dev, 1);
+
+ /* Set legacy OSC control stable time */
+ mt76_rmw(dev, MT_CONN_INFRA_OSC_RC_EN,
+ MT_CONN_INFRA_OSC_RC_EN_MASK, 0x0);
+ mt76_rmw(dev, MT_CONN_INFRA_OSC_CTRL,
+ MT_CONN_INFRA_OSC_STB_TIME_MASK, 0x80706);
+
+ /* prevent the subsys from powering on/off within a short interval */
+ mt76_rmw(dev, MT_TOP_WFSYS_PWR,
+ MT_TOP_PWR_ACK_MASK | MT_TOP_PWR_KEY_MASK,
+ MT_TOP_PWR_KEY);
+}
+
+static int mt7986_wmac_bus_timeout(struct mt7915_dev *dev)
+{
+ mt76_rmw_field(dev, MT_INFRA_BUS_OFF_TIMEOUT,
+ MT_INFRA_BUS_TIMEOUT_LIMIT_MASK, 0x2);
+
+ mt76_rmw_field(dev, MT_INFRA_BUS_OFF_TIMEOUT,
+ MT_INFRA_BUS_TIMEOUT_EN_MASK, 0xf);
+
+ mt76_rmw_field(dev, MT_INFRA_BUS_ON_TIMEOUT,
+ MT_INFRA_BUS_TIMEOUT_LIMIT_MASK, 0xc);
+
+ mt76_rmw_field(dev, MT_INFRA_BUS_ON_TIMEOUT,
+ MT_INFRA_BUS_TIMEOUT_EN_MASK, 0xf);
+
+ return mt7986_wmac_coninfra_check(dev);
+}
+
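+/*
+ * Switch the conninfra bus clock to the WPLL dividers and, with the
+ * conninfra wakeup held, enable the sleep-control clock of each
+ * populated a-die.
+ */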
+static void mt7986_wmac_clock_enable(struct mt7915_dev *dev, u32 adie_type)
+{
+ u32 cur;
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS_WPLL_DIV_1,
+ MT_INFRA_CKGEN_DIV_SEL_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS_WPLL_DIV_2,
+ MT_INFRA_CKGEN_DIV_SEL_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS_WPLL_DIV_1,
+ MT_INFRA_CKGEN_DIV_EN_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS_WPLL_DIV_2,
+ MT_INFRA_CKGEN_DIV_EN_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_RFSPI_WPLL_DIV,
+ MT_INFRA_CKGEN_DIV_SEL_MASK, 0x8);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_RFSPI_WPLL_DIV,
+ MT_INFRA_CKGEN_DIV_EN_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS,
+ MT_INFRA_CKGEN_BUS_CLK_SEL_MASK, 0x0);
+
+ mt76_rmw_field(dev, MT_CONN_INFRA_HW_CTRL,
+ MT_CONN_INFRA_HW_CTRL_MASK, 0x1);
+
+ mt76_rmw(dev, MT_TOP_CONN_INFRA_WAKEUP,
+ MT_TOP_CONN_INFRA_WAKEUP_MASK, 0x1);
+
+ usleep_range(900, 1000);
+
+ mt76_wmac_spi_lock(dev);
+ if (is_7975(dev, 0, adie_type) || is_7976(dev, 0, adie_type)) {
+ mt76_rmw_field(dev, MT_ADIE_SLP_CTRL_CK0(0),
+ MT_SLP_CTRL_EN_MASK, 0x1);
+
+ read_poll_timeout(mt76_rr, cur, !(cur & MT_SLP_CTRL_BSY_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_ADIE_SLP_CTRL_CK0(0));
+ }
+ if (is_7975(dev, 1, adie_type) || is_7976(dev, 1, adie_type)) {
+ mt76_rmw_field(dev, MT_ADIE_SLP_CTRL_CK0(1),
+ MT_SLP_CTRL_EN_MASK, 0x1);
+
+ read_poll_timeout(mt76_rr, cur, !(cur & MT_SLP_CTRL_BSY_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_ADIE_SLP_CTRL_CK0(1));
+ }
+ mt76_wmac_spi_unlock(dev);
+
+ mt76_rmw(dev, MT_TOP_CONN_INFRA_WAKEUP,
+ MT_TOP_CONN_INFRA_WAKEUP_MASK, 0x0);
+ usleep_range(900, 1000);
+}
+
+static int mt7986_wmac_top_wfsys_wakeup(struct mt7915_dev *dev, bool enable)
+{
+ mt76_rmw_field(dev, MT_TOP_WFSYS_WAKEUP,
+ MT_TOP_WFSYS_WAKEUP_MASK, enable);
+
+ usleep_range(900, 1000);
+
+ if (!enable)
+ return 0;
+
+ return mt7986_wmac_coninfra_check(dev);
+}
+
+static int mt7986_wmac_wm_enable(struct mt7915_dev *dev, bool enable)
+{
+ u32 cur;
+
+ mt76_rmw_field(dev, MT7986_TOP_WM_RESET,
+ MT7986_TOP_WM_RESET_MASK, enable);
+ if (!enable)
+ return 0;
+
+ return read_poll_timeout(mt76_rr, cur, (cur == 0x1d1e),
+ USEC_PER_MSEC, 5000 * USEC_PER_MSEC, false,
+ dev, MT_TOP_CFG_ON_ROM_IDX);
+}
+
+static int mt7986_wmac_wfsys_poweron(struct mt7915_dev *dev, bool enable)
+{
+ u32 mask = MT_TOP_PWR_EN_MASK | MT_TOP_PWR_KEY_MASK;
+ u32 cur;
+
+ mt76_rmw(dev, MT_TOP_WFSYS_PWR, mask,
+ MT_TOP_PWR_KEY | FIELD_PREP(MT_TOP_PWR_EN_MASK, enable));
+
+ return read_poll_timeout(mt76_rr, cur,
+ (FIELD_GET(MT_TOP_WFSYS_RESET_STATUS_MASK, cur) == enable),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_WFSYS_RESET_STATUS);
+}
+
+static int mt7986_wmac_wfsys_setting(struct mt7915_dev *dev)
+{
+ int ret;
+ u32 cur;
+
+ /* Turn off wfsys2conn bus sleep protect */
+ mt76_rmw(dev, MT_CONN_INFRA_WF_SLP_PROT,
+ MT_CONN_INFRA_WF_SLP_PROT_MASK, 0x0);
+
+ ret = mt7986_wmac_wfsys_poweron(dev, true);
+ if (ret)
+ return ret;
+
+ /* Check bus sleep protect */
+ ret = read_poll_timeout(mt76_rr, cur,
+ !(cur & MT_CONN_INFRA_CONN_WF_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_CONN_INFRA_WF_SLP_PROT_RDY);
+ if (ret)
+ return ret;
+
+ ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_SLP_WFDMA2CONN_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_SLP_STATUS);
+ if (ret)
+ return ret;
+
+ return read_poll_timeout(mt76_rr, cur, (cur == 0x02060000),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_CFG_IP_VERSION_ADDR);
+}
+
+static void mt7986_wmac_wfsys_set_timeout(struct mt7915_dev *dev)
+{
+ u32 mask = MT_MCU_BUS_TIMEOUT_SET_MASK |
+ MT_MCU_BUS_TIMEOUT_CG_EN_MASK |
+ MT_MCU_BUS_TIMEOUT_EN_MASK;
+ u32 val = FIELD_PREP(MT_MCU_BUS_TIMEOUT_SET_MASK, 1) |
+ FIELD_PREP(MT_MCU_BUS_TIMEOUT_CG_EN_MASK, 1) |
+ FIELD_PREP(MT_MCU_BUS_TIMEOUT_EN_MASK, 1);
+
+ mt76_rmw(dev, MT_MCU_BUS_TIMEOUT, mask, val);
+
+ mt76_wr(dev, MT_MCU_BUS_REMAP, 0x810f0000);
+
+ mask = MT_MCU_BUS_DBG_TIMEOUT_SET_MASK |
+ MT_MCU_BUS_DBG_TIMEOUT_CK_EN_MASK |
+ MT_MCU_BUS_DBG_TIMEOUT_EN_MASK;
+ val = FIELD_PREP(MT_MCU_BUS_DBG_TIMEOUT_SET_MASK, 0x3aa) |
+ FIELD_PREP(MT_MCU_BUS_DBG_TIMEOUT_CK_EN_MASK, 1) |
+ FIELD_PREP(MT_MCU_BUS_DBG_TIMEOUT_EN_MASK, 1);
+
+ mt76_rmw(dev, MT_MCU_BUS_DBG_TIMEOUT, mask, val);
+}
+
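+/*
+ * Derive the SKU value from the combination of populated a-dies and
+ * publish it to both the TOP POS SKU register and the conninfra SKU
+ * decode address.
+ */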
+static int mt7986_wmac_sku_update(struct mt7915_dev *dev, u32 adie_type)
+{
+ u32 val;
+
+ if (is_7976(dev, 0, adie_type) && is_7976(dev, 1, adie_type))
+ val = 0xf;
+ else if (is_7975(dev, 0, adie_type) && is_7975(dev, 1, adie_type))
+ val = 0xd;
+ else if (is_7976(dev, 0, adie_type))
+ val = 0x7;
+ else if (is_7975(dev, 1, adie_type))
+ val = 0x8;
+ else if (is_7976(dev, 1, adie_type))
+ val = 0xa;
+ else
+ return -EINVAL;
+
+ mt76_wmac_rmw(dev->sku, MT_TOP_POS_SKU, MT_TOP_POS_SKU_MASK,
+ FIELD_PREP(MT_TOP_POS_SKU_MASK, val));
+
+ mt76_wr(dev, MT_CONNINFRA_SKU_DEC_ADDR, val);
+
+ return 0;
+}
+
+static int
+mt7986_wmac_adie_setup(struct mt7915_dev *dev, u8 adie, u32 adie_type)
+{
+ int ret;
+
+ if (!(is_7975(dev, adie, adie_type) || is_7976(dev, adie, adie_type)))
+ return 0;
+
+ ret = mt7986_wmac_adie_cfg(dev, adie, adie_type);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_afe_cal(dev, adie, false, adie_type);
+ if (ret)
+ return ret;
+
+ if (!adie && (mt7986_wmac_check_adie_type(dev) == ADIE_DBDC))
+ ret = mt7986_wmac_afe_cal(dev, adie, true, adie_type);
+
+ return ret;
+}
+
+static int mt7986_wmac_subsys_powerup(struct mt7915_dev *dev, u32 adie_type)
+{
+ int ret;
+
+ mt7986_wmac_subsys_setting(dev);
+
+ ret = mt7986_wmac_bus_timeout(dev);
+ if (ret)
+ return ret;
+
+ mt7986_wmac_clock_enable(dev, adie_type);
+
+ return 0;
+}
+
+static int mt7986_wmac_wfsys_powerup(struct mt7915_dev *dev)
+{
+ int ret;
+
+ ret = mt7986_wmac_wm_enable(dev, false);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_wfsys_setting(dev);
+ if (ret)
+ return ret;
+
+ mt7986_wmac_wfsys_set_timeout(dev);
+
+ return mt7986_wmac_wm_enable(dev, true);
+}
+
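+/*
+ * Full WM power-on sequence: release the consys reset, set up the
+ * a-die GPIO mux, drop the bus sleep protection, bring up conninfra,
+ * probe and configure the a-dies, power up the subsystem and wfsys
+ * and finally publish the SKU.
+ */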
+int mt7986_wmac_enable(struct mt7915_dev *dev)
+{
+ int ret;
+ u32 adie_type;
+
+ ret = mt7986_wmac_consys_reset(dev, true);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_gpio_setup(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_consys_lockup(dev, false);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_coninfra_check(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_coninfra_setup(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_sku_setup(dev, &adie_type);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_adie_setup(dev, 0, adie_type);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_adie_setup(dev, 1, adie_type);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_subsys_powerup(dev, adie_type);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_top_wfsys_wakeup(dev, true);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_wfsys_powerup(dev);
+ if (ret)
+ return ret;
+
+ return mt7986_wmac_sku_update(dev, adie_type);
+}
+
+void mt7986_wmac_disable(struct mt7915_dev *dev)
+{
+ u32 cur;
+
+ mt7986_wmac_top_wfsys_wakeup(dev, true);
+
+ /* Turn on wfsys2conn bus sleep protect */
+ mt76_rmw_field(dev, MT_CONN_INFRA_WF_SLP_PROT,
+ MT_CONN_INFRA_WF_SLP_PROT_MASK, 0x1);
+
+ /* Check wfsys2conn bus sleep protect */
+ read_poll_timeout(mt76_rr, cur, !(cur ^ MT_CONN_INFRA_CONN),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_CONN_INFRA_WF_SLP_PROT_RDY);
+
+ mt7986_wmac_wfsys_poweron(dev, false);
+
+ /* Restore the WPLL settings */
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_02(0), MT_AFE_MCU_BPLL_CFG_MASK, 0x2);
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_02(0), MT_AFE_WPLL_CFG_MASK, 0x2);
+
+ /* Reset EMI */
+ mt76_rmw_field(dev, MT_CONN_INFRA_EMI_REQ,
+ MT_CONN_INFRA_EMI_REQ_MASK, 0x1);
+ mt76_rmw_field(dev, MT_CONN_INFRA_EMI_REQ,
+ MT_CONN_INFRA_EMI_REQ_MASK, 0x0);
+ mt76_rmw_field(dev, MT_CONN_INFRA_EMI_REQ,
+ MT_CONN_INFRA_INFRA_REQ_MASK, 0x1);
+ mt76_rmw_field(dev, MT_CONN_INFRA_EMI_REQ,
+ MT_CONN_INFRA_INFRA_REQ_MASK, 0x0);
+
+ mt7986_wmac_top_wfsys_wakeup(dev, false);
+ mt7986_wmac_consys_lockup(dev, true);
+ mt7986_wmac_consys_reset(dev, false);
+}
+
+static int mt7986_wmac_init(struct mt7915_dev *dev)
+{
+ struct device *pdev = dev->mt76.dev;
+ struct platform_device *pfdev = to_platform_device(pdev);
+
+ dev->dcm = devm_platform_ioremap_resource(pfdev, 1);
+ if (IS_ERR(dev->dcm))
+ return PTR_ERR(dev->dcm);
+
+ dev->sku = devm_platform_ioremap_resource(pfdev, 2);
+ if (IS_ERR(dev->sku))
+ return PTR_ERR(dev->sku);
+
+ dev->rstc = devm_reset_control_get(pdev, "consys");
+ if (IS_ERR(dev->rstc))
+ return PTR_ERR(dev->rstc);
+
+ return mt7986_wmac_enable(dev);
+}
+
+static int mt7986_wmac_probe(struct platform_device *pdev)
+{
+ void __iomem *mem_base;
+ struct mt7915_dev *dev;
+ struct mt76_dev *mdev;
+ int irq, ret;
+ u32 chip_id;
+
+ chip_id = (uintptr_t)of_device_get_match_data(&pdev->dev);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ mem_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mem_base)) {
+ dev_err(&pdev->dev, "Failed to get memory resource\n");
+ return PTR_ERR(mem_base);
+ }
+
+ dev = mt7915_mmio_probe(&pdev->dev, mem_base, chip_id);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ mdev = &dev->mt76;
+ ret = devm_request_irq(mdev->dev, irq, mt7915_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto free_device;
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+ ret = mt7986_wmac_init(dev);
+ if (ret)
+ goto free_irq;
+
+ ret = mt7915_register_device(dev);
+ if (ret)
+ goto free_irq;
+
+ return 0;
+
+free_irq:
+ devm_free_irq(mdev->dev, irq, dev);
+
+free_device:
+ mt76_free_device(&dev->mt76);
+
+ return ret;
+}
+
+static int mt7986_wmac_remove(struct platform_device *pdev)
+{
+ struct mt7915_dev *dev = platform_get_drvdata(pdev);
+
+ mt7915_unregister_device(dev);
+
+ return 0;
+}
+
+static const struct of_device_id mt7986_wmac_of_match[] = {
+ { .compatible = "mediatek,mt7986-wmac", .data = (u32 *)0x7986 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mt7986_wmac_of_match);
+
+struct platform_driver mt7986_wmac_driver = {
+ .driver = {
+ .name = "mt7986-wmac",
+ .of_match_table = mt7986_wmac_of_match,
+ },
+ .probe = mt7986_wmac_probe,
+ .remove = mt7986_wmac_remove,
+};
+
+MODULE_FIRMWARE(MT7986_FIRMWARE_WA);
+MODULE_FIRMWARE(MT7986_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7986_FIRMWARE_WM_MT7975);
+MODULE_FIRMWARE(MT7986_ROM_PATCH);
+MODULE_FIRMWARE(MT7986_ROM_PATCH_MT7975);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
index af80c2cf8c83..20f63644e929 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
@@ -23,30 +23,16 @@ struct reg_band {
u32 band[2];
};
-#define REG_BAND(_reg) \
- { .band[0] = MT_##_reg(0), .band[1] = MT_##_reg(1) }
-#define REG_BAND_IDX(_reg, _idx) \
- { .band[0] = MT_##_reg(0, _idx), .band[1] = MT_##_reg(1, _idx) }
-
-static const struct reg_band reg_backup_list[] = {
- REG_BAND_IDX(AGG_PCR0, 0),
- REG_BAND_IDX(AGG_PCR0, 1),
- REG_BAND_IDX(AGG_AWSCR0, 0),
- REG_BAND_IDX(AGG_AWSCR0, 1),
- REG_BAND_IDX(AGG_AWSCR0, 2),
- REG_BAND_IDX(AGG_AWSCR0, 3),
- REG_BAND(AGG_MRCR),
- REG_BAND(TMAC_TFCR0),
- REG_BAND(TMAC_TCR0),
- REG_BAND(AGG_ATCR1),
- REG_BAND(AGG_ATCR3),
- REG_BAND(TMAC_TRCR0),
- REG_BAND(TMAC_ICR0),
- REG_BAND_IDX(ARB_DRNGR0, 0),
- REG_BAND_IDX(ARB_DRNGR0, 1),
- REG_BAND(WF_RFCR),
- REG_BAND(WF_RFCR1),
-};
+#define REG_BAND(_list, _reg) do { \
+ (_list).band[0] = MT_##_reg(0); \
+ (_list).band[1] = MT_##_reg(1); \
+ } while (0)
+#define REG_BAND_IDX(_list, _reg, _idx) do { \
+ (_list).band[0] = MT_##_reg(0, _idx); \
+ (_list).band[1] = MT_##_reg(1, _idx); \
+ } while (0)
+
+#define TM_REG_MAX_ID 17
+static struct reg_band reg_backup_list[TM_REG_MAX_ID];
+
static int
mt7915_tm_set_tx_power(struct mt7915_phy *phy)
@@ -212,7 +198,6 @@ mt7915_tm_set_ipg_params(struct mt7915_phy *phy, u32 ipg, u8 mode)
u8 slot_time = 9, sifs = TM_DEFAULT_SIFS;
u8 aifsn = TM_MIN_AIFSN;
u32 i2t_time, tr2t_time, txv_time;
- bool ext_phy = phy != &dev->phy;
u16 cw = 0;
if (ipg < sig_ext + slot_time + sifs)
@@ -242,29 +227,25 @@ mt7915_tm_set_ipg_params(struct mt7915_phy *phy, u32 ipg, u8 mode)
ipg -= aifsn * slot_time;
- if (ipg > TM_DEFAULT_SIFS) {
- if (ipg < TM_MAX_SIFS)
- sifs = ipg;
- else
- sifs = TM_MAX_SIFS;
- }
+ if (ipg > TM_DEFAULT_SIFS)
+ sifs = min_t(u32, ipg, TM_MAX_SIFS);
}
done:
- txv_time = mt76_get_field(dev, MT_TMAC_ATCR(ext_phy),
+ txv_time = mt76_get_field(dev, MT_TMAC_ATCR(phy->band_idx),
MT_TMAC_ATCR_TXV_TOUT);
txv_time *= 50; /* normal clock time */
i2t_time = (slot_time * 1000 - txv_time - BBP_PROC_TIME) / 50;
tr2t_time = (sifs * 1000 - txv_time - BBP_PROC_TIME) / 50;
- mt76_set(dev, MT_TMAC_TRCR0(ext_phy),
+ mt76_set(dev, MT_TMAC_TRCR0(phy->band_idx),
FIELD_PREP(MT_TMAC_TRCR0_TR2T_CHK, tr2t_time) |
FIELD_PREP(MT_TMAC_TRCR0_I2T_CHK, i2t_time));
mt7915_tm_set_slot_time(phy, slot_time, sifs);
return mt7915_tm_set_wmm_qid(dev,
- mt7915_lmac_mapping(dev, IEEE80211_AC_BE),
+ mt76_connac_lmac_mapping(IEEE80211_AC_BE),
aifsn, cw, cw, 0);
}
@@ -290,6 +271,8 @@ mt7915_tm_set_tx_len(struct mt7915_phy *phy, u32 tx_time)
case MT76_TM_TX_MODE_OFDM:
if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
sband = &mphy->sband_5g.sband;
+ else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
+ sband = &mphy->sband_6g.sband;
else
sband = &mphy->sband_2g.sband;
@@ -351,13 +334,30 @@ mt7915_tm_reg_backup_restore(struct mt7915_phy *phy)
{
int n_regs = ARRAY_SIZE(reg_backup_list);
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
u32 *b = phy->test.reg_backup;
int i;
+ REG_BAND_IDX(reg_backup_list[0], AGG_PCR0, 0);
+ REG_BAND_IDX(reg_backup_list[1], AGG_PCR0, 1);
+ REG_BAND_IDX(reg_backup_list[2], AGG_AWSCR0, 0);
+ REG_BAND_IDX(reg_backup_list[3], AGG_AWSCR0, 1);
+ REG_BAND_IDX(reg_backup_list[4], AGG_AWSCR0, 2);
+ REG_BAND_IDX(reg_backup_list[5], AGG_AWSCR0, 3);
+ REG_BAND(reg_backup_list[6], AGG_MRCR);
+ REG_BAND(reg_backup_list[7], TMAC_TFCR0);
+ REG_BAND(reg_backup_list[8], TMAC_TCR0);
+ REG_BAND(reg_backup_list[9], AGG_ATCR1);
+ REG_BAND(reg_backup_list[10], AGG_ATCR3);
+ REG_BAND(reg_backup_list[11], TMAC_TRCR0);
+ REG_BAND(reg_backup_list[12], TMAC_ICR0);
+ REG_BAND_IDX(reg_backup_list[13], ARB_DRNGR0, 0);
+ REG_BAND_IDX(reg_backup_list[14], ARB_DRNGR0, 1);
+ REG_BAND(reg_backup_list[15], WF_RFCR);
+ REG_BAND(reg_backup_list[16], WF_RFCR1);
+
if (phy->mt76->test.state == MT76_TM_STATE_OFF) {
for (i = 0; i < n_regs; i++)
- mt76_wr(dev, reg_backup_list[i].band[ext_phy], b[i]);
+ mt76_wr(dev, reg_backup_list[i].band[phy->band_idx], b[i]);
return;
}
@@ -368,33 +368,33 @@ mt7915_tm_reg_backup_restore(struct mt7915_phy *phy)
phy->test.reg_backup = b;
for (i = 0; i < n_regs; i++)
- b[i] = mt76_rr(dev, reg_backup_list[i].band[ext_phy]);
+ b[i] = mt76_rr(dev, reg_backup_list[i].band[phy->band_idx]);
}
- mt76_clear(dev, MT_AGG_PCR0(ext_phy, 0), MT_AGG_PCR0_MM_PROT |
+ mt76_clear(dev, MT_AGG_PCR0(phy->band_idx, 0), MT_AGG_PCR0_MM_PROT |
MT_AGG_PCR0_GF_PROT | MT_AGG_PCR0_ERP_PROT |
MT_AGG_PCR0_VHT_PROT | MT_AGG_PCR0_BW20_PROT |
MT_AGG_PCR0_BW40_PROT | MT_AGG_PCR0_BW80_PROT);
- mt76_set(dev, MT_AGG_PCR0(ext_phy, 0), MT_AGG_PCR0_PTA_WIN_DIS);
+ mt76_set(dev, MT_AGG_PCR0(phy->band_idx, 0), MT_AGG_PCR0_PTA_WIN_DIS);
- mt76_wr(dev, MT_AGG_PCR0(ext_phy, 1), MT_AGG_PCR1_RTS0_NUM_THRES |
+ mt76_wr(dev, MT_AGG_PCR0(phy->band_idx, 1), MT_AGG_PCR1_RTS0_NUM_THRES |
MT_AGG_PCR1_RTS0_LEN_THRES);
- mt76_clear(dev, MT_AGG_MRCR(ext_phy), MT_AGG_MRCR_BAR_CNT_LIMIT |
+ mt76_clear(dev, MT_AGG_MRCR(phy->band_idx), MT_AGG_MRCR_BAR_CNT_LIMIT |
MT_AGG_MRCR_LAST_RTS_CTS_RN | MT_AGG_MRCR_RTS_FAIL_LIMIT |
MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT);
- mt76_rmw(dev, MT_AGG_MRCR(ext_phy), MT_AGG_MRCR_RTS_FAIL_LIMIT |
+ mt76_rmw(dev, MT_AGG_MRCR(phy->band_idx), MT_AGG_MRCR_RTS_FAIL_LIMIT |
MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT,
FIELD_PREP(MT_AGG_MRCR_RTS_FAIL_LIMIT, 1) |
FIELD_PREP(MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT, 1));
- mt76_wr(dev, MT_TMAC_TFCR0(ext_phy), 0);
- mt76_clear(dev, MT_TMAC_TCR0(ext_phy), MT_TMAC_TCR0_TBTT_STOP_CTRL);
+ mt76_wr(dev, MT_TMAC_TFCR0(phy->band_idx), 0);
+ mt76_clear(dev, MT_TMAC_TCR0(phy->band_idx), MT_TMAC_TCR0_TBTT_STOP_CTRL);
/* config rx filter for testmode rx */
- mt76_wr(dev, MT_WF_RFCR(ext_phy), 0xcf70a);
- mt76_wr(dev, MT_WF_RFCR1(ext_phy), 0);
+ mt76_wr(dev, MT_WF_RFCR(phy->band_idx), 0xcf70a);
+ mt76_wr(dev, MT_WF_RFCR1(phy->band_idx), 0);
}
static void
@@ -452,7 +452,7 @@ mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
u8 tx_ant = td->tx_antenna_mask;
if (phy != &dev->phy)
- tx_ant >>= 2;
+ tx_ant >>= dev->chainshift;
phy->test.spe_idx = spe_idx_map[tx_ant];
}
}
@@ -574,6 +574,8 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
if (chandef->chan->band == NL80211_BAND_5GHZ)
sband = &phy->mt76->sband_5g.sband;
+ else if (chandef->chan->band == NL80211_BAND_6GHZ)
+ sband = &phy->mt76->sband_6g.sband;
else
sband = &phy->mt76->sband_2g.sband;
@@ -720,11 +722,11 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
{
struct mt7915_phy *phy = mphy->priv;
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
enum mt76_rxq_id q;
void *rx, *rssi;
u16 fcs_err;
int i;
+ u32 cnt;
rx = nla_nest_start(msg, MT76_TM_STATS_ATTR_LAST_RX);
if (!rx)
@@ -768,9 +770,11 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
nla_nest_end(msg, rx);
- fcs_err = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
- MT_MIB_SDR3_FCS_ERR_MASK);
- q = ext_phy ? MT_RXQ_EXT : MT_RXQ_MAIN;
+ cnt = mt76_rr(dev, MT_MIB_SDR3(phy->band_idx));
+ fcs_err = is_mt7915(&dev->mt76) ? FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
+
+ q = phy->band_idx ? MT_RXQ_EXT : MT_RXQ_MAIN;
mphy->test.rx_stats.packets[q] += fcs_err;
mphy->test.rx_stats.fcs_error[q] += fcs_err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig
index 71154fc2a87c..adff2d7350b5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig
@@ -24,3 +24,14 @@ config MT7921S
This adds support for MT7921S 802.11ax 2x2:2SS wireless devices.
To compile this driver as a module, choose M here.
+
+config MT7921U
+ tristate "MediaTek MT7921U (USB) support"
+ select MT76_USB
+ select MT7921_COMMON
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7921U 802.11ax 2x2:2SS wireless devices.
+
+ To compile this driver as a module, choose M here.
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
index 1187acedfeda..0a146818c623 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
@@ -3,6 +3,7 @@
obj-$(CONFIG_MT7921_COMMON) += mt7921-common.o
obj-$(CONFIG_MT7921E) += mt7921e.o
obj-$(CONFIG_MT7921S) += mt7921s.o
+obj-$(CONFIG_MT7921U) += mt7921u.o
CFLAGS_trace.o := -I$(src)
@@ -10,3 +11,4 @@ mt7921-common-y := mac.o mcu.o main.o init.o debugfs.o trace.o
mt7921-common-$(CONFIG_NL80211_TESTMODE) += testmode.o
mt7921e-y := pci.o pci_mac.o pci_mcu.o dma.o
mt7921s-y := sdio.o sdio_mac.o sdio_mcu.o
+mt7921u-y := usb.o usb_mac.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
index 86fd7292b229..bce76417f95d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
@@ -129,23 +129,22 @@ mt7921_queues_acq(struct seq_file *s, void *data)
mt7921_mutex_acquire(dev);
- for (i = 0; i < 16; i++) {
- int j, acs = i / 4, index = i % 4;
+ for (i = 0; i < 4; i++) {
u32 ctrl, val, qlen = 0;
+ int j;
- val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index));
- ctrl = BIT(31) | BIT(15) | (acs << 8);
+ val = mt76_rr(dev, MT_PLE_AC_QEMPTY(i));
+ ctrl = BIT(31) | BIT(11) | (i << 24);
for (j = 0; j < 32; j++) {
if (val & BIT(j))
continue;
- mt76_wr(dev, MT_PLE_FL_Q0_CTRL,
- ctrl | (j + (index << 5)));
+ mt76_wr(dev, MT_PLE_FL_Q0_CTRL, ctrl | j);
qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL,
GENMASK(11, 0));
}
- seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen);
+ seq_printf(s, "AC%d: queued=%d\n", i, qlen);
}
mt7921_mutex_release(dev);
@@ -262,26 +261,21 @@ mt7921_txpwr(struct seq_file *s, void *data)
return 0;
}
-static void
-mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
-{
- struct mt7921_dev *dev = priv;
-
- mt7921_mcu_set_beacon_filter(dev, vif, dev->pm.enable);
-}
-
static int
mt7921_pm_set(void *data, u64 val)
{
struct mt7921_dev *dev = data;
struct mt76_connac_pm *pm = &dev->pm;
+ if (mt76_is_usb(&dev->mt76))
+ return -EOPNOTSUPP;
+
mutex_lock(&dev->mt76.mutex);
- if (val == pm->enable)
+ if (val == pm->enable_user)
goto out;
- if (!pm->enable) {
+ if (!pm->enable_user) {
pm->stats.last_wake_event = jiffies;
pm->stats.last_doze_event = jiffies;
}
@@ -291,13 +285,8 @@ mt7921_pm_set(void *data, u64 val)
pm->enable = false;
mt76_connac_pm_wake(&dev->mphy, pm);
- ieee80211_iterate_active_interfaces(mt76_hw(dev),
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7921_pm_interface_iter, dev);
-
- mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
-
- pm->enable = val;
+ pm->enable_user = val;
+ mt7921_set_runtime_pm(dev);
mt76_connac_power_save_sched(&dev->mphy, pm);
out:
mutex_unlock(&dev->mt76.mutex);
@@ -310,7 +299,7 @@ mt7921_pm_get(void *data, u64 *val)
{
struct mt7921_dev *dev = data;
- *val = dev->pm.enable;
+ *val = dev->pm.enable_user;
return 0;
}
@@ -322,13 +311,20 @@ mt7921_deep_sleep_set(void *data, u64 val)
{
struct mt7921_dev *dev = data;
struct mt76_connac_pm *pm = &dev->pm;
+ bool monitor = !!(dev->mphy.hw->conf.flags & IEEE80211_CONF_MONITOR);
bool enable = !!val;
+ if (mt76_is_usb(&dev->mt76))
+ return -EOPNOTSUPP;
+
mt7921_mutex_acquire(dev);
- if (pm->ds_enable != enable) {
- mt76_connac_mcu_set_deep_sleep(&dev->mt76, enable);
- pm->ds_enable = enable;
- }
+ if (pm->ds_enable_user == enable)
+ goto out;
+
+ pm->ds_enable_user = enable;
+ pm->ds_enable = enable && !monitor;
+ mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
+out:
mt7921_mutex_release(dev);
return 0;
@@ -339,7 +335,7 @@ mt7921_deep_sleep_get(void *data, u64 *val)
{
struct mt7921_dev *dev = data;
- *val = dev->pm.ds_enable;
+ *val = dev->pm.ds_enable_user;
return 0;
}
@@ -438,8 +434,13 @@ int mt7921_init_debugfs(struct mt7921_dev *dev)
if (!dir)
return -ENOMEM;
- debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
- mt7921_queues_read);
+ if (mt76_is_mmio(&dev->mt76))
+ debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues",
+ dir, mt7921_queues_read);
+ else
+ debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues",
+ dir, mt76_queues_read);
+
debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
mt7921_queues_acq);
debugfs_create_devm_seqfile(dev->mt76.dev, "txpower_sku", dir,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
index cdff1fd52d93..ca7e20fb5fc0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
@@ -5,7 +5,7 @@
#include "../dma.h"
#include "mac.h"
-int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc)
+static int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc)
{
int i, err;
@@ -78,110 +78,6 @@ static void mt7921_dma_prefetch(struct mt7921_dev *dev)
mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4));
}
-static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
-{
- static const struct {
- u32 phys;
- u32 mapped;
- u32 size;
- } fixed_map[] = {
- { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
- { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
- { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
- { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
- { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
- { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
- { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
- { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
- { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
- { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
- { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
- { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
- { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
- { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
- { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
- { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
- { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
- { 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
- { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
- { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
- { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
- { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
- { 0x820cc000, 0x0e000, 0x1000 }, /* WF_UMAC_TOP (PP) */
- { 0x820cd000, 0x0f000, 0x1000 }, /* WF_MDP_TOP */
- { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
- { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
- { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
- { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
- { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
- { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
- { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
- { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
- { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
- { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
- { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
- { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
- { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
- { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
- { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
- { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
- { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
- { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
- { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
- };
- int i;
-
- if (addr < 0x100000)
- return addr;
-
- for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
- u32 ofs;
-
- if (addr < fixed_map[i].phys)
- continue;
-
- ofs = addr - fixed_map[i].phys;
- if (ofs > fixed_map[i].size)
- continue;
-
- return fixed_map[i].mapped + ofs;
- }
-
- if ((addr >= 0x18000000 && addr < 0x18c00000) ||
- (addr >= 0x70000000 && addr < 0x78000000) ||
- (addr >= 0x7c000000 && addr < 0x7c400000))
- return mt7921_reg_map_l1(dev, addr);
-
- dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
- addr);
-
- return 0;
-}
-
-static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- u32 addr = __mt7921_reg_addr(dev, offset);
-
- return dev->bus_ops->rr(mdev, addr);
-}
-
-static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- u32 addr = __mt7921_reg_addr(dev, offset);
-
- dev->bus_ops->wr(mdev, addr, val);
-}
-
-static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- u32 addr = __mt7921_reg_addr(dev, offset);
-
- return dev->bus_ops->rmw(mdev, addr, mask, val);
-}
-
static int mt7921_dma_disable(struct mt7921_dev *dev, bool force)
{
if (force) {
@@ -341,23 +237,8 @@ int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev)
int mt7921_dma_init(struct mt7921_dev *dev)
{
- struct mt76_bus_ops *bus_ops;
int ret;
- dev->phy.dev = dev;
- dev->phy.mt76 = &dev->mt76.phy;
- dev->mt76.phy.priv = &dev->phy;
- dev->bus_ops = dev->mt76.bus;
- bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
- GFP_KERNEL);
- if (!bus_ops)
- return -ENOMEM;
-
- bus_ops->rr = mt7921_rr;
- bus_ops->wr = mt7921_wr;
- bus_ops->rmw = mt7921_rmw;
- dev->mt76.bus = bus_ops;
-
mt76_dma_attach(&dev->mt76);
ret = mt7921_dma_disable(dev, true);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index ad59ef9839dc..91fc41922d95 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -165,7 +165,7 @@ out:
static int mt7921_init_hardware(struct mt7921_dev *dev)
{
- int ret, idx, i;
+ int ret, i;
set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
@@ -182,6 +182,13 @@ static int mt7921_init_hardware(struct mt7921_dev *dev)
return ret;
}
+ return 0;
+}
+
+static int mt7921_init_wcid(struct mt7921_dev *dev)
+{
+ int idx;
+
/* Beacon and mgmt frames should occupy wcid 0 */
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7921_WTBL_STA - 1);
if (idx)
@@ -195,6 +202,38 @@ static int mt7921_init_hardware(struct mt7921_dev *dev)
return 0;
}
+static void mt7921_init_work(struct work_struct *work)
+{
+ struct mt7921_dev *dev = container_of(work, struct mt7921_dev,
+ init_work);
+ int ret;
+
+ ret = mt7921_init_hardware(dev);
+ if (ret)
+ return;
+
+ mt76_set_stream_caps(&dev->mphy, true);
+ mt7921_set_stream_he_caps(&dev->phy);
+
+ ret = mt76_register_device(&dev->mt76, true, mt76_rates,
+ ARRAY_SIZE(mt76_rates));
+ if (ret) {
+ dev_err(dev->mt76.dev, "register device failed\n");
+ return;
+ }
+
+ ret = mt7921_init_debugfs(dev);
+ if (ret) {
+ dev_err(dev->mt76.dev, "register debugfs failed\n");
+ return;
+ }
+
+ /* chip reset is supported from this point on */
+ dev->hw_init_done = true;
+
+ mt76_connac_mcu_set_deep_sleep(&dev->mt76, dev->pm.ds_enable);
+}
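/*
 * Sketch: hardware bring-up moves off the probe path into init_work.
 * The ordering the worker relies on, as read from the hunk above:
 *
 *	mt7921_init_hardware()          chip/firmware bring-up
 *	mt76_register_device()          mac80211 registration
 *	mt7921_init_debugfs()
 *	dev->hw_init_done = true;       chip reset allowed from here on
 *	mt76_connac_mcu_set_deep_sleep()
 *
 * Errors are logged and the worker simply returns; recovery presumably
 * happens through a later reset, since probe has already succeeded.
 */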
+
int mt7921_register_device(struct mt7921_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
@@ -222,21 +261,22 @@ int mt7921_register_device(struct mt7921_dev *dev)
spin_lock_init(&dev->sta_poll_lock);
INIT_WORK(&dev->reset_work, mt7921_mac_reset_work);
+ INIT_WORK(&dev->init_work, mt7921_init_work);
dev->pm.idle_timeout = MT7921_PM_TIMEOUT;
dev->pm.stats.last_wake_event = jiffies;
dev->pm.stats.last_doze_event = jiffies;
-
- /* TODO: mt7921s run sleep mode on default */
- if (mt76_is_mmio(&dev->mt76)) {
+ if (!mt76_is_usb(&dev->mt76)) {
+ dev->pm.enable_user = true;
dev->pm.enable = true;
+ dev->pm.ds_enable_user = true;
dev->pm.ds_enable = true;
}
- if (mt76_is_sdio(&dev->mt76))
+ if (!mt76_is_mmio(&dev->mt76))
hw->extra_tx_headroom += MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
- ret = mt7921_init_hardware(dev);
+ ret = mt7921_init_wcid(dev);
if (ret)
return ret;
@@ -264,23 +304,7 @@ int mt7921_register_device(struct mt7921_dev *dev)
dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask;
dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask;
- mt76_set_stream_caps(&dev->mphy, true);
- mt7921_set_stream_he_caps(&dev->phy);
-
- ret = mt76_register_device(&dev->mt76, true, mt76_rates,
- ARRAY_SIZE(mt76_rates));
- if (ret)
- return ret;
-
- ret = mt7921_init_debugfs(dev);
- if (ret)
- return ret;
-
- ret = mt76_connac_mcu_set_deep_sleep(&dev->mt76, dev->pm.ds_enable);
- if (ret)
- return ret;
-
- dev->hw_init_done = true;
+ queue_work(system_wq, &dev->init_work);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index ec10f95a4649..233998ca4857 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -116,7 +116,7 @@ void mt7921_mac_sta_poll(struct mt7921_dev *dev)
sta = container_of((void *)msta, struct ieee80211_sta,
drv_priv);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- u8 q = mt7921_lmac_mapping(dev, i);
+ u8 q = mt76_connac_lmac_mapping(i);
u32 tx_cur = tx_time[q];
u32 rx_cur = rx_time[q];
u8 tid = ac_to_tid[i];
@@ -176,8 +176,8 @@ mt7921_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
u32 ru_h, ru_l;
u8 ru, offs = 0;
- ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0]));
- ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1]));
+ ru_l = le32_get_bits(rxv[0], MT_PRXV_HE_RU_ALLOC_L);
+ ru_h = le32_get_bits(rxv[1], MT_PRXV_HE_RU_ALLOC_H);
ru = (u8)(ru_l | ru_h << 4);
status->bw = RATE_INFO_BW_HE_RU;
@@ -247,19 +247,19 @@ mt7921_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv)
MU_PREP(FLAGS2_SIG_B_SYMS_USERS,
le32_get_bits(rxv[2], MT_CRXV_HE_NUM_USER));
- he_mu->ru_ch1[0] = FIELD_GET(MT_CRXV_HE_RU0, le32_to_cpu(rxv[3]));
+ he_mu->ru_ch1[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU0);
if (status->bw >= RATE_INFO_BW_40) {
he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN);
he_mu->ru_ch2[0] =
- FIELD_GET(MT_CRXV_HE_RU1, le32_to_cpu(rxv[3]));
+ le32_get_bits(rxv[3], MT_CRXV_HE_RU1);
}
if (status->bw >= RATE_INFO_BW_80) {
he_mu->ru_ch1[1] =
- FIELD_GET(MT_CRXV_HE_RU2, le32_to_cpu(rxv[3]));
+ le32_get_bits(rxv[3], MT_CRXV_HE_RU2);
he_mu->ru_ch2[1] =
- FIELD_GET(MT_CRXV_HE_RU3, le32_to_cpu(rxv[3]));
+ le32_get_bits(rxv[3], MT_CRXV_HE_RU3);
}
}
@@ -304,15 +304,16 @@ mt7921_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode)
case MT_PHY_TYPE_HE_SU:
he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
HE_BITS(DATA1_UL_DL_KNOWN) |
- HE_BITS(DATA1_BEAM_CHANGE_KNOWN);
+ HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
+ HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
- he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
break;
case MT_PHY_TYPE_HE_EXT_SU:
he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
- HE_BITS(DATA1_UL_DL_KNOWN);
+ HE_BITS(DATA1_UL_DL_KNOWN) |
+ HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
break;
@@ -402,15 +403,15 @@ mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb)
static int mt7921_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
struct mt7921_sta *msta = (struct mt7921_sta *)status->wcid;
+ __le32 *rxd = (__le32 *)skb->data;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct ieee80211_hdr hdr;
- struct ethhdr eth_hdr;
- __le32 *rxd = (__le32 *)skb->data;
- __le32 qos_ctrl, ht_ctrl;
+ u16 frame_control;
- if (FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[3])) !=
+ if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
MT_RXD3_NORMAL_U2M)
return -EINVAL;
@@ -424,47 +425,52 @@ static int mt7921_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
/* store the info from RXD and ethhdr to avoid being overridden */
- memcpy(&eth_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
- hdr.frame_control = FIELD_GET(MT_RXD6_FRAME_CONTROL, rxd[6]);
- hdr.seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, rxd[8]);
- qos_ctrl = FIELD_GET(MT_RXD8_QOS_CTL, rxd[8]);
- ht_ctrl = FIELD_GET(MT_RXD9_HT_CONTROL, rxd[9]);
-
+ frame_control = le32_get_bits(rxd[6], MT_RXD6_FRAME_CONTROL);
+ hdr.frame_control = cpu_to_le16(frame_control);
+ hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_SEQ_CTRL));
hdr.duration_id = 0;
+
ether_addr_copy(hdr.addr1, vif->addr);
ether_addr_copy(hdr.addr2, sta->addr);
- switch (le16_to_cpu(hdr.frame_control) &
- (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
+ switch (frame_control & (IEEE80211_FCTL_TODS |
+ IEEE80211_FCTL_FROMDS)) {
case 0:
ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
break;
case IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_source);
break;
case IEEE80211_FCTL_TODS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
break;
case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
- ether_addr_copy(hdr.addr4, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
+ ether_addr_copy(hdr.addr4, eth_hdr->h_source);
break;
default:
break;
}
skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
- if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
- eth_hdr.h_proto == htons(ETH_P_IPX))
+ if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
+ eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
- else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
+ else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
else
skb_pull(skb, 2);
if (ieee80211_has_order(hdr.frame_control))
- memcpy(skb_push(skb, 2), &ht_ctrl, 2);
- if (ieee80211_is_data_qos(hdr.frame_control))
- memcpy(skb_push(skb, 2), &qos_ctrl, 2);
+ memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[9],
+ IEEE80211_HT_CTL_LEN);
+ if (ieee80211_is_data_qos(hdr.frame_control)) {
+ __le16 qos_ctrl;
+
+ qos_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_QOS_CTL));
+ memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
+ IEEE80211_QOS_CTL_LEN);
+ }
+
if (ieee80211_has_a4(hdr.frame_control))
memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
else
@@ -666,9 +672,6 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
status->chain_signal[i]);
}
- if (status->signal == -128)
- status->flag |= RX_FLAG_NO_SIGNAL_VAL;
-
stbc = FIELD_GET(MT_PRXV_STBC, v0);
gi = FIELD_GET(MT_PRXV_SGI, v0);
cck = false;
@@ -912,11 +915,18 @@ mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi,
val = MT_TXD3_SN_VALID |
FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
txwi[3] |= cpu_to_le32(val);
+ txwi[7] &= ~cpu_to_le32(MT_TXD7_HW_AMSDU);
}
- val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
- FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
- txwi[7] |= cpu_to_le32(val);
+ if (mt76_is_mmio(&dev->mt76)) {
+ val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
+ txwi[7] |= cpu_to_le32(val);
+ } else {
+ val = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype);
+ txwi[8] |= cpu_to_le32(val);
+ }
}
void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
@@ -950,7 +960,7 @@ void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
} else {
p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
q_idx = wmm_idx * MT7921_MAX_WMM_SETS +
- mt7921_lmac_mapping(dev, skb_get_queue_mapping(skb));
+ mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
}
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
@@ -1016,7 +1026,7 @@ void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
if (!sta || !(sta->ht_cap.ht_supported || sta->he_cap.has_he))
return;
- tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
+ tid = le32_get_bits(txwi[1], MT_TXD1_TID);
if (tid >= 6) /* skip VO queue */
return;
@@ -1092,7 +1102,6 @@ mt7921_mac_add_txs_skb(struct mt7921_dev *dev, struct mt76_wcid *wcid, int pid,
break;
case MT_PHY_TYPE_HT:
case MT_PHY_TYPE_HT_GF:
- rate.mcs += (rate.nss - 1) * 8;
if (rate.mcs > 31)
goto out;
@@ -1156,18 +1165,13 @@ void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
struct mt76_wcid *wcid;
__le32 *txs_data = data;
u16 wcidx;
- u32 txs;
u8 pid;
- txs = le32_to_cpu(txs_data[0]);
- if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1)
+ if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
return;
- txs = le32_to_cpu(txs_data[2]);
- wcidx = FIELD_GET(MT_TXS2_WCID, txs);
-
- txs = le32_to_cpu(txs_data[3]);
- pid = FIELD_GET(MT_TXS3_PID, txs);
+ wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
+ pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
if (pid < MT_PACKET_ID_FIRST)
return;
@@ -1195,6 +1199,7 @@ void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
out:
rcu_read_unlock();
}
+EXPORT_SYMBOL_GPL(mt7921_mac_add_txs);
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
@@ -1205,8 +1210,8 @@ void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
enum rx_pkt_type type;
u16 flag;
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
- flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+ flag = le32_get_bits(rxd[0], MT_RXD0_PKT_FLAG);
if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
type = PKT_TYPE_NORMAL_MCU;
@@ -1548,7 +1553,16 @@ void mt7921_pm_power_save_work(struct work_struct *work)
delta = dev->pm.idle_timeout;
if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
- test_bit(MT76_HW_SCHED_SCANNING, &mphy->state))
+ test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) ||
+ dev->fw_assert)
+ goto out;
+
+ if (mutex_is_locked(&dev->mt76.mutex))
+ /* if the mt76 mutex is held we should not put the device
+ * to sleep, since someone is currently accessing the device
+ * register map; wait for the next power_save trigger
+ * instead.
+ */
goto out;
if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
@@ -1610,3 +1624,94 @@ void mt7921_coredump_work(struct work_struct *work)
mt7921_reset(&dev->mt76);
}
+
+/* usb_sdio */
+static void
+mt7921_usb_sdio_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid,
+ enum mt76_txq_id qid, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key, int pid,
+ struct sk_buff *skb)
+{
+ __le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
+
+ memset(txwi, 0, MT_SDIO_TXD_SIZE);
+ mt7921_mac_write_txwi(dev, txwi, skb, wcid, key, pid, false);
+ skb_push(skb, MT_SDIO_TXD_SIZE);
+}
+
+int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct sk_buff *skb = tx_info->skb;
+ int err, pad, pktid, type;
+
+ if (unlikely(tx_info->skb->len <= ETH_HLEN))
+ return -EINVAL;
+
+ if (!wcid)
+ wcid = &dev->mt76.global_wcid;
+
+ if (sta) {
+ struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
+
+ if (time_after(jiffies, msta->last_txs + HZ / 4)) {
+ info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+ msta->last_txs = jiffies;
+ }
+ }
+
+ pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+ mt7921_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
+
+ type = mt76_is_sdio(mdev) ? MT7921_SDIO_DATA : 0;
+ mt7921_skb_add_usb_sdio_hdr(dev, skb, type);
+ pad = round_up(skb->len, 4) - skb->len;
+ if (mt76_is_usb(mdev))
+ pad += 4;
+
+ err = mt76_skb_adjust_pad(skb, pad);
+ if (err)
+ /* Release pktid in case of error. */
+ idr_remove(&wcid->pktid, pktid);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_prepare_skb);
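/*
 * Sketch of the resulting frame layout on the USB/SDIO data path, as
 * implied by the code above (layout is editor's illustration):
 *
 *	[4-byte bus header][MT_SDIO_TXD_SIZE txwi][802.11 payload][pad]
 *
 * pad rounds the total up to a 4-byte boundary; USB adds a further
 * 4 bytes, presumably the MT_USB_TAIL_SIZE trailer defined in mac.h.
 * On error the pktid allocated by mt76_tx_status_skb_add() is released
 * so the status table does not leak entries.
 */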
+
+void mt7921_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
+ struct mt76_queue_entry *e)
+{
+ __le32 *txwi = (__le32 *)(e->skb->data + MT_SDIO_HDR_SIZE);
+ unsigned int headroom = MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
+ struct ieee80211_sta *sta;
+ struct mt76_wcid *wcid;
+ u16 idx;
+
+ idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
+ wcid = rcu_dereference(mdev->wcid[idx]);
+ sta = wcid_to_sta(wcid);
+
+ if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ mt7921_tx_check_aggr(sta, txwi);
+
+ skb_pull(e->skb, headroom);
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
+}
+EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_complete_skb);
+
+bool mt7921_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+
+ mt7921_mutex_acquire(dev);
+ mt7921_mac_sta_poll(dev);
+ mt7921_mutex_release(dev);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_status_data);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
index 544a1c33126a..79447e2d0143 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
@@ -202,6 +202,7 @@ enum tx_mcu_port_q_idx {
#define MT_SDIO_TXD_SIZE (MT_TXD_SIZE + 8 * 4)
#define MT_SDIO_TAIL_SIZE 8
#define MT_SDIO_HDR_SIZE 4
+#define MT_USB_TAIL_SIZE 4
#define MT_TXD0_Q_IDX GENMASK(31, 25)
#define MT_TXD0_PKT_FMT GENMASK(24, 23)
@@ -284,6 +285,9 @@ enum tx_mcu_port_q_idx {
#define MT_TXD7_HW_AMSDU BIT(10)
#define MT_TXD7_TX_TIME GENMASK(9, 0)
+#define MT_TXD8_L_TYPE GENMASK(5, 4)
+#define MT_TXD8_L_SUB_TYPE GENMASK(3, 0)
+
#define MT_TX_RATE_STBC BIT(13)
#define MT_TX_RATE_NSS GENMASK(12, 10)
#define MT_TX_RATE_MODE GENMASK(9, 6)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 7a8d2596c226..fdaf2451bc1d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -264,7 +264,7 @@ static int mt7921_start(struct ieee80211_hw *hw)
return err;
}
-static void mt7921_stop(struct ieee80211_hw *hw)
+void mt7921_stop(struct ieee80211_hw *hw)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
struct mt7921_phy *phy = mt7921_hw_phy(hw);
@@ -273,6 +273,7 @@ static void mt7921_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&dev->pm.ps_work);
cancel_work_sync(&dev->pm.wake_work);
+ cancel_work_sync(&dev->reset_work);
mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
mt7921_mutex_acquire(dev);
@@ -280,6 +281,7 @@ static void mt7921_stop(struct ieee80211_hw *hw)
mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, false, false);
mt7921_mutex_release(dev);
}
+EXPORT_SYMBOL_GPL(mt7921_stop);
static int mt7921_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
@@ -452,19 +454,64 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mt76_wcid_key_setup(&dev->mt76, wcid,
cmd == SET_KEY ? key : NULL);
- err = mt7921_mcu_add_key(dev, vif, msta, key, cmd);
+ err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
+ key, MCU_UNI_CMD(STA_REC_UPDATE),
+ &msta->wcid, cmd);
if (err)
goto out;
if (key->cipher == WLAN_CIPHER_SUITE_WEP104 ||
key->cipher == WLAN_CIPHER_SUITE_WEP40)
- err = mt7921_mcu_add_key(dev, vif, mvif->wep_sta, key, cmd);
+ err = mt76_connac_mcu_add_key(&dev->mt76, vif,
+ &mvif->wep_sta->bip,
+ key, MCU_UNI_CMD(STA_REC_UPDATE),
+ &mvif->wep_sta->wcid, cmd);
out:
mt7921_mutex_release(dev);
return err;
}
+static void
+mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt7921_dev *dev = priv;
+
+ mt7921_mcu_set_beacon_filter(dev, vif, dev->pm.enable);
+}
+
+static void
+mt7921_sniffer_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt7921_dev *dev = priv;
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt76_connac_pm *pm = &dev->pm;
+ bool monitor = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
+
+ mt7921_mcu_set_sniffer(dev, vif, monitor);
+ pm->enable = !monitor;
+ pm->ds_enable = !monitor;
+
+ mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
+
+ if (monitor)
+ mt7921_mcu_set_beacon_filter(dev, vif, false);
+}
+
+void mt7921_set_runtime_pm(struct mt7921_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt76_connac_pm *pm = &dev->pm;
+ bool monitor = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
+
+ pm->enable = pm->enable_user && !monitor;
+ ieee80211_iterate_active_interfaces(hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7921_pm_interface_iter, dev);
+ pm->ds_enable = pm->ds_enable_user && !monitor;
+ mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
+}
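/*
 * Sketch: effective power-management state after this change.
 *
 *	pm->enable    = pm->enable_user    && !monitor;
 *	pm->ds_enable = pm->ds_enable_user && !monitor;
 *
 * i.e. a monitor-mode interface overrides the user's runtime-pm and
 * deep-sleep preferences without discarding them; leaving monitor
 * mode restores the user-selected values.
 */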
+
static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
@@ -488,16 +535,10 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
- bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
-
- if (!enabled)
- phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
- else
- phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
-
- mt76_rmw_field(dev, MT_DMA_DCR0(0), MT_DMA_DCR0_RXD_G5_EN,
- enabled);
- mt76_wr(dev, MT_WF_RFCR(0), phy->rxfilter);
+ ieee80211_iterate_active_interfaces(hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7921_sniffer_interface_iter, dev);
+ dev->mt76.rxfilter = mt76_rr(dev, MT_WF_RFCR(0));
}
out:
@@ -510,11 +551,10 @@ static int
mt7921_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- struct mt7921_dev *dev = mt7921_hw_dev(hw);
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
/* no need to update right away, we'll get BSS_CHANGED_QOS */
- queue = mt7921_lmac_mapping(dev, queue);
+ queue = mt76_connac_lmac_mapping(queue);
mvif->queue_params[queue] = *params;
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index ef1e1ef91611..da2be050ed7c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -67,25 +67,6 @@ struct mt7921_fw_region {
#define MT_STA_BFER BIT(0)
#define MT_STA_BFEE BIT(1)
-#define FW_FEATURE_SET_ENCRYPT BIT(0)
-#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
-#define FW_FEATURE_ENCRY_MODE BIT(4)
-#define FW_FEATURE_OVERRIDE_ADDR BIT(5)
-
-#define DL_MODE_ENCRYPT BIT(0)
-#define DL_MODE_KEY_IDX GENMASK(2, 1)
-#define DL_MODE_RESET_SEC_IV BIT(3)
-#define DL_MODE_WORKING_PDA_CR4 BIT(4)
-#define DL_CONFIG_ENCRY_MODE_SEL BIT(6)
-#define DL_MODE_NEED_RSP BIT(31)
-
-#define FW_START_OVERRIDE BIT(0)
-#define FW_START_WORKING_PDA_CR4 BIT(2)
-
-#define PATCH_SEC_NOT_SUPPORT GENMASK(31, 0)
-#define PATCH_SEC_TYPE_MASK GENMASK(15, 0)
-#define PATCH_SEC_TYPE_INFO 0x2
-
#define PATCH_SEC_ENC_TYPE_MASK GENMASK(31, 24)
#define PATCH_SEC_ENC_TYPE_PLAIN 0x00
#define PATCH_SEC_ENC_TYPE_AES 0x01
@@ -93,52 +74,6 @@ struct mt7921_fw_region {
#define PATCH_SEC_ENC_SCRAMBLE_INFO_MASK GENMASK(15, 0)
#define PATCH_SEC_ENC_AES_KEY_MASK GENMASK(7, 0)
-static enum mcu_cipher_type
-mt7921_mcu_get_cipher(int cipher)
-{
- switch (cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- return MCU_CIPHER_WEP40;
- case WLAN_CIPHER_SUITE_WEP104:
- return MCU_CIPHER_WEP104;
- case WLAN_CIPHER_SUITE_TKIP:
- return MCU_CIPHER_TKIP;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- return MCU_CIPHER_BIP_CMAC_128;
- case WLAN_CIPHER_SUITE_CCMP:
- return MCU_CIPHER_AES_CCMP;
- case WLAN_CIPHER_SUITE_CCMP_256:
- return MCU_CIPHER_CCMP_256;
- case WLAN_CIPHER_SUITE_GCMP:
- return MCU_CIPHER_GCMP;
- case WLAN_CIPHER_SUITE_GCMP_256:
- return MCU_CIPHER_GCMP_256;
- case WLAN_CIPHER_SUITE_SMS4:
- return MCU_CIPHER_WAPI;
- default:
- return MCU_CIPHER_NONE;
- }
-}
-
-static u8 mt7921_mcu_chan_bw(struct cfg80211_chan_def *chandef)
-{
- static const u8 width_to_bw[] = {
- [NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ,
- [NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ,
- [NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ,
- [NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ,
- [NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ,
- [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
- [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
- [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
- };
-
- if (chandef->width >= ARRAY_SIZE(width_to_bw))
- return 0;
-
- return width_to_bw[chandef->width];
-}
-
static int
mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb)
{
@@ -465,95 +400,6 @@ void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
}
/** starec & wtbl **/
-static int
-mt7921_mcu_sta_key_tlv(struct mt7921_sta *msta, struct sk_buff *skb,
- struct ieee80211_key_conf *key, enum set_key_cmd cmd)
-{
- struct mt7921_sta_key_conf *bip = &msta->bip;
- struct sta_rec_sec *sec;
- struct tlv *tlv;
- u32 len = sizeof(*sec);
-
- tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
-
- sec = (struct sta_rec_sec *)tlv;
- sec->add = cmd;
-
- if (cmd == SET_KEY) {
- struct sec_key *sec_key;
- u8 cipher;
-
- cipher = mt7921_mcu_get_cipher(key->cipher);
- if (cipher == MCU_CIPHER_NONE)
- return -EOPNOTSUPP;
-
- sec_key = &sec->key[0];
- sec_key->cipher_len = sizeof(*sec_key);
-
- if (cipher == MCU_CIPHER_BIP_CMAC_128) {
- sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
- sec_key->key_id = bip->keyidx;
- sec_key->key_len = 16;
- memcpy(sec_key->key, bip->key, 16);
-
- sec_key = &sec->key[1];
- sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
- sec_key->cipher_len = sizeof(*sec_key);
- sec_key->key_len = 16;
- memcpy(sec_key->key, key->key, 16);
-
- sec->n_cipher = 2;
- } else {
- sec_key->cipher_id = cipher;
- sec_key->key_id = key->keyidx;
- sec_key->key_len = key->keylen;
- memcpy(sec_key->key, key->key, key->keylen);
-
- if (cipher == MCU_CIPHER_TKIP) {
- /* Rx/Tx MIC keys are swapped */
- memcpy(sec_key->key + 16, key->key + 24, 8);
- memcpy(sec_key->key + 24, key->key + 16, 8);
- }
-
- /* store key_conf for BIP batch update */
- if (cipher == MCU_CIPHER_AES_CCMP) {
- memcpy(bip->key, key->key, key->keylen);
- bip->keyidx = key->keyidx;
- }
-
- len -= sizeof(*sec_key);
- sec->n_cipher = 1;
- }
- } else {
- len -= sizeof(sec->key);
- sec->n_cipher = 0;
- }
- sec->len = cpu_to_le16(len);
-
- return 0;
-}
-
-int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif,
- struct mt7921_sta *msta, struct ieee80211_key_conf *key,
- enum set_key_cmd cmd)
-{
- struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
- struct sk_buff *skb;
- int ret;
-
- skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
- &msta->wcid);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- ret = mt7921_mcu_sta_key_tlv(msta, skb, key, cmd);
- if (ret)
- return ret;
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_UNI_CMD(STA_REC_UPDATE), true);
-}
-
int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev,
struct ieee80211_ampdu_params *params,
bool enable)
@@ -564,6 +410,7 @@ int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev,
msta->wcid.amsdu = false;
return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
+ MCU_UNI_CMD(STA_REC_UPDATE),
enable, true);
}
@@ -574,23 +421,10 @@ int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev,
struct mt7921_sta *msta = (struct mt7921_sta *)params->sta->drv_priv;
return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
+ MCU_UNI_CMD(STA_REC_UPDATE),
enable, false);
}
-int mt7921_mcu_restart(struct mt76_dev *dev)
-{
- struct {
- u8 power_mode;
- u8 rsv[3];
- } req = {
- .power_mode = 1,
- };
-
- return mt76_mcu_send_msg(dev, MCU_CMD(NIC_POWER_CTRL), &req,
- sizeof(req), false);
-}
-EXPORT_SYMBOL_GPL(mt7921_mcu_restart);
-
static u32 mt7921_get_data_mode(struct mt7921_dev *dev, u32 info)
{
u32 mode = DL_MODE_NEED_RSP;
@@ -707,12 +541,8 @@ static int mt7921_load_patch(struct mt7921_dev *dev)
if (mt76_is_sdio(&dev->mt76)) {
/* activate again */
ret = __mt7921_mcu_fw_pmctrl(dev);
- if (ret)
- return ret;
-
- ret = __mt7921_mcu_drv_pmctrl(dev);
- if (ret)
- return ret;
+ if (!ret)
+ ret = __mt7921_mcu_drv_pmctrl(dev);
}
out:
@@ -730,22 +560,6 @@ out:
return ret;
}
-static u32 mt7921_mcu_gen_dl_mode(u8 feature_set, bool is_wa)
-{
- u32 ret = 0;
-
- ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
- (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
- ret |= (feature_set & FW_FEATURE_ENCRY_MODE) ?
- DL_CONFIG_ENCRY_MODE_SEL : 0;
- ret |= FIELD_PREP(DL_MODE_KEY_IDX,
- FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
- ret |= DL_MODE_NEED_RSP;
- ret |= is_wa ? DL_MODE_WORKING_PDA_CR4 : 0;
-
- return ret;
-}
-
static int
mt7921_mcu_send_ram_firmware(struct mt7921_dev *dev,
const struct mt7921_fw_trailer *hdr,
@@ -763,7 +577,8 @@ mt7921_mcu_send_ram_firmware(struct mt7921_dev *dev,
region = (const struct mt7921_fw_region *)((const u8 *)hdr -
(hdr->n_region - i) * sizeof(*region));
- mode = mt7921_mcu_gen_dl_mode(region->feature_set, is_wa);
+ mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
+ region->feature_set, is_wa);
len = le32_to_cpu(region->len);
addr = le32_to_cpu(region->addr);
@@ -920,33 +735,26 @@ EXPORT_SYMBOL_GPL(mt7921_mcu_exit);
int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
{
-#define WMM_AIFS_SET BIT(0)
-#define WMM_CW_MIN_SET BIT(1)
-#define WMM_CW_MAX_SET BIT(2)
-#define WMM_TXOP_SET BIT(3)
-#define WMM_PARAM_SET GENMASK(3, 0)
-#define TX_CMD_MODE 1
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct edca {
- u8 queue;
- u8 set;
- u8 aifs;
- u8 cw_min;
+ __le16 cw_min;
__le16 cw_max;
__le16 txop;
- };
+ __le16 aifs;
+ u8 guardtime;
+ u8 acm;
+ } __packed;
struct mt7921_mcu_tx {
- u8 total;
- u8 action;
- u8 valid;
- u8 mode;
-
struct edca edca[IEEE80211_NUM_ACS];
+ u8 bss_idx;
+ u8 qos;
+ u8 wmm_idx;
+ u8 pad;
} __packed req = {
- .valid = true,
- .mode = TX_CMD_MODE,
- .total = IEEE80211_NUM_ACS,
+ .bss_idx = mvif->mt76.idx,
+ .qos = vif->bss_conf.qos,
+ .wmm_idx = mvif->mt76.wmm_idx,
};
- struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mu_edca {
u8 cw_min;
u8 cw_max;
@@ -970,30 +778,29 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
.qos = vif->bss_conf.qos,
.wmm_idx = mvif->mt76.wmm_idx,
};
+ static const int to_aci[] = { 1, 0, 2, 3 };
int ac, ret;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
- struct edca *e = &req.edca[ac];
+ struct edca *e = &req.edca[to_aci[ac]];
- e->set = WMM_PARAM_SET;
- e->queue = ac + mvif->mt76.wmm_idx * MT7921_MAX_WMM_SETS;
- e->aifs = q->aifs;
+ e->aifs = cpu_to_le16(q->aifs);
e->txop = cpu_to_le16(q->txop);
if (q->cw_min)
- e->cw_min = fls(q->cw_min);
+ e->cw_min = cpu_to_le16(q->cw_min);
else
- e->cw_min = 5;
+ e->cw_min = cpu_to_le16(5);
if (q->cw_max)
- e->cw_max = cpu_to_le16(fls(q->cw_max));
+ e->cw_max = cpu_to_le16(q->cw_max);
else
e->cw_max = cpu_to_le16(10);
}
- ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EDCA_UPDATE),
- &req, sizeof(req), true);
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_EDCA_PARMS), &req,
+ sizeof(req), false);
if (ret)
return ret;
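/*
 * Editor's note on the EDCA rework above: the old MCU_EXT_CMD(EDCA_UPDATE)
 * request carried cw_min/cw_max as exponents (fls(cw)), one record per
 * queue with explicit WMM_*_SET flags. The new MCU_CE_CMD(SET_EDCA_PARMS)
 * request sends the raw contention-window values, little-endian, with the
 * four ACs laid out in what is presumably the firmware's ACI order via
 * to_aci[]:
 *
 *	struct edca *e = &req.edca[to_aci[ac]];
 *	e->cw_min = cpu_to_le16(q->cw_min);     raw value, no fls()
 */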
@@ -1003,7 +810,6 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
struct ieee80211_he_mu_edca_param_ac_rec *q;
struct mu_edca *e;
- int to_aci[] = {1, 0, 2, 3};
if (!mvif->queue_params[ac].mu_edca)
break;
@@ -1046,7 +852,7 @@ int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd)
} __packed req = {
.control_ch = chandef->chan->hw_value,
.center_ch = ieee80211_frequency_to_channel(freq1),
- .bw = mt7921_mcu_chan_bw(chandef),
+ .bw = mt76_connac_chan_bw(chandef),
.tx_streams_num = hweight8(phy->mt76->antenna_mask),
.rx_streams = phy->mt76->antenna_mask,
.band_idx = phy != &dev->phy,
@@ -1057,10 +863,13 @@ int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd)
else
req.channel_band = chandef->chan->band;
- if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
+ dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
+ req.switch_reason = CH_SWITCH_NORMAL;
+ else if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
- else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
- chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+ else if (!cfg80211_reg_can_beacon(dev->mt76.hw->wiphy, chandef,
+ NL80211_IFTYPE_AP))
req.switch_reason = CH_SWITCH_DFS;
else
req.switch_reason = CH_SWITCH_NORMAL;
@@ -1093,30 +902,6 @@ int mt7921_mcu_set_eeprom(struct mt7921_dev *dev)
}
EXPORT_SYMBOL_GPL(mt7921_mcu_set_eeprom);
-int mt7921_mcu_get_eeprom(struct mt7921_dev *dev, u32 offset)
-{
- struct mt7921_mcu_eeprom_info req = {
- .addr = cpu_to_le32(round_down(offset, 16)),
- };
- struct mt7921_mcu_eeprom_info *res;
- struct sk_buff *skb;
- int ret;
- u8 *buf;
-
- ret = mt76_mcu_send_and_get_msg(&dev->mt76,
- MCU_EXT_QUERY(EFUSE_ACCESS),
- &req, sizeof(req), true, &skb);
- if (ret)
- return ret;
-
- res = (struct mt7921_mcu_eeprom_info *)skb->data;
- buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr);
- memcpy(buf, res->data, 16);
- dev_kfree_skb(skb);
-
- return 0;
-}
-
int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
@@ -1351,3 +1136,33 @@ int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr)
return 0;
}
+
+int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct {
+ struct {
+ u8 band_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct sniffer_enable_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 enable;
+ u8 pad[3];
+ } __packed enable;
+ } req = {
+ .hdr = {
+ .band_idx = mvif->band_idx,
+ },
+ .enable = {
+ .tag = cpu_to_le16(0),
+ .len = cpu_to_le16(sizeof(struct sniffer_enable_tlv)),
+ .enable = enable,
+ },
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req),
+ true);
+}
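/*
 * Sketch: the MCU_UNI_CMD(SNIFFER) request above is a fixed header
 * (band_idx plus 3 pad bytes) followed by a single TLV:
 *
 *	tag    = 0                              sniffer-enable TLV
 *	len    = sizeof(struct sniffer_enable_tlv)
 *	enable = 0/1, plus 3 pad bytes
 *
 * All multi-byte fields are little-endian, matching the other uni
 * commands in this file.
 */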
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 96647801850a..7690364bc079 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -30,6 +30,7 @@
#define MT7921_DRV_OWN_RETRY_COUNT 10
#define MT7921_MCU_INIT_RETRY_COUNT 10
+#define MT7921_WFSYS_INIT_RETRY_COUNT 2
#define MT7921_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1.bin"
#define MT7921_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1_2_hdr.bin"
@@ -89,11 +90,6 @@ enum mt7921_rxq_id {
MT7921_RXQ_MCU_WM = 0,
};
-struct mt7921_sta_key_conf {
- s8 keyidx;
- u8 key[16];
-};
-
struct mt7921_sta {
struct mt76_wcid wcid; /* must be first */
@@ -106,7 +102,7 @@ struct mt7921_sta {
unsigned long ampdu_state;
struct mt76_sta_stats stats;
- struct mt7921_sta_key_conf bip;
+ struct mt76_connac_sta_key_conf bip;
};
DECLARE_EWMA(rssi, 10, 8);
@@ -209,6 +205,8 @@ struct mt7921_dev {
struct list_head sta_poll_list;
spinlock_t sta_poll_lock;
+ struct work_struct init_work;
+
u8 fw_debug;
struct mt76_connac_pm pm;
@@ -277,12 +275,6 @@ mt7921_hw_dev(struct ieee80211_hw *hw)
#define mt7921_mutex_release(dev) \
mt76_connac_mutex_release(&(dev)->mt76, &(dev)->pm)
-static inline u8 mt7921_lmac_mapping(struct mt7921_dev *dev, u8 ac)
-{
- /* LMAC uses the reverse order of mac80211 AC indexes */
- return 3 - ac;
-}
-
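/*
 * Editor's sketch: the per-driver helper is dropped in favour of the
 * shared mt76_connac_lmac_mapping(). Assuming the shared helper keeps
 * the same semantics as the code removed above, it reverses the
 * mac80211 AC index:
 *
 *	static inline u8 mt76_connac_lmac_mapping(u8 ac)
 *	{
 *		return 3 - ac;	// LMAC uses reversed AC order
 *	}
 */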
extern const struct ieee80211_ops mt7921_ops;
extern struct pci_driver mt7921_pci_driver;
@@ -296,16 +288,12 @@ int mt7921_wpdma_reset(struct mt7921_dev *dev, bool force);
int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev);
void mt7921_dma_cleanup(struct mt7921_dev *dev);
int mt7921_run_firmware(struct mt7921_dev *dev);
-int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif,
- struct mt7921_sta *msta, struct ieee80211_key_conf *key,
- enum set_key_cmd cmd);
int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
struct ieee80211_vif *vif, bool enable,
enum mt76_sta_info_state state);
int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd);
int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif);
int mt7921_mcu_set_eeprom(struct mt7921_dev *dev);
-int mt7921_mcu_get_eeprom(struct mt7921_dev *dev, u32 offset);
int mt7921_mcu_get_rx_rate(struct mt7921_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate);
int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl);
@@ -367,17 +355,20 @@ static inline void mt7921_mcu_tx_cleanup(struct mt7921_dev *dev)
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], false);
}
-static inline void mt7921_skb_add_sdio_hdr(struct sk_buff *skb,
- enum mt7921_sdio_pkt_type type)
+static inline void
+mt7921_skb_add_usb_sdio_hdr(struct mt7921_dev *dev, struct sk_buff *skb,
+ int type)
{
- u32 hdr;
+ u32 hdr, len;
- hdr = FIELD_PREP(MT7921_SDIO_HDR_TX_BYTES, skb->len + sizeof(hdr)) |
+ len = mt76_is_usb(&dev->mt76) ? skb->len : skb->len + sizeof(hdr);
+ hdr = FIELD_PREP(MT7921_SDIO_HDR_TX_BYTES, len) |
FIELD_PREP(MT7921_SDIO_HDR_PKT_TYPE, type);
put_unaligned_le32(hdr, skb_push(skb, sizeof(hdr)));
}
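/*
 * Sketch: the length field in the 4-byte bus header counts the header
 * itself on SDIO but not on USB, hence:
 *
 *	len = mt76_is_usb(...) ? skb->len : skb->len + sizeof(hdr);
 *
 * e.g. a 100-byte frame yields a length field of 104 on SDIO and 100
 * on USB (both computed before the header is pushed onto the skb).
 */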
+void mt7921_stop(struct ieee80211_hw *hw);
int mt7921_mac_init(struct mt7921_dev *dev);
bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask);
void mt7921_mac_reset_counters(struct mt7921_phy *phy);
@@ -399,7 +390,6 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
void mt7921_tx_worker(struct mt76_worker *w);
void mt7921e_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
-int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc);
void mt7921_tx_token_put(struct mt7921_dev *dev);
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
@@ -424,7 +414,6 @@ int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev);
void mt7921_pm_wake_work(struct work_struct *work);
void mt7921_pm_power_save_work(struct work_struct *work);
-bool mt7921_wait_for_mcu_init(struct mt7921_dev *dev);
void mt7921_coredump_work(struct work_struct *work);
int mt7921_wfsys_reset(struct mt7921_dev *dev);
int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr);
@@ -442,8 +431,8 @@ int mt7921_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, int *wait_seq);
int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq);
-int mt7921_mcu_restart(struct mt76_dev *dev);
+bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
int mt7921e_driver_own(struct mt7921_dev *dev);
@@ -452,17 +441,33 @@ int mt7921e_mcu_init(struct mt7921_dev *dev);
int mt7921s_wfsys_reset(struct mt7921_dev *dev);
int mt7921s_mac_reset(struct mt7921_dev *dev);
int mt7921s_init_reset(struct mt7921_dev *dev);
+int __mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921e_mcu_fw_pmctrl(struct mt7921_dev *dev);
int mt7921s_mcu_init(struct mt7921_dev *dev);
int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev);
-int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- enum mt76_txq_id qid, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta,
- struct mt76_tx_info *tx_info);
-void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
-bool mt7921s_tx_status_data(struct mt76_dev *mdev, u8 *update);
void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data);
+void mt7921_set_runtime_pm(struct mt7921_dev *dev);
+int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
+ bool enable);
+
+int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
+void mt7921_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
+ struct mt76_queue_entry *e);
+bool mt7921_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update);
+
+/* usb */
+#define MT_USB_TYPE_VENDOR (USB_TYPE_VENDOR | 0x1f)
+#define MT_USB_TYPE_UHW_VENDOR (USB_TYPE_VENDOR | 0x1e)
+
+int mt7921u_mcu_power_on(struct mt7921_dev *dev);
+int mt7921u_wfsys_reset(struct mt7921_dev *dev);
+int mt7921u_dma_init(struct mt7921_dev *dev);
+int mt7921u_init_reset(struct mt7921_dev *dev);
+int mt7921u_mac_reset(struct mt7921_dev *dev);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index 9dae2f5972bf..1a01d025bbe5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -105,6 +105,7 @@ static void mt7921e_unregister_device(struct mt7921_dev *dev)
int i;
struct mt76_connac_pm *pm = &dev->pm;
+ cancel_work_sync(&dev->init_work);
mt76_unregister_device(&dev->mt76);
mt76_for_each_q_rx(&dev->mt76, i)
napi_disable(&dev->mt76.napi[i]);
@@ -121,6 +122,110 @@ static void mt7921e_unregister_device(struct mt7921_dev *dev)
mt76_free_device(&dev->mt76);
}
+static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
+{
+ static const struct {
+ u32 phys;
+ u32 mapped;
+ u32 size;
+ } fixed_map[] = {
+ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
+ { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
+ { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
+ { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
+ { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
+ { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
+ { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
+ { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
+ { 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
+ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x1000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820cd000, 0x0f000, 0x1000 }, /* WF_MDP_TOP */
+ { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ };
+ int i;
+
+ if (addr < 0x100000)
+ return addr;
+
+ for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
+ u32 ofs;
+
+ if (addr < fixed_map[i].phys)
+ continue;
+
+ ofs = addr - fixed_map[i].phys;
+ if (ofs > fixed_map[i].size)
+ continue;
+
+ return fixed_map[i].mapped + ofs;
+ }
+
+ if ((addr >= 0x18000000 && addr < 0x18c00000) ||
+ (addr >= 0x70000000 && addr < 0x78000000) ||
+ (addr >= 0x7c000000 && addr < 0x7c400000))
+ return mt7921_reg_map_l1(dev, addr);
+
+ dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
+ addr);
+
+ return 0;
+}
+
+static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ u32 addr = __mt7921_reg_addr(dev, offset);
+
+ return dev->bus_ops->rr(mdev, addr);
+}
+
+static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ u32 addr = __mt7921_reg_addr(dev, offset);
+
+ dev->bus_ops->wr(mdev, addr, val);
+}
+
+static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ u32 addr = __mt7921_reg_addr(dev, offset);
+
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
+}
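/*
 * Sketch: with the remap table moved here, every register access on the
 * PCIe path funnels through __mt7921_reg_addr(). For example:
 *
 *	mt76_rr(dev, 0x820e4004)
 *	  -> mt7921_rr()
 *	  -> __mt7921_reg_addr(): 0x820e4000 maps to 0x21000 (WF_TMAC),
 *	     so the access resolves to 0x21004
 *	  -> dev->bus_ops->rr(mdev, 0x21004)
 *
 * Addresses below 0x100000 pass through unmapped, and a few high ranges
 * fall back to the L1 window via mt7921_reg_map_l1().
 */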
+
static int mt7921_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -134,6 +239,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.token_size = MT7921_TOKEN_SIZE,
.tx_prepare_skb = mt7921e_tx_prepare_skb,
.tx_complete_skb = mt7921e_tx_complete_skb,
+ .rx_check = mt7921e_rx_check,
.rx_skb = mt7921e_queue_rx_skb,
.rx_poll_complete = mt7921_rx_poll_complete,
.sta_ps = mt7921_sta_ps,
@@ -151,6 +257,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.fw_own = mt7921e_mcu_fw_pmctrl,
};
+ struct mt76_bus_ops *bus_ops;
struct mt7921_dev *dev;
struct mt76_dev *mdev;
int ret;
@@ -188,6 +295,25 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
tasklet_init(&dev->irq_tasklet, mt7921_irq_tasklet, (unsigned long)dev);
+
+ dev->phy.dev = dev;
+ dev->phy.mt76 = &dev->mt76.phy;
+ dev->mt76.phy.priv = &dev->phy;
+ dev->bus_ops = dev->mt76.bus;
+ bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
+ GFP_KERNEL);
+ if (!bus_ops)
+ return -ENOMEM;
+
+ bus_ops->rr = mt7921_rr;
+ bus_ops->wr = mt7921_wr;
+ bus_ops->rmw = mt7921_rmw;
+ dev->mt76.bus = bus_ops;
+
+ ret = __mt7921e_mcu_drv_pmctrl(dev);
+ if (ret)
+ return ret;
+
mdev->rev = (mt7921_l1_rr(dev, MT_HW_CHIPID) << 16) |
(mt7921_l1_rr(dev, MT_HW_REV) & 0xff);
dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
index 85286cc9add1..5ca14dbbdd26 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
@@ -137,7 +137,7 @@ mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
wcid_idx = wcid->idx;
} else {
- wcid_idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1]));
+ wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
}
__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
@@ -148,14 +148,15 @@ out:
}
static void
-mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
+mt7921e_mac_tx_free(struct mt7921_dev *dev, void *data, int len)
{
- struct mt7921_tx_free *free = (struct mt7921_tx_free *)skb->data;
+ struct mt7921_tx_free *free = (struct mt7921_tx_free *)data;
struct mt76_dev *mdev = &dev->mt76;
struct mt76_txwi_cache *txwi;
struct ieee80211_sta *sta = NULL;
+ struct sk_buff *skb, *tmp;
+ void *end = data + len;
LIST_HEAD(free_list);
- struct sk_buff *tmp;
bool wake = false;
u8 i, count;
@@ -163,11 +164,10 @@ mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
- /* TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE,
- * to the time ack is received or dropped by hw (air + hw queue time).
- * Should avoid accessing WTBL to get Tx airtime, and use it instead.
- */
- count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
+ count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
+ if (WARN_ON_ONCE((void *)&free->info[count] > end))
+ return;
+
for (i = 0; i < count; i++) {
u32 msdu, info = le32_to_cpu(free->info[i]);
u8 stat;
@@ -208,8 +208,6 @@ mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
if (wake)
mt76_set_tx_blocked(&dev->mt76, false);
- napi_consume_skb(skb, 1);
-
list_for_each_entry_safe(skb, tmp, &free_list, list) {
skb_list_del_init(skb);
napi_consume_skb(skb, 1);
@@ -222,6 +220,28 @@ mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
mt76_worker_schedule(&dev->mt76.tx_worker);
}
+bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ __le32 *rxd = (__le32 *)data;
+ __le32 *end = (__le32 *)&rxd[len / 4];
+ enum rx_pkt_type type;
+
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+
+ switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7921e_mac_tx_free(dev, data, len);
+ return false;
+ case PKT_TYPE_TXS:
+ for (rxd += 2; rxd + 8 <= end; rxd += 8)
+ mt7921_mac_add_txs(dev, rxd);
+ return false;
+ default:
+ return true;
+ }
+}
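/*
 * Sketch: mt7921e_rx_check() lets the DMA layer consume tx-free and
 * tx-status events straight from the ring buffer, before any skb is
 * built. Returning false tells the caller the buffer was handled:
 *
 *	PKT_TYPE_TXRX_NOTIFY -> mt7921e_mac_tx_free(data, len)
 *	PKT_TYPE_TXS         -> one mt7921_mac_add_txs() per 8-dword
 *	                        record, after a 2-dword header
 *	anything else        -> true, fall back to the skb path
 */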
+
void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
{
@@ -229,11 +249,12 @@ void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
__le32 *rxd = (__le32 *)skb->data;
enum rx_pkt_type type;
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
switch (type) {
case PKT_TYPE_TXRX_NOTIFY:
- mt7921_mac_tx_free(dev, skb);
+ mt7921e_mac_tx_free(dev, skb->data, skb->len);
+ napi_consume_skb(skb, 1);
break;
default:
mt7921_queue_rx_skb(mdev, q, skb);
@@ -314,6 +335,7 @@ int mt7921e_mac_reset(struct mt7921_dev *dev)
}
local_bh_enable();
+ dev->fw_assert = false;
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
index a020352122a1..36669e5aeef3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
@@ -42,7 +42,7 @@ int mt7921e_mcu_init(struct mt7921_dev *dev)
.headroom = sizeof(struct mt7921_mcu_txd),
.mcu_skb_send_msg = mt7921_mcu_send_message,
.mcu_parse_response = mt7921_mcu_parse_response,
- .mcu_restart = mt7921_mcu_restart,
+ .mcu_restart = mt76_connac_mcu_restart,
};
int err;
@@ -59,10 +59,8 @@ int mt7921e_mcu_init(struct mt7921_dev *dev)
return err;
}
-int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
+int __mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
{
- struct mt76_phy *mphy = &dev->mt76.phy;
- struct mt76_connac_pm *pm = &dev->pm;
int i, err = 0;
for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
@@ -75,9 +73,21 @@ int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
if (i == MT7921_DRV_OWN_RETRY_COUNT) {
dev_err(dev->mt76.dev, "driver own failed\n");
err = -EIO;
- goto out;
}
+ return err;
+}
+
+int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt76_connac_pm *pm = &dev->pm;
+ int err;
+
+ err = __mt7921e_mcu_drv_pmctrl(dev);
+ if (err < 0)
+ goto out;
+
mt7921_wpdma_reinit_cond(dev);
clear_bit(MT76_STATE_PM, &mphy->state);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
index cbd38122c510..6712ff60c722 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
@@ -17,13 +17,12 @@
#define MT_PLE_BASE 0x820c0000
#define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
-#define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0)
-#define MT_PLE_FL_Q1_CTRL MT_PLE(0x1b4)
-#define MT_PLE_FL_Q2_CTRL MT_PLE(0x1b8)
-#define MT_PLE_FL_Q3_CTRL MT_PLE(0x1bc)
+#define MT_PLE_FL_Q0_CTRL MT_PLE(0x3e0)
+#define MT_PLE_FL_Q1_CTRL MT_PLE(0x3e4)
+#define MT_PLE_FL_Q2_CTRL MT_PLE(0x3e8)
+#define MT_PLE_FL_Q3_CTRL MT_PLE(0x3ec)
-#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x300 + 0x10 * (ac) + \
- ((n) << 2))
+#define MT_PLE_AC_QEMPTY(_n) MT_PLE(0x500 + 0x40 * (_n))
#define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2))
#define MT_MDP_BASE 0x820cd000
@@ -354,6 +353,7 @@
#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2)
#define MT_WFDMA0_GLO_CFG_RX_DMA_BUSY BIT(3)
#define MT_WFDMA0_GLO_CFG_TX_WB_DDONE BIT(6)
+#define MT_WFDMA0_GLO_CFG_FW_DWLD_BYPASS_DMASHDL BIT(9)
#define MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
#define MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN BIT(15)
#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
@@ -378,6 +378,9 @@
#define MT_WFDMA0_TX_RING16_EXT_CTRL MT_WFDMA0(0x640)
#define MT_WFDMA0_TX_RING17_EXT_CTRL MT_WFDMA0(0x644)
+#define MT_WPDMA0_MAX_CNT_MASK GENMASK(7, 0)
+#define MT_WPDMA0_BASE_PTR_MASK GENMASK(31, 16)
+
#define MT_WFDMA0_RX_RING0_EXT_CTRL MT_WFDMA0(0x680)
#define MT_WFDMA0_RX_RING1_EXT_CTRL MT_WFDMA0(0x684)
#define MT_WFDMA0_RX_RING2_EXT_CTRL MT_WFDMA0(0x688)
@@ -426,6 +429,10 @@
#define MT_WFDMA_DUMMY_CR MT_MCU_WPDMA0(0x120)
#define MT_WFDMA_NEED_REINIT BIT(1)
+#define MT_CBTOP_RGU(ofs) (0x70002000 + (ofs))
+#define MT_CBTOP_RGU_WF_SUBSYS_RST MT_CBTOP_RGU(0x600)
+#define MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH BIT(0)
+
#define MT_HW_BOUND 0x70010020
#define MT_HW_CHIPID 0x70010200
#define MT_HW_REV 0x70010204
@@ -434,12 +441,14 @@
#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
-#define MT_DMA_SHDL(ofs) (0xd6000 + (ofs))
+#define MT_DMA_SHDL(ofs) (0x7c026000 + (ofs))
#define MT_DMASHDL_SW_CONTROL MT_DMA_SHDL(0x004)
#define MT_DMASHDL_DMASHDL_BYPASS BIT(28)
#define MT_DMASHDL_OPTIONAL MT_DMA_SHDL(0x008)
#define MT_DMASHDL_PAGE MT_DMA_SHDL(0x00c)
+#define MT_DMASHDL_GROUP_SEQ_ORDER BIT(16)
#define MT_DMASHDL_REFILL MT_DMA_SHDL(0x010)
+#define MT_DMASHDL_REFILL_MASK GENMASK(31, 16)
#define MT_DMASHDL_PKT_MAX_SIZE MT_DMA_SHDL(0x01c)
#define MT_DMASHDL_PKT_MAX_SIZE_PLE GENMASK(11, 0)
#define MT_DMASHDL_PKT_MAX_SIZE_PSE GENMASK(27, 16)
@@ -454,6 +463,46 @@
#define MT_DMASHDL_SCHED_SET(_n) MT_DMA_SHDL(0x070 + ((_n) << 2))
+#define MT_WFDMA_HOST_CONFIG 0x7c027030
+#define MT_WFDMA_HOST_CONFIG_USB_RXEVT_EP4_EN BIT(6)
+
+#define MT_UMAC(ofs) (0x74000000 + (ofs))
+#define MT_UDMA_TX_QSEL MT_UMAC(0x008)
+#define MT_FW_DL_EN BIT(3)
+
+#define MT_UDMA_WLCFG_1 MT_UMAC(0x00c)
+#define MT_WL_RX_AGG_PKT_LMT GENMASK(7, 0)
+#define MT_WL_TX_TMOUT_LMT GENMASK(27, 8)
+
+#define MT_UDMA_WLCFG_0 MT_UMAC(0x18)
+#define MT_WL_RX_AGG_TO GENMASK(7, 0)
+#define MT_WL_RX_AGG_LMT GENMASK(15, 8)
+#define MT_WL_TX_TMOUT_FUNC_EN BIT(16)
+#define MT_WL_TX_DPH_CHK_EN BIT(17)
+#define MT_WL_RX_MPSZ_PAD0 BIT(18)
+#define MT_WL_RX_FLUSH BIT(19)
+#define MT_TICK_1US_EN BIT(20)
+#define MT_WL_RX_AGG_EN BIT(21)
+#define MT_WL_RX_EN BIT(22)
+#define MT_WL_TX_EN BIT(23)
+#define MT_WL_RX_BUSY BIT(30)
+#define MT_WL_TX_BUSY BIT(31)
+
+#define MT_UDMA_CONN_INFRA_STATUS MT_UMAC(0xa20)
+#define MT_UDMA_CONN_WFSYS_INIT_DONE BIT(22)
+#define MT_UDMA_CONN_INFRA_STATUS_SEL MT_UMAC(0xa24)
+
+#define MT_SSUSB_EPCTL_CSR(ofs) (0x74011800 + (ofs))
+#define MT_SSUSB_EPCTL_CSR_EP_RST_OPT MT_SSUSB_EPCTL_CSR(0x090)
+
+#define MT_UWFDMA0(ofs) (0x7c024000 + (ofs))
+#define MT_UWFDMA0_GLO_CFG MT_UWFDMA0(0x208)
+#define MT_UWFDMA0_GLO_CFG_EXT0 MT_UWFDMA0(0x2b0)
+#define MT_UWFDMA0_TX_RING_EXT_CTRL(_n) MT_UWFDMA0(0x600 + ((_n) << 2))
+
+#define MT_CONN_STATUS 0x7c053c10
+#define MT_WIFI_PATCH_DL_STATE BIT(0)
+
#define MT_CONN_ON_LPCTL 0x7c060010
#define PCIE_LPCR_HOST_OWN_SYNC BIT(2)
#define PCIE_LPCR_HOST_CLR_OWN BIT(1)
@@ -464,6 +513,7 @@
#define WFSYS_SW_INIT_DONE BIT(4)
#define MT_CONN_ON_MISC 0x7c0600f0
+#define MT_TOP_MISC2_FW_PWR_ON BIT(0)
#define MT_TOP_MISC2_FW_N9_RDY GENMASK(1, 0)
#endif
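
The new GENMASK()-based definitions above (MT_WPDMA0_MAX_CNT_MASK, MT_DMASHDL_REFILL_MASK, and friends) pair naturally with the bitfield helpers from <linux/bitfield.h>. A minimal sketch of packing and unpacking such a field, illustrative only (`ext_ctrl` is a hypothetical register value, not taken from this patch):

        #include <linux/bitfield.h>

        u32 ext_ctrl = 0;
        u16 base;

        /* pack: ring depth 4 into bits 7:0, base pointer 0x80 into bits 31:16 */
        ext_ctrl |= FIELD_PREP(MT_WPDMA0_MAX_CNT_MASK, 4);
        ext_ctrl |= FIELD_PREP(MT_WPDMA0_BASE_PTR_MASK, 0x80);

        /* unpack: recover the base pointer */
        base = FIELD_GET(MT_WPDMA0_BASE_PTR_MASK, ext_ctrl);   /* 0x80 */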
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
index 65d693902c22..af26d59fa2f0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -41,6 +41,7 @@ static void mt7921s_unregister_device(struct mt7921_dev *dev)
{
struct mt76_connac_pm *pm = &dev->pm;
+ cancel_work_sync(&dev->init_work);
mt76_unregister_device(&dev->mt76);
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
@@ -58,7 +59,10 @@ static int mt7921s_parse_intr(struct mt76_dev *dev, struct mt76s_intr *intr)
struct mt7921_sdio_intr *irq_data = sdio->intr_data;
int i, err;
+ sdio_claim_host(sdio->func);
err = sdio_readsb(sdio->func, irq_data, MCR_WHISR, sizeof(*irq_data));
+ sdio_release_host(sdio->func);
+
if (err < 0)
return err;
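
The hunk above brackets sdio_readsb() with sdio_claim_host()/sdio_release_host(): every SDIO register or block transfer must run with the host claimed, and claiming may sleep, so the pattern is only valid in process context (here, the txrx worker). The general shape, as a sketch:

        sdio_claim_host(func);          /* exclusive access to the MMC host */
        err = sdio_readsb(func, buf, addr, len);
        sdio_release_host(func);        /* let other users of the host back in */
        if (err < 0)
                return err;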
@@ -88,9 +92,9 @@ static int mt7921s_probe(struct sdio_func *func,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
- .tx_prepare_skb = mt7921s_tx_prepare_skb,
- .tx_complete_skb = mt7921s_tx_complete_skb,
- .tx_status_data = mt7921s_tx_status_data,
+ .tx_prepare_skb = mt7921_usb_sdio_tx_prepare_skb,
+ .tx_complete_skb = mt7921_usb_sdio_tx_complete_skb,
+ .tx_status_data = mt7921_usb_sdio_tx_status_data,
.rx_skb = mt7921_queue_rx_skb,
.sta_ps = mt7921_sta_ps,
.sta_add = mt7921_mac_sta_add,
@@ -118,7 +122,7 @@ static int mt7921s_probe(struct sdio_func *func,
struct mt7921_dev *dev;
struct mt76_dev *mdev;
- int ret, i;
+ int ret;
mdev = mt76_alloc_device(&func->dev, sizeof(*dev), &mt7921_ops,
&drv_ops);
@@ -151,16 +155,6 @@ static int mt7921s_probe(struct sdio_func *func,
goto error;
}
- for (i = 0; i < ARRAY_SIZE(mdev->sdio.xmit_buf); i++) {
- mdev->sdio.xmit_buf[i] = devm_kmalloc(mdev->dev,
- MT76S_XMIT_BUF_SZ,
- GFP_KERNEL);
- if (!mdev->sdio.xmit_buf[i]) {
- ret = -ENOMEM;
- goto error;
- }
- }
-
ret = mt76s_alloc_rx_queue(mdev, MT_RXQ_MAIN);
if (ret)
goto error;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
index ccaf8134cec7..1b3adb3d91e8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
@@ -60,7 +60,11 @@ int mt7921s_wfsys_reset(struct mt7921_dev *dev)
sdio_release_host(sdio->func);
+ clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+
/* activate mt7921s again */
+ mt7921s_mcu_drv_pmctrl(dev);
+ mt76_clear(dev, MT_CONN_STATUS, MT_WIFI_PATCH_DL_STATE);
mt7921s_mcu_fw_pmctrl(dev);
mt7921s_mcu_drv_pmctrl(dev);
@@ -81,7 +85,6 @@ int mt7921s_init_reset(struct mt7921_dev *dev)
mt7921s_wfsys_reset(dev);
mt76_worker_enable(&dev->mt76.sdio.txrx_worker);
- clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
mt7921s_enable_irq(&dev->mt76);
@@ -114,7 +117,6 @@ int mt7921s_mac_reset(struct mt7921_dev *dev)
mt76_worker_enable(&dev->mt76.sdio.net_worker);
dev->fw_assert = false;
- clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
mt7921s_enable_irq(&dev->mt76);
@@ -138,86 +140,3 @@ out:
return err;
}
-
-static void
-mt7921s_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid,
- enum mt76_txq_id qid, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key, int pid,
- struct sk_buff *skb)
-{
- __le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
-
- memset(txwi, 0, MT_SDIO_TXD_SIZE);
- mt7921_mac_write_txwi(dev, txwi, skb, wcid, key, pid, false);
- skb_push(skb, MT_SDIO_TXD_SIZE);
-}
-
-int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- enum mt76_txq_id qid, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta,
- struct mt76_tx_info *tx_info)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
- struct ieee80211_key_conf *key = info->control.hw_key;
- struct sk_buff *skb = tx_info->skb;
- int err, pad, pktid;
-
- if (unlikely(tx_info->skb->len <= ETH_HLEN))
- return -EINVAL;
-
- if (!wcid)
- wcid = &dev->mt76.global_wcid;
-
- if (sta) {
- struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
-
- if (time_after(jiffies, msta->last_txs + HZ / 4)) {
- info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
- msta->last_txs = jiffies;
- }
- }
-
- pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
- mt7921s_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
-
- mt7921_skb_add_sdio_hdr(skb, MT7921_SDIO_DATA);
- pad = round_up(skb->len, 4) - skb->len;
-
- err = mt76_skb_adjust_pad(skb, pad);
- if (err)
- /* Release pktid in case of error. */
- idr_remove(&wcid->pktid, pktid);
-
- return err;
-}
-
-void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
-{
- __le32 *txwi = (__le32 *)(e->skb->data + MT_SDIO_HDR_SIZE);
- unsigned int headroom = MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
- struct ieee80211_sta *sta;
- struct mt76_wcid *wcid;
- u16 idx;
-
- idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1]));
- wcid = rcu_dereference(mdev->wcid[idx]);
- sta = wcid_to_sta(wcid);
-
- if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7921_tx_check_aggr(sta, txwi);
-
- skb_pull(e->skb, headroom);
- mt76_tx_complete_skb(mdev, e->wcid, e->skb);
-}
-
-bool mt7921s_tx_status_data(struct mt76_dev *mdev, u8 *update)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
-
- mt7921_mutex_acquire(dev);
- mt7921_mac_sta_poll(dev);
- mt7921_mutex_release(dev);
-
- return false;
-}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
index d20f2ff01be1..54a5c712a3c3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
@@ -36,7 +36,7 @@ mt7921s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (cmd == MCU_CMD(FW_SCATTER))
type = MT7921_SDIO_FWDL;
- mt7921_skb_add_sdio_hdr(skb, type);
+ mt7921_skb_add_usb_sdio_hdr(dev, skb, type);
pad = round_up(skb->len, 4) - skb->len;
__skb_put_zero(skb, pad);
@@ -49,6 +49,26 @@ mt7921s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
return ret;
}
+static u32 mt7921s_read_rm3r(struct mt7921_dev *dev)
+{
+ struct mt76_sdio *sdio = &dev->mt76.sdio;
+
+ return sdio_readl(sdio->func, MCR_D2HRM3R, NULL);
+}
+
+static u32 mt7921s_clear_rm3r_drv_own(struct mt7921_dev *dev)
+{
+ struct mt76_sdio *sdio = &dev->mt76.sdio;
+ u32 val;
+
+ val = sdio_readl(sdio->func, MCR_D2HRM3R, NULL);
+ if (val)
+ sdio_writel(sdio->func, H2D_SW_INT_CLEAR_MAILBOX_ACK,
+ MCR_WSICR, NULL);
+
+ return val;
+}
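
These readers exist to be plugged into readx_poll_timeout() below, which repeatedly evaluates op(addr) until the supplied condition holds or the timeout expires. A sketch of the call shape (semantics per <linux/iopoll.h>; delay and timeout are in microseconds):

        #include <linux/iopoll.h>

        u32 status;
        int err;

        /* sample MCR_D2HRM3R every 2 ms, give up after 1 s */
        err = readx_poll_timeout(mt7921s_read_rm3r, dev, status,
                                 status & D2HRM3R_IS_DRIVER_OWN,
                                 2000, 1000000);
        if (err)        /* -ETIMEDOUT; status keeps the last value read */
                dev_err(dev->mt76.dev, "driver own timed out\n");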
+
int mt7921s_mcu_init(struct mt7921_dev *dev)
{
static const struct mt76_mcu_ops mt7921s_mcu_ops = {
@@ -88,6 +108,12 @@ int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev)
err = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status,
status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
+
+ if (!err && test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
+ err = readx_poll_timeout(mt7921s_read_rm3r, dev, status,
+ status & D2HRM3R_IS_DRIVER_OWN,
+ 2000, 1000000);
+
sdio_release_host(func);
if (err < 0) {
@@ -115,12 +141,24 @@ int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev)
sdio_claim_host(func);
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state)) {
+ err = readx_poll_timeout(mt7921s_clear_rm3r_drv_own,
+ dev, status,
+ !(status & D2HRM3R_IS_DRIVER_OWN),
+ 2000, 1000000);
+ if (err < 0) {
+ dev_err(dev->mt76.dev, "mailbox ACK not cleared\n");
+ goto err;
+ }
+ }
+
sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, NULL);
err = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status,
!(status & WHLPCR_IS_DRIVER_OWN), 2000, 1000000);
sdio_release_host(func);
+err:
if (err < 0) {
dev_err(dev->mt76.dev, "firmware own failed\n");
clear_bit(MT76_STATE_PM, &mphy->state);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
new file mode 100644
index 000000000000..b7771e9f1fcd
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2022 MediaTek Inc.
+ *
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt7921.h"
+#include "mcu.h"
+#include "mac.h"
+
+static const struct usb_device_id mt7921u_device_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7961, 0xff, 0xff, 0xff) },
+ { },
+};
+
+static u32 mt7921u_rr(struct mt76_dev *dev, u32 addr)
+{
+ u32 ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = ___mt76u_rr(dev, MT_VEND_READ_EXT,
+ USB_DIR_IN | MT_USB_TYPE_VENDOR, addr);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+
+static void mt7921u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | MT_USB_TYPE_VENDOR, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static u32 mt7921u_rmw(struct mt76_dev *dev, u32 addr,
+ u32 mask, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ val |= ___mt76u_rr(dev, MT_VEND_READ_EXT,
+ USB_DIR_IN | MT_USB_TYPE_VENDOR, addr) & ~mask;
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | MT_USB_TYPE_VENDOR, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return val;
+}
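
Holding usb_ctrl_mtx across both the read and the write makes the update atomic with respect to other register accesses on the control pipe. The idiom `val |= rr(addr) & ~mask` computes (old & ~mask) | new, so the caller is expected to pass `val` already positioned within `mask`. Worked example with hypothetical values:

        /* old = 0xffff0000, mask = GENMASK(7, 0), val = 0x2a
         *   rr(addr) & ~mask -> 0xffff0000
         *   result           -> 0xffff002a
         */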
+
+static void mt7921u_copy(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ int ret, i = 0, batch_len;
+ const u8 *val = data;
+
+ len = round_up(len, 4);
+
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (i < len) {
+ batch_len = min_t(int, usb->data_len, len - i);
+ memcpy(usb->data, val + i, batch_len);
+ ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | MT_USB_TYPE_VENDOR,
+ (offset + i) >> 16, offset + i,
+ usb->data, batch_len);
+ if (ret < 0)
+ break;
+
+ i += batch_len;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
+int mt7921u_mcu_power_on(struct mt7921_dev *dev)
+{
+ int ret;
+
+ ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
+ USB_DIR_OUT | MT_USB_TYPE_VENDOR,
+ 0x0, 0x1, NULL, 0);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
+ MT_TOP_MISC2_FW_PWR_ON, 500)) {
+ dev_err(dev->mt76.dev, "Timeout for power on\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int
+mt7921u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, int *seq)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ u32 pad, ep;
+ int ret;
+
+ ret = mt7921_mcu_fill_message(mdev, skb, cmd, seq);
+ if (ret)
+ return ret;
+
+ if (cmd != MCU_CMD(FW_SCATTER))
+ ep = MT_EP_OUT_INBAND_CMD;
+ else
+ ep = MT_EP_OUT_AC_BE;
+
+ mt7921_skb_add_usb_sdio_hdr(dev, skb, 0);
+ pad = round_up(skb->len, 4) + 4 - skb->len;
+ __skb_put_zero(skb, pad);
+
+ ret = mt76u_bulk_msg(&dev->mt76, skb->data, skb->len, NULL,
+ 1000, ep);
+ dev_kfree_skb(skb);
+
+ return ret;
+}
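
Note the padding differs from the SDIO path: the skb is rounded up to a 4-byte boundary and then one extra zeroed dword is appended, presumably the trailer that MT_USB_TAIL_SIZE reserves in the mcu_ops below (the patch itself does not spell out the trailer's purpose). Worked example:

        /* skb->len = 61: round_up(61, 4) = 64, pad = 64 + 4 - 61 = 7
         *   -> 3 alignment bytes plus the 4-byte zero tail
         * skb->len = 64: pad = 64 + 4 - 64 = 4 -> tail only
         */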
+
+static int mt7921u_mcu_init(struct mt7921_dev *dev)
+{
+ static const struct mt76_mcu_ops mcu_ops = {
+ .headroom = MT_SDIO_HDR_SIZE + sizeof(struct mt7921_mcu_txd),
+ .tailroom = MT_USB_TAIL_SIZE,
+ .mcu_skb_send_msg = mt7921u_mcu_send_message,
+ .mcu_parse_response = mt7921_mcu_parse_response,
+ .mcu_restart = mt76_connac_mcu_restart,
+ };
+ int ret;
+
+ dev->mt76.mcu_ops = &mcu_ops;
+
+ mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+ ret = mt7921_run_firmware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+ mt76_clear(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+
+ return 0;
+}
+
+static void mt7921u_stop(struct ieee80211_hw *hw)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+
+ mt76u_stop_tx(&dev->mt76);
+ mt7921_stop(hw);
+}
+
+static void mt7921u_cleanup(struct mt7921_dev *dev)
+{
+ clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+ mt7921u_wfsys_reset(dev);
+ mt7921_mcu_exit(dev);
+ mt76u_queues_deinit(&dev->mt76);
+}
+
+static int mt7921u_probe(struct usb_interface *usb_intf,
+ const struct usb_device_id *id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ .txwi_size = MT_SDIO_TXD_SIZE,
+ .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ,
+ .survey_flags = SURVEY_INFO_TIME_TX |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BSS_RX,
+ .tx_prepare_skb = mt7921_usb_sdio_tx_prepare_skb,
+ .tx_complete_skb = mt7921_usb_sdio_tx_complete_skb,
+ .tx_status_data = mt7921_usb_sdio_tx_status_data,
+ .rx_skb = mt7921_queue_rx_skb,
+ .sta_ps = mt7921_sta_ps,
+ .sta_add = mt7921_mac_sta_add,
+ .sta_assoc = mt7921_mac_sta_assoc,
+ .sta_remove = mt7921_mac_sta_remove,
+ .update_survey = mt7921_update_channel,
+ };
+ static const struct mt7921_hif_ops hif_ops = {
+ .mcu_init = mt7921u_mcu_init,
+ .init_reset = mt7921u_init_reset,
+ .reset = mt7921u_mac_reset,
+ };
+ static struct mt76_bus_ops bus_ops = {
+ .rr = mt7921u_rr,
+ .wr = mt7921u_wr,
+ .rmw = mt7921u_rmw,
+ .read_copy = mt76u_read_copy,
+ .write_copy = mt7921u_copy,
+ .type = MT76_BUS_USB,
+ };
+ struct usb_device *udev = interface_to_usbdev(usb_intf);
+ struct ieee80211_ops *ops;
+ struct ieee80211_hw *hw;
+ struct mt7921_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ ops = devm_kmemdup(&usb_intf->dev, &mt7921_ops, sizeof(mt7921_ops),
+ GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
+
+ ops->stop = mt7921u_stop;
+
+ mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), ops, &drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt7921_dev, mt76);
+ dev->hif_ops = &hif_ops;
+
+ udev = usb_get_dev(udev);
+ usb_reset_device(udev);
+
+ usb_set_intfdata(usb_intf, dev);
+
+ ret = __mt76u_init(mdev, usb_intf, &bus_ops);
+ if (ret < 0)
+ goto error;
+
+ mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ (mt76_rr(dev, MT_HW_REV) & 0xff);
+ dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ if (mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY)) {
+ ret = mt7921u_wfsys_reset(dev);
+ if (ret)
+ goto error;
+ }
+
+ ret = mt7921u_mcu_power_on(dev);
+ if (ret)
+ goto error;
+
+ ret = mt76u_alloc_mcu_queue(&dev->mt76);
+ if (ret)
+ goto error;
+
+ ret = mt76u_alloc_queues(&dev->mt76);
+ if (ret)
+ goto error;
+
+ ret = mt7921u_dma_init(dev);
+ if (ret)
+ goto error;
+
+ hw = mt76_hw(dev);
+ /* check hw sg support in order to enable AMSDU */
+ hw->max_tx_fragments = mdev->usb.sg_en ? MT_HW_TXP_MAX_BUF_NUM : 1;
+
+ ret = mt7921_register_device(dev);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ mt76u_queues_deinit(&dev->mt76);
+
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ mt76_free_device(&dev->mt76);
+
+ return ret;
+}
+
+static void mt7921u_disconnect(struct usb_interface *usb_intf)
+{
+ struct mt7921_dev *dev = usb_get_intfdata(usb_intf);
+
+ cancel_work_sync(&dev->init_work);
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return;
+
+ mt76_unregister_device(&dev->mt76);
+ mt7921u_cleanup(dev);
+
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ mt76_free_device(&dev->mt76);
+}
+
+MODULE_DEVICE_TABLE(usb, mt7921u_device_table);
+MODULE_FIRMWARE(MT7921_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7921_ROM_PATCH);
+
+static struct usb_driver mt7921u_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt7921u_device_table,
+ .probe = mt7921u_probe,
+ .disconnect = mt7921u_disconnect,
+ .soft_unbind = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt7921u_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c
new file mode 100644
index 000000000000..99bcbd858b65
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2022 MediaTek Inc.
+ *
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt7921.h"
+#include "mcu.h"
+#include "mac.h"
+
+static u32 mt7921u_uhw_rr(struct mt76_dev *dev, u32 addr)
+{
+ u32 ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = ___mt76u_rr(dev, MT_VEND_DEV_MODE,
+ USB_DIR_IN | MT_USB_TYPE_UHW_VENDOR, addr);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+
+static void mt7921u_uhw_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ___mt76u_wr(dev, MT_VEND_WRITE,
+ USB_DIR_OUT | MT_USB_TYPE_UHW_VENDOR, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static void mt7921u_dma_prefetch(struct mt7921_dev *dev)
+{
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(0),
+ MT_WPDMA0_MAX_CNT_MASK, 4);
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(0),
+ MT_WPDMA0_BASE_PTR_MASK, 0x80);
+
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(1),
+ MT_WPDMA0_MAX_CNT_MASK, 4);
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(1),
+ MT_WPDMA0_BASE_PTR_MASK, 0xc0);
+
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(2),
+ MT_WPDMA0_MAX_CNT_MASK, 4);
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(2),
+ MT_WPDMA0_BASE_PTR_MASK, 0x100);
+
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(3),
+ MT_WPDMA0_MAX_CNT_MASK, 4);
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(3),
+ MT_WPDMA0_BASE_PTR_MASK, 0x140);
+
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(4),
+ MT_WPDMA0_MAX_CNT_MASK, 4);
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(4),
+ MT_WPDMA0_BASE_PTR_MASK, 0x180);
+
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(16),
+ MT_WPDMA0_MAX_CNT_MASK, 4);
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(16),
+ MT_WPDMA0_BASE_PTR_MASK, 0x280);
+
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(17),
+ MT_WPDMA0_MAX_CNT_MASK, 4);
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(17),
+ MT_WPDMA0_BASE_PTR_MASK, 0x2c0);
+}
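
The prefetch programming is written out as one rmw pair per ring; the same table can be expressed as a loop. A behavior-equivalent sketch (not part of the patch):

        static const struct { u8 ring; u16 base; } prefetch[] = {
                { 0, 0x080 }, { 1, 0x0c0 }, { 2, 0x100 }, { 3, 0x140 },
                { 4, 0x180 }, { 16, 0x280 }, { 17, 0x2c0 },
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(prefetch); i++) {
                mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(prefetch[i].ring),
                         MT_WPDMA0_MAX_CNT_MASK, 4);
                mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(prefetch[i].ring),
                         MT_WPDMA0_BASE_PTR_MASK, prefetch[i].base);
        }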
+
+static void mt7921u_wfdma_init(struct mt7921_dev *dev)
+{
+ mt7921u_dma_prefetch(dev);
+
+ mt76_clear(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_OMIT_RX_INFO);
+ mt76_set(dev, MT_UWFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
+ MT_WFDMA0_GLO_CFG_FW_DWLD_BYPASS_DMASHDL |
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ /* disable dmashdl */
+ mt76_clear(dev, MT_UWFDMA0_GLO_CFG_EXT0,
+ MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
+ mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);
+
+ mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
+}
+
+static int mt7921u_dma_rx_evt_ep4(struct mt7921_dev *dev)
+{
+ if (!mt76_poll(dev, MT_UWFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000))
+ return -ETIMEDOUT;
+
+ mt76_clear(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_USB_RXEVT_EP4_EN);
+ mt76_set(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ return 0;
+}
+
+static void mt7921u_epctl_rst_opt(struct mt7921_dev *dev, bool reset)
+{
+ u32 val;
+
+ /* usb endpoint reset opt
+ * bits[4,9]: out blk ep 4-9
+ * bits[20,21]: in blk ep 4-5
+ * bits[22]: in int ep 6
+ */
+ val = mt7921u_uhw_rr(&dev->mt76, MT_SSUSB_EPCTL_CSR_EP_RST_OPT);
+ if (reset)
+ val |= GENMASK(9, 4) | GENMASK(22, 20);
+ else
+ val &= ~(GENMASK(9, 4) | GENMASK(22, 20));
+ mt7921u_uhw_wr(&dev->mt76, MT_SSUSB_EPCTL_CSR_EP_RST_OPT, val);
+}
+
+int mt7921u_dma_init(struct mt7921_dev *dev)
+{
+ int err;
+
+ mt7921u_wfdma_init(dev);
+
+ mt76_clear(dev, MT_UDMA_WLCFG_0, MT_WL_RX_FLUSH);
+
+ mt76_set(dev, MT_UDMA_WLCFG_0,
+ MT_WL_RX_EN | MT_WL_TX_EN |
+ MT_WL_RX_MPSZ_PAD0 | MT_TICK_1US_EN);
+ mt76_clear(dev, MT_UDMA_WLCFG_0,
+ MT_WL_RX_AGG_TO | MT_WL_RX_AGG_LMT);
+ mt76_clear(dev, MT_UDMA_WLCFG_1, MT_WL_RX_AGG_PKT_LMT);
+
+ err = mt7921u_dma_rx_evt_ep4(dev);
+ if (err)
+ return err;
+
+ mt7921u_epctl_rst_opt(dev, false);
+
+ return 0;
+}
+
+int mt7921u_wfsys_reset(struct mt7921_dev *dev)
+{
+ u32 val;
+ int i;
+
+ mt7921u_epctl_rst_opt(dev, false);
+
+ val = mt7921u_uhw_rr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST);
+ val |= MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH;
+ mt7921u_uhw_wr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST, val);
+
+ usleep_range(10, 20);
+
+ val = mt7921u_uhw_rr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST);
+ val &= ~MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH;
+ mt7921u_uhw_wr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST, val);
+
+ mt7921u_uhw_wr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS_SEL, 0);
+ for (i = 0; i < MT7921_WFSYS_INIT_RETRY_COUNT; i++) {
+ val = mt7921u_uhw_rr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS);
+ if (val & MT_UDMA_CONN_WFSYS_INIT_DONE)
+ break;
+
+ msleep(100);
+ }
+
+ if (i == MT7921_WFSYS_INIT_RETRY_COUNT)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int mt7921u_init_reset(struct mt7921_dev *dev)
+{
+ set_bit(MT76_RESET, &dev->mphy.state);
+
+ wake_up(&dev->mt76.mcu.wait);
+ mt7921_mcu_exit(dev);
+
+ mt76u_stop_rx(&dev->mt76);
+ mt76u_stop_tx(&dev->mt76);
+
+ mt7921u_wfsys_reset(dev);
+
+ clear_bit(MT76_RESET, &dev->mphy.state);
+
+ return mt76u_resume_rx(&dev->mt76);
+}
+
+int mt7921u_mac_reset(struct mt7921_dev *dev)
+{
+ int err;
+
+ mt76_txq_schedule_all(&dev->mphy);
+ mt76_worker_disable(&dev->mt76.tx_worker);
+
+ set_bit(MT76_RESET, &dev->mphy.state);
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+
+ wake_up(&dev->mt76.mcu.wait);
+ mt7921_mcu_exit(dev);
+
+ mt76u_stop_rx(&dev->mt76);
+ mt76u_stop_tx(&dev->mt76);
+
+ mt7921u_wfsys_reset(dev);
+
+ clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+ err = mt76u_resume_rx(&dev->mt76);
+ if (err)
+ goto out;
+
+ err = mt7921u_mcu_power_on(dev);
+ if (err)
+ goto out;
+
+ err = mt7921u_dma_init(dev);
+ if (err)
+ goto out;
+
+ mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
+ mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+
+ err = mt7921_run_firmware(dev);
+ if (err)
+ goto out;
+
+ mt76_clear(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+
+ err = mt7921_mcu_set_eeprom(dev);
+ if (err)
+ goto out;
+
+ err = mt7921_mac_init(dev);
+ if (err)
+ goto out;
+
+ err = __mt7921_start(&dev->phy);
+out:
+ clear_bit(MT76_RESET, &dev->mphy.state);
+
+ mt76_worker_enable(&dev->mt76.tx_worker);
+
+ return err;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index 54f72d215948..def7f325f5c5 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -12,6 +12,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
#include <linux/sched.h>
#include <linux/kthread.h>
@@ -627,6 +629,7 @@ int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
const struct mt76_bus_ops *bus_ops)
{
struct mt76_sdio *sdio = &dev->sdio;
+ u32 host_max_cap;
int err;
err = mt76_worker_setup(dev->hw, &sdio->status_worker,
@@ -648,7 +651,16 @@ int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
dev->bus = bus_ops;
dev->sdio.func = func;
- return 0;
+ host_max_cap = min_t(u32, func->card->host->max_req_size,
+ func->cur_blksize *
+ func->card->host->max_blk_count);
+ dev->sdio.xmit_buf_sz = min_t(u32, host_max_cap, MT76S_XMIT_BUF_SZ);
+ dev->sdio.xmit_buf = devm_kmalloc(dev->dev, dev->sdio.xmit_buf_sz,
+ GFP_KERNEL);
+ if (!dev->sdio.xmit_buf)
+ err = -ENOMEM;
+
+ return err;
}
EXPORT_SYMBOL_GPL(mt76s_init);
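
The bounce buffer is now sized to what the host controller can actually move in a single request, rather than a fixed per-queue constant. Worked example with hypothetical controller limits:

        /* max_req_size = 65536, cur_blksize = 512, max_blk_count = 64:
         *   host_max_cap = min(65536, 512 * 64) = 32768
         *   xmit_buf_sz  = min(32768, MT76S_XMIT_BUF_SZ)
         * a weak controller therefore caps the buffer below the driver default.
         */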
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.h b/drivers/net/wireless/mediatek/mt76/sdio.h
index 99db4ad93b7c..27d5d2077eba 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.h
+++ b/drivers/net/wireless/mediatek/mt76/sdio.h
@@ -65,6 +65,7 @@
#define MCR_H2DSM0R 0x0070
#define H2D_SW_INT_READ BIT(16)
#define H2D_SW_INT_WRITE BIT(17)
+#define H2D_SW_INT_CLEAR_MAILBOX_ACK BIT(22)
#define MCR_H2DSM1R 0x0074
#define MCR_D2HRM0R 0x0078
@@ -109,6 +110,7 @@
#define MCR_H2DSM2R 0x0160 /* supported in CONNAC2 */
#define MCR_H2DSM3R 0x0164 /* supported in CONNAC2 */
#define MCR_D2HRM3R 0x0174 /* supported in CONNAC2 */
+#define D2HRM3R_IS_DRIVER_OWN BIT(0)
#define MCR_WTQCR8 0x0190 /* supported in CONNAC2 */
#define MCR_WTQCR9 0x0194 /* supported in CONNAC2 */
#define MCR_WTQCR10 0x0198 /* supported in CONNAC2 */
diff --git a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
index 801590a0a334..a2601aa9e7b1 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
@@ -102,7 +102,10 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
buf = page_address(page);
+ sdio_claim_host(sdio->func);
err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
+ sdio_release_host(sdio->func);
+
if (err < 0) {
dev_err(dev->dev, "sdio read data failed:%d\n", err);
put_page(page);
@@ -115,7 +118,7 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
__le32 *rxd = (__le32 *)buf;
/* parse rxd to get the actual packet length */
- len = FIELD_GET(GENMASK(15, 0), le32_to_cpu(rxd[0]));
+ len = le32_get_bits(rxd[0], GENMASK(15, 0));
e->skb = mt76s_build_rx_skb(buf, len, round_up(len + 4, 4));
if (!e->skb)
break;
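
le32_get_bits() is the one-step replacement for the open-coded le32_to_cpu() + FIELD_GET() pair, and it lets sparse type-check the __le32 argument. Equivalence sketch:

        __le32 w = rxd[0];
        u32 len;

        len = FIELD_GET(GENMASK(15, 0), le32_to_cpu(w));  /* before */
        len = le32_get_bits(w, GENMASK(15, 0));           /* after, same value */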
@@ -214,7 +217,10 @@ static int __mt76s_xmit_queue(struct mt76_dev *dev, u8 *data, int len)
if (len > sdio->func->cur_blksize)
len = roundup(len, sdio->func->cur_blksize);
+ sdio_claim_host(sdio->func);
err = sdio_writesb(sdio->func, MCR_WTDR1, data, len);
+ sdio_release_host(sdio->func);
+
if (err)
dev_err(dev->dev, "sdio write failed: %d\n", err);
@@ -223,12 +229,11 @@ static int __mt76s_xmit_queue(struct mt76_dev *dev, u8 *data, int len)
static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- int qid, err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
+ int err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
bool mcu = q == dev->q_mcu[MT_MCUQ_WM];
struct mt76_sdio *sdio = &dev->sdio;
u8 pad;
- qid = mcu ? ARRAY_SIZE(sdio->xmit_buf) - 1 : q->qid;
while (q->first != q->head) {
struct mt76_queue_entry *e = &q->entry[q->first];
struct sk_buff *iter;
@@ -249,27 +254,25 @@ static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
}
pad = roundup(e->skb->len, 4) - e->skb->len;
- if (len + e->skb->len + pad + 4 > MT76S_XMIT_BUF_SZ)
+ if (len + e->skb->len + pad + 4 > dev->sdio.xmit_buf_sz)
break;
if (mt76s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz,
&ple_sz))
break;
- memcpy(sdio->xmit_buf[qid] + len, e->skb->data,
- skb_headlen(e->skb));
+ memcpy(sdio->xmit_buf + len, e->skb->data, skb_headlen(e->skb));
len += skb_headlen(e->skb);
nframes++;
skb_walk_frags(e->skb, iter) {
- memcpy(sdio->xmit_buf[qid] + len, iter->data,
- iter->len);
+ memcpy(sdio->xmit_buf + len, iter->data, iter->len);
len += iter->len;
nframes++;
}
if (unlikely(pad)) {
- memset(sdio->xmit_buf[qid] + len, 0, pad);
+ memset(sdio->xmit_buf + len, 0, pad);
len += pad;
}
next:
@@ -278,8 +281,8 @@ next:
}
if (nframes) {
- memset(sdio->xmit_buf[qid] + len, 0, 4);
- err = __mt76s_xmit_queue(dev, sdio->xmit_buf[qid], len + 4);
+ memset(sdio->xmit_buf + len, 0, 4);
+ err = __mt76s_xmit_queue(dev, sdio->xmit_buf, len + 4);
if (err)
return err;
}
@@ -298,6 +301,7 @@ void mt76s_txrx_worker(struct mt76_sdio *sdio)
/* disable interrupt */
sdio_claim_host(sdio->func);
sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
+ sdio_release_host(sdio->func);
do {
nframes = 0;
@@ -327,6 +331,7 @@ void mt76s_txrx_worker(struct mt76_sdio *sdio)
} while (nframes > 0);
/* enable interrupt */
+ sdio_claim_host(sdio->func);
sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
sdio_release_host(sdio->func);
}
@@ -341,6 +346,7 @@ void mt76s_sdio_irq(struct sdio_func *func)
test_bit(MT76_MCU_RESET, &dev->phy.state))
return;
+ sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
mt76_worker_schedule(&sdio->txrx_worker);
}
EXPORT_SYMBOL_GPL(mt76s_sdio_irq);
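
The new write masks the function interrupt (WHLPCR_INT_EN_CLR) in the SDIO IRQ handler before scheduling the worker; the worker re-enables it (WHLPCR_INT_EN_SET, hunk above) once the queues are drained. This is the usual mask-in-handler, unmask-in-bottom-half shape, and it avoids re-raising the interrupt while work is already pending:

        /* IRQ handler (host already claimed by the MMC core): mask and defer */
        sdio_writel(func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
        mt76_worker_schedule(&sdio->txrx_worker);

        /* worker, after draining: unmask so the next event interrupts again */
        sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);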
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
index 1a01ad7a4c16..382b45639f26 100644
--- a/drivers/net/wireless/mediatek/mt76/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/testmode.c
@@ -409,7 +409,6 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt76_dev *dev = phy->dev;
struct mt76_testmode_data *td = &phy->test;
struct nlattr *tb[NUM_MT76_TM_ATTRS];
- bool ext_phy = phy != &dev->phy;
u32 state;
int err;
int i;
@@ -447,8 +446,8 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_LDPC], &td->tx_rate_ldpc, 0, 1) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_STBC], &td->tx_rate_stbc, 0, 1) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_LTF], &td->tx_ltf, 0, 2) ||
- mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_ANTENNA], &td->tx_antenna_mask,
- 1 << (ext_phy * 2), phy->antenna_mask << (ext_phy * 2)) ||
+ mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_ANTENNA],
+ &td->tx_antenna_mask, 0, 0xff) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_SPE_IDX], &td->tx_spe_idx, 0, 27) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_DUTY_CYCLE],
&td->tx_duty_cycle, 0, 99) ||
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 0a7006c8959b..a85e192c9d59 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -15,9 +15,8 @@ static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");
-static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
- u8 req_type, u16 val, u16 offset,
- void *buf, size_t len)
+int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
+ u16 val, u16 offset, void *buf, size_t len)
{
struct usb_interface *uintf = to_usb_interface(dev->dev);
struct usb_device *udev = interface_to_usbdev(uintf);
@@ -45,6 +44,7 @@ static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
req, offset, ret);
return ret;
}
+EXPORT_SYMBOL_GPL(__mt76u_vendor_request);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
u8 req_type, u16 val, u16 offset,
@@ -62,22 +62,21 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);
-static u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u32 addr)
+u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr)
{
struct mt76_usb *usb = &dev->usb;
u32 data = ~0;
int ret;
- ret = __mt76u_vendor_request(dev, req,
- USB_DIR_IN | USB_TYPE_VENDOR,
- addr >> 16, addr, usb->data,
- sizeof(__le32));
+ ret = __mt76u_vendor_request(dev, req, req_type, addr >> 16,
+ addr, usb->data, sizeof(__le32));
if (ret == sizeof(__le32))
data = get_unaligned_le32(usb->data);
trace_usb_reg_rr(dev, addr, data);
return data;
}
+EXPORT_SYMBOL_GPL(___mt76u_rr);
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
@@ -95,7 +94,8 @@ static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
break;
}
- return ___mt76u_rr(dev, req, addr & ~MT_VEND_TYPE_MASK);
+ return ___mt76u_rr(dev, req, USB_DIR_IN | USB_TYPE_VENDOR,
+ addr & ~MT_VEND_TYPE_MASK);
}
static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
@@ -109,29 +109,17 @@ static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
return ret;
}
-static u32 mt76u_rr_ext(struct mt76_dev *dev, u32 addr)
-{
- u32 ret;
-
- mutex_lock(&dev->usb.usb_ctrl_mtx);
- ret = ___mt76u_rr(dev, MT_VEND_READ_EXT, addr);
- mutex_unlock(&dev->usb.usb_ctrl_mtx);
-
- return ret;
-}
-
-static void ___mt76u_wr(struct mt76_dev *dev, u8 req,
- u32 addr, u32 val)
+void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
+ u32 addr, u32 val)
{
struct mt76_usb *usb = &dev->usb;
put_unaligned_le32(val, usb->data);
- __mt76u_vendor_request(dev, req,
- USB_DIR_OUT | USB_TYPE_VENDOR,
- addr >> 16, addr, usb->data,
- sizeof(__le32));
+ __mt76u_vendor_request(dev, req, req_type, addr >> 16,
+ addr, usb->data, sizeof(__le32));
trace_usb_reg_wr(dev, addr, val);
}
+EXPORT_SYMBOL_GPL(___mt76u_wr);
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
@@ -145,7 +133,8 @@ static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
req = MT_VEND_MULTI_WRITE;
break;
}
- ___mt76u_wr(dev, req, addr & ~MT_VEND_TYPE_MASK, val);
+ ___mt76u_wr(dev, req, USB_DIR_OUT | USB_TYPE_VENDOR,
+ addr & ~MT_VEND_TYPE_MASK, val);
}
static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
@@ -155,13 +144,6 @@ static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
-static void mt76u_wr_ext(struct mt76_dev *dev, u32 addr, u32 val)
-{
- mutex_lock(&dev->usb.usb_ctrl_mtx);
- ___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
- mutex_unlock(&dev->usb.usb_ctrl_mtx);
-}
-
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
u32 mask, u32 val)
{
@@ -173,17 +155,6 @@ static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
return val;
}
-static u32 mt76u_rmw_ext(struct mt76_dev *dev, u32 addr,
- u32 mask, u32 val)
-{
- mutex_lock(&dev->usb.usb_ctrl_mtx);
- val |= ___mt76u_rr(dev, MT_VEND_READ_EXT, addr) & ~mask;
- ___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
- mutex_unlock(&dev->usb.usb_ctrl_mtx);
-
- return val;
-}
-
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
const void *data, int len)
{
@@ -216,33 +187,8 @@ static void mt76u_copy(struct mt76_dev *dev, u32 offset,
mutex_unlock(&usb->usb_ctrl_mtx);
}
-static void mt76u_copy_ext(struct mt76_dev *dev, u32 offset,
- const void *data, int len)
-{
- struct mt76_usb *usb = &dev->usb;
- int ret, i = 0, batch_len;
- const u8 *val = data;
-
- len = round_up(len, 4);
- mutex_lock(&usb->usb_ctrl_mtx);
- while (i < len) {
- batch_len = min_t(int, usb->data_len, len - i);
- memcpy(usb->data, val + i, batch_len);
- ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
- USB_DIR_OUT | USB_TYPE_VENDOR,
- (offset + i) >> 16, offset + i,
- usb->data, batch_len);
- if (ret < 0)
- break;
-
- i += batch_len;
- }
- mutex_unlock(&usb->usb_ctrl_mtx);
-}
-
-static void
-mt76u_read_copy_ext(struct mt76_dev *dev, u32 offset,
- void *data, int len)
+void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
+ void *data, int len)
{
struct mt76_usb *usb = &dev->usb;
int i = 0, batch_len, ret;
@@ -264,6 +210,7 @@ mt76u_read_copy_ext(struct mt76_dev *dev, u32 offset,
}
mutex_unlock(&usb->usb_ctrl_mtx);
}
+EXPORT_SYMBOL_GPL(mt76u_read_copy);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
const u16 offset, const u32 val)
@@ -1112,24 +1059,13 @@ static const struct mt76_queue_ops usb_queue_ops = {
.kick = mt76u_tx_kick,
};
-int mt76u_init(struct mt76_dev *dev,
- struct usb_interface *intf, bool ext)
+int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
+ struct mt76_bus_ops *ops)
{
- static struct mt76_bus_ops mt76u_ops = {
- .read_copy = mt76u_read_copy_ext,
- .wr_rp = mt76u_wr_rp,
- .rd_rp = mt76u_rd_rp,
- .type = MT76_BUS_USB,
- };
struct usb_device *udev = interface_to_usbdev(intf);
struct mt76_usb *usb = &dev->usb;
int err;
- mt76u_ops.rr = ext ? mt76u_rr_ext : mt76u_rr;
- mt76u_ops.wr = ext ? mt76u_wr_ext : mt76u_wr;
- mt76u_ops.rmw = ext ? mt76u_rmw_ext : mt76u_rmw;
- mt76u_ops.write_copy = ext ? mt76u_copy_ext : mt76u_copy;
-
INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
@@ -1141,7 +1077,7 @@ int mt76u_init(struct mt76_dev *dev,
return -ENOMEM;
mutex_init(&usb->usb_ctrl_mtx);
- dev->bus = &mt76u_ops;
+ dev->bus = ops;
dev->queue_ops = &usb_queue_ops;
dev_set_drvdata(&udev->dev, dev);
@@ -1167,6 +1103,23 @@ int mt76u_init(struct mt76_dev *dev,
return 0;
}
+EXPORT_SYMBOL_GPL(__mt76u_init);
+
+int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf)
+{
+ static struct mt76_bus_ops bus_ops = {
+ .rr = mt76u_rr,
+ .wr = mt76u_wr,
+ .rmw = mt76u_rmw,
+ .read_copy = mt76u_read_copy,
+ .write_copy = mt76u_copy,
+ .wr_rp = mt76u_wr_rp,
+ .rd_rp = mt76u_rd_rp,
+ .type = MT76_BUS_USB,
+ };
+
+ return __mt76u_init(dev, intf, &bus_ops);
+}
EXPORT_SYMBOL_GPL(mt76u_init);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 2c2ed4b09efd..18420e954402 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -240,7 +240,7 @@ free:
return ret;
}
-static int wilc_bus_remove(struct spi_device *spi)
+static void wilc_bus_remove(struct spi_device *spi)
{
struct wilc *wilc = spi_get_drvdata(spi);
struct wilc_spi *spi_priv = wilc->bus_data;
@@ -248,8 +248,6 @@ static int wilc_bus_remove(struct spi_device *spi)
clk_disable_unprepare(wilc->rtc_clk);
wilc_netdev_cleanup(wilc);
kfree(spi_priv);
-
- return 0;
}
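
This follows the tree-wide conversion of the SPI core's remove() callback to return void; any error code returned from remove was ignored by the bus core anyway, so dropping `return 0` loses nothing.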
static const struct of_device_id wilc_of_match[] = {
@@ -727,10 +725,7 @@ static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz)
int nbytes;
u8 rsp;
- if (sz <= DATA_PKT_SZ)
- nbytes = sz;
- else
- nbytes = DATA_PKT_SZ;
+ nbytes = min_t(u32, sz, DATA_PKT_SZ);
/*
* Data Response header
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 2987ad9271f6..87e98ab068ed 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -382,6 +382,8 @@ static int ray_config(struct pcmcia_device *link)
goto failed;
local->sram = ioremap(link->resource[2]->start,
resource_size(link->resource[2]));
+ if (!local->sram)
+ goto failed;
/*** Set up 16k window for shared memory (receive buffer) ***************/
link->resource[3]->flags |=
@@ -396,6 +398,8 @@ static int ray_config(struct pcmcia_device *link)
goto failed;
local->rmem = ioremap(link->resource[3]->start,
resource_size(link->resource[3]));
+ if (!local->rmem)
+ goto failed;
/*** Set up window for attribute memory ***********************************/
link->resource[4]->flags |=
@@ -410,6 +414,8 @@ static int ray_config(struct pcmcia_device *link)
goto failed;
local->amem = ioremap(link->resource[4]->start,
resource_size(link->resource[4]));
+ if (!local->amem)
+ goto failed;
dev_dbg(&link->dev, "ray_config sram=%p\n", local->sram);
dev_dbg(&link->dev, "ray_config rmem=%p\n", local->rmem);
diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c
index 7a0355dc6bab..32970ea4b4e7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/cam.c
+++ b/drivers/net/wireless/realtek/rtlwifi/cam.c
@@ -208,7 +208,7 @@ void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index)
u32 ul_command;
u32 ul_content;
- u32 ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ u32 ul_encalgo;
u8 entry_i;
switch (rtlpriv->sec.pairwise_enc_algorithm) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
index 04735da11168..da54e51badd3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
@@ -396,36 +396,6 @@ void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
}
}
-static void _rtl92ce_phy_set_rf_sleep(struct ieee80211_hw *hw)
-{
- u32 u4b_tmp;
- u8 delay = 5;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
- rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
- rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
- u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
- while (u4b_tmp != 0 && delay > 0) {
- rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
- rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
- rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
- u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
- delay--;
- }
- if (delay == 0) {
- rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
- rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
- rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
- "Switch RF timeout !!!\n");
- return;
- }
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
- rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
-}
-
static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
enum rf_pwrstate rfpwr_state)
{
@@ -519,7 +489,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
jiffies_to_msecs(jiffies -
ppsc->last_awake_jiffies));
ppsc->last_sleep_jiffies = jiffies;
- _rtl92ce_phy_set_rf_sleep(hw);
+ _rtl92c_phy_set_rf_sleep(hw);
break;
}
default:
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index b53daf1b29f7..876c14d46c2f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -334,6 +334,7 @@ static const struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x04f2, 0xaff7, rtl92cu_hal_cfg)}, /*Xavi*/
{RTL_USB_DEVICE(0x04f2, 0xaff9, rtl92cu_hal_cfg)}, /*Xavi*/
{RTL_USB_DEVICE(0x04f2, 0xaffa, rtl92cu_hal_cfg)}, /*Xavi*/
+ {RTL_USB_DEVICE(0x0846, 0x9042, rtl92cu_hal_cfg)}, /*On Networks N150MA*/
/****** 8188CUS Slim Combo ********/
{RTL_USB_DEVICE(0x04f2, 0xaff8, rtl92cu_hal_cfg)}, /*Xavi*/
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index f6bff0ebd6b0..f3fe16798c59 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -872,7 +872,7 @@ static void rtl8821ae_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
else
falsealm_cnt->cnt_all = falsealm_cnt->cnt_ofdm_fail;
- /*reset OFDM FA coutner*/
+ /*reset OFDM FA counter*/
rtl_set_bbreg(hw, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 1);
rtl_set_bbreg(hw, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 0);
/* reset CCK FA counter*/
@@ -1464,7 +1464,7 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
const u8 *delta_swing_table_idx_tup_b;
const u8 *delta_swing_table_idx_tdown_b;
- /*2. Initilization ( 7 steps in total )*/
+ /*2. Initialization ( 7 steps in total )*/
rtl8812ae_get_delta_swing_table(hw,
&delta_swing_table_idx_tup_a,
&delta_swing_table_idx_tdown_a,
@@ -2502,7 +2502,7 @@ static void rtl8821ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
rtlpriv->dm.dbginfo.num_non_be_pkt = 0;
/*===============================
- * list paramter for different platform
+ * list parameter for different platform
*===============================
*/
pb_is_cur_rdl_state = &rtlpriv->dm.is_cur_rdlstate;
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index 2551e228b581..cac053f485c3 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -211,6 +211,10 @@ static void rtw_coex_wl_ccklock_detect(struct rtw_dev *rtwdev)
bool is_cck_lock_rate = false;
+ if (coex_stat->wl_coex_mode != COEX_WLINK_2G1PORT &&
+ coex_stat->wl_coex_mode != COEX_WLINK_2GFREE)
+ return;
+
if (coex_dm->bt_status == COEX_BTSTATUS_INQ_PAGE ||
coex_stat->bt_setup_link) {
coex_stat->wl_cck_lock = false;
@@ -460,6 +464,29 @@ static void rtw_coex_gnt_workaround(struct rtw_dev *rtwdev, bool force, u8 mode)
rtw_coex_set_gnt_fix(rtwdev);
}
+static void rtw_coex_monitor_bt_ctr(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ u32 tmp;
+
+ tmp = rtw_read32(rtwdev, REG_BT_ACT_STATISTICS);
+ coex_stat->hi_pri_tx = FIELD_GET(MASKLWORD, tmp);
+ coex_stat->hi_pri_rx = FIELD_GET(MASKHWORD, tmp);
+
+ tmp = rtw_read32(rtwdev, REG_BT_ACT_STATISTICS_1);
+ coex_stat->lo_pri_tx = FIELD_GET(MASKLWORD, tmp);
+ coex_stat->lo_pri_rx = FIELD_GET(MASKHWORD, tmp);
+
+ rtw_write8(rtwdev, REG_BT_COEX_ENH_INTR_CTRL,
+ BIT_R_GRANTALL_WLMASK | BIT_STATIS_BT_EN);
+
+ rtw_dbg(rtwdev, RTW_DBG_COEX,
+ "[BTCoex], Hi-Pri Rx/Tx: %d/%d, Lo-Pri Rx/Tx: %d/%d\n",
+ coex_stat->hi_pri_rx, coex_stat->hi_pri_tx,
+ coex_stat->lo_pri_rx, coex_stat->lo_pri_tx);
+}
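
MASKLWORD/MASKHWORD are this driver family's names for GENMASK(15, 0)/GENMASK(31, 16), so the helper splits each 32-bit statistics register into its 16-bit tx/rx halves. Worked example:

        /* tmp = 0x00120034:
         *   FIELD_GET(MASKLWORD, tmp) = 0x0034 -> hi_pri_tx
         *   FIELD_GET(MASKHWORD, tmp) = 0x0012 -> hi_pri_rx
         */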
+
static void rtw_coex_monitor_bt_enable(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
@@ -780,7 +807,9 @@ static void rtw_coex_update_bt_link_info(struct rtw_dev *rtwdev)
static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type)
{
struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_coex_dm *coex_dm = &rtwdev->coex.dm;
+ struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
u8 link = 0;
u8 center_chan = 0;
u8 bw;
@@ -791,7 +820,9 @@ static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type)
if (type != COEX_MEDIA_DISCONNECT)
center_chan = rtwdev->hal.current_channel;
- if (center_chan == 0) {
+ if (center_chan == 0 ||
+ (efuse->share_ant && center_chan <= 14 &&
+ coex_stat->wl_coex_mode != COEX_WLINK_2GFREE)) {
link = 0;
center_chan = 0;
bw = 0;
@@ -930,6 +961,23 @@ static void rtw_coex_set_gnt_wl(struct rtw_dev *rtwdev, u8 state)
rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, 0x0300, state);
}
+static void rtw_coex_mimo_ps(struct rtw_dev *rtwdev, bool force, bool state)
+{
+ struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
+
+ if (!force && state == coex_stat->wl_mimo_ps)
+ return;
+
+ coex_stat->wl_mimo_ps = state;
+
+ rtw_set_txrx_1ss(rtwdev, state);
+
+ rtw_coex_update_wl_ch_info(rtwdev, (u8)coex_stat->wl_connected);
+
+ rtw_dbg(rtwdev, RTW_DBG_COEX,
+ "[BTCoex], %s(): state = %d\n", __func__, state);
+}
+
static void rtw_btc_wltoggle_table_a(struct rtw_dev *rtwdev, bool force,
u8 table_case)
{
@@ -1106,7 +1154,8 @@ static void rtw_coex_set_tdma(struct rtw_dev *rtwdev, u8 byte1, u8 byte2,
ps_type = COEX_PS_WIFI_NATIVE;
rtw_coex_power_save_state(rtwdev, ps_type, 0x0, 0x0);
- } else if (byte1 & BIT(4) && !(byte1 & BIT(5))) {
+ } else if ((byte1 & BIT(4) && !(byte1 & BIT(5))) ||
+ coex_stat->wl_coex_mode == COEX_WLINK_2GFREE) {
rtw_dbg(rtwdev, RTW_DBG_COEX,
"[BTCoex], %s(): Force LPS (byte1 = 0x%x)\n", __func__,
byte1);
@@ -1802,6 +1851,54 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
rtw_coex_tdma(rtwdev, false, tdma_case | slot_type);
}
+static void rtw_coex_action_bt_game_hid(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+
+ if (efuse->share_ant) {
+ coex_stat->wl_coex_mode = COEX_WLINK_2GFREE;
+ if (coex_stat->bt_whck_test)
+ table_case = 2;
+ else if (coex_stat->wl_linkscan_proc || coex_stat->bt_hid_exist)
+ table_case = 33;
+ else if (coex_stat->bt_setup_link || coex_stat->bt_inq_page)
+ table_case = 0;
+ else if (coex_stat->bt_a2dp_exist)
+ table_case = 34;
+ else
+ table_case = 33;
+
+ tdma_case = 0;
+ } else {
+ if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1]))
+ tdma_case = 112;
+ else
+ tdma_case = 113;
+
+ table_case = 121;
+ }
+
+ if (coex_stat->wl_coex_mode == COEX_WLINK_2GFREE) {
+ if (coex_stat->wl_tput_dir == COEX_WL_TPUT_TX)
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_tx[6]);
+ else
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[5]);
+ } else {
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ }
+
+ rtw_coex_table(rtwdev, false, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev)
{
struct rtw_coex *coex = &rtwdev->coex;
@@ -1816,13 +1913,8 @@ static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev)
if (efuse->share_ant) {
/* Shared-Ant */
- if (coex_stat->bt_multi_link) {
- table_case = 10;
- tdma_case = 17;
- } else {
- table_case = 10;
- tdma_case = 5;
- }
+ table_case = 10;
+ tdma_case = 5;
} else {
/* Non-Shared-Ant */
if (coex_stat->bt_multi_link) {
@@ -2224,8 +2316,10 @@ static void rtw_coex_action_bt_a2dp_pan_hid(struct rtw_dev *rtwdev)
static void rtw_coex_action_wl_under5g(struct rtw_dev *rtwdev)
{
+ struct rtw_coex *coex = &rtwdev->coex;
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2235,6 +2329,9 @@ static void rtw_coex_action_wl_under5g(struct rtw_dev *rtwdev)
rtw_coex_write_scbd(rtwdev, COEX_SCBD_FIX2M, false);
+ if (coex_stat->bt_game_hid_exist && coex_stat->wl_linkscan_proc)
+ coex_stat->wl_coex_mode = COEX_WLINK_2GFREE;
+
if (efuse->share_ant) {
/* Shared-Ant */
table_case = 0;
@@ -2278,6 +2375,7 @@ static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev)
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
u8 table_case, tdma_case;
if (coex->under_5g)
@@ -2286,7 +2384,6 @@ static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev)
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
- rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
if (efuse->share_ant) {
/* Shared-Ant */
@@ -2298,6 +2395,16 @@ static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev)
tdma_case = 100;
}
+ if (coex_stat->bt_game_hid_exist) {
+ coex_stat->wl_coex_mode = COEX_WLINK_2GFREE;
+ if (coex_stat->wl_tput_dir == COEX_WL_TPUT_TX)
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_tx[6]);
+ else
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[5]);
+ } else {
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ }
+
rtw_coex_table(rtwdev, false, table_case);
rtw_coex_tdma(rtwdev, false, tdma_case);
}
@@ -2422,6 +2529,7 @@ static void rtw_coex_action_wl_connected(struct rtw_dev *rtwdev)
static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
{
struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_dm *coex_dm = &coex->dm;
struct rtw_coex_stat *coex_stat = &coex->stat;
bool rf4ce_en = false;
@@ -2494,6 +2602,11 @@ static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
goto exit;
}
+ if (coex_stat->bt_game_hid_exist && coex_stat->wl_connected) {
+ rtw_coex_action_bt_game_hid(rtwdev);
+ goto exit;
+ }
+
if (coex_stat->bt_whck_test) {
rtw_coex_action_bt_whql_test(rtwdev);
goto exit;
@@ -2530,6 +2643,18 @@ static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
}
exit:
+
+ if (chip->wl_mimo_ps_support) {
+ if (coex_stat->wl_coex_mode == COEX_WLINK_2GFREE) {
+ if (coex_dm->reason == COEX_RSN_2GMEDIA)
+ rtw_coex_mimo_ps(rtwdev, true, true);
+ else
+ rtw_coex_mimo_ps(rtwdev, false, true);
+ } else {
+ rtw_coex_mimo_ps(rtwdev, false, false);
+ }
+ }
+
rtw_coex_gnt_workaround(rtwdev, false, coex_stat->wl_coex_mode);
rtw_coex_limited_wl(rtwdev);
}
@@ -3139,6 +3264,135 @@ void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
rtw_coex_run_coex(rtwdev, COEX_RSN_BTINFO);
}
+#define COEX_BT_HIDINFO_MTK 0x46
+static const u8 coex_bt_hidinfo_ps[] = {0x57, 0x69, 0x72};
+static const u8 coex_bt_hidinfo_xb[] = {0x58, 0x62, 0x6f};
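
For reference, the magic byte arrays decode as ASCII name prefixes: {0x57, 0x69, 0x72} is "Wir" and {0x58, 0x62, 0x6f} is "Xbo". The code below memcmp()s them against the first COEX_BT_HIDINFO_NAME bytes of the reported HID name, presumably to match specific game controllers, though the patch does not name the products.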
+
+void rtw_coex_bt_hid_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_hid *hidinfo;
+ struct rtw_coex_hid_info_a *hida;
+ struct rtw_coex_hid_handle_list *hl, *bhl;
+ u8 sub_id = buf[2], gamehid_cnt = 0, handle, i;
+ bool cur_game_hid_exist, complete;
+
+ if (!chip->wl_mimo_ps_support &&
+ (sub_id == COEX_BT_HIDINFO_LIST || sub_id == COEX_BT_HIDINFO_A))
+ return;
+
+ rtw_dbg(rtwdev, RTW_DBG_COEX,
+ "[BTCoex], HID info notify, sub_id = 0x%x\n", sub_id);
+
+ switch (sub_id) {
+ case COEX_BT_HIDINFO_LIST:
+ hl = &coex_stat->hid_handle_list;
+ bhl = (struct rtw_coex_hid_handle_list *)buf;
+ if (!memcmp(hl, bhl, sizeof(*hl)))
+ return;
+ coex_stat->hid_handle_list = *bhl;
+ memset(&coex_stat->hid_info, 0, sizeof(coex_stat->hid_info));
+ for (i = 0; i < COEX_BT_HIDINFO_HANDLE_NUM; i++) {
+ hidinfo = &coex_stat->hid_info[i];
+ if (hl->handle[i] != COEX_BT_HIDINFO_NOTCON &&
+ hl->handle[i] != 0)
+ hidinfo->hid_handle = hl->handle[i];
+ }
+ break;
+ case COEX_BT_HIDINFO_A:
+ hida = (struct rtw_coex_hid_info_a *)buf;
+ handle = hida->handle;
+ for (i = 0; i < COEX_BT_HIDINFO_HANDLE_NUM; i++) {
+ hidinfo = &coex_stat->hid_info[i];
+ if (hidinfo->hid_handle == handle) {
+ hidinfo->hid_vendor = hida->vendor;
+ memcpy(hidinfo->hid_name, hida->name,
+ sizeof(hidinfo->hid_name));
+ hidinfo->hid_info_completed = true;
+ break;
+ }
+ }
+ break;
+ }
+ for (i = 0; i < COEX_BT_HIDINFO_HANDLE_NUM; i++) {
+ hidinfo = &coex_stat->hid_info[i];
+ complete = hidinfo->hid_info_completed;
+ handle = hidinfo->hid_handle;
+ if (!complete || handle == COEX_BT_HIDINFO_NOTCON ||
+ handle == 0 || handle >= COEX_BT_BLE_HANDLE_THRS) {
+ hidinfo->is_game_hid = false;
+ continue;
+ }
+
+ if (hidinfo->hid_vendor == COEX_BT_HIDINFO_MTK) {
+ if ((memcmp(hidinfo->hid_name,
+ coex_bt_hidinfo_ps,
+ COEX_BT_HIDINFO_NAME)) == 0)
+ hidinfo->is_game_hid = true;
+ else if ((memcmp(hidinfo->hid_name,
+ coex_bt_hidinfo_xb,
+ COEX_BT_HIDINFO_NAME)) == 0)
+ hidinfo->is_game_hid = true;
+ else
+ hidinfo->is_game_hid = false;
+ } else {
+ hidinfo->is_game_hid = false;
+ }
+ if (hidinfo->is_game_hid)
+ gamehid_cnt++;
+ }
+
+ if (gamehid_cnt > 0)
+ cur_game_hid_exist = true;
+ else
+ cur_game_hid_exist = false;
+
+ if (cur_game_hid_exist != coex_stat->bt_game_hid_exist) {
+ coex_stat->bt_game_hid_exist = cur_game_hid_exist;
+ rtw_dbg(rtwdev, RTW_DBG_COEX,
+ "[BTCoex], HID info changed!bt_game_hid_exist = %d!\n",
+ coex_stat->bt_game_hid_exist);
+ rtw_coex_run_coex(rtwdev, COEX_RSN_BTSTATUS);
+ }
+}
+
+void rtw_coex_query_bt_hid_list(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_hid *hidinfo;
+ u8 i, handle;
+ bool complete;
+
+ if (!chip->wl_mimo_ps_support || coex_stat->wl_under_ips ||
+ (coex_stat->wl_under_lps && !coex_stat->wl_force_lps_ctrl))
+ return;
+
+ if (!coex_stat->bt_hid_exist &&
+ !((coex_stat->bt_info_lb2 & COEX_INFO_CONNECTION) &&
+ (coex_stat->hi_pri_tx + coex_stat->hi_pri_rx >
+ COEX_BT_GAMEHID_CNT)))
+ return;
+
+ rtw_fw_coex_query_hid_info(rtwdev, COEX_BT_HIDINFO_LIST, 0);
+
+ for (i = 0; i < COEX_BT_HIDINFO_HANDLE_NUM; i++) {
+ hidinfo = &coex_stat->hid_info[i];
+ complete = hidinfo->hid_info_completed;
+ handle = hidinfo->hid_handle;
+ if (handle == 0 || handle == COEX_BT_HIDINFO_NOTCON ||
+ handle >= COEX_BT_BLE_HANDLE_THRS || complete)
+ continue;
+
+ rtw_fw_coex_query_hid_info(rtwdev,
+ COEX_BT_HIDINFO_A,
+ handle);
+ }
+}
+
void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
{
struct rtw_coex *coex = &rtwdev->coex;
@@ -3175,6 +3429,17 @@ void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev, u32 type)
rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS);
}
+void rtw_coex_wl_status_check(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
+
+ if ((coex_stat->wl_under_lps && !coex_stat->wl_force_lps_ctrl) ||
+ coex_stat->wl_under_ips)
+ return;
+
+ rtw_coex_monitor_bt_ctr(rtwdev);
+}
+
void rtw_coex_bt_relink_work(struct work_struct *work)
{
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
@@ -3637,6 +3902,7 @@ static const char *rtw_coex_get_wl_coex_mode(u8 coex_wl_link_mode)
switch (coex_wl_link_mode) {
case_WLINK(2G1PORT);
case_WLINK(5G);
+ case_WLINK(2GFREE);
default:
return "Unknown";
}
@@ -3658,7 +3924,6 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m)
u16 score_board_WB, score_board_BW;
u32 wl_reg_6c0, wl_reg_6c4, wl_reg_6c8, wl_reg_778, wl_reg_6cc;
u32 lte_coex, bt_coex;
- u32 bt_hi_pri, bt_lo_pri;
int i;
score_board_BW = rtw_coex_read_scbd(rtwdev);
@@ -3669,17 +3934,6 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m)
wl_reg_6cc = rtw_read32(rtwdev, REG_BT_COEX_TABLE_H);
wl_reg_778 = rtw_read8(rtwdev, REG_BT_STAT_CTRL);
- bt_hi_pri = rtw_read32(rtwdev, REG_BT_ACT_STATISTICS);
- bt_lo_pri = rtw_read32(rtwdev, REG_BT_ACT_STATISTICS_1);
- rtw_write8(rtwdev, REG_BT_COEX_ENH_INTR_CTRL,
- BIT_R_GRANTALL_WLMASK | BIT_STATIS_BT_EN);
-
- coex_stat->hi_pri_tx = FIELD_GET(MASKLWORD, bt_hi_pri);
- coex_stat->hi_pri_rx = FIELD_GET(MASKHWORD, bt_hi_pri);
-
- coex_stat->lo_pri_tx = FIELD_GET(MASKLWORD, bt_lo_pri);
- coex_stat->lo_pri_rx = FIELD_GET(MASKHWORD, bt_lo_pri);
-
sys_lte = rtw_read8(rtwdev, 0x73);
lte_coex = rtw_coex_read_indirect_reg(rtwdev, 0x38);
bt_coex = rtw_coex_read_indirect_reg(rtwdev, 0x54);
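The hunk above removes the BT activity counter sampling from the debugfs path; with rtw_coex_wl_status_check() added earlier, the counters are now sampled periodically from the watchdog via rtw_coex_monitor_bt_ctr() rather than only when the debug file is read. Each 32-bit statistics register packs a TX count in the low word and an RX count in the high word, split with FIELD_GET(MASKLWORD/MASKHWORD) as the removed lines show. A userspace sketch of that split:

	/* Sketch of the hi/lo-priority BT counter split: tx in bits 15:0,
	 * rx in bits 31:16 of each statistics register.
	 */
	#include <stdint.h>
	#include <stdio.h>

	static void split_bt_ctr(uint32_t reg, uint16_t *tx, uint16_t *rx)
	{
		*tx = reg & 0xffff;		/* FIELD_GET(MASKLWORD, reg) */
		*rx = (reg >> 16) & 0xffff;	/* FIELD_GET(MASKHWORD, reg) */
	}

	int main(void)
	{
		uint16_t tx, rx;

		split_bt_ctr(0x00300120, &tx, &rx);	/* example register value */
		printf("hi_pri_tx=%u hi_pri_rx=%u\n", tx, rx);
		return 0;
	}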
diff --git a/drivers/net/wireless/realtek/rtw88/coex.h b/drivers/net/wireless/realtek/rtw88/coex.h
index fc61a0cab3e4..07fa7aa34d4b 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.h
+++ b/drivers/net/wireless/realtek/rtw88/coex.h
@@ -11,6 +11,7 @@
#define COEX_MIN_DELAY 10 /* delay unit in ms */
#define COEX_RFK_TIMEOUT 600 /* RFK timeout in ms */
+#define COEX_BT_GAMEHID_CNT 800
#define COEX_RF_OFF 0x0
#define COEX_RF_ON 0x1
@@ -172,6 +173,7 @@ enum coex_bt_profile {
enum coex_wl_link_mode {
COEX_WLINK_2G1PORT = 0x0,
COEX_WLINK_5G = 0x3,
+ COEX_WLINK_2GFREE = 0x7,
COEX_WLINK_MAX
};
@@ -401,9 +403,12 @@ void rtw_coex_scan_notify(struct rtw_dev *rtwdev, u8 type);
void rtw_coex_connect_notify(struct rtw_dev *rtwdev, u8 type);
void rtw_coex_media_status_notify(struct rtw_dev *rtwdev, u8 type);
void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length);
+void rtw_coex_bt_hid_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length);
void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length);
void rtw_coex_switchband_notify(struct rtw_dev *rtwdev, u8 type);
void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev, u32 type);
+void rtw_coex_wl_status_check(struct rtw_dev *rtwdev);
+void rtw_coex_query_bt_hid_list(struct rtw_dev *rtwdev);
void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m);
static inline bool rtw_coex_disabled(struct rtw_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index e429428232c1..1a52ff585fbc 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -390,7 +390,7 @@ static ssize_t rtw_debugfs_set_h2c(struct file *filp,
&param[0], &param[1], &param[2], &param[3],
&param[4], &param[5], &param[6], &param[7]);
if (num != 8) {
- rtw_info(rtwdev, "invalid H2C command format for debug\n");
+ rtw_warn(rtwdev, "invalid H2C command format for debug\n");
return -EINVAL;
}
@@ -715,8 +715,10 @@ static int rtw_debugfs_get_phy_info(struct seq_file *m, void *v)
seq_printf(m, "Current CH(fc) = %u\n", rtwdev->hal.current_channel);
seq_printf(m, "Current BW = %u\n", rtwdev->hal.current_band_width);
seq_printf(m, "Current IGI = 0x%x\n", dm_info->igi_history[0]);
- seq_printf(m, "TP {Tx, Rx} = {%u, %u}Mbps\n\n",
+ seq_printf(m, "TP {Tx, Rx} = {%u, %u}Mbps\n",
stats->tx_throughput, stats->rx_throughput);
+ seq_printf(m, "1SS for TX and RX = %c\n\n", rtwdev->hal.txrx_1ss ?
+ 'Y' : 'N');
seq_puts(m, "==========[Tx Phy Info]========\n");
seq_puts(m, "[Tx Rate] = ");
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
index 61f8369fe2d6..066792dd96af 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.h
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -23,6 +23,7 @@ enum rtw_debug_mask {
RTW_DBG_PATH_DIV = 0x00004000,
RTW_DBG_ADAPTIVITY = 0x00008000,
RTW_DBG_HW_SCAN = 0x00010000,
+ RTW_DBG_STATE = 0x00020000,
RTW_DBG_ALL = 0xffffffff
};
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 2f7c036f9022..aa2aeb5fb2cc 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -233,6 +233,9 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
case C2H_BT_INFO:
rtw_coex_bt_info_notify(rtwdev, c2h->payload, len);
break;
+ case C2H_BT_HID_INFO:
+ rtw_coex_bt_hid_info_notify(rtwdev, c2h->payload, len);
+ break;
case C2H_WLAN_INFO:
rtw_coex_wl_fwdbginfo_notify(rtwdev, c2h->payload, len);
break;
@@ -538,6 +541,18 @@ void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev,
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
+void rtw_fw_coex_query_hid_info(struct rtw_dev *rtwdev, u8 sub_id, u8 data)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_HID_INFO);
+
+ SET_COEX_QUERY_HID_INFO_SUBID(h2c_pkt, sub_id);
+ SET_COEX_QUERY_HID_INFO_DATA1(h2c_pkt, data);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
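rtw_fw_coex_query_hid_info() builds the 0x73 H2C command by packing the sub-command into bits 15:8 and the handle into bits 23:16 of the first little-endian dword (see the SET_COEX_QUERY_HID_INFO_* macros added to fw.h further down, and assuming the command id sits in bits 7:0 as in the other H2C layouts). A standalone model of the le32p_replace_bits()-style packing:

	/* Userspace model of le32p_replace_bits(): replace a bit field
	 * inside a 32-bit word without disturbing the other bits.
	 */
	#include <stdint.h>
	#include <stdio.h>

	static void replace_bits(uint32_t *word, uint32_t val, int lo, int hi)
	{
		uint32_t mask = (~0u >> (31 - hi)) & ~((1u << lo) - 1);

		*word = (*word & ~mask) | ((val << lo) & mask);
	}

	int main(void)
	{
		uint32_t h2c = 0x73;			/* cmd id in bits 7:0 */

		replace_bits(&h2c, 0x1, 8, 15);		/* subid: COEX_BT_HIDINFO_A */
		replace_bits(&h2c, 0x02, 16, 23);	/* data1: HID handle */
		printf("h2c dword0 = 0x%08x\n", h2c);	/* prints 0x00020173 */
		return 0;
	}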
void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
@@ -1784,9 +1799,9 @@ void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
-static void rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
- struct sk_buff_head *list,
- struct rtw_vif *rtwvif)
+static int rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
+ struct sk_buff_head *list, u8 *bands,
+ struct rtw_vif *rtwvif)
{
struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
struct rtw_chip_info *chip = rtwdev->chip;
@@ -1797,19 +1812,24 @@ static void rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
if (!(BIT(idx) & chip->band))
continue;
new = skb_copy(skb, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
skb_put_data(new, ies->ies[idx], ies->len[idx]);
skb_put_data(new, ies->common_ies, ies->common_ie_len);
skb_queue_tail(list, new);
+ (*bands)++;
}
+
+ return 0;
}
-static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_ssids,
+static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_probes,
struct sk_buff_head *probe_req_list)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct sk_buff *skb, *tmp;
u8 page_offset = 1, *buf, page_size = chip->page_size;
- u8 pages = page_offset + num_ssids * RTW_PROBE_PG_CNT;
+ u8 pages = page_offset + num_probes * RTW_PROBE_PG_CNT;
u16 pg_addr = rtwdev->fifo.rsvd_h2c_info_addr, loc;
u16 buf_offset = page_size * page_offset;
u8 tx_desc_sz = chip->tx_pkt_desc_sz;
@@ -1848,6 +1868,8 @@ static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_ssids,
rtwdev->scan_info.probe_pg_size = page_offset;
out:
kfree(buf);
+ skb_queue_walk_safe(probe_req_list, skb, tmp)
+ kfree_skb(skb);
return ret;
}
@@ -1857,8 +1879,9 @@ static int rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev,
{
struct cfg80211_scan_request *req = rtwvif->scan_req;
struct sk_buff_head list;
- struct sk_buff *skb;
- u8 num = req->n_ssids, i;
+ struct sk_buff *skb, *tmp;
+ u8 num = req->n_ssids, i, bands = 0;
+ int ret;
skb_queue_head_init(&list);
for (i = 0; i < num; i++) {
@@ -1866,11 +1889,25 @@ static int rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev,
req->ssids[i].ssid,
req->ssids[i].ssid_len,
req->ie_len);
- rtw_append_probe_req_ie(rtwdev, skb, &list, rtwvif);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = rtw_append_probe_req_ie(rtwdev, skb, &list, &bands,
+ rtwvif);
+ if (ret)
+ goto out;
+
kfree_skb(skb);
}
- return _rtw_hw_scan_update_probe_req(rtwdev, num, &list);
+ return _rtw_hw_scan_update_probe_req(rtwdev, num * bands, &list);
+
+out:
+ skb_queue_walk_safe(&list, skb, tmp)
+ kfree_skb(skb);
+
+ return ret;
}
static int rtw_add_chan_info(struct rtw_dev *rtwdev, struct rtw_chan_info *info,
@@ -2022,7 +2059,7 @@ void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
rtwdev->hal.rcr |= BIT_CBSSID_BCN;
rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);
- rtw_core_scan_complete(rtwdev, vif);
+ rtw_core_scan_complete(rtwdev, vif, true);
ieee80211_wake_queues(rtwdev->hw);
ieee80211_scan_completed(rtwdev->hw, &info);
@@ -2109,7 +2146,7 @@ void rtw_hw_scan_status_report(struct rtw_dev *rtwdev, struct sk_buff *skb)
rtw_hw_scan_complete(rtwdev, vif, aborted);
if (aborted)
- rtw_info(rtwdev, "HW scan aborted with code: %d\n", rc);
+ rtw_dbg(rtwdev, RTW_DBG_HW_SCAN, "HW scan aborted with code: %d\n", rc);
}
void rtw_store_op_chan(struct rtw_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 654c3c2e5721..b59d2cbad5d7 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -47,6 +47,7 @@ enum rtw_c2h_cmd_id {
C2H_CCX_TX_RPT = 0x03,
C2H_BT_INFO = 0x09,
C2H_BT_MP_INFO = 0x0b,
+ C2H_BT_HID_INFO = 0x45,
C2H_RA_RPT = 0x0c,
C2H_HW_FEATURE_REPORT = 0x19,
C2H_WLAN_INFO = 0x27,
@@ -529,6 +530,7 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define H2C_CMD_QUERY_BT_MP_INFO 0x67
#define H2C_CMD_BT_WIFI_CONTROL 0x69
#define H2C_CMD_WIFI_CALIBRATION 0x6d
+#define H2C_CMD_QUERY_BT_HID_INFO 0x73
#define H2C_CMD_KEEP_ALIVE 0x03
#define H2C_CMD_DISCONNECT_DECISION 0x04
@@ -681,6 +683,11 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16))
+#define SET_COEX_QUERY_HID_INFO_SUBID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_COEX_QUERY_HID_INFO_DATA1(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+
#define SET_KEEP_ALIVE_ENABLE(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
#define SET_KEEP_ALIVE_ADOPT(h2c_pkt, value) \
@@ -780,6 +787,8 @@ void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl);
void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable);
void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev,
u8 para1, u8 para2, u8 para3, u8 para4, u8 para5);
+void rtw_fw_coex_query_hid_info(struct rtw_dev *rtwdev, u8 sub_id, u8 data);
+
void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data);
void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index ae7d97de5fdf..5cdc54c9a9aa 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -72,6 +72,9 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
struct rtw_dev *rtwdev = hw->priv;
int ret = 0;
+	/* let the previous IPS work finish to ensure we don't leave IPS twice */
+ cancel_work_sync(&rtwdev->ips_work);
+
mutex_lock(&rtwdev->mutex);
rtw_leave_lps_deep(rtwdev);
@@ -205,7 +208,7 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
- rtw_info(rtwdev, "start vif %pM on port %d\n", vif->addr, rtwvif->port);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "start vif %pM on port %d\n", vif->addr, rtwvif->port);
return 0;
}
@@ -216,7 +219,7 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
u32 config = 0;
- rtw_info(rtwdev, "stop vif %pM on port %d\n", vif->addr, rtwvif->port);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "stop vif %pM on port %d\n", vif->addr, rtwvif->port);
mutex_lock(&rtwdev->mutex);
@@ -242,8 +245,8 @@ static int rtw_ops_change_interface(struct ieee80211_hw *hw,
{
struct rtw_dev *rtwdev = hw->priv;
- rtw_info(rtwdev, "change vif %pM (%d)->(%d), p2p (%d)->(%d)\n",
- vif->addr, vif->type, type, vif->p2p, p2p);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "change vif %pM (%d)->(%d), p2p (%d)->(%d)\n",
+ vif->addr, vif->type, type, vif->p2p, p2p);
rtw_ops_remove_interface(hw, vif);
@@ -614,7 +617,7 @@ static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw,
struct rtw_dev *rtwdev = hw->priv;
mutex_lock(&rtwdev->mutex);
- rtw_core_scan_complete(rtwdev, vif);
+ rtw_core_scan_complete(rtwdev, vif, false);
mutex_unlock(&rtwdev->mutex);
}
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 38252113c4a8..8b9899e41b0b 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -207,6 +207,9 @@ static void rtw_watch_dog_work(struct work_struct *work)
else
clear_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
+ rtw_coex_wl_status_check(rtwdev);
+ rtw_coex_query_bt_hid_list(rtwdev);
+
if (busy_traffic != test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags))
rtw_coex_wl_status_change_notify(rtwdev, 0);
@@ -272,6 +275,15 @@ static void rtw_c2h_work(struct work_struct *work)
}
}
+static void rtw_ips_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, ips_work);
+
+ mutex_lock(&rtwdev->mutex);
+ rtw_enter_ips(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+}
+
static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
{
unsigned long mac_id;
@@ -305,8 +317,8 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
rtwdev->sta_cnt++;
rtwdev->beacon_loss = false;
- rtw_info(rtwdev, "sta %pM joined with macid %d\n",
- sta->addr, si->mac_id);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "sta %pM joined with macid %d\n",
+ sta->addr, si->mac_id);
return 0;
}
@@ -327,8 +339,8 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
kfree(si->mask);
rtwdev->sta_cnt--;
- rtw_info(rtwdev, "sta %pM with macid %d left\n",
- sta->addr, si->mac_id);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "sta %pM with macid %d left\n",
+ sta->addr, si->mac_id);
}
struct rtw_fwcd_hdr {
@@ -1011,37 +1023,52 @@ static u8 get_rate_id(u8 wireless_set, enum rtw_bandwidth bw_mode, u8 tx_num)
#define RA_MASK_VHT_RATES (RA_MASK_VHT_RATES_1SS | \
RA_MASK_VHT_RATES_2SS | \
RA_MASK_VHT_RATES_3SS)
+#define RA_MASK_CCK_IN_BG 0x00005
#define RA_MASK_CCK_IN_HT 0x00005
#define RA_MASK_CCK_IN_VHT 0x00005
#define RA_MASK_OFDM_IN_VHT 0x00010
#define RA_MASK_OFDM_IN_HT_2G 0x00010
#define RA_MASK_OFDM_IN_HT_5G 0x00030
-static u64 rtw_update_rate_mask(struct rtw_dev *rtwdev,
- struct rtw_sta_info *si,
- u64 ra_mask, bool is_vht_enable,
- u8 wireless_set)
+static u64 rtw_rate_mask_rssi(struct rtw_sta_info *si, u8 wireless_set)
+{
+ u8 rssi_level = si->rssi_level;
+
+ if (wireless_set == WIRELESS_CCK)
+ return 0xffffffffffffffffULL;
+
+ if (rssi_level == 0)
+ return 0xffffffffffffffffULL;
+ else if (rssi_level == 1)
+ return 0xfffffffffffffff0ULL;
+ else if (rssi_level == 2)
+ return 0xffffffffffffefe0ULL;
+ else if (rssi_level == 3)
+ return 0xffffffffffffcfc0ULL;
+ else if (rssi_level == 4)
+ return 0xffffffffffff8f80ULL;
+ else
+ return 0xffffffffffff0f00ULL;
+}
+
+static u64 rtw_rate_mask_recover(u64 ra_mask, u64 ra_mask_bak)
+{
+ if ((ra_mask & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)) == 0)
+ ra_mask |= (ra_mask_bak & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));
+
+ if (ra_mask == 0)
+ ra_mask |= (ra_mask_bak & (RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));
+
+ return ra_mask;
+}
+
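rtw_rate_mask_recover() guards against the RSSI mask wiping out every usable rate: if masking left no HT/VHT bits, the HT/VHT portion of the pre-mask backup (ra_mask_bak) is restored, and if the mask is empty altogether the legacy (CCK/OFDM) bits come back. A worked standalone sketch; the legacy-mask constants are assumed from rtw88's usual values (CCK = bits 0-3, OFDM = bits 4-11):

	/* Sketch of rtw_rate_mask_recover(); constants assumed. */
	#include <stdint.h>
	#include <stdio.h>

	#define RA_MASK_CCK_RATES	0x0000fULL
	#define RA_MASK_OFDM_RATES	0x00ff0ULL
	#define RA_MASK_LEGACY		(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)

	static uint64_t rate_mask_recover(uint64_t ra_mask, uint64_t ra_mask_bak)
	{
		if ((ra_mask & ~RA_MASK_LEGACY) == 0)	/* no HT/VHT rates left */
			ra_mask |= ra_mask_bak & ~RA_MASK_LEGACY;

		if (ra_mask == 0)			/* nothing left at all */
			ra_mask |= ra_mask_bak & RA_MASK_LEGACY;

		return ra_mask;
	}

	int main(void)
	{
		/* RSSI masking reduced an HT mask to legacy-only bits... */
		uint64_t masked = 0x00000ff0, backup = 0x000ffff5;

		/* ...so the HT portion of the backup is restored: 0xffff0. */
		printf("0x%llx\n",
		       (unsigned long long)rate_mask_recover(masked, backup));
		return 0;
	}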
+static u64 rtw_rate_mask_cfg(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
+ u64 ra_mask, bool is_vht_enable)
{
struct rtw_hal *hal = &rtwdev->hal;
const struct cfg80211_bitrate_mask *mask = si->mask;
u64 cfg_mask = GENMASK_ULL(63, 0);
- u8 rssi_level, band;
-
- if (wireless_set != WIRELESS_CCK) {
- rssi_level = si->rssi_level;
- if (rssi_level == 0)
- ra_mask &= 0xffffffffffffffffULL;
- else if (rssi_level == 1)
- ra_mask &= 0xfffffffffffffff0ULL;
- else if (rssi_level == 2)
- ra_mask &= 0xffffffffffffefe0ULL;
- else if (rssi_level == 3)
- ra_mask &= 0xffffffffffffcfc0ULL;
- else if (rssi_level == 4)
- ra_mask &= 0xffffffffffff8f80ULL;
- else if (rssi_level >= 5)
- ra_mask &= 0xffffffffffff0f00ULL;
- }
+ u8 band;
if (!si->use_cfg_mask)
return ra_mask;
@@ -1091,6 +1118,7 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
u8 ldpc_en = 0;
u8 tx_num = 1;
u64 ra_mask = 0;
+ u64 ra_mask_bak = 0;
bool is_vht_enable = false;
bool is_support_sgi = false;
@@ -1110,11 +1138,12 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
ldpc_en = HT_LDPC_EN;
}
- if (efuse->hw_cap.nss == 1)
+ if (efuse->hw_cap.nss == 1 || rtwdev->hal.txrx_1ss)
ra_mask &= RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS;
if (hal->current_band_type == RTW_BAND_5G) {
ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4;
+ ra_mask_bak = ra_mask;
if (sta->vht_cap.vht_supported) {
ra_mask &= RA_MASK_VHT_RATES | RA_MASK_OFDM_IN_VHT;
wireless_set = WIRELESS_OFDM | WIRELESS_VHT;
@@ -1127,6 +1156,7 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
dm_info->rrsr_val_init = RRSR_INIT_5G;
} else if (hal->current_band_type == RTW_BAND_2G) {
ra_mask |= sta->supp_rates[NL80211_BAND_2GHZ];
+ ra_mask_bak = ra_mask;
if (sta->vht_cap.vht_supported) {
ra_mask &= RA_MASK_VHT_RATES | RA_MASK_CCK_IN_VHT |
RA_MASK_OFDM_IN_VHT;
@@ -1140,11 +1170,13 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
} else if (sta->supp_rates[0] <= 0xf) {
wireless_set = WIRELESS_CCK;
} else {
+ ra_mask &= RA_MASK_OFDM_RATES | RA_MASK_CCK_IN_BG;
wireless_set = WIRELESS_CCK | WIRELESS_OFDM;
}
dm_info->rrsr_val_init = RRSR_INIT_2G;
} else {
rtw_err(rtwdev, "Unknown band type\n");
+ ra_mask_bak = ra_mask;
wireless_set = 0;
}
@@ -1176,8 +1208,9 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
rate_id = get_rate_id(wireless_set, bw_mode, tx_num);
- ra_mask = rtw_update_rate_mask(rtwdev, si, ra_mask, is_vht_enable,
- wireless_set);
+ ra_mask &= rtw_rate_mask_rssi(si, wireless_set);
+ ra_mask = rtw_rate_mask_recover(ra_mask, ra_mask_bak);
+ ra_mask = rtw_rate_mask_cfg(rtwdev, si, ra_mask, is_vht_enable);
si->bw_mode = bw_mode;
si->stbc_en = stbc_en;
@@ -1339,7 +1372,8 @@ void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
set_bit(RTW_FLAG_SCANNING, rtwdev->flags);
}
-void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
+void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ bool hw_scan)
{
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
u32 config = 0;
@@ -1354,6 +1388,9 @@ void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
rtw_vif_port_config(rtwdev, rtwvif, config);
rtw_coex_scan_notify(rtwdev, COEX_SCAN_FINISH);
+
+ if (rtwvif->net_type == RTW_NET_NO_LINK && hw_scan)
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->ips_work);
}
int rtw_core_start(struct rtw_dev *rtwdev)
@@ -1536,6 +1573,37 @@ static void rtw_unset_supported_band(struct ieee80211_hw *hw,
kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]);
}
+static void rtw_vif_smps_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = (struct rtw_dev *)data;
+
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
+ return;
+
+ if (rtwdev->hal.txrx_1ss)
+ ieee80211_request_smps(vif, IEEE80211_SMPS_STATIC);
+ else
+ ieee80211_request_smps(vif, IEEE80211_SMPS_OFF);
+}
+
+void rtw_set_txrx_1ss(struct rtw_dev *rtwdev, bool txrx_1ss)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ if (!chip->ops->config_txrx_mode || rtwdev->hal.txrx_1ss == txrx_1ss)
+ return;
+
+ rtwdev->hal.txrx_1ss = txrx_1ss;
+ if (txrx_1ss)
+ chip->ops->config_txrx_mode(rtwdev, BB_PATH_A, BB_PATH_A, false);
+ else
+ chip->ops->config_txrx_mode(rtwdev, hal->antenna_tx,
+ hal->antenna_rx, false);
+ rtw_iterate_vifs_atomic(rtwdev, rtw_vif_smps_iter, rtwdev);
+}
+
static void __update_firmware_feature(struct rtw_dev *rtwdev,
struct rtw_fw_state *fw)
{
@@ -1919,6 +1987,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
INIT_DELAYED_WORK(&coex->wl_ccklock_work, rtw_coex_wl_ccklock_work);
INIT_WORK(&rtwdev->tx_work, rtw_tx_work);
INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
+ INIT_WORK(&rtwdev->ips_work, rtw_ips_work);
INIT_WORK(&rtwdev->fw_recovery_work, rtw_fw_recovery_work);
INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work);
skb_queue_head_init(&rtwdev->c2h_queue);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index dc1cd9bd4b8a..17815af9dd4e 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -874,6 +874,8 @@ struct rtw_chip_ops {
enum rtw_bb_path tx_path_1ss,
enum rtw_bb_path tx_path_cck,
bool is_tx2_path);
+ void (*config_txrx_mode)(struct rtw_dev *rtwdev, u8 tx_path,
+ u8 rx_path, bool is_tx2_path);
/* for coex */
void (*coex_set_init)(struct rtw_dev *rtwdev);
@@ -1240,6 +1242,7 @@ struct rtw_chip_info {
bool scbd_support;
bool new_scbd10_def; /* true: fix 2M(8822c) */
bool ble_hid_profile_support;
+ bool wl_mimo_ps_support;
u8 pstdma_type; /* 0: LPSoff, 1:LPSon */
u8 bt_rssi_type;
u8 ant_isolation;
@@ -1352,6 +1355,42 @@ struct rtw_coex_dm {
#define COEX_BTINFO_LENGTH_MAX 10
#define COEX_BTINFO_LENGTH 7
+#define COEX_BT_HIDINFO_LIST 0x0
+#define COEX_BT_HIDINFO_A 0x1
+#define COEX_BT_HIDINFO_NAME 3
+
+#define COEX_BT_HIDINFO_LENGTH 6
+#define COEX_BT_HIDINFO_HANDLE_NUM 4
+#define COEX_BT_HIDINFO_C2H_HANDLE 0
+#define COEX_BT_HIDINFO_C2H_VENDOR 1
+#define COEX_BT_BLE_HANDLE_THRS 0x10
+#define COEX_BT_HIDINFO_NOTCON 0xff
+
+struct rtw_coex_hid {
+ u8 hid_handle;
+ u8 hid_vendor;
+ u8 hid_name[COEX_BT_HIDINFO_NAME];
+ bool hid_info_completed;
+ bool is_game_hid;
+};
+
+struct rtw_coex_hid_handle_list {
+ u8 cmd_id;
+ u8 len;
+ u8 subid;
+ u8 handle_cnt;
+ u8 handle[COEX_BT_HIDINFO_HANDLE_NUM];
+} __packed;
+
+struct rtw_coex_hid_info_a {
+ u8 cmd_id;
+ u8 len;
+ u8 subid;
+ u8 handle;
+ u8 vendor;
+ u8 name[COEX_BT_HIDINFO_NAME];
+} __packed;
+
struct rtw_coex_stat {
bool bt_disabled;
bool bt_disabled_pre;
@@ -1382,6 +1421,8 @@ struct rtw_coex_stat {
bool bt_slave;
bool bt_418_hid_exist;
bool bt_ble_hid_exist;
+ bool bt_game_hid_exist;
+ bool bt_hid_handle_cnt;
bool bt_mailbox_reply;
bool wl_under_lps;
@@ -1402,6 +1443,7 @@ struct rtw_coex_stat {
bool wl_connecting;
bool wl_slot_toggle;
bool wl_slot_toggle_change; /* if toggle to no-toggle */
+ bool wl_mimo_ps;
u32 bt_supported_version;
u32 bt_supported_feature;
@@ -1459,6 +1501,9 @@ struct rtw_coex_stat {
u32 darfrc;
u32 darfrch;
+
+ struct rtw_coex_hid hid_info[COEX_BT_HIDINFO_HANDLE_NUM];
+ struct rtw_coex_hid_handle_list hid_handle_list;
};
struct rtw_coex {
@@ -1867,6 +1912,7 @@ struct rtw_hal {
u32 antenna_tx;
u32 antenna_rx;
u8 bfee_sts_cap;
+ bool txrx_1ss;
/* protect tx power section */
struct mutex tx_power_mutex;
@@ -1960,6 +2006,7 @@ struct rtw_dev {
/* c2h cmd queue & handler work */
struct sk_buff_head c2h_queue;
struct work_struct c2h_work;
+ struct work_struct ips_work;
struct work_struct fw_recovery_work;
/* used to protect txqs list */
@@ -2101,7 +2148,8 @@ void rtw_tx_report_purge_timer(struct timer_list *t);
void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
const u8 *mac_addr, bool hw_scan);
-void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif);
+void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ bool hw_scan);
int rtw_core_start(struct rtw_dev *rtwdev);
void rtw_core_stop(struct rtw_dev *rtwdev);
int rtw_chip_info_setup(struct rtw_dev *rtwdev);
@@ -2121,5 +2169,5 @@ void rtw_core_fw_scan_notify(struct rtw_dev *rtwdev, bool start);
int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size,
u32 fwcd_item);
int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size);
-
+void rtw_set_txrx_1ss(struct rtw_dev *rtwdev, bool config_1ss);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
index 3fdbaf7302c5..ad2b323a0423 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
@@ -2753,6 +2753,7 @@ struct rtw_chip_info rtw8723d_hw_spec = {
.scbd_support = true,
.new_scbd10_def = true,
.ble_hid_profile_support = false,
+ .wl_mimo_ps_support = false,
.pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
.bt_rssi_type = COEX_BTRSSI_RATIO,
.ant_isolation = 15,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index db078df63f85..99eee128ae94 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -499,7 +499,7 @@ static s8 get_cck_rx_pwr(struct rtw_dev *rtwdev, u8 lna_idx, u8 vga_idx)
}
if (lna_idx >= lna_gain_table_size) {
- rtw_info(rtwdev, "incorrect lna index (%d)\n", lna_idx);
+ rtw_warn(rtwdev, "incorrect lna index (%d)\n", lna_idx);
return -120;
}
@@ -1514,6 +1514,7 @@ static const struct rtw_rfe_def rtw8821c_rfe_defs[] = {
[0] = RTW_DEF_RFE(8821c, 0, 0),
[2] = RTW_DEF_RFE_EXT(8821c, 0, 0, 2),
[4] = RTW_DEF_RFE_EXT(8821c, 0, 0, 2),
+ [6] = RTW_DEF_RFE(8821c, 0, 0),
};
static struct rtw_hw_reg rtw8821c_dig[] = {
@@ -1924,6 +1925,7 @@ struct rtw_chip_info rtw8821c_hw_spec = {
.scbd_support = true,
.new_scbd10_def = false,
.ble_hid_profile_support = false,
+ .wl_mimo_ps_support = false,
.pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
.bt_rssi_type = COEX_BTRSSI_RATIO,
.ant_isolation = 15,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index dd4fbb82750d..eee7bf035403 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -1012,12 +1012,12 @@ static int rtw8822b_set_antenna(struct rtw_dev *rtwdev,
antenna_tx, antenna_rx);
if (!rtw8822b_check_rf_path(antenna_tx)) {
- rtw_info(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
+ rtw_warn(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
return -EINVAL;
}
if (!rtw8822b_check_rf_path(antenna_rx)) {
- rtw_info(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
+ rtw_warn(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
return -EINVAL;
}
@@ -2554,6 +2554,7 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.scbd_support = true,
.new_scbd10_def = false,
.ble_hid_profile_support = false,
+ .wl_mimo_ps_support = false,
.pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
.bt_rssi_type = COEX_BTRSSI_RATIO,
.ant_isolation = 15,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index 35c46e5209de..cd74607a61a2 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -2798,7 +2798,7 @@ static int rtw8822c_set_antenna(struct rtw_dev *rtwdev,
case BB_PATH_AB:
break;
default:
- rtw_info(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
+ rtw_warn(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
return -EINVAL;
}
@@ -2808,7 +2808,7 @@ static int rtw8822c_set_antenna(struct rtw_dev *rtwdev,
case BB_PATH_AB:
break;
default:
- rtw_info(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
+ rtw_warn(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
return -EINVAL;
}
@@ -2996,19 +2996,34 @@ static void rtw8822c_coex_cfg_gnt_fix(struct rtw_dev *rtwdev)
* enable "DAC off if GNT_WL = 0" for non-shared-antenna
* disable 0x1c30[22] = 0,
* enable: 0x1c30[22] = 1, 0x1c38[12] = 0, 0x1c38[28] = 1
- *
- * disable WL-S1 BB chage RF mode if GNT_BT
+ */
+ if (coex_stat->wl_coex_mode == COEX_WLINK_2GFREE) {
+ rtw_write8_mask(rtwdev, REG_ANAPAR + 2,
+ BIT_ANAPAR_BTPS >> 16, 0);
+ } else {
+ rtw_write8_mask(rtwdev, REG_ANAPAR + 2,
+ BIT_ANAPAR_BTPS >> 16, 1);
+ rtw_write8_mask(rtwdev, REG_RSTB_SEL + 1,
+ BIT_DAC_OFF_ENABLE, 0);
+ rtw_write8_mask(rtwdev, REG_RSTB_SEL + 3,
+ BIT_DAC_OFF_ENABLE, 1);
+ }
+
+	/* disable WL-S1 BB change RF mode if GNT_BT
* since RF TRx mask can do it
*/
- rtw_write8_mask(rtwdev, REG_ANAPAR + 2, BIT_ANAPAR_BTPS >> 16, 1);
- rtw_write8_mask(rtwdev, REG_RSTB_SEL + 1, BIT_DAC_OFF_ENABLE, 0);
- rtw_write8_mask(rtwdev, REG_RSTB_SEL + 3, BIT_DAC_OFF_ENABLE, 1);
- rtw_write8_mask(rtwdev, REG_IGN_GNTBT4, BIT_PI_IGNORE_GNT_BT, 1);
+ rtw_write8_mask(rtwdev, REG_IGN_GNTBT4,
+ BIT_PI_IGNORE_GNT_BT, 1);
	/* disable WL-S0 BB change RF mode if wifi is at 5G,
* or antenna path is separated
*/
- if (coex_stat->wl_coex_mode == COEX_WLINK_5G ||
+ if (coex_stat->wl_coex_mode == COEX_WLINK_2GFREE) {
+ rtw_write8_mask(rtwdev, REG_IGN_GNT_BT1,
+ BIT_PI_IGNORE_GNT_BT, 1);
+ rtw_write8_mask(rtwdev, REG_NOMASK_TXBT,
+ BIT_NOMASK_TXBT_ENABLE, 1);
+ } else if (coex_stat->wl_coex_mode == COEX_WLINK_5G ||
coex->under_5g || !efuse->share_ant) {
if (coex_stat->kt_ver >= 3) {
rtw_write8_mask(rtwdev, REG_IGN_GNT_BT1,
@@ -4962,6 +4977,7 @@ static struct rtw_chip_ops rtw8822c_ops = {
.cfo_init = rtw8822c_cfo_init,
.cfo_track = rtw8822c_cfo_track,
.config_tx_path = rtw8822c_config_tx_path,
+ .config_txrx_mode = rtw8822c_config_trx_mode,
.coex_set_init = rtw8822c_coex_cfg_init,
.coex_set_ant_switch = NULL,
@@ -5007,6 +5023,8 @@ static const struct coex_table_para table_sant_8822c[] = {
{0x66556aaa, 0x6a5a6aaa}, /*case-30*/
{0xffffffff, 0x5aaa5aaa},
{0x56555555, 0x5a5a5aaa},
+ {0xdaffdaff, 0xdaffdaff},
+ {0xddffddff, 0xddffddff},
};
/* Non-Shared-Antenna Coex Table */
@@ -5107,7 +5125,8 @@ static const struct coex_rf_para rf_para_tx_8822c[] = {
{8, 17, true, 4},
{7, 18, true, 4},
{6, 19, true, 4},
- {5, 20, true, 4}
+ {5, 20, true, 4},
+	{0, 21, true, 4} /* for game HID */
};
static const struct coex_rf_para rf_para_rx_8822c[] = {
@@ -5116,7 +5135,8 @@ static const struct coex_rf_para rf_para_rx_8822c[] = {
{3, 24, true, 5},
{2, 26, true, 5},
{1, 27, true, 5},
- {0, 28, true, 5}
+ {0, 28, true, 5},
+	{0, 28, true, 5} /* for game HID */
};
static_assert(ARRAY_SIZE(rf_para_tx_8822c) == ARRAY_SIZE(rf_para_rx_8822c));
@@ -5354,11 +5374,12 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.wowlan_stub = &rtw_wowlan_stub_8822c,
.max_sched_scan_ssids = 4,
#endif
- .coex_para_ver = 0x2103181c,
- .bt_desired_ver = 0x1c,
+ .coex_para_ver = 0x22020720,
+ .bt_desired_ver = 0x20,
.scbd_support = true,
.new_scbd10_def = true,
.ble_hid_profile_support = true,
+ .wl_mimo_ps_support = true,
.pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
.bt_rssi_type = COEX_BTRSSI_DBM,
.ant_isolation = 15,
diff --git a/drivers/net/wireless/realtek/rtw88/sar.c b/drivers/net/wireless/realtek/rtw88/sar.c
index 3383726c4d90..c472f1502b82 100644
--- a/drivers/net/wireless/realtek/rtw88/sar.c
+++ b/drivers/net/wireless/realtek/rtw88/sar.c
@@ -91,10 +91,10 @@ int rtw_set_sar_specs(struct rtw_dev *rtwdev,
return -EINVAL;
power = sar->sub_specs[i].power;
- rtw_info(rtwdev, "On freq %u to %u, set SAR %d in 1/%lu dBm\n",
- rtw_common_sar_freq_ranges[idx].start_freq,
- rtw_common_sar_freq_ranges[idx].end_freq,
- power, BIT(RTW_COMMON_SAR_FCT));
+ rtw_dbg(rtwdev, RTW_DBG_REGD, "On freq %u to %u, set SAR %d in 1/%lu dBm\n",
+ rtw_common_sar_freq_ranges[idx].start_freq,
+ rtw_common_sar_freq_ranges[idx].end_freq,
+ power, BIT(RTW_COMMON_SAR_FCT));
for (j = 0; j < RTW_RF_PATH_MAX; j++) {
for (k = 0; k < RTW_RATE_SECTION_MAX; k++) {
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index efcc1b0371a8..94d1089f4022 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -353,7 +353,7 @@ static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
bw = si->bw_mode;
rate_id = si->rate_id;
- stbc = si->stbc_en;
+ stbc = rtwdev->hal.txrx_1ss ? false : si->stbc_en;
ldpc = si->ldpc_en;
out:
diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig
index 37e5def24d9f..dd02b6a6790e 100644
--- a/drivers/net/wireless/realtek/rtw89/Kconfig
+++ b/drivers/net/wireless/realtek/rtw89/Kconfig
@@ -16,11 +16,15 @@ config RTW89_CORE
config RTW89_PCI
tristate
+config RTW89_8852A
+ tristate
+
config RTW89_8852AE
tristate "Realtek 8852AE PCI wireless network adapter"
depends on PCI
select RTW89_CORE
select RTW89_PCI
+ select RTW89_8852A
help
	  Selecting this option enables support for the 8852AE chipset
diff --git a/drivers/net/wireless/realtek/rtw89/Makefile b/drivers/net/wireless/realtek/rtw89/Makefile
index 077e8fe23f60..012ae60c0b81 100644
--- a/drivers/net/wireless/realtek/rtw89/Makefile
+++ b/drivers/net/wireless/realtek/rtw89/Makefile
@@ -6,10 +6,6 @@ rtw89_core-y += core.o \
mac.o \
phy.o \
fw.o \
- rtw8852a.o \
- rtw8852a_table.o \
- rtw8852a_rfk.o \
- rtw8852a_rfk_table.o \
cam.o \
efuse.o \
regd.o \
@@ -18,6 +14,15 @@ rtw89_core-y += core.o \
ps.o \
ser.o
+obj-$(CONFIG_RTW89_8852A) += rtw89_8852a.o
+rtw89_8852a-objs := rtw8852a.o \
+ rtw8852a_table.o \
+ rtw8852a_rfk.o \
+ rtw8852a_rfk_table.o
+
+obj-$(CONFIG_RTW89_8852AE) += rtw89_8852ae.o
+rtw89_8852ae-objs := rtw8852ae.o
+
rtw89_core-$(CONFIG_RTW89_DEBUG) += debug.o
obj-$(CONFIG_RTW89_PCI) += rtw89_pci.o
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index bd34e4bbe107..305dbbebff6b 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -231,7 +231,7 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
}
rtwvif = (struct rtw89_vif *)vif->drv_priv;
- addr_cam = &rtwvif->addr_cam;
+ addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
ret = rtw89_cam_get_addr_cam_key_idx(addr_cam, sec_cam, key, &key_idx);
if (ret) {
rtw89_err(rtwdev, "failed to get addr cam key idx %d, %d\n",
@@ -387,7 +387,7 @@ int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev,
}
rtwvif = (struct rtw89_vif *)vif->drv_priv;
- addr_cam = &rtwvif->addr_cam;
+ addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
sec_cam = addr_cam->sec_entries[key_idx];
if (!sec_cam)
return -EINVAL;
@@ -427,15 +427,23 @@ static void rtw89_cam_reset_key_iter(struct ieee80211_hw *hw,
rtw89_cam_deinit(rtwdev, rtwvif);
}
+void rtw89_cam_deinit_addr_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_addr_cam_entry *addr_cam)
+{
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+
+ addr_cam->valid = false;
+ clear_bit(addr_cam->addr_cam_idx, cam_info->addr_cam_map);
+}
+
void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
- addr_cam->valid = false;
+ rtw89_cam_deinit_addr_cam(rtwdev, addr_cam);
bssid_cam->valid = false;
- clear_bit(addr_cam->addr_cam_idx, cam_info->addr_cam_map);
clear_bit(bssid_cam->bssid_cam_idx, cam_info->bssid_cam_map);
}
@@ -464,10 +472,10 @@ static int rtw89_cam_get_avail_addr_cam(struct rtw89_dev *rtwdev,
return 0;
}
-static int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif)
+int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_addr_cam_entry *addr_cam,
+ const struct rtw89_bssid_cam_entry *bssid_cam)
{
- struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
u8 addr_cam_idx;
int i;
int ret;
@@ -484,14 +492,17 @@ static int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
addr_cam->valid = true;
addr_cam->addr_mask = 0;
addr_cam->mask_sel = RTW89_NO_MSK;
+ addr_cam->sec_ent_mode = RTW89_ADDR_CAM_SEC_NORMAL;
bitmap_zero(addr_cam->sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM);
- ether_addr_copy(addr_cam->sma, rtwvif->mac_addr);
for (i = 0; i < RTW89_SEC_CAM_IN_ADDR_CAM; i++) {
addr_cam->sec_ent_keyid[i] = 0;
addr_cam->sec_ent[i] = 0;
}
+ /* associate addr cam with bssid cam */
+ addr_cam->bssid_cam_idx = bssid_cam->bssid_cam_idx;
+
return 0;
}
@@ -549,21 +560,18 @@ int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
int ret;
- ret = rtw89_cam_init_addr_cam(rtwdev, rtwvif);
+ ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif);
if (ret) {
- rtw89_err(rtwdev, "failed to init addr cam\n");
+ rtw89_err(rtwdev, "failed to init bssid cam\n");
return ret;
}
- ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif);
+ ret = rtw89_cam_init_addr_cam(rtwdev, addr_cam, bssid_cam);
if (ret) {
- rtw89_err(rtwdev, "failed to init bssid cam\n");
+ rtw89_err(rtwdev, "failed to init addr cam\n");
return ret;
}
- /* associate addr cam with bssid cam */
- addr_cam->bssid_cam_idx = bssid_cam->bssid_cam_idx;
-
return 0;
}
@@ -609,7 +617,7 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
u8 *cmd)
{
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
- struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
+ struct rtw89_addr_cam_entry *addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
struct ieee80211_sta *sta = rtwsta_to_sta_safe(rtwsta);
const u8 *sma = scan_mac_addr ? scan_mac_addr : rtwvif->mac_addr;
u8 sma_hash, tma_hash, addr_msk_start;
diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h
index 33a3ad582b81..3a6a786530d1 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.h
+++ b/drivers/net/wireless/realtek/rtw89/cam.h
@@ -346,6 +346,11 @@ static inline void FWCMD_SET_ADDR_BSSID_BSSID5(void *cmd, u32 value)
int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
+int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_addr_cam_entry *addr_cam,
+ const struct rtw89_bssid_cam_entry *bssid_cam);
+void rtw89_cam_deinit_addr_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_addr_cam_entry *addr_cam);
void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
struct rtw89_vif *vif,
struct rtw89_sta *rtwsta,
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index 9f7d4f8d0c56..684583955511 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -594,7 +594,7 @@ static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
memset(&btc->dm, 0, sizeof(btc->dm));
memset(bt_linfo->rssi_state, 0, sizeof(bt_linfo->rssi_state));
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++)
+ for (i = 0; i < RTW89_PORT_NUM; i++)
memset(wl_linfo[i].rssi_state, 0,
sizeof(wl_linfo[i].rssi_state));
@@ -1478,7 +1478,7 @@ static void _set_gnt_wl(struct rtw89_dev *rtwdev, u8 phy_map, u8 state)
}
}
- rtw89_mac_cfg_gnt(rtwdev, &dm->gnt);
+ rtw89_chip_mac_cfg_gnt(rtwdev, &dm->gnt);
}
#define BTC_TDMA_WLROLE_MAX 2
@@ -1698,7 +1698,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
wl_rinfo->link_mode == BTC_WLINK_2G_SCC) {
en = true;
/* get p2p channel */
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) {
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
if (wl_rinfo->active_role[i].role ==
RTW89_WIFI_ROLE_P2P_GO ||
wl_rinfo->active_role[i].role ==
@@ -1711,7 +1711,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
} else {
en = true;
/* get 2g channel */
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) {
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
if (wl_rinfo->active_role[i].connected &&
wl_rinfo->active_role[i].band == RTW89_BAND_2G) {
ch = wl_rinfo->active_role[i].ch;
@@ -2233,7 +2233,7 @@ static void _set_gnt_bt(struct rtw89_dev *rtwdev, u8 phy_map, u8 state)
}
}
- rtw89_mac_cfg_gnt(rtwdev, &dm->gnt);
+ rtw89_chip_mac_cfg_gnt(rtwdev, &dm->gnt);
}
static void _set_bt_plut(struct rtw89_dev *rtwdev, u8 phy_map,
@@ -2300,7 +2300,7 @@ static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec,
switch (type) {
case BTC_ANT_WPOWERON:
- rtw89_mac_cfg_ctrl_path(rtwdev, false);
+ rtw89_chip_cfg_ctrl_path(rtwdev, false);
break;
case BTC_ANT_WINIT:
if (bt->enable.now) {
@@ -2310,21 +2310,21 @@ static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec,
_set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
_set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
}
- rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ rtw89_chip_cfg_ctrl_path(rtwdev, true);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_BT, BTC_PLT_BT);
break;
case BTC_ANT_WONLY:
_set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
_set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
- rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ rtw89_chip_cfg_ctrl_path(rtwdev, true);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_WOFF:
- rtw89_mac_cfg_ctrl_path(rtwdev, false);
+ rtw89_chip_cfg_ctrl_path(rtwdev, false);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_W2G:
- rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ rtw89_chip_cfg_ctrl_path(rtwdev, true);
if (rtwdev->dbcc_en) {
for (i = 0; i < RTW89_PHY_MAX; i++) {
b2g = (wl_dinfo->real_band[i] == RTW89_BAND_2G);
@@ -2352,32 +2352,32 @@ static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec,
}
break;
case BTC_ANT_W5G:
- rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ rtw89_chip_cfg_ctrl_path(rtwdev, true);
_set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
_set_gnt_bt(rtwdev, phy_map, BTC_GNT_HW);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_W25G:
- rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ rtw89_chip_cfg_ctrl_path(rtwdev, true);
_set_gnt_wl(rtwdev, phy_map, BTC_GNT_HW);
_set_gnt_bt(rtwdev, phy_map, BTC_GNT_HW);
_set_bt_plut(rtwdev, BTC_PHY_ALL,
BTC_PLT_GNT_WL, BTC_PLT_GNT_WL);
break;
case BTC_ANT_FREERUN:
- rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ rtw89_chip_cfg_ctrl_path(rtwdev, true);
_set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
_set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_HI);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_WRFK:
- rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ rtw89_chip_cfg_ctrl_path(rtwdev, true);
_set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
_set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
_set_bt_plut(rtwdev, phy_map, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_BRFK:
- rtw89_mac_cfg_ctrl_path(rtwdev, false);
+ rtw89_chip_cfg_ctrl_path(rtwdev, false);
_set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_LO);
_set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_HI);
_set_bt_plut(rtwdev, phy_map, BTC_PLT_NONE, BTC_PLT_NONE);
@@ -3287,7 +3287,7 @@ static void _update_wl_info(struct rtw89_dev *rtwdev)
memset(wl_rinfo, 0, sizeof(*wl_rinfo));
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) {
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
		/* check if the role is active */
if (!wl_linfo[i].active)
continue;
@@ -4370,6 +4370,7 @@ void rtw89_btc_ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_map,
rtwdev->is_bt_iqk_timeout = true;
}
}
+EXPORT_SYMBOL(rtw89_btc_ntfy_wl_rfk);
struct rtw89_btc_wl_sta_iter_data {
struct rtw89_dev *rtwdev;
@@ -4622,12 +4623,12 @@ static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
ver_hotfix = FIELD_GET(GENMASK(15, 8), chip->wlcx_desired);
seq_printf(m, "(%s, desired:%d.%d.%d), ",
(wl->ver_info.fw_coex >= chip->wlcx_desired ?
- "Match" : "Mis-Match"), ver_main, ver_sub, ver_hotfix);
+ "Match" : "Mismatch"), ver_main, ver_sub, ver_hotfix);
seq_printf(m, "BT_FW_coex:%d(%s, desired:%d)\n",
bt->ver_info.fw_coex,
(bt->ver_info.fw_coex >= chip->btcx_desired ?
- "Match" : "Mis-Match"), chip->btcx_desired);
+ "Match" : "Mismatch"), chip->btcx_desired);
if (bt->enable.now && bt->ver_info.fw == 0)
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, true);
@@ -4676,7 +4677,7 @@ static void _show_wl_role_info(struct rtw89_dev *rtwdev, struct seq_file *m)
wl_dinfo->real_band[RTW89_PHY_1]);
}
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) {
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
plink = &btc->cx.wl.link_info[i];
if (!plink->active)
@@ -5074,7 +5075,7 @@ static void _show_dm_info(struct rtw89_dev *rtwdev, struct seq_file *m)
seq_printf(m, "leak_ap:%d, fw_offload:%s%s\n", dm->leak_ap,
(BTC_CX_FW_OFFLOAD ? "Y" : "N"),
(dm->wl_fw_cx_offload == BTC_CX_FW_OFFLOAD ?
- "" : "(Mis-Match!!)"));
+ "" : "(Mismatch!!)"));
if (dm->rf_trx_para.wl_tx_power == 0xff)
seq_printf(m,
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index a0737eea9f81..bcefc968576e 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -4,6 +4,7 @@
#include <linux/ip.h>
#include <linux/udp.h>
+#include "cam.h"
#include "coex.h"
#include "core.h"
#include "efuse.h"
@@ -21,50 +22,122 @@ static bool rtw89_disable_ps_mode;
module_param_named(disable_ps_mode, rtw89_disable_ps_mode, bool, 0644);
MODULE_PARM_DESC(disable_ps_mode, "Set Y to disable low power mode");
+#define RTW89_DEF_CHAN(_freq, _hw_val, _flags, _band) \
+ { .center_freq = _freq, .hw_value = _hw_val, .flags = _flags, .band = _band, }
+#define RTW89_DEF_CHAN_2G(_freq, _hw_val) \
+ RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_2GHZ)
+#define RTW89_DEF_CHAN_5G(_freq, _hw_val) \
+ RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_5GHZ)
+#define RTW89_DEF_CHAN_5G_NO_HT40MINUS(_freq, _hw_val) \
+ RTW89_DEF_CHAN(_freq, _hw_val, IEEE80211_CHAN_NO_HT40MINUS, NL80211_BAND_5GHZ)
+#define RTW89_DEF_CHAN_6G(_freq, _hw_val) \
+ RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_6GHZ)
+
static struct ieee80211_channel rtw89_channels_2ghz[] = {
- { .center_freq = 2412, .hw_value = 1, },
- { .center_freq = 2417, .hw_value = 2, },
- { .center_freq = 2422, .hw_value = 3, },
- { .center_freq = 2427, .hw_value = 4, },
- { .center_freq = 2432, .hw_value = 5, },
- { .center_freq = 2437, .hw_value = 6, },
- { .center_freq = 2442, .hw_value = 7, },
- { .center_freq = 2447, .hw_value = 8, },
- { .center_freq = 2452, .hw_value = 9, },
- { .center_freq = 2457, .hw_value = 10, },
- { .center_freq = 2462, .hw_value = 11, },
- { .center_freq = 2467, .hw_value = 12, },
- { .center_freq = 2472, .hw_value = 13, },
- { .center_freq = 2484, .hw_value = 14, },
+ RTW89_DEF_CHAN_2G(2412, 1),
+ RTW89_DEF_CHAN_2G(2417, 2),
+ RTW89_DEF_CHAN_2G(2422, 3),
+ RTW89_DEF_CHAN_2G(2427, 4),
+ RTW89_DEF_CHAN_2G(2432, 5),
+ RTW89_DEF_CHAN_2G(2437, 6),
+ RTW89_DEF_CHAN_2G(2442, 7),
+ RTW89_DEF_CHAN_2G(2447, 8),
+ RTW89_DEF_CHAN_2G(2452, 9),
+ RTW89_DEF_CHAN_2G(2457, 10),
+ RTW89_DEF_CHAN_2G(2462, 11),
+ RTW89_DEF_CHAN_2G(2467, 12),
+ RTW89_DEF_CHAN_2G(2472, 13),
+ RTW89_DEF_CHAN_2G(2484, 14),
};
static struct ieee80211_channel rtw89_channels_5ghz[] = {
- {.center_freq = 5180, .hw_value = 36,},
- {.center_freq = 5200, .hw_value = 40,},
- {.center_freq = 5220, .hw_value = 44,},
- {.center_freq = 5240, .hw_value = 48,},
- {.center_freq = 5260, .hw_value = 52,},
- {.center_freq = 5280, .hw_value = 56,},
- {.center_freq = 5300, .hw_value = 60,},
- {.center_freq = 5320, .hw_value = 64,},
- {.center_freq = 5500, .hw_value = 100,},
- {.center_freq = 5520, .hw_value = 104,},
- {.center_freq = 5540, .hw_value = 108,},
- {.center_freq = 5560, .hw_value = 112,},
- {.center_freq = 5580, .hw_value = 116,},
- {.center_freq = 5600, .hw_value = 120,},
- {.center_freq = 5620, .hw_value = 124,},
- {.center_freq = 5640, .hw_value = 128,},
- {.center_freq = 5660, .hw_value = 132,},
- {.center_freq = 5680, .hw_value = 136,},
- {.center_freq = 5700, .hw_value = 140,},
- {.center_freq = 5720, .hw_value = 144,},
- {.center_freq = 5745, .hw_value = 149,},
- {.center_freq = 5765, .hw_value = 153,},
- {.center_freq = 5785, .hw_value = 157,},
- {.center_freq = 5805, .hw_value = 161,},
- {.center_freq = 5825, .hw_value = 165,
- .flags = IEEE80211_CHAN_NO_HT40MINUS},
+ RTW89_DEF_CHAN_5G(5180, 36),
+ RTW89_DEF_CHAN_5G(5200, 40),
+ RTW89_DEF_CHAN_5G(5220, 44),
+ RTW89_DEF_CHAN_5G(5240, 48),
+ RTW89_DEF_CHAN_5G(5260, 52),
+ RTW89_DEF_CHAN_5G(5280, 56),
+ RTW89_DEF_CHAN_5G(5300, 60),
+ RTW89_DEF_CHAN_5G(5320, 64),
+ RTW89_DEF_CHAN_5G(5500, 100),
+ RTW89_DEF_CHAN_5G(5520, 104),
+ RTW89_DEF_CHAN_5G(5540, 108),
+ RTW89_DEF_CHAN_5G(5560, 112),
+ RTW89_DEF_CHAN_5G(5580, 116),
+ RTW89_DEF_CHAN_5G(5600, 120),
+ RTW89_DEF_CHAN_5G(5620, 124),
+ RTW89_DEF_CHAN_5G(5640, 128),
+ RTW89_DEF_CHAN_5G(5660, 132),
+ RTW89_DEF_CHAN_5G(5680, 136),
+ RTW89_DEF_CHAN_5G(5700, 140),
+ RTW89_DEF_CHAN_5G(5720, 144),
+ RTW89_DEF_CHAN_5G(5745, 149),
+ RTW89_DEF_CHAN_5G(5765, 153),
+ RTW89_DEF_CHAN_5G(5785, 157),
+ RTW89_DEF_CHAN_5G(5805, 161),
+ RTW89_DEF_CHAN_5G_NO_HT40MINUS(5825, 165),
+};
+
+static struct ieee80211_channel rtw89_channels_6ghz[] = {
+ RTW89_DEF_CHAN_6G(5955, 1),
+ RTW89_DEF_CHAN_6G(5975, 5),
+ RTW89_DEF_CHAN_6G(5995, 9),
+ RTW89_DEF_CHAN_6G(6015, 13),
+ RTW89_DEF_CHAN_6G(6035, 17),
+ RTW89_DEF_CHAN_6G(6055, 21),
+ RTW89_DEF_CHAN_6G(6075, 25),
+ RTW89_DEF_CHAN_6G(6095, 29),
+ RTW89_DEF_CHAN_6G(6115, 33),
+ RTW89_DEF_CHAN_6G(6135, 37),
+ RTW89_DEF_CHAN_6G(6155, 41),
+ RTW89_DEF_CHAN_6G(6175, 45),
+ RTW89_DEF_CHAN_6G(6195, 49),
+ RTW89_DEF_CHAN_6G(6215, 53),
+ RTW89_DEF_CHAN_6G(6235, 57),
+ RTW89_DEF_CHAN_6G(6255, 61),
+ RTW89_DEF_CHAN_6G(6275, 65),
+ RTW89_DEF_CHAN_6G(6295, 69),
+ RTW89_DEF_CHAN_6G(6315, 73),
+ RTW89_DEF_CHAN_6G(6335, 77),
+ RTW89_DEF_CHAN_6G(6355, 81),
+ RTW89_DEF_CHAN_6G(6375, 85),
+ RTW89_DEF_CHAN_6G(6395, 89),
+ RTW89_DEF_CHAN_6G(6415, 93),
+ RTW89_DEF_CHAN_6G(6435, 97),
+ RTW89_DEF_CHAN_6G(6455, 101),
+ RTW89_DEF_CHAN_6G(6475, 105),
+ RTW89_DEF_CHAN_6G(6495, 109),
+ RTW89_DEF_CHAN_6G(6515, 113),
+ RTW89_DEF_CHAN_6G(6535, 117),
+ RTW89_DEF_CHAN_6G(6555, 121),
+ RTW89_DEF_CHAN_6G(6575, 125),
+ RTW89_DEF_CHAN_6G(6595, 129),
+ RTW89_DEF_CHAN_6G(6615, 133),
+ RTW89_DEF_CHAN_6G(6635, 137),
+ RTW89_DEF_CHAN_6G(6655, 141),
+ RTW89_DEF_CHAN_6G(6675, 145),
+ RTW89_DEF_CHAN_6G(6695, 149),
+ RTW89_DEF_CHAN_6G(6715, 153),
+ RTW89_DEF_CHAN_6G(6735, 157),
+ RTW89_DEF_CHAN_6G(6755, 161),
+ RTW89_DEF_CHAN_6G(6775, 165),
+ RTW89_DEF_CHAN_6G(6795, 169),
+ RTW89_DEF_CHAN_6G(6815, 173),
+ RTW89_DEF_CHAN_6G(6835, 177),
+ RTW89_DEF_CHAN_6G(6855, 181),
+ RTW89_DEF_CHAN_6G(6875, 185),
+ RTW89_DEF_CHAN_6G(6895, 189),
+ RTW89_DEF_CHAN_6G(6915, 193),
+ RTW89_DEF_CHAN_6G(6935, 197),
+ RTW89_DEF_CHAN_6G(6955, 201),
+ RTW89_DEF_CHAN_6G(6975, 205),
+ RTW89_DEF_CHAN_6G(6995, 209),
+ RTW89_DEF_CHAN_6G(7015, 213),
+ RTW89_DEF_CHAN_6G(7035, 217),
+ RTW89_DEF_CHAN_6G(7055, 221),
+ RTW89_DEF_CHAN_6G(7075, 225),
+ RTW89_DEF_CHAN_6G(7095, 229),
+ RTW89_DEF_CHAN_6G(7115, 233),
};
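The RTW89_DEF_CHAN_* helpers above just expand to designated initializers for struct ieee80211_channel, which keeps the 6 GHz table (and the rewritten 2/5 GHz tables) down to one line per channel. For illustration, a self-contained expansion using a pared-down stand-in for the channel struct (the real one lives in mac80211's <net/cfg80211.h> and has more members):

	/* What RTW89_DEF_CHAN_6G(5955, 1) expands to, with a reduced struct. */
	#include <stdio.h>

	enum band { BAND_2GHZ, BAND_5GHZ, BAND_6GHZ };

	struct channel {
		int center_freq;
		int hw_value;
		unsigned int flags;
		enum band band;
	};

	#define DEF_CHAN(_freq, _hw_val, _flags, _band) \
		{ .center_freq = _freq, .hw_value = _hw_val, .flags = _flags, .band = _band, }
	#define DEF_CHAN_6G(_freq, _hw_val) \
		DEF_CHAN(_freq, _hw_val, 0, BAND_6GHZ)

	static const struct channel chans_6ghz[] = {
		DEF_CHAN_6G(5955, 1),
		DEF_CHAN_6G(5975, 5),
	};

	int main(void)
	{
		printf("ch %d at %d MHz\n", chans_6ghz[0].hw_value,
		       chans_6ghz[0].center_freq);
		return 0;
	}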
static struct ieee80211_rate rtw89_bitrates[] = {
@@ -118,6 +191,16 @@ static struct ieee80211_supported_band rtw89_sband_5ghz = {
.vht_cap = {0},
};
+static struct ieee80211_supported_band rtw89_sband_6ghz = {
+ .band = NL80211_BAND_6GHZ,
+ .channels = rtw89_channels_6ghz,
+ .n_channels = ARRAY_SIZE(rtw89_channels_6ghz),
+
+ /* 6G has no CCK rates, 1M/2M/5.5M/11M */
+ .bitrates = rtw89_bitrates + 4,
+ .n_bitrates = ARRAY_SIZE(rtw89_bitrates) - 4,
+};
+
static void rtw89_traffic_stats_accu(struct rtw89_dev *rtwdev,
struct rtw89_traffic_stats *stats,
struct sk_buff *skb, bool tx)
@@ -149,6 +232,9 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
u8 center_chan;
u8 bandwidth = RTW89_CHANNEL_WIDTH_20;
u8 primary_chan_idx = 0;
+ u32 offset;
+ u8 band;
+ u8 subband;
center_chan = channel->hw_value;
primary_freq = channel->center_freq;
@@ -171,23 +257,16 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
}
break;
case NL80211_CHAN_WIDTH_80:
- bandwidth = RTW89_CHANNEL_WIDTH_80;
+ case NL80211_CHAN_WIDTH_160:
+ bandwidth = nl_to_rtw89_bandwidth(width);
if (primary_freq > center_freq) {
- if (primary_freq - center_freq == 10) {
- primary_chan_idx = RTW89_SC_20_UPPER;
- center_chan -= 2;
- } else {
- primary_chan_idx = RTW89_SC_20_UPMOST;
- center_chan -= 6;
- }
+ offset = (primary_freq - center_freq - 10) / 20;
+ primary_chan_idx = RTW89_SC_20_UPPER + offset * 2;
+ center_chan -= 2 + offset * 4;
} else {
- if (center_freq - primary_freq == 10) {
- primary_chan_idx = RTW89_SC_20_LOWER;
- center_chan += 2;
- } else {
- primary_chan_idx = RTW89_SC_20_LOWEST;
- center_chan += 6;
- }
+ offset = (center_freq - primary_freq - 10) / 20;
+ primary_chan_idx = RTW89_SC_20_LOWER + offset * 2;
+ center_chan += 2 + offset * 4;
}
break;
default:
@@ -195,10 +274,81 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
break;
}
+ switch (channel->band) {
+ default:
+ case NL80211_BAND_2GHZ:
+ band = RTW89_BAND_2G;
+ break;
+ case NL80211_BAND_5GHZ:
+ band = RTW89_BAND_5G;
+ break;
+ case NL80211_BAND_6GHZ:
+ band = RTW89_BAND_6G;
+ break;
+ }
+
+ switch (band) {
+ default:
+ case RTW89_BAND_2G:
+ switch (center_chan) {
+ default:
+ case 1 ... 14:
+ subband = RTW89_CH_2G;
+ break;
+ }
+ break;
+ case RTW89_BAND_5G:
+ switch (center_chan) {
+ default:
+ case 36 ... 64:
+ subband = RTW89_CH_5G_BAND_1;
+ break;
+ case 100 ... 144:
+ subband = RTW89_CH_5G_BAND_3;
+ break;
+ case 149 ... 177:
+ subband = RTW89_CH_5G_BAND_4;
+ break;
+ }
+ break;
+ case RTW89_BAND_6G:
+ switch (center_chan) {
+ default:
+ case 1 ... 29:
+ subband = RTW89_CH_6G_BAND_IDX0;
+ break;
+ case 33 ... 61:
+ subband = RTW89_CH_6G_BAND_IDX1;
+ break;
+ case 65 ... 93:
+ subband = RTW89_CH_6G_BAND_IDX2;
+ break;
+ case 97 ... 125:
+ subband = RTW89_CH_6G_BAND_IDX3;
+ break;
+ case 129 ... 157:
+ subband = RTW89_CH_6G_BAND_IDX4;
+ break;
+ case 161 ... 189:
+ subband = RTW89_CH_6G_BAND_IDX5;
+ break;
+ case 193 ... 221:
+ subband = RTW89_CH_6G_BAND_IDX6;
+ break;
+ case 225 ... 253:
+ subband = RTW89_CH_6G_BAND_IDX7;
+ break;
+ }
+ break;
+ }
+
chan_param->center_chan = center_chan;
+ chan_param->center_freq = center_freq;
chan_param->primary_chan = channel->hw_value;
chan_param->bandwidth = bandwidth;
chan_param->pri_ch_idx = primary_chan_idx;
+ chan_param->band_type = band;
+ chan_param->subband_type = subband;
}
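The rewritten 80 MHz branch generalizes the old upper/upmost special cases so it also covers 160 MHz: the 20 MHz subchannel offset from the center is offset = (|primary_freq - center_freq| - 10) / 20, the primary-channel index steps in twos from RTW89_SC_20_UPPER or RTW89_SC_20_LOWER, and the center channel number moves by 2 + 4*offset. A worked standalone check against one of the old 80 MHz cases:

	/* Worked example: an 80 MHz BSS centered on 5210 MHz (channel 42)
	 * with the primary 20 MHz at 5240 MHz (channel 48) gives
	 * offset = (5240 - 5210 - 10) / 20 = 1, so the index advances two
	 * steps past SC_20_UPPER and the center channel becomes
	 * 48 - (2 + 4) = 42 -- matching the old "UPMOST, -6" case.
	 */
	#include <stdio.h>

	int main(void)
	{
		int primary_freq = 5240, center_freq = 5210, center_chan = 48;
		int offset, idx_step;

		if (primary_freq > center_freq) {
			offset = (primary_freq - center_freq - 10) / 20;
			idx_step = offset * 2;	/* steps past RTW89_SC_20_UPPER */
			center_chan -= 2 + offset * 4;
		} else {
			offset = (center_freq - primary_freq - 10) / 20;
			idx_step = offset * 2;	/* steps past RTW89_SC_20_LOWER */
			center_chan += 2 + offset * 4;
		}
		printf("offset=%d idx_step=%d center_chan=%d\n",
		       offset, idx_step, center_chan);
		return 0;
	}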
void rtw89_set_channel(struct rtw89_dev *rtwdev)
@@ -209,7 +359,6 @@ void rtw89_set_channel(struct rtw89_dev *rtwdev)
struct rtw89_channel_params ch_param;
struct rtw89_channel_help_params bak;
u8 center_chan, bandwidth;
- u8 band_type;
bool band_changed;
rtw89_get_channel_params(&hw->conf.chandef, &ch_param);
@@ -218,30 +367,17 @@ void rtw89_set_channel(struct rtw89_dev *rtwdev)
center_chan = ch_param.center_chan;
bandwidth = ch_param.bandwidth;
- band_type = center_chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
- band_changed = hal->current_band_type != band_type ||
+ band_changed = hal->current_band_type != ch_param.band_type ||
hal->current_channel == 0;
hal->current_band_width = bandwidth;
hal->current_channel = center_chan;
+ hal->current_freq = ch_param.center_freq;
hal->prev_primary_channel = hal->current_primary_channel;
+ hal->prev_band_type = hal->current_band_type;
hal->current_primary_channel = ch_param.primary_chan;
- hal->current_band_type = band_type;
-
- switch (center_chan) {
- case 1 ... 14:
- hal->current_subband = RTW89_CH_2G;
- break;
- case 36 ... 64:
- hal->current_subband = RTW89_CH_5G_BAND_1;
- break;
- case 100 ... 144:
- hal->current_subband = RTW89_CH_5G_BAND_3;
- break;
- case 149 ... 177:
- hal->current_subband = RTW89_CH_5G_BAND_4;
- break;
- }
+ hal->current_band_type = ch_param.band_type;
+ hal->current_subband = ch_param.subband_type;
rtw89_chip_set_channel_prepare(rtwdev, &bak);
@@ -300,9 +436,11 @@ rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
struct ieee80211_vif *vif = tx_req->vif;
+ struct ieee80211_sta *sta = tx_req->sta;
struct ieee80211_tx_info *info;
struct ieee80211_key_conf *key;
struct rtw89_vif *rtwvif;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct rtw89_addr_cam_entry *addr_cam;
struct rtw89_sec_cam_entry *sec_cam;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
@@ -315,7 +453,7 @@ rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev,
}
rtwvif = (struct rtw89_vif *)vif->drv_priv;
- addr_cam = &rtwvif->addr_cam;
+ addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
info = IEEE80211_SKB_CB(skb);
key = info->control.hw_key;
@@ -377,14 +515,19 @@ static void
rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
+ struct ieee80211_vif *vif = tx_req->vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
u8 qsel, ch_dma;
- qsel = RTW89_TX_QSEL_B0_MGMT;
+ qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : RTW89_TX_QSEL_B0_MGMT;
ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
- desc_info->qsel = RTW89_TX_QSEL_B0_MGMT;
+ desc_info->qsel = qsel;
desc_info->ch_dma = ch_dma;
+ desc_info->port = desc_info->hiq ? rtwvif->port : 0;
+ desc_info->hw_ssn_sel = RTW89_MGMT_HW_SSN_SEL;
+ desc_info->hw_seq_mode = RTW89_MGMT_HW_SEQ_MODE;
/* fixed data rate for mgmt frames */
desc_info->en_wd_info = true;
@@ -520,6 +663,21 @@ desc_bk:
desc_info->bk = true;
}
+static u8 rtw89_core_tx_get_mac_id(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req)
+{
+ struct ieee80211_vif *vif = tx_req->vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct ieee80211_sta *sta = tx_req->sta;
+ struct rtw89_sta *rtwsta;
+
+ if (!sta)
+ return rtwvif->mac_id;
+
+ rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ return rtwsta->mac_id;
+}
+
static void
rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
@@ -535,12 +693,14 @@ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
tid_indicate = rtw89_core_get_tid_indicate(rtwdev, tid);
- qsel = rtw89_core_get_qsel(rtwdev, tid);
+ qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : rtw89_core_get_qsel(rtwdev, tid);
ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
desc_info->ch_dma = ch_dma;
desc_info->tid_indicate = tid_indicate;
desc_info->qsel = qsel;
+ desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req);
+ desc_info->port = desc_info->hiq ? rtwvif->port : 0;
/* enable wd_info for AMPDU */
desc_info->en_wd_info = true;
@@ -596,11 +756,28 @@ rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev,
}
static void
+rtw89_core_tx_wake(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req)
+{
+ if (!rtwdev->fw.tx_wake)
+ return;
+
+ if (!test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags))
+ return;
+
+ if (tx_req->tx_type != RTW89_CORE_TX_TYPE_MGMT)
+ return;
+
+ rtw89_mac_notify_wake(rtwdev);
+}
+
+static void
rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
struct sk_buff *skb = tx_req->skb;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (void *)skb->data;
enum rtw89_core_tx_type tx_type;
enum btc_pkt_type pkt_type;
@@ -619,6 +796,7 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
desc_info->pkt_size = skb->len;
desc_info->is_bmc = is_bmc;
desc_info->wd_page = true;
+ desc_info->hiq = info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM;
switch (tx_req->tx_type) {
case RTW89_CORE_TX_TYPE_MGMT:
@@ -691,6 +869,8 @@ int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
rtw89_traffic_stats_accu(rtwdev, &rtwdev->stats, skb, true);
rtw89_traffic_stats_accu(rtwdev, &rtwvif->stats, skb, true);
rtw89_core_tx_update_desc_info(rtwdev, &tx_req);
+ rtw89_core_tx_wake(rtwdev, &tx_req);
+
ret = rtw89_hci_tx_write(rtwdev, &tx_req);
if (ret) {
rtw89_err(rtwdev, "failed to transmit skb to HCI\n");
@@ -710,7 +890,9 @@ static __le32 rtw89_build_txwd_body0(struct rtw89_tx_desc_info *desc_info)
FIELD_PREP(RTW89_TXWD_BODY0_CHANNEL_DMA, desc_info->ch_dma) |
FIELD_PREP(RTW89_TXWD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) |
FIELD_PREP(RTW89_TXWD_BODY0_WD_PAGE, desc_info->wd_page) |
- FIELD_PREP(RTW89_TXWD_BODY0_FW_DL, desc_info->fw_dl);
+ FIELD_PREP(RTW89_TXWD_BODY0_FW_DL, desc_info->fw_dl) |
+ FIELD_PREP(RTW89_TXWD_BODY0_HW_SSN_SEL, desc_info->hw_ssn_sel) |
+ FIELD_PREP(RTW89_TXWD_BODY0_HW_SSN_MODE, desc_info->hw_seq_mode);
return cpu_to_le32(dword);
}
@@ -719,7 +901,8 @@ static __le32 rtw89_build_txwd_body2(struct rtw89_tx_desc_info *desc_info)
{
u32 dword = FIELD_PREP(RTW89_TXWD_BODY2_TID_INDICATE, desc_info->tid_indicate) |
FIELD_PREP(RTW89_TXWD_BODY2_QSEL, desc_info->qsel) |
- FIELD_PREP(RTW89_TXWD_BODY2_TXPKT_SIZE, desc_info->pkt_size);
+ FIELD_PREP(RTW89_TXWD_BODY2_TXPKT_SIZE, desc_info->pkt_size) |
+ FIELD_PREP(RTW89_TXWD_BODY2_MACID, desc_info->mac_id);
return cpu_to_le32(dword);
}
@@ -737,7 +920,8 @@ static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info)
{
u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_USE_RATE, desc_info->use_rate) |
FIELD_PREP(RTW89_TXWD_INFO0_DATA_RATE, desc_info->data_rate) |
- FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb);
+ FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
+ FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port);
return cpu_to_le32(dword);
}
@@ -996,13 +1180,7 @@ static bool rtw89_core_rx_ppdu_match(struct rtw89_dev *rtwdev,
rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode);
}
- if (desc_info->bw == RTW89_CHANNEL_WIDTH_80)
- bw = RATE_INFO_BW_80;
- else if (desc_info->bw == RTW89_CHANNEL_WIDTH_40)
- bw = RATE_INFO_BW_40;
- else
- bw = RATE_INFO_BW_20;
-
+ bw = rtw89_hw_to_rate_info_bw(desc_info->bw);
gi_ltf = rtw89_rxdesc_to_nl_he_gi(rtwdev, desc_info, false);
ret = rtwdev->ppdu_sts.curr_rx_ppdu_cnt[band] == desc_info->ppdu_cnt &&
status->rate_idx == rate_idx &&
@@ -1083,10 +1261,31 @@ static void rtw89_core_hw_to_sband_rate(struct ieee80211_rx_status *rx_status)
if (rx_status->band == NL80211_BAND_2GHZ ||
rx_status->encoding != RX_ENC_LEGACY)
return;
+
+ /* Some control frames' freq(ACKs in this case) are reported wrong due
+	/* Some control frames' frequencies (ACKs in this case) are reported
+	 * incorrectly due to FW notify timing; set the lowest rate and return
+	 * so the CCK-offset adjustment below cannot produce an out-of-range
+	 * rate index.
+	 */
+ if (rx_status->rate_idx < RTW89_HW_RATE_OFDM6) {
+ rx_status->rate_idx = 0;
+ return;
+ }
+
/* No 4 CCK rates for non-2G */
rx_status->rate_idx -= 4;
}
+static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct rtw89_rx_desc_info *desc_info,
+ struct sk_buff *skb_ppdu,
+ struct ieee80211_rx_status *rx_status)
+{
+ rtw89_core_hw_to_sband_rate(rx_status);
+ rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu);
+ ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi);
+ rtwdev->napi_budget_countdown--;
+}
+
static void rtw89_core_rx_pending_skb(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct rtw89_rx_desc_info *desc_info,
@@ -1106,10 +1305,7 @@ static void rtw89_core_rx_pending_skb(struct rtw89_dev *rtwdev,
if (rtw89_core_rx_ppdu_match(rtwdev, desc_info, rx_status))
rtw89_chip_query_ppdu(rtwdev, phy_ppdu, rx_status);
rtw89_correct_cck_chan(rtwdev, rx_status);
- rtw89_core_hw_to_sband_rate(rx_status);
- rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu);
- ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi);
- rtwdev->napi_budget_countdown--;
+ rtw89_core_rx_to_mac80211(rtwdev, phy_ppdu, desc_info, skb_ppdu, rx_status);
}
}
@@ -1250,6 +1446,7 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
struct ieee80211_rx_status *rx_status)
{
struct ieee80211_hw *hw = rtwdev->hw;
+ struct rtw89_hal *hal = &rtwdev->hal;
u16 data_rate;
u8 data_rate_mode;
@@ -1257,6 +1454,13 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
+ if (rtwdev->scanning && rtwdev->fw.scan_offload) {
+ rx_status->freq =
+ ieee80211_channel_to_frequency(hal->current_channel,
+ hal->current_band_type);
+ rx_status->band = rtwdev->hal.current_band_type;
+ }
+
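During an offloaded scan the hardware may be off the operating channel, so the reported frequency is recomputed from the channel the scan state machine is actually on. For reference, the channel-to-frequency mapping applied by ieee80211_channel_to_frequency() is simple arithmetic; a simplified sketch of the common 2.4/5 GHz cases (the kernel function also covers 6 GHz, 60 GHz, and edge cases):

#include <stdio.h>

/* simplified version of cfg80211's channel -> MHz mapping */
static int chan_to_freq(int chan, int band_5ghz)
{
	if (band_5ghz)
		return 5000 + chan * 5;	/* e.g. ch 36 -> 5180 MHz */
	if (chan == 14)
		return 2484;		/* Japanese ch 14 special case */
	return 2407 + chan * 5;		/* ch 1 -> 2412, ch 13 -> 2472 */
}

int main(void)
{
	printf("ch1  -> %d MHz\n", chan_to_freq(1, 0));
	printf("ch36 -> %d MHz\n", chan_to_freq(36, 1));
	return 0;
}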
if (desc_info->icv_err || desc_info->crc32_err)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -1264,12 +1468,7 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
!(desc_info->sw_dec || desc_info->icv_err))
rx_status->flag |= RX_FLAG_DECRYPTED;
- if (desc_info->bw == RTW89_CHANNEL_WIDTH_80)
- rx_status->bw = RATE_INFO_BW_80;
- else if (desc_info->bw == RTW89_CHANNEL_WIDTH_40)
- rx_status->bw = RATE_INFO_BW_40;
- else
- rx_status->bw = RATE_INFO_BW_20;
+ rx_status->bw = rtw89_hw_to_rate_info_bw(desc_info->bw);
data_rate = desc_info->data_rate;
data_rate_mode = GET_DATA_RATE_MODE(data_rate);
@@ -1334,10 +1533,7 @@ static void rtw89_core_flush_ppdu_rx_queue(struct rtw89_dev *rtwdev,
skb_queue_walk_safe(&ppdu_sts->rx_queue[band], skb_ppdu, tmp) {
skb_unlink(skb_ppdu, &ppdu_sts->rx_queue[band]);
rx_status = IEEE80211_SKB_RXCB(skb_ppdu);
- rtw89_core_hw_to_sband_rate(rx_status);
- rtw89_core_rx_stats(rtwdev, NULL, desc_info, skb_ppdu);
- ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi);
- rtwdev->napi_budget_countdown--;
+ rtw89_core_rx_to_mac80211(rtwdev, NULL, desc_info, skb_ppdu, rx_status);
}
}
@@ -1364,14 +1560,10 @@ void rtw89_core_rx(struct rtw89_dev *rtwdev,
memset(rx_status, 0, sizeof(*rx_status));
rtw89_core_update_rx_status(rtwdev, desc_info, rx_status);
if (desc_info->long_rxdesc &&
- BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP) {
+ BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP)
skb_queue_tail(&ppdu_sts->rx_queue[band], skb);
- } else {
- rtw89_core_hw_to_sband_rate(rx_status);
- rtw89_core_rx_stats(rtwdev, NULL, desc_info, skb);
- ieee80211_rx_napi(rtwdev->hw, NULL, skb, &rtwdev->napi);
- rtwdev->napi_budget_countdown--;
- }
+ else
+ rtw89_core_rx_to_mac80211(rtwdev, NULL, desc_info, skb, rx_status);
}
EXPORT_SYMBOL(rtw89_core_rx);
@@ -1509,11 +1701,12 @@ static void rtw89_core_txq_push(struct rtw89_dev *rtwdev,
unsigned long i;
int ret;
+ rcu_read_lock();
for (i = 0; i < frame_cnt; i++) {
skb = ieee80211_tx_dequeue_ni(rtwdev->hw, txq);
if (!skb) {
rtw89_debug(rtwdev, RTW89_DBG_TXRX, "dequeue a NULL skb\n");
- return;
+ goto out;
}
rtw89_core_txq_check_agg(rtwdev, rtwtxq, skb);
ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, NULL);
@@ -1523,6 +1716,8 @@ static void rtw89_core_txq_push(struct rtw89_dev *rtwdev,
break;
}
}
+out:
+ rcu_read_unlock();
}
static u32 rtw89_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, u8 tid)
@@ -1598,6 +1793,16 @@ static void rtw89_core_txq_schedule(struct rtw89_dev *rtwdev, u8 ac, bool *reinv
ieee80211_txq_schedule_end(hw, ac);
}
+static void rtw89_ips_work(struct work_struct *work)
+{
+ struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+ ips_work);
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_enter_ips(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+}
+
static void rtw89_core_txq_work(struct work_struct *w)
{
struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev, txq_work);
@@ -1770,6 +1975,51 @@ void rtw89_core_release_all_bits_map(unsigned long *addr, unsigned int nbits)
bitmap_zero(addr, nbits);
}
+int rtw89_core_acquire_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx)
+{
+ struct rtw89_ba_cam_entry *entry;
+ u8 idx;
+
+ idx = rtw89_core_acquire_bit_map(rtwsta->ba_cam_map, RTW89_BA_CAM_NUM);
+ if (idx == RTW89_BA_CAM_NUM) {
+		/* a static BA CAM entry is always allocated for tid 0, so if
+		 * the BA CAM is full, replace the existing entry; hardware
+		 * keeps processing the displaced tid automatically.
+		 */
+ if (tid != 0)
+ return -ENOSPC;
+
+ idx = 0;
+ }
+
+ entry = &rtwsta->ba_cam_entry[idx];
+ entry->tid = tid;
+ *cam_idx = idx;
+
+ return 0;
+}
+
+int rtw89_core_release_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx)
+{
+ struct rtw89_ba_cam_entry *entry;
+ int i;
+
+ for (i = 0; i < RTW89_BA_CAM_NUM; i++) {
+ if (!test_bit(i, rtwsta->ba_cam_map))
+ continue;
+
+ entry = &rtwsta->ba_cam_entry[i];
+ if (entry->tid != tid)
+ continue;
+
+ rtw89_core_release_bit_map(rtwsta->ba_cam_map, i);
+ *cam_idx = i;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
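The two helpers above implement a tiny fixed-size allocator with an eviction rule: a normal tid fails with -ENOSPC when both CAM slots are in use, but tid 0 may evict slot 0 because the hardware keeps servicing the displaced tid dynamically. A self-contained model of that policy (bitmap plus entries, no kernel dependencies):

#include <errno.h>
#include <stdio.h>

#define BA_CAM_NUM 2

struct ba_cam {
	unsigned long map;		/* one bit per slot */
	unsigned char tid[BA_CAM_NUM];
};

static int acquire(struct ba_cam *cam, unsigned char tid, unsigned char *idx)
{
	unsigned char i;

	for (i = 0; i < BA_CAM_NUM; i++)
		if (!(cam->map & (1UL << i)))
			break;
	if (i == BA_CAM_NUM) {
		if (tid != 0)
			return -ENOSPC;	/* only tid 0 may evict */
		i = 0;			/* replace slot 0 */
	}
	cam->map |= 1UL << i;
	cam->tid[i] = tid;
	*idx = i;
	return 0;
}

static int release(struct ba_cam *cam, unsigned char tid, unsigned char *idx)
{
	unsigned char i;

	for (i = 0; i < BA_CAM_NUM; i++) {
		if ((cam->map & (1UL << i)) && cam->tid[i] == tid) {
			cam->map &= ~(1UL << i);
			*idx = i;
			return 0;
		}
	}
	return -ENOENT;
}

int main(void)
{
	struct ba_cam cam = { 0 };
	unsigned char idx;
	int ret;

	acquire(&cam, 3, &idx);		/* slot 0 */
	acquire(&cam, 5, &idx);		/* slot 1, CAM now full */
	printf("tid 6 -> %d\n", acquire(&cam, 6, &idx));	/* -ENOSPC */
	ret = acquire(&cam, 0, &idx);
	printf("tid 0 -> %d (slot %u)\n", ret, (unsigned)idx);	/* evicts slot 0 */
	release(&cam, 5, &idx);
	return 0;
}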
#define RTW89_TYPE_MAPPING(_type) \
case NL80211_IFTYPE_ ## _type: \
rtwvif->wifi_role = RTW89_WIFI_ROLE_ ## _type; \
@@ -1838,6 +2088,9 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_CONN_START);
rtw89_chip_rfk_channel(rtwdev);
+ } else if (vif->type == NL80211_IFTYPE_AP) {
+ rtwsta->mac_id = rtw89_core_acquire_bit_map(rtwdev->mac_id_map,
+ RTW89_MAX_MAC_ID_NUM);
}
return 0;
@@ -1866,8 +2119,11 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
rtw89_mac_bf_monitor_calc(rtwdev, sta, true);
rtw89_mac_bf_disassoc(rtwdev, vif, sta);
rtw89_core_free_sta_pending_ba(rtwdev, sta);
+ if (vif->type == NL80211_IFTYPE_AP)
+ rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
- rtw89_vif_type_mapping(vif, false);
+ if (vif->type == NL80211_IFTYPE_STATION)
+ rtw89_vif_type_mapping(vif, false);
ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
if (ret) {
@@ -1875,14 +2131,22 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
return ret;
}
- ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, 1);
+ ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, rtwsta, true);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c join info\n");
return ret;
}
+ if (vif->type == NL80211_IFTYPE_AP) {
+ ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta, RTW89_ROLE_REMOVE);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c role info\n");
+ return ret;
+ }
+ }
+
/* update cam aid mac_id net_type */
- rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
+ ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c cam\n");
return ret;
@@ -1899,7 +2163,25 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
int ret;
- rtw89_vif_type_mapping(vif, true);
+ if (vif->type == NL80211_IFTYPE_AP) {
+ ret = rtw89_mac_set_macid_pause(rtwdev, rtwsta->mac_id, false);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c macid pause\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta, RTW89_ROLE_CREATE);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c role info\n");
+ return ret;
+ }
+
+ ret = rtw89_cam_init_addr_cam(rtwdev, &rtwsta->addr_cam, &rtwvif->bssid_cam);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c init addr cam\n");
+ return ret;
+ }
+ }
ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
if (ret) {
@@ -1907,7 +2189,7 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
return ret;
}
- ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, 0);
+ ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, rtwsta, false);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c join info\n");
return ret;
@@ -1950,6 +2232,8 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
if (vif->type == NL80211_IFTYPE_STATION)
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_DIS_CONN);
+ else if (vif->type == NL80211_IFTYPE_AP)
+ rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id);
return 0;
}
@@ -1986,9 +2270,14 @@ static void rtw89_init_ht_cap(struct rtw89_dev *rtwdev,
static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev,
struct ieee80211_sta_vht_cap *vht_cap)
{
- static const __le16 highest[RF_PATH_MAX] = {
+ static const __le16 highest_bw80[RF_PATH_MAX] = {
cpu_to_le16(433), cpu_to_le16(867), cpu_to_le16(1300), cpu_to_le16(1733),
};
+ static const __le16 highest_bw160[RF_PATH_MAX] = {
+ cpu_to_le16(867), cpu_to_le16(1733), cpu_to_le16(2600), cpu_to_le16(3467),
+ };
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const __le16 *highest = chip->support_bw160 ? highest_bw160 : highest_bw80;
struct rtw89_hal *hal = &rtwdev->hal;
u16 tx_mcs_map = 0, rx_mcs_map = 0;
u8 sts_cap = 3;
@@ -2017,6 +2306,9 @@ static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev,
vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
vht_cap->cap |= sts_cap << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ if (chip->support_bw160)
+ vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+ IEEE80211_VHT_CAP_SHORT_GI_160;
vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(rx_mcs_map);
vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(tx_mcs_map);
vht_cap->vht_mcs.rx_highest = highest[hal->rx_nss - 1];
@@ -2087,8 +2379,15 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
if (i == NL80211_IFTYPE_STATION)
mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
- phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
+ if (band == NL80211_BAND_2GHZ) {
+ phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ } else {
+ phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
+ if (chip->support_bw160)
+ phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+ }
phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
@@ -2117,6 +2416,9 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI |
IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996;
+ if (chip->support_bw160)
+ phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
@@ -2127,6 +2429,22 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(mcs_map);
he_cap->he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(mcs_map);
+ if (chip->support_bw160) {
+ he_cap->he_mcs_nss_supp.rx_mcs_160 = cpu_to_le16(mcs_map);
+ he_cap->he_mcs_nss_supp.tx_mcs_160 = cpu_to_le16(mcs_map);
+ }
+
+ if (band == NL80211_BAND_6GHZ) {
+ __le16 capa;
+
+ capa = le16_encode_bits(IEEE80211_HT_MPDU_DENSITY_NONE,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
+ le16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
+ le16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+ IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
+ iftype_data[idx].he_6ghz_capa.capa = capa;
+ }
idx++;
}
@@ -2139,34 +2457,52 @@ static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev)
{
struct ieee80211_hw *hw = rtwdev->hw;
struct ieee80211_supported_band *sband_2ghz = NULL, *sband_5ghz = NULL;
+ struct ieee80211_supported_band *sband_6ghz = NULL;
u32 size = sizeof(struct ieee80211_supported_band);
+ u8 support_bands = rtwdev->chip->support_bands;
- sband_2ghz = kmemdup(&rtw89_sband_2ghz, size, GFP_KERNEL);
- if (!sband_2ghz)
- goto err;
- rtw89_init_ht_cap(rtwdev, &sband_2ghz->ht_cap);
- rtw89_init_he_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz);
- hw->wiphy->bands[NL80211_BAND_2GHZ] = sband_2ghz;
+ if (support_bands & BIT(NL80211_BAND_2GHZ)) {
+ sband_2ghz = kmemdup(&rtw89_sband_2ghz, size, GFP_KERNEL);
+ if (!sband_2ghz)
+ goto err;
+ rtw89_init_ht_cap(rtwdev, &sband_2ghz->ht_cap);
+ rtw89_init_he_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz);
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = sband_2ghz;
+ }
- sband_5ghz = kmemdup(&rtw89_sband_5ghz, size, GFP_KERNEL);
- if (!sband_5ghz)
- goto err;
- rtw89_init_ht_cap(rtwdev, &sband_5ghz->ht_cap);
- rtw89_init_vht_cap(rtwdev, &sband_5ghz->vht_cap);
- rtw89_init_he_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz);
- hw->wiphy->bands[NL80211_BAND_5GHZ] = sband_5ghz;
+ if (support_bands & BIT(NL80211_BAND_5GHZ)) {
+ sband_5ghz = kmemdup(&rtw89_sband_5ghz, size, GFP_KERNEL);
+ if (!sband_5ghz)
+ goto err;
+ rtw89_init_ht_cap(rtwdev, &sband_5ghz->ht_cap);
+ rtw89_init_vht_cap(rtwdev, &sband_5ghz->vht_cap);
+ rtw89_init_he_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz);
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = sband_5ghz;
+ }
+
+ if (support_bands & BIT(NL80211_BAND_6GHZ)) {
+ sband_6ghz = kmemdup(&rtw89_sband_6ghz, size, GFP_KERNEL);
+ if (!sband_6ghz)
+ goto err;
+ rtw89_init_he_cap(rtwdev, NL80211_BAND_6GHZ, sband_6ghz);
+ hw->wiphy->bands[NL80211_BAND_6GHZ] = sband_6ghz;
+ }
return 0;
err:
hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
+ hw->wiphy->bands[NL80211_BAND_6GHZ] = NULL;
if (sband_2ghz)
kfree(sband_2ghz->iftype_data);
if (sband_5ghz)
kfree(sband_5ghz->iftype_data);
+ if (sband_6ghz)
+ kfree(sband_6ghz->iftype_data);
kfree(sband_2ghz);
kfree(sband_5ghz);
+ kfree(sband_6ghz);
return -ENOMEM;
}
@@ -2176,10 +2512,14 @@ static void rtw89_core_clr_supported_band(struct rtw89_dev *rtwdev)
kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]->iftype_data);
kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]->iftype_data);
+ if (hw->wiphy->bands[NL80211_BAND_6GHZ])
+ kfree(hw->wiphy->bands[NL80211_BAND_6GHZ]->iftype_data);
kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]);
kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]);
+ kfree(hw->wiphy->bands[NL80211_BAND_6GHZ]);
hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
+ hw->wiphy->bands[NL80211_BAND_6GHZ] = NULL;
}
static void rtw89_core_ppdu_sts_init(struct rtw89_dev *rtwdev)
@@ -2192,6 +2532,21 @@ static void rtw89_core_ppdu_sts_init(struct rtw89_dev *rtwdev)
rtwdev->ppdu_sts.curr_rx_ppdu_cnt[i] = U8_MAX;
}
+void rtw89_core_update_beacon_work(struct work_struct *work)
+{
+ struct rtw89_dev *rtwdev;
+ struct rtw89_vif *rtwvif = container_of(work, struct rtw89_vif,
+ update_beacon_work);
+
+ if (rtwvif->net_type != RTW89_NET_TYPE_AP_MODE)
+ return;
+
+ rtwdev = rtwvif->rtwdev;
+ mutex_lock(&rtwdev->mutex);
+ rtw89_fw_h2c_update_beacon(rtwdev, rtwvif);
+ mutex_unlock(&rtwdev->mutex);
+}
+
int rtw89_core_start(struct rtw89_dev *rtwdev)
{
int ret;
@@ -2278,10 +2633,16 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
int ret;
+ u8 band;
INIT_LIST_HEAD(&rtwdev->ba_list);
INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
INIT_LIST_HEAD(&rtwdev->early_h2c_list);
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
+ if (!(rtwdev->chip->support_bands & BIT(band)))
+ continue;
+ INIT_LIST_HEAD(&rtwdev->scan_info.pkt_list[band]);
+ }
INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work);
INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
@@ -2292,11 +2653,13 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
INIT_DELAYED_WORK(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
spin_lock_init(&rtwdev->ba_lock);
+ spin_lock_init(&rtwdev->rpwm_lock);
mutex_init(&rtwdev->mutex);
mutex_init(&rtwdev->rf_mutex);
rtwdev->total_sta_assoc = 0;
INIT_WORK(&rtwdev->c2h_work, rtw89_fw_c2h_work);
+ INIT_WORK(&rtwdev->ips_work, rtw89_ips_work);
skb_queue_head_init(&rtwdev->c2h_queue);
rtw89_core_ppdu_sts_init(rtwdev);
rtw89_traffic_stats_init(rtwdev, &rtwdev->stats);
@@ -2332,12 +2695,48 @@ void rtw89_core_deinit(struct rtw89_dev *rtwdev)
}
EXPORT_SYMBOL(rtw89_core_deinit);
+void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ const u8 *mac_addr, bool hw_scan)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ rtwdev->scanning = true;
+ rtw89_leave_lps(rtwdev);
+ if (hw_scan && rtwvif->net_type == RTW89_NET_TYPE_NO_LINK)
+ rtw89_leave_ips(rtwdev);
+
+ ether_addr_copy(rtwvif->mac_addr, mac_addr);
+ rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, hal->current_band_type);
+ rtw89_chip_rfk_scan(rtwdev, true);
+ rtw89_hci_recalc_int_mit(rtwdev);
+
+ rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, mac_addr);
+}
+
+void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif, bool hw_scan)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ ether_addr_copy(rtwvif->mac_addr, vif->addr);
+ rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
+
+ rtw89_chip_rfk_scan(rtwdev, false);
+ rtw89_btc_ntfy_scan_finish(rtwdev, RTW89_PHY_0);
+
+ rtwdev->scanning = false;
+ rtwdev->dig.bypass_dig = true;
+ if (hw_scan && rtwvif->net_type == RTW89_NET_TYPE_NO_LINK)
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->ips_work);
+}
+
static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u8 cv;
cv = rtw89_read32_mask(rtwdev, R_AX_SYS_CFG1, B_AX_CHIP_VER_MASK);
- if (cv <= CHIP_CBV) {
+ if (chip->chip_id == RTL8852A && cv <= CHIP_CBV) {
if (rtw89_read32(rtwdev, R_AX_GPIO0_7_FUNC_SEL) == RTW89_R32_DEAD)
cv = CHIP_CAV;
else
@@ -2347,6 +2746,13 @@ static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev)
rtwdev->hal.cv = cv;
}
+static void rtw89_core_setup_phycap(struct rtw89_dev *rtwdev)
+{
+ rtwdev->hal.support_cckpd =
+ !(rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv <= CHIP_CBV) &&
+ !(rtwdev->chip->chip_id == RTL8852B && rtwdev->hal.cv <= CHIP_CAV);
+}
+
static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
{
int ret;
@@ -2367,6 +2773,8 @@ static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
if (ret)
return ret;
+ rtw89_core_setup_phycap(rtwdev);
+
rtw89_mac_pwr_off(rtwdev);
return 0;
@@ -2438,13 +2846,18 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
- hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP);
hw->wiphy->available_antennas_tx = BIT(rtwdev->chip->rf_path_num) - 1;
hw->wiphy->available_antennas_rx = BIT(rtwdev->chip->rf_path_num) - 1;
hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ hw->wiphy->max_scan_ssids = RTW89_SCANOFLD_MAX_SSID;
+ hw->wiphy->max_scan_ie_len = RTW89_SCANOFLD_MAX_IE_LEN;
+
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
ret = rtw89_core_set_supported_band(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 7c84556ec4ad..771722132c53 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -13,9 +13,9 @@
#include <net/mac80211.h>
struct rtw89_dev;
+struct rtw89_pci_info;
extern const struct ieee80211_ops rtw89_ops;
-extern const struct rtw89_chip_info rtw8852a_chip_info;
#define MASKBYTE0 0xff
#define MASKBYTE1 0xff00
@@ -33,7 +33,6 @@ extern const struct rtw89_chip_info rtw8852a_chip_info;
#define MAX_RSSI 110
#define RSSI_FACTOR 1
#define RTW89_RSSI_RAW_TO_DBM(rssi) ((s8)((rssi) >> RSSI_FACTOR) - MAX_RSSI)
-#define RTW89_MAX_HW_PORT_NUM 5
#define RTW89_HTC_MASK_VARIANT GENMASK(1, 0)
#define RTW89_HTC_VARIANT_HE 3
@@ -63,6 +62,15 @@ enum rtw89_subband {
RTW89_CH_5G_BAND_3 = 3,
RTW89_CH_5G_BAND_4 = 4,
+ RTW89_CH_6G_BAND_IDX0, /* Low */
+ RTW89_CH_6G_BAND_IDX1, /* Low */
+ RTW89_CH_6G_BAND_IDX2, /* Mid */
+ RTW89_CH_6G_BAND_IDX3, /* Mid */
+ RTW89_CH_6G_BAND_IDX4, /* High */
+ RTW89_CH_6G_BAND_IDX5, /* High */
+ RTW89_CH_6G_BAND_IDX6, /* Ultra-high */
+ RTW89_CH_6G_BAND_IDX7, /* Ultra-high */
+
RTW89_SUBBAND_NR,
};
@@ -140,11 +148,11 @@ enum rtw89_wifi_role {
};
enum rtw89_upd_mode {
- RTW89_VIF_CREATE,
- RTW89_VIF_REMOVE,
- RTW89_VIF_TYPE_CHANGE,
- RTW89_VIF_INFO_CHANGE,
- RTW89_VIF_CON_DISCONN
+ RTW89_ROLE_CREATE,
+ RTW89_ROLE_REMOVE,
+ RTW89_ROLE_TYPE_CHANGE,
+ RTW89_ROLE_INFO_CHANGE,
+ RTW89_ROLE_CON_DISCONN
};
enum rtw89_self_role {
@@ -205,6 +213,7 @@ enum rtw89_port {
enum rtw89_band {
RTW89_BAND_2G = 0,
RTW89_BAND_5G = 1,
+ RTW89_BAND_6G = 2,
RTW89_BAND_MAX,
};
@@ -363,6 +372,25 @@ enum rtw89_hw_rate {
*/
#define RTW89_5G_CH_NUM 53
+/* 6G channels,
+ * 1, 3, 5, 7, 9, 11, 13, 15,
+ * 17, 19, 21, 23, 25, 27, 29, 33,
+ * 35, 37, 39, 41, 43, 45, 47, 49,
+ * 51, 53, 55, 57, 59, 61, 65, 67,
+ * 69, 71, 73, 75, 77, 79, 81, 83,
+ * 85, 87, 89, 91, 93, 97, 99, 101,
+ * 103, 105, 107, 109, 111, 113, 115, 117,
+ * 119, 121, 123, 125, 129, 131, 133, 135,
+ * 137, 139, 141, 143, 145, 147, 149, 151,
+ * 153, 155, 157, 161, 163, 165, 167, 169,
+ * 171, 173, 175, 177, 179, 181, 183, 185,
+ * 187, 189, 193, 195, 197, 199, 201, 203,
+ * 205, 207, 209, 211, 213, 215, 217, 219,
+ * 221, 225, 227, 229, 231, 233, 235, 237,
+ * 239, 241, 243, 245, 247, 249, 251, 253,
+ */
+#define RTW89_6G_CH_NUM 120
+
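The count of 120 can be checked against the list above: it enumerates the odd 6G channel numbers from 1 to 253 but omits 31, 63, 95, 127, 159, 191, and 223 (and 255 is not included), i.e. 127 odd values minus 7 gaps. A quick sketch verifying the arithmetic, assuming the skips follow exactly the 31 + 32k pattern visible in the list:

#include <stdio.h>

int main(void)
{
	int ch, n = 0;

	for (ch = 1; ch <= 253; ch += 2) {
		if ((ch - 31) % 32 == 0)	/* 31, 63, ..., 223 are absent */
			continue;
		n++;
	}
	printf("6G channel count = %d\n", n);	/* prints 120 */
	return 0;
}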
enum rtw89_rate_section {
RTW89_RS_CCK,
RTW89_RS_OFDM,
@@ -421,9 +449,6 @@ enum rtw89_regulation_type {
RTW89_REGD_NUM,
};
-extern const u8 rtw89_rs_idx_max[RTW89_RS_MAX];
-extern const u8 rtw89_rs_nss_max[RTW89_RS_MAX];
-
struct rtw89_txpwr_byrate {
s8 cck[RTW89_RATE_CCK_MAX];
s8 ofdm[RTW89_RATE_OFDM_MAX];
@@ -548,7 +573,8 @@ enum rtw89_ps_mode {
};
#define RTW89_2G_BW_NUM (RTW89_CHANNEL_WIDTH_40 + 1)
-#define RTW89_5G_BW_NUM (RTW89_CHANNEL_WIDTH_80 + 1)
+#define RTW89_5G_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1)
+#define RTW89_6G_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1)
#define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_80 + 1)
enum rtw89_ru_bandwidth {
@@ -564,19 +590,26 @@ enum rtw89_sc_offset {
RTW89_SC_20_LOWER = 2,
RTW89_SC_20_UPMOST = 3,
RTW89_SC_20_LOWEST = 4,
+ RTW89_SC_20_UP2X = 5,
+ RTW89_SC_20_LOW2X = 6,
+ RTW89_SC_20_UP3X = 7,
+ RTW89_SC_20_LOW3X = 8,
RTW89_SC_40_UPPER = 9,
RTW89_SC_40_LOWER = 10,
};
struct rtw89_channel_params {
u8 center_chan;
+ u32 center_freq;
u8 primary_chan;
u8 bandwidth;
u8 pri_ch_idx;
+ u8 band_type;
+ u8 subband_type;
};
struct rtw89_channel_help_params {
- u16 tx_en;
+ u32 tx_en;
};
struct rtw89_port_reg {
@@ -670,6 +703,7 @@ struct rtw89_rxdesc_long {
struct rtw89_tx_desc_info {
u16 pkt_size;
u8 wp_offset;
+ u8 mac_id;
u8 qsel;
u8 ch_dma;
u8 hdr_llc_len;
@@ -691,6 +725,12 @@ struct rtw89_tx_desc_info {
bool fw_dl;
u16 seq;
bool a_ctrl_bsr;
+ u8 hw_ssn_sel;
+#define RTW89_MGMT_HW_SSN_SEL 1
+ u8 hw_seq_mode;
+#define RTW89_MGMT_HW_SEQ_MODE 1
+ bool hiq;
+ u8 port;
};
struct rtw89_core_tx_request {
@@ -1048,7 +1088,7 @@ struct rtw89_btc_wl_role_info { /* struct size must be n*4 bytes */
u8 connect_cnt;
u8 link_mode;
union rtw89_btc_wl_role_info_map role_map;
- struct rtw89_btc_wl_active_role active_role[RTW89_MAX_HW_PORT_NUM];
+ struct rtw89_btc_wl_active_role active_role[RTW89_PORT_NUM];
};
struct rtw89_btc_wl_ver_info {
@@ -1151,7 +1191,7 @@ struct rtw89_btc_rf_para {
};
struct rtw89_btc_wl_info {
- struct rtw89_btc_wl_link_info link_info[RTW89_MAX_HW_PORT_NUM];
+ struct rtw89_btc_wl_link_info link_info[RTW89_PORT_NUM];
struct rtw89_btc_wl_rfk_info rfk_info;
struct rtw89_btc_wl_ver_info ver_info;
struct rtw89_btc_wl_afh_info afh_info;
@@ -1831,27 +1871,10 @@ struct rtw89_ra_report {
DECLARE_EWMA(rssi, 10, 16);
-struct rtw89_sta {
- u8 mac_id;
- bool disassoc;
- struct rtw89_vif *rtwvif;
- struct rtw89_ra_info ra;
- struct rtw89_ra_report ra_report;
- int max_agg_wait;
- u8 prev_rssi;
- struct ewma_rssi avg_rssi;
- struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
- struct ieee80211_rx_status rx_status;
- u16 rx_hw_rate;
- __le32 htc_template;
+#define RTW89_BA_CAM_NUM 2
- bool use_cfg_mask;
- struct cfg80211_bitrate_mask mask;
-
- bool cctl_tx_time;
- u32 ampdu_max_time:4;
- bool cctl_tx_retry_limit;
- u32 data_tx_cnt_lmt:6;
+struct rtw89_ba_cam_entry {
+ u8 tid;
};
#define RTW89_MAX_ADDR_CAM_NUM 128
@@ -1868,7 +1891,6 @@ struct rtw89_addr_cam_entry {
u8 wapi : 1;
u8 mask_sel : 2;
u8 bssid_cam_idx: 6;
- u8 sma[ETH_ALEN];
u8 sec_ent_mode;
DECLARE_BITMAP(sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM);
@@ -1898,6 +1920,33 @@ struct rtw89_sec_cam_entry {
u8 key[32];
};
+struct rtw89_sta {
+ u8 mac_id;
+ bool disassoc;
+ struct rtw89_vif *rtwvif;
+ struct rtw89_ra_info ra;
+ struct rtw89_ra_report ra_report;
+ int max_agg_wait;
+ u8 prev_rssi;
+ struct ewma_rssi avg_rssi;
+ struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
+ struct ieee80211_rx_status rx_status;
+ u16 rx_hw_rate;
+ __le32 htc_template;
+ struct rtw89_addr_cam_entry addr_cam; /* AP mode only */
+
+ bool use_cfg_mask;
+ struct cfg80211_bitrate_mask mask;
+
+ bool cctl_tx_time;
+ u32 ampdu_max_time:4;
+ bool cctl_tx_retry_limit;
+ u32 data_tx_cnt_lmt:6;
+
+ DECLARE_BITMAP(ba_cam_map, RTW89_BA_CAM_NUM);
+ struct rtw89_ba_cam_entry ba_cam_entry[RTW89_BA_CAM_NUM];
+};
+
struct rtw89_efuse {
bool valid;
u8 xtal_cap;
@@ -1915,6 +1964,7 @@ struct rtw89_phy_rate_pattern {
struct rtw89_vif {
struct list_head list;
+ struct rtw89_dev *rtwdev;
u8 mac_id;
u8 port;
u8 mac_addr[ETH_ALEN];
@@ -1936,11 +1986,14 @@ struct rtw89_vif {
bool wowlan_magic;
bool is_hesta;
bool last_a_ctrl;
+ struct work_struct update_beacon_work;
struct rtw89_addr_cam_entry addr_cam;
struct rtw89_bssid_cam_entry bssid_cam;
struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS];
struct rtw89_traffic_stats stats;
struct rtw89_phy_rate_pattern rate_pattern;
+ struct cfg80211_scan_request *scan_req;
+ struct ieee80211_scan_ies *scan_ies;
};
enum rtw89_lv1_rcvy_step {
@@ -2012,7 +2065,15 @@ struct rtw89_chip_ops {
struct ieee80211_rx_status *status);
void (*bb_ctrl_btc_preagc)(struct rtw89_dev *rtwdev, bool bt_en);
void (*set_txpwr_ul_tb_offset)(struct rtw89_dev *rtwdev,
- s16 pw_ofst, enum rtw89_mac_idx mac_idx);
+ s8 pw_ofst, enum rtw89_mac_idx mac_idx);
+ int (*pwr_on_func)(struct rtw89_dev *rtwdev);
+ int (*pwr_off_func)(struct rtw89_dev *rtwdev);
+ int (*cfg_ctrl_path)(struct rtw89_dev *rtwdev, bool wl);
+ int (*mac_cfg_gnt)(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
+ int (*stop_sch_tx)(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 *tx_en, enum rtw89_sch_tx_sel sel);
+ int (*resume_sch_tx)(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
void (*btc_set_rfe)(struct rtw89_dev *rtwdev);
void (*btc_init_cfg)(struct rtw89_dev *rtwdev);
@@ -2133,6 +2194,7 @@ struct rtw89_ple_quota {
u16 bb_rpt;
u16 wd_rel;
u16 cpu_io;
+ u16 tx_rpt;
};
struct rtw89_dle_mem {
@@ -2173,6 +2235,8 @@ struct rtw89_phy_table {
const struct rtw89_reg2_def *regs;
u32 n_regs;
enum rtw89_rf_path rf_path;
+ void (*config)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path, void *data);
};
struct rtw89_txpwr_table {
@@ -2182,6 +2246,21 @@ struct rtw89_txpwr_table {
const struct rtw89_txpwr_table *tbl);
};
+struct rtw89_page_regs {
+ u32 hci_fc_ctrl;
+ u32 ch_page_ctrl;
+ u32 ach_page_ctrl;
+ u32 ach_page_info;
+ u32 pub_page_info3;
+ u32 pub_page_ctrl1;
+ u32 pub_page_ctrl2;
+ u32 pub_page_info1;
+ u32 pub_page_info2;
+ u32 wp_page_ctrl1;
+ u32 wp_page_ctrl2;
+ u32 wp_page_info1;
+};
+
struct rtw89_chip_info {
enum rtw89_core_chip_id chip_id;
const struct rtw89_chip_ops *ops;
@@ -2192,6 +2271,8 @@ struct rtw89_chip_info {
const struct rtw89_hfc_param_ini *hfc_param_ini;
const struct rtw89_dle_mem *dle_mem;
u32 rf_base_addr[2];
+ u8 support_bands;
+ bool support_bw160;
u8 rf_path_num;
u8 tx_nss;
u8 rx_nss;
@@ -2203,6 +2284,8 @@ struct rtw89_chip_info {
u32 physical_efuse_size;
u32 logical_efuse_size;
u32 limit_efuse_size;
+ u32 dav_phy_efuse_size;
+ u32 dav_log_efuse_size;
u32 phycap_addr;
u32 phycap_size;
@@ -2219,10 +2302,15 @@ struct rtw89_chip_info {
const s8 (*txpwr_lmt_5g)[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[RTW89_RS_LMT_NUM][RTW89_BF_NUM]
[RTW89_REGD_NUM][RTW89_5G_CH_NUM];
+ const s8 (*txpwr_lmt_6g)[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_6G_CH_NUM];
const s8 (*txpwr_lmt_ru_2g)[RTW89_RU_NUM][RTW89_NTX_NUM]
[RTW89_REGD_NUM][RTW89_2G_CH_NUM];
const s8 (*txpwr_lmt_ru_5g)[RTW89_RU_NUM][RTW89_NTX_NUM]
[RTW89_REGD_NUM][RTW89_5G_CH_NUM];
+ const s8 (*txpwr_lmt_ru_6g)[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_6G_CH_NUM];
u8 txpwr_factor_rf;
u8 txpwr_factor_mac;
@@ -2245,6 +2333,24 @@ struct rtw89_chip_info {
u8 rf_para_dlink_num;
const struct rtw89_btc_rf_trx_para *rf_para_dlink;
u8 ps_mode_supported;
+
+ u32 hci_func_en_addr;
+ u32 h2c_ctrl_reg;
+ const u32 *h2c_regs;
+ u32 c2h_ctrl_reg;
+ const u32 *c2h_regs;
+ const struct rtw89_page_regs *page_regs;
+ const struct rtw89_reg_def *dcfo_comp;
+ u8 dcfo_comp_sft;
+};
+
+union rtw89_bus_info {
+ const struct rtw89_pci_info *pci;
+};
+
+struct rtw89_driver_info {
+ const struct rtw89_chip_info *chip;
+ union rtw89_bus_info bus;
};
enum rtw89_hcifc_mode {
@@ -2312,6 +2418,8 @@ struct rtw89_fw_info {
struct rtw89_fw_suit wowlan;
bool fw_log_enable;
bool old_ht_ra_format;
+ bool scan_offload;
+ bool tx_wake;
};
struct rtw89_cam_info {
@@ -2348,19 +2456,23 @@ struct rtw89_hal {
u32 rx_fltr;
u8 cv;
u8 current_channel;
+ u32 current_freq;
u8 prev_primary_channel;
u8 current_primary_channel;
enum rtw89_subband current_subband;
u8 current_band_width;
+ u8 prev_band_type;
u8 current_band_type;
u32 sw_amsdu_max_size;
u32 antenna_tx;
u32 antenna_rx;
u8 tx_nss;
u8 rx_nss;
+ bool support_cckpd;
};
#define RTW89_MAX_MAC_ID_NUM 128
+#define RTW89_MAX_PKT_OFLD_NUM 255
enum rtw89_flags {
RTW89_FLAG_POWERON,
@@ -2556,22 +2668,30 @@ struct rtw89_cfo_tracking_info {
s32 residual_cfo_acc;
u8 phy_cfotrk_state;
u8 phy_cfotrk_cnt;
+ bool divergence_lock_en;
+ u8 x_cap_lb;
+ u8 x_cap_ub;
+ u8 lock_cnt;
};
/* 2GL, 2GH, 5GL1, 5GH1, 5GM1, 5GM2, 5GH1, 5GH2 */
#define TSSI_TRIM_CH_GROUP_NUM 8
+#define TSSI_TRIM_CH_GROUP_NUM_6G 16
#define TSSI_CCK_CH_GROUP_NUM 6
#define TSSI_MCS_2G_CH_GROUP_NUM 5
#define TSSI_MCS_5G_CH_GROUP_NUM 14
+#define TSSI_MCS_6G_CH_GROUP_NUM 32
#define TSSI_MCS_CH_GROUP_NUM \
(TSSI_MCS_2G_CH_GROUP_NUM + TSSI_MCS_5G_CH_GROUP_NUM)
struct rtw89_tssi_info {
u8 thermal[RF_PATH_MAX];
s8 tssi_trim[RF_PATH_MAX][TSSI_TRIM_CH_GROUP_NUM];
+ s8 tssi_trim_6g[RF_PATH_MAX][TSSI_TRIM_CH_GROUP_NUM_6G];
s8 tssi_cck[RF_PATH_MAX][TSSI_CCK_CH_GROUP_NUM];
s8 tssi_mcs[RF_PATH_MAX][TSSI_MCS_CH_GROUP_NUM];
+ s8 tssi_6g_mcs[RF_PATH_MAX][TSSI_MCS_6G_CH_GROUP_NUM];
s8 extra_ofst[RF_PATH_MAX];
bool tssi_tracking_check[RF_PATH_MAX];
u8 default_txagc_offset[RF_PATH_MAX];
@@ -2769,12 +2889,23 @@ struct rtw89_early_h2c {
u16 h2c_len;
};
+struct rtw89_hw_scan_info {
+ struct ieee80211_vif *scanning_vif;
+ struct list_head pkt_list[NUM_NL80211_BANDS];
+ u8 op_pri_ch;
+ u8 op_chan;
+ u8 op_bw;
+ u8 op_band;
+};
+
struct rtw89_dev {
struct ieee80211_hw *hw;
struct device *dev;
bool dbcc_en;
+ struct rtw89_hw_scan_info scan_info;
const struct rtw89_chip_info *chip;
+ const struct rtw89_pci_info *pci_info;
struct rtw89_hal hal;
struct rtw89_mac_info mac;
struct rtw89_fw_info fw;
@@ -2795,19 +2926,23 @@ struct rtw89_dev {
/* txqs to setup ba session */
struct list_head ba_list;
struct work_struct ba_work;
+ /* used to protect rpwm */
+ spinlock_t rpwm_lock;
struct rtw89_cam_info cam_info;
struct sk_buff_head c2h_queue;
struct work_struct c2h_work;
+ struct work_struct ips_work;
struct list_head early_h2c_list;
struct rtw89_ser ser;
- DECLARE_BITMAP(hw_port, RTW89_MAX_HW_PORT_NUM);
+ DECLARE_BITMAP(hw_port, RTW89_PORT_NUM);
DECLARE_BITMAP(mac_id_map, RTW89_MAX_MAC_ID_NUM);
DECLARE_BITMAP(flags, NUM_OF_RTW89_FLAGS);
+ DECLARE_BITMAP(pkt_offload, RTW89_MAX_PKT_OFLD_NUM);
struct rtw89_phy_stat phystat;
struct rtw89_dack_info dack;
@@ -2847,7 +2982,7 @@ struct rtw89_dev {
int napi_budget_countdown;
/* HCI related data, keep last */
- u8 priv[0] __aligned(sizeof(void *));
+ u8 priv[] __aligned(sizeof(void *));
};
static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev,
@@ -3128,6 +3263,46 @@ static inline struct rtw89_sta *sta_to_rtwsta_safe(struct ieee80211_sta *sta)
return sta ? (struct rtw89_sta *)sta->drv_priv : NULL;
}
+static inline u8 rtw89_hw_to_rate_info_bw(enum rtw89_bandwidth hw_bw)
+{
+ if (hw_bw == RTW89_CHANNEL_WIDTH_160)
+ return RATE_INFO_BW_160;
+ else if (hw_bw == RTW89_CHANNEL_WIDTH_80)
+ return RATE_INFO_BW_80;
+ else if (hw_bw == RTW89_CHANNEL_WIDTH_40)
+ return RATE_INFO_BW_40;
+ else
+ return RATE_INFO_BW_20;
+}
+
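This helper replaces duplicated if-else ladders in the RX paths earlier in this patch (the PPDU-matching and RX-status-update sites both mapped hardware bandwidth to mac80211's rate_info the same way). A minimal check of the mapping shape, with stand-in enum values (the real RTW89_CHANNEL_WIDTH_* and RATE_INFO_BW_* constants live in kernel headers):

#include <stdio.h>

enum hw_bw { HW_BW_20, HW_BW_40, HW_BW_80, HW_BW_160 };
enum rate_bw { RATE_BW_20, RATE_BW_40, RATE_BW_80, RATE_BW_160 };

/* same shape as rtw89_hw_to_rate_info_bw(): widest first, 20 MHz default */
static enum rate_bw hw_to_rate_info_bw(enum hw_bw bw)
{
	if (bw == HW_BW_160)
		return RATE_BW_160;
	else if (bw == HW_BW_80)
		return RATE_BW_80;
	else if (bw == HW_BW_40)
		return RATE_BW_40;
	return RATE_BW_20;
}

int main(void)
{
	printf("%d %d\n", hw_to_rate_info_bw(HW_BW_160),
	       hw_to_rate_info_bw(HW_BW_20));
	return 0;
}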
+static inline
+enum rtw89_bandwidth nl_to_rtw89_bandwidth(enum nl80211_chan_width width)
+{
+ switch (width) {
+ default:
+ WARN(1, "Not support bandwidth %d\n", width);
+ fallthrough;
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ return RTW89_CHANNEL_WIDTH_20;
+ case NL80211_CHAN_WIDTH_40:
+ return RTW89_CHANNEL_WIDTH_40;
+ case NL80211_CHAN_WIDTH_80:
+ return RTW89_CHANNEL_WIDTH_80;
+ case NL80211_CHAN_WIDTH_160:
+ return RTW89_CHANNEL_WIDTH_160;
+ }
+}
+
+static inline
+struct rtw89_addr_cam_entry *rtw89_get_addr_cam_of(struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE && rtwsta)
+ return &rtwsta->addr_cam;
+ return &rtwvif->addr_cam;
+}
+
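The helper above encodes a per-mode ownership rule: in AP mode every associated station carries its own address CAM entry, while a non-AP vif (or a lookup without a peer) falls back to the vif-level entry. A toy illustration of the same dispatch, with types reduced to the fields involved:

#include <stdbool.h>
#include <stdio.h>

struct addr_cam { int idx; };
struct sta { struct addr_cam addr_cam; };
struct vif {
	bool ap_mode;
	struct addr_cam addr_cam;
};

static struct addr_cam *get_addr_cam_of(struct vif *vif, struct sta *sta)
{
	if (vif->ap_mode && sta)
		return &sta->addr_cam;	/* per-station entry in AP mode */
	return &vif->addr_cam;		/* vif-level entry otherwise */
}

int main(void)
{
	struct vif ap = { .ap_mode = true, .addr_cam = { .idx = 0 } };
	struct sta client = { .addr_cam = { .idx = 5 } };

	printf("%d\n", get_addr_cam_of(&ap, &client)->idx);	/* 5 */
	printf("%d\n", get_addr_cam_of(&ap, NULL)->idx);	/* 0 */
	return 0;
}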
static inline
void rtw89_chip_set_channel_prepare(struct rtw89_dev *rtwdev,
struct rtw89_channel_help_params *p)
@@ -3298,6 +3473,39 @@ static inline void rtw89_ctrl_btg(struct rtw89_dev *rtwdev, bool btg)
chip->ops->ctrl_btg(rtwdev, btg);
}
+static inline
+void rtw89_chip_mac_cfg_gnt(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ chip->ops->mac_cfg_gnt(rtwdev, gnt_cfg);
+}
+
+static inline void rtw89_chip_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ chip->ops->cfg_ctrl_path(rtwdev, wl);
+}
+
+static inline
+int rtw89_chip_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 *tx_en, enum rtw89_sch_tx_sel sel)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->stop_sch_tx(rtwdev, mac_idx, tx_en, sel);
+}
+
+static inline
+int rtw89_chip_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->resume_sch_tx(rtwdev, mac_idx, tx_en);
+}
+
static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
{
__le16 fc = hdr->frame_control;
@@ -3371,6 +3579,8 @@ void rtw89_set_channel(struct rtw89_dev *rtwdev);
u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size);
void rtw89_core_release_bit_map(unsigned long *addr, u8 bit);
void rtw89_core_release_all_bits_map(unsigned long *addr, unsigned int nbits);
+int rtw89_core_acquire_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx);
+int rtw89_core_release_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx);
void rtw89_vif_type_mapping(struct ieee80211_vif *vif, bool assoc);
int rtw89_chip_info_setup(struct rtw89_dev *rtwdev);
u16 rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate);
@@ -3381,5 +3591,10 @@ void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
struct rtw89_traffic_stats *stats);
int rtw89_core_start(struct rtw89_dev *rtwdev);
void rtw89_core_stop(struct rtw89_dev *rtwdev);
+void rtw89_core_update_beacon_work(struct work_struct *work);
+void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ const u8 *mac_addr, bool hw_scan);
+void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif, bool hw_scan);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 22bd1d03e722..b73cc03cecfd 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -2324,16 +2324,17 @@ rtw89_debug_append_rx_rate(struct seq_file *m, struct rtw89_pkt_stat *pkt_stat,
static const struct rtw89_rx_rate_cnt_info {
enum rtw89_hw_rate first_rate;
int len;
+ int ext;
const char *rate_mode;
} rtw89_rx_rate_cnt_infos[] = {
- {RTW89_HW_RATE_CCK1, 4, "Legacy:"},
- {RTW89_HW_RATE_OFDM6, 8, "OFDM:"},
- {RTW89_HW_RATE_MCS0, 8, "HT 0:"},
- {RTW89_HW_RATE_MCS8, 8, "HT 1:"},
- {RTW89_HW_RATE_VHT_NSS1_MCS0, 10, "VHT 1SS:"},
- {RTW89_HW_RATE_VHT_NSS2_MCS0, 10, "VHT 2SS:"},
- {RTW89_HW_RATE_HE_NSS1_MCS0, 12, "HE 1SS:"},
- {RTW89_HW_RATE_HE_NSS2_MCS0, 12, "HE 2ss:"},
+ {RTW89_HW_RATE_CCK1, 4, 0, "Legacy:"},
+ {RTW89_HW_RATE_OFDM6, 8, 0, "OFDM:"},
+ {RTW89_HW_RATE_MCS0, 8, 0, "HT 0:"},
+ {RTW89_HW_RATE_MCS8, 8, 0, "HT 1:"},
+ {RTW89_HW_RATE_VHT_NSS1_MCS0, 10, 2, "VHT 1SS:"},
+ {RTW89_HW_RATE_VHT_NSS2_MCS0, 10, 2, "VHT 2SS:"},
+ {RTW89_HW_RATE_HE_NSS1_MCS0, 12, 0, "HE 1SS:"},
+	{RTW89_HW_RATE_HE_NSS2_MCS0, 12, 0, "HE 2SS:"},
};
static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v)
@@ -2358,6 +2359,11 @@ static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v)
seq_printf(m, "%10s [", info->rate_mode);
rtw89_debug_append_rx_rate(m, pkt_stat,
info->first_rate, info->len);
+ if (info->ext) {
+ seq_puts(m, "][");
+ rtw89_debug_append_rx_rate(m, pkt_stat,
+ info->first_rate + info->len, info->ext);
+ }
seq_puts(m, "]\n");
}
@@ -2366,6 +2372,72 @@ static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v)
return 0;
}
+static void rtw89_dump_addr_cam(struct seq_file *m,
+ struct rtw89_addr_cam_entry *addr_cam)
+{
+ struct rtw89_sec_cam_entry *sec_entry;
+ int i;
+
+ seq_printf(m, "\taddr_cam_idx=%u\n", addr_cam->addr_cam_idx);
+ seq_printf(m, "\t-> bssid_cam_idx=%u\n", addr_cam->bssid_cam_idx);
+ seq_printf(m, "\tsec_cam_bitmap=%*ph\n", (int)sizeof(addr_cam->sec_cam_map),
+ addr_cam->sec_cam_map);
+ for (i = 0; i < RTW89_SEC_CAM_IN_ADDR_CAM; i++) {
+ sec_entry = addr_cam->sec_entries[i];
+ if (!sec_entry)
+ continue;
+ seq_printf(m, "\tsec[%d]: sec_cam_idx %u", i, sec_entry->sec_cam_idx);
+ if (sec_entry->ext_key)
+ seq_printf(m, ", %u", sec_entry->sec_cam_idx + 1);
+ seq_puts(m, "\n");
+ }
+}
+
+static
+void rtw89_vif_ids_get_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct seq_file *m = (struct seq_file *)data;
+ struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
+
+ seq_printf(m, "VIF [%d] %pM\n", rtwvif->mac_id, rtwvif->mac_addr);
+ seq_printf(m, "\tbssid_cam_idx=%u\n", bssid_cam->bssid_cam_idx);
+ rtw89_dump_addr_cam(m, &rtwvif->addr_cam);
+}
+
+static void rtw89_sta_ids_get_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct seq_file *m = (struct seq_file *)data;
+
+ seq_printf(m, "STA [%d] %pM\n", rtwsta->mac_id, sta->addr);
+ rtw89_dump_addr_cam(m, &rtwsta->addr_cam);
+}
+
+static int rtw89_debug_priv_stations_get(struct seq_file *m, void *v)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+
+ seq_puts(m, "map:\n");
+ seq_printf(m, "\tmac_id: %*ph\n", (int)sizeof(rtwdev->mac_id_map),
+ rtwdev->mac_id_map);
+ seq_printf(m, "\taddr_cam: %*ph\n", (int)sizeof(cam_info->addr_cam_map),
+ cam_info->addr_cam_map);
+ seq_printf(m, "\tbssid_cam: %*ph\n", (int)sizeof(cam_info->bssid_cam_map),
+ cam_info->bssid_cam_map);
+ seq_printf(m, "\tsec_cam: %*ph\n", (int)sizeof(cam_info->sec_cam_map),
+ cam_info->sec_cam_map);
+
+ ieee80211_iterate_active_interfaces_atomic(rtwdev->hw,
+ IEEE80211_IFACE_ITER_NORMAL, rtw89_vif_ids_get_iter, m);
+
+ ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_ids_get_iter, m);
+
+ return 0;
+}
+
static struct rtw89_debugfs_priv rtw89_debug_priv_read_reg = {
.cb_read = rtw89_debug_priv_read_reg_get,
.cb_write = rtw89_debug_priv_read_reg_select,
@@ -2432,6 +2504,10 @@ static struct rtw89_debugfs_priv rtw89_debug_priv_phy_info = {
.cb_read = rtw89_debug_priv_phy_info_get,
};
+static struct rtw89_debugfs_priv rtw89_debug_priv_stations = {
+ .cb_read = rtw89_debug_priv_stations_get,
+};
+
#define rtw89_debugfs_add(name, mode, fopname, parent) \
do { \
rtw89_debug_priv_ ##name.rtwdev = rtwdev; \
@@ -2470,6 +2546,7 @@ void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
rtw89_debugfs_add_w(btc_manual);
rtw89_debugfs_add_w(fw_log_manual);
rtw89_debugfs_add_r(phy_info);
+ rtw89_debugfs_add_r(stations);
}
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h
index f14b726c1a9f..1745815f5e00 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.h
+++ b/drivers/net/wireless/realtek/rtw89/debug.h
@@ -23,6 +23,7 @@ enum rtw89_debug_mask {
RTW89_DBG_FW = BIT(12),
RTW89_DBG_BTC = BIT(13),
RTW89_DBG_BF = BIT(14),
+ RTW89_DBG_HW_SCAN = BIT(15),
};
enum rtw89_debug_mac_reg_sel {
diff --git a/drivers/net/wireless/realtek/rtw89/efuse.c b/drivers/net/wireless/realtek/rtw89/efuse.c
index c0b80f3da56c..7bd4f8558e03 100644
--- a/drivers/net/wireless/realtek/rtw89/efuse.c
+++ b/drivers/net/wireless/realtek/rtw89/efuse.c
@@ -4,6 +4,7 @@
#include "debug.h"
#include "efuse.h"
+#include "mac.h"
#include "reg.h"
enum rtw89_efuse_bank {
@@ -16,6 +17,9 @@ static int rtw89_switch_efuse_bank(struct rtw89_dev *rtwdev,
{
u8 val;
+ if (rtwdev->chip->chip_id != RTL8852A)
+ return 0;
+
val = rtw89_read32_mask(rtwdev, R_AX_EFUSE_CTRL_1,
B_AX_EF_CELL_SEL_MASK);
if (bank == val)
@@ -32,14 +36,61 @@ static int rtw89_switch_efuse_bank(struct rtw89_dev *rtwdev,
return -EBUSY;
}
-static int rtw89_dump_physical_efuse_map(struct rtw89_dev *rtwdev, u8 *map,
- u32 dump_addr, u32 dump_size)
+static void rtw89_enable_otp_burst_mode(struct rtw89_dev *rtwdev, bool en)
+{
+ if (en)
+ rtw89_write32_set(rtwdev, R_AX_EFUSE_CTRL_1_V1, B_AX_EF_BURST);
+ else
+ rtw89_write32_clr(rtwdev, R_AX_EFUSE_CTRL_1_V1, B_AX_EF_BURST);
+}
+
+static void rtw89_enable_efuse_pwr_cut_ddv(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ if (chip_id == RTL8852A)
+ return;
+
+ rtw89_write8_set(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+ rtw89_write16_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B14);
+
+ fsleep(1000);
+
+ rtw89_write16_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B15);
+ rtw89_write16_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_ISO_EB2CORE);
+ if (chip_id == RTL8852B && hal->cv == CHIP_CAV)
+ rtw89_enable_otp_burst_mode(rtwdev, true);
+}
+
+static void rtw89_disable_efuse_pwr_cut_ddv(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ if (chip_id == RTL8852A)
+ return;
+
+ if (chip_id == RTL8852B && hal->cv == CHIP_CAV)
+ rtw89_enable_otp_burst_mode(rtwdev, false);
+
+ rtw89_write16_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_ISO_EB2CORE);
+ rtw89_write16_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B15);
+
+ fsleep(1000);
+
+ rtw89_write16_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B14);
+ rtw89_write8_clr(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+}
+
+static int rtw89_dump_physical_efuse_map_ddv(struct rtw89_dev *rtwdev, u8 *map,
+ u32 dump_addr, u32 dump_size)
{
u32 efuse_ctl;
u32 addr;
int ret;
- rtw89_switch_efuse_bank(rtwdev, RTW89_EFUSE_BANK_WIFI);
+ rtw89_enable_efuse_pwr_cut_ddv(rtwdev);
for (addr = dump_addr; addr < dump_addr + dump_size; addr++) {
efuse_ctl = u32_encode_bits(addr, B_AX_EF_ADDR_MASK);
@@ -54,6 +105,74 @@ static int rtw89_dump_physical_efuse_map(struct rtw89_dev *rtwdev, u8 *map,
*map++ = (u8)(efuse_ctl & 0xff);
}
+ rtw89_disable_efuse_pwr_cut_ddv(rtwdev);
+
+ return 0;
+}
+
+static int rtw89_dump_physical_efuse_map_dav(struct rtw89_dev *rtwdev, u8 *map,
+ u32 dump_addr, u32 dump_size)
+{
+ u32 addr;
+ u8 val8;
+ int err;
+ int ret;
+
+ for (addr = dump_addr; addr < dump_addr + dump_size; addr++) {
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_CTRL, 0x40, FULL_BIT_MASK);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_LOW_ADDR,
+ addr & 0xff, XTAL_SI_LOW_ADDR_MASK);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_CTRL, addr >> 8,
+ XTAL_SI_HIGH_ADDR_MASK);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_CTRL, 0,
+ XTAL_SI_MODE_SEL_MASK);
+ if (ret)
+ return ret;
+
+ ret = read_poll_timeout_atomic(rtw89_mac_read_xtal_si, err,
+ !err && (val8 & XTAL_SI_RDY),
+ 1, 10000, false,
+ rtwdev, XTAL_SI_CTRL, &val8);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to read dav efuse\n");
+ return ret;
+ }
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_READ_VAL, &val8);
+ if (ret)
+ return ret;
+ *map++ = val8;
+ }
+
+ return 0;
+}
+
+static int rtw89_dump_physical_efuse_map(struct rtw89_dev *rtwdev, u8 *map,
+ u32 dump_addr, u32 dump_size, bool dav)
+{
+ int ret;
+
+ if (!map || dump_size == 0)
+ return 0;
+
+ rtw89_switch_efuse_bank(rtwdev, RTW89_EFUSE_BANK_WIFI);
+
+ if (dav) {
+ ret = rtw89_dump_physical_efuse_map_dav(rtwdev, map, dump_addr, dump_size);
+ if (ret)
+ return ret;
+ } else {
+ ret = rtw89_dump_physical_efuse_map_ddv(rtwdev, map, dump_addr, dump_size);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -78,6 +197,9 @@ static int rtw89_dump_logical_efuse_map(struct rtw89_dev *rtwdev, u8 *phy_map,
u8 word_en;
int i;
+ if (!phy_map)
+ return 0;
+
while (phy_idx < physical_size - sec_ctrl_size) {
hdr1 = phy_map[phy_idx];
hdr2 = phy_map[phy_idx + 1];
@@ -109,8 +231,13 @@ int rtw89_parse_efuse_map(struct rtw89_dev *rtwdev)
{
u32 phy_size = rtwdev->chip->physical_efuse_size;
u32 log_size = rtwdev->chip->logical_efuse_size;
+ u32 dav_phy_size = rtwdev->chip->dav_phy_efuse_size;
+ u32 dav_log_size = rtwdev->chip->dav_log_efuse_size;
+ u32 full_log_size = log_size + dav_log_size;
u8 *phy_map = NULL;
u8 *log_map = NULL;
+ u8 *dav_phy_map = NULL;
+ u8 *dav_log_map = NULL;
int ret;
if (rtw89_read16(rtwdev, R_AX_SYS_WL_EFUSE_CTRL) & B_AX_AUTOLOAD_SUS)
@@ -119,27 +246,41 @@ int rtw89_parse_efuse_map(struct rtw89_dev *rtwdev)
rtw89_warn(rtwdev, "failed to check efuse autoload\n");
phy_map = kmalloc(phy_size, GFP_KERNEL);
- log_map = kmalloc(log_size, GFP_KERNEL);
+ log_map = kmalloc(full_log_size, GFP_KERNEL);
+ if (dav_phy_size && dav_log_size) {
+ dav_phy_map = kmalloc(dav_phy_size, GFP_KERNEL);
+ dav_log_map = log_map + log_size;
+ }
- if (!phy_map || !log_map) {
+ if (!phy_map || !log_map || (dav_phy_size && !dav_phy_map)) {
ret = -ENOMEM;
goto out_free;
}
- ret = rtw89_dump_physical_efuse_map(rtwdev, phy_map, 0, phy_size);
+ ret = rtw89_dump_physical_efuse_map(rtwdev, phy_map, 0, phy_size, false);
if (ret) {
rtw89_warn(rtwdev, "failed to dump efuse physical map\n");
goto out_free;
}
+ ret = rtw89_dump_physical_efuse_map(rtwdev, dav_phy_map, 0, dav_phy_size, true);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to dump efuse dav physical map\n");
+ goto out_free;
+ }
- memset(log_map, 0xff, log_size);
+ memset(log_map, 0xff, full_log_size);
ret = rtw89_dump_logical_efuse_map(rtwdev, phy_map, log_map);
if (ret) {
rtw89_warn(rtwdev, "failed to dump efuse logical map\n");
goto out_free;
}
+ ret = rtw89_dump_logical_efuse_map(rtwdev, dav_phy_map, dav_log_map);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to dump efuse dav logical map\n");
+ goto out_free;
+ }
- rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "log_map: ", log_map, log_size);
+ rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "log_map: ", log_map, full_log_size);
ret = rtwdev->chip->ops->read_efuse(rtwdev, log_map);
if (ret) {
@@ -148,6 +289,7 @@ int rtw89_parse_efuse_map(struct rtw89_dev *rtwdev)
}
out_free:
+ kfree(dav_phy_map);
kfree(log_map);
kfree(phy_map);
@@ -169,7 +311,7 @@ int rtw89_parse_phycap_map(struct rtw89_dev *rtwdev)
return -ENOMEM;
ret = rtw89_dump_physical_efuse_map(rtwdev, phycap_map,
- phycap_addr, phycap_size);
+ phycap_addr, phycap_size, false);
if (ret) {
rtw89_warn(rtwdev, "failed to dump phycap map\n");
goto out_free;
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 8a57b75b07c0..6deaf8eec6b4 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -201,6 +201,14 @@ static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
if (chip->chip_id == RTL8852A &&
RTW89_FW_SUIT_VER_CODE(fw_suit) <= RTW89_FW_VER_CODE(0, 13, 29, 0))
rtwdev->fw.old_ht_ra_format = true;
+
+ if (chip->chip_id == RTL8852A &&
+ RTW89_FW_SUIT_VER_CODE(fw_suit) >= RTW89_FW_VER_CODE(0, 13, 35, 0))
+ rtwdev->fw.scan_offload = true;
+
+ if (chip->chip_id == RTL8852A &&
+ RTW89_FW_SUIT_VER_CODE(fw_suit) >= RTW89_FW_VER_CODE(0, 13, 35, 0))
+ rtwdev->fw.tx_wake = true;
}
int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
@@ -555,11 +563,27 @@ fail:
return -EBUSY;
}
-#define H2C_BA_CAM_LEN 4
-int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, bool valid, u8 macid,
- struct ieee80211_ampdu_params *params)
+#define H2C_BA_CAM_LEN 8
+int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params)
{
+ u8 macid = rtwsta->mac_id;
struct sk_buff *skb;
+ u8 entry_idx;
+ int ret;
+
+ ret = valid ?
+ rtw89_core_acquire_sta_ba_entry(rtwsta, params->tid, &entry_idx) :
+ rtw89_core_release_sta_ba_entry(rtwsta, params->tid, &entry_idx);
+ if (ret) {
+		/* this still works without a static BA CAM entry, because
+		 * the hardware can create a dynamic BA CAM entry
+		 * automatically.
+		 */
+ rtw89_debug(rtwdev, RTW89_DBG_TXRX,
+ "failed to %s entry tid=%d for h2c ba cam\n",
+ valid ? "alloc" : "free", params->tid);
+ return 0;
+ }
skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_BA_CAM_LEN);
if (!skb) {
@@ -568,6 +592,7 @@ int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, bool valid, u8 macid,
}
skb_put(skb, H2C_BA_CAM_LEN);
SET_BA_CAM_MACID(skb->data, macid);
+ SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
if (!valid)
goto end;
SET_BA_CAM_VALID(skb->data, valid);
@@ -577,7 +602,7 @@ int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, bool valid, u8 macid,
else
SET_BA_CAM_BMAP_SIZE(skb->data, 0);
/* If init req is set, hw will set the ssn */
- SET_BA_CAM_INIT_REQ(skb->data, 0);
+ SET_BA_CAM_INIT_REQ(skb->data, 1);
SET_BA_CAM_SSN(skb->data, params->ssn);
end:
@@ -716,12 +741,14 @@ fail:
}
#define H2C_CMC_TBL_LEN 68
-int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, u8 macid)
+int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
{
struct rtw89_hal *hal = &rtwdev->hal;
struct sk_buff *skb;
u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
u8 map_b = hal->antenna_tx == RF_AB ? 1 : 0;
+ u8 macid = rtwvif->mac_id;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN);
if (!skb) {
@@ -743,6 +770,8 @@ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, u8 macid)
SET_CMC_TBL_ANTSEL_D(skb->data, 0);
SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
+ SET_CMC_TBL_DATA_DCM(skb->data, 0);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
@@ -821,13 +850,15 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta)
{
struct rtw89_hal *hal = &rtwdev->hal;
- struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct sk_buff *skb;
u8 pads[RTW89_PPE_BW_NUM];
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
memset(pads, 0, sizeof(pads));
- __get_sta_he_pkt_padding(rtwdev, sta, pads);
+ if (sta)
+ __get_sta_he_pkt_padding(rtwdev, sta, pads);
skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN);
if (!skb) {
@@ -835,7 +866,7 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
return -ENOMEM;
}
skb_put(skb, H2C_CMC_TBL_LEN);
- SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
+ SET_CTRL_INFO_MACID(skb->data, mac_id);
SET_CTRL_INFO_OPERATION(skb->data, 1);
SET_CMC_TBL_DISRTSFB(skb->data, 1);
SET_CMC_TBL_DISDATAFB(skb->data, 1);
@@ -853,7 +884,10 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
- SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, sta->he_cap.has_he);
+ if (sta)
+ SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, sta->he_cap.has_he);
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
+ SET_CMC_TBL_DATA_DCM(skb->data, 0);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
@@ -911,28 +945,93 @@ fail:
return -EBUSY;
}
-#define H2C_VIF_MAINTAIN_LEN 4
-int rtw89_fw_h2c_vif_maintain(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif,
- enum rtw89_upd_mode upd_mode)
+#define H2C_BCN_BASE_LEN 12
+int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
struct sk_buff *skb;
+ struct sk_buff *skb_beacon;
+ u16 tim_offset;
+ int bcn_total_len;
+
+ skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, NULL);
+ if (!skb_beacon) {
+ rtw89_err(rtwdev, "failed to get beacon skb\n");
+ return -ENOMEM;
+ }
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_VIF_MAINTAIN_LEN);
+ bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len;
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(bcn_total_len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
+ dev_kfree_skb_any(skb_beacon);
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_BCN_BASE_LEN);
+
+ SET_BCN_UPD_PORT(skb->data, rtwvif->port);
+ SET_BCN_UPD_MBSSID(skb->data, 0);
+ SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx);
+ SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset);
+ SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id);
+ SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL);
+ SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE);
+ SET_BCN_UPD_RATE(skb->data, hal->current_band_type == RTW89_BAND_2G ?
+ RTW89_HW_RATE_CCK1 : RTW89_HW_RATE_OFDM6);
+
+ skb_put_data(skb, skb_beacon->data, skb_beacon->len);
+ dev_kfree_skb_any(skb_beacon);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_BCN_UPD, 0, 1,
+ bcn_total_len);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
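The beacon-update command framed above is a fixed 12-byte descriptor (H2C_BCN_BASE_LEN) followed by the raw beacon frame, and SET_BCN_UPD_GRP_IE_OFST(), defined later in this patch, stores the TIM offset as (val - 24) | BIT(7). A small stand-alone sketch of that encoding, assuming the 24 is the 802.11 management-header length and that BIT(7) marks the offset as valid; the patch itself does not name the flag.

    /* Sketch of the group-IE (TIM) offset encoding used by
     * SET_BCN_UPD_GRP_IE_OFST(): (off - 24) | BIT(7). The 24 matches the
     * 802.11 management header; treating BIT(7) as a "valid" flag is an
     * assumption.
     */
    #include <assert.h>
    #include <stdint.h>

    #define BIT(n) (1u << (n))
    #define H2C_BCN_BASE_LEN 12

    static uint8_t encode_grp_ie_ofst(uint16_t tim_offset)
    {
        return (uint8_t)((tim_offset - 24) | BIT(7));
    }

    int main(void)
    {
        /* TIM at byte 56 of the beacon: 24-byte header + 32 bytes of body */
        assert(encode_grp_ie_ofst(56) == (32 | 0x80));

        /* total H2C payload = fixed descriptor + whole beacon frame */
        uint16_t beacon_len = 96; /* hypothetical beacon length */
        assert(H2C_BCN_BASE_LEN + beacon_len == 108);
        return 0;
    }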
+
+#define H2C_ROLE_MAINTAIN_LEN 4
+int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta,
+ enum rtw89_upd_mode upd_mode)
+{
+ struct sk_buff *skb;
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ u8 self_role;
+
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
+ if (rtwsta)
+ self_role = RTW89_SELF_ROLE_AP_CLIENT;
+ else
+ self_role = rtwvif->self_role;
+ } else {
+ self_role = rtwvif->self_role;
+ }
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_ROLE_MAINTAIN_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
return -ENOMEM;
}
- skb_put(skb, H2C_VIF_MAINTAIN_LEN);
- SET_FWROLE_MAINTAIN_MACID(skb->data, rtwvif->mac_id);
- SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, rtwvif->self_role);
+ skb_put(skb, H2C_ROLE_MAINTAIN_LEN);
+ SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id);
+ SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role);
SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode);
SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
- H2C_VIF_MAINTAIN_LEN);
+ H2C_ROLE_MAINTAIN_LEN);
if (rtw89_h2c_tx(rtwdev, skb, false)) {
rtw89_err(rtwdev, "failed to send h2c\n");
@@ -948,9 +1047,17 @@ fail:
#define H2C_JOIN_INFO_LEN 4
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
- u8 dis_conn)
+ struct rtw89_sta *rtwsta, bool dis_conn)
{
struct sk_buff *skb;
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ u8 self_role = rtwvif->self_role;
+ u8 net_type = rtwvif->net_type;
+
+ if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
+ self_role = RTW89_SELF_ROLE_AP_CLIENT;
+ net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
+ }
skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_JOIN_INFO_LEN);
if (!skb) {
@@ -958,7 +1065,7 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
return -ENOMEM;
}
skb_put(skb, H2C_JOIN_INFO_LEN);
- SET_JOININFO_MACID(skb->data, rtwvif->mac_id);
+ SET_JOININFO_MACID(skb->data, mac_id);
SET_JOININFO_OP(skb->data, dis_conn);
SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
SET_JOININFO_WMM(skb->data, rtwvif->wmm);
@@ -968,9 +1075,9 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
SET_JOININFO_TF_MAC_PAD(skb->data, 0);
SET_JOININFO_DL_T_PE(skb->data, 0);
SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
- SET_JOININFO_NET_TYPE(skb->data, rtwvif->net_type);
+ SET_JOININFO_NET_TYPE(skb->data, net_type);
SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
- SET_JOININFO_SELF_ROLE(skb->data, rtwvif->self_role);
+ SET_JOININFO_SELF_ROLE(skb->data, self_role);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
@@ -1212,7 +1319,7 @@ fail:
return -EBUSY;
}
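The override above mirrors rtw89_fw_h2c_role_maintain(): in AP mode each connected peer is reported to the firmware as an AP client, and tearing one down additionally downgrades the reported net_type to "no link". A stand-alone sketch of the decision; the enum values are illustrative, not the driver's.

    /* Model of the join-info role/net_type fixup above. Enum values are
     * illustrative only.
     */
    #include <assert.h>
    #include <stdbool.h>

    enum net_type  { NET_NO_LINK, NET_AP_MODE, NET_INFRA };
    enum self_role { ROLE_STA, ROLE_AP, ROLE_AP_CLIENT };

    static void fixup(enum net_type *nt, enum self_role *sr,
                      bool has_sta, bool dis_conn)
    {
        if (*nt == NET_AP_MODE && has_sta) {
            *sr = ROLE_AP_CLIENT;
            if (dis_conn)
                *nt = NET_NO_LINK;
        }
    }

    int main(void)
    {
        enum net_type nt = NET_AP_MODE;
        enum self_role sr = ROLE_AP;

        fixup(&nt, &sr, true, true); /* AP tearing down a peer */
        assert(sr == ROLE_AP_CLIENT && nt == NET_NO_LINK);
        return 0;
    }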
-#define H2C_LEN_CXDRVINFO_ROLE (4 + 12 * RTW89_MAX_HW_PORT_NUM + H2C_LEN_CXDRVHDR)
+#define H2C_LEN_CXDRVINFO_ROLE (4 + 12 * RTW89_PORT_NUM + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -1251,7 +1358,7 @@ int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++, active++) {
+ for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i);
RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i);
RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i);
@@ -1368,6 +1475,198 @@ fail:
return -EBUSY;
}
+#define H2C_LEN_PKT_OFLD 4
+int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
+{
+ struct sk_buff *skb;
+ u8 *cmd;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_PKT_OFLD);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_LEN_PKT_OFLD);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id);
+ RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_PACKET_OFLD, 1, 1,
+ H2C_LEN_PKT_OFLD);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
+ struct sk_buff *skb_ofld)
+{
+ struct sk_buff *skb;
+ u8 *cmd;
+ u8 alloc_id;
+
+ alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload,
+ RTW89_MAX_PKT_OFLD_NUM);
+ if (alloc_id == RTW89_MAX_PKT_OFLD_NUM)
+ return -ENOSPC;
+
+ *id = alloc_id;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_PKT_OFLD + skb_ofld->len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_LEN_PKT_OFLD);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id);
+ RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD);
+ RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len);
+ skb_put_data(skb, skb_ofld->data, skb_ofld->len);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_PACKET_OFLD, 1, 1,
+ H2C_LEN_PKT_OFLD + skb_ofld->len);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
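rtw89_core_acquire_bit_map() and rtw89_core_release_bit_map() are not defined in this diff; judging by the bounds check above, the allocator hands out the first free index and returns RTW89_MAX_PKT_OFLD_NUM when the pool is exhausted (mapped to -ENOSPC here). A stand-alone model of that contract, with the pool size assumed.

    /* User-space model of the packet-offload id bitmap. The pool size is
     * illustrative; the full-pool sentinel mirrors the -ENOSPC mapping
     * in rtw89_fw_h2c_add_pkt_offload() above.
     */
    #include <assert.h>
    #include <stdint.h>

    #define MAX_PKT_OFLD_NUM 16 /* assumed pool size */

    static uint32_t pkt_offload; /* one bit per firmware template id */

    static unsigned int acquire_bit_map(void)
    {
        for (unsigned int i = 0; i < MAX_PKT_OFLD_NUM; i++) {
            if (!(pkt_offload & (1u << i))) {
                pkt_offload |= 1u << i;
                return i;
            }
        }
        return MAX_PKT_OFLD_NUM; /* full: caller returns -ENOSPC */
    }

    static void release_bit_map(unsigned int id)
    {
        pkt_offload &= ~(1u << id);
    }

    int main(void)
    {
        unsigned int a = acquire_bit_map();
        unsigned int b = acquire_bit_map();

        assert(a == 0 && b == 1);
        release_bit_map(a);
        assert(acquire_bit_map() == 0); /* a freed id is reused */
        return 0;
    }

Every id handed out this way is later given back by rtw89_release_pkt_list() further down, which pairs rtw89_fw_h2c_del_pkt_offload() with the bitmap release.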
+
+#define H2C_LEN_SCAN_LIST_OFFLOAD 4
+int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
+ struct list_head *chan_list)
+{
+ struct rtw89_mac_chinfo *ch_info;
+ struct sk_buff *skb;
+ int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE;
+ u8 *cmd;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(skb_len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len);
+ /* in units of 4 bytes */
+ RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4);
+
+ list_for_each_entry(ch_info, chan_list, list) {
+ cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE);
+
+ RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period);
+ RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time);
+ RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch);
+ RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch);
+ RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw);
+ RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action);
+ RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt);
+ RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt);
+ RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data);
+ RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band);
+ RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id);
+ RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch);
+ RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null);
+ RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num);
+ RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]);
+ RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]);
+ RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]);
+ RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]);
+ RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]);
+ RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]);
+ RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]);
+ RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
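Sizing sanity check for the list above: a 4-byte header plus one RTW89_MAC_CHINFO_SIZE (20-byte) record per channel, with the per-record size reported to the firmware in 4-byte words.

    /* Cross-check of the scan-list H2C sizing, using the constants this
     * patch adds to fw.h.
     */
    #include <assert.h>

    #define H2C_LEN_SCAN_LIST_OFFLOAD 4
    #define RTW89_MAC_CHINFO_SIZE     20

    int main(void)
    {
        int n_chan = 11; /* e.g. a 2.4 GHz sweep */
        int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD +
                      n_chan * RTW89_MAC_CHINFO_SIZE;

        assert(skb_len == 224);
        assert(RTW89_MAC_CHINFO_SIZE / 4 == 5); /* CH_SIZE field value */
        return 0;
    }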
+
+#define H2C_LEN_SCAN_OFFLOAD 20
+int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *option,
+ struct rtw89_vif *rtwvif)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct sk_buff *skb;
+ u8 *cmd;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_SCAN_OFFLOAD);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_LEN_SCAN_OFFLOAD);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_SCANOFLD_MACID(cmd, rtwvif->mac_id);
+ RTW89_SET_FWCMD_SCANOFLD_PORT_ID(cmd, rtwvif->port);
+ RTW89_SET_FWCMD_SCANOFLD_BAND(cmd, RTW89_PHY_0);
+ RTW89_SET_FWCMD_SCANOFLD_OPERATION(cmd, option->enable);
+ RTW89_SET_FWCMD_SCANOFLD_NOTIFY_END(cmd, true);
+ RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_MODE(cmd, option->target_ch_mode);
+ RTW89_SET_FWCMD_SCANOFLD_START_MODE(cmd, RTW89_SCAN_IMMEDIATE);
+ RTW89_SET_FWCMD_SCANOFLD_SCAN_TYPE(cmd, RTW89_SCAN_ONCE);
+ if (option->target_ch_mode) {
+ RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BW(cmd, scan_info->op_bw);
+ RTW89_SET_FWCMD_SCANOFLD_TARGET_PRI_CH(cmd,
+ scan_info->op_pri_ch);
+ RTW89_SET_FWCMD_SCANOFLD_TARGET_CENTRAL_CH(cmd,
+ scan_info->op_chan);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_SCANOFLD, 1, 1,
+ H2C_LEN_SCAN_OFFLOAD);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
struct rtw89_fw_h2c_rf_reg_info *info,
u16 len, u8 page)
@@ -1533,15 +1832,13 @@ void rtw89_fw_c2h_work(struct work_struct *work)
static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
struct rtw89_mac_h2c_info *info)
{
- static const u32 h2c_reg[RTW89_H2CREG_MAX] = {
- R_AX_H2CREG_DATA0, R_AX_H2CREG_DATA1,
- R_AX_H2CREG_DATA2, R_AX_H2CREG_DATA3
- };
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const u32 *h2c_reg = chip->h2c_regs;
u8 i, val, len;
int ret;
ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
- rtwdev, R_AX_H2CREG_CTRL);
+ rtwdev, chip->h2c_ctrl_reg);
if (ret) {
rtw89_warn(rtwdev, "FW does not process h2c registers\n");
return ret;
@@ -1555,7 +1852,7 @@ static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
for (i = 0; i < RTW89_H2CREG_MAX; i++)
rtw89_write32(rtwdev, h2c_reg[i], info->h2creg[i]);
- rtw89_write8(rtwdev, R_AX_H2CREG_CTRL, B_AX_H2CREG_TRIGGER);
+ rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER);
return 0;
}
@@ -1563,10 +1860,8 @@ static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
struct rtw89_mac_c2h_info *info)
{
- static const u32 c2h_reg[RTW89_C2HREG_MAX] = {
- R_AX_C2HREG_DATA0, R_AX_C2HREG_DATA1,
- R_AX_C2HREG_DATA2, R_AX_C2HREG_DATA3
- };
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const u32 *c2h_reg = chip->c2h_regs;
u32 ret;
u8 i, val;
@@ -1574,7 +1869,7 @@ static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
RTW89_C2H_TIMEOUT, false, rtwdev,
- R_AX_C2HREG_CTRL);
+ chip->c2h_ctrl_reg);
if (ret) {
rtw89_warn(rtwdev, "c2h reg timeout\n");
return ret;
@@ -1583,7 +1878,7 @@ static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
for (i = 0; i < RTW89_C2HREG_MAX; i++)
info->c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);
- rtw89_write8(rtwdev, R_AX_C2HREG_CTRL, 0);
+ rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0);
info->id = RTW89_GET_C2H_HDR_FUNC(*info->c2hreg);
info->content_len = (RTW89_GET_C2H_HDR_LEN(*info->c2hreg) << 2) -
@@ -1640,3 +1935,322 @@ void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
rtw89_fw_prog_cnt_dump(rtwdev);
}
+
+static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
+{
+ struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
+ struct rtw89_pktofld_info *info, *tmp;
+ u8 idx;
+
+ for (idx = RTW89_BAND_2G; idx < NUM_NL80211_BANDS; idx++) {
+ if (!(rtwdev->chip->support_bands & BIT(idx)))
+ continue;
+
+ list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) {
+ rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
+ rtw89_core_release_bit_map(rtwdev->pkt_offload,
+ info->id);
+ list_del(&info->list);
+ kfree(info);
+ }
+ }
+}
+
+static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct sk_buff *skb)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
+ struct rtw89_pktofld_info *info;
+ struct sk_buff *new;
+ int ret = 0;
+ u8 band;
+
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
+ if (!(rtwdev->chip->support_bands & BIT(band)))
+ continue;
+
+ new = skb_copy(skb, GFP_KERNEL);
+ if (!new) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ skb_put_data(new, ies->ies[band], ies->len[band]);
+ skb_put_data(new, ies->common_ies, ies->common_ie_len);
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ kfree_skb(new);
+ goto out;
+ }
+
+ list_add_tail(&info->list, &scan_info->pkt_list[band]);
+ ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
+ if (ret)
+ goto out;
+
+ kfree_skb(new);
+ }
+out:
+ return ret;
+}
+
+static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct cfg80211_scan_request *req = rtwvif->scan_req;
+ struct sk_buff *skb;
+ u8 num = req->n_ssids, i;
+ int ret;
+
+ for (i = 0; i < num; i++) {
+ skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
+ req->ssids[i].ssid,
+ req->ssids[i].ssid_len,
+ req->ie_len);
+ if (!skb)
+ return -ENOMEM;
+
+ ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb);
+ kfree_skb(skb);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
+ int ssid_num,
+ struct rtw89_mac_chinfo *ch_info)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_pktofld_info *info;
+ u8 band, probe_count = 0;
+
+ ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
+ ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
+ ch_info->bw = RTW89_SCAN_WIDTH;
+ ch_info->tx_pkt = true;
+ ch_info->cfg_tx_pwr = false;
+ ch_info->tx_pwr_idx = 0;
+ ch_info->tx_null = false;
+ ch_info->pause_data = false;
+
+ if (ssid_num) {
+ ch_info->num_pkt = ssid_num;
+ band = ch_info->ch_band;
+
+ list_for_each_entry(info, &scan_info->pkt_list[band], list) {
+ ch_info->probe_id = info->id;
+ ch_info->pkt_id[probe_count] = info->id;
+ if (++probe_count >= ssid_num)
+ break;
+ }
+ if (probe_count != ssid_num)
+ rtw89_err(rtwdev, "SSID num differs from list len\n");
+ }
+
+ switch (chan_type) {
+ case RTW89_CHAN_OPERATE:
+ ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
+ ch_info->central_ch = scan_info->op_chan;
+ ch_info->pri_ch = scan_info->op_pri_ch;
+ ch_info->ch_band = scan_info->op_band;
+ ch_info->bw = scan_info->op_bw;
+ ch_info->tx_null = true;
+ ch_info->num_pkt = 0;
+ break;
+ case RTW89_CHAN_DFS:
+ ch_info->period = min_t(u8, ch_info->period,
+ RTW89_DFS_CHAN_TIME);
+ ch_info->dwell_time = RTW89_DWELL_TIME;
+ break;
+ case RTW89_CHAN_ACTIVE:
+ break;
+ default:
+ rtw89_err(rtwdev, "Channel type out of bound\n");
+ }
+}
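The DFS branch above pins the probe dwell to RTW89_DWELL_TIME (20 ms) and caps the channel period at RTW89_DFS_CHAN_TIME (105 ms); with the default 45 ms period the cap only takes effect when userspace mandates a longer duration. A stand-alone check of that clamp.

    /* Check of the DFS period clamp, mirroring the min_t() above. */
    #include <assert.h>

    #define RTW89_CHANNEL_TIME  45
    #define RTW89_DFS_CHAN_TIME 105

    static unsigned int dfs_period(unsigned int period)
    {
        return period < RTW89_DFS_CHAN_TIME ? period : RTW89_DFS_CHAN_TIME;
    }

    int main(void)
    {
        assert(dfs_period(RTW89_CHANNEL_TIME) == 45); /* default: unchanged */
        assert(dfs_period(200) == 105);               /* mandated 200 ms: capped */
        return 0;
    }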
+
+static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct cfg80211_scan_request *req = rtwvif->scan_req;
+ struct rtw89_mac_chinfo *ch_info, *tmp;
+ struct ieee80211_channel *channel;
+ struct list_head chan_list;
+ bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
+ int list_len = req->n_channels, off_chan_time = 0;
+ enum rtw89_chan_type type;
+ int ret = 0, i;
+
+ INIT_LIST_HEAD(&chan_list);
+ for (i = 0; i < req->n_channels; i++) {
+ channel = req->channels[i];
+ ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
+ if (!ch_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ch_info->period = req->duration_mandatory ?
+ req->duration : RTW89_CHANNEL_TIME;
+ ch_info->ch_band = channel->band;
+ ch_info->central_ch = channel->hw_value;
+ ch_info->pri_ch = channel->hw_value;
+ ch_info->rand_seq_num = random_seq;
+
+ if (channel->flags &
+ (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
+ type = RTW89_CHAN_DFS;
+ else
+ type = RTW89_CHAN_ACTIVE;
+ rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info);
+
+ if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK &&
+ off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) {
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp) {
+ ret = -ENOMEM;
+ kfree(ch_info);
+ goto out;
+ }
+
+ type = RTW89_CHAN_OPERATE;
+ tmp->period = req->duration_mandatory ?
+ req->duration : RTW89_CHANNEL_TIME;
+ rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp);
+ list_add_tail(&tmp->list, &chan_list);
+ off_chan_time = 0;
+ list_len++;
+ }
+ list_add_tail(&ch_info->list, &chan_list);
+ off_chan_time += ch_info->period;
+ }
+ rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
+
+out:
+ list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
+ list_del(&ch_info->list);
+ kfree(ch_info);
+ }
+
+ return ret;
+}
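A worked example of the off-channel budget above, using the defaults this patch adds to fw.h (RTW89_CHANNEL_TIME 45, RTW89_OFF_CHAN_TIME 100): two 45 ms hops fit under the 100 ms budget but a third would not, so an associated STA gets a return to its operating channel spliced in before the 3rd, 5th, 7th, ... scan channel.

    /* Trace of the off_chan_time bookkeeping in
     * rtw89_hw_scan_add_chan_list() with default timings.
     */
    #include <stdio.h>

    #define RTW89_CHANNEL_TIME  45
    #define RTW89_OFF_CHAN_TIME 100

    int main(void)
    {
        unsigned int off = 0;

        for (int i = 1; i <= 8; i++) {
            if (off + RTW89_CHANNEL_TIME > RTW89_OFF_CHAN_TIME) {
                printf("op-channel return before scan channel %d\n", i);
                off = 0;
            }
            off += RTW89_CHANNEL_TIME;
        }
        return 0; /* prints: before channels 3, 5 and 7 */
    }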
+
+static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ int ret;
+
+ ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif);
+ if (ret) {
+ rtw89_err(rtwdev, "Update probe request failed\n");
+ goto out;
+ }
+ ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif);
+out:
+ return ret;
+}
+
+void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *scan_req)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct cfg80211_scan_request *req = &scan_req->req;
+ u8 mac_addr[ETH_ALEN];
+
+ rtwdev->scan_info.scanning_vif = vif;
+ rtwvif->scan_ies = &scan_req->ies;
+ rtwvif->scan_req = req;
+ ieee80211_stop_queues(rtwdev->hw);
+
+ if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
+ get_random_mask_addr(mac_addr, req->mac_addr,
+ req->mac_addr_mask);
+ else
+ ether_addr_copy(mac_addr, vif->addr);
+ rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true);
+
+ rtwdev->hal.rx_fltr &= ~B_AX_A_BCN_CHK_EN;
+ rtwdev->hal.rx_fltr &= ~B_AX_A_BC;
+ rtwdev->hal.rx_fltr &= ~B_AX_A_A1_MATCH;
+ rtw89_write32_mask(rtwdev,
+ rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
+ B_AX_RX_FLTR_CFG_MASK,
+ rtwdev->hal.rx_fltr);
+}
+
+void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ bool aborted)
+{
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ };
+ struct rtw89_vif *rtwvif;
+
+ if (!vif)
+ return;
+
+ rtwdev->hal.rx_fltr |= B_AX_A_BCN_CHK_EN;
+ rtwdev->hal.rx_fltr |= B_AX_A_BC;
+ rtwdev->hal.rx_fltr |= B_AX_A_A1_MATCH;
+ rtw89_write32_mask(rtwdev,
+ rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
+ B_AX_RX_FLTR_CFG_MASK,
+ rtwdev->hal.rx_fltr);
+
+ rtw89_core_scan_complete(rtwdev, vif, true);
+ ieee80211_scan_completed(rtwdev->hw, &info);
+ ieee80211_wake_queues(rtwdev->hw);
+
+ rtw89_release_pkt_list(rtwdev);
+ rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ rtwvif->scan_req = NULL;
+ rtwvif->scan_ies = NULL;
+ rtwdev->scan_info.scanning_vif = NULL;
+}
+
+void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
+{
+ rtw89_hw_scan_offload(rtwdev, vif, false);
+ rtw89_hw_scan_complete(rtwdev, vif, true);
+}
+
+int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct rtw89_scan_option opt = {0};
+ struct rtw89_vif *rtwvif;
+ int ret = 0;
+
+ rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
+ if (!rtwvif)
+ return -EINVAL;
+
+ opt.enable = enable;
+ opt.target_ch_mode = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK;
+ if (enable) {
+ ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif);
+ if (ret)
+ goto out;
+ }
+ rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif);
+out:
+ return ret;
+}
+
+void rtw89_store_op_chan(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ scan_info->op_pri_ch = hal->current_primary_channel;
+ scan_info->op_chan = hal->current_channel;
+ scan_info->op_bw = hal->current_band_width;
+ scan_info->op_band = hal->current_band_type;
+}
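rtw89_hw_scan_start() clears three receive-filter bits for the duration of the scan and rtw89_hw_scan_complete() sets them back. The sketch below only models the save/clear/restore pattern; the bit positions and the exact filtering semantics of B_AX_A_BCN_CHK_EN, B_AX_A_BC and B_AX_A_A1_MATCH are not spelled out in this patch.

    /* Save/clear/restore pattern of the scan rx-filter bits; positions
     * are illustrative, not the real register layout.
     */
    #include <assert.h>
    #include <stdint.h>

    #define A_BCN_CHK_EN (1u << 0)
    #define A_BC         (1u << 1)
    #define A_A1_MATCH   (1u << 2)
    #define SCAN_FLTR    (A_BCN_CHK_EN | A_BC | A_A1_MATCH)

    int main(void)
    {
        uint32_t rx_fltr = 0xffffffffu; /* normal operation */

        rx_fltr &= ~SCAN_FLTR; /* scan start */
        assert((rx_fltr & SCAN_FLTR) == 0);

        rx_fltr |= SCAN_FLTR; /* scan complete */
        assert((rx_fltr & SCAN_FLTR) == SCAN_FLTR);
        return 0;
    }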
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 2d36dc27222f..ed8609b204e0 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -123,6 +123,27 @@ enum rtw89_fw_log_comp {
RTW89_FW_LOG_COMP_MCC = 20,
};
+enum rtw89_pkt_offload_op {
+ RTW89_PKT_OFLD_OP_ADD,
+ RTW89_PKT_OFLD_OP_DEL,
+ RTW89_PKT_OFLD_OP_READ,
+};
+
+enum rtw89_scanofld_notify_reason {
+ RTW89_SCAN_DWELL_NOTIFY,
+ RTW89_SCAN_PRE_TX_NOTIFY,
+ RTW89_SCAN_POST_TX_NOTIFY,
+ RTW89_SCAN_ENTER_CH_NOTIFY,
+ RTW89_SCAN_LEAVE_CH_NOTIFY,
+ RTW89_SCAN_END_SCAN_NOTIFY,
+};
+
+enum rtw89_chan_type {
+ RTW89_CHAN_OPERATE = 0,
+ RTW89_CHAN_ACTIVE,
+ RTW89_CHAN_DFS,
+};
+
#define FWDL_SECTION_MAX_NUM 10
#define FWDL_SECTION_CHKSUM_LEN 8
#define FWDL_SECTION_PER_PKT_LEN 2020
@@ -156,6 +177,50 @@ struct rtw89_h2creg_sch_tx_en {
u16 rsvd:15;
} __packed;
+#define RTW89_CHANNEL_TIME 45
+#define RTW89_DFS_CHAN_TIME 105
+#define RTW89_OFF_CHAN_TIME 100
+#define RTW89_DWELL_TIME 20
+#define RTW89_SCAN_WIDTH 0
+#define RTW89_SCANOFLD_MAX_SSID 8
+#define RTW89_SCANOFLD_MAX_IE_LEN 512
+#define RTW89_SCANOFLD_PKT_NONE 0xFF
+#define RTW89_SCANOFLD_DEBUG_MASK 0x1F
+#define RTW89_MAC_CHINFO_SIZE 20
+
+struct rtw89_mac_chinfo {
+ u8 period;
+ u8 dwell_time;
+ u8 central_ch;
+ u8 pri_ch;
+ u8 bw:3;
+ u8 notify_action:5;
+ u8 num_pkt:4;
+ u8 tx_pkt:1;
+ u8 pause_data:1;
+ u8 ch_band:2;
+ u8 probe_id;
+ u8 dfs_ch:1;
+ u8 tx_null:1;
+ u8 rand_seq_num:1;
+ u8 cfg_tx_pwr:1;
+ u8 rsvd0:4;
+ u8 pkt_id[RTW89_SCANOFLD_MAX_SSID];
+ u16 tx_pwr_idx;
+ u8 rsvd1;
+ struct list_head list;
+};
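The 20 bytes of RTW89_MAC_CHINFO_SIZE line up with the five little-endian dwords the RTW89_SET_FWCMD_CHINFO_* helpers further down write (byte offsets 0, 4, 8, 12 and 16); the trailing list_head in struct rtw89_mac_chinfo is driver-side bookkeeping and never reaches the firmware. A stand-alone cross-check of that arithmetic.

    /* RTW89_MAC_CHINFO_SIZE vs the last dword the CHINFO setters touch
     * (RTW89_SET_FWCMD_CHINFO_POWER_IDX at byte offset 16).
     */
    #include <assert.h>

    #define RTW89_MAC_CHINFO_SIZE 20

    int main(void)
    {
        int last_dword_off = 16;

        assert(last_dword_off + 4 == RTW89_MAC_CHINFO_SIZE);
        return 0;
    }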
+
+struct rtw89_scan_option {
+ bool enable;
+ bool target_ch_mode;
+};
+
+struct rtw89_pktofld_info {
+ struct list_head list;
+ u8 id;
+};
+
static inline void RTW89_SET_FWCMD_RA_IS_DIS(void *cmd, u32 val)
{
le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(0));
@@ -1056,6 +1121,106 @@ static inline void SET_CMC_TBL_CSI_BW(void *table, u32 val)
GENMASK(31, 30));
}
+static inline void SET_BCN_UPD_PORT(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
+}
+
+static inline void SET_BCN_UPD_MBSSID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8));
+}
+
+static inline void SET_BCN_UPD_BAND(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 16));
+}
+
+static inline void SET_BCN_UPD_GRP_IE_OFST(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, (val - 24) | BIT(7), GENMASK(31, 24));
+}
+
+static inline void SET_BCN_UPD_MACID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(7, 0));
+}
+
+static inline void SET_BCN_UPD_SSN_SEL(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(9, 8));
+}
+
+static inline void SET_BCN_UPD_SSN_MODE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(11, 10));
+}
+
+static inline void SET_BCN_UPD_RATE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(20, 12));
+}
+
+static inline void SET_BCN_UPD_TXPWR(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(23, 21));
+}
+
+static inline void SET_BCN_UPD_TXINFO_CTRL_EN(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(0));
+}
+
+static inline void SET_BCN_UPD_NTX_PATH_EN(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(4, 1));
+}
+
+static inline void SET_BCN_UPD_PATH_MAP_A(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(6, 5));
+}
+
+static inline void SET_BCN_UPD_PATH_MAP_B(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(8, 7));
+}
+
+static inline void SET_BCN_UPD_PATH_MAP_C(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(10, 9));
+}
+
+static inline void SET_BCN_UPD_PATH_MAP_D(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(12, 11));
+}
+
+static inline void SET_BCN_UPD_PATH_ANTSEL_A(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(13));
+}
+
+static inline void SET_BCN_UPD_PATH_ANTSEL_B(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(14));
+}
+
+static inline void SET_BCN_UPD_PATH_ANTSEL_C(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(15));
+}
+
+static inline void SET_BCN_UPD_PATH_ANTSEL_D(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(16));
+}
+
+static inline void SET_BCN_UPD_CSA_OFST(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(31, 17));
+}
+
static inline void SET_FWROLE_MAINTAIN_MACID(void *h2c, u32 val)
{
le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
@@ -1226,6 +1391,26 @@ static inline void SET_BA_CAM_SSN(void *h2c, u32 val)
le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 20));
}
+static inline void SET_BA_CAM_UID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 1, val, GENMASK(7, 0));
+}
+
+static inline void SET_BA_CAM_STD_EN(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 1, val, BIT(8));
+}
+
+static inline void SET_BA_CAM_BAND(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 1, val, BIT(9));
+}
+
+static inline void SET_BA_CAM_ENTRY_IDX_V1(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 1, val, GENMASK(31, 28));
+}
+
static inline void SET_LPS_PARM_MACID(void *h2c, u32 val)
{
le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
@@ -1316,6 +1501,14 @@ enum rtw89_btc_cxdrvinfo {
CXDRVINFO_MAX,
};
+enum rtw89_scan_mode {
+ RTW89_SCAN_IMMEDIATE,
+};
+
+enum rtw89_scan_type {
+ RTW89_SCAN_ONCE,
+};
+
static inline void RTW89_SET_FWCMD_CXHDR_TYPE(void *cmd, u8 val)
{
u8p_replace_bits((u8 *)(cmd) + 0, val, GENMASK(7, 0));
@@ -1586,6 +1779,242 @@ static inline void RTW89_SET_FWCMD_CXRFK_TYPE(void *cmd, u32 val)
le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(17, 10));
}
+static inline void RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(10, 8));
+}
+
+static inline void RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(31, 16));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_CH_NUM(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PERIOD(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_DWELL(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_CENTER_CH(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PRI_CH(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_BW(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(2, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_ACTION(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(7, 3));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_NUM_PKT(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(11, 8));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_TX(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(12));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(13));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_BAND(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(15, 14));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT_ID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_DFS(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(24));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_TX_NULL(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(25));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_RANDOM(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(26));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_CFG_TX(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(27));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT0(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT1(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT2(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT3(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT4(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT5(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT6(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT7(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_POWER_IDX(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 16), val, GENMASK(15, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_MACID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_NORM_CY(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_PORT_ID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(18, 16));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_BAND(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, BIT(19));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_OPERATION(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(21, 20));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BAND(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(23, 22));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_NOTIFY_END(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_MODE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(1));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_START_MODE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(2));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_SCAN_TYPE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(4, 3));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BW(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(7, 5));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TARGET_PRI_CH(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TARGET_CENTRAL_CH(void *cmd,
+ u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_PROBE_REQ_PKT_ID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_NORM_PD(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(15, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_SLOW_PD(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TSF_HIGH(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TSF_SLOW(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 16), val, GENMASK(31, 0));
+}
+
#define RTW89_C2H_HEADER_LEN 8
#define RTW89_GET_C2H_CATEGORY(c2h) \
@@ -1642,6 +2071,26 @@ static inline void RTW89_SET_FWCMD_CXRFK_TYPE(void *cmd, u32 val)
#define RTW89_MK_HT_RATE(nss, mcs) (FIELD_PREP(GENMASK(4, 3), nss) | \
FIELD_PREP(GENMASK(2, 0), mcs))
+#define RTW89_GET_MAC_C2H_PKTOFLD_ID(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 0))
+#define RTW89_GET_MAC_C2H_PKTOFLD_OP(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(10, 8))
+#define RTW89_GET_MAC_C2H_PKTOFLD_LEN(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 16))
+
+#define RTW89_GET_MAC_C2H_SCANOFLD_PRI_CH(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 0))
+#define RTW89_GET_MAC_C2H_SCANOFLD_RSP(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(19, 16))
+#define RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(23, 20))
+#define RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(3, 0))
+#define RTW89_GET_MAC_C2H_SCANOFLD_AIR_DENSITY(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(7, 4))
+#define RTW89_GET_MAC_C2H_SCANOFLD_BAND(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(25, 24))
+
#define RTW89_FW_HDR_SIZE 32
#define RTW89_FW_SECTION_HDR_SIZE 16
@@ -1709,6 +2158,7 @@ struct rtw89_fw_h2c_rf_reg_info {
/* CLASS 5 - Frame Exchange */
#define H2C_CL_MAC_FR_EXCHG 0x5
#define H2C_FUNC_MAC_CCTLINFO_UD 0x2
+#define H2C_FUNC_MAC_BCN_UPD 0x5
/* CLASS 6 - Address CAM */
#define H2C_CL_MAC_ADDR_CAM_UPDATE 0x6
@@ -1721,9 +2171,12 @@ struct rtw89_fw_h2c_rf_reg_info {
/* CLASS 9 - FW offload */
#define H2C_CL_MAC_FW_OFLD 0x9
+#define H2C_FUNC_PACKET_OFLD 0x1
#define H2C_FUNC_MAC_MACID_PAUSE 0x8
#define H2C_FUNC_USR_EDCA 0xF
#define H2C_FUNC_OFLD_CFG 0x14
+#define H2C_FUNC_ADD_SCANOFLD_CH 0x16
+#define H2C_FUNC_SCANOFLD 0x17
/* CLASS 10 - Security CAM */
#define H2C_CL_MAC_SEC_CAM 0xa
@@ -1750,21 +2203,25 @@ int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev);
void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u8 type, u8 cat, u8 class, u8 func,
bool rack, bool dack, u32 len);
-int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, u8 macid);
+int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta);
+int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *vif,
struct rtw89_sta *rtwsta, const u8 *scan_mac_addr);
void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h);
void rtw89_fw_c2h_work(struct work_struct *work);
-int rtw89_fw_h2c_vif_maintain(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif,
- enum rtw89_upd_mode upd_mode);
+int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta,
+ enum rtw89_upd_mode upd_mode);
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
- u8 dis_conn);
+ struct rtw89_sta *rtwsta, bool dis_conn);
int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
bool pause);
int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
@@ -1775,6 +2232,14 @@ int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id);
+int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
+ struct sk_buff *skb_ofld);
+int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
+ struct list_head *chan_list);
+int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *opt,
+ struct rtw89_vif *vif);
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
struct rtw89_fw_h2c_rf_reg_info *info,
u16 len, u8 page);
@@ -1785,8 +2250,9 @@ int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len);
void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev);
void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid);
-int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, bool valid, u8 macid,
- struct ieee80211_ampdu_params *params);
+int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params);
+
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param);
struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(u32 len);
@@ -1796,5 +2262,15 @@ int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
struct rtw89_mac_c2h_info *c2h_info);
int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable);
void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev);
+void rtw89_store_op_chan(struct rtw89_dev *rtwdev);
+void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req);
+void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ bool aborted);
+int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ bool enable);
+void rtw89_hw_scan_status_report(struct rtw89_dev *rtwdev, struct sk_buff *skb);
+void rtw89_hw_scan_chan_switch(struct rtw89_dev *rtwdev, struct sk_buff *skb);
+void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index b98c47e9ecfe..5e554bd9f036 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -172,6 +172,7 @@ static void rtw89_mac_dump_qta_lost(struct rtw89_dev *rtwdev)
qempty.dle_type = DLE_CTRL_TYPE_PLE;
qempty.grpsel = 0;
+ qempty.qempty = ~(u32)0;
ret = dle_dfi_qempty(rtwdev, &qempty);
if (ret)
rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
@@ -481,9 +482,10 @@ int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err)
}
EXPORT_SYMBOL(rtw89_mac_set_err_status);
-const struct rtw89_hfc_prec_cfg rtw_hfc_preccfg_pcie = {
+const struct rtw89_hfc_prec_cfg rtw89_hfc_preccfg_pcie = {
2, 40, 0, 0, 1, 0, 0, 0
};
+EXPORT_SYMBOL(rtw89_hfc_preccfg_pcie);
static int hfc_reset_param(struct rtw89_dev *rtwdev)
{
@@ -567,6 +569,8 @@ static int hfc_pub_cfg_chk(struct rtw89_dev *rtwdev)
static int hfc_ch_ctrl(struct rtw89_dev *rtwdev, u8 ch)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_page_regs *regs = chip->page_regs;
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg;
int ret = 0;
@@ -586,13 +590,15 @@ static int hfc_ch_ctrl(struct rtw89_dev *rtwdev, u8 ch)
val = u32_encode_bits(cfg[ch].min, B_AX_MIN_PG_MASK) |
u32_encode_bits(cfg[ch].max, B_AX_MAX_PG_MASK) |
(cfg[ch].grp ? B_AX_GRP : 0);
- rtw89_write32(rtwdev, R_AX_ACH0_PAGE_CTRL + ch * 4, val);
+ rtw89_write32(rtwdev, regs->ach_page_ctrl + ch * 4, val);
return 0;
}
static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_page_regs *regs = chip->page_regs;
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
struct rtw89_hfc_ch_info *info = param->ch_info;
const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg;
@@ -606,7 +612,7 @@ static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch)
if (ch > RTW89_DMA_H2C)
return -EINVAL;
- val = rtw89_read32(rtwdev, R_AX_ACH0_PAGE_INFO + ch * 4);
+ val = rtw89_read32(rtwdev, regs->ach_page_info + ch * 4);
info[ch].aval = u32_get_bits(val, B_AX_AVAL_PG_MASK);
if (ch < RTW89_DMA_H2C)
info[ch].used = u32_get_bits(val, B_AX_USE_PG_MASK);
@@ -618,6 +624,8 @@ static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch)
static int hfc_pub_ctrl(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_page_regs *regs = chip->page_regs;
const struct rtw89_hfc_pub_cfg *cfg = &rtwdev->mac.hfc_param.pub_cfg;
u32 val;
int ret;
@@ -632,16 +640,18 @@ static int hfc_pub_ctrl(struct rtw89_dev *rtwdev)
val = u32_encode_bits(cfg->grp0, B_AX_PUBPG_G0_MASK) |
u32_encode_bits(cfg->grp1, B_AX_PUBPG_G1_MASK);
- rtw89_write32(rtwdev, R_AX_PUB_PAGE_CTRL1, val);
+ rtw89_write32(rtwdev, regs->pub_page_ctrl1, val);
val = u32_encode_bits(cfg->wp_thrd, B_AX_WP_THRD_MASK);
- rtw89_write32(rtwdev, R_AX_WP_PAGE_CTRL2, val);
+ rtw89_write32(rtwdev, regs->wp_page_ctrl2, val);
return 0;
}
static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_page_regs *regs = chip->page_regs;
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
@@ -653,20 +663,20 @@ static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
if (ret)
return ret;
- val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO1);
+ val = rtw89_read32(rtwdev, regs->pub_page_info1);
info->g0_used = u32_get_bits(val, B_AX_G0_USE_PG_MASK);
info->g1_used = u32_get_bits(val, B_AX_G1_USE_PG_MASK);
- val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO3);
+ val = rtw89_read32(rtwdev, regs->pub_page_info3);
info->g0_aval = u32_get_bits(val, B_AX_G0_AVAL_PG_MASK);
info->g1_aval = u32_get_bits(val, B_AX_G1_AVAL_PG_MASK);
info->pub_aval =
- u32_get_bits(rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO2),
+ u32_get_bits(rtw89_read32(rtwdev, regs->pub_page_info2),
B_AX_PUB_AVAL_PG_MASK);
info->wp_aval =
- u32_get_bits(rtw89_read32(rtwdev, R_AX_WP_PAGE_INFO1),
+ u32_get_bits(rtw89_read32(rtwdev, regs->wp_page_info1),
B_AX_WP_AVAL_PG_MASK);
- val = rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL);
+ val = rtw89_read32(rtwdev, regs->hci_fc_ctrl);
param->en = val & B_AX_HCI_FC_EN ? 1 : 0;
param->h2c_en = val & B_AX_HCI_FC_CH12_EN ? 1 : 0;
param->mode = u32_get_bits(val, B_AX_HCI_FC_MODE_MASK);
@@ -679,21 +689,21 @@ static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
prec_cfg->wp_ch811_full_cond =
u32_get_bits(val, B_AX_HCI_FC_WP_CH811_FULL_COND_MASK);
- val = rtw89_read32(rtwdev, R_AX_CH_PAGE_CTRL);
+ val = rtw89_read32(rtwdev, regs->ch_page_ctrl);
prec_cfg->ch011_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH011_MASK);
prec_cfg->h2c_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH12_MASK);
- val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_CTRL2);
+ val = rtw89_read32(rtwdev, regs->pub_page_ctrl2);
pub_cfg->pub_max = u32_get_bits(val, B_AX_PUBPG_ALL_MASK);
- val = rtw89_read32(rtwdev, R_AX_WP_PAGE_CTRL1);
+ val = rtw89_read32(rtwdev, regs->wp_page_ctrl1);
prec_cfg->wp_ch07_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH07_MASK);
prec_cfg->wp_ch811_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH811_MASK);
- val = rtw89_read32(rtwdev, R_AX_WP_PAGE_CTRL2);
+ val = rtw89_read32(rtwdev, regs->wp_page_ctrl2);
pub_cfg->wp_thrd = u32_get_bits(val, B_AX_WP_THRD_MASK);
- val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_CTRL1);
+ val = rtw89_read32(rtwdev, regs->pub_page_ctrl1);
pub_cfg->grp0 = u32_get_bits(val, B_AX_PUBPG_G0_MASK);
pub_cfg->grp1 = u32_get_bits(val, B_AX_PUBPG_G1_MASK);
@@ -706,20 +716,24 @@ static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
static void hfc_h2c_cfg(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_page_regs *regs = chip->page_regs;
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
u32 val;
val = u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK);
- rtw89_write32(rtwdev, R_AX_CH_PAGE_CTRL, val);
+ rtw89_write32(rtwdev, regs->ch_page_ctrl, val);
- rtw89_write32_mask(rtwdev, R_AX_HCI_FC_CTRL,
+ rtw89_write32_mask(rtwdev, regs->hci_fc_ctrl,
B_AX_HCI_FC_CH12_FULL_COND_MASK,
prec_cfg->h2c_full_cond);
}
static void hfc_mix_cfg(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_page_regs *regs = chip->page_regs;
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
const struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
@@ -727,18 +741,18 @@ static void hfc_mix_cfg(struct rtw89_dev *rtwdev)
val = u32_encode_bits(prec_cfg->ch011_prec, B_AX_PREC_PAGE_CH011_MASK) |
u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK);
- rtw89_write32(rtwdev, R_AX_CH_PAGE_CTRL, val);
+ rtw89_write32(rtwdev, regs->ch_page_ctrl, val);
val = u32_encode_bits(pub_cfg->pub_max, B_AX_PUBPG_ALL_MASK);
- rtw89_write32(rtwdev, R_AX_PUB_PAGE_CTRL2, val);
+ rtw89_write32(rtwdev, regs->pub_page_ctrl2, val);
val = u32_encode_bits(prec_cfg->wp_ch07_prec,
B_AX_PREC_PAGE_WP_CH07_MASK) |
u32_encode_bits(prec_cfg->wp_ch811_prec,
B_AX_PREC_PAGE_WP_CH811_MASK);
- rtw89_write32(rtwdev, R_AX_WP_PAGE_CTRL1, val);
+ rtw89_write32(rtwdev, regs->wp_page_ctrl1, val);
- val = u32_replace_bits(rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL),
+ val = u32_replace_bits(rtw89_read32(rtwdev, regs->hci_fc_ctrl),
param->mode, B_AX_HCI_FC_MODE_MASK);
val = u32_replace_bits(val, prec_cfg->ch011_full_cond,
B_AX_HCI_FC_WD_FULL_COND_MASK);
@@ -748,21 +762,23 @@ static void hfc_mix_cfg(struct rtw89_dev *rtwdev)
B_AX_HCI_FC_WP_CH07_FULL_COND_MASK);
val = u32_replace_bits(val, prec_cfg->wp_ch811_full_cond,
B_AX_HCI_FC_WP_CH811_FULL_COND_MASK);
- rtw89_write32(rtwdev, R_AX_HCI_FC_CTRL, val);
+ rtw89_write32(rtwdev, regs->hci_fc_ctrl, val);
}
static void hfc_func_en(struct rtw89_dev *rtwdev, bool en, bool h2c_en)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_page_regs *regs = chip->page_regs;
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
u32 val;
- val = rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL);
+ val = rtw89_read32(rtwdev, regs->hci_fc_ctrl);
param->en = en;
param->h2c_en = h2c_en;
val = en ? (val | B_AX_HCI_FC_EN) : (val & ~B_AX_HCI_FC_EN);
val = h2c_en ? (val | B_AX_HCI_FC_CH12_EN) :
(val & ~B_AX_HCI_FC_CH12_EN);
- rtw89_write32(rtwdev, R_AX_HCI_FC_CTRL, val);
+ rtw89_write32(rtwdev, regs->hci_fc_ctrl, val);
}
static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en)
@@ -915,23 +931,31 @@ rtw89_mac_get_req_pwr_state(struct rtw89_dev *rtwdev)
}
static void rtw89_mac_send_rpwm(struct rtw89_dev *rtwdev,
- enum rtw89_rpwm_req_pwr_state req_pwr_state)
+ enum rtw89_rpwm_req_pwr_state req_pwr_state,
+ bool notify_wake)
{
u16 request;
+ spin_lock_bh(&rtwdev->rpwm_lock);
+
request = rtw89_read16(rtwdev, R_AX_RPWM);
request ^= request | PS_RPWM_TOGGLE;
-
- rtwdev->mac.rpwm_seq_num = (rtwdev->mac.rpwm_seq_num + 1) &
- RPWM_SEQ_NUM_MAX;
- request |= FIELD_PREP(PS_RPWM_SEQ_NUM, rtwdev->mac.rpwm_seq_num);
-
request |= req_pwr_state;
- if (req_pwr_state < RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED)
- request |= PS_RPWM_ACK;
+ if (notify_wake) {
+ request |= PS_RPWM_NOTIFY_WAKE;
+ } else {
+ rtwdev->mac.rpwm_seq_num = (rtwdev->mac.rpwm_seq_num + 1) &
+ RPWM_SEQ_NUM_MAX;
+ request |= FIELD_PREP(PS_RPWM_SEQ_NUM,
+ rtwdev->mac.rpwm_seq_num);
+ if (req_pwr_state < RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED)
+ request |= PS_RPWM_ACK;
+ }
rtw89_write16(rtwdev, rtwdev->hci.rpwm_addr, request);
+
+ spin_unlock_bh(&rtwdev->rpwm_lock);
}
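The pre-existing `request ^= request | PS_RPWM_TOGGLE;` line kept above is denser than it looks: x ^ (x | T) reduces to ~x & T, so the expression discards every stale bit read back from R_AX_RPWM and keeps only the flipped toggle bit before the new state, sequence and ack bits are ORed in. A stand-alone check of that identity; the bit position is illustrative.

    /* Verify x ^ (x | T) == ~x & T for a spread of 16-bit values. */
    #include <assert.h>
    #include <stdint.h>

    #define PS_RPWM_TOGGLE (1u << 15) /* illustrative bit position */

    int main(void)
    {
        for (uint32_t x = 0; x < 0x20000; x += 0x1357) {
            uint16_t request = (uint16_t)x;
            uint16_t a = request ^ (request | PS_RPWM_TOGGLE);
            uint16_t b = (uint16_t)(~request & PS_RPWM_TOGGLE);

            assert(a == b);
        }
        return 0;
    }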
static int rtw89_mac_check_cpwm_state(struct rtw89_dev *rtwdev,
@@ -991,7 +1015,7 @@ void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
else
state = RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE;
- rtw89_mac_send_rpwm(rtwdev, state);
+ rtw89_mac_send_rpwm(rtwdev, state, false);
ret = read_poll_timeout_atomic(rtw89_mac_check_cpwm_state, ret, !ret,
1000, 15000, false, rtwdev, state);
if (ret)
@@ -999,19 +1023,31 @@ void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
enter ? "entering" : "leaving");
}
+void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_rpwm_req_pwr_state state;
+
+ state = rtw89_mac_get_req_pwr_state(rtwdev);
+ rtw89_mac_send_rpwm(rtwdev, state, true);
+}
+
static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
{
#define PWR_ACT 1
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_pwr_cfg * const *cfg_seq;
+ int (*cfg_func)(struct rtw89_dev *rtwdev);
struct rtw89_hal *hal = &rtwdev->hal;
int ret;
u8 val;
- if (on)
+ if (on) {
cfg_seq = chip->pwr_on_seq;
- else
+ cfg_func = chip->ops->pwr_on_func;
+ } else {
cfg_seq = chip->pwr_off_seq;
+ cfg_func = chip->ops->pwr_off_func;
+ }
if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags))
__rtw89_leave_ps_mode(rtwdev);
@@ -1022,7 +1058,7 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
return -EBUSY;
}
- ret = rtw89_mac_pwr_seq(rtwdev, cfg_seq);
+ ret = cfg_func ? cfg_func(rtwdev) : rtw89_mac_pwr_seq(rtwdev, cfg_seq);
if (ret)
return ret;
@@ -1092,18 +1128,31 @@ static int cmac_func_en(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
static int dmac_func_en(struct rtw89_dev *rtwdev)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val32;
- val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_MAC_SEC_EN |
- B_AX_DISPATCHER_EN | B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN |
- B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN | B_AX_STA_SCH_EN |
- B_AX_TXPKT_CTRL_EN | B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN);
+ if (chip_id == RTL8852C)
+ val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN |
+ B_AX_MAC_SEC_EN | B_AX_DISPATCHER_EN |
+ B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN |
+ B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN |
+ B_AX_STA_SCH_EN | B_AX_TXPKT_CTRL_EN |
+ B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN |
+ B_AX_DMAC_CRPRT | B_AX_H_AXIDMA_EN);
+ else
+ val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN |
+ B_AX_MAC_SEC_EN | B_AX_DISPATCHER_EN |
+ B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN |
+ B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN |
+ B_AX_STA_SCH_EN | B_AX_TXPKT_CTRL_EN |
+ B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN |
+ B_AX_DMAC_CRPRT);
rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val32);
val32 = (B_AX_MAC_SEC_CLK_EN | B_AX_DISPATCHER_CLK_EN |
B_AX_DLE_CPUIO_CLK_EN | B_AX_PKT_IN_CLK_EN |
B_AX_STA_SCH_CLK_EN | B_AX_TXPKT_CTRL_CLK_EN |
- B_AX_WD_RLS_CLK_EN);
+ B_AX_WD_RLS_CLK_EN | B_AX_BBRPT_CLK_EN);
rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val32);
return 0;
@@ -1111,7 +1160,11 @@ static int dmac_func_en(struct rtw89_dev *rtwdev)
static int chip_func_en(struct rtw89_dev *rtwdev)
{
- rtw89_write32_set(rtwdev, R_AX_SPSLDO_ON_CTRL0, B_AX_OCP_L1_MASK);
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+
+ if (chip_id == RTL8852A)
+ rtw89_write32_set(rtwdev, R_AX_SPSLDO_ON_CTRL0,
+ B_AX_OCP_L1_MASK);
return 0;
}
@@ -1136,49 +1189,118 @@ static int rtw89_mac_sys_init(struct rtw89_dev *rtwdev)
}
/* PCIE 64 */
-const struct rtw89_dle_size wde_size0 = {
+const struct rtw89_dle_size rtw89_wde_size0 = {
RTW89_WDE_PG_64, 4095, 1,
};
+EXPORT_SYMBOL(rtw89_wde_size0);
/* DLFW */
-const struct rtw89_dle_size wde_size4 = {
+const struct rtw89_dle_size rtw89_wde_size4 = {
RTW89_WDE_PG_64, 0, 4096,
};
+EXPORT_SYMBOL(rtw89_wde_size4);
+
+/* 8852C DLFW */
+const struct rtw89_dle_size rtw89_wde_size18 = {
+ RTW89_WDE_PG_64, 0, 2048,
+};
+EXPORT_SYMBOL(rtw89_wde_size18);
+
+/* 8852C PCIE SCC */
+const struct rtw89_dle_size rtw89_wde_size19 = {
+ RTW89_WDE_PG_64, 3328, 0,
+};
+EXPORT_SYMBOL(rtw89_wde_size19);
/* PCIE */
-const struct rtw89_dle_size ple_size0 = {
+const struct rtw89_dle_size rtw89_ple_size0 = {
RTW89_PLE_PG_128, 1520, 16,
};
+EXPORT_SYMBOL(rtw89_ple_size0);
/* DLFW */
-const struct rtw89_dle_size ple_size4 = {
+const struct rtw89_dle_size rtw89_ple_size4 = {
RTW89_PLE_PG_128, 64, 1472,
};
+EXPORT_SYMBOL(rtw89_ple_size4);
+
+/* 8852C DLFW */
+const struct rtw89_dle_size rtw89_ple_size18 = {
+ RTW89_PLE_PG_128, 2544, 16,
+};
+EXPORT_SYMBOL(rtw89_ple_size18);
+
+/* 8852C PCIE SCC */
+const struct rtw89_dle_size rtw89_ple_size19 = {
+ RTW89_PLE_PG_128, 1904, 16,
+};
+EXPORT_SYMBOL(rtw89_ple_size19);
/* PCIE 64 */
-const struct rtw89_wde_quota wde_qt0 = {
+const struct rtw89_wde_quota rtw89_wde_qt0 = {
3792, 196, 0, 107,
};
+EXPORT_SYMBOL(rtw89_wde_qt0);
/* DLFW */
-const struct rtw89_wde_quota wde_qt4 = {
+const struct rtw89_wde_quota rtw89_wde_qt4 = {
0, 0, 0, 0,
};
+EXPORT_SYMBOL(rtw89_wde_qt4);
+
+/* 8852C DLFW */
+const struct rtw89_wde_quota rtw89_wde_qt17 = {
+ 0, 0, 0, 0,
+};
+EXPORT_SYMBOL(rtw89_wde_qt17);
+
+/* 8852C PCIE SCC */
+const struct rtw89_wde_quota rtw89_wde_qt18 = {
+ 3228, 60, 0, 40,
+};
+EXPORT_SYMBOL(rtw89_wde_qt18);
/* PCIE SCC */
-const struct rtw89_ple_quota ple_qt4 = {
+const struct rtw89_ple_quota rtw89_ple_qt4 = {
264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8,
};
+EXPORT_SYMBOL(rtw89_ple_qt4);
/* PCIE SCC */
-const struct rtw89_ple_quota ple_qt5 = {
+const struct rtw89_ple_quota rtw89_ple_qt5 = {
264, 0, 32, 20, 64, 13, 1101, 0, 64, 128, 120,
};
+EXPORT_SYMBOL(rtw89_ple_qt5);
/* DLFW */
-const struct rtw89_ple_quota ple_qt13 = {
+const struct rtw89_ple_quota rtw89_ple_qt13 = {
0, 0, 16, 48, 0, 0, 0, 0, 0, 0, 0
};
+EXPORT_SYMBOL(rtw89_ple_qt13);
+
+/* DLFW 52C */
+const struct rtw89_ple_quota rtw89_ple_qt44 = {
+ 0, 0, 16, 256, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+EXPORT_SYMBOL(rtw89_ple_qt44);
+
+/* DLFW 52C */
+const struct rtw89_ple_quota rtw89_ple_qt45 = {
+ 0, 0, 32, 256, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+EXPORT_SYMBOL(rtw89_ple_qt45);
+
+/* 8852C PCIE SCC */
+const struct rtw89_ple_quota rtw89_ple_qt46 = {
+ 525, 0, 16, 20, 13, 13, 178, 0, 32, 62, 8, 16,
+};
+EXPORT_SYMBOL(rtw89_ple_qt46);
+
+/* 8852C PCIE SCC */
+const struct rtw89_ple_quota rtw89_ple_qt47 = {
+ 525, 0, 32, 20, 1034, 13, 1199, 0, 1053, 62, 160, 1037,
+};
+EXPORT_SYMBOL(rtw89_ple_qt47);
static const struct rtw89_dle_mem *get_dle_mem_cfg(struct rtw89_dev *rtwdev,
enum rtw89_qta_mode mode)
@@ -1334,6 +1456,8 @@ static void ple_quota_cfg(struct rtw89_dev *rtwdev,
SET_QUOTA(bb_rpt, PLE, 8);
SET_QUOTA(wd_rel, PLE, 9);
SET_QUOTA(cpu_io, PLE, 10);
+ if (rtwdev->chip->chip_id == RTL8852C)
+ SET_QUOTA(tx_rpt, PLE, 11);
}
#undef SET_QUOTA
@@ -1421,6 +1545,43 @@ error:
return ret;
}
+static int preload_init_set(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx,
+ enum rtw89_qta_mode mode)
+{
+ u32 reg, max_preld_size, min_rsvd_size;
+
+ max_preld_size = (mac_idx == RTW89_MAC_0 ?
+ PRELD_B0_ENT_NUM : PRELD_B1_ENT_NUM) * PRELD_AMSDU_SIZE;
+ reg = mac_idx == RTW89_MAC_0 ?
+ R_AX_TXPKTCTL_B0_PRELD_CFG0 : R_AX_TXPKTCTL_B1_PRELD_CFG0;
+ rtw89_write32_mask(rtwdev, reg, B_AX_B0_PRELD_USEMAXSZ_MASK, max_preld_size);
+ rtw89_write32_set(rtwdev, reg, B_AX_B0_PRELD_FEN);
+
+ min_rsvd_size = PRELD_AMSDU_SIZE;
+ reg = mac_idx == RTW89_MAC_0 ?
+ R_AX_TXPKTCTL_B0_PRELD_CFG1 : R_AX_TXPKTCTL_B1_PRELD_CFG1;
+ rtw89_write32_mask(rtwdev, reg, B_AX_B0_PRELD_NXT_TXENDWIN_MASK, PRELD_NEXT_WND);
+ rtw89_write32_mask(rtwdev, reg, B_AX_B0_PRELD_NXT_RSVMINSZ_MASK, min_rsvd_size);
+
+ return 0;
+}
+
+static bool is_qta_poh(struct rtw89_dev *rtwdev)
+{
+ return rtwdev->hci.type == RTW89_HCI_TYPE_PCIE;
+}
+
+static int preload_init(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx,
+ enum rtw89_qta_mode mode)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B || !is_qta_poh(rtwdev))
+ return 0;
+
+ return preload_init_set(rtwdev, mac_idx, mode);
+}
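Preload is engaged only on PCIE hosts and skipped on 8852A/8852B; the budget is one band's worth of preload entries expressed in A-MSDU units, with a single unit held back as the reserve floor. A quick arithmetic sketch; the PRELD_* values below are illustrative assumptions, not the constants from mac.h:

	/* Illustrative values only - the real PRELD_* constants live in mac.h */
	#define PRELD_AMSDU_SIZE	52	/* assumed bytes per preload unit */
	#define PRELD_B0_ENT_NUM	10	/* assumed band-0 entry count */

	u32 max_preld_size = PRELD_B0_ENT_NUM * PRELD_AMSDU_SIZE;	/* 520 */
	u32 min_rsvd_size = PRELD_AMSDU_SIZE;				/* one unit spare */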
+
static bool dle_is_txq_empty(struct rtw89_dev *rtwdev)
{
u32 msk32;
@@ -1528,6 +1689,12 @@ static int dmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return ret;
}
+ ret = preload_init(rtwdev, RTW89_MAC_0, rtwdev->mac.qta_mode);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]preload init %d\n", ret);
+ return ret;
+ }
+
ret = hfc_init(rtwdev, true, true, true);
if (ret) {
rtw89_err(rtwdev, "[ERR]HCI FC init %d\n", ret);
@@ -2079,8 +2246,26 @@ static int rtw89_set_hw_sch_tx_en(struct rtw89_dev *rtwdev, u8 mac_idx,
return 0;
}
+static int rtw89_set_hw_sch_tx_en_v1(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 tx_en, u32 tx_en_mask)
+{
+ u32 reg = rtw89_mac_reg_by_idx(R_AX_CTN_DRV_TXEN, mac_idx);
+ u32 val;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ val = rtw89_read32(rtwdev, reg);
+ val = (val & ~tx_en_mask) | (tx_en & tx_en_mask);
+ rtw89_write32(rtwdev, reg, val);
+
+ return 0;
+}
+
int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
- u16 *tx_en, enum rtw89_sch_tx_sel sel)
+ u32 *tx_en, enum rtw89_sch_tx_sel sel)
{
int ret;
@@ -2089,7 +2274,8 @@ int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
switch (sel) {
case RTW89_SCH_TX_SEL_ALL:
- ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0, 0xffff);
+ ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0,
+ B_AX_CTN_TXEN_ALL_MASK);
if (ret)
return ret;
break;
@@ -2106,7 +2292,8 @@ int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
return ret;
break;
case RTW89_SCH_TX_SEL_MACID:
- ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0, 0xffff);
+ ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0,
+ B_AX_CTN_TXEN_ALL_MASK);
if (ret)
return ret;
break;
@@ -2116,17 +2303,73 @@ int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
return 0;
}
+EXPORT_SYMBOL(rtw89_mac_stop_sch_tx);
+
+int rtw89_mac_stop_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 *tx_en, enum rtw89_sch_tx_sel sel)
+{
+ int ret;
+
+ *tx_en = rtw89_read32(rtwdev,
+ rtw89_mac_reg_by_idx(R_AX_CTN_DRV_TXEN, mac_idx));
+
+ switch (sel) {
+ case RTW89_SCH_TX_SEL_ALL:
+ ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, 0,
+ B_AX_CTN_TXEN_ALL_MASK_V1);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_HIQ:
+ ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx,
+ 0, B_AX_CTN_TXEN_HGQ);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_MG0:
+ ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx,
+ 0, B_AX_CTN_TXEN_MGQ);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_MACID:
+ ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, 0,
+ B_AX_CTN_TXEN_ALL_MASK_V1);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_stop_sch_tx_v1);
+
+int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
+{
+ int ret;
+
+ ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, tx_en, B_AX_CTN_TXEN_ALL_MASK);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_resume_sch_tx);
-int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u16 tx_en)
+int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
{
int ret;
- ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, tx_en, 0xffff);
+ ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, tx_en,
+ B_AX_CTN_TXEN_ALL_MASK_V1);
if (ret)
return ret;
return 0;
}
+EXPORT_SYMBOL(rtw89_mac_resume_sch_tx_v1);
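Both generations keep the same save/stop/restore contract, now with a u32 snapshot so the wider v1 mask fits. Callers are expected to go through the rtw89_chip_* wrappers, which presumably dispatch to the _v1 variants on chips driving R_AX_CTN_DRV_TXEN; the pairing mirrors band1_enable() later in this file:

	u32 tx_en;
	int ret;

	/* snapshot and halt scheduler TX before touching CMAC state */
	ret = rtw89_chip_stop_sch_tx(rtwdev, RTW89_MAC_0, &tx_en,
				     RTW89_SCH_TX_SEL_ALL);
	if (ret)
		return ret;

	/* ... reconfigure while nothing is being scheduled ... */

	/* restore exactly the queues that were enabled before */
	ret = rtw89_chip_resume_sch_tx(rtwdev, RTW89_MAC_0, tx_en);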
static u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len,
bool wd)
@@ -2290,9 +2533,9 @@ static int band1_enable(struct rtw89_dev *rtwdev)
int ret, i;
u32 sleep_bak[4] = {0};
u32 pause_bak[4] = {0};
- u16 tx_en;
+ u32 tx_en;
- ret = rtw89_mac_stop_sch_tx(rtwdev, 0, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ ret = rtw89_chip_stop_sch_tx(rtwdev, 0, &tx_en, RTW89_SCH_TX_SEL_ALL);
if (ret) {
rtw89_err(rtwdev, "[ERR]stop sch tx %d\n", ret);
return ret;
@@ -2322,7 +2565,7 @@ static int band1_enable(struct rtw89_dev *rtwdev)
rtw89_write32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4, pause_bak[i]);
}
- ret = rtw89_mac_resume_sch_tx(rtwdev, 0, tx_en);
+ ret = rtw89_chip_resume_sch_tx(rtwdev, 0, tx_en);
if (ret) {
rtw89_err(rtwdev, "[ERR]CMAC1 resume sch tx %d\n", ret);
return ret;
@@ -2516,7 +2759,11 @@ static void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev)
clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN);
+ rtw89_write32_clr(rtwdev, R_AX_WCPU_FW_CTRL, B_AX_WCPU_FWDL_EN |
+ B_AX_H2C_PATH_RDY | B_AX_FWDL_PATH_RDY);
rtw89_write32_clr(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN);
+ rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
}
static int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason,
@@ -2586,7 +2833,9 @@ static int rtw89_mac_fw_dl_pre_init(struct rtw89_dev *rtwdev)
static void rtw89_mac_hci_func_en(struct rtw89_dev *rtwdev)
{
- rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN,
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ rtw89_write32_set(rtwdev, chip->hci_func_en_addr,
B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN);
}
@@ -2705,7 +2954,7 @@ static void rtw89_mac_cmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid)
rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 28, 0xB8109);
}
-static int rtw89_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause)
+int rtw89_mac_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause)
{
u8 sh = FIELD_GET(GENMASK(4, 0), macid);
u8 grp = macid >> 5;
@@ -2864,6 +3113,36 @@ static void rtw89_mac_port_cfg_bcn_intv(struct rtw89_dev *rtwdev,
bcn_int);
}
+static void rtw89_mac_port_cfg_hiq_win(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ static const u32 hiq_win_addr[RTW89_PORT_NUM] = {
+ R_AX_P0MB_HGQ_WINDOW_CFG_0, R_AX_PORT_HGQ_WINDOW_CFG,
+ R_AX_PORT_HGQ_WINDOW_CFG + 1, R_AX_PORT_HGQ_WINDOW_CFG + 2,
+ R_AX_PORT_HGQ_WINDOW_CFG + 3,
+ };
+ u8 win = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE ? 16 : 0;
+ u8 port = rtwvif->port;
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_idx(hiq_win_addr[port], rtwvif->mac_idx);
+ rtw89_write8(rtwdev, reg, win);
+}
+
+static void rtw89_mac_port_cfg_hiq_dtim(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ const struct rtw89_port_reg *p = &rtw_port_base;
+ u32 addr;
+
+ addr = rtw89_mac_reg_by_idx(R_AX_MD_TSFT_STMP_CTL, rtwvif->mac_idx);
+ rtw89_write8_set(rtwdev, addr, B_AX_UPD_HGQMD | B_AX_UPD_TIMIE);
+
+ rtw89_write16_port_mask(rtwdev, rtwvif, p->dtim_ctrl, B_AX_DTIM_NUM_MASK,
+ vif->bss_conf.dtim_period);
+}
+
static void rtw89_mac_port_cfg_bcn_setup_time(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif)
{
@@ -2978,11 +3257,11 @@ int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_mac_dmac_tbl_init(rtwdev, rtwvif->mac_id);
rtw89_mac_cmac_tbl_init(rtwdev, rtwvif->mac_id);
- ret = rtw89_set_macid_pause(rtwdev, rtwvif->mac_id, false);
+ ret = rtw89_mac_set_macid_pause(rtwdev, rtwvif->mac_id, false);
if (ret)
return ret;
- ret = rtw89_fw_h2c_vif_maintain(rtwdev, rtwvif, RTW89_VIF_CREATE);
+ ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_CREATE);
if (ret)
return ret;
@@ -2994,7 +3273,7 @@ int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
if (ret)
return ret;
- ret = rtw89_fw_h2c_default_cmac_tbl(rtwdev, rtwvif->mac_id);
+ ret = rtw89_fw_h2c_default_cmac_tbl(rtwdev, rtwvif);
if (ret)
return ret;
@@ -3005,7 +3284,7 @@ int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
int ret;
- ret = rtw89_fw_h2c_vif_maintain(rtwdev, rtwvif, RTW89_VIF_REMOVE);
+ ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_REMOVE);
if (ret)
return ret;
@@ -3034,13 +3313,15 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif);
rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_intv(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_hiq_win(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_hiq_dtim(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_hiq_drop(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_setup_time(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_hold_time(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_mask_area(rtwdev, rtwvif);
rtw89_mac_port_cfg_tbtt_early(rtwdev, rtwvif);
rtw89_mac_port_cfg_bss_color(rtwdev, rtwvif);
rtw89_mac_port_cfg_mbssid(rtwdev, rtwvif);
- rtw89_mac_port_cfg_hiq_drop(rtwdev, rtwvif);
rtw89_mac_port_cfg_func_en(rtwdev, rtwvif);
fsleep(BCN_ERLY_SET_DLY);
rtw89_mac_port_cfg_bcn_early(rtwdev, rtwvif);
@@ -3084,6 +3365,57 @@ rtw89_mac_c2h_macid_pause(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len
{
}
+static bool rtw89_is_op_chan(struct rtw89_dev *rtwdev, u8 band, u8 channel)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+
+ return band == scan_info->op_band && channel == scan_info->op_pri_ch;
+}
+
+static void
+rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+ u32 len)
+{
+ struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u8 reason, status, tx_fail, band;
+ u16 chan;
+
+ tx_fail = RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h->data);
+ status = RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h->data);
+ chan = RTW89_GET_MAC_C2H_SCANOFLD_PRI_CH(c2h->data);
+ reason = RTW89_GET_MAC_C2H_SCANOFLD_RSP(c2h->data);
+ band = RTW89_GET_MAC_C2H_SCANOFLD_BAND(c2h->data);
+
+ if (!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ)))
+ band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
+
+ rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
+ "band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d\n",
+ band, chan, reason, status, tx_fail);
+
+ switch (reason) {
+ case RTW89_SCAN_LEAVE_CH_NOTIFY:
+ if (rtw89_is_op_chan(rtwdev, band, chan))
+ ieee80211_stop_queues(rtwdev->hw);
+ return;
+ case RTW89_SCAN_END_SCAN_NOTIFY:
+ rtw89_hw_scan_complete(rtwdev, vif, false);
+ break;
+ case RTW89_SCAN_ENTER_CH_NOTIFY:
+ if (rtw89_is_op_chan(rtwdev, band, chan))
+ ieee80211_wake_queues(rtwdev->hw);
+ break;
+ default:
+ return;
+ }
+
+ hal->prev_band_type = hal->current_band_type;
+ hal->prev_primary_channel = hal->current_channel;
+ hal->current_channel = chan;
+ hal->current_band_type = band;
+}
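Queues are only stopped or woken when the firmware notification refers to the operating channel, which implies something snapshotted op_band/op_pri_ch before the scan started. The real helper is rtw89_store_op_chan(), called from mac80211.c later in this patch; its body is not in this hunk, so the sketch below is an assumption built from the scan_info fields used above:

	/* Assumed shape of the op-channel snapshot taken before scanning */
	void rtw89_store_op_chan(struct rtw89_dev *rtwdev)
	{
		struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
		struct rtw89_hal *hal = &rtwdev->hal;

		scan_info->op_pri_ch = hal->current_primary_channel;	/* assumed field */
		scan_info->op_band = hal->current_band_type;
	}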
+
static void
rtw89_mac_c2h_rec_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
@@ -3114,6 +3446,11 @@ rtw89_mac_c2h_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
RTW89_GET_C2H_LOG_SRT_PRT(c2h->data));
}
+static void
+rtw89_mac_c2h_bcn_cnt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+}
+
static
void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
@@ -3122,6 +3459,7 @@ void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP] = NULL,
[RTW89_MAC_C2H_FUNC_BCN_RESEND] = NULL,
[RTW89_MAC_C2H_FUNC_MACID_PAUSE] = rtw89_mac_c2h_macid_pause,
+ [RTW89_MAC_C2H_FUNC_SCANOFLD_RSP] = rtw89_mac_c2h_scanofld_rsp,
};
static
@@ -3130,6 +3468,7 @@ void (* const rtw89_mac_c2h_info_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_REC_ACK] = rtw89_mac_c2h_rec_ack,
[RTW89_MAC_C2H_FUNC_DONE_ACK] = rtw89_mac_c2h_done_ack,
[RTW89_MAC_C2H_FUNC_C2H_LOG] = rtw89_mac_c2h_log,
+ [RTW89_MAC_C2H_FUNC_BCN_CNT] = rtw89_mac_c2h_bcn_cnt,
};
void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
@@ -3192,6 +3531,7 @@ error:
return false;
}
+EXPORT_SYMBOL(rtw89_mac_get_txpwr_cr);
int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
{
@@ -3216,6 +3556,7 @@ int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
return ret;
}
+EXPORT_SYMBOL(rtw89_mac_cfg_ppdu_status);
void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx)
{
@@ -3349,33 +3690,37 @@ int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex
return 0;
}
+EXPORT_SYMBOL(rtw89_mac_coex_init);
int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
{
- u32 val, ret;
+ u32 val = 0, ret;
+
+ if (gnt_cfg->band[0].gnt_bt)
+ val |= B_AX_GNT_BT_RFC_S0_SW_VAL | B_AX_GNT_BT_BB_S0_SW_VAL;
+
+ if (gnt_cfg->band[0].gnt_bt_sw_en)
+ val |= B_AX_GNT_BT_RFC_S0_SW_CTRL | B_AX_GNT_BT_BB_S0_SW_CTRL;
+
+ if (gnt_cfg->band[0].gnt_wl)
+ val |= B_AX_GNT_WL_RFC_S0_SW_VAL | B_AX_GNT_WL_BB_S0_SW_VAL;
+
+ if (gnt_cfg->band[0].gnt_wl_sw_en)
+ val |= B_AX_GNT_WL_RFC_S0_SW_CTRL | B_AX_GNT_WL_BB_S0_SW_CTRL;
+
+ if (gnt_cfg->band[1].gnt_bt)
+ val |= B_AX_GNT_BT_RFC_S1_SW_VAL | B_AX_GNT_BT_BB_S1_SW_VAL;
+
+ if (gnt_cfg->band[1].gnt_bt_sw_en)
+ val |= B_AX_GNT_BT_RFC_S1_SW_CTRL | B_AX_GNT_BT_BB_S1_SW_CTRL;
+
+ if (gnt_cfg->band[1].gnt_wl)
+ val |= B_AX_GNT_WL_RFC_S1_SW_VAL | B_AX_GNT_WL_BB_S1_SW_VAL;
+
+ if (gnt_cfg->band[1].gnt_wl_sw_en)
+ val |= B_AX_GNT_WL_RFC_S1_SW_CTRL | B_AX_GNT_WL_BB_S1_SW_CTRL;
- ret = rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val);
- if (ret) {
- rtw89_err(rtwdev, "Read LTE fail!\n");
- return ret;
- }
- val = (gnt_cfg->band[0].gnt_bt ?
- B_AX_GNT_BT_RFC_S0_SW_VAL | B_AX_GNT_BT_BB_S0_SW_VAL : 0) |
- (gnt_cfg->band[0].gnt_bt_sw_en ?
- B_AX_GNT_BT_RFC_S0_SW_CTRL | B_AX_GNT_BT_BB_S0_SW_CTRL : 0) |
- (gnt_cfg->band[0].gnt_wl ?
- B_AX_GNT_WL_RFC_S0_SW_VAL | B_AX_GNT_WL_BB_S0_SW_VAL : 0) |
- (gnt_cfg->band[0].gnt_wl_sw_en ?
- B_AX_GNT_WL_RFC_S0_SW_CTRL | B_AX_GNT_WL_BB_S0_SW_CTRL : 0) |
- (gnt_cfg->band[1].gnt_bt ?
- B_AX_GNT_BT_RFC_S1_SW_VAL | B_AX_GNT_BT_BB_S1_SW_VAL : 0) |
- (gnt_cfg->band[1].gnt_bt_sw_en ?
- B_AX_GNT_BT_RFC_S1_SW_CTRL | B_AX_GNT_BT_BB_S1_SW_CTRL : 0) |
- (gnt_cfg->band[1].gnt_wl ?
- B_AX_GNT_WL_RFC_S1_SW_VAL | B_AX_GNT_WL_BB_S1_SW_VAL : 0) |
- (gnt_cfg->band[1].gnt_wl_sw_en ?
- B_AX_GNT_WL_RFC_S1_SW_CTRL | B_AX_GNT_WL_BB_S1_SW_CTRL : 0);
ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_1, val);
if (ret) {
rtw89_err(rtwdev, "Write LTE fail!\n");
@@ -3384,11 +3729,59 @@ int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
return 0;
}
+EXPORT_SYMBOL(rtw89_mac_cfg_gnt);
+
+int rtw89_mac_cfg_gnt_v1(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
+{
+ u32 val = 0;
+
+ if (gnt_cfg->band[0].gnt_bt)
+ val |= B_AX_GNT_BT_RFC_S0_VAL | B_AX_GNT_BT_RX_VAL |
+ B_AX_GNT_BT_TX_VAL;
+ else
+ val |= B_AX_WL_ACT_VAL;
+
+ if (gnt_cfg->band[0].gnt_bt_sw_en)
+ val |= B_AX_GNT_BT_RFC_S0_SWCTRL | B_AX_GNT_BT_RX_SWCTRL |
+ B_AX_GNT_BT_TX_SWCTRL | B_AX_WL_ACT_SWCTRL;
+
+ if (gnt_cfg->band[0].gnt_wl)
+ val |= B_AX_GNT_WL_RFC_S0_VAL | B_AX_GNT_WL_RX_VAL |
+ B_AX_GNT_WL_TX_VAL | B_AX_GNT_WL_BB_VAL;
+
+ if (gnt_cfg->band[0].gnt_wl_sw_en)
+ val |= B_AX_GNT_WL_RFC_S0_SWCTRL | B_AX_GNT_WL_RX_SWCTRL |
+ B_AX_GNT_WL_TX_SWCTRL | B_AX_GNT_WL_BB_SWCTRL;
+
+ if (gnt_cfg->band[1].gnt_bt)
+ val |= B_AX_GNT_BT_RFC_S1_VAL | B_AX_GNT_BT_RX_VAL |
+ B_AX_GNT_BT_TX_VAL;
+ else
+ val |= B_AX_WL_ACT_VAL;
+
+ if (gnt_cfg->band[1].gnt_bt_sw_en)
+ val |= B_AX_GNT_BT_RFC_S1_SWCTRL | B_AX_GNT_BT_RX_SWCTRL |
+ B_AX_GNT_BT_TX_SWCTRL | B_AX_WL_ACT_SWCTRL;
+
+ if (gnt_cfg->band[1].gnt_wl)
+ val |= B_AX_GNT_WL_RFC_S1_VAL | B_AX_GNT_WL_RX_VAL |
+ B_AX_GNT_WL_TX_VAL | B_AX_GNT_WL_BB_VAL;
+
+ if (gnt_cfg->band[1].gnt_wl_sw_en)
+ val |= B_AX_GNT_WL_RFC_S1_SWCTRL | B_AX_GNT_WL_RX_SWCTRL |
+ B_AX_GNT_WL_TX_SWCTRL | B_AX_GNT_WL_BB_SWCTRL;
+
+ rtw89_write32(rtwdev, R_AX_GNT_SW_CTRL, val);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_cfg_gnt_v1);
int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
{
u32 reg;
- u8 val;
+ u16 val;
int ret;
ret = rtw89_mac_check_mac_en(rtwdev, plt->band, RTW89_CMAC_SEL);
@@ -3403,8 +3796,9 @@ int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
(plt->rx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_RX_PLT_GNT_LTE_RX : 0) |
(plt->rx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_AX_RX_PLT_GNT_BT_TX : 0) |
(plt->rx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_RX_PLT_GNT_BT_RX : 0) |
- (plt->rx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_RX_PLT_GNT_WL : 0);
- rtw89_write8(rtwdev, reg, val);
+ (plt->rx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_RX_PLT_GNT_WL : 0) |
+ B_AX_PLT_EN;
+ rtw89_write16(rtwdev, reg, val);
return 0;
}
@@ -3442,6 +3836,28 @@ int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl)
return 0;
}
+EXPORT_SYMBOL(rtw89_mac_cfg_ctrl_path);
+
+int rtw89_mac_cfg_ctrl_path_v1(struct rtw89_dev *rtwdev, bool wl)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_mac_ax_gnt *g = dm->gnt.band;
+ int i;
+
+ if (wl)
+ return 0;
+
+ for (i = 0; i < RTW89_PHY_MAX; i++) {
+ g[i].gnt_bt_sw_en = 1;
+ g[i].gnt_bt = 1;
+ g[i].gnt_wl_sw_en = 1;
+ g[i].gnt_wl = 0;
+ }
+
+ return rtw89_mac_cfg_gnt_v1(rtwdev, &dm->gnt);
+}
+EXPORT_SYMBOL(rtw89_mac_cfg_ctrl_path_v1);
bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev)
{
@@ -3845,3 +4261,51 @@ int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev,
return 0;
}
+
+int rtw89_mac_write_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 mask)
+{
+ u32 val32;
+ int ret;
+
+ val32 = FIELD_PREP(B_AX_WL_XTAL_SI_ADDR_MASK, offset) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_DATA_MASK, val) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_BITMASK_MASK, mask) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_MODE_MASK, XTAL_SI_NORMAL_WRITE) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_CMD_POLL, 1);
+ rtw89_write32(rtwdev, R_AX_WLAN_XTAL_SI_CTRL, val32);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_WL_XTAL_SI_CMD_POLL),
+ 50, 50000, false, rtwdev, R_AX_WLAN_XTAL_SI_CTRL);
+ if (ret) {
+ rtw89_warn(rtwdev, "xtal si not ready(W): offset=%x val=%x mask=%x\n",
+ offset, val, mask);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_write_xtal_si);
+
+int rtw89_mac_read_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 *val)
+{
+ u32 val32;
+ int ret;
+
+ val32 = FIELD_PREP(B_AX_WL_XTAL_SI_ADDR_MASK, offset) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_DATA_MASK, 0x00) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_BITMASK_MASK, 0x00) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_MODE_MASK, XTAL_SI_NORMAL_READ) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_CMD_POLL, 1);
+ rtw89_write32(rtwdev, R_AX_WLAN_XTAL_SI_CTRL, val32);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_WL_XTAL_SI_CMD_POLL),
+ 50, 50000, false, rtwdev, R_AX_WLAN_XTAL_SI_CTRL);
+ if (ret) {
+ rtw89_warn(rtwdev, "xtal si not ready(R): offset=%x\n", offset);
+ return ret;
+ }
+
+ *val = rtw89_read8(rtwdev, R_AX_WLAN_XTAL_SI_CTRL + 1);
+
+ return 0;
+}
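Both accessors share one handshake: pack address, data, bitmask and mode into R_AX_WLAN_XTAL_SI_CTRL, raise the CMD_POLL bit, and poll until hardware clears it; the read path then fetches the result byte at CTRL + 1. A hedged usage sketch built from the XTAL_SI_* offsets added to mac.h below (whether a given chip needs this exact sequence is an assumption):

	u8 cv;
	int ret;

	/* power the S0 RF front end through the SI bridge, then read back */
	ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0,
				      XTAL_SI_RF00, XTAL_SI_RF00);
	if (ret)
		return ret;

	ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_CV, &cv);
	if (ret)
		return ret;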
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index b7d13edf7dd1..b797667c78c6 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -301,6 +301,7 @@ enum rtw89_mac_c2h_ofld_func {
RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP,
RTW89_MAC_C2H_FUNC_BCN_RESEND,
RTW89_MAC_C2H_FUNC_MACID_PAUSE,
+ RTW89_MAC_C2H_FUNC_SCANOFLD_RSP = 0x9,
RTW89_MAC_C2H_FUNC_OFLD_MAX,
};
@@ -308,6 +309,7 @@ enum rtw89_mac_c2h_info_func {
RTW89_MAC_C2H_FUNC_REC_ACK,
RTW89_MAC_C2H_FUNC_DONE_ACK,
RTW89_MAC_C2H_FUNC_C2H_LOG,
+ RTW89_MAC_C2H_FUNC_BCN_CNT,
RTW89_MAC_C2H_FUNC_INFO_MAX,
};
@@ -670,16 +672,26 @@ enum mac_ax_err_info {
MAC_AX_SET_ERR_MAX,
};
-extern const struct rtw89_hfc_prec_cfg rtw_hfc_preccfg_pcie;
-extern const struct rtw89_dle_size wde_size0;
-extern const struct rtw89_dle_size wde_size4;
-extern const struct rtw89_dle_size ple_size0;
-extern const struct rtw89_dle_size ple_size4;
-extern const struct rtw89_wde_quota wde_qt0;
-extern const struct rtw89_wde_quota wde_qt4;
-extern const struct rtw89_ple_quota ple_qt4;
-extern const struct rtw89_ple_quota ple_qt5;
-extern const struct rtw89_ple_quota ple_qt13;
+extern const struct rtw89_hfc_prec_cfg rtw89_hfc_preccfg_pcie;
+extern const struct rtw89_dle_size rtw89_wde_size0;
+extern const struct rtw89_dle_size rtw89_wde_size4;
+extern const struct rtw89_dle_size rtw89_wde_size18;
+extern const struct rtw89_dle_size rtw89_wde_size19;
+extern const struct rtw89_dle_size rtw89_ple_size0;
+extern const struct rtw89_dle_size rtw89_ple_size4;
+extern const struct rtw89_dle_size rtw89_ple_size18;
+extern const struct rtw89_dle_size rtw89_ple_size19;
+extern const struct rtw89_wde_quota rtw89_wde_qt0;
+extern const struct rtw89_wde_quota rtw89_wde_qt4;
+extern const struct rtw89_wde_quota rtw89_wde_qt17;
+extern const struct rtw89_wde_quota rtw89_wde_qt18;
+extern const struct rtw89_ple_quota rtw89_ple_qt4;
+extern const struct rtw89_ple_quota rtw89_ple_qt5;
+extern const struct rtw89_ple_quota rtw89_ple_qt13;
+extern const struct rtw89_ple_quota rtw89_ple_qt44;
+extern const struct rtw89_ple_quota rtw89_ple_qt45;
+extern const struct rtw89_ple_quota rtw89_ple_qt46;
+extern const struct rtw89_ple_quota rtw89_ple_qt47;
static inline u32 rtw89_mac_reg_by_idx(u32 reg_base, u8 band)
{
@@ -779,24 +791,31 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u32 len, u8 class, u8 func);
int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev);
int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
- u16 *tx_en, enum rtw89_sch_tx_sel sel);
-int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u16 tx_en);
+ u32 *tx_en, enum rtw89_sch_tx_sel sel);
+int rtw89_mac_stop_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 *tx_en, enum rtw89_sch_tx_sel sel);
+int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
+int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_ids, bool enable);
void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx);
void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop);
int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex);
int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
+int rtw89_mac_cfg_gnt_v1(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt);
u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band);
void rtw89_mac_cfg_sb(struct rtw89_dev *rtwdev, u32 val);
u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev);
bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev);
int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl);
+int rtw89_mac_cfg_ctrl_path_v1(struct rtw89_dev *rtwdev, bool wl);
bool rtw89_mac_get_txpwr_cr(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
u32 reg_base, u32 *cr);
void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter);
+void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev);
void rtw89_mac_bf_assoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void rtw89_mac_bf_disassoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
@@ -810,6 +829,7 @@ int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool en);
+int rtw89_mac_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause);
static inline void rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev)
{
@@ -868,4 +888,44 @@ int rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev,
int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta, u8 *tx_retry);
+enum rtw89_mac_xtal_si_offset {
+ XTAL_SI_XTAL_SC_XI = 0x04,
+#define XTAL_SC_XI_MASK GENMASK(7, 0)
+ XTAL_SI_XTAL_SC_XO = 0x05,
+#define XTAL_SC_XO_MASK GENMASK(7, 0)
+ XTAL_SI_PWR_CUT = 0x10,
+#define XTAL_SI_SMALL_PWR_CUT BIT(0)
+#define XTAL_SI_BIG_PWR_CUT BIT(1)
+ XTAL_SI_XTAL_XMD_2 = 0x24,
+#define XTAL_SI_LDO_LPS GENMASK(6, 4)
+ XTAL_SI_XTAL_XMD_4 = 0x26,
+#define XTAL_SI_LPS_CAP GENMASK(3, 0)
+ XTAL_SI_CV = 0x41,
+ XTAL_SI_LOW_ADDR = 0x62,
+#define XTAL_SI_LOW_ADDR_MASK GENMASK(7, 0)
+ XTAL_SI_CTRL = 0x63,
+#define XTAL_SI_MODE_SEL_MASK GENMASK(7, 6)
+#define XTAL_SI_RDY BIT(5)
+#define XTAL_SI_HIGH_ADDR_MASK GENMASK(2, 0)
+ XTAL_SI_READ_VAL = 0x7A,
+ XTAL_SI_WL_RFC_S0 = 0x80,
+#define XTAL_SI_RF00 BIT(0)
+ XTAL_SI_WL_RFC_S1 = 0x81,
+#define XTAL_SI_RF10 BIT(0)
+ XTAL_SI_ANAPAR_WL = 0x90,
+#define XTAL_SI_SRAM2RFC BIT(7)
+#define XTAL_SI_GND_SHDN_WL BIT(6)
+#define XTAL_SI_SHDN_WL BIT(5)
+#define XTAL_SI_RFC2RF BIT(4)
+#define XTAL_SI_OFF_EI BIT(3)
+#define XTAL_SI_OFF_WEI BIT(2)
+#define XTAL_SI_PON_EI BIT(1)
+#define XTAL_SI_PON_WEI BIT(0)
+ XTAL_SI_SRAM_CTRL = 0xA1,
+#define FULL_BIT_MASK GENMASK(7, 0)
+};
+
+int rtw89_mac_write_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 mask);
+int rtw89_mac_read_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 *val);
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index a322259f4cc4..fca9f82bb462 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -66,6 +66,9 @@ static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed)
{
struct rtw89_dev *rtwdev = hw->priv;
+ /* let the previous IPS work finish to ensure we don't leave IPS twice */
+ cancel_work_sync(&rtwdev->ips_work);
+
mutex_lock(&rtwdev->mutex);
rtw89_leave_ps_mode(rtwdev);
@@ -102,14 +105,16 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
int ret = 0;
mutex_lock(&rtwdev->mutex);
+ rtwvif->rtwdev = rtwdev;
list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list);
+ INIT_WORK(&rtwvif->update_beacon_work, rtw89_core_update_beacon_work);
rtw89_leave_ps_mode(rtwdev);
rtw89_traffic_stats_init(rtwdev, &rtwvif->stats);
rtw89_vif_type_mapping(vif, false);
rtwvif->port = rtw89_core_acquire_bit_map(rtwdev->hw_port,
- RTW89_MAX_HW_PORT_NUM);
- if (rtwvif->port == RTW89_MAX_HW_PORT_NUM) {
+ RTW89_PORT_NUM);
+ if (rtwvif->port == RTW89_PORT_NUM) {
ret = -ENOSPC;
goto out;
}
@@ -141,6 +146,8 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw,
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ cancel_work_sync(&rtwvif->update_beacon_work);
+
mutex_lock(&rtwdev->mutex);
rtw89_leave_ps_mode(rtwdev);
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, NULL, BTC_ROLE_STOP);
@@ -161,7 +168,7 @@ static void rtw89_ops_configure_filter(struct ieee80211_hw *hw,
rtw89_leave_ps_mode(rtwdev);
*new_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_FCSFAIL |
- FIF_BCN_PRBRESP_PROMISC;
+ FIF_BCN_PRBRESP_PROMISC | FIF_PROBE_REQ;
if (changed_flags & FIF_ALLMULTI) {
if (*new_flags & FIF_ALLMULTI)
@@ -192,6 +199,15 @@ static void rtw89_ops_configure_filter(struct ieee80211_hw *hw,
rtwdev->hal.rx_fltr |= B_AX_A_A1_MATCH;
}
}
+ if (changed_flags & FIF_PROBE_REQ) {
+ if (*new_flags & FIF_PROBE_REQ) {
+ rtwdev->hal.rx_fltr &= ~B_AX_A_BC_CAM_MATCH;
+ rtwdev->hal.rx_fltr &= ~B_AX_A_UC_CAM_MATCH;
+ } else {
+ rtwdev->hal.rx_fltr |= B_AX_A_BC_CAM_MATCH;
+ rtwdev->hal.rx_fltr |= B_AX_A_UC_CAM_MATCH;
+ }
+ }
rtw89_write32_mask(rtwdev,
rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
@@ -311,6 +327,9 @@ static void rtw89_station_mode_sta_assoc(struct rtw89_dev *rtwdev,
rtw89_err(rtwdev, "can't find sta to set sta_assoc state\n");
return;
}
+
+ rtw89_vif_type_mapping(vif, true);
+
rtw89_core_sta_assoc(rtwdev, vif, sta);
}
@@ -331,6 +350,13 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw89_phy_set_bss_color(rtwdev, vif);
rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, vif);
rtw89_mac_port_update(rtwdev, rtwvif);
+ rtw89_store_op_chan(rtwdev);
+ } else {
+ /* Abort an ongoing scan if cancel_scan wasn't issued
+ * when the peer disconnected us
+ */
+ if (rtwdev->scanning)
+ rtw89_hw_scan_abort(rtwdev, vif);
}
}
@@ -340,6 +366,9 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
}
+ if (changed & BSS_CHANGED_BEACON)
+ rtw89_fw_h2c_update_beacon(rtwdev, rtwvif);
+
if (changed & BSS_CHANGED_ERP_SLOT)
rtw89_conf_tx(rtwdev, rtwvif);
@@ -352,6 +381,49 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
}
+static int rtw89_ops_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ mutex_lock(&rtwdev->mutex);
+ ether_addr_copy(rtwvif->bssid, vif->bss_conf.bssid);
+ rtw89_cam_bssid_changed(rtwdev, rtwvif);
+ rtw89_mac_port_update(rtwdev, rtwvif);
+ rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
+ rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_TYPE_CHANGE);
+ rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true);
+ rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
+ rtw89_chip_rfk_channel(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
+static
+void rtw89_ops_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
+ rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static int rtw89_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ bool set)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+
+ ieee80211_queue_work(rtwdev->hw, &rtwvif->update_beacon_work);
+
+ return 0;
+}
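set_tim() can be called from atomic context, so the beacon refresh is deferred to update_beacon_work, which add_interface() above now initializes. The work body lives in core.c and is not part of this hunk; given the rtwvif->rtwdev back-pointer and rtw89_fw_h2c_update_beacon() used elsewhere in this patch, it presumably looks like this sketch:

	/* Assumed shape of the deferred beacon update */
	void rtw89_core_update_beacon_work(struct work_struct *work)
	{
		struct rtw89_vif *rtwvif = container_of(work, struct rtw89_vif,
							update_beacon_work);
		struct rtw89_dev *rtwdev = rtwvif->rtwdev;

		mutex_lock(&rtwdev->mutex);
		rtw89_fw_h2c_update_beacon(rtwdev, rtwvif);
		mutex_unlock(&rtwdev->mutex);
	}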
+
static int rtw89_ops_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 ac,
const struct ieee80211_tx_queue_params *params)
@@ -476,7 +548,6 @@ static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mutex_lock(&rtwdev->mutex);
clear_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags);
- rtw89_fw_h2c_ba_cam(rtwdev, false, rtwsta->mac_id, params);
mutex_unlock(&rtwdev->mutex);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
@@ -486,11 +557,17 @@ static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw,
rtwsta->ampdu_params[tid].agg_num = params->buf_size;
rtwsta->ampdu_params[tid].amsdu = params->amsdu;
rtw89_leave_ps_mode(rtwdev);
- rtw89_fw_h2c_ba_cam(rtwdev, true, rtwsta->mac_id, params);
mutex_unlock(&rtwdev->mutex);
break;
case IEEE80211_AMPDU_RX_START:
+ mutex_lock(&rtwdev->mutex);
+ rtw89_fw_h2c_ba_cam(rtwdev, rtwsta, true, params);
+ mutex_unlock(&rtwdev->mutex);
+ break;
case IEEE80211_AMPDU_RX_STOP:
+ mutex_lock(&rtwdev->mutex);
+ rtw89_fw_h2c_ba_cam(rtwdev, rtwsta, false, params);
+ mutex_unlock(&rtwdev->mutex);
break;
default:
WARN_ON(1);
@@ -617,15 +694,9 @@ static void rtw89_ops_sw_scan_start(struct ieee80211_hw *hw,
{
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
- struct rtw89_hal *hal = &rtwdev->hal;
mutex_lock(&rtwdev->mutex);
- rtwdev->scanning = true;
- rtw89_leave_lps(rtwdev);
- rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, hal->current_band_type);
- rtw89_chip_rfk_scan(rtwdev, true);
- rtw89_hci_recalc_int_mit(rtwdev);
- rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, mac_addr);
+ rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, false);
mutex_unlock(&rtwdev->mutex);
}
@@ -633,14 +704,9 @@ static void rtw89_ops_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct rtw89_dev *rtwdev = hw->priv;
- struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
mutex_lock(&rtwdev->mutex);
- rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
- rtw89_chip_rfk_scan(rtwdev, false);
- rtw89_btc_ntfy_scan_finish(rtwdev, RTW89_PHY_0);
- rtwdev->scanning = false;
- rtwdev->dig.bypass_dig = true;
+ rtw89_core_scan_complete(rtwdev, vif, false);
mutex_unlock(&rtwdev->mutex);
}
@@ -653,6 +719,46 @@ static void rtw89_ops_reconfig_complete(struct ieee80211_hw *hw,
rtw89_ser_recfg_done(rtwdev);
}
+static int rtw89_ops_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ int ret = 0;
+
+ if (!rtwdev->fw.scan_offload)
+ return 1;
+
+ if (rtwdev->scanning)
+ return -EBUSY;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_hw_scan_start(rtwdev, vif, req);
+ ret = rtw89_hw_scan_offload(rtwdev, vif, true);
+ if (ret) {
+ rtw89_hw_scan_abort(rtwdev, vif);
+ rtw89_err(rtwdev, "HW scan failed with status: %d\n", ret);
+ }
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret;
+}
+
+static void rtw89_ops_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ if (!rtwdev->fw.scan_offload)
+ return;
+
+ if (!rtwdev->scanning)
+ return;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_hw_scan_abort(rtwdev, vif);
+ mutex_unlock(&rtwdev->mutex);
+}
+
const struct ieee80211_ops rtw89_ops = {
.tx = rtw89_ops_tx,
.wake_tx_queue = rtw89_ops_wake_tx_queue,
@@ -663,6 +769,9 @@ const struct ieee80211_ops rtw89_ops = {
.remove_interface = rtw89_ops_remove_interface,
.configure_filter = rtw89_ops_configure_filter,
.bss_info_changed = rtw89_ops_bss_info_changed,
+ .start_ap = rtw89_ops_start_ap,
+ .stop_ap = rtw89_ops_stop_ap,
+ .set_tim = rtw89_ops_set_tim,
.conf_tx = rtw89_ops_conf_tx,
.sta_state = rtw89_ops_sta_state,
.set_key = rtw89_ops_set_key,
@@ -676,6 +785,8 @@ const struct ieee80211_ops rtw89_ops = {
.sw_scan_start = rtw89_ops_sw_scan_start,
.sw_scan_complete = rtw89_ops_sw_scan_complete,
.reconfig_complete = rtw89_ops_reconfig_complete,
+ .hw_scan = rtw89_ops_hw_scan,
+ .cancel_hw_scan = rtw89_ops_cancel_hw_scan,
.set_sar_specs = rtw89_ops_set_sar_specs,
};
EXPORT_SYMBOL(rtw89_ops);
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 2c94762e4f93..e79bfc335b44 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -62,7 +62,7 @@ static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
struct rtw89_pci_tx_ring *tx_ring)
{
struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
- u32 addr_idx = bd_ring->addr_idx;
+ u32 addr_idx = bd_ring->addr.idx;
u32 cnt, idx;
idx = rtw89_read32(rtwdev, addr_idx);
@@ -121,7 +121,7 @@ static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
struct rtw89_pci_rx_ring *rx_ring)
{
struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
- u32 addr_idx = bd_ring->addr_idx;
+ u32 addr_idx = bd_ring->addr.idx;
u32 cnt, idx;
idx = rtw89_read32(rtwdev, addr_idx);
@@ -304,7 +304,7 @@ static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
cnt -= rx_cnt;
}
- rtw89_write16(rtwdev, bd_ring->addr_idx, bd_ring->wp);
+ rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}
static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
@@ -555,7 +555,7 @@ static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
cnt -= release_cnt;
}
- rtw89_write16(rtwdev, bd_ring->addr_idx, bd_ring->wp);
+ rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}
static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
@@ -598,7 +598,7 @@ static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
rx_ring = &rtwpci->rx_rings[i];
bd_ring = &rx_ring->bd_ring;
- reg_idx = rtw89_read32(rtwdev, bd_ring->addr_idx);
+ reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
hw_idx_next = (hw_idx + 1) % bd_ring->len;
@@ -697,71 +697,110 @@ exit:
return irqret;
}
-#define case_TXCHADDRS(txch) \
- case RTW89_TXCH_##txch: \
- *addr_num = R_AX_##txch##_TXBD_NUM; \
- *addr_idx = R_AX_##txch##_TXBD_IDX; \
- *addr_bdram = R_AX_##txch##_BDRAM_CTRL; \
- *addr_desa_l = R_AX_##txch##_TXBD_DESA_L; \
- *addr_desa_h = R_AX_##txch##_TXBD_DESA_H; \
- break
-
-static int rtw89_pci_get_txch_addrs(enum rtw89_tx_channel txch,
- u32 *addr_num,
- u32 *addr_idx,
- u32 *addr_bdram,
- u32 *addr_desa_l,
- u32 *addr_desa_h)
-{
- switch (txch) {
- case_TXCHADDRS(ACH0);
- case_TXCHADDRS(ACH1);
- case_TXCHADDRS(ACH2);
- case_TXCHADDRS(ACH3);
- case_TXCHADDRS(ACH4);
- case_TXCHADDRS(ACH5);
- case_TXCHADDRS(ACH6);
- case_TXCHADDRS(ACH7);
- case_TXCHADDRS(CH8);
- case_TXCHADDRS(CH9);
- case_TXCHADDRS(CH10);
- case_TXCHADDRS(CH11);
- case_TXCHADDRS(CH12);
- default:
+#define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
+ [RTW89_TXCH_##txch] = { \
+ .num = R_AX_##txch##_TXBD_NUM ##v, \
+ .idx = R_AX_##txch##_TXBD_IDX ##v, \
+ .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
+ .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
+ .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
+ }
+
+#define DEF_TXCHADDRS(info, txch, v...) \
+ [RTW89_TXCH_##txch] = { \
+ .num = R_AX_##txch##_TXBD_NUM, \
+ .idx = R_AX_##txch##_TXBD_IDX, \
+ .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
+ .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
+ .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
+ }
+
+#define DEF_RXCHADDRS(info, rxch, v...) \
+ [RTW89_RXCH_##rxch] = { \
+ .num = R_AX_##rxch##_RXBD_NUM ##v, \
+ .idx = R_AX_##rxch##_RXBD_IDX ##v, \
+ .desa_l = R_AX_##rxch##_RXBD_DESA_L ##v, \
+ .desa_h = R_AX_##rxch##_RXBD_DESA_H ##v, \
+ }
+
+const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
+ .tx = {
+ DEF_TXCHADDRS(info, ACH0),
+ DEF_TXCHADDRS(info, ACH1),
+ DEF_TXCHADDRS(info, ACH2),
+ DEF_TXCHADDRS(info, ACH3),
+ DEF_TXCHADDRS(info, ACH4),
+ DEF_TXCHADDRS(info, ACH5),
+ DEF_TXCHADDRS(info, ACH6),
+ DEF_TXCHADDRS(info, ACH7),
+ DEF_TXCHADDRS(info, CH8),
+ DEF_TXCHADDRS(info, CH9),
+ DEF_TXCHADDRS_TYPE1(info, CH10),
+ DEF_TXCHADDRS_TYPE1(info, CH11),
+ DEF_TXCHADDRS(info, CH12),
+ },
+ .rx = {
+ DEF_RXCHADDRS(info, RXQ),
+ DEF_RXCHADDRS(info, RPQ),
+ },
+};
+EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);
+
+const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
+ .tx = {
+ DEF_TXCHADDRS(info, ACH0, _V1),
+ DEF_TXCHADDRS(info, ACH1, _V1),
+ DEF_TXCHADDRS(info, ACH2, _V1),
+ DEF_TXCHADDRS(info, ACH3, _V1),
+ DEF_TXCHADDRS(info, ACH4, _V1),
+ DEF_TXCHADDRS(info, ACH5, _V1),
+ DEF_TXCHADDRS(info, ACH6, _V1),
+ DEF_TXCHADDRS(info, ACH7, _V1),
+ DEF_TXCHADDRS(info, CH8, _V1),
+ DEF_TXCHADDRS(info, CH9, _V1),
+ DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
+ DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
+ DEF_TXCHADDRS(info, CH12, _V1),
+ },
+ .rx = {
+ DEF_RXCHADDRS(info, RXQ, _V1),
+ DEF_RXCHADDRS(info, RPQ, _V1),
+ },
+};
+EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);
+
+#undef DEF_TXCHADDRS_TYPE1
+#undef DEF_TXCHADDRS
+#undef DEF_RXCHADDRS
+
+static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
+ enum rtw89_tx_channel txch,
+ const struct rtw89_pci_ch_dma_addr **addr)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+
+ if (txch >= RTW89_TXCH_NUM)
return -EINVAL;
- }
+
+ *addr = &info->dma_addr_set->tx[txch];
return 0;
}
-#undef case_TXCHADDRS
-
-#define case_RXCHADDRS(rxch) \
- case RTW89_RXCH_##rxch: \
- *addr_num = R_AX_##rxch##_RXBD_NUM; \
- *addr_idx = R_AX_##rxch##_RXBD_IDX; \
- *addr_desa_l = R_AX_##rxch##_RXBD_DESA_L; \
- *addr_desa_h = R_AX_##rxch##_RXBD_DESA_H; \
- break
-
-static int rtw89_pci_get_rxch_addrs(enum rtw89_rx_channel rxch,
- u32 *addr_num,
- u32 *addr_idx,
- u32 *addr_desa_l,
- u32 *addr_desa_h)
+static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
+ enum rtw89_rx_channel rxch,
+ const struct rtw89_pci_ch_dma_addr **addr)
{
- switch (rxch) {
- case_RXCHADDRS(RXQ);
- case_RXCHADDRS(RPQ);
- default:
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+
+ if (rxch >= RTW89_RXCH_NUM)
return -EINVAL;
- }
+
+ *addr = &info->dma_addr_set->rx[rxch];
return 0;
}
-#undef case_RXCHADDRS
-
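With the tables in place, channel-address resolution is just a bounds check plus a pointer into the per-chip rtw89_pci_info selected at probe time. A sketch of how a chip module pins the right register map; the 8852C instance name is an assumption, while the dma_addr_set field comes from the rtw89_pci_info added to pci.h below:

	/* Hypothetical chip binding: V1 DMA register map for newer silicon */
	const struct rtw89_pci_info rtw8852c_pci_info = {
		.dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1,
	};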
static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
{
struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;
@@ -837,7 +876,7 @@ static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_t
struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
u32 host_idx, addr;
- addr = bd_ring->addr_idx;
+ addr = bd_ring->addr.idx;
host_idx = bd_ring->wp;
rtw89_write16(rtwdev, addr, host_idx);
}
@@ -879,7 +918,7 @@ static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
* just use for loop with udelay here.
*/
for (i = 0; i < 60; i++) {
- cur_idx = rtw89_read32(rtwdev, bd_ring->addr_idx);
+ cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
if (cur_rp == bd_ring->wp)
return;
@@ -1140,9 +1179,9 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
tx_ring = &rtwpci->tx_rings[i];
bd_ring = &tx_ring->bd_ring;
bd_ram = &bd_ram_table[i];
- addr_num = bd_ring->addr_num;
- addr_bdram = bd_ring->addr_bdram;
- addr_desa_l = bd_ring->addr_desa_l;
+ addr_num = bd_ring->addr.num;
+ addr_bdram = bd_ring->addr.bdram;
+ addr_desa_l = bd_ring->addr.desa_l;
bd_ring->wp = 0;
bd_ring->rp = 0;
@@ -1158,8 +1197,8 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
for (i = 0; i < RTW89_RXCH_NUM; i++) {
rx_ring = &rtwpci->rx_rings[i];
bd_ring = &rx_ring->bd_ring;
- addr_num = bd_ring->addr_num;
- addr_desa_l = bd_ring->addr_desa_l;
+ addr_num = bd_ring->addr.num;
+ addr_desa_l = bd_ring->addr.desa_l;
bd_ring->wp = 0;
bd_ring->rp = 0;
rx_ring->diliver_skb = NULL;
@@ -1413,79 +1452,52 @@ static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u
return 0;
}
-static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data)
+static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
+ u8 data)
{
- u16 write_addr;
- u16 remainder = addr & ~(B_AX_DBI_ADDR_MSK | B_AX_DBI_WREN_MSK);
- u8 flag;
- int ret;
-
- write_addr = addr & B_AX_DBI_ADDR_MSK;
- write_addr |= u16_encode_bits(BIT(remainder), B_AX_DBI_WREN_MSK);
- rtw89_write8(rtwdev, R_AX_DBI_WDATA + remainder, data);
- rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr);
- rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16);
-
- ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
- 10 * RTW89_PCI_WR_RETRY_CNT, false,
- rtwdev, R_AX_DBI_FLAG + 2);
- if (ret)
- WARN(flag, "failed to write to DBI register, addr=0x%04x\n",
- addr);
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
- return ret;
+ return pci_write_config_byte(pdev, addr, data);
}
-static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value)
+static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
+ u8 *value)
{
- u16 read_addr = addr & B_AX_DBI_ADDR_MSK;
- u8 flag;
- int ret;
-
- rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr);
- rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16);
-
- ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
- 10 * RTW89_PCI_WR_RETRY_CNT, false,
- rtwdev, R_AX_DBI_FLAG + 2);
-
- if (!ret) {
- read_addr = R_AX_DBI_RDATA + (addr & 3);
- *value = rtw89_read8(rtwdev, read_addr);
- } else {
- WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
- ret = -EIO;
- }
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
- return ret;
+ return pci_read_config_byte(pdev, addr, value);
}
-static int rtw89_dbi_write8_set(struct rtw89_dev *rtwdev, u16 addr, u8 bit)
+static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
+ u8 bit)
{
u8 value;
int ret;
- ret = rtw89_dbi_read8(rtwdev, addr, &value);
+ ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
if (ret)
return ret;
value |= bit;
- ret = rtw89_dbi_write8(rtwdev, addr, value);
+ ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
return ret;
}
-static int rtw89_dbi_write8_clr(struct rtw89_dev *rtwdev, u16 addr, u8 bit)
+static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr,
+ u8 bit)
{
u8 value;
int ret;
- ret = rtw89_dbi_read8(rtwdev, addr, &value);
+ ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
if (ret)
return ret;
value &= ~bit;
- ret = rtw89_dbi_write8(rtwdev, addr, value);
+ ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
return ret;
}
@@ -1542,9 +1554,10 @@ static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
rtwdev->chip->chip_id == RTL8852C)
return 0;
- ret = rtw89_dbi_read8(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
+ ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
if (ret) {
- rtw89_err(rtwdev, "[ERR]dbi_r8_pcie %X\n", RTW89_PCIE_PHY_RATE);
+ rtw89_err(rtwdev, "[ERR]pci config read %X\n",
+ RTW89_PCIE_PHY_RATE);
return ret;
}
@@ -1557,17 +1570,18 @@ static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
return -EOPNOTSUPP;
}
/* Disable L1BD */
- ret = rtw89_dbi_read8(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
+ ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
if (ret) {
- rtw89_err(rtwdev, "[ERR]dbi_r8_pcie %X\n", RTW89_PCIE_L1_CTRL);
+ rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL);
return ret;
}
if (bdr_ori & RTW89_PCIE_BIT_L1) {
- ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_L1_CTRL,
- bdr_ori & ~RTW89_PCIE_BIT_L1);
+ ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
+ bdr_ori & ~RTW89_PCIE_BIT_L1);
if (ret) {
- rtw89_err(rtwdev, "[ERR]dbi_w8_pcie %X\n", RTW89_PCIE_L1_CTRL);
+ rtw89_err(rtwdev, "[ERR]pci config write %X\n",
+ RTW89_PCIE_L1_CTRL);
return ret;
}
l1_flag = true;
@@ -1662,14 +1676,17 @@ static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
}
/* CLK delay = 0 */
- ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_CLK_CTRL, PCIE_CLKDLY_HW_0);
+ ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
+ PCIE_CLKDLY_HW_0);
end:
/* Set L1BD to ori */
if (l1_flag) {
- ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_L1_CTRL, bdr_ori);
+ ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
+ bdr_ori);
if (ret) {
- rtw89_err(rtwdev, "[ERR]dbi_w8_pcie %X\n", RTW89_PCIE_L1_CTRL);
+ rtw89_err(rtwdev, "[ERR]pci config write %X\n",
+ RTW89_PCIE_L1_CTRL);
return ret;
}
}
@@ -2210,14 +2227,10 @@ static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
u32 desc_size, u32 len,
enum rtw89_tx_channel txch)
{
+ const struct rtw89_pci_ch_dma_addr *txch_addr;
int ring_sz = desc_size * len;
u8 *head;
dma_addr_t dma;
- u32 addr_num;
- u32 addr_idx;
- u32 addr_bdram;
- u32 addr_desa_l;
- u32 addr_desa_h;
int ret;
ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch);
@@ -2226,8 +2239,7 @@ static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
goto err;
}
- ret = rtw89_pci_get_txch_addrs(txch, &addr_num, &addr_idx, &addr_bdram,
- &addr_desa_l, &addr_desa_h);
+ ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr);
if (ret) {
rtw89_err(rtwdev, "failed to get address of txch %d", txch);
goto err_free_wd_ring;
@@ -2244,11 +2256,7 @@ static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
tx_ring->bd_ring.dma = dma;
tx_ring->bd_ring.len = len;
tx_ring->bd_ring.desc_size = desc_size;
- tx_ring->bd_ring.addr_num = addr_num;
- tx_ring->bd_ring.addr_idx = addr_idx;
- tx_ring->bd_ring.addr_bdram = addr_bdram;
- tx_ring->bd_ring.addr_desa_l = addr_desa_l;
- tx_ring->bd_ring.addr_desa_h = addr_desa_h;
+ tx_ring->bd_ring.addr = *txch_addr;
tx_ring->bd_ring.wp = 0;
tx_ring->bd_ring.rp = 0;
tx_ring->txch = txch;
@@ -2300,20 +2308,16 @@ static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
struct rtw89_pci_rx_ring *rx_ring,
u32 desc_size, u32 len, u32 rxch)
{
+ const struct rtw89_pci_ch_dma_addr *rxch_addr;
struct sk_buff *skb;
u8 *head;
dma_addr_t dma;
- u32 addr_num;
- u32 addr_idx;
- u32 addr_desa_l;
- u32 addr_desa_h;
int ring_sz = desc_size * len;
int buf_sz = RTW89_PCI_RX_BUF_SIZE;
int i, allocated;
int ret;
- ret = rtw89_pci_get_rxch_addrs(rxch, &addr_num, &addr_idx,
- &addr_desa_l, &addr_desa_h);
+ ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr);
if (ret) {
rtw89_err(rtwdev, "failed to get address of rxch %d", rxch);
return ret;
@@ -2329,10 +2333,7 @@ static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
rx_ring->bd_ring.dma = dma;
rx_ring->bd_ring.len = len;
rx_ring->bd_ring.desc_size = desc_size;
- rx_ring->bd_ring.addr_num = addr_num;
- rx_ring->bd_ring.addr_idx = addr_idx;
- rx_ring->bd_ring.addr_desa_l = addr_desa_l;
- rx_ring->bd_ring.addr_desa_h = addr_desa_h;
+ rx_ring->bd_ring.addr = *rxch_addr;
rx_ring->bd_ring.wp = 0;
rx_ring->bd_ring.rp = 0;
rx_ring->buf_sz = buf_sz;
@@ -2552,17 +2553,17 @@ static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
if (rtw89_pci_disable_clkreq)
return;
- ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_CLK_CTRL,
- PCIE_CLKDLY_HW_30US);
+ ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
+ PCIE_CLKDLY_HW_30US);
if (ret)
rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");
if (enable)
- ret = rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_CLK);
+ ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_CLK);
else
- ret = rtw89_dbi_write8_clr(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_CLK);
+ ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_CLK);
if (ret)
rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d",
enable ? "set" : "unset", ret);
@@ -2576,7 +2577,7 @@ static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
if (rtw89_pci_disable_aspm_l1)
return;
- ret = rtw89_dbi_read8(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
+ ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
if (ret)
rtw89_err(rtwdev, "failed to read ASPM Delay\n");
@@ -2584,16 +2585,16 @@ static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) |
FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US);
- ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
+ ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
if (ret)
rtw89_err(rtwdev, "failed to read ASPM Delay\n");
if (enable)
- ret = rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_L1);
+ ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_L1);
else
- ret = rtw89_dbi_write8_clr(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_L1);
+ ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_L1);
if (ret)
rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d",
enable ? "set" : "unset", ret);
@@ -2657,11 +2658,11 @@ static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
int ret;
if (enable)
- ret = rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_TIMER_CTRL,
- RTW89_PCIE_BIT_L1SUB);
+ ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_TIMER_CTRL,
+ RTW89_PCIE_BIT_L1SUB);
else
- ret = rtw89_dbi_write8_clr(rtwdev, RTW89_PCIE_TIMER_CTRL,
- RTW89_PCIE_BIT_L1SUB);
+ ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_TIMER_CTRL,
+ RTW89_PCIE_BIT_L1SUB);
if (ret)
rtw89_err(rtwdev, "failed to %s L1SS, ret=%d",
enable ? "set" : "unset", ret);
@@ -2878,10 +2879,10 @@ static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
return;
/* The hardware needs the register written twice for the setting to take effect */
- rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_RST_MSTATE,
- RTW89_PCIE_BIT_CFG_RST_MSTATE);
- rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_RST_MSTATE,
- RTW89_PCIE_BIT_CFG_RST_MSTATE);
+ rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
+ RTW89_PCIE_BIT_CFG_RST_MSTATE);
+ rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
+ RTW89_PCIE_BIT_CFG_RST_MSTATE);
}
static int __maybe_unused rtw89_pci_resume(struct device *dev)
@@ -2932,11 +2933,11 @@ static const struct rtw89_hci_ops rtw89_pci_ops = {
.napi_poll = rtw89_pci_napi_poll,
};
-static int rtw89_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ieee80211_hw *hw;
struct rtw89_dev *rtwdev;
+ const struct rtw89_driver_info *info;
int driver_data_size;
int ret;
@@ -2957,13 +2958,9 @@ static int rtw89_pci_probe(struct pci_dev *pdev,
SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
- switch (id->driver_data) {
- case RTL8852A:
- rtwdev->chip = &rtw8852a_chip_info;
- break;
- default:
- return -ENOENT;
- }
+ info = (const struct rtw89_driver_info *)id->driver_data;
+ rtwdev->chip = info->chip;
+ rtwdev->pci_info = info->bus.pci;
ret = rtw89_core_init(rtwdev);
if (ret) {
@@ -3022,8 +3019,9 @@ err_release_hw:
return ret;
}
+EXPORT_SYMBOL(rtw89_pci_probe);
-static void rtw89_pci_remove(struct pci_dev *pdev)
+void rtw89_pci_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct rtw89_dev *rtwdev;
@@ -3038,22 +3036,7 @@ static void rtw89_pci_remove(struct pci_dev *pdev)
rtw89_core_deinit(rtwdev);
ieee80211_free_hw(hw);
}
-
-static const struct pci_device_id rtw89_pci_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8852), .driver_data = RTL8852A },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xa85a), .driver_data = RTL8852A },
- {},
-};
-MODULE_DEVICE_TABLE(pci, rtw89_pci_id_table);
-
-static struct pci_driver rtw89_pci_driver = {
- .name = "rtw89_pci",
- .id_table = rtw89_pci_id_table,
- .probe = rtw89_pci_probe,
- .remove = rtw89_pci_remove,
- .driver.pm = &rtw89_pm_ops,
-};
-module_pci_driver(rtw89_pci_driver);
+EXPORT_SYMBOL(rtw89_pci_remove);
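With the id table and pci_driver gone from the common code, each chip is expected to ship its own registration stub whose driver_data points at a rtw89_driver_info. A sketch of such a stub; the rtw89_8852ae_* names are assumptions, while the .chip/.bus.pci layout is inferred from how rtw89_pci_probe() consumes info above:

	/* Hypothetical per-chip module, e.g. rtw8852ae.c */
	static const struct rtw89_pci_info rtw8852a_pci_info = {
		.dma_addr_set = &rtw89_pci_ch_dma_addr_set,
	};

	static const struct rtw89_driver_info rtw89_8852ae_info = {
		.chip = &rtw8852a_chip_info,
		.bus = {
			.pci = &rtw8852a_pci_info,
		},
	};

	static const struct pci_device_id rtw89_8852ae_id_table[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8852),
		  .driver_data = (kernel_ulong_t)&rtw89_8852ae_info },
		{},
	};
	MODULE_DEVICE_TABLE(pci, rtw89_8852ae_id_table);

	static struct pci_driver rtw89_8852ae_driver = {
		.name = "rtw89_8852ae",
		.id_table = rtw89_8852ae_id_table,
		.probe = rtw89_pci_probe,
		.remove = rtw89_pci_remove,
		.driver.pm = &rtw89_pm_ops,
	};
	module_pci_driver(rtw89_8852ae_driver);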
MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver");
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index 20e6767ea5c4..b84acd0d0582 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -130,6 +130,10 @@
#define R_AX_CH10_TXBD_IDX 0x137C /* Management Queue band 1 */
#define R_AX_CH11_TXBD_IDX 0x1380 /* HI Queue band 1 */
#define R_AX_CH12_TXBD_IDX 0x1080 /* FWCMD Queue */
+#define R_AX_CH10_TXBD_IDX_V1 0x11D0
+#define R_AX_CH11_TXBD_IDX_V1 0x11D4
+#define R_AX_RXQ_RXBD_IDX_V1 0x1218
+#define R_AX_RPQ_RXBD_IDX_V1 0x121C
#define TXBD_HW_IDX_MASK GENMASK(27, 16)
#define TXBD_HOST_IDX_MASK GENMASK(11, 0)
@@ -163,6 +167,36 @@
#define R_AX_RXQ_RXBD_DESA_H 0x1104
#define R_AX_RPQ_RXBD_DESA_L 0x1108
#define R_AX_RPQ_RXBD_DESA_H 0x110C
+#define R_AX_RXQ_RXBD_DESA_L_V1 0x1220
+#define R_AX_RXQ_RXBD_DESA_H_V1 0x1224
+#define R_AX_RPQ_RXBD_DESA_L_V1 0x1228
+#define R_AX_RPQ_RXBD_DESA_H_V1 0x122C
+#define R_AX_ACH0_TXBD_DESA_L_V1 0x1230
+#define R_AX_ACH0_TXBD_DESA_H_V1 0x1234
+#define R_AX_ACH1_TXBD_DESA_L_V1 0x1238
+#define R_AX_ACH1_TXBD_DESA_H_V1 0x123C
+#define R_AX_ACH2_TXBD_DESA_L_V1 0x1240
+#define R_AX_ACH2_TXBD_DESA_H_V1 0x1244
+#define R_AX_ACH3_TXBD_DESA_L_V1 0x1248
+#define R_AX_ACH3_TXBD_DESA_H_V1 0x124C
+#define R_AX_ACH4_TXBD_DESA_L_V1 0x1250
+#define R_AX_ACH4_TXBD_DESA_H_V1 0x1254
+#define R_AX_ACH5_TXBD_DESA_L_V1 0x1258
+#define R_AX_ACH5_TXBD_DESA_H_V1 0x125C
+#define R_AX_ACH6_TXBD_DESA_L_V1 0x1260
+#define R_AX_ACH6_TXBD_DESA_H_V1 0x1264
+#define R_AX_ACH7_TXBD_DESA_L_V1 0x1268
+#define R_AX_ACH7_TXBD_DESA_H_V1 0x126C
+#define R_AX_CH8_TXBD_DESA_L_V1 0x1270
+#define R_AX_CH8_TXBD_DESA_H_V1 0x1274
+#define R_AX_CH9_TXBD_DESA_L_V1 0x1278
+#define R_AX_CH9_TXBD_DESA_H_V1 0x127C
+#define R_AX_CH12_TXBD_DESA_L_V1 0x1280
+#define R_AX_CH12_TXBD_DESA_H_V1 0x1284
+#define R_AX_CH10_TXBD_DESA_L_V1 0x1458
+#define R_AX_CH10_TXBD_DESA_H_V1 0x145C
+#define R_AX_CH11_TXBD_DESA_L_V1 0x1460
+#define R_AX_CH11_TXBD_DESA_H_V1 0x1464
#define B_AX_DESC_NUM_MSK GENMASK(11, 0)
#define R_AX_RXQ_RXBD_NUM 0x1020
@@ -180,6 +214,10 @@
#define R_AX_CH10_TXBD_NUM 0x1338
#define R_AX_CH11_TXBD_NUM 0x133A
#define R_AX_CH12_TXBD_NUM 0x1038
+#define R_AX_RXQ_RXBD_NUM_V1 0x1210
+#define R_AX_RPQ_RXBD_NUM_V1 0x1212
+#define R_AX_CH10_TXBD_NUM_V1 0x1438
+#define R_AX_CH11_TXBD_NUM_V1 0x143A
#define R_AX_ACH0_BDRAM_CTRL 0x1200
#define R_AX_ACH1_BDRAM_CTRL 0x1204
@@ -194,6 +232,19 @@
#define R_AX_CH10_BDRAM_CTRL 0x1320
#define R_AX_CH11_BDRAM_CTRL 0x1324
#define R_AX_CH12_BDRAM_CTRL 0x1228
+#define R_AX_ACH0_BDRAM_CTRL_V1 0x1300
+#define R_AX_ACH1_BDRAM_CTRL_V1 0x1304
+#define R_AX_ACH2_BDRAM_CTRL_V1 0x1308
+#define R_AX_ACH3_BDRAM_CTRL_V1 0x130C
+#define R_AX_ACH4_BDRAM_CTRL_V1 0x1310
+#define R_AX_ACH5_BDRAM_CTRL_V1 0x1314
+#define R_AX_ACH6_BDRAM_CTRL_V1 0x1318
+#define R_AX_ACH7_BDRAM_CTRL_V1 0x131C
+#define R_AX_CH8_BDRAM_CTRL_V1 0x1320
+#define R_AX_CH9_BDRAM_CTRL_V1 0x1324
+#define R_AX_CH12_BDRAM_CTRL_V1 0x1328
+#define R_AX_CH10_BDRAM_CTRL_V1 0x1420
+#define R_AX_CH11_BDRAM_CTRL_V1 0x1424
#define BDRAM_SIDX_MASK GENMASK(7, 0)
#define BDRAM_MAX_MASK GENMASK(15, 8)
#define BDRAM_MIN_MASK GENMASK(23, 16)
@@ -382,6 +433,23 @@ enum rtw89_pcie_clkdly_hw {
PCIE_CLKDLY_HW_200US = 0x5,
};
+struct rtw89_pci_ch_dma_addr {
+ u32 num;
+ u32 idx;
+ u32 bdram;
+ u32 desa_l;
+ u32 desa_h;
+};
+
+struct rtw89_pci_ch_dma_addr_set {
+ struct rtw89_pci_ch_dma_addr tx[RTW89_TXCH_NUM];
+ struct rtw89_pci_ch_dma_addr rx[RTW89_RXCH_NUM];
+};
+
+struct rtw89_pci_info {
+ const struct rtw89_pci_ch_dma_addr_set *dma_addr_set;
+};
+
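
The five scattered addr_* fields in the DMA ring collapse into one struct rtw89_pci_ch_dma_addr, so a whole register map can be swapped per chip generation through rtw89_pci_info. A minimal sketch of one RX entry of such a set, assuming the _V1 offsets added in this header (the real tables, rtw89_pci_ch_dma_addr_set and rtw89_pci_ch_dma_addr_set_v1, live in pci.c):

static const struct rtw89_pci_ch_dma_addr rxq_addr_v1 = {
	.num    = R_AX_RXQ_RXBD_NUM_V1,
	.idx    = R_AX_RXQ_RXBD_IDX_V1,
	.desa_l = R_AX_RXQ_RXBD_DESA_L_V1,
	.desa_h = R_AX_RXQ_RXBD_DESA_H_V1,
	/* .bdram is unused for RX rings and left zero */
};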
struct rtw89_pci_bd_ram {
u8 start_idx;
u8 max_num;
@@ -469,11 +537,7 @@ struct rtw89_pci_dma_ring {
u8 desc_size;
dma_addr_t dma;
- u32 addr_num;
- u32 addr_idx;
- u32 addr_bdram;
- u32 addr_desa_l;
- u32 addr_desa_h;
+ struct rtw89_pci_ch_dma_addr addr;
u32 len;
u32 wp; /* host idx */
@@ -626,5 +690,12 @@ static inline bool rtw89_pci_ltr_is_err_reg_val(u32 val)
}
extern const struct dev_pm_ops rtw89_pm_ops;
+extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set;
+extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1;
+
+struct pci_device_id;
+
+int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+void rtw89_pci_remove(struct pci_dev *pdev);
#endif
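
With rtw89_pci_probe()/rtw89_pci_remove() exported and the pci_driver table dropped from pci.c, each chip is expected to register its own PCI glue module. A hedged sketch of what that glue might look like, modeled on the 8852A case (module and table names illustrative; .driver_data is assumed to point at a chip-specific info struct rather than the old enum value):

static const struct pci_device_id rtw89_8852ae_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8852),
	  .driver_data = (kernel_ulong_t)&rtw8852a_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xa85a),
	  .driver_data = (kernel_ulong_t)&rtw8852a_info },
	{},
};
MODULE_DEVICE_TABLE(pci, rtw89_8852ae_id_table);

static struct pci_driver rtw89_8852ae_driver = {
	.name = "rtw89_8852ae",
	.id_table = rtw89_8852ae_id_table,
	.probe = rtw89_pci_probe,
	.remove = rtw89_pci_remove,
	.driver.pm = &rtw89_pm_ops,
};
module_pci_driver(rtw89_8852ae_driver);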
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index 147009888de0..ac211d897311 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -4,6 +4,7 @@
#include "debug.h"
#include "fw.h"
+#include "mac.h"
#include "phy.h"
#include "ps.h"
#include "reg.h"
@@ -117,17 +118,28 @@ static u64 rtw89_phy_ra_mask_rssi(struct rtw89_dev *rtwdev, u8 rssi,
else if (rssi_lv == 1)
return 0xfffffffffffffff0ULL;
else if (rssi_lv == 2)
- return 0xffffffffffffffe0ULL;
+ return 0xffffffffffffefe0ULL;
else if (rssi_lv == 3)
- return 0xffffffffffffffc0ULL;
+ return 0xffffffffffffcfc0ULL;
else if (rssi_lv == 4)
- return 0xffffffffffffff80ULL;
+ return 0xffffffffffff8f80ULL;
else if (rssi_lv >= 5)
- return 0xffffffffffffff00ULL;
+ return 0xffffffffffff0f00ULL;
return 0xffffffffffffffffULL;
}
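
The widened constants change which rates survive at each RSSI level. Assuming the driver's ra_mask layout (bits 0-3 CCK, bits 4-11 legacy OFDM, bit 12 upward HT/VHT/HE MCS), each level now also clears the lowest MCS bits alongside the lowest legacy bits, e.g. for rssi_lv == 2:

	u64 dropped_old = ~0xffffffffffffffe0ULL;	/* 0x001f: CCK + OFDM 6M */
	u64 dropped_new = ~0xffffffffffffefe0ULL;	/* 0x101f: plus MCS0 (bit 12) */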
+static u64 rtw89_phy_ra_mask_recover(u64 ra_mask, u64 ra_mask_bak)
+{
+ if ((ra_mask & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)) == 0)
+ ra_mask |= (ra_mask_bak & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));
+
+ if (ra_mask == 0)
+ ra_mask |= (ra_mask_bak & (RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));
+
+ return ra_mask;
+}
+
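
rtw89_phy_ra_mask_recover() guards against the RSSI filter (and, below, the configured mask) emptying the rate set. A standalone worked example, using the same assumed bit layout:

	u64 bak = 0x000000000000f000ULL | 0x0ff0ULL;	/* HT MCS0-3 + legacy OFDM */
	u64 cur = bak & 0xffffffffffff0f00ULL;		/* deep-fade filter, rssi_lv >= 5 */
	/* cur == 0x0f00: no HT bit survives, so the first test in
	 * rtw89_phy_ra_mask_recover() fires and ORs MCS0-3 back in from bak;
	 * the station is never left with an all-zero mask. */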
static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta)
{
struct rtw89_hal *hal = &rtwdev->hal;
@@ -150,6 +162,11 @@ static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtw
cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_5GHZ].legacy,
RA_MASK_OFDM_RATES);
break;
+ case RTW89_BAND_6G:
+ band = NL80211_BAND_6GHZ;
+ cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_6GHZ].legacy,
+ RA_MASK_OFDM_RATES);
+ break;
default:
rtw89_warn(rtwdev, "unhandled band type %d\n", hal->current_band_type);
return -1;
@@ -194,8 +211,8 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
struct rtw89_ra_info *ra = &rtwsta->ra;
const u64 *high_rate_masks = rtw89_ra_mask_ht_rates;
u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi);
- u64 high_rate_mask = 0;
u64 ra_mask = 0;
+ u64 ra_mask_bak;
u8 mode = 0;
u8 csi_mode = RTW89_RA_RPT_MODE_LEGACY;
u8 bw_mode = 0;
@@ -243,37 +260,54 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
ldpc_en = 1;
}
- if (rtwdev->hal.current_band_type == RTW89_BAND_2G) {
+ switch (rtwdev->hal.current_band_type) {
+ case RTW89_BAND_2G:
+ ra_mask |= sta->supp_rates[NL80211_BAND_2GHZ];
if (sta->supp_rates[NL80211_BAND_2GHZ] <= 0xf)
mode |= RTW89_RA_MODE_CCK;
else
mode |= RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM;
- } else {
+ break;
+ case RTW89_BAND_5G:
+ ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4;
mode |= RTW89_RA_MODE_OFDM;
+ break;
+ case RTW89_BAND_6G:
+ ra_mask |= (u64)sta->supp_rates[NL80211_BAND_6GHZ] << 4;
+ mode |= RTW89_RA_MODE_OFDM;
+ break;
+ default:
+ rtw89_err(rtwdev, "Unknown band type\n");
+ break;
}
+ ra_mask_bak = ra_mask;
+
if (mode >= RTW89_RA_MODE_HT) {
+ u64 mask = 0;
for (i = 0; i < rtwdev->hal.tx_nss; i++)
- high_rate_mask |= high_rate_masks[i];
- ra_mask &= high_rate_mask;
+ mask |= high_rate_masks[i];
if (mode & RTW89_RA_MODE_OFDM)
- ra_mask |= RA_MASK_SUBOFDM_RATES;
+ mask |= RA_MASK_SUBOFDM_RATES;
if (mode & RTW89_RA_MODE_CCK)
- ra_mask |= RA_MASK_SUBCCK_RATES;
+ mask |= RA_MASK_SUBCCK_RATES;
+ ra_mask &= mask;
} else if (mode & RTW89_RA_MODE_OFDM) {
- if (mode & RTW89_RA_MODE_CCK)
- ra_mask |= RA_MASK_SUBCCK_RATES;
- ra_mask |= RA_MASK_OFDM_RATES;
- } else {
- ra_mask = RA_MASK_CCK_RATES;
+ ra_mask &= (RA_MASK_OFDM_RATES | RA_MASK_SUBCCK_RATES);
}
- if (mode != RTW89_RA_MODE_CCK) {
+ if (mode != RTW89_RA_MODE_CCK)
ra_mask &= rtw89_phy_ra_mask_rssi(rtwdev, rssi, 0);
- ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta);
- }
+
+ ra_mask = rtw89_phy_ra_mask_recover(ra_mask, ra_mask_bak);
+ ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta);
switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ bw_mode = RTW89_CHANNEL_WIDTH_160;
+ sgi = sta->vht_cap.vht_supported &&
+ (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
+ break;
case IEEE80211_STA_RX_BW_80:
bw_mode = RTW89_CHANNEL_WIDTH_80;
sgi = sta->vht_cap.vht_supported &&
@@ -568,6 +602,13 @@ u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
return txsc_idx;
}
+EXPORT_SYMBOL(rtw89_phy_get_txsc);
+
+static bool rtw89_phy_check_swsi_busy(struct rtw89_dev *rtwdev)
+{
+ return !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_W_BUSY_V1) ||
+ !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_R_BUSY_V1);
+}
u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask)
@@ -591,6 +632,56 @@ u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
}
EXPORT_SYMBOL(rtw89_phy_read_rf);
+static u32 rtw89_phy_read_rf_a(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path rf_path, u32 addr, u32 mask)
+{
+ bool busy;
+ bool done;
+ u32 val;
+ int ret;
+
+ ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
+ 1, 30, false, rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "read rf busy swsi\n");
+ return INV_RF_DATA;
+ }
+
+ mask &= RFREG_MASK;
+
+ val = FIELD_PREP(B_SWSI_READ_ADDR_PATH_V1, rf_path) |
+ FIELD_PREP(B_SWSI_READ_ADDR_ADDR_V1, addr);
+ rtw89_phy_write32_mask(rtwdev, R_SWSI_READ_ADDR_V1, B_SWSI_READ_ADDR_V1, val);
+ udelay(2);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done, 1,
+ 30, false, rtwdev, R_SWSI_V1,
+ B_SWSI_R_DATA_DONE_V1);
+ if (ret) {
+ rtw89_err(rtwdev, "read swsi busy\n");
+ return INV_RF_DATA;
+ }
+
+ return rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, mask);
+}
+
+u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask)
+{
+ bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);
+
+ if (rf_path >= rtwdev->chip->rf_path_num) {
+ rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return INV_RF_DATA;
+ }
+
+ if (ad_sel)
+ return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
+ else
+ return rtw89_phy_read_rf_a(rtwdev, rf_path, addr, mask);
+}
+EXPORT_SYMBOL(rtw89_phy_read_rf_v1);
+
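
On v1 chips the RF address space is split by RTW89_RF_ADDR_ADSEL_MASK (BIT(16), added in phy.h below): addresses with the bit set take the existing direct interface, all others the polled SWSI path above. A usage sketch (the address value is illustrative):

	u32 val;

	/* direct access */
	val = rtw89_phy_read_rf_v1(rtwdev, RF_PATH_A,
				   0x05 | RTW89_RF_ADDR_ADSEL_MASK, RFREG_MASK);
	/* indirect SWSI access */
	val = rtw89_phy_read_rf_v1(rtwdev, RF_PATH_A, 0x05, RFREG_MASK);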
bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data)
{
@@ -616,6 +707,60 @@ bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
}
EXPORT_SYMBOL(rtw89_phy_write_rf);
+static bool rtw89_phy_write_rf_a(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path rf_path, u32 addr, u32 mask,
+ u32 data)
+{
+ u8 bit_shift;
+ u32 val;
+ bool busy, b_msk_en = false;
+ int ret;
+
+ ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
+ 1, 30, false, rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "write rf busy swsi\n");
+ return false;
+ }
+
+ data &= RFREG_MASK;
+ mask &= RFREG_MASK;
+
+ if (mask != RFREG_MASK) {
+ b_msk_en = true;
+ rtw89_phy_write32_mask(rtwdev, R_SWSI_BIT_MASK_V1, RFREG_MASK,
+ mask);
+ bit_shift = __ffs(mask);
+ data = (data << bit_shift) & RFREG_MASK;
+ }
+
+ val = FIELD_PREP(B_SWSI_DATA_BIT_MASK_EN_V1, b_msk_en) |
+ FIELD_PREP(B_SWSI_DATA_PATH_V1, rf_path) |
+ FIELD_PREP(B_SWSI_DATA_ADDR_V1, addr) |
+ FIELD_PREP(B_SWSI_DATA_VAL_V1, data);
+
+ rtw89_phy_write32_mask(rtwdev, R_SWSI_DATA_V1, MASKDWORD, val);
+
+ return true;
+}
+
+bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);
+
+ if (rf_path >= rtwdev->chip->rf_path_num) {
+ rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return false;
+ }
+
+ if (ad_sel)
+ return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
+ else
+ return rtw89_phy_write_rf_a(rtwdev, rf_path, addr, mask, data);
+}
+EXPORT_SYMBOL(rtw89_phy_write_rf_v1);
+
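
The SWSI write path lets the hardware apply sub-field masks: software programs the mask register and only pre-shifts the value into field position. A standalone illustration of the shift (RFREG_MASK is GENMASK(19, 0) in this driver):

	u32 mask = 0x000f0, data = 0x5;
	u8 shift = __builtin_ffs(mask) - 1;	/* __ffs(mask) in kernel code */
	u32 wire = (data << shift) & 0xfffff;	/* wire == 0x50 */
	/* Only bits 4-7 of the RF register change; the rest are preserved by
	 * the hardware because B_SWSI_DATA_BIT_MASK_EN_V1 is set. */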
static void rtw89_phy_bb_reset(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx)
{
@@ -717,6 +862,21 @@ static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev,
}
}
+void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data)
+{
+ rtw89_write_rf(rtwdev, rf_path, reg->addr, RFREG_MASK, reg->data);
+
+ if (reg->addr < 0x100)
+ return;
+
+ rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
+ (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
+}
+EXPORT_SYMBOL(rtw89_phy_config_rf_reg_v1);
+
static int rtw89_phy_sel_headline(struct rtw89_dev *rtwdev,
const struct rtw89_phy_table *table,
u32 *headline_size, u32 *headline_idx,
@@ -888,6 +1048,8 @@ static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev)
void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev)
{
+ void (*config)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path, void *data);
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_phy_table *rf_table;
struct rtw89_fw_h2c_rf_reg_info *rf_reg_info;
@@ -898,13 +1060,13 @@ void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev)
return;
for (path = RF_PATH_A; path < chip->rf_path_num; path++) {
- rf_reg_info->rf_path = path;
rf_table = chip->rf_table[path];
- rtw89_phy_init_reg(rtwdev, rf_table, rtw89_phy_config_rf_reg,
- (void *)rf_reg_info);
+ rf_reg_info->rf_path = rf_table->rf_path;
+ config = rf_table->config ? rf_table->config : rtw89_phy_config_rf_reg;
+ rtw89_phy_init_reg(rtwdev, rf_table, config, (void *)rf_reg_info);
if (rtw89_phy_config_rf_reg_fw(rtwdev, rf_reg_info))
rtw89_warn(rtwdev, "rf path %d reg h2c config failed\n",
- path);
+ rf_reg_info->rf_path);
}
kfree(rf_reg_info);
}
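
rf_table->config is a new optional per-table loader hook; leaving it NULL keeps the legacy rtw89_phy_config_rf_reg path. A hedged sketch of a table that overrides it (chip and array names hypothetical; field names as assumed from this hunk):

static const struct rtw89_phy_table rtw8852x_rf_a_table = {
	.regs = rtw8852x_rf_a_regs,
	.n_regs = ARRAY_SIZE(rtw8852x_rf_a_regs),
	.rf_path = RF_PATH_A,
	.config = rtw89_phy_config_rf_reg_v1,	/* added above */
};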
@@ -972,6 +1134,7 @@ void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
addr += rtw89_phy0_phy1_offset(rtwdev, addr);
rtw89_phy_write32_mask(rtwdev, addr, mask, data);
}
+EXPORT_SYMBOL(rtw89_phy_write32_idx);
void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 val)
@@ -995,6 +1158,7 @@ void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, reg3->addr, reg3->mask, reg3->data);
}
}
+EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl);
const u8 rtw89_rs_idx_max[] = {
[RTW89_RS_CCK] = RTW89_RATE_CCK_MAX,
@@ -1003,6 +1167,7 @@ const u8 rtw89_rs_idx_max[] = {
[RTW89_RS_HEDCM] = RTW89_RATE_HEDCM_MAX,
[RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_MAX,
};
+EXPORT_SYMBOL(rtw89_rs_idx_max);
const u8 rtw89_rs_nss_max[] = {
[RTW89_RS_CCK] = 1,
@@ -1011,6 +1176,7 @@ const u8 rtw89_rs_nss_max[] = {
[RTW89_RS_HEDCM] = RTW89_NSS_HEDCM_MAX,
[RTW89_RS_OFFSET] = 1,
};
+EXPORT_SYMBOL(rtw89_rs_nss_max);
static const u8 _byr_of_rs[] = {
[RTW89_RS_CCK] = offsetof(struct rtw89_txpwr_byrate, cck),
@@ -1044,6 +1210,7 @@ void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
}
}
}
+EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate);
#define _phy_txpwr_rf_to_mac(rtwdev, txpwr_rf) \
({ \
@@ -1074,9 +1241,38 @@ s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev,
return _phy_txpwr_rf_to_mac(rtwdev, byr[idx]);
}
+EXPORT_SYMBOL(rtw89_phy_read_txpwr_byrate);
+
+static u8 rtw89_channel_6g_to_idx(struct rtw89_dev *rtwdev, u8 channel_6g)
+{
+ switch (channel_6g) {
+ case 1 ... 29:
+ return (channel_6g - 1) / 2;
+ case 33 ... 61:
+ return (channel_6g - 3) / 2;
+ case 65 ... 93:
+ return (channel_6g - 5) / 2;
+ case 97 ... 125:
+ return (channel_6g - 7) / 2;
+ case 129 ... 157:
+ return (channel_6g - 9) / 2;
+ case 161 ... 189:
+ return (channel_6g - 11) / 2;
+ case 193 ... 221:
+ return (channel_6g - 13) / 2;
+ case 225 ... 253:
+ return (channel_6g - 15) / 2;
+ default:
+ rtw89_warn(rtwdev, "unknown 6g channel: %d\n", channel_6g);
+ return 0;
+ }
+}
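
The 6 GHz limit tables skip the nonexistent channels 31, 63, 95, ..., so each 32-channel block maps onto a contiguous index range. Worked examples:

	rtw89_channel_6g_to_idx(rtwdev, 1);	/* (1 - 1) / 2    == 0   */
	rtw89_channel_6g_to_idx(rtwdev, 37);	/* (37 - 3) / 2   == 17  */
	rtw89_channel_6g_to_idx(rtwdev, 253);	/* (253 - 15) / 2 == 119 */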
-static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 channel)
+static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 band, u8 channel)
{
+ if (band == RTW89_BAND_6G)
+ return rtw89_channel_6g_to_idx(rtwdev, channel);
+
switch (channel) {
case 1 ... 14:
return channel - 1;
@@ -1096,8 +1292,8 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 ch_idx = rtw89_channel_to_idx(rtwdev, ch);
u8 band = rtwdev->hal.current_band_type;
+ u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
u8 regd = rtw89_regd_get(rtwdev, band);
s8 lmt = 0, sar;
@@ -1114,6 +1310,12 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
lmt = (*chip->txpwr_lmt_5g)[bw][ntx][rs][bf]
[RTW89_WW][ch_idx];
break;
+ case RTW89_BAND_6G:
+ lmt = (*chip->txpwr_lmt_6g)[bw][ntx][rs][bf][regd][ch_idx];
+ if (!lmt)
+ lmt = (*chip->txpwr_lmt_6g)[bw][ntx][rs][bf]
+ [RTW89_WW][ch_idx];
+ break;
default:
rtw89_warn(rtwdev, "unknown band type: %d\n", band);
return 0;
@@ -1124,6 +1326,7 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
return min(lmt, sar);
}
+EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit);
#define __fill_txpwr_limit_nonbf_bf(ptr, bw, ntx, rs, ch) \
do { \
@@ -1151,14 +1354,14 @@ static void rtw89_phy_fill_txpwr_limit_20m(struct rtw89_dev *rtwdev,
static void rtw89_phy_fill_txpwr_limit_40m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit *lmt,
- u8 ntx, u8 ch)
+ u8 ntx, u8 ch, u8 pri_ch)
{
__fill_txpwr_limit_nonbf_bf(lmt->cck_20m, RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_CCK, ch - 2);
__fill_txpwr_limit_nonbf_bf(lmt->cck_40m, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_CCK, ch);
__fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
- ntx, RTW89_RS_OFDM, ch - 2);
+ ntx, RTW89_RS_OFDM, pri_ch);
__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch - 2);
__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], RTW89_CHANNEL_WIDTH_20,
@@ -1169,14 +1372,14 @@ static void rtw89_phy_fill_txpwr_limit_40m(struct rtw89_dev *rtwdev,
static void rtw89_phy_fill_txpwr_limit_80m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit *lmt,
- u8 ntx, u8 ch)
+ u8 ntx, u8 ch, u8 pri_ch)
{
s8 val_0p5_n[RTW89_BF_NUM];
s8 val_0p5_p[RTW89_BF_NUM];
u8 i;
__fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
- ntx, RTW89_RS_OFDM, ch - 6);
+ ntx, RTW89_RS_OFDM, pri_ch);
__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch - 6);
__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], RTW89_CHANNEL_WIDTH_20,
@@ -1201,10 +1404,82 @@ static void rtw89_phy_fill_txpwr_limit_80m(struct rtw89_dev *rtwdev,
lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);
}
+static void rtw89_phy_fill_txpwr_limit_160m(struct rtw89_dev *rtwdev,
+ struct rtw89_txpwr_limit *lmt,
+ u8 ntx, u8 ch, u8 pri_ch)
+{
+ s8 val_0p5_n[RTW89_BF_NUM];
+ s8 val_0p5_p[RTW89_BF_NUM];
+ s8 val_2p5_n[RTW89_BF_NUM];
+ s8 val_2p5_p[RTW89_BF_NUM];
+ u8 i;
+
+ /* fill ofdm section */
+ __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_OFDM, pri_ch);
+
+ /* fill mcs 20m section */
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch - 14);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch - 10);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch - 6);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch - 2);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[4], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch + 2);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[5], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch + 6);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[6], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch + 10);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[7], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch + 14);
+
+ /* fill mcs 40m section */
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch - 12);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch - 4);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[2], RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch + 4);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[3], RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch + 12);
+
+ /* fill mcs 80m section */
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], RTW89_CHANNEL_WIDTH_80,
+ ntx, RTW89_RS_MCS, ch - 8);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[1], RTW89_CHANNEL_WIDTH_80,
+ ntx, RTW89_RS_MCS, ch + 8);
+
+ /* fill mcs 160m section */
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_160m, RTW89_CHANNEL_WIDTH_160,
+ ntx, RTW89_RS_MCS, ch);
+
+ /* fill mcs 40m 0p5 section */
+ __fill_txpwr_limit_nonbf_bf(val_0p5_n, RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch - 4);
+ __fill_txpwr_limit_nonbf_bf(val_0p5_p, RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch + 4);
+
+ for (i = 0; i < RTW89_BF_NUM; i++)
+ lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);
+
+ /* fill mcs 40m 2p5 section */
+ __fill_txpwr_limit_nonbf_bf(val_2p5_n, RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch - 8);
+ __fill_txpwr_limit_nonbf_bf(val_2p5_p, RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch + 8);
+
+ for (i = 0; i < RTW89_BF_NUM; i++)
+ lmt->mcs_40m_2p5[i] = min_t(s8, val_2p5_n[i], val_2p5_p[i]);
+}
+
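
For a 160 MHz channel the limit struct is filled from every narrower subchannel, whose centers sit at fixed offsets from the 160 MHz center. A compact restatement of the offsets the function above uses:

	static const int off_20m[] = { -14, -10, -6, -2, 2, 6, 10, 14 };
	static const int off_40m[] = { -12, -4, 4, 12 };
	static const int off_80m[] = { -8, 8 };
	/* plus ch itself for the single 160 MHz entry, ch +/- 4 for the
	 * 40m-0p5 halves and ch +/- 8 for the 40m-2p5 halves */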
void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit *lmt,
u8 ntx)
{
+ u8 pri_ch = rtwdev->hal.current_primary_channel;
u8 ch = rtwdev->hal.current_channel;
u8 bw = rtwdev->hal.current_band_width;
@@ -1215,20 +1490,24 @@ void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev,
rtw89_phy_fill_txpwr_limit_20m(rtwdev, lmt, ntx, ch);
break;
case RTW89_CHANNEL_WIDTH_40:
- rtw89_phy_fill_txpwr_limit_40m(rtwdev, lmt, ntx, ch);
+ rtw89_phy_fill_txpwr_limit_40m(rtwdev, lmt, ntx, ch, pri_ch);
break;
case RTW89_CHANNEL_WIDTH_80:
- rtw89_phy_fill_txpwr_limit_80m(rtwdev, lmt, ntx, ch);
+ rtw89_phy_fill_txpwr_limit_80m(rtwdev, lmt, ntx, ch, pri_ch);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_fill_txpwr_limit_160m(rtwdev, lmt, ntx, ch, pri_ch);
break;
}
}
+EXPORT_SYMBOL(rtw89_phy_fill_txpwr_limit);
static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev,
u8 ru, u8 ntx, u8 ch)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 ch_idx = rtw89_channel_to_idx(rtwdev, ch);
u8 band = rtwdev->hal.current_band_type;
+ u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
u8 regd = rtw89_regd_get(rtwdev, band);
s8 lmt_ru = 0, sar;
@@ -1245,6 +1524,12 @@ static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev,
lmt_ru = (*chip->txpwr_lmt_ru_5g)[ru][ntx]
[RTW89_WW][ch_idx];
break;
+ case RTW89_BAND_6G:
+ lmt_ru = (*chip->txpwr_lmt_ru_6g)[ru][ntx][regd][ch_idx];
+ if (!lmt_ru)
+ lmt_ru = (*chip->txpwr_lmt_ru_6g)[ru][ntx]
+ [RTW89_WW][ch_idx];
+ break;
default:
rtw89_warn(rtwdev, "unknown band type: %d\n", band);
return 0;
@@ -1319,6 +1604,31 @@ rtw89_phy_fill_txpwr_limit_ru_80m(struct rtw89_dev *rtwdev,
ntx, ch + 6);
}
+static void
+rtw89_phy_fill_txpwr_limit_ru_160m(struct rtw89_dev *rtwdev,
+ struct rtw89_txpwr_limit_ru *lmt_ru,
+ u8 ntx, u8 ch)
+{
+ static const int ofst[] = { -14, -10, -6, -2, 2, 6, 10, 14 };
+ int i;
+
+ static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM);
+ for (i = 0; i < RTW89_RU_SEC_NUM; i++) {
+ lmt_ru->ru26[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev,
+ RTW89_RU26,
+ ntx,
+ ch + ofst[i]);
+ lmt_ru->ru52[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev,
+ RTW89_RU52,
+ ntx,
+ ch + ofst[i]);
+ lmt_ru->ru106[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev,
+ RTW89_RU106,
+ ntx,
+ ch + ofst[i]);
+ }
+}
+
void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit_ru *lmt_ru,
u8 ntx)
@@ -1338,8 +1648,12 @@ void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev,
case RTW89_CHANNEL_WIDTH_80:
rtw89_phy_fill_txpwr_limit_ru_80m(rtwdev, lmt_ru, ntx, ch);
break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_fill_txpwr_limit_ru_160m(rtwdev, lmt_ru, ntx, ch);
+ break;
}
}
+EXPORT_SYMBOL(rtw89_phy_fill_txpwr_limit_ru);
struct rtw89_phy_iter_ra_data {
struct rtw89_dev *rtwdev;
@@ -1401,13 +1715,7 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
break;
}
- if (bw == RTW89_CHANNEL_WIDTH_80)
- ra_report->txrate.bw = RATE_INFO_BW_80;
- else if (bw == RTW89_CHANNEL_WIDTH_40)
- ra_report->txrate.bw = RATE_INFO_BW_40;
- else
- ra_report->txrate.bw = RATE_INFO_BW_20;
-
+ ra_report->txrate.bw = rtw89_hw_to_rate_info_bw(bw);
ra_report->bit_rate = cfg80211_calculate_bitrate(&ra_report->txrate);
ra_report->hw_rate = FIELD_PREP(RTW89_HW_RATE_MASK_MOD, mode) |
FIELD_PREP(RTW89_HW_RATE_MASK_VAL, rate);
@@ -1487,15 +1795,25 @@ static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
u8 crystal_cap, bool force)
{
struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u8 sc_xi_val, sc_xo_val;
if (!force && cfo->crystal_cap == crystal_cap)
return;
crystal_cap = clamp_t(u8, crystal_cap, 0, 127);
- rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
- rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
- sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true);
- sc_xi_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, false);
+ if (chip->chip_id == RTL8852A) {
+ rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
+ rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
+ sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true);
+ sc_xi_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, false);
+ } else {
+ rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO,
+ crystal_cap, XTAL_SC_XO_MASK);
+ rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI,
+ crystal_cap, XTAL_SC_XI_MASK);
+ rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO, &sc_xo_val);
+ rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI, &sc_xi_val);
+ }
cfo->crystal_cap = sc_xi_val;
cfo->x_cap_ofst = (s8)((int)cfo->crystal_cap - cfo->def_x_cap);
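
Only the 8852A keeps its crystal-cap trim in directly mapped XTAL registers; later chips tunnel it through the XTAL serial interface. The helpers used here are assumed, from the call sites above, to have this shape:

int rtw89_mac_write_xtal_si(struct rtw89_dev *rtwdev, u8 offset,
			    u8 val, u8 bitmask);
int rtw89_mac_read_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 *val);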
@@ -1525,9 +1843,11 @@ static void rtw89_phy_cfo_reset(struct rtw89_dev *rtwdev)
static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo)
{
+ const struct rtw89_reg_def *dcfo_comp = rtwdev->chip->dcfo_comp;
bool is_linked = rtwdev->total_sta_assoc > 0;
s32 cfo_avg_312;
- s32 dcfo_comp;
+ s32 dcfo_comp_val;
+ u8 dcfo_comp_sft = rtwdev->chip->dcfo_comp_sft;
int sign;
if (!is_linked) {
@@ -1538,13 +1858,13 @@ static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo)
rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: curr_cfo=%d\n", curr_cfo);
if (curr_cfo == 0)
return;
- dcfo_comp = rtw89_phy_read32_mask(rtwdev, R_DCFO, B_DCFO);
+ dcfo_comp_val = rtw89_phy_read32_mask(rtwdev, R_DCFO, B_DCFO);
sign = curr_cfo > 0 ? 1 : -1;
- cfo_avg_312 = (curr_cfo << 3) / 5 + sign * dcfo_comp;
+ cfo_avg_312 = (curr_cfo << dcfo_comp_sft) / 5 + sign * dcfo_comp_val;
rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: avg_cfo=%d\n", cfo_avg_312);
if (rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV)
cfo_avg_312 = -cfo_avg_312;
- rtw89_phy_set_phy_regs(rtwdev, R_DCFO_COMP_S0, B_DCFO_COMP_S0_MSK,
+ rtw89_phy_set_phy_regs(rtwdev, dcfo_comp->addr, dcfo_comp->mask,
cfo_avg_312);
}
@@ -1563,8 +1883,12 @@ static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev)
cfo->crystal_cap_default = efuse->xtal_cap & B_AX_XTAL_SC_MASK;
cfo->crystal_cap = cfo->crystal_cap_default;
cfo->def_x_cap = cfo->crystal_cap;
+ cfo->x_cap_ub = min_t(int, cfo->def_x_cap + CFO_BOUND, 0x7f);
+ cfo->x_cap_lb = max_t(int, cfo->def_x_cap - CFO_BOUND, 0x1);
cfo->is_adjust = false;
+ cfo->divergence_lock_en = false;
cfo->x_cap_ofst = 0;
+ cfo->lock_cnt = 0;
cfo->rtw89_multi_cfo_mode = RTW89_TP_BASED_AVG_MODE;
cfo->apply_compensation = false;
cfo->residual_cfo_acc = 0;
@@ -1782,6 +2106,23 @@ static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev)
rtw89_debug(rtwdev, RTW89_DBG_CFO, "curr_cfo=0\n");
return;
}
+ if (cfo->divergence_lock_en) {
+ cfo->lock_cnt++;
+ if (cfo->lock_cnt > CFO_PERIOD_CNT) {
+ cfo->divergence_lock_en = false;
+ cfo->lock_cnt = 0;
+ } else {
+ rtw89_phy_cfo_reset(rtwdev);
+ }
+ return;
+ }
+ if (cfo->crystal_cap >= cfo->x_cap_ub ||
+ cfo->crystal_cap <= cfo->x_cap_lb) {
+ cfo->divergence_lock_en = true;
+ rtw89_phy_cfo_reset(rtwdev);
+ return;
+ }
+
rtw89_phy_cfo_crystal_cap_adjust(rtwdev, new_cfo);
cfo->cfo_avg_pre = new_cfo;
x_cap_update = cfo->crystal_cap != pre_x_cap;
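
The new guard declares the tracker diverged once the crystal cap rails at def_x_cap +/- CFO_BOUND (clamped to [0x1, 0x7f]); it then resets the CFO statistics and holds off for CFO_PERIOD_CNT rounds before tracking again. A standalone model of the state machine:

struct cfo_guard { bool locked; int cnt; };

static bool cfo_guard_step(struct cfo_guard *g, int cap, int lb, int ub)
{
	if (g->locked) {
		if (++g->cnt > 15) {		/* CFO_PERIOD_CNT */
			g->locked = false;
			g->cnt = 0;
		}
		return false;			/* keep resetting, no adjust */
	}
	if (cap >= ub || cap <= lb) {
		g->locked = true;		/* rail hit: back off */
		return false;
	}
	return true;				/* normal tracking */
}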
@@ -2845,7 +3186,9 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
enum rtw89_bandwidth cbw = rtwdev->hal.current_band_width;
struct rtw89_dig_info *dig = &rtwdev->dig;
u8 final_rssi = 0, under_region = dig->pd_low_th_ofst;
- u32 val = 0;
+ u8 ofdm_cca_th;
+ s8 cck_cca_th;
+ u32 pd_val = 0;
under_region += PD_TH_SB_FLTR_CMP_VAL;
@@ -2856,6 +3199,9 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
case RTW89_CHANNEL_WIDTH_80:
under_region += PD_TH_BW80_CMP_VAL;
break;
+ case RTW89_CHANNEL_WIDTH_160:
+ under_region += PD_TH_BW160_CMP_VAL;
+ break;
case RTW89_CHANNEL_WIDTH_20:
fallthrough;
default:
@@ -2866,23 +3212,38 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
dig->dyn_pd_th_max = dig->igi_rssi;
final_rssi = min_t(u8, rssi, dig->igi_rssi);
- final_rssi = clamp_t(u8, final_rssi, PD_TH_MIN_RSSI + under_region,
- PD_TH_MAX_RSSI + under_region);
+ ofdm_cca_th = clamp_t(u8, final_rssi, PD_TH_MIN_RSSI + under_region,
+ PD_TH_MAX_RSSI + under_region);
if (enable) {
- val = (final_rssi - under_region - PD_TH_MIN_RSSI) >> 1;
+ pd_val = (ofdm_cca_th - under_region - PD_TH_MIN_RSSI) >> 1;
rtw89_debug(rtwdev, RTW89_DBG_DIG,
- "dyn_max=%d, final_rssi=%d, total=%d, PD_low=%d\n",
- dig->igi_rssi, final_rssi, under_region, val);
+ "igi=%d, ofdm_ccaTH=%d, backoff=%d, PD_low=%d\n",
+ final_rssi, ofdm_cca_th, under_region, pd_val);
} else {
rtw89_debug(rtwdev, RTW89_DBG_DIG,
"Dynamic PD th disabled, Set PD_low_bd=0\n");
}
rtw89_phy_write32_mask(rtwdev, R_SEG0R_PD, B_SEG0R_PD_LOWER_BOUND_MSK,
- val);
+ pd_val);
rtw89_phy_write32_mask(rtwdev, R_SEG0R_PD,
B_SEG0R_PD_SPATIAL_REUSE_EN_MSK, enable);
+
+ if (!rtwdev->hal.support_cckpd)
+ return;
+
+ cck_cca_th = max_t(s8, final_rssi - under_region, CCKPD_TH_MIN_RSSI);
+ pd_val = (u32)(cck_cca_th - IGI_RSSI_MAX);
+
+ rtw89_debug(rtwdev, RTW89_DBG_DIG,
+ "igi=%d, cck_ccaTH=%d, backoff=%d, cck_PD_low=((%d))dB\n",
+ final_rssi, cck_cca_th, under_region, pd_val);
+
+ rtw89_phy_write32_mask(rtwdev, R_BMODE_PDTH_EN_V1,
+ B_BMODE_PDTH_LIMIT_EN_MSK_V1, enable);
+ rtw89_phy_write32_mask(rtwdev, R_BMODE_PDTH_V1,
+ B_BMODE_PDTH_LOWER_BOUND_MSK_V1, pd_val);
}
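
A worked example of the new CCK power-detection bound, assuming the driver's rssi = dBm + 110 convention, with final_rssi = 40 and under_region = 16:

	s8 cck_cca_th = max_t(s8, 40 - 16, CCKPD_TH_MIN_RSSI);	/* 24 */
	u32 pd_val = (u32)(cck_cca_th - IGI_RSSI_MAX);	/* -86, 0xaa in the 8-bit field */
	/* i.e. the lower bound is programmed as roughly -86 dBm */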
void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev)
@@ -2994,3 +3355,55 @@ void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif
rtw89_phy_write32_idx(rtwdev, R_BSS_CLR_MAP, B_BSS_CLR_MAP_STAID,
vif->bss_conf.aid, phy_idx);
}
+
+static void
+_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+ rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data);
+}
+
+static void
+_rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+ rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
+}
+
+static void
+_rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+ rtw89_phy_write32_set(rtwdev, def->addr, def->mask);
+}
+
+static void
+_rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+ rtw89_phy_write32_clr(rtwdev, def->addr, def->mask);
+}
+
+static void
+_rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+ udelay(def->data);
+}
+
+static void
+(*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = {
+ [RTW89_RFK_F_WRF] = _rfk_write_rf,
+ [RTW89_RFK_F_WM] = _rfk_write32_mask,
+ [RTW89_RFK_F_WS] = _rfk_write32_set,
+ [RTW89_RFK_F_WC] = _rfk_write32_clr,
+ [RTW89_RFK_F_DELAY] = _rfk_delay,
+};
+
+static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);
+
+void
+rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl)
+{
+ const struct rtw89_reg5_def *p = tbl->defs;
+ const struct rtw89_reg5_def *end = tbl->defs + tbl->size;
+
+ for (; p < end; p++)
+ _rfk_handler[p->flag](rtwdev, p);
+}
+EXPORT_SYMBOL(rtw89_rfk_parser);
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index b1f059b725a1..adcfcb4c2429 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -8,6 +8,7 @@
#include "core.h"
#define RTW89_PHY_ADDR_OFFSET 0x10000
+#define RTW89_RF_ADDR_ADSEL_MASK BIT(16)
#define get_phy_headline(addr) FIELD_GET(GENMASK(31, 28), addr)
#define PHY_HEADLINE_VALID 0xf
@@ -55,6 +56,7 @@
#define CFO_TRK_STOP_TH (2 << 2)
#define CFO_SW_COMP_FINE_TUNE (2 << 2)
#define CFO_PERIOD_CNT 15
+#define CFO_BOUND 32
#define CFO_TP_UPPER 100
#define CFO_TP_LOWER 50
#define CFO_COMP_PERIOD 250
@@ -87,8 +89,11 @@
#define RXB_IDX_MAX 31
#define RXB_IDX_MIN 0
+#define IGI_RSSI_MAX 110
#define PD_TH_MAX_RSSI 70
#define PD_TH_MIN_RSSI 8
+#define CCKPD_TH_MIN_RSSI (-18)
+#define PD_TH_BW160_CMP_VAL 9
#define PD_TH_BW80_CMP_VAL 6
#define PD_TH_BW40_CMP_VAL 3
#define PD_TH_BW20_CMP_VAL 0
@@ -265,6 +270,9 @@ const struct rtw89_phy_reg3_tbl _name ## _tbl = { \
.size = ARRAY_SIZE(_name), \
}
+extern const u8 rtw89_rs_idx_max[RTW89_RS_MAX];
+extern const u8 rtw89_rs_nss_max[RTW89_RS_MAX];
+
static inline void rtw89_phy_write8(struct rtw89_dev *rtwdev,
u32 addr, u8 data)
{
@@ -322,6 +330,65 @@ static inline u32 rtw89_phy_read32_mask(struct rtw89_dev *rtwdev,
return rtw89_read32_mask(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, mask);
}
+enum rtw89_rfk_flag {
+ RTW89_RFK_F_WRF = 0,
+ RTW89_RFK_F_WM = 1,
+ RTW89_RFK_F_WS = 2,
+ RTW89_RFK_F_WC = 3,
+ RTW89_RFK_F_DELAY = 4,
+ RTW89_RFK_F_NUM,
+};
+
+struct rtw89_rfk_tbl {
+ const struct rtw89_reg5_def *defs;
+ u32 size;
+};
+
+#define RTW89_DECLARE_RFK_TBL(_name) \
+const struct rtw89_rfk_tbl _name ## _tbl = { \
+ .defs = _name, \
+ .size = ARRAY_SIZE(_name), \
+}
+
+#define RTW89_DECL_RFK_WRF(_path, _addr, _mask, _data) \
+ {.flag = RTW89_RFK_F_WRF, \
+ .path = _path, \
+ .addr = _addr, \
+ .mask = _mask, \
+ .data = _data,}
+
+#define RTW89_DECL_RFK_WM(_addr, _mask, _data) \
+ {.flag = RTW89_RFK_F_WM, \
+ .addr = _addr, \
+ .mask = _mask, \
+ .data = _data,}
+
+#define RTW89_DECL_RFK_WS(_addr, _mask) \
+ {.flag = RTW89_RFK_F_WS, \
+ .addr = _addr, \
+ .mask = _mask,}
+
+#define RTW89_DECL_RFK_WC(_addr, _mask) \
+ {.flag = RTW89_RFK_F_WC, \
+ .addr = _addr, \
+ .mask = _mask,}
+
+#define RTW89_DECL_RFK_DELAY(_data) \
+ {.flag = RTW89_RFK_F_DELAY, \
+ .data = _data,}
+
+void
+rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl);
+
+#define rtw89_rfk_parser_by_cond(dev, cond, tbl_t, tbl_f) \
+ do { \
+ typeof(dev) __dev = (dev); \
+ if (cond) \
+ rtw89_rfk_parser(__dev, (tbl_t)); \
+ else \
+ rtw89_rfk_parser(__dev, (tbl_f)); \
+ } while (0)
+
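
The RFK table machinery moves here from rtw8852a_rfk.c so every chip can share it. A usage sketch (table name and register values illustrative):

static const struct rtw89_reg5_def rtw8852x_dack_defs[] = {
	RTW89_DECL_RFK_WS(0x12a0, BIT(15)),
	RTW89_DECL_RFK_WM(0x12b8, GENMASK(31, 28), 0x3),
	RTW89_DECL_RFK_DELAY(10),
	RTW89_DECL_RFK_WC(0x12a0, BIT(15)),
};

RTW89_DECLARE_RFK_TBL(rtw8852x_dack_defs);

/* ...later, in a calibration routine: */
rtw89_rfk_parser(rtwdev, &rtw8852x_dack_defs_tbl);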
void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
const struct rtw89_phy_reg3_tbl *tbl);
u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
@@ -329,10 +396,18 @@ u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
enum rtw89_bandwidth dbw);
u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask);
+u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask);
bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data);
+bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data);
void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev);
void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev);
+void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data);
void rtw89_phy_dm_init(struct rtw89_dev *rtwdev);
void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 data, enum rtw89_phy_idx phy_idx);
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index e0a416d37d0e..25b106788118 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -8,16 +8,36 @@
#define R_AX_SYS_WL_EFUSE_CTRL 0x000A
#define B_AX_AUTOLOAD_SUS BIT(5)
+#define R_AX_SYS_ISO_CTRL 0x0000
+#define B_AX_PWC_EV2EF_MASK GENMASK(15, 14)
+#define B_AX_PWC_EV2EF_B15 BIT(15)
+#define B_AX_PWC_EV2EF_B14 BIT(14)
+#define B_AX_ISO_EB2CORE BIT(8)
+
#define R_AX_SYS_FUNC_EN 0x0002
#define B_AX_FEN_BB_GLB_RSTN BIT(1)
#define B_AX_FEN_BBRSTB BIT(0)
#define R_AX_SYS_PW_CTRL 0x0004
+#define B_AX_XTAL_OFF_A_DIE BIT(22)
+#define B_AX_DIS_WLBT_PDNSUSEN_SOPC BIT(18)
+#define B_AX_RDY_SYSPWR BIT(17)
+#define B_AX_EN_WLON BIT(16)
+#define B_AX_APDM_HPDN BIT(15)
#define B_AX_PSUS_OFF_CAPC_EN BIT(14)
+#define B_AX_AFSM_PCIE_SUS_EN BIT(12)
+#define B_AX_AFSM_WLSUS_EN BIT(11)
+#define B_AX_APFM_SWLPS BIT(10)
+#define B_AX_APFM_OFFMAC BIT(9)
+#define B_AX_APFN_ONMAC BIT(8)
#define R_AX_SYS_CLK_CTRL 0x0008
#define B_AX_CPU_CLK_EN BIT(14)
+#define R_AX_SYS_ADIE_PAD_PWR_CTRL 0x0018
+#define B_AX_SYM_PADPDN_WL_PTA_1P3 BIT(6)
+#define B_AX_SYM_PADPDN_WL_RFC_1P3 BIT(5)
+
#define R_AX_RSV_CTRL 0x001C
#define B_AX_R_DIS_PRST BIT(6)
#define B_AX_WLOCK_1C_BIT6 BIT(5)
@@ -41,6 +61,17 @@
#define B_AX_EF_ADDR_MASK GENMASK(26, 16)
#define B_AX_EF_DATA_MASK GENMASK(15, 0)
+#define R_AX_EFUSE_CTRL_1_V1 0x0038
+#define B_AX_EF_ENT BIT(31)
+#define B_AX_EF_BURST BIT(19)
+#define B_AX_EF_TEST_SEL_MASK GENMASK(18, 16)
+#define B_AX_EF_TROW_EN BIT(15)
+#define B_AX_EF_ERR_FLAG BIT(14)
+#define B_AX_EF_DSB_EN BIT(11)
+#define B_AX_PCIE_CALIB_EN_V1 BIT(12)
+#define B_AX_WDT_WAKE_PCIE_EN BIT(10)
+#define B_AX_WDT_WAKE_USB_EN BIT(9)
+
#define R_AX_GPIO_MUXCFG 0x0040
#define B_AX_BOOT_MODE BIT(19)
#define B_AX_WL_EECS_EXT_32K_SEL BIT(18)
@@ -72,11 +103,16 @@
#define R_AX_SYS_SDIO_CTRL 0x0070
#define B_AX_PCIE_DIS_L2_CTRL_LDO_HCI BIT(15)
#define B_AX_PCIE_DIS_WLSUS_AFT_PDN BIT(14)
+#define B_AX_PCIE_CALIB_EN_V1 BIT(12)
#define B_AX_PCIE_AUXCLK_GATE BIT(11)
#define B_AX_LTE_MUX_CTRL_PATH BIT(26)
#define R_AX_PLATFORM_ENABLE 0x0088
#define B_AX_WCPU_EN BIT(1)
+#define B_AX_PLATFORM_EN BIT(0)
+
+#define R_AX_WLLPS_CTRL 0x0090
+#define B_AX_DIS_WLBT_LPSEN_LOPC BIT(1)
#define R_AX_SCOREBOARD 0x00AC
#define B_AX_TOGGLE BIT(31)
@@ -89,11 +125,20 @@
#define R_AX_DBG_PORT_SEL 0x00C0
#define B_AX_DEBUG_ST_MASK GENMASK(31, 0)
+#define R_AX_PMC_DBG_CTRL2 0x00CC
+#define B_AX_SYSON_DIS_PMCR_AX_WRMSK BIT(2)
+
#define R_AX_SYS_CFG1 0x00F0
#define B_AX_CHIP_VER_MASK GENMASK(15, 12)
#define R_AX_SYS_STATUS1 0x00F4
#define B_AX_SEL_0XC0_MASK GENMASK(17, 16)
+#define B_AX_PAD_HCI_SEL_V2_MASK GENMASK(5, 3)
+#define MAC_AX_HCI_SEL_SDIO_UART 0
+#define MAC_AX_HCI_SEL_MULTI_USB 1
+#define MAC_AX_HCI_SEL_PCIE_UART 2
+#define MAC_AX_HCI_SEL_PCIE_USB 3
+#define MAC_AX_HCI_SEL_MULTI_SDIO 4
#define R_AX_HALT_H2C_CTRL 0x0160
#define R_AX_HALT_H2C 0x0168
@@ -112,6 +157,7 @@
#define PS_RPWM_TOGGLE BIT(15)
#define PS_RPWM_ACK BIT(14)
#define PS_RPWM_SEQ_NUM GENMASK(13, 12)
+#define PS_RPWM_NOTIFY_WAKE BIT(8)
#define PS_RPWM_STATE 0x7
#define RPWM_SEQ_NUM_MAX 3
#define PS_CPWM_SEQ_NUM GENMASK(13, 12)
@@ -130,6 +176,21 @@
#define R_AX_UDM2 0x01F8
#define R_AX_UDM3 0x01FC
+#define R_AX_LDO_AON_CTRL0 0x0218
+#define B_AX_PD_REGU_L BIT(16)
+
+#define R_AX_WLAN_XTAL_SI_CTRL 0x0270
+#define B_AX_WL_XTAL_SI_CMD_POLL BIT(31)
+#define B_AX_BT_XTAL_SI_ERR_FLAG BIT(30)
+#define B_AX_WL_XTAL_GNT BIT(29)
+#define B_AX_BT_XTAL_GNT BIT(28)
+#define B_AX_WL_XTAL_SI_MODE_MASK GENMASK(25, 24)
+#define XTAL_SI_NORMAL_WRITE 0x00
+#define XTAL_SI_NORMAL_READ 0x01
+#define B_AX_WL_XTAL_SI_BITMASK_MASK GENMASK(23, 16)
+#define B_AX_WL_XTAL_SI_DATA_MASK GENMASK(15, 8)
+#define B_AX_WL_XTAL_SI_ADDR_MASK GENMASK(7, 0)
+
#define R_AX_XTAL_ON_CTRL0 0x0280
#define B_AX_XTAL_SC_LPS BIT(31)
#define B_AX_XTAL_SC_XO_MASK GENMASK(23, 17)
@@ -138,6 +199,11 @@
#define R_AX_GPIO0_7_FUNC_SEL 0x02D0
+#define R_AX_GPIO0_15_EECS_EESK_LED1_PULL_LOW_EN 0x02E4
+#define B_AX_LED1_PULL_LOW_EN BIT(18)
+#define B_AX_EESK_PULL_LOW_EN BIT(17)
+#define B_AX_EECS_PULL_LOW_EN BIT(16)
+
#define R_AX_WLRF_CTRL 0x02F0
#define B_AX_WLRF1_CTRL_7 BIT(15)
#define B_AX_WLRF1_CTRL_1 BIT(9)
@@ -162,6 +228,58 @@
#define B_AX_ASFF_FULL_NO_STK BIT(1)
#define B_AX_EN_STUCK_DBG BIT(0)
+#define R_AX_HCI_FC_CTRL_V1 0x1700
+#define R_AX_CH_PAGE_CTRL_V1 0x1704
+
+#define R_AX_ACH0_PAGE_CTRL_V1 0x1710
+#define R_AX_ACH1_PAGE_CTRL_V1 0x1714
+#define R_AX_ACH2_PAGE_CTRL_V1 0x1718
+#define R_AX_ACH3_PAGE_CTRL_V1 0x171C
+#define R_AX_ACH4_PAGE_CTRL_V1 0x1720
+#define R_AX_ACH5_PAGE_CTRL_V1 0x1724
+#define R_AX_ACH6_PAGE_CTRL_V1 0x1728
+#define R_AX_ACH7_PAGE_CTRL_V1 0x172C
+#define R_AX_CH8_PAGE_CTRL_V1 0x1730
+#define R_AX_CH9_PAGE_CTRL_V1 0x1734
+#define R_AX_CH10_PAGE_CTRL_V1 0x1738
+#define R_AX_CH11_PAGE_CTRL_V1 0x173C
+
+#define R_AX_ACH0_PAGE_INFO_V1 0x1750
+#define R_AX_ACH1_PAGE_INFO_V1 0x1754
+#define R_AX_ACH2_PAGE_INFO_V1 0x1758
+#define R_AX_ACH3_PAGE_INFO_V1 0x175C
+#define R_AX_ACH4_PAGE_INFO_V1 0x1760
+#define R_AX_ACH5_PAGE_INFO_V1 0x1764
+#define R_AX_ACH6_PAGE_INFO_V1 0x1768
+#define R_AX_ACH7_PAGE_INFO_V1 0x176C
+#define R_AX_CH8_PAGE_INFO_V1 0x1770
+#define R_AX_CH9_PAGE_INFO_V1 0x1774
+#define R_AX_CH10_PAGE_INFO_V1 0x1778
+#define R_AX_CH11_PAGE_INFO_V1 0x177C
+#define R_AX_CH12_PAGE_INFO_V1 0x1780
+
+#define R_AX_PUB_PAGE_INFO3_V1 0x178C
+#define R_AX_PUB_PAGE_CTRL1_V1 0x1790
+#define R_AX_PUB_PAGE_CTRL2_V1 0x1794
+#define R_AX_PUB_PAGE_INFO1_V1 0x1798
+#define R_AX_PUB_PAGE_INFO2_V1 0x179C
+#define R_AX_WP_PAGE_CTRL1_V1 0x17A0
+#define R_AX_WP_PAGE_CTRL2_V1 0x17A4
+#define R_AX_WP_PAGE_INFO1_V1 0x17A8
+
+#define R_AX_H2CREG_DATA0_V1 0x7140
+#define R_AX_H2CREG_DATA1_V1 0x7144
+#define R_AX_H2CREG_DATA2_V1 0x7148
+#define R_AX_H2CREG_DATA3_V1 0x714C
+#define R_AX_C2HREG_DATA0_V1 0x7150
+#define R_AX_C2HREG_DATA1_V1 0x7154
+#define R_AX_C2HREG_DATA2_V1 0x7158
+#define R_AX_C2HREG_DATA3_V1 0x715C
+#define R_AX_H2CREG_CTRL_V1 0x7160
+#define R_AX_C2HREG_CTRL_V1 0x7164
+
+#define R_AX_HCI_FUNC_EN_V1 0x7880
+
#define R_AX_PHYREG_SET 0x8040
#define PHYREG_SET_ALL_CYCLE 0x8
@@ -194,6 +312,7 @@
#define R_AX_BOOT_DBG 0x83F0
#define R_AX_DMAC_FUNC_EN 0x8400
+#define B_AX_DMAC_CRPRT BIT(31)
#define B_AX_MAC_FUNC_EN BIT(30)
#define B_AX_DMAC_FUNC_EN BIT(29)
#define B_AX_MPDU_PROC_EN BIT(28)
@@ -207,7 +326,10 @@
#define B_AX_PKT_IN_EN BIT(20)
#define B_AX_DLE_CPUIO_EN BIT(19)
#define B_AX_DISPATCHER_EN BIT(18)
+#define B_AX_BBRPT_EN BIT(17)
#define B_AX_MAC_SEC_EN BIT(16)
+#define B_AX_MAC_UN_EN BIT(15)
+#define B_AX_H_AXIDMA_EN BIT(14)
#define R_AX_DMAC_CLK_EN 0x8404
#define B_AX_WD_RLS_CLK_EN BIT(27)
@@ -218,6 +340,7 @@
#define B_AX_PKT_IN_CLK_EN BIT(20)
#define B_AX_DLE_CPUIO_CLK_EN BIT(19)
#define B_AX_DISPATCHER_CLK_EN BIT(18)
+#define B_AX_BBRPT_CLK_EN BIT(17)
#define B_AX_MAC_SEC_CLK_EN BIT(16)
#define PCI_LTR_IDLE_TIMER_1US 0
@@ -444,6 +567,7 @@
#define R_AX_PLE_QTA8_CFG 0x9060
#define R_AX_PLE_QTA9_CFG 0x9064
#define R_AX_PLE_QTA10_CFG 0x9068
+#define R_AX_PLE_QTA11_CFG 0x906C
#define R_AX_PLE_INI_STATUS 0x9100
#define B_AX_PLE_Q_MGN_INI_RDY BIT(1)
@@ -618,6 +742,30 @@
#define R_AX_DBG_FUN_INTF_DATA 0x9F34
#define B_AX_DFI_DATA_MASK GENMASK(31, 0)
+#define R_AX_TXPKTCTL_B0_PRELD_CFG0 0x9F48
+#define B_AX_B0_PRELD_FEN BIT(31)
+#define B_AX_B0_PRELD_USEMAXSZ_MASK GENMASK(25, 16)
+#define PRELD_B0_ENT_NUM 10
+#define PRELD_AMSDU_SIZE 52
+#define B_AX_B0_PRELD_CAM_G1ENTNUM_MASK GENMASK(12, 8)
+#define B_AX_B0_PRELD_CAM_G0ENTNUM_MASK GENMASK(4, 0)
+
+#define R_AX_TXPKTCTL_B0_PRELD_CFG1 0x9F4C
+#define B_AX_B0_PRELD_NXT_TXENDWIN_MASK GENMASK(11, 8)
+#define PRELD_NEXT_WND 1
+#define B_AX_B0_PRELD_NXT_RSVMINSZ_MASK GENMASK(7, 0)
+
+#define R_AX_TXPKTCTL_B1_PRELD_CFG0 0x9F88
+#define B_AX_B1_PRELD_FEN BIT(31)
+#define B_AX_B1_PRELD_USEMAXSZ_MASK GENMASK(25, 16)
+#define PRELD_B1_ENT_NUM 4
+#define B_AX_B1_PRELD_CAM_G1ENTNUM_MASK GENMASK(12, 8)
+#define B_AX_B1_PRELD_CAM_G0ENTNUM_MASK GENMASK(4, 0)
+
+#define R_AX_TXPKTCTL_B1_PRELD_CFG1 0x9F8C
+#define B_AX_B1_PRELD_NXT_TXENDWIN_MASK GENMASK(11, 8)
+#define B_AX_B1_PRELD_NXT_RSVMINSZ_MASK GENMASK(7, 0)
+
#define R_AX_AFE_CTRL1 0x0024
#define B_AX_R_SYM_WLCMAC1_P4_PC_EN BIT(4)
@@ -745,6 +893,7 @@
#define B_AX_CTN_TXEN_VI_0 BIT(2)
#define B_AX_CTN_TXEN_BK_0 BIT(1)
#define B_AX_CTN_TXEN_BE_0 BIT(0)
+#define B_AX_CTN_TXEN_ALL_MASK GENMASK(15, 0)
#define R_AX_MUEDCA_BE_PARAM_0 0xC350
#define R_AX_MUEDCA_BE_PARAM_0_C1 0xE350
@@ -791,6 +940,12 @@
#define B_AX_CTN_CHK_CCA_S20 BIT(1)
#define B_AX_CTN_CHK_CCA_P20 BIT(0)
+#define R_AX_CTN_DRV_TXEN 0xC398
+#define R_AX_CTN_DRV_TXEN_C1 0xE398
+#define B_AX_CTN_TXEN_TWT_3 BIT(17)
+#define B_AX_CTN_TXEN_TWT_2 BIT(16)
+#define B_AX_CTN_TXEN_ALL_MASK_V1 GENMASK(17, 0)
+
#define R_AX_SCHEDULE_ERR_IMR 0xC3E8
#define R_AX_SCHEDULE_ERR_IMR_C1 0xE3E8
#define B_AX_SORT_NON_IDLE_ERR_INT_EN BIT(1)
@@ -913,7 +1068,7 @@
#define R_AX_DTIM_CTRL_P2 0xC4A6
#define R_AX_DTIM_CTRL_P3 0xC4E6
#define R_AX_DTIM_CTRL_P4 0xC526
-#define B_AX_DTIM_NUM_MASK GENMASK(15, 0)
+#define B_AX_DTIM_NUM_MASK GENMASK(15, 8)
#define B_AX_DTIM_CURRCNT_MASK GENMASK(7, 0)
#define R_AX_TBTT_SHIFT_P0 0xC428
@@ -964,6 +1119,11 @@
#define B_AX_P0MB2_EN BIT(2)
#define B_AX_P0MB1_EN BIT(1)
+#define R_AX_P0MB_HGQ_WINDOW_CFG_0 0xC590
+#define R_AX_P0MB_HGQ_WINDOW_CFG_0_C1 0xE590
+#define R_AX_PORT_HGQ_WINDOW_CFG 0xC5A0
+#define R_AX_PORT_HGQ_WINDOW_CFG_C1 0xE5A0
+
#define R_AX_AMPDU_AGG_LIMIT 0xC610
#define B_AX_AMPDU_MAX_TIME_MASK GENMASK(31, 24)
#define B_AX_RA_TRY_RATE_AGG_LMT_MASK GENMASK(23, 16)
@@ -1080,6 +1240,13 @@
#define B_AX_TCR_ZLD_USTIME_AFTERPHYTXON GENMASK(11, 8)
#define B_AX_TCR_TXTIMEOUT GENMASK(7, 0)
+#define R_AX_MD_TSFT_STMP_CTL 0xCA08
+#define R_AX_MD_TSFT_STMP_CTL_C1 0xEA08
+#define B_AX_TSFT_OFS_MASK GENMASK(31, 16)
+#define B_AX_STMP_THSD_MASK GENMASK(15, 8)
+#define B_AX_UPD_HGQMD BIT(1)
+#define B_AX_UPD_TIMIE BIT(0)
+
#define R_AX_PPWRBIT_SETTING 0xCA0C
#define R_AX_PPWRBIT_SETTING_C1 0xEA0C
@@ -1391,8 +1558,10 @@
#define B_AX_PWR_UL_TB_CTRL_EN BIT(31)
#define R_AX_PWR_UL_TB_1T 0xD28C
#define B_AX_PWR_UL_TB_1T_MASK GENMASK(4, 0)
+#define B_AX_PWR_UL_TB_1T_V1_MASK GENMASK(7, 0)
#define R_AX_PWR_UL_TB_2T 0xD290
#define B_AX_PWR_UL_TB_2T_MASK GENMASK(4, 0)
+#define B_AX_PWR_UL_TB_2T_V1_MASK GENMASK(7, 0)
#define R_AX_PWR_BY_RATE_TABLE0 0xD2C0
#define R_AX_PWR_BY_RATE_TABLE10 0xD2E8
#define R_AX_PWR_BY_RATE R_AX_PWR_BY_RATE_TABLE0
@@ -1458,6 +1627,31 @@
#define B_AX_STATIS_BT_LO_TX_1_MASK GENMASK(15, 0)
#define B_AX_STATIS_BT_LO_RX_1_MASK GENMASK(31, 16)
+#define R_AX_GNT_SW_CTRL 0xDA48
+#define R_AX_GNT_SW_CTRL_C1 0xFA48
+#define B_AX_WL_ACT2_VAL BIT(21)
+#define B_AX_WL_ACT2_SWCTRL BIT(20)
+#define B_AX_WL_ACT_VAL BIT(19)
+#define B_AX_WL_ACT_SWCTRL BIT(18)
+#define B_AX_GNT_BT_RX_VAL BIT(17)
+#define B_AX_GNT_BT_RX_SWCTRL BIT(16)
+#define B_AX_GNT_BT_TX_VAL BIT(15)
+#define B_AX_GNT_BT_TX_SWCTRL BIT(14)
+#define B_AX_GNT_WL_RX_VAL BIT(13)
+#define B_AX_GNT_WL_RX_SWCTRL BIT(12)
+#define B_AX_GNT_WL_TX_VAL BIT(11)
+#define B_AX_GNT_WL_TX_SWCTRL BIT(10)
+#define B_AX_GNT_BT_RFC_S1_VAL BIT(9)
+#define B_AX_GNT_BT_RFC_S1_SWCTRL BIT(8)
+#define B_AX_GNT_WL_RFC_S1_VAL BIT(7)
+#define B_AX_GNT_WL_RFC_S1_SWCTRL BIT(6)
+#define B_AX_GNT_BT_RFC_S0_VAL BIT(5)
+#define B_AX_GNT_BT_RFC_S0_SWCTRL BIT(4)
+#define B_AX_GNT_WL_RFC_S0_VAL BIT(3)
+#define B_AX_GNT_WL_RFC_S0_SWCTRL BIT(2)
+#define B_AX_GNT_WL_BB_VAL BIT(1)
+#define B_AX_GNT_WL_BB_SWCTRL BIT(0)
+
#define R_AX_TDMA_MODE 0xDA4C
#define R_AX_TDMA_MODE_C1 0xFA4C
#define B_AX_R_BT_CMD_RPT_MASK GENMASK(31, 16)
@@ -1669,6 +1863,17 @@
#define B_ANAPAR_FLTRST BIT(22)
#define B_ANAPAR_CRXBB GENMASK(18, 16)
#define B_ANAPAR_14 GENMASK(15, 0)
+#define R_SWSI_DATA_V1 0x0370
+#define B_SWSI_DATA_VAL_V1 GENMASK(19, 0)
+#define B_SWSI_DATA_ADDR_V1 GENMASK(27, 20)
+#define B_SWSI_DATA_PATH_V1 GENMASK(30, 28)
+#define B_SWSI_DATA_BIT_MASK_EN_V1 BIT(31)
+#define R_SWSI_BIT_MASK_V1 0x0374
+#define B_SWSI_BIT_MASK_V1 GENMASK(19, 0)
+#define R_SWSI_READ_ADDR_V1 0x0378
+#define B_SWSI_READ_ADDR_ADDR_V1 GENMASK(7, 0)
+#define B_SWSI_READ_ADDR_PATH_V1 GENMASK(10, 8)
+#define B_SWSI_READ_ADDR_V1 GENMASK(10, 0)
#define R_UPD_CLK_ADC 0x0700
#define B_UPD_CLK_ADC_ON BIT(24)
#define B_UPD_CLK_ADC_VAL GENMASK(26, 25)
@@ -1776,6 +1981,10 @@
#define R_CFO_COMP_SEG0_H 0x1388
#define R_CFO_COMP_SEG0_CTRL 0x138C
#define R_DBG32_D 0x1730
+#define R_SWSI_V1 0x174C
+#define B_SWSI_W_BUSY_V1 BIT(24)
+#define B_SWSI_R_BUSY_V1 BIT(25)
+#define B_SWSI_R_DATA_DONE_V1 BIT(26)
#define R_TX_COUNTER 0x1A40
#define R_IFS_CLM_TX_CNT 0x1ACC
#define B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK GENMASK(31, 16)
@@ -1959,6 +2168,12 @@
#define R_CHBW_MOD 0x4978
#define B_CHBW_MOD_PRICH GENMASK(11, 8)
#define B_CHBW_MOD_SBW GENMASK(13, 12)
+#define R_DCFO_COMP_S0_V1 0x4A40
+#define B_DCFO_COMP_S0_V1_MSK GENMASK(13, 0)
+#define R_BMODE_PDTH_V1 0x4B64
+#define B_BMODE_PDTH_LOWER_BOUND_MSK_V1 GENMASK(31, 24)
+#define R_BMODE_PDTH_EN_V1 0x4B74
+#define B_BMODE_PDTH_LIMIT_EN_MSK_V1 BIT(30)
#define R_CFO_COMP_SEG1_L 0x5384
#define R_CFO_COMP_SEG1_H 0x5388
#define R_CFO_COMP_SEG1_CTRL 0x538C
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 6b75e4bc7352..41fc8db311ec 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -3,6 +3,7 @@
*/
#include "coex.h"
+#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "reg.h"
@@ -36,16 +37,19 @@ static const struct rtw89_hfc_pub_cfg rtw8852a_hfc_pubcfg_pcie = {
static const struct rtw89_hfc_param_ini rtw8852a_hfc_param_ini_pcie[] = {
[RTW89_QTA_SCC] = {rtw8852a_hfc_chcfg_pcie, &rtw8852a_hfc_pubcfg_pcie,
- &rtw_hfc_preccfg_pcie, RTW89_HCIFC_POH},
- [RTW89_QTA_DLFW] = {NULL, NULL, &rtw_hfc_preccfg_pcie, RTW89_HCIFC_POH},
+ &rtw89_hfc_preccfg_pcie, RTW89_HCIFC_POH},
+ [RTW89_QTA_DLFW] = {NULL, NULL, &rtw89_hfc_preccfg_pcie,
+ RTW89_HCIFC_POH},
[RTW89_QTA_INVALID] = {NULL},
};
static const struct rtw89_dle_mem rtw8852a_dle_mem_pcie[] = {
- [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &wde_size0, &ple_size0, &wde_qt0,
- &wde_qt0, &ple_qt4, &ple_qt5},
- [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &wde_size4, &ple_size4,
- &wde_qt4, &wde_qt4, &ple_qt13, &ple_qt13},
+ [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_wde_size0, &rtw89_ple_size0,
+ &rtw89_wde_qt0, &rtw89_wde_qt0, &rtw89_ple_qt4,
+ &rtw89_ple_qt5},
+ [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_wde_size4, &rtw89_ple_size4,
+ &rtw89_wde_qt4, &rtw89_wde_qt4, &rtw89_ple_qt13,
+ &rtw89_ple_qt13},
[RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL,
NULL},
};
@@ -373,6 +377,35 @@ static const struct rtw89_pwr_cfg * const pwr_off_seq_8852a[] = {
rtw8852a_pwroff, NULL
};
+static const u32 rtw8852a_h2c_regs[RTW89_H2CREG_MAX] = {
+ R_AX_H2CREG_DATA0, R_AX_H2CREG_DATA1, R_AX_H2CREG_DATA2,
+ R_AX_H2CREG_DATA3
+};
+
+static const u32 rtw8852a_c2h_regs[RTW89_C2HREG_MAX] = {
+ R_AX_C2HREG_DATA0, R_AX_C2HREG_DATA1, R_AX_C2HREG_DATA2,
+ R_AX_C2HREG_DATA3
+};
+
+static const struct rtw89_page_regs rtw8852a_page_regs = {
+ .hci_fc_ctrl = R_AX_HCI_FC_CTRL,
+ .ch_page_ctrl = R_AX_CH_PAGE_CTRL,
+ .ach_page_ctrl = R_AX_ACH0_PAGE_CTRL,
+ .ach_page_info = R_AX_ACH0_PAGE_INFO,
+ .pub_page_info3 = R_AX_PUB_PAGE_INFO3,
+ .pub_page_ctrl1 = R_AX_PUB_PAGE_CTRL1,
+ .pub_page_ctrl2 = R_AX_PUB_PAGE_CTRL2,
+ .pub_page_info1 = R_AX_PUB_PAGE_INFO1,
+ .pub_page_info2 = R_AX_PUB_PAGE_INFO2,
+ .wp_page_ctrl1 = R_AX_WP_PAGE_CTRL1,
+ .wp_page_ctrl2 = R_AX_WP_PAGE_CTRL2,
+ .wp_page_info1 = R_AX_WP_PAGE_INFO1,
+};
+
+static const struct rtw89_reg_def rtw8852a_dcfo_comp = {
+ R_DCFO_COMP_S0, B_DCFO_COMP_S0_MSK
+};
+
static void rtw8852ae_efuse_parsing(struct rtw89_efuse *efuse,
struct rtw8852a_efuse *map)
{
@@ -1134,7 +1167,7 @@ static void rtw8852a_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
u8 phy_idx = RTW89_PHY_0;
if (enter) {
- rtw89_mac_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_chip_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL);
rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
rtw8852a_dfs_en(rtwdev, false);
rtw8852a_tssi_cont_en_phyidx(rtwdev, false, RTW89_PHY_0);
@@ -1147,7 +1180,7 @@ static void rtw8852a_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
rtw8852a_dfs_en(rtwdev, true);
rtw8852a_tssi_cont_en_phyidx(rtwdev, true, RTW89_PHY_0);
rtw8852a_bb_reset_en(rtwdev, phy_idx, true);
- rtw89_mac_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en);
+ rtw89_chip_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en);
}
}
@@ -1242,10 +1275,10 @@ static u32 rtw8852a_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
static
void rtw8852a_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
- s16 pw_ofst, enum rtw89_mac_idx mac_idx)
+ s8 pw_ofst, enum rtw89_mac_idx mac_idx)
{
- s32 val_1t = 0;
- s32 val_2t = 0;
+ s8 val_1t = 0;
+ s8 val_2t = 0;
u32 reg;
if (pw_ofst < -16 || pw_ofst > 15) {
@@ -1255,7 +1288,7 @@ void rtw8852a_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
}
reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_CTRL, mac_idx);
rtw89_write32_set(rtwdev, reg, B_AX_PWR_UL_TB_CTRL_EN);
- val_1t = (s32)pw_ofst;
+ val_1t = pw_ofst;
reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_1T, mac_idx);
rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, val_1t);
val_2t = max(val_1t - 3, -16);
@@ -1984,6 +2017,12 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.query_ppdu = rtw8852a_query_ppdu,
.bb_ctrl_btc_preagc = rtw8852a_bb_ctrl_btc_preagc,
.set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset,
+ .pwr_on_func = NULL,
+ .pwr_off_func = NULL,
+ .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path,
+ .mac_cfg_gnt = rtw89_mac_cfg_gnt,
+ .stop_sch_tx = rtw89_mac_stop_sch_tx,
+ .resume_sch_tx = rtw89_mac_resume_sch_tx,
.btc_set_rfe = rtw8852a_btc_set_rfe,
.btc_init_cfg = rtw8852a_btc_init_cfg,
@@ -2019,6 +2058,9 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = &rtw89_8852a_phy_dig_table,
+ .support_bands = BIT(NL80211_BAND_2GHZ) |
+ BIT(NL80211_BAND_5GHZ),
+ .support_bw160 = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
@@ -2029,6 +2071,8 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.physical_efuse_size = 1216,
.logical_efuse_size = 1536,
.limit_efuse_size = 1152,
+ .dav_phy_efuse_size = 0,
+ .dav_log_efuse_size = 0,
.phycap_addr = 0x580,
.phycap_size = 128,
.para_ver = 0x05050864,
@@ -2049,7 +2093,18 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.ps_mode_supported = BIT(RTW89_PS_MODE_RFOFF) |
BIT(RTW89_PS_MODE_CLK_GATED) |
BIT(RTW89_PS_MODE_PWR_GATED),
+ .hci_func_en_addr = R_AX_HCI_FUNC_EN,
+ .h2c_ctrl_reg = R_AX_H2CREG_CTRL,
+ .h2c_regs = rtw8852a_h2c_regs,
+ .c2h_ctrl_reg = R_AX_C2HREG_CTRL,
+ .c2h_regs = rtw8852a_c2h_regs,
+ .page_regs = &rtw8852a_page_regs,
+ .dcfo_comp = &rtw8852a_dcfo_comp,
+ .dcfo_comp_sft = 3,
};
EXPORT_SYMBOL(rtw8852a_chip_info);
MODULE_FIRMWARE("rtw89/rtw8852a_fw.bin");
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852A driver");
+MODULE_LICENSE("Dual BSD/GPL");
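
The fields added to rtw89_chip_info above turn chip-specific register choices into data. A hedged sketch of how a v1-generation chip might fill the same hooks with the *_V1 registers introduced in reg.h (chip name hypothetical, values illustrative):

const struct rtw89_chip_info rtw8852x_chip_info = {
	/* ... */
	.hci_func_en_addr = R_AX_HCI_FUNC_EN_V1,
	.h2c_ctrl_reg = R_AX_H2CREG_CTRL_V1,
	.c2h_ctrl_reg = R_AX_C2HREG_CTRL_V1,
	.page_regs = &rtw8852x_page_regs,	/* built on the R_AX_*_V1 set */
	.dcfo_comp = &rtw8852x_dcfo_comp,	/* R_DCFO_COMP_S0_V1 based */
	.dcfo_comp_sft = 3,			/* shift value illustrative */
};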
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.h b/drivers/net/wireless/realtek/rtw89/rtw8852a.h
index 633384374de0..fcff1194c009 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.h
@@ -93,6 +93,8 @@ struct rtw8852a_bb_pmac_info {
u8 duty_cycle;
};
+extern const struct rtw89_chip_info rtw8852a_chip_info;
+
void rtw8852a_bb_set_plcp_tx(struct rtw89_dev *rtwdev);
void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
struct rtw8852a_bb_pmac_info *tx_info,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
index c021e93eb07b..ad272854c442 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
@@ -12,66 +12,6 @@
#include "rtw8852a_rfk_table.h"
#include "rtw8852a_table.h"
-static void
-_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
-{
- rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data);
-}
-
-static void
-_rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
-{
- rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
-}
-
-static void
-_rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
-{
- rtw89_phy_write32_set(rtwdev, def->addr, def->mask);
-}
-
-static void
-_rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
-{
- rtw89_phy_write32_clr(rtwdev, def->addr, def->mask);
-}
-
-static void
-_rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
-{
- udelay(def->data);
-}
-
-static void
-(*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = {
- [RTW89_RFK_F_WRF] = _rfk_write_rf,
- [RTW89_RFK_F_WM] = _rfk_write32_mask,
- [RTW89_RFK_F_WS] = _rfk_write32_set,
- [RTW89_RFK_F_WC] = _rfk_write32_clr,
- [RTW89_RFK_F_DELAY] = _rfk_delay,
-};
-
-static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);
-
-static void
-rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl)
-{
- const struct rtw89_reg5_def *p = tbl->defs;
- const struct rtw89_reg5_def *end = tbl->defs + tbl->size;
-
- for (; p < end; p++)
- _rfk_handler[p->flag](rtwdev, p);
-}
-
-#define rtw89_rfk_parser_by_cond(rtwdev, cond, tbl_t, tbl_f) \
- do { \
- typeof(rtwdev) _dev = (rtwdev); \
- if (cond) \
- rtw89_rfk_parser(_dev, (tbl_t)); \
- else \
- rtw89_rfk_parser(_dev, (tbl_f)); \
- } while (0)
-
static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x, PHY%d\n",
@@ -2977,6 +2917,7 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
u8 i, j;
switch (subband) {
+ default:
case RTW89_CH_2G:
thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_p;
thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_n;
@@ -3161,6 +3102,7 @@ static void _tssi_pak(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
u8 subband = rtwdev->hal.current_subband;
switch (subband) {
+ default:
case RTW89_CH_2G:
rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
&rtw8852a_tssi_pak_defs_a_2g_tbl,
@@ -3584,7 +3526,7 @@ static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
const struct rtw89_chip_info *mac_reg = rtwdev->chip;
u8 ch = rtwdev->hal.current_channel, ch_tmp;
u8 bw = rtwdev->hal.current_band_width;
- u16 tx_en;
+ u32 tx_en;
u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0);
s8 power;
s16 xdbm;
@@ -3612,7 +3554,7 @@ static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
__func__, phy, power, xdbm);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
- rtw89_mac_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy));
tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
@@ -3658,7 +3600,7 @@ static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
rtw8852a_bb_tx_mode_switch(rtwdev, phy, 0);
- rtw89_mac_resume_sch_tx(rtwdev, phy, tx_en);
+ rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
}
@@ -3681,11 +3623,11 @@ void rtw8852a_dack(struct rtw89_dev *rtwdev)
void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
- u16 tx_en;
+ u32 tx_en;
u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
- rtw89_mac_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_iqk_init(rtwdev);
@@ -3694,7 +3636,7 @@ void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
else
_iqk(rtwdev, phy_idx, false);
- rtw89_mac_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
@@ -3706,33 +3648,33 @@ void rtw8852a_iqk_track(struct rtw89_dev *rtwdev)
void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
bool is_afe)
{
- u16 tx_en;
+ u32 tx_en;
u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
- rtw89_mac_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_rx_dck(rtwdev, phy_idx, is_afe);
- rtw89_mac_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
- u16 tx_en;
+ u32 tx_en;
u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
- rtw89_mac_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
rtwdev->dpk.is_dpk_enable = true;
rtwdev->dpk.is_dpk_reload_en = false;
_dpk(rtwdev, phy_idx, false);
- rtw89_mac_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
}
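
Note: rtw8852a_iqk(), rtw8852a_rx_dck() and rtw8852a_dpk() (and _tssi_pre_tx() above) all wrap their calibration in the same bracket: notify BTC, stop scheduler TX saving the enable bitmap, wait for RX to go idle, calibrate, then restore and notify again. Purely as a reading aid, a sketch of that shared bracket factored into a helper; the driver itself keeps the bracket inline, and every name here is assumed:

#include <stdint.h>

struct rdev;				/* opaque device, assumed */

int  chip_stop_sch_tx(struct rdev *d, int phy, uint32_t *tx_en, int sel);
void chip_resume_sch_tx(struct rdev *d, int phy, uint32_t tx_en);
void wait_rx_idle(struct rdev *d, int phy);

enum { SEL_ALL = 0 };

typedef void (*cal_fn)(struct rdev *d, int phy);

/* The shared bracket: quiesce scheduler TX, drain RX, calibrate, restore. */
static inline void run_calibration(struct rdev *d, int phy, cal_fn cal)
{
	uint32_t tx_en;

	if (chip_stop_sch_tx(d, phy, &tx_en, SEL_ALL))
		return;
	wait_rx_idle(d, phy);
	cal(d, phy);			/* IQK, DPK, RX DCK or TSSI pre-TX body */
	chip_resume_sch_tx(d, phy, tx_en);
}
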
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c
index 510570090502..dd2a978b9bae 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c
@@ -5,1603 +5,1603 @@
#include "rtw8852a_rfk_table.h"
static const struct rtw89_reg5_def rtw8852a_tssi_sys_defs[] = {
- DECL_RFK_WM(0x12a8, 0x00000001, 0x00000001),
- DECL_RFK_WM(0x12a8, 0x0000000e, 0x00000002),
- DECL_RFK_WM(0x32a8, 0x00000001, 0x00000001),
- DECL_RFK_WM(0x32a8, 0x0000000e, 0x00000002),
- DECL_RFK_WM(0x12bc, 0x000000f0, 0x00000005),
- DECL_RFK_WM(0x12bc, 0x00000f00, 0x00000005),
- DECL_RFK_WM(0x12bc, 0x000f0000, 0x00000005),
- DECL_RFK_WM(0x12bc, 0x0000f000, 0x00000005),
- DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033),
- DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033),
- DECL_RFK_WM(0x32bc, 0x000000f0, 0x00000005),
- DECL_RFK_WM(0x32bc, 0x00000f00, 0x00000005),
- DECL_RFK_WM(0x32bc, 0x000f0000, 0x00000005),
- DECL_RFK_WM(0x32bc, 0x0000f000, 0x00000005),
- DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033),
- DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033),
- DECL_RFK_WM(0x0300, 0xff000000, 0x00000019),
- DECL_RFK_WM(0x0304, 0x000000ff, 0x00000019),
- DECL_RFK_WM(0x0304, 0x0000ff00, 0x0000001d),
- DECL_RFK_WM(0x0314, 0xffff0000, 0x00002044),
- DECL_RFK_WM(0x0318, 0x0000ffff, 0x00002042),
- DECL_RFK_WM(0x0318, 0xffff0000, 0x00002002),
- DECL_RFK_WM(0x0020, 0x00006000, 0x00000003),
- DECL_RFK_WM(0x0024, 0x00006000, 0x00000003),
- DECL_RFK_WM(0x0704, 0xffff0000, 0x0000601e),
- DECL_RFK_WM(0x2704, 0xffff0000, 0x0000601e),
- DECL_RFK_WM(0x0700, 0xf0000000, 0x00000004),
- DECL_RFK_WM(0x2700, 0xf0000000, 0x00000004),
- DECL_RFK_WM(0x0650, 0x3c000000, 0x00000000),
- DECL_RFK_WM(0x2650, 0x3c000000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs);
+ RTW89_DECL_RFK_WM(0x12a8, 0x00000001, 0x00000001),
+ RTW89_DECL_RFK_WM(0x12a8, 0x0000000e, 0x00000002),
+ RTW89_DECL_RFK_WM(0x32a8, 0x00000001, 0x00000001),
+ RTW89_DECL_RFK_WM(0x32a8, 0x0000000e, 0x00000002),
+ RTW89_DECL_RFK_WM(0x12bc, 0x000000f0, 0x00000005),
+ RTW89_DECL_RFK_WM(0x12bc, 0x00000f00, 0x00000005),
+ RTW89_DECL_RFK_WM(0x12bc, 0x000f0000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x12bc, 0x0000f000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033),
+ RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033),
+ RTW89_DECL_RFK_WM(0x32bc, 0x000000f0, 0x00000005),
+ RTW89_DECL_RFK_WM(0x32bc, 0x00000f00, 0x00000005),
+ RTW89_DECL_RFK_WM(0x32bc, 0x000f0000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x32bc, 0x0000f000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033),
+ RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033),
+ RTW89_DECL_RFK_WM(0x0300, 0xff000000, 0x00000019),
+ RTW89_DECL_RFK_WM(0x0304, 0x000000ff, 0x00000019),
+ RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x0000001d),
+ RTW89_DECL_RFK_WM(0x0314, 0xffff0000, 0x00002044),
+ RTW89_DECL_RFK_WM(0x0318, 0x0000ffff, 0x00002042),
+ RTW89_DECL_RFK_WM(0x0318, 0xffff0000, 0x00002002),
+ RTW89_DECL_RFK_WM(0x0020, 0x00006000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0024, 0x00006000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0704, 0xffff0000, 0x0000601e),
+ RTW89_DECL_RFK_WM(0x2704, 0xffff0000, 0x0000601e),
+ RTW89_DECL_RFK_WM(0x0700, 0xf0000000, 0x00000004),
+ RTW89_DECL_RFK_WM(0x2700, 0xf0000000, 0x00000004),
+ RTW89_DECL_RFK_WM(0x0650, 0x3c000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2650, 0x3c000000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs);
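
Note: from here down the patch is a mechanical rename: DECL_RFK_WM becomes RTW89_DECL_RFK_WM and DECLARE_RFK_TBL becomes RTW89_DECLARE_RFK_TBL, with every address/mask/value triple unchanged, which is why the hunk header counts 1603 lines removed and 1603 added. The macro definitions live in a header outside this diff; below is a hedged reconstruction of what they plausibly expand to, inferred only from how they are used in this file:

/*
 * Sketch types and macros, inferred from usage; the real definitions and
 * flag values may differ. _WM entries carry a write-mask op plus the
 * (addr, mask, data) triple; DECLARE wraps an array into a sized table.
 */
struct rtw89_reg5_def_sketch {
	unsigned char flag;		/* e.g. a write-mask opcode for _WM */
	unsigned int addr;
	unsigned int mask;
	unsigned int data;
};

struct rtw89_rfk_tbl_sketch {
	const struct rtw89_reg5_def_sketch *defs;
	unsigned int size;
};

#define SKETCH_DECL_RFK_WM(_addr, _mask, _data) \
	{ .flag = 1 /* write-mask */, .addr = (_addr), .mask = (_mask), .data = (_data) }

#define SKETCH_DECLARE_RFK_TBL(_name)				\
	const struct rtw89_rfk_tbl_sketch _name ## _tbl = {	\
		.defs = (_name),				\
		.size = sizeof(_name) / sizeof((_name)[0]),	\
	}

/* Usage mirroring the table above: */
static const struct rtw89_reg5_def_sketch demo[] = {
	SKETCH_DECL_RFK_WM(0x12a8, 0x00000001, 0x00000001),
};
SKETCH_DECLARE_RFK_TBL(demo);
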
static const struct rtw89_reg5_def rtw8852a_tssi_sys_defs_2g[] = {
- DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033),
- DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033),
- DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033),
- DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033),
+ RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033),
+ RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033),
+ RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033),
+ RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_2g);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_2g);
static const struct rtw89_reg5_def rtw8852a_tssi_sys_defs_5g[] = {
- DECL_RFK_WM(0x120c, 0x000000ff, 0x00000044),
- DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000044),
- DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000044),
- DECL_RFK_WM(0x320c, 0x000000ff, 0x00000044),
+ RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x00000044),
+ RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000044),
+ RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000044),
+ RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x00000044),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_5g);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_5g);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_a[] = {
- DECL_RFK_WM(0x5800, 0x000000ff, 0x0000007f),
- DECL_RFK_WM(0x5800, 0x0000ff00, 0x00000080),
- DECL_RFK_WM(0x5800, 0x003f0000, 0x0000003f),
- DECL_RFK_WM(0x5800, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x5800, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x5800, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000),
- DECL_RFK_WM(0x580c, 0x0000007f, 0x00000040),
- DECL_RFK_WM(0x580c, 0x00007f00, 0x00000040),
- DECL_RFK_WM(0x580c, 0x00008000, 0x00000000),
- DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000),
- DECL_RFK_WM(0x5810, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x5810, 0x00000200, 0x00000000),
- DECL_RFK_WM(0x5810, 0x0000fc00, 0x00000000),
- DECL_RFK_WM(0x5810, 0x00010000, 0x00000001),
- DECL_RFK_WM(0x5810, 0x00fe0000, 0x00000000),
- DECL_RFK_WM(0x5810, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x5810, 0x06000000, 0x00000000),
- DECL_RFK_WM(0x5810, 0x38000000, 0x00000003),
- DECL_RFK_WM(0x5810, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x5810, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x5814, 0x00000c00, 0x00000000),
- DECL_RFK_WM(0x5814, 0x00001000, 0x00000001),
- DECL_RFK_WM(0x5814, 0x00002000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x00004000, 0x00000001),
- DECL_RFK_WM(0x5814, 0x00038000, 0x00000005),
- DECL_RFK_WM(0x5814, 0x003c0000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x01c00000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x18000000, 0x00000000),
- DECL_RFK_WM(0x5814, 0xe0000000, 0x00000000),
- DECL_RFK_WM(0x5818, 0x000000ff, 0x00000000),
- DECL_RFK_WM(0x5818, 0x0001ff00, 0x00000018),
- DECL_RFK_WM(0x5818, 0x03fe0000, 0x00000016),
- DECL_RFK_WM(0x5818, 0xfc000000, 0x00000000),
- DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280),
- DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x581c, 0x00100000, 0x00000000),
- DECL_RFK_WM(0x581c, 0x01e00000, 0x00000008),
- DECL_RFK_WM(0x581c, 0x01e00000, 0x0000000e),
- DECL_RFK_WM(0x581c, 0x1e000000, 0x00000008),
- DECL_RFK_WM(0x581c, 0x1e000000, 0x0000000e),
- DECL_RFK_WM(0x581c, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080),
- DECL_RFK_WM(0x5820, 0x0000f000, 0x0000000f),
- DECL_RFK_WM(0x5820, 0x001f0000, 0x00000000),
- DECL_RFK_WM(0x5820, 0xffe00000, 0x00000000),
- DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5840, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5860, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff),
- DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x5864, 0x03f00000, 0x00000000),
- DECL_RFK_WM(0x5864, 0x04000000, 0x00000000),
- DECL_RFK_WM(0x5898, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x589c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x58a0, 0x000000ff, 0x000000fd),
- DECL_RFK_WM(0x58a0, 0x0000ff00, 0x000000e5),
- DECL_RFK_WM(0x58a0, 0x00ff0000, 0x000000cd),
- DECL_RFK_WM(0x58a0, 0xff000000, 0x000000b5),
- DECL_RFK_WM(0x58a4, 0x000000ff, 0x00000016),
- DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x58b0, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x58b4, 0x0000001f, 0x00000000),
- DECL_RFK_WM(0x58b4, 0x00000020, 0x00000000),
- DECL_RFK_WM(0x58b4, 0x000001c0, 0x00000000),
- DECL_RFK_WM(0x58b4, 0x00000200, 0x00000000),
- DECL_RFK_WM(0x58b4, 0x0000f000, 0x00000002),
- DECL_RFK_WM(0x58b4, 0x00ff0000, 0x00000000),
- DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a),
- DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028),
- DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076),
- DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000),
- DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000),
- DECL_RFK_WM(0x58bc, 0x000000ff, 0x0000007f),
- DECL_RFK_WM(0x58bc, 0x0000ff00, 0x00000080),
- DECL_RFK_WM(0x58bc, 0x00030000, 0x00000003),
- DECL_RFK_WM(0x58bc, 0x000c0000, 0x00000001),
- DECL_RFK_WM(0x58bc, 0x00300000, 0x00000002),
- DECL_RFK_WM(0x58bc, 0x00c00000, 0x00000002),
- DECL_RFK_WM(0x58bc, 0x07000000, 0x00000007),
- DECL_RFK_WM(0x58c0, 0x00fe0000, 0x0000003f),
- DECL_RFK_WM(0x58c0, 0xff000000, 0x00000000),
- DECL_RFK_WM(0x58c4, 0x0003ffff, 0x0003ffff),
- DECL_RFK_WM(0x58c4, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x58c4, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x58c8, 0x00ffffff, 0x00000000),
- DECL_RFK_WM(0x58c8, 0xf0000000, 0x00000000),
- DECL_RFK_WM(0x58cc, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x58d0, 0x00001fff, 0x00000101),
- DECL_RFK_WM(0x58d0, 0x0001e000, 0x00000004),
- DECL_RFK_WM(0x58d0, 0x03fe0000, 0x00000100),
- DECL_RFK_WM(0x58d0, 0x04000000, 0x00000000),
- DECL_RFK_WM(0x58d4, 0x000000ff, 0x00000000),
- DECL_RFK_WM(0x58d4, 0x0003fe00, 0x000000ff),
- DECL_RFK_WM(0x58d4, 0x07fc0000, 0x00000100),
- DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c),
- DECL_RFK_WM(0x58d8, 0x0003fe00, 0x0000005c),
- DECL_RFK_WM(0x58d8, 0x000c0000, 0x00000002),
- DECL_RFK_WM(0x58d8, 0xfff00000, 0x00000800),
- DECL_RFK_WM(0x58dc, 0x000000ff, 0x0000007f),
- DECL_RFK_WM(0x58dc, 0x0000ff00, 0x00000080),
- DECL_RFK_WM(0x58dc, 0x00010000, 0x00000000),
- DECL_RFK_WM(0x58dc, 0x3ff00000, 0x00000000),
- DECL_RFK_WM(0x58dc, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x58f0, 0x000001ff, 0x000001ff),
- DECL_RFK_WM(0x58f0, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_a);
+ RTW89_DECL_RFK_WM(0x5800, 0x000000ff, 0x0000007f),
+ RTW89_DECL_RFK_WM(0x5800, 0x0000ff00, 0x00000080),
+ RTW89_DECL_RFK_WM(0x5800, 0x003f0000, 0x0000003f),
+ RTW89_DECL_RFK_WM(0x5800, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5800, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5800, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x580c, 0x0000007f, 0x00000040),
+ RTW89_DECL_RFK_WM(0x580c, 0x00007f00, 0x00000040),
+ RTW89_DECL_RFK_WM(0x580c, 0x00008000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5810, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5810, 0x00000200, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5810, 0x0000fc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5810, 0x00010000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5810, 0x00fe0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5810, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5810, 0x06000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5810, 0x38000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x5810, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5810, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x00000c00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x00001000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5814, 0x00002000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x00004000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5814, 0x00038000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x5814, 0x003c0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x01c00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0xe0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5818, 0x000000ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5818, 0x0001ff00, 0x00000018),
+ RTW89_DECL_RFK_WM(0x5818, 0x03fe0000, 0x00000016),
+ RTW89_DECL_RFK_WM(0x5818, 0xfc000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280),
+ RTW89_DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x581c, 0x01e00000, 0x00000008),
+ RTW89_DECL_RFK_WM(0x581c, 0x01e00000, 0x0000000e),
+ RTW89_DECL_RFK_WM(0x581c, 0x1e000000, 0x00000008),
+ RTW89_DECL_RFK_WM(0x581c, 0x1e000000, 0x0000000e),
+ RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080),
+ RTW89_DECL_RFK_WM(0x5820, 0x0000f000, 0x0000000f),
+ RTW89_DECL_RFK_WM(0x5820, 0x001f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5820, 0xffe00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5840, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5860, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff),
+ RTW89_DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x5864, 0x03f00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5864, 0x04000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5898, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x589c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a0, 0x000000ff, 0x000000fd),
+ RTW89_DECL_RFK_WM(0x58a0, 0x0000ff00, 0x000000e5),
+ RTW89_DECL_RFK_WM(0x58a0, 0x00ff0000, 0x000000cd),
+ RTW89_DECL_RFK_WM(0x58a0, 0xff000000, 0x000000b5),
+ RTW89_DECL_RFK_WM(0x58a4, 0x000000ff, 0x00000016),
+ RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x0000001f, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x00000020, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x000001c0, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x00000200, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x0000f000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x58b4, 0x00ff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a),
+ RTW89_DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028),
+ RTW89_DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076),
+ RTW89_DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58bc, 0x000000ff, 0x0000007f),
+ RTW89_DECL_RFK_WM(0x58bc, 0x0000ff00, 0x00000080),
+ RTW89_DECL_RFK_WM(0x58bc, 0x00030000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x58bc, 0x000c0000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58bc, 0x00300000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x58bc, 0x00c00000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x58bc, 0x07000000, 0x00000007),
+ RTW89_DECL_RFK_WM(0x58c0, 0x00fe0000, 0x0000003f),
+ RTW89_DECL_RFK_WM(0x58c0, 0xff000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58c4, 0x0003ffff, 0x0003ffff),
+ RTW89_DECL_RFK_WM(0x58c4, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58c4, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58c8, 0x00ffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58c8, 0xf0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58cc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58d0, 0x00001fff, 0x00000101),
+ RTW89_DECL_RFK_WM(0x58d0, 0x0001e000, 0x00000004),
+ RTW89_DECL_RFK_WM(0x58d0, 0x03fe0000, 0x00000100),
+ RTW89_DECL_RFK_WM(0x58d0, 0x04000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58d4, 0x000000ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58d4, 0x0003fe00, 0x000000ff),
+ RTW89_DECL_RFK_WM(0x58d4, 0x07fc0000, 0x00000100),
+ RTW89_DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c),
+ RTW89_DECL_RFK_WM(0x58d8, 0x0003fe00, 0x0000005c),
+ RTW89_DECL_RFK_WM(0x58d8, 0x000c0000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x58d8, 0xfff00000, 0x00000800),
+ RTW89_DECL_RFK_WM(0x58dc, 0x000000ff, 0x0000007f),
+ RTW89_DECL_RFK_WM(0x58dc, 0x0000ff00, 0x00000080),
+ RTW89_DECL_RFK_WM(0x58dc, 0x00010000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58dc, 0x3ff00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58dc, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58f0, 0x000001ff, 0x000001ff),
+ RTW89_DECL_RFK_WM(0x58f0, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_b[] = {
- DECL_RFK_WM(0x7800, 0x000000ff, 0x0000007f),
- DECL_RFK_WM(0x7800, 0x0000ff00, 0x00000080),
- DECL_RFK_WM(0x7800, 0x003f0000, 0x0000003f),
- DECL_RFK_WM(0x7800, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x7800, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x7800, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000),
- DECL_RFK_WM(0x780c, 0x0000007f, 0x00000040),
- DECL_RFK_WM(0x780c, 0x00007f00, 0x00000040),
- DECL_RFK_WM(0x780c, 0x00008000, 0x00000000),
- DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000),
- DECL_RFK_WM(0x7810, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x7810, 0x00000200, 0x00000000),
- DECL_RFK_WM(0x7810, 0x0000fc00, 0x00000000),
- DECL_RFK_WM(0x7810, 0x00010000, 0x00000001),
- DECL_RFK_WM(0x7810, 0x00fe0000, 0x00000000),
- DECL_RFK_WM(0x7810, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x7810, 0x06000000, 0x00000000),
- DECL_RFK_WM(0x7810, 0x38000000, 0x00000003),
- DECL_RFK_WM(0x7810, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x7810, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x7814, 0x00000c00, 0x00000000),
- DECL_RFK_WM(0x7814, 0x00001000, 0x00000001),
- DECL_RFK_WM(0x7814, 0x00002000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x00004000, 0x00000001),
- DECL_RFK_WM(0x7814, 0x00038000, 0x00000005),
- DECL_RFK_WM(0x7814, 0x003c0000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x01c00000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x18000000, 0x00000000),
- DECL_RFK_WM(0x7814, 0xe0000000, 0x00000000),
- DECL_RFK_WM(0x7818, 0x000000ff, 0x00000000),
- DECL_RFK_WM(0x7818, 0x0001ff00, 0x00000018),
- DECL_RFK_WM(0x7818, 0x03fe0000, 0x00000016),
- DECL_RFK_WM(0x7818, 0xfc000000, 0x00000000),
- DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280),
- DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x781c, 0x00100000, 0x00000000),
- DECL_RFK_WM(0x781c, 0x01e00000, 0x00000008),
- DECL_RFK_WM(0x781c, 0x01e00000, 0x0000000e),
- DECL_RFK_WM(0x781c, 0x1e000000, 0x00000008),
- DECL_RFK_WM(0x781c, 0x1e000000, 0x0000000e),
- DECL_RFK_WM(0x781c, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080),
- DECL_RFK_WM(0x7820, 0x0000f000, 0x00000000),
- DECL_RFK_WM(0x7820, 0x001f0000, 0x00000000),
- DECL_RFK_WM(0x7820, 0xffe00000, 0x00000000),
- DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7860, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7858, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7860, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff),
- DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x7864, 0x03f00000, 0x00000000),
- DECL_RFK_WM(0x7864, 0x04000000, 0x00000000),
- DECL_RFK_WM(0x7898, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x789c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x78a0, 0x000000ff, 0x000000fd),
- DECL_RFK_WM(0x78a0, 0x0000ff00, 0x000000e5),
- DECL_RFK_WM(0x78a0, 0x00ff0000, 0x000000cd),
- DECL_RFK_WM(0x78a0, 0xff000000, 0x000000b5),
- DECL_RFK_WM(0x78a4, 0x000000ff, 0x00000016),
- DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x78b0, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x78b4, 0x0000001f, 0x00000000),
- DECL_RFK_WM(0x78b4, 0x00000020, 0x00000000),
- DECL_RFK_WM(0x78b4, 0x000001c0, 0x00000000),
- DECL_RFK_WM(0x78b4, 0x00000200, 0x00000000),
- DECL_RFK_WM(0x78b4, 0x0000f000, 0x00000002),
- DECL_RFK_WM(0x78b4, 0x00ff0000, 0x00000000),
- DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a),
- DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028),
- DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076),
- DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000),
- DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000),
- DECL_RFK_WM(0x78bc, 0x000000ff, 0x0000007f),
- DECL_RFK_WM(0x78bc, 0x0000ff00, 0x00000080),
- DECL_RFK_WM(0x78bc, 0x00030000, 0x00000003),
- DECL_RFK_WM(0x78bc, 0x000c0000, 0x00000001),
- DECL_RFK_WM(0x78bc, 0x00300000, 0x00000002),
- DECL_RFK_WM(0x78bc, 0x00c00000, 0x00000002),
- DECL_RFK_WM(0x78bc, 0x07000000, 0x00000007),
- DECL_RFK_WM(0x78c0, 0x00fe0000, 0x0000003f),
- DECL_RFK_WM(0x78c0, 0xff000000, 0x00000000),
- DECL_RFK_WM(0x78c4, 0x0003ffff, 0x0003ffff),
- DECL_RFK_WM(0x78c4, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x78c4, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x78c8, 0x00ffffff, 0x00000000),
- DECL_RFK_WM(0x78c8, 0xf0000000, 0x00000000),
- DECL_RFK_WM(0x78cc, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x78d0, 0x00001fff, 0x00000101),
- DECL_RFK_WM(0x78d0, 0x0001e000, 0x00000004),
- DECL_RFK_WM(0x78d0, 0x03fe0000, 0x00000100),
- DECL_RFK_WM(0x78d0, 0x04000000, 0x00000000),
- DECL_RFK_WM(0x78d4, 0x000000ff, 0x00000000),
- DECL_RFK_WM(0x78d4, 0x0003fe00, 0x000000ff),
- DECL_RFK_WM(0x78d4, 0x07fc0000, 0x00000100),
- DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c),
- DECL_RFK_WM(0x78d8, 0x0003fe00, 0x0000005c),
- DECL_RFK_WM(0x78d8, 0x000c0000, 0x00000002),
- DECL_RFK_WM(0x78d8, 0xfff00000, 0x00000800),
- DECL_RFK_WM(0x78dc, 0x000000ff, 0x0000007f),
- DECL_RFK_WM(0x78dc, 0x0000ff00, 0x00000080),
- DECL_RFK_WM(0x78dc, 0x00010000, 0x00000000),
- DECL_RFK_WM(0x78dc, 0x3ff00000, 0x00000000),
- DECL_RFK_WM(0x78dc, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x78f0, 0x000001ff, 0x000001ff),
- DECL_RFK_WM(0x78f0, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_b);
+ RTW89_DECL_RFK_WM(0x7800, 0x000000ff, 0x0000007f),
+ RTW89_DECL_RFK_WM(0x7800, 0x0000ff00, 0x00000080),
+ RTW89_DECL_RFK_WM(0x7800, 0x003f0000, 0x0000003f),
+ RTW89_DECL_RFK_WM(0x7800, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7800, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7800, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x780c, 0x0000007f, 0x00000040),
+ RTW89_DECL_RFK_WM(0x780c, 0x00007f00, 0x00000040),
+ RTW89_DECL_RFK_WM(0x780c, 0x00008000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7810, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7810, 0x00000200, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7810, 0x0000fc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7810, 0x00010000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7810, 0x00fe0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7810, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7810, 0x06000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7810, 0x38000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x7810, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7810, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x00000c00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x00001000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7814, 0x00002000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x00004000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7814, 0x00038000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x7814, 0x003c0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x01c00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0xe0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7818, 0x000000ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7818, 0x0001ff00, 0x00000018),
+ RTW89_DECL_RFK_WM(0x7818, 0x03fe0000, 0x00000016),
+ RTW89_DECL_RFK_WM(0x7818, 0xfc000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280),
+ RTW89_DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x781c, 0x01e00000, 0x00000008),
+ RTW89_DECL_RFK_WM(0x781c, 0x01e00000, 0x0000000e),
+ RTW89_DECL_RFK_WM(0x781c, 0x1e000000, 0x00000008),
+ RTW89_DECL_RFK_WM(0x781c, 0x1e000000, 0x0000000e),
+ RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080),
+ RTW89_DECL_RFK_WM(0x7820, 0x0000f000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7820, 0x001f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7820, 0xffe00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7860, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7858, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7860, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff),
+ RTW89_DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x7864, 0x03f00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7864, 0x04000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7898, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x789c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a0, 0x000000ff, 0x000000fd),
+ RTW89_DECL_RFK_WM(0x78a0, 0x0000ff00, 0x000000e5),
+ RTW89_DECL_RFK_WM(0x78a0, 0x00ff0000, 0x000000cd),
+ RTW89_DECL_RFK_WM(0x78a0, 0xff000000, 0x000000b5),
+ RTW89_DECL_RFK_WM(0x78a4, 0x000000ff, 0x00000016),
+ RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x0000001f, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x00000020, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x000001c0, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x00000200, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x0000f000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x78b4, 0x00ff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a),
+ RTW89_DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028),
+ RTW89_DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076),
+ RTW89_DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78bc, 0x000000ff, 0x0000007f),
+ RTW89_DECL_RFK_WM(0x78bc, 0x0000ff00, 0x00000080),
+ RTW89_DECL_RFK_WM(0x78bc, 0x00030000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x78bc, 0x000c0000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78bc, 0x00300000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x78bc, 0x00c00000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x78bc, 0x07000000, 0x00000007),
+ RTW89_DECL_RFK_WM(0x78c0, 0x00fe0000, 0x0000003f),
+ RTW89_DECL_RFK_WM(0x78c0, 0xff000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78c4, 0x0003ffff, 0x0003ffff),
+ RTW89_DECL_RFK_WM(0x78c4, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78c4, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78c8, 0x00ffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78c8, 0xf0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78cc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78d0, 0x00001fff, 0x00000101),
+ RTW89_DECL_RFK_WM(0x78d0, 0x0001e000, 0x00000004),
+ RTW89_DECL_RFK_WM(0x78d0, 0x03fe0000, 0x00000100),
+ RTW89_DECL_RFK_WM(0x78d0, 0x04000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78d4, 0x000000ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78d4, 0x0003fe00, 0x000000ff),
+ RTW89_DECL_RFK_WM(0x78d4, 0x07fc0000, 0x00000100),
+ RTW89_DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c),
+ RTW89_DECL_RFK_WM(0x78d8, 0x0003fe00, 0x0000005c),
+ RTW89_DECL_RFK_WM(0x78d8, 0x000c0000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x78d8, 0xfff00000, 0x00000800),
+ RTW89_DECL_RFK_WM(0x78dc, 0x000000ff, 0x0000007f),
+ RTW89_DECL_RFK_WM(0x78dc, 0x0000ff00, 0x00000080),
+ RTW89_DECL_RFK_WM(0x78dc, 0x00010000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78dc, 0x3ff00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78dc, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78f0, 0x000001ff, 0x000001ff),
+ RTW89_DECL_RFK_WM(0x78f0, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_2g[] = {
- DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000013c),
- DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000013c),
+ RTW89_DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000013c),
+ RTW89_DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000013c),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_2g);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_2g);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_5g[] = {
- DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c),
- DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c),
+ RTW89_DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c),
+ RTW89_DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_5g);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_5g);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a[] = {
- DECL_RFK_WM(0x58a0, 0xffffffff, 0x000000fc),
- DECL_RFK_WM(0x58e4, 0x0000007f, 0x00000020),
+ RTW89_DECL_RFK_WM(0x58a0, 0xffffffff, 0x000000fc),
+ RTW89_DECL_RFK_WM(0x58e4, 0x0000007f, 0x00000020),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b[] = {
- DECL_RFK_WM(0x78a0, 0xffffffff, 0x000000fc),
- DECL_RFK_WM(0x78e4, 0x0000007f, 0x00000020),
+ RTW89_DECL_RFK_WM(0x78a0, 0xffffffff, 0x000000fc),
+ RTW89_DECL_RFK_WM(0x78e4, 0x0000007f, 0x00000020),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_dck_defs_a[] = {
- DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x00001000, 0x00000001),
- DECL_RFK_WM(0x5814, 0x00002000, 0x00000001),
- DECL_RFK_WM(0x5814, 0x00004000, 0x00000001),
- DECL_RFK_WM(0x5814, 0x00038000, 0x00000005),
- DECL_RFK_WM(0x5814, 0x003c0000, 0x00000003),
- DECL_RFK_WM(0x5814, 0x18000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x00001000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5814, 0x00002000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5814, 0x00004000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5814, 0x00038000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x5814, 0x003c0000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_dck_defs_b[] = {
- DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x00001000, 0x00000001),
- DECL_RFK_WM(0x7814, 0x00002000, 0x00000001),
- DECL_RFK_WM(0x7814, 0x00004000, 0x00000001),
- DECL_RFK_WM(0x7814, 0x00038000, 0x00000005),
- DECL_RFK_WM(0x7814, 0x003c0000, 0x00000003),
- DECL_RFK_WM(0x7814, 0x18000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x00001000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7814, 0x00002000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7814, 0x00004000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7814, 0x00038000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x7814, 0x003c0000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_dac_gain_tbl_defs_a[] = {
- DECL_RFK_WM(0x58b0, 0x00000fff, 0x00000000),
- DECL_RFK_WM(0x58b0, 0x00000800, 0x00000001),
- DECL_RFK_WM(0x5a00, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a04, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a08, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a0c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a10, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a14, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a18, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a1c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a20, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a24, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a28, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a2c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a30, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a34, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a38, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a3c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a40, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a44, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a48, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a4c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a50, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a54, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a58, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a5c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a60, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a64, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a68, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a6c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a70, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a74, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a78, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a7c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a80, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a84, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a88, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a8c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a90, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a94, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a98, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a9c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5aa0, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5aa4, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5aa8, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5aac, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5ab0, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5ab4, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5ab8, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5abc, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5ac0, 0xffffffff, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_a);
+ RTW89_DECL_RFK_WM(0x58b0, 0x00000fff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b0, 0x00000800, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5a00, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a04, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a08, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a0c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a10, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a14, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a18, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a1c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a20, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a24, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a28, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a2c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a30, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a34, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a38, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a3c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a40, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a44, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a48, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a4c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a50, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a54, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a58, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a5c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a60, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a64, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a68, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a6c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a70, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a74, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a78, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a7c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a80, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a84, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a88, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a8c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a90, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a94, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a98, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a9c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aa0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aa4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aa8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aac, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ab0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ab4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ab8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5abc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ac0, 0xffffffff, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_dac_gain_tbl_defs_b[] = {
- DECL_RFK_WM(0x78b0, 0x00000fff, 0x00000000),
- DECL_RFK_WM(0x78b0, 0x00000800, 0x00000001),
- DECL_RFK_WM(0x7a00, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a04, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a08, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a0c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a10, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a14, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a18, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a1c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a20, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a24, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a28, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a2c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a30, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a34, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a38, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a3c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a40, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a44, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a48, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a4c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a50, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a54, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a58, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a5c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a60, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a64, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a68, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a6c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a70, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a74, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a78, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a7c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a80, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a84, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a88, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a8c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a90, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a94, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a98, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a9c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7aa0, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7aa4, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7aa8, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7aac, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7ab0, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7ab4, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7ab8, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7abc, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7ac0, 0xffffffff, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_b);
+ RTW89_DECL_RFK_WM(0x78b0, 0x00000fff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b0, 0x00000800, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7a00, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a04, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a08, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a0c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a10, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a14, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a18, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a1c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a20, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a24, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a28, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a2c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a30, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a34, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a38, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a3c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a40, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a44, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a48, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a4c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a50, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a54, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a58, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a5c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a60, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a64, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a68, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a6c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a70, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a74, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a78, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a7c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a80, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a84, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a88, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a8c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a90, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a94, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a98, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a9c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aa0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aa4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aa8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aac, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ab0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ab4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ab8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7abc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ac0, 0xffffffff, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_slope_cal_org_defs_a[] = {
- DECL_RFK_WM(0x581c, 0x00100000, 0x00000000),
- DECL_RFK_WM(0x58cc, 0x00001000, 0x00000001),
- DECL_RFK_WM(0x58cc, 0x00000007, 0x00000000),
- DECL_RFK_WM(0x58cc, 0x00000038, 0x00000001),
- DECL_RFK_WM(0x58cc, 0x000001c0, 0x00000002),
- DECL_RFK_WM(0x58cc, 0x00000e00, 0x00000003),
- DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x5898, 0x000000ff, 0x00000040),
- DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x5898, 0x0000ff00, 0x00000040),
- DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x5898, 0x00ff0000, 0x00000040),
- DECL_RFK_WM(0x5840, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x5898, 0xff000000, 0x00000040),
- DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x589c, 0x000000ff, 0x00000040),
- DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x589c, 0x0000ff00, 0x00000040),
- DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x589c, 0x00ff0000, 0x00000040),
- DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x589c, 0xff000000, 0x00000040),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_a);
+ RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58cc, 0x00001000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58cc, 0x00000007, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58cc, 0x00000038, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58cc, 0x000001c0, 0x00000002),
+ RTW89_DECL_RFK_WM(0x58cc, 0x00000e00, 0x00000003),
+ RTW89_DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5898, 0x000000ff, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5898, 0x0000ff00, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5898, 0x00ff0000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5840, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5898, 0xff000000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x589c, 0x000000ff, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x589c, 0x0000ff00, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x589c, 0x00ff0000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x589c, 0xff000000, 0x00000040),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_slope_cal_org_defs_b[] = {
- DECL_RFK_WM(0x781c, 0x00100000, 0x00000000),
- DECL_RFK_WM(0x78cc, 0x00001000, 0x00000001),
- DECL_RFK_WM(0x78cc, 0x00000007, 0x00000000),
- DECL_RFK_WM(0x78cc, 0x00000038, 0x00000001),
- DECL_RFK_WM(0x78cc, 0x000001c0, 0x00000002),
- DECL_RFK_WM(0x78cc, 0x00000e00, 0x00000003),
- DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x7898, 0x000000ff, 0x00000040),
- DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x7898, 0x0000ff00, 0x00000040),
- DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x7898, 0x00ff0000, 0x00000040),
- DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x7898, 0xff000000, 0x00000040),
- DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x789c, 0x000000ff, 0x00000040),
- DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x789c, 0x0000ff00, 0x00000040),
- DECL_RFK_WM(0x7878, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x789c, 0x00ff0000, 0x00000040),
- DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x789c, 0xff000000, 0x00000040),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_b);
+ RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78cc, 0x00001000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78cc, 0x00000007, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78cc, 0x00000038, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78cc, 0x000001c0, 0x00000002),
+ RTW89_DECL_RFK_WM(0x78cc, 0x00000e00, 0x00000003),
+ RTW89_DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7898, 0x000000ff, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7898, 0x0000ff00, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7898, 0x00ff0000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7898, 0xff000000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x789c, 0x000000ff, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x789c, 0x0000ff00, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7878, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x789c, 0x00ff0000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x789c, 0xff000000, 0x00000040),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_rf_gap_tbl_defs_a[] = {
- DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_a);
+ RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_rf_gap_tbl_defs_b[] = {
- DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_b);
+ RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_slope_defs_a[] = {
- DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x00000800, 0x00000001),
- DECL_RFK_WM(0x581c, 0x20000000, 0x00000001),
- DECL_RFK_WM(0x5820, 0x0000f000, 0x00000001),
- DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280),
- DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000),
- DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000),
- DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a),
- DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028),
- DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076),
- DECL_RFK_WM(0x5810, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x20000000, 0x00000001),
- DECL_RFK_WM(0x580c, 0x10000000, 0x00000001),
- DECL_RFK_WM(0x580c, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_a);
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x00000001),
+ RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5820, 0x0000f000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280),
+ RTW89_DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a),
+ RTW89_DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028),
+ RTW89_DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076),
+ RTW89_DECL_RFK_WM(0x5810, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x20000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x580c, 0x10000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x580c, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_slope_defs_b[] = {
- DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x00000800, 0x00000001),
- DECL_RFK_WM(0x781c, 0x20000000, 0x00000001),
- DECL_RFK_WM(0x7820, 0x0000f000, 0x00000001),
- DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280),
- DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000),
- DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000),
- DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a),
- DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028),
- DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076),
- DECL_RFK_WM(0x7810, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x20000000, 0x00000001),
- DECL_RFK_WM(0x780c, 0x10000000, 0x00000001),
- DECL_RFK_WM(0x780c, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7860, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_b);
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x00000001),
+ RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7820, 0x0000f000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280),
+ RTW89_DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a),
+ RTW89_DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028),
+ RTW89_DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076),
+ RTW89_DECL_RFK_WM(0x7810, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x20000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x780c, 0x10000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x780c, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7860, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_track_defs_a[] = {
- DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x5818, 0x18000000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x00000800, 0x00000000),
- DECL_RFK_WM(0x581c, 0x20000000, 0x00000001),
- DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff),
- DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080),
- DECL_RFK_WM(0x5814, 0x01000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5818, 0x18000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x00000000),
+ RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff),
+ RTW89_DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080),
+ RTW89_DECL_RFK_WM(0x5814, 0x01000000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_track_defs_b[] = {
- DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x7818, 0x18000000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x00000800, 0x00000000),
- DECL_RFK_WM(0x781c, 0x20000000, 0x00000001),
- DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff),
- DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080),
- DECL_RFK_WM(0x7814, 0x01000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7818, 0x18000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x00000000),
+ RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff),
+ RTW89_DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080),
+ RTW89_DECL_RFK_WM(0x7814, 0x01000000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_txagc_ofst_mv_avg_defs_a[] = {
- DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000),
- DECL_RFK_WM(0x58e4, 0x00004000, 0x00000001),
- DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000),
- DECL_RFK_WM(0x58e4, 0x00008000, 0x00000000),
- DECL_RFK_WM(0x58e4, 0x000f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58e4, 0x00008000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58e4, 0x000f0000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_txagc_ofst_mv_avg_defs_b[] = {
- DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000),
- DECL_RFK_WM(0x78e4, 0x00004000, 0x00000001),
- DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000),
- DECL_RFK_WM(0x78e4, 0x00008000, 0x00000000),
- DECL_RFK_WM(0x78e4, 0x000f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78e4, 0x00008000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78e4, 0x000f0000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_2g[] = {
- DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d0),
- DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001e8),
- DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x000001ff, 0x0000000b),
- DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000088),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_2g);
+ RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d0),
+ RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001e8),
+ RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x0000000b),
+ RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000088),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_2g);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_5g_1[] = {
- DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d7),
- DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fb),
- DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000005),
- DECL_RFK_WM(0x58ac, 0x07fc0000, 0x0000007c),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_1);
+ RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d7),
+ RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fb),
+ RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000005),
+ RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x0000007c),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_1);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_5g_3[] = {
- DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d8),
- DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fc),
- DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000006),
- DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000078),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_3);
+ RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d8),
+ RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fc),
+ RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000006),
+ RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000078),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_3);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_5g_4[] = {
- DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001e5),
- DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x0003fe00, 0x0000000a),
- DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000011),
- DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000075),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_4);
+ RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001e5),
+ RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x0000000a),
+ RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000011),
+ RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000075),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_4);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_2g[] = {
- DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001cc),
- DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001e2),
- DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000005),
- DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000089),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_2g);
+ RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001cc),
+ RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001e2),
+ RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000005),
+ RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000089),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_2g);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_5g_1[] = {
- DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001d5),
- DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001fc),
- DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000005),
- DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000079),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_1);
+ RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001d5),
+ RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001fc),
+ RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000005),
+ RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000079),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_1);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_5g_3[] = {
- DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001dc),
- DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000002),
- DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000000b),
- DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000076),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_3);
+ RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001dc),
+ RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000002),
+ RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000000b),
+ RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000076),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_3);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_5g_4[] = {
- DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001f0),
- DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000016),
- DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000001f),
- DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000072),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_4);
+ RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001f0),
+ RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000016),
+ RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000001f),
+ RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000072),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_4);
static const struct rtw89_reg5_def rtw8852a_tssi_enable_defs_a[] = {
- DECL_RFK_WRF(0x0, 0x55, 0x00080, 0x00001),
- DECL_RFK_WM(0x5818, 0x000000ff, 0x000000c0),
- DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x5818, 0x10000000, 0x00000001),
- DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x5820, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x5818, 0x18000000, 0x00000003),
+ RTW89_DECL_RFK_WRF(0x0, 0x55, 0x00080, 0x00001),
+ RTW89_DECL_RFK_WM(0x5818, 0x000000ff, 0x000000c0),
+ RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5818, 0x18000000, 0x00000003),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_enable_defs_b[] = {
- DECL_RFK_WRF(0x1, 0x55, 0x00080, 0x00001),
- DECL_RFK_WM(0x7818, 0x000000ff, 0x000000c0),
- DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x7818, 0x10000000, 0x00000001),
- DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x7820, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x7818, 0x18000000, 0x00000003),
+ RTW89_DECL_RFK_WRF(0x1, 0x55, 0x00080, 0x00001),
+ RTW89_DECL_RFK_WM(0x7818, 0x000000ff, 0x000000c0),
+ RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7818, 0x18000000, 0x00000003),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_disable_defs[] = {
- DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x5818, 0x18000000, 0x00000001),
- DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x7818, 0x18000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5818, 0x18000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7818, 0x18000000, 0x00000001),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_disable_defs);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_disable_defs);
static const struct rtw89_reg5_def rtw8852a_tssi_enable_defs_ab[] = {
- DECL_RFK_WM(0x5820, 0x80000000, 0x0),
- DECL_RFK_WM(0x5820, 0x80000000, 0x1),
- DECL_RFK_WM(0x5818, 0x18000000, 0x3),
- DECL_RFK_WM(0x7820, 0x80000000, 0x0),
- DECL_RFK_WM(0x7820, 0x80000000, 0x1),
- DECL_RFK_WM(0x7818, 0x18000000, 0x3),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5818, 0x18000000, 0x3),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7818, 0x18000000, 0x3),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_ab);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_ab);
static const struct rtw89_reg5_def rtw8852a_tssi_tracking_defs[] = {
- DECL_RFK_WM(0x5800, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
- DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000),
- DECL_RFK_WM(0x58f0, 0xfff00000, 0x00000400),
- DECL_RFK_WM(0x7800, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
- DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000),
- DECL_RFK_WM(0x78f0, 0xfff00000, 0x00000400),
+ RTW89_DECL_RFK_WM(0x5800, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f0, 0xfff00000, 0x00000400),
+ RTW89_DECL_RFK_WM(0x7800, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f0, 0xfff00000, 0x00000400),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_tracking_defs);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_tracking_defs);
static const struct rtw89_reg5_def rtw8852a_rfk_afe_init_defs[] = {
- DECL_RFK_WC(0x12ec, 0x00008000),
- DECL_RFK_WS(0x12ec, 0x00008000),
- DECL_RFK_WC(0x5e00, 0x00000001),
- DECL_RFK_WS(0x5e00, 0x00000001),
- DECL_RFK_WC(0x32ec, 0x00008000),
- DECL_RFK_WS(0x32ec, 0x00008000),
- DECL_RFK_WC(0x7e00, 0x00000001),
- DECL_RFK_WS(0x7e00, 0x00000001),
+ RTW89_DECL_RFK_WC(0x12ec, 0x00008000),
+ RTW89_DECL_RFK_WS(0x12ec, 0x00008000),
+ RTW89_DECL_RFK_WC(0x5e00, 0x00000001),
+ RTW89_DECL_RFK_WS(0x5e00, 0x00000001),
+ RTW89_DECL_RFK_WC(0x32ec, 0x00008000),
+ RTW89_DECL_RFK_WS(0x32ec, 0x00008000),
+ RTW89_DECL_RFK_WC(0x7e00, 0x00000001),
+ RTW89_DECL_RFK_WS(0x7e00, 0x00000001),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_afe_init_defs);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_afe_init_defs);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_reload_defs_a[] = {
- DECL_RFK_WS(0x5e00, 0x00000008),
- DECL_RFK_WS(0x5e50, 0x00000008),
- DECL_RFK_WS(0x5e10, 0x80000000),
- DECL_RFK_WS(0x5e60, 0x80000000),
- DECL_RFK_WC(0x5e00, 0x00000008),
- DECL_RFK_WC(0x5e50, 0x00000008),
+ RTW89_DECL_RFK_WS(0x5e00, 0x00000008),
+ RTW89_DECL_RFK_WS(0x5e50, 0x00000008),
+ RTW89_DECL_RFK_WS(0x5e10, 0x80000000),
+ RTW89_DECL_RFK_WS(0x5e60, 0x80000000),
+ RTW89_DECL_RFK_WC(0x5e00, 0x00000008),
+ RTW89_DECL_RFK_WC(0x5e50, 0x00000008),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_reload_defs_b[] = {
- DECL_RFK_WS(0x7e00, 0x00000008),
- DECL_RFK_WS(0x7e50, 0x00000008),
- DECL_RFK_WS(0x7e10, 0x80000000),
- DECL_RFK_WS(0x7e60, 0x80000000),
- DECL_RFK_WC(0x7e00, 0x00000008),
- DECL_RFK_WC(0x7e50, 0x00000008),
+ RTW89_DECL_RFK_WS(0x7e00, 0x00000008),
+ RTW89_DECL_RFK_WS(0x7e50, 0x00000008),
+ RTW89_DECL_RFK_WS(0x7e10, 0x80000000),
+ RTW89_DECL_RFK_WS(0x7e60, 0x80000000),
+ RTW89_DECL_RFK_WC(0x7e00, 0x00000008),
+ RTW89_DECL_RFK_WC(0x7e50, 0x00000008),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_check_addc_defs_a[] = {
- DECL_RFK_WC(0x20f4, 0x01000000),
- DECL_RFK_WS(0x20f8, 0x80000000),
- DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001),
- DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002),
- DECL_RFK_WC(0x20f0, 0x0000000f),
- DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000002),
+ RTW89_DECL_RFK_WC(0x20f4, 0x01000000),
+ RTW89_DECL_RFK_WS(0x20f8, 0x80000000),
+ RTW89_DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002),
+ RTW89_DECL_RFK_WC(0x20f0, 0x0000000f),
+ RTW89_DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000002),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_check_addc_defs_b[] = {
- DECL_RFK_WC(0x20f4, 0x01000000),
- DECL_RFK_WS(0x20f8, 0x80000000),
- DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001),
- DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002),
- DECL_RFK_WC(0x20f0, 0x0000000f),
- DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000003),
+ RTW89_DECL_RFK_WC(0x20f4, 0x01000000),
+ RTW89_DECL_RFK_WS(0x20f8, 0x80000000),
+ RTW89_DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002),
+ RTW89_DECL_RFK_WC(0x20f0, 0x0000000f),
+ RTW89_DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000003),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_reset_defs_a[] = {
- DECL_RFK_WC(0x12d8, 0x00000030),
- DECL_RFK_WC(0x32d8, 0x00000030),
- DECL_RFK_WS(0x12b8, 0x40000000),
- DECL_RFK_WC(0x032c, 0x40000000),
- DECL_RFK_WC(0x032c, 0x00400000),
- DECL_RFK_WS(0x032c, 0x00400000),
- DECL_RFK_WS(0x030c, 0x0f000000),
- DECL_RFK_WC(0x032c, 0x00010000),
- DECL_RFK_WS(0x12dc, 0x00000002),
- DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
+ RTW89_DECL_RFK_WC(0x12d8, 0x00000030),
+ RTW89_DECL_RFK_WC(0x32d8, 0x00000030),
+ RTW89_DECL_RFK_WS(0x12b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x40000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x00400000),
+ RTW89_DECL_RFK_WS(0x032c, 0x00400000),
+ RTW89_DECL_RFK_WS(0x030c, 0x0f000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x00010000),
+ RTW89_DECL_RFK_WS(0x12dc, 0x00000002),
+ RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_trigger_defs_a[] = {
- DECL_RFK_WS(0x12d8, 0x000000c0),
- DECL_RFK_WS(0x12d8, 0x00000800),
- DECL_RFK_WC(0x12d8, 0x00000800),
- DECL_RFK_DELAY(1),
- DECL_RFK_WM(0x12d8, 0x00000300, 0x00000001),
+ RTW89_DECL_RFK_WS(0x12d8, 0x000000c0),
+ RTW89_DECL_RFK_WS(0x12d8, 0x00000800),
+ RTW89_DECL_RFK_WC(0x12d8, 0x00000800),
+ RTW89_DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WM(0x12d8, 0x00000300, 0x00000001),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_restore_defs_a[] = {
- DECL_RFK_WC(0x12dc, 0x00000002),
- DECL_RFK_WS(0x032c, 0x00010000),
- DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c),
- DECL_RFK_WS(0x032c, 0x40000000),
- DECL_RFK_WC(0x12b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x12dc, 0x00000002),
+ RTW89_DECL_RFK_WS(0x032c, 0x00010000),
+ RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c),
+ RTW89_DECL_RFK_WS(0x032c, 0x40000000),
+ RTW89_DECL_RFK_WC(0x12b8, 0x40000000),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_reset_defs_b[] = {
- DECL_RFK_WS(0x32b8, 0x40000000),
- DECL_RFK_WC(0x032c, 0x40000000),
- DECL_RFK_WC(0x032c, 0x00400000),
- DECL_RFK_WS(0x032c, 0x00400000),
- DECL_RFK_WS(0x030c, 0x0f000000),
- DECL_RFK_WC(0x032c, 0x00010000),
- DECL_RFK_WS(0x32dc, 0x00000002),
- DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
+ RTW89_DECL_RFK_WS(0x32b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x40000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x00400000),
+ RTW89_DECL_RFK_WS(0x032c, 0x00400000),
+ RTW89_DECL_RFK_WS(0x030c, 0x0f000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x00010000),
+ RTW89_DECL_RFK_WS(0x32dc, 0x00000002),
+ RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_trigger_defs_b[] = {
- DECL_RFK_WS(0x32d8, 0x000000c0),
- DECL_RFK_WS(0x32d8, 0x00000800),
- DECL_RFK_WC(0x32d8, 0x00000800),
- DECL_RFK_DELAY(1),
- DECL_RFK_WM(0x32d8, 0x00000300, 0x00000001),
+ RTW89_DECL_RFK_WS(0x32d8, 0x000000c0),
+ RTW89_DECL_RFK_WS(0x32d8, 0x00000800),
+ RTW89_DECL_RFK_WC(0x32d8, 0x00000800),
+ RTW89_DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WM(0x32d8, 0x00000300, 0x00000001),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_restore_defs_b[] = {
- DECL_RFK_WC(0x32dc, 0x00000002),
- DECL_RFK_WS(0x032c, 0x00010000),
- DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c),
- DECL_RFK_WS(0x032c, 0x40000000),
- DECL_RFK_WC(0x32b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x32dc, 0x00000002),
+ RTW89_DECL_RFK_WS(0x032c, 0x00010000),
+ RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c),
+ RTW89_DECL_RFK_WS(0x032c, 0x40000000),
+ RTW89_DECL_RFK_WC(0x32b8, 0x40000000),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_f_a[] = {
- DECL_RFK_WC(0x032c, 0x40000000),
- DECL_RFK_WS(0x030c, 0x0f000000),
- DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
- DECL_RFK_WC(0x032c, 0x00010000),
- DECL_RFK_WS(0x12dc, 0x00000001),
- DECL_RFK_WS(0x12e8, 0x00000004),
- DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00001),
+ RTW89_DECL_RFK_WC(0x032c, 0x40000000),
+ RTW89_DECL_RFK_WS(0x030c, 0x0f000000),
+ RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
+ RTW89_DECL_RFK_WC(0x032c, 0x00010000),
+ RTW89_DECL_RFK_WS(0x12dc, 0x00000001),
+ RTW89_DECL_RFK_WS(0x12e8, 0x00000004),
+ RTW89_DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00001),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_a);
static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_f_b[] = {
- DECL_RFK_WC(0x032c, 0x40000000),
- DECL_RFK_WS(0x030c, 0x0f000000),
- DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
- DECL_RFK_WC(0x032c, 0x00010000),
- DECL_RFK_WS(0x32dc, 0x00000001),
- DECL_RFK_WS(0x32e8, 0x00000004),
- DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00001),
+ RTW89_DECL_RFK_WC(0x032c, 0x40000000),
+ RTW89_DECL_RFK_WS(0x030c, 0x0f000000),
+ RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
+ RTW89_DECL_RFK_WC(0x032c, 0x00010000),
+ RTW89_DECL_RFK_WS(0x32dc, 0x00000001),
+ RTW89_DECL_RFK_WS(0x32e8, 0x00000004),
+ RTW89_DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00001),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_b);
static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_r_a[] = {
- DECL_RFK_WC(0x12dc, 0x00000001),
- DECL_RFK_WC(0x12e8, 0x00000004),
- DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00000),
- DECL_RFK_WM(0x032c, 0x00010000, 0x00000001),
+ RTW89_DECL_RFK_WC(0x12dc, 0x00000001),
+ RTW89_DECL_RFK_WC(0x12e8, 0x00000004),
+ RTW89_DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00000),
+ RTW89_DECL_RFK_WM(0x032c, 0x00010000, 0x00000001),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_a);
static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_r_b[] = {
- DECL_RFK_WC(0x32dc, 0x00000001),
- DECL_RFK_WC(0x32e8, 0x00000004),
- DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00000),
- DECL_RFK_WM(0x032c, 0x00010000, 0x00000001),
+ RTW89_DECL_RFK_WC(0x32dc, 0x00000001),
+ RTW89_DECL_RFK_WC(0x32e8, 0x00000004),
+ RTW89_DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00000),
+ RTW89_DECL_RFK_WM(0x032c, 0x00010000, 0x00000001),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_f_a[] = {
- DECL_RFK_WS(0x5e00, 0x00000008),
- DECL_RFK_WC(0x5e10, 0x80000000),
- DECL_RFK_WS(0x5e50, 0x00000008),
- DECL_RFK_WC(0x5e60, 0x80000000),
- DECL_RFK_WS(0x12a0, 0x00008000),
- DECL_RFK_WM(0x12a0, 0x00007000, 0x00000003),
- DECL_RFK_WS(0x12b8, 0x40000000),
- DECL_RFK_WS(0x030c, 0x10000000),
- DECL_RFK_WC(0x032c, 0x80000000),
- DECL_RFK_WS(0x12e0, 0x00010000),
- DECL_RFK_WS(0x12e4, 0x0c000000),
- DECL_RFK_WM(0x5e00, 0x03ff0000, 0x00000030),
- DECL_RFK_WM(0x5e50, 0x03ff0000, 0x00000030),
- DECL_RFK_WC(0x5e00, 0x0c000000),
- DECL_RFK_WC(0x5e50, 0x0c000000),
- DECL_RFK_WC(0x5e0c, 0x00000008),
- DECL_RFK_WC(0x5e5c, 0x00000008),
- DECL_RFK_WS(0x5e0c, 0x00000001),
- DECL_RFK_WS(0x5e5c, 0x00000001),
- DECL_RFK_DELAY(1),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_a);
+ RTW89_DECL_RFK_WS(0x5e00, 0x00000008),
+ RTW89_DECL_RFK_WC(0x5e10, 0x80000000),
+ RTW89_DECL_RFK_WS(0x5e50, 0x00000008),
+ RTW89_DECL_RFK_WC(0x5e60, 0x80000000),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00008000),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00007000, 0x00000003),
+ RTW89_DECL_RFK_WS(0x12b8, 0x40000000),
+ RTW89_DECL_RFK_WS(0x030c, 0x10000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x80000000),
+ RTW89_DECL_RFK_WS(0x12e0, 0x00010000),
+ RTW89_DECL_RFK_WS(0x12e4, 0x0c000000),
+ RTW89_DECL_RFK_WM(0x5e00, 0x03ff0000, 0x00000030),
+ RTW89_DECL_RFK_WM(0x5e50, 0x03ff0000, 0x00000030),
+ RTW89_DECL_RFK_WC(0x5e00, 0x0c000000),
+ RTW89_DECL_RFK_WC(0x5e50, 0x0c000000),
+ RTW89_DECL_RFK_WC(0x5e0c, 0x00000008),
+ RTW89_DECL_RFK_WC(0x5e5c, 0x00000008),
+ RTW89_DECL_RFK_WS(0x5e0c, 0x00000001),
+ RTW89_DECL_RFK_WS(0x5e5c, 0x00000001),
+ RTW89_DECL_RFK_DELAY(1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_m_a[] = {
- DECL_RFK_WC(0x12e4, 0x0c000000),
- DECL_RFK_WS(0x5e0c, 0x00000008),
- DECL_RFK_WS(0x5e5c, 0x00000008),
- DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WC(0x12e4, 0x0c000000),
+ RTW89_DECL_RFK_WS(0x5e0c, 0x00000008),
+ RTW89_DECL_RFK_WS(0x5e5c, 0x00000008),
+ RTW89_DECL_RFK_DELAY(1),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_r_a[] = {
- DECL_RFK_WC(0x5e0c, 0x00000001),
- DECL_RFK_WC(0x5e5c, 0x00000001),
- DECL_RFK_WC(0x12e0, 0x00010000),
- DECL_RFK_WC(0x12a0, 0x00008000),
- DECL_RFK_WS(0x12a0, 0x00007000),
+ RTW89_DECL_RFK_WC(0x5e0c, 0x00000001),
+ RTW89_DECL_RFK_WC(0x5e5c, 0x00000001),
+ RTW89_DECL_RFK_WC(0x12e0, 0x00010000),
+ RTW89_DECL_RFK_WC(0x12a0, 0x00008000),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00007000),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_f_b[] = {
- DECL_RFK_WS(0x7e00, 0x00000008),
- DECL_RFK_WC(0x7e10, 0x80000000),
- DECL_RFK_WS(0x7e50, 0x00000008),
- DECL_RFK_WC(0x7e60, 0x80000000),
- DECL_RFK_WS(0x32a0, 0x00008000),
- DECL_RFK_WM(0x32a0, 0x00007000, 0x00000003),
- DECL_RFK_WS(0x32b8, 0x40000000),
- DECL_RFK_WS(0x030c, 0x10000000),
- DECL_RFK_WC(0x032c, 0x80000000),
- DECL_RFK_WS(0x32e0, 0x00010000),
- DECL_RFK_WS(0x32e4, 0x0c000000),
- DECL_RFK_WM(0x7e00, 0x03ff0000, 0x00000030),
- DECL_RFK_WM(0x7e50, 0x03ff0000, 0x00000030),
- DECL_RFK_WC(0x7e00, 0x0c000000),
- DECL_RFK_WC(0x7e50, 0x0c000000),
- DECL_RFK_WC(0x7e0c, 0x00000008),
- DECL_RFK_WC(0x7e5c, 0x00000008),
- DECL_RFK_WS(0x7e0c, 0x00000001),
- DECL_RFK_WS(0x7e5c, 0x00000001),
- DECL_RFK_DELAY(1),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_b);
+ RTW89_DECL_RFK_WS(0x7e00, 0x00000008),
+ RTW89_DECL_RFK_WC(0x7e10, 0x80000000),
+ RTW89_DECL_RFK_WS(0x7e50, 0x00000008),
+ RTW89_DECL_RFK_WC(0x7e60, 0x80000000),
+ RTW89_DECL_RFK_WS(0x32a0, 0x00008000),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00007000, 0x00000003),
+ RTW89_DECL_RFK_WS(0x32b8, 0x40000000),
+ RTW89_DECL_RFK_WS(0x030c, 0x10000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x80000000),
+ RTW89_DECL_RFK_WS(0x32e0, 0x00010000),
+ RTW89_DECL_RFK_WS(0x32e4, 0x0c000000),
+ RTW89_DECL_RFK_WM(0x7e00, 0x03ff0000, 0x00000030),
+ RTW89_DECL_RFK_WM(0x7e50, 0x03ff0000, 0x00000030),
+ RTW89_DECL_RFK_WC(0x7e00, 0x0c000000),
+ RTW89_DECL_RFK_WC(0x7e50, 0x0c000000),
+ RTW89_DECL_RFK_WC(0x7e0c, 0x00000008),
+ RTW89_DECL_RFK_WC(0x7e5c, 0x00000008),
+ RTW89_DECL_RFK_WS(0x7e0c, 0x00000001),
+ RTW89_DECL_RFK_WS(0x7e5c, 0x00000001),
+ RTW89_DECL_RFK_DELAY(1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_m_b[] = {
- DECL_RFK_WC(0x32e4, 0x0c000000),
- DECL_RFK_WM(0x7e0c, 0x00000008, 0x00000001),
- DECL_RFK_WM(0x7e5c, 0x00000008, 0x00000001),
- DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WC(0x32e4, 0x0c000000),
+ RTW89_DECL_RFK_WM(0x7e0c, 0x00000008, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7e5c, 0x00000008, 0x00000001),
+ RTW89_DECL_RFK_DELAY(1),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_r_b[] = {
- DECL_RFK_WC(0x7e0c, 0x00000001),
- DECL_RFK_WC(0x7e5c, 0x00000001),
- DECL_RFK_WC(0x32e0, 0x00010000),
- DECL_RFK_WC(0x32a0, 0x00008000),
- DECL_RFK_WS(0x32a0, 0x00007000),
+ RTW89_DECL_RFK_WC(0x7e0c, 0x00000001),
+ RTW89_DECL_RFK_WC(0x7e5c, 0x00000001),
+ RTW89_DECL_RFK_WC(0x32e0, 0x00010000),
+ RTW89_DECL_RFK_WC(0x32a0, 0x00008000),
+ RTW89_DECL_RFK_WS(0x32a0, 0x00007000),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sf_defs_a[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
- DECL_RFK_WS(0x12b8, 0x40000000),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
- DECL_RFK_WS(0x12b8, 0x10000000),
- DECL_RFK_WS(0x58c8, 0x01000000),
- DECL_RFK_WS(0x5864, 0xc0000000),
- DECL_RFK_WS(0x2008, 0x01ffffff),
- DECL_RFK_WS(0x0c1c, 0x00000004),
- DECL_RFK_WS(0x0700, 0x08000000),
- DECL_RFK_WS(0x0c70, 0x000003ff),
- DECL_RFK_WS(0x0c60, 0x00000003),
- DECL_RFK_WS(0x0c6c, 0x00000001),
- DECL_RFK_WS(0x58ac, 0x08000000),
- DECL_RFK_WS(0x0c3c, 0x00000200),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_a);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
+ RTW89_DECL_RFK_WS(0x12b8, 0x40000000),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
+ RTW89_DECL_RFK_WS(0x12b8, 0x10000000),
+ RTW89_DECL_RFK_WS(0x58c8, 0x01000000),
+ RTW89_DECL_RFK_WS(0x5864, 0xc0000000),
+ RTW89_DECL_RFK_WS(0x2008, 0x01ffffff),
+ RTW89_DECL_RFK_WS(0x0c1c, 0x00000004),
+ RTW89_DECL_RFK_WS(0x0700, 0x08000000),
+ RTW89_DECL_RFK_WS(0x0c70, 0x000003ff),
+ RTW89_DECL_RFK_WS(0x0c60, 0x00000003),
+ RTW89_DECL_RFK_WS(0x0c6c, 0x00000001),
+ RTW89_DECL_RFK_WS(0x58ac, 0x08000000),
+ RTW89_DECL_RFK_WS(0x0c3c, 0x00000200),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sr_defs_a[] = {
- DECL_RFK_WS(0x4490, 0x80000000),
- DECL_RFK_WS(0x12a0, 0x00007000),
- DECL_RFK_WS(0x12a0, 0x00008000),
- DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
- DECL_RFK_WS(0x12a0, 0x00080000),
- DECL_RFK_WS(0x0700, 0x01000000),
- DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111),
- DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+ RTW89_DECL_RFK_WS(0x4490, 0x80000000),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00007000),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00008000),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00080000),
+ RTW89_DECL_RFK_WS(0x0700, 0x01000000),
+ RTW89_DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111),
+ RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sf_defs_b[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
- DECL_RFK_WS(0x32b8, 0x40000000),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
- DECL_RFK_WS(0x32b8, 0x10000000),
- DECL_RFK_WS(0x78c8, 0x01000000),
- DECL_RFK_WS(0x7864, 0xc0000000),
- DECL_RFK_WS(0x2008, 0x01ffffff),
- DECL_RFK_WS(0x2c1c, 0x00000004),
- DECL_RFK_WS(0x2700, 0x08000000),
- DECL_RFK_WS(0x0c70, 0x000003ff),
- DECL_RFK_WS(0x0c60, 0x00000003),
- DECL_RFK_WS(0x0c6c, 0x00000001),
- DECL_RFK_WS(0x78ac, 0x08000000),
- DECL_RFK_WS(0x2c3c, 0x00000200),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_b);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
+ RTW89_DECL_RFK_WS(0x32b8, 0x40000000),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
+ RTW89_DECL_RFK_WS(0x32b8, 0x10000000),
+ RTW89_DECL_RFK_WS(0x78c8, 0x01000000),
+ RTW89_DECL_RFK_WS(0x7864, 0xc0000000),
+ RTW89_DECL_RFK_WS(0x2008, 0x01ffffff),
+ RTW89_DECL_RFK_WS(0x2c1c, 0x00000004),
+ RTW89_DECL_RFK_WS(0x2700, 0x08000000),
+ RTW89_DECL_RFK_WS(0x0c70, 0x000003ff),
+ RTW89_DECL_RFK_WS(0x0c60, 0x00000003),
+ RTW89_DECL_RFK_WS(0x0c6c, 0x00000001),
+ RTW89_DECL_RFK_WS(0x78ac, 0x08000000),
+ RTW89_DECL_RFK_WS(0x2c3c, 0x00000200),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sr_defs_b[] = {
- DECL_RFK_WS(0x6490, 0x80000000),
- DECL_RFK_WS(0x32a0, 0x00007000),
- DECL_RFK_WS(0x32a0, 0x00008000),
- DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
- DECL_RFK_WS(0x32a0, 0x00080000),
- DECL_RFK_WS(0x2700, 0x01000000),
- DECL_RFK_WM(0x2700, 0x06000000, 0x00000002),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222),
- DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+ RTW89_DECL_RFK_WS(0x6490, 0x80000000),
+ RTW89_DECL_RFK_WS(0x32a0, 0x00007000),
+ RTW89_DECL_RFK_WS(0x32a0, 0x00008000),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WS(0x32a0, 0x00080000),
+ RTW89_DECL_RFK_WS(0x2700, 0x01000000),
+ RTW89_DECL_RFK_WM(0x2700, 0x06000000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222),
+ RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_s_defs_ab[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
- DECL_RFK_WS(0x12b8, 0x40000000),
- DECL_RFK_WS(0x32b8, 0x40000000),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
- DECL_RFK_WS(0x12b8, 0x10000000),
- DECL_RFK_WS(0x58c8, 0x01000000),
- DECL_RFK_WS(0x78c8, 0x01000000),
- DECL_RFK_WS(0x5864, 0xc0000000),
- DECL_RFK_WS(0x7864, 0xc0000000),
- DECL_RFK_WS(0x2008, 0x01ffffff),
- DECL_RFK_WS(0x0c1c, 0x00000004),
- DECL_RFK_WS(0x0700, 0x08000000),
- DECL_RFK_WS(0x0c70, 0x000003ff),
- DECL_RFK_WS(0x0c60, 0x00000003),
- DECL_RFK_WS(0x0c6c, 0x00000001),
- DECL_RFK_WS(0x58ac, 0x08000000),
- DECL_RFK_WS(0x78ac, 0x08000000),
- DECL_RFK_WS(0x0c3c, 0x00000200),
- DECL_RFK_WS(0x2344, 0x80000000),
- DECL_RFK_WS(0x4490, 0x80000000),
- DECL_RFK_WS(0x12a0, 0x00007000),
- DECL_RFK_WS(0x12a0, 0x00008000),
- DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
- DECL_RFK_WS(0x12a0, 0x00080000),
- DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
- DECL_RFK_WS(0x32a0, 0x00080000),
- DECL_RFK_WS(0x0700, 0x01000000),
- DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
- DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
- DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_s_defs_ab);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+ RTW89_DECL_RFK_WS(0x12b8, 0x40000000),
+ RTW89_DECL_RFK_WS(0x32b8, 0x40000000),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
+ RTW89_DECL_RFK_WS(0x12b8, 0x10000000),
+ RTW89_DECL_RFK_WS(0x58c8, 0x01000000),
+ RTW89_DECL_RFK_WS(0x78c8, 0x01000000),
+ RTW89_DECL_RFK_WS(0x5864, 0xc0000000),
+ RTW89_DECL_RFK_WS(0x7864, 0xc0000000),
+ RTW89_DECL_RFK_WS(0x2008, 0x01ffffff),
+ RTW89_DECL_RFK_WS(0x0c1c, 0x00000004),
+ RTW89_DECL_RFK_WS(0x0700, 0x08000000),
+ RTW89_DECL_RFK_WS(0x0c70, 0x000003ff),
+ RTW89_DECL_RFK_WS(0x0c60, 0x00000003),
+ RTW89_DECL_RFK_WS(0x0c6c, 0x00000001),
+ RTW89_DECL_RFK_WS(0x58ac, 0x08000000),
+ RTW89_DECL_RFK_WS(0x78ac, 0x08000000),
+ RTW89_DECL_RFK_WS(0x0c3c, 0x00000200),
+ RTW89_DECL_RFK_WS(0x2344, 0x80000000),
+ RTW89_DECL_RFK_WS(0x4490, 0x80000000),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00007000),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00008000),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00080000),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WS(0x32a0, 0x00080000),
+ RTW89_DECL_RFK_WS(0x0700, 0x01000000),
+ RTW89_DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
+ RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_s_defs_ab);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_r_defs_a[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
- DECL_RFK_WC(0x12b8, 0x40000000),
- DECL_RFK_WC(0x5864, 0xc0000000),
- DECL_RFK_WC(0x2008, 0x01ffffff),
- DECL_RFK_WC(0x0c1c, 0x00000004),
- DECL_RFK_WC(0x0700, 0x08000000),
- DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
- DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
- DECL_RFK_WC(0x12a0, 0x000ff000),
- DECL_RFK_WC(0x0700, 0x07000000),
- DECL_RFK_WC(0x5864, 0x20000000),
- DECL_RFK_WC(0x0c3c, 0x00000200),
- DECL_RFK_WC(0x20fc, 0xffff0000),
- DECL_RFK_WC(0x58c8, 0x01000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_a);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
+ RTW89_DECL_RFK_WC(0x12b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x5864, 0xc0000000),
+ RTW89_DECL_RFK_WC(0x2008, 0x01ffffff),
+ RTW89_DECL_RFK_WC(0x0c1c, 0x00000004),
+ RTW89_DECL_RFK_WC(0x0700, 0x08000000),
+ RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ RTW89_DECL_RFK_WC(0x12a0, 0x000ff000),
+ RTW89_DECL_RFK_WC(0x0700, 0x07000000),
+ RTW89_DECL_RFK_WC(0x5864, 0x20000000),
+ RTW89_DECL_RFK_WC(0x0c3c, 0x00000200),
+ RTW89_DECL_RFK_WC(0x20fc, 0xffff0000),
+ RTW89_DECL_RFK_WC(0x58c8, 0x01000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_r_defs_b[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
- DECL_RFK_WC(0x32b8, 0x40000000),
- DECL_RFK_WC(0x7864, 0xc0000000),
- DECL_RFK_WC(0x2008, 0x01ffffff),
- DECL_RFK_WC(0x2c1c, 0x00000004),
- DECL_RFK_WC(0x2700, 0x08000000),
- DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
- DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
- DECL_RFK_WC(0x32a0, 0x000ff000),
- DECL_RFK_WC(0x2700, 0x07000000),
- DECL_RFK_WC(0x7864, 0x20000000),
- DECL_RFK_WC(0x2c3c, 0x00000200),
- DECL_RFK_WC(0x20fc, 0xffff0000),
- DECL_RFK_WC(0x78c8, 0x01000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_b);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
+ RTW89_DECL_RFK_WC(0x32b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x7864, 0xc0000000),
+ RTW89_DECL_RFK_WC(0x2008, 0x01ffffff),
+ RTW89_DECL_RFK_WC(0x2c1c, 0x00000004),
+ RTW89_DECL_RFK_WC(0x2700, 0x08000000),
+ RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ RTW89_DECL_RFK_WC(0x32a0, 0x000ff000),
+ RTW89_DECL_RFK_WC(0x2700, 0x07000000),
+ RTW89_DECL_RFK_WC(0x7864, 0x20000000),
+ RTW89_DECL_RFK_WC(0x2c3c, 0x00000200),
+ RTW89_DECL_RFK_WC(0x20fc, 0xffff0000),
+ RTW89_DECL_RFK_WC(0x78c8, 0x01000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_r_defs_ab[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
- DECL_RFK_WC(0x12b8, 0x40000000),
- DECL_RFK_WC(0x32b8, 0x40000000),
- DECL_RFK_WC(0x5864, 0xc0000000),
- DECL_RFK_WC(0x7864, 0xc0000000),
- DECL_RFK_WC(0x2008, 0x01ffffff),
- DECL_RFK_WC(0x0c1c, 0x00000004),
- DECL_RFK_WC(0x0700, 0x08000000),
- DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
- DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
- DECL_RFK_WC(0x12a0, 0x000ff000),
- DECL_RFK_WC(0x32a0, 0x000ff000),
- DECL_RFK_WC(0x0700, 0x07000000),
- DECL_RFK_WC(0x5864, 0x20000000),
- DECL_RFK_WC(0x7864, 0x20000000),
- DECL_RFK_WC(0x0c3c, 0x00000200),
- DECL_RFK_WC(0x20fc, 0xffff0000),
- DECL_RFK_WC(0x58c8, 0x01000000),
- DECL_RFK_WC(0x78c8, 0x01000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_ab);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+ RTW89_DECL_RFK_WC(0x12b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x32b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x5864, 0xc0000000),
+ RTW89_DECL_RFK_WC(0x7864, 0xc0000000),
+ RTW89_DECL_RFK_WC(0x2008, 0x01ffffff),
+ RTW89_DECL_RFK_WC(0x0c1c, 0x00000004),
+ RTW89_DECL_RFK_WC(0x0700, 0x08000000),
+ RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ RTW89_DECL_RFK_WC(0x12a0, 0x000ff000),
+ RTW89_DECL_RFK_WC(0x32a0, 0x000ff000),
+ RTW89_DECL_RFK_WC(0x0700, 0x07000000),
+ RTW89_DECL_RFK_WC(0x5864, 0x20000000),
+ RTW89_DECL_RFK_WC(0x7864, 0x20000000),
+ RTW89_DECL_RFK_WC(0x0c3c, 0x00000200),
+ RTW89_DECL_RFK_WC(0x20fc, 0xffff0000),
+ RTW89_DECL_RFK_WC(0x58c8, 0x01000000),
+ RTW89_DECL_RFK_WC(0x78c8, 0x01000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_ab);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_lbk_rxiqk_defs_f[] = {
- DECL_RFK_WM(0x030c, 0xff000000, 0x0000000f),
- DECL_RFK_DELAY(1),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000003),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a001),
- DECL_RFK_DELAY(1),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a041),
- DECL_RFK_WS(0x8074, 0x80000000),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x0000000f),
+ RTW89_DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a001),
+ RTW89_DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a041),
+ RTW89_DECL_RFK_WS(0x8074, 0x80000000),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_f);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_f);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_lbk_rxiqk_defs_r[] = {
- DECL_RFK_WC(0x8074, 0x80000000),
- DECL_RFK_WM(0x030c, 0xff000000, 0x0000001f),
- DECL_RFK_DELAY(1),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
- DECL_RFK_DELAY(1),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
+ RTW89_DECL_RFK_WC(0x8074, 0x80000000),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x0000001f),
+ RTW89_DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
+ RTW89_DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_r);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_r);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_pas_read_defs[] = {
- DECL_RFK_WM(0x80d4, 0x00ff0000, 0x00000006),
- DECL_RFK_WC(0x80bc, 0x00004000),
- DECL_RFK_WM(0x80c0, 0x00ff0000, 0x00000008),
+ RTW89_DECL_RFK_WM(0x80d4, 0x00ff0000, 0x00000006),
+ RTW89_DECL_RFK_WC(0x80bc, 0x00004000),
+ RTW89_DECL_RFK_WM(0x80c0, 0x00ff0000, 0x00000008),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_pas_read_defs);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_pas_read_defs);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_set_defs_nondbcc_path01[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
- DECL_RFK_WM(0x5864, 0x18000000, 0x00000003),
- DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
- DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
- DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001),
- DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003),
- DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003),
- DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
- DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001),
- DECL_RFK_WM(0x0700, 0x08000000, 0x00000001),
- DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
- DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
- DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
- DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001),
- DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001),
- DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001),
- DECL_RFK_WM(0x2344, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x4490, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007),
- DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001),
- DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
- DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001),
- DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
- DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001),
- DECL_RFK_WM(0x0700, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
- DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
- DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_nondbcc_path01);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+ RTW89_DECL_RFK_WM(0x5864, 0x18000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
+ RTW89_DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0700, 0x08000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
+ RTW89_DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001),
+ RTW89_DECL_RFK_WM(0x2344, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x4490, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0700, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
+ RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_nondbcc_path01);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_set_defs_dbcc_path0[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
- DECL_RFK_WM(0x5864, 0x18000000, 0x00000003),
- DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
- DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
- DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001),
- DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003),
- DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
- DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001),
- DECL_RFK_WM(0x0700, 0x08000000, 0x00000001),
- DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
- DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
- DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
- DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001),
- DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001),
- DECL_RFK_WM(0x2320, 0x00000001, 0x00000001),
- DECL_RFK_WM(0x4490, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007),
- DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001),
- DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
- DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001),
- DECL_RFK_WM(0x0700, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111),
- DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path0);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
+ RTW89_DECL_RFK_WM(0x5864, 0x18000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
+ RTW89_DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0700, 0x08000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
+ RTW89_DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001),
+ RTW89_DECL_RFK_WM(0x2320, 0x00000001, 0x00000001),
+ RTW89_DECL_RFK_WM(0x4490, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0700, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111),
+ RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path0);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_set_defs_dbcc_path1[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
- DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
- DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
- DECL_RFK_WM(0x32b8, 0x10000000, 0x00000001),
- DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003),
- DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
- DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000001),
- DECL_RFK_WM(0x2700, 0x08000000, 0x00000001),
- DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
- DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
- DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
- DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001),
- DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000001),
- DECL_RFK_WM(0x6490, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x32a0, 0x00007000, 0x00000007),
- DECL_RFK_WM(0x32a0, 0x00008000, 0x00000001),
- DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
- DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001),
- DECL_RFK_WM(0x2700, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x2700, 0x06000000, 0x00000002),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222),
- DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path1);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
+ RTW89_DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x32b8, 0x10000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
+ RTW89_DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000001),
+ RTW89_DECL_RFK_WM(0x2700, 0x08000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
+ RTW89_DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000001),
+ RTW89_DECL_RFK_WM(0x6490, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00007000, 0x00000007),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00008000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x2700, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x2700, 0x06000000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222),
+ RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path1);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_restore_defs_nondbcc_path01[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
- DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000),
- DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000),
- DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
- DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000),
- DECL_RFK_WM(0x0700, 0x08000000, 0x00000000),
- DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
- DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
- DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000),
- DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000),
- DECL_RFK_WM(0x0700, 0x07000000, 0x00000000),
- DECL_RFK_WM(0x5864, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x7864, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000),
- DECL_RFK_WM(0x2320, 0x00000001, 0x00000000),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
- DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000),
- DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_nondbcc_path01);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+ RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0700, 0x08000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ RTW89_DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0700, 0x07000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5864, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7864, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2320, 0x00000001, 0x00000000),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_nondbcc_path01);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_restore_defs_dbcc_path0[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
- DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000),
- DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
- DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000),
- DECL_RFK_WM(0x0700, 0x08000000, 0x00000000),
- DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
- DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
- DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000),
- DECL_RFK_WM(0x0700, 0x07000000, 0x00000000),
- DECL_RFK_WM(0x5864, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
- DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path0);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
+ RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0700, 0x08000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ RTW89_DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0700, 0x07000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5864, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path0);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_restore_defs_dbcc_path1[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
- DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000),
- DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
- DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000000),
- DECL_RFK_WM(0x2700, 0x08000000, 0x00000000),
- DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
- DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
- DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000),
- DECL_RFK_WM(0x2700, 0x07000000, 0x00000000),
- DECL_RFK_WM(0x7864, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000000),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
- DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path1);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
+ RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2700, 0x08000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ RTW89_DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2700, 0x07000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7864, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000000),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path1);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h
index 4a4a45d778ff..33e6c404ecf9 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h
@@ -5,54 +5,7 @@
#ifndef __RTW89_8852A_RFK_TABLE_H__
#define __RTW89_8852A_RFK_TABLE_H__
-#include "core.h"
-
-enum rtw89_rfk_flag {
- RTW89_RFK_F_WRF = 0,
- RTW89_RFK_F_WM = 1,
- RTW89_RFK_F_WS = 2,
- RTW89_RFK_F_WC = 3,
- RTW89_RFK_F_DELAY = 4,
- RTW89_RFK_F_NUM,
-};
-
-struct rtw89_rfk_tbl {
- const struct rtw89_reg5_def *defs;
- u32 size;
-};
-
-#define DECLARE_RFK_TBL(_name) \
-const struct rtw89_rfk_tbl _name ## _tbl = { \
- .defs = _name, \
- .size = ARRAY_SIZE(_name), \
-}
-
-#define DECL_RFK_WRF(_path, _addr, _mask, _data) \
- {.flag = RTW89_RFK_F_WRF, \
- .path = _path, \
- .addr = _addr, \
- .mask = _mask, \
- .data = _data,}
-
-#define DECL_RFK_WM(_addr, _mask, _data) \
- {.flag = RTW89_RFK_F_WM, \
- .addr = _addr, \
- .mask = _mask, \
- .data = _data,}
-
-#define DECL_RFK_WS(_addr, _mask) \
- {.flag = RTW89_RFK_F_WS, \
- .addr = _addr, \
- .mask = _mask,}
-
-#define DECL_RFK_WC(_addr, _mask) \
- {.flag = RTW89_RFK_F_WC, \
- .addr = _addr, \
- .mask = _mask,}
-
-#define DECL_RFK_DELAY(_data) \
- {.flag = RTW89_RFK_F_DELAY, \
- .data = _data,}
+#include "phy.h"
extern const struct rtw89_rfk_tbl rtw8852a_tssi_sys_defs_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_sys_defs_2g_tbl;
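For reference, the renamed helper keeps the shape of the DECLARE_RFK_TBL macro removed above: it pairs a rtw89_reg5_def array with its ARRAY_SIZE in a const rtw89_rfk_tbl named after the array. A minimal user-space sketch of the same pattern (struct fields reduced, table contents hypothetical):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct rtw89_reg5_def { unsigned int flag, path, addr, mask, data; };

struct rtw89_rfk_tbl {
	const struct rtw89_reg5_def *defs;
	unsigned int size;
};

/* same expansion as RTW89_DECLARE_RFK_TBL(): defines <name>_tbl */
#define RTW89_DECLARE_RFK_TBL(_name)		\
const struct rtw89_rfk_tbl _name ## _tbl = {	\
	.defs = _name,				\
	.size = ARRAY_SIZE(_name),		\
}

static const struct rtw89_reg5_def demo_defs[] = {
	{ .flag = 1, .addr = 0x20fc, .mask = 0xffff0000, .data = 0x0303 },
};

RTW89_DECLARE_RFK_TBL(demo_defs);

int main(void)
{
	printf("demo_defs_tbl has %u entries\n", demo_defs_tbl.size);
	return 0;
}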
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
new file mode 100644
index 000000000000..48459aba441d
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2020-2021 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "pci.h"
+#include "rtw8852a.h"
+
+static const struct rtw89_pci_info rtw8852a_pci_info = {
+ .dma_addr_set = &rtw89_pci_ch_dma_addr_set,
+};
+
+static const struct rtw89_driver_info rtw89_8852ae_info = {
+ .chip = &rtw8852a_chip_info,
+ .bus = {
+ .pci = &rtw8852a_pci_info,
+ },
+};
+
+static const struct pci_device_id rtw89_8852ae_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8852),
+ .driver_data = (kernel_ulong_t)&rtw89_8852ae_info,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xa85a),
+ .driver_data = (kernel_ulong_t)&rtw89_8852ae_info,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(pci, rtw89_8852ae_id_table);
+
+static struct pci_driver rtw89_8852ae_driver = {
+ .name = "rtw89_8852ae",
+ .id_table = rtw89_8852ae_id_table,
+ .probe = rtw89_pci_probe,
+ .remove = rtw89_pci_remove,
+ .driver.pm = &rtw89_pm_ops,
+};
+module_pci_driver(rtw89_8852ae_driver);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852AE driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
new file mode 100644
index 000000000000..58920e91765e
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -0,0 +1,529 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#include "debug.h"
+#include "fw.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8852c.h"
+
+static const struct rtw89_dle_mem rtw8852c_dle_mem_pcie[] = {
+ [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_wde_size19, &rtw89_ple_size19,
+ &rtw89_wde_qt18, &rtw89_wde_qt18, &rtw89_ple_qt46,
+ &rtw89_ple_qt47},
+ [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_wde_size18,
+ &rtw89_ple_size18, &rtw89_wde_qt17, &rtw89_wde_qt17,
+ &rtw89_ple_qt44, &rtw89_ple_qt45},
+ [RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL,
+ NULL},
+};
+
+static const u32 rtw8852c_h2c_regs[RTW89_H2CREG_MAX] = {
+ R_AX_H2CREG_DATA0_V1, R_AX_H2CREG_DATA1_V1, R_AX_H2CREG_DATA2_V1,
+ R_AX_H2CREG_DATA3_V1
+};
+
+static const u32 rtw8852c_c2h_regs[RTW89_H2CREG_MAX] = {
+ R_AX_C2HREG_DATA0_V1, R_AX_C2HREG_DATA1_V1, R_AX_C2HREG_DATA2_V1,
+ R_AX_C2HREG_DATA3_V1
+};
+
+static const struct rtw89_page_regs rtw8852c_page_regs = {
+ .hci_fc_ctrl = R_AX_HCI_FC_CTRL_V1,
+ .ch_page_ctrl = R_AX_CH_PAGE_CTRL_V1,
+ .ach_page_ctrl = R_AX_ACH0_PAGE_CTRL_V1,
+ .ach_page_info = R_AX_ACH0_PAGE_INFO_V1,
+ .pub_page_info3 = R_AX_PUB_PAGE_INFO3_V1,
+ .pub_page_ctrl1 = R_AX_PUB_PAGE_CTRL1_V1,
+ .pub_page_ctrl2 = R_AX_PUB_PAGE_CTRL2_V1,
+ .pub_page_info1 = R_AX_PUB_PAGE_INFO1_V1,
+ .pub_page_info2 = R_AX_PUB_PAGE_INFO2_V1,
+ .wp_page_ctrl1 = R_AX_WP_PAGE_CTRL1_V1,
+ .wp_page_ctrl2 = R_AX_WP_PAGE_CTRL2_V1,
+ .wp_page_info1 = R_AX_WP_PAGE_INFO1_V1,
+};
+
+static const struct rtw89_reg_def rtw8852c_dcfo_comp = {
+ R_DCFO_COMP_S0_V1, B_DCFO_COMP_S0_V1_MSK
+};
+
+static int rtw8852c_pwr_on_func(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ u32 ret;
+
+ val32 = rtw89_read32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_PAD_HCI_SEL_V2_MASK);
+ if (val32 == MAC_AX_HCI_SEL_PCIE_USB)
+ rtw89_write32_set(rtwdev, R_AX_LDO_AON_CTRL0, B_AX_PD_REGU_L);
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_AFSM_WLSUS_EN |
+ B_AX_AFSM_PCIE_SUS_EN);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_DIS_WLBT_PDNSUSEN_SOPC);
+ rtw89_write32_set(rtwdev, R_AX_WLLPS_CTRL, B_AX_DIS_WLBT_LPSEN_LOPC);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APDM_HPDN);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);
+
+ ret = read_poll_timeout(rtw89_read32, val32, val32 & B_AX_RDY_SYSPWR,
+ 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFN_ONMAC);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_APFN_ONMAC),
+ 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write8_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write8_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+
+ rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_CALIB_EN_V1);
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, B_AX_CMAC1_FEN);
+ rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, B_AX_R_SYM_ISO_CMAC12PP);
+ rtw89_write32_clr(rtwdev, R_AX_AFE_CTRL1, B_AX_R_SYM_WLCMAC1_P4_PC_EN |
+ B_AX_R_SYM_WLCMAC1_P3_PC_EN |
+ B_AX_R_SYM_WLCMAC1_P2_PC_EN |
+ B_AX_R_SYM_WLCMAC1_P1_PC_EN |
+ B_AX_R_SYM_WLCMAC1_PC_EN);
+ rtw89_write32_set(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_PTA_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL,
+ XTAL_SI_GND_SHDN_WL, XTAL_SI_GND_SHDN_WL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_RFC_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL,
+ XTAL_SI_SHDN_WL, XTAL_SI_SHDN_WL);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_OFF_WEI,
+ XTAL_SI_OFF_WEI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_OFF_EI,
+ XTAL_SI_OFF_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_RFC2RF);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_PON_WEI,
+ XTAL_SI_PON_WEI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_PON_EI,
+ XTAL_SI_PON_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_SRAM2RFC);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_2, 0, XTAL_SI_LDO_LPS);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_4, 0, XTAL_SI_LPS_CAP);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+ rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_ISO_EB2CORE);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B15);
+
+ fsleep(1000);
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B14);
+ rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+ rtw89_write32_set(rtwdev, R_AX_GPIO0_15_EECS_EESK_LED1_PULL_LOW_EN,
+ B_AX_EECS_PULL_LOW_EN | B_AX_EESK_PULL_LOW_EN |
+ B_AX_LED1_PULL_LOW_EN);
+
+ rtw89_write32_set(rtwdev, R_AX_DMAC_FUNC_EN,
+ B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_MPDU_PROC_EN |
+ B_AX_WD_RLS_EN | B_AX_DLE_WDE_EN | B_AX_TXPKT_CTRL_EN |
+ B_AX_STA_SCH_EN | B_AX_DLE_PLE_EN | B_AX_PKT_BUF_EN |
+ B_AX_DMAC_TBL_EN | B_AX_PKT_IN_EN | B_AX_DLE_CPUIO_EN |
+ B_AX_DISPATCHER_EN | B_AX_BBRPT_EN | B_AX_MAC_SEC_EN |
+ B_AX_MAC_UN_EN | B_AX_H_AXIDMA_EN);
+
+ rtw89_write32_set(rtwdev, R_AX_CMAC_FUNC_EN,
+ B_AX_CMAC_EN | B_AX_CMAC_TXEN | B_AX_CMAC_RXEN |
+ B_AX_FORCE_CMACREG_GCKEN | B_AX_PHYINTF_EN |
+ B_AX_CMAC_DMA_EN | B_AX_PTCLTOP_EN | B_AX_SCHEDULER_EN |
+ B_AX_TMAC_EN | B_AX_RMAC_EN);
+
+ return 0;
+}
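The ready-bit waits in rtw8852c_pwr_on_func() use read_poll_timeout() from linux/iopoll.h: re-read a register until the condition holds or the timeout expires, returning 0 on success and -ETIMEDOUT otherwise. A user-space approximation of that polling pattern (the register reader and ready bit are hypothetical stand-ins):

#include <errno.h>
#include <stdint.h>
#include <time.h>

#define B_DEMO_RDY 0x00000008u

/* pretend hardware that reports ready on the third poll */
static uint32_t reg_read(void)
{
	static int calls;

	return ++calls >= 3 ? B_DEMO_RDY : 0;
}

static int poll_ready(unsigned int sleep_us, unsigned int timeout_us)
{
	unsigned int waited = 0;
	uint32_t val;

	for (;;) {
		val = reg_read();
		if (val & B_DEMO_RDY)
			return 0;		/* condition met */
		if (waited >= timeout_us)
			return -ETIMEDOUT;	/* timed out */
		nanosleep(&(struct timespec){ .tv_nsec = sleep_us * 1000L },
			  NULL);
		waited += sleep_us;
	}
}

int main(void)
{
	return poll_ready(1000, 20000) ? 1 : 0;	/* 1 ms step, 20 ms cap */
}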
+
+static int rtw8852c_pwr_off_func(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ u32 ret;
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_RFC2RF,
+ XTAL_SI_RFC2RF);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_OFF_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_OFF_WEI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0, XTAL_SI_RF00);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0, XTAL_SI_RF10);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_SRAM2RFC,
+ XTAL_SI_SRAM2RFC);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_PON_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_PON_WEI);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON);
+ rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, B_AX_FEN_BB_GLB_RSTN | B_AX_FEN_BBRSTB);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
+ B_AX_R_SYM_FEN_WLBBGLB_1 | B_AX_R_SYM_FEN_WLBBFUN_1);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_RFC_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_SHDN_WL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_PTA_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_GND_SHDN_WL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_OFFMAC);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_APFM_OFFMAC),
+ 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write32(rtwdev, R_AX_WLLPS_CTRL, 0x0001A0B0);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_XTAL_OFF_A_DIE);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);
+
+ return 0;
+}
+
+static void rtw8852c_e_efuse_parsing(struct rtw89_efuse *efuse,
+ struct rtw8852c_efuse *map)
+{
+ ether_addr_copy(efuse->addr, map->e.mac_addr);
+ efuse->rfe_type = map->rfe_type;
+ efuse->xtal_cap = map->xtal_k;
+}
+
+static void rtw8852c_efuse_parsing_tssi(struct rtw89_dev *rtwdev,
+ struct rtw8852c_efuse *map)
+{
+ struct rtw89_tssi_info *tssi = &rtwdev->tssi;
+ struct rtw8852c_tssi_offset *ofst[] = {&map->path_a_tssi, &map->path_b_tssi};
+ u8 *bw40_1s_tssi_6g_ofst[] = {map->bw40_1s_tssi_6g_a, map->bw40_1s_tssi_6g_b};
+ u8 i, j;
+
+ tssi->thermal[RF_PATH_A] = map->path_a_therm;
+ tssi->thermal[RF_PATH_B] = map->path_b_therm;
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ memcpy(tssi->tssi_cck[i], ofst[i]->cck_tssi,
+ sizeof(ofst[i]->cck_tssi));
+
+ for (j = 0; j < TSSI_CCK_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][EFUSE] path=%d cck[%d]=0x%x\n",
+ i, j, tssi->tssi_cck[i][j]);
+
+ memcpy(tssi->tssi_mcs[i], ofst[i]->bw40_tssi,
+ sizeof(ofst[i]->bw40_tssi));
+ memcpy(tssi->tssi_mcs[i] + TSSI_MCS_2G_CH_GROUP_NUM,
+ ofst[i]->bw40_1s_tssi_5g, sizeof(ofst[i]->bw40_1s_tssi_5g));
+ memcpy(tssi->tssi_6g_mcs[i], bw40_1s_tssi_6g_ofst[i],
+ sizeof(tssi->tssi_6g_mcs[i]));
+
+ for (j = 0; j < TSSI_MCS_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][EFUSE] path=%d mcs[%d]=0x%x\n",
+ i, j, tssi->tssi_mcs[i][j]);
+ }
+}
+
+static int rtw8852c_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map)
+{
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+ struct rtw8852c_efuse *map;
+
+ map = (struct rtw8852c_efuse *)log_map;
+
+ efuse->country_code[0] = map->country_code[0];
+ efuse->country_code[1] = map->country_code[1];
+ rtw8852c_efuse_parsing_tssi(rtwdev, map);
+
+ switch (rtwdev->hci.type) {
+ case RTW89_HCI_TYPE_PCIE:
+ rtw8852c_e_efuse_parsing(efuse, map);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type);
+
+ return 0;
+}
+
+static void rtw8852c_phycap_parsing_tssi(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+ struct rtw89_tssi_info *tssi = &rtwdev->tssi;
+ static const u32 tssi_trim_addr[RF_PATH_NUM_8852C] = {0x5D6, 0x5AB};
+ static const u32 tssi_trim_addr_6g[RF_PATH_NUM_8852C] = {0x5CE, 0x5A3};
+ u32 addr = rtwdev->chip->phycap_addr;
+ bool pg = false;
+ u32 ofst;
+ u8 i, j;
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++) {
+ /* addrs are in decreasing order */
+ ofst = tssi_trim_addr[i] - addr - j;
+ tssi->tssi_trim[i][j] = phycap_map[ofst];
+
+ if (phycap_map[ofst] != 0xff)
+ pg = true;
+ }
+
+ for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM_6G; j++) {
+ /* addrs are in decreasing order */
+ ofst = tssi_trim_addr_6g[i] - addr - j;
+ tssi->tssi_trim_6g[i][j] = phycap_map[ofst];
+
+ if (phycap_map[ofst] != 0xff)
+ pg = true;
+ }
+ }
+
+ if (!pg) {
+ memset(tssi->tssi_trim, 0, sizeof(tssi->tssi_trim));
+ memset(tssi->tssi_trim_6g, 0, sizeof(tssi->tssi_trim_6g));
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM] no PG, set all trim info to 0\n");
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++)
+ for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] path=%d idx=%d trim=0x%x addr=0x%x\n",
+ i, j, tssi->tssi_trim[i][j],
+ tssi_trim_addr[i] - j);
+}
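A worked example of the decreasing-address lookup above: group entry j of a trim block sits at the base address minus j, so its offset into phycap_map is trim_addr - phycap_addr - j. Using the 0x590 map base and the 0x5D6 path A trim base from this file:

#include <stdio.h>

int main(void)
{
	unsigned int phycap_addr = 0x590;	/* chip info phycap base */
	unsigned int trim_addr = 0x5d6;		/* path A TSSI trim base */
	unsigned int j;

	for (j = 0; j < 4; j++)
		printf("group %u -> phycap_map[0x%x]\n",
		       j, trim_addr - phycap_addr - j);
	return 0;
}

Offsets come out as 0x46, 0x45, 0x44, 0x43, matching the comment that the addresses run in decreasing order.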
+
+static void rtw8852c_phycap_parsing_thermal_trim(struct rtw89_dev *rtwdev,
+ u8 *phycap_map)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ static const u32 thm_trim_addr[RF_PATH_NUM_8852C] = {0x5DF, 0x5DC};
+ u32 addr = rtwdev->chip->phycap_addr;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ info->thermal_trim[i] = phycap_map[thm_trim_addr[i] - addr];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] path=%d thermal_trim=0x%x\n",
+ i, info->thermal_trim[i]);
+
+ if (info->thermal_trim[i] != 0xff)
+ info->pg_thermal_trim = true;
+ }
+}
+
+static void rtw8852c_thermal_trim(struct rtw89_dev *rtwdev)
+{
+#define __thm_setting(raw) \
+({ \
+ u8 __v = (raw); \
+ ((__v & 0x1) << 3) | ((__v & 0x1f) >> 1); \
+})
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 i, val;
+
+ if (!info->pg_thermal_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] no PG, do nothing\n");
+
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ val = __thm_setting(info->thermal_trim[i]);
+ rtw89_write_rf(rtwdev, i, RR_TM2, RR_TM2_OFF, val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] path=%d thermal_setting=0x%x\n",
+ i, val);
+ }
+#undef __thm_setting
+}
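A worked example of the __thm_setting() packing above: the raw trim value's LSB moves to bit 3 and the remaining low bits shift right by one. Input values are hypothetical:

#include <stdio.h>

static unsigned char thm_setting(unsigned char raw)
{
	return ((raw & 0x1) << 3) | ((raw & 0x1f) >> 1);
}

int main(void)
{
	printf("0x03 -> 0x%x\n", thm_setting(0x03));	/* (1<<3) | 0x1 = 0x9 */
	printf("0x1e -> 0x%x\n", thm_setting(0x1e));	/* (0<<3) | 0xf = 0xf */
	return 0;
}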
+
+static void rtw8852c_phycap_parsing_pa_bias_trim(struct rtw89_dev *rtwdev,
+ u8 *phycap_map)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ static const u32 pabias_trim_addr[RF_PATH_NUM_8852C] = {0x5DE, 0x5DB};
+ u32 addr = rtwdev->chip->phycap_addr;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ info->pa_bias_trim[i] = phycap_map[pabias_trim_addr[i] - addr];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] path=%d pa_bias_trim=0x%x\n",
+ i, info->pa_bias_trim[i]);
+
+ if (info->pa_bias_trim[i] != 0xff)
+ info->pg_pa_bias_trim = true;
+ }
+}
+
+static void rtw8852c_pa_bias_trim(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 pabias_2g, pabias_5g;
+ u8 i;
+
+ if (!info->pg_pa_bias_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] no PG, do nothing\n");
+
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ pabias_2g = FIELD_GET(GENMASK(3, 0), info->pa_bias_trim[i]);
+ pabias_5g = FIELD_GET(GENMASK(7, 4), info->pa_bias_trim[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n",
+ i, pabias_2g, pabias_5g);
+
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXG, pabias_2g);
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXA, pabias_5g);
+ }
+}
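The nibble split done with FIELD_GET(GENMASK(3, 0)) and FIELD_GET(GENMASK(7, 4)) above reduces to plain masks and shifts. A small sketch with a hypothetical eFuse byte:

#include <stdio.h>

int main(void)
{
	unsigned char pa_bias_trim = 0xa5;			/* sample byte */
	unsigned char pabias_2g = pa_bias_trim & 0x0f;		/* bits 3..0 */
	unsigned char pabias_5g = (pa_bias_trim >> 4) & 0x0f;	/* bits 7..4 */

	printf("2G=0x%x 5G=0x%x\n", pabias_2g, pabias_5g);	/* 0x5, 0xa */
	return 0;
}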
+
+static int rtw8852c_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+ rtw8852c_phycap_parsing_tssi(rtwdev, phycap_map);
+ rtw8852c_phycap_parsing_thermal_trim(rtwdev, phycap_map);
+ rtw8852c_phycap_parsing_pa_bias_trim(rtwdev, phycap_map);
+
+ return 0;
+}
+
+static void rtw8852c_power_trim(struct rtw89_dev *rtwdev)
+{
+ rtw8852c_thermal_trim(rtwdev);
+ rtw8852c_pa_bias_trim(rtwdev);
+}
+
+static
+void rtw8852c_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
+ s8 pw_ofst, enum rtw89_mac_idx mac_idx)
+{
+ s8 pw_ofst_2tx;
+ s8 val_1t;
+ s8 val_2t;
+ u32 reg;
+ u8 i;
+
+ if (pw_ofst < -32 || pw_ofst > 31) {
+ rtw89_warn(rtwdev, "[ULTB] Err pwr_offset=%d\n", pw_ofst);
+ return;
+ }
+ val_1t = pw_ofst << 2;
+ pw_ofst_2tx = max(pw_ofst - 3, -32);
+ val_2t = pw_ofst_2tx << 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[ULTB] val_1tx=0x%x\n", val_1t);
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[ULTB] val_2tx=0x%x\n", val_2t);
+
+ for (i = 0; i < 4; i++) {
+ /* 1TX */
+ reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_1T, mac_idx);
+ rtw89_write32_mask(rtwdev, reg,
+ B_AX_PWR_UL_TB_1T_V1_MASK << (8 * i),
+ val_1t);
+ /* 2TX */
+ reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_2T, mac_idx);
+ rtw89_write32_mask(rtwdev, reg,
+ B_AX_PWR_UL_TB_2T_V1_MASK << (8 * i),
+ val_2t);
+ }
+}
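A worked example of the offset derivation above: the 2TX offset is the 1TX offset minus 3, clamped at -32 so it still fits the field, and both values are shifted left by two into the register encoding. The sample offset is hypothetical:

#include <stdio.h>

int main(void)
{
	signed char pw_ofst = -31;			/* in [-32, 31] */
	signed char pw_ofst_2tx = pw_ofst - 3 < -32 ? -32 : pw_ofst - 3;
	signed char val_1t = pw_ofst << 2;		/* as the driver does */
	signed char val_2t = pw_ofst_2tx << 2;

	printf("val_1tx=0x%x val_2tx=0x%x\n", val_1t & 0xff, val_2t & 0xff);
	return 0;
}

With -31 in, the 2TX value clamps to -32 and the prints show 0x84 and 0x80, mirroring the debug output format above.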
+
+static const struct rtw89_chip_ops rtw8852c_chip_ops = {
+ .read_efuse = rtw8852c_read_efuse,
+ .read_phycap = rtw8852c_read_phycap,
+ .power_trim = rtw8852c_power_trim,
+ .read_rf = rtw89_phy_read_rf_v1,
+ .write_rf = rtw89_phy_write_rf_v1,
+ .set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset,
+ .pwr_on_func = rtw8852c_pwr_on_func,
+ .pwr_off_func = rtw8852c_pwr_off_func,
+ .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path_v1,
+ .mac_cfg_gnt = rtw89_mac_cfg_gnt_v1,
+ .stop_sch_tx = rtw89_mac_stop_sch_tx_v1,
+ .resume_sch_tx = rtw89_mac_resume_sch_tx_v1,
+};
+
+const struct rtw89_chip_info rtw8852c_chip_info = {
+ .chip_id = RTL8852C,
+ .ops = &rtw8852c_chip_ops,
+ .fw_name = "rtw89/rtw8852c_fw.bin",
+ .dle_mem = rtw8852c_dle_mem_pcie,
+ .rf_base_addr = {0xe000, 0xf000},
+ .pwr_on_seq = NULL,
+ .pwr_off_seq = NULL,
+ .sec_ctrl_efuse_size = 4,
+ .physical_efuse_size = 1216,
+ .logical_efuse_size = 2048,
+ .limit_efuse_size = 1280,
+ .dav_phy_efuse_size = 96,
+ .dav_log_efuse_size = 16,
+ .phycap_addr = 0x590,
+ .phycap_size = 0x60,
+ .hci_func_en_addr = R_AX_HCI_FUNC_EN_V1,
+ .h2c_ctrl_reg = R_AX_H2CREG_CTRL_V1,
+ .h2c_regs = rtw8852c_h2c_regs,
+ .c2h_ctrl_reg = R_AX_C2HREG_CTRL_V1,
+ .c2h_regs = rtw8852c_c2h_regs,
+ .page_regs = &rtw8852c_page_regs,
+ .dcfo_comp = &rtw8852c_dcfo_comp,
+ .dcfo_comp_sft = 5,
+};
+EXPORT_SYMBOL(rtw8852c_chip_info);
+
+MODULE_FIRMWARE("rtw89/rtw8852c_fw.bin");
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852C driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.h b/drivers/net/wireless/realtek/rtw89/rtw8852c.h
new file mode 100644
index 000000000000..d0594716040b
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852C_H__
+#define __RTW89_8852C_H__
+
+#include "core.h"
+
+#define RF_PATH_NUM_8852C 2
+
+struct rtw8852c_u_efuse {
+ u8 rsvd[0x38];
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct rtw8852c_e_efuse {
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct rtw8852c_tssi_offset {
+ u8 cck_tssi[TSSI_CCK_CH_GROUP_NUM];
+ u8 bw40_tssi[TSSI_MCS_2G_CH_GROUP_NUM];
+ u8 rsvd[7];
+ u8 bw40_1s_tssi_5g[TSSI_MCS_5G_CH_GROUP_NUM];
+} __packed;
+
+struct rtw8852c_efuse {
+ u8 rsvd[0x210];
+ struct rtw8852c_tssi_offset path_a_tssi;
+ u8 rsvd1[10];
+ struct rtw8852c_tssi_offset path_b_tssi;
+ u8 rsvd2[94];
+ u8 channel_plan;
+ u8 xtal_k;
+ u8 rsvd3;
+ u8 iqk_lck;
+ u8 rsvd4[5];
+ u8 reg_setting:2;
+ u8 tx_diversity:1;
+ u8 rx_diversity:2;
+ u8 ac_mode:1;
+ u8 module_type:2;
+ u8 rsvd5;
+ u8 shared_ant:1;
+ u8 coex_type:3;
+ u8 ant_iso:1;
+ u8 radio_on_off:1;
+ u8 rsvd6:2;
+ u8 eeprom_version;
+ u8 customer_id;
+ u8 tx_bb_swing_2g;
+ u8 tx_bb_swing_5g;
+ u8 tx_cali_pwr_trk_mode;
+ u8 trx_path_selection;
+ u8 rfe_type;
+ u8 country_code[2];
+ u8 rsvd7[3];
+ u8 path_a_therm;
+ u8 path_b_therm;
+ u8 rsvd8[46];
+ u8 bw40_1s_tssi_6g_a[TSSI_MCS_6G_CH_GROUP_NUM];
+ u8 rsvd9[10];
+ u8 bw40_1s_tssi_6g_b[TSSI_MCS_6G_CH_GROUP_NUM];
+ u8 rsvd10[110];
+ u8 channel_plan_6g;
+ u8 rsvd11[71];
+ union {
+ struct rtw8852c_u_efuse u;
+ struct rtw8852c_e_efuse e;
+ };
+} __packed;
+
+extern const struct rtw89_chip_info rtw8852c_chip_info;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
new file mode 100644
index 000000000000..e71370585b4d
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2020-2022 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "pci.h"
+#include "reg.h"
+#include "rtw8852c.h"
+
+static const struct rtw89_pci_info rtw8852c_pci_info = {
+ .dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1,
+};
+
+static const struct rtw89_driver_info rtw89_8852ce_info = {
+ .chip = &rtw8852c_chip_info,
+ .bus = {
+ .pci = &rtw8852c_pci_info,
+ },
+};
+
+static const struct pci_device_id rtw89_8852ce_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xc852),
+ .driver_data = (kernel_ulong_t)&rtw89_8852ce_info,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(pci, rtw89_8852ce_id_table);
+
+static struct pci_driver rtw89_8852ce_driver = {
+ .name = "rtw89_8852ce",
+ .id_table = rtw89_8852ce_id_table,
+ .probe = rtw89_pci_probe,
+ .remove = rtw89_pci_remove,
+ .driver.pm = &rtw89_pm_ops,
+};
+module_pci_driver(rtw89_8852ce_driver);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852CE driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index 75b11249f306..86e3d8b400d6 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -31,6 +31,8 @@
#define RTW89_TXWD_BODY0_HDR_LLC_LEN GENMASK(15, 11)
#define RTW89_TXWD_BODY0_WD_PAGE BIT(7)
#define RTW89_TXWD_BODY0_HW_AMSDU BIT(5)
+#define RTW89_TXWD_BODY0_HW_SSN_SEL GENMASK(3, 2)
+#define RTW89_TXWD_BODY0_HW_SSN_MODE GENMASK(1, 0)
/* TX WD BODY DWORD 1 */
#define RTW89_TXWD_BODY1_PAYLOAD_ID GENMASK(31, 16)
@@ -56,6 +58,7 @@
#define RTW89_TXWD_INFO0_GI_LTF GENMASK(27, 25)
#define RTW89_TXWD_INFO0_DATA_RATE GENMASK(24, 16)
#define RTW89_TXWD_INFO0_DISDATAFB BIT(10)
+#define RTW89_TXWD_INFO0_MULTIPORT_ID GENMASK(6, 4)
/* TX WD INFO DWORD 1 */
#define RTW89_TXWD_INFO1_DATA_RTY_LOWEST_RATE GENMASK(24, 16)
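The two new BODY0 fields follow the same GENMASK() convention as their neighbors; drivers pack them with FIELD_PREP() from linux/bitfield.h. A user-space sketch with local stand-ins for the kernel macros (field values hypothetical):

#include <stdint.h>
#include <stdio.h>

/* stand-ins for GENMASK()/FIELD_PREP(); __builtin_ctz is a GCC builtin */
#define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))

#define RTW89_TXWD_BODY0_HW_SSN_SEL	GENMASK(3, 2)
#define RTW89_TXWD_BODY0_HW_SSN_MODE	GENMASK(1, 0)

int main(void)
{
	uint32_t dword0 = 0;

	dword0 |= FIELD_PREP(RTW89_TXWD_BODY0_HW_SSN_SEL, 0x2);
	dword0 |= FIELD_PREP(RTW89_TXWD_BODY0_HW_SSN_MODE, 0x1);
	printf("body dword0 = 0x%08x\n", dword0);	/* 0x00000009 */
	return 0;
}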
diff --git a/drivers/net/wireless/st/cw1200/cw1200_spi.c b/drivers/net/wireless/st/cw1200/cw1200_spi.c
index 271ed2ce2d7f..fe0d220da44d 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_spi.c
@@ -423,7 +423,7 @@ static int cw1200_spi_probe(struct spi_device *func)
}
/* Disconnect Function to be called by SPI stack when device is disconnected */
-static int cw1200_spi_disconnect(struct spi_device *func)
+static void cw1200_spi_disconnect(struct spi_device *func)
{
struct hwbus_priv *self = spi_get_drvdata(func);
@@ -435,8 +435,6 @@ static int cw1200_spi_disconnect(struct spi_device *func)
}
}
cw1200_spi_off(dev_get_platdata(&func->dev));
-
- return 0;
}
static int __maybe_unused cw1200_spi_suspend(struct device *dev)
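This hunk follows the SPI core change that made bus remove/disconnect callbacks return void, since the core could not act on an error code anyway. A minimal driver-side sketch of the resulting shape (names hypothetical):

#include <linux/module.h>
#include <linux/spi/spi.h>

static int demo_spi_probe(struct spi_device *spi)
{
	return 0;	/* probe still reports errors to the core */
}

static void demo_spi_remove(struct spi_device *spi)
{
	/* tear down resources; no status can be returned */
}

static struct spi_driver demo_spi_driver = {
	.driver = { .name = "demo-spi" },
	.probe = demo_spi_probe,
	.remove = demo_spi_remove,
};
module_spi_driver(demo_spi_driver);

MODULE_LICENSE("GPL");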
diff --git a/drivers/net/wireless/st/cw1200/queue.c b/drivers/net/wireless/st/cw1200/queue.c
index 12952b1c29df..e06da4b3b0d4 100644
--- a/drivers/net/wireless/st/cw1200/queue.c
+++ b/drivers/net/wireless/st/cw1200/queue.c
@@ -8,6 +8,7 @@
#include <net/mac80211.h>
#include <linux/sched.h>
+#include <linux/jiffies.h>
#include "queue.h"
#include "cw1200.h"
#include "debug.h"
@@ -94,7 +95,7 @@ static void __cw1200_queue_gc(struct cw1200_queue *queue,
bool wakeup_stats = false;
list_for_each_entry_safe(item, tmp, &queue->queue, head) {
- if (jiffies - item->queue_timestamp < queue->ttl)
+ if (time_is_after_jiffies(item->queue_timestamp + queue->ttl))
break;
--queue->num_queued;
--queue->link_map_cache[item->txpriv.link_id];
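The conversion above swaps open-coded jiffies arithmetic for time_is_after_jiffies(), which is built on the signed-difference idiom behind time_after() and therefore stays well-defined when the jiffies counter wraps. A user-space sketch of that idiom (values hypothetical):

#include <stdio.h>

/* core of the kernel's time_after(): compare via signed difference */
static int time_is_after(unsigned long deadline, unsigned long now)
{
	return (long)(now - deadline) < 0;
}

int main(void)
{
	unsigned long now = 5;				/* counter just wrapped */
	unsigned long stamp = (unsigned long)-10;	/* queued before wrap */
	unsigned long ttl = 100;

	/* deadline wraps to 90; the entry is still within its TTL */
	printf("still fresh: %d\n", time_is_after(stamp + ttl, now));
	return 0;
}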
diff --git a/drivers/net/wireless/st/cw1200/wsm.c b/drivers/net/wireless/st/cw1200/wsm.c
index 99624dd34886..5a3e7a626702 100644
--- a/drivers/net/wireless/st/cw1200/wsm.c
+++ b/drivers/net/wireless/st/cw1200/wsm.c
@@ -537,7 +537,7 @@ int wsm_set_tx_queue_params(struct cw1200_common *priv,
{
int ret;
struct wsm_buf *buf = &priv->wsm_cmd_buf;
- u8 queue_id_to_wmm_aci[] = {3, 2, 0, 1};
+ static const u8 queue_id_to_wmm_aci[] = { 3, 2, 0, 1 };
wsm_cmd_lock(priv);
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 5b894bd6237e..9df38726e8b0 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -327,14 +327,12 @@ out_free:
return ret;
}
-static int wl1251_spi_remove(struct spi_device *spi)
+static void wl1251_spi_remove(struct spi_device *spi)
{
struct wl1251 *wl = spi_get_drvdata(spi);
wl1251_free_hw(wl);
regulator_disable(wl->vio);
-
- return 0;
}
static struct spi_driver wl1251_spi_driver = {
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 354a7e1c3315..7eae1ec2eb2b 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -546,13 +546,11 @@ out_dev_put:
return ret;
}
-static int wl1271_remove(struct spi_device *spi)
+static void wl1271_remove(struct spi_device *spi)
{
struct wl12xx_spi_glue *glue = spi_get_drvdata(spi);
platform_device_unregister(glue->core);
-
- return 0;
}
static struct spi_driver wl1271_spi_driver = {
diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
index e64e4e579518..82bc0d44212e 100644
--- a/drivers/net/wireless/zydas/zd1201.c
+++ b/drivers/net/wireless/zydas/zd1201.c
@@ -521,7 +521,7 @@ static int zd1201_setconfig(struct zd1201 *zd, int rid, const void *buf, int len
zd->rxdatas = 0;
zd->rxlen = 0;
for (seq=0; len > 0; seq++) {
- request = kmalloc(16, gfp_mask);
+ request = kzalloc(16, gfp_mask);
if (!request)
return -ENOMEM;
urb = usb_alloc_urb(0, gfp_mask);
@@ -529,7 +529,6 @@ static int zd1201_setconfig(struct zd1201 *zd, int rid, const void *buf, int len
kfree(request);
return -ENOMEM;
}
- memset(request, 0, 16);
reqlen = len>12 ? 12 : len;
request[0] = ZD1201_USB_RESREQ;
request[1] = seq;
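The zd1201 change is the usual kmalloc()+memset() to kzalloc() conversion: kzalloc(size, flags) hands back size bytes already zeroed, removing the separate clear. A minimal sketch of the resulting shape (helper name hypothetical):

#include <linux/slab.h>

static void *demo_alloc_request(gfp_t gfp_mask)
{
	/* was: p = kmalloc(16, gfp_mask); memset(p, 0, 16); */
	return kzalloc(16, gfp_mask);
}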
diff --git a/drivers/net/wwan/iosm/iosm_ipc_debugfs.c b/drivers/net/wwan/iosm/iosm_ipc_debugfs.c
index f2f57751a7d2..e916139b8cd4 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_debugfs.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_debugfs.c
@@ -12,10 +12,10 @@
void ipc_debugfs_init(struct iosm_imem *ipc_imem)
{
- struct dentry *debugfs_pdev = wwan_get_debugfs_dir(ipc_imem->dev);
+ ipc_imem->debugfs_wwan_dir = wwan_get_debugfs_dir(ipc_imem->dev);
ipc_imem->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME,
- debugfs_pdev);
+ ipc_imem->debugfs_wwan_dir);
ipc_imem->trace = ipc_trace_init(ipc_imem);
if (!ipc_imem->trace)
@@ -26,4 +26,5 @@ void ipc_debugfs_deinit(struct iosm_imem *ipc_imem)
{
ipc_trace_deinit(ipc_imem->trace);
debugfs_remove_recursive(ipc_imem->debugfs_dir);
+ wwan_put_debugfs_dir(ipc_imem->debugfs_wwan_dir);
}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
index f9e8e0ee4de3..1e6a47976642 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
@@ -114,17 +114,35 @@ ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
return HRTIMER_NORESTART;
}
+static int ipc_imem_tq_adb_timer_cb(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ ipc_mux_ul_adb_finish(ipc_imem->mux);
+ return 0;
+}
+
+static enum hrtimer_restart
+ipc_imem_adb_timer_cb(struct hrtimer *hr_timer)
+{
+ struct iosm_imem *ipc_imem =
+ container_of(hr_timer, struct iosm_imem, adb_timer);
+
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_adb_timer_cb, 0,
+ NULL, 0, false);
+ return HRTIMER_NORESTART;
+}
+
static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
struct ipc_mux_config *cfg)
{
ipc_mmio_update_cp_capability(ipc_imem->mmio);
- if (!ipc_imem->mmio->has_mux_lite) {
+ if (ipc_imem->mmio->mux_protocol == MUX_UNKNOWN) {
dev_err(ipc_imem->dev, "Failed to get Mux capability.");
return -EINVAL;
}
- cfg->protocol = MUX_LITE;
+ cfg->protocol = ipc_imem->mmio->mux_protocol;
cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
MUX_UL_ON_CREDITS :
@@ -153,6 +171,10 @@ void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
IPC_MSG_PREP_FEATURE_SET, &prep_args);
}
+/**
+ * ipc_imem_td_update_timer_start - Starts the TD Update Timer if not started.
+ * @ipc_imem: Pointer to imem data-struct
+ */
void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
{
/* Use the TD update timer only in the runtime phase */
@@ -179,6 +201,21 @@ void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
hrtimer_cancel(hr_timer);
}
+/**
+ * ipc_imem_adb_timer_start - Starts the ADB timer if it is not already running.
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem)
+{
+ if (!hrtimer_active(&ipc_imem->adb_timer)) {
+ ipc_imem->hrtimer_period =
+ ktime_set(0, IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC);
+ hrtimer_start(&ipc_imem->adb_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ }
+}
+
bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
{
struct ipc_mem_channel *channel;
@@ -550,6 +587,11 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
+ if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7360_ID &&
+ chnl_cfg_port.wwan_port_type == WWAN_PORT_MBIM) {
+ ctrl_chl_idx++;
+ continue;
+ }
if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
chnl_cfg_port,
@@ -680,8 +722,11 @@ static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
}
/* Try to generate new ADB or ADGH. */
- if (ipc_mux_ul_data_encode(ipc_imem->mux))
+ if (ipc_mux_ul_data_encode(ipc_imem->mux)) {
ipc_imem_td_update_timer_start(ipc_imem);
+ if (ipc_imem->mux->protocol == MUX_AGGREGATION)
+ ipc_imem_adb_timer_start(ipc_imem);
+ }
/* Continue the send procedure with accumulated SIO or NETIF packets.
* Reset the debounce flags.
@@ -1330,6 +1375,9 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
HRTIMER_MODE_REL);
ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;
+ hrtimer_init(&ipc_imem->adb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ipc_imem->adb_timer.function = ipc_imem_adb_timer_cb;
+
if (ipc_imem_config(ipc_imem)) {
dev_err(ipc_imem->dev, "failed to initialize the imem");
goto imem_config_fail;
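The new ADB timer follows the driver's established hrtimer pattern: initialize once with a callback that only defers work to task context, and arm it relative to now only when it is not already pending. A reduced sketch of that pattern (names and timeout hypothetical):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

#define DEMO_TIMEOUT_NSEC	(1000 * 1000)	/* 1 ms, for illustration */

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_timer_cb(struct hrtimer *hr_timer)
{
	/* hand the real work off to task context, as the driver does */
	return HRTIMER_NORESTART;
}

static void demo_timer_start(void)
{
	if (!hrtimer_active(&demo_timer))
		hrtimer_start(&demo_timer, ktime_set(0, DEMO_TIMEOUT_NSEC),
			      HRTIMER_MODE_REL);
}

static void demo_timer_setup(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_cb;
}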
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h
index 98554e9beb01..e700dc8bfe0a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h
@@ -317,6 +317,7 @@ enum ipc_phase {
* @tdupdate_timer: Delay the TD update doorbell.
* @fast_update_timer: forced head pointer update delay timer.
* @td_alloc_timer: Timer for DL pipe TD allocation retry
+ * @adb_timer: Timer for finishing the ADB.
* @rom_exit_code: Mapped boot rom exit code.
* @enter_runtime: 1 means the transition to runtime phase was
* executed.
@@ -340,6 +341,7 @@ enum ipc_phase {
* @ev_mux_net_transmit_pending:0 means inform the IPC tasklet to pass
* @reset_det_n: Reset detect flag
* @pcie_wake_n: Pcie wake flag
+ * @debugfs_wwan_dir: WWAN Debug FS directory entry
* @debugfs_dir: Debug FS directory for driver-specific entries
*/
struct iosm_imem {
@@ -364,6 +366,7 @@ struct iosm_imem {
struct hrtimer tdupdate_timer;
struct hrtimer fast_update_timer;
struct hrtimer td_alloc_timer;
+ struct hrtimer adb_timer;
enum rom_exit_code rom_exit_code;
u32 enter_runtime;
struct completion ul_pend_sem;
@@ -382,6 +385,7 @@ struct iosm_imem {
reset_det_n:1,
pcie_wake_n:1;
#ifdef CONFIG_WWAN_DEBUGFS
+ struct dentry *debugfs_wwan_dir;
struct dentry *debugfs_dir;
#endif
};
@@ -593,4 +597,7 @@ void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
* Returns: 0 on success, -1 on failure
*/
int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem);
+
+void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem);
+
#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.c b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
index f09e5e77a2a5..63eb08c43c05 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mmio.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include "iosm_ipc_mmio.h"
+#include "iosm_ipc_mux.h"
/* Definition of MMIO offsets
* note that MMIO_CI offsets are relative to end of chip info structure
@@ -71,8 +72,9 @@ void ipc_mmio_update_cp_capability(struct iosm_mmio *ipc_mmio)
ver = ipc_mmio_get_cp_version(ipc_mmio);
cp_cap = ioread32(ipc_mmio->base + ipc_mmio->offset.cp_capability);
- ipc_mmio->has_mux_lite = (ver >= IOSM_CP_VERSION) &&
- !(cp_cap & DL_AGGR) && !(cp_cap & UL_AGGR);
+ ipc_mmio->mux_protocol = ((ver >= IOSM_CP_VERSION) && (cp_cap &
+ (UL_AGGR | DL_AGGR))) ? MUX_AGGREGATION
+ : MUX_LITE;
ipc_mmio->has_ul_flow_credit =
(ver >= IOSM_CP_VERSION) && (cp_cap & UL_FLOW_CREDIT);
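The capability decode above selects aggregation when the CP version is new enough and either aggregation bit is advertised, and otherwise falls back to MUX Lite. A small truth-table sketch (bit values hypothetical):

#include <stdio.h>

enum { MUX_UNKNOWN, MUX_LITE, MUX_AGGREGATION };

#define UL_AGGR	0x1u	/* hypothetical bit positions */
#define DL_AGGR	0x2u

static int decode_mux(unsigned int ver, unsigned int min_ver, unsigned int cap)
{
	return (ver >= min_ver && (cap & (UL_AGGR | DL_AGGR))) ?
	       MUX_AGGREGATION : MUX_LITE;
}

int main(void)
{
	printf("%d\n", decode_mux(2, 1, UL_AGGR));	/* 2: MUX_AGGREGATION */
	printf("%d\n", decode_mux(2, 1, 0));		/* 1: MUX_LITE */
	printf("%d\n", decode_mux(0, 1, UL_AGGR));	/* 1: old CP, MUX_LITE */
	return 0;
}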
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.h b/drivers/net/wwan/iosm/iosm_ipc_mmio.h
index f861994a6d90..193d7ba2478a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mmio.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.h
@@ -72,7 +72,7 @@ struct mmio_offset {
* @context_info_addr: Physical base address of context info structure
* @chip_info_version: Version of chip info structure
* @chip_info_size: Size of chip info structure
- * @has_mux_lite: It doesn't support mux aggergation
+ * @mux_protocol: Mux protocol in use (MUX_LITE or MUX_AGGREGATION)
* @has_ul_flow_credit: Ul flow credit support
* @has_slp_no_prot: Device sleep no protocol support
* @has_mcr_support: Usage of mcr support
@@ -84,8 +84,8 @@ struct iosm_mmio {
phys_addr_t context_info_addr;
unsigned int chip_info_version;
unsigned int chip_info_size;
- u8 has_mux_lite:1,
- has_ul_flow_credit:1,
+ u32 mux_protocol;
+ u8 has_ul_flow_credit:1,
has_slp_no_prot:1,
has_mcr_support:1;
};
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.c b/drivers/net/wwan/iosm/iosm_ipc_mux.c
index 8e66ffe92055..9c7a9a2a1f25 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux.c
@@ -279,9 +279,10 @@ struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg,
struct iosm_imem *imem)
{
struct iosm_mux *ipc_mux = kzalloc(sizeof(*ipc_mux), GFP_KERNEL);
- int i, ul_tds, ul_td_size;
+ int i, j, ul_tds, ul_td_size;
struct sk_buff_head *free_list;
struct sk_buff *skb;
+ int qlt_size;
if (!ipc_mux)
return NULL;
@@ -321,6 +322,24 @@ struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg,
ipc_mux->channel_id = -1;
ipc_mux->channel = NULL;
+ if (ipc_mux->protocol != MUX_LITE) {
+ qlt_size = offsetof(struct mux_qlth, ql) +
+ MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
+
+ for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
+ ipc_mux->ul_adb.pp_qlt[i] = kzalloc(qlt_size,
+ GFP_ATOMIC);
+ if (!ipc_mux->ul_adb.pp_qlt[i]) {
+ for (j = i - 1; j >= 0; j--)
+ kfree(ipc_mux->ul_adb.pp_qlt[j]);
+ return NULL;
+ }
+ }
+
+ ul_td_size = IPC_MEM_MAX_UL_ADB_BUF_SIZE;
+ ul_tds = IPC_MEM_MAX_TDS_MUX_AGGR_UL;
+ }
+
/* Allocate the list of UL ADB. */
for (i = 0; i < ul_tds; i++) {
dma_addr_t mapping;
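The pp_qlt allocation above uses the standard unwind-on-failure shape: on the first allocation that fails, free what was allocated so far in reverse order and bail out. A user-space sketch of the pattern (sizes and counts hypothetical):

#include <stdlib.h>

#define DEMO_ENTRIES 8

static int demo_alloc_tables(void *tbl[DEMO_ENTRIES], size_t size)
{
	int i, j;

	for (i = 0; i < DEMO_ENTRIES; i++) {
		tbl[i] = calloc(1, size);	/* zeroed, like kzalloc() */
		if (!tbl[i]) {
			for (j = i - 1; j >= 0; j--)
				free(tbl[j]);	/* unwind earlier entries */
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	void *tbl[DEMO_ENTRIES];

	return demo_alloc_tables(tbl, 64);
}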
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.h b/drivers/net/wwan/iosm/iosm_ipc_mux.h
index 88debaa1ed31..cd9d74cc097f 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux.h
@@ -8,9 +8,12 @@
#include "iosm_ipc_protocol.h"
-/* Size of the buffer for the IP MUX data buffer. */
-#define IPC_MEM_MAX_DL_MUX_BUF_SIZE (16 * 1024)
-#define IPC_MEM_MAX_UL_ADB_BUF_SIZE IPC_MEM_MAX_DL_MUX_BUF_SIZE
+#define IPC_MEM_MAX_UL_DG_ENTRIES 100
+#define IPC_MEM_MAX_TDS_MUX_AGGR_UL 60
+
+#define IPC_MEM_MAX_ADB_BUF_SIZE (16 * 1024)
+#define IPC_MEM_MAX_UL_ADB_BUF_SIZE IPC_MEM_MAX_ADB_BUF_SIZE
+#define IPC_MEM_MAX_DL_ADB_BUF_SIZE IPC_MEM_MAX_ADB_BUF_SIZE
/* Size of the buffer for the IP MUX Lite data buffer. */
#define IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE (2 * 1024)
@@ -167,6 +170,7 @@ enum mux_state {
enum ipc_mux_protocol {
MUX_UNKNOWN,
MUX_LITE,
+ MUX_AGGREGATION,
};
/* Supported UL data transfer methods. */
@@ -192,24 +196,111 @@ struct mux_session {
flush:1; /* flush net interface ? */
};
-/* State of a single UL data block. */
+/**
+ * struct mux_adth_dg - Structure of the datagram in the Aggregated Datagram
+ * Table Header.
+ * @datagram_index: Index (in bytes) to the k-th datagram in the table.
+ * Index shall count from the start of the block including
+ * the 16-byte header. This value shall be non-zero.
+ * @datagram_length: Length of the k-th datagram including the head padding.
+ * This value shall be non-zero.
+ * @service_class: Service class identifier for the datagram.
+ * @reserved: Reserved bytes. Set to zero.
+ */
+struct mux_adth_dg {
+ __le32 datagram_index;
+ __le16 datagram_length;
+ u8 service_class;
+ u8 reserved;
+};
+
+/**
+ * struct mux_qlth_ql - Structure of the queue level in the Aggregated
+ * Datagram Queue Level Table Header.
+ * @nr_of_bytes: Number of bytes available to transmit in the queue.
+ */
+struct mux_qlth_ql {
+ __le32 nr_of_bytes;
+};
+
+/**
+ * struct mux_qlth - Structure of Aggregated Datagram Queue Level Table
+ * Header.
+ * @signature: Signature of the Queue Level Table Header
+ * Value: 0x48544C51 (ASCII characters: 'Q' 'L' 'T' 'H')
+ * @table_length: Length (in bytes) of the datagram table. This length
+ * shall include the queue level table header size.
+ * Minimum value: 0x10
+ * @if_id: ID of the interface the queue levels in the table
+ * belong to.
+ * @reserved: Reserved byte. Set to zero.
+ * @next_table_index: Index (in bytes) to the next table in the buffer. Index
+ * shall count from the start of the block including the
+ * 16-byte header. Value of zero indicates end of the list.
+ * @reserved2: Reserved bytes. Set to zero.
+ * @ql: Queue level table with variable length
+ */
+struct mux_qlth {
+ __le32 signature;
+ __le16 table_length;
+ u8 if_id;
+ u8 reserved;
+ __le32 next_table_index;
+ __le32 reserved2;
+ struct mux_qlth_ql ql;
+};
+
+/**
+ * struct mux_adb - State of a single UL data block.
+ * @dest_skb: Current UL skb for the data block.
+ * @buf: ADB memory
+ * @adgh: ADGH pointer
+ * @qlth_skb: QLTH pointer
+ * @next_table_index: Pointer to next table index.
+ * @free_list: List of allocated ADBs for the UL session.
+ * @size: Size of the ADB memory.
+ * @if_cnt: Statistic counter
+ * @dg_cnt_total: Datagram count total
+ * @payload_size: Payload Size
+ * @dg: Datagram table.
+ * @pp_qlt: Pointers to hold Queue Level Tables of session
+ * @adbh: ADBH pointer
+ * @qlt_updated: Queue level table updated
+ * @dg_count: Datagram count
+ */
struct mux_adb {
- struct sk_buff *dest_skb; /* Current UL skb for the data block. */
- u8 *buf; /* ADB memory. */
- struct mux_adgh *adgh; /* ADGH pointer */
- struct sk_buff *qlth_skb; /* QLTH pointer */
- u32 *next_table_index; /* Pointer to next table index. */
- struct sk_buff_head free_list; /* List of alloc. ADB for the UL sess.*/
- int size; /* Size of the ADB memory. */
- u32 if_cnt; /* Statistic counter */
+ struct sk_buff *dest_skb;
+ u8 *buf;
+ struct mux_adgh *adgh;
+ struct sk_buff *qlth_skb;
+ u32 *next_table_index;
+ struct sk_buff_head free_list;
+ int size;
+ u32 if_cnt;
u32 dg_cnt_total;
u32 payload_size;
+ struct mux_adth_dg
+ dg[IPC_MEM_MUX_IP_SESSION_ENTRIES][IPC_MEM_MAX_UL_DG_ENTRIES];
+ struct mux_qlth *pp_qlt[IPC_MEM_MUX_IP_SESSION_ENTRIES];
+ struct mux_adbh *adbh;
+ u32 qlt_updated[IPC_MEM_MUX_IP_SESSION_ENTRIES];
+ u32 dg_count[IPC_MEM_MUX_IP_SESSION_ENTRIES];
};
-/* Temporary ACB state. */
+/**
+ * struct mux_acb - Temporary ACB state.
+ * @skb: Used UL skb.
+ * @if_id: Session id.
+ * @buf_p: Command buffer.
+ * @wanted_response: Wanted Response
+ * @got_response: Got response
+ * @cmd: command
+ * @got_param: Received command/response parameter
+ */
struct mux_acb {
struct sk_buff *skb; /* Used UL skb. */
int if_id; /* Session id. */
+ u8 *buf_p;
u32 wanted_response;
u32 got_response;
u32 cmd;
@@ -241,6 +332,12 @@ struct mux_acb {
* @wwan_q_offset: This will hold the offset of the given instance
* Useful while passing or receiving packets from
* wwan/imem layer.
+ * @adb_finish_timer: Timer for forcefully finishing the ADB
+ * @acb_tx_sequence_nr: Sequence number for the ACB header.
+ * @params: user configurable parameters
+ * @adb_tx_sequence_nr: Sequence number for ADB header
+ * @acc_adb_size: Statistic data for logging
+ * @acc_payload_size: Statistic data for logging
* @initialized: MUX object is initialized
* @ev_mux_net_transmit_pending:
* 0 means inform the IPC tasklet to pass the
@@ -269,10 +366,16 @@ struct iosm_mux {
long long ul_data_pend_bytes;
struct mux_acb acb;
int wwan_q_offset;
+ struct hrtimer adb_finish_timer;
+ u16 acb_tx_sequence_nr;
+ struct ipc_params *params;
+ u16 adb_tx_sequence_nr;
+ unsigned long long acc_adb_size;
+ unsigned long long acc_payload_size;
u8 initialized:1,
ev_mux_net_transmit_pending:1,
- adb_prep_ongoing:1;
-};
+ adb_prep_ongoing;
+} __packed;
/* MUX configuration structure */
struct ipc_mux_config {
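struct mux_qlth above ends in a variable-length queue level table, which is why the driver sizes it with offsetof() plus a per-entry term rather than sizeof(). A standalone sketch of the same computation, with the little-endian wire types replaced by plain integers:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct qlth_ql { uint32_t nr_of_bytes; };

struct qlth {
	uint32_t signature;
	uint16_t table_length;
	uint8_t  if_id;
	uint8_t  reserved;
	uint32_t next_table_index;
	uint32_t reserved2;
	struct qlth_ql ql;	/* first of MUX_QUEUE_LEVEL entries */
};

#define MUX_QUEUE_LEVEL 1

int main(void)
{
	size_t qlt_size = offsetof(struct qlth, ql) +
			  MUX_QUEUE_LEVEL * sizeof(struct qlth_ql);
	printf("qlt_size = %zu\n", qlt_size);	/* 16 + 4 = 20 on common ABIs */
	return 0;
}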
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
index 40fb54a0513e..d41e373f9c0a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
@@ -54,6 +54,49 @@ static int ipc_mux_acb_send(struct iosm_mux *ipc_mux, bool blocking)
return 0;
}
+/* Initialize the command header. */
+static void ipc_mux_acb_init(struct iosm_mux *ipc_mux)
+{
+ struct mux_acb *acb = &ipc_mux->acb;
+ struct mux_acbh *header;
+
+ header = (struct mux_acbh *)(acb->skb)->data;
+ header->block_length = cpu_to_le32(sizeof(struct mux_acbh));
+ header->first_cmd_index = header->block_length;
+ header->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ACBH);
+ header->sequence_nr = cpu_to_le16(ipc_mux->acb_tx_sequence_nr++);
+}
+
+/* Add a command to the ACB. */
+static struct mux_cmdh *ipc_mux_acb_add_cmd(struct iosm_mux *ipc_mux, u32 cmd,
+ void *param, u32 param_size)
+{
+ struct mux_acbh *header;
+ struct mux_cmdh *cmdh;
+ struct mux_acb *acb;
+
+ acb = &ipc_mux->acb;
+ header = (struct mux_acbh *)(acb->skb)->data;
+ cmdh = (struct mux_cmdh *)
+ ((acb->skb)->data + le32_to_cpu(header->block_length));
+
+ cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
+ cmdh->command_type = cpu_to_le32(cmd);
+ cmdh->if_id = acb->if_id;
+
+ acb->cmd = cmd;
+ cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_cmdh, param) +
+ param_size);
+ cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
+ if (param)
+ memcpy(&cmdh->param, param, param_size);
+
+ skb_put(acb->skb, le32_to_cpu(header->block_length) +
+ le16_to_cpu(cmdh->cmd_len));
+
+ return cmdh;
+}
+
/* Prepare mux Command */
static struct mux_lite_cmdh *ipc_mux_lite_add_cmd(struct iosm_mux *ipc_mux,
u32 cmd, struct mux_acb *acb,
@@ -104,7 +147,7 @@ int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
size_t res_size, bool blocking, bool respond)
{
struct mux_acb *acb = &ipc_mux->acb;
- struct mux_lite_cmdh *ack_lite;
+ union mux_type_cmdh cmdh;
int ret = 0;
acb->if_id = if_id;
@@ -112,11 +155,23 @@ int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
if (ret)
return ret;
- ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb, param,
- res_size);
- if (respond)
- ack_lite->transaction_id = cpu_to_le32(transaction_id);
+ if (ipc_mux->protocol == MUX_LITE) {
+ cmdh.ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb,
+ param, res_size);
+ if (respond)
+ cmdh.ack_lite->transaction_id =
+ cpu_to_le32(transaction_id);
+ } else {
+ /* Initialize the ACB header. */
+ ipc_mux_acb_init(ipc_mux);
+ cmdh.ack_aggr = ipc_mux_acb_add_cmd(ipc_mux, cmd_type, param,
+ res_size);
+
+ if (respond)
+ cmdh.ack_aggr->transaction_id =
+ cpu_to_le32(transaction_id);
+ }
ret = ipc_mux_acb_send(ipc_mux, blocking);
return ret;
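For reference, the shape of the ACB encoding used by ipc_mux_acb_init()/ipc_mux_acb_add_cmd() above: the command is written at the current block_length offset, and cmd_len spans the header up to param plus the parameter payload. A simplified userspace model follows; the driver places one command per ACB and grows the skb instead, so advancing block_length here is a generalization, and the field set is trimmed:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct acbh { uint32_t signature, block_length, first_cmd_index; };
struct cmdh { uint32_t signature, command_type, cmd_len; uint8_t param[32]; };

static struct cmdh *acb_add_cmd(uint8_t *buf, struct acbh *hdr,
				uint32_t type, const void *param, size_t n)
{
	struct cmdh *c = (struct cmdh *)(buf + hdr->block_length);

	c->signature = 0x48444D43;	/* "CMDH" in little-endian ASCII */
	c->command_type = type;
	c->cmd_len = offsetof(struct cmdh, param) + n;
	if (param)
		memcpy(c->param, param, n);
	hdr->block_length += c->cmd_len; /* next command would append here */
	return c;
}

int main(void)
{
	static uint32_t storage[64];	/* aligned backing buffer */
	uint8_t *buf = (uint8_t *)storage;
	struct acbh *hdr = (struct acbh *)buf;

	hdr->signature = 0x48424341;	/* "ACBH" */
	hdr->block_length = sizeof(*hdr); /* commands start after the header */
	hdr->first_cmd_index = hdr->block_length;
	acb_add_cmd(buf, hdr, 5 /* e.g. FLOW_CTL_ENABLE */, NULL, 0);
	return 0;
}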
@@ -129,15 +184,17 @@ void ipc_mux_netif_tx_flowctrl(struct mux_session *session, int idx, bool on)
}
static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
- struct mux_lite_cmdh *cmdh)
+ union mux_cmd_param param,
+ __le32 command_type, u8 if_id,
+ __le32 transaction_id)
{
struct mux_acb *acb = &ipc_mux->acb;
- switch (le32_to_cpu(cmdh->command_type)) {
+ switch (le32_to_cpu(command_type)) {
case MUX_CMD_OPEN_SESSION_RESP:
case MUX_CMD_CLOSE_SESSION_RESP:
/* Resume the control application. */
- acb->got_param = cmdh->param;
+ acb->got_param = param;
break;
case MUX_LITE_CMD_FLOW_CTL_ACK:
@@ -147,8 +204,16 @@ static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
if (ipc_mux->protocol != MUX_LITE)
return -EINVAL;
- dev_dbg(ipc_mux->dev, "if %u FLOW_CTL_ACK %u received",
- cmdh->if_id, le32_to_cpu(cmdh->transaction_id));
+ dev_dbg(ipc_mux->dev, "if_id %u FLOW_CTL_ACK %u received",
+ if_id, le32_to_cpu(transaction_id));
+ break;
+
+ case IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK:
+ /* This command type is not expected as response for
+ * Lite version of the protocol. So return non-zero.
+ */
+ if (ipc_mux->protocol == MUX_LITE)
+ return -EINVAL;
break;
default:
@@ -156,38 +221,39 @@ static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
}
acb->wanted_response = MUX_CMD_INVALID;
- acb->got_response = le32_to_cpu(cmdh->command_type);
+ acb->got_response = le32_to_cpu(command_type);
complete(&ipc_mux->channel->ul_sem);
return 0;
}
-static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux,
- struct mux_lite_cmdh *cmdh)
+static int ipc_mux_dl_cmds_decode_process(struct iosm_mux *ipc_mux,
+ union mux_cmd_param *param,
+ __le32 command_type, u8 if_id,
+ __le16 cmd_len, int size)
{
- union mux_cmd_param *param = &cmdh->param;
struct mux_session *session;
- int new_size;
+ struct hrtimer *adb_timer;
dev_dbg(ipc_mux->dev, "if_id[%d]: dlcmds decode process %d",
- cmdh->if_id, le32_to_cpu(cmdh->command_type));
+ if_id, le32_to_cpu(command_type));
- switch (le32_to_cpu(cmdh->command_type)) {
+ switch (le32_to_cpu(command_type)) {
case MUX_LITE_CMD_FLOW_CTL:
+ case IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE:
- if (cmdh->if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
+ if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
dev_err(ipc_mux->dev, "if_id [%d] not valid",
- cmdh->if_id);
+ if_id);
return -EINVAL; /* No session interface id. */
}
- session = &ipc_mux->session[cmdh->if_id];
+ session = &ipc_mux->session[if_id];
+ adb_timer = &ipc_mux->imem->adb_timer;
- new_size = offsetof(struct mux_lite_cmdh, param) +
- sizeof(param->flow_ctl);
if (param->flow_ctl.mask == cpu_to_le32(0xFFFFFFFF)) {
/* Backward Compatibility */
- if (cmdh->cmd_len == cpu_to_le16(new_size))
+ if (cmd_len == cpu_to_le16(size))
session->flow_ctl_mask =
le32_to_cpu(param->flow_ctl.mask);
else
@@ -197,6 +263,16 @@ static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux,
* to limit uplink session queueing
*/
session->net_tx_stop = true;
+
+ /* We have to call Finish ADB here.
+ * Otherwise any already queued data
+ * will be sent to CP when ADB is full
+ * for some other sessions.
+ */
+ if (ipc_mux->protocol == MUX_AGGREGATION) {
+ ipc_mux_ul_adb_finish(ipc_mux);
+ ipc_imem_hrtimer_stop(adb_timer);
+ }
/* Update the stats */
session->flow_ctl_en_cnt++;
} else if (param->flow_ctl.mask == 0) {
@@ -205,8 +281,10 @@ static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux,
* our internal Tx flag and enabling kernel
* flow control
*/
+ dev_dbg(ipc_mux->dev, "if_id[%u] flow_ctl mask 0x%08X",
+ if_id, le32_to_cpu(param->flow_ctl.mask));
/* Backward Compatibility */
- if (cmdh->cmd_len == cpu_to_le16(new_size))
+ if (cmd_len == cpu_to_le16(size))
session->flow_ctl_mask =
le32_to_cpu(param->flow_ctl.mask);
else
@@ -217,7 +295,10 @@ static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux,
break;
}
- dev_dbg(ipc_mux->dev, "if[%u] FLOW CTRL 0x%08X", cmdh->if_id,
+ ipc_mux->acc_adb_size = 0;
+ ipc_mux->acc_payload_size = 0;
+
+ dev_dbg(ipc_mux->dev, "if_id[%u] FLOW CTRL 0x%08X", if_id,
le32_to_cpu(param->flow_ctl.mask));
break;
@@ -235,12 +316,20 @@ static void ipc_mux_dl_cmd_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)skb->data;
__le32 trans_id = cmdh->transaction_id;
+ int size;
- if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh)) {
+ if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
+ cmdh->command_type, cmdh->if_id,
+ cmdh->transaction_id)) {
/* Unable to decode command response indicates the cmd_type
* may be a command instead of a response. So try decoding it.
*/
- if (!ipc_mux_dl_dlcmds_decode_process(ipc_mux, cmdh)) {
+ size = offsetof(struct mux_lite_cmdh, param) +
+ sizeof(cmdh->param.flow_ctl);
+ if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
+ cmdh->command_type,
+ cmdh->if_id,
+ cmdh->cmd_len, size)) {
/* Decoded command may need a response. Give the
* response according to the command type.
*/
@@ -349,7 +438,7 @@ static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
adgh = (struct mux_adgh *)block;
- if (adgh->signature != cpu_to_le32(MUX_SIG_ADGH)) {
+ if (adgh->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH)) {
dev_err(ipc_mux->dev, "invalid ADGH signature received");
return;
}
@@ -392,6 +481,192 @@ static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
ipc_mux->session[if_id].flush = 1;
}
+static void ipc_mux_dl_acbcmd_decode(struct iosm_mux *ipc_mux,
+ struct mux_cmdh *cmdh, int size)
+{
+ u32 link_st = IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT_RESP;
+ u32 fctl_dis = IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE;
+ u32 fctl_ena = IOSM_AGGR_MUX_CMD_FLOW_CTL_ENABLE;
+ u32 fctl_ack = IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK;
+ union mux_cmd_param *cmd_p = NULL;
+ u32 cmd = link_st;
+ u32 trans_id;
+
+ if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
+ cmdh->command_type, cmdh->if_id,
+ cmdh->cmd_len, size)) {
+ size = 0;
+ if (cmdh->command_type == cpu_to_le32(link_st)) {
+ cmd_p = &cmdh->param;
+ cmd_p->link_status_resp.response = MUX_CMD_RESP_SUCCESS;
+ } else if ((cmdh->command_type == cpu_to_le32(fctl_ena)) ||
+ (cmdh->command_type == cpu_to_le32(fctl_dis))) {
+ cmd = fctl_ack;
+ } else {
+ return;
+ }
+ trans_id = le32_to_cpu(cmdh->transaction_id);
+ ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
+ trans_id, cmd_p, size, false, true);
+ }
+}
+
+/* Decode an aggregated command block. */
+static void ipc_mux_dl_acb_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+{
+ struct mux_acbh *acbh;
+ struct mux_cmdh *cmdh;
+ u32 next_cmd_index;
+ u8 *block;
+ int size;
+
+ acbh = (struct mux_acbh *)(skb->data);
+ block = (u8 *)(skb->data);
+
+ next_cmd_index = le32_to_cpu(acbh->first_cmd_index);
+ next_cmd_index = array_index_nospec(next_cmd_index,
+ sizeof(struct mux_cmdh));
+
+ while (next_cmd_index != 0) {
+ cmdh = (struct mux_cmdh *)&block[next_cmd_index];
+ next_cmd_index = le32_to_cpu(cmdh->next_cmd_index);
+ if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
+ cmdh->command_type,
+ cmdh->if_id,
+ cmdh->transaction_id)) {
+ size = offsetof(struct mux_cmdh, param) +
+ sizeof(cmdh->param.flow_ctl);
+ ipc_mux_dl_acbcmd_decode(ipc_mux, cmdh, size);
+ }
+ }
+}
+
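ipc_mux_dl_acb_decode() above walks a chain of command tables inside a single buffer: each header stores the byte offset of the next one, and offset zero terminates the chain. A standalone model of that walk, with an added length check; the assumed tbl_hdr layout keeps only the link field:

#include <stdint.h>
#include <stddef.h>

struct tbl_hdr { uint32_t next_index; /* 0 == end of chain */ };

static int walk_tables(const uint8_t *block, size_t len, uint32_t first)
{
	uint32_t idx = first;
	int n = 0;

	while (idx) {
		if (idx + sizeof(struct tbl_hdr) > len)
			return -1;	/* malformed chain */
		const struct tbl_hdr *h = (const void *)(block + idx);
		n++;
		idx = h->next_index;
	}
	return n;
}

int main(void)
{
	static uint32_t buf[8];

	buf[1] = 8;	/* table at offset 4 links to offset 8 */
	buf[2] = 0;	/* table at offset 8 ends the chain */
	return walk_tables((const uint8_t *)buf, sizeof(buf), 4) == 2 ? 0 : 1;
}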
+/* process datagram */
+static int mux_dl_process_dg(struct iosm_mux *ipc_mux, struct mux_adbh *adbh,
+ struct mux_adth_dg *dg, struct sk_buff *skb,
+ int if_id, int nr_of_dg)
+{
+ u32 dl_head_pad_len = ipc_mux->session[if_id].dl_head_pad_len;
+ u32 packet_offset, i, rc;
+
+ for (i = 0; i < nr_of_dg; i++, dg++) {
+ if (le32_to_cpu(dg->datagram_index)
+ < sizeof(struct mux_adbh))
+ goto dg_error;
+
+ /* Is the packet inside of the ADB */
+ if (le32_to_cpu(dg->datagram_index) >=
+ le32_to_cpu(adbh->block_length)) {
+ goto dg_error;
+ } else {
+ packet_offset =
+ le32_to_cpu(dg->datagram_index) +
+ dl_head_pad_len;
+ /* Pass the packet to the netif layer. */
+ rc = ipc_mux_net_receive(ipc_mux, if_id, ipc_mux->wwan,
+ packet_offset,
+ dg->service_class,
+ skb);
+ if (rc)
+ goto dg_error;
+ }
+ }
+ return 0;
+dg_error:
+ return -1;
+}
+
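mux_dl_process_dg() above accepts a datagram only if its index lands after the 16-byte ADB header and before block_length. The two bounds checks as one standalone predicate:

#include <stdint.h>
#include <stdbool.h>

#define ADBH_SIZE 16u	/* sizeof(struct mux_adbh) per the layout above */

static bool dg_index_valid(uint32_t dg_index, uint32_t block_length)
{
	return dg_index >= ADBH_SIZE && dg_index < block_length;
}

int main(void)
{
	return dg_index_valid(16, 64) && !dg_index_valid(8, 64) &&
	       !dg_index_valid(64, 64) ? 0 : 1;
}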
+/* Decode an aggregated data block. */
+static void mux_dl_adb_decode(struct iosm_mux *ipc_mux,
+ struct sk_buff *skb)
+{
+ struct mux_adth_dg *dg;
+ struct iosm_wwan *wwan;
+ struct mux_adbh *adbh;
+ struct mux_adth *adth;
+ int nr_of_dg, if_id;
+ u32 adth_index;
+ u8 *block;
+
+ block = skb->data;
+ adbh = (struct mux_adbh *)block;
+
+ /* Process the aggregated datagram tables. */
+ adth_index = le32_to_cpu(adbh->first_table_index);
+
+ /* Has CP sent an empty ADB ? */
+ if (adth_index < 1) {
+ dev_err(ipc_mux->dev, "unexpected empty ADB");
+ goto adb_decode_err;
+ }
+
+ /* Loop through mixed session tables. */
+ while (adth_index) {
+ /* Get the reference to the table header. */
+ adth = (struct mux_adth *)(block + adth_index);
+
+ /* Get the interface id and map it to the netif id. */
+ if_id = adth->if_id;
+ if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
+ goto adb_decode_err;
+
+ if_id = array_index_nospec(if_id,
+ IPC_MEM_MUX_IP_SESSION_ENTRIES);
+
+ /* Is the session active ? */
+ wwan = ipc_mux->session[if_id].wwan;
+ if (!wwan)
+ goto adb_decode_err;
+
+ /* Consistency checks for aggregated datagram table. */
+ if (adth->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH))
+ goto adb_decode_err;
+
+ if (le16_to_cpu(adth->table_length) < (sizeof(struct mux_adth) -
+ sizeof(struct mux_adth_dg)))
+ goto adb_decode_err;
+
+ /* Calculate the number of datagrams. */
+ nr_of_dg = (le16_to_cpu(adth->table_length) -
+ sizeof(struct mux_adth) +
+ sizeof(struct mux_adth_dg)) /
+ sizeof(struct mux_adth_dg);
+
+ /* Is the datagram table empty ? */
+ if (nr_of_dg < 1) {
+ dev_err(ipc_mux->dev,
+ "adthidx=%u,nr_of_dg=%d,next_tblidx=%u",
+ adth_index, nr_of_dg,
+ le32_to_cpu(adth->next_table_index));
+
+ /* Move to the next aggregated datagram table. */
+ adth_index = le32_to_cpu(adth->next_table_index);
+ continue;
+ }
+
+ /* New aggregated datagram table. */
+ dg = &adth->dg;
+ if (mux_dl_process_dg(ipc_mux, adbh, dg, skb, if_id,
+ nr_of_dg) < 0)
+ goto adb_decode_err;
+
+ /* mark session for final flush */
+ ipc_mux->session[if_id].flush = 1;
+
+ /* Move to the next aggregated datagram table. */
+ adth_index = le32_to_cpu(adth->next_table_index);
+ }
+
+adb_decode_err:
+ return;
+}
+
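The datagram count in mux_dl_adb_decode() follows from struct mux_adth already embedding one mux_adth_dg, so nr_of_dg = (table_length - sizeof(adth) + sizeof(dg)) / sizeof(dg). A quick standalone check with the sizes implied by the layouts above (24 and 8 bytes):

#include <stdio.h>

int main(void)
{
	unsigned adth = 24, dg = 8;	/* sizes implied by the structs above */

	for (unsigned tl = 24; tl <= 40; tl += 8)
		printf("table_length=%u -> %u datagrams\n",
		       tl, (tl - adth + dg) / dg);
	return 0;	/* prints 1, 2, 3 */
}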
+/**
+ * ipc_mux_dl_decode - Route the DL packet through the IP MUX layer
+ * depending on Header.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @skb: Pointer to ipc_skb.
+ */
void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
u32 signature;
@@ -403,14 +678,18 @@ void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
signature = le32_to_cpup((__le32 *)skb->data);
switch (signature) {
- case MUX_SIG_ADGH:
+ case IOSM_AGGR_MUX_SIG_ADBH: /* Aggregated Data Block Header */
+ mux_dl_adb_decode(ipc_mux, skb);
+ break;
+ case IOSM_AGGR_MUX_SIG_ADGH:
ipc_mux_dl_adgh_decode(ipc_mux, skb);
break;
-
case MUX_SIG_FCTH:
ipc_mux_dl_fcth_decode(ipc_mux, skb->data);
break;
-
+ case IOSM_AGGR_MUX_SIG_ACBH: /* Aggregated Command Block Header */
+ ipc_mux_dl_acb_decode(ipc_mux, skb);
+ break;
case MUX_SIG_CMDH:
ipc_mux_dl_cmd_decode(ipc_mux, skb);
break;
@@ -427,7 +706,10 @@ static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux,
{
/* Take the first element of the free list. */
struct sk_buff *skb = skb_dequeue(&ul_adb->free_list);
+ u32 no_if = IPC_MEM_MUX_IP_SESSION_ENTRIES;
+ u32 *next_tb_id;
int qlt_size;
+ u32 if_id;
if (!skb)
return -EBUSY; /* Wait for a free ADB skb. */
@@ -436,7 +718,37 @@ static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux,
IPC_CB(skb)->op_type = (u8)UL_MUX_OP_ADB;
switch (type) {
- case MUX_SIG_ADGH:
+ case IOSM_AGGR_MUX_SIG_ADBH:
+ /* Save the ADB memory settings. */
+ ul_adb->dest_skb = skb;
+ ul_adb->buf = skb->data;
+ ul_adb->size = IPC_MEM_MAX_ADB_BUF_SIZE;
+
+ /* reset statistic counter */
+ ul_adb->if_cnt = 0;
+ ul_adb->payload_size = 0;
+ ul_adb->dg_cnt_total = 0;
+
+ /* Initialize the ADBH. */
+ ul_adb->adbh = (struct mux_adbh *)ul_adb->buf;
+ memset(ul_adb->adbh, 0, sizeof(struct mux_adbh));
+ ul_adb->adbh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADBH);
+ ul_adb->adbh->block_length =
+ cpu_to_le32(sizeof(struct mux_adbh));
+ next_tb_id = (unsigned int *)&ul_adb->adbh->first_table_index;
+ ul_adb->next_table_index = next_tb_id;
+
+ /* Clear the local copy of DGs for new ADB */
+ memset(ul_adb->dg, 0, sizeof(ul_adb->dg));
+
+ /* Clear the DG count and QLT updated status for new ADB */
+ for (if_id = 0; if_id < no_if; if_id++) {
+ ul_adb->dg_count[if_id] = 0;
+ ul_adb->qlt_updated[if_id] = 0;
+ }
+ break;
+
+ case IOSM_AGGR_MUX_SIG_ADGH:
/* Save the ADB memory settings. */
ul_adb->dest_skb = skb;
ul_adb->buf = skb->data;
@@ -506,6 +818,94 @@ static void ipc_mux_ul_adgh_finish(struct iosm_mux *ipc_mux)
str, bytes);
}
+static void ipc_mux_ul_encode_adth(struct iosm_mux *ipc_mux,
+ struct mux_adb *ul_adb, int *out_offset)
+{
+ int i, qlt_size, offset = *out_offset;
+ struct mux_qlth *p_adb_qlt;
+ struct mux_adth_dg *dg;
+ struct mux_adth *adth;
+ u16 adth_dg_size;
+ u32 *next_tb_id;
+
+ qlt_size = offsetof(struct mux_qlth, ql) +
+ MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
+
+ for (i = 0; i < ipc_mux->nr_sessions; i++) {
+ if (ul_adb->dg_count[i] > 0) {
+ adth_dg_size = offsetof(struct mux_adth, dg) +
+ ul_adb->dg_count[i] * sizeof(*dg);
+
+ *ul_adb->next_table_index = offset;
+ adth = (struct mux_adth *)&ul_adb->buf[offset];
+ next_tb_id = (unsigned int *)&adth->next_table_index;
+ ul_adb->next_table_index = next_tb_id;
+ offset += adth_dg_size;
+ adth->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH);
+ adth->if_id = i;
+ adth->table_length = cpu_to_le16(adth_dg_size);
+ adth_dg_size -= offsetof(struct mux_adth, dg);
+ memcpy(&adth->dg, ul_adb->dg[i], adth_dg_size);
+ ul_adb->if_cnt++;
+ }
+
+ if (ul_adb->qlt_updated[i]) {
+ *ul_adb->next_table_index = offset;
+ p_adb_qlt = (struct mux_qlth *)&ul_adb->buf[offset];
+ ul_adb->next_table_index =
+ (u32 *)&p_adb_qlt->next_table_index;
+ memcpy(p_adb_qlt, ul_adb->pp_qlt[i], qlt_size);
+ offset += qlt_size;
+ }
+ }
+ *out_offset = offset;
+}
+
+/**
+ * ipc_mux_ul_adb_finish - Add the TD of the aggregated session packets to TDR.
+ * @ipc_mux: Pointer to MUX data-struct.
+ */
+void ipc_mux_ul_adb_finish(struct iosm_mux *ipc_mux)
+{
+ bool ul_data_pend = false;
+ struct mux_adb *ul_adb;
+ unsigned long flags;
+ int offset;
+
+ ul_adb = &ipc_mux->ul_adb;
+ if (!ul_adb->dest_skb)
+ return;
+
+ offset = *ul_adb->next_table_index;
+ ipc_mux_ul_encode_adth(ipc_mux, ul_adb, &offset);
+ ul_adb->adbh->block_length = cpu_to_le32(offset);
+
+ if (le32_to_cpu(ul_adb->adbh->block_length) > ul_adb->size) {
+ ul_adb->dest_skb = NULL;
+ return;
+ }
+
+ *ul_adb->next_table_index = 0;
+ ul_adb->adbh->sequence_nr = cpu_to_le16(ipc_mux->adb_tx_sequence_nr++);
+ skb_put(ul_adb->dest_skb, le32_to_cpu(ul_adb->adbh->block_length));
+
+ spin_lock_irqsave(&(&ipc_mux->channel->ul_list)->lock, flags);
+ __skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
+ spin_unlock_irqrestore(&(&ipc_mux->channel->ul_list)->lock, flags);
+
+ ul_adb->dest_skb = NULL;
+ /* Updates the TDs with ul_list */
+ ul_data_pend = ipc_imem_ul_write_td(ipc_mux->imem);
+
+ /* Delay the doorbell irq */
+ if (ul_data_pend)
+ ipc_imem_td_update_timer_start(ipc_mux->imem);
+
+ ipc_mux->acc_adb_size += le32_to_cpu(ul_adb->adbh->block_length);
+ ipc_mux->acc_payload_size += ul_adb->payload_size;
+ ipc_mux->ul_data_pend_bytes += ul_adb->payload_size;
+}
+
/* Allocates an ADB from the free list and initializes it with ADBH */
static bool ipc_mux_ul_adb_allocate(struct iosm_mux *ipc_mux,
struct mux_adb *adb, int *size_needed,
@@ -688,7 +1088,7 @@ static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
while (nr_of_pkts > 0) {
/* get destination skb allocated */
if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
- MUX_SIG_ADGH)) {
+ IOSM_AGGR_MUX_SIG_ADGH)) {
dev_err(ipc_mux->dev, "no reserved memory for ADGH");
return -ENOMEM;
}
@@ -720,7 +1120,7 @@ static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
memcpy(adb->buf + offset + pad_len, src_skb->data,
src_skb->len);
- adb->adgh->signature = cpu_to_le32(MUX_SIG_ADGH);
+ adb->adgh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH);
adb->adgh->if_id = session_id;
adb->adgh->length =
cpu_to_le16(sizeof(struct mux_adgh) + pad_len +
@@ -762,6 +1162,187 @@ static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
return adb_updated;
}
+/**
+ * ipc_mux_ul_adb_update_ql - Adds Queue Level Table and Queue Level to ADB
+ * @ipc_mux: pointer to MUX instance data
+ * @p_adb: pointer to UL aggregated data block
+ * @session_id: session id
+ * @qlth_n_ql_size: Length (in bytes) of the queue level table (QLTH plus QL entries)
+ * @ul_list: pointer to skb buffer head
+ */
+void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb,
+ int session_id, int qlth_n_ql_size,
+ struct sk_buff_head *ul_list)
+{
+ int qlevel = ul_list->qlen;
+ struct mux_qlth *p_qlt;
+
+ p_qlt = (struct mux_qlth *)p_adb->pp_qlt[session_id];
+
+ /* Initialize QLTH if it has not been done yet */
+ if (p_adb->qlt_updated[session_id] == 0) {
+ p_qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
+ p_qlt->if_id = session_id;
+ p_qlt->table_length = cpu_to_le16(qlth_n_ql_size);
+ p_qlt->reserved = 0;
+ p_qlt->reserved2 = 0;
+ }
+
+ /* Update Queue Level information always */
+ p_qlt->ql.nr_of_bytes = cpu_to_le32(qlevel);
+ p_adb->qlt_updated[session_id] = 1;
+}
+
+/* Update the next table index. */
+static int mux_ul_dg_update_tbl_index(struct iosm_mux *ipc_mux,
+ int session_id,
+ struct sk_buff_head *ul_list,
+ struct mux_adth_dg *dg,
+ int aligned_size,
+ u32 qlth_n_ql_size,
+ struct mux_adb *adb,
+ struct sk_buff *src_skb)
+{
+ ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
+ qlth_n_ql_size, ul_list);
+ ipc_mux_ul_adb_finish(ipc_mux);
+ if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
+ IOSM_AGGR_MUX_SIG_ADBH)) {
+ dev_kfree_skb(src_skb);
+ return -ENOMEM;
+ }
+ ipc_mux->size_needed = le32_to_cpu(adb->adbh->block_length);
+
+ ipc_mux->size_needed += offsetof(struct mux_adth, dg);
+ ipc_mux->size_needed += qlth_n_ql_size;
+ ipc_mux->size_needed += sizeof(*dg) + aligned_size;
+ return 0;
+}
+
+/* Process encode session UL data. */
+static int mux_ul_dg_encode(struct iosm_mux *ipc_mux, struct mux_adb *adb,
+ struct mux_adth_dg *dg,
+ struct sk_buff_head *ul_list,
+ struct sk_buff *src_skb, int session_id,
+ int pkt_to_send, u32 qlth_n_ql_size,
+ int *out_offset, int head_pad_len)
+{
+ int aligned_size;
+ int offset = *out_offset;
+ unsigned long flags;
+ int nr_of_skb = 0;
+
+ while (pkt_to_send > 0) {
+ /* Peek at the head of the list. */
+ src_skb = skb_peek(ul_list);
+ if (!src_skb) {
+ dev_err(ipc_mux->dev,
+ "skb peek return NULL with count : %d",
+ pkt_to_send);
+ return -1;
+ }
+ aligned_size = ALIGN((head_pad_len + src_skb->len), 4);
+ ipc_mux->size_needed += sizeof(*dg) + aligned_size;
+
+ if (ipc_mux->size_needed > adb->size ||
+ ((ipc_mux->size_needed + ipc_mux->ul_data_pend_bytes) >=
+ IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B)) {
+ *adb->next_table_index = offset;
+ if (mux_ul_dg_update_tbl_index(ipc_mux, session_id,
+ ul_list, dg,
+ aligned_size,
+ qlth_n_ql_size, adb,
+ src_skb) < 0)
+ return -ENOMEM;
+ nr_of_skb = 0;
+ offset = le32_to_cpu(adb->adbh->block_length);
+ /* Load pointer to next available datagram entry */
+ dg = adb->dg[session_id] + adb->dg_count[session_id];
+ }
+ /* Add buffer without head padding to next pending transfer. */
+ memcpy(adb->buf + offset + head_pad_len,
+ src_skb->data, src_skb->len);
+ /* Setup datagram entry. */
+ dg->datagram_index = cpu_to_le32(offset);
+ dg->datagram_length = cpu_to_le16(src_skb->len + head_pad_len);
+ dg->service_class = (((struct sk_buff *)src_skb)->priority);
+ dg->reserved = 0;
+ adb->dg_cnt_total++;
+ adb->payload_size += le16_to_cpu(dg->datagram_length);
+ dg++;
+ adb->dg_count[session_id]++;
+ offset += aligned_size;
+ /* Remove the processed element and free it. */
+ spin_lock_irqsave(&ul_list->lock, flags);
+ src_skb = __skb_dequeue(ul_list);
+ spin_unlock_irqrestore(&ul_list->lock, flags);
+
+ dev_kfree_skb(src_skb);
+ nr_of_skb++;
+ pkt_to_send--;
+ }
+ *out_offset = offset;
+ return nr_of_skb;
+}
+
+/* Process encode session UL data to ADB. */
+static int mux_ul_adb_encode(struct iosm_mux *ipc_mux, int session_id,
+ struct mux_session *session,
+ struct sk_buff_head *ul_list, struct mux_adb *adb,
+ int pkt_to_send)
+{
+ int adb_updated = -EINVAL;
+ int head_pad_len, offset;
+ struct sk_buff *src_skb = NULL;
+ struct mux_adth_dg *dg;
+ u32 qlth_n_ql_size;
+
+ /* If any of the opened sessions has set Flow Control ON then limit the
+ * UL data to mux_flow_ctrl_high_thresh_b bytes
+ */
+ if (ipc_mux->ul_data_pend_bytes >=
+ IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B) {
+ ipc_mux_stop_tx_for_all_sessions(ipc_mux);
+ return adb_updated;
+ }
+
+ qlth_n_ql_size = offsetof(struct mux_qlth, ql) +
+ MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
+ head_pad_len = session->ul_head_pad_len;
+
+ if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
+ head_pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
+
+ if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
+ IOSM_AGGR_MUX_SIG_ADBH))
+ return -ENOMEM;
+
+ offset = le32_to_cpu(adb->adbh->block_length);
+
+ if (ipc_mux->size_needed == 0)
+ ipc_mux->size_needed = offset;
+
+ /* Calculate the size needed for ADTH, QLTH and QL */
+ if (adb->dg_count[session_id] == 0) {
+ ipc_mux->size_needed += offsetof(struct mux_adth, dg);
+ ipc_mux->size_needed += qlth_n_ql_size;
+ }
+
+ dg = adb->dg[session_id] + adb->dg_count[session_id];
+
+ if (mux_ul_dg_encode(ipc_mux, adb, dg, ul_list, src_skb,
+ session_id, pkt_to_send, qlth_n_ql_size, &offset,
+ head_pad_len) > 0) {
+ adb_updated = 1;
+ *adb->next_table_index = offset;
+ ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
+ qlth_n_ql_size, ul_list);
+ adb->adbh->block_length = cpu_to_le32(offset);
+ }
+
+ return adb_updated;
+}
+
bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
{
struct sk_buff_head *ul_list;
@@ -802,28 +1383,88 @@ bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
* -> try next session id.
*/
continue;
-
- updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id, session,
- ul_list, &ipc_mux->ul_adb,
- dg_n);
+ if (ipc_mux->protocol == MUX_LITE)
+ updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id,
+ session, ul_list,
+ &ipc_mux->ul_adb,
+ dg_n);
+ else
+ updated = mux_ul_adb_encode(ipc_mux, session_id,
+ session, ul_list,
+ &ipc_mux->ul_adb,
+ dg_n);
}
ipc_mux->adb_prep_ongoing = false;
return updated == 1;
}
-void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+/* Calculates the Payload from any given ADB. */
+static int ipc_mux_get_payload_from_adb(struct iosm_mux *ipc_mux,
+ struct mux_adbh *p_adbh)
{
- struct mux_adgh *adgh;
- u16 adgh_len;
+ struct mux_adth_dg *dg;
+ struct mux_adth *adth;
+ u32 payload_size = 0;
+ u32 next_table_idx;
+ int nr_of_dg, i;
+
+ /* Process the aggregated datagram tables. */
+ next_table_idx = le32_to_cpu(p_adbh->first_table_index);
+
+ if (next_table_idx < sizeof(struct mux_adbh)) {
+ dev_err(ipc_mux->dev, "unexpected empty ADB");
+ return payload_size;
+ }
- adgh = (struct mux_adgh *)skb->data;
- adgh_len = le16_to_cpu(adgh->length);
+ while (next_table_idx != 0) {
+ /* Get the reference to the table header. */
+ adth = (struct mux_adth *)((u8 *)p_adbh + next_table_idx);
- if (adgh->signature == cpu_to_le32(MUX_SIG_ADGH) &&
- ipc_mux->ul_flow == MUX_UL)
- ipc_mux->ul_data_pend_bytes = ipc_mux->ul_data_pend_bytes -
- adgh_len;
+ if (adth->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH)) {
+ nr_of_dg = (le16_to_cpu(adth->table_length) -
+ sizeof(struct mux_adth) +
+ sizeof(struct mux_adth_dg)) /
+ sizeof(struct mux_adth_dg);
+
+ if (nr_of_dg <= 0)
+ return payload_size;
+
+ dg = &adth->dg;
+
+ for (i = 0; i < nr_of_dg; i++, dg++) {
+ if (le32_to_cpu(dg->datagram_index) <
+ sizeof(struct mux_adbh)) {
+ return payload_size;
+ }
+ payload_size +=
+ le16_to_cpu(dg->datagram_length);
+ }
+ }
+ next_table_idx = le32_to_cpu(adth->next_table_index);
+ }
+
+ return payload_size;
+}
+
+void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+{
+ union mux_type_header hr;
+ u16 adgh_len;
+ int payload;
+
+ if (ipc_mux->protocol == MUX_LITE) {
+ hr.adgh = (struct mux_adgh *)skb->data;
+ adgh_len = le16_to_cpu(hr.adgh->length);
+ if (hr.adgh->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH) &&
+ ipc_mux->ul_flow == MUX_UL)
+ ipc_mux->ul_data_pend_bytes =
+ ipc_mux->ul_data_pend_bytes - adgh_len;
+ } else {
+ hr.adbh = (struct mux_adbh *)(skb->data);
+ payload = ipc_mux_get_payload_from_adb(ipc_mux, hr.adbh);
+ ipc_mux->ul_data_pend_bytes -= payload;
+ }
if (ipc_mux->ul_flow == MUX_UL)
dev_dbg(ipc_mux->dev, "ul_data_pend_bytes: %lld",
@@ -846,10 +1487,13 @@ static int ipc_mux_tq_ul_trigger_encode(struct iosm_imem *ipc_imem, int arg,
/* Add session UL data to a ADB and ADGH */
ul_data_pend = ipc_mux_ul_data_encode(ipc_mux);
- if (ul_data_pend)
+ if (ul_data_pend) {
+ if (ipc_mux->protocol == MUX_AGGREGATION)
+ ipc_imem_adb_timer_start(ipc_mux->imem);
+
/* Delay the doorbell irq */
ipc_imem_td_update_timer_start(ipc_mux->imem);
-
+ }
/* reset the debounce flag */
ipc_mux->ev_mux_net_transmit_pending = false;
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
index aae83db5cbb8..5d4e3b89542c 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
@@ -13,6 +13,39 @@
*/
#define MUX_QUEUE_LEVEL 1
+/* ADB finish timer value */
+#define IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC (500 * 1000)
+
+/* Enables the flow control (Flow is not allowed) */
+#define IOSM_AGGR_MUX_CMD_FLOW_CTL_ENABLE 5
+
+/* Disables the flow control (Flow is allowed) */
+#define IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE 6
+
+/* ACK the flow control command. Shall have the same Transaction ID as the
+ * matching FLOW_CTL command
+ */
+#define IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK 7
+
+/* Aggregation Protocol Command for report packet indicating link quality
+ */
+#define IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT 8
+
+/* Response to a report packet */
+#define IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT_RESP 9
+
+/* ACBH: Signature of the Aggregated Command Block Header. */
+#define IOSM_AGGR_MUX_SIG_ACBH 0x48424341
+
+/* ADTH: Signature of the Aggregated Datagram Table Header. */
+#define IOSM_AGGR_MUX_SIG_ADTH 0x48544441
+
+/* ADBH: Signature of the Aggregated Data Block Header. */
+#define IOSM_AGGR_MUX_SIG_ADBH 0x48424441
+
+/* ADGH: Signature of the Datagram Header. */
+#define IOSM_AGGR_MUX_SIG_ADGH 0x48474441
+
/* Size of the buffer for the IP MUX commands. */
#define MUX_MAX_UL_ACB_BUF_SIZE 256
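The four signatures defined above are simply the table names stored as little-endian ASCII. A standalone check, with the constants copied from the defines:

#include <stdio.h>
#include <stdint.h>

static void show(uint32_t sig)
{
	/* bytes read in order on a little-endian host */
	printf("%#010x = '%c%c%c%c'\n", (unsigned)sig,
	       sig & 0xff, (sig >> 8) & 0xff,
	       (sig >> 16) & 0xff, (sig >> 24) & 0xff);
}

int main(void)
{
	show(0x48424341);	/* ACBH */
	show(0x48544441);	/* ADTH */
	show(0x48424441);	/* ADBH */
	show(0x48474441);	/* ADGH */
	return 0;
}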
@@ -53,6 +86,85 @@
#define IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B (110 * 1024)
/**
+ * struct mux_cmdh - Structure of Command Header.
+ * @signature: Signature of the Command Header.
+ * @cmd_len: Length (in bytes) of the Aggregated Command Block.
+ * @if_id: ID of the interface the commands in the table belong to.
+ * @reserved: Reserved. Set to zero.
+ * @next_cmd_index: Index (in bytes) to the next command in the buffer.
+ * @command_type: Command enum. See the Session Management chapter for
+ * details.
+ * @transaction_id: The Transaction ID shall be unique to the command
+ * @param: Optional parameters used with the command.
+ */
+struct mux_cmdh {
+ __le32 signature;
+ __le16 cmd_len;
+ u8 if_id;
+ u8 reserved;
+ __le32 next_cmd_index;
+ __le32 command_type;
+ __le32 transaction_id;
+ union mux_cmd_param param;
+};
+
+/**
+ * struct mux_acbh - Structure of the Aggregated Command Block Header.
+ * @signature: Signature of the Aggregated Command Block Header.
+ * @reserved: Reserved bytes. Set to zero.
+ * @sequence_nr: Block sequence number.
+ * @block_length: Length (in bytes) of the Aggregated Command Block.
+ * @first_cmd_index: Index (in bytes) to the first command in the buffer.
+ */
+struct mux_acbh {
+ __le32 signature;
+ __le16 reserved;
+ __le16 sequence_nr;
+ __le32 block_length;
+ __le32 first_cmd_index;
+};
+
+/**
+ * struct mux_adbh - Structure of the Aggregated Data Block Header.
+ * @signature: Signature of the Aggregated Data Block Header.
+ * @reserved: Reserved bytes. Set to zero.
+ * @sequence_nr: Block sequence number.
+ * @block_length: Length (in bytes) of the Aggregated Data Block.
+ * @first_table_index: Index (in bytes) to the first Datagram Table in
+ * the buffer.
+ */
+struct mux_adbh {
+ __le32 signature;
+ __le16 reserved;
+ __le16 sequence_nr;
+ __le32 block_length;
+ __le32 first_table_index;
+};
+
+/**
+ * struct mux_adth - Structure of the Aggregated Datagram Table Header.
+ * @signature: Signature of the Aggregated Datagram Table Header.
+ * @table_length: Length (in bytes) of the datagram table.
+ * @if_id: ID of the interface the datagrams in the table
+ * belong to.
+ * @opt_ipv4v6: Indicates IPv4(=0)/IPv6(=1) hint.
+ * @reserved: Reserved bits. Set to zero.
+ * @next_table_index: Index (in bytes) to the next Datagram Table in
+ * the buffer.
+ * @reserved2: Reserved bytes. Set to zero
+ * @dg: datagram table with variable length
+ */
+struct mux_adth {
+ __le32 signature;
+ __le16 table_length;
+ u8 if_id;
+ u8 opt_ipv4v6;
+ __le32 next_table_index;
+ __le32 reserved2;
+ struct mux_adth_dg dg;
+};
+
+/**
* struct mux_adgh - Aggregated Datagram Header.
* @signature: Signature of the Aggregated Datagram Header(0x48474441)
* @length: Length (in bytes) of the datagram header. This length
@@ -129,11 +241,25 @@ struct ipc_mem_lite_gen_tbl {
};
/**
- * ipc_mux_dl_decode -Route the DL packet through the IP MUX layer
- * depending on Header.
- * @ipc_mux: Pointer to MUX data-struct
- * @skb: Pointer to ipc_skb.
+ * union mux_type_cmdh - Command header for MUX Lite and MUX aggregation
+ * @ack_lite: MUX Lite command header pointer
+ * @ack_aggr: Aggregated protocol command header pointer
*/
+union mux_type_cmdh {
+ struct mux_lite_cmdh *ack_lite;
+ struct mux_cmdh *ack_aggr;
+};
+
+/**
+ * union mux_type_header - MUX data header type
+ * @adgh: Aggregated Datagram Header pointer
+ * @adbh: Aggregated Data Block Header pointer
+ */
+union mux_type_header {
+ struct mux_adgh *adgh;
+ struct mux_adbh *adbh;
+};
+
void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb);
/**
@@ -147,7 +273,7 @@ void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb);
* @blocking: True for blocking send
* @respond: If true return transaction ID
*
- * Returns: 0 in success and failure value on error
+ * Returns: 0 on success and a failure value on error
*/
int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
u32 transaction_id, union mux_cmd_param *param,
@@ -190,4 +316,10 @@ bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux);
*/
void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb);
+void ipc_mux_ul_adb_finish(struct iosm_mux *ipc_mux);
+
+void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb,
+ int session_id, int qlth_n_ql_size,
+ struct sk_buff_head *ul_list);
+
#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
index d73894e2a84e..31f57b986df2 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
@@ -320,6 +320,7 @@ ret_fail:
static const struct pci_device_id iosm_ipc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7560_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7360_ID) },
{}
};
MODULE_DEVICE_TABLE(pci, iosm_ipc_ids);
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.h b/drivers/net/wwan/iosm/iosm_ipc_pcie.h
index 7d1f0cd7364c..844cf1fed994 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.h
@@ -14,6 +14,7 @@
/* Device ID */
#define INTEL_CP_DEVICE_7560_ID 0x7560
+#define INTEL_CP_DEVICE_7360_ID 0x7360
/* Define for BAR area usage */
#define IPC_DOORBELL_BAR0 0
diff --git a/drivers/net/wwan/qcom_bam_dmux.c b/drivers/net/wwan/qcom_bam_dmux.c
index 5dfa2eba6014..17d46f4d2913 100644
--- a/drivers/net/wwan/qcom_bam_dmux.c
+++ b/drivers/net/wwan/qcom_bam_dmux.c
@@ -755,7 +755,7 @@ static int __maybe_unused bam_dmux_runtime_resume(struct device *dev)
return 0;
dmux->tx = dma_request_chan(dev, "tx");
- if (IS_ERR(dmux->rx)) {
+ if (IS_ERR(dmux->tx)) {
dev_err(dev, "Failed to request TX DMA channel: %pe\n", dmux->tx);
dmux->tx = NULL;
bam_dmux_runtime_suspend(dev);
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index 1508dc2a497b..b8c7843730ed 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -160,6 +160,42 @@ struct dentry *wwan_get_debugfs_dir(struct device *parent)
return wwandev->debugfs_dir;
}
EXPORT_SYMBOL_GPL(wwan_get_debugfs_dir);
+
+static int wwan_dev_debugfs_match(struct device *dev, const void *dir)
+{
+ struct wwan_device *wwandev;
+
+ if (dev->type != &wwan_dev_type)
+ return 0;
+
+ wwandev = to_wwan_dev(dev);
+
+ return wwandev->debugfs_dir == dir;
+}
+
+static struct wwan_device *wwan_dev_get_by_debugfs(struct dentry *dir)
+{
+ struct device *dev;
+
+ dev = class_find_device(wwan_class, NULL, dir, wwan_dev_debugfs_match);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return to_wwan_dev(dev);
+}
+
+void wwan_put_debugfs_dir(struct dentry *dir)
+{
+ struct wwan_device *wwandev = wwan_dev_get_by_debugfs(dir);
+
+ if (WARN_ON(IS_ERR(wwandev)))
+ return;
+
+ /* wwan_dev_get_by_debugfs() also got a reference */
+ put_device(&wwandev->dev);
+ put_device(&wwandev->dev);
+}
+EXPORT_SYMBOL_GPL(wwan_put_debugfs_dir);
#endif
/* This function allocates and registers a new WWAN device OR if a WWAN device
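The double put_device() in wwan_put_debugfs_dir() above is intentional: wwan_dev_get_by_debugfs() takes a reference of its own via class_find_device(), on top of the one the earlier wwan_get_debugfs_dir() call handed to the caller. A toy refcount model of the pairing:

#include <assert.h>

static int refs;

static void get(void) { refs++; }
static void put(void) { refs--; }

int main(void)
{
	get();		/* wwan_get_debugfs_dir(): caller now holds a ref */
	get();		/* lookup inside wwan_put_debugfs_dir() takes another */
	put();		/* ... so the put path must drop both */
	put();
	assert(refs == 0);
	return 0;
}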
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 8b18246ad999..daa4e6106aac 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -424,14 +424,12 @@ static bool xennet_tx_buf_gc(struct netfront_queue *queue)
queue->tx_link[id] = TX_LINK_NONE;
skb = queue->tx_skbs[id];
queue->tx_skbs[id] = NULL;
- if (unlikely(gnttab_query_foreign_access(
- queue->grant_tx_ref[id]) != 0)) {
+ if (unlikely(!gnttab_end_foreign_access_ref(
+ queue->grant_tx_ref[id], GNTMAP_readonly))) {
dev_alert(dev,
"Grant still in use by backend domain\n");
goto err;
}
- gnttab_end_foreign_access_ref(
- queue->grant_tx_ref[id], GNTMAP_readonly);
gnttab_release_grant_reference(
&queue->gref_tx_head, queue->grant_tx_ref[id]);
queue->grant_tx_ref[id] = GRANT_INVALID_REF;
@@ -842,6 +840,28 @@ static int xennet_close(struct net_device *dev)
return 0;
}
+static void xennet_destroy_queues(struct netfront_info *info)
+{
+ unsigned int i;
+
+ for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
+ struct netfront_queue *queue = &info->queues[i];
+
+ if (netif_running(info->netdev))
+ napi_disable(&queue->napi);
+ netif_napi_del(&queue->napi);
+ }
+
+ kfree(info->queues);
+ info->queues = NULL;
+}
+
+static void xennet_uninit(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ xennet_destroy_queues(np);
+}
+
static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
{
unsigned long flags;
@@ -968,7 +988,6 @@ static int xennet_get_responses(struct netfront_queue *queue,
struct device *dev = &queue->info->netdev->dev;
struct bpf_prog *xdp_prog;
struct xdp_buff xdp;
- unsigned long ret;
int slots = 1;
int err = 0;
u32 verdict;
@@ -1010,8 +1029,13 @@ static int xennet_get_responses(struct netfront_queue *queue,
goto next;
}
- ret = gnttab_end_foreign_access_ref(ref, 0);
- BUG_ON(!ret);
+ if (!gnttab_end_foreign_access_ref(ref, 0)) {
+ dev_alert(dev,
+ "Grant still in use by backend domain\n");
+ queue->info->broken = true;
+ dev_alert(dev, "Disabled for further use\n");
+ return -EINVAL;
+ }
gnttab_release_grant_reference(&queue->gref_rx_head, ref);
@@ -1232,6 +1256,10 @@ static int xennet_poll(struct napi_struct *napi, int budget)
&need_xdp_flush);
if (unlikely(err)) {
+ if (queue->info->broken) {
+ spin_unlock(&queue->rx_lock);
+ return 0;
+ }
err:
while ((skb = __skb_dequeue(&tmpq)))
__skb_queue_tail(&errq, skb);
@@ -1611,6 +1639,7 @@ static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
static const struct net_device_ops xennet_netdev_ops = {
+ .ndo_uninit = xennet_uninit,
.ndo_open = xennet_open,
.ndo_stop = xennet_close,
.ndo_start_xmit = xennet_start_xmit,
@@ -1895,7 +1924,7 @@ static int setup_netfront(struct xenbus_device *dev,
struct netfront_queue *queue, unsigned int feature_split_evtchn)
{
struct xen_netif_tx_sring *txs;
- struct xen_netif_rx_sring *rxs;
+ struct xen_netif_rx_sring *rxs = NULL;
grant_ref_t gref;
int err;
@@ -1915,21 +1944,21 @@ static int setup_netfront(struct xenbus_device *dev,
err = xenbus_grant_ring(dev, txs, 1, &gref);
if (err < 0)
- goto grant_tx_ring_fail;
+ goto fail;
queue->tx_ring_ref = gref;
rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
if (!rxs) {
err = -ENOMEM;
xenbus_dev_fatal(dev, err, "allocating rx ring page");
- goto alloc_rx_ring_fail;
+ goto fail;
}
SHARED_RING_INIT(rxs);
FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
err = xenbus_grant_ring(dev, rxs, 1, &gref);
if (err < 0)
- goto grant_rx_ring_fail;
+ goto fail;
queue->rx_ring_ref = gref;
if (feature_split_evtchn)
@@ -1942,22 +1971,28 @@ static int setup_netfront(struct xenbus_device *dev,
err = setup_netfront_single(queue);
if (err)
- goto alloc_evtchn_fail;
+ goto fail;
return 0;
/* If we fail to setup netfront, it is safe to just revoke access to
* granted pages because backend is not accessing it at this point.
*/
-alloc_evtchn_fail:
- gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
-grant_rx_ring_fail:
- free_page((unsigned long)rxs);
-alloc_rx_ring_fail:
- gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
-grant_tx_ring_fail:
- free_page((unsigned long)txs);
-fail:
+ fail:
+ if (queue->rx_ring_ref != GRANT_INVALID_REF) {
+ gnttab_end_foreign_access(queue->rx_ring_ref, 0,
+ (unsigned long)rxs);
+ queue->rx_ring_ref = GRANT_INVALID_REF;
+ } else {
+ free_page((unsigned long)rxs);
+ }
+ if (queue->tx_ring_ref != GRANT_INVALID_REF) {
+ gnttab_end_foreign_access(queue->tx_ring_ref, 0,
+ (unsigned long)txs);
+ queue->tx_ring_ref = GRANT_INVALID_REF;
+ } else {
+ free_page((unsigned long)txs);
+ }
return err;
}
@@ -2103,22 +2138,6 @@ error:
return err;
}
-static void xennet_destroy_queues(struct netfront_info *info)
-{
- unsigned int i;
-
- for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
- struct netfront_queue *queue = &info->queues[i];
-
- if (netif_running(info->netdev))
- napi_disable(&queue->napi);
- netif_napi_del(&queue->napi);
- }
-
- kfree(info->queues);
- info->queues = NULL;
-}
-
static int xennet_create_page_pool(struct netfront_queue *queue)
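The xen-netfront changes above replace the racy query-then-end grant teardown with a single gnttab_end_foreign_access_ref() call whose return value reports whether the grant could actually be revoked. A userspace model of the resulting control flow; the stand-in function fakes the hypervisor's answer:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for gnttab_end_foreign_access_ref(): returns false while the
 * backend still maps the grant. */
static bool end_foreign_access_ref(bool backend_done)
{
	return backend_done;
}

static int reclaim(bool backend_done)
{
	if (!end_foreign_access_ref(backend_done)) {
		fprintf(stderr, "grant still in use by backend\n");
		return -1;	/* do NOT free or reuse the page */
	}
	return 0;		/* safe to release the reference */
}

int main(void)
{
	return reclaim(true) == 0 && reclaim(false) == -1 ? 0 : 1;
}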
diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c
index 5b833a9a83f8..a38e2fcdfd39 100644
--- a/drivers/nfc/nfcmrvl/spi.c
+++ b/drivers/nfc/nfcmrvl/spi.c
@@ -174,12 +174,11 @@ static int nfcmrvl_spi_probe(struct spi_device *spi)
return 0;
}
-static int nfcmrvl_spi_remove(struct spi_device *spi)
+static void nfcmrvl_spi_remove(struct spi_device *spi)
{
struct nfcmrvl_spi_drv_data *drv_data = spi_get_drvdata(spi);
nfcmrvl_nci_unregister_dev(drv_data->priv);
- return 0;
}
static const struct of_device_id of_nfcmrvl_spi_match[] __maybe_unused = {
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index d7db1a0e6be1..00d8ea6dcb5d 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -1612,7 +1612,9 @@ free_nfc_dev:
nfc_digital_free_device(dev->nfc_digital_dev);
error:
+ usb_kill_urb(dev->in_urb);
usb_free_urb(dev->in_urb);
+ usb_kill_urb(dev->out_urb);
usb_free_urb(dev->out_urb);
usb_put_dev(dev->udev);
diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
index 4e723992e74c..169eacc0a32a 100644
--- a/drivers/nfc/st-nci/spi.c
+++ b/drivers/nfc/st-nci/spi.c
@@ -263,13 +263,11 @@ static int st_nci_spi_probe(struct spi_device *dev)
return r;
}
-static int st_nci_spi_remove(struct spi_device *dev)
+static void st_nci_spi_remove(struct spi_device *dev)
{
struct st_nci_spi_phy *phy = spi_get_drvdata(dev);
ndlc_remove(phy->ndlc);
-
- return 0;
}
static struct spi_device_id st_nci_spi_id_table[] = {
diff --git a/drivers/nfc/st-nci/vendor_cmds.c b/drivers/nfc/st-nci/vendor_cmds.c
index 30d2912d1a05..6335d7afca24 100644
--- a/drivers/nfc/st-nci/vendor_cmds.c
+++ b/drivers/nfc/st-nci/vendor_cmds.c
@@ -456,7 +456,7 @@ static const struct nfc_vendor_cmd st_nci_vendor_cmds[] = {
int st_nci_vendor_cmds_init(struct nci_dev *ndev)
{
- return nfc_set_vendor_cmds(ndev->nfc_dev, st_nci_vendor_cmds,
+ return nci_set_vendor_cmds(ndev, st_nci_vendor_cmds,
sizeof(st_nci_vendor_cmds));
}
EXPORT_SYMBOL(st_nci_vendor_cmds_init);
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index a86b5edfc7ce..42dc0e5eb161 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -570,8 +570,7 @@ static int st21nfca_hci_i2c_remove(struct i2c_client *client)
if (phy->powered)
st21nfca_hci_i2c_disable(phy);
- if (phy->pending_skb)
- kfree_skb(phy->pending_skb);
+ kfree_skb(phy->pending_skb);
return 0;
}
diff --git a/drivers/nfc/st21nfca/vendor_cmds.c b/drivers/nfc/st21nfca/vendor_cmds.c
index 74882866dbaf..bfa418d4c6b0 100644
--- a/drivers/nfc/st21nfca/vendor_cmds.c
+++ b/drivers/nfc/st21nfca/vendor_cmds.c
@@ -358,7 +358,7 @@ int st21nfca_vendor_cmds_init(struct nfc_hci_dev *hdev)
struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
init_completion(&info->vendor_info.req_completion);
- return nfc_set_vendor_cmds(hdev->ndev, st21nfca_vendor_cmds,
- sizeof(st21nfca_vendor_cmds));
+ return nfc_hci_set_vendor_cmds(hdev, st21nfca_vendor_cmds,
+ sizeof(st21nfca_vendor_cmds));
}
EXPORT_SYMBOL(st21nfca_vendor_cmds_init);
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
index b23f47936473..ed704bb77226 100644
--- a/drivers/nfc/st95hf/core.c
+++ b/drivers/nfc/st95hf/core.c
@@ -1198,7 +1198,7 @@ err_disable_regulator:
return ret;
}
-static int st95hf_remove(struct spi_device *nfc_spi_dev)
+static void st95hf_remove(struct spi_device *nfc_spi_dev)
{
int result = 0;
unsigned char reset_cmd = ST95HF_COMMAND_RESET;
@@ -1236,8 +1236,6 @@ static int st95hf_remove(struct spi_device *nfc_spi_dev)
/* disable regulator */
if (stcontext->st95hf_supply)
regulator_disable(stcontext->st95hf_supply);
-
- return 0;
}
/* Register as SPI protocol driver */
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index 29ca9c328df2..21d68664fe08 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -2144,7 +2144,7 @@ err_destroy_lock:
return ret;
}
-static int trf7970a_remove(struct spi_device *spi)
+static void trf7970a_remove(struct spi_device *spi)
{
struct trf7970a *trf = spi_get_drvdata(spi);
@@ -2160,8 +2160,6 @@ static int trf7970a_remove(struct spi_device *spi)
regulator_disable(trf->regulator);
mutex_destroy(&trf->lock);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.c b/drivers/ntb/hw/intel/ntb_hw_gen4.c
index fede05151f69..4081fc538ff4 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen4.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen4.c
@@ -168,6 +168,18 @@ static enum ntb_topo gen4_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
return NTB_TOPO_NONE;
}
+static enum ntb_topo spr_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
+{
+ switch (ppd & SPR_PPD_TOPO_MASK) {
+ case SPR_PPD_TOPO_B2B_USD:
+ return NTB_TOPO_B2B_USD;
+ case SPR_PPD_TOPO_B2B_DSD:
+ return NTB_TOPO_B2B_DSD;
+ }
+
+ return NTB_TOPO_NONE;
+}
+
int gen4_init_dev(struct intel_ntb_dev *ndev)
{
struct pci_dev *pdev = ndev->ntb.pdev;
@@ -183,7 +195,10 @@ int gen4_init_dev(struct intel_ntb_dev *ndev)
}
ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET);
- ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1);
+ if (pdev_is_ICX(pdev))
+ ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1);
+ else if (pdev_is_SPR(pdev))
+ ndev->ntb.topo = spr_ppd_topo(ndev, ppd1);
dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1,
ntb_topo_string(ndev->ntb.topo));
if (ndev->ntb.topo == NTB_TOPO_NONE)
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.h b/drivers/ntb/hw/intel/ntb_hw_gen4.h
index 3fcd3fdce9ed..f91323eaf5ce 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen4.h
+++ b/drivers/ntb/hw/intel/ntb_hw_gen4.h
@@ -49,10 +49,14 @@
#define GEN4_PPD_CLEAR_TRN 0x0001
#define GEN4_PPD_LINKTRN 0x0008
#define GEN4_PPD_CONN_MASK 0x0300
+#define SPR_PPD_CONN_MASK 0x0700
#define GEN4_PPD_CONN_B2B 0x0200
#define GEN4_PPD_DEV_MASK 0x1000
#define GEN4_PPD_DEV_DSD 0x1000
#define GEN4_PPD_DEV_USD 0x0000
+#define SPR_PPD_DEV_MASK 0x4000
+#define SPR_PPD_DEV_DSD 0x4000
+#define SPR_PPD_DEV_USD 0x0000
#define GEN4_LINK_CTRL_LINK_DISABLE 0x0010
#define GEN4_SLOTSTS 0xb05a
@@ -62,6 +66,10 @@
#define GEN4_PPD_TOPO_B2B_USD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_USD)
#define GEN4_PPD_TOPO_B2B_DSD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_DSD)
+#define SPR_PPD_TOPO_MASK (SPR_PPD_CONN_MASK | SPR_PPD_DEV_MASK)
+#define SPR_PPD_TOPO_B2B_USD (GEN4_PPD_CONN_B2B | SPR_PPD_DEV_USD)
+#define SPR_PPD_TOPO_B2B_DSD (GEN4_PPD_CONN_B2B | SPR_PPD_DEV_DSD)
+
#define GEN4_DB_COUNT 32
#define GEN4_DB_LINK 32
#define GEN4_DB_LINK_BIT BIT_ULL(GEN4_DB_LINK)
@@ -112,4 +120,12 @@ static inline int pdev_is_ICX(struct pci_dev *pdev)
return 0;
}
+static inline int pdev_is_SPR(struct pci_dev *pdev)
+{
+ if (pdev_is_gen4(pdev) &&
+ pdev->revision > PCI_DEVICE_REVISION_ICX_MAX)
+ return 1;
+ return 0;
+}
+
#endif
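Worked decode of the SPR topology masks added above: SPR_PPD_TOPO_MASK is 0x0700 | 0x4000 = 0x4700, and the two B2B topologies differ only in the 0x4000 device bit. Standalone check:

#include <stdio.h>

#define GEN4_PPD_CONN_B2B 0x0200
#define SPR_PPD_CONN_MASK 0x0700
#define SPR_PPD_DEV_MASK  0x4000
#define SPR_PPD_DEV_DSD   0x4000
#define SPR_PPD_DEV_USD   0x0000
#define SPR_PPD_TOPO_MASK (SPR_PPD_CONN_MASK | SPR_PPD_DEV_MASK)

int main(void)
{
	unsigned usd = GEN4_PPD_CONN_B2B | SPR_PPD_DEV_USD;
	unsigned dsd = GEN4_PPD_CONN_B2B | SPR_PPD_DEV_DSD;

	/* prints mask=0x4700 usd=0x0200 dsd=0x4200 */
	printf("mask=%#06x usd=%#06x dsd=%#06x\n",
	       SPR_PPD_TOPO_MASK, usd & SPR_PPD_TOPO_MASK,
	       dsd & SPR_PPD_TOPO_MASK);
	return 0;
}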
diff --git a/drivers/ntb/msi.c b/drivers/ntb/msi.c
index dd683cb58d09..6295e55ef85e 100644
--- a/drivers/ntb/msi.c
+++ b/drivers/ntb/msi.c
@@ -33,7 +33,6 @@ int ntb_msi_init(struct ntb_dev *ntb,
{
phys_addr_t mw_phys_addr;
resource_size_t mw_size;
- size_t struct_size;
int peer_widx;
int peers;
int ret;
@@ -43,9 +42,8 @@ int ntb_msi_init(struct ntb_dev *ntb,
if (peers <= 0)
return -EINVAL;
- struct_size = sizeof(*ntb->msi) + sizeof(*ntb->msi->peer_mws) * peers;
-
- ntb->msi = devm_kzalloc(&ntb->dev, struct_size, GFP_KERNEL);
+ ntb->msi = devm_kzalloc(&ntb->dev, struct_size(ntb->msi, peer_mws, peers),
+ GFP_KERNEL);
if (!ntb->msi)
return -ENOMEM;
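The ntb_msi_init() change above swaps an open-coded sizeof(*p) + n * sizeof(elem) for struct_size(), which cannot silently overflow. A userspace equivalent with the overflow check made explicit; the struct layout is a stand-in:

#include <stdint.h>
#include <stddef.h>
#include <stdlib.h>

struct msi_desc { int irq; };
struct ntb_msi_model {
	uint64_t base;
	struct msi_desc peer_mws[];	/* flexible array, one per peer */
};

static void *alloc_msi(size_t peers)
{
	/* reject peers counts whose byte total would overflow size_t */
	if (peers > (SIZE_MAX - sizeof(struct ntb_msi_model)) /
		    sizeof(struct msi_desc))
		return NULL;
	return calloc(1, sizeof(struct ntb_msi_model) +
			 peers * sizeof(struct msi_desc));
}

int main(void)
{
	void *p = alloc_msi(4);

	free(p);
	return alloc_msi(SIZE_MAX) == NULL ? 0 : 1;
}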
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 228c33b8d1d6..0a3873833594 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -6,7 +6,6 @@
#include <linux/blkdev.h>
#include <linux/fs.h>
-#include <linux/genhd.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nd.h>
@@ -89,10 +88,9 @@ static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
*/
cur_len = min(len, bv.bv_len);
- iobuf = kmap_atomic(bv.bv_page);
- err = ndbr->do_io(ndbr, dev_offset, iobuf + bv.bv_offset,
- cur_len, rw);
- kunmap_atomic(iobuf);
+ iobuf = bvec_kmap_local(&bv);
+ err = ndbr->do_io(ndbr, dev_offset, iobuf, cur_len, rw);
+ kunmap_local(iobuf);
if (err)
return err;
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index da3f007a1211..9613e54c7a67 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -11,7 +11,6 @@
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
-#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
@@ -1164,17 +1163,15 @@ static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
*/
cur_len = min(len, bv.bv_len);
- mem = kmap_atomic(bv.bv_page);
+ mem = bvec_kmap_local(&bv);
if (rw)
- ret = arena_write_bytes(arena, meta_nsoff,
- mem + bv.bv_offset, cur_len,
+ ret = arena_write_bytes(arena, meta_nsoff, mem, cur_len,
NVDIMM_IO_ATOMIC);
else
- ret = arena_read_bytes(arena, meta_nsoff,
- mem + bv.bv_offset, cur_len,
+ ret = arena_read_bytes(arena, meta_nsoff, mem, cur_len,
NVDIMM_IO_ATOMIC);
- kunmap_atomic(mem);
+ kunmap_local(mem);
if (ret)
return ret;
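Both nvdimm conversions above move from kmap_atomic(bv.bv_page) plus a manual bv_offset addition to bvec_kmap_local(&bv), which returns a pointer already advanced to the data. A small model of that equivalence:

#include <assert.h>
#include <stdint.h>

struct bio_vec_model { uint8_t *page; unsigned bv_offset, bv_len; };

static uint8_t *kmap_atomic_model(struct bio_vec_model *bv)
{
	return bv->page;		/* caller must add bv_offset */
}

static uint8_t *bvec_kmap_local_model(struct bio_vec_model *bv)
{
	return bv->page + bv->bv_offset; /* offset folded in */
}

int main(void)
{
	uint8_t page[4096];
	struct bio_vec_model bv = { page, 128, 512 };

	assert(kmap_atomic_model(&bv) + bv.bv_offset ==
	       bvec_kmap_local_model(&bv));
	return 0;
}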
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 8b52e5144f08..e5a58520d398 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -4,7 +4,6 @@
*/
#include <linux/blkdev.h>
#include <linux/device.h>
-#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/fs.h>
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 9dc7f3edd42b..5bbe31b08581 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -11,7 +11,6 @@
#include <linux/blkdev.h>
#include <linux/fcntl.h>
#include <linux/async.h>
-#include <linux/genhd.h>
#include <linux/ndctl.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
index 10351d5b49fa..c6a648fd8744 100644
--- a/drivers/nvdimm/nd_virtio.c
+++ b/drivers/nvdimm/nd_virtio.c
@@ -105,12 +105,12 @@ int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
* parent bio. Otherwise directly call nd_region flush.
*/
if (bio && bio->bi_iter.bi_sector != -1) {
- struct bio *child = bio_alloc(GFP_ATOMIC, 0);
+ struct bio *child = bio_alloc(bio->bi_bdev, 0, REQ_PREFLUSH,
+ GFP_ATOMIC);
if (!child)
return -ENOMEM;
- bio_copy_dev(child, bio);
- child->bi_opf = REQ_PREFLUSH;
+ bio_clone_blkg_association(child, bio);
child->bi_iter.bi_sector = -1;
bio_chain(child, bio);
submit_bio(child);
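bio_alloc() now takes the target block device and the operation flags at
allocation time, so the separate bio_copy_dev() and bi_opf assignments go
away. A minimal sketch of the new signature (illustrative helper name):

    #include <linux/bio.h>

    /* allocate a zero-segment preflush bio aimed at bdev */
    static struct bio *alloc_flush_bio(struct block_device *bdev)
    {
            /*
             * bi_bdev and bi_opf are set by bio_alloc() itself; with
             * GFP_ATOMIC the allocation may still fail, so the caller
             * must check for NULL.
             */
            return bio_alloc(bdev, 0, REQ_PREFLUSH, GFP_ATOMIC);
    }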
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 58eda16f5c53..c31e184bfa45 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -5,7 +5,6 @@
#include <linux/memremap.h>
#include <linux/blkdev.h>
#include <linux/device.h>
-#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/fs.h>
diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h
index 59cfe13ea8a8..1f51a2361429 100644
--- a/drivers/nvdimm/pmem.h
+++ b/drivers/nvdimm/pmem.h
@@ -3,6 +3,7 @@
#define __NVDIMM_PMEM_H__
#include <linux/page-flags.h>
#include <linux/badblocks.h>
+#include <linux/memremap.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/fs.h>
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index dc0450ca23a3..d6d056963c06 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -24,6 +24,14 @@ config NVME_MULTIPATH
/dev/nvmeXnY device will show up for each NVMe namespace,
even if it is accessible through multiple controllers.
+config NVME_VERBOSE_ERRORS
+ bool "NVMe verbose error reporting"
+ depends on NVME_CORE
+ help
+ This option enables verbose reporting for NVMe errors. The
+ error translation table will grow the kernel image size by
+ about 4 KB.
+
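With the option enabled, constants.c below is compiled in; otherwise the
stubs at the bottom of nvme.h return generic strings. A config fragment for
reference:

    CONFIG_NVME_CORE=y
    CONFIG_NVME_VERBOSE_ERRORS=y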
config NVME_HWMON
bool "NVMe hardware monitoring"
depends on (NVME_CORE=y && HWMON=y) || (NVME_CORE=m && HWMON)
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index dfaacd472e5d..476c5c988496 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
obj-$(CONFIG_NVME_FC) += nvme-fc.o
obj-$(CONFIG_NVME_TCP) += nvme-tcp.o
-nvme-core-y := core.o ioctl.o
+nvme-core-y := core.o ioctl.o constants.o
nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_BLK_DEV_ZONED) += zns.o
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
new file mode 100644
index 000000000000..7d49eb34b348
--- /dev/null
+++ b/drivers/nvme/host/constants.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVM Express device driver verbose errors
+ * Copyright (c) 2022, Oracle and/or its affiliates
+ */
+
+#include <linux/blkdev.h>
+#include "nvme.h"
+
+#ifdef CONFIG_NVME_VERBOSE_ERRORS
+static const char * const nvme_ops[] = {
+ [nvme_cmd_flush] = "Flush",
+ [nvme_cmd_write] = "Write",
+ [nvme_cmd_read] = "Read",
+ [nvme_cmd_write_uncor] = "Write Uncorrectable",
+ [nvme_cmd_compare] = "Compare",
+ [nvme_cmd_write_zeroes] = "Write Zeroes",
+ [nvme_cmd_dsm] = "Dataset Management",
+ [nvme_cmd_verify] = "Verify",
+ [nvme_cmd_resv_register] = "Reservation Register",
+ [nvme_cmd_resv_report] = "Reservation Report",
+ [nvme_cmd_resv_acquire] = "Reservation Acquire",
+ [nvme_cmd_resv_release] = "Reservation Release",
+ [nvme_cmd_zone_mgmt_send] = "Zone Management Send",
+ [nvme_cmd_zone_mgmt_recv] = "Zone Management Receive",
+ [nvme_cmd_zone_append] = "Zone Management Append",
+};
+
+static const char * const nvme_admin_ops[] = {
+ [nvme_admin_delete_sq] = "Delete SQ",
+ [nvme_admin_create_sq] = "Create SQ",
+ [nvme_admin_get_log_page] = "Get Log Page",
+ [nvme_admin_delete_cq] = "Delete CQ",
+ [nvme_admin_create_cq] = "Create CQ",
+ [nvme_admin_identify] = "Identify",
+ [nvme_admin_abort_cmd] = "Abort Command",
+ [nvme_admin_set_features] = "Set Features",
+ [nvme_admin_get_features] = "Get Features",
+ [nvme_admin_async_event] = "Async Event",
+ [nvme_admin_ns_mgmt] = "Namespace Management",
+ [nvme_admin_activate_fw] = "Activate Firmware",
+ [nvme_admin_download_fw] = "Download Firmware",
+ [nvme_admin_dev_self_test] = "Device Self Test",
+ [nvme_admin_ns_attach] = "Namespace Attach",
+ [nvme_admin_keep_alive] = "Keep Alive",
+ [nvme_admin_directive_send] = "Directive Send",
+ [nvme_admin_directive_recv] = "Directive Receive",
+ [nvme_admin_virtual_mgmt] = "Virtual Management",
+ [nvme_admin_nvme_mi_send] = "NVMe Send MI",
+ [nvme_admin_nvme_mi_recv] = "NVMe Receive MI",
+ [nvme_admin_dbbuf] = "Doorbell Buffer Config",
+ [nvme_admin_format_nvm] = "Format NVM",
+ [nvme_admin_security_send] = "Security Send",
+ [nvme_admin_security_recv] = "Security Receive",
+ [nvme_admin_sanitize_nvm] = "Sanitize NVM",
+ [nvme_admin_get_lba_status] = "Get LBA Status",
+};
+
+static const char * const nvme_statuses[] = {
+ [NVME_SC_SUCCESS] = "Success",
+ [NVME_SC_INVALID_OPCODE] = "Invalid Command Opcode",
+ [NVME_SC_INVALID_FIELD] = "Invalid Field in Command",
+ [NVME_SC_CMDID_CONFLICT] = "Command ID Conflict",
+ [NVME_SC_DATA_XFER_ERROR] = "Data Transfer Error",
+ [NVME_SC_POWER_LOSS] = "Commands Aborted due to Power Loss Notification",
+ [NVME_SC_INTERNAL] = "Internal Error",
+ [NVME_SC_ABORT_REQ] = "Command Abort Requested",
+ [NVME_SC_ABORT_QUEUE] = "Command Aborted due to SQ Deletion",
+ [NVME_SC_FUSED_FAIL] = "Command Aborted due to Failed Fused Command",
+ [NVME_SC_FUSED_MISSING] = "Command Aborted due to Missing Fused Command",
+ [NVME_SC_INVALID_NS] = "Invalid Namespace or Format",
+ [NVME_SC_CMD_SEQ_ERROR] = "Command Sequence Error",
+ [NVME_SC_SGL_INVALID_LAST] = "Invalid SGL Segment Descriptor",
+ [NVME_SC_SGL_INVALID_COUNT] = "Invalid Number of SGL Descriptors",
+ [NVME_SC_SGL_INVALID_DATA] = "Data SGL Length Invalid",
+ [NVME_SC_SGL_INVALID_METADATA] = "Metadata SGL Length Invalid",
+ [NVME_SC_SGL_INVALID_TYPE] = "SGL Descriptor Type Invalid",
+ [NVME_SC_CMB_INVALID_USE] = "Invalid Use of Controller Memory Buffer",
+ [NVME_SC_PRP_INVALID_OFFSET] = "PRP Offset Invalid",
+ [NVME_SC_ATOMIC_WU_EXCEEDED] = "Atomic Write Unit Exceeded",
+ [NVME_SC_OP_DENIED] = "Operation Denied",
+ [NVME_SC_SGL_INVALID_OFFSET] = "SGL Offset Invalid",
+ [NVME_SC_RESERVED] = "Reserved",
+ [NVME_SC_HOST_ID_INCONSIST] = "Host Identifier Inconsistent Format",
+ [NVME_SC_KA_TIMEOUT_EXPIRED] = "Keep Alive Timeout Expired",
+ [NVME_SC_KA_TIMEOUT_INVALID] = "Keep Alive Timeout Invalid",
+ [NVME_SC_ABORTED_PREEMPT_ABORT] = "Command Aborted due to Preempt and Abort",
+ [NVME_SC_SANITIZE_FAILED] = "Sanitize Failed",
+ [NVME_SC_SANITIZE_IN_PROGRESS] = "Sanitize In Progress",
+ [NVME_SC_SGL_INVALID_GRANULARITY] = "SGL Data Block Granularity Invalid",
+ [NVME_SC_CMD_NOT_SUP_CMB_QUEUE] = "Command Not Supported for Queue in CMB",
+ [NVME_SC_NS_WRITE_PROTECTED] = "Namespace is Write Protected",
+ [NVME_SC_CMD_INTERRUPTED] = "Command Interrupted",
+ [NVME_SC_TRANSIENT_TR_ERR] = "Transient Transport Error",
+ [NVME_SC_INVALID_IO_CMD_SET] = "Invalid IO Command Set",
+ [NVME_SC_LBA_RANGE] = "LBA Out of Range",
+ [NVME_SC_CAP_EXCEEDED] = "Capacity Exceeded",
+ [NVME_SC_NS_NOT_READY] = "Namespace Not Ready",
+ [NVME_SC_RESERVATION_CONFLICT] = "Reservation Conflict",
+ [NVME_SC_FORMAT_IN_PROGRESS] = "Format In Progress",
+ [NVME_SC_CQ_INVALID] = "Completion Queue Invalid",
+ [NVME_SC_QID_INVALID] = "Invalid Queue Identifier",
+ [NVME_SC_QUEUE_SIZE] = "Invalid Queue Size",
+ [NVME_SC_ABORT_LIMIT] = "Abort Command Limit Exceeded",
+ [NVME_SC_ABORT_MISSING] = "Reserved", /* XXX */
+ [NVME_SC_ASYNC_LIMIT] = "Asynchronous Event Request Limit Exceeded",
+ [NVME_SC_FIRMWARE_SLOT] = "Invalid Firmware Slot",
+ [NVME_SC_FIRMWARE_IMAGE] = "Invalid Firmware Image",
+ [NVME_SC_INVALID_VECTOR] = "Invalid Interrupt Vector",
+ [NVME_SC_INVALID_LOG_PAGE] = "Invalid Log Page",
+ [NVME_SC_INVALID_FORMAT] = "Invalid Format",
+ [NVME_SC_FW_NEEDS_CONV_RESET] = "Firmware Activation Requires Conventional Reset",
+ [NVME_SC_INVALID_QUEUE] = "Invalid Queue Deletion",
+ [NVME_SC_FEATURE_NOT_SAVEABLE] = "Feature Identifier Not Saveable",
+ [NVME_SC_FEATURE_NOT_CHANGEABLE] = "Feature Not Changeable",
+ [NVME_SC_FEATURE_NOT_PER_NS] = "Feature Not Namespace Specific",
+ [NVME_SC_FW_NEEDS_SUBSYS_RESET] = "Firmware Activation Requires NVM Subsystem Reset",
+ [NVME_SC_FW_NEEDS_RESET] = "Firmware Activation Requires Reset",
+ [NVME_SC_FW_NEEDS_MAX_TIME] = "Firmware Activation Requires Maximum Time Violation",
+ [NVME_SC_FW_ACTIVATE_PROHIBITED] = "Firmware Activation Prohibited",
+ [NVME_SC_OVERLAPPING_RANGE] = "Overlapping Range",
+ [NVME_SC_NS_INSUFFICIENT_CAP] = "Namespace Insufficient Capacity",
+ [NVME_SC_NS_ID_UNAVAILABLE] = "Namespace Identifier Unavailable",
+ [NVME_SC_NS_ALREADY_ATTACHED] = "Namespace Already Attached",
+ [NVME_SC_NS_IS_PRIVATE] = "Namespace Is Private",
+ [NVME_SC_NS_NOT_ATTACHED] = "Namespace Not Attached",
+ [NVME_SC_THIN_PROV_NOT_SUPP] = "Thin Provisioning Not Supported",
+ [NVME_SC_CTRL_LIST_INVALID] = "Controller List Invalid",
+ [NVME_SC_SELT_TEST_IN_PROGRESS] = "Device Self-test In Progress",
+ [NVME_SC_BP_WRITE_PROHIBITED] = "Boot Partition Write Prohibited",
+ [NVME_SC_CTRL_ID_INVALID] = "Invalid Controller Identifier",
+ [NVME_SC_SEC_CTRL_STATE_INVALID] = "Invalid Secondary Controller State",
+ [NVME_SC_CTRL_RES_NUM_INVALID] = "Invalid Number of Controller Resources",
+ [NVME_SC_RES_ID_INVALID] = "Invalid Resource Identifier",
+ [NVME_SC_PMR_SAN_PROHIBITED] = "Sanitize Prohibited",
+ [NVME_SC_ANA_GROUP_ID_INVALID] = "ANA Group Identifier Invalid",
+ [NVME_SC_ANA_ATTACH_FAILED] = "ANA Attach Failed",
+ [NVME_SC_BAD_ATTRIBUTES] = "Conflicting Attributes",
+ [NVME_SC_INVALID_PI] = "Invalid Protection Information",
+ [NVME_SC_READ_ONLY] = "Attempted Write to Read Only Range",
+ [NVME_SC_ONCS_NOT_SUPPORTED] = "ONCS Not Supported",
+ [NVME_SC_ZONE_BOUNDARY_ERROR] = "Zoned Boundary Error",
+ [NVME_SC_ZONE_FULL] = "Zone Is Full",
+ [NVME_SC_ZONE_READ_ONLY] = "Zone Is Read Only",
+ [NVME_SC_ZONE_OFFLINE] = "Zone Is Offline",
+ [NVME_SC_ZONE_INVALID_WRITE] = "Zone Invalid Write",
+ [NVME_SC_ZONE_TOO_MANY_ACTIVE] = "Too Many Active Zones",
+ [NVME_SC_ZONE_TOO_MANY_OPEN] = "Too Many Open Zones",
+ [NVME_SC_ZONE_INVALID_TRANSITION] = "Invalid Zone State Transition",
+ [NVME_SC_WRITE_FAULT] = "Write Fault",
+ [NVME_SC_READ_ERROR] = "Unrecovered Read Error",
+ [NVME_SC_GUARD_CHECK] = "End-to-end Guard Check Error",
+ [NVME_SC_APPTAG_CHECK] = "End-to-end Application Tag Check Error",
+ [NVME_SC_REFTAG_CHECK] = "End-to-end Reference Tag Check Error",
+ [NVME_SC_COMPARE_FAILED] = "Compare Failure",
+ [NVME_SC_ACCESS_DENIED] = "Access Denied",
+ [NVME_SC_UNWRITTEN_BLOCK] = "Deallocated or Unwritten Logical Block",
+ [NVME_SC_ANA_PERSISTENT_LOSS] = "Asymmetric Access Persistent Loss",
+ [NVME_SC_ANA_INACCESSIBLE] = "Asymmetric Access Inaccessible",
+ [NVME_SC_ANA_TRANSITION] = "Asymmetric Access Transition",
+ [NVME_SC_HOST_PATH_ERROR] = "Host Pathing Error",
+};
+
+const unsigned char *nvme_get_error_status_str(u16 status)
+{
+ status &= 0x7ff;
+ if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status])
+ return nvme_statuses[status];
+ return "Unknown";
+}
+
+const unsigned char *nvme_get_opcode_str(u8 opcode)
+{
+ if (opcode < ARRAY_SIZE(nvme_ops) && nvme_ops[opcode])
+ return nvme_ops[opcode];
+ return "Unknown";
+}
+
+const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
+{
+ if (opcode < ARRAY_SIZE(nvme_admin_ops) && nvme_admin_ops[opcode])
+ return nvme_admin_ops[opcode];
+ return "Unknown";
+}
+#endif /* CONFIG_NVME_VERBOSE_ERRORS */
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index fd4720d37cc0..cd6eac8e3dd6 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -299,6 +299,37 @@ static void nvme_retry_req(struct request *req)
blk_mq_delay_kick_requeue_list(req->q, delay);
}
+static void nvme_log_error(struct request *req)
+{
+ struct nvme_ns *ns = req->q->queuedata;
+ struct nvme_request *nr = nvme_req(req);
+
+ if (ns) {
+ pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
+ ns->disk ? ns->disk->disk_name : "?",
+ nvme_get_opcode_str(nr->cmd->common.opcode),
+ nr->cmd->common.opcode,
+ (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)),
+ (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift,
+ nvme_get_error_status_str(nr->status),
+ nr->status >> 8 & 7, /* Status Code Type */
+ nr->status & 0xff, /* Status Code */
+ nr->status & NVME_SC_MORE ? "MORE " : "",
+ nr->status & NVME_SC_DNR ? "DNR " : "");
+ return;
+ }
+
+ pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n",
+ dev_name(nr->ctrl->device),
+ nvme_get_admin_opcode_str(nr->cmd->common.opcode),
+ nr->cmd->common.opcode,
+ nvme_get_error_status_str(nr->status),
+ nr->status >> 8 & 7, /* Status Code Type */
+ nr->status & 0xff, /* Status Code */
+ nr->status & NVME_SC_MORE ? "MORE " : "",
+ nr->status & NVME_SC_DNR ? "DNR " : "");
+}
+
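The status word logged above packs several fields: bits 0-7 carry the Status
Code (SC), bits 8-10 the Status Code Type (SCT), bit 13 MORE and bit 14 DNR.
An illustrative stand-alone decoder of the same layout:

    #include <linux/nvme.h>
    #include <linux/printk.h>
    #include <linux/types.h>

    /* split an NVMe completion status into its component fields */
    static void nvme_decode_status_example(u16 status)
    {
            u8 sc = status & 0xff;              /* Status Code */
            u8 sct = (status >> 8) & 0x7;       /* Status Code Type */
            bool more = status & NVME_SC_MORE;  /* more info in error log */
            bool dnr = status & NVME_SC_DNR;    /* do not retry */

            pr_info("sct 0x%x / sc 0x%x%s%s\n", sct, sc,
                    more ? " MORE" : "", dnr ? " DNR" : "");
    }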
enum nvme_disposition {
COMPLETE,
RETRY,
@@ -339,6 +370,8 @@ static inline void nvme_end_req(struct request *req)
{
blk_status_t status = nvme_error_status(nvme_req(req)->status);
+ if (unlikely(nvme_req(req)->status != NVME_SC_SUCCESS))
+ nvme_log_error(req);
nvme_end_req_zoned(req);
nvme_trace_bio_complete(req);
blk_mq_end_request(req, status);
@@ -562,7 +595,7 @@ static void nvme_free_ns_head(struct kref *ref)
container_of(ref, struct nvme_ns_head, ref);
nvme_mpath_remove_disk(head);
- ida_simple_remove(&head->subsys->ns_ida, head->instance);
+ ida_free(&head->subsys->ns_ida, head->instance);
cleanup_srcu_struct(&head->srcu);
nvme_put_subsystem(head->subsys);
kfree(head);
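The ida_simple_*() calls converted throughout this series are deprecated
wrappers around the plain IDA API. The mapping, sketched with a throwaway IDA
(note that ida_simple_get()'s end argument is exclusive while
ida_alloc_range()'s max is inclusive):

    #include <linux/idr.h>

    static DEFINE_IDA(example_ida);

    static int example_ida_usage(void)
    {
            int id;

            /* ida_simple_get(&ida, 0, 0, gfp)     -> ida_alloc(&ida, gfp) */
            /* ida_simple_get(&ida, min, 0, gfp)   -> ida_alloc_min(&ida, min, gfp) */
            /* ida_simple_get(&ida, min, end, gfp) -> ida_alloc_range(&ida, min, end - 1, gfp) */
            id = ida_alloc(&example_ida, GFP_KERNEL);
            if (id < 0)
                    return id;

            /* ida_simple_remove(&ida, id)         -> ida_free(&ida, id) */
            ida_free(&example_ida, id);
            return 0;
    }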
@@ -607,13 +640,8 @@ static inline void nvme_clear_nvme_request(struct request *req)
req->rq_flags |= RQF_DONTPREP;
}
-static inline unsigned int nvme_req_op(struct nvme_command *cmd)
-{
- return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
-}
-
-static inline void nvme_init_request(struct request *req,
- struct nvme_command *cmd)
+/* initialize a passthrough request */
+void nvme_init_request(struct request *req, struct nvme_command *cmd)
{
if (req->q->queuedata)
req->timeout = NVME_IO_TIMEOUT;
@@ -629,30 +657,7 @@ static inline void nvme_init_request(struct request *req,
nvme_clear_nvme_request(req);
memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
}
-
-struct request *nvme_alloc_request(struct request_queue *q,
- struct nvme_command *cmd, blk_mq_req_flags_t flags)
-{
- struct request *req;
-
- req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
- if (!IS_ERR(req))
- nvme_init_request(req, cmd);
- return req;
-}
-EXPORT_SYMBOL_GPL(nvme_alloc_request);
-
-static struct request *nvme_alloc_request_qid(struct request_queue *q,
- struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
-{
- struct request *req;
-
- req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
- qid ? qid - 1 : 0);
- if (!IS_ERR(req))
- nvme_init_request(req, cmd);
- return req;
-}
+EXPORT_SYMBOL_GPL(nvme_init_request);
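With nvme_alloc_request() gone, passthrough callers allocate through the
block layer and then initialize the request themselves, as the later hunks
do. A sketch of the calling convention (hypothetical helper, error path
abbreviated):

    #include <linux/blk-mq.h>
    #include "nvme.h"

    /* post-refactor pattern for building a passthrough request */
    static int example_submit(struct request_queue *q, struct nvme_command *cmd)
    {
            struct request *req;

            req = blk_mq_alloc_request(q, nvme_req_op(cmd), 0);
            if (IS_ERR(req))
                    return PTR_ERR(req);
            nvme_init_request(req, cmd);    /* timeout, flags, cmd copy */

            /* ... blk_execute_rq(req, false), read nvme_req(req)->result ... */
            blk_mq_free_request(req);
            return 0;
    }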
/*
* For something we're not in a state to send to the device the default action
@@ -758,6 +763,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
static int nvme_configure_directives(struct nvme_ctrl *ctrl)
{
struct streams_directive_params s;
+ u16 nssa;
int ret;
if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
@@ -773,14 +779,16 @@ static int nvme_configure_directives(struct nvme_ctrl *ctrl)
if (ret)
goto out_disable_stream;
- ctrl->nssa = le16_to_cpu(s.nssa);
- if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
+ nssa = le16_to_cpu(s.nssa);
+ if (nssa < BLK_MAX_WRITE_HINTS - 1) {
dev_info(ctrl->device, "too few streams (%u) available\n",
- ctrl->nssa);
+ nssa);
+ /* this condition is not an error: streams are optional */
+ ret = 0;
goto out_disable_stream;
}
- ctrl->nr_streams = min_t(u16, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
+ ctrl->nr_streams = min_t(u16, nssa, BLK_MAX_WRITE_HINTS - 1);
dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
return 0;
@@ -1050,8 +1058,7 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd);
* >0: nvme controller's cqe status response
* <0: kernel error in lieu of controller response
*/
-static int nvme_execute_rq(struct gendisk *disk, struct request *rq,
- bool at_head)
+static int nvme_execute_rq(struct request *rq, bool at_head)
{
blk_status_t status;
@@ -1076,11 +1083,14 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
int ret;
if (qid == NVME_QID_ANY)
- req = nvme_alloc_request(q, cmd, flags);
+ req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
else
- req = nvme_alloc_request_qid(q, cmd, flags, qid);
+ req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
+ qid ? qid - 1 : 0);
+
if (IS_ERR(req))
return PTR_ERR(req);
+ nvme_init_request(req, cmd);
if (timeout)
req->timeout = timeout;
@@ -1091,7 +1101,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
goto out;
}
- ret = nvme_execute_rq(NULL, req, at_head);
+ ret = nvme_execute_rq(req, at_head);
if (result && ret >= 0)
*result = nvme_req(req)->result;
out:
@@ -1207,12 +1217,11 @@ int nvme_execute_passthru_rq(struct request *rq)
struct nvme_command *cmd = nvme_req(rq)->cmd;
struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
struct nvme_ns *ns = rq->q->queuedata;
- struct gendisk *disk = ns ? ns->disk : NULL;
u32 effects;
int ret;
effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
- ret = nvme_execute_rq(disk, rq, false);
+ ret = nvme_execute_rq(rq, false);
if (effects) /* nothing to be done for zero cmd effects */
nvme_passthru_end(ctrl, effects, cmd, ret);
@@ -1271,14 +1280,15 @@ static void nvme_keep_alive_work(struct work_struct *work)
return;
}
- rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (IS_ERR(rq)) {
/* allocation failure, reset the controller */
dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
nvme_reset_ctrl(ctrl);
return;
}
+ nvme_init_request(rq, &ctrl->ka_cmd);
rq->timeout = ctrl->kato * HZ;
rq->end_io_data = ctrl;
@@ -1683,13 +1693,6 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
}
-static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
-{
- return !uuid_is_null(&ids->uuid) ||
- memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
- memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
-}
-
static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
return uuid_equal(&a->uuid, &b->uuid) &&
@@ -1977,7 +1980,7 @@ static char nvme_pr_type(enum pr_type type)
default:
return 0;
}
-};
+}
static int nvme_send_ns_head_pr_command(struct block_device *bdev,
struct nvme_command *c, u8 data[16])
@@ -2565,7 +2568,7 @@ static void nvme_release_subsystem(struct device *dev)
container_of(dev, struct nvme_subsystem, dev);
if (subsys->instance >= 0)
- ida_simple_remove(&nvme_instance_ida, subsys->instance);
+ ida_free(&nvme_instance_ida, subsys->instance);
kfree(subsys);
}
@@ -2990,6 +2993,9 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->max_namespaces = le32_to_cpu(id->mnan);
ctrl->ctratt = le32_to_cpu(id->ctratt);
+ ctrl->cntrltype = id->cntrltype;
+ ctrl->dctype = id->dctype;
+
if (id->rtd3e) {
/* us -> s */
u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;
@@ -3523,6 +3529,40 @@ static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);
+static ssize_t cntrltype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ static const char * const type[] = {
+ [NVME_CTRL_IO] = "io\n",
+ [NVME_CTRL_DISC] = "discovery\n",
+ [NVME_CTRL_ADMIN] = "admin\n",
+ };
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
+ return sysfs_emit(buf, "reserved\n");
+
+ return sysfs_emit(buf, type[ctrl->cntrltype]);
+}
+static DEVICE_ATTR_RO(cntrltype);
+
+static ssize_t dctype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ static const char * const type[] = {
+ [NVME_DCTYPE_NOT_REPORTED] = "none\n",
+ [NVME_DCTYPE_DDC] = "ddc\n",
+ [NVME_DCTYPE_CDC] = "cdc\n",
+ };
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
+ return sysfs_emit(buf, "reserved\n");
+
+ return sysfs_emit(buf, type[ctrl->dctype]);
+}
+static DEVICE_ATTR_RO(dctype);
+
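Once the attributes are wired into nvme_dev_attrs below, they are readable
from the controller's sysfs directory, e.g. (illustrative paths and values):

    $ cat /sys/class/nvme/nvme0/cntrltype
    io
    $ cat /sys/class/nvme/nvme0/dctype
    none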
static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr,
&dev_attr_rescan_controller.attr,
@@ -3544,6 +3584,8 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reconnect_delay.attr,
&dev_attr_fast_io_fail_tmo.attr,
&dev_attr_kato.attr,
+ &dev_attr_cntrltype.attr,
+ &dev_attr_dctype.attr,
NULL
};
@@ -3598,16 +3640,24 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
return NULL;
}
-static int __nvme_check_ids(struct nvme_subsystem *subsys,
- struct nvme_ns_head *new)
+static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys,
+ struct nvme_ns_ids *ids)
{
+ bool has_uuid = !uuid_is_null(&ids->uuid);
+ bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid));
+ bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
struct nvme_ns_head *h;
lockdep_assert_held(&subsys->lock);
list_for_each_entry(h, &subsys->nsheads, entry) {
- if (nvme_ns_ids_valid(&new->ids) &&
- nvme_ns_ids_equal(&new->ids, &h->ids))
+ if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid))
+ return -EINVAL;
+ if (has_nguid &&
+ memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0)
+ return -EINVAL;
+ if (has_eui64 &&
+ memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0)
return -EINVAL;
}
@@ -3616,7 +3666,7 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys,
static void nvme_cdev_rel(struct device *dev)
{
- ida_simple_remove(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
+ ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
}
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
@@ -3630,7 +3680,7 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
{
int minor, ret;
- minor = ida_simple_get(&nvme_ns_chr_minor_ida, 0, 0, GFP_KERNEL);
+ minor = ida_alloc(&nvme_ns_chr_minor_ida, GFP_KERNEL);
if (minor < 0)
return minor;
cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
@@ -3693,7 +3743,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
head = kzalloc(size, GFP_KERNEL);
if (!head)
goto out;
- ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
+ ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL);
if (ret < 0)
goto out_free_head;
head->instance = ret;
@@ -3706,13 +3756,6 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
head->ids = *ids;
kref_init(&head->ref);
- ret = __nvme_check_ids(ctrl->subsys, head);
- if (ret) {
- dev_err(ctrl->device,
- "duplicate IDs for nsid %d\n", nsid);
- goto out_cleanup_srcu;
- }
-
if (head->ids.csi) {
ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
if (ret)
@@ -3732,7 +3775,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
out_cleanup_srcu:
cleanup_srcu_struct(&head->srcu);
out_ida_remove:
- ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
+ ida_free(&ctrl->subsys->ns_ida, head->instance);
out_free_head:
kfree(head);
out:
@@ -3741,16 +3784,56 @@ out:
return ERR_PTR(ret);
}
+static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
+ struct nvme_ns_ids *ids)
+{
+ struct nvme_subsystem *s;
+ int ret = 0;
+
+ /*
+ * Note that this check is racy as we try to avoid holding the global
+ * lock over the whole ns_head creation. But it is only intended as
+ * a sanity check anyway.
+ */
+ mutex_lock(&nvme_subsystems_lock);
+ list_for_each_entry(s, &nvme_subsystems, entry) {
+ if (s == this)
+ continue;
+ mutex_lock(&s->lock);
+ ret = nvme_subsys_check_duplicate_ids(s, ids);
+ mutex_unlock(&s->lock);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&nvme_subsystems_lock);
+
+ return ret;
+}
+
static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
struct nvme_ns_ids *ids, bool is_shared)
{
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_ns_head *head = NULL;
- int ret = 0;
+ int ret;
+
+ ret = nvme_global_check_duplicate_ids(ctrl->subsys, ids);
+ if (ret) {
+ dev_err(ctrl->device,
+ "globally duplicate IDs for nsid %d\n", nsid);
+ return ret;
+ }
mutex_lock(&ctrl->subsys->lock);
head = nvme_find_ns_head(ctrl->subsys, nsid);
if (!head) {
+ ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, ids);
+ if (ret) {
+ dev_err(ctrl->device,
+ "duplicate IDs in subsystem for nsid %d\n",
+ nsid);
+ goto out_unlock;
+ }
head = nvme_alloc_ns_head(ctrl, nsid, ids);
if (IS_ERR(head)) {
ret = PTR_ERR(head);
@@ -3770,6 +3853,14 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
nsid);
goto out_put_ns_head;
}
+
+ if (!multipath && !list_empty(&head->list)) {
+ dev_warn(ctrl->device,
+ "Found shared namespace %d, but multipathing not supported.\n",
+ nsid);
+ dev_warn_once(ctrl->device,
+ "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n.");
+ }
}
list_add_tail_rcu(&ns->siblings, &head->list);
@@ -3858,13 +3949,27 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
goto out_cleanup_disk;
/*
- * Without the multipath code enabled, multiple controller per
- * subsystems are visible as devices and thus we cannot use the
- * subsystem instance.
+ * If multipathing is enabled, the device name for all disks and not
+ * just those that represent shared namespaces needs to be based on the
+ * subsystem instance. Using the controller instance for private
+ * namespaces could lead to naming collisions between shared and private
+ * namespaces if they don't use a common numbering scheme.
+ *
+ * If multipathing is not enabled, disk names must use the controller
+ * instance as shared namespaces will show up as multiple block
+ * devices.
*/
- if (!nvme_mpath_set_disk_name(ns, disk->disk_name, &disk->flags))
+ if (ns->head->disk) {
+ sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
+ ctrl->instance, ns->head->instance);
+ disk->flags |= GENHD_FL_HIDDEN;
+ } else if (multipath) {
+ sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance,
+ ns->head->instance);
+ } else {
sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
ns->head->instance);
+ }
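For concreteness, the resulting names look like this (illustrative instance
numbers):

    /dev/nvme0n1     multipath node for a shared namespace (subsystem 0)
    /dev/nvme0c1n1   hidden per-path node (subsystem 0, controller 1)
    /dev/nvme1n1     namespace exposed without multipath (controller 1)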
if (nvme_update_ns_info(ns, id))
goto out_unlink_ns;
@@ -4229,6 +4334,13 @@ static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
return ret;
}
+static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata)
+{
+ char *envp[2] = { envdata, NULL };
+
+ kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
+}
+
static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
{
char *envp[2] = { NULL, NULL };
@@ -4403,6 +4515,8 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
nvme_queue_scan(ctrl);
nvme_start_queues(ctrl);
}
+
+ nvme_change_uevent(ctrl, "NVME_EVENT=connected");
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);
@@ -4436,7 +4550,7 @@ static void nvme_free_ctrl(struct device *dev)
struct nvme_subsystem *subsys = ctrl->subsys;
if (!subsys || ctrl->instance != subsys->instance)
- ida_simple_remove(&nvme_instance_ida, ctrl->instance);
+ ida_free(&nvme_instance_ida, ctrl->instance);
nvme_free_cels(ctrl);
nvme_mpath_uninit(ctrl);
@@ -4495,7 +4609,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
goto out;
}
- ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL);
if (ret < 0)
goto out;
ctrl->instance = ret;
@@ -4536,7 +4650,7 @@ out_free_name:
nvme_put_ctrl(ctrl);
kfree_const(ctrl->device->kobj.name);
out_release_instance:
- ida_simple_remove(&nvme_instance_ida, ctrl->instance);
+ ida_free(&nvme_instance_ida, ctrl->instance);
out:
if (ctrl->discard_page)
__free_page(ctrl->discard_page);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index f79a66d4e22c..ee79a6d639b4 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -144,11 +144,10 @@ EXPORT_SYMBOL_GPL(nvmf_get_address);
*/
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
union nvme_result res;
int ret;
- memset(&cmd, 0, sizeof(cmd));
cmd.prop_get.opcode = nvme_fabrics_command;
cmd.prop_get.fctype = nvme_fabrics_type_property_get;
cmd.prop_get.offset = cpu_to_le32(off);
@@ -272,7 +271,7 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
int err_sctype = errval & ~NVME_SC_DNR;
switch (err_sctype) {
- case (NVME_SC_CONNECT_INVALID_PARAM):
+ case NVME_SC_CONNECT_INVALID_PARAM:
if (offset >> 16) {
char *inv_data = "Connect Invalid Data Parameter";
@@ -873,7 +872,7 @@ static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
unsigned int required_opts)
{
if ((opts->mask & required_opts) != required_opts) {
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
if ((opt_tokens[i].token & required_opts) &&
@@ -923,7 +922,7 @@ static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
unsigned int allowed_opts)
{
if (opts->mask & ~allowed_opts) {
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
if ((opt_tokens[i].token & opts->mask) &&
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 71b3108c22f0..080f85f4105f 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -259,7 +259,7 @@ nvme_fc_free_lport(struct kref *ref)
complete(&nvme_fc_unload_proceed);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
- ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
+ ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
ida_destroy(&lport->endp_cnt);
put_device(lport->dev);
@@ -399,7 +399,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
goto out_reghost_failed;
}
- idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
+ idx = ida_alloc(&nvme_fc_local_port_cnt, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
goto out_fail_kfree;
@@ -439,7 +439,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
return 0;
out_ida_put:
- ida_simple_remove(&nvme_fc_local_port_cnt, idx);
+ ida_free(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
kfree(newrec);
out_reghost_failed:
@@ -535,7 +535,7 @@ nvme_fc_free_rport(struct kref *ref)
spin_unlock_irqrestore(&nvme_fc_lock, flags);
WARN_ON(!list_empty(&rport->disc_list));
- ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
+ ida_free(&lport->endp_cnt, rport->remoteport.port_num);
kfree(rport);
@@ -713,7 +713,7 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
goto out_lport_put;
}
- idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
+ idx = ida_alloc(&lport->endp_cnt, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
goto out_kfree_rport;
@@ -2393,7 +2393,7 @@ nvme_fc_ctrl_free(struct kref *ref)
put_device(ctrl->dev);
nvme_fc_rport_put(ctrl->rport);
- ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
+ ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
if (ctrl->ctrl.opts)
nvmf_free_options(ctrl->ctrl.opts);
kfree(ctrl);
@@ -2916,11 +2916,9 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
ctrl->ctrl.tagset = &ctrl->tag_set;
- ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
- if (IS_ERR(ctrl->ctrl.connect_q)) {
- ret = PTR_ERR(ctrl->ctrl.connect_q);
+ ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
+ if (ret)
goto out_free_tag_set;
- }
ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
if (ret)
@@ -3472,7 +3470,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
goto out_fail;
}
- idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
+ idx = ida_alloc(&nvme_fc_ctrl_cnt, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
goto out_free_ctrl;
@@ -3635,7 +3633,7 @@ out_free_queues:
kfree(ctrl->queues);
out_free_ida:
put_device(ctrl->dev);
- ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
+ ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_free_ctrl:
kfree(ctrl);
out_fail:
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 22314962842d..554566371ffa 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -56,7 +56,7 @@ out:
static int nvme_submit_user_cmd(struct request_queue *q,
struct nvme_command *cmd, void __user *ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
- u32 meta_seed, u64 *result, unsigned timeout)
+ u32 meta_seed, u64 *result, unsigned timeout, bool vec)
{
bool write = nvme_is_write(cmd);
struct nvme_ns *ns = q->queuedata;
@@ -66,17 +66,32 @@ static int nvme_submit_user_cmd(struct request_queue *q,
void *meta = NULL;
int ret;
- req = nvme_alloc_request(q, cmd, 0);
+ req = blk_mq_alloc_request(q, nvme_req_op(cmd), 0);
if (IS_ERR(req))
return PTR_ERR(req);
+ nvme_init_request(req, cmd);
if (timeout)
req->timeout = timeout;
nvme_req(req)->flags |= NVME_REQ_USERCMD;
if (ubuffer && bufflen) {
- ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
+ if (!vec)
+ ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
GFP_KERNEL);
+ else {
+ struct iovec fast_iov[UIO_FASTIOV];
+ struct iovec *iov = fast_iov;
+ struct iov_iter iter;
+
+ ret = import_iovec(rq_data_dir(req), ubuffer, bufflen,
+ UIO_FASTIOV, &iov, &iter);
+ if (ret < 0)
+ goto out;
+ ret = blk_rq_map_user_iov(q, req, NULL, &iter,
+ GFP_KERNEL);
+ kfree(iov);
+ }
if (ret)
goto out;
bio = req->bio;
@@ -170,7 +185,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
return nvme_submit_user_cmd(ns->queue, &c,
nvme_to_user_ptr(io.addr), length,
- metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
+ metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
+ false);
}
static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
@@ -224,7 +240,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
nvme_to_user_ptr(cmd.addr), cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
- 0, &result, timeout);
+ 0, &result, timeout, false);
if (status >= 0) {
if (put_user(result, &ucmd->result))
@@ -235,7 +251,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
}
static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
- struct nvme_passthru_cmd64 __user *ucmd)
+ struct nvme_passthru_cmd64 __user *ucmd, bool vec)
{
struct nvme_passthru_cmd64 cmd;
struct nvme_command c;
@@ -270,7 +286,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
nvme_to_user_ptr(cmd.addr), cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
- 0, &cmd.result, timeout);
+ 0, &cmd.result, timeout, vec);
if (status >= 0) {
if (put_user(cmd.result, &ucmd->result))
@@ -296,7 +312,7 @@ static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
case NVME_IOCTL_ADMIN_CMD:
return nvme_user_cmd(ctrl, NULL, argp);
case NVME_IOCTL_ADMIN64_CMD:
- return nvme_user_cmd64(ctrl, NULL, argp);
+ return nvme_user_cmd64(ctrl, NULL, argp, false);
default:
return sed_ioctl(ctrl->opal_dev, cmd, argp);
}
@@ -340,7 +356,9 @@ static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
case NVME_IOCTL_SUBMIT_IO:
return nvme_submit_io(ns, argp);
case NVME_IOCTL_IO64_CMD:
- return nvme_user_cmd64(ns->ctrl, ns, argp);
+ return nvme_user_cmd64(ns->ctrl, ns, argp, false);
+ case NVME_IOCTL_IO64_CMD_VEC:
+ return nvme_user_cmd64(ns->ctrl, ns, argp, true);
default:
return -ENOTTY;
}
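From user space, the new vectored ioctl passes an iovec array in addr and the
iovec count (not a byte count) in data_len. An illustrative sketch, with
error handling omitted:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <sys/uio.h>
    #include <linux/nvme_ioctl.h>

    /* submit a vectored 64-bit passthrough command on an nvme namespace fd */
    static int submit_io64_vec(int fd, struct nvme_passthru_cmd64 *cmd,
                               struct iovec *iov, unsigned int nr_iov)
    {
            cmd->addr = (uint64_t)(uintptr_t)iov;
            cmd->data_len = nr_iov; /* number of iovecs */
            return ioctl(fd, NVME_IOCTL_IO64_CMD_VEC, cmd);
    }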
@@ -480,7 +498,7 @@ long nvme_dev_ioctl(struct file *file, unsigned int cmd,
case NVME_IOCTL_ADMIN_CMD:
return nvme_user_cmd(ctrl, NULL, argp);
case NVME_IOCTL_ADMIN64_CMD:
- return nvme_user_cmd64(ctrl, NULL, argp);
+ return nvme_user_cmd64(ctrl, NULL, argp, false);
case NVME_IOCTL_IO_CMD:
return nvme_dev_user_cmd(ctrl, argp);
case NVME_IOCTL_RESET:
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index ff775235534c..1b31f19e1053 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -5,10 +5,11 @@
#include <linux/backing-dev.h>
#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
#include <trace/events/block.h>
#include "nvme.h"
-static bool multipath = true;
+bool multipath = true;
module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
"turn on native support for multiple controllers per subsystem");
@@ -79,28 +80,6 @@ void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
blk_freeze_queue_start(h->disk->queue);
}
-/*
- * If multipathing is enabled we need to always use the subsystem instance
- * number for numbering our devices to avoid conflicts between subsystems that
- * have multiple controllers and thus use the multipath-aware subsystem node
- * and those that have a single controller and use the controller node
- * directly.
- */
-bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags)
-{
- if (!multipath)
- return false;
- if (!ns->head->disk) {
- sprintf(disk_name, "nvme%dn%d", ns->ctrl->subsys->instance,
- ns->head->instance);
- return true;
- }
- sprintf(disk_name, "nvme%dc%dn%d", ns->ctrl->subsys->instance,
- ns->ctrl->instance, ns->head->instance);
- *flags = GENHD_FL_HIDDEN;
- return true;
-}
-
void nvme_failover_req(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
@@ -386,8 +365,7 @@ static void nvme_ns_head_submit_bio(struct bio *bio)
} else {
dev_warn_ratelimited(dev, "no available path - failing I/O\n");
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
+ bio_io_error(bio);
}
srcu_read_unlock(&head->srcu, srcu_idx);
@@ -898,7 +876,7 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
if (ana_log_size > ctrl->ana_log_size) {
nvme_mpath_stop(ctrl);
nvme_mpath_uninit(ctrl);
- ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
+ ctrl->ana_log_buf = kvmalloc(ana_log_size, GFP_KERNEL);
if (!ctrl->ana_log_buf)
return -ENOMEM;
}
@@ -915,7 +893,7 @@ out_uninit:
void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
- kfree(ctrl->ana_log_buf);
+ kvfree(ctrl->ana_log_buf);
ctrl->ana_log_buf = NULL;
ctrl->ana_log_size = 0;
}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index a162f6c6da6e..1ea908d43e17 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -280,7 +280,6 @@ struct nvme_ctrl {
u16 crdt[3];
u16 oncs;
u16 oacs;
- u16 nssa;
u16 nr_streams;
u16 sqsize;
u32 max_namespaces;
@@ -349,6 +348,9 @@ struct nvme_ctrl {
unsigned long discard_page_busy;
struct nvme_fault_inject fault_inject;
+
+ enum nvme_ctrl_type cntrltype;
+ enum nvme_dctype dctype;
};
enum nvme_iopolicy {
@@ -696,9 +698,13 @@ void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);
+static inline unsigned int nvme_req_op(struct nvme_command *cmd)
+{
+ return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
+}
+
#define NVME_QID_ANY -1
-struct request *nvme_alloc_request(struct request_queue *q,
- struct nvme_command *cmd, blk_mq_req_flags_t flags);
+void nvme_init_request(struct request *req, struct nvme_command *cmd);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
@@ -768,7 +774,6 @@ void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
-bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
@@ -791,20 +796,17 @@ static inline void nvme_trace_bio_complete(struct request *req)
trace_block_bio_complete(ns->head->disk->queue, req->bio);
}
+extern bool multipath;
extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;
#else
+#define multipath false
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
return false;
}
-static inline bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name,
- int *flags)
-{
- return false;
-}
static inline void nvme_failover_req(struct request *req)
{
}
@@ -894,6 +896,14 @@ static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
}
#endif
+static inline int nvme_ctrl_init_connect_q(struct nvme_ctrl *ctrl)
+{
+ ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
+ if (IS_ERR(ctrl->connect_q))
+ return PTR_ERR(ctrl->connect_q);
+ return 0;
+}
+
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
return dev_to_disk(dev)->private_data;
@@ -930,4 +940,23 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}
+#ifdef CONFIG_NVME_VERBOSE_ERRORS
+const unsigned char *nvme_get_error_status_str(u16 status);
+const unsigned char *nvme_get_opcode_str(u8 opcode);
+const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
+#else /* CONFIG_NVME_VERBOSE_ERRORS */
+static inline const unsigned char *nvme_get_error_status_str(u16 status)
+{
+ return "I/O Error";
+}
+static inline const unsigned char *nvme_get_opcode_str(u8 opcode)
+{
+ return "I/O Cmd";
+}
+static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
+{
+ return "Admin Cmd";
+}
+#endif /* CONFIG_NVME_VERBOSE_ERRORS */
+
#endif /* _NVME_H */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 6a99ed680915..2e98ac3f3ad6 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -424,8 +425,9 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
return 0;
}
-static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
- unsigned int hctx_idx, unsigned int numa_node)
+static int nvme_pci_init_request(struct blk_mq_tag_set *set,
+ struct request *req, unsigned int hctx_idx,
+ unsigned int numa_node)
{
struct nvme_dev *dev = set->driver_data;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -1428,12 +1430,13 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
"I/O %d QID %d timeout, aborting\n",
req->tag, nvmeq->qid);
- abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
- BLK_MQ_REQ_NOWAIT);
+ abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
+ BLK_MQ_REQ_NOWAIT);
if (IS_ERR(abort_req)) {
atomic_inc(&dev->ctrl.abort_limit);
return BLK_EH_RESET_TIMER;
}
+ nvme_init_request(abort_req, &cmd);
abort_req->end_io_data = NULL;
blk_execute_rq_nowait(abort_req, false, abort_endio);
@@ -1722,7 +1725,7 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
.queue_rq = nvme_queue_rq,
.complete = nvme_pci_complete_rq,
.init_hctx = nvme_admin_init_hctx,
- .init_request = nvme_init_request,
+ .init_request = nvme_pci_init_request,
.timeout = nvme_timeout,
};
@@ -1732,7 +1735,7 @@ static const struct blk_mq_ops nvme_mq_ops = {
.complete = nvme_pci_complete_rq,
.commit_rqs = nvme_commit_rqs,
.init_hctx = nvme_init_hctx,
- .init_request = nvme_init_request,
+ .init_request = nvme_pci_init_request,
.map_queues = nvme_pci_map_queues,
.timeout = nvme_timeout,
.poll = nvme_poll,
@@ -2475,9 +2478,10 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
cmd.delete_queue.opcode = opcode;
cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
- req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT);
+ req = blk_mq_alloc_request(q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT);
if (IS_ERR(req))
return PTR_ERR(req);
+ nvme_init_request(req, &cmd);
req->end_io_data = nvmeq;
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 9c55e4be8a39..d9f19d901313 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -978,11 +978,9 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
goto out_free_io_queues;
}
- ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
- if (IS_ERR(ctrl->ctrl.connect_q)) {
- ret = PTR_ERR(ctrl->ctrl.connect_q);
+ ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
+ if (ret)
goto out_free_tag_set;
- }
}
ret = nvme_rdma_start_io_queues(ctrl);
@@ -1283,6 +1281,22 @@ static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
return ib_post_send(queue->qp, &wr, NULL);
}
+static void nvme_rdma_dma_unmap_req(struct ib_device *ibdev, struct request *rq)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+
+ if (blk_integrity_rq(rq)) {
+ ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
+ req->metadata_sgl->nents, rq_dma_dir(rq));
+ sg_free_table_chained(&req->metadata_sgl->sg_table,
+ NVME_INLINE_METADATA_SG_CNT);
+ }
+
+ ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
+ rq_dma_dir(rq));
+ sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+}
+
static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
struct request *rq)
{
@@ -1294,13 +1308,6 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
if (!blk_rq_nr_phys_segments(rq))
return;
- if (blk_integrity_rq(rq)) {
- ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
- req->metadata_sgl->nents, rq_dma_dir(rq));
- sg_free_table_chained(&req->metadata_sgl->sg_table,
- NVME_INLINE_METADATA_SG_CNT);
- }
-
if (req->use_sig_mr)
pool = &queue->qp->sig_mrs;
@@ -1309,9 +1316,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
req->mr = NULL;
}
- ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
- rq_dma_dir(rq));
- sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+ nvme_rdma_dma_unmap_req(ibdev, rq);
}
static int nvme_rdma_set_sg_null(struct nvme_command *c)
@@ -1522,22 +1527,11 @@ mr_put:
return -EINVAL;
}
-static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
- struct request *rq, struct nvme_command *c)
+static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
+ int *count, int *pi_count)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
- struct nvme_rdma_device *dev = queue->device;
- struct ib_device *ibdev = dev->dev;
- int pi_count = 0;
- int count, ret;
-
- req->num_sge = 1;
- refcount_set(&req->ref, 2); /* send and recv completions */
-
- c->common.flags |= NVME_CMD_SGL_METABUF;
-
- if (!blk_rq_nr_phys_segments(rq))
- return nvme_rdma_set_sg_null(c);
+ int ret;
req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1);
ret = sg_alloc_table_chained(&req->data_sgl.sg_table,
@@ -1549,9 +1543,9 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
req->data_sgl.sg_table.sgl);
- count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
- req->data_sgl.nents, rq_dma_dir(rq));
- if (unlikely(count <= 0)) {
+ *count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
+ req->data_sgl.nents, rq_dma_dir(rq));
+ if (unlikely(*count <= 0)) {
ret = -EIO;
goto out_free_table;
}
@@ -1570,16 +1564,50 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
rq->bio, req->metadata_sgl->sg_table.sgl);
- pi_count = ib_dma_map_sg(ibdev,
- req->metadata_sgl->sg_table.sgl,
- req->metadata_sgl->nents,
- rq_dma_dir(rq));
- if (unlikely(pi_count <= 0)) {
+ *pi_count = ib_dma_map_sg(ibdev,
+ req->metadata_sgl->sg_table.sgl,
+ req->metadata_sgl->nents,
+ rq_dma_dir(rq));
+ if (unlikely(*pi_count <= 0)) {
ret = -EIO;
goto out_free_pi_table;
}
}
+ return 0;
+
+out_free_pi_table:
+ sg_free_table_chained(&req->metadata_sgl->sg_table,
+ NVME_INLINE_METADATA_SG_CNT);
+out_unmap_sg:
+ ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
+ rq_dma_dir(rq));
+out_free_table:
+ sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+ return ret;
+}
+
+static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
+ struct request *rq, struct nvme_command *c)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_device *dev = queue->device;
+ struct ib_device *ibdev = dev->dev;
+ int pi_count = 0;
+ int count, ret;
+
+ req->num_sge = 1;
+ refcount_set(&req->ref, 2); /* send and recv completions */
+
+ c->common.flags |= NVME_CMD_SGL_METABUF;
+
+ if (!blk_rq_nr_phys_segments(rq))
+ return nvme_rdma_set_sg_null(c);
+
+ ret = nvme_rdma_dma_map_req(ibdev, rq, &count, &pi_count);
+ if (unlikely(ret))
+ return ret;
+
if (req->use_sig_mr) {
ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count);
goto out;
@@ -1603,23 +1631,12 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
ret = nvme_rdma_map_sg_fr(queue, req, c, count);
out:
if (unlikely(ret))
- goto out_unmap_pi_sg;
+ goto out_dma_unmap_req;
return 0;
-out_unmap_pi_sg:
- if (blk_integrity_rq(rq))
- ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
- req->metadata_sgl->nents, rq_dma_dir(rq));
-out_free_pi_table:
- if (blk_integrity_rq(rq))
- sg_free_table_chained(&req->metadata_sgl->sg_table,
- NVME_INLINE_METADATA_SG_CNT);
-out_unmap_sg:
- ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
- rq_dma_dir(rq));
-out_free_table:
- sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+out_dma_unmap_req:
+ nvme_rdma_dma_unmap_req(ibdev, rq);
return ret;
}
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 65e00c64a588..ad3a2bf2f1e9 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -30,6 +30,44 @@ static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/* lockdep can detect a circular dependency of the form
+ * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
+ * because dependencies are tracked for both nvme-tcp and user contexts. Using
+ * a separate class prevents lockdep from conflating nvme-tcp socket use with
+ * user-space socket API use.
+ */
+static struct lock_class_key nvme_tcp_sk_key[2];
+static struct lock_class_key nvme_tcp_slock_key[2];
+
+static void nvme_tcp_reclassify_socket(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+
+ if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
+ return;
+
+ switch (sk->sk_family) {
+ case AF_INET:
+ sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
+ &nvme_tcp_slock_key[0],
+ "sk_lock-AF_INET-NVME",
+ &nvme_tcp_sk_key[0]);
+ break;
+ case AF_INET6:
+ sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
+ &nvme_tcp_slock_key[1],
+ "sk_lock-AF_INET6-NVME",
+ &nvme_tcp_sk_key[1]);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+#else
+static void nvme_tcp_reclassify_socket(struct socket *sock) { }
+#endif
+
enum nvme_tcp_send_state {
NVME_TCP_SEND_CMD_PDU = 0,
NVME_TCP_SEND_H2C_PDU,
@@ -1469,6 +1507,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
goto err_destroy_mutex;
}
+ nvme_tcp_reclassify_socket(queue->sock);
+
/* Single syn retry */
tcp_sock_set_syncnt(queue->sock->sk, 1);
@@ -1716,7 +1756,7 @@ static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
{
- int i, ret = 0;
+ int i, ret;
for (i = 1; i < ctrl->queue_count; i++) {
ret = nvme_tcp_start_queue(ctrl, i);
@@ -1756,8 +1796,7 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
int i, ret;
for (i = 1; i < ctrl->queue_count; i++) {
- ret = nvme_tcp_alloc_queue(ctrl, i,
- ctrl->sqsize + 1);
+ ret = nvme_tcp_alloc_queue(ctrl, i, ctrl->sqsize + 1);
if (ret)
goto out_free_queues;
}
@@ -1867,11 +1906,9 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
goto out_free_io_queues;
}
- ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
- if (IS_ERR(ctrl->connect_q)) {
- ret = PTR_ERR(ctrl->connect_q);
+ ret = nvme_ctrl_init_connect_q(ctrl);
+ if (ret)
goto out_free_tag_set;
- }
}
ret = nvme_tcp_start_io_queues(ctrl);
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 6fb24746de06..46d0dab686dd 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -511,7 +511,11 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
goto done;
}
- nvmet_ns_revalidate(req->ns);
+ if (nvmet_ns_revalidate(req->ns)) {
+ mutex_lock(&req->ns->subsys->lock);
+ nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
+ mutex_unlock(&req->ns->subsys->lock);
+ }
/*
* nuse = ncap = nsze isn't always true, but we have no way to find
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 091a0ca16361..8fedd1e052fe 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -60,10 +60,11 @@ static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
if (nvmet_addr_family[i].type == adrfam)
- return sprintf(page, "%s\n", nvmet_addr_family[i].name);
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ nvmet_addr_family[i].name);
}
- return sprintf(page, "\n");
+ return snprintf(page, PAGE_SIZE, "\n");
}
static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
@@ -93,10 +94,9 @@ CONFIGFS_ATTR(nvmet_, addr_adrfam);
static ssize_t nvmet_addr_portid_show(struct config_item *item,
char *page)
{
- struct nvmet_port *port = to_nvmet_port(item);
+ __le16 portid = to_nvmet_port(item)->disc_addr.portid;
- return snprintf(page, PAGE_SIZE, "%d\n",
- le16_to_cpu(port->disc_addr.portid));
+ return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid));
}
static ssize_t nvmet_addr_portid_store(struct config_item *item,
@@ -124,8 +124,7 @@ static ssize_t nvmet_addr_traddr_show(struct config_item *item,
{
struct nvmet_port *port = to_nvmet_port(item);
- return snprintf(page, PAGE_SIZE, "%s\n",
- port->disc_addr.traddr);
+ return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr);
}
static ssize_t nvmet_addr_traddr_store(struct config_item *item,
@@ -162,10 +161,11 @@ static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
if (treq == nvmet_addr_treq[i].type)
- return sprintf(page, "%s\n", nvmet_addr_treq[i].name);
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ nvmet_addr_treq[i].name);
}
- return sprintf(page, "\n");
+ return snprintf(page, PAGE_SIZE, "\n");
}
static ssize_t nvmet_addr_treq_store(struct config_item *item,
@@ -199,8 +199,7 @@ static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
{
struct nvmet_port *port = to_nvmet_port(item);
- return snprintf(page, PAGE_SIZE, "%s\n",
- port->disc_addr.trsvcid);
+ return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid);
}
static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
@@ -284,7 +283,8 @@ static ssize_t nvmet_addr_trtype_show(struct config_item *item,
for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
if (port->disc_addr.trtype == nvmet_transport[i].type)
- return sprintf(page, "%s\n", nvmet_transport[i].name);
+ return snprintf(page, PAGE_SIZE,
+ "%s\n", nvmet_transport[i].name);
}
return sprintf(page, "\n");
@@ -586,7 +586,8 @@ static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
mutex_unlock(&ns->subsys->lock);
return -EINVAL;
}
- nvmet_ns_revalidate(ns);
+ if (nvmet_ns_revalidate(ns))
+ nvmet_ns_changed(ns->subsys, ns->nsid);
mutex_unlock(&ns->subsys->lock);
return count;
}
@@ -1233,44 +1234,6 @@ static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
}
CONFIGFS_ATTR(nvmet_subsys_, attr_model);
-static ssize_t nvmet_subsys_attr_discovery_nqn_show(struct config_item *item,
- char *page)
-{
- return snprintf(page, PAGE_SIZE, "%s\n",
- nvmet_disc_subsys->subsysnqn);
-}
-
-static ssize_t nvmet_subsys_attr_discovery_nqn_store(struct config_item *item,
- const char *page, size_t count)
-{
- struct nvmet_subsys *subsys = to_subsys(item);
- char *subsysnqn;
- int len;
-
- len = strcspn(page, "\n");
- if (!len)
- return -EINVAL;
-
- subsysnqn = kmemdup_nul(page, len, GFP_KERNEL);
- if (!subsysnqn)
- return -ENOMEM;
-
- /*
- * The discovery NQN must be different from subsystem NQN.
- */
- if (!strcmp(subsysnqn, subsys->subsysnqn)) {
- kfree(subsysnqn);
- return -EBUSY;
- }
- down_write(&nvmet_config_sem);
- kfree(nvmet_disc_subsys->subsysnqn);
- nvmet_disc_subsys->subsysnqn = subsysnqn;
- up_write(&nvmet_config_sem);
-
- return count;
-}
-CONFIGFS_ATTR(nvmet_subsys_, attr_discovery_nqn);
-
#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
char *page)
@@ -1300,7 +1263,6 @@ static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_cntlid_min,
&nvmet_subsys_attr_attr_cntlid_max,
&nvmet_subsys_attr_attr_model,
- &nvmet_subsys_attr_attr_discovery_nqn,
#ifdef CONFIG_BLK_DEV_INTEGRITY
&nvmet_subsys_attr_attr_pi_enable,
#endif
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 5119c687de68..64c2d2f3e25c 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -531,7 +531,7 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
ns->nsid);
}
-void nvmet_ns_revalidate(struct nvmet_ns *ns)
+bool nvmet_ns_revalidate(struct nvmet_ns *ns)
{
loff_t oldsize = ns->size;
@@ -540,8 +540,7 @@ void nvmet_ns_revalidate(struct nvmet_ns *ns)
else
nvmet_file_ns_revalidate(ns);
- if (oldsize != ns->size)
- nvmet_ns_changed(ns->subsys, ns->nsid);
+ return oldsize != ns->size;
}
int nvmet_ns_enable(struct nvmet_ns *ns)
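With nvmet_ns_revalidate() now returning whether the namespace size changed, raising the changed-namespace AEN becomes the caller's responsibility, and the callers already hold the subsystem lock that nvmet_ns_changed() expects. A minimal sketch of the resulting caller-side pattern:

/* Sketch: revalidate reports a size change; the caller raises the
 * AEN while holding ns->subsys->lock.
 */
mutex_lock(&ns->subsys->lock);
if (nvmet_ns_revalidate(ns))
	nvmet_ns_changed(ns->subsys, ns->nsid);
mutex_unlock(&ns->subsys->lock);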
@@ -1400,7 +1399,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (subsys->cntlid_min > subsys->cntlid_max)
goto out_free_sqs;
- ret = ida_simple_get(&cntlid_ida,
+ ret = ida_alloc_range(&cntlid_ida,
subsys->cntlid_min, subsys->cntlid_max,
GFP_KERNEL);
if (ret < 0) {
@@ -1459,7 +1458,7 @@ static void nvmet_ctrl_free(struct kref *ref)
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fatal_err_work);
- ida_simple_remove(&cntlid_ida, ctrl->cntlid);
+ ida_free(&cntlid_ida, ctrl->cntlid);
nvmet_async_events_free(ctrl);
kfree(ctrl->sqs);
@@ -1493,8 +1492,7 @@ static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
if (!port)
return NULL;
- if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn) ||
- !strcmp(nvmet_disc_subsys->subsysnqn, subsysnqn)) {
+ if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
return NULL;
return nvmet_disc_subsys;
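The ida_simple_*() wrappers are deprecated in favor of the ida_alloc*()/ida_free() names. One semantic detail worth noting: ida_simple_get()'s 'end' argument is exclusive, while ida_alloc_range()'s 'max' is inclusive, so the straight substitution above also makes cntlid_max itself allocatable. A sketch of the equivalences:

/* Old: any free id.               New equivalent:                    */
id = ida_simple_get(&ida, 0, 0, GFP_KERNEL);
id = ida_alloc(&ida, GFP_KERNEL);

/* Old: ids in [min, max].         New equivalent (max is inclusive): */
id = ida_simple_get(&ida, min, max + 1, GFP_KERNEL);
id = ida_alloc_range(&ida, min, max, GFP_KERNEL);

/* Old release.                    New equivalent:                    */
ida_simple_remove(&ida, id);
ida_free(&ida, id);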
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 22b5108168a6..de90001fc5c4 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1115,7 +1115,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
if (!assoc)
return NULL;
- idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
+ idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL);
if (idx < 0)
goto out_free_assoc;
@@ -1157,7 +1157,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
out_put:
nvmet_fc_tgtport_put(tgtport);
out_ida:
- ida_simple_remove(&tgtport->assoc_cnt, idx);
+ ida_free(&tgtport->assoc_cnt, idx);
out_free_assoc:
kfree(assoc);
return NULL;
@@ -1183,7 +1183,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
/* if pending Rcv Disconnect Association LS, send rsp now */
if (oldls)
nvmet_fc_xmt_ls_rsp(tgtport, oldls);
- ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
+ ida_free(&tgtport->assoc_cnt, assoc->a_id);
dev_info(tgtport->dev,
"{%d:%d} Association freed\n",
tgtport->fc_target_port.port_num, assoc->a_id);
@@ -1341,7 +1341,7 @@ nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
}
/**
- * nvme_fc_register_targetport - transport entry point called by an
+ * nvmet_fc_register_targetport - transport entry point called by an
* LLDD to register the existence of a local
 * NVME subsystem FC port.
* @pinfo: pointer to information about the port to be registered
@@ -1383,7 +1383,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
goto out_regtgt_failed;
}
- idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
+ idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
goto out_fail_kfree;
@@ -1433,7 +1433,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
out_free_newrec:
put_device(dev);
out_ida_put:
- ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
+ ida_free(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
kfree(newrec);
out_regtgt_failed:
@@ -1460,7 +1460,7 @@ nvmet_fc_free_tgtport(struct kref *ref)
/* let the LLDD know we've finished tearing it down */
tgtport->ops->targetport_delete(&tgtport->fc_target_port);
- ida_simple_remove(&nvmet_fc_tgtport_cnt,
+ ida_free(&nvmet_fc_tgtport_cnt,
tgtport->fc_target_port.port_num);
ida_destroy(&tgtport->assoc_cnt);
@@ -1604,7 +1604,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
}
/**
- * nvme_fc_unregister_targetport - transport entry point called by an
+ * nvmet_fc_unregister_targetport - transport entry point called by an
* LLDD to deregister/remove a previously
 * registered local NVME subsystem FC port.
* @target_port: pointer to the (registered) target port that is to be
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 70ca9dfc1771..d886c2c59554 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
+#include <linux/memremap.h>
#include <linux/module.h>
#include "nvmet.h"
@@ -76,6 +77,14 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
int ret;
+ /*
+ * When the buffered_io namespace attribute is enabled, the user wants
+ * this block device to be accessed as a file, so the block device can
+ * take advantage of the page cache.
+ */
+ if (ns->buffered_io)
+ return -ENOTBLK;
+
ns->bdev = blkdev_get_by_path(ns->device_path,
FMODE_READ | FMODE_WRITE, NULL);
if (IS_ERR(ns->bdev)) {
@@ -267,15 +276,15 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
if (nvmet_use_inline_bvec(req)) {
bio = &req->b.inline_bio;
- bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ bio_init(bio, req->ns->bdev, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), op);
} else {
- bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt));
+ bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), op,
+ GFP_KERNEL);
}
- bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector;
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
- bio->bi_opf = op;
blk_start_plug(&plug);
if (req->metadata_len)
@@ -296,10 +305,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
}
}
- bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt));
- bio_set_dev(bio, req->ns->bdev);
+ bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt),
+ op, GFP_KERNEL);
bio->bi_iter.bi_sector = sector;
- bio->bi_opf = op;
bio_chain(bio, prev);
submit_bio(prev);
@@ -328,11 +336,10 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
if (!nvmet_check_transfer_len(req, 0))
return;
- bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
- bio_set_dev(bio, req->ns->bdev);
+ bio_init(bio, req->ns->bdev, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), REQ_OP_WRITE | REQ_PREFLUSH);
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
submit_bio(bio);
}
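These hunks follow the block-layer interface change that moves the target block device and the operation/flags into bio allocation itself, removing the separate bio_set_dev() call and bi_opf assignment. A before/after sketch:

/* Before: three steps, with a window where the bio has no device. */
bio = bio_alloc(GFP_KERNEL, nr_vecs);
bio_set_dev(bio, bdev);
bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

/* After: device and opf are construction arguments. */
bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE | REQ_PREFLUSH, GFP_KERNEL);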
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 6be6e59d273b..6485dc8eb974 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -14,16 +14,9 @@
#define NVMET_MAX_MPOOL_BVEC 16
#define NVMET_MIN_MPOOL_OBJ 16
-int nvmet_file_ns_revalidate(struct nvmet_ns *ns)
+void nvmet_file_ns_revalidate(struct nvmet_ns *ns)
{
- struct kstat stat;
- int ret;
-
- ret = vfs_getattr(&ns->file->f_path, &stat, STATX_SIZE,
- AT_STATX_FORCE_SYNC);
- if (!ret)
- ns->size = stat.size;
- return ret;
+ ns->size = i_size_read(ns->file->f_mapping->host);
}
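The vfs_getattr() round trip is replaced by reading the cached inode size directly; i_size_read() is a lockless accessor (a plain load on 64-bit, seqcount-protected on 32-bit) and cannot fail, which is what allows the function to become void. The one-line form, for reference:

/* Sketch: read the cached inode size; no error path needed. */
ns->size = i_size_read(ns->file->f_mapping->host);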
void nvmet_file_ns_disable(struct nvmet_ns *ns)
@@ -43,7 +36,7 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns)
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
int flags = O_RDWR | O_LARGEFILE;
- int ret;
+ int ret = 0;
if (!ns->buffered_io)
flags |= O_DIRECT;
@@ -57,9 +50,7 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
return ret;
}
- ret = nvmet_file_ns_revalidate(ns);
- if (ret)
- goto err;
+ nvmet_file_ns_revalidate(ns);
/*
* i_blkbits can be greater than the universally accepted upper bound,
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index eb1094254c82..23f9d6f88804 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -543,11 +543,9 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
if (ret)
goto out_destroy_queues;
- ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
- if (IS_ERR(ctrl->ctrl.connect_q)) {
- ret = PTR_ERR(ctrl->ctrl.connect_q);
+ ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
+ if (ret)
goto out_free_tagset;
- }
ret = nvme_loop_connect_io_queues(ctrl);
if (ret)
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index af193423c10b..d910c6aad4b6 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -541,8 +541,8 @@ u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
-int nvmet_file_ns_revalidate(struct nvmet_ns *ns);
-void nvmet_ns_revalidate(struct nvmet_ns *ns);
+void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
+bool nvmet_ns_revalidate(struct nvmet_ns *ns);
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);
bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 9e5b89ae29df..a4de1e0d518b 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -206,12 +206,13 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
if (nvmet_use_inline_bvec(req)) {
bio = &req->p.inline_bio;
- bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ bio_init(bio, NULL, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), req_op(rq));
} else {
- bio = bio_alloc(GFP_KERNEL, bio_max_segs(req->sg_cnt));
+ bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq),
+ GFP_KERNEL);
bio->bi_end_io = bio_put;
}
- bio->bi_opf = req_op(rq);
for_each_sg(req->sg, sg, req->sg_cnt, i) {
if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
@@ -253,11 +254,12 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
timeout = nvmet_req_subsys(req)->admin_timeout;
}
- rq = nvme_alloc_request(q, req->cmd, 0);
+ rq = blk_mq_alloc_request(q, nvme_req_op(req->cmd), 0);
if (IS_ERR(rq)) {
status = NVME_SC_INTERNAL;
goto out_put_ns;
}
+ nvme_init_request(rq, req->cmd);
if (timeout)
rq->timeout = timeout;
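nvme_alloc_request() is split here into the generic block-layer allocation plus an explicit NVMe command initialization step. A sketch of the resulting caller pattern (nvme_req_op() and nvme_init_request() are the driver's own helpers):

rq = blk_mq_alloc_request(q, nvme_req_op(req->cmd), 0);
if (IS_ERR(rq))
	return PTR_ERR(rq);	/* sketch; the real code maps this to an NVMe status */
nvme_init_request(rq, req->cmd);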
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 1deb4043e242..2446d0918a41 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1356,7 +1356,7 @@ static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
!queue->host_qid);
}
nvmet_rdma_free_rsps(queue);
- ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
+ ida_free(&nvmet_rdma_queue_ida, queue->idx);
kfree(queue);
}
@@ -1459,7 +1459,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
spin_lock_init(&queue->rsps_lock);
INIT_LIST_HEAD(&queue->queue_list);
- queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
+ queue->idx = ida_alloc(&nvmet_rdma_queue_ida, GFP_KERNEL);
if (queue->idx < 0) {
ret = NVME_RDMA_CM_NO_RSC;
goto out_destroy_sq;
@@ -1510,7 +1510,7 @@ out_free_cmds:
out_free_responses:
nvmet_rdma_free_rsps(queue);
out_ida_remove:
- ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
+ ida_free(&nvmet_rdma_queue_ida, queue->idx);
out_destroy_sq:
nvmet_sq_destroy(&queue->nvme_sq);
out_free_queue:
@@ -1703,7 +1703,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
}
/**
- * nvme_rdma_device_removal() - Handle RDMA device removal
+ * nvmet_rdma_device_removal() - Handle RDMA device removal
* @cm_id: rdma_cm id, used for nvmet port
* @queue: nvmet rdma queue (cm id qp_context)
*
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 7c1c43ce466b..83ca577f72be 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1473,7 +1473,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
nvmet_tcp_free_cmds(queue);
if (queue->hdr_digest || queue->data_digest)
nvmet_tcp_free_crypto(queue);
- ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
+ ida_free(&nvmet_tcp_queue_ida, queue->idx);
page = virt_to_head_page(queue->pf_cache.va);
__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
@@ -1613,7 +1613,7 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
init_llist_head(&queue->resp_list);
INIT_LIST_HEAD(&queue->resp_send_list);
- queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL);
+ queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
if (queue->idx < 0) {
ret = queue->idx;
goto out_free_queue;
@@ -1646,7 +1646,7 @@ out_destroy_sq:
out_free_connect:
nvmet_tcp_free_cmd(&queue->connect);
out_ida_remove:
- ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
+ ida_free(&nvmet_tcp_queue_ida, queue->idx);
out_free_queue:
kfree(queue);
return ret;
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 46bc30fe85d2..e34718b09550 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -123,7 +123,11 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
goto done;
}
- nvmet_ns_revalidate(req->ns);
+ if (nvmet_ns_revalidate(req->ns)) {
+ mutex_lock(&req->ns->subsys->lock);
+ nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
+ mutex_unlock(&req->ns->subsys->lock);
+ }
zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
req->ns->blksize_shift;
id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
@@ -412,10 +416,10 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
while (sector < get_capacity(bdev->bd_disk)) {
if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) {
- bio = blk_next_bio(bio, 0, GFP_KERNEL);
- bio->bi_opf = zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC;
+ bio = blk_next_bio(bio, bdev, 0,
+ zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
+ GFP_KERNEL);
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, bdev);
/* This may take a while, so be nice to others */
cond_resched();
}
@@ -522,6 +526,7 @@ static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
{
sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
+ const unsigned int op = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
u16 status = NVME_SC_SUCCESS;
unsigned int total_len = 0;
struct scatterlist *sg;
@@ -551,14 +556,13 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
if (nvmet_use_inline_bvec(req)) {
bio = &req->z.inline_bio;
- bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ bio_init(bio, req->ns->bdev, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), op);
} else {
- bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
+ bio = bio_alloc(req->ns->bdev, req->sg_cnt, op, GFP_KERNEL);
}
- bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
- bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sect;
bio->bi_private = req;
if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 9c0fb962c22b..75caa6f5d36f 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/kmemleak.h>
+#include <linux/cma.h>
#include "of_private.h"
@@ -116,12 +117,8 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
if (IS_ENABLED(CONFIG_CMA)
&& of_flat_dt_is_compatible(node, "shared-dma-pool")
&& of_get_flat_dt_prop(node, "reusable", NULL)
- && !nomap) {
- unsigned long order =
- max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
-
- align = max(align, (phys_addr_t)PAGE_SIZE << order);
- }
+ && !nomap)
+ align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);
prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
if (prop) {
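CMA_MIN_ALIGNMENT_BYTES from <linux/cma.h> replaces the open-coded order computation; as I understand the accompanying mm changes, CMA areas now only require pageblock alignment, so the constant is roughly the following (quoted from memory, a sketch rather than the authoritative definition):

#define CMA_MIN_ALIGNMENT_PAGES pageblock_nr_pages
#define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)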
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index 854d95163112..a2c3c207a04b 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -219,7 +219,7 @@ static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
if (hwirq < 0)
return -ENOSPC;
- fwspec.param[1] += hwirq;
+ fwspec.param[fwspec.param_count - 2] += hwirq;
ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec);
if (ret)
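Indexing the doorbell cell relative to the end of the fwspec instead of hardcoding param[1] keeps the MSI allocation correct for parent interrupt specifiers of any length; presumably this matters once the parent uses more than three cells (e.g. an extra die/affinity cell). Sketch:

/* For a three-cell parent (type, number, flags) both lines hit the
 * interrupt-number cell; only the second survives a longer specifier.
 */
fwspec.param[1] += hwirq;				/* old: 3 cells only */
fwspec.param[fwspec.param_count - 2] += hwirq;		/* new: any length   */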
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 0267977c9f17..952217572113 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -33,6 +33,49 @@ int pci_iov_virtfn_devfn(struct pci_dev *dev, int vf_id)
}
EXPORT_SYMBOL_GPL(pci_iov_virtfn_devfn);
+int pci_iov_vf_id(struct pci_dev *dev)
+{
+ struct pci_dev *pf;
+
+ if (!dev->is_virtfn)
+ return -EINVAL;
+
+ pf = pci_physfn(dev);
+ return (((dev->bus->number << 8) + dev->devfn) -
+ ((pf->bus->number << 8) + pf->devfn + pf->sriov->offset)) /
+ pf->sriov->stride;
+}
+EXPORT_SYMBOL_GPL(pci_iov_vf_id);
+
+/**
+ * pci_iov_get_pf_drvdata - Return the drvdata of a PF
+ * @dev: VF pci_dev
+ * @pf_driver: Device driver required to own the PF
+ *
+ * This must be called from a context that ensures that a VF driver is attached.
+ * The value returned is invalid once the VF driver completes its remove()
+ * callback.
+ *
+ * Locking is achieved by the driver core. A VF driver cannot be probed until
+ * pci_enable_sriov() is called and pci_disable_sriov() does not return until
+ * all VF drivers have completed their remove().
+ *
+ * The PF driver must call pci_disable_sriov() before it begins to destroy the
+ * drvdata.
+ */
+void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver)
+{
+ struct pci_dev *pf_dev;
+
+ if (!dev->is_virtfn)
+ return ERR_PTR(-EINVAL);
+ pf_dev = dev->physfn;
+ if (pf_dev->driver != pf_driver)
+ return ERR_PTR(-EINVAL);
+ return pci_get_drvdata(pf_dev);
+}
+EXPORT_SYMBOL_GPL(pci_iov_get_pf_drvdata);
+
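pci_iov_vf_id() inverts the SR-IOV routing-ID formula (VF RID = PF RID + First VF Offset + vf_id * VF Stride, with RID = bus << 8 | devfn). A hypothetical VF driver would pair it with pci_iov_get_pf_drvdata() like this (my_pf_driver and my_pf_priv are illustrative names, not from the patch):

static int my_vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct my_pf_priv *pf_priv;
	int vf_id;

	vf_id = pci_iov_vf_id(pdev);	/* index within the PF's VFs */
	if (vf_id < 0)
		return vf_id;

	pf_priv = pci_iov_get_pf_drvdata(pdev, &my_pf_driver);
	if (IS_ERR(pf_priv))
		return PTR_ERR(pf_priv);

	/* ... bind VF vf_id to the PF's state ... */
	return 0;
}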
/*
* Per SR-IOV spec sec 3.3.10 and 3.3.11, First VF Offset and VF Stride may
* change when NumVFs changes.
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 588588cfda48..4ceeb75fc899 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -350,7 +350,6 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
const struct pci_device_id *id)
{
int error, node, cpu;
- int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
struct drv_dev_and_id ddi = { drv, dev, id };
/*
@@ -368,17 +367,29 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
* device is probed from work_on_cpu() of the Physical device.
*/
if (node < 0 || node >= MAX_NUMNODES || !node_online(node) ||
- pci_physfn_is_probed(dev))
+ pci_physfn_is_probed(dev)) {
cpu = nr_cpu_ids;
- else
+ } else {
+ cpumask_var_t wq_domain_mask;
+
+ if (!zalloc_cpumask_var(&wq_domain_mask, GFP_KERNEL)) {
+ error = -ENOMEM;
+ goto out;
+ }
+ cpumask_and(wq_domain_mask,
+ housekeeping_cpumask(HK_TYPE_WQ),
+ housekeeping_cpumask(HK_TYPE_DOMAIN));
+
cpu = cpumask_any_and(cpumask_of_node(node),
- housekeeping_cpumask(hk_flags));
+ wq_domain_mask);
+ free_cpumask_var(wq_domain_mask);
+ }
if (cpu < nr_cpu_ids)
error = work_on_cpu(cpu, local_pci_probe, &ddi);
else
error = local_pci_probe(&ddi);
-
+out:
dev->is_probed = 0;
cpu_hotplug_enable();
return error;
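This follows the housekeeping rework that replaced the HK_FLAG_* bitmask with per-type cpumasks (enum housekeeping_type), so querying two types at once now needs an explicit intersection in a temporary mask. The conversion pattern in isolation:

/* Sketch: old flags-OR query becomes a per-type mask intersection. */
cpumask_var_t mask;

if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
	return -ENOMEM;
cpumask_and(mask, housekeeping_cpumask(HK_TYPE_WQ),
	    housekeeping_cpumask(HK_TYPE_DOMAIN));
/* ... pick a CPU from 'mask' ... */
free_cpumask_var(mask);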
@@ -596,7 +607,7 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
int error;
error = drv->suspend(pci_dev, state);
- suspend_report_result(drv->suspend, error);
+ suspend_report_result(dev, drv->suspend, error);
if (error)
return error;
@@ -775,7 +786,7 @@ static int pci_pm_suspend(struct device *dev)
int error;
error = pm->suspend(dev);
- suspend_report_result(pm->suspend, error);
+ suspend_report_result(dev, pm->suspend, error);
if (error)
return error;
@@ -821,7 +832,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
int error;
error = pm->suspend_noirq(dev);
- suspend_report_result(pm->suspend_noirq, error);
+ suspend_report_result(dev, pm->suspend_noirq, error);
if (error)
return error;
@@ -1010,7 +1021,7 @@ static int pci_pm_freeze(struct device *dev)
int error;
error = pm->freeze(dev);
- suspend_report_result(pm->freeze, error);
+ suspend_report_result(dev, pm->freeze, error);
if (error)
return error;
}
@@ -1030,7 +1041,7 @@ static int pci_pm_freeze_noirq(struct device *dev)
int error;
error = pm->freeze_noirq(dev);
- suspend_report_result(pm->freeze_noirq, error);
+ suspend_report_result(dev, pm->freeze_noirq, error);
if (error)
return error;
}
@@ -1116,7 +1127,7 @@ static int pci_pm_poweroff(struct device *dev)
int error;
error = pm->poweroff(dev);
- suspend_report_result(pm->poweroff, error);
+ suspend_report_result(dev, pm->poweroff, error);
if (error)
return error;
}
@@ -1154,7 +1165,7 @@ static int pci_pm_poweroff_noirq(struct device *dev)
int error;
error = pm->poweroff_noirq(dev);
- suspend_report_result(pm->poweroff_noirq, error);
+ suspend_report_result(dev, pm->poweroff_noirq, error);
if (error)
return error;
}
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index ab53eab635f6..2ce261cfff8e 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -210,7 +210,7 @@ config PCMCIA_PXA2XX
depends on ARM && ARCH_PXA && PCMCIA
depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \
|| ARCH_PXA_PALM || TRIZEPS_PCMCIA \
- || ARCOM_PCMCIA || ARCH_PXA_ESERIES || MACH_STARGATE2 \
+ || ARCOM_PCMCIA || ARCH_PXA_ESERIES \
|| MACH_VPAC270 || MACH_BALLOON3 || MACH_COLIBRI \
|| MACH_COLIBRI320 || MACH_H4700)
select PCMCIA_SOC_COMMON
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 2d5657cfc49c..c43267b18f55 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -56,7 +56,6 @@ pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o
pxa2xx-obj-$(CONFIG_MACH_PALMTC) += pxa2xx_palmtc.o
pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o
pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o
-pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o
pxa2xx-obj-$(CONFIG_MACH_VPAC270) += pxa2xx_vpac270.o
pxa2xx-obj-$(CONFIG_MACH_BALLOON3) += pxa2xx_balloon3.o
pxa2xx-obj-$(CONFIG_MACH_COLIBRI) += pxa2xx_colibri.o
diff --git a/drivers/pcmcia/pxa2xx_stargate2.c b/drivers/pcmcia/pxa2xx_stargate2.c
deleted file mode 100644
index 6efb7f814b4a..000000000000
--- a/drivers/pcmcia/pxa2xx_stargate2.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/pcmcia/pxa2xx_stargate2.c
- *
- * Stargate 2 PCMCIA specific routines.
- *
- * Created: December 6, 2005
- * Author: Ed C. Epp
- * Copyright: Intel Corp 2005
- * Jonathan Cameron <jic23@cam.ac.uk> 2009
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-
-#include <pcmcia/ss.h>
-
-#include <asm/irq.h>
-#include <asm/mach-types.h>
-
-#include "soc_common.h"
-
-#define SG2_S0_POWER_CTL 108
-#define SG2_S0_GPIO_RESET 82
-#define SG2_S0_GPIO_DETECT 53
-#define SG2_S0_GPIO_READY 81
-
-static struct gpio sg2_pcmcia_gpios[] = {
- { SG2_S0_GPIO_RESET, GPIOF_OUT_INIT_HIGH, "PCMCIA Reset" },
- { SG2_S0_POWER_CTL, GPIOF_OUT_INIT_HIGH, "PCMCIA Power Ctrl" },
-};
-
-static int sg2_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
-{
- skt->stat[SOC_STAT_CD].gpio = SG2_S0_GPIO_DETECT;
- skt->stat[SOC_STAT_CD].name = "PCMCIA0 CD";
- skt->stat[SOC_STAT_RDY].gpio = SG2_S0_GPIO_READY;
- skt->stat[SOC_STAT_RDY].name = "PCMCIA0 RDY";
- return 0;
-}
-
-static void sg2_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
- struct pcmcia_state *state)
-{
- state->bvd1 = 0; /* not available - battery detect on card */
- state->bvd2 = 0; /* not available */
- state->vs_3v = 1; /* not available - voltage detect for card */
- state->vs_Xv = 0; /* not available */
-}
-
-static int sg2_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
- const socket_state_t *state)
-{
- /* Enable card power */
- switch (state->Vcc) {
- case 0:
- /* sets power ctl register high */
- gpio_set_value(SG2_S0_POWER_CTL, 1);
- break;
- case 33:
- case 50:
- /* sets power control register low (clear) */
- gpio_set_value(SG2_S0_POWER_CTL, 0);
- msleep(100);
- break;
- default:
- pr_err("%s(): bad Vcc %u\n",
- __func__, state->Vcc);
- return -1;
- }
-
- /* reset */
- gpio_set_value(SG2_S0_GPIO_RESET, !!(state->flags & SS_RESET));
-
- return 0;
-}
-
-static struct pcmcia_low_level sg2_pcmcia_ops __initdata = {
- .owner = THIS_MODULE,
- .hw_init = sg2_pcmcia_hw_init,
- .socket_state = sg2_pcmcia_socket_state,
- .configure_socket = sg2_pcmcia_configure_socket,
- .nr = 1,
-};
-
-static struct platform_device *sg2_pcmcia_device;
-
-static int __init sg2_pcmcia_init(void)
-{
- int ret;
-
- if (!machine_is_stargate2())
- return -ENODEV;
-
- sg2_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
- if (!sg2_pcmcia_device)
- return -ENOMEM;
-
- ret = gpio_request_array(sg2_pcmcia_gpios, ARRAY_SIZE(sg2_pcmcia_gpios));
- if (ret)
- goto error_put_platform_device;
-
- ret = platform_device_add_data(sg2_pcmcia_device,
- &sg2_pcmcia_ops,
- sizeof(sg2_pcmcia_ops));
- if (ret)
- goto error_free_gpios;
-
- ret = platform_device_add(sg2_pcmcia_device);
- if (ret)
- goto error_free_gpios;
-
- return 0;
-error_free_gpios:
- gpio_free_array(sg2_pcmcia_gpios, ARRAY_SIZE(sg2_pcmcia_gpios));
-error_put_platform_device:
- platform_device_put(sg2_pcmcia_device);
-
- return ret;
-}
-
-static void __exit sg2_pcmcia_exit(void)
-{
- platform_device_unregister(sg2_pcmcia_device);
- gpio_free_array(sg2_pcmcia_gpios, ARRAY_SIZE(sg2_pcmcia_gpios));
-}
-
-fs_initcall(sg2_pcmcia_init);
-module_exit(sg2_pcmcia_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:pxa2xx-pcmcia");
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index e1a0c44bc686..d05ca6ebbb9d 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -141,11 +141,25 @@ config ARM_DMC620_PMU
config MARVELL_CN10K_TAD_PMU
tristate "Marvell CN10K LLC-TAD PMU"
- depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on ARCH_THUNDER || (COMPILE_TEST && 64BIT)
help
Provides support for Last-Level cache Tag-and-data Units (LLC-TAD)
performance monitors on CN10K family silicons.
+config APPLE_M1_CPU_PMU
+ bool "Apple M1 CPU PMU support"
+ depends on ARM_PMU && ARCH_APPLE
+ help
+ Provides support for the non-architectural CPU PMUs present on
+ the Apple M1 SoCs and derivatives.
+
source "drivers/perf/hisilicon/Kconfig"
+config MARVELL_CN10K_DDR_PMU
+ tristate "Enable MARVELL CN10K DRAM Subsystem(DSS) PMU Support"
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ help
+ Enable perf support for Marvell DDR performance monitoring
+ events on the CN10K platform.
+
endmenu
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 2db5418d5b0a..4f43080ec54e 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -15,3 +15,5 @@ obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
obj-$(CONFIG_ARM_DMC620_PMU) += arm_dmc620_pmu.o
obj-$(CONFIG_MARVELL_CN10K_TAD_PMU) += marvell_cn10k_tad_pmu.o
+obj-$(CONFIG_MARVELL_CN10K_DDR_PMU) += marvell_cn10k_ddr_pmu.o
+obj-$(CONFIG_APPLE_M1_CPU_PMU) += apple_m1_cpu_pmu.o
diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c
new file mode 100644
index 000000000000..979a7c2b4f56
--- /dev/null
+++ b/drivers/perf/apple_m1_cpu_pmu.c
@@ -0,0 +1,584 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CPU PMU driver for the Apple M1 and derivatives
+ *
+ * Copyright (C) 2021 Google LLC
+ *
+ * Author: Marc Zyngier <maz@kernel.org>
+ *
+ * Most of the information used in this driver was provided by the
+ * Asahi Linux project. The rest was experimentally discovered.
+ */
+
+#include <linux/of.h>
+#include <linux/perf/arm_pmu.h>
+#include <linux/platform_device.h>
+
+#include <asm/apple_m1_pmu.h>
+#include <asm/irq_regs.h>
+#include <asm/perf_event.h>
+
+#define M1_PMU_NR_COUNTERS 10
+
+#define M1_PMU_CFG_EVENT GENMASK(7, 0)
+
+#define ANY_BUT_0_1 GENMASK(9, 2)
+#define ONLY_2_TO_7 GENMASK(7, 2)
+#define ONLY_2_4_6 (BIT(2) | BIT(4) | BIT(6))
+#define ONLY_5_6_7 (BIT(5) | BIT(6) | BIT(7))
+
+/*
+ * Description of the events we actually know about, as well as those with
+ * a specific counter affinity. Yes, this is a grand total of two known
+ * counters, and the rest is anybody's guess.
+ *
+ * Not all counters can count all events. Counters #0 and #1 are wired to
+ * count cycles and instructions respectively, and some events have
+ * bizarre mappings (every other counter, or even *one* counter). These
+ * restrictions equally apply to both P and E cores.
+ *
+ * It is worth noting that the PMUs attached to P and E cores are likely
+ * to be different because the underlying uarches are different. At the
+ * moment, we don't really need to distinguish between the two because we
+ * know next to nothing about the events themselves, and we already have
+ * per cpu-type PMU abstractions.
+ *
+ * If we eventually find out that the events are different across
+ * implementations, we'll have to introduce per cpu-type tables.
+ */
+enum m1_pmu_events {
+ M1_PMU_PERFCTR_UNKNOWN_01 = 0x01,
+ M1_PMU_PERFCTR_CPU_CYCLES = 0x02,
+ M1_PMU_PERFCTR_INSTRUCTIONS = 0x8c,
+ M1_PMU_PERFCTR_UNKNOWN_8d = 0x8d,
+ M1_PMU_PERFCTR_UNKNOWN_8e = 0x8e,
+ M1_PMU_PERFCTR_UNKNOWN_8f = 0x8f,
+ M1_PMU_PERFCTR_UNKNOWN_90 = 0x90,
+ M1_PMU_PERFCTR_UNKNOWN_93 = 0x93,
+ M1_PMU_PERFCTR_UNKNOWN_94 = 0x94,
+ M1_PMU_PERFCTR_UNKNOWN_95 = 0x95,
+ M1_PMU_PERFCTR_UNKNOWN_96 = 0x96,
+ M1_PMU_PERFCTR_UNKNOWN_97 = 0x97,
+ M1_PMU_PERFCTR_UNKNOWN_98 = 0x98,
+ M1_PMU_PERFCTR_UNKNOWN_99 = 0x99,
+ M1_PMU_PERFCTR_UNKNOWN_9a = 0x9a,
+ M1_PMU_PERFCTR_UNKNOWN_9b = 0x9b,
+ M1_PMU_PERFCTR_UNKNOWN_9c = 0x9c,
+ M1_PMU_PERFCTR_UNKNOWN_9f = 0x9f,
+ M1_PMU_PERFCTR_UNKNOWN_bf = 0xbf,
+ M1_PMU_PERFCTR_UNKNOWN_c0 = 0xc0,
+ M1_PMU_PERFCTR_UNKNOWN_c1 = 0xc1,
+ M1_PMU_PERFCTR_UNKNOWN_c4 = 0xc4,
+ M1_PMU_PERFCTR_UNKNOWN_c5 = 0xc5,
+ M1_PMU_PERFCTR_UNKNOWN_c6 = 0xc6,
+ M1_PMU_PERFCTR_UNKNOWN_c8 = 0xc8,
+ M1_PMU_PERFCTR_UNKNOWN_ca = 0xca,
+ M1_PMU_PERFCTR_UNKNOWN_cb = 0xcb,
+ M1_PMU_PERFCTR_UNKNOWN_f5 = 0xf5,
+ M1_PMU_PERFCTR_UNKNOWN_f6 = 0xf6,
+ M1_PMU_PERFCTR_UNKNOWN_f7 = 0xf7,
+ M1_PMU_PERFCTR_UNKNOWN_f8 = 0xf8,
+ M1_PMU_PERFCTR_UNKNOWN_fd = 0xfd,
+ M1_PMU_PERFCTR_LAST = M1_PMU_CFG_EVENT,
+
+ /*
+ * From this point onwards, these are not actual HW events,
+ * but attributes that get stored in hw->config_base.
+ */
+ M1_PMU_CFG_COUNT_USER = BIT(8),
+ M1_PMU_CFG_COUNT_KERNEL = BIT(9),
+};
+
+/*
+ * Per-event affinity table. Most events can be installed on counters
+ * 2-9, but there are a number of exceptions. Note that this table
+ * has been created experimentally, and I wouldn't be surprised if more
+ * counters had strange affinities.
+ */
+static const u16 m1_pmu_event_affinity[M1_PMU_PERFCTR_LAST + 1] = {
+ [0 ... M1_PMU_PERFCTR_LAST] = ANY_BUT_0_1,
+ [M1_PMU_PERFCTR_UNKNOWN_01] = BIT(7),
+ [M1_PMU_PERFCTR_CPU_CYCLES] = ANY_BUT_0_1 | BIT(0),
+ [M1_PMU_PERFCTR_INSTRUCTIONS] = BIT(7) | BIT(1),
+ [M1_PMU_PERFCTR_UNKNOWN_8d] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_8e] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_8f] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_90] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_93] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_94] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_95] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_96] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_97] = BIT(7),
+ [M1_PMU_PERFCTR_UNKNOWN_98] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_99] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_9a] = BIT(7),
+ [M1_PMU_PERFCTR_UNKNOWN_9b] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_9c] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_9f] = BIT(7),
+ [M1_PMU_PERFCTR_UNKNOWN_bf] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_c0] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_c1] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_c4] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_c5] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_c6] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_c8] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_ca] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_cb] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_f5] = ONLY_2_4_6,
+ [M1_PMU_PERFCTR_UNKNOWN_f6] = ONLY_2_4_6,
+ [M1_PMU_PERFCTR_UNKNOWN_f7] = ONLY_2_4_6,
+ [M1_PMU_PERFCTR_UNKNOWN_f8] = ONLY_2_TO_7,
+ [M1_PMU_PERFCTR_UNKNOWN_fd] = ONLY_2_4_6,
+};
+
+static const unsigned m1_pmu_perf_map[PERF_COUNT_HW_MAX] = {
+ PERF_MAP_ALL_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = M1_PMU_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = M1_PMU_PERFCTR_INSTRUCTIONS,
+ /* No idea about the rest yet */
+};
+
+/* sysfs definitions */
+static ssize_t m1_pmu_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+
+ return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
+}
+
+#define M1_PMU_EVENT_ATTR(name, config) \
+ PMU_EVENT_ATTR_ID(name, m1_pmu_events_sysfs_show, config)
+
+static struct attribute *m1_pmu_event_attrs[] = {
+ M1_PMU_EVENT_ATTR(cycles, M1_PMU_PERFCTR_CPU_CYCLES),
+ M1_PMU_EVENT_ATTR(instructions, M1_PMU_PERFCTR_INSTRUCTIONS),
+ NULL,
+};
+
+static const struct attribute_group m1_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = m1_pmu_event_attrs,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-7");
+
+static struct attribute *m1_pmu_format_attrs[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static const struct attribute_group m1_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = m1_pmu_format_attrs,
+};
+
+/* Low level accessors. No synchronisation. */
+#define PMU_READ_COUNTER(_idx) \
+ case _idx: return read_sysreg_s(SYS_IMP_APL_PMC## _idx ##_EL1)
+
+#define PMU_WRITE_COUNTER(_val, _idx) \
+ case _idx: \
+ write_sysreg_s(_val, SYS_IMP_APL_PMC## _idx ##_EL1); \
+ return
+
+static u64 m1_pmu_read_hw_counter(unsigned int index)
+{
+ switch (index) {
+ PMU_READ_COUNTER(0);
+ PMU_READ_COUNTER(1);
+ PMU_READ_COUNTER(2);
+ PMU_READ_COUNTER(3);
+ PMU_READ_COUNTER(4);
+ PMU_READ_COUNTER(5);
+ PMU_READ_COUNTER(6);
+ PMU_READ_COUNTER(7);
+ PMU_READ_COUNTER(8);
+ PMU_READ_COUNTER(9);
+ }
+
+ BUG();
+}
+
+static void m1_pmu_write_hw_counter(u64 val, unsigned int index)
+{
+ switch (index) {
+ PMU_WRITE_COUNTER(val, 0);
+ PMU_WRITE_COUNTER(val, 1);
+ PMU_WRITE_COUNTER(val, 2);
+ PMU_WRITE_COUNTER(val, 3);
+ PMU_WRITE_COUNTER(val, 4);
+ PMU_WRITE_COUNTER(val, 5);
+ PMU_WRITE_COUNTER(val, 6);
+ PMU_WRITE_COUNTER(val, 7);
+ PMU_WRITE_COUNTER(val, 8);
+ PMU_WRITE_COUNTER(val, 9);
+ }
+
+ BUG();
+}
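System registers are encoded as immediates in the MRS/MSR instructions, so a run-time counter index cannot be used directly; the macros above expand into one switch case per register. For example, PMU_READ_COUNTER(2) expands to (illustrative):

case 2: return read_sysreg_s(SYS_IMP_APL_PMC2_EL1);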
+
+#define get_bit_offset(index, mask) (__ffs(mask) + (index))
+
+static void __m1_pmu_enable_counter(unsigned int index, bool en)
+{
+ u64 val, bit;
+
+ switch (index) {
+ case 0 ... 7:
+ bit = BIT(get_bit_offset(index, PMCR0_CNT_ENABLE_0_7));
+ break;
+ case 8 ... 9:
+ bit = BIT(get_bit_offset(index - 8, PMCR0_CNT_ENABLE_8_9));
+ break;
+ default:
+ BUG();
+ }
+
+ val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
+
+ if (en)
+ val |= bit;
+ else
+ val &= ~bit;
+
+ write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
+}
+
+static void m1_pmu_enable_counter(unsigned int index)
+{
+ __m1_pmu_enable_counter(index, true);
+}
+
+static void m1_pmu_disable_counter(unsigned int index)
+{
+ __m1_pmu_enable_counter(index, false);
+}
+
+static void __m1_pmu_enable_counter_interrupt(unsigned int index, bool en)
+{
+ u64 val, bit;
+
+ switch (index) {
+ case 0 ... 7:
+ bit = BIT(get_bit_offset(index, PMCR0_PMI_ENABLE_0_7));
+ break;
+ case 8 ... 9:
+ bit = BIT(get_bit_offset(index - 8, PMCR0_PMI_ENABLE_8_9));
+ break;
+ default:
+ BUG();
+ }
+
+ val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
+
+ if (en)
+ val |= bit;
+ else
+ val &= ~bit;
+
+ write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
+}
+
+static void m1_pmu_enable_counter_interrupt(unsigned int index)
+{
+ __m1_pmu_enable_counter_interrupt(index, true);
+}
+
+static void m1_pmu_disable_counter_interrupt(unsigned int index)
+{
+ __m1_pmu_enable_counter_interrupt(index, false);
+}
+
+static void m1_pmu_configure_counter(unsigned int index, u8 event,
+ bool user, bool kernel)
+{
+ u64 val, user_bit, kernel_bit;
+ int shift;
+
+ switch (index) {
+ case 0 ... 7:
+ user_bit = BIT(get_bit_offset(index, PMCR1_COUNT_A64_EL0_0_7));
+ kernel_bit = BIT(get_bit_offset(index, PMCR1_COUNT_A64_EL1_0_7));
+ break;
+ case 8 ... 9:
+ user_bit = BIT(get_bit_offset(index - 8, PMCR1_COUNT_A64_EL0_8_9));
+ kernel_bit = BIT(get_bit_offset(index - 8, PMCR1_COUNT_A64_EL1_8_9));
+ break;
+ default:
+ BUG();
+ }
+
+ val = read_sysreg_s(SYS_IMP_APL_PMCR1_EL1);
+
+ if (user)
+ val |= user_bit;
+ else
+ val &= ~user_bit;
+
+ if (kernel)
+ val |= kernel_bit;
+ else
+ val &= ~kernel_bit;
+
+ write_sysreg_s(val, SYS_IMP_APL_PMCR1_EL1);
+
+ /*
+ * Counters 0 and 1 have fixed events. For anything else,
+ * place the event at the expected location in the relevant
+ * register (PMESR0 holds the event configuration for counters
+ * 2-5, resp. PMESR1 for counters 6-9).
+ */
+ switch (index) {
+ case 0 ... 1:
+ break;
+ case 2 ... 5:
+ shift = (index - 2) * 8;
+ val = read_sysreg_s(SYS_IMP_APL_PMESR0_EL1);
+ val &= ~((u64)0xff << shift);
+ val |= (u64)event << shift;
+ write_sysreg_s(val, SYS_IMP_APL_PMESR0_EL1);
+ break;
+ case 6 ... 9:
+ shift = (index - 6) * 8;
+ val = read_sysreg_s(SYS_IMP_APL_PMESR1_EL1);
+ val &= ~((u64)0xff << shift);
+ val |= (u64)event << shift;
+ write_sysreg_s(val, SYS_IMP_APL_PMESR1_EL1);
+ break;
+ }
+}
+
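Each of PMESR0/PMESR1 packs four 8-bit event selectors. As a worked instance of the placement above: programming event 0x8c on counter 7 selects the 'case 6 ... 9' arm, so shift = (7 - 6) * 8 = 8 and the event byte lands in PMESR1 bits 15:8:

/* Worked example (sketch), counter 7, event 0x8c: */
shift = (7 - 6) * 8;				/* = 8 */
val = read_sysreg_s(SYS_IMP_APL_PMESR1_EL1);
val &= ~((u64)0xff << shift);			/* clear bits 15:8 */
val |= (u64)0x8c << shift;			/* insert the event */
write_sysreg_s(val, SYS_IMP_APL_PMESR1_EL1);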
+/* arm_pmu backend */
+static void m1_pmu_enable_event(struct perf_event *event)
+{
+ bool user, kernel;
+ u8 evt;
+
+ evt = event->hw.config_base & M1_PMU_CFG_EVENT;
+ user = event->hw.config_base & M1_PMU_CFG_COUNT_USER;
+ kernel = event->hw.config_base & M1_PMU_CFG_COUNT_KERNEL;
+
+ m1_pmu_disable_counter_interrupt(event->hw.idx);
+ m1_pmu_disable_counter(event->hw.idx);
+ isb();
+
+ m1_pmu_configure_counter(event->hw.idx, evt, user, kernel);
+ m1_pmu_enable_counter(event->hw.idx);
+ m1_pmu_enable_counter_interrupt(event->hw.idx);
+ isb();
+}
+
+static void m1_pmu_disable_event(struct perf_event *event)
+{
+ m1_pmu_disable_counter_interrupt(event->hw.idx);
+ m1_pmu_disable_counter(event->hw.idx);
+ isb();
+}
+
+static irqreturn_t m1_pmu_handle_irq(struct arm_pmu *cpu_pmu)
+{
+ struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
+ struct pt_regs *regs;
+ u64 overflow, state;
+ int idx;
+
+ overflow = read_sysreg_s(SYS_IMP_APL_PMSR_EL1);
+ if (!overflow) {
+ /* Spurious interrupt? */
+ state = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
+ state &= ~PMCR0_IACT;
+ write_sysreg_s(state, SYS_IMP_APL_PMCR0_EL1);
+ isb();
+ return IRQ_NONE;
+ }
+
+ cpu_pmu->stop(cpu_pmu);
+
+ regs = get_irq_regs();
+
+ for (idx = 0; idx < cpu_pmu->num_events; idx++) {
+ struct perf_event *event = cpuc->events[idx];
+ struct perf_sample_data data;
+
+ if (!event)
+ continue;
+
+ armpmu_event_update(event);
+ perf_sample_data_init(&data, 0, event->hw.last_period);
+ if (!armpmu_event_set_period(event))
+ continue;
+
+ if (perf_event_overflow(event, &data, regs))
+ m1_pmu_disable_event(event);
+ }
+
+ cpu_pmu->start(cpu_pmu);
+
+ return IRQ_HANDLED;
+}
+
+static u64 m1_pmu_read_counter(struct perf_event *event)
+{
+ return m1_pmu_read_hw_counter(event->hw.idx);
+}
+
+static void m1_pmu_write_counter(struct perf_event *event, u64 value)
+{
+ m1_pmu_write_hw_counter(value, event->hw.idx);
+ isb();
+}
+
+static int m1_pmu_get_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ unsigned long evtype = event->hw.config_base & M1_PMU_CFG_EVENT;
+ unsigned long affinity = m1_pmu_event_affinity[evtype];
+ int idx;
+
+ /*
+ * Place the event on the first free counter that can count
+ * this event.
+ *
+ * We could do a better job if we had a view of all the events
+ * counting on the PMU at any given time, and by placing the
+ * most constraining events first.
+ */
+ for_each_set_bit(idx, &affinity, M1_PMU_NR_COUNTERS) {
+ if (!test_and_set_bit(idx, cpuc->used_mask))
+ return idx;
+ }
+
+ return -EAGAIN;
+}
+
+static void m1_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
+static void __m1_pmu_set_mode(u8 mode)
+{
+ u64 val;
+
+ val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
+ val &= ~(PMCR0_IMODE | PMCR0_IACT);
+ val |= FIELD_PREP(PMCR0_IMODE, mode);
+ write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
+ isb();
+}
+
+static void m1_pmu_start(struct arm_pmu *cpu_pmu)
+{
+ __m1_pmu_set_mode(PMCR0_IMODE_FIQ);
+}
+
+static void m1_pmu_stop(struct arm_pmu *cpu_pmu)
+{
+ __m1_pmu_set_mode(PMCR0_IMODE_OFF);
+}
+
+static int m1_pmu_map_event(struct perf_event *event)
+{
+ /*
+ * Although the counters are 48bit wide, bit 47 is what
+ * triggers the overflow interrupt. Advertise the counters
+ * being 47bit wide to mimic the behaviour of the ARM PMU.
+ */
+ event->hw.flags |= ARMPMU_EVT_47BIT;
+ return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT);
+}
+
+static void m1_pmu_reset(void *info)
+{
+ int i;
+
+ __m1_pmu_set_mode(PMCR0_IMODE_OFF);
+
+ for (i = 0; i < M1_PMU_NR_COUNTERS; i++) {
+ m1_pmu_disable_counter(i);
+ m1_pmu_disable_counter_interrupt(i);
+ m1_pmu_write_hw_counter(0, i);
+ }
+
+ isb();
+}
+
+static int m1_pmu_set_event_filter(struct hw_perf_event *event,
+ struct perf_event_attr *attr)
+{
+ unsigned long config_base = 0;
+
+ if (!attr->exclude_guest)
+ return -EINVAL;
+ if (!attr->exclude_kernel)
+ config_base |= M1_PMU_CFG_COUNT_KERNEL;
+ if (!attr->exclude_user)
+ config_base |= M1_PMU_CFG_COUNT_USER;
+
+ event->config_base = config_base;
+
+ return 0;
+}
+
+static int m1_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ cpu_pmu->handle_irq = m1_pmu_handle_irq;
+ cpu_pmu->enable = m1_pmu_enable_event;
+ cpu_pmu->disable = m1_pmu_disable_event;
+ cpu_pmu->read_counter = m1_pmu_read_counter;
+ cpu_pmu->write_counter = m1_pmu_write_counter;
+ cpu_pmu->get_event_idx = m1_pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = m1_pmu_clear_event_idx;
+ cpu_pmu->start = m1_pmu_start;
+ cpu_pmu->stop = m1_pmu_stop;
+ cpu_pmu->map_event = m1_pmu_map_event;
+ cpu_pmu->reset = m1_pmu_reset;
+ cpu_pmu->set_event_filter = m1_pmu_set_event_filter;
+
+ cpu_pmu->num_events = M1_PMU_NR_COUNTERS;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &m1_pmu_events_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &m1_pmu_format_attr_group;
+ return 0;
+}
+
+/* Device driver gunk */
+static int m1_pmu_ice_init(struct arm_pmu *cpu_pmu)
+{
+ cpu_pmu->name = "apple_icestorm_pmu";
+ return m1_pmu_init(cpu_pmu);
+}
+
+static int m1_pmu_fire_init(struct arm_pmu *cpu_pmu)
+{
+ cpu_pmu->name = "apple_firestorm_pmu";
+ return m1_pmu_init(cpu_pmu);
+}
+
+static const struct of_device_id m1_pmu_of_device_ids[] = {
+ { .compatible = "apple,icestorm-pmu", .data = m1_pmu_ice_init, },
+ { .compatible = "apple,firestorm-pmu", .data = m1_pmu_fire_init, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, m1_pmu_of_device_ids);
+
+static int m1_pmu_device_probe(struct platform_device *pdev)
+{
+ return arm_pmu_device_probe(pdev, m1_pmu_of_device_ids, NULL);
+}
+
+static struct platform_driver m1_pmu_driver = {
+ .driver = {
+ .name = "apple-m1-cpu-pmu",
+ .of_match_table = m1_pmu_of_device_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = m1_pmu_device_probe,
+};
+
+module_platform_driver(m1_pmu_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 54aca3a62814..96e09fa40909 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -1096,7 +1096,7 @@ static void cci_pmu_enable(struct pmu *pmu)
{
struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
- int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs);
+ bool enabled = !bitmap_empty(hw_events->used_mask, cci_pmu->num_cntrs);
unsigned long flags;
if (!enabled)
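These call sites only need to know whether any counter is in use, so the full population count from bitmap_weight() is overkill; bitmap_empty() can stop at the first non-zero word. The replacement in isolation:

/* "Is any counter in use?" without counting them all: */
bool enabled = !bitmap_empty(hw_events->used_mask, cci_pmu->num_cntrs);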
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index a96c31604545..40b352e8aa7f 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -1460,8 +1460,7 @@ static irqreturn_t arm_ccn_irq_handler(int irq, void *dev_id)
static int arm_ccn_probe(struct platform_device *pdev)
{
struct arm_ccn *ccn;
- struct resource *res;
- unsigned int irq;
+ int irq;
int err;
ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL);
@@ -1474,10 +1473,9 @@ static int arm_ccn_probe(struct platform_device *pdev)
if (IS_ERR(ccn->base))
return PTR_ERR(ccn->base);
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res)
- return -EINVAL;
- irq = res->start;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
/* Check if we can use the interrupt */
writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE,
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 0e48adce57ef..9c1d82be7a2f 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -71,9 +71,11 @@
#define CMN_DTM_WPn(n) (0x1A0 + (n) * 0x18)
#define CMN_DTM_WPn_CONFIG(n) (CMN_DTM_WPn(n) + 0x00)
#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL2 GENMASK_ULL(18,17)
-#define CMN_DTM_WPn_CONFIG_WP_COMBINE BIT(6)
-#define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE BIT(5)
-#define CMN_DTM_WPn_CONFIG_WP_GRP BIT(4)
+#define CMN_DTM_WPn_CONFIG_WP_COMBINE BIT(9)
+#define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE BIT(8)
+#define CMN600_WPn_CONFIG_WP_COMBINE BIT(6)
+#define CMN600_WPn_CONFIG_WP_EXCLUSIVE BIT(5)
+#define CMN_DTM_WPn_CONFIG_WP_GRP GENMASK_ULL(5, 4)
#define CMN_DTM_WPn_CONFIG_WP_CHN_SEL GENMASK_ULL(3, 1)
#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL BIT(0)
#define CMN_DTM_WPn_VAL(n) (CMN_DTM_WPn(n) + 0x08)
@@ -155,6 +157,7 @@
#define CMN_CONFIG_WP_COMBINE GENMASK_ULL(27, 24)
#define CMN_CONFIG_WP_DEV_SEL GENMASK_ULL(50, 48)
#define CMN_CONFIG_WP_CHN_SEL GENMASK_ULL(55, 51)
+/* Note that we don't yet support the tertiary match group on newer IPs */
#define CMN_CONFIG_WP_GRP BIT_ULL(56)
#define CMN_CONFIG_WP_EXCLUSIVE BIT_ULL(57)
#define CMN_CONFIG1_WP_VAL GENMASK_ULL(63, 0)
@@ -353,7 +356,7 @@ static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
return NULL;
}
-struct dentry *arm_cmn_debugfs;
+static struct dentry *arm_cmn_debugfs;
#ifdef CONFIG_DEBUG_FS
static const char *arm_cmn_device_type(u8 type)
@@ -595,6 +598,9 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3)))
return 0;
+ if (chan == 4 && cmn->model == CMN600)
+ return 0;
+
if ((chan == 5 && cmn->rsp_vc_num < 2) ||
(chan == 6 && cmn->dat_vc_num < 2))
return 0;
@@ -905,15 +911,18 @@ static u32 arm_cmn_wp_config(struct perf_event *event)
u32 grp = CMN_EVENT_WP_GRP(event);
u32 exc = CMN_EVENT_WP_EXCLUSIVE(event);
u32 combine = CMN_EVENT_WP_COMBINE(event);
+ bool is_cmn600 = to_cmn(event->pmu)->model == CMN600;
config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) |
FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) |
FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_GRP, grp) |
- FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE, exc) |
FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL2, dev >> 1);
+ if (exc)
+ config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_EXCLUSIVE :
+ CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE;
if (combine && !grp)
- config |= CMN_DTM_WPn_CONFIG_WP_COMBINE;
-
+ config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_COMBINE :
+ CMN_DTM_WPn_CONFIG_WP_COMBINE;
return config;
}
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 295cc7952d0e..9694370651fa 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -109,6 +109,8 @@ static inline u64 arm_pmu_event_max_period(struct perf_event *event)
{
if (event->hw.flags & ARMPMU_EVT_64BIT)
return GENMASK_ULL(63, 0);
+ else if (event->hw.flags & ARMPMU_EVT_47BIT)
+ return GENMASK_ULL(46, 0);
else
return GENMASK_ULL(31, 0);
}
@@ -524,7 +526,7 @@ static void armpmu_enable(struct pmu *pmu)
{
struct arm_pmu *armpmu = to_arm_pmu(pmu);
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
- int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+ bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events);
/* For task-bound events we may be called on other CPUs */
if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
@@ -785,7 +787,7 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
{
struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
- int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+ bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events);
if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
return NOTIFY_DONE;
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index a738aeab5c04..358e4e284a62 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -393,7 +393,7 @@ EXPORT_SYMBOL_GPL(hisi_uncore_pmu_read);
void hisi_uncore_pmu_enable(struct pmu *pmu)
{
struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu);
- int enabled = bitmap_weight(hisi_pmu->pmu_events.used_mask,
+ bool enabled = !bitmap_empty(hisi_pmu->pmu_events.used_mask,
hisi_pmu->num_counters);
if (!enabled)
diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c
new file mode 100644
index 000000000000..665b382a0ee3
--- /dev/null
+++ b/drivers/perf/marvell_cn10k_ddr_pmu.c
@@ -0,0 +1,758 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/perf_event.h>
+#include <linux/hrtimer.h>
+
+/* Performance Counters Operating Mode Control Registers */
+#define DDRC_PERF_CNT_OP_MODE_CTRL 0x8020
+#define OP_MODE_CTRL_VAL_MANNUAL 0x1
+
+/* Performance Counters Start Operation Control Registers */
+#define DDRC_PERF_CNT_START_OP_CTRL 0x8028
+#define START_OP_CTRL_VAL_START 0x1ULL
+#define START_OP_CTRL_VAL_ACTIVE 0x2
+
+/* Performance Counters End Operation Control Registers */
+#define DDRC_PERF_CNT_END_OP_CTRL 0x8030
+#define END_OP_CTRL_VAL_END 0x1ULL
+
+/* Performance Counters End Status Registers */
+#define DDRC_PERF_CNT_END_STATUS 0x8038
+#define END_STATUS_VAL_END_TIMER_MODE_END 0x1
+
+/* Performance Counters Configuration Registers */
+#define DDRC_PERF_CFG_BASE 0x8040
+
+/* 8 generic event counters + 2 fixed event counters */
+#define DDRC_PERF_NUM_GEN_COUNTERS 8
+#define DDRC_PERF_NUM_FIX_COUNTERS 2
+#define DDRC_PERF_READ_COUNTER_IDX DDRC_PERF_NUM_GEN_COUNTERS
+#define DDRC_PERF_WRITE_COUNTER_IDX (DDRC_PERF_NUM_GEN_COUNTERS + 1)
+#define DDRC_PERF_NUM_COUNTERS (DDRC_PERF_NUM_GEN_COUNTERS + \
+ DDRC_PERF_NUM_FIX_COUNTERS)
+
+/* Generic event counter registers */
+#define DDRC_PERF_CFG(n) (DDRC_PERF_CFG_BASE + 8 * (n))
+#define EVENT_ENABLE BIT_ULL(63)
+
+/* Two dedicated event counters for DDR reads and writes */
+#define EVENT_DDR_READS 101
+#define EVENT_DDR_WRITES 100
+
+/*
+ * Programmable event IDs for the programmable event counters.
+ * DO NOT change these event-id numbers; they are used to
+ * program the event bitmap in h/w.
+ */
+#define EVENT_OP_IS_ZQLATCH 55
+#define EVENT_OP_IS_ZQSTART 54
+#define EVENT_OP_IS_TCR_MRR 53
+#define EVENT_OP_IS_DQSOSC_MRR 52
+#define EVENT_OP_IS_DQSOSC_MPC 51
+#define EVENT_VISIBLE_WIN_LIMIT_REACHED_WR 50
+#define EVENT_VISIBLE_WIN_LIMIT_REACHED_RD 49
+#define EVENT_BSM_STARVATION 48
+#define EVENT_BSM_ALLOC 47
+#define EVENT_LPR_REQ_WITH_NOCREDIT 46
+#define EVENT_HPR_REQ_WITH_NOCREDIT 45
+#define EVENT_OP_IS_ZQCS 44
+#define EVENT_OP_IS_ZQCL 43
+#define EVENT_OP_IS_LOAD_MODE 42
+#define EVENT_OP_IS_SPEC_REF 41
+#define EVENT_OP_IS_CRIT_REF 40
+#define EVENT_OP_IS_REFRESH 39
+#define EVENT_OP_IS_ENTER_MPSM 35
+#define EVENT_OP_IS_ENTER_POWERDOWN 31
+#define EVENT_OP_IS_ENTER_SELFREF 27
+#define EVENT_WAW_HAZARD 26
+#define EVENT_RAW_HAZARD 25
+#define EVENT_WAR_HAZARD 24
+#define EVENT_WRITE_COMBINE 23
+#define EVENT_RDWR_TRANSITIONS 22
+#define EVENT_PRECHARGE_FOR_OTHER 21
+#define EVENT_PRECHARGE_FOR_RDWR 20
+#define EVENT_OP_IS_PRECHARGE 19
+#define EVENT_OP_IS_MWR 18
+#define EVENT_OP_IS_WR 17
+#define EVENT_OP_IS_RD 16
+#define EVENT_OP_IS_RD_ACTIVATE 15
+#define EVENT_OP_IS_RD_OR_WR 14
+#define EVENT_OP_IS_ACTIVATE 13
+#define EVENT_WR_XACT_WHEN_CRITICAL 12
+#define EVENT_LPR_XACT_WHEN_CRITICAL 11
+#define EVENT_HPR_XACT_WHEN_CRITICAL 10
+#define EVENT_DFI_RD_DATA_CYCLES 9
+#define EVENT_DFI_WR_DATA_CYCLES 8
+#define EVENT_ACT_BYPASS 7
+#define EVENT_READ_BYPASS 6
+#define EVENT_HIF_HI_PRI_RD 5
+#define EVENT_HIF_RMW 4
+#define EVENT_HIF_RD 3
+#define EVENT_HIF_WR 2
+#define EVENT_HIF_RD_OR_WR 1
+
+/* Event counter value registers */
+#define DDRC_PERF_CNT_VALUE_BASE 0x8080
+#define DDRC_PERF_CNT_VALUE(n) (DDRC_PERF_CNT_VALUE_BASE + 8 * (n))
+
+/* Fixed event counter enable/disable register */
+#define DDRC_PERF_CNT_FREERUN_EN 0x80C0
+#define DDRC_PERF_FREERUN_WRITE_EN 0x1
+#define DDRC_PERF_FREERUN_READ_EN 0x2
+
+/* Fixed event counter control register */
+#define DDRC_PERF_CNT_FREERUN_CTRL 0x80C8
+#define DDRC_FREERUN_WRITE_CNT_CLR 0x1
+#define DDRC_FREERUN_READ_CNT_CLR 0x2
+
+/* Fixed event counter value register */
+#define DDRC_PERF_CNT_VALUE_WR_OP 0x80D0
+#define DDRC_PERF_CNT_VALUE_RD_OP 0x80D8
+#define DDRC_PERF_CNT_VALUE_OVERFLOW BIT_ULL(48)
+#define DDRC_PERF_CNT_MAX_VALUE GENMASK_ULL(48, 0)
+
+struct cn10k_ddr_pmu {
+ struct pmu pmu;
+ void __iomem *base;
+ unsigned int cpu;
+ struct device *dev;
+ int active_events;
+ struct perf_event *events[DDRC_PERF_NUM_COUNTERS];
+ struct hrtimer hrtimer;
+ struct hlist_node node;
+};
+
+#define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu)
+
+static ssize_t cn10k_ddr_pmu_event_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define CN10K_DDR_PMU_EVENT_ATTR(_name, _id) \
+ PMU_EVENT_ATTR_ID(_name, cn10k_ddr_pmu_event_show, _id)
+
+static struct attribute *cn10k_ddr_perf_events_attrs[] = {
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_or_wr_access, EVENT_HIF_RD_OR_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_wr_access, EVENT_HIF_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_access, EVENT_HIF_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rmw_access, EVENT_HIF_RMW),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_pri_rdaccess, EVENT_HIF_HI_PRI_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_bypass_access, EVENT_READ_BYPASS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_act_bypass_access, EVENT_ACT_BYPASS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dif_wr_data_access, EVENT_DFI_WR_DATA_CYCLES),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dif_rd_data_access, EVENT_DFI_RD_DATA_CYCLES),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hpri_sched_rd_crit_access,
+ EVENT_HPR_XACT_WHEN_CRITICAL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_lpri_sched_rd_crit_access,
+ EVENT_LPR_XACT_WHEN_CRITICAL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_wr_trxn_crit_access,
+ EVENT_WR_XACT_WHEN_CRITICAL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_active_access, EVENT_OP_IS_ACTIVATE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_or_wr_access, EVENT_OP_IS_RD_OR_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_active_access, EVENT_OP_IS_RD_ACTIVATE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_read, EVENT_OP_IS_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_write, EVENT_OP_IS_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_mwr, EVENT_OP_IS_MWR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge, EVENT_OP_IS_PRECHARGE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_rdwr, EVENT_PRECHARGE_FOR_RDWR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_other,
+ EVENT_PRECHARGE_FOR_OTHER),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rdwr_transitions, EVENT_RDWR_TRANSITIONS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_write_combine, EVENT_WRITE_COMBINE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_war_hazard, EVENT_WAR_HAZARD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_raw_hazard, EVENT_RAW_HAZARD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_waw_hazard, EVENT_WAW_HAZARD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_selfref, EVENT_OP_IS_ENTER_SELFREF),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_powerdown, EVENT_OP_IS_ENTER_POWERDOWN),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_mpsm, EVENT_OP_IS_ENTER_MPSM),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_refresh, EVENT_OP_IS_REFRESH),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_crit_ref, EVENT_OP_IS_CRIT_REF),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_spec_ref, EVENT_OP_IS_SPEC_REF),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_load_mode, EVENT_OP_IS_LOAD_MODE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_zqcl, EVENT_OP_IS_ZQCL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_wr_access, EVENT_OP_IS_ZQCS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hpr_req_with_nocredit,
+ EVENT_HPR_REQ_WITH_NOCREDIT),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_lpr_req_with_nocredit,
+ EVENT_LPR_REQ_WITH_NOCREDIT),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_alloc, EVENT_BSM_ALLOC),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_starvation, EVENT_BSM_STARVATION),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_rd,
+ EVENT_VISIBLE_WIN_LIMIT_REACHED_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_wr,
+ EVENT_VISIBLE_WIN_LIMIT_REACHED_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mpc, EVENT_OP_IS_DQSOSC_MPC),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mrr, EVENT_OP_IS_DQSOSC_MRR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_tcr_mrr, EVENT_OP_IS_TCR_MRR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_zqstart, EVENT_OP_IS_ZQSTART),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_zqlatch, EVENT_OP_IS_ZQLATCH),
+ /* Free run event counters */
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_reads, EVENT_DDR_READS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_writes, EVENT_DDR_WRITES),
+ NULL
+};
+
+static struct attribute_group cn10k_ddr_perf_events_attr_group = {
+ .name = "events",
+ .attrs = cn10k_ddr_perf_events_attrs,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-8");
+
+static struct attribute *cn10k_ddr_perf_format_attrs[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group cn10k_ddr_perf_format_attr_group = {
+ .name = "format",
+ .attrs = cn10k_ddr_perf_format_attrs,
+};
+
+static ssize_t cn10k_ddr_perf_cpumask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cn10k_ddr_pmu *pmu = dev_get_drvdata(dev);
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
+}
+
+static struct device_attribute cn10k_ddr_perf_cpumask_attr =
+ __ATTR(cpumask, 0444, cn10k_ddr_perf_cpumask_show, NULL);
+
+static struct attribute *cn10k_ddr_perf_cpumask_attrs[] = {
+ &cn10k_ddr_perf_cpumask_attr.attr,
+ NULL,
+};
+
+static struct attribute_group cn10k_ddr_perf_cpumask_attr_group = {
+ .attrs = cn10k_ddr_perf_cpumask_attrs,
+};
+
+static const struct attribute_group *cn10k_attr_groups[] = {
+ &cn10k_ddr_perf_events_attr_group,
+ &cn10k_ddr_perf_format_attr_group,
+ &cn10k_ddr_perf_cpumask_attr_group,
+ NULL,
+};
+
+/* Default poll timeout is 100 sec, which is more than sufficient for a
+ * 48 bit counter incremented at a maximum rate of 5.6 GT/s: overflow
+ * takes at least 2^48 / 5.6e9 ~= 50000 seconds, i.e. many hours.
+ */
+static unsigned long cn10k_ddr_pmu_poll_period_sec = 100;
+module_param_named(poll_period_sec, cn10k_ddr_pmu_poll_period_sec, ulong, 0644);
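+/*
+ * Runtime tuning sketch (illustrative; the module name below is an
+ * assumption based on this file's name): a 0644 module_param is also
+ * writable through sysfs, e.g.
+ *   echo 500 > /sys/module/marvell_cn10k_ddr_pmu/parameters/poll_period_sec
+ */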
+
+static ktime_t cn10k_ddr_pmu_timer_period(void)
+{
+	return ms_to_ktime((u64)cn10k_ddr_pmu_poll_period_sec * MSEC_PER_SEC);
+}
+
+static int ddr_perf_get_event_bitmap(int eventid, u64 *event_bitmap)
+{
+ switch (eventid) {
+ case EVENT_HIF_RD_OR_WR ... EVENT_WAW_HAZARD:
+ case EVENT_OP_IS_REFRESH ... EVENT_OP_IS_ZQLATCH:
+ *event_bitmap = (1ULL << (eventid - 1));
+ break;
+ case EVENT_OP_IS_ENTER_SELFREF:
+ case EVENT_OP_IS_ENTER_POWERDOWN:
+ case EVENT_OP_IS_ENTER_MPSM:
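+		/*
+		 * Assumption, inferred from the 0xF mask: these power-state
+		 * entry events each occupy a 4-bit field in the event
+		 * bitmap, so all four bits are set. The DDRC hardware
+		 * specification is the authority here.
+		 */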
+ *event_bitmap = (0xFULL << (eventid - 1));
+ break;
+ default:
+ pr_err("%s Invalid eventid %d\n", __func__, eventid);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cn10k_ddr_perf_alloc_counter(struct cn10k_ddr_pmu *pmu,
+ struct perf_event *event)
+{
+ u8 config = event->attr.config;
+ int i;
+
+ /* DDR read free-run counter index */
+ if (config == EVENT_DDR_READS) {
+ pmu->events[DDRC_PERF_READ_COUNTER_IDX] = event;
+ return DDRC_PERF_READ_COUNTER_IDX;
+ }
+
+ /* DDR write free-run counter index */
+ if (config == EVENT_DDR_WRITES) {
+ pmu->events[DDRC_PERF_WRITE_COUNTER_IDX] = event;
+ return DDRC_PERF_WRITE_COUNTER_IDX;
+ }
+
+ /* Allocate DDR generic counters */
+ for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
+ if (pmu->events[i] == NULL) {
+ pmu->events[i] = event;
+ return i;
+ }
+ }
+
+ return -ENOENT;
+}
+
+static void cn10k_ddr_perf_free_counter(struct cn10k_ddr_pmu *pmu, int counter)
+{
+ pmu->events[counter] = NULL;
+}
+
+static int cn10k_ddr_perf_event_init(struct perf_event *event)
+{
+ struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (is_sampling_event(event)) {
+ dev_info(pmu->dev, "Sampling not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (event->cpu < 0) {
+ dev_warn(pmu->dev, "Can't provide per-task data!\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* We must NOT create groups containing mixed PMUs */
+ if (event->group_leader->pmu != event->pmu &&
+ !is_software_event(event->group_leader))
+ return -EINVAL;
+
+	/* Set ownership of the event to one CPU; the same event cannot be
+	 * observed on multiple CPUs at the same time.
+	 */
+ event->cpu = pmu->cpu;
+ hwc->idx = -1;
+ return 0;
+}
+
+static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
+ int counter, bool enable)
+{
+ u32 reg;
+ u64 val;
+
+	if (counter >= DDRC_PERF_NUM_COUNTERS) {
+ pr_err("Error: unsupported counter %d\n", counter);
+ return;
+ }
+
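+	/*
+	 * Generic counters each have their own config register with an
+	 * enable bit; the two free-running counters share a single
+	 * enable register instead.
+	 */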
+ if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
+ reg = DDRC_PERF_CFG(counter);
+ val = readq_relaxed(pmu->base + reg);
+
+ if (enable)
+ val |= EVENT_ENABLE;
+ else
+ val &= ~EVENT_ENABLE;
+
+ writeq_relaxed(val, pmu->base + reg);
+ } else {
+ val = readq_relaxed(pmu->base + DDRC_PERF_CNT_FREERUN_EN);
+ if (enable) {
+ if (counter == DDRC_PERF_READ_COUNTER_IDX)
+ val |= DDRC_PERF_FREERUN_READ_EN;
+ else
+ val |= DDRC_PERF_FREERUN_WRITE_EN;
+ } else {
+ if (counter == DDRC_PERF_READ_COUNTER_IDX)
+ val &= ~DDRC_PERF_FREERUN_READ_EN;
+ else
+ val &= ~DDRC_PERF_FREERUN_WRITE_EN;
+ }
+ writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_EN);
+ }
+}
+
+static u64 cn10k_ddr_perf_read_counter(struct cn10k_ddr_pmu *pmu, int counter)
+{
+ u64 val;
+
+ if (counter == DDRC_PERF_READ_COUNTER_IDX)
+ return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_RD_OP);
+
+ if (counter == DDRC_PERF_WRITE_COUNTER_IDX)
+ return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_WR_OP);
+
+ val = readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE(counter));
+ return val;
+}
+
+static void cn10k_ddr_perf_event_update(struct perf_event *event)
+{
+ struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 prev_count, new_count, mask;
+
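+	/*
+	 * Lock-free update: re-read the counter until prev_count was not
+	 * changed by a concurrent updater between the read and the xchg.
+	 */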
+ do {
+ prev_count = local64_read(&hwc->prev_count);
+ new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
+ } while (local64_xchg(&hwc->prev_count, new_count) != prev_count);
+
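+	/* Mask the delta to the counter width so wraparound is handled */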
+ mask = DDRC_PERF_CNT_MAX_VALUE;
+
+ local64_add((new_count - prev_count) & mask, &event->count);
+}
+
+static void cn10k_ddr_perf_event_start(struct perf_event *event, int flags)
+{
+ struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ local64_set(&hwc->prev_count, 0);
+
+ cn10k_ddr_perf_counter_enable(pmu, counter, true);
+
+ hwc->state = 0;
+}
+
+static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
+{
+ struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u8 config = event->attr.config;
+ int counter, ret;
+ u32 reg_offset;
+ u64 val;
+
+ counter = cn10k_ddr_perf_alloc_counter(pmu, event);
+ if (counter < 0)
+ return -EAGAIN;
+
+ pmu->active_events++;
+ hwc->idx = counter;
+
+ if (pmu->active_events == 1)
+ hrtimer_start(&pmu->hrtimer, cn10k_ddr_pmu_timer_period(),
+ HRTIMER_MODE_REL_PINNED);
+
+ if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
+ /* Generic counters, configure event id */
+ reg_offset = DDRC_PERF_CFG(counter);
+		ret = ddr_perf_get_event_bitmap(config, &val);
+		if (ret) {
+			cn10k_ddr_perf_free_counter(pmu, counter);
+			pmu->active_events--;
+			return ret;
+		}
+
+ writeq_relaxed(val, pmu->base + reg_offset);
+ } else {
+ /* fixed event counter, clear counter value */
+ if (counter == DDRC_PERF_READ_COUNTER_IDX)
+ val = DDRC_FREERUN_READ_CNT_CLR;
+ else
+ val = DDRC_FREERUN_WRITE_CNT_CLR;
+
+ writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_CTRL);
+ }
+
+ hwc->state |= PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ cn10k_ddr_perf_event_start(event, flags);
+
+ return 0;
+}
+
+static void cn10k_ddr_perf_event_stop(struct perf_event *event, int flags)
+{
+ struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ cn10k_ddr_perf_counter_enable(pmu, counter, false);
+
+ if (flags & PERF_EF_UPDATE)
+ cn10k_ddr_perf_event_update(event);
+
+ hwc->state |= PERF_HES_STOPPED;
+}
+
+static void cn10k_ddr_perf_event_del(struct perf_event *event, int flags)
+{
+ struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ cn10k_ddr_perf_event_stop(event, PERF_EF_UPDATE);
+
+ cn10k_ddr_perf_free_counter(pmu, counter);
+ pmu->active_events--;
+ hwc->idx = -1;
+
+ /* Cancel timer when no events to capture */
+ if (pmu->active_events == 0)
+ hrtimer_cancel(&pmu->hrtimer);
+}
+
+static void cn10k_ddr_perf_pmu_enable(struct pmu *pmu)
+{
+ struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);
+
+ writeq_relaxed(START_OP_CTRL_VAL_START, ddr_pmu->base +
+ DDRC_PERF_CNT_START_OP_CTRL);
+}
+
+static void cn10k_ddr_perf_pmu_disable(struct pmu *pmu)
+{
+ struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);
+
+ writeq_relaxed(END_OP_CTRL_VAL_END, ddr_pmu->base +
+ DDRC_PERF_CNT_END_OP_CTRL);
+}
+
+static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu)
+{
+ struct hw_perf_event *hwc;
+ int i;
+
+ for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
+ if (pmu->events[i] == NULL)
+ continue;
+
+ cn10k_ddr_perf_event_update(pmu->events[i]);
+ }
+
+	/* Reset prev_count since the h/w counters were reset */
+ for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
+ if (pmu->events[i] == NULL)
+ continue;
+
+ hwc = &pmu->events[i]->hw;
+ local64_set(&hwc->prev_count, 0);
+ }
+}
+
+static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
+{
+ struct perf_event *event;
+ struct hw_perf_event *hwc;
+ u64 prev_count, new_count;
+ u64 value;
+ int i;
+
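+	/*
+	 * For the free-running counters, overflow is detected by comparing
+	 * the current value against the last snapshot in prev_count; a
+	 * drop means the counter wrapped.
+	 */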
+ event = pmu->events[DDRC_PERF_READ_COUNTER_IDX];
+ if (event) {
+ hwc = &event->hw;
+ prev_count = local64_read(&hwc->prev_count);
+ new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
+
+		/* The counter has overflowed if the new count is less
+		 * than the previous count
+		 */
+ if (new_count < prev_count)
+ cn10k_ddr_perf_event_update(event);
+ }
+
+ event = pmu->events[DDRC_PERF_WRITE_COUNTER_IDX];
+ if (event) {
+ hwc = &event->hw;
+ prev_count = local64_read(&hwc->prev_count);
+ new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
+
+		/* The counter has overflowed if the new count is less
+		 * than the previous count
+		 */
+ if (new_count < prev_count)
+ cn10k_ddr_perf_event_update(event);
+ }
+
+ for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
+ if (pmu->events[i] == NULL)
+ continue;
+
+ value = cn10k_ddr_perf_read_counter(pmu, i);
+ if (value == DDRC_PERF_CNT_MAX_VALUE) {
+ pr_info("Counter-(%d) reached max value\n", i);
+ cn10k_ddr_perf_event_update_all(pmu);
+ cn10k_ddr_perf_pmu_disable(&pmu->pmu);
+ cn10k_ddr_perf_pmu_enable(&pmu->pmu);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static enum hrtimer_restart cn10k_ddr_pmu_timer_handler(struct hrtimer *hrtimer)
+{
+ struct cn10k_ddr_pmu *pmu = container_of(hrtimer, struct cn10k_ddr_pmu,
+ hrtimer);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ cn10k_ddr_pmu_overflow_handler(pmu);
+ local_irq_restore(flags);
+
+ hrtimer_forward_now(hrtimer, cn10k_ddr_pmu_timer_period());
+ return HRTIMER_RESTART;
+}
+
+static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct cn10k_ddr_pmu *pmu = hlist_entry_safe(node, struct cn10k_ddr_pmu,
+ node);
+ unsigned int target;
+
+ if (cpu != pmu->cpu)
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(&pmu->pmu, cpu, target);
+ pmu->cpu = target;
+ return 0;
+}
+
+static int cn10k_ddr_perf_probe(struct platform_device *pdev)
+{
+ struct cn10k_ddr_pmu *ddr_pmu;
+ struct resource *res;
+ void __iomem *base;
+ char *name;
+ int ret;
+
+ ddr_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddr_pmu), GFP_KERNEL);
+ if (!ddr_pmu)
+ return -ENOMEM;
+
+ ddr_pmu->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ddr_pmu);
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ ddr_pmu->base = base;
+
+ /* Setup the PMU counter to work in manual mode */
+ writeq_relaxed(OP_MODE_CTRL_VAL_MANNUAL, ddr_pmu->base +
+ DDRC_PERF_CNT_OP_MODE_CTRL);
+
+ ddr_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .attr_groups = cn10k_attr_groups,
+ .event_init = cn10k_ddr_perf_event_init,
+ .add = cn10k_ddr_perf_event_add,
+ .del = cn10k_ddr_perf_event_del,
+ .start = cn10k_ddr_perf_event_start,
+ .stop = cn10k_ddr_perf_event_stop,
+ .read = cn10k_ddr_perf_event_update,
+ .pmu_enable = cn10k_ddr_perf_pmu_enable,
+ .pmu_disable = cn10k_ddr_perf_pmu_disable,
+ };
+
+ /* Choose this cpu to collect perf data */
+ ddr_pmu->cpu = raw_smp_processor_id();
+
+ name = devm_kasprintf(ddr_pmu->dev, GFP_KERNEL, "mrvl_ddr_pmu_%llx",
+ res->start);
+ if (!name)
+ return -ENOMEM;
+
+ hrtimer_init(&ddr_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ddr_pmu->hrtimer.function = cn10k_ddr_pmu_timer_handler;
+
+ cpuhp_state_add_instance_nocalls(
+ CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+ &ddr_pmu->node);
+
+ ret = perf_pmu_register(&ddr_pmu->pmu, name, -1);
+ if (ret)
+ goto error;
+
+ pr_info("CN10K DDR PMU Driver for ddrc@%llx\n", res->start);
+ return 0;
+error:
+ cpuhp_state_remove_instance_nocalls(
+ CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+ &ddr_pmu->node);
+ return ret;
+}
+
+static int cn10k_ddr_perf_remove(struct platform_device *pdev)
+{
+ struct cn10k_ddr_pmu *ddr_pmu = platform_get_drvdata(pdev);
+
+ cpuhp_state_remove_instance_nocalls(
+ CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+ &ddr_pmu->node);
+
+ perf_pmu_unregister(&ddr_pmu->pmu);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id cn10k_ddr_pmu_of_match[] = {
+ { .compatible = "marvell,cn10k-ddr-pmu", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match);
+#endif
+
+static struct platform_driver cn10k_ddr_pmu_driver = {
+ .driver = {
+ .name = "cn10k-ddr-pmu",
+ .of_match_table = of_match_ptr(cn10k_ddr_pmu_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = cn10k_ddr_perf_probe,
+ .remove = cn10k_ddr_perf_remove,
+};
+
+static int __init cn10k_ddr_pmu_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(
+ CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+ "perf/marvell/cn10k/ddr:online", NULL,
+ cn10k_ddr_pmu_offline_cpu);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&cn10k_ddr_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(
+ CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE);
+ return ret;
+}
+
+static void __exit cn10k_ddr_pmu_exit(void)
+{
+ platform_driver_unregister(&cn10k_ddr_pmu_driver);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE);
+}
+
+module_init(cn10k_ddr_pmu_init);
+module_exit(cn10k_ddr_pmu_exit);
+
+MODULE_AUTHOR("Bharat Bhushan <bbhushan2@marvell.com>");
+MODULE_LICENSE("GPL v2");
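+/*
+ * Usage sketch (illustrative, not part of the driver): once registered,
+ * the PMU is exposed under /sys/bus/event_source/devices/mrvl_ddr_pmu_<addr>/
+ * and its events can be counted system-wide, e.g.:
+ *   perf stat -e mrvl_ddr_pmu_<addr>/ddr_cam_read/ -a sleep 1
+ */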
diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c
index 7f4d292658e3..ee67305f822d 100644
--- a/drivers/perf/marvell_cn10k_tad_pmu.c
+++ b/drivers/perf/marvell_cn10k_tad_pmu.c
@@ -368,10 +368,12 @@ static int tad_pmu_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
static const struct of_device_id tad_pmu_of_match[] = {
{ .compatible = "marvell,cn10k-tad-pmu", },
{},
};
+#endif
static struct platform_driver tad_pmu_driver = {
.driver = {
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index 05378c0fd8f3..1edb9c03704f 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -887,13 +887,11 @@ static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
static acpi_status tx2_uncore_pmu_add(acpi_handle handle, u32 level,
void *data, void **return_value)
{
+ struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
struct tx2_uncore_pmu *tx2_pmu;
- struct acpi_device *adev;
enum tx2_uncore_type type;
- if (acpi_bus_get_device(handle, &adev))
- return AE_OK;
- if (acpi_bus_get_status(adev) || !adev->status.present)
+ if (!adev || acpi_bus_get_status(adev) || !adev->status.present)
return AE_OK;
type = get_tx2_pmu_type(adev);
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 2b6d476bd213..0c32dffc7ede 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -867,7 +867,7 @@ static void xgene_perf_pmu_enable(struct pmu *pmu)
{
struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
struct xgene_pmu *xgene_pmu = pmu_dev->parent;
- int enabled = bitmap_weight(pmu_dev->cntr_assign_mask,
+ bool enabled = !bitmap_empty(pmu_dev->cntr_assign_mask,
pmu_dev->max_counters);
if (!enabled)
@@ -1549,14 +1549,12 @@ static const struct acpi_device_id *xgene_pmu_acpi_match_type(
static acpi_status acpi_pmu_dev_add(acpi_handle handle, u32 level,
void *data, void **return_value)
{
+ struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
const struct acpi_device_id *acpi_id;
struct xgene_pmu *xgene_pmu = data;
struct xgene_pmu_dev_ctx *ctx;
- struct acpi_device *adev;
- if (acpi_bus_get_device(handle, &adev))
- return AE_OK;
- if (acpi_bus_get_status(adev) || !adev->status.present)
+ if (!adev || acpi_bus_get_status(adev) || !adev->status.present)
return AE_OK;
acpi_id = xgene_pmu_acpi_match_type(xgene_pmu_acpi_type_match, adev);
diff --git a/drivers/phy/freescale/Kconfig b/drivers/phy/freescale/Kconfig
index c3669c28ea9f..0e91cd99c36b 100644
--- a/drivers/phy/freescale/Kconfig
+++ b/drivers/phy/freescale/Kconfig
@@ -22,3 +22,13 @@ config PHY_FSL_IMX8M_PCIE
help
Enable this to add support for the PCIE PHY as found on
i.MX8M family of SOCs.
+
+config PHY_FSL_LYNX_28G
+ tristate "Freescale Layerscape Lynx 28G SerDes PHY support"
+ depends on OF
+ select GENERIC_PHY
+ help
+ Enable this to add support for the Lynx SerDes 28G PHY as
+ found on NXP's Layerscape platforms such as LX2160A.
+	  It is used to change the protocol running on SerDes lanes at
+	  runtime and supports only a restricted set of Ethernet protocols.
diff --git a/drivers/phy/freescale/Makefile b/drivers/phy/freescale/Makefile
index 55d07c742ab0..3518d5dbe8a7 100644
--- a/drivers/phy/freescale/Makefile
+++ b/drivers/phy/freescale/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_PHY_FSL_IMX8MQ_USB) += phy-fsl-imx8mq-usb.o
obj-$(CONFIG_PHY_MIXEL_MIPI_DPHY) += phy-fsl-imx8-mipi-dphy.o
obj-$(CONFIG_PHY_FSL_IMX8M_PCIE) += phy-fsl-imx8m-pcie.o
+obj-$(CONFIG_PHY_FSL_LYNX_28G) += phy-fsl-lynx-28g.o
diff --git a/drivers/phy/freescale/phy-fsl-lynx-28g.c b/drivers/phy/freescale/phy-fsl-lynx-28g.c
new file mode 100644
index 000000000000..569f12af2aaf
--- /dev/null
+++ b/drivers/phy/freescale/phy-fsl-lynx-28g.c
@@ -0,0 +1,623 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2021-2022 NXP. */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+#define LYNX_28G_NUM_LANE 8
+#define LYNX_28G_NUM_PLL 2
+
+/* General registers per SerDes block */
+#define LYNX_28G_PCC8 0x10a0
+#define LYNX_28G_PCC8_SGMII 0x1
+#define LYNX_28G_PCC8_SGMII_DIS 0x0
+
+#define LYNX_28G_PCCC 0x10b0
+#define LYNX_28G_PCCC_10GBASER 0x9
+#define LYNX_28G_PCCC_USXGMII 0x1
+#define LYNX_28G_PCCC_SXGMII_DIS 0x0
+
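+/* Lane fields are packed in reverse order in the PCC registers: 4 bits per
+ * lane, with lane 0 occupying the top nibble (per the offset macro below).
+ */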
+#define LYNX_28G_LNa_PCC_OFFSET(lane)	(4 * (LYNX_28G_NUM_LANE - ((lane)->id) - 1))
+
+/* Per PLL registers */
+#define LYNX_28G_PLLnRSTCTL(pll) (0x400 + (pll) * 0x100 + 0x0)
+#define LYNX_28G_PLLnRSTCTL_DIS(rstctl) (((rstctl) & BIT(24)) >> 24)
+#define LYNX_28G_PLLnRSTCTL_LOCK(rstctl) (((rstctl) & BIT(23)) >> 23)
+
+#define LYNX_28G_PLLnCR0(pll) (0x400 + (pll) * 0x100 + 0x4)
+#define LYNX_28G_PLLnCR0_REFCLK_SEL(cr0) (((cr0) & GENMASK(20, 16)))
+#define LYNX_28G_PLLnCR0_REFCLK_SEL_100MHZ 0x0
+#define LYNX_28G_PLLnCR0_REFCLK_SEL_125MHZ 0x10000
+#define LYNX_28G_PLLnCR0_REFCLK_SEL_156MHZ 0x20000
+#define LYNX_28G_PLLnCR0_REFCLK_SEL_150MHZ 0x30000
+#define LYNX_28G_PLLnCR0_REFCLK_SEL_161MHZ 0x40000
+
+#define LYNX_28G_PLLnCR1(pll) (0x400 + (pll) * 0x100 + 0x8)
+#define LYNX_28G_PLLnCR1_FRATE_SEL(cr1) (((cr1) & GENMASK(28, 24)))
+#define LYNX_28G_PLLnCR1_FRATE_5G_10GVCO 0x0
+#define LYNX_28G_PLLnCR1_FRATE_5G_25GVCO 0x10000000
+#define LYNX_28G_PLLnCR1_FRATE_10G_20GVCO 0x6000000
+
+/* Per SerDes lane registers */
+/* Lane a General Control Register */
+#define LYNX_28G_LNaGCR0(lane) (0x800 + (lane) * 0x100 + 0x0)
+#define LYNX_28G_LNaGCR0_PROTO_SEL_MSK GENMASK(7, 3)
+#define LYNX_28G_LNaGCR0_PROTO_SEL_SGMII 0x8
+#define LYNX_28G_LNaGCR0_PROTO_SEL_XFI 0x50
+#define LYNX_28G_LNaGCR0_IF_WIDTH_MSK GENMASK(2, 0)
+#define LYNX_28G_LNaGCR0_IF_WIDTH_10_BIT 0x0
+#define LYNX_28G_LNaGCR0_IF_WIDTH_20_BIT 0x2
+
+/* Lane a Tx Reset Control Register */
+#define LYNX_28G_LNaTRSTCTL(lane) (0x800 + (lane) * 0x100 + 0x20)
+#define LYNX_28G_LNaTRSTCTL_HLT_REQ BIT(27)
+#define LYNX_28G_LNaTRSTCTL_RST_DONE BIT(30)
+#define LYNX_28G_LNaTRSTCTL_RST_REQ BIT(31)
+
+/* Lane a Tx General Control Register */
+#define LYNX_28G_LNaTGCR0(lane) (0x800 + (lane) * 0x100 + 0x24)
+#define LYNX_28G_LNaTGCR0_USE_PLLF 0x0
+#define LYNX_28G_LNaTGCR0_USE_PLLS BIT(28)
+#define LYNX_28G_LNaTGCR0_USE_PLL_MSK BIT(28)
+#define LYNX_28G_LNaTGCR0_N_RATE_FULL 0x0
+#define LYNX_28G_LNaTGCR0_N_RATE_HALF 0x1000000
+#define LYNX_28G_LNaTGCR0_N_RATE_QUARTER 0x2000000
+#define LYNX_28G_LNaTGCR0_N_RATE_MSK GENMASK(26, 24)
+
+#define LYNX_28G_LNaTECR0(lane) (0x800 + (lane) * 0x100 + 0x30)
+
+/* Lane a Rx Reset Control Register */
+#define LYNX_28G_LNaRRSTCTL(lane) (0x800 + (lane) * 0x100 + 0x40)
+#define LYNX_28G_LNaRRSTCTL_HLT_REQ BIT(27)
+#define LYNX_28G_LNaRRSTCTL_RST_DONE BIT(30)
+#define LYNX_28G_LNaRRSTCTL_RST_REQ BIT(31)
+#define LYNX_28G_LNaRRSTCTL_CDR_LOCK BIT(12)
+
+/* Lane a Rx General Control Register */
+#define LYNX_28G_LNaRGCR0(lane) (0x800 + (lane) * 0x100 + 0x44)
+#define LYNX_28G_LNaRGCR0_USE_PLLF 0x0
+#define LYNX_28G_LNaRGCR0_USE_PLLS BIT(28)
+#define LYNX_28G_LNaRGCR0_USE_PLL_MSK BIT(28)
+#define LYNX_28G_LNaRGCR0_N_RATE_MSK GENMASK(26, 24)
+#define LYNX_28G_LNaRGCR0_N_RATE_FULL 0x0
+#define LYNX_28G_LNaRGCR0_N_RATE_HALF 0x1000000
+#define LYNX_28G_LNaRGCR0_N_RATE_QUARTER 0x2000000
+
+#define LYNX_28G_LNaRGCR1(lane) (0x800 + (lane) * 0x100 + 0x48)
+
+#define LYNX_28G_LNaRECR0(lane) (0x800 + (lane) * 0x100 + 0x50)
+#define LYNX_28G_LNaRECR1(lane) (0x800 + (lane) * 0x100 + 0x54)
+#define LYNX_28G_LNaRECR2(lane) (0x800 + (lane) * 0x100 + 0x58)
+
+#define LYNX_28G_LNaRSCCR0(lane) (0x800 + (lane) * 0x100 + 0x74)
+
+#define LYNX_28G_LNaPSS(lane) (0x1000 + (lane) * 0x4)
+#define LYNX_28G_LNaPSS_TYPE(pss) (((pss) & GENMASK(30, 24)) >> 24)
+#define LYNX_28G_LNaPSS_TYPE_SGMII 0x4
+#define LYNX_28G_LNaPSS_TYPE_XFI 0x28
+
+#define LYNX_28G_SGMIIaCR1(lane) (0x1804 + (lane) * 0x10)
+#define LYNX_28G_SGMIIaCR1_SGPCS_EN BIT(11)
+#define LYNX_28G_SGMIIaCR1_SGPCS_DIS 0x0
+#define LYNX_28G_SGMIIaCR1_SGPCS_MSK BIT(11)
+
+struct lynx_28g_priv;
+
+struct lynx_28g_pll {
+ struct lynx_28g_priv *priv;
+ u32 rstctl, cr0, cr1;
+ int id;
+ DECLARE_PHY_INTERFACE_MASK(supported);
+};
+
+struct lynx_28g_lane {
+ struct lynx_28g_priv *priv;
+ struct phy *phy;
+ bool powered_up;
+ bool init;
+ unsigned int id;
+ phy_interface_t interface;
+};
+
+struct lynx_28g_priv {
+ void __iomem *base;
+ struct device *dev;
+ struct lynx_28g_pll pll[LYNX_28G_NUM_PLL];
+ struct lynx_28g_lane lane[LYNX_28G_NUM_LANE];
+
+ struct delayed_work cdr_check;
+};
+
+static void lynx_28g_rmw(struct lynx_28g_priv *priv, unsigned long off,
+ u32 val, u32 mask)
+{
+ void __iomem *reg = priv->base + off;
+ u32 orig, tmp;
+
+ orig = ioread32(reg);
+ tmp = orig & ~mask;
+ tmp |= val;
+ iowrite32(tmp, reg);
+}
+
+#define lynx_28g_lane_rmw(lane, reg, val, mask) \
+ lynx_28g_rmw((lane)->priv, LYNX_28G_##reg(lane->id), \
+ LYNX_28G_##reg##_##val, LYNX_28G_##reg##_##mask)
+#define lynx_28g_lane_read(lane, reg) \
+ ioread32((lane)->priv->base + LYNX_28G_##reg((lane)->id))
+#define lynx_28g_pll_read(pll, reg) \
+ ioread32((pll)->priv->base + LYNX_28G_##reg((pll)->id))
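+
+/*
+ * Illustrative expansion of the helpers above:
+ * lynx_28g_lane_rmw(lane, LNaTGCR0, USE_PLLS, USE_PLL_MSK) becomes
+ * lynx_28g_rmw(lane->priv, LYNX_28G_LNaTGCR0(lane->id),
+ *              LYNX_28G_LNaTGCR0_USE_PLLS, LYNX_28G_LNaTGCR0_USE_PLL_MSK).
+ */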
+
+static bool lynx_28g_supports_interface(struct lynx_28g_priv *priv, int intf)
+{
+ int i;
+
+ for (i = 0; i < LYNX_28G_NUM_PLL; i++) {
+ if (LYNX_28G_PLLnRSTCTL_DIS(priv->pll[i].rstctl))
+ continue;
+
+ if (test_bit(intf, priv->pll[i].supported))
+ return true;
+ }
+
+ return false;
+}
+
+static struct lynx_28g_pll *lynx_28g_pll_get(struct lynx_28g_priv *priv,
+ phy_interface_t intf)
+{
+ struct lynx_28g_pll *pll;
+ int i;
+
+ for (i = 0; i < LYNX_28G_NUM_PLL; i++) {
+ pll = &priv->pll[i];
+
+ if (LYNX_28G_PLLnRSTCTL_DIS(pll->rstctl))
+ continue;
+
+ if (test_bit(intf, pll->supported))
+ return pll;
+ }
+
+ return NULL;
+}
+
+static void lynx_28g_lane_set_nrate(struct lynx_28g_lane *lane,
+ struct lynx_28g_pll *pll,
+ phy_interface_t intf)
+{
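+	/*
+	 * Pick the lane clock divider ("n rate") so the protocol's baud
+	 * rate matches the PLL clock net: quarter rate gives 1.25 GBd for
+	 * SGMII/1000BASE-X on the 5 GHz net, full rate gives 10.3125 GBd
+	 * for 10GBASE-R/USXGMII on the 10.3125 GHz net.
+	 */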
+ switch (LYNX_28G_PLLnCR1_FRATE_SEL(pll->cr1)) {
+ case LYNX_28G_PLLnCR1_FRATE_5G_10GVCO:
+ case LYNX_28G_PLLnCR1_FRATE_5G_25GVCO:
+ switch (intf) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ lynx_28g_lane_rmw(lane, LNaTGCR0, N_RATE_QUARTER, N_RATE_MSK);
+ lynx_28g_lane_rmw(lane, LNaRGCR0, N_RATE_QUARTER, N_RATE_MSK);
+ break;
+ default:
+ break;
+ }
+ break;
+ case LYNX_28G_PLLnCR1_FRATE_10G_20GVCO:
+ switch (intf) {
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_USXGMII:
+ lynx_28g_lane_rmw(lane, LNaTGCR0, N_RATE_FULL, N_RATE_MSK);
+ lynx_28g_lane_rmw(lane, LNaRGCR0, N_RATE_FULL, N_RATE_MSK);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void lynx_28g_lane_set_pll(struct lynx_28g_lane *lane,
+ struct lynx_28g_pll *pll)
+{
+ if (pll->id == 0) {
+ lynx_28g_lane_rmw(lane, LNaTGCR0, USE_PLLF, USE_PLL_MSK);
+ lynx_28g_lane_rmw(lane, LNaRGCR0, USE_PLLF, USE_PLL_MSK);
+ } else {
+ lynx_28g_lane_rmw(lane, LNaTGCR0, USE_PLLS, USE_PLL_MSK);
+ lynx_28g_lane_rmw(lane, LNaRGCR0, USE_PLLS, USE_PLL_MSK);
+ }
+}
+
+static void lynx_28g_cleanup_lane(struct lynx_28g_lane *lane)
+{
+ u32 lane_offset = LYNX_28G_LNa_PCC_OFFSET(lane);
+ struct lynx_28g_priv *priv = lane->priv;
+
+ /* Cleanup the protocol configuration registers of the current protocol */
+ switch (lane->interface) {
+ case PHY_INTERFACE_MODE_10GBASER:
+ lynx_28g_rmw(priv, LYNX_28G_PCCC,
+ LYNX_28G_PCCC_SXGMII_DIS << lane_offset,
+ GENMASK(3, 0) << lane_offset);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ lynx_28g_rmw(priv, LYNX_28G_PCC8,
+ LYNX_28G_PCC8_SGMII_DIS << lane_offset,
+ GENMASK(3, 0) << lane_offset);
+ break;
+ default:
+ break;
+ }
+}
+
+static void lynx_28g_lane_set_sgmii(struct lynx_28g_lane *lane)
+{
+ u32 lane_offset = LYNX_28G_LNa_PCC_OFFSET(lane);
+ struct lynx_28g_priv *priv = lane->priv;
+ struct lynx_28g_pll *pll;
+
+ lynx_28g_cleanup_lane(lane);
+
+ /* Setup the lane to run in SGMII */
+ lynx_28g_rmw(priv, LYNX_28G_PCC8,
+ LYNX_28G_PCC8_SGMII << lane_offset,
+ GENMASK(3, 0) << lane_offset);
+
+ /* Setup the protocol select and SerDes parallel interface width */
+ lynx_28g_lane_rmw(lane, LNaGCR0, PROTO_SEL_SGMII, PROTO_SEL_MSK);
+ lynx_28g_lane_rmw(lane, LNaGCR0, IF_WIDTH_10_BIT, IF_WIDTH_MSK);
+
+ /* Switch to the PLL that works with this interface type */
+ pll = lynx_28g_pll_get(priv, PHY_INTERFACE_MODE_SGMII);
+ lynx_28g_lane_set_pll(lane, pll);
+
+ /* Choose the portion of clock net to be used on this lane */
+ lynx_28g_lane_set_nrate(lane, pll, PHY_INTERFACE_MODE_SGMII);
+
+ /* Enable the SGMII PCS */
+ lynx_28g_lane_rmw(lane, SGMIIaCR1, SGPCS_EN, SGPCS_MSK);
+
+ /* Configure the appropriate equalization parameters for the protocol */
+ iowrite32(0x00808006, priv->base + LYNX_28G_LNaTECR0(lane->id));
+ iowrite32(0x04310000, priv->base + LYNX_28G_LNaRGCR1(lane->id));
+ iowrite32(0x9f800000, priv->base + LYNX_28G_LNaRECR0(lane->id));
+ iowrite32(0x001f0000, priv->base + LYNX_28G_LNaRECR1(lane->id));
+ iowrite32(0x00000000, priv->base + LYNX_28G_LNaRECR2(lane->id));
+ iowrite32(0x00000000, priv->base + LYNX_28G_LNaRSCCR0(lane->id));
+}
+
+static void lynx_28g_lane_set_10gbaser(struct lynx_28g_lane *lane)
+{
+ u32 lane_offset = LYNX_28G_LNa_PCC_OFFSET(lane);
+ struct lynx_28g_priv *priv = lane->priv;
+ struct lynx_28g_pll *pll;
+
+ lynx_28g_cleanup_lane(lane);
+
+ /* Enable the SXGMII lane */
+ lynx_28g_rmw(priv, LYNX_28G_PCCC,
+ LYNX_28G_PCCC_10GBASER << lane_offset,
+ GENMASK(3, 0) << lane_offset);
+
+ /* Setup the protocol select and SerDes parallel interface width */
+ lynx_28g_lane_rmw(lane, LNaGCR0, PROTO_SEL_XFI, PROTO_SEL_MSK);
+ lynx_28g_lane_rmw(lane, LNaGCR0, IF_WIDTH_20_BIT, IF_WIDTH_MSK);
+
+ /* Switch to the PLL that works with this interface type */
+ pll = lynx_28g_pll_get(priv, PHY_INTERFACE_MODE_10GBASER);
+ lynx_28g_lane_set_pll(lane, pll);
+
+ /* Choose the portion of clock net to be used on this lane */
+ lynx_28g_lane_set_nrate(lane, pll, PHY_INTERFACE_MODE_10GBASER);
+
+ /* Disable the SGMII PCS */
+ lynx_28g_lane_rmw(lane, SGMIIaCR1, SGPCS_DIS, SGPCS_MSK);
+
+ /* Configure the appropriate equalization parameters for the protocol */
+ iowrite32(0x10808307, priv->base + LYNX_28G_LNaTECR0(lane->id));
+ iowrite32(0x10000000, priv->base + LYNX_28G_LNaRGCR1(lane->id));
+ iowrite32(0x00000000, priv->base + LYNX_28G_LNaRECR0(lane->id));
+ iowrite32(0x001f0000, priv->base + LYNX_28G_LNaRECR1(lane->id));
+ iowrite32(0x81000020, priv->base + LYNX_28G_LNaRECR2(lane->id));
+ iowrite32(0x00002000, priv->base + LYNX_28G_LNaRSCCR0(lane->id));
+}
+
+static int lynx_28g_power_off(struct phy *phy)
+{
+ struct lynx_28g_lane *lane = phy_get_drvdata(phy);
+ u32 trstctl, rrstctl;
+
+ if (!lane->powered_up)
+ return 0;
+
+ /* Issue a halt request */
+ lynx_28g_lane_rmw(lane, LNaTRSTCTL, HLT_REQ, HLT_REQ);
+ lynx_28g_lane_rmw(lane, LNaRRSTCTL, HLT_REQ, HLT_REQ);
+
+ /* Wait until the halting process is complete */
+ do {
+ trstctl = lynx_28g_lane_read(lane, LNaTRSTCTL);
+ rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
+ } while ((trstctl & LYNX_28G_LNaTRSTCTL_HLT_REQ) ||
+ (rrstctl & LYNX_28G_LNaRRSTCTL_HLT_REQ));
+
+ lane->powered_up = false;
+
+ return 0;
+}
+
+static int lynx_28g_power_on(struct phy *phy)
+{
+ struct lynx_28g_lane *lane = phy_get_drvdata(phy);
+ u32 trstctl, rrstctl;
+
+ if (lane->powered_up)
+ return 0;
+
+ /* Issue a reset request on the lane */
+ lynx_28g_lane_rmw(lane, LNaTRSTCTL, RST_REQ, RST_REQ);
+ lynx_28g_lane_rmw(lane, LNaRRSTCTL, RST_REQ, RST_REQ);
+
+ /* Wait until the reset sequence is completed */
+ do {
+ trstctl = lynx_28g_lane_read(lane, LNaTRSTCTL);
+ rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
+ } while (!(trstctl & LYNX_28G_LNaTRSTCTL_RST_DONE) ||
+ !(rrstctl & LYNX_28G_LNaRRSTCTL_RST_DONE));
+
+ lane->powered_up = true;
+
+ return 0;
+}
+
+static int lynx_28g_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+{
+ struct lynx_28g_lane *lane = phy_get_drvdata(phy);
+ struct lynx_28g_priv *priv = lane->priv;
+	bool powered_up = lane->powered_up;
+ int err = 0;
+
+ if (mode != PHY_MODE_ETHERNET)
+ return -EOPNOTSUPP;
+
+ if (lane->interface == PHY_INTERFACE_MODE_NA)
+ return -EOPNOTSUPP;
+
+ if (!lynx_28g_supports_interface(priv, submode))
+ return -EOPNOTSUPP;
+
+ /* If the lane is powered up, put the lane into the halt state while
+ * the reconfiguration is being done.
+ */
+ if (powered_up)
+ lynx_28g_power_off(phy);
+
+ switch (submode) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ lynx_28g_lane_set_sgmii(lane);
+ break;
+ case PHY_INTERFACE_MODE_10GBASER:
+ lynx_28g_lane_set_10gbaser(lane);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ lane->interface = submode;
+
+out:
+ /* Power up the lane if necessary */
+ if (powered_up)
+ lynx_28g_power_on(phy);
+
+ return err;
+}
+
+static int lynx_28g_validate(struct phy *phy, enum phy_mode mode, int submode,
+ union phy_configure_opts *opts __always_unused)
+{
+ struct lynx_28g_lane *lane = phy_get_drvdata(phy);
+ struct lynx_28g_priv *priv = lane->priv;
+
+ if (mode != PHY_MODE_ETHERNET)
+ return -EOPNOTSUPP;
+
+ if (!lynx_28g_supports_interface(priv, submode))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int lynx_28g_init(struct phy *phy)
+{
+ struct lynx_28g_lane *lane = phy_get_drvdata(phy);
+
+	/* Mark the lane as initialized */
+ lane->init = true;
+
+	/* SerDes lanes are powered on at boot time. Any lane that is managed
+	 * by this driver will get powered down at init time, i.e. at
+	 * dpaa2-eth probe time.
+	 */
+ lane->powered_up = true;
+ lynx_28g_power_off(phy);
+
+ return 0;
+}
+
+static const struct phy_ops lynx_28g_ops = {
+ .init = lynx_28g_init,
+ .power_on = lynx_28g_power_on,
+ .power_off = lynx_28g_power_off,
+ .set_mode = lynx_28g_set_mode,
+ .validate = lynx_28g_validate,
+ .owner = THIS_MODULE,
+};
+
+static void lynx_28g_pll_read_configuration(struct lynx_28g_priv *priv)
+{
+ struct lynx_28g_pll *pll;
+ int i;
+
+ for (i = 0; i < LYNX_28G_NUM_PLL; i++) {
+ pll = &priv->pll[i];
+ pll->priv = priv;
+ pll->id = i;
+
+ pll->rstctl = lynx_28g_pll_read(pll, PLLnRSTCTL);
+ pll->cr0 = lynx_28g_pll_read(pll, PLLnCR0);
+ pll->cr1 = lynx_28g_pll_read(pll, PLLnCR1);
+
+ if (LYNX_28G_PLLnRSTCTL_DIS(pll->rstctl))
+ continue;
+
+ switch (LYNX_28G_PLLnCR1_FRATE_SEL(pll->cr1)) {
+ case LYNX_28G_PLLnCR1_FRATE_5G_10GVCO:
+ case LYNX_28G_PLLnCR1_FRATE_5G_25GVCO:
+ /* 5GHz clock net */
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, pll->supported);
+ __set_bit(PHY_INTERFACE_MODE_SGMII, pll->supported);
+ break;
+ case LYNX_28G_PLLnCR1_FRATE_10G_20GVCO:
+ /* 10.3125GHz clock net */
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, pll->supported);
+ break;
+ default:
+ /* 6GHz, 12.890625GHz, 8GHz */
+ break;
+ }
+ }
+}
+
+#define work_to_lynx(w) container_of((w), struct lynx_28g_priv, cdr_check.work)
+
+static void lynx_28g_cdr_lock_check(struct work_struct *work)
+{
+ struct lynx_28g_priv *priv = work_to_lynx(work);
+ struct lynx_28g_lane *lane;
+ u32 rrstctl;
+ int i;
+
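+	/*
+	 * Check every initialized, powered-up lane for CDR (clock and data
+	 * recovery) lock; if a lane lost lock, reset its receiver and wait
+	 * for the reset to complete.
+	 */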
+ for (i = 0; i < LYNX_28G_NUM_LANE; i++) {
+ lane = &priv->lane[i];
+
+ if (!lane->init)
+ continue;
+
+ if (!lane->powered_up)
+ continue;
+
+ rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
+ if (!(rrstctl & LYNX_28G_LNaRRSTCTL_CDR_LOCK)) {
+ lynx_28g_lane_rmw(lane, LNaRRSTCTL, RST_REQ, RST_REQ);
+ do {
+ rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
+ } while (!(rrstctl & LYNX_28G_LNaRRSTCTL_RST_DONE));
+ }
+ }
+ queue_delayed_work(system_power_efficient_wq, &priv->cdr_check,
+ msecs_to_jiffies(1000));
+}
+
+static void lynx_28g_lane_read_configuration(struct lynx_28g_lane *lane)
+{
+ u32 pss, protocol;
+
+ pss = lynx_28g_lane_read(lane, LNaPSS);
+ protocol = LYNX_28G_LNaPSS_TYPE(pss);
+ switch (protocol) {
+ case LYNX_28G_LNaPSS_TYPE_SGMII:
+ lane->interface = PHY_INTERFACE_MODE_SGMII;
+ break;
+ case LYNX_28G_LNaPSS_TYPE_XFI:
+ lane->interface = PHY_INTERFACE_MODE_10GBASER;
+ break;
+ default:
+ lane->interface = PHY_INTERFACE_MODE_NA;
+ }
+}
+
+static struct phy *lynx_28g_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct lynx_28g_priv *priv = dev_get_drvdata(dev);
+ int idx = args->args[0];
+
+ if (WARN_ON(idx >= LYNX_28G_NUM_LANE))
+ return ERR_PTR(-EINVAL);
+
+ return priv->lane[idx].phy;
+}
+
+static int lynx_28g_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy_provider *provider;
+ struct lynx_28g_priv *priv;
+ int i;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->dev = &pdev->dev;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ lynx_28g_pll_read_configuration(priv);
+
+ for (i = 0; i < LYNX_28G_NUM_LANE; i++) {
+ struct lynx_28g_lane *lane = &priv->lane[i];
+ struct phy *phy;
+
+ memset(lane, 0, sizeof(*lane));
+
+ phy = devm_phy_create(&pdev->dev, NULL, &lynx_28g_ops);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ lane->priv = priv;
+ lane->phy = phy;
+ lane->id = i;
+ phy_set_drvdata(phy, lane);
+ lynx_28g_lane_read_configuration(lane);
+ }
+
+ dev_set_drvdata(dev, priv);
+
+ INIT_DELAYED_WORK(&priv->cdr_check, lynx_28g_cdr_lock_check);
+
+ queue_delayed_work(system_power_efficient_wq, &priv->cdr_check,
+ msecs_to_jiffies(1000));
+
+ provider = devm_of_phy_provider_register(&pdev->dev, lynx_28g_xlate);
+
+ return PTR_ERR_OR_ZERO(provider);
+}
+
+static const struct of_device_id lynx_28g_of_match_table[] = {
+ { .compatible = "fsl,lynx-28g" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lynx_28g_of_match_table);
+
+static struct platform_driver lynx_28g_driver = {
+ .probe = lynx_28g_probe,
+ .driver = {
+ .name = "lynx-28g",
+ .of_match_table = lynx_28g_of_match_table,
+ },
+};
+module_platform_driver(lynx_28g_driver);
+
+MODULE_AUTHOR("Ioana Ciornei <ioana.ciornei@nxp.com>");
+MODULE_DESCRIPTION("Lynx 28G SerDes PHY driver for Layerscape SoCs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index 4d81908d6725..ba536fd4d674 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -78,7 +78,6 @@ struct npcm7xx_gpio {
struct gpio_chip gc;
int irqbase;
int irq;
- void *priv;
struct irq_chip irq_chip;
u32 pinctrl_id;
int (*direction_input)(struct gpio_chip *chip, unsigned offset);
@@ -226,7 +225,7 @@ static void npcmgpio_irq_handler(struct irq_desc *desc)
chained_irq_enter(chip, desc);
sts = ioread32(bank->base + NPCM7XX_GP_N_EVST);
en = ioread32(bank->base + NPCM7XX_GP_N_EVEN);
- dev_dbg(chip->parent_device, "==> got irq sts %.8x %.8x\n", sts,
+ dev_dbg(bank->gc.parent, "==> got irq sts %.8x %.8x\n", sts,
en);
sts &= en;
@@ -241,33 +240,33 @@ static int npcmgpio_set_irq_type(struct irq_data *d, unsigned int type)
gpiochip_get_data(irq_data_get_irq_chip_data(d));
unsigned int gpio = BIT(d->hwirq);
- dev_dbg(d->chip->parent_device, "setirqtype: %u.%u = %u\n", gpio,
+ dev_dbg(bank->gc.parent, "setirqtype: %u.%u = %u\n", gpio,
d->irq, type);
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- dev_dbg(d->chip->parent_device, "edge.rising\n");
+ dev_dbg(bank->gc.parent, "edge.rising\n");
npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio);
npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
break;
case IRQ_TYPE_EDGE_FALLING:
- dev_dbg(d->chip->parent_device, "edge.falling\n");
+ dev_dbg(bank->gc.parent, "edge.falling\n");
npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio);
npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
break;
case IRQ_TYPE_EDGE_BOTH:
- dev_dbg(d->chip->parent_device, "edge.both\n");
+ dev_dbg(bank->gc.parent, "edge.both\n");
npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio);
break;
case IRQ_TYPE_LEVEL_LOW:
- dev_dbg(d->chip->parent_device, "level.low\n");
+ dev_dbg(bank->gc.parent, "level.low\n");
npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
break;
case IRQ_TYPE_LEVEL_HIGH:
- dev_dbg(d->chip->parent_device, "level.high\n");
+ dev_dbg(bank->gc.parent, "level.high\n");
npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
break;
default:
- dev_dbg(d->chip->parent_device, "invalid irq type\n");
+ dev_dbg(bank->gc.parent, "invalid irq type\n");
return -EINVAL;
}
@@ -289,7 +288,7 @@ static void npcmgpio_irq_ack(struct irq_data *d)
gpiochip_get_data(irq_data_get_irq_chip_data(d));
unsigned int gpio = d->hwirq;
- dev_dbg(d->chip->parent_device, "irq_ack: %u.%u\n", gpio, d->irq);
+ dev_dbg(bank->gc.parent, "irq_ack: %u.%u\n", gpio, d->irq);
iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVST);
}
@@ -301,7 +300,7 @@ static void npcmgpio_irq_mask(struct irq_data *d)
unsigned int gpio = d->hwirq;
/* Clear events */
- dev_dbg(d->chip->parent_device, "irq_mask: %u.%u\n", gpio, d->irq);
+ dev_dbg(bank->gc.parent, "irq_mask: %u.%u\n", gpio, d->irq);
iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENC);
}
@@ -313,7 +312,7 @@ static void npcmgpio_irq_unmask(struct irq_data *d)
unsigned int gpio = d->hwirq;
/* Enable events */
- dev_dbg(d->chip->parent_device, "irq_unmask: %u.%u\n", gpio, d->irq);
+ dev_dbg(bank->gc.parent, "irq_unmask: %u.%u\n", gpio, d->irq);
iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENS);
}
@@ -323,7 +322,7 @@ static unsigned int npcmgpio_irq_startup(struct irq_data *d)
unsigned int gpio = d->hwirq;
/* active-high, input, clear interrupt, enable interrupt */
- dev_dbg(d->chip->parent_device, "startup: %u.%u\n", gpio, d->irq);
+ dev_dbg(gc->parent, "startup: %u.%u\n", gpio, d->irq);
npcmgpio_direction_input(gc, gpio);
npcmgpio_irq_ack(d);
npcmgpio_irq_unmask(d);
diff --git a/drivers/pinctrl/pinctrl-starfive.c b/drivers/pinctrl/pinctrl-starfive.c
index 266da41a6162..ab4b2ee9f217 100644
--- a/drivers/pinctrl/pinctrl-starfive.c
+++ b/drivers/pinctrl/pinctrl-starfive.c
@@ -1308,8 +1308,6 @@ static int starfive_probe(struct platform_device *pdev)
sfp->gc.base = -1;
sfp->gc.ngpio = NR_GPIOS;
- starfive_irq_chip.parent_device = dev;
-
sfp->gc.irq.chip = &starfive_irq_chip;
sfp->gc.irq.parent_handler = starfive_gpio_irq_handler;
sfp->gc.irq.num_parents = 1;
@@ -1330,6 +1328,8 @@ static int starfive_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "could not register gpiochip\n");
+ irq_domain_set_pm_device(sfp->gc.irq.domain, dev);
+
out_pinctrl_enable:
return pinctrl_enable(sfp->pctl);
}
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
index 2e490e7696f4..4102ce955bd7 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
@@ -585,13 +585,11 @@ static const struct samsung_pin_ctrl exynos850_pin_ctrl[] __initconst = {
/* pin-controller instance 0 ALIVE data */
.pin_banks = exynos850_pin_banks0,
.nr_banks = ARRAY_SIZE(exynos850_pin_banks0),
- .eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
}, {
/* pin-controller instance 1 CMGP data */
.pin_banks = exynos850_pin_banks1,
.nr_banks = ARRAY_SIZE(exynos850_pin_banks1),
- .eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
}, {
/* pin-controller instance 2 AUD data */
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 0489c899b401..a158d587814e 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -465,6 +465,10 @@ static const struct of_device_id exynos_wkup_irq_ids[] = {
.data = &exynos4210_wkup_irq_chip },
{ .compatible = "samsung,exynos7-wakeup-eint",
.data = &exynos7_wkup_irq_chip },
+ { .compatible = "samsung,exynos850-wakeup-eint",
+ .data = &exynos7_wkup_irq_chip },
+ { .compatible = "samsung,exynosautov9-wakeup-eint",
+ .data = &exynos7_wkup_irq_chip },
{ }
};
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 0f6e9305fec5..568b6e65dfed 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -1002,13 +1002,66 @@ samsung_pinctrl_get_soc_data_for_of_alias(struct platform_device *pdev)
return &(of_data->ctrl[id]);
}
+static void samsung_banks_of_node_put(struct samsung_pinctrl_drv_data *d)
+{
+ struct samsung_pin_bank *bank;
+ unsigned int i;
+
+ bank = d->pin_banks;
+ for (i = 0; i < d->nr_banks; ++i, ++bank)
+ of_node_put(bank->of_node);
+}
+
+/*
+ * Iterate over all driver pin banks to find the one matching the name of a
+ * child node, with or without the "-gpio-bank" suffix. When found, assign
+ * the node to the bank (e.g. bank "gpa0" matches a child node named either
+ * "gpa0-gpio-bank" or "gpa0").
+ */
+static void samsung_banks_of_node_get(struct device *dev,
+ struct samsung_pinctrl_drv_data *d,
+ struct device_node *node)
+{
+ const char *suffix = "-gpio-bank";
+ struct samsung_pin_bank *bank;
+ struct device_node *child;
+ /* Pin bank names are up to 4 characters */
+ char node_name[20];
+ unsigned int i;
+ size_t len;
+
+ bank = d->pin_banks;
+ for (i = 0; i < d->nr_banks; ++i, ++bank) {
+ strscpy(node_name, bank->name, sizeof(node_name));
+ len = strlcat(node_name, suffix, sizeof(node_name));
+ if (len >= sizeof(node_name)) {
+ dev_err(dev, "Too long pin bank name '%s', ignoring\n",
+ bank->name);
+ continue;
+ }
+
+ for_each_child_of_node(node, child) {
+ if (!of_find_property(child, "gpio-controller", NULL))
+ continue;
+ if (of_node_name_eq(child, node_name))
+ break;
+ else if (of_node_name_eq(child, bank->name))
+ break;
+ }
+
+ if (child)
+ bank->of_node = child;
+ else
+ dev_warn(dev, "Missing node for bank %s - invalid DTB\n",
+ bank->name);
+		/* child reference dropped in samsung_banks_of_node_put() */
+ }
+}
+
/* retrieve the soc specific data */
static const struct samsung_pin_ctrl *
samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
- struct device_node *np;
const struct samsung_pin_bank_data *bdata;
const struct samsung_pin_ctrl *ctrl;
struct samsung_pin_bank *bank;
@@ -1072,17 +1125,7 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
*/
d->virt_base = virt_base[0];
- for_each_child_of_node(node, np) {
- if (!of_find_property(np, "gpio-controller", NULL))
- continue;
- bank = d->pin_banks;
- for (i = 0; i < d->nr_banks; ++i, ++bank) {
- if (of_node_name_eq(np, bank->name)) {
- bank->of_node = np;
- break;
- }
- }
- }
+ samsung_banks_of_node_get(&pdev->dev, d, node);
d->pin_base = pin_base;
pin_base += d->nr_pins;
@@ -1117,19 +1160,19 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
if (ctrl->retention_data) {
drvdata->retention_ctrl = ctrl->retention_data->init(drvdata,
ctrl->retention_data);
- if (IS_ERR(drvdata->retention_ctrl))
- return PTR_ERR(drvdata->retention_ctrl);
+ if (IS_ERR(drvdata->retention_ctrl)) {
+ ret = PTR_ERR(drvdata->retention_ctrl);
+ goto err_put_banks;
+ }
}
ret = samsung_pinctrl_register(pdev, drvdata);
if (ret)
- return ret;
+ goto err_put_banks;
ret = samsung_gpiolib_register(pdev, drvdata);
- if (ret) {
- samsung_pinctrl_unregister(pdev, drvdata);
- return ret;
- }
+ if (ret)
+ goto err_unregister;
if (ctrl->eint_gpio_init)
ctrl->eint_gpio_init(drvdata);
@@ -1139,6 +1182,12 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, drvdata);
return 0;
+
+err_unregister:
+ samsung_pinctrl_unregister(pdev, drvdata);
+err_put_banks:
+ samsung_banks_of_node_put(drvdata);
+ return ret;
}
/*
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 80d6750c74a6..1f401377ff60 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -36,6 +36,13 @@
#include "../core.h"
#include "pinctrl-sunxi.h"
+/*
+ * These lock classes tell lockdep that GPIO IRQs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key sunxi_pinctrl_irq_lock_class;
+static struct lock_class_key sunxi_pinctrl_irq_request_class;
+
static struct irq_chip sunxi_pinctrl_edge_irq_chip;
static struct irq_chip sunxi_pinctrl_level_irq_chip;
@@ -837,7 +844,8 @@ static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
{
struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
- return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, offset, true);
+ return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL,
+ chip->base + offset, true);
}
static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -890,7 +898,8 @@ static int sunxi_pinctrl_gpio_direction_output(struct gpio_chip *chip,
struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
sunxi_pinctrl_gpio_set(chip, offset, value);
- return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, offset, false);
+ return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL,
+ chip->base + offset, false);
}
static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc,
@@ -1555,6 +1564,8 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
for (i = 0; i < (pctl->desc->irq_banks * IRQ_PER_BANK); i++) {
int irqno = irq_create_mapping(pctl->domain, i);
+ irq_set_lockdep_class(irqno, &sunxi_pinctrl_irq_lock_class,
+ &sunxi_pinctrl_irq_request_class);
irq_set_chip_and_handler(irqno, &sunxi_pinctrl_edge_irq_chip,
handle_edge_irq);
irq_set_chip_data(irqno, pctl);
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index fc5aa1525d13..d49a4efe46c8 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -302,13 +302,11 @@ EXPORT_SYMBOL(cros_ec_register);
*
* Return: 0 on success or negative error code.
*/
-int cros_ec_unregister(struct cros_ec_device *ec_dev)
+void cros_ec_unregister(struct cros_ec_device *ec_dev)
{
if (ec_dev->pd)
platform_device_unregister(ec_dev->pd);
platform_device_unregister(ec_dev->ec);
-
- return 0;
}
EXPORT_SYMBOL(cros_ec_unregister);
diff --git a/drivers/platform/chrome/cros_ec.h b/drivers/platform/chrome/cros_ec.h
index 78363dcfdf23..bbca0096868a 100644
--- a/drivers/platform/chrome/cros_ec.h
+++ b/drivers/platform/chrome/cros_ec.h
@@ -11,7 +11,7 @@
#include <linux/interrupt.h>
int cros_ec_register(struct cros_ec_device *ec_dev);
-int cros_ec_unregister(struct cros_ec_device *ec_dev);
+void cros_ec_unregister(struct cros_ec_device *ec_dev);
int cros_ec_suspend(struct cros_ec_device *ec_dev);
int cros_ec_resume(struct cros_ec_device *ec_dev);
diff --git a/drivers/platform/chrome/cros_ec_i2c.c b/drivers/platform/chrome/cros_ec_i2c.c
index 30c8938c27d5..22feb0fd4ce7 100644
--- a/drivers/platform/chrome/cros_ec_i2c.c
+++ b/drivers/platform/chrome/cros_ec_i2c.c
@@ -313,7 +313,9 @@ static int cros_ec_i2c_remove(struct i2c_client *client)
{
struct cros_ec_device *ec_dev = i2c_get_clientdata(client);
- return cros_ec_unregister(ec_dev);
+ cros_ec_unregister(ec_dev);
+
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index d6306d2a096f..7651417b4a25 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -439,7 +439,9 @@ static int cros_ec_lpc_remove(struct platform_device *pdev)
acpi_remove_notify_handler(adev->handle, ACPI_ALL_NOTIFY,
cros_ec_lpc_acpi_notify);
- return cros_ec_unregister(ec_dev);
+ cros_ec_unregister(ec_dev);
+
+ return 0;
}
static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = {
diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c
index 14c4046fa04d..8493af0f680e 100644
--- a/drivers/platform/chrome/cros_ec_spi.c
+++ b/drivers/platform/chrome/cros_ec_spi.c
@@ -786,11 +786,11 @@ static int cros_ec_spi_probe(struct spi_device *spi)
return 0;
}
-static int cros_ec_spi_remove(struct spi_device *spi)
+static void cros_ec_spi_remove(struct spi_device *spi)
{
struct cros_ec_device *ec_dev = spi_get_drvdata(spi);
- return cros_ec_unregister(ec_dev);
+ cros_ec_unregister(ec_dev);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/platform/olpc/olpc-xo175-ec.c b/drivers/platform/olpc/olpc-xo175-ec.c
index 0d46706afd2d..4823bd2819f6 100644
--- a/drivers/platform/olpc/olpc-xo175-ec.c
+++ b/drivers/platform/olpc/olpc-xo175-ec.c
@@ -648,7 +648,7 @@ static struct olpc_ec_driver olpc_xo175_ec_driver = {
.ec_cmd = olpc_xo175_ec_cmd,
};
-static int olpc_xo175_ec_remove(struct spi_device *spi)
+static void olpc_xo175_ec_remove(struct spi_device *spi)
{
if (pm_power_off == olpc_xo175_ec_power_off)
pm_power_off = NULL;
@@ -657,8 +657,6 @@ static int olpc_xo175_ec_remove(struct spi_device *spi)
platform_device_unregister(olpc_ec);
olpc_ec = NULL;
-
- return 0;
}
static int olpc_xo175_ec_probe(struct spi_device *spi)
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 24deeeb29af2..8d1eec208854 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -990,16 +990,16 @@ config TOPSTAR_LAPTOP
If you have a Topstar laptop, say Y or M here.
-config I2C_MULTI_INSTANTIATE
- tristate "I2C multi instantiate pseudo device driver"
- depends on I2C && ACPI
+config SERIAL_MULTI_INSTANTIATE
+ tristate "Serial bus multi instantiate pseudo device driver"
+ depends on I2C && SPI && ACPI
help
- Some ACPI-based systems list multiple i2c-devices in a single ACPI
- firmware-node. This driver will instantiate separate i2c-clients
+ Some ACPI-based systems list multiple devices in a single ACPI
+ firmware-node. This driver will instantiate separate clients
for each device in the firmware-node.
To compile this driver as a module, choose M here: the module
- will be called i2c-multi-instantiate.
+ will be called serial-multi-instantiate.
config MLX_PLATFORM
tristate "Mellanox Technologies platform support"
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index c12a9b044fd8..9527088bba7f 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -110,7 +110,7 @@ obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o
# Platform drivers
obj-$(CONFIG_FW_ATTR_CLASS) += firmware_attributes_class.o
-obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o
+obj-$(CONFIG_SERIAL_MULTI_INSTANTIATE) += serial-multi-instantiate.o
obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
obj-$(CONFIG_TOUCHSCREEN_DMI) += touchscreen_dmi.o
obj-$(CONFIG_WIRELESS_HOTKEY) += wireless-hotkey.o
diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c
deleted file mode 100644
index 4956a1df5b90..000000000000
--- a/drivers/platform/x86/i2c-multi-instantiate.c
+++ /dev/null
@@ -1,174 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * I2C multi-instantiate driver, pseudo driver to instantiate multiple
- * i2c-clients from a single fwnode.
- *
- * Copyright 2018 Hans de Goede <hdegoede@redhat.com>
- */
-
-#include <linux/acpi.h>
-#include <linux/bits.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/property.h>
-#include <linux/types.h>
-
-#define IRQ_RESOURCE_TYPE GENMASK(1, 0)
-#define IRQ_RESOURCE_NONE 0
-#define IRQ_RESOURCE_GPIO 1
-#define IRQ_RESOURCE_APIC 2
-
-struct i2c_inst_data {
- const char *type;
- unsigned int flags;
- int irq_idx;
-};
-
-struct i2c_multi_inst_data {
- int num_clients;
- struct i2c_client *clients[];
-};
-
-static int i2c_multi_inst_probe(struct platform_device *pdev)
-{
- struct i2c_multi_inst_data *multi;
- const struct i2c_inst_data *inst_data;
- struct i2c_board_info board_info = {};
- struct device *dev = &pdev->dev;
- struct acpi_device *adev;
- char name[32];
- int i, ret;
-
- inst_data = device_get_match_data(dev);
- if (!inst_data) {
- dev_err(dev, "Error ACPI match data is missing\n");
- return -ENODEV;
- }
-
- adev = ACPI_COMPANION(dev);
-
- /* Count number of clients to instantiate */
- ret = i2c_acpi_client_count(adev);
- if (ret < 0)
- return ret;
-
- multi = devm_kmalloc(dev, struct_size(multi, clients, ret), GFP_KERNEL);
- if (!multi)
- return -ENOMEM;
-
- multi->num_clients = ret;
-
- for (i = 0; i < multi->num_clients && inst_data[i].type; i++) {
- memset(&board_info, 0, sizeof(board_info));
- strlcpy(board_info.type, inst_data[i].type, I2C_NAME_SIZE);
- snprintf(name, sizeof(name), "%s-%s.%d", dev_name(dev),
- inst_data[i].type, i);
- board_info.dev_name = name;
- switch (inst_data[i].flags & IRQ_RESOURCE_TYPE) {
- case IRQ_RESOURCE_GPIO:
- ret = acpi_dev_gpio_irq_get(adev, inst_data[i].irq_idx);
- if (ret < 0) {
- dev_err(dev, "Error requesting irq at index %d: %d\n",
- inst_data[i].irq_idx, ret);
- goto error;
- }
- board_info.irq = ret;
- break;
- case IRQ_RESOURCE_APIC:
- ret = platform_get_irq(pdev, inst_data[i].irq_idx);
- if (ret < 0) {
- dev_dbg(dev, "Error requesting irq at index %d: %d\n",
- inst_data[i].irq_idx, ret);
- goto error;
- }
- board_info.irq = ret;
- break;
- default:
- board_info.irq = 0;
- break;
- }
- multi->clients[i] = i2c_acpi_new_device(dev, i, &board_info);
- if (IS_ERR(multi->clients[i])) {
- ret = dev_err_probe(dev, PTR_ERR(multi->clients[i]),
- "Error creating i2c-client, idx %d\n", i);
- goto error;
- }
- }
- if (i < multi->num_clients) {
- dev_err(dev, "Error finding driver, idx %d\n", i);
- ret = -ENODEV;
- goto error;
- }
-
- platform_set_drvdata(pdev, multi);
- return 0;
-
-error:
- while (--i >= 0)
- i2c_unregister_device(multi->clients[i]);
-
- return ret;
-}
-
-static int i2c_multi_inst_remove(struct platform_device *pdev)
-{
- struct i2c_multi_inst_data *multi = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < multi->num_clients; i++)
- i2c_unregister_device(multi->clients[i]);
-
- return 0;
-}
-
-static const struct i2c_inst_data bsg1160_data[] = {
- { "bmc150_accel", IRQ_RESOURCE_GPIO, 0 },
- { "bmc150_magn" },
- { "bmg160" },
- {}
-};
-
-static const struct i2c_inst_data bsg2150_data[] = {
- { "bmc150_accel", IRQ_RESOURCE_GPIO, 0 },
- { "bmc150_magn" },
- /* The resources describe a 3th client, but it is not really there. */
- { "bsg2150_dummy_dev" },
- {}
-};
-
-static const struct i2c_inst_data int3515_data[] = {
- { "tps6598x", IRQ_RESOURCE_APIC, 0 },
- { "tps6598x", IRQ_RESOURCE_APIC, 1 },
- { "tps6598x", IRQ_RESOURCE_APIC, 2 },
- { "tps6598x", IRQ_RESOURCE_APIC, 3 },
- {}
-};
-
-/*
- * Note new device-ids must also be added to i2c_multi_instantiate_ids in
- * drivers/acpi/scan.c: acpi_device_enumeration_by_parent().
- */
-static const struct acpi_device_id i2c_multi_inst_acpi_ids[] = {
- { "BSG1160", (unsigned long)bsg1160_data },
- { "BSG2150", (unsigned long)bsg2150_data },
- { "INT3515", (unsigned long)int3515_data },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, i2c_multi_inst_acpi_ids);
-
-static struct platform_driver i2c_multi_inst_driver = {
- .driver = {
- .name = "I2C multi instantiate pseudo device driver",
- .acpi_match_table = i2c_multi_inst_acpi_ids,
- },
- .probe = i2c_multi_inst_probe,
- .remove = i2c_multi_inst_remove,
-};
-module_platform_driver(i2c_multi_inst_driver);
-
-MODULE_DESCRIPTION("I2C multi instantiate pseudo device driver");
-MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c
new file mode 100644
index 000000000000..1e8063b7c169
--- /dev/null
+++ b/drivers/platform/x86/serial-multi-instantiate.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Serial multi-instantiate driver, pseudo driver to instantiate multiple
+ * client devices from a single fwnode.
+ *
+ * Copyright 2018 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+#define IRQ_RESOURCE_TYPE GENMASK(1, 0)
+#define IRQ_RESOURCE_NONE 0
+#define IRQ_RESOURCE_GPIO 1
+#define IRQ_RESOURCE_APIC 2
+
+enum smi_bus_type {
+ SMI_I2C,
+ SMI_SPI,
+ SMI_AUTO_DETECT,
+};
+
+struct smi_instance {
+ const char *type;
+ unsigned int flags;
+ int irq_idx;
+};
+
+struct smi_node {
+ enum smi_bus_type bus_type;
+ struct smi_instance instances[];
+};
+
+struct smi {
+ int i2c_num;
+ int spi_num;
+ struct i2c_client **i2c_devs;
+ struct spi_device **spi_devs;
+};
+
+static int smi_get_irq(struct platform_device *pdev, struct acpi_device *adev,
+ const struct smi_instance *inst)
+{
+ int ret;
+
+ switch (inst->flags & IRQ_RESOURCE_TYPE) {
+ case IRQ_RESOURCE_GPIO:
+ ret = acpi_dev_gpio_irq_get(adev, inst->irq_idx);
+ break;
+ case IRQ_RESOURCE_APIC:
+ ret = platform_get_irq(pdev, inst->irq_idx);
+ break;
+ default:
+ return 0;
+ }
+
+ if (ret < 0)
+ dev_err_probe(&pdev->dev, ret, "Error requesting irq at index %d: %d\n",
+ inst->irq_idx, ret);
+
+ return ret;
+}
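For illustration, a minimal sketch (not part of the patch) of how a board
description would exercise the three IRQ paths dispatched above; the device
name and resource indexes here are hypothetical:

	static const struct smi_node example_node = {
		.instances = {
			/* IRQ taken from the ACPI GpioInt resource at index 0 */
			{ "example_dev", IRQ_RESOURCE_GPIO, 0 },
			/* IRQ taken from the ACPI Interrupt (APIC) resource at index 1 */
			{ "example_dev", IRQ_RESOURCE_APIC, 1 },
			/* No IRQ resource: the client is created with irq == 0 */
			{ "example_dev" },
			{}
		},
		.bus_type = SMI_I2C,
	};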
+
+static void smi_devs_unregister(struct smi *smi)
+{
+ while (smi->i2c_num > 0)
+ i2c_unregister_device(smi->i2c_devs[--smi->i2c_num]);
+
+ while (smi->spi_num > 0)
+ spi_unregister_device(smi->spi_devs[--smi->spi_num]);
+}
+
+/**
+ * smi_spi_probe - Instantiate multiple SPI devices from inst array
+ * @pdev: Platform device
+ * @adev: ACPI device
+ * @smi: Internal struct for Serial multi instantiate driver
+ * @inst_array: Array of instances to probe
+ *
+ * Returns zero when all the SPI devices were instantiated, -ENODEV if none were found,
+ * or another negative error code on failure.
+ */
+static int smi_spi_probe(struct platform_device *pdev, struct acpi_device *adev, struct smi *smi,
+ const struct smi_instance *inst_array)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_controller *ctlr;
+ struct spi_device *spi_dev;
+ char name[50];
+ int i, ret, count;
+
+ ret = acpi_spi_count_resources(adev);
+ if (ret < 0)
+ return ret;
+ else if (!ret)
+ return -ENODEV;
+
+ count = ret;
+
+ smi->spi_devs = devm_kcalloc(dev, count, sizeof(*smi->spi_devs), GFP_KERNEL);
+ if (!smi->spi_devs)
+ return -ENOMEM;
+
+ for (i = 0; i < count && inst_array[i].type; i++) {
+
+ spi_dev = acpi_spi_device_alloc(NULL, adev, i);
+ if (IS_ERR(spi_dev)) {
+ ret = PTR_ERR(spi_dev);
+ dev_err_probe(dev, ret, "failed to allocate SPI device %s from ACPI: %d\n",
+ dev_name(&adev->dev), ret);
+ goto error;
+ }
+
+ ctlr = spi_dev->controller;
+
+ strscpy(spi_dev->modalias, inst_array[i].type, sizeof(spi_dev->modalias));
+
+ ret = smi_get_irq(pdev, adev, &inst_array[i]);
+ if (ret < 0) {
+ spi_dev_put(spi_dev);
+ goto error;
+ }
+ spi_dev->irq = ret;
+
+ snprintf(name, sizeof(name), "%s-%s-%s.%d", dev_name(&ctlr->dev), dev_name(dev),
+ inst_array[i].type, i);
+ spi_dev->dev.init_name = name;
+
+ ret = spi_add_device(spi_dev);
+ if (ret) {
+ dev_err_probe(&ctlr->dev, ret,
+ "failed to add SPI device %s from ACPI: %d\n",
+ dev_name(&adev->dev), ret);
+ spi_dev_put(spi_dev);
+ goto error;
+ }
+
+ dev_dbg(dev, "SPI device %s using chip select %u", name, spi_dev->chip_select);
+
+ smi->spi_devs[i] = spi_dev;
+ smi->spi_num++;
+ }
+
+ if (smi->spi_num < count) {
+ dev_dbg(dev, "Error finding driver, idx %d\n", i);
+ ret = -ENODEV;
+ goto error;
+ }
+
+ dev_info(dev, "Instantiated %d SPI devices.\n", smi->spi_num);
+
+ return 0;
+error:
+ smi_devs_unregister(smi);
+
+ return ret;
+}
+
+/**
+ * smi_i2c_probe - Instantiate multiple I2C devices from inst array
+ * @pdev: Platform device
+ * @adev: ACPI device
+ * @smi: Internal struct for Serial multi instantiate driver
+ * @inst_array: Array of instances to probe
+ *
+ * Returns zero when all the I2C devices were instantiated, -ENODEV if none were found,
+ * or another negative error code on failure.
+ */
+static int smi_i2c_probe(struct platform_device *pdev, struct acpi_device *adev, struct smi *smi,
+ const struct smi_instance *inst_array)
+{
+ struct i2c_board_info board_info = {};
+ struct device *dev = &pdev->dev;
+ char name[32];
+ int i, ret, count;
+
+ ret = i2c_acpi_client_count(adev);
+ if (ret < 0)
+ return ret;
+ else if (!ret)
+ return -ENODEV;
+
+ count = ret;
+
+ smi->i2c_devs = devm_kcalloc(dev, count, sizeof(*smi->i2c_devs), GFP_KERNEL);
+ if (!smi->i2c_devs)
+ return -ENOMEM;
+
+ for (i = 0; i < count && inst_array[i].type; i++) {
+ memset(&board_info, 0, sizeof(board_info));
+ strscpy(board_info.type, inst_array[i].type, I2C_NAME_SIZE);
+ snprintf(name, sizeof(name), "%s-%s.%d", dev_name(dev), inst_array[i].type, i);
+ board_info.dev_name = name;
+
+ ret = smi_get_irq(pdev, adev, &inst_array[i]);
+ if (ret < 0)
+ goto error;
+ board_info.irq = ret;
+
+ smi->i2c_devs[i] = i2c_acpi_new_device(dev, i, &board_info);
+ if (IS_ERR(smi->i2c_devs[i])) {
+ ret = dev_err_probe(dev, PTR_ERR(smi->i2c_devs[i]),
+ "Error creating i2c-client, idx %d\n", i);
+ goto error;
+ }
+ smi->i2c_num++;
+ }
+ if (smi->i2c_num < count) {
+ dev_dbg(dev, "Error finding driver, idx %d\n", i);
+ ret = -ENODEV;
+ goto error;
+ }
+
+ dev_info(dev, "Instantiated %d I2C devices.\n", smi->i2c_num);
+
+ return 0;
+error:
+ smi_devs_unregister(smi);
+
+ return ret;
+}
+
+static int smi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct smi_node *node;
+ struct acpi_device *adev;
+ struct smi *smi;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return -ENODEV;
+
+ node = device_get_match_data(dev);
+ if (!node) {
+ dev_dbg(dev, "Error ACPI match data is missing\n");
+ return -ENODEV;
+ }
+
+ smi = devm_kzalloc(dev, sizeof(*smi), GFP_KERNEL);
+ if (!smi)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, smi);
+
+ switch (node->bus_type) {
+ case SMI_I2C:
+ return smi_i2c_probe(pdev, adev, smi, node->instances);
+ case SMI_SPI:
+ return smi_spi_probe(pdev, adev, smi, node->instances);
+ case SMI_AUTO_DETECT:
+ if (i2c_acpi_client_count(adev) > 0)
+ return smi_i2c_probe(pdev, adev, smi, node->instances);
+ else
+ return smi_spi_probe(pdev, adev, smi, node->instances);
+ default:
+ return -EINVAL;
+ }
+
+ return 0; /* never reached */
+}
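The SMI_AUTO_DETECT case exists because the same ACPI HID may be wired to
either bus depending on the board: if the fwnode carries any I2cSerialBus
resources the clients are instantiated on I2C, otherwise the SPI path is
tried (the CSC3551/CLSA0100 entries below rely on this).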
+
+static int smi_remove(struct platform_device *pdev)
+{
+ struct smi *smi = platform_get_drvdata(pdev);
+
+ smi_devs_unregister(smi);
+
+ return 0;
+}
+
+static const struct smi_node bsg1160_data = {
+ .instances = {
+ { "bmc150_accel", IRQ_RESOURCE_GPIO, 0 },
+ { "bmc150_magn" },
+ { "bmg160" },
+ {}
+ },
+ .bus_type = SMI_I2C,
+};
+
+static const struct smi_node bsg2150_data = {
+ .instances = {
+ { "bmc150_accel", IRQ_RESOURCE_GPIO, 0 },
+ { "bmc150_magn" },
+ /* The resources describe a 3rd client, but it is not really there. */
+ { "bsg2150_dummy_dev" },
+ {}
+ },
+ .bus_type = SMI_I2C,
+};
+
+static const struct smi_node int3515_data = {
+ .instances = {
+ { "tps6598x", IRQ_RESOURCE_APIC, 0 },
+ { "tps6598x", IRQ_RESOURCE_APIC, 1 },
+ { "tps6598x", IRQ_RESOURCE_APIC, 2 },
+ { "tps6598x", IRQ_RESOURCE_APIC, 3 },
+ {}
+ },
+ .bus_type = SMI_I2C,
+};
+
+static const struct smi_node cs35l41_hda = {
+ .instances = {
+ { "cs35l41-hda", IRQ_RESOURCE_GPIO, 0 },
+ { "cs35l41-hda", IRQ_RESOURCE_GPIO, 0 },
+ { "cs35l41-hda", IRQ_RESOURCE_GPIO, 0 },
+ { "cs35l41-hda", IRQ_RESOURCE_GPIO, 0 },
+ {}
+ },
+ .bus_type = SMI_AUTO_DETECT,
+};
+
+/*
+ * Note new device-ids must also be added to ignore_serial_bus_ids in
+ * drivers/acpi/scan.c: acpi_device_enumeration_by_parent().
+ */
+static const struct acpi_device_id smi_acpi_ids[] = {
+ { "BSG1160", (unsigned long)&bsg1160_data },
+ { "BSG2150", (unsigned long)&bsg2150_data },
+ { "INT3515", (unsigned long)&int3515_data },
+ { "CSC3551", (unsigned long)&cs35l41_hda },
+ /* Non-conforming _HID for Cirrus Logic already released */
+ { "CLSA0100", (unsigned long)&cs35l41_hda },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, smi_acpi_ids);
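As context for the note above, the companion list in drivers/acpi/scan.c
that keeps these HIDs from being enumerated as ordinary serial-bus clients
looks roughly like this (a sketch, not the verbatim kernel source):

	static const char * const ignore_serial_bus_ids[] = {
		"BSG1160",
		"BSG2150",
		"CSC3551",
		"INT3515",
		/* ... */
	};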
+
+static struct platform_driver smi_driver = {
+ .driver = {
+ .name = "Serial bus multi instantiate pseudo device driver",
+ .acpi_match_table = smi_acpi_ids,
+ },
+ .probe = smi_probe,
+ .remove = smi_remove,
+};
+module_platform_driver(smi_driver);
+
+MODULE_DESCRIPTION("Serial multi instantiate pseudo device driver");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index cc6757dfa3f1..c02e7bf643a6 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -171,7 +171,7 @@ static int __pnp_bus_suspend(struct device *dev, pm_message_t state)
if (pnp_drv->driver.pm && pnp_drv->driver.pm->suspend) {
error = pnp_drv->driver.pm->suspend(dev);
- suspend_report_result(pnp_drv->driver.pm->suspend, error);
+ suspend_report_result(dev, pnp_drv->driver.pm->suspend, error);
if (error)
return error;
}
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index afaf30a3622c..38928ff7472b 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -287,9 +287,9 @@ static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle,
u32 lvl, void *context,
void **rv)
{
- struct acpi_device *device;
+ struct acpi_device *device = acpi_fetch_acpi_dev(handle);
- if (acpi_bus_get_device(handle, &device))
+ if (!device)
return AE_CTRL_DEPTH;
if (acpi_is_pnp_device(device))
pnpacpi_add_device(device);
diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig
index 8242e8c5ed77..515e3ceb3393 100644
--- a/drivers/powercap/Kconfig
+++ b/drivers/powercap/Kconfig
@@ -46,6 +46,7 @@ config IDLE_INJECT
config DTPM
bool "Power capping for Dynamic Thermal Power Management (EXPERIMENTAL)"
+ depends on OF
help
This enables support for power capping for the dynamic
thermal power management userspace engine.
@@ -56,4 +57,11 @@ config DTPM_CPU
help
This enables support for CPU power limitation based on
energy model.
+
+config DTPM_DEVFREQ
+ bool "Add device power capping based on the energy model"
+ depends on DTPM && ENERGY_MODEL
+ help
+ This enables support for device power limitation based on the
+ energy model.
endif
diff --git a/drivers/powercap/Makefile b/drivers/powercap/Makefile
index fabcf388a8d3..494617cdad88 100644
--- a/drivers/powercap/Makefile
+++ b/drivers/powercap/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_DTPM) += dtpm.o
obj-$(CONFIG_DTPM_CPU) += dtpm_cpu.o
+obj-$(CONFIG_DTPM_DEVFREQ) += dtpm_devfreq.o
obj-$(CONFIG_POWERCAP) += powercap_sys.o
obj-$(CONFIG_INTEL_RAPL_CORE) += intel_rapl_common.o
obj-$(CONFIG_INTEL_RAPL) += intel_rapl_msr.o
diff --git a/drivers/powercap/dtpm.c b/drivers/powercap/dtpm.c
index 8cb45f2d3d78..ce920f17f45f 100644
--- a/drivers/powercap/dtpm.c
+++ b/drivers/powercap/dtpm.c
@@ -23,6 +23,9 @@
#include <linux/powercap.h>
#include <linux/slab.h>
#include <linux/mutex.h>
+#include <linux/of.h>
+
+#include "dtpm_subsys.h"
#define DTPM_POWER_LIMIT_FLAG 0
@@ -48,9 +51,7 @@ static int get_max_power_range_uw(struct powercap_zone *pcz, u64 *max_power_uw)
{
struct dtpm *dtpm = to_dtpm(pcz);
- mutex_lock(&dtpm_lock);
*max_power_uw = dtpm->power_max - dtpm->power_min;
- mutex_unlock(&dtpm_lock);
return 0;
}
@@ -80,14 +81,7 @@ static int __get_power_uw(struct dtpm *dtpm, u64 *power_uw)
static int get_power_uw(struct powercap_zone *pcz, u64 *power_uw)
{
- struct dtpm *dtpm = to_dtpm(pcz);
- int ret;
-
- mutex_lock(&dtpm_lock);
- ret = __get_power_uw(dtpm, power_uw);
- mutex_unlock(&dtpm_lock);
-
- return ret;
+ return __get_power_uw(to_dtpm(pcz), power_uw);
}
static void __dtpm_rebalance_weight(struct dtpm *dtpm)
@@ -130,7 +124,16 @@ static void __dtpm_add_power(struct dtpm *dtpm)
}
}
-static int __dtpm_update_power(struct dtpm *dtpm)
+/**
+ * dtpm_update_power - Update the power on the dtpm
+ * @dtpm: a pointer to a dtpm structure to update
+ *
+ * Function to update the power values of the dtpm node passed as
+ * parameter. These new values will be propagated to the tree.
+ *
+ * Return: zero on success, -EINVAL if the values are inconsistent
+ */
+int dtpm_update_power(struct dtpm *dtpm)
{
int ret;
@@ -153,26 +156,6 @@ static int __dtpm_update_power(struct dtpm *dtpm)
}
/**
- * dtpm_update_power - Update the power on the dtpm
- * @dtpm: a pointer to a dtpm structure to update
- *
- * Function to update the power values of the dtpm node specified in
- * parameter. These new values will be propagated to the tree.
- *
- * Return: zero on success, -EINVAL if the values are inconsistent
- */
-int dtpm_update_power(struct dtpm *dtpm)
-{
- int ret;
-
- mutex_lock(&dtpm_lock);
- ret = __dtpm_update_power(dtpm);
- mutex_unlock(&dtpm_lock);
-
- return ret;
-}
-
-/**
* dtpm_release_zone - Cleanup when the node is released
* @pcz: a pointer to a powercap_zone structure
*
@@ -188,48 +171,28 @@ int dtpm_release_zone(struct powercap_zone *pcz)
struct dtpm *dtpm = to_dtpm(pcz);
struct dtpm *parent = dtpm->parent;
- mutex_lock(&dtpm_lock);
-
- if (!list_empty(&dtpm->children)) {
- mutex_unlock(&dtpm_lock);
+ if (!list_empty(&dtpm->children))
return -EBUSY;
- }
if (parent)
list_del(&dtpm->sibling);
__dtpm_sub_power(dtpm);
- mutex_unlock(&dtpm_lock);
-
if (dtpm->ops)
dtpm->ops->release(dtpm);
+ else
+ kfree(dtpm);
- if (root == dtpm)
- root = NULL;
-
- kfree(dtpm);
-
- return 0;
-}
-
-static int __get_power_limit_uw(struct dtpm *dtpm, int cid, u64 *power_limit)
-{
- *power_limit = dtpm->power_limit;
return 0;
}
static int get_power_limit_uw(struct powercap_zone *pcz,
int cid, u64 *power_limit)
{
- struct dtpm *dtpm = to_dtpm(pcz);
- int ret;
-
- mutex_lock(&dtpm_lock);
- ret = __get_power_limit_uw(dtpm, cid, power_limit);
- mutex_unlock(&dtpm_lock);
-
- return ret;
+ *power_limit = to_dtpm(pcz)->power_limit;
+
+ return 0;
}
/*
@@ -289,7 +252,7 @@ static int __set_power_limit_uw(struct dtpm *dtpm, int cid, u64 power_limit)
ret = __set_power_limit_uw(child, cid, power);
if (!ret)
- ret = __get_power_limit_uw(child, cid, &power);
+ ret = get_power_limit_uw(&child->zone, cid, &power);
if (ret)
break;
@@ -307,8 +270,6 @@ static int set_power_limit_uw(struct powercap_zone *pcz,
struct dtpm *dtpm = to_dtpm(pcz);
int ret;
- mutex_lock(&dtpm_lock);
-
/*
* Don't allow values outside of the power range previously
* set when initializing the power numbers.
@@ -320,8 +281,6 @@ static int set_power_limit_uw(struct powercap_zone *pcz,
pr_debug("%s: power limit: %llu uW, power max: %llu uW\n",
dtpm->zone.name, dtpm->power_limit, dtpm->power_max);
- mutex_unlock(&dtpm_lock);
-
return ret;
}
@@ -332,11 +291,7 @@ static const char *get_constraint_name(struct powercap_zone *pcz, int cid)
static int get_max_power_uw(struct powercap_zone *pcz, int id, u64 *max_power)
{
- struct dtpm *dtpm = to_dtpm(pcz);
-
- mutex_lock(&dtpm_lock);
- *max_power = dtpm->power_max;
- mutex_unlock(&dtpm_lock);
+ *max_power = to_dtpm(pcz)->power_max;
return 0;
}
@@ -439,8 +394,6 @@ int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent)
if (IS_ERR(pcz))
return PTR_ERR(pcz);
- mutex_lock(&dtpm_lock);
-
if (parent) {
list_add_tail(&dtpm->sibling, &parent->children);
dtpm->parent = parent;
@@ -456,19 +409,253 @@ int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent)
pr_debug("Registered dtpm node '%s' / %llu-%llu uW, \n",
dtpm->zone.name, dtpm->power_min, dtpm->power_max);
- mutex_unlock(&dtpm_lock);
+ return 0;
+}
+
+static struct dtpm *dtpm_setup_virtual(const struct dtpm_node *hierarchy,
+ struct dtpm *parent)
+{
+ struct dtpm *dtpm;
+ int ret;
+
+ dtpm = kzalloc(sizeof(*dtpm), GFP_KERNEL);
+ if (!dtpm)
+ return ERR_PTR(-ENOMEM);
+ dtpm_init(dtpm, NULL);
+
+ ret = dtpm_register(hierarchy->name, dtpm, parent);
+ if (ret) {
+ pr_err("Failed to register dtpm node '%s': %d\n",
+ hierarchy->name, ret);
+ kfree(dtpm);
+ return ERR_PTR(ret);
+ }
+
+ return dtpm;
+}
+
+static struct dtpm *dtpm_setup_dt(const struct dtpm_node *hierarchy,
+ struct dtpm *parent)
+{
+ struct device_node *np;
+ int i, ret;
+
+ np = of_find_node_by_path(hierarchy->name);
+ if (!np) {
+ pr_err("Failed to find '%s'\n", hierarchy->name);
+ return ERR_PTR(-ENXIO);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dtpm_subsys); i++) {
+
+ if (!dtpm_subsys[i]->setup)
+ continue;
+
+ ret = dtpm_subsys[i]->setup(parent, np);
+ if (ret) {
+ pr_err("Failed to setup '%s': %d\n", dtpm_subsys[i]->name, ret);
+ of_node_put(np);
+ return ERR_PTR(ret);
+ }
+ }
+
+ of_node_put(np);
+
+ /*
+ * By returning a NULL pointer, we let the caller know there
+ * is no child for us, as we are a leaf of the tree
+ */
+ return NULL;
+}
+
+typedef struct dtpm * (*dtpm_node_callback_t)(const struct dtpm_node *, struct dtpm *);
+
+static dtpm_node_callback_t dtpm_node_callback[] = {
+ [DTPM_NODE_VIRTUAL] = dtpm_setup_virtual,
+ [DTPM_NODE_DT] = dtpm_setup_dt,
+};
+
+static int dtpm_for_each_child(const struct dtpm_node *hierarchy,
+ const struct dtpm_node *it, struct dtpm *parent)
+{
+ struct dtpm *dtpm;
+ int i, ret;
+
+ for (i = 0; hierarchy[i].name; i++) {
+
+ if (hierarchy[i].parent != it)
+ continue;
+
+ dtpm = dtpm_node_callback[hierarchy[i].type](&hierarchy[i], parent);
+
+ /*
+ * A NULL pointer means there are no children, so we
+ * continue without recursing any deeper.
+ */
+ if (!dtpm)
+ continue;
+
+ /*
+ * There are multiple reasons why the callback could
+ * fail. The generic glue abstracts the backend, so it
+ * is not possible to report back or make a decision
+ * based on the error. In any case, a failure here is
+ * not critical to the hierarchy creation: we assume
+ * the underlying service was not found and continue
+ * without this branch in the tree, logging a warning
+ * that the node was not created.
+ */
+ if (IS_ERR(dtpm)) {
+ pr_warn("Failed to create '%s' in the hierarchy\n",
+ hierarchy[i].name);
+ continue;
+ }
+
+ ret = dtpm_for_each_child(hierarchy, &hierarchy[i], dtpm);
+ if (ret)
+ return ret;
+ }
return 0;
}
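To make the walk concrete: on the first call, it is NULL, so only entries
whose .parent is NULL match (the roots of the description). Each dtpm
created for a matching entry is then passed down as the parent for a
recursive pass over the same flat array; DT entries are leaves by
construction, since dtpm_setup_dt() returns NULL and the loop simply moves
on to the next sibling.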
-static int __init init_dtpm(void)
+/**
+ * dtpm_create_hierarchy - Create the dtpm hierarchy
+ * @dtpm_match_table: OF match table whose .data points to an array of
+ * struct dtpm_node describing the hierarchy
+ *
+ * The function is called by the platform specific code with the
+ * description of the different nodes in the hierarchy. It creates the
+ * tree in the sysfs filesystem under the powercap dtpm entry.
+ *
+ * The expected tree has the format:
+ *
+ * struct dtpm_node hierarchy[] = {
+ * [0] { .name = "topmost", type = DTPM_NODE_VIRTUAL },
+ * [1] { .name = "package", .type = DTPM_NODE_VIRTUAL, .parent = &hierarchy[0] },
+ * [2] { .name = "/cpus/cpu0", .type = DTPM_NODE_DT, .parent = &hierarchy[1] },
+ * [3] { .name = "/cpus/cpu1", .type = DTPM_NODE_DT, .parent = &hierarchy[1] },
+ * [4] { .name = "/cpus/cpu2", .type = DTPM_NODE_DT, .parent = &hierarchy[1] },
+ * [5] { .name = "/cpus/cpu3", .type = DTPM_NODE_DT, .parent = &hierarchy[1] },
+ * [6] { }
+ * };
+ *
+ * The last element is always an empty one and marks the end of the
+ * array.
+ *
+ * Return: zero on success, a negative value in case of error. Errors
+ * are reported back from the underlying functions.
+ */
+int dtpm_create_hierarchy(struct of_device_id *dtpm_match_table)
{
+ const struct of_device_id *match;
+ const struct dtpm_node *hierarchy;
+ struct device_node *np;
+ int i, ret;
+
+ mutex_lock(&dtpm_lock);
+
+ if (pct) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
pct = powercap_register_control_type(NULL, "dtpm", NULL);
if (IS_ERR(pct)) {
pr_err("Failed to register control type\n");
- return PTR_ERR(pct);
+ ret = PTR_ERR(pct);
+ goto out_pct;
}
+ ret = -ENODEV;
+ np = of_find_node_by_path("/");
+ if (!np)
+ goto out_err;
+
+ match = of_match_node(dtpm_match_table, np);
+
+ of_node_put(np);
+
+ if (!match)
+ goto out_err;
+
+ hierarchy = match->data;
+ if (!hierarchy) {
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ ret = dtpm_for_each_child(hierarchy, NULL, NULL);
+ if (ret)
+ goto out_err;
+
+ for (i = 0; i < ARRAY_SIZE(dtpm_subsys); i++) {
+
+ if (!dtpm_subsys[i]->init)
+ continue;
+
+ ret = dtpm_subsys[i]->init();
+ if (ret)
+ pr_info("Failed to initialize '%s': %d",
+ dtpm_subsys[i]->name, ret);
+ }
+
+ mutex_unlock(&dtpm_lock);
+
return 0;
+
+out_err:
+ powercap_unregister_control_type(pct);
+out_pct:
+ pct = NULL;
+out_unlock:
+ mutex_unlock(&dtpm_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dtpm_create_hierarchy);
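A hedged sketch of how platform code would consume this interface; the
compatible string, hierarchy layout and init function below are
hypothetical, not part of the patch:

	static struct dtpm_node example_hierarchy[] = {
		{ .name = "topmost", .type = DTPM_NODE_VIRTUAL },
		{ .name = "/cpus/cpu0", .type = DTPM_NODE_DT,
		  .parent = &example_hierarchy[0] },
		{ },
	};

	static struct of_device_id example_match_table[] = {
		{ .compatible = "vendor,example-board", .data = example_hierarchy },
		{},
	};

	static int __init example_dtpm_init(void)
	{
		/* Matches the DT root node against the table, then builds the tree */
		return dtpm_create_hierarchy(example_match_table);
	}
	module_init(example_dtpm_init);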
+
+static void __dtpm_destroy_hierarchy(struct dtpm *dtpm)
+{
+ struct dtpm *child, *aux;
+
+ list_for_each_entry_safe(child, aux, &dtpm->children, sibling)
+ __dtpm_destroy_hierarchy(child);
+
+ /*
+ * At this point, we know all children were removed by the
+ * recursive calls above.
+ */
+ dtpm_unregister(dtpm);
+}
+
+void dtpm_destroy_hierarchy(void)
+{
+ int i;
+
+ mutex_lock(&dtpm_lock);
+
+ if (!pct)
+ goto out_unlock;
+
+ __dtpm_destroy_hierarchy(root);
+
+ for (i = 0; i < ARRAY_SIZE(dtpm_subsys); i++) {
+
+ if (!dtpm_subsys[i]->exit)
+ continue;
+
+ dtpm_subsys[i]->exit();
+ }
+
+ powercap_unregister_control_type(pct);
+
+ pct = NULL;
+
+ root = NULL;
+
+out_unlock:
+ mutex_unlock(&dtpm_lock);
}
-late_initcall(init_dtpm);
+EXPORT_SYMBOL_GPL(dtpm_destroy_hierarchy);
diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
index b740866b228d..bca2f912d349 100644
--- a/drivers/powercap/dtpm_cpu.c
+++ b/drivers/powercap/dtpm_cpu.c
@@ -21,6 +21,7 @@
#include <linux/cpuhotplug.h>
#include <linux/dtpm.h>
#include <linux/energy_model.h>
+#include <linux/of.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/units.h>
@@ -150,10 +151,17 @@ static int update_pd_power_uw(struct dtpm *dtpm)
static void pd_release(struct dtpm *dtpm)
{
struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
+ struct cpufreq_policy *policy;
if (freq_qos_request_active(&dtpm_cpu->qos_req))
freq_qos_remove_request(&dtpm_cpu->qos_req);
+ policy = cpufreq_cpu_get(dtpm_cpu->cpu);
+ if (policy) {
+ for_each_cpu(dtpm_cpu->cpu, policy->related_cpus)
+ per_cpu(dtpm_per_cpu, dtpm_cpu->cpu) = NULL;
+ }
+
kfree(dtpm_cpu);
}
@@ -178,11 +186,26 @@ static int cpuhp_dtpm_cpu_offline(unsigned int cpu)
static int cpuhp_dtpm_cpu_online(unsigned int cpu)
{
struct dtpm_cpu *dtpm_cpu;
+
+ dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
+ if (dtpm_cpu)
+ return dtpm_update_power(&dtpm_cpu->dtpm);
+
+ return 0;
+}
+
+static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
+{
+ struct dtpm_cpu *dtpm_cpu;
struct cpufreq_policy *policy;
struct em_perf_domain *pd;
char name[CPUFREQ_NAME_LEN];
int ret = -ENOMEM;
+ dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
+ if (dtpm_cpu)
+ return 0;
+
policy = cpufreq_cpu_get(cpu);
if (!policy)
return 0;
@@ -191,10 +214,6 @@ static int cpuhp_dtpm_cpu_online(unsigned int cpu)
if (!pd)
return -EINVAL;
- dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
- if (dtpm_cpu)
- return dtpm_update_power(&dtpm_cpu->dtpm);
-
dtpm_cpu = kzalloc(sizeof(*dtpm_cpu), GFP_KERNEL);
if (!dtpm_cpu)
return -ENOMEM;
@@ -207,7 +226,7 @@ static int cpuhp_dtpm_cpu_online(unsigned int cpu)
snprintf(name, sizeof(name), "cpu%d-cpufreq", dtpm_cpu->cpu);
- ret = dtpm_register(name, &dtpm_cpu->dtpm, NULL);
+ ret = dtpm_register(name, &dtpm_cpu->dtpm, parent);
if (ret)
goto out_kfree_dtpm_cpu;
@@ -231,7 +250,18 @@ out_kfree_dtpm_cpu:
return ret;
}
-static int __init dtpm_cpu_init(void)
+static int dtpm_cpu_setup(struct dtpm *dtpm, struct device_node *np)
+{
+ int cpu;
+
+ cpu = of_cpu_node_to_id(np);
+ if (cpu < 0)
+ return 0;
+
+ return __dtpm_cpu_setup(cpu, dtpm);
+}
+
+static int dtpm_cpu_init(void)
{
int ret;
@@ -269,4 +299,15 @@ static int __init dtpm_cpu_init(void)
return 0;
}
-DTPM_DECLARE(dtpm_cpu, dtpm_cpu_init);
+static void dtpm_cpu_exit(void)
+{
+ cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
+ cpuhp_remove_state_nocalls(CPUHP_AP_DTPM_CPU_DEAD);
+}
+
+struct dtpm_subsys_ops dtpm_cpu_ops = {
+ .name = KBUILD_MODNAME,
+ .init = dtpm_cpu_init,
+ .exit = dtpm_cpu_exit,
+ .setup = dtpm_cpu_setup,
+};
diff --git a/drivers/powercap/dtpm_devfreq.c b/drivers/powercap/dtpm_devfreq.c
new file mode 100644
index 000000000000..91276761a31d
--- /dev/null
+++ b/drivers/powercap/dtpm_devfreq.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021 Linaro Limited
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ * The devfreq device combined with the energy model and the load can
+ * give an estimate of the power consumption as well as limit the
+ * power.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpumask.h>
+#include <linux/devfreq.h>
+#include <linux/dtpm.h>
+#include <linux/energy_model.h>
+#include <linux/of.h>
+#include <linux/pm_qos.h>
+#include <linux/slab.h>
+#include <linux/units.h>
+
+struct dtpm_devfreq {
+ struct dtpm dtpm;
+ struct dev_pm_qos_request qos_req;
+ struct devfreq *devfreq;
+};
+
+static struct dtpm_devfreq *to_dtpm_devfreq(struct dtpm *dtpm)
+{
+ return container_of(dtpm, struct dtpm_devfreq, dtpm);
+}
+
+static int update_pd_power_uw(struct dtpm *dtpm)
+{
+ struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);
+ struct devfreq *devfreq = dtpm_devfreq->devfreq;
+ struct device *dev = devfreq->dev.parent;
+ struct em_perf_domain *pd = em_pd_get(dev);
+
+ dtpm->power_min = pd->table[0].power;
+ dtpm->power_min *= MICROWATT_PER_MILLIWATT;
+
+ dtpm->power_max = pd->table[pd->nr_perf_states - 1].power;
+ dtpm->power_max *= MICROWATT_PER_MILLIWATT;
+
+ return 0;
+}
+
+static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
+{
+ struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);
+ struct devfreq *devfreq = dtpm_devfreq->devfreq;
+ struct device *dev = devfreq->dev.parent;
+ struct em_perf_domain *pd = em_pd_get(dev);
+ unsigned long freq;
+ u64 power;
+ int i;
+
+ for (i = 0; i < pd->nr_perf_states; i++) {
+
+ power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
+ if (power > power_limit)
+ break;
+ }
+
+ freq = pd->table[i - 1].frequency;
+
+ dev_pm_qos_update_request(&dtpm_devfreq->qos_req, freq);
+
+ power_limit = pd->table[i - 1].power * MICROWATT_PER_MILLIWATT;
+
+ return power_limit;
+}
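A worked example of the limit selection above: with a three-entry table of
100 mW, 200 mW and 300 mW (100000, 200000 and 300000 uW after scaling), a
request of 250000 uW makes the loop break at i == 2, so the QoS request
pins the maximum frequency to table[1] and the function reports back the
200000 uW actually enforced. Note that set_power_limit_uw() in dtpm.c
clamps the requested value to the [power_min, power_max] range beforehand,
so the loop always runs past i == 0 and table[i - 1] is well defined.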
+
+static void _normalize_load(struct devfreq_dev_status *status)
+{
+ if (status->total_time > 0xfffff) {
+ status->total_time >>= 10;
+ status->busy_time >>= 10;
+ }
+
+ status->busy_time <<= 10;
+ status->busy_time /= status->total_time ? : 1;
+
+ status->busy_time = status->busy_time ? : 1;
+ status->total_time = 1024;
+}
+
+static u64 get_pd_power_uw(struct dtpm *dtpm)
+{
+ struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);
+ struct devfreq *devfreq = dtpm_devfreq->devfreq;
+ struct device *dev = devfreq->dev.parent;
+ struct em_perf_domain *pd = em_pd_get(dev);
+ struct devfreq_dev_status status;
+ unsigned long freq;
+ u64 power;
+ int i;
+
+ mutex_lock(&devfreq->lock);
+ status = devfreq->last_status;
+ mutex_unlock(&devfreq->lock);
+
+ freq = DIV_ROUND_UP(status.current_frequency, HZ_PER_KHZ);
+ _normalize_load(&status);
+
+ for (i = 0; i < pd->nr_perf_states; i++) {
+
+ if (pd->table[i].frequency < freq)
+ continue;
+
+ power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
+ power *= status.busy_time;
+ power >>= 10;
+
+ return power;
+ }
+
+ return 0;
+}
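For instance, _normalize_load() rescales the devfreq status so that
total_time is represented as 1024 and busy_time becomes a 10-bit load
fraction; if the matching performance state reports 200 mW and the window
was 50% busy (busy_time == 512), the estimate is
200 * 1000 * 512 >> 10 = 100000 uW.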
+
+static void pd_release(struct dtpm *dtpm)
+{
+ struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);
+
+ if (dev_pm_qos_request_active(&dtpm_devfreq->qos_req))
+ dev_pm_qos_remove_request(&dtpm_devfreq->qos_req);
+
+ kfree(dtpm_devfreq);
+}
+
+static struct dtpm_ops dtpm_ops = {
+ .set_power_uw = set_pd_power_limit,
+ .get_power_uw = get_pd_power_uw,
+ .update_power_uw = update_pd_power_uw,
+ .release = pd_release,
+};
+
+static int __dtpm_devfreq_setup(struct devfreq *devfreq, struct dtpm *parent)
+{
+ struct device *dev = devfreq->dev.parent;
+ struct dtpm_devfreq *dtpm_devfreq;
+ struct em_perf_domain *pd;
+ int ret = -ENOMEM;
+
+ pd = em_pd_get(dev);
+ if (!pd) {
+ ret = dev_pm_opp_of_register_em(dev, NULL);
+ if (ret) {
+ pr_err("No energy model available for '%s'\n", dev_name(dev));
+ return -EINVAL;
+ }
+ }
+
+ dtpm_devfreq = kzalloc(sizeof(*dtpm_devfreq), GFP_KERNEL);
+ if (!dtpm_devfreq)
+ return -ENOMEM;
+
+ dtpm_init(&dtpm_devfreq->dtpm, &dtpm_ops);
+
+ dtpm_devfreq->devfreq = devfreq;
+
+ ret = dtpm_register(dev_name(dev), &dtpm_devfreq->dtpm, parent);
+ if (ret) {
+ pr_err("Failed to register '%s': %d\n", dev_name(dev), ret);
+ kfree(dtpm_devfreq);
+ return ret;
+ }
+
+ ret = dev_pm_qos_add_request(dev, &dtpm_devfreq->qos_req,
+ DEV_PM_QOS_MAX_FREQUENCY,
+ PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
+ if (ret) {
+ pr_err("Failed to add QoS request: %d\n", ret);
+ goto out_dtpm_unregister;
+ }
+
+ dtpm_update_power(&dtpm_devfreq->dtpm);
+
+ return 0;
+
+out_dtpm_unregister:
+ dtpm_unregister(&dtpm_devfreq->dtpm);
+
+ return ret;
+}
+
+static int dtpm_devfreq_setup(struct dtpm *dtpm, struct device_node *np)
+{
+ struct devfreq *devfreq;
+
+ devfreq = devfreq_get_devfreq_by_node(np);
+ if (IS_ERR(devfreq))
+ return 0;
+
+ return __dtpm_devfreq_setup(devfreq, dtpm);
+}
+
+struct dtpm_subsys_ops dtpm_devfreq_ops = {
+ .name = KBUILD_MODNAME,
+ .setup = dtpm_devfreq_setup,
+};
diff --git a/drivers/powercap/dtpm_subsys.h b/drivers/powercap/dtpm_subsys.h
new file mode 100644
index 000000000000..db1712938a96
--- /dev/null
+++ b/drivers/powercap/dtpm_subsys.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 Linaro Ltd
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ */
+#ifndef ___DTPM_SUBSYS_H__
+#define ___DTPM_SUBSYS_H__
+
+extern struct dtpm_subsys_ops dtpm_cpu_ops;
+extern struct dtpm_subsys_ops dtpm_devfreq_ops;
+
+struct dtpm_subsys_ops *dtpm_subsys[] = {
+#ifdef CONFIG_DTPM_CPU
+ &dtpm_cpu_ops,
+#endif
+#ifdef CONFIG_DTPM_DEVFREQ
+ &dtpm_devfreq_ops,
+#endif
+};
+
+#endif
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 0e4bc8b9329d..b6f2cfd15dd2 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -317,11 +317,18 @@ no_memory:
}
EXPORT_SYMBOL(ptp_clock_register);
+static int unregister_vclock(struct device *dev, void *data)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+
+ ptp_vclock_unregister(info_to_vclock(ptp->info));
+ return 0;
+}
+
int ptp_clock_unregister(struct ptp_clock *ptp)
{
if (ptp_vclock_in_use(ptp)) {
- pr_err("ptp: virtual clock in use\n");
- return -EBUSY;
+ device_for_each_child(&ptp->dev, NULL, unregister_vclock);
}
ptp->defunct = 1;
diff --git a/drivers/ptp/ptp_idt82p33.c b/drivers/ptp/ptp_idt82p33.c
index c1c959f7e52b..97c1be44e323 100644
--- a/drivers/ptp/ptp_idt82p33.c
+++ b/drivers/ptp/ptp_idt82p33.c
@@ -6,13 +6,17 @@
#define pr_fmt(fmt) "IDT_82p33xxx: " fmt
#include <linux/firmware.h>
-#include <linux/i2c.h>
+#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/delay.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/timekeeping.h>
#include <linux/bitops.h>
+#include <linux/of.h>
+#include <linux/mfd/rsmu.h>
+#include <linux/mfd/idt82p33_reg.h>
#include "ptp_private.h"
#include "ptp_idt82p33.h"
@@ -24,15 +28,25 @@ MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_FILENAME);
/* Module Parameters */
-static u32 sync_tod_timeout = SYNC_TOD_TIMEOUT_SEC;
-module_param(sync_tod_timeout, uint, 0);
-MODULE_PARM_DESC(sync_tod_timeout,
-"duration in second to keep SYNC_TOD on (set to 0 to keep it always on)");
-
static u32 phase_snap_threshold = SNAP_THRESHOLD_NS;
module_param(phase_snap_threshold, uint, 0);
MODULE_PARM_DESC(phase_snap_threshold,
-"threshold (150000ns by default) below which adjtime would ignore");
+"threshold (10000ns by default) below which adjtime would use double dco");
+
+static char *firmware;
+module_param(firmware, charp, 0);
+
+static inline int idt82p33_read(struct idt82p33 *idt82p33, u16 regaddr,
+ u8 *buf, u16 count)
+{
+ return regmap_bulk_read(idt82p33->regmap, regaddr, buf, count);
+}
+
+static inline int idt82p33_write(struct idt82p33 *idt82p33, u16 regaddr,
+ u8 *buf, u16 count)
+{
+ return regmap_bulk_write(idt82p33->regmap, regaddr, buf, count);
+}
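These helpers replace the hand-rolled I2C transfer and page-selection code
removed below: bus access (and register paging) is now handled by the
regmap that the rsmu MFD parent provides, which idt82p33_probe() picks up
via dev_get_drvdata(pdev->dev.parent) together with the shared lock. The
flat register addresses come from the newly included
<linux/mfd/idt82p33_reg.h> (see the REG_ADDR() usage further down).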
static void idt82p33_byte_array_to_timespec(struct timespec64 *ts,
u8 buf[TOD_BYTE_COUNT])
@@ -78,110 +92,6 @@ static void idt82p33_timespec_to_byte_array(struct timespec64 const *ts,
}
}
-static int idt82p33_xfer_read(struct idt82p33 *idt82p33,
- unsigned char regaddr,
- unsigned char *buf,
- unsigned int count)
-{
- struct i2c_client *client = idt82p33->client;
- struct i2c_msg msg[2];
- int cnt;
-
- msg[0].addr = client->addr;
- msg[0].flags = 0;
- msg[0].len = 1;
- msg[0].buf = &regaddr;
-
- msg[1].addr = client->addr;
- msg[1].flags = I2C_M_RD;
- msg[1].len = count;
- msg[1].buf = buf;
-
- cnt = i2c_transfer(client->adapter, msg, 2);
- if (cnt < 0) {
- dev_err(&client->dev, "i2c_transfer returned %d\n", cnt);
- return cnt;
- } else if (cnt != 2) {
- dev_err(&client->dev,
- "i2c_transfer sent only %d of %d messages\n", cnt, 2);
- return -EIO;
- }
- return 0;
-}
-
-static int idt82p33_xfer_write(struct idt82p33 *idt82p33,
- u8 regaddr,
- u8 *buf,
- u16 count)
-{
- struct i2c_client *client = idt82p33->client;
- /* we add 1 byte for device register */
- u8 msg[IDT82P33_MAX_WRITE_COUNT + 1];
- int err;
-
- if (count > IDT82P33_MAX_WRITE_COUNT)
- return -EINVAL;
-
- msg[0] = regaddr;
- memcpy(&msg[1], buf, count);
-
- err = i2c_master_send(client, msg, count + 1);
- if (err < 0) {
- dev_err(&client->dev, "i2c_master_send returned %d\n", err);
- return err;
- }
-
- return 0;
-}
-
-static int idt82p33_page_offset(struct idt82p33 *idt82p33, unsigned char val)
-{
- int err;
-
- if (idt82p33->page_offset == val)
- return 0;
-
- err = idt82p33_xfer_write(idt82p33, PAGE_ADDR, &val, sizeof(val));
- if (err)
- dev_err(&idt82p33->client->dev,
- "failed to set page offset %d\n", val);
- else
- idt82p33->page_offset = val;
-
- return err;
-}
-
-static int idt82p33_rdwr(struct idt82p33 *idt82p33, unsigned int regaddr,
- unsigned char *buf, unsigned int count, bool write)
-{
- u8 offset, page;
- int err;
-
- page = _PAGE(regaddr);
- offset = _OFFSET(regaddr);
-
- err = idt82p33_page_offset(idt82p33, page);
- if (err)
- return err;
-
- if (write)
- return idt82p33_xfer_write(idt82p33, offset, buf, count);
-
- return idt82p33_xfer_read(idt82p33, offset, buf, count);
-}
-
-static int idt82p33_read(struct idt82p33 *idt82p33, unsigned int regaddr,
- unsigned char *buf, unsigned int count)
-{
- return idt82p33_rdwr(idt82p33, regaddr, buf, count, false);
-}
-
-static int idt82p33_write(struct idt82p33 *idt82p33, unsigned int regaddr,
- unsigned char *buf, unsigned int count)
-{
- return idt82p33_rdwr(idt82p33, regaddr, buf, count, true);
-}
-
static int idt82p33_dpll_set_mode(struct idt82p33_channel *channel,
enum pll_mode mode)
{
@@ -206,7 +116,7 @@ static int idt82p33_dpll_set_mode(struct idt82p33_channel *channel,
if (err)
return err;
- channel->pll_mode = dpll_mode;
+ channel->pll_mode = mode;
return 0;
}
@@ -467,7 +377,7 @@ static int idt82p33_measure_tod_write_overhead(struct idt82p33_channel *channel)
err = idt82p33_measure_settime_gettime_gap_overhead(channel, &gap_ns);
if (err) {
- dev_err(&idt82p33->client->dev,
+ dev_err(idt82p33->dev,
"Failed in %s with err %d!\n", __func__, err);
return err;
}
@@ -499,8 +409,8 @@ static int idt82p33_check_and_set_masks(struct idt82p33 *idt82p33,
if (page == PLLMASK_ADDR_HI && offset == PLLMASK_ADDR_LO) {
if ((val & 0xfc) || !(val & 0x3)) {
- dev_err(&idt82p33->client->dev,
- "Invalid PLL mask 0x%hhx\n", val);
+ dev_err(idt82p33->dev,
+ "Invalid PLL mask 0x%x\n", val);
err = -EINVAL;
} else {
idt82p33->pll_mask = val;
@@ -520,14 +430,14 @@ static void idt82p33_display_masks(struct idt82p33 *idt82p33)
{
u8 mask, i;
- dev_info(&idt82p33->client->dev,
+ dev_info(idt82p33->dev,
"pllmask = 0x%02x\n", idt82p33->pll_mask);
for (i = 0; i < MAX_PHC_PLL; i++) {
mask = 1 << i;
if (mask & idt82p33->pll_mask)
- dev_info(&idt82p33->client->dev,
+ dev_info(idt82p33->dev,
"PLL%d output_mask = 0x%04x\n",
i, idt82p33->channel[i].output_mask);
}
@@ -539,11 +449,6 @@ static int idt82p33_sync_tod(struct idt82p33_channel *channel, bool enable)
u8 sync_cnfg;
int err;
- /* Turn it off after sync_tod_timeout seconds */
- if (enable && sync_tod_timeout)
- ptp_schedule_worker(channel->ptp_clock,
- sync_tod_timeout * HZ);
-
err = idt82p33_read(idt82p33, channel->dpll_sync_cnfg,
&sync_cnfg, sizeof(sync_cnfg));
if (err)
@@ -557,22 +462,6 @@ static int idt82p33_sync_tod(struct idt82p33_channel *channel, bool enable)
&sync_cnfg, sizeof(sync_cnfg));
}
-static long idt82p33_sync_tod_work_handler(struct ptp_clock_info *ptp)
-{
- struct idt82p33_channel *channel =
- container_of(ptp, struct idt82p33_channel, caps);
- struct idt82p33 *idt82p33 = channel->idt82p33;
-
- mutex_lock(&idt82p33->reg_lock);
-
- (void)idt82p33_sync_tod(channel, false);
-
- mutex_unlock(&idt82p33->reg_lock);
-
- /* Return a negative value here to not reschedule */
- return -1;
-}
-
static int idt82p33_output_enable(struct idt82p33_channel *channel,
bool enable, unsigned int outn)
{
@@ -634,18 +523,11 @@ static int idt82p33_enable_tod(struct idt82p33_channel *channel)
struct idt82p33 *idt82p33 = channel->idt82p33;
struct timespec64 ts = {0, 0};
int err;
- u8 val;
-
- val = 0;
- err = idt82p33_write(idt82p33, channel->dpll_input_mode_cnfg,
- &val, sizeof(val));
- if (err)
- return err;
err = idt82p33_measure_tod_write_overhead(channel);
if (err) {
- dev_err(&idt82p33->client->dev,
+ dev_err(idt82p33->dev,
"Failed in %s with err %d!\n", __func__, err);
return err;
}
@@ -673,16 +555,14 @@ static void idt82p33_ptp_clock_unregister_all(struct idt82p33 *idt82p33)
}
static int idt82p33_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq, int on)
+ struct ptp_clock_request *rq, int on)
{
struct idt82p33_channel *channel =
container_of(ptp, struct idt82p33_channel, caps);
struct idt82p33 *idt82p33 = channel->idt82p33;
- int err;
-
- err = -EOPNOTSUPP;
+ int err = -EOPNOTSUPP;
- mutex_lock(&idt82p33->reg_lock);
+ mutex_lock(idt82p33->lock);
if (rq->type == PTP_CLK_REQ_PEROUT) {
if (!on)
@@ -690,15 +570,18 @@ static int idt82p33_enable(struct ptp_clock_info *ptp,
&rq->perout);
/* Only accept a 1-PPS aligned to the second. */
else if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
- rq->perout.period.nsec) {
+ rq->perout.period.nsec)
err = -ERANGE;
- } else
+ else
err = idt82p33_perout_enable(channel, true,
&rq->perout);
}
- mutex_unlock(&idt82p33->reg_lock);
+ mutex_unlock(idt82p33->lock);
+ if (err)
+ dev_err(idt82p33->dev,
+ "Failed in %s with err %d!\n", __func__, err);
return err;
}
@@ -727,11 +610,11 @@ static int idt82p33_adjwritephase(struct ptp_clock_info *ptp, s32 offset_ns)
val[3] = (offset_regval >> 24) & 0x1F;
val[3] |= PH_OFFSET_EN;
- mutex_lock(&idt82p33->reg_lock);
+ mutex_lock(idt82p33->lock);
err = idt82p33_dpll_set_mode(channel, PLL_MODE_WPH);
if (err) {
- dev_err(&idt82p33->client->dev,
+ dev_err(idt82p33->dev,
"Failed in %s with err %d!\n", __func__, err);
goto out;
}
@@ -740,7 +623,7 @@ static int idt82p33_adjwritephase(struct ptp_clock_info *ptp, s32 offset_ns)
sizeof(val));
out:
- mutex_unlock(&idt82p33->reg_lock);
+ mutex_unlock(idt82p33->lock);
return err;
}
@@ -751,12 +634,12 @@ static int idt82p33_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
struct idt82p33 *idt82p33 = channel->idt82p33;
int err;
- mutex_lock(&idt82p33->reg_lock);
+ mutex_lock(idt82p33->lock);
err = _idt82p33_adjfine(channel, scaled_ppm);
+ mutex_unlock(idt82p33->lock);
if (err)
- dev_err(&idt82p33->client->dev,
+ dev_err(idt82p33->dev,
"Failed in %s with err %d!\n", __func__, err);
- mutex_unlock(&idt82p33->reg_lock);
return err;
}
@@ -768,29 +651,20 @@ static int idt82p33_adjtime(struct ptp_clock_info *ptp, s64 delta_ns)
struct idt82p33 *idt82p33 = channel->idt82p33;
int err;
- mutex_lock(&idt82p33->reg_lock);
+ mutex_lock(idt82p33->lock);
if (abs(delta_ns) < phase_snap_threshold) {
- mutex_unlock(&idt82p33->reg_lock);
+ mutex_unlock(idt82p33->lock);
return 0;
}
err = _idt82p33_adjtime(channel, delta_ns);
- if (err) {
- mutex_unlock(&idt82p33->reg_lock);
- dev_err(&idt82p33->client->dev,
- "Adjtime failed in %s with err %d!\n", __func__, err);
- return err;
- }
+ mutex_unlock(idt82p33->lock);
- err = idt82p33_sync_tod(channel, true);
if (err)
- dev_err(&idt82p33->client->dev,
- "Sync_tod failed in %s with err %d!\n", __func__, err);
-
- mutex_unlock(&idt82p33->reg_lock);
-
+ dev_err(idt82p33->dev,
+ "Failed in %s with err %d!\n", __func__, err);
return err;
}
@@ -801,31 +675,31 @@ static int idt82p33_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
struct idt82p33 *idt82p33 = channel->idt82p33;
int err;
- mutex_lock(&idt82p33->reg_lock);
+ mutex_lock(idt82p33->lock);
err = _idt82p33_gettime(channel, ts);
+ mutex_unlock(idt82p33->lock);
+
if (err)
- dev_err(&idt82p33->client->dev,
+ dev_err(idt82p33->dev,
"Failed in %s with err %d!\n", __func__, err);
- mutex_unlock(&idt82p33->reg_lock);
-
return err;
}
static int idt82p33_settime(struct ptp_clock_info *ptp,
- const struct timespec64 *ts)
+ const struct timespec64 *ts)
{
struct idt82p33_channel *channel =
container_of(ptp, struct idt82p33_channel, caps);
struct idt82p33 *idt82p33 = channel->idt82p33;
int err;
- mutex_lock(&idt82p33->reg_lock);
+ mutex_lock(idt82p33->lock);
err = _idt82p33_settime(channel, ts);
+ mutex_unlock(idt82p33->lock);
+
if (err)
- dev_err(&idt82p33->client->dev,
+ dev_err(idt82p33->dev,
"Failed in %s with err %d!\n", __func__, err);
- mutex_unlock(&idt82p33->reg_lock);
-
return err;
}
@@ -864,7 +738,7 @@ static int idt82p33_channel_init(struct idt82p33_channel *channel, int index)
static void idt82p33_caps_init(struct ptp_clock_info *caps)
{
caps->owner = THIS_MODULE;
- caps->max_adj = 92000;
+ caps->max_adj = DCO_MAX_PPB;
caps->n_per_out = 11;
caps->adjphase = idt82p33_adjwritephase;
caps->adjfine = idt82p33_adjfine;
@@ -872,7 +746,6 @@ static void idt82p33_caps_init(struct ptp_clock_info *caps)
caps->gettime64 = idt82p33_gettime;
caps->settime64 = idt82p33_settime;
caps->enable = idt82p33_enable;
- caps->do_aux_work = idt82p33_sync_tod_work_handler;
}
static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
@@ -887,7 +760,7 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
err = idt82p33_channel_init(channel, index);
if (err) {
- dev_err(&idt82p33->client->dev,
+ dev_err(idt82p33->dev,
"Channel_init failed in %s with err %d!\n",
__func__, err);
return err;
@@ -912,7 +785,7 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
err = idt82p33_dpll_set_mode(channel, PLL_MODE_DCO);
if (err) {
- dev_err(&idt82p33->client->dev,
+ dev_err(idt82p33->dev,
"Dpll_set_mode failed in %s with err %d!\n",
__func__, err);
return err;
@@ -920,13 +793,13 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
err = idt82p33_enable_tod(channel);
if (err) {
- dev_err(&idt82p33->client->dev,
+ dev_err(idt82p33->dev,
"Enable_tod failed in %s with err %d!\n",
__func__, err);
return err;
}
- dev_info(&idt82p33->client->dev, "PLL%d registered as ptp%d\n",
+ dev_info(idt82p33->dev, "PLL%d registered as ptp%d\n",
index, channel->ptp_clock->index);
return 0;
@@ -940,25 +813,24 @@ static int idt82p33_load_firmware(struct idt82p33 *idt82p33)
int err;
s32 len;
- dev_dbg(&idt82p33->client->dev,
- "requesting firmware '%s'\n", FW_FILENAME);
+ dev_dbg(idt82p33->dev, "requesting firmware '%s'\n", FW_FILENAME);
- err = request_firmware(&fw, FW_FILENAME, &idt82p33->client->dev);
+ err = request_firmware(&fw, FW_FILENAME, idt82p33->dev);
if (err) {
- dev_err(&idt82p33->client->dev,
+ dev_err(idt82p33->dev,
"Failed in %s with err %d!\n", __func__, err);
return err;
}
- dev_dbg(&idt82p33->client->dev, "firmware size %zu bytes\n", fw->size);
+ dev_dbg(idt82p33->dev, "firmware size %zu bytes\n", fw->size);
rec = (struct idt82p33_fwrc *) fw->data;
for (len = fw->size; len > 0; len -= sizeof(*rec)) {
if (rec->reserved) {
- dev_err(&idt82p33->client->dev,
+ dev_err(idt82p33->dev,
"bad firmware, reserved field non-zero\n");
err = -EINVAL;
} else {
@@ -973,16 +845,11 @@ static int idt82p33_load_firmware(struct idt82p33 *idt82p33)
}
if (err == 0) {
- /* maximum 8 pages */
- if (page >= PAGE_NUM)
- continue;
-
/* Page size 128, last 4 bytes of page skipped */
- if (((loaddr > 0x7b) && (loaddr <= 0x7f))
- || loaddr > 0xfb)
+ if (loaddr > 0x7b)
continue;
- err = idt82p33_write(idt82p33, _ADDR(page, loaddr),
+ err = idt82p33_write(idt82p33, REG_ADDR(page, loaddr),
&val, sizeof(val));
}
@@ -997,36 +864,34 @@ out:
}
-static int idt82p33_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int idt82p33_probe(struct platform_device *pdev)
{
+ struct rsmu_ddata *ddata = dev_get_drvdata(pdev->dev.parent);
struct idt82p33 *idt82p33;
int err;
u8 i;
- (void)id;
-
- idt82p33 = devm_kzalloc(&client->dev,
+ idt82p33 = devm_kzalloc(&pdev->dev,
sizeof(struct idt82p33), GFP_KERNEL);
if (!idt82p33)
return -ENOMEM;
- mutex_init(&idt82p33->reg_lock);
-
- idt82p33->client = client;
- idt82p33->page_offset = 0xff;
+ idt82p33->dev = &pdev->dev;
+ idt82p33->mfd = pdev->dev.parent;
+ idt82p33->lock = &ddata->lock;
+ idt82p33->regmap = ddata->regmap;
idt82p33->tod_write_overhead_ns = 0;
idt82p33->calculate_overhead_flag = 0;
idt82p33->pll_mask = DEFAULT_PLL_MASK;
idt82p33->channel[0].output_mask = DEFAULT_OUTPUT_MASK_PLL0;
idt82p33->channel[1].output_mask = DEFAULT_OUTPUT_MASK_PLL1;
- mutex_lock(&idt82p33->reg_lock);
+ mutex_lock(idt82p33->lock);
err = idt82p33_load_firmware(idt82p33);
if (err)
- dev_warn(&idt82p33->client->dev,
+ dev_warn(idt82p33->dev,
"loading firmware failed with %d\n", err);
if (idt82p33->pll_mask) {
@@ -1034,7 +899,7 @@ static int idt82p33_probe(struct i2c_client *client,
if (idt82p33->pll_mask & (1 << i)) {
err = idt82p33_enable_channel(idt82p33, i);
if (err) {
- dev_err(&idt82p33->client->dev,
+ dev_err(idt82p33->dev,
"Failed in %s with err %d!\n",
__func__, err);
break;
@@ -1042,69 +907,38 @@ static int idt82p33_probe(struct i2c_client *client,
}
}
} else {
- dev_err(&idt82p33->client->dev,
+ dev_err(idt82p33->dev,
"no PLLs flagged as PHCs, nothing to do\n");
err = -ENODEV;
}
- mutex_unlock(&idt82p33->reg_lock);
+ mutex_unlock(idt82p33->lock);
if (err) {
idt82p33_ptp_clock_unregister_all(idt82p33);
return err;
}
- i2c_set_clientdata(client, idt82p33);
+ platform_set_drvdata(pdev, idt82p33);
return 0;
}
-static int idt82p33_remove(struct i2c_client *client)
+static int idt82p33_remove(struct platform_device *pdev)
{
- struct idt82p33 *idt82p33 = i2c_get_clientdata(client);
+ struct idt82p33 *idt82p33 = platform_get_drvdata(pdev);
idt82p33_ptp_clock_unregister_all(idt82p33);
- mutex_destroy(&idt82p33->reg_lock);
return 0;
}
-#ifdef CONFIG_OF
-static const struct of_device_id idt82p33_dt_id[] = {
- { .compatible = "idt,82p33810" },
- { .compatible = "idt,82p33813" },
- { .compatible = "idt,82p33814" },
- { .compatible = "idt,82p33831" },
- { .compatible = "idt,82p33910" },
- { .compatible = "idt,82p33913" },
- { .compatible = "idt,82p33914" },
- { .compatible = "idt,82p33931" },
- {},
-};
-MODULE_DEVICE_TABLE(of, idt82p33_dt_id);
-#endif
-
-static const struct i2c_device_id idt82p33_i2c_id[] = {
- { "idt82p33810", },
- { "idt82p33813", },
- { "idt82p33814", },
- { "idt82p33831", },
- { "idt82p33910", },
- { "idt82p33913", },
- { "idt82p33914", },
- { "idt82p33931", },
- {},
-};
-MODULE_DEVICE_TABLE(i2c, idt82p33_i2c_id);
-
-static struct i2c_driver idt82p33_driver = {
+static struct platform_driver idt82p33_driver = {
.driver = {
- .of_match_table = of_match_ptr(idt82p33_dt_id),
- .name = "idt82p33",
+ .name = "82p33x1x-phc",
},
- .probe = idt82p33_probe,
- .remove = idt82p33_remove,
- .id_table = idt82p33_i2c_id,
+ .probe = idt82p33_probe,
+ .remove = idt82p33_remove,
};
-module_i2c_driver(idt82p33_driver);
+module_platform_driver(idt82p33_driver);
diff --git a/drivers/ptp/ptp_idt82p33.h b/drivers/ptp/ptp_idt82p33.h
index 1c7a0f0872e8..0ea1c35c0f9f 100644
--- a/drivers/ptp/ptp_idt82p33.h
+++ b/drivers/ptp/ptp_idt82p33.h
@@ -8,94 +8,19 @@
#define PTP_IDT82P33_H
#include <linux/ktime.h>
-#include <linux/workqueue.h>
+#include <linux/mfd/idt82p33_reg.h>
+#include <linux/regmap.h>
-
-/* Register Map - AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf */
-#define PAGE_NUM (8)
-#define _ADDR(page, offset) (((page) << 0x7) | ((offset) & 0x7f))
-#define _PAGE(addr) (((addr) >> 0x7) & 0x7)
-#define _OFFSET(addr) ((addr) & 0x7f)
-
-#define DPLL1_TOD_CNFG 0x134
-#define DPLL2_TOD_CNFG 0x1B4
-
-#define DPLL1_TOD_STS 0x10B
-#define DPLL2_TOD_STS 0x18B
-
-#define DPLL1_TOD_TRIGGER 0x115
-#define DPLL2_TOD_TRIGGER 0x195
-
-#define DPLL1_OPERATING_MODE_CNFG 0x120
-#define DPLL2_OPERATING_MODE_CNFG 0x1A0
-
-#define DPLL1_HOLDOVER_FREQ_CNFG 0x12C
-#define DPLL2_HOLDOVER_FREQ_CNFG 0x1AC
-
-#define DPLL1_PHASE_OFFSET_CNFG 0x143
-#define DPLL2_PHASE_OFFSET_CNFG 0x1C3
-
-#define DPLL1_SYNC_EDGE_CNFG 0X140
-#define DPLL2_SYNC_EDGE_CNFG 0X1C0
-
-#define DPLL1_INPUT_MODE_CNFG 0X116
-#define DPLL2_INPUT_MODE_CNFG 0X196
-
-#define OUT_MUX_CNFG(outn) _ADDR(0x6, (0xC * (outn)))
-
-#define PAGE_ADDR 0x7F
-/* Register Map end */
-
-/* Register definitions - AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf*/
-#define TOD_TRIGGER(wr_trig, rd_trig) ((wr_trig & 0xf) << 4 | (rd_trig & 0xf))
-#define SYNC_TOD BIT(1)
-#define PH_OFFSET_EN BIT(7)
-#define SQUELCH_ENABLE BIT(5)
-
-/* Bit definitions for the DPLL_MODE register */
-#define PLL_MODE_SHIFT (0)
-#define PLL_MODE_MASK (0x1F)
-
-#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef)
-
-enum pll_mode {
- PLL_MODE_MIN = 0,
- PLL_MODE_AUTOMATIC = PLL_MODE_MIN,
- PLL_MODE_FORCE_FREERUN = 1,
- PLL_MODE_FORCE_HOLDOVER = 2,
- PLL_MODE_FORCE_LOCKED = 4,
- PLL_MODE_FORCE_PRE_LOCKED2 = 5,
- PLL_MODE_FORCE_PRE_LOCKED = 6,
- PLL_MODE_FORCE_LOST_PHASE = 7,
- PLL_MODE_DCO = 10,
- PLL_MODE_WPH = 18,
- PLL_MODE_MAX = PLL_MODE_WPH,
-};
-
-enum hw_tod_trig_sel {
- HW_TOD_TRIG_SEL_MIN = 0,
- HW_TOD_TRIG_SEL_NO_WRITE = HW_TOD_TRIG_SEL_MIN,
- HW_TOD_TRIG_SEL_SYNC_SEL = 1,
- HW_TOD_TRIG_SEL_IN12 = 2,
- HW_TOD_TRIG_SEL_IN13 = 3,
- HW_TOD_TRIG_SEL_IN14 = 4,
- HW_TOD_TRIG_SEL_TOD_PPS = 5,
- HW_TOD_TRIG_SEL_TIMER_INTERVAL = 6,
- HW_TOD_TRIG_SEL_MSB_PHASE_OFFSET_CNFG = 7,
- HW_TOD_TRIG_SEL_MSB_HOLDOVER_FREQ_CNFG = 8,
- HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG = 9,
- HW_TOD_RD_TRIG_SEL_LSB_TOD_STS = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
- WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
-};
-
-/* Register bit definitions end */
#define FW_FILENAME "idt82p33xxx.bin"
-#define MAX_PHC_PLL (2)
-#define TOD_BYTE_COUNT (10)
-#define MAX_MEASURMENT_COUNT (5)
-#define SNAP_THRESHOLD_NS (150000)
-#define SYNC_TOD_TIMEOUT_SEC (5)
-#define IDT82P33_MAX_WRITE_COUNT (512)
+#define MAX_PHC_PLL (2)
+#define TOD_BYTE_COUNT (10)
+#define DCO_MAX_PPB (92000)
+#define MAX_MEASURMENT_COUNT (5)
+#define SNAP_THRESHOLD_NS (10000)
+#define IMMEDIATE_SNAP_THRESHOLD_NS (50000)
+#define DDCO_THRESHOLD_NS (5)
+#define IDT82P33_MAX_WRITE_COUNT (512)
+#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef)
#define PLLMASK_ADDR_HI 0xFF
#define PLLMASK_ADDR_LO 0xA5
@@ -116,15 +41,25 @@ enum hw_tod_trig_sel {
#define DEFAULT_OUTPUT_MASK_PLL0 (0xc0)
#define DEFAULT_OUTPUT_MASK_PLL1 DEFAULT_OUTPUT_MASK_PLL0
+/**
+ * @brief Maximum absolute value for write phase offset in femtoseconds
+ */
+#define WRITE_PHASE_OFFSET_LIMIT (20000052084ll)
+
+/** @brief Phase offset resolution
+ *
+ * DPLL phase offset = 10^15 fs / ( System Clock * 2^13)
+ * = 10^15 fs / ( 1638400000 * 2^13)
+ * = 74.5058059692382 fs
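+ *
+ * (1638400000 * 2^13 ~= 1.342e13, so 10^15 / 1.342e13 ~= 74.506 fs;
+ * the constant below stores this value scaled by 1000.)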
+ */
+#define IDT_T0DPLL_PHASE_RESOL 74506
+
/* PTP Hardware Clock interface */
struct idt82p33_channel {
struct ptp_clock_info caps;
struct ptp_clock *ptp_clock;
- struct idt82p33 *idt82p33;
- enum pll_mode pll_mode;
- /* task to turn off SYNC_TOD bit after pps sync */
- struct delayed_work sync_tod_work;
- bool sync_tod_on;
+ struct idt82p33 *idt82p33;
+ enum pll_mode pll_mode;
s32 current_freq_ppb;
u8 output_mask;
u16 dpll_tod_cnfg;
@@ -138,15 +73,17 @@ struct idt82p33_channel {
};
struct idt82p33 {
- struct idt82p33_channel channel[MAX_PHC_PLL];
- struct i2c_client *client;
- u8 page_offset;
- u8 pll_mask;
- ktime_t start_time;
- int calculate_overhead_flag;
- s64 tod_write_overhead_ns;
- /* Protects I2C read/modify/write registers from concurrent access */
- struct mutex reg_lock;
+ struct idt82p33_channel channel[MAX_PHC_PLL];
+ struct device *dev;
+ u8 pll_mask;
+ /* Mutex to protect operations from being interrupted */
+ struct mutex *lock;
+ struct regmap *regmap;
+ struct device *mfd;
+ /* Overhead calculation for adjtime */
+ ktime_t start_time;
+ int calculate_overhead_flag;
+ s64 tod_write_overhead_ns;
};
/* firmware interface */
@@ -157,18 +94,4 @@ struct idt82p33_fwrc {
u8 reserved;
} __packed;
-/**
- * @brief Maximum absolute value for write phase offset in femtoseconds
- */
-#define WRITE_PHASE_OFFSET_LIMIT (20000052084ll)
-
-/** @brief Phase offset resolution
- *
- * DPLL phase offset = 10^15 fs / ( System Clock * 2^13)
- * = 10^15 fs / ( 1638400000 * 2^23)
- * = 74.5058059692382 fs
- */
-#define IDT_T0DPLL_PHASE_RESOL 74506
-
-
#endif /* PTP_IDT82P33_H */
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 0f1b5a7d2a89..c3d0fcf609e3 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -11,12 +11,14 @@
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/i2c-xiic.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/spi/spi.h>
#include <linux/spi/xilinx_spi.h>
#include <net/devlink.h>
#include <linux/i2c.h>
#include <linux/mtd/mtd.h>
+#include <linux/nvmem-consumer.h>
#ifndef PCI_VENDOR_ID_FACEBOOK
#define PCI_VENDOR_ID_FACEBOOK 0x1d9b
@@ -52,6 +54,8 @@ struct ocp_reg {
u32 servo_offset_i;
u32 servo_drift_p;
u32 servo_drift_i;
+ u32 status_offset;
+ u32 status_drift;
};
#define OCP_CTRL_ENABLE BIT(0)
@@ -88,9 +92,10 @@ struct tod_reg {
#define TOD_CTRL_GNSS_MASK ((1U << 4) - 1)
#define TOD_CTRL_GNSS_SHIFT 24
-#define TOD_STATUS_UTC_MASK 0xff
-#define TOD_STATUS_UTC_VALID BIT(8)
-#define TOD_STATUS_LEAP_VALID BIT(16)
+#define TOD_STATUS_UTC_MASK 0xff
+#define TOD_STATUS_UTC_VALID BIT(8)
+#define TOD_STATUS_LEAP_ANNOUNCE BIT(12)
+#define TOD_STATUS_LEAP_VALID BIT(16)
struct ts_reg {
u32 enable;
@@ -174,6 +179,35 @@ struct dcf_slave_reg {
#define DCF_S_CTRL_ENABLE BIT(0)
+struct signal_reg {
+ u32 enable;
+ u32 status;
+ u32 polarity;
+ u32 version;
+ u32 __pad0[4];
+ u32 cable_delay;
+ u32 __pad1[3];
+ u32 intr;
+ u32 intr_mask;
+ u32 __pad2[2];
+ u32 start_ns;
+ u32 start_sec;
+ u32 pulse_ns;
+ u32 pulse_sec;
+ u32 period_ns;
+ u32 period_sec;
+ u32 repeat_count;
+};
+
+struct frequency_reg {
+ u32 ctrl;
+ u32 status;
+};
+#define FREQ_STATUS_VALID BIT(31)
+#define FREQ_STATUS_ERROR BIT(30)
+#define FREQ_STATUS_OVERRUN BIT(29)
+#define FREQ_STATUS_MASK (BIT(24) - 1)
+
struct ptp_ocp_flash_info {
const char *name;
int pci_offset;
@@ -201,6 +235,40 @@ struct ptp_ocp_ext_src {
int irq_vec;
};
+enum ptp_ocp_sma_mode {
+ SMA_MODE_IN,
+ SMA_MODE_OUT,
+};
+
+struct ptp_ocp_sma_connector {
+ enum ptp_ocp_sma_mode mode;
+ bool fixed_fcn;
+ bool fixed_dir;
+ bool disabled;
+};
+
+struct ocp_attr_group {
+ u64 cap;
+ const struct attribute_group *group;
+};
+
+#define OCP_CAP_BASIC BIT(0)
+#define OCP_CAP_SIGNAL BIT(1)
+#define OCP_CAP_FREQ BIT(2)
+
+struct ptp_ocp_signal {
+ ktime_t period;
+ ktime_t pulse;
+ ktime_t phase;
+ ktime_t start;
+ int duty;
+ bool polarity;
+ bool running;
+};
+
+#define OCP_BOARD_ID_LEN 13
+#define OCP_SERIAL_LEN 6
+
struct ptp_ocp {
struct pci_dev *pdev;
struct device dev;
@@ -210,16 +278,21 @@ struct ptp_ocp {
struct pps_reg __iomem *pps_to_ext;
struct pps_reg __iomem *pps_to_clk;
struct gpio_reg __iomem *pps_select;
- struct gpio_reg __iomem *sma;
+ struct gpio_reg __iomem *sma_map1;
+ struct gpio_reg __iomem *sma_map2;
struct irig_master_reg __iomem *irig_out;
struct irig_slave_reg __iomem *irig_in;
struct dcf_master_reg __iomem *dcf_out;
struct dcf_slave_reg __iomem *dcf_in;
struct tod_reg __iomem *nmea_out;
+ struct frequency_reg __iomem *freq_in[4];
+ struct ptp_ocp_ext_src *signal_out[4];
struct ptp_ocp_ext_src *pps;
struct ptp_ocp_ext_src *ts0;
struct ptp_ocp_ext_src *ts1;
struct ptp_ocp_ext_src *ts2;
+ struct ptp_ocp_ext_src *ts3;
+ struct ptp_ocp_ext_src *ts4;
struct img_reg __iomem *image;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
@@ -227,6 +300,8 @@ struct ptp_ocp {
struct platform_device *spi_flash;
struct clk_hw *i2c_clk;
struct timer_list watchdog;
+ const struct ocp_attr_group *attr_tbl;
+ const struct ptp_ocp_eeprom_map *eeprom_map;
struct dentry *debug_root;
time64_t gnss_lost;
int id;
@@ -235,12 +310,17 @@ struct ptp_ocp {
int gnss2_port;
int mac_port; /* miniature atomic clock */
int nmea_port;
- u8 serial[6];
- bool has_serial;
+ u32 fw_version;
+ u8 board_id[OCP_BOARD_ID_LEN];
+ u8 serial[OCP_SERIAL_LEN];
+ bool has_eeprom_data;
u32 pps_req_map;
int flash_start;
u32 utc_tai_offset;
u32 ts_window_adjust;
+ u64 fw_cap;
+ struct ptp_ocp_signal signal[4];
+ struct ptp_ocp_sma_connector sma[4];
};
#define OCP_REQ_TIMESTAMP BIT(0)
@@ -263,7 +343,36 @@ static int ptp_ocp_register_serial(struct ptp_ocp *bp, struct ocp_resource *r);
static int ptp_ocp_register_ext(struct ptp_ocp *bp, struct ocp_resource *r);
static int ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r);
static irqreturn_t ptp_ocp_ts_irq(int irq, void *priv);
+static irqreturn_t ptp_ocp_signal_irq(int irq, void *priv);
static int ptp_ocp_ts_enable(void *priv, u32 req, bool enable);
+static int ptp_ocp_signal_from_perout(struct ptp_ocp *bp, int gen,
+ struct ptp_perout_request *req);
+static int ptp_ocp_signal_enable(void *priv, u32 req, bool enable);
+static int ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr);
+
+static const struct ocp_attr_group fb_timecard_groups[];
+
+struct ptp_ocp_eeprom_map {
+ u16 off;
+ u16 len;
+ u32 bp_offset;
+ const void * const tag;
+};
+
+#define EEPROM_ENTRY(addr, member) \
+ .off = addr, \
+ .len = sizeof_field(struct ptp_ocp, member), \
+ .bp_offset = offsetof(struct ptp_ocp, member)
+
+#define BP_MAP_ENTRY_ADDR(bp, map) ({ \
+ (void *)((uintptr_t)(bp) + (map)->bp_offset); \
+})
+
+static struct ptp_ocp_eeprom_map fb_eeprom_map[] = {
+ { EEPROM_ENTRY(0x43, board_id) },
+ { EEPROM_ENTRY(0x00, serial), .tag = "mac" },
+ { }
+};
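
fb_eeprom_map drives a generic copy loop: each entry pairs an EEPROM offset and length with a destination inside struct ptp_ocp, located via offsetof(). A simplified sketch of how such a table is consumed (the real consumer, ptp_ocp_read_eeprom() further below, adds the per-tag nvmem lookup and error handling):

/* Sketch: table-driven EEPROM -> struct ptp_ocp field copy. */
static void eeprom_map_copy(struct ptp_ocp *bp, struct nvmem_device *nvmem)
{
        const struct ptp_ocp_eeprom_map *map;

        for (map = bp->eeprom_map; map->len; map++)
                nvmem_device_read(nvmem, map->off, map->len,
                                  BP_MAP_ENTRY_ADDR(bp, map));
}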
#define bp_assign_entry(bp, res, val) ({ \
uintptr_t addr = (uintptr_t)(bp) + (res)->bp_offset; \
@@ -289,10 +398,10 @@ static int ptp_ocp_ts_enable(void *priv, u32 req, bool enable);
OCP_RES_LOCATION(member), .setup = ptp_ocp_register_ext
/* This is the MSI vector mapping used.
- * 0: TS3 (and PPS)
+ * 0: PPS (TS5)
* 1: TS0
* 2: TS1
- * 3: GNSS
+ * 3: GNSS1
* 4: GNSS2
* 5: MAC
* 6: TS2
@@ -300,6 +409,12 @@ static int ptp_ocp_ts_enable(void *priv, u32 req, bool enable);
* 8: HWICAP (not used)
* 9: SPI Flash
* 10: NMEA
+ * 11: Signal Generator 1
+ * 12: Signal Generator 2
+ * 13: Signal Generator 3
+ * 14: Signal Generator 4
+ * 15: TS3
+ * 16: TS4
*/
static struct ocp_resource ocp_fb_resource[] = {
@@ -335,15 +450,70 @@ static struct ocp_resource ocp_fb_resource[] = {
},
},
{
+ OCP_EXT_RESOURCE(ts3),
+ .offset = 0x01110000, .size = 0x10000, .irq_vec = 15,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 3,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(ts4),
+ .offset = 0x01120000, .size = 0x10000, .irq_vec = 16,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 4,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ /* Timestamp for PHC and/or PPS generator */
+ {
OCP_EXT_RESOURCE(pps),
.offset = 0x010C0000, .size = 0x10000, .irq_vec = 0,
.extra = &(struct ptp_ocp_ext_info) {
- .index = 3,
+ .index = 5,
.irq_fcn = ptp_ocp_ts_irq,
.enable = ptp_ocp_ts_enable,
},
},
{
+ OCP_EXT_RESOURCE(signal_out[0]),
+ .offset = 0x010D0000, .size = 0x10000, .irq_vec = 11,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 1,
+ .irq_fcn = ptp_ocp_signal_irq,
+ .enable = ptp_ocp_signal_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(signal_out[1]),
+ .offset = 0x010E0000, .size = 0x10000, .irq_vec = 12,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 2,
+ .irq_fcn = ptp_ocp_signal_irq,
+ .enable = ptp_ocp_signal_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(signal_out[2]),
+ .offset = 0x010F0000, .size = 0x10000, .irq_vec = 13,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 3,
+ .irq_fcn = ptp_ocp_signal_irq,
+ .enable = ptp_ocp_signal_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(signal_out[3]),
+ .offset = 0x01100000, .size = 0x10000, .irq_vec = 14,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 4,
+ .irq_fcn = ptp_ocp_signal_irq,
+ .enable = ptp_ocp_signal_enable,
+ },
+ },
+ {
OCP_MEM_RESOURCE(pps_to_ext),
.offset = 0x01030000, .size = 0x10000,
},
@@ -384,15 +554,28 @@ static struct ocp_resource ocp_fb_resource[] = {
.offset = 0x00130000, .size = 0x1000,
},
{
- OCP_MEM_RESOURCE(sma),
+ OCP_MEM_RESOURCE(sma_map1),
.offset = 0x00140000, .size = 0x1000,
},
{
+ OCP_MEM_RESOURCE(sma_map2),
+ .offset = 0x00220000, .size = 0x1000,
+ },
+ {
OCP_I2C_RESOURCE(i2c_ctrl),
.offset = 0x00150000, .size = 0x10000, .irq_vec = 7,
.extra = &(struct ptp_ocp_i2c_info) {
.name = "xiic-i2c",
.fixed_rate = 50000000,
+ .data_size = sizeof(struct xiic_i2c_platform_data),
+ .data = &(struct xiic_i2c_platform_data) {
+ .num_devices = 2,
+ .devices = (struct i2c_board_info[]) {
+ { I2C_BOARD_INFO("24c02", 0x50) },
+ { I2C_BOARD_INFO("24mac402", 0x58),
+ .platform_data = "mac" },
+ },
+ },
},
},
{
@@ -428,6 +611,22 @@ static struct ocp_resource ocp_fb_resource[] = {
},
},
{
+ OCP_MEM_RESOURCE(freq_in[0]),
+ .offset = 0x01200000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(freq_in[1]),
+ .offset = 0x01210000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(freq_in[2]),
+ .offset = 0x01220000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(freq_in[3]),
+ .offset = 0x01230000, .size = 0x10000,
+ },
+ {
.setup = ptp_ocp_fb_board_init,
},
{ }
@@ -460,25 +659,42 @@ static struct ocp_selector ptp_ocp_clock[] = {
{ }
};
+#define SMA_ENABLE BIT(15)
+#define SMA_SELECT_MASK ((1U << 15) - 1)
+#define SMA_DISABLE 0x10000
+
static struct ocp_selector ptp_ocp_sma_in[] = {
- { .name = "10Mhz", .value = 0x00 },
- { .name = "PPS1", .value = 0x01 },
- { .name = "PPS2", .value = 0x02 },
- { .name = "TS1", .value = 0x04 },
- { .name = "TS2", .value = 0x08 },
- { .name = "IRIG", .value = 0x10 },
- { .name = "DCF", .value = 0x20 },
+ { .name = "10Mhz", .value = 0x0000 },
+ { .name = "PPS1", .value = 0x0001 },
+ { .name = "PPS2", .value = 0x0002 },
+ { .name = "TS1", .value = 0x0004 },
+ { .name = "TS2", .value = 0x0008 },
+ { .name = "IRIG", .value = 0x0010 },
+ { .name = "DCF", .value = 0x0020 },
+ { .name = "TS3", .value = 0x0040 },
+ { .name = "TS4", .value = 0x0080 },
+ { .name = "FREQ1", .value = 0x0100 },
+ { .name = "FREQ2", .value = 0x0200 },
+ { .name = "FREQ3", .value = 0x0400 },
+ { .name = "FREQ4", .value = 0x0800 },
+ { .name = "None", .value = SMA_DISABLE },
{ }
};
static struct ocp_selector ptp_ocp_sma_out[] = {
- { .name = "10Mhz", .value = 0x00 },
- { .name = "PHC", .value = 0x01 },
- { .name = "MAC", .value = 0x02 },
- { .name = "GNSS", .value = 0x04 },
- { .name = "GNSS2", .value = 0x08 },
- { .name = "IRIG", .value = 0x10 },
- { .name = "DCF", .value = 0x20 },
+ { .name = "10Mhz", .value = 0x0000 },
+ { .name = "PHC", .value = 0x0001 },
+ { .name = "MAC", .value = 0x0002 },
+ { .name = "GNSS1", .value = 0x0004 },
+ { .name = "GNSS2", .value = 0x0008 },
+ { .name = "IRIG", .value = 0x0010 },
+ { .name = "DCF", .value = 0x0020 },
+ { .name = "GEN1", .value = 0x0040 },
+ { .name = "GEN2", .value = 0x0080 },
+ { .name = "GEN3", .value = 0x0100 },
+ { .name = "GEN4", .value = 0x0200 },
+ { .name = "GND", .value = 0x2000 },
+ { .name = "VCC", .value = 0x4000 },
{ }
};
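
Apart from the 10Mhz default (0x0000) and the pseudo-entries (None, GND, VCC), the selector values are one-hot bits within SMA_SELECT_MASK, so one 16-bit register field can encode several routed functions at once; SMA_ENABLE (bit 15) qualifies the field and SMA_DISABLE marks an unconnected input. A sketch of the name lookup these tables support, assuming the driver's struct ocp_selector { name, value } element defined earlier in the file:

/* Sketch: map a selector value back to its table name. */
static const char *
selector_name(const struct ocp_selector *tbl, u32 val)
{
        int i;

        for (i = 0; tbl[i].name; i++)
                if (tbl[i].value == val)
                        return tbl[i].name;
        return NULL;
}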
@@ -607,7 +823,7 @@ ptp_ocp_settime(struct ptp_clock_info *ptp_info, const struct timespec64 *ts)
}
static void
-__ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val)
+__ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u32 adj_val)
{
u32 select, ctrl;
@@ -615,7 +831,7 @@ __ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val)
iowrite32(OCP_SELECT_CLK_REG, &bp->reg->select);
iowrite32(adj_val, &bp->reg->offset_ns);
- iowrite32(adj_val & 0x7f, &bp->reg->offset_window_ns);
+ iowrite32(NSEC_PER_SEC, &bp->reg->offset_window_ns);
ctrl = OCP_CTRL_ADJUST_OFFSET | OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
@@ -624,6 +840,22 @@ __ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val)
iowrite32(select >> 16, &bp->reg->select);
}
+static void
+ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, u64 delta_ns)
+{
+ struct timespec64 ts;
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&bp->lock, flags);
+ err = __ptp_ocp_gettime_locked(bp, &ts, NULL);
+ if (likely(!err)) {
+ timespec64_add_ns(&ts, delta_ns);
+ __ptp_ocp_settime_locked(bp, &ts);
+ }
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
static int
ptp_ocp_adjtime(struct ptp_clock_info *ptp_info, s64 delta_ns)
{
@@ -631,6 +863,11 @@ ptp_ocp_adjtime(struct ptp_clock_info *ptp_info, s64 delta_ns)
unsigned long flags;
u32 adj_ns, sign;
+ if (delta_ns > NSEC_PER_SEC || -delta_ns > NSEC_PER_SEC) {
+ ptp_ocp_adjtime_coarse(bp, delta_ns);
+ return 0;
+ }
+
sign = delta_ns < 0 ? BIT(31) : 0;
adj_ns = sign ? -delta_ns : delta_ns;
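
From user space both paths hide behind clock_adjtime(): offsets within one second go through the hardware offset adjuster, anything larger takes the coarse get/add/set path added above. A hedged example issuing a step on the card's PHC (clkid obtained the usual way from the /dev/ptpN file descriptor):

/* Sketch: user-space step request; offsets over 1 s land in the coarse path. */
#include <sys/timex.h>

static int step_phc(clockid_t clkid, long long offset_ns)
{
        struct timex tx = { 0 };

        tx.modes = ADJ_SETOFFSET | ADJ_NANO;
        tx.time.tv_sec = offset_ns / 1000000000LL;
        tx.time.tv_usec = offset_ns % 1000000000LL;
        if (tx.time.tv_usec < 0) {      /* nsec field must be non-negative */
                tx.time.tv_sec -= 1;
                tx.time.tv_usec += 1000000000LL;
        }
        return clock_adjtime(clkid, &tx);
}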
@@ -679,6 +916,12 @@ ptp_ocp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq,
ext = bp->ts2;
break;
case 3:
+ ext = bp->ts3;
+ break;
+ case 4:
+ ext = bp->ts4;
+ break;
+ case 5:
ext = bp->pps;
break;
}
@@ -688,13 +931,27 @@ ptp_ocp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq,
ext = bp->pps;
break;
case PTP_CLK_REQ_PEROUT:
- if (on &&
- (rq->perout.period.sec != 1 || rq->perout.period.nsec != 0))
- return -EINVAL;
- /* This is a request for 1PPS on an output SMA.
- * Allow, but assume manual configuration.
- */
- return 0;
+ switch (rq->perout.index) {
+ case 0:
+ /* This is a request for 1PPS on an output SMA.
+ * Allow, but assume manual configuration.
+ */
+ if (on && (rq->perout.period.sec != 1 ||
+ rq->perout.period.nsec != 0))
+ return -EINVAL;
+ return 0;
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ req = rq->perout.index - 1;
+ ext = bp->signal_out[req];
+ err = ptp_ocp_signal_from_perout(bp, req, &rq->perout);
+ if (err)
+ return err;
+ break;
+ }
+ break;
default:
return -EOPNOTSUPP;
}
@@ -706,6 +963,36 @@ ptp_ocp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq,
return err;
}
+static int
+ptp_ocp_verify(struct ptp_clock_info *ptp_info, unsigned pin,
+ enum ptp_pin_function func, unsigned chan)
+{
+ struct ptp_ocp *bp = container_of(ptp_info, struct ptp_ocp, ptp_info);
+ char buf[16];
+
+ switch (func) {
+ case PTP_PF_NONE:
+ snprintf(buf, sizeof(buf), "IN: None");
+ break;
+ case PTP_PF_EXTTS:
+ /* Allow timestamps, but require sysfs configuration. */
+ return 0;
+ case PTP_PF_PEROUT:
+ /* channel 0 is 1PPS from PHC.
+ * channels 1..4 are the frequency generators.
+ */
+ if (chan)
+ snprintf(buf, sizeof(buf), "OUT: GEN%d", chan);
+ else
+ snprintf(buf, sizeof(buf), "OUT: PHC");
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ptp_ocp_sma_store(bp, buf, pin + 1);
+}
+
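
ptp_ocp_verify() translates pin-function requests into the same strings the smaN sysfs files accept, so the PTP pin API and sysfs stay coherent. A hypothetical user-space counterpart routing SMA2 (pin index 1) to signal generator 3:

/* Sketch: assign pin 1 (sma2) to periodic output channel 3 ("OUT: GEN3"). */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

static int route_sma2_to_gen3(int ptp_fd)
{
        struct ptp_pin_desc pd;

        memset(&pd, 0, sizeof(pd));
        pd.index = 1;                   /* sma2 */
        pd.func = PTP_PF_PEROUT;        /* periodic output */
        pd.chan = 3;                    /* channels 1..4 are GEN1..GEN4 */
        return ioctl(ptp_fd, PTP_PIN_SETFUNC2, &pd);
}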
static const struct ptp_clock_info ptp_ocp_clock_info = {
.owner = THIS_MODULE,
.name = KBUILD_MODNAME,
@@ -716,9 +1003,10 @@ static const struct ptp_clock_info ptp_ocp_clock_info = {
.adjfine = ptp_ocp_null_adjfine,
.adjphase = ptp_ocp_null_adjphase,
.enable = ptp_ocp_enable,
+ .verify = ptp_ocp_verify,
.pps = true,
- .n_ext_ts = 4,
- .n_per_out = 1,
+ .n_ext_ts = 6,
+ .n_per_out = 5,
};
static void
@@ -739,11 +1027,30 @@ __ptp_ocp_clear_drift_locked(struct ptp_ocp *bp)
}
static void
+ptp_ocp_utc_distribute(struct ptp_ocp *bp, u32 val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ bp->utc_tai_offset = val;
+
+ if (bp->irig_out)
+ iowrite32(val, &bp->irig_out->adj_sec);
+ if (bp->dcf_out)
+ iowrite32(val, &bp->dcf_out->adj_sec);
+ if (bp->nmea_out)
+ iowrite32(val, &bp->nmea_out->adj_sec);
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static void
ptp_ocp_watchdog(struct timer_list *t)
{
struct ptp_ocp *bp = from_timer(bp, t, watchdog);
unsigned long flags;
- u32 status;
+ u32 status, utc_offset;
status = ioread32(&bp->pps_to_clk->status);
@@ -760,6 +1067,17 @@ ptp_ocp_watchdog(struct timer_list *t)
bp->gnss_lost = 0;
}
+ /* if GNSS provides correct data we can rely on
+ * it to get leap second information
+ */
+ if (bp->tod) {
+ status = ioread32(&bp->tod->utc_status);
+ utc_offset = status & TOD_STATUS_UTC_MASK;
+ if (status & TOD_STATUS_UTC_VALID &&
+ utc_offset != bp->utc_tai_offset)
+ ptp_ocp_utc_distribute(bp, utc_offset);
+ }
+
mod_timer(&bp->watchdog, jiffies + HZ);
}
@@ -829,25 +1147,6 @@ ptp_ocp_init_clock(struct ptp_ocp *bp)
}
static void
-ptp_ocp_utc_distribute(struct ptp_ocp *bp, u32 val)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&bp->lock, flags);
-
- bp->utc_tai_offset = val;
-
- if (bp->irig_out)
- iowrite32(val, &bp->irig_out->adj_sec);
- if (bp->dcf_out)
- iowrite32(val, &bp->dcf_out->adj_sec);
- if (bp->nmea_out)
- iowrite32(val, &bp->nmea_out->adj_sec);
-
- spin_unlock_irqrestore(&bp->lock, flags);
-}
-
-static void
ptp_ocp_tod_init(struct ptp_ocp *bp)
{
u32 ctrl, reg;
@@ -862,119 +1161,110 @@ ptp_ocp_tod_init(struct ptp_ocp *bp)
ptp_ocp_utc_distribute(bp, reg & TOD_STATUS_UTC_MASK);
}
-static void
-ptp_ocp_tod_info(struct ptp_ocp *bp)
+static const char *
+ptp_ocp_tod_proto_name(const int idx)
{
static const char * const proto_name[] = {
"NMEA", "NMEA_ZDA", "NMEA_RMC", "NMEA_none",
"UBX", "UBX_UTC", "UBX_LS", "UBX_none"
};
+ return proto_name[idx];
+}
+
+static const char *
+ptp_ocp_tod_gnss_name(int idx)
+{
static const char * const gnss_name[] = {
"ALL", "COMBINED", "GPS", "GLONASS", "GALILEO", "BEIDOU",
+ "Unknown"
};
- u32 version, ctrl, reg;
- int idx;
-
- version = ioread32(&bp->tod->version);
- dev_info(&bp->pdev->dev, "TOD Version %d.%d.%d\n",
- version >> 24, (version >> 16) & 0xff, version & 0xffff);
-
- ctrl = ioread32(&bp->tod->ctrl);
- idx = ctrl & TOD_CTRL_PROTOCOL ? 4 : 0;
- idx += (ctrl >> 16) & 3;
- dev_info(&bp->pdev->dev, "control: %x\n", ctrl);
- dev_info(&bp->pdev->dev, "TOD Protocol %s %s\n", proto_name[idx],
- ctrl & TOD_CTRL_ENABLE ? "enabled" : "");
+ if (idx >= ARRAY_SIZE(gnss_name))
+ idx = ARRAY_SIZE(gnss_name) - 1;
+ return gnss_name[idx];
+}
- idx = (ctrl >> TOD_CTRL_GNSS_SHIFT) & TOD_CTRL_GNSS_MASK;
- if (idx < ARRAY_SIZE(gnss_name))
- dev_info(&bp->pdev->dev, "GNSS %s\n", gnss_name[idx]);
+struct ptp_ocp_nvmem_match_info {
+ struct ptp_ocp *bp;
+ const void * const tag;
+};
- reg = ioread32(&bp->tod->status);
- dev_info(&bp->pdev->dev, "status: %x\n", reg);
+static int
+ptp_ocp_nvmem_match(struct device *dev, const void *data)
+{
+ const struct ptp_ocp_nvmem_match_info *info = data;
- reg = ioread32(&bp->tod->adj_sec);
- dev_info(&bp->pdev->dev, "correction: %d\n", reg);
+ dev = dev->parent;
+ if (!i2c_verify_client(dev) || info->tag != dev->platform_data)
+ return 0;
- reg = ioread32(&bp->tod->utc_status);
- dev_info(&bp->pdev->dev, "utc_status: %x\n", reg);
- dev_info(&bp->pdev->dev, "utc_offset: %d valid:%d leap_valid:%d\n",
- reg & TOD_STATUS_UTC_MASK, reg & TOD_STATUS_UTC_VALID ? 1 : 0,
- reg & TOD_STATUS_LEAP_VALID ? 1 : 0);
+ while ((dev = dev->parent))
+ if (dev->driver && !strcmp(dev->driver->name, KBUILD_MODNAME))
+ return info->bp == dev_get_drvdata(dev);
+ return 0;
}
-static int
-ptp_ocp_firstchild(struct device *dev, void *data)
+static inline struct nvmem_device *
+ptp_ocp_nvmem_device_get(struct ptp_ocp *bp, const void * const tag)
{
- return 1;
+ struct ptp_ocp_nvmem_match_info info = { .bp = bp, .tag = tag };
+
+ return nvmem_device_find(&info, ptp_ocp_nvmem_match);
}
-static int
-ptp_ocp_read_i2c(struct i2c_adapter *adap, u8 addr, u8 reg, u8 sz, u8 *data)
+static inline void
+ptp_ocp_nvmem_device_put(struct nvmem_device **nvmemp)
{
- struct i2c_msg msgs[2] = {
- {
- .addr = addr,
- .len = 1,
- .buf = &reg,
- },
- {
- .addr = addr,
- .flags = I2C_M_RD,
- .len = 2,
- .buf = data,
- },
- };
- int err;
- u8 len;
-
- /* xiic-i2c for some stupid reason only does 2 byte reads. */
- while (sz) {
- len = min_t(u8, sz, 2);
- msgs[1].len = len;
- err = i2c_transfer(adap, msgs, 2);
- if (err != msgs[1].len)
- return err;
- msgs[1].buf += len;
- reg += len;
- sz -= len;
+ if (*nvmemp != NULL) {
+ nvmem_device_put(*nvmemp);
+ *nvmemp = NULL;
}
- return 0;
}
static void
-ptp_ocp_get_serial_number(struct ptp_ocp *bp)
+ptp_ocp_read_eeprom(struct ptp_ocp *bp)
{
- struct i2c_adapter *adap;
- struct device *dev;
- int err;
+ const struct ptp_ocp_eeprom_map *map;
+ struct nvmem_device *nvmem;
+ const void *tag;
+ int ret;
if (!bp->i2c_ctrl)
return;
- dev = device_find_child(&bp->i2c_ctrl->dev, NULL, ptp_ocp_firstchild);
- if (!dev) {
- dev_err(&bp->pdev->dev, "Can't find I2C adapter\n");
- return;
- }
+ tag = NULL;
+ nvmem = NULL;
- adap = i2c_verify_adapter(dev);
- if (!adap) {
- dev_err(&bp->pdev->dev, "device '%s' isn't an I2C adapter\n",
- dev_name(dev));
- goto out;
- }
-
- err = ptp_ocp_read_i2c(adap, 0x58, 0x9A, 6, bp->serial);
- if (err) {
- dev_err(&bp->pdev->dev, "could not read eeprom: %d\n", err);
- goto out;
+ for (map = bp->eeprom_map; map->len; map++) {
+ if (map->tag != tag) {
+ tag = map->tag;
+ ptp_ocp_nvmem_device_put(&nvmem);
+ }
+ if (!nvmem) {
+ nvmem = ptp_ocp_nvmem_device_get(bp, tag);
+ if (!nvmem)
+ goto out;
+ }
+ ret = nvmem_device_read(nvmem, map->off, map->len,
+ BP_MAP_ENTRY_ADDR(bp, map));
+ if (ret != map->len)
+ goto read_fail;
}
- bp->has_serial = true;
+ bp->has_eeprom_data = true;
out:
- put_device(dev);
+ ptp_ocp_nvmem_device_put(&nvmem);
+ return;
+
+read_fail:
+ dev_err(&bp->pdev->dev, "could not read eeprom: %d\n", ret);
+ goto out;
+}
+
+static int
+ptp_ocp_firstchild(struct device *dev, void *data)
+{
+ return 1;
}
static struct device *
@@ -1075,34 +1365,33 @@ ptp_ocp_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
if (err)
return err;
- if (bp->image) {
- u32 ver = ioread32(&bp->image->version);
-
- if (ver & 0xffff) {
- sprintf(buf, "%d", ver);
- err = devlink_info_version_running_put(req,
- "fw",
- buf);
- } else {
- sprintf(buf, "%d", ver >> 16);
- err = devlink_info_version_running_put(req,
- "loader",
- buf);
- }
- if (err)
- return err;
+ if (bp->fw_version & 0xffff) {
+ sprintf(buf, "%d", bp->fw_version);
+ err = devlink_info_version_running_put(req, "fw", buf);
+ } else {
+ sprintf(buf, "%d", bp->fw_version >> 16);
+ err = devlink_info_version_running_put(req, "loader", buf);
}
+ if (err)
+ return err;
- if (!bp->has_serial)
- ptp_ocp_get_serial_number(bp);
-
- if (bp->has_serial) {
- sprintf(buf, "%pM", bp->serial);
- err = devlink_info_serial_number_put(req, buf);
- if (err)
- return err;
+ if (!bp->has_eeprom_data) {
+ ptp_ocp_read_eeprom(bp);
+ if (!bp->has_eeprom_data)
+ return 0;
}
+ sprintf(buf, "%pM", bp->serial);
+ err = devlink_info_serial_number_put(req, buf);
+ if (err)
+ return err;
+
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ bp->board_id);
+ if (err)
+ return err;
+
return 0;
}
@@ -1219,6 +1508,137 @@ ptp_ocp_register_i2c(struct ptp_ocp *bp, struct ocp_resource *r)
return 0;
}
+/* The expectation is that this is triggered only on error. */
+static irqreturn_t
+ptp_ocp_signal_irq(int irq, void *priv)
+{
+ struct ptp_ocp_ext_src *ext = priv;
+ struct signal_reg __iomem *reg = ext->mem;
+ struct ptp_ocp *bp = ext->bp;
+ u32 enable, status;
+ int gen;
+
+ gen = ext->info->index - 1;
+
+ enable = ioread32(&reg->enable);
+ status = ioread32(&reg->status);
+
+ /* disable generator on error */
+ if (status || !enable) {
+ iowrite32(0, &reg->intr_mask);
+ iowrite32(0, &reg->enable);
+ bp->signal[gen].running = false;
+ }
+
+ iowrite32(0, &reg->intr); /* ack interrupt */
+
+ return IRQ_HANDLED;
+}
+
+static int
+ptp_ocp_signal_set(struct ptp_ocp *bp, int gen, struct ptp_ocp_signal *s)
+{
+ struct ptp_system_timestamp sts;
+ struct timespec64 ts;
+ ktime_t start_ns;
+ int err;
+
+ if (!s->period)
+ return 0;
+
+ if (!s->pulse)
+ s->pulse = ktime_divns(s->period * s->duty, 100);
+
+ err = ptp_ocp_gettimex(&bp->ptp_info, &ts, &sts);
+ if (err)
+ return err;
+
+ start_ns = ktime_set(ts.tv_sec, ts.tv_nsec) + NSEC_PER_MSEC;
+ if (!s->start) {
+ /* roundup() does not work on 32-bit systems */
+ s->start = DIV_ROUND_UP_ULL(start_ns, s->period);
+ s->start = ktime_add(s->start * s->period, s->phase);
+ }
+
+ if (s->duty < 1 || s->duty > 99)
+ return -EINVAL;
+
+ if (s->pulse < 1 || s->pulse > s->period)
+ return -EINVAL;
+
+ if (s->start < start_ns)
+ return -EINVAL;
+
+ bp->signal[gen] = *s;
+
+ return 0;
+}
+
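
The start-time rounding above aligns the first edge to a period boundary at least 1 ms in the future, then applies the requested phase; without the round-up the later s->start < start_ns check would always reject the request. A worked example with illustrative numbers:

/* Sketch: period 500 ms, "now" + 1 ms = 12.3456789 s. */
static u64 demo_signal_start(void)
{
        u64 period = 500000000ULL;      /* 500 ms */
        u64 now_ns = 12345678900ULL;    /* gettimex result plus 1 ms */

        /* ceil(24.69) = 25 periods -> first edge at 12.5 s */
        return DIV_ROUND_UP_ULL(now_ns, period) * period;
}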
+static int
+ptp_ocp_signal_from_perout(struct ptp_ocp *bp, int gen,
+ struct ptp_perout_request *req)
+{
+ struct ptp_ocp_signal s = { };
+
+ s.polarity = bp->signal[gen].polarity;
+ s.period = ktime_set(req->period.sec, req->period.nsec);
+ if (!s.period)
+ return 0;
+
+ if (req->flags & PTP_PEROUT_DUTY_CYCLE) {
+ s.pulse = ktime_set(req->on.sec, req->on.nsec);
+ s.duty = ktime_divns(s.pulse * 100, s.period);
+ }
+
+ if (req->flags & PTP_PEROUT_PHASE)
+ s.phase = ktime_set(req->phase.sec, req->phase.nsec);
+ else
+ s.start = ktime_set(req->start.sec, req->start.nsec);
+
+ return ptp_ocp_signal_set(bp, gen, &s);
+}
+
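
The same generators are reachable through the standard periodic-output ioctl; PTP_PEROUT_DUTY_CYCLE and PTP_PEROUT_PHASE map onto the pulse and phase fields above. A hypothetical request for 10 Hz with 25% duty cycle on generator 1 (the flags need the PEROUT_REQUEST2 variant to be honored):

/* Sketch: 10 Hz, 25% duty cycle on perout index 1 (GEN1). */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

static int start_gen1_10hz(int ptp_fd)
{
        struct ptp_perout_request req;

        memset(&req, 0, sizeof(req));
        req.index = 1;                  /* generator 1 */
        req.flags = PTP_PEROUT_DUTY_CYCLE;
        req.period.nsec = 100000000;    /* 10 Hz */
        req.on.nsec = 25000000;         /* 25 ms high time */
        return ioctl(ptp_fd, PTP_PEROUT_REQUEST2, &req);
}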
+static int
+ptp_ocp_signal_enable(void *priv, u32 req, bool enable)
+{
+ struct ptp_ocp_ext_src *ext = priv;
+ struct signal_reg __iomem *reg = ext->mem;
+ struct ptp_ocp *bp = ext->bp;
+ struct timespec64 ts;
+ int gen;
+
+ gen = ext->info->index - 1;
+
+ iowrite32(0, &reg->intr_mask);
+ iowrite32(0, &reg->enable);
+ bp->signal[gen].running = false;
+ if (!enable)
+ return 0;
+
+ ts = ktime_to_timespec64(bp->signal[gen].start);
+ iowrite32(ts.tv_sec, &reg->start_sec);
+ iowrite32(ts.tv_nsec, &reg->start_ns);
+
+ ts = ktime_to_timespec64(bp->signal[gen].period);
+ iowrite32(ts.tv_sec, &reg->period_sec);
+ iowrite32(ts.tv_nsec, &reg->period_ns);
+
+ ts = ktime_to_timespec64(bp->signal[gen].pulse);
+ iowrite32(ts.tv_sec, &reg->pulse_sec);
+ iowrite32(ts.tv_nsec, &reg->pulse_ns);
+
+ iowrite32(bp->signal[gen].polarity, &reg->polarity);
+ iowrite32(0, &reg->repeat_count);
+
+ iowrite32(0, &reg->intr); /* clear interrupt state */
+ iowrite32(1, &reg->intr_mask); /* enable interrupt */
+ iowrite32(3, &reg->enable); /* valid & enable */
+
+ bp->signal[gen].running = true;
+
+ return 0;
+}
+
static irqreturn_t
ptp_ocp_ts_irq(int irq, void *priv)
{
@@ -1346,7 +1766,7 @@ ptp_ocp_serial_line(struct ptp_ocp *bp, struct ocp_resource *r)
uart.port.mapbase = pci_resource_start(pdev, 0) + r->offset;
uart.port.irq = pci_irq_vector(pdev, r->irq_vec);
uart.port.uartclk = 50000000;
- uart.port.flags = UPF_FIXED_TYPE | UPF_IOREMAP;
+ uart.port.flags = UPF_FIXED_TYPE | UPF_IOREMAP | UPF_NO_THRE_TEST;
uart.port.type = PORT_16550A;
return serial8250_register_8250_port(&uart);
@@ -1391,14 +1811,115 @@ ptp_ocp_nmea_out_init(struct ptp_ocp *bp)
iowrite32(1, &bp->nmea_out->ctrl); /* enable */
}
+static void
+_ptp_ocp_signal_init(struct ptp_ocp_signal *s, struct signal_reg __iomem *reg)
+{
+ u32 val;
+
+ iowrite32(0, &reg->enable); /* disable */
+
+ val = ioread32(&reg->polarity);
+ s->polarity = val ? true : false;
+ s->duty = 50;
+}
+
+static void
+ptp_ocp_signal_init(struct ptp_ocp *bp)
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ if (bp->signal_out[i])
+ _ptp_ocp_signal_init(&bp->signal[i],
+ bp->signal_out[i]->mem);
+}
+
+static void
+ptp_ocp_sma_init(struct ptp_ocp *bp)
+{
+ u32 reg;
+ int i;
+
+ /* defaults */
+ bp->sma[0].mode = SMA_MODE_IN;
+ bp->sma[1].mode = SMA_MODE_IN;
+ bp->sma[2].mode = SMA_MODE_OUT;
+ bp->sma[3].mode = SMA_MODE_OUT;
+
+ /* If no SMA1 map, the pin functions and directions are fixed. */
+ if (!bp->sma_map1) {
+ for (i = 0; i < 4; i++) {
+ bp->sma[i].fixed_fcn = true;
+ bp->sma[i].fixed_dir = true;
+ }
+ return;
+ }
+
+ /* If SMA2 GPIO output map is all 1, it is not present.
+ * This indicates the firmware has fixed direction SMA pins.
+ */
+ reg = ioread32(&bp->sma_map2->gpio2);
+ if (reg == 0xffffffff) {
+ for (i = 0; i < 4; i++)
+ bp->sma[i].fixed_dir = true;
+ } else {
+ reg = ioread32(&bp->sma_map1->gpio1);
+ bp->sma[0].mode = reg & BIT(15) ? SMA_MODE_IN : SMA_MODE_OUT;
+ bp->sma[1].mode = reg & BIT(31) ? SMA_MODE_IN : SMA_MODE_OUT;
+
+ reg = ioread32(&bp->sma_map1->gpio2);
+ bp->sma[2].mode = reg & BIT(15) ? SMA_MODE_OUT : SMA_MODE_IN;
+ bp->sma[3].mode = reg & BIT(31) ? SMA_MODE_OUT : SMA_MODE_IN;
+ }
+}
+
+static int
+ptp_ocp_fb_set_pins(struct ptp_ocp *bp)
+{
+ struct ptp_pin_desc *config;
+ int i;
+
+ config = kzalloc(sizeof(*config) * 4, GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+
+ for (i = 0; i < 4; i++) {
+ sprintf(config[i].name, "sma%d", i + 1);
+ config[i].index = i;
+ }
+
+ bp->ptp_info.n_pins = 4;
+ bp->ptp_info.pin_config = config;
+
+ return 0;
+}
+
/* FB specific board initializers; last "resource" registered. */
static int
ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
{
+ int ver, err;
+
bp->flash_start = 1024 * 4096;
+ bp->eeprom_map = fb_eeprom_map;
+ bp->fw_version = ioread32(&bp->image->version);
+ bp->attr_tbl = fb_timecard_groups;
+ bp->fw_cap = OCP_CAP_BASIC;
+
+ ver = bp->fw_version & 0xffff;
+ if (ver >= 19)
+ bp->fw_cap |= OCP_CAP_SIGNAL;
+ if (ver >= 20)
+ bp->fw_cap |= OCP_CAP_FREQ;
ptp_ocp_tod_init(bp);
ptp_ocp_nmea_out_init(bp);
+ ptp_ocp_sma_init(bp);
+ ptp_ocp_signal_init(bp);
+
+ err = ptp_ocp_fb_set_pins(bp);
+ if (err)
+ return err;
return ptp_ocp_init_clock(bp);
}
@@ -1500,38 +2021,8 @@ __handle_signal_inputs(struct ptp_ocp *bp, u32 val)
* ANT4 == sma4 (out)
*/
-enum ptp_ocp_sma_mode {
- SMA_MODE_IN,
- SMA_MODE_OUT,
-};
-
-static struct ptp_ocp_sma_connector {
- enum ptp_ocp_sma_mode mode;
- bool fixed_mode;
- u16 default_out_idx;
-} ptp_ocp_sma_map[4] = {
- {
- .mode = SMA_MODE_IN,
- .fixed_mode = true,
- },
- {
- .mode = SMA_MODE_IN,
- .fixed_mode = true,
- },
- {
- .mode = SMA_MODE_OUT,
- .fixed_mode = true,
- .default_out_idx = 0, /* 10Mhz */
- },
- {
- .mode = SMA_MODE_OUT,
- .fixed_mode = true,
- .default_out_idx = 1, /* PHC */
- },
-};
-
static ssize_t
-ptp_ocp_show_output(u32 val, char *buf, int default_idx)
+ptp_ocp_show_output(u32 val, char *buf, int def_val)
{
const char *name;
ssize_t count;
@@ -1539,13 +2030,13 @@ ptp_ocp_show_output(u32 val, char *buf, int default_idx)
count = sysfs_emit(buf, "OUT: ");
name = ptp_ocp_select_name_from_val(ptp_ocp_sma_out, val);
if (!name)
- name = ptp_ocp_sma_out[default_idx].name;
+ name = ptp_ocp_select_name_from_val(ptp_ocp_sma_out, def_val);
count += sysfs_emit_at(buf, count, "%s\n", name);
return count;
}
static ssize_t
-ptp_ocp_show_inputs(u32 val, char *buf, const char *zero_in)
+ptp_ocp_show_inputs(u32 val, char *buf, int def_val)
{
const char *name;
ssize_t count;
@@ -1558,8 +2049,10 @@ ptp_ocp_show_inputs(u32 val, char *buf, const char *zero_in)
count += sysfs_emit_at(buf, count, "%s ", name);
}
}
- if (!val && zero_in)
- count += sysfs_emit_at(buf, count, "%s ", zero_in);
+ if (!val && def_val >= 0) {
+ name = ptp_ocp_select_name_from_val(ptp_ocp_sma_in, def_val);
+ count += sysfs_emit_at(buf, count, "%s ", name);
+ }
if (count)
count--;
count += sysfs_emit_at(buf, count, "\n");
@@ -1584,7 +2077,7 @@ sma_parse_inputs(const char *buf, enum ptp_ocp_sma_mode *mode)
idx = 0;
dir = *mode == SMA_MODE_IN ? 0 : 1;
- if (!strcasecmp("IN:", argv[idx])) {
+ if (!strcasecmp("IN:", argv[0])) {
dir = 0;
idx++;
}
@@ -1605,102 +2098,126 @@ out:
return ret;
}
+static u32
+ptp_ocp_sma_get(struct ptp_ocp *bp, int sma_nr, enum ptp_ocp_sma_mode mode)
+{
+ u32 __iomem *gpio;
+ u32 shift;
+
+ if (bp->sma[sma_nr - 1].fixed_fcn)
+ return (sma_nr - 1) & 1;
+
+ if (mode == SMA_MODE_IN)
+ gpio = sma_nr > 2 ? &bp->sma_map2->gpio1 : &bp->sma_map1->gpio1;
+ else
+ gpio = sma_nr > 2 ? &bp->sma_map1->gpio2 : &bp->sma_map2->gpio2;
+ shift = sma_nr & 1 ? 0 : 16;
+
+ return (ioread32(gpio) >> shift) & 0xffff;
+}
+
static ssize_t
-ptp_ocp_sma_show(struct ptp_ocp *bp, int sma_nr, u32 val, char *buf,
- const char *zero_in)
+ptp_ocp_sma_show(struct ptp_ocp *bp, int sma_nr, char *buf,
+ int default_in_val, int default_out_val)
{
- struct ptp_ocp_sma_connector *sma = &ptp_ocp_sma_map[sma_nr - 1];
+ struct ptp_ocp_sma_connector *sma = &bp->sma[sma_nr - 1];
+ u32 val;
- if (sma->mode == SMA_MODE_IN)
- return ptp_ocp_show_inputs(val, buf, zero_in);
+ val = ptp_ocp_sma_get(bp, sma_nr, sma->mode) & SMA_SELECT_MASK;
- return ptp_ocp_show_output(val, buf, sma->default_out_idx);
+ if (sma->mode == SMA_MODE_IN) {
+ if (sma->disabled)
+ val = SMA_DISABLE;
+ return ptp_ocp_show_inputs(val, buf, default_in_val);
+ }
+
+ return ptp_ocp_show_output(val, buf, default_out_val);
}
static ssize_t
sma1_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
- u32 val;
- val = ioread32(&bp->sma->gpio1) & 0x3f;
- return ptp_ocp_sma_show(bp, 1, val, buf, ptp_ocp_sma_in[0].name);
+ return ptp_ocp_sma_show(bp, 1, buf, 0, 1);
}
static ssize_t
sma2_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
- u32 val;
- val = (ioread32(&bp->sma->gpio1) >> 16) & 0x3f;
- return ptp_ocp_sma_show(bp, 2, val, buf, NULL);
+ return ptp_ocp_sma_show(bp, 2, buf, -1, 1);
}
static ssize_t
sma3_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
- u32 val;
- val = ioread32(&bp->sma->gpio2) & 0x3f;
- return ptp_ocp_sma_show(bp, 3, val, buf, NULL);
+ return ptp_ocp_sma_show(bp, 3, buf, -1, 0);
}
static ssize_t
sma4_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
- u32 val;
- val = (ioread32(&bp->sma->gpio2) >> 16) & 0x3f;
- return ptp_ocp_sma_show(bp, 4, val, buf, NULL);
+ return ptp_ocp_sma_show(bp, 4, buf, -1, 1);
}
static void
-ptp_ocp_sma_store_output(struct ptp_ocp *bp, u32 val, u32 shift)
+ptp_ocp_sma_store_output(struct ptp_ocp *bp, int sma_nr, u32 val)
{
+ u32 reg, mask, shift;
unsigned long flags;
- u32 gpio, mask;
+ u32 __iomem *gpio;
+
+ gpio = sma_nr > 2 ? &bp->sma_map1->gpio2 : &bp->sma_map2->gpio2;
+ shift = sma_nr & 1 ? 0 : 16;
mask = 0xffff << (16 - shift);
spin_lock_irqsave(&bp->lock, flags);
- gpio = ioread32(&bp->sma->gpio2);
- gpio = (gpio & mask) | (val << shift);
+ reg = ioread32(gpio);
+ reg = (reg & mask) | (val << shift);
- __handle_signal_outputs(bp, gpio);
+ __handle_signal_outputs(bp, reg);
- iowrite32(gpio, &bp->sma->gpio2);
+ iowrite32(reg, gpio);
spin_unlock_irqrestore(&bp->lock, flags);
}
static void
-ptp_ocp_sma_store_inputs(struct ptp_ocp *bp, u32 val, u32 shift)
+ptp_ocp_sma_store_inputs(struct ptp_ocp *bp, int sma_nr, u32 val)
{
+ u32 reg, mask, shift;
unsigned long flags;
- u32 gpio, mask;
+ u32 __iomem *gpio;
+
+ gpio = sma_nr > 2 ? &bp->sma_map2->gpio1 : &bp->sma_map1->gpio1;
+ shift = sma_nr & 1 ? 0 : 16;
mask = 0xffff << (16 - shift);
spin_lock_irqsave(&bp->lock, flags);
- gpio = ioread32(&bp->sma->gpio1);
- gpio = (gpio & mask) | (val << shift);
+ reg = ioread32(gpio);
+ reg = (reg & mask) | (val << shift);
- __handle_signal_inputs(bp, gpio);
+ __handle_signal_inputs(bp, reg);
- iowrite32(gpio, &bp->sma->gpio1);
+ iowrite32(reg, gpio);
spin_unlock_irqrestore(&bp->lock, flags);
}
-static ssize_t
-ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr, u32 shift)
+static int
+ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr)
{
- struct ptp_ocp_sma_connector *sma = &ptp_ocp_sma_map[sma_nr - 1];
+ struct ptp_ocp_sma_connector *sma = &bp->sma[sma_nr - 1];
enum ptp_ocp_sma_mode mode;
int val;
@@ -1709,18 +2226,35 @@ ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr, u32 shift)
if (val < 0)
return val;
- if (mode != sma->mode && sma->fixed_mode)
+ if (sma->fixed_dir && (mode != sma->mode || val & SMA_DISABLE))
return -EOPNOTSUPP;
+ if (sma->fixed_fcn) {
+ if (val != ((sma_nr - 1) & 1))
+ return -EOPNOTSUPP;
+ return 0;
+ }
+
+ sma->disabled = !!(val & SMA_DISABLE);
+
if (mode != sma->mode) {
- pr_err("Mode changes not supported yet.\n");
- return -EOPNOTSUPP;
+ if (mode == SMA_MODE_IN)
+ ptp_ocp_sma_store_output(bp, sma_nr, 0);
+ else
+ ptp_ocp_sma_store_inputs(bp, sma_nr, 0);
+ sma->mode = mode;
}
- if (sma->mode == SMA_MODE_IN)
- ptp_ocp_sma_store_inputs(bp, val, shift);
+ if (!sma->fixed_dir)
+ val |= SMA_ENABLE; /* add enable bit */
+
+ if (sma->disabled)
+ val = 0;
+
+ if (mode == SMA_MODE_IN)
+ ptp_ocp_sma_store_inputs(bp, sma_nr, val);
else
- ptp_ocp_sma_store_output(bp, val, shift);
+ ptp_ocp_sma_store_output(bp, sma_nr, val);
return 0;
}
@@ -1732,7 +2266,7 @@ sma1_store(struct device *dev, struct device_attribute *attr,
struct ptp_ocp *bp = dev_get_drvdata(dev);
int err;
- err = ptp_ocp_sma_store(bp, buf, 1, 0);
+ err = ptp_ocp_sma_store(bp, buf, 1);
return err ? err : count;
}
@@ -1743,7 +2277,7 @@ sma2_store(struct device *dev, struct device_attribute *attr,
struct ptp_ocp *bp = dev_get_drvdata(dev);
int err;
- err = ptp_ocp_sma_store(bp, buf, 2, 16);
+ err = ptp_ocp_sma_store(bp, buf, 2);
return err ? err : count;
}
@@ -1754,7 +2288,7 @@ sma3_store(struct device *dev, struct device_attribute *attr,
struct ptp_ocp *bp = dev_get_drvdata(dev);
int err;
- err = ptp_ocp_sma_store(bp, buf, 3, 0);
+ err = ptp_ocp_sma_store(bp, buf, 3);
return err ? err : count;
}
@@ -1765,7 +2299,7 @@ sma4_store(struct device *dev, struct device_attribute *attr,
struct ptp_ocp *bp = dev_get_drvdata(dev);
int err;
- err = ptp_ocp_sma_store(bp, buf, 4, 16);
+ err = ptp_ocp_sma_store(bp, buf, 4);
return err ? err : count;
}
static DEVICE_ATTR_RW(sma1);
@@ -1789,13 +2323,263 @@ available_sma_outputs_show(struct device *dev,
}
static DEVICE_ATTR_RO(available_sma_outputs);
+#define EXT_ATTR_RO(_group, _name, _val) \
+ struct dev_ext_attribute dev_attr_##_group##_val##_##_name = \
+ { __ATTR_RO(_name), (void *)_val }
+#define EXT_ATTR_RW(_group, _name, _val) \
+ struct dev_ext_attribute dev_attr_##_group##_val##_##_name = \
+ { __ATTR_RW(_name), (void *)_val }
+#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
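
Each sysfs file created from these macros carries its generator index in the ->var pointer, so one show/store pair serves all four instances. For reference, EXT_ATTR_RW(signal, signal, 0) expands to roughly:

struct dev_ext_attribute dev_attr_signal0_signal = {
        __ATTR_RW(signal), (void *)0
};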
+
+/* period [duty [phase [polarity]]] */
+static ssize_t
+signal_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ struct ptp_ocp_signal s = { };
+ int gen = (uintptr_t)ea->var;
+ int argc, err;
+ char **argv;
+
+ argv = argv_split(GFP_KERNEL, buf, &argc);
+ if (!argv)
+ return -ENOMEM;
+
+ err = -EINVAL;
+ s.duty = bp->signal[gen].duty;
+ s.phase = bp->signal[gen].phase;
+ s.period = bp->signal[gen].period;
+ s.polarity = bp->signal[gen].polarity;
+
+ switch (argc) {
+ case 4:
+ argc--;
+ err = kstrtobool(argv[argc], &s.polarity);
+ if (err)
+ goto out;
+ fallthrough;
+ case 3:
+ argc--;
+ err = kstrtou64(argv[argc], 0, &s.phase);
+ if (err)
+ goto out;
+ fallthrough;
+ case 2:
+ argc--;
+ err = kstrtoint(argv[argc], 0, &s.duty);
+ if (err)
+ goto out;
+ fallthrough;
+ case 1:
+ argc--;
+ err = kstrtou64(argv[argc], 0, &s.period);
+ if (err)
+ goto out;
+ break;
+ default:
+ goto out;
+ }
+
+ err = ptp_ocp_signal_set(bp, gen, &s);
+ if (err)
+ goto out;
+
+ err = ptp_ocp_signal_enable(bp->signal_out[gen], gen, s.period != 0);
+
+out:
+ argv_free(argv);
+ return err ? err : count;
+}
+
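
A hedged usage example for the parser above, writing through the sysfs group this patch registers (the path assumes the timecard class layout; adjust the device name as needed):

/* Sketch: start generator 1 with a 1 s period and 30% duty cycle. */
#include <stdio.h>

static int start_gen1(void)
{
        FILE *f = fopen("/sys/class/timecard/ocp0/gen1/signal", "w");

        if (!f)
                return -1;
        fprintf(f, "1000000000 30\n"); /* period[ns] duty[%] */
        return fclose(f);
}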
+static ssize_t
+signal_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ struct ptp_ocp_signal *signal;
+ struct timespec64 ts;
+ ssize_t count;
+ int i;
+
+ i = (uintptr_t)ea->var;
+ signal = &bp->signal[i];
+
+ count = sysfs_emit(buf, "%llu %d %llu %d", signal->period,
+ signal->duty, signal->phase, signal->polarity);
+
+ ts = ktime_to_timespec64(signal->start);
+ count += sysfs_emit_at(buf, count, " %ptT TAI\n", &ts);
+
+ return count;
+}
+static EXT_ATTR_RW(signal, signal, 0);
+static EXT_ATTR_RW(signal, signal, 1);
+static EXT_ATTR_RW(signal, signal, 2);
+static EXT_ATTR_RW(signal, signal, 3);
+
+static ssize_t
+duty_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ int i = (uintptr_t)ea->var;
+
+ return sysfs_emit(buf, "%d\n", bp->signal[i].duty);
+}
+static EXT_ATTR_RO(signal, duty, 0);
+static EXT_ATTR_RO(signal, duty, 1);
+static EXT_ATTR_RO(signal, duty, 2);
+static EXT_ATTR_RO(signal, duty, 3);
+
+static ssize_t
+period_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ int i = (uintptr_t)ea->var;
+
+ return sysfs_emit(buf, "%llu\n", bp->signal[i].period);
+}
+static EXT_ATTR_RO(signal, period, 0);
+static EXT_ATTR_RO(signal, period, 1);
+static EXT_ATTR_RO(signal, period, 2);
+static EXT_ATTR_RO(signal, period, 3);
+
+static ssize_t
+phase_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ int i = (uintptr_t)ea->var;
+
+ return sysfs_emit(buf, "%llu\n", bp->signal[i].phase);
+}
+static EXT_ATTR_RO(signal, phase, 0);
+static EXT_ATTR_RO(signal, phase, 1);
+static EXT_ATTR_RO(signal, phase, 2);
+static EXT_ATTR_RO(signal, phase, 3);
+
+static ssize_t
+polarity_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ int i = (uintptr_t)ea->var;
+
+ return sysfs_emit(buf, "%d\n", bp->signal[i].polarity);
+}
+static EXT_ATTR_RO(signal, polarity, 0);
+static EXT_ATTR_RO(signal, polarity, 1);
+static EXT_ATTR_RO(signal, polarity, 2);
+static EXT_ATTR_RO(signal, polarity, 3);
+
+static ssize_t
+running_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ int i = (uintptr_t)ea->var;
+
+ return sysfs_emit(buf, "%d\n", bp->signal[i].running);
+}
+static EXT_ATTR_RO(signal, running, 0);
+static EXT_ATTR_RO(signal, running, 1);
+static EXT_ATTR_RO(signal, running, 2);
+static EXT_ATTR_RO(signal, running, 3);
+
+static ssize_t
+start_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ int i = (uintptr_t)ea->var;
+ struct timespec64 ts;
+
+ ts = ktime_to_timespec64(bp->signal[i].start);
+ return sysfs_emit(buf, "%llu.%lu\n", ts.tv_sec, ts.tv_nsec);
+}
+static EXT_ATTR_RO(signal, start, 0);
+static EXT_ATTR_RO(signal, start, 1);
+static EXT_ATTR_RO(signal, start, 2);
+static EXT_ATTR_RO(signal, start, 3);
+
+static ssize_t
+seconds_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ int idx = (uintptr_t)ea->var;
+ u32 val;
+ int err;
+
+ err = kstrtou32(buf, 0, &val);
+ if (err)
+ return err;
+ if (val > 0xff)
+ return -EINVAL;
+
+ if (val)
+ val = (val << 8) | 0x1;
+
+ iowrite32(val, &bp->freq_in[idx]->ctrl);
+
+ return count;
+}
+
+static ssize_t
+seconds_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ int idx = (uintptr_t)ea->var;
+ u32 val;
+
+ val = ioread32(&bp->freq_in[idx]->ctrl);
+ if (val & 1)
+ val = (val >> 8) & 0xff;
+ else
+ val = 0;
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+static EXT_ATTR_RW(freq, seconds, 0);
+static EXT_ATTR_RW(freq, seconds, 1);
+static EXT_ATTR_RW(freq, seconds, 2);
+static EXT_ATTR_RW(freq, seconds, 3);
+
+static ssize_t
+frequency_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ int idx = (uintptr_t)ea->var;
+ u32 val;
+
+ val = ioread32(&bp->freq_in[idx]->status);
+ if (val & FREQ_STATUS_ERROR)
+ return sysfs_emit(buf, "error\n");
+ if (val & FREQ_STATUS_OVERRUN)
+ return sysfs_emit(buf, "overrun\n");
+ if (val & FREQ_STATUS_VALID)
+ return sysfs_emit(buf, "%lu\n", val & FREQ_STATUS_MASK);
+ return 0;
+}
+static EXT_ATTR_RO(freq, frequency, 0);
+static EXT_ATTR_RO(freq, frequency, 1);
+static EXT_ATTR_RO(freq, frequency, 2);
+static EXT_ATTR_RO(freq, frequency, 3);
+
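
Pairing the two attributes gives a simple polling flow: write a window length to seconds, wait, then read frequency. A hypothetical user-space sampler (paths again assume the timecard class layout):

/* Sketch: 10 s window on FREQ1, then poll the result. */
#include <stdio.h>

static void sample_freq1(void)
{
        FILE *f;
        char line[32];

        f = fopen("/sys/class/timecard/ocp0/freq1/seconds", "w");
        if (f) {
                fprintf(f, "10\n");     /* 10 s window; 0 disables */
                fclose(f);
        }

        f = fopen("/sys/class/timecard/ocp0/freq1/frequency", "r");
        if (!f)
                return;
        if (fgets(line, sizeof(line), f))
                printf("FREQ1: %s", line); /* count, "error" or "overrun" */
        fclose(f);
}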
static ssize_t
serialnum_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
- if (!bp->has_serial)
- ptp_ocp_get_serial_number(bp);
+ if (!bp->has_eeprom_data)
+ ptp_ocp_read_eeprom(bp);
return sysfs_emit(buf, "%pM\n", bp->serial);
}
@@ -1953,7 +2737,122 @@ available_clock_sources_show(struct device *dev,
}
static DEVICE_ATTR_RO(available_clock_sources);
-static struct attribute *timecard_attrs[] = {
+static ssize_t
+clock_status_drift_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ u32 val;
+ int res;
+
+ val = ioread32(&bp->reg->status_drift);
+ res = (val & ~INT_MAX) ? -1 : 1;
+ res *= (val & INT_MAX);
+ return sysfs_emit(buf, "%d\n", res);
+}
+static DEVICE_ATTR_RO(clock_status_drift);
+
+static ssize_t
+clock_status_offset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ u32 val;
+ int res;
+
+ val = ioread32(&bp->reg->status_offset);
+ res = (val & ~INT_MAX) ? -1 : 1;
+ res *= (val & INT_MAX);
+ return sysfs_emit(buf, "%d\n", res);
+}
+static DEVICE_ATTR_RO(clock_status_offset);
+
+static ssize_t
+tod_correction_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ u32 val;
+ int res;
+
+ val = ioread32(&bp->tod->adj_sec);
+ res = (val & ~INT_MAX) ? -1 : 1;
+ res *= (val & INT_MAX);
+ return sysfs_emit(buf, "%d\n", res);
+}
+
+static ssize_t
+tod_correction_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ unsigned long flags;
+ int err, res;
+ u32 val = 0;
+
+ err = kstrtos32(buf, 0, &res);
+ if (err)
+ return err;
+ if (res < 0) {
+ res *= -1;
+ val |= BIT(31);
+ }
+ val |= res;
+
+ spin_lock_irqsave(&bp->lock, flags);
+ iowrite32(val, &bp->tod->adj_sec);
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ return count;
+}
+static DEVICE_ATTR_RW(tod_correction);
+
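
clock_status_drift, clock_status_offset and tod_correction all share a sign-magnitude register format: bit 31 holds the sign, bits 30:0 the magnitude. The driver open-codes the conversion; a sketch of both directions (assumes the magnitude fits, i.e. val > INT_MIN):

/* Sketch: sign-magnitude <-> two's complement conversions. */
static inline s32 sign_magnitude_to_s32(u32 val)
{
        s32 mag = val & INT_MAX;

        return (val & BIT(31)) ? -mag : mag;
}

static inline u32 s32_to_sign_magnitude(s32 val)
{
        return val < 0 ? (BIT(31) | (u32)-val) : (u32)val;
}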
+#define _DEVICE_SIGNAL_GROUP_ATTRS(_nr) \
+ static struct attribute *fb_timecard_signal##_nr##_attrs[] = { \
+ &dev_attr_signal##_nr##_signal.attr.attr, \
+ &dev_attr_signal##_nr##_duty.attr.attr, \
+ &dev_attr_signal##_nr##_phase.attr.attr, \
+ &dev_attr_signal##_nr##_period.attr.attr, \
+ &dev_attr_signal##_nr##_polarity.attr.attr, \
+ &dev_attr_signal##_nr##_running.attr.attr, \
+ &dev_attr_signal##_nr##_start.attr.attr, \
+ NULL, \
+ }
+
+#define DEVICE_SIGNAL_GROUP(_name, _nr) \
+ _DEVICE_SIGNAL_GROUP_ATTRS(_nr); \
+ static const struct attribute_group \
+ fb_timecard_signal##_nr##_group = { \
+ .name = #_name, \
+ .attrs = fb_timecard_signal##_nr##_attrs, \
+}
+
+DEVICE_SIGNAL_GROUP(gen1, 0);
+DEVICE_SIGNAL_GROUP(gen2, 1);
+DEVICE_SIGNAL_GROUP(gen3, 2);
+DEVICE_SIGNAL_GROUP(gen4, 3);
+
+#define _DEVICE_FREQ_GROUP_ATTRS(_nr) \
+ static struct attribute *fb_timecard_freq##_nr##_attrs[] = { \
+ &dev_attr_freq##_nr##_seconds.attr.attr, \
+ &dev_attr_freq##_nr##_frequency.attr.attr, \
+ NULL, \
+ }
+
+#define DEVICE_FREQ_GROUP(_name, _nr) \
+ _DEVICE_FREQ_GROUP_ATTRS(_nr); \
+ static const struct attribute_group \
+ fb_timecard_freq##_nr##_group = { \
+ .name = #_name, \
+ .attrs = fb_timecard_freq##_nr##_attrs, \
+}
+
+DEVICE_FREQ_GROUP(freq1, 0);
+DEVICE_FREQ_GROUP(freq2, 1);
+DEVICE_FREQ_GROUP(freq3, 2);
+DEVICE_FREQ_GROUP(freq4, 3);
+
+static struct attribute *fb_timecard_attrs[] = {
&dev_attr_serialnum.attr,
&dev_attr_gnss_sync.attr,
&dev_attr_clock_source.attr,
@@ -1964,38 +2863,119 @@ static struct attribute *timecard_attrs[] = {
&dev_attr_sma4.attr,
&dev_attr_available_sma_inputs.attr,
&dev_attr_available_sma_outputs.attr,
+ &dev_attr_clock_status_drift.attr,
+ &dev_attr_clock_status_offset.attr,
&dev_attr_irig_b_mode.attr,
&dev_attr_utc_tai_offset.attr,
&dev_attr_ts_window_adjust.attr,
+ &dev_attr_tod_correction.attr,
NULL,
};
-ATTRIBUTE_GROUPS(timecard);
+static const struct attribute_group fb_timecard_group = {
+ .attrs = fb_timecard_attrs,
+};
+static const struct ocp_attr_group fb_timecard_groups[] = {
+ { .cap = OCP_CAP_BASIC, .group = &fb_timecard_group },
+ { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal0_group },
+ { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal1_group },
+ { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal2_group },
+ { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal3_group },
+ { .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq0_group },
+ { .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq1_group },
+ { .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq2_group },
+ { .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq3_group },
+ { },
+};
-static const char *
-gpio_map(u32 gpio, u32 bit, const char *pri, const char *sec, const char *def)
+static void
+gpio_input_map(char *buf, struct ptp_ocp *bp, u16 map[][2], u16 bit,
+ const char *def)
{
- const char *ans;
+ int i;
- if (gpio & (1 << bit))
- ans = pri;
- else if (gpio & (1 << (bit + 16)))
- ans = sec;
- else
- ans = def;
- return ans;
+ for (i = 0; i < 4; i++) {
+ if (bp->sma[i].mode != SMA_MODE_IN)
+ continue;
+ if (map[i][0] & (1 << bit)) {
+ sprintf(buf, "sma%d", i + 1);
+ return;
+ }
+ }
+ if (!def)
+ def = "----";
+ strcpy(buf, def);
}
static void
-gpio_multi_map(char *buf, u32 gpio, u32 bit,
- const char *pri, const char *sec, const char *def)
+gpio_output_map(char *buf, struct ptp_ocp *bp, u16 map[][2], u16 bit)
{
char *ans = buf;
+ int i;
+
+ strcpy(ans, "----");
+ for (i = 0; i < 4; i++) {
+ if (bp->sma[i].mode != SMA_MODE_OUT)
+ continue;
+ if (map[i][1] & (1 << bit))
+ ans += sprintf(ans, "sma%d ", i + 1);
+ }
+}
+
+static void
+_signal_summary_show(struct seq_file *s, struct ptp_ocp *bp, int nr)
+{
+ struct signal_reg __iomem *reg = bp->signal_out[nr]->mem;
+ struct ptp_ocp_signal *signal = &bp->signal[nr];
+ char label[8];
+ bool on;
+ u32 val;
+
+ if (!signal)
+ return;
+
+ on = signal->running;
+ sprintf(label, "GEN%d", nr + 1);
+ seq_printf(s, "%7s: %s, period:%llu duty:%d%% phase:%llu pol:%d",
+ label, on ? " ON" : "OFF",
+ signal->period, signal->duty, signal->phase,
+ signal->polarity);
+
+ val = ioread32(&reg->enable);
+ seq_printf(s, " [%x", val);
+ val = ioread32(&reg->status);
+ seq_printf(s, " %x]", val);
+
+ seq_printf(s, " start:%llu\n", signal->start);
+}
+
+static void
+_frequency_summary_show(struct seq_file *s, int nr,
+ struct frequency_reg __iomem *reg)
+{
+ char label[8];
+ bool on;
+ u32 val;
+
+ if (!reg)
+ return;
- strcpy(ans, def);
- if (gpio & (1 << bit))
- ans += sprintf(ans, "%s ", pri);
- if (gpio & (1 << (bit + 16)))
- ans += sprintf(ans, "%s ", sec);
+ sprintf(label, "FREQ%d", nr + 1);
+ val = ioread32(&reg->ctrl);
+ on = val & 1;
+ val = (val >> 8) & 0xff;
+ seq_printf(s, "%7s: %s, sec:%u",
+ label,
+ on ? " ON" : "OFF",
+ val);
+
+ val = ioread32(&reg->status);
+ if (val & FREQ_STATUS_ERROR)
+ seq_printf(s, ", error");
+ if (val & FREQ_STATUS_OVERRUN)
+ seq_printf(s, ", overrun");
+ if (val & FREQ_STATUS_VALID)
+ seq_printf(s, ", freq %lu Hz", val & FREQ_STATUS_MASK);
+ seq_printf(s, " reg:%x\n", val);
}
static int
@@ -2003,40 +2983,72 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
{
struct device *dev = s->private;
struct ptp_system_timestamp sts;
- u32 sma_in, sma_out, ctrl, val;
struct ts_reg __iomem *ts_reg;
struct timespec64 ts;
struct ptp_ocp *bp;
- const char *src;
+ u16 sma_val[4][2];
+ char *src, *buf;
+ u32 ctrl, val;
bool on, map;
- char *buf;
+ int i;
buf = (char *)__get_free_page(GFP_KERNEL);
if (!buf)
return -ENOMEM;
bp = dev_get_drvdata(dev);
- sma_in = ioread32(&bp->sma->gpio1);
- sma_out = ioread32(&bp->sma->gpio2);
seq_printf(s, "%7s: /dev/ptp%d\n", "PTP", ptp_clock_index(bp->ptp));
+ if (bp->gnss_port != -1)
+ seq_printf(s, "%7s: /dev/ttyS%d\n", "GNSS1", bp->gnss_port);
+ if (bp->gnss2_port != -1)
+ seq_printf(s, "%7s: /dev/ttyS%d\n", "GNSS2", bp->gnss2_port);
+ if (bp->mac_port != -1)
+ seq_printf(s, "%7s: /dev/ttyS%d\n", "MAC", bp->mac_port);
+ if (bp->nmea_port != -1)
+ seq_printf(s, "%7s: /dev/ttyS%d\n", "NMEA", bp->nmea_port);
+
+ memset(sma_val, 0xff, sizeof(sma_val));
+ if (bp->sma_map1) {
+ u32 reg;
+
+ reg = ioread32(&bp->sma_map1->gpio1);
+ sma_val[0][0] = reg & 0xffff;
+ sma_val[1][0] = reg >> 16;
+
+ reg = ioread32(&bp->sma_map1->gpio2);
+ sma_val[2][1] = reg & 0xffff;
+ sma_val[3][1] = reg >> 16;
+
+ reg = ioread32(&bp->sma_map2->gpio1);
+ sma_val[2][0] = reg & 0xffff;
+ sma_val[3][0] = reg >> 16;
+
+ reg = ioread32(&bp->sma_map2->gpio2);
+ sma_val[0][1] = reg & 0xffff;
+ sma_val[1][1] = reg >> 16;
+ }
sma1_show(dev, NULL, buf);
- seq_printf(s, " sma1: %s", buf);
+ seq_printf(s, " sma1: %04x,%04x %s",
+ sma_val[0][0], sma_val[0][1], buf);
sma2_show(dev, NULL, buf);
- seq_printf(s, " sma2: %s", buf);
+ seq_printf(s, " sma2: %04x,%04x %s",
+ sma_val[1][0], sma_val[1][1], buf);
sma3_show(dev, NULL, buf);
- seq_printf(s, " sma3: %s", buf);
+ seq_printf(s, " sma3: %04x,%04x %s",
+ sma_val[2][0], sma_val[2][1], buf);
sma4_show(dev, NULL, buf);
- seq_printf(s, " sma4: %s", buf);
+ seq_printf(s, " sma4: %04x,%04x %s",
+ sma_val[3][0], sma_val[3][1], buf);
if (bp->ts0) {
ts_reg = bp->ts0->mem;
on = ioread32(&ts_reg->enable);
- src = "GNSS";
+ src = "GNSS1";
seq_printf(s, "%7s: %s, src: %s\n", "TS0",
on ? " ON" : "OFF", src);
}
@@ -2044,17 +3056,33 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
if (bp->ts1) {
ts_reg = bp->ts1->mem;
on = ioread32(&ts_reg->enable);
- src = gpio_map(sma_in, 2, "sma1", "sma2", "----");
+ gpio_input_map(buf, bp, sma_val, 2, NULL);
seq_printf(s, "%7s: %s, src: %s\n", "TS1",
- on ? " ON" : "OFF", src);
+ on ? " ON" : "OFF", buf);
}
if (bp->ts2) {
ts_reg = bp->ts2->mem;
on = ioread32(&ts_reg->enable);
- src = gpio_map(sma_in, 3, "sma1", "sma2", "----");
+ gpio_input_map(buf, bp, sma_val, 3, NULL);
seq_printf(s, "%7s: %s, src: %s\n", "TS2",
- on ? " ON" : "OFF", src);
+ on ? " ON" : "OFF", buf);
+ }
+
+ if (bp->ts3) {
+ ts_reg = bp->ts3->mem;
+ on = ioread32(&ts_reg->enable);
+ gpio_input_map(buf, bp, sma_val, 6, NULL);
+ seq_printf(s, "%7s: %s, src: %s\n", "TS3",
+ on ? " ON" : "OFF", buf);
+ }
+
+ if (bp->ts4) {
+ ts_reg = bp->ts4->mem;
+ on = ioread32(&ts_reg->enable);
+ gpio_input_map(buf, bp, sma_val, 7, NULL);
+ seq_printf(s, "%7s: %s, src: %s\n", "TS4",
+ on ? " ON" : "OFF", buf);
}
if (bp->pps) {
@@ -2062,7 +3090,7 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
src = "PHC";
on = ioread32(&ts_reg->enable);
map = !!(bp->pps_req_map & OCP_REQ_TIMESTAMP);
- seq_printf(s, "%7s: %s, src: %s\n", "TS3",
+ seq_printf(s, "%7s: %s, src: %s\n", "TS5",
on && map ? " ON" : "OFF", src);
map = !!(bp->pps_req_map & OCP_REQ_PPS);
@@ -2070,11 +3098,19 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
on && map ? " ON" : "OFF", src);
}
+ if (bp->fw_cap & OCP_CAP_SIGNAL)
+ for (i = 0; i < 4; i++)
+ _signal_summary_show(s, bp, i);
+
+ if (bp->fw_cap & OCP_CAP_FREQ)
+ for (i = 0; i < 4; i++)
+ _frequency_summary_show(s, i, bp->freq_in[i]);
+
if (bp->irig_out) {
ctrl = ioread32(&bp->irig_out->ctrl);
on = ctrl & IRIG_M_CTRL_ENABLE;
val = ioread32(&bp->irig_out->status);
- gpio_multi_map(buf, sma_out, 4, "sma3", "sma4", "----");
+ gpio_output_map(buf, bp, sma_val, 4);
seq_printf(s, "%7s: %s, error: %d, mode %d, out: %s\n", "IRIG",
on ? " ON" : "OFF", val, (ctrl >> 16), buf);
}
@@ -2082,15 +3118,15 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
if (bp->irig_in) {
on = ioread32(&bp->irig_in->ctrl) & IRIG_S_CTRL_ENABLE;
val = ioread32(&bp->irig_in->status);
- src = gpio_map(sma_in, 4, "sma1", "sma2", "----");
+ gpio_input_map(buf, bp, sma_val, 4, NULL);
seq_printf(s, "%7s: %s, error: %d, src: %s\n", "IRIG in",
- on ? " ON" : "OFF", val, src);
+ on ? " ON" : "OFF", val, buf);
}
if (bp->dcf_out) {
on = ioread32(&bp->dcf_out->ctrl) & DCF_M_CTRL_ENABLE;
val = ioread32(&bp->dcf_out->status);
- gpio_multi_map(buf, sma_out, 5, "sma3", "sma4", "----");
+ gpio_output_map(buf, bp, sma_val, 5);
seq_printf(s, "%7s: %s, error: %d, out: %s\n", "DCF",
on ? " ON" : "OFF", val, buf);
}
@@ -2098,9 +3134,9 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
if (bp->dcf_in) {
on = ioread32(&bp->dcf_in->ctrl) & DCF_S_CTRL_ENABLE;
val = ioread32(&bp->dcf_in->status);
- src = gpio_map(sma_in, 5, "sma1", "sma2", "----");
+ gpio_input_map(buf, bp, sma_val, 5, NULL);
seq_printf(s, "%7s: %s, error: %d, src: %s\n", "DCF in",
- on ? " ON" : "OFF", val, src);
+ on ? " ON" : "OFF", val, buf);
}
if (bp->nmea_out) {
@@ -2113,12 +3149,13 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
/* compute src for PPS1, used below. */
if (bp->pps_select) {
val = ioread32(&bp->pps_select->gpio1);
+ src = &buf[80];
if (val & 0x01)
- src = gpio_map(sma_in, 0, "sma1", "sma2", "----");
+ gpio_input_map(src, bp, sma_val, 0, NULL);
else if (val & 0x02)
src = "MAC";
else if (val & 0x04)
- src = "GNSS";
+ src = "GNSS1";
else
src = "----";
} else {
@@ -2151,8 +3188,8 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
/* reuses PPS1 src from earlier */
seq_printf(s, "MAC PPS1 src: %s\n", src);
- src = gpio_map(sma_in, 1, "sma1", "sma2", "GNSS2");
- seq_printf(s, "MAC PPS2 src: %s\n", src);
+ gpio_input_map(buf, bp, sma_val, 1, "GNSS2");
+ seq_printf(s, "MAC PPS2 src: %s\n", buf);
if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, &sts)) {
struct timespec64 sys_ts;
@@ -2179,6 +3216,57 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(ptp_ocp_summary);
+static int
+ptp_ocp_tod_status_show(struct seq_file *s, void *data)
+{
+ struct device *dev = s->private;
+ struct ptp_ocp *bp;
+ u32 val;
+ int idx;
+
+ bp = dev_get_drvdata(dev);
+
+ val = ioread32(&bp->tod->ctrl);
+ if (!(val & TOD_CTRL_ENABLE)) {
+ seq_printf(s, "TOD Slave disabled\n");
+ return 0;
+ }
+ seq_printf(s, "TOD Slave enabled, Control Register 0x%08X\n", val);
+
+ idx = val & TOD_CTRL_PROTOCOL ? 4 : 0;
+ idx += (val >> 16) & 3;
+ seq_printf(s, "Protocol %s\n", ptp_ocp_tod_proto_name(idx));
+
+ idx = (val >> TOD_CTRL_GNSS_SHIFT) & TOD_CTRL_GNSS_MASK;
+ seq_printf(s, "GNSS %s\n", ptp_ocp_tod_gnss_name(idx));
+
+ val = ioread32(&bp->tod->version);
+ seq_printf(s, "TOD Version %d.%d.%d\n",
+ val >> 24, (val >> 16) & 0xff, val & 0xffff);
+
+ val = ioread32(&bp->tod->status);
+ seq_printf(s, "Status register: 0x%08X\n", val);
+
+ val = ioread32(&bp->tod->adj_sec);
+ idx = (val & ~INT_MAX) ? -1 : 1;
+ idx *= (val & INT_MAX);
+ seq_printf(s, "Correction seconds: %d\n", idx);
+
+ val = ioread32(&bp->tod->utc_status);
+ seq_printf(s, "UTC status register: 0x%08X\n", val);
+ seq_printf(s, "UTC offset: %d valid:%d\n",
+ val & TOD_STATUS_UTC_MASK, val & TOD_STATUS_UTC_VALID ? 1 : 0);
+ seq_printf(s, "Leap second info valid:%d, Leap second announce %d\n",
+ val & TOD_STATUS_LEAP_VALID ? 1 : 0,
+ val & TOD_STATUS_LEAP_ANNOUNCE ? 1 : 0);
+
+ val = ioread32(&bp->tod->leap);
+ seq_printf(s, "Time to next leap second (in sec): %d\n", (s32) val);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ptp_ocp_tod_status);
+
static struct dentry *ptp_ocp_debugfs_root;
static void
@@ -2190,6 +3278,9 @@ ptp_ocp_debugfs_add_device(struct ptp_ocp *bp)
bp->debug_root = d;
debugfs_create_file("summary", 0444, bp->debug_root,
&bp->dev, &ptp_ocp_summary_fops);
+ if (bp->tod)
+ debugfs_create_file("tod_status", 0444, bp->debug_root,
+ &bp->dev, &ptp_ocp_tod_status_fops);
}
static void
@@ -2296,6 +3387,7 @@ ptp_ocp_complete(struct ptp_ocp *bp)
{
struct pps_device *pps;
char buf[32];
+ int i, err;
if (bp->gnss_port != -1) {
sprintf(buf, "ttyS%d", bp->gnss_port);
@@ -2320,8 +3412,13 @@ ptp_ocp_complete(struct ptp_ocp *bp)
if (pps)
ptp_ocp_symlink(bp, pps->dev, "pps");
- if (device_add_groups(&bp->dev, timecard_groups))
- pr_err("device add groups failed\n");
+ for (i = 0; bp->attr_tbl[i].cap; i++) {
+ if (!(bp->attr_tbl[i].cap & bp->fw_cap))
+ continue;
+ err = sysfs_create_group(&bp->dev.kobj, bp->attr_tbl[i].group);
+ if (err)
+ return err;
+ }
ptp_ocp_debugfs_add_device(bp);
@@ -2368,20 +3465,15 @@ ptp_ocp_info(struct ptp_ocp *bp)
u32 reg;
ptp_ocp_phc_info(bp);
- if (bp->tod)
- ptp_ocp_tod_info(bp);
- if (bp->image) {
- u32 ver = ioread32(&bp->image->version);
+ dev_info(dev, "version %x\n", bp->fw_version);
+ if (bp->fw_version & 0xffff)
+ dev_info(dev, "regular image, version %d\n",
+ bp->fw_version & 0xffff);
+ else
+ dev_info(dev, "golden image, version %d\n",
+ bp->fw_version >> 16);
- dev_info(dev, "version %x\n", ver);
- if (ver & 0xffff)
- dev_info(dev, "regular image, version %d\n",
- ver & 0xffff);
- else
- dev_info(dev, "golden image, version %d\n",
- ver >> 16);
- }
ptp_ocp_serial_info(dev, "GNSS", bp->gnss_port, 115200);
ptp_ocp_serial_info(dev, "GNSS2", bp->gnss2_port, 115200);
ptp_ocp_serial_info(dev, "MAC", bp->mac_port, 57600);
@@ -2399,17 +3491,22 @@ static void
ptp_ocp_detach_sysfs(struct ptp_ocp *bp)
{
struct device *dev = &bp->dev;
+ int i;
sysfs_remove_link(&dev->kobj, "ttyGNSS");
sysfs_remove_link(&dev->kobj, "ttyMAC");
sysfs_remove_link(&dev->kobj, "ptp");
sysfs_remove_link(&dev->kobj, "pps");
- device_remove_groups(dev, timecard_groups);
+ if (bp->attr_tbl)
+ for (i = 0; bp->attr_tbl[i].cap; i++)
+ sysfs_remove_group(&dev->kobj, bp->attr_tbl[i].group);
}
static void
ptp_ocp_detach(struct ptp_ocp *bp)
{
+ int i;
+
ptp_ocp_debugfs_remove_device(bp);
ptp_ocp_detach_sysfs(bp);
if (timer_pending(&bp->watchdog))
@@ -2420,8 +3517,15 @@ ptp_ocp_detach(struct ptp_ocp *bp)
ptp_ocp_unregister_ext(bp->ts1);
if (bp->ts2)
ptp_ocp_unregister_ext(bp->ts2);
+ if (bp->ts3)
+ ptp_ocp_unregister_ext(bp->ts3);
+ if (bp->ts4)
+ ptp_ocp_unregister_ext(bp->ts4);
if (bp->pps)
ptp_ocp_unregister_ext(bp->pps);
+ for (i = 0; i < 4; i++)
+ if (bp->signal_out[i])
+ ptp_ocp_unregister_ext(bp->signal_out[i]);
if (bp->gnss_port != -1)
serial8250_unregister_port(bp->gnss_port);
if (bp->gnss2_port != -1)
@@ -2440,6 +3544,7 @@ ptp_ocp_detach(struct ptp_ocp *bp)
pci_free_irq_vectors(bp->pdev);
if (bp->ptp)
ptp_clock_unregister(bp->ptp);
+ kfree(bp->ptp_info.pin_config);
device_unregister(&bp->dev);
}
@@ -2459,7 +3564,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "pci_enable_device\n");
- goto out_unregister;
+ goto out_free;
}
bp = devlink_priv(devlink);
@@ -2472,7 +3577,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* allow this - if not all of the IRQs are returned, skip the
* extra devices and just register the clock.
*/
- err = pci_alloc_irq_vectors(pdev, 1, 11, PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ err = pci_alloc_irq_vectors(pdev, 1, 17, PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (err < 0) {
dev_err(&pdev->dev, "alloc_irq_vectors err: %d\n", err);
goto out;
@@ -2505,7 +3610,7 @@ out:
pci_set_drvdata(pdev, NULL);
out_disable:
pci_disable_device(pdev);
-out_unregister:
+out_free:
devlink_free(devlink);
return err;
}
diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
index 8070f3fd98f0..7d4da9e605ef 100644
--- a/drivers/ptp/ptp_pch.c
+++ b/drivers/ptp/ptp_pch.c
@@ -10,9 +10,10 @@
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -100,7 +101,6 @@ struct pch_ts_regs {
#define PCH_ECS_ETH (1 << 0)
#define PCH_ECS_CAN (1 << 1)
-#define PCH_STATION_BYTES 6
#define PCH_IEEE1588_ETH (1 << 0)
#define PCH_IEEE1588_CAN (1 << 1)
@@ -115,8 +115,6 @@ struct pch_dev {
int exts0_enabled;
int exts1_enabled;
- u32 mem_base;
- u32 mem_size;
u32 irq;
struct pci_dev *pdev;
spinlock_t register_lock;
@@ -148,28 +146,15 @@ static inline void pch_eth_enable_set(struct pch_dev *chip)
static u64 pch_systime_read(struct pch_ts_regs __iomem *regs)
{
u64 ns;
- u32 lo, hi;
- lo = ioread32(&regs->systime_lo);
- hi = ioread32(&regs->systime_hi);
+ ns = ioread64_lo_hi(&regs->systime_lo);
- ns = ((u64) hi) << 32;
- ns |= lo;
- ns <<= TICKS_NS_SHIFT;
-
- return ns;
+ return ns << TICKS_NS_SHIFT;
}
static void pch_systime_write(struct pch_ts_regs __iomem *regs, u64 ns)
{
- u32 hi, lo;
-
- ns >>= TICKS_NS_SHIFT;
- hi = ns >> 32;
- lo = ns & 0xffffffff;
-
- iowrite32(lo, &regs->systime_lo);
- iowrite32(hi, &regs->systime_hi);
+ iowrite64_lo_hi(ns >> TICKS_NS_SHIFT, &regs->systime_lo);
}
static inline void pch_block_reset(struct pch_dev *chip)
@@ -235,16 +220,10 @@ u64 pch_rx_snap_read(struct pci_dev *pdev)
{
struct pch_dev *chip = pci_get_drvdata(pdev);
u64 ns;
- u32 lo, hi;
- lo = ioread32(&chip->regs->rx_snap_lo);
- hi = ioread32(&chip->regs->rx_snap_hi);
+ ns = ioread64_lo_hi(&chip->regs->rx_snap_lo);
- ns = ((u64) hi) << 32;
- ns |= lo;
- ns <<= TICKS_NS_SHIFT;
-
- return ns;
+ return ns << TICKS_NS_SHIFT;
}
EXPORT_SYMBOL(pch_rx_snap_read);
@@ -252,16 +231,10 @@ u64 pch_tx_snap_read(struct pci_dev *pdev)
{
struct pch_dev *chip = pci_get_drvdata(pdev);
u64 ns;
- u32 lo, hi;
-
- lo = ioread32(&chip->regs->tx_snap_lo);
- hi = ioread32(&chip->regs->tx_snap_hi);
- ns = ((u64) hi) << 32;
- ns |= lo;
- ns <<= TICKS_NS_SHIFT;
+ ns = ioread64_lo_hi(&chip->regs->tx_snap_lo);
- return ns;
+ return ns << TICKS_NS_SHIFT;
}
EXPORT_SYMBOL(pch_tx_snap_read);
@@ -292,8 +265,9 @@ static void pch_reset(struct pch_dev *chip)
*/
int pch_set_station_address(u8 *addr, struct pci_dev *pdev)
{
- s32 i;
struct pch_dev *chip = pci_get_drvdata(pdev);
+ bool valid;
+ u64 mac;
/* Verify the parameter */
if ((chip->regs == NULL) || addr == (u8 *)NULL) {
@@ -301,37 +275,15 @@ int pch_set_station_address(u8 *addr, struct pci_dev *pdev)
"invalid params returning PCH_INVALIDPARAM\n");
return PCH_INVALIDPARAM;
}
- /* For all station address bytes */
- for (i = 0; i < PCH_STATION_BYTES; i++) {
- u32 val;
- s32 tmp;
-
- tmp = hex_to_bin(addr[i * 3]);
- if (tmp < 0) {
- dev_err(&pdev->dev,
- "invalid params returning PCH_INVALIDPARAM\n");
- return PCH_INVALIDPARAM;
- }
- val = tmp * 16;
- tmp = hex_to_bin(addr[(i * 3) + 1]);
- if (tmp < 0) {
- dev_err(&pdev->dev,
- "invalid params returning PCH_INVALIDPARAM\n");
- return PCH_INVALIDPARAM;
- }
- val += tmp;
- /* Expects ':' separated addresses */
- if ((i < 5) && (addr[(i * 3) + 2] != ':')) {
- dev_err(&pdev->dev,
- "invalid params returning PCH_INVALIDPARAM\n");
- return PCH_INVALIDPARAM;
- }
- /* Ideally we should set the address only after validating
- entire string */
- dev_dbg(&pdev->dev, "invoking pch_station_set\n");
- iowrite32(val, &chip->regs->ts_st[i]);
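+ /* mac_pton() validates and converts the "xx:xx:xx:xx:xx:xx" string in one step */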
+ valid = mac_pton(addr, (u8 *)&mac);
+ if (!valid) {
+ dev_err(&pdev->dev, "invalid params returning PCH_INVALIDPARAM\n");
+ return PCH_INVALIDPARAM;
}
+
+ dev_dbg(&pdev->dev, "invoking pch_station_set\n");
+ iowrite64_lo_hi(mac, &chip->regs->ts_st);
return 0;
}
EXPORT_SYMBOL(pch_set_station_address);
@@ -344,19 +296,16 @@ static irqreturn_t isr(int irq, void *priv)
struct pch_dev *pch_dev = priv;
struct pch_ts_regs __iomem *regs = pch_dev->regs;
struct ptp_clock_event event;
- u32 ack = 0, lo, hi, val;
+ u32 ack = 0, val;
val = ioread32(&regs->event);
if (val & PCH_TSE_SNS) {
ack |= PCH_TSE_SNS;
if (pch_dev->exts0_enabled) {
- hi = ioread32(&regs->asms_hi);
- lo = ioread32(&regs->asms_lo);
event.type = PTP_CLOCK_EXTTS;
event.index = 0;
- event.timestamp = ((u64) hi) << 32;
- event.timestamp |= lo;
+ event.timestamp = ioread64_hi_lo(&regs->asms_hi);
event.timestamp <<= TICKS_NS_SHIFT;
ptp_clock_event(pch_dev->ptp_clock, &event);
}
@@ -365,12 +314,9 @@ static irqreturn_t isr(int irq, void *priv)
if (val & PCH_TSE_SNM) {
ack |= PCH_TSE_SNM;
if (pch_dev->exts1_enabled) {
- hi = ioread32(&regs->amms_hi);
- lo = ioread32(&regs->amms_lo);
event.type = PTP_CLOCK_EXTTS;
event.index = 1;
- event.timestamp = ((u64) hi) << 32;
- event.timestamp |= lo;
+ event.timestamp = ioread64_hi_lo(&regs->amms_hi);
event.timestamp <<= TICKS_NS_SHIFT;
ptp_clock_event(pch_dev->ptp_clock, &event);
}
@@ -501,31 +447,12 @@ static const struct ptp_clock_info ptp_pch_caps = {
.enable = ptp_pch_enable,
};
-#define pch_suspend NULL
-#define pch_resume NULL
-
static void pch_remove(struct pci_dev *pdev)
{
struct pch_dev *chip = pci_get_drvdata(pdev);
+ free_irq(pdev->irq, chip);
ptp_clock_unregister(chip->ptp_clock);
- /* free the interrupt */
- if (pdev->irq != 0)
- free_irq(pdev->irq, chip);
-
- /* unmap the virtual IO memory space */
- if (chip->regs != NULL) {
- iounmap(chip->regs);
- chip->regs = NULL;
- }
- /* release the reserved IO memory space */
- if (chip->mem_base != 0) {
- release_mem_region(chip->mem_base, chip->mem_size);
- chip->mem_base = 0;
- }
- pci_disable_device(pdev);
- kfree(chip);
- dev_info(&pdev->dev, "complete\n");
}
static s32
@@ -535,50 +462,29 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
unsigned long flags;
struct pch_dev *chip;
- chip = kzalloc(sizeof(struct pch_dev), GFP_KERNEL);
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
/* enable the 1588 pci device */
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret != 0) {
dev_err(&pdev->dev, "could not enable the pci device\n");
- goto err_pci_en;
+ return ret;
}
- chip->mem_base = pci_resource_start(pdev, IO_MEM_BAR);
- if (!chip->mem_base) {
+ ret = pcim_iomap_regions(pdev, BIT(IO_MEM_BAR), "1588_regs");
+ if (ret) {
dev_err(&pdev->dev, "could not locate IO memory address\n");
- ret = -ENODEV;
- goto err_pci_start;
- }
-
- /* retrieve the available length of the IO memory space */
- chip->mem_size = pci_resource_len(pdev, IO_MEM_BAR);
-
- /* allocate the memory for the device registers */
- if (!request_mem_region(chip->mem_base, chip->mem_size, "1588_regs")) {
- dev_err(&pdev->dev,
- "could not allocate register memory space\n");
- ret = -EBUSY;
- goto err_req_mem_region;
+ return ret;
}
/* get the virtual address to the 1588 registers */
- chip->regs = ioremap(chip->mem_base, chip->mem_size);
-
- if (!chip->regs) {
- dev_err(&pdev->dev, "Could not get virtual address\n");
- ret = -ENOMEM;
- goto err_ioremap;
- }
-
+ chip->regs = pcim_iomap_table(pdev)[IO_MEM_BAR];
chip->caps = ptp_pch_caps;
chip->ptp_clock = ptp_clock_register(&chip->caps, &pdev->dev);
- if (IS_ERR(chip->ptp_clock)) {
- ret = PTR_ERR(chip->ptp_clock);
- goto err_ptp_clock_reg;
- }
+ if (IS_ERR(chip->ptp_clock))
+ return PTR_ERR(chip->ptp_clock);
spin_lock_init(&chip->register_lock);
@@ -598,8 +504,7 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pch_reset(chip);
iowrite32(DEFAULT_ADDEND, &chip->regs->addend);
- iowrite32(1, &chip->regs->trgt_lo);
- iowrite32(0, &chip->regs->trgt_hi);
+ iowrite64_lo_hi(1, &chip->regs->trgt_lo);
iowrite32(PCH_TSE_TTIPEND, &chip->regs->event);
pch_eth_enable_set(chip);
@@ -617,21 +522,7 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_req_irq:
ptp_clock_unregister(chip->ptp_clock);
-err_ptp_clock_reg:
- iounmap(chip->regs);
- chip->regs = NULL;
-err_ioremap:
- release_mem_region(chip->mem_base, chip->mem_size);
-
-err_req_mem_region:
- chip->mem_base = 0;
-
-err_pci_start:
- pci_disable_device(pdev);
-
-err_pci_en:
- kfree(chip);
dev_err(&pdev->dev, "probe failed(ret=0x%x)\n", ret);
return ret;
@@ -646,33 +537,13 @@ static const struct pci_device_id pch_ieee1588_pcidev_id[] = {
};
MODULE_DEVICE_TABLE(pci, pch_ieee1588_pcidev_id);
-static SIMPLE_DEV_PM_OPS(pch_pm_ops, pch_suspend, pch_resume);
-
static struct pci_driver pch_driver = {
.name = KBUILD_MODNAME,
.id_table = pch_ieee1588_pcidev_id,
.probe = pch_probe,
.remove = pch_remove,
- .driver.pm = &pch_pm_ops,
};
-
-static void __exit ptp_pch_exit(void)
-{
- pci_unregister_driver(&pch_driver);
-}
-
-static s32 __init ptp_pch_init(void)
-{
- s32 ret;
-
- /* register the driver with the pci core */
- ret = pci_register_driver(&pch_driver);
-
- return ret;
-}
-
-module_init(ptp_pch_init);
-module_exit(ptp_pch_exit);
+module_pci_driver(pch_driver);
module_param_string(station,
pch_param.station, sizeof(pch_param.station), 0444);
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index 41b92dc2f011..9233bfedeb17 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -14,7 +14,7 @@ static ssize_t clock_name_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
- return snprintf(page, PAGE_SIZE-1, "%s\n", ptp->info->name);
+ return sysfs_emit(page, "%s\n", ptp->info->name);
}
static DEVICE_ATTR_RO(clock_name);
@@ -387,7 +387,7 @@ static ssize_t ptp_pin_show(struct device *dev, struct device_attribute *attr,
mutex_unlock(&ptp->pincfg_mux);
- return snprintf(page, PAGE_SIZE, "%u %u\n", func, chan);
+ return sysfs_emit(page, "%u %u\n", func, chan);
}
static ssize_t ptp_pin_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c
index ab1d233173e1..cb179a3ea508 100644
--- a/drivers/ptp/ptp_vclock.c
+++ b/drivers/ptp/ptp_vclock.c
@@ -57,6 +57,30 @@ static int ptp_vclock_gettime(struct ptp_clock_info *ptp,
return 0;
}
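+/* Pass the gettimex64 query to the parent PHC, then convert the result via the vclock timecounter. */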
+static int ptp_vclock_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct ptp_vclock *vclock = info_to_vclock(ptp);
+ struct ptp_clock *pptp = vclock->pclock;
+ struct timespec64 pts;
+ unsigned long flags;
+ int err;
+ u64 ns;
+
+ err = pptp->info->gettimex64(pptp->info, &pts, sts);
+ if (err)
+ return err;
+
+ spin_lock_irqsave(&vclock->lock, flags);
+ ns = timecounter_cyc2time(&vclock->tc, timespec64_to_ns(&pts));
+ spin_unlock_irqrestore(&vclock->lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
static int ptp_vclock_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
@@ -71,6 +95,28 @@ static int ptp_vclock_settime(struct ptp_clock_info *ptp,
return 0;
}
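+/* Cross-timestamp via the parent PHC, then rebase the device time onto the vclock. */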
+static int ptp_vclock_getcrosststamp(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *xtstamp)
+{
+ struct ptp_vclock *vclock = info_to_vclock(ptp);
+ struct ptp_clock *pptp = vclock->pclock;
+ unsigned long flags;
+ int err;
+ u64 ns;
+
+ err = pptp->info->getcrosststamp(pptp->info, xtstamp);
+ if (err)
+ return err;
+
+ spin_lock_irqsave(&vclock->lock, flags);
+ ns = timecounter_cyc2time(&vclock->tc, ktime_to_ns(xtstamp->device));
+ spin_unlock_irqrestore(&vclock->lock, flags);
+
+ xtstamp->device = ns_to_ktime(ns);
+
+ return 0;
+}
+
static long ptp_vclock_refresh(struct ptp_clock_info *ptp)
{
struct ptp_vclock *vclock = info_to_vclock(ptp);
@@ -84,11 +130,9 @@ static long ptp_vclock_refresh(struct ptp_clock_info *ptp)
static const struct ptp_clock_info ptp_vclock_info = {
.owner = THIS_MODULE,
.name = "ptp virtual clock",
- /* The maximum ppb value that long scaled_ppm can support */
- .max_adj = 32767999,
+ .max_adj = 500000000,
.adjfine = ptp_vclock_adjfine,
.adjtime = ptp_vclock_adjtime,
- .gettime64 = ptp_vclock_gettime,
.settime64 = ptp_vclock_settime,
.do_aux_work = ptp_vclock_refresh,
};
@@ -124,6 +168,12 @@ struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock)
vclock->pclock = pclock;
vclock->info = ptp_vclock_info;
+ if (pclock->info->gettimex64)
+ vclock->info.gettimex64 = ptp_vclock_gettimex;
+ else
+ vclock->info.gettime64 = ptp_vclock_gettime;
+ if (pclock->info->getcrosststamp)
+ vclock->info.getcrosststamp = ptp_vclock_getcrosststamp;
vclock->cc = ptp_vclock_cc;
snprintf(vclock->info.name, PTP_CLOCK_NAME_LEN, "ptp%d_virt",
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 1c35fed20d34..c8ce6e5eea24 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -984,6 +984,7 @@ config REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY
tristate "Raspberry Pi 7-inch touchscreen panel ATTINY regulator"
depends on BACKLIGHT_CLASS_DEVICE
depends on I2C
+ depends on OF_GPIO
select REGMAP_I2C
help
This driver supports ATTINY regulator on the Raspberry Pi 7-inch
@@ -1046,6 +1047,16 @@ config REGULATOR_RT5033
RT5033 PMIC. The device supports multiple regulators like
current source, LDO and Buck.
+config REGULATOR_RT5190A
+ tristate "Richtek RT5190A PMIC"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This adds support for the voltage regulators in the Richtek RT5190A
+ PMIC. It integrates one buck controller channel, three high-efficiency
+ buck converters, one LDO, and a mute AC OFF depop function, all
+ controlled through the general I2C interface.
+
config REGULATOR_RT6160
tristate "Richtek RT6160 BuckBoost voltage regulator"
depends on I2C
@@ -1263,6 +1274,15 @@ config REGULATOR_TPS62360
high-frequency synchronous step down dc-dc converter optimized
for battery-powered portable applications.
+config REGULATOR_TPS6286X
+ tristate "TI TPS6286x Power Regulator"
+ depends on I2C && OF
+ select REGMAP_I2C
+ help
+ This driver supports TPS6286x voltage regulator chips. These are
+ high-frequency synchronous step-down converters with an I2C
+ interface.
+
config REGULATOR_TPS65023
tristate "TI TPS65023 Power regulators"
depends on I2C
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 2e1b087489fa..1b64ad5767be 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -126,6 +126,7 @@ obj-$(CONFIG_REGULATOR_ROHM) += rohm-regulator.o
obj-$(CONFIG_REGULATOR_RT4801) += rt4801-regulator.o
obj-$(CONFIG_REGULATOR_RT4831) += rt4831-regulator.o
obj-$(CONFIG_REGULATOR_RT5033) += rt5033-regulator.o
+obj-$(CONFIG_REGULATOR_RT5190A) += rt5190a-regulator.o
obj-$(CONFIG_REGULATOR_RT6160) += rt6160-regulator.o
obj-$(CONFIG_REGULATOR_RT6245) += rt6245-regulator.o
obj-$(CONFIG_REGULATOR_RTMV20) += rtmv20-regulator.o
@@ -149,6 +150,7 @@ obj-$(CONFIG_REGULATOR_SY8827N) += sy8827n.o
obj-$(CONFIG_REGULATOR_TI_ABB) += ti-abb-regulator.o
obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
obj-$(CONFIG_REGULATOR_TPS62360) += tps62360-regulator.o
+obj-$(CONFIG_REGULATOR_TPS6286X) += tps6286x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65086) += tps65086-regulator.o
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 80b65cb87cef..cb7e50003f70 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -459,7 +459,7 @@ static int max8973_thermal_read_temp(void *data, int *temp)
return ret;
}
- /* +1 degC to trigger cool devive */
+ /* +1 degC to trigger cool device */
if (val & MAX77621_CHIPID_TJINT_S)
*temp = mchip->junction_temp_warning + 1000;
else
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index a3bc0eb6ceb8..561de6b2e6e3 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -1121,6 +1121,39 @@ static const struct rpmh_vreg_init_data pmx55_vreg_data[] = {
{}
};
+static const struct rpmh_vreg_init_data pmx65_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps510, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_hfsmps510, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic5_hfsmps510, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic5_hfsmps510, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic5_hfsmps510, "vdd-s5"),
+ RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps510, "vdd-s6"),
+ RPMH_VREG("smps7", "smp%s7", &pmic5_hfsmps510, "vdd-s7"),
+ RPMH_VREG("smps8", "smp%s8", &pmic5_hfsmps510, "vdd-s8"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, "vdd-l1"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo, "vdd-l2-l18"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3"),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l4"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l6-l16"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l5-l6-l16"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic5_nldo, "vdd-l7"),
+ RPMH_VREG("ldo8", "ldo%s8", &pmic5_nldo, "vdd-l8-l9"),
+ RPMH_VREG("ldo9", "ldo%s9", &pmic5_nldo, "vdd-l8-l9"),
+ RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l10"),
+ RPMH_VREG("ldo11", "ldo%s11", &pmic5_pldo, "vdd-l11-l13"),
+ RPMH_VREG("ldo12", "ldo%s12", &pmic5_nldo, "vdd-l12"),
+ RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l11-l13"),
+ RPMH_VREG("ldo14", "ldo%s14", &pmic5_nldo, "vdd-l14"),
+ RPMH_VREG("ldo15", "ldo%s15", &pmic5_nldo, "vdd-l15"),
+ RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l5-l6-l16"),
+ RPMH_VREG("ldo17", "ldo%s17", &pmic5_nldo, "vdd-l17"),
+ /* ldo18 not configured */
+ RPMH_VREG("ldo19", "ldo%s19", &pmic5_nldo, "vdd-l19"),
+ RPMH_VREG("ldo20", "ldo%s20", &pmic5_nldo, "vdd-l20"),
+ RPMH_VREG("ldo21", "ldo%s21", &pmic5_nldo, "vdd-l21"),
+ {}
+};
+
static const struct rpmh_vreg_init_data pm7325_vreg_data[] = {
RPMH_VREG("smps1", "smp%s1", &pmic5_hfsmps510, "vdd-s1"),
RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps520, "vdd-s2"),
@@ -1277,6 +1310,10 @@ static const struct of_device_id __maybe_unused rpmh_regulator_match_table[] = {
.data = pmx55_vreg_data,
},
{
+ .compatible = "qcom,pmx65-rpmh-regulators",
+ .data = pmx65_vreg_data,
+ },
+ {
.compatible = "qcom,pm7325-rpmh-regulators",
.data = pm7325_vreg_data,
},
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 9fc666107a06..8490aa8eecb1 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -1317,8 +1317,10 @@ static int rpm_reg_probe(struct platform_device *pdev)
for_each_available_child_of_node(dev->of_node, node) {
vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
- if (!vreg)
+ if (!vreg) {
+ of_node_put(node);
return -ENOMEM;
+ }
ret = rpm_regulator_init_vreg(vreg, dev, node, rpm, vreg_data);
diff --git a/drivers/regulator/rpi-panel-attiny-regulator.c b/drivers/regulator/rpi-panel-attiny-regulator.c
index ee46bfbf5eee..f7df0f4b2f87 100644
--- a/drivers/regulator/rpi-panel-attiny-regulator.c
+++ b/drivers/regulator/rpi-panel-attiny-regulator.c
@@ -8,6 +8,7 @@
#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -21,63 +22,146 @@
/* I2C registers of the Atmel microcontroller. */
#define REG_ID 0x80
#define REG_PORTA 0x81
-#define REG_PORTA_HF BIT(2)
-#define REG_PORTA_VF BIT(3)
#define REG_PORTB 0x82
+#define REG_PORTC 0x83
#define REG_POWERON 0x85
#define REG_PWM 0x86
+#define REG_ADDR_L 0x8c
+#define REG_ADDR_H 0x8d
+#define REG_WRITE_DATA_H 0x90
+#define REG_WRITE_DATA_L 0x91
+
+#define PA_LCD_DITHB BIT(0)
+#define PA_LCD_MODE BIT(1)
+#define PA_LCD_LR BIT(2)
+#define PA_LCD_UD BIT(3)
+
+#define PB_BRIDGE_PWRDNX_N BIT(0)
+#define PB_LCD_VCC_N BIT(1)
+#define PB_LCD_MAIN BIT(7)
+
+#define PC_LED_EN BIT(0)
+#define PC_RST_TP_N BIT(1)
+#define PC_RST_LCD_N BIT(2)
+#define PC_RST_BRIDGE_N BIT(3)
+
+enum gpio_signals {
+ RST_BRIDGE_N, /* TC358762 bridge reset */
+ RST_TP_N, /* Touch controller reset */
+ NUM_GPIO
+};
+
+struct gpio_signal_mappings {
+ unsigned int reg;
+ unsigned int mask;
+};
+
+static const struct gpio_signal_mappings mappings[NUM_GPIO] = {
+ [RST_BRIDGE_N] = { REG_PORTC, PC_RST_BRIDGE_N | PC_RST_LCD_N },
+ [RST_TP_N] = { REG_PORTC, PC_RST_TP_N },
+};
+
+struct attiny_lcd {
+ /* lock to serialise overall accesses to the Atmel */
+ struct mutex lock;
+ struct regmap *regmap;
+ bool gpio_states[NUM_GPIO];
+ u8 port_states[3];
+
+ struct gpio_chip gc;
+};
static const struct regmap_config attiny_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = REG_PWM,
- .cache_type = REGCACHE_NONE,
+ .disable_locking = 1,
+ .max_register = REG_WRITE_DATA_L,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int attiny_set_port_state(struct attiny_lcd *state, int reg, u8 val)
+{
+ state->port_states[reg - REG_PORTA] = val;
+ return regmap_write(state->regmap, reg, val);
+}
+
+static u8 attiny_get_port_state(struct attiny_lcd *state, int reg)
+{
+ return state->port_states[reg - REG_PORTA];
};
static int attiny_lcd_power_enable(struct regulator_dev *rdev)
{
- unsigned int data;
+ struct attiny_lcd *state = rdev_get_drvdata(rdev);
- regmap_write(rdev->regmap, REG_POWERON, 1);
- /* Wait for nPWRDWN to go low to indicate poweron is done. */
- regmap_read_poll_timeout(rdev->regmap, REG_PORTB, data,
- data & BIT(0), 10, 1000000);
+ mutex_lock(&state->lock);
+
+ /* Ensure the bridge and touch controller stay in reset */
+ attiny_set_port_state(state, REG_PORTC, 0);
+ usleep_range(5000, 10000);
/* Default to the same orientation as the closed source
* firmware used for the panel. Runtime rotation
* configuration will be supported using VC4's plane
* orientation bits.
*/
- regmap_write(rdev->regmap, REG_PORTA, BIT(2));
+ attiny_set_port_state(state, REG_PORTA, PA_LCD_LR);
+ usleep_range(5000, 10000);
+ /* Main regulator on, and power to the panel (LCD_VCC_N) */
+ attiny_set_port_state(state, REG_PORTB, PB_LCD_MAIN);
+ usleep_range(5000, 10000);
+ /* Bring controllers out of reset */
+ attiny_set_port_state(state, REG_PORTC, PC_LED_EN);
+
+ msleep(80);
+
+ mutex_unlock(&state->lock);
return 0;
}
static int attiny_lcd_power_disable(struct regulator_dev *rdev)
{
+ struct attiny_lcd *state = rdev_get_drvdata(rdev);
+
+ mutex_lock(&state->lock);
+
regmap_write(rdev->regmap, REG_PWM, 0);
- regmap_write(rdev->regmap, REG_POWERON, 0);
- udelay(1);
+ usleep_range(5000, 10000);
+
+ attiny_set_port_state(state, REG_PORTA, 0);
+ usleep_range(5000, 10000);
+ attiny_set_port_state(state, REG_PORTB, PB_LCD_VCC_N);
+ usleep_range(5000, 10000);
+ attiny_set_port_state(state, REG_PORTC, 0);
+ msleep(30);
+
+ mutex_unlock(&state->lock);
+
return 0;
}
static int attiny_lcd_power_is_enabled(struct regulator_dev *rdev)
{
+ struct attiny_lcd *state = rdev_get_drvdata(rdev);
unsigned int data;
- int ret;
+ int ret, i;
- ret = regmap_read(rdev->regmap, REG_POWERON, &data);
- if (ret < 0)
- return ret;
+ mutex_lock(&state->lock);
- if (!(data & BIT(0)))
- return 0;
+ for (i = 0; i < 10; i++) {
+ ret = regmap_read(rdev->regmap, REG_PORTC, &data);
+ if (!ret)
+ break;
+ usleep_range(10000, 12000);
+ }
+
+ mutex_unlock(&state->lock);
- ret = regmap_read(rdev->regmap, REG_PORTB, &data);
if (ret < 0)
return ret;
- return data & BIT(0);
+ return data & PC_RST_BRIDGE_N;
}
static const struct regulator_init_data attiny_regulator_default = {
@@ -101,33 +185,104 @@ static const struct regulator_desc attiny_regulator = {
static int attiny_update_status(struct backlight_device *bl)
{
- struct regmap *regmap = bl_get_data(bl);
+ struct attiny_lcd *state = bl_get_data(bl);
+ struct regmap *regmap = state->regmap;
int brightness = bl->props.brightness;
+ int ret, i;
+
+ mutex_lock(&state->lock);
if (bl->props.power != FB_BLANK_UNBLANK ||
bl->props.fb_blank != FB_BLANK_UNBLANK)
brightness = 0;
- return regmap_write(regmap, REG_PWM, brightness);
-}
-
-static int attiny_get_brightness(struct backlight_device *bl)
-{
- struct regmap *regmap = bl_get_data(bl);
- int ret, brightness;
+ for (i = 0; i < 10; i++) {
+ ret = regmap_write(regmap, REG_PWM, brightness);
+ if (!ret)
+ break;
+ }
- ret = regmap_read(regmap, REG_PWM, &brightness);
- if (ret)
- return ret;
+ mutex_unlock(&state->lock);
- return brightness;
+ return ret;
}
static const struct backlight_ops attiny_bl = {
.update_status = attiny_update_status,
- .get_brightness = attiny_get_brightness,
};
+static int attiny_gpio_get_direction(struct gpio_chip *gc, unsigned int off)
+{
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static void attiny_gpio_set(struct gpio_chip *gc, unsigned int off, int val)
+{
+ struct attiny_lcd *state = gpiochip_get_data(gc);
+ u8 last_val;
+
+ if (off >= NUM_GPIO)
+ return;
+
+ mutex_lock(&state->lock);
+
+ last_val = attiny_get_port_state(state, mappings[off].reg);
+ if (val)
+ last_val |= mappings[off].mask;
+ else
+ last_val &= ~mappings[off].mask;
+
+ attiny_set_port_state(state, mappings[off].reg, last_val);
+
+ if (off == RST_BRIDGE_N && val) {
+ usleep_range(5000, 8000);
+ regmap_write(state->regmap, REG_ADDR_H, 0x04);
+ usleep_range(5000, 8000);
+ regmap_write(state->regmap, REG_ADDR_L, 0x7c);
+ usleep_range(5000, 8000);
+ regmap_write(state->regmap, REG_WRITE_DATA_H, 0x00);
+ usleep_range(5000, 8000);
+ regmap_write(state->regmap, REG_WRITE_DATA_L, 0x00);
+
+ msleep(100);
+ }
+
+ mutex_unlock(&state->lock);
+}
+
+static int attiny_i2c_read(struct i2c_client *client, u8 reg, unsigned int *buf)
+{
+ struct i2c_msg msgs[1];
+ u8 addr_buf[1] = { reg };
+ u8 data_buf[1] = { 0, };
+ int ret;
+
+ /* Write register address */
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = ARRAY_SIZE(addr_buf);
+ msgs[0].buf = addr_buf;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ usleep_range(5000, 10000);
+
+ /* Read data from register */
+ msgs[0].addr = client->addr;
+ msgs[0].flags = I2C_M_RD;
+ msgs[0].len = 1;
+ msgs[0].buf = data_buf;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ *buf = data_buf[0];
+ return 0;
+}
+
/*
* I2C driver interface functions
*/
@@ -138,22 +293,30 @@ static int attiny_i2c_probe(struct i2c_client *i2c,
struct regulator_config config = { };
struct backlight_device *bl;
struct regulator_dev *rdev;
+ struct attiny_lcd *state;
struct regmap *regmap;
unsigned int data;
int ret;
+ state = devm_kzalloc(&i2c->dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ mutex_init(&state->lock);
+ i2c_set_clientdata(i2c, state);
+
regmap = devm_regmap_init_i2c(i2c, &attiny_regmap_config);
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
ret);
- return ret;
+ goto error;
}
- ret = regmap_read(regmap, REG_ID, &data);
+ ret = attiny_i2c_read(i2c, REG_ID, &data);
if (ret < 0) {
dev_err(&i2c->dev, "Failed to read REG_ID reg: %d\n", ret);
- return ret;
+ goto error;
}
switch (data) {
@@ -162,34 +325,73 @@ static int attiny_i2c_probe(struct i2c_client *i2c,
break;
default:
dev_err(&i2c->dev, "Unknown Atmel firmware revision: 0x%02x\n", data);
- return -ENODEV;
+ ret = -ENODEV;
+ goto error;
}
regmap_write(regmap, REG_POWERON, 0);
- mdelay(1);
+ msleep(30);
+ regmap_write(regmap, REG_PWM, 0);
config.dev = &i2c->dev;
config.regmap = regmap;
config.of_node = i2c->dev.of_node;
config.init_data = &attiny_regulator_default;
+ config.driver_data = state;
rdev = devm_regulator_register(&i2c->dev, &attiny_regulator, &config);
if (IS_ERR(rdev)) {
dev_err(&i2c->dev, "Failed to register ATTINY regulator\n");
- return PTR_ERR(rdev);
+ ret = PTR_ERR(rdev);
+ goto error;
}
props.type = BACKLIGHT_RAW;
props.max_brightness = 0xff;
- bl = devm_backlight_device_register(&i2c->dev,
- "7inch-touchscreen-panel-bl",
- &i2c->dev, regmap, &attiny_bl,
+
+ state->regmap = regmap;
+
+ bl = devm_backlight_device_register(&i2c->dev, dev_name(&i2c->dev),
+ &i2c->dev, state, &attiny_bl,
&props);
- if (IS_ERR(bl))
- return PTR_ERR(bl);
+ if (IS_ERR(bl)) {
+ ret = PTR_ERR(bl);
+ goto error;
+ }
bl->props.brightness = 0xff;
+ state->gc.parent = &i2c->dev;
+ state->gc.label = i2c->name;
+ state->gc.owner = THIS_MODULE;
+ state->gc.of_node = i2c->dev.of_node;
+ state->gc.base = -1;
+ state->gc.ngpio = NUM_GPIO;
+
+ state->gc.set = attiny_gpio_set;
+ state->gc.get_direction = attiny_gpio_get_direction;
+ state->gc.can_sleep = true;
+
+ ret = devm_gpiochip_add_data(&i2c->dev, &state->gc, state);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to create gpiochip: %d\n", ret);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ mutex_destroy(&state->lock);
+
+ return ret;
+}
+
+static int attiny_i2c_remove(struct i2c_client *client)
+{
+ struct attiny_lcd *state = i2c_get_clientdata(client);
+
+ mutex_destroy(&state->lock);
+
return 0;
}
@@ -205,6 +407,7 @@ static struct i2c_driver attiny_regulator_driver = {
.of_match_table = of_match_ptr(attiny_dt_ids),
},
.probe = attiny_i2c_probe,
+ .remove = attiny_i2c_remove,
};
module_i2c_driver(attiny_regulator_driver);
diff --git a/drivers/regulator/rt5190a-regulator.c b/drivers/regulator/rt5190a-regulator.c
new file mode 100644
index 000000000000..155d4afd00b1
--- /dev/null
+++ b/drivers/regulator/rt5190a-regulator.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <dt-bindings/regulator/richtek,rt5190a-regulator.h>
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+#define RT5190A_REG_MANUFACTURE 0x00
+#define RT5190A_REG_BUCK2VSEL 0x04
+#define RT5190A_REG_BUCK3VSEL 0x05
+#define RT5190A_REG_DCDCCNTL 0x06
+#define RT5190A_REG_ENABLE 0x07
+#define RT5190A_REG_DISCHARGE 0x09
+#define RT5190A_REG_PROTMODE 0x0A
+#define RT5190A_REG_MUTECNTL 0x0B
+#define RT5190A_REG_PGSTAT 0x0F
+#define RT5190A_REG_OVINT 0x10
+#define RT5190A_REG_HOTDIEMASK 0x17
+
+#define RT5190A_VSEL_MASK GENMASK(6, 0)
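+/* rail id 0..4 maps to bit 1..5 in the shared control/status registers */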
+#define RT5190A_RID_BITMASK(rid) BIT((rid) + 1)
+#define RT5190A_BUCK1_DISCHG_MASK GENMASK(1, 0)
+#define RT5190A_BUCK1_DISCHG_ONVAL 0x01
+#define RT5190A_OVERVOLT_MASK GENMASK(7, 0)
+#define RT5190A_UNDERVOLT_MASK GENMASK(15, 8)
+#define RT5190A_CH234OT_MASK BIT(29)
+#define RT5190A_CHIPOT_MASK BIT(28)
+
+#define RT5190A_BUCK23_MINUV 600000
+#define RT5190A_BUCK23_MAXUV 1400000
+#define RT5190A_BUCK23_STEPUV 10000
+#define RT5190A_BUCK23_STEPNUM ((1400000 - 600000) / 10000 + 1)
+
+enum {
+ RT5190A_IDX_BUCK1 = 0,
+ RT5190A_IDX_BUCK2,
+ RT5190A_IDX_BUCK3,
+ RT5190A_IDX_BUCK4,
+ RT5190A_IDX_LDO,
+ RT5190A_MAX_IDX
+};
+
+struct rt5190a_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regulator_desc rdesc[RT5190A_MAX_IDX];
+ struct regulator_dev *rdev[RT5190A_MAX_IDX];
+};
+
+static int rt5190a_get_error_flags(struct regulator_dev *rdev,
+ unsigned int *flags)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ int rid = rdev_get_id(rdev);
+ unsigned int pgood_stat;
+ int ret;
+
+ ret = regmap_read(regmap, RT5190A_REG_PGSTAT, &pgood_stat);
+ if (ret)
+ return ret;
+
+ if (!(pgood_stat & RT5190A_RID_BITMASK(rid)))
+ *flags = REGULATOR_ERROR_FAIL;
+ else
+ *flags = 0;
+
+ return 0;
+}
+
+static int rt5190a_fixed_buck_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ int rid = rdev_get_id(rdev);
+ unsigned int mask = RT5190A_RID_BITMASK(rid), val;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ val = mask;
+ break;
+ case REGULATOR_MODE_NORMAL:
+ val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(regmap, RT5190A_REG_DCDCCNTL, mask, val);
+}
+
+static unsigned int rt5190a_fixed_buck_get_mode(struct regulator_dev *rdev)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ int rid = rdev_get_id(rdev);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(regmap, RT5190A_REG_DCDCCNTL, &val);
+ if (ret) {
+ dev_err(&rdev->dev, "Failed to get mode [%d]\n", ret);
+ return ret;
+ }
+
+ if (val & RT5190A_RID_BITMASK(rid))
+ return REGULATOR_MODE_FAST;
+
+ return REGULATOR_MODE_NORMAL;
+}
+
+static const struct regulator_ops rt5190a_ranged_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .get_error_flags = rt5190a_get_error_flags,
+};
+
+static const struct regulator_ops rt5190a_fixed_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .set_mode = rt5190a_fixed_buck_set_mode,
+ .get_mode = rt5190a_fixed_buck_get_mode,
+ .get_error_flags = rt5190a_get_error_flags,
+};
+
+static const struct regulator_ops rt5190a_fixed_ldo_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .get_error_flags = rt5190a_get_error_flags,
+};
+
+static irqreturn_t rt5190a_irq_handler(int irq, void *data)
+{
+ struct rt5190a_priv *priv = data;
+ __le32 raws;
+ unsigned int events, fields;
+ static const struct {
+ unsigned int bitmask;
+ unsigned int report;
+ } event_tbl[] = {
+ { RT5190A_OVERVOLT_MASK, REGULATOR_ERROR_REGULATION_OUT },
+ { RT5190A_UNDERVOLT_MASK, REGULATOR_ERROR_UNDER_VOLTAGE }
+ };
+ int i, j, ret;
+
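+ /* registers 0x10..0x13 are read as one packed little-endian event word */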
+ ret = regmap_raw_read(priv->regmap, RT5190A_REG_OVINT, &raws,
+ sizeof(raws));
+ if (ret) {
+ dev_err(priv->dev, "Failed to read events\n");
+ return IRQ_NONE;
+ }
+
+ events = le32_to_cpu(raws);
+
+ ret = regmap_raw_write(priv->regmap, RT5190A_REG_OVINT, &raws,
+ sizeof(raws));
+ if (ret)
+ dev_err(priv->dev, "Failed to write-clear events\n");
+
+ /* Handle OV,UV events */
+ for (i = 0; i < ARRAY_SIZE(event_tbl); i++) {
+ fields = events & event_tbl[i].bitmask;
+ fields >>= ffs(event_tbl[i].bitmask) - 1;
+
+ for (j = 0; j < RT5190A_MAX_IDX; j++) {
+ if (!(fields & RT5190A_RID_BITMASK(j)))
+ continue;
+
+ regulator_notifier_call_chain(priv->rdev[j],
+ event_tbl[i].report,
+ NULL);
+ }
+ }
+
+ /* Handle CH234 OT event */
+ if (events & RT5190A_CH234OT_MASK) {
+ for (j = RT5190A_IDX_BUCK2; j < RT5190A_IDX_LDO; j++) {
+ regulator_notifier_call_chain(priv->rdev[j],
+ REGULATOR_ERROR_OVER_TEMP,
+ NULL);
+ }
+ }
+
+ /* Warn if a chip overtemperature event occurs */
+ if (events & RT5190A_CHIPOT_MASK)
+ dev_warn(priv->dev, "CHIP overheat\n");
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int rt5190a_of_map_mode(unsigned int mode)
+{
+ switch (mode) {
+ case RT5190A_OPMODE_AUTO:
+ return REGULATOR_MODE_NORMAL;
+ case RT5190A_OPMODE_FPWM:
+ return REGULATOR_MODE_FAST;
+ default:
+ return REGULATOR_MODE_INVALID;
+ }
+}
+
+static int rt5190a_of_parse_cb(struct rt5190a_priv *priv, int rid,
+ struct of_regulator_match *match)
+{
+ struct regulator_desc *desc = priv->rdesc + rid;
+ struct regulator_init_data *init_data = match->init_data;
+ struct device_node *np = match->of_node;
+ bool latchup_enable;
+ unsigned int mask = RT5190A_RID_BITMASK(rid), val;
+
+ switch (rid) {
+ case RT5190A_IDX_BUCK1:
+ case RT5190A_IDX_BUCK4:
+ case RT5190A_IDX_LDO:
+ init_data->constraints.apply_uV = 0;
+
+ if (init_data->constraints.min_uV ==
+ init_data->constraints.max_uV) {
+ desc->fixed_uV = init_data->constraints.min_uV;
+ } else {
+ dev_err(priv->dev,
+ "Variable voltage for fixed regulator\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ break;
+ }
+
+ latchup_enable = of_property_read_bool(np, "richtek,latchup-enable");
+
+ /* protection mode bit: 0 = latchup, 1 = hiccup (default) */
+ val = !latchup_enable ? mask : 0;
+
+ return regmap_update_bits(priv->regmap, RT5190A_REG_PROTMODE, mask, val);
+}
+
+static void rt5190a_fillin_regulator_desc(struct regulator_desc *desc, int rid)
+{
+ static const char * const regu_name[] = { "buck1", "buck2",
+ "buck3", "buck4",
+ "ldo" };
+ static const char * const supply[] = { NULL, "vin2", "vin3", "vin4",
+ "vinldo" };
+
+ desc->name = regu_name[rid];
+ desc->supply_name = supply[rid];
+ desc->owner = THIS_MODULE;
+ desc->type = REGULATOR_VOLTAGE;
+ desc->id = rid;
+ desc->enable_reg = RT5190A_REG_ENABLE;
+ desc->enable_mask = RT5190A_RID_BITMASK(rid);
+ desc->active_discharge_reg = RT5190A_REG_DISCHARGE;
+ desc->active_discharge_mask = RT5190A_RID_BITMASK(rid);
+ desc->active_discharge_on = RT5190A_RID_BITMASK(rid);
+
+ switch (rid) {
+ case RT5190A_IDX_BUCK1:
+ desc->active_discharge_mask = RT5190A_BUCK1_DISCHG_MASK;
+ desc->active_discharge_on = RT5190A_BUCK1_DISCHG_ONVAL;
+ desc->n_voltages = 1;
+ desc->ops = &rt5190a_fixed_buck_ops;
+ desc->of_map_mode = rt5190a_of_map_mode;
+ break;
+ case RT5190A_IDX_BUCK2:
+ desc->vsel_reg = RT5190A_REG_BUCK2VSEL;
+ desc->vsel_mask = RT5190A_VSEL_MASK;
+ desc->min_uV = RT5190A_BUCK23_MINUV;
+ desc->uV_step = RT5190A_BUCK23_STEPUV;
+ desc->n_voltages = RT5190A_BUCK23_STEPNUM;
+ desc->ops = &rt5190a_ranged_buck_ops;
+ break;
+ case RT5190A_IDX_BUCK3:
+ desc->vsel_reg = RT5190A_REG_BUCK3VSEL;
+ desc->vsel_mask = RT5190A_VSEL_MASK;
+ desc->min_uV = RT5190A_BUCK23_MINUV;
+ desc->uV_step = RT5190A_BUCK23_STEPUV;
+ desc->n_voltages = RT5190A_BUCK23_STEPNUM;
+ desc->ops = &rt5190a_ranged_buck_ops;
+ break;
+ case RT5190A_IDX_BUCK4:
+ desc->n_voltages = 1;
+ desc->ops = &rt5190a_fixed_buck_ops;
+ desc->of_map_mode = rt5190a_of_map_mode;
+ break;
+ case RT5190A_IDX_LDO:
+ desc->n_voltages = 1;
+ desc->ops = &rt5190a_fixed_ldo_ops;
+ break;
+ }
+}
+
+static struct of_regulator_match rt5190a_regulator_match[] = {
+ { .name = "buck1", },
+ { .name = "buck2", },
+ { .name = "buck3", },
+ { .name = "buck4", },
+ { .name = "ldo", }
+};
+
+static int rt5190a_parse_regulator_dt_data(struct rt5190a_priv *priv)
+{
+ struct device_node *regulator_np;
+ struct regulator_desc *reg_desc;
+ struct of_regulator_match *match;
+ int i, ret;
+
+ for (i = 0; i < RT5190A_MAX_IDX; i++) {
+ reg_desc = priv->rdesc + i;
+ match = rt5190a_regulator_match + i;
+
+ rt5190a_fillin_regulator_desc(reg_desc, i);
+
+ match->desc = reg_desc;
+ }
+
+ regulator_np = of_get_child_by_name(priv->dev->of_node, "regulators");
+ if (!regulator_np) {
+ dev_err(priv->dev, "Could not find 'regulators' node\n");
+ return -ENODEV;
+ }
+
+ ret = of_regulator_match(priv->dev, regulator_np,
+ rt5190a_regulator_match,
+ ARRAY_SIZE(rt5190a_regulator_match));
+
+ of_node_put(regulator_np);
+
+ if (ret < 0) {
+ dev_err(priv->dev,
+ "Error parsing regulator init data: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < RT5190A_MAX_IDX; i++) {
+ match = rt5190a_regulator_match + i;
+
+ ret = rt5190a_of_parse_cb(priv, i, match);
+ if (ret) {
+ dev_err(priv->dev, "Failed in [%d] of_parse_cb\n", i);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct reg_sequence rt5190a_init_patch[] = {
+ { 0x09, 0x3d, },
+ { 0x0a, 0x3e, },
+ { 0x0b, 0x01, },
+ { 0x10, 0xff, },
+ { 0x11, 0xff, },
+ { 0x12, 0xff, },
+ { 0x13, 0xff, },
+ { 0x14, 0, },
+ { 0x15, 0, },
+ { 0x16, 0x3e, },
+ { 0x17, 0, }
+};
+
+static int rt5190a_device_initialize(struct rt5190a_priv *priv)
+{
+ bool mute_enable;
+ int ret;
+
+ ret = regmap_register_patch(priv->regmap, rt5190a_init_patch,
+ ARRAY_SIZE(rt5190a_init_patch));
+ if (ret) {
+ dev_err(priv->dev, "Failed to do register patch\n");
+ return ret;
+ }
+
+ mute_enable = device_property_read_bool(priv->dev,
+ "richtek,mute-enable");
+
+ if (mute_enable) {
+ ret = regmap_write(priv->regmap, RT5190A_REG_MUTECNTL, 0x00);
+ if (ret) {
+ dev_err(priv->dev, "Failed to enable mute function\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int rt5190a_device_check(struct rt5190a_priv *priv)
+{
+ u16 devid;
+ int ret;
+
+ ret = regmap_raw_read(priv->regmap, RT5190A_REG_MANUFACTURE, &devid,
+ sizeof(devid));
+ if (ret)
+ return ret;
+
+ if (devid) {
+ dev_err(priv->dev, "Incorrect device id 0x%04x\n", devid);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static const struct regmap_config rt5190a_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = RT5190A_REG_HOTDIEMASK,
+};
+
+static int rt5190a_probe(struct i2c_client *i2c)
+{
+ struct rt5190a_priv *priv;
+ struct regulator_config cfg = {};
+ int i, ret;
+
+ priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &i2c->dev;
+
+ priv->regmap = devm_regmap_init_i2c(i2c, &rt5190a_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(&i2c->dev, "Failed to allocate regmap\n");
+ return PTR_ERR(priv->regmap);
+ }
+
+ ret = rt5190a_device_check(priv);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to check device %d\n", ret);
+ return ret;
+ }
+
+ ret = rt5190a_device_initialize(priv);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to initialize the device\n");
+ return ret;
+ }
+
+ ret = rt5190a_parse_regulator_dt_data(priv);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to parse regulator dt\n");
+ return ret;
+ }
+
+ cfg.dev = &i2c->dev;
+ cfg.regmap = priv->regmap;
+
+ for (i = 0; i < RT5190A_MAX_IDX; i++) {
+ struct regulator_desc *desc = priv->rdesc + i;
+ struct of_regulator_match *match = rt5190a_regulator_match + i;
+
+ cfg.init_data = match->init_data;
+ cfg.of_node = match->of_node;
+
+ priv->rdev[i] = devm_regulator_register(&i2c->dev, desc, &cfg);
+ if (IS_ERR(priv->rdev[i])) {
+ dev_err(&i2c->dev, "Failed to register regulator %s\n",
+ desc->name);
+ return PTR_ERR(priv->rdev[i]);
+ }
+ }
+
+ if (i2c->irq) {
+ ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
+ rt5190a_irq_handler,
+ IRQF_ONESHOT,
+ dev_name(&i2c->dev), priv);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to register interrupt\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id __maybe_unused rt5190a_device_table[] = {
+ { .compatible = "richtek,rt5190a", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rt5190a_device_table);
+
+static struct i2c_driver rt5190a_driver = {
+ .driver = {
+ .name = "rt5190a",
+ .of_match_table = rt5190a_device_table,
+ },
+ .probe_new = rt5190a_probe,
+};
+module_i2c_driver(rt5190a_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RT5190A Regulator Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/sc2731-regulator.c b/drivers/regulator/sc2731-regulator.c
index 0f21f95c8981..71e5ceb679f4 100644
--- a/drivers/regulator/sc2731-regulator.c
+++ b/drivers/regulator/sc2731-regulator.c
@@ -1,4 +1,4 @@
- //SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Spreadtrum Communications Inc.
*/
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index 2931a0b89bff..bd7b2f287250 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -42,7 +42,7 @@
/**
* struct ti_abb_info - ABB information per voltage setting
* @opp_sel: one of TI_ABB macro
- * @vset: (optional) vset value that LDOVBB needs to be overriden with.
+ * @vset: (optional) vset value that LDOVBB needs to be overridden with.
*
* Array of per voltage entries organized in the same order as regulator_desc's
* volt_table list. (selector is used to index from this array)
@@ -484,7 +484,7 @@ static int ti_abb_init_timings(struct device *dev, struct ti_abb *abb)
/* Calculate cycle rate */
cycle_rate = DIV_ROUND_CLOSEST(clock_cycles * 10, clk_rate);
- /* Calulate SR2_WTCNT_VALUE */
+ /* Calculate SR2_WTCNT_VALUE */
sr2_wt_cnt_val = DIV_ROUND_CLOSEST(abb->settling_time * 10, cycle_rate);
dev_dbg(dev, "%s: Clk_rate=%ld, sr2_cnt=0x%08x\n", __func__,
@@ -688,7 +688,7 @@ MODULE_DEVICE_TABLE(of, ti_abb_of_match);
* @pdev: ABB platform device
*
* Initializes an individual ABB LDO for required Body-Bias. ABB is used to
- * addional bias supply to SoC modules for power savings or mandatory stability
+ * additional bias supply to SoC modules for power savings or mandatory stability
* configuration at certain Operating Performance Points(OPPs).
*
* Return: 0 on success or appropriate error value when fails
diff --git a/drivers/regulator/tps6286x-regulator.c b/drivers/regulator/tps6286x-regulator.c
new file mode 100644
index 000000000000..e29deda30d75
--- /dev/null
+++ b/drivers/regulator/tps6286x-regulator.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright Axis Communications AB
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/driver.h>
+
+#include <dt-bindings/regulator/ti,tps62864.h>
+
+#define TPS6286X_VOUT1 0x01
+#define TPS6286X_VOUT1_VO1_SET GENMASK(7, 0)
+
+#define TPS6286X_CONTROL 0x03
+#define TPS6286X_CONTROL_FPWM BIT(4)
+#define TPS6286X_CONTROL_SWEN BIT(5)
+
+#define TPS6286X_MIN_MV 400
+#define TPS6286X_MAX_MV 1675
+#define TPS6286X_STEP_MV 5
+
+static const struct regmap_config tps6286x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int tps6286x_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ unsigned int val;
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ val = 0;
+ break;
+ case REGULATOR_MODE_FAST:
+ val = TPS6286X_CONTROL_FPWM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(rdev->regmap, TPS6286X_CONTROL,
+ TPS6286X_CONTROL_FPWM, val);
+}
+
+static unsigned int tps6286x_get_mode(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, TPS6286X_CONTROL, &val);
+ if (ret < 0)
+ return 0;
+
+ return (val & TPS6286X_CONTROL_FPWM) ? REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL;
+}
+
+static const struct regulator_ops tps6286x_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .set_mode = tps6286x_set_mode,
+ .get_mode = tps6286x_get_mode,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+};
+
+static unsigned int tps6286x_of_map_mode(unsigned int mode)
+{
+ switch (mode) {
+ case TPS62864_MODE_NORMAL:
+ return REGULATOR_MODE_NORMAL;
+ case TPS62864_MODE_FPWM:
+ return REGULATOR_MODE_FAST;
+ default:
+ return REGULATOR_MODE_INVALID;
+ }
+}
+
+static const struct regulator_desc tps6286x_reg = {
+ .name = "tps6286x",
+ .of_match = of_match_ptr("SW"),
+ .owner = THIS_MODULE,
+ .ops = &tps6286x_regulator_ops,
+ .of_map_mode = tps6286x_of_map_mode,
+ .regulators_node = of_match_ptr("regulators"),
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = ((TPS6286X_MAX_MV - TPS6286X_MIN_MV) / TPS6286X_STEP_MV) + 1,
+ .min_uV = TPS6286X_MIN_MV * 1000,
+ .uV_step = TPS6286X_STEP_MV * 1000,
+ .vsel_reg = TPS6286X_VOUT1,
+ .vsel_mask = TPS6286X_VOUT1_VO1_SET,
+ .enable_reg = TPS6286X_CONTROL,
+ .enable_mask = TPS6286X_CONTROL_SWEN,
+ .ramp_delay = 1000,
+ /* tDelay + tRamp, rounded up */
+ .enable_time = 3000,
+};
+
+static const struct of_device_id tps6286x_dt_ids[] = {
+ { .compatible = "ti,tps62864", },
+ { .compatible = "ti,tps62866", },
+ { .compatible = "ti,tps62868", },
+ { .compatible = "ti,tps62869", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tps6286x_dt_ids);
+
+static int tps6286x_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &i2c->dev;
+ struct regulator_config config = {};
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(i2c, &tps6286x_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ config.dev = &i2c->dev;
+ config.of_node = dev->of_node;
+ config.regmap = regmap;
+
+ rdev = devm_regulator_register(&i2c->dev, &tps6286x_reg, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&i2c->dev, "Failed to register tps6286x regulator\n");
+ return PTR_ERR(rdev);
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id tps6286x_i2c_id[] = {
+ { "tps62864", 0 },
+ { "tps62866", 0 },
+ { "tps62868", 0 },
+ { "tps62869", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, tps6286x_i2c_id);
+
+static struct i2c_driver tps6286x_regulator_driver = {
+ .driver = {
+ .name = "tps6286x",
+ .of_match_table = of_match_ptr(tps6286x_dt_ids),
+ },
+ .probe = tps6286x_i2c_probe,
+ .id_table = tps6286x_i2c_id,
+};
+
+module_i2c_driver(tps6286x_regulator_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/vctrl-regulator.c b/drivers/regulator/vctrl-regulator.c
index d2a37978fc3a..aac7be3b33f7 100644
--- a/drivers/regulator/vctrl-regulator.c
+++ b/drivers/regulator/vctrl-regulator.c
@@ -185,10 +185,7 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
unsigned int next_sel;
int delay;
- if (selector >= vctrl->vtable[vctrl->sel].ovp_min_sel)
- next_sel = selector;
- else
- next_sel = vctrl->vtable[vctrl->sel].ovp_min_sel;
+ next_sel = max_t(unsigned int, selector, vctrl->vtable[vctrl->sel].ovp_min_sel);
ret = regulator_set_voltage_rdev(rdev->supply->rdev,
vctrl->vtable[next_sel].ctrl,
diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c
index 52c5a0e0acd8..5d32628a5011 100644
--- a/drivers/regulator/virtual.c
+++ b/drivers/regulator/virtual.c
@@ -13,6 +13,7 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/of.h>
struct virtual_consumer_data {
struct mutex lock;
@@ -281,26 +282,53 @@ static const struct attribute_group regulator_virtual_attr_group = {
.attrs = regulator_virtual_attributes,
};
+#ifdef CONFIG_OF
+static const struct of_device_id regulator_virtual_consumer_of_match[] = {
+ { .compatible = "regulator-virtual-consumer" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, regulator_virtual_consumer_of_match);
+#endif
+
static int regulator_virtual_probe(struct platform_device *pdev)
{
char *reg_id = dev_get_platdata(&pdev->dev);
struct virtual_consumer_data *drvdata;
+ static bool warned;
int ret;
+ if (!warned) {
+ warned = true;
+ pr_warn("**********************************************************\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("** **\n");
+ pr_warn("** regulator-virtual-consumer is only for testing and **\n");
+ pr_warn("** debugging. Do not use it in a production kernel. **\n");
+ pr_warn("** **\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("**********************************************************\n");
+ }
+
drvdata = devm_kzalloc(&pdev->dev, sizeof(struct virtual_consumer_data),
GFP_KERNEL);
if (drvdata == NULL)
return -ENOMEM;
+ /*
+ * This virtual consumer does not have any hardware-defined supply
+ * name, so just allow the regulator to be specified in a property
+ * named "default-supply" when we're being probed from devicetree.
+ */
+ if (!reg_id && pdev->dev.of_node)
+ reg_id = "default";
+
mutex_init(&drvdata->lock);
drvdata->regulator = devm_regulator_get(&pdev->dev, reg_id);
- if (IS_ERR(drvdata->regulator)) {
- ret = PTR_ERR(drvdata->regulator);
- dev_err(&pdev->dev, "Failed to obtain supply '%s': %d\n",
- reg_id, ret);
- return ret;
- }
+ if (IS_ERR(drvdata->regulator))
+ return dev_err_probe(&pdev->dev, PTR_ERR(drvdata->regulator),
+ "Failed to obtain supply '%s'\n",
+ reg_id);
ret = sysfs_create_group(&pdev->dev.kobj,
&regulator_virtual_attr_group);
@@ -334,6 +362,7 @@ static struct platform_driver regulator_virtual_consumer_driver = {
.remove = regulator_virtual_remove,
.driver = {
.name = "reg-virt-consumer",
+ .of_match_table = of_match_ptr(regulator_virtual_consumer_of_match),
},
};
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 6579bfdb0c26..b1d5aac8917d 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1112,7 +1112,7 @@ static int wm8350_regulator_probe(struct platform_device *pdev)
if (pdev->id < WM8350_DCDC_1 || pdev->id > WM8350_ISINK_B)
return -ENODEV;
- /* do any regulatior specific init */
+ /* do any regulator specific init */
switch (pdev->id) {
case WM8350_DCDC_1:
val = wm8350_reg_read(wm8350, WM8350_DCDC1_LOW_POWER);
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 43ea8455546c..a2c231a17b2b 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -928,7 +928,8 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}
-static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
+static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
+ const char *fw_name)
{
unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
dma_addr_t phys;
@@ -939,7 +940,7 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
void *ptr;
int ret;
- metadata = qcom_mdt_read_metadata(fw, &size);
+ metadata = qcom_mdt_read_metadata(fw, &size, fw_name, qproc->dev);
if (IS_ERR(metadata))
return PTR_ERR(metadata);
@@ -1289,7 +1290,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
/* Initialize the RMB validator */
writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
- ret = q6v5_mpss_init_image(qproc, fw);
+ ret = q6v5_mpss_init_image(qproc, fw, qproc->hexagon_mdt_image);
if (ret)
goto release_firmware;
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 184bb7cdf95a..1ae47cc153e5 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -79,6 +79,8 @@ struct qcom_adsp {
struct qcom_rproc_subdev smd_subdev;
struct qcom_rproc_ssr ssr_subdev;
struct qcom_sysmon *sysmon;
+
+ struct qcom_scm_pas_metadata pas_metadata;
};
static void adsp_minidump(struct rproc *rproc)
@@ -126,14 +128,34 @@ static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds,
}
}
+static int adsp_unprepare(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+
+ /*
+ * adsp_load() passed pas_metadata to the SCM driver to store the
+ * metadata context. It is normally released once auth_and_reset()
+ * succeeds, so clean it up here for the remaining cases.
+ */
+ qcom_scm_pas_metadata_release(&adsp->pas_metadata);
+
+ return 0;
+}
+
static int adsp_load(struct rproc *rproc, const struct firmware *fw)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
int ret;
- ret = qcom_mdt_load(adsp->dev, fw, rproc->firmware, adsp->pas_id,
- adsp->mem_region, adsp->mem_phys, adsp->mem_size,
- &adsp->mem_reloc);
+ ret = qcom_mdt_pas_init(adsp->dev, fw, rproc->firmware, adsp->pas_id,
+ adsp->mem_phys, &adsp->pas_metadata);
+ if (ret)
+ return ret;
+
+ ret = qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, adsp->pas_id,
+ adsp->mem_region, adsp->mem_phys, adsp->mem_size,
+ &adsp->mem_reloc);
if (ret)
return ret;
@@ -185,6 +207,8 @@ static int adsp_start(struct rproc *rproc)
goto disable_px_supply;
}
+ qcom_scm_pas_metadata_release(&adsp->pas_metadata);
+
return 0;
disable_px_supply:
@@ -255,6 +279,7 @@ static unsigned long adsp_panic(struct rproc *rproc)
}
static const struct rproc_ops adsp_ops = {
+ .unprepare = adsp_unprepare,
.start = adsp_start,
.stop = adsp_stop,
.da_to_va = adsp_da_to_va,
@@ -264,6 +289,7 @@ static const struct rproc_ops adsp_ops = {
};
static const struct rproc_ops adsp_minidump_ops = {
+ .unprepare = adsp_unprepare,
.start = adsp_start,
.stop = adsp_stop,
.da_to_va = adsp_da_to_va,
@@ -853,6 +879,10 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sm8350-cdsp-pas", .data = &sm8350_cdsp_resource},
{ .compatible = "qcom,sm8350-slpi-pas", .data = &sm8350_slpi_resource},
{ .compatible = "qcom,sm8350-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sm8450-adsp-pas", .data = &sm8350_adsp_resource},
+ { .compatible = "qcom,sm8450-cdsp-pas", .data = &sm8350_cdsp_resource},
+ { .compatible = "qcom,sm8450-slpi-pas", .data = &sm8350_slpi_resource},
+ { .compatible = "qcom,sm8450-mpss-pas", .data = &mpss_resource_init},
{ },
};
MODULE_DEVICE_TABLE(of, adsp_of_match);
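
The PAS changes split the old one-shot qcom_mdt_load() into a metadata-init step plus a no-init load, and use the remoteproc .unprepare hook to drop the SCM metadata context when a boot attempt never reaches auth-and-reset. A hedged sketch of the ops wiring, using the callback names from the hunks; the struct is illustrative and omits the other handlers:

    #include <linux/remoteproc.h>

    /* .unprepare runs when boot is unwound, giving adsp_unprepare()
     * a place to release metadata that a successful auth-and-reset
     * would already have freed.
     */
    static const struct rproc_ops demo_pas_ops = {
            .unprepare = adsp_unprepare,
            .start     = adsp_start,
            .stop      = adsp_stop,
            .load      = adsp_load,
    };
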
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index 2f83adef966e..6d66ab5a8b17 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -185,10 +185,9 @@ static int ds1302_probe(struct spi_device *spi)
return 0;
}
-static int ds1302_remove(struct spi_device *spi)
+static void ds1302_remove(struct spi_device *spi)
{
spi_set_drvdata(spi, NULL);
- return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 9ef107b99b65..ed9360486953 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -720,7 +720,7 @@ static int ds1305_probe(struct spi_device *spi)
return 0;
}
-static int ds1305_remove(struct spi_device *spi)
+static void ds1305_remove(struct spi_device *spi)
{
struct ds1305 *ds1305 = spi_get_drvdata(spi);
@@ -730,8 +730,6 @@ static int ds1305_remove(struct spi_device *spi)
devm_free_irq(&spi->dev, spi->irq, ds1305);
cancel_work_sync(&ds1305->work);
}
-
- return 0;
}
static struct spi_driver ds1305_driver = {
diff --git a/drivers/rtc/rtc-ds1343.c b/drivers/rtc/rtc-ds1343.c
index f14ed6c96437..ed5a6ba89a3e 100644
--- a/drivers/rtc/rtc-ds1343.c
+++ b/drivers/rtc/rtc-ds1343.c
@@ -434,11 +434,9 @@ static int ds1343_probe(struct spi_device *spi)
return 0;
}
-static int ds1343_remove(struct spi_device *spi)
+static void ds1343_remove(struct spi_device *spi)
{
dev_pm_clear_wake_irq(&spi->dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
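
The three RTC hunks above are part of the tree-wide conversion of the spi_driver .remove callback to return void; the SPI core ignored the return value, so drivers now just clean up and fall off the end. A minimal sketch under that assumption, with an illustrative "demo" driver:

    #include <linux/spi/spi.h>

    /* With the void-returning callback there is no status to report. */
    static void demo_spi_remove(struct spi_device *spi)
    {
            /* release driver state here */
    }

    static struct spi_driver demo_spi_driver = {
            .driver = { .name = "demo-spi" },
            .remove = demo_spi_remove,
    };
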
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 8b458010f88a..3b7af00a7825 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -47,7 +47,6 @@
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
-#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/interrupt.h>
#include <linux/log2.h>
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 61ecdcb2cc6a..2a9c0ddcade5 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
-#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index a05a4297cfae..af82b3214774 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -6,7 +6,6 @@
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
-#include <linux/genhd.h>
#include <linux/list.h>
#include <asm/debug.h>
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 5db591cf7215..dfb84bb03d32 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1394,7 +1394,7 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
if (len < TH_HEADER_LENGTH) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
- "%s(%s): packet length %d to short",
+ "%s(%s): packet length %d too short",
CTCM_FUNTAIL, dev->name, len);
priv->stats.rx_dropped++;
priv->stats.rx_length_errors++;
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 5ea7eeb07002..e0fdd54bfeb7 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -166,7 +166,7 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
ch->logflags = 0;
priv->stats.rx_packets++;
priv->stats.rx_bytes += skblen;
- netif_rx_ni(skb);
+ netif_rx(skb);
if (len > 0) {
skb_pull(pskb, header->length);
if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index a61d38a1b4ed..bab9b34926c6 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -223,7 +223,7 @@ lcs_setup_read_ccws(struct lcs_card *card)
* we do not need to do set_normalized_cda.
*/
card->read.ccws[cnt].cda =
- (__u32) __pa(card->read.iob[cnt].data);
+ (__u32)virt_to_phys(card->read.iob[cnt].data);
((struct lcs_header *)
card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
card->read.iob[cnt].callback = lcs_get_frames_cb;
@@ -236,7 +236,7 @@ lcs_setup_read_ccws(struct lcs_card *card)
/* Last ccw is a tic (transfer in channel). */
card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
card->read.ccws[LCS_NUM_BUFFS].cda =
- (__u32) __pa(card->read.ccws);
+ (__u32)virt_to_phys(card->read.ccws);
/* Set initial state of the read channel. */
card->read.state = LCS_CH_STATE_INIT;
@@ -278,12 +278,12 @@ lcs_setup_write_ccws(struct lcs_card *card)
* we do not need to do set_normalized_cda.
*/
card->write.ccws[cnt].cda =
- (__u32) __pa(card->write.iob[cnt].data);
+ (__u32)virt_to_phys(card->write.iob[cnt].data);
}
/* Last ccw is a tic (transfer in channel). */
card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
card->write.ccws[LCS_NUM_BUFFS].cda =
- (__u32) __pa(card->write.ccws);
+ (__u32)virt_to_phys(card->write.ccws);
/* Set initial state of the write channel. */
card->read.state = LCS_CH_STATE_INIT;
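
The lcs hunks replace bare __pa() casts with virt_to_phys() when programming CCW channel-address words; both yield the same physical address for directly mapped kernel memory, but virt_to_phys() states the intent and type-checks its pointer argument. A hedged sketch of the pattern, with illustrative names:

    #include <asm/cio.h>	/* struct ccw1 */
    #include <asm/io.h>		/* virt_to_phys() */

    /* Program a CCW data address from a kernel-virtual buffer. */
    static void demo_set_cda(struct ccw1 *ccw, void *buf)
    {
            ccw->cda = (__u32)virt_to_phys(buf);	/* was: __pa(buf) */
    }
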
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 981e7b1c6b96..65aa0a96c21d 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -620,11 +620,7 @@ static void netiucv_unpack_skb(struct iucv_connection *conn,
pskb->ip_summed = CHECKSUM_UNNECESSARY;
privptr->stats.rx_packets++;
privptr->stats.rx_bytes += skb->len;
- /*
- * Since receiving is always initiated from a tasklet (in iucv.c),
- * we must use netif_rx_ni() instead of netif_rx()
- */
- netif_rx_ni(skb);
+ netif_rx(skb);
skb_pull(pskb, header->next);
skb_put(pskb, NETIUCV_HDRLEN);
}
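
The ctcm and netiucv hunks drop netif_rx_ni(): after the 5.18 rework, netif_rx() disables bottom halves itself and may be called from any context, so the tasklet-context caveat the old netiucv comment described no longer applies. A hedged sketch of the unified receive path, with illustrative names:

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>

    static void demo_deliver(struct net_device *dev, struct sk_buff *skb)
    {
            skb->protocol = eth_type_trans(skb, dev);
            netif_rx(skb);	/* was: netif_rx_ni(skb) */
    }
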
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 29f0111f8e11..d99c5b773e22 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -426,7 +426,7 @@ static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
ccw->cmd_code = cmd_code;
ccw->flags = flags | CCW_FLAG_SLI;
ccw->count = len;
- ccw->cda = (__u32) __pa(data);
+ ccw->cda = (__u32)virt_to_phys(data);
}
static int __qeth_issue_next_read(struct qeth_card *card)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 9251ad276ee8..d2f422a9a4f7 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1961,7 +1961,6 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
if (card->dev->reg_state == NETREG_REGISTERED)
unregister_netdev(card->dev);
- flush_workqueue(card->cmd_wq);
destroy_workqueue(card->cmd_wq);
qeth_l3_clear_ip_htable(card, 0);
qeth_l3_clear_ipato_list(card);
diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h
index bf0daeeb50a9..e1fbbf55c09d 100644
--- a/drivers/scsi/dpt/dpti_i2o.h
+++ b/drivers/scsi/dpt/dpti_i2o.h
@@ -123,7 +123,7 @@ struct i2o_sys_tbl
u32 change_ind;
u32 reserved2;
u32 reserved3;
- struct i2o_sys_tbl_entry iops[0];
+ struct i2o_sys_tbl_entry iops[];
};
/*
diff --git a/drivers/scsi/elx/libefc_sli/sli4.h b/drivers/scsi/elx/libefc_sli/sli4.h
index ee2a9e65a88d..38af166cc786 100644
--- a/drivers/scsi/elx/libefc_sli/sli4.h
+++ b/drivers/scsi/elx/libefc_sli/sli4.h
@@ -609,7 +609,7 @@ struct sli4_rqst_cmn_create_cq_v2 {
__le16 cqe_count;
__le16 rsvd30;
__le32 rsvd32;
- struct sli4_dmaaddr page_phys_addr[0];
+ struct sli4_dmaaddr page_phys_addr[];
};
enum sli4_create_cqset_e {
@@ -634,7 +634,7 @@ struct sli4_rqst_cmn_create_cq_set_v0 {
__le16 num_cq_req;
__le16 dw6w1_flags;
__le16 eq_id[16];
- struct sli4_dmaaddr page_phys_addr[0];
+ struct sli4_dmaaddr page_phys_addr[];
};
/* CQE count */
@@ -764,7 +764,7 @@ struct sli4_rqst_cmn_create_mq_ext {
__le32 dw7_val;
__le32 dw8_flags;
__le32 rsvd36;
- struct sli4_dmaaddr page_phys_addr[0];
+ struct sli4_dmaaddr page_phys_addr[];
};
struct sli4_rsp_cmn_create_mq_ext {
@@ -802,7 +802,7 @@ struct sli4_rqst_cmn_create_cq_v0 {
__le32 dw6_flags;
__le32 rsvd28;
__le32 rsvd32;
- struct sli4_dmaaddr page_phys_addr[0];
+ struct sli4_dmaaddr page_phys_addr[];
};
enum sli4_create_rq_e {
@@ -887,7 +887,7 @@ struct sli4_rqst_rq_create_v2 {
__le16 base_cq_id;
__le16 rsvd26;
__le32 rsvd42;
- struct sli4_dmaaddr page_phys_addr[0];
+ struct sli4_dmaaddr page_phys_addr[];
};
struct sli4_rsp_rq_create_v2 {
@@ -3168,7 +3168,7 @@ struct sli4_rqst_cmn_read_object {
__le32 read_offset;
u8 object_name[104];
__le32 host_buffer_descriptor_count;
- struct sli4_bde host_buffer_descriptor[0];
+ struct sli4_bde host_buffer_descriptor[];
};
#define RSP_COM_READ_OBJ_EOF 0x80000000
@@ -3191,7 +3191,7 @@ struct sli4_rqst_cmn_write_object {
__le32 write_offset;
u8 object_name[104];
__le32 host_buffer_descriptor_count;
- struct sli4_bde host_buffer_descriptor[0];
+ struct sli4_bde host_buffer_descriptor[];
};
#define RSP_CHANGE_STATUS 0xff
@@ -3217,7 +3217,7 @@ struct sli4_rqst_cmn_read_object_list {
__le32 read_offset;
u8 object_name[104];
__le32 host_buffer_descriptor_count;
- struct sli4_bde host_buffer_descriptor[0];
+ struct sli4_bde host_buffer_descriptor[];
};
enum sli4_rqst_set_dump_flags {
@@ -3342,7 +3342,7 @@ struct sli4_rspource_descriptor_v1 {
u8 descriptor_type;
u8 descriptor_length;
__le16 rsvd16;
- __le32 type_specific[0];
+ __le32 type_specific[];
};
enum sli4_pcie_desc_flags {
@@ -3474,7 +3474,7 @@ struct sli4_rqst_post_hdr_templates {
struct sli4_rqst_hdr hdr;
__le16 rpi_offset;
__le16 page_count;
- struct sli4_dmaaddr page_descriptor[0];
+ struct sli4_dmaaddr page_descriptor[];
};
#define SLI4_HDR_TEMPLATE_SIZE 64
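
The dpti_i2o.h and sli4.h hunks adopt C99 flexible array members in place of the old zero-length arrays, letting the compiler and FORTIFY checks see the true object bounds. Allocation typically pairs the member with struct_size(); a hedged sketch with an illustrative struct and helper:

    #include <linux/overflow.h>	/* struct_size() */
    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_tbl {
            u32 count;
            u64 entry[];		/* was: u64 entry[0]; */
    };

    static struct demo_tbl *demo_tbl_alloc(u32 n)
    {
            struct demo_tbl *t;

            /* struct_size() computes header + n trailing elements
             * with overflow checking.
             */
            t = kzalloc(struct_size(t, entry, n), GFP_KERNEL);
            if (t)
                    t->count = n;
            return t;
    }
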
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 88c549f257db..40a52feb315d 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -986,8 +986,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
CMD_SP(sc) = NULL;
CMD_FLAGS(sc) |= FNIC_IO_DONE;
- spin_unlock_irqrestore(io_lock, flags);
-
if (hdr_status != FCPIO_SUCCESS) {
atomic64_inc(&fnic_stats->io_stats.io_failures);
shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
@@ -996,8 +994,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
fnic_release_ioreq_buf(fnic, io_req, sc);
- mempool_free(io_req, fnic->io_req_pool);
-
cmd_trace = ((u64)hdr_status << 56) |
(u64)icmnd_cmpl->scsi_status << 48 |
(u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
@@ -1021,6 +1017,12 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
} else
fnic->lport->host_stats.fcp_control_requests++;
+ /* Call SCSI completion function to complete the IO */
+ scsi_done(sc);
+ spin_unlock_irqrestore(io_lock, flags);
+
+ mempool_free(io_req, fnic->io_req_pool);
+
atomic64_dec(&fnic_stats->io_stats.active_ios);
if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
@@ -1049,9 +1051,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
if(io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time))
atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time);
}
-
- /* Call SCSI completion function to complete the IO */
- scsi_done(sc);
}
/* fnic_fcpio_itmf_cmpl_handler
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index fc4eaf6d1e47..fb7d8775c9f7 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -878,7 +878,7 @@ struct mpi3mr_fwevt {
bool process_evt;
u32 evt_ctx;
struct kref ref_count;
- char event_data[0] __aligned(4);
+ char event_data[] __aligned(4);
};
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 511726f92d9a..76229b839560 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2011,9 +2011,10 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
enable_irq(reply_q->os_irq);
}
}
+
+ if (poll)
+ _base_process_reply_queue(reply_q);
}
- if (poll)
- _base_process_reply_queue(reply_q);
}
/**
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 0f8a4c7e52a2..6d2b0a7436c1 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -157,7 +157,7 @@ struct qla84_msg_mgmt {
uint16_t rsrvd;
struct qla84_mgmt_param mgmtp;/* parameters for cmd */
uint32_t len; /* bytes in payload following this struct */
- uint8_t payload[0]; /* payload for cmd */
+ uint8_t payload[]; /* payload for cmd */
};
struct qla_bsg_a84_mgmt {
@@ -216,7 +216,7 @@ struct qla_image_version {
struct qla_image_version_list {
uint32_t count;
- struct qla_image_version version[0];
+ struct qla_image_version version[];
} __packed;
struct qla_status_reg {
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 9ebf4a234d9a..b6434c72dee3 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -5410,7 +5410,7 @@ struct ql_vnd_stat_entry {
struct ql_vnd_stats {
u64 entry_count; /* Num of entries */
u64 rservd;
- struct ql_vnd_stat_entry entry[0]; /* Place holder of entries */
+ struct ql_vnd_stat_entry entry[]; /* Place holder of entries */
} __packed;
struct ql_vnd_host_stats_resp {
diff --git a/drivers/scsi/qla2xxx/qla_edif_bsg.h b/drivers/scsi/qla2xxx/qla_edif_bsg.h
index 53026d82ebff..5a26c77157da 100644
--- a/drivers/scsi/qla2xxx/qla_edif_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_edif_bsg.h
@@ -121,7 +121,7 @@ struct app_pinfo {
struct app_pinfo_reply {
uint8_t port_count;
uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
- struct app_pinfo ports[0];
+ struct app_pinfo ports[];
} __packed;
struct app_sinfo_req {
@@ -140,7 +140,7 @@ struct app_sinfo {
struct app_stats_reply {
uint8_t elem_count;
- struct app_sinfo elem[0];
+ struct app_sinfo elem[];
} __packed;
struct qla_sa_update_frame {
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 073d06e88c58..0bb1d562f0bf 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1706,7 +1706,7 @@ struct qla_flt_header {
__le16 length;
__le16 checksum;
__le16 unused;
- struct qla_flt_region region[0];
+ struct qla_flt_region region[];
};
#define FLT_REGION_SIZE 16
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 4e1764df0a73..860ec61b51b9 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -1028,7 +1028,7 @@ struct crash_record {
uint8_t out_RISC_reg_dump[256]; /* 80 -17F */
uint8_t in_RISC_reg_dump[256]; /*180 -27F */
- uint8_t in_out_RISC_stack_dump[0]; /*280 - ??? */
+ uint8_t in_out_RISC_stack_dump[]; /*280 - ??? */
};
struct conn_event_log_entry {
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 2104973a35cd..911cc72dd7ac 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -23,7 +23,6 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
-#include <linux/genhd.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0a70aa763a96..e30bc51578e9 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1276,7 +1276,7 @@ scsi_device_state_check(struct scsi_device *sdev, struct request *req)
* power management commands.
*/
if (req && !(req->rq_flags & RQF_PM))
- return BLK_STS_IOERR;
+ return BLK_STS_OFFLINE;
return BLK_STS_OK;
}
}
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index 0ffdb8f2995f..acdc0aceca5e 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fs.h>
-#include <linux/genhd.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 62eb9921cc94..73e6f5f0f37c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -38,7 +38,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bio.h>
-#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/idr.h>
@@ -122,11 +121,6 @@ static void scsi_disk_release(struct device *cdev);
static DEFINE_IDA(sd_index_ida);
-/* This semaphore is used to mediate the 0->1 reference get in the
- * face of object destruction (i.e. we can't allow a get on an
- * object after last put) */
-static DEFINE_MUTEX(sd_ref_mutex);
-
static struct kmem_cache *sd_cdb_cache;
static mempool_t *sd_cdb_pool;
static mempool_t *sd_page_pool;
@@ -664,33 +658,6 @@ static int sd_major(int major_idx)
}
}
-static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
-{
- struct scsi_disk *sdkp = NULL;
-
- mutex_lock(&sd_ref_mutex);
-
- if (disk->private_data) {
- sdkp = scsi_disk(disk);
- if (scsi_device_get(sdkp->device) == 0)
- get_device(&sdkp->dev);
- else
- sdkp = NULL;
- }
- mutex_unlock(&sd_ref_mutex);
- return sdkp;
-}
-
-static void scsi_disk_put(struct scsi_disk *sdkp)
-{
- struct scsi_device *sdev = sdkp->device;
-
- mutex_lock(&sd_ref_mutex);
- put_device(&sdkp->dev);
- scsi_device_put(sdev);
- mutex_unlock(&sd_ref_mutex);
-}
-
#ifdef CONFIG_BLK_SED_OPAL
static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
size_t len, bool send)
@@ -1419,17 +1386,15 @@ static bool sd_need_revalidate(struct block_device *bdev,
**/
static int sd_open(struct block_device *bdev, fmode_t mode)
{
- struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk);
- struct scsi_device *sdev;
+ struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
+ struct scsi_device *sdev = sdkp->device;
int retval;
- if (!sdkp)
+ if (scsi_device_get(sdev))
return -ENXIO;
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
- sdev = sdkp->device;
-
/*
* If the device is in error recovery, wait until it is done.
* If the device is offline, then disallow any access to it.
@@ -1474,7 +1439,7 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
return 0;
error_out:
- scsi_disk_put(sdkp);
+ scsi_device_put(sdev);
return retval;
}
@@ -1503,7 +1468,7 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
}
- scsi_disk_put(sdkp);
+ scsi_device_put(sdev);
}
static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -1617,7 +1582,7 @@ static int media_not_present(struct scsi_disk *sdkp,
**/
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
- struct scsi_disk *sdkp = scsi_disk_get(disk);
+ struct scsi_disk *sdkp = disk->private_data;
struct scsi_device *sdp;
int retval;
bool disk_changed;
@@ -1680,7 +1645,6 @@ out:
*/
disk_changed = sdp->changed;
sdp->changed = 0;
- scsi_disk_put(sdkp);
return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
}
@@ -1888,6 +1852,13 @@ static const struct pr_ops sd_pr_ops = {
.pr_clear = sd_pr_clear,
};
+static void scsi_disk_free_disk(struct gendisk *disk)
+{
+ struct scsi_disk *sdkp = scsi_disk(disk);
+
+ put_device(&sdkp->disk_dev);
+}
+
static const struct block_device_operations sd_fops = {
.owner = THIS_MODULE,
.open = sd_open,
@@ -1899,6 +1870,7 @@ static const struct block_device_operations sd_fops = {
.unlock_native_capacity = sd_unlock_native_capacity,
.report_zones = sd_zbc_report_zones,
.get_unique_id = sd_get_unique_id,
+ .free_disk = scsi_disk_free_disk,
.pr_ops = &sd_pr_ops,
};
@@ -3516,7 +3488,6 @@ static int sd_probe(struct device *dev)
}
sdkp->device = sdp;
- sdkp->driver = &sd_template;
sdkp->disk = gd;
sdkp->index = index;
sdkp->max_retries = SD_MAX_RETRIES;
@@ -3531,14 +3502,14 @@ static int sd_probe(struct device *dev)
SD_MOD_TIMEOUT);
}
- device_initialize(&sdkp->dev);
- sdkp->dev.parent = get_device(dev);
- sdkp->dev.class = &sd_disk_class;
- dev_set_name(&sdkp->dev, "%s", dev_name(dev));
+ device_initialize(&sdkp->disk_dev);
+ sdkp->disk_dev.parent = get_device(dev);
+ sdkp->disk_dev.class = &sd_disk_class;
+ dev_set_name(&sdkp->disk_dev, "%s", dev_name(dev));
- error = device_add(&sdkp->dev);
+ error = device_add(&sdkp->disk_dev);
if (error) {
- put_device(&sdkp->dev);
+ put_device(&sdkp->disk_dev);
goto out;
}
@@ -3549,7 +3520,7 @@ static int sd_probe(struct device *dev)
gd->minors = SD_MINORS;
gd->fops = &sd_fops;
- gd->private_data = &sdkp->driver;
+ gd->private_data = sdkp;
/* defaults, until the device tells us otherwise */
sdp->sector_size = 512;
@@ -3579,7 +3550,7 @@ static int sd_probe(struct device *dev)
error = device_add_disk(dev, gd, NULL);
if (error) {
- put_device(&sdkp->dev);
+ put_device(&sdkp->disk_dev);
goto out;
}
@@ -3625,58 +3596,26 @@ static int sd_probe(struct device *dev)
**/
static int sd_remove(struct device *dev)
{
- struct scsi_disk *sdkp;
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
- sdkp = dev_get_drvdata(dev);
scsi_autopm_get_device(sdkp->device);
- device_del(&sdkp->dev);
+ device_del(&sdkp->disk_dev);
del_gendisk(sdkp->disk);
sd_shutdown(dev);
- free_opal_dev(sdkp->opal_dev);
-
- mutex_lock(&sd_ref_mutex);
- dev_set_drvdata(dev, NULL);
- put_device(&sdkp->dev);
- mutex_unlock(&sd_ref_mutex);
-
+ put_disk(sdkp->disk);
return 0;
}
-/**
- * scsi_disk_release - Called to free the scsi_disk structure
- * @dev: pointer to embedded class device
- *
- * sd_ref_mutex must be held entering this routine. Because it is
- * called on last put, you should always use the scsi_disk_get()
- * scsi_disk_put() helpers which manipulate the semaphore directly
- * and never do a direct put_device.
- **/
static void scsi_disk_release(struct device *dev)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- struct gendisk *disk = sdkp->disk;
- struct request_queue *q = disk->queue;
ida_free(&sd_index_ida, sdkp->index);
-
- /*
- * Wait until all requests that are in progress have completed.
- * This is necessary to avoid that e.g. scsi_end_request() crashes
- * due to clearing the disk->private_data pointer. Wait from inside
- * scsi_disk_release() instead of from sd_release() to avoid that
- * freezing and unfreezing the request queue affects user space I/O
- * in case multiple processes open a /dev/sd... node concurrently.
- */
- blk_mq_freeze_queue(q);
- blk_mq_unfreeze_queue(q);
-
- disk->private_data = NULL;
- put_disk(disk);
- put_device(&sdkp->device->sdev_gendev);
-
sd_zbc_release_disk(sdkp);
+ put_device(&sdkp->device->sdev_gendev);
+ free_opal_dev(sdkp->opal_dev);
kfree(sdkp);
}
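
The sd.c rework drops the driver-private sd_ref_mutex/scsi_disk_get() scheme and lets the block layer manage the lifetime: gendisk->private_data now points straight at the scsi_disk, and the final put of the disk invokes the new ->free_disk() op. A hedged sketch of the callback contract, with illustrative names:

    #include <linux/blkdev.h>
    #include <linux/module.h>
    #include <linux/slab.h>

    /* ->free_disk() is called exactly once, when the last reference
     * to the gendisk is dropped, so state hanging off private_data
     * can safely be released there.
     */
    static void demo_free_disk(struct gendisk *disk)
    {
            kfree(disk->private_data);
    }

    static const struct block_device_operations demo_fops = {
            .owner     = THIS_MODULE,
            .free_disk = demo_free_disk,
    };
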
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 2e5932bde43d..0a33a4b68ffb 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -68,9 +68,13 @@ enum {
};
struct scsi_disk {
- struct scsi_driver *driver; /* always &sd_template */
struct scsi_device *device;
- struct device dev;
+
+ /*
+ * disk_dev is used to show attributes in /sys/class/scsi_disk/,
+ * but otherwise not really needed. Do not use for refcounting.
+ */
+ struct device disk_dev;
struct gendisk *disk;
struct opal_dev *opal_dev;
#ifdef CONFIG_BLK_DEV_ZONED
@@ -127,11 +131,11 @@ struct scsi_disk {
unsigned security : 1;
unsigned ignore_medium_access_errors : 1;
};
-#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
+#define to_scsi_disk(obj) container_of(obj, struct scsi_disk, disk_dev)
static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
{
- return container_of(disk->private_data, struct scsi_disk, driver);
+ return disk->private_data;
}
#define sd_printk(prefix, sdsk, fmt, a...) \
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 6b43e97bd417..aaa2376b9d34 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -224,11 +224,6 @@ static int sg_check_file_access(struct file *filp, const char *caller)
caller, task_tgid_vnr(current), current->comm);
return -EPERM;
}
- if (uaccess_kernel()) {
- pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n",
- caller, task_tgid_vnr(current), current->comm);
- return -EACCES;
- }
return 0;
}
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index f925b1f1f9ad..641552d6330b 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -109,11 +109,6 @@ static DEFINE_SPINLOCK(sr_index_lock);
static struct lock_class_key sr_bio_compl_lkclass;
-/* This semaphore is used to mediate the 0->1 reference get in the
- * face of object destruction (i.e. we can't allow a get on an
- * object after last put) */
-static DEFINE_MUTEX(sr_ref_mutex);
-
static int sr_open(struct cdrom_device_info *, int);
static void sr_release(struct cdrom_device_info *);
@@ -143,11 +138,9 @@ static const struct cdrom_device_ops sr_dops = {
.capability = SR_CAPABILITIES,
};
-static void sr_kref_release(struct kref *kref);
-
static inline struct scsi_cd *scsi_cd(struct gendisk *disk)
{
- return container_of(disk->private_data, struct scsi_cd, driver);
+ return disk->private_data;
}
static int sr_runtime_suspend(struct device *dev)
@@ -163,38 +156,6 @@ static int sr_runtime_suspend(struct device *dev)
return 0;
}
-/*
- * The get and put routines for the struct scsi_cd. Note this entity
- * has a scsi_device pointer and owns a reference to this.
- */
-static inline struct scsi_cd *scsi_cd_get(struct gendisk *disk)
-{
- struct scsi_cd *cd = NULL;
-
- mutex_lock(&sr_ref_mutex);
- if (disk->private_data == NULL)
- goto out;
- cd = scsi_cd(disk);
- kref_get(&cd->kref);
- if (scsi_device_get(cd->device)) {
- kref_put(&cd->kref, sr_kref_release);
- cd = NULL;
- }
- out:
- mutex_unlock(&sr_ref_mutex);
- return cd;
-}
-
-static void scsi_cd_put(struct scsi_cd *cd)
-{
- struct scsi_device *sdev = cd->device;
-
- mutex_lock(&sr_ref_mutex);
- kref_put(&cd->kref, sr_kref_release);
- scsi_device_put(sdev);
- mutex_unlock(&sr_ref_mutex);
-}
-
static unsigned int sr_get_events(struct scsi_device *sdev)
{
u8 buf[8];
@@ -522,15 +483,13 @@ static void sr_revalidate_disk(struct scsi_cd *cd)
static int sr_block_open(struct block_device *bdev, fmode_t mode)
{
- struct scsi_cd *cd;
- struct scsi_device *sdev;
- int ret = -ENXIO;
+ struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
+ struct scsi_device *sdev = cd->device;
+ int ret;
- cd = scsi_cd_get(bdev->bd_disk);
- if (!cd)
- goto out;
+ if (scsi_device_get(cd->device))
+ return -ENXIO;
- sdev = cd->device;
scsi_autopm_get_device(sdev);
if (bdev_check_media_change(bdev))
sr_revalidate_disk(cd);
@@ -541,9 +500,7 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode)
scsi_autopm_put_device(sdev);
if (ret)
- scsi_cd_put(cd);
-
-out:
+ scsi_device_put(cd->device);
return ret;
}
@@ -555,7 +512,7 @@ static void sr_block_release(struct gendisk *disk, fmode_t mode)
cdrom_release(&cd->cdi, mode);
mutex_unlock(&cd->lock);
- scsi_cd_put(cd);
+ scsi_device_put(cd->device);
}
static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
@@ -595,18 +552,24 @@ out:
static unsigned int sr_block_check_events(struct gendisk *disk,
unsigned int clearing)
{
- unsigned int ret = 0;
- struct scsi_cd *cd;
+ struct scsi_cd *cd = disk->private_data;
- cd = scsi_cd_get(disk);
- if (!cd)
+ if (atomic_read(&cd->device->disk_events_disable_depth))
return 0;
+ return cdrom_check_events(&cd->cdi, clearing);
+}
- if (!atomic_read(&cd->device->disk_events_disable_depth))
- ret = cdrom_check_events(&cd->cdi, clearing);
+static void sr_free_disk(struct gendisk *disk)
+{
+ struct scsi_cd *cd = disk->private_data;
- scsi_cd_put(cd);
- return ret;
+ spin_lock(&sr_index_lock);
+ clear_bit(MINOR(disk_devt(disk)), sr_index_bits);
+ spin_unlock(&sr_index_lock);
+
+ unregister_cdrom(&cd->cdi);
+ mutex_destroy(&cd->lock);
+ kfree(cd);
}
static const struct block_device_operations sr_bdops =
@@ -617,6 +580,7 @@ static const struct block_device_operations sr_bdops =
.ioctl = sr_block_ioctl,
.compat_ioctl = blkdev_compat_ptr_ioctl,
.check_events = sr_block_check_events,
+ .free_disk = sr_free_disk,
};
static int sr_open(struct cdrom_device_info *cdi, int purpose)
@@ -660,8 +624,6 @@ static int sr_probe(struct device *dev)
if (!cd)
goto fail;
- kref_init(&cd->kref);
-
disk = __alloc_disk_node(sdev->request_queue, NUMA_NO_NODE,
&sr_bio_compl_lkclass);
if (!disk)
@@ -692,7 +654,6 @@ static int sr_probe(struct device *dev)
cd->device = sdev;
cd->disk = disk;
- cd->driver = &sr_template;
cd->capacity = 0x1fffff;
cd->device->changed = 1; /* force recheck CD type */
cd->media_present = 1;
@@ -713,7 +674,7 @@ static int sr_probe(struct device *dev)
sr_vendor_init(cd);
set_capacity(disk, cd->capacity);
- disk->private_data = &cd->driver;
+ disk->private_data = cd;
if (register_cdrom(disk, &cd->cdi))
goto fail_minor;
@@ -728,10 +689,8 @@ static int sr_probe(struct device *dev)
sr_revalidate_disk(cd);
error = device_add_disk(&sdev->sdev_gendev, disk, NULL);
- if (error) {
- kref_put(&cd->kref, sr_kref_release);
- goto fail;
- }
+ if (error)
+ goto unregister_cdrom;
sdev_printk(KERN_DEBUG, sdev,
"Attached scsi CD-ROM %s\n", cd->cdi.name);
@@ -739,6 +698,8 @@ static int sr_probe(struct device *dev)
return 0;
+unregister_cdrom:
+ unregister_cdrom(&cd->cdi);
fail_minor:
spin_lock(&sr_index_lock);
clear_bit(minor, sr_index_bits);
@@ -1010,36 +971,6 @@ out_put_request:
return ret;
}
-
-/**
- * sr_kref_release - Called to free the scsi_cd structure
- * @kref: pointer to embedded kref
- *
- * sr_ref_mutex must be held entering this routine. Because it is
- * called on last put, you should always use the scsi_cd_get()
- * scsi_cd_put() helpers which manipulate the semaphore directly
- * and never do a direct kref_put().
- **/
-static void sr_kref_release(struct kref *kref)
-{
- struct scsi_cd *cd = container_of(kref, struct scsi_cd, kref);
- struct gendisk *disk = cd->disk;
-
- spin_lock(&sr_index_lock);
- clear_bit(MINOR(disk_devt(disk)), sr_index_bits);
- spin_unlock(&sr_index_lock);
-
- unregister_cdrom(&cd->cdi);
-
- disk->private_data = NULL;
-
- put_disk(disk);
-
- mutex_destroy(&cd->lock);
-
- kfree(cd);
-}
-
static int sr_remove(struct device *dev)
{
struct scsi_cd *cd = dev_get_drvdata(dev);
@@ -1047,11 +978,7 @@ static int sr_remove(struct device *dev)
scsi_autopm_get_device(cd->device);
del_gendisk(cd->disk);
- dev_set_drvdata(dev, NULL);
-
- mutex_lock(&sr_ref_mutex);
- kref_put(&cd->kref, sr_kref_release);
- mutex_unlock(&sr_ref_mutex);
+ put_disk(cd->disk);
return 0;
}
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index 339c624e04d8..1175f2e213b5 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -18,8 +18,6 @@
#ifndef _SR_H
#define _SR_H
-#include <linux/genhd.h>
-#include <linux/kref.h>
#include <linux/mutex.h>
#define MAX_RETRIES 3
@@ -33,7 +31,6 @@ struct scsi_device;
typedef struct scsi_cd {
- struct scsi_driver *driver;
unsigned capacity; /* size in blocks */
struct scsi_device *device;
unsigned int vendor; /* vendor code, see sr_vendor.c */
@@ -53,9 +50,6 @@ typedef struct scsi_cd {
struct cdrom_device_info cdi;
struct mutex lock;
- /* We hold gendisk and scsi_device references on probe and use
- * the refs on this kref to decide when to release them */
- struct kref kref;
struct gendisk *disk;
} Scsi_CD;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index e869e90e05af..ebe9412c86f4 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4276,7 +4276,6 @@ static int st_probe(struct device *dev)
goto out_buffer_free;
}
kref_init(&tpnt->kref);
- tpnt->driver = &st_template;
tpnt->device = SDp;
if (SDp->scsi_level <= 2)
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index c0ef0d9aaf8a..7a68eaba7e81 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -117,7 +117,6 @@ struct scsi_tape_stats {
/* The tape drive descriptor */
struct scsi_tape {
- struct scsi_driver *driver;
struct scsi_device *device;
struct mutex lock; /* For serialization */
struct completion wait; /* For SCSI commands */
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index 2d36a0715fca..8970068314ef 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -494,7 +494,7 @@ static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
if (!map_req)
return NULL;
- bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn);
+ bio = bio_alloc(NULL, hpb->pages_per_srgn, 0, GFP_KERNEL);
if (!bio) {
ufshpb_put_req(hpb, map_req);
return NULL;
@@ -2050,7 +2050,7 @@ static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
INIT_LIST_HEAD(&pre_req->list_req);
pre_req->req = NULL;
- pre_req->bio = bio_alloc(GFP_KERNEL, 1);
+ pre_req->bio = bio_alloc(NULL, 1, 0, GFP_KERNEL);
if (!pre_req->bio)
goto release_mem;
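
The ufshpb hunks track the 5.18 bio_alloc() signature change: the allocator now takes the target block_device, the vector count, the operation flags (opf), and the gfp mask. A hedged sketch of the new call shape; passing a NULL bdev, as ufshpb does, leaves bio_set_dev() for later, and opf 0 matches the old behaviour of filling in bi_opf afterwards:

    #include <linux/bio.h>

    static struct bio *demo_bio_alloc(unsigned short nr_vecs)
    {
            /* bio_alloc(bdev, nr_vecs, opf, gfp) */
            return bio_alloc(NULL, nr_vecs, 0, GFP_KERNEL);
    }
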
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 12c10a5e3d93..7f421600cb66 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -233,12 +233,11 @@ static void scsifront_gnttab_done(struct vscsifrnt_info *info,
return;
for (i = 0; i < shadow->nr_grants; i++) {
- if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
+ if (unlikely(!gnttab_try_end_foreign_access(shadow->gref[i]))) {
shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
"grant still in use by backend\n");
BUG();
}
- gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
}
kfree(shadow->sg);
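
The xen-scsifront hunk is part of the grant-table hardening: instead of querying foreign access and then ending it in two racy steps, gnttab_try_end_foreign_access() ends the grant atomically and reports whether the backend had already let go. A sketch under that assumption, with an illustrative grant reference:

    #include <linux/bug.h>
    #include <xen/grant_table.h>

    static void demo_end_grant(grant_ref_t ref)
    {
            /* true => access ended and the page is safe to reuse */
            if (!gnttab_try_end_foreign_access(ref))
                    BUG();	/* backend still holds the grant */
    }
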
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index a8562678c437..c5aae42673d3 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -13,6 +13,7 @@ source "drivers/soc/imx/Kconfig"
source "drivers/soc/ixp4xx/Kconfig"
source "drivers/soc/litex/Kconfig"
source "drivers/soc/mediatek/Kconfig"
+source "drivers/soc/microchip/Kconfig"
source "drivers/soc/qcom/Kconfig"
source "drivers/soc/renesas/Kconfig"
source "drivers/soc/rockchip/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index adb30c2d4fea..904eec2a7871 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -18,6 +18,7 @@ obj-y += ixp4xx/
obj-$(CONFIG_SOC_XWAY) += lantiq/
obj-$(CONFIG_LITEX_SOC_CONTROLLER) += litex/
obj-y += mediatek/
+obj-y += microchip/
obj-y += amlogic/
obj-y += qcom/
obj-y += renesas/
diff --git a/drivers/soc/amlogic/meson-secure-pwrc.c b/drivers/soc/amlogic/meson-secure-pwrc.c
index 59bd195fa9c9..a10a417a87db 100644
--- a/drivers/soc/amlogic/meson-secure-pwrc.c
+++ b/drivers/soc/amlogic/meson-secure-pwrc.c
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <dt-bindings/power/meson-a1-power.h>
+#include <dt-bindings/power/meson-s4-power.h>
#include <linux/arm-smccc.h>
#include <linux/firmware/meson/meson_sm.h>
#include <linux/module.h>
@@ -119,6 +120,18 @@ static struct meson_secure_pwrc_domain_desc a1_pwrc_domains[] = {
SEC_PD(RSA, 0),
};
+static struct meson_secure_pwrc_domain_desc s4_pwrc_domains[] = {
+ SEC_PD(S4_DOS_HEVC, 0),
+ SEC_PD(S4_DOS_VDEC, 0),
+ SEC_PD(S4_VPU_HDMI, 0),
+ SEC_PD(S4_USB_COMB, 0),
+ SEC_PD(S4_GE2D, 0),
+ /* ETH is for ethernet online wakeup, and should be always on */
+ SEC_PD(S4_ETH, GENPD_FLAG_ALWAYS_ON),
+ SEC_PD(S4_DEMOD, 0),
+ SEC_PD(S4_AUDIO, 0),
+};
+
static int meson_secure_pwrc_probe(struct platform_device *pdev)
{
int i;
@@ -187,11 +200,20 @@ static struct meson_secure_pwrc_domain_data meson_secure_a1_pwrc_data = {
.count = ARRAY_SIZE(a1_pwrc_domains),
};
+static struct meson_secure_pwrc_domain_data meson_secure_s4_pwrc_data = {
+ .domains = s4_pwrc_domains,
+ .count = ARRAY_SIZE(s4_pwrc_domains),
+};
+
static const struct of_device_id meson_secure_pwrc_match_table[] = {
{
.compatible = "amlogic,meson-a1-pwrc",
.data = &meson_secure_a1_pwrc_data,
},
+ {
+ .compatible = "amlogic,meson-s4-pwrc",
+ .data = &meson_secure_s4_pwrc_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, meson_secure_pwrc_match_table);
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index a490ad7e090f..b2d365ae0282 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -156,6 +156,9 @@ static const struct at91_soc socs[] __initconst = {
AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
AT91_CIDR_VERSION_MASK, SAMA5D28C_LD2G_EXID_MATCH,
"sama5d28c 256MiB LPDDR2 SiP", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D29CN_EXID_MATCH,
+ "sama5d29", "sama5d2"),
AT91_SOC(SAMA5D3_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
AT91_CIDR_VERSION_MASK, SAMA5D31_EXID_MATCH,
"sama5d31", "sama5d3"),
diff --git a/drivers/soc/atmel/soc.h b/drivers/soc/atmel/soc.h
index c3eb3c8f0834..2ecaa75b00f0 100644
--- a/drivers/soc/atmel/soc.h
+++ b/drivers/soc/atmel/soc.h
@@ -95,6 +95,7 @@ at91_soc_init(const struct at91_soc *socs);
#define SAMA5D28C_LD2G_EXID_MATCH 0x00000072
#define SAMA5D28CU_EXID_MATCH 0x00000010
#define SAMA5D28CN_EXID_MATCH 0x00000020
+#define SAMA5D29CN_EXID_MATCH 0x00000023
#define SAMA5D3_CIDR_MATCH 0x0a5c07c0
#define SAMA5D31_EXID_MATCH 0x00444300
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index 058b78fac5e3..0a3fb6c115f4 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -743,8 +743,8 @@ int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
full_mask = s->eqcr.pi_ci_mask;
if (!s->eqcr.available) {
eqcr_ci = s->eqcr.ci;
- p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
- s->eqcr.ci = *p & full_mask;
+ s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+ s->eqcr.ci &= full_mask;
s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
eqcr_ci, s->eqcr.ci);
if (!s->eqcr.available) {
@@ -887,8 +887,8 @@ int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
full_mask = s->eqcr.pi_ci_mask;
if (!s->eqcr.available) {
eqcr_ci = s->eqcr.ci;
- p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
- s->eqcr.ci = *p & full_mask;
+ s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+ s->eqcr.ci &= full_mask;
s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
eqcr_ci, s->eqcr.ci);
if (!s->eqcr.available)
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index 072473a16f4d..5ed2fc1c53a0 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -28,7 +28,6 @@ struct fsl_soc_die_attr {
static struct guts *guts;
static struct soc_device_attribute soc_dev_attr;
static struct soc_device *soc_dev;
-static struct device_node *root;
/* SoC die attribute definition for QorIQ platform */
@@ -138,7 +137,7 @@ static u32 fsl_guts_get_svr(void)
static int fsl_guts_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *root, *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
const struct fsl_soc_die_attr *soc_die;
const char *machine;
@@ -159,8 +158,14 @@ static int fsl_guts_probe(struct platform_device *pdev)
root = of_find_node_by_path("/");
if (of_property_read_string(root, "model", &machine))
of_property_read_string_index(root, "compatible", 0, &machine);
- if (machine)
- soc_dev_attr.machine = machine;
+ if (machine) {
+ soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
+ if (!soc_dev_attr.machine) {
+ of_node_put(root);
+ return -ENOMEM;
+ }
+ }
+ of_node_put(root);
svr = fsl_guts_get_svr();
soc_die = fsl_soc_die_match(svr, fsl_soc_die);
@@ -195,7 +200,6 @@ static int fsl_guts_probe(struct platform_device *pdev)
static int fsl_guts_remove(struct platform_device *dev)
{
soc_device_unregister(soc_dev);
- of_node_put(root);
return 0;
}
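
The guts.c hunk scopes the root-node lookup to probe() and copies the "model" string with devm_kstrdup() before dropping the node reference, since the string storage is owned by the devicetree node; the old module-lifetime static root kept that reference pinned and leaked it on probe failure. A hedged sketch of the general pattern, with illustrative names:

    #include <linux/device.h>
    #include <linux/of.h>

    static int demo_read_machine(struct device *dev, const char **out)
    {
            struct device_node *root = of_find_node_by_path("/");
            const char *machine = NULL;

            of_property_read_string(root, "model", &machine);
            if (machine)
                    *out = devm_kstrdup(dev, machine, GFP_KERNEL);
            /* drop the reference only after the string is copied */
            of_node_put(root);

            return (machine && !*out) ? -ENOMEM : 0;
    }
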
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 4d38c80f8be8..b3c226eb5292 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -147,7 +147,7 @@ EXPORT_SYMBOL(qe_issue_cmd);
* memory mapped space.
* The BRG clock is the QE clock divided by 2.
* It was set up long ago during the initial boot phase and is
- * is given to us.
+ * given to us.
* Baud rate clocks are zero-based in the driver code (as that maps
* to port numbers). Documentation uses 1-based numbering.
*/
@@ -421,7 +421,7 @@ static void qe_upload_microcode(const void *base,
for (i = 0; i < be32_to_cpu(ucode->count); i++)
iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
-
+
/* Set I-RAM Ready Register */
iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready);
}
diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c
index e277c827bdf3..a5e2d0e5ab51 100644
--- a/drivers/soc/fsl/qe/qe_io.c
+++ b/drivers/soc/fsl/qe/qe_io.c
@@ -35,6 +35,8 @@ int par_io_init(struct device_node *np)
if (ret)
return ret;
par_io = ioremap(res.start, resource_size(&res));
+ if (!par_io)
+ return -ENOMEM;
if (!of_property_read_u32(np, "num-ports", &num_ports))
num_par_io_ports = num_ports;
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 3e59d479d001..3cb123016b3e 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -382,7 +382,8 @@ static int imx_pgc_power_down(struct generic_pm_domain *genpd)
return 0;
out_clk_disable:
- clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
+ if (!domain->keep_clocks)
+ clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
return ret;
}
diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c
index 511e74f0db8a..122f9c884b38 100644
--- a/drivers/soc/imx/imx8m-blk-ctrl.c
+++ b/drivers/soc/imx/imx8m-blk-ctrl.c
@@ -15,6 +15,7 @@
#include <dt-bindings/power/imx8mm-power.h>
#include <dt-bindings/power/imx8mn-power.h>
+#include <dt-bindings/power/imx8mq-power.h>
#define BLK_SFT_RSTN 0x0
#define BLK_CLK_EN 0x4
@@ -589,6 +590,68 @@ static const struct imx8m_blk_ctrl_data imx8mn_disp_blk_ctl_dev_data = {
.num_domains = ARRAY_SIZE(imx8mn_disp_blk_ctl_domain_data),
};
+static int imx8mq_vpu_power_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct imx8m_blk_ctrl *bc = container_of(nb, struct imx8m_blk_ctrl,
+ power_nb);
+
+ if (action != GENPD_NOTIFY_ON && action != GENPD_NOTIFY_PRE_OFF)
+ return NOTIFY_OK;
+
+ /*
+ * The ADB in the VPUMIX domain has no separate reset and clock
+ * enable bits, but is ungated and reset together with the VPUs. The
+ * reset and clock enable inputs to the ADB are a logical OR of the
+ * VPU bits. In order to set the G2 fuse bits, the G2 clock must
+ * also be enabled.
+ */
+ regmap_set_bits(bc->regmap, BLK_SFT_RSTN, BIT(0) | BIT(1));
+ regmap_set_bits(bc->regmap, BLK_CLK_EN, BIT(0) | BIT(1));
+
+ if (action == GENPD_NOTIFY_ON) {
+ /*
+ * On power up we have no software backchannel to the GPC to
+ * wait for the ADB handshake to happen, so we just delay for a
+ * bit. On power down the GPC driver waits for the handshake.
+ */
+ udelay(5);
+
+ /* set "fuse" bits to enable the VPUs */
+ regmap_set_bits(bc->regmap, 0x8, 0xffffffff);
+ regmap_set_bits(bc->regmap, 0xc, 0xffffffff);
+ regmap_set_bits(bc->regmap, 0x10, 0xffffffff);
+ }
+
+ return NOTIFY_OK;
+}
+
+static const struct imx8m_blk_ctrl_domain_data imx8mq_vpu_blk_ctl_domain_data[] = {
+ [IMX8MQ_VPUBLK_PD_G1] = {
+ .name = "vpublk-g1",
+ .clk_names = (const char *[]){ "g1", },
+ .num_clks = 1,
+ .gpc_name = "g1",
+ .rst_mask = BIT(1),
+ .clk_mask = BIT(1),
+ },
+ [IMX8MQ_VPUBLK_PD_G2] = {
+ .name = "vpublk-g2",
+ .clk_names = (const char *[]){ "g2", },
+ .num_clks = 1,
+ .gpc_name = "g2",
+ .rst_mask = BIT(0),
+ .clk_mask = BIT(0),
+ },
+};
+
+static const struct imx8m_blk_ctrl_data imx8mq_vpu_blk_ctl_dev_data = {
+ .max_reg = 0x14,
+ .power_notifier_fn = imx8mq_vpu_power_notifier,
+ .domains = imx8mq_vpu_blk_ctl_domain_data,
+ .num_domains = ARRAY_SIZE(imx8mq_vpu_blk_ctl_domain_data),
+};
+
static const struct of_device_id imx8m_blk_ctrl_of_match[] = {
{
.compatible = "fsl,imx8mm-vpu-blk-ctrl",
@@ -600,6 +663,9 @@ static const struct of_device_id imx8m_blk_ctrl_of_match[] = {
.compatible = "fsl,imx8mn-disp-blk-ctrl",
.data = &imx8mn_disp_blk_ctl_dev_data
}, {
+ .compatible = "fsl,imx8mq-vpu-blk-ctrl",
+ .data = &imx8mq_vpu_blk_ctl_dev_data
+ }, {
/* Sentinel */
}
};
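
The imx8mq addition above is an instance of the genpd power-notifier pattern: the block controller reacts to its power domain turning on (or being about to turn off) and sequences the shared ADB reset/clock bits there, since the ADB has no controls of its own. Registration goes through dev_pm_genpd_add_notifier() elsewhere in the driver; a hedged sketch of the notifier shape, with illustrative names:

    #include <linux/notifier.h>
    #include <linux/pm_domain.h>

    static int demo_power_notifier(struct notifier_block *nb,
                                   unsigned long action, void *data)
    {
            if (action != GENPD_NOTIFY_ON && action != GENPD_NOTIFY_PRE_OFF)
                    return NOTIFY_OK;

            /* ungate clocks / deassert resets for the shared bridge */
            return NOTIFY_OK;
    }
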
diff --git a/drivers/soc/imx/soc-imx.c b/drivers/soc/imx/soc-imx.c
index 77bc12039c3d..fab668c83f98 100644
--- a/drivers/soc/imx/soc-imx.c
+++ b/drivers/soc/imx/soc-imx.c
@@ -40,9 +40,6 @@ static int __init imx_soc_device_init(void)
if (!__mxc_cpu_type)
return 0;
- if (of_machine_is_compatible("fsl,ls1021a"))
- return 0;
-
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
diff --git a/drivers/soc/ixp4xx/Kconfig b/drivers/soc/ixp4xx/Kconfig
index e3eb19b85fa4..c55f0c9ae513 100644
--- a/drivers/soc/ixp4xx/Kconfig
+++ b/drivers/soc/ixp4xx/Kconfig
@@ -12,6 +12,7 @@ config IXP4XX_QMGR
config IXP4XX_NPE
tristate "IXP4xx Network Processor Engine support"
select FW_LOADER
+ select MFD_SYSCON
help
This driver supports IXP4xx built-in network coprocessors
and is automatically selected by Ethernet and HSS drivers.
diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c
index f490c4ca51f5..613935cb6a48 100644
--- a/drivers/soc/ixp4xx/ixp4xx-npe.c
+++ b/drivers/soc/ixp4xx/ixp4xx-npe.c
@@ -16,6 +16,7 @@
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -284,6 +285,7 @@ static int __must_check npe_logical_reg_write32(struct npe *npe, u32 addr,
static int npe_reset(struct npe *npe)
{
+ u32 reset_bit = (IXP4XX_FEATURE_RESET_NPEA << npe->id);
u32 val, ctl, exec_count, ctx_reg2;
int i;
@@ -380,16 +382,19 @@ static int npe_reset(struct npe *npe)
__raw_writel(0, &npe->regs->action_points[3]);
__raw_writel(0, &npe->regs->watch_count);
- val = ixp4xx_read_feature_bits();
+ /*
+ * We need to work on cached values here because the register
+ * will read inverted but needs to be written non-inverted.
+ */
+ val = cpu_ixp4xx_features(npe->rmap);
/* reset the NPE */
- ixp4xx_write_feature_bits(val &
- ~(IXP4XX_FEATURE_RESET_NPEA << npe->id));
+ regmap_write(npe->rmap, IXP4XX_EXP_CNFG2, val & ~reset_bit);
/* deassert reset */
- ixp4xx_write_feature_bits(val |
- (IXP4XX_FEATURE_RESET_NPEA << npe->id));
+ regmap_write(npe->rmap, IXP4XX_EXP_CNFG2, val | reset_bit);
+
for (i = 0; i < MAX_RETRIES; i++) {
- if (ixp4xx_read_feature_bits() &
- (IXP4XX_FEATURE_RESET_NPEA << npe->id))
+ val = cpu_ixp4xx_features(npe->rmap);
+ if (val & reset_bit)
break; /* NPE is back alive */
udelay(1);
}
@@ -683,6 +688,14 @@ static int ixp4xx_npe_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct resource *res;
+ struct regmap *rmap;
+ u32 val;
+
+ /* This system has only one syscon, so fetch it */
+ rmap = syscon_regmap_lookup_by_compatible("syscon");
+ if (IS_ERR(rmap))
+ return dev_err_probe(dev, PTR_ERR(rmap),
+ "failed to look up syscon\n");
for (i = 0; i < NPE_COUNT; i++) {
struct npe *npe = &npe_tab[i];
@@ -691,8 +704,9 @@ static int ixp4xx_npe_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- if (!(ixp4xx_read_feature_bits() &
- (IXP4XX_FEATURE_RESET_NPEA << i))) {
+ val = cpu_ixp4xx_features(rmap);
+
+ if (!(val & (IXP4XX_FEATURE_RESET_NPEA << i))) {
dev_info(dev, "NPE%d at %pR not available\n",
i, res);
continue; /* NPE already disabled or not present */
@@ -700,6 +714,7 @@ static int ixp4xx_npe_probe(struct platform_device *pdev)
npe->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(npe->regs))
return PTR_ERR(npe->regs);
+ npe->rmap = rmap;
if (npe_reset(npe)) {
dev_info(dev, "NPE%d at %pR does not reset\n",
diff --git a/drivers/soc/mediatek/mt8167-pm-domains.h b/drivers/soc/mediatek/mt8167-pm-domains.h
index 15559ddf26e4..4d6c32759606 100644
--- a/drivers/soc/mediatek/mt8167-pm-domains.h
+++ b/drivers/soc/mediatek/mt8167-pm-domains.h
@@ -18,6 +18,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = {
.name = "mm",
.sta_mask = PWR_STATUS_DISP,
.ctl_offs = SPM_DIS_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.bp_infracfg = {
@@ -30,6 +32,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = {
.name = "vdec",
.sta_mask = PWR_STATUS_VDEC,
.ctl_offs = SPM_VDE_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.caps = MTK_SCPD_ACTIVE_WAKEUP,
@@ -38,6 +42,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = {
.name = "isp",
.sta_mask = PWR_STATUS_ISP,
.ctl_offs = SPM_ISP_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(13, 12),
.caps = MTK_SCPD_ACTIVE_WAKEUP,
@@ -46,6 +52,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = {
.name = "mfg_async",
.sta_mask = MT8167_PWR_STATUS_MFG_ASYNC,
.ctl_offs = SPM_MFG_ASYNC_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
.sram_pdn_bits = 0,
.sram_pdn_ack_bits = 0,
.bp_infracfg = {
@@ -57,6 +65,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = {
.name = "mfg_2d",
.sta_mask = MT8167_PWR_STATUS_MFG_2D,
.ctl_offs = SPM_MFG_2D_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
},
@@ -64,6 +74,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = {
.name = "mfg",
.sta_mask = PWR_STATUS_MFG,
.ctl_offs = SPM_MFG_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
},
@@ -71,6 +83,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = {
.name = "conn",
.sta_mask = PWR_STATUS_CONN,
.ctl_offs = SPM_CONN_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = 0,
.caps = MTK_SCPD_ACTIVE_WAKEUP,
@@ -85,8 +99,6 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = {
static const struct scpsys_soc_data mt8167_scpsys_data = {
.domains_data = scpsys_domain_data_mt8167,
.num_domains = ARRAY_SIZE(scpsys_domain_data_mt8167),
- .pwr_sta_offs = SPM_PWR_STATUS,
- .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
};
#endif /* __SOC_MEDIATEK_MT8167_PM_DOMAINS_H */
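
The MediaTek pm-domains hunks move pwr_sta_offs/pwr_sta2nd_offs out of the SoC-wide scpsys_soc_data and into each scpsys_domain_data entry, so a single driver can describe SoCs whose domains report status through different registers. A hedged sketch of the resulting per-domain layout; field names follow the tables above, values are illustrative:

    static const struct scpsys_domain_data demo_domain = {
            .name            = "demo",
            .sta_mask        = BIT(3),
            .ctl_offs        = 0x0300,
            .pwr_sta_offs    = 0x0180,	/* moved from scpsys_soc_data */
            .pwr_sta2nd_offs = 0x0184,	/* moved from scpsys_soc_data */
    };
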
diff --git a/drivers/soc/mediatek/mt8173-pm-domains.h b/drivers/soc/mediatek/mt8173-pm-domains.h
index 714fa92575df..1a5dc63b7357 100644
--- a/drivers/soc/mediatek/mt8173-pm-domains.h
+++ b/drivers/soc/mediatek/mt8173-pm-domains.h
@@ -15,6 +15,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
.name = "vdec",
.sta_mask = PWR_STATUS_VDEC,
.ctl_offs = SPM_VDE_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
@@ -22,6 +24,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
.name = "venc",
.sta_mask = PWR_STATUS_VENC,
.ctl_offs = SPM_VEN_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
},
@@ -29,6 +33,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
.name = "isp",
.sta_mask = PWR_STATUS_ISP,
.ctl_offs = SPM_ISP_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(13, 12),
},
@@ -36,6 +42,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
.name = "mm",
.sta_mask = PWR_STATUS_DISP,
.ctl_offs = SPM_DIS_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.bp_infracfg = {
@@ -47,6 +55,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
.name = "venc_lt",
.sta_mask = PWR_STATUS_VENC_LT,
.ctl_offs = SPM_VEN2_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
},
@@ -54,6 +64,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
.name = "audio",
.sta_mask = PWR_STATUS_AUDIO,
.ctl_offs = SPM_AUDIO_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
},
@@ -61,6 +73,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
.name = "usb",
.sta_mask = PWR_STATUS_USB,
.ctl_offs = SPM_USB_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
.caps = MTK_SCPD_ACTIVE_WAKEUP,
@@ -69,6 +83,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
.name = "mfg_async",
.sta_mask = PWR_STATUS_MFG_ASYNC,
.ctl_offs = SPM_MFG_ASYNC_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = 0,
.caps = MTK_SCPD_DOMAIN_SUPPLY,
@@ -77,6 +93,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
.name = "mfg_2d",
.sta_mask = PWR_STATUS_MFG_2D,
.ctl_offs = SPM_MFG_2D_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(13, 12),
},
@@ -84,6 +102,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
.name = "mfg",
.sta_mask = PWR_STATUS_MFG,
.ctl_offs = SPM_MFG_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
.sram_pdn_bits = GENMASK(13, 8),
.sram_pdn_ack_bits = GENMASK(21, 16),
.bp_infracfg = {
@@ -98,8 +118,6 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
static const struct scpsys_soc_data mt8173_scpsys_data = {
.domains_data = scpsys_domain_data_mt8173,
.num_domains = ARRAY_SIZE(scpsys_domain_data_mt8173),
- .pwr_sta_offs = SPM_PWR_STATUS,
- .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
};
#endif /* __SOC_MEDIATEK_MT8173_PM_DOMAINS_H */
diff --git a/drivers/soc/mediatek/mt8183-mmsys.h b/drivers/soc/mediatek/mt8183-mmsys.h
index 9dee485807c9..0c021f4b76d2 100644
--- a/drivers/soc/mediatek/mt8183-mmsys.h
+++ b/drivers/soc/mediatek/mt8183-mmsys.h
@@ -25,6 +25,8 @@
#define MT8183_RDMA0_SOUT_COLOR0 0x1
#define MT8183_RDMA1_SOUT_DSI0 0x1
+#define MT8183_MMSYS_SW0_RST_B 0x140
+
static const struct mtk_mmsys_routes mmsys_mt8183_routing_table[] = {
{
DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0,
diff --git a/drivers/soc/mediatek/mt8183-pm-domains.h b/drivers/soc/mediatek/mt8183-pm-domains.h
index 98a9940d05fb..71b8757e552d 100644
--- a/drivers/soc/mediatek/mt8183-pm-domains.h
+++ b/drivers/soc/mediatek/mt8183-pm-domains.h
@@ -15,6 +15,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
.name = "audio",
.sta_mask = PWR_STATUS_AUDIO,
.ctl_offs = 0x0314,
+ .pwr_sta_offs = 0x0180,
+ .pwr_sta2nd_offs = 0x0184,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
},
@@ -22,6 +24,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
.name = "conn",
.sta_mask = PWR_STATUS_CONN,
.ctl_offs = 0x032c,
+ .pwr_sta_offs = 0x0180,
+ .pwr_sta2nd_offs = 0x0184,
.sram_pdn_bits = 0,
.sram_pdn_ack_bits = 0,
.bp_infracfg = {
@@ -33,6 +37,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
.name = "mfg_async",
.sta_mask = PWR_STATUS_MFG_ASYNC,
.ctl_offs = 0x0334,
+ .pwr_sta_offs = 0x0180,
+ .pwr_sta2nd_offs = 0x0184,
.sram_pdn_bits = 0,
.sram_pdn_ack_bits = 0,
},
@@ -40,6 +46,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
.name = "mfg",
.sta_mask = PWR_STATUS_MFG,
.ctl_offs = 0x0338,
+ .pwr_sta_offs = 0x0180,
+ .pwr_sta2nd_offs = 0x0184,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.caps = MTK_SCPD_DOMAIN_SUPPLY,
@@ -48,6 +56,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
.name = "mfg_core0",
.sta_mask = BIT(7),
.ctl_offs = 0x034c,
+ .pwr_sta_offs = 0x0180,
+ .pwr_sta2nd_offs = 0x0184,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
@@ -55,6 +65,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
.name = "mfg_core1",
.sta_mask = BIT(20),
.ctl_offs = 0x0310,
+ .pwr_sta_offs = 0x0180,
+ .pwr_sta2nd_offs = 0x0184,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
@@ -62,6 +74,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
.name = "mfg_2d",
.sta_mask = PWR_STATUS_MFG_2D,
.ctl_offs = 0x0348,
+ .pwr_sta_offs = 0x0180,
+ .pwr_sta2nd_offs = 0x0184,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.bp_infracfg = {
@@ -75,6 +89,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
.name = "disp",
.sta_mask = PWR_STATUS_DISP,
.ctl_offs = 0x030c,
+ .pwr_sta_offs = 0x0180,
+ .pwr_sta2nd_offs = 0x0184,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.bp_infracfg = {
@@ -94,6 +110,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
.name = "cam",
.sta_mask = BIT(25),
.ctl_offs = 0x0344,
+ .pwr_sta_offs = 0x0180,
+ .pwr_sta2nd_offs = 0x0184,
.sram_pdn_bits = GENMASK(9, 8),
.sram_pdn_ack_bits = GENMASK(13, 12),
.bp_infracfg = {
@@ -117,6 +135,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
.name = "isp",
.sta_mask = PWR_STATUS_ISP,
.ctl_offs = 0x0308,
+ .pwr_sta_offs = 0x0180,
+ .pwr_sta2nd_offs = 0x0184,
.sram_pdn_bits = GENMASK(9, 8),
.sram_pdn_ack_bits = GENMASK(13, 12),
.bp_infracfg = {
@@ -140,6 +160,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
.name = "vdec",
.sta_mask = BIT(31),
.ctl_offs = 0x0300,
+ .pwr_sta_offs = 0x0180,
+ .pwr_sta2nd_offs = 0x0184,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.bp_smi = {
@@ -153,6 +175,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
.name = "venc",
.sta_mask = PWR_STATUS_VENC,
.ctl_offs = 0x0304,
+ .pwr_sta_offs = 0x0180,
+ .pwr_sta2nd_offs = 0x0184,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
.bp_smi = {
@@ -166,6 +190,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
.name = "vpu_top",
.sta_mask = BIT(26),
.ctl_offs = 0x0324,
+ .pwr_sta_offs = 0x0180,
+ .pwr_sta2nd_offs = 0x0184,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.bp_infracfg = {
@@ -193,6 +219,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
.name = "vpu_core0",
.sta_mask = BIT(27),
.ctl_offs = 0x33c,
+ .pwr_sta_offs = 0x0180,
+ .pwr_sta2nd_offs = 0x0184,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(13, 12),
.bp_infracfg = {
@@ -211,6 +239,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
.name = "vpu_core1",
.sta_mask = BIT(28),
.ctl_offs = 0x0340,
+ .pwr_sta_offs = 0x0180,
+ .pwr_sta2nd_offs = 0x0184,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(13, 12),
.bp_infracfg = {
@@ -230,8 +260,6 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
static const struct scpsys_soc_data mt8183_scpsys_data = {
.domains_data = scpsys_domain_data_mt8183,
.num_domains = ARRAY_SIZE(scpsys_domain_data_mt8183),
- .pwr_sta_offs = 0x0180,
- .pwr_sta2nd_offs = 0x0184
};
#endif /* __SOC_MEDIATEK_MT8183_PM_DOMAINS_H */
diff --git a/drivers/soc/mediatek/mt8186-mmsys.h b/drivers/soc/mediatek/mt8186-mmsys.h
new file mode 100644
index 000000000000..c72ccf86ea28
--- /dev/null
+++ b/drivers/soc/mediatek/mt8186-mmsys.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8186_MMSYS_H
+#define __SOC_MEDIATEK_MT8186_MMSYS_H
+
+#define MT8186_MMSYS_OVL_CON 0xF04
+#define MT8186_MMSYS_OVL0_CON_MASK 0x3
+#define MT8186_MMSYS_OVL0_2L_CON_MASK 0xC
+#define MT8186_OVL0_GO_BLEND BIT(0)
+#define MT8186_OVL0_GO_BG BIT(1)
+#define MT8186_OVL0_2L_GO_BLEND BIT(2)
+#define MT8186_OVL0_2L_GO_BG BIT(3)
+#define MT8186_DISP_RDMA0_SOUT_SEL 0xF0C
+#define MT8186_RDMA0_SOUT_SEL_MASK 0xF
+#define MT8186_RDMA0_SOUT_TO_DSI0 (0)
+#define MT8186_RDMA0_SOUT_TO_COLOR0 (1)
+#define MT8186_RDMA0_SOUT_TO_DPI0 (2)
+#define MT8186_DISP_OVL0_2L_MOUT_EN 0xF14
+#define MT8186_OVL0_2L_MOUT_EN_MASK 0xF
+#define MT8186_OVL0_2L_MOUT_TO_RDMA0 BIT(0)
+#define MT8186_OVL0_2L_MOUT_TO_RDMA1 BIT(3)
+#define MT8186_DISP_OVL0_MOUT_EN 0xF18
+#define MT8186_OVL0_MOUT_EN_MASK 0xF
+#define MT8186_OVL0_MOUT_TO_RDMA0 BIT(0)
+#define MT8186_OVL0_MOUT_TO_RDMA1 BIT(3)
+#define MT8186_DISP_DITHER0_MOUT_EN 0xF20
+#define MT8186_DITHER0_MOUT_EN_MASK 0xF
+#define MT8186_DITHER0_MOUT_TO_DSI0 BIT(0)
+#define MT8186_DITHER0_MOUT_TO_RDMA1 BIT(2)
+#define MT8186_DITHER0_MOUT_TO_DPI0 BIT(3)
+#define MT8186_DISP_RDMA0_SEL_IN 0xF28
+#define MT8186_RDMA0_SEL_IN_MASK 0xF
+#define MT8186_RDMA0_FROM_OVL0 0
+#define MT8186_RDMA0_FROM_OVL0_2L 2
+#define MT8186_DISP_DSI0_SEL_IN 0xF30
+#define MT8186_DSI0_SEL_IN_MASK 0xF
+#define MT8186_DSI0_FROM_RDMA0 0
+#define MT8186_DSI0_FROM_DITHER0 1
+#define MT8186_DSI0_FROM_RDMA1 2
+#define MT8186_DISP_RDMA1_MOUT_EN 0xF3C
+#define MT8186_RDMA1_MOUT_EN_MASK 0xF
+#define MT8186_RDMA1_MOUT_TO_DPI0_SEL BIT(0)
+#define MT8186_RDMA1_MOUT_TO_DSI0_SEL BIT(2)
+#define MT8186_DISP_RDMA1_SEL_IN 0xF40
+#define MT8186_RDMA1_SEL_IN_MASK 0xF
+#define MT8186_RDMA1_FROM_OVL0 0
+#define MT8186_RDMA1_FROM_OVL0_2L 2
+#define MT8186_RDMA1_FROM_DITHER0 3
+#define MT8186_DISP_DPI0_SEL_IN 0xF44
+#define MT8186_DPI0_SEL_IN_MASK 0xF
+#define MT8186_DPI0_FROM_RDMA1 0
+#define MT8186_DPI0_FROM_DITHER0 1
+#define MT8186_DPI0_FROM_RDMA0 2
+
+#define MT8186_MMSYS_SW0_RST_B 0x160
+
+static const struct mtk_mmsys_routes mmsys_mt8186_routing_table[] = {
+ {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
+ MT8186_DISP_OVL0_MOUT_EN, MT8186_OVL0_MOUT_EN_MASK,
+ MT8186_OVL0_MOUT_TO_RDMA0
+ },
+ {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
+ MT8186_DISP_RDMA0_SEL_IN, MT8186_RDMA0_SEL_IN_MASK,
+ MT8186_RDMA0_FROM_OVL0
+ },
+ {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
+ MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_CON_MASK,
+ MT8186_OVL0_GO_BLEND
+ },
+ {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
+ MT8186_DISP_RDMA0_SOUT_SEL, MT8186_RDMA0_SOUT_SEL_MASK,
+ MT8186_RDMA0_SOUT_TO_COLOR0
+ },
+ {
+ DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
+ MT8186_DISP_DITHER0_MOUT_EN, MT8186_DITHER0_MOUT_EN_MASK,
+ MT8186_DITHER0_MOUT_TO_DSI0,
+ },
+ {
+ DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
+ MT8186_DISP_DSI0_SEL_IN, MT8186_DSI0_SEL_IN_MASK,
+ MT8186_DSI0_FROM_DITHER0
+ },
+ {
+ DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1,
+ MT8186_DISP_OVL0_2L_MOUT_EN, MT8186_OVL0_2L_MOUT_EN_MASK,
+ MT8186_OVL0_2L_MOUT_TO_RDMA1
+ },
+ {
+ DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1,
+ MT8186_DISP_RDMA1_SEL_IN, MT8186_RDMA1_SEL_IN_MASK,
+ MT8186_RDMA1_FROM_OVL0_2L
+ },
+ {
+ DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1,
+ MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_2L_CON_MASK,
+ MT8186_OVL0_2L_GO_BLEND
+ },
+ {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
+ MT8186_DISP_RDMA1_MOUT_EN, MT8186_RDMA1_MOUT_EN_MASK,
+ MT8186_RDMA1_MOUT_TO_DPI0_SEL
+ },
+ {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
+ MT8186_DISP_DPI0_SEL_IN, MT8186_DPI0_SEL_IN_MASK,
+ MT8186_DPI0_FROM_RDMA1
+ },
+};
+
+#endif /* __SOC_MEDIATEK_MT8186_MMSYS_H */
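The table above uses the five-field layout of struct mtk_mmsys_routes (from_comp, to_comp, addr, mask, val); a single logical link such as OVL0 -> RDMA0 needs several entries because the mux enable, the sel-in and the OVL blend control each live in different registers. A minimal sketch of how a consumer applies the matching entries, modeled on the read-modify-write pattern mtk-mmsys.c uses — the struct mtk_mmsys members (regs, data) and the helper name are assumptions for illustration:

/*
 * Illustrative sketch only: apply every routing-table entry linking
 * @cur to @next. Field names follow struct mtk_mmsys_routes; the
 * surrounding driver context (mmsys->regs, mmsys->data) is assumed.
 */
static void mmsys_route_connect(struct mtk_mmsys *mmsys, u32 cur, u32 next)
{
	const struct mtk_mmsys_routes *routes = mmsys->data->routes;
	u32 reg;
	int i;

	for (i = 0; i < mmsys->data->num_routes; i++) {
		if (cur != routes[i].from_comp || next != routes[i].to_comp)
			continue;
		reg = readl_relaxed(mmsys->regs + routes[i].addr);
		reg &= ~routes[i].mask;		/* clear the mux field */
		reg |= routes[i].val;		/* select the new source */
		writel_relaxed(reg, mmsys->regs + routes[i].addr);
	}
}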
diff --git a/drivers/soc/mediatek/mt8186-pm-domains.h b/drivers/soc/mediatek/mt8186-pm-domains.h
new file mode 100644
index 000000000000..bf2dd0cdc3a8
--- /dev/null
+++ b/drivers/soc/mediatek/mt8186-pm-domains.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+ */
+
+#ifndef __SOC_MEDIATEK_MT8186_PM_DOMAINS_H
+#define __SOC_MEDIATEK_MT8186_PM_DOMAINS_H
+
+#include "mtk-pm-domains.h"
+#include <dt-bindings/power/mt8186-power.h>
+
+/*
+ * MT8186 power domain support
+ */
+
+static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = {
+ [MT8186_POWER_DOMAIN_MFG0] = {
+ .name = "mfg0",
+ .sta_mask = BIT(2),
+ .ctl_offs = 0x308,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY,
+ },
+ [MT8186_POWER_DOMAIN_MFG1] = {
+ .name = "mfg1",
+ .sta_mask = BIT(3),
+ .ctl_offs = 0x30c,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_MFG1_STEP1,
+ MT8186_TOP_AXI_PROT_EN_1_SET,
+ MT8186_TOP_AXI_PROT_EN_1_CLR,
+ MT8186_TOP_AXI_PROT_EN_1_STA),
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_MFG1_STEP2,
+ MT8186_TOP_AXI_PROT_EN_SET,
+ MT8186_TOP_AXI_PROT_EN_CLR,
+ MT8186_TOP_AXI_PROT_EN_STA),
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_MFG1_STEP3,
+ MT8186_TOP_AXI_PROT_EN_SET,
+ MT8186_TOP_AXI_PROT_EN_CLR,
+ MT8186_TOP_AXI_PROT_EN_STA),
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_MFG1_STEP4,
+ MT8186_TOP_AXI_PROT_EN_1_SET,
+ MT8186_TOP_AXI_PROT_EN_1_CLR,
+ MT8186_TOP_AXI_PROT_EN_1_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8186_POWER_DOMAIN_MFG2] = {
+ .name = "mfg2",
+ .sta_mask = BIT(4),
+ .ctl_offs = 0x310,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8186_POWER_DOMAIN_MFG3] = {
+ .name = "mfg3",
+ .sta_mask = BIT(5),
+ .ctl_offs = 0x314,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8186_POWER_DOMAIN_SSUSB] = {
+ .name = "ssusb",
+ .sta_mask = BIT(20),
+ .ctl_offs = 0x9F0,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT8186_POWER_DOMAIN_SSUSB_P1] = {
+ .name = "ssusb_p1",
+ .sta_mask = BIT(19),
+ .ctl_offs = 0x9F4,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT8186_POWER_DOMAIN_DIS] = {
+ .name = "dis",
+ .sta_mask = BIT(21),
+ .ctl_offs = 0x354,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_DIS_STEP1,
+ MT8186_TOP_AXI_PROT_EN_1_SET,
+ MT8186_TOP_AXI_PROT_EN_1_CLR,
+ MT8186_TOP_AXI_PROT_EN_1_STA),
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_DIS_STEP2,
+ MT8186_TOP_AXI_PROT_EN_SET,
+ MT8186_TOP_AXI_PROT_EN_CLR,
+ MT8186_TOP_AXI_PROT_EN_STA),
+ },
+ },
+ [MT8186_POWER_DOMAIN_IMG] = {
+ .name = "img",
+ .sta_mask = BIT(13),
+ .ctl_offs = 0x334,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_IMG_STEP1,
+ MT8186_TOP_AXI_PROT_EN_1_SET,
+ MT8186_TOP_AXI_PROT_EN_1_CLR,
+ MT8186_TOP_AXI_PROT_EN_1_STA),
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_IMG_STEP2,
+ MT8186_TOP_AXI_PROT_EN_1_SET,
+ MT8186_TOP_AXI_PROT_EN_1_CLR,
+ MT8186_TOP_AXI_PROT_EN_1_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8186_POWER_DOMAIN_IMG2] = {
+ .name = "img2",
+ .sta_mask = BIT(14),
+ .ctl_offs = 0x338,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8186_POWER_DOMAIN_IPE] = {
+ .name = "ipe",
+ .sta_mask = BIT(15),
+ .ctl_offs = 0x33C,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_IPE_STEP1,
+ MT8186_TOP_AXI_PROT_EN_1_SET,
+ MT8186_TOP_AXI_PROT_EN_1_CLR,
+ MT8186_TOP_AXI_PROT_EN_1_STA),
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_IPE_STEP2,
+ MT8186_TOP_AXI_PROT_EN_1_SET,
+ MT8186_TOP_AXI_PROT_EN_1_CLR,
+ MT8186_TOP_AXI_PROT_EN_1_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8186_POWER_DOMAIN_CAM] = {
+ .name = "cam",
+ .sta_mask = BIT(23),
+ .ctl_offs = 0x35C,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_CAM_STEP1,
+ MT8186_TOP_AXI_PROT_EN_1_SET,
+ MT8186_TOP_AXI_PROT_EN_1_CLR,
+ MT8186_TOP_AXI_PROT_EN_1_STA),
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_CAM_STEP2,
+ MT8186_TOP_AXI_PROT_EN_1_SET,
+ MT8186_TOP_AXI_PROT_EN_1_CLR,
+ MT8186_TOP_AXI_PROT_EN_1_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8186_POWER_DOMAIN_CAM_RAWA] = {
+ .name = "cam_rawa",
+ .sta_mask = BIT(24),
+ .ctl_offs = 0x360,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8186_POWER_DOMAIN_CAM_RAWB] = {
+ .name = "cam_rawb",
+ .sta_mask = BIT(25),
+ .ctl_offs = 0x364,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8186_POWER_DOMAIN_VENC] = {
+ .name = "venc",
+ .sta_mask = BIT(18),
+ .ctl_offs = 0x348,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_VENC_STEP1,
+ MT8186_TOP_AXI_PROT_EN_1_SET,
+ MT8186_TOP_AXI_PROT_EN_1_CLR,
+ MT8186_TOP_AXI_PROT_EN_1_STA),
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_VENC_STEP2,
+ MT8186_TOP_AXI_PROT_EN_1_SET,
+ MT8186_TOP_AXI_PROT_EN_1_CLR,
+ MT8186_TOP_AXI_PROT_EN_1_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8186_POWER_DOMAIN_VDEC] = {
+ .name = "vdec",
+ .sta_mask = BIT(16),
+ .ctl_offs = 0x340,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_VDEC_STEP1,
+ MT8186_TOP_AXI_PROT_EN_1_SET,
+ MT8186_TOP_AXI_PROT_EN_1_CLR,
+ MT8186_TOP_AXI_PROT_EN_1_STA),
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_VDEC_STEP2,
+ MT8186_TOP_AXI_PROT_EN_1_SET,
+ MT8186_TOP_AXI_PROT_EN_1_CLR,
+ MT8186_TOP_AXI_PROT_EN_1_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8186_POWER_DOMAIN_WPE] = {
+ .name = "wpe",
+ .sta_mask = BIT(0),
+ .ctl_offs = 0x3F8,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_2_WPE_STEP1,
+ MT8186_TOP_AXI_PROT_EN_2_SET,
+ MT8186_TOP_AXI_PROT_EN_2_CLR,
+ MT8186_TOP_AXI_PROT_EN_2_STA),
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_2_WPE_STEP2,
+ MT8186_TOP_AXI_PROT_EN_2_SET,
+ MT8186_TOP_AXI_PROT_EN_2_CLR,
+ MT8186_TOP_AXI_PROT_EN_2_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8186_POWER_DOMAIN_CONN_ON] = {
+ .name = "conn_on",
+ .sta_mask = BIT(1),
+ .ctl_offs = 0x304,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .bp_infracfg = {
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_CONN_ON_STEP1,
+ MT8186_TOP_AXI_PROT_EN_1_SET,
+ MT8186_TOP_AXI_PROT_EN_1_CLR,
+ MT8186_TOP_AXI_PROT_EN_1_STA),
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP2,
+ MT8186_TOP_AXI_PROT_EN_SET,
+ MT8186_TOP_AXI_PROT_EN_CLR,
+ MT8186_TOP_AXI_PROT_EN_STA),
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP3,
+ MT8186_TOP_AXI_PROT_EN_SET,
+ MT8186_TOP_AXI_PROT_EN_CLR,
+ MT8186_TOP_AXI_PROT_EN_STA),
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP4,
+ MT8186_TOP_AXI_PROT_EN_SET,
+ MT8186_TOP_AXI_PROT_EN_CLR,
+ MT8186_TOP_AXI_PROT_EN_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT8186_POWER_DOMAIN_CSIRX_TOP] = {
+ .name = "csirx_top",
+ .sta_mask = BIT(6),
+ .ctl_offs = 0x318,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8186_POWER_DOMAIN_ADSP_AO] = {
+ .name = "adsp_ao",
+ .sta_mask = BIT(17),
+ .ctl_offs = 0x9FC,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8186_POWER_DOMAIN_ADSP_INFRA] = {
+ .name = "adsp_infra",
+ .sta_mask = BIT(10),
+ .ctl_offs = 0x9F8,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8186_POWER_DOMAIN_ADSP_TOP] = {
+ .name = "adsp_top",
+ .sta_mask = BIT(31),
+ .ctl_offs = 0x3E4,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_3_ADSP_TOP_STEP1,
+ MT8186_TOP_AXI_PROT_EN_3_SET,
+ MT8186_TOP_AXI_PROT_EN_3_CLR,
+ MT8186_TOP_AXI_PROT_EN_3_STA),
+ BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_3_ADSP_TOP_STEP2,
+ MT8186_TOP_AXI_PROT_EN_3_SET,
+ MT8186_TOP_AXI_PROT_EN_3_CLR,
+ MT8186_TOP_AXI_PROT_EN_3_STA),
+ },
+ .caps = MTK_SCPD_SRAM_ISO | MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP,
+ },
+};
+
+static const struct scpsys_soc_data mt8186_scpsys_data = {
+ .domains_data = scpsys_domain_data_mt8186,
+ .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8186),
+};
+
+#endif /* __SOC_MEDIATEK_MT8186_PM_DOMAINS_H */
diff --git a/drivers/soc/mediatek/mt8192-mmsys.h b/drivers/soc/mediatek/mt8192-mmsys.h
index 6f0a57044a7b..6aae0b12b6ff 100644
--- a/drivers/soc/mediatek/mt8192-mmsys.h
+++ b/drivers/soc/mediatek/mt8192-mmsys.h
@@ -53,7 +53,8 @@ static const struct mtk_mmsys_routes mmsys_mt8192_routing_table[] = {
MT8192_AAL0_SEL_IN_CCORR0
}, {
DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
- MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0
+ MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0,
+ MT8192_DSI0_SEL_IN_DITHER0
}, {
DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
MT8192_DISP_RDMA0_SOUT_SEL, MT8192_RDMA0_SOUT_COLOR0,
diff --git a/drivers/soc/mediatek/mt8192-pm-domains.h b/drivers/soc/mediatek/mt8192-pm-domains.h
index 543dda70de01..558c4ee4784a 100644
--- a/drivers/soc/mediatek/mt8192-pm-domains.h
+++ b/drivers/soc/mediatek/mt8192-pm-domains.h
@@ -15,6 +15,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.name = "audio",
.sta_mask = BIT(21),
.ctl_offs = 0x0354,
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.bp_infracfg = {
@@ -28,6 +30,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.name = "conn",
.sta_mask = PWR_STATUS_CONN,
.ctl_offs = 0x0304,
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = 0,
.sram_pdn_ack_bits = 0,
.bp_infracfg = {
@@ -50,6 +54,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.name = "mfg0",
.sta_mask = BIT(2),
.ctl_offs = 0x0308,
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
@@ -57,6 +63,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.name = "mfg1",
.sta_mask = BIT(3),
.ctl_offs = 0x030c,
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.bp_infracfg = {
@@ -82,6 +90,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.name = "mfg2",
.sta_mask = BIT(4),
.ctl_offs = 0x0310,
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
@@ -89,6 +99,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.name = "mfg3",
.sta_mask = BIT(5),
.ctl_offs = 0x0314,
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
@@ -96,6 +108,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.name = "mfg4",
.sta_mask = BIT(6),
.ctl_offs = 0x0318,
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
@@ -103,6 +117,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.name = "mfg5",
.sta_mask = BIT(7),
.ctl_offs = 0x031c,
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
@@ -110,6 +126,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.name = "mfg6",
.sta_mask = BIT(8),
.ctl_offs = 0x0320,
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
@@ -117,6 +135,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.name = "disp",
.sta_mask = BIT(20),
.ctl_offs = 0x0350,
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.bp_infracfg = {
@@ -146,6 +166,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.name = "ipe",
.sta_mask = BIT(14),
.ctl_offs = 0x0338,
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.bp_infracfg = {
@@ -163,6 +185,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.name = "isp",
.sta_mask = BIT(12),
.ctl_offs = 0x0330,
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.bp_infracfg = {
@@ -180,6 +204,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.name = "isp2",
.sta_mask = BIT(13),
.ctl_offs = 0x0334,
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.bp_infracfg = {
@@ -197,6 +223,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.name = "mdp",
.sta_mask = BIT(19),
.ctl_offs = 0x034c,
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.bp_infracfg = {
@@ -214,6 +242,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.name = "venc",
.sta_mask = BIT(17),
.ctl_offs = 0x0344,
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.bp_infracfg = {
@@ -231,6 +261,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.name = "vdec",
.sta_mask = BIT(15),
.ctl_offs = 0x033c,
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.bp_infracfg = {
@@ -248,6 +280,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.name = "vdec2",
.sta_mask = BIT(16),
.ctl_offs = 0x0340,
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
@@ -255,6 +289,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.name = "cam",
.sta_mask = BIT(23),
.ctl_offs = 0x035c,
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.bp_infracfg = {
@@ -284,6 +320,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.name = "cam_rawa",
.sta_mask = BIT(24),
.ctl_offs = 0x0360,
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
@@ -291,6 +329,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.name = "cam_rawb",
.sta_mask = BIT(25),
.ctl_offs = 0x0364,
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
@@ -298,6 +338,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.name = "cam_rawc",
.sta_mask = BIT(26),
.ctl_offs = 0x0368,
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
@@ -306,8 +348,6 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
static const struct scpsys_soc_data mt8192_scpsys_data = {
.domains_data = scpsys_domain_data_mt8192,
.num_domains = ARRAY_SIZE(scpsys_domain_data_mt8192),
- .pwr_sta_offs = 0x016c,
- .pwr_sta2nd_offs = 0x0170,
};
#endif /* __SOC_MEDIATEK_MT8192_PM_DOMAINS_H */
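One readability note on the masks in these tables: the new mt8186 table spells single-bit SRAM power-down masks as BIT(8)/BIT(12), while the mt8183/mt8192 tables spell the same masks as GENMASK(8, 8)/GENMASK(12, 12). A degenerate one-bit range is identical to the single bit, so the two forms are interchangeable; a two-line check against the kernel headers:

#include <linux/bits.h>
#include <linux/build_bug.h>

/* A single-bit GENMASK() range is the same mask as BIT(). */
static_assert(GENMASK(8, 8) == BIT(8));		/* both are 0x100 */
static_assert(GENMASK(12, 12) == BIT(12));	/* both are 0x1000 */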
diff --git a/drivers/soc/mediatek/mt8195-pm-domains.h b/drivers/soc/mediatek/mt8195-pm-domains.h
new file mode 100644
index 000000000000..938f4d51f5ae
--- /dev/null
+++ b/drivers/soc/mediatek/mt8195-pm-domains.h
@@ -0,0 +1,613 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+ */
+
+#ifndef __SOC_MEDIATEK_MT8195_PM_DOMAINS_H
+#define __SOC_MEDIATEK_MT8195_PM_DOMAINS_H
+
+#include "mtk-pm-domains.h"
+#include <dt-bindings/power/mt8195-power.h>
+
+/*
+ * MT8195 power domain support
+ */
+
+static const struct scpsys_domain_data scpsys_domain_data_mt8195[] = {
+ [MT8195_POWER_DOMAIN_PCIE_MAC_P0] = {
+ .name = "pcie_mac_p0",
+ .sta_mask = BIT(11),
+ .ctl_offs = 0x328,
+ .pwr_sta_offs = 0x174,
+ .pwr_sta2nd_offs = 0x178,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VDNR_PCIE_MAC_P0,
+ MT8195_TOP_AXI_PROT_EN_VDNR_SET,
+ MT8195_TOP_AXI_PROT_EN_VDNR_CLR,
+ MT8195_TOP_AXI_PROT_EN_VDNR_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VDNR_1_PCIE_MAC_P0,
+ MT8195_TOP_AXI_PROT_EN_VDNR_1_SET,
+ MT8195_TOP_AXI_PROT_EN_VDNR_1_CLR,
+ MT8195_TOP_AXI_PROT_EN_VDNR_1_STA1),
+ },
+ },
+ [MT8195_POWER_DOMAIN_PCIE_MAC_P1] = {
+ .name = "pcie_mac_p1",
+ .sta_mask = BIT(12),
+ .ctl_offs = 0x32C,
+ .pwr_sta_offs = 0x174,
+ .pwr_sta2nd_offs = 0x178,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VDNR_PCIE_MAC_P1,
+ MT8195_TOP_AXI_PROT_EN_VDNR_SET,
+ MT8195_TOP_AXI_PROT_EN_VDNR_CLR,
+ MT8195_TOP_AXI_PROT_EN_VDNR_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VDNR_1_PCIE_MAC_P1,
+ MT8195_TOP_AXI_PROT_EN_VDNR_1_SET,
+ MT8195_TOP_AXI_PROT_EN_VDNR_1_CLR,
+ MT8195_TOP_AXI_PROT_EN_VDNR_1_STA1),
+ },
+ },
+ [MT8195_POWER_DOMAIN_PCIE_PHY] = {
+ .name = "pcie_phy",
+ .sta_mask = BIT(13),
+ .ctl_offs = 0x330,
+ .pwr_sta_offs = 0x174,
+ .pwr_sta2nd_offs = 0x178,
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT8195_POWER_DOMAIN_SSUSB_PCIE_PHY] = {
+ .name = "ssusb_pcie_phy",
+ .sta_mask = BIT(14),
+ .ctl_offs = 0x334,
+ .pwr_sta_offs = 0x174,
+ .pwr_sta2nd_offs = 0x178,
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT8195_POWER_DOMAIN_CSI_RX_TOP] = {
+ .name = "csi_rx_top",
+ .sta_mask = BIT(18),
+ .ctl_offs = 0x3C4,
+ .pwr_sta_offs = 0x174,
+ .pwr_sta2nd_offs = 0x178,
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8195_POWER_DOMAIN_ETHER] = {
+ .name = "ether",
+ .sta_mask = BIT(3),
+ .ctl_offs = 0x344,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT8195_POWER_DOMAIN_ADSP] = {
+ .name = "adsp",
+ .sta_mask = BIT(10),
+ .ctl_offs = 0x360,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_2_ADSP,
+ MT8195_TOP_AXI_PROT_EN_2_SET,
+ MT8195_TOP_AXI_PROT_EN_2_CLR,
+ MT8195_TOP_AXI_PROT_EN_2_STA1),
+ },
+ .caps = MTK_SCPD_SRAM_ISO | MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT8195_POWER_DOMAIN_AUDIO] = {
+ .name = "audio",
+ .sta_mask = BIT(8),
+ .ctl_offs = 0x358,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_2_AUDIO,
+ MT8195_TOP_AXI_PROT_EN_2_SET,
+ MT8195_TOP_AXI_PROT_EN_2_CLR,
+ MT8195_TOP_AXI_PROT_EN_2_STA1),
+ },
+ },
+ [MT8195_POWER_DOMAIN_MFG0] = {
+ .name = "mfg0",
+ .sta_mask = BIT(1),
+ .ctl_offs = 0x300,
+ .pwr_sta_offs = 0x174,
+ .pwr_sta2nd_offs = 0x178,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY,
+ },
+ [MT8195_POWER_DOMAIN_MFG1] = {
+ .name = "mfg1",
+ .sta_mask = BIT(2),
+ .ctl_offs = 0x304,
+ .pwr_sta_offs = 0x174,
+ .pwr_sta2nd_offs = 0x178,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MFG1,
+ MT8195_TOP_AXI_PROT_EN_SET,
+ MT8195_TOP_AXI_PROT_EN_CLR,
+ MT8195_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_2_MFG1,
+ MT8195_TOP_AXI_PROT_EN_2_SET,
+ MT8195_TOP_AXI_PROT_EN_2_CLR,
+ MT8195_TOP_AXI_PROT_EN_2_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_1_MFG1,
+ MT8195_TOP_AXI_PROT_EN_1_SET,
+ MT8195_TOP_AXI_PROT_EN_1_CLR,
+ MT8195_TOP_AXI_PROT_EN_1_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_2_MFG1_2ND,
+ MT8195_TOP_AXI_PROT_EN_2_SET,
+ MT8195_TOP_AXI_PROT_EN_2_CLR,
+ MT8195_TOP_AXI_PROT_EN_2_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MFG1_2ND,
+ MT8195_TOP_AXI_PROT_EN_SET,
+ MT8195_TOP_AXI_PROT_EN_CLR,
+ MT8195_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_MFG1,
+ MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET,
+ MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR,
+ MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8195_POWER_DOMAIN_MFG2] = {
+ .name = "mfg2",
+ .sta_mask = BIT(3),
+ .ctl_offs = 0x308,
+ .pwr_sta_offs = 0x174,
+ .pwr_sta2nd_offs = 0x178,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8195_POWER_DOMAIN_MFG3] = {
+ .name = "mfg3",
+ .sta_mask = BIT(4),
+ .ctl_offs = 0x30C,
+ .pwr_sta_offs = 0x174,
+ .pwr_sta2nd_offs = 0x178,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8195_POWER_DOMAIN_MFG4] = {
+ .name = "mfg4",
+ .sta_mask = BIT(5),
+ .ctl_offs = 0x310,
+ .pwr_sta_offs = 0x174,
+ .pwr_sta2nd_offs = 0x178,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8195_POWER_DOMAIN_MFG5] = {
+ .name = "mfg5",
+ .sta_mask = BIT(6),
+ .ctl_offs = 0x314,
+ .pwr_sta_offs = 0x174,
+ .pwr_sta2nd_offs = 0x178,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8195_POWER_DOMAIN_MFG6] = {
+ .name = "mfg6",
+ .sta_mask = BIT(7),
+ .ctl_offs = 0x318,
+ .pwr_sta_offs = 0x174,
+ .pwr_sta2nd_offs = 0x178,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8195_POWER_DOMAIN_VPPSYS0] = {
+ .name = "vppsys0",
+ .sta_mask = BIT(11),
+ .ctl_offs = 0x364,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VPPSYS0,
+ MT8195_TOP_AXI_PROT_EN_SET,
+ MT8195_TOP_AXI_PROT_EN_CLR,
+ MT8195_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS0,
+ MT8195_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_2_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VPPSYS0_2ND,
+ MT8195_TOP_AXI_PROT_EN_SET,
+ MT8195_TOP_AXI_PROT_EN_CLR,
+ MT8195_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS0_2ND,
+ MT8195_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_2_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VPPSYS0,
+ MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET,
+ MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR,
+ MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1),
+ },
+ },
+ [MT8195_POWER_DOMAIN_VDOSYS0] = {
+ .name = "vdosys0",
+ .sta_mask = BIT(13),
+ .ctl_offs = 0x36C,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VDOSYS0,
+ MT8195_TOP_AXI_PROT_EN_MM_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VDOSYS0,
+ MT8195_TOP_AXI_PROT_EN_SET,
+ MT8195_TOP_AXI_PROT_EN_CLR,
+ MT8195_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VDOSYS0,
+ MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET,
+ MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR,
+ MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1),
+ },
+ },
+ [MT8195_POWER_DOMAIN_VPPSYS1] = {
+ .name = "vppsys1",
+ .sta_mask = BIT(12),
+ .ctl_offs = 0x368,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VPPSYS1,
+ MT8195_TOP_AXI_PROT_EN_MM_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VPPSYS1_2ND,
+ MT8195_TOP_AXI_PROT_EN_MM_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS1,
+ MT8195_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_2_STA1),
+ },
+ },
+ [MT8195_POWER_DOMAIN_VDOSYS1] = {
+ .name = "vdosys1",
+ .sta_mask = BIT(14),
+ .ctl_offs = 0x370,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VDOSYS1,
+ MT8195_TOP_AXI_PROT_EN_MM_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VDOSYS1_2ND,
+ MT8195_TOP_AXI_PROT_EN_MM_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VDOSYS1,
+ MT8195_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_2_STA1),
+ },
+ },
+ [MT8195_POWER_DOMAIN_DP_TX] = {
+ .name = "dp_tx",
+ .sta_mask = BIT(16),
+ .ctl_offs = 0x378,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VDNR_1_DP_TX,
+ MT8195_TOP_AXI_PROT_EN_VDNR_1_SET,
+ MT8195_TOP_AXI_PROT_EN_VDNR_1_CLR,
+ MT8195_TOP_AXI_PROT_EN_VDNR_1_STA1),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8195_POWER_DOMAIN_EPD_TX] = {
+ .name = "epd_tx",
+ .sta_mask = BIT(17),
+ .ctl_offs = 0x37C,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VDNR_1_EPD_TX,
+ MT8195_TOP_AXI_PROT_EN_VDNR_1_SET,
+ MT8195_TOP_AXI_PROT_EN_VDNR_1_CLR,
+ MT8195_TOP_AXI_PROT_EN_VDNR_1_STA1),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8195_POWER_DOMAIN_HDMI_TX] = {
+ .name = "hdmi_tx",
+ .sta_mask = BIT(18),
+ .ctl_offs = 0x380,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT8195_POWER_DOMAIN_WPESYS] = {
+ .name = "wpesys",
+ .sta_mask = BIT(15),
+ .ctl_offs = 0x374,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_WPESYS,
+ MT8195_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_2_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_WPESYS,
+ MT8195_TOP_AXI_PROT_EN_MM_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_WPESYS_2ND,
+ MT8195_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_2_STA1),
+ },
+ },
+ [MT8195_POWER_DOMAIN_VDEC0] = {
+ .name = "vdec0",
+ .sta_mask = BIT(20),
+ .ctl_offs = 0x388,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VDEC0,
+ MT8195_TOP_AXI_PROT_EN_MM_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VDEC0,
+ MT8195_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_2_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VDEC0_2ND,
+ MT8195_TOP_AXI_PROT_EN_MM_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VDEC0_2ND,
+ MT8195_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_2_STA1),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8195_POWER_DOMAIN_VDEC1] = {
+ .name = "vdec1",
+ .sta_mask = BIT(21),
+ .ctl_offs = 0x38C,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VDEC1,
+ MT8195_TOP_AXI_PROT_EN_MM_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VDEC1_2ND,
+ MT8195_TOP_AXI_PROT_EN_MM_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8195_POWER_DOMAIN_VDEC2] = {
+ .name = "vdec2",
+ .sta_mask = BIT(22),
+ .ctl_offs = 0x390,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VDEC2,
+ MT8195_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_2_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VDEC2_2ND,
+ MT8195_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_2_STA1),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8195_POWER_DOMAIN_VENC] = {
+ .name = "venc",
+ .sta_mask = BIT(23),
+ .ctl_offs = 0x394,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VENC,
+ MT8195_TOP_AXI_PROT_EN_MM_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VENC_2ND,
+ MT8195_TOP_AXI_PROT_EN_MM_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VENC,
+ MT8195_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_2_STA1),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8195_POWER_DOMAIN_VENC_CORE1] = {
+ .name = "venc_core1",
+ .sta_mask = BIT(24),
+ .ctl_offs = 0x398,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VENC_CORE1,
+ MT8195_TOP_AXI_PROT_EN_MM_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VENC_CORE1,
+ MT8195_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_2_STA1),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8195_POWER_DOMAIN_IMG] = {
+ .name = "img",
+ .sta_mask = BIT(29),
+ .ctl_offs = 0x3AC,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_IMG,
+ MT8195_TOP_AXI_PROT_EN_MM_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_IMG_2ND,
+ MT8195_TOP_AXI_PROT_EN_MM_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8195_POWER_DOMAIN_DIP] = {
+ .name = "dip",
+ .sta_mask = BIT(30),
+ .ctl_offs = 0x3B0,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8195_POWER_DOMAIN_IPE] = {
+ .name = "ipe",
+ .sta_mask = BIT(31),
+ .ctl_offs = 0x3B4,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_IPE,
+ MT8195_TOP_AXI_PROT_EN_MM_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_IPE,
+ MT8195_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_2_STA1),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8195_POWER_DOMAIN_CAM] = {
+ .name = "cam",
+ .sta_mask = BIT(25),
+ .ctl_offs = 0x39C,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_2_CAM,
+ MT8195_TOP_AXI_PROT_EN_2_SET,
+ MT8195_TOP_AXI_PROT_EN_2_CLR,
+ MT8195_TOP_AXI_PROT_EN_2_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_CAM,
+ MT8195_TOP_AXI_PROT_EN_MM_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_1_CAM,
+ MT8195_TOP_AXI_PROT_EN_1_SET,
+ MT8195_TOP_AXI_PROT_EN_1_CLR,
+ MT8195_TOP_AXI_PROT_EN_1_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_CAM_2ND,
+ MT8195_TOP_AXI_PROT_EN_MM_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_CAM,
+ MT8195_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8195_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8195_TOP_AXI_PROT_EN_MM_2_STA1),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8195_POWER_DOMAIN_CAM_RAWA] = {
+ .name = "cam_rawa",
+ .sta_mask = BIT(26),
+ .ctl_offs = 0x3A0,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8195_POWER_DOMAIN_CAM_RAWB] = {
+ .name = "cam_rawb",
+ .sta_mask = BIT(27),
+ .ctl_offs = 0x3A4,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8195_POWER_DOMAIN_CAM_MRAW] = {
+ .name = "cam_mraw",
+ .sta_mask = BIT(28),
+ .ctl_offs = 0x3A8,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+};
+
+static const struct scpsys_soc_data mt8195_scpsys_data = {
+ .domains_data = scpsys_domain_data_mt8195,
+ .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8195),
+};
+
+#endif /* __SOC_MEDIATEK_MT8195_PM_DOMAINS_H */
diff --git a/drivers/soc/mediatek/mtk-infracfg.c b/drivers/soc/mediatek/mtk-infracfg.c
index 0590b68e0d78..2acf19676af2 100644
--- a/drivers/soc/mediatek/mtk-infracfg.c
+++ b/drivers/soc/mediatek/mtk-infracfg.c
@@ -6,6 +6,7 @@
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
#include <linux/soc/mediatek/infracfg.h>
#include <asm/processor.h>
@@ -72,3 +73,21 @@ int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask,
return ret;
}
+
+static int __init mtk_infracfg_init(void)
+{
+ struct regmap *infracfg;
+
+ /*
+ * MT8192 has an experimental path to route GPU traffic to the DSU's
+ * Accelerator Coherency Port, which is inadvertently enabled by
+ * default. It turns out not to work, so disable it to prevent spurious
+ * GPU faults.
+ */
+ infracfg = syscon_regmap_lookup_by_compatible("mediatek,mt8192-infracfg");
+ if (!IS_ERR(infracfg))
+ regmap_set_bits(infracfg, MT8192_INFRA_CTRL,
+ MT8192_INFRA_CTRL_DISABLE_MFG2ACP);
+ return 0;
+}
+postcore_initcall(mtk_infracfg_init);
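Two details make this initcall safe on other SoCs: syscon_regmap_lookup_by_compatible() returns an ERR_PTR when no matching node exists, so the IS_ERR() check turns the whole function into a no-op, and regmap_set_bits() only ORs the named bits in. The latter is shorthand for regmap_update_bits() with mask equal to value; an equivalent open-coded form (the helper name is illustrative, the register macros come from <linux/soc/mediatek/infracfg.h> as above):

#include <linux/regmap.h>

/*
 * Equivalent to the regmap_set_bits() call above: a read-modify-write
 * that ORs in the mask bits and leaves the rest of the register alone.
 */
static int infra_disable_mfg2acp(struct regmap *infracfg)
{
	return regmap_update_bits(infracfg, MT8192_INFRA_CTRL,
				  MT8192_INFRA_CTRL_DISABLE_MFG2ACP,  /* mask */
				  MT8192_INFRA_CTRL_DISABLE_MFG2ACP); /* value */
}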
diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c
index 1e448f1ffefb..4fc4c2c9ea20 100644
--- a/drivers/soc/mediatek/mtk-mmsys.c
+++ b/drivers/soc/mediatek/mtk-mmsys.c
@@ -15,6 +15,7 @@
#include "mtk-mmsys.h"
#include "mt8167-mmsys.h"
#include "mt8183-mmsys.h"
+#include "mt8186-mmsys.h"
#include "mt8192-mmsys.h"
#include "mt8365-mmsys.h"
@@ -48,12 +49,21 @@ static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
.clk_driver = "clk-mt8173-mm",
.routes = mmsys_default_routing_table,
.num_routes = ARRAY_SIZE(mmsys_default_routing_table),
+ .sw0_rst_offset = MT8183_MMSYS_SW0_RST_B,
};
static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
.clk_driver = "clk-mt8183-mm",
.routes = mmsys_mt8183_routing_table,
.num_routes = ARRAY_SIZE(mmsys_mt8183_routing_table),
+ .sw0_rst_offset = MT8183_MMSYS_SW0_RST_B,
+};
+
+static const struct mtk_mmsys_driver_data mt8186_mmsys_driver_data = {
+ .clk_driver = "clk-mt8186-mm",
+ .routes = mmsys_mt8186_routing_table,
+ .num_routes = ARRAY_SIZE(mmsys_mt8186_routing_table),
+ .sw0_rst_offset = MT8186_MMSYS_SW0_RST_B,
};
static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = {
@@ -121,14 +131,14 @@ static int mtk_mmsys_reset_update(struct reset_controller_dev *rcdev, unsigned l
spin_lock_irqsave(&mmsys->lock, flags);
- reg = readl_relaxed(mmsys->regs + MMSYS_SW0_RST_B);
+ reg = readl_relaxed(mmsys->regs + mmsys->data->sw0_rst_offset);
if (assert)
reg &= ~BIT(id);
else
reg |= BIT(id);
- writel_relaxed(reg, mmsys->regs + MMSYS_SW0_RST_B);
+ writel_relaxed(reg, mmsys->regs + mmsys->data->sw0_rst_offset);
spin_unlock_irqrestore(&mmsys->lock, flags);
@@ -243,6 +253,10 @@ static const struct of_device_id of_match_mtk_mmsys[] = {
.data = &mt8183_mmsys_driver_data,
},
{
+ .compatible = "mediatek,mt8186-mmsys",
+ .data = &mt8186_mmsys_driver_data,
+ },
+ {
.compatible = "mediatek,mt8192-mmsys",
.data = &mt8192_mmsys_driver_data,
},
diff --git a/drivers/soc/mediatek/mtk-mmsys.h b/drivers/soc/mediatek/mtk-mmsys.h
index 8b0ed05117ea..77f37f8c715b 100644
--- a/drivers/soc/mediatek/mtk-mmsys.h
+++ b/drivers/soc/mediatek/mtk-mmsys.h
@@ -78,8 +78,6 @@
#define DSI_SEL_IN_RDMA 0x1
#define DSI_SEL_IN_MASK 0x1
-#define MMSYS_SW0_RST_B 0x140
-
struct mtk_mmsys_routes {
u32 from_comp;
u32 to_comp;
@@ -92,6 +90,7 @@ struct mtk_mmsys_driver_data {
const char *clk_driver;
const struct mtk_mmsys_routes *routes;
const unsigned int num_routes;
+ const u16 sw0_rst_offset;
};
/*
diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c
index 2ca55bb5a8be..aaf8fc1abb43 100644
--- a/drivers/soc/mediatek/mtk-mutex.c
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -26,6 +26,23 @@
#define INT_MUTEX BIT(1)
+#define MT8186_MUTEX_MOD_DISP_OVL0 0
+#define MT8186_MUTEX_MOD_DISP_OVL0_2L 1
+#define MT8186_MUTEX_MOD_DISP_RDMA0 2
+#define MT8186_MUTEX_MOD_DISP_COLOR0 4
+#define MT8186_MUTEX_MOD_DISP_CCORR0 5
+#define MT8186_MUTEX_MOD_DISP_AAL0 7
+#define MT8186_MUTEX_MOD_DISP_GAMMA0 8
+#define MT8186_MUTEX_MOD_DISP_POSTMASK0 9
+#define MT8186_MUTEX_MOD_DISP_DITHER0 10
+#define MT8186_MUTEX_MOD_DISP_RDMA1 17
+
+#define MT8186_MUTEX_SOF_SINGLE_MODE 0
+#define MT8186_MUTEX_SOF_DSI0 1
+#define MT8186_MUTEX_SOF_DPI0 2
+#define MT8186_MUTEX_EOF_DSI0 (MT8186_MUTEX_SOF_DSI0 << 6)
+#define MT8186_MUTEX_EOF_DPI0 (MT8186_MUTEX_SOF_DPI0 << 6)
+
#define MT8167_MUTEX_MOD_DISP_PWM 1
#define MT8167_MUTEX_MOD_DISP_OVL0 6
#define MT8167_MUTEX_MOD_DISP_OVL1 7
@@ -226,6 +243,19 @@ static const unsigned int mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA0] = MT8183_MUTEX_MOD_DISP_WDMA0,
};
+static const unsigned int mt8186_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_AAL0] = MT8186_MUTEX_MOD_DISP_AAL0,
+ [DDP_COMPONENT_CCORR] = MT8186_MUTEX_MOD_DISP_CCORR0,
+ [DDP_COMPONENT_COLOR0] = MT8186_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_DITHER] = MT8186_MUTEX_MOD_DISP_DITHER0,
+ [DDP_COMPONENT_GAMMA] = MT8186_MUTEX_MOD_DISP_GAMMA0,
+ [DDP_COMPONENT_OVL0] = MT8186_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_OVL_2L0] = MT8186_MUTEX_MOD_DISP_OVL0_2L,
+ [DDP_COMPONENT_POSTMASK0] = MT8186_MUTEX_MOD_DISP_POSTMASK0,
+ [DDP_COMPONENT_RDMA0] = MT8186_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA1] = MT8186_MUTEX_MOD_DISP_RDMA1,
+};
+
static const unsigned int mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8192_MUTEX_MOD_DISP_AAL0,
[DDP_COMPONENT_CCORR] = MT8192_MUTEX_MOD_DISP_CCORR0,
@@ -264,6 +294,12 @@ static const unsigned int mt8183_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
[MUTEX_SOF_DPI0] = MT8183_MUTEX_SOF_DPI0 | MT8183_MUTEX_EOF_DPI0,
};
+static const unsigned int mt8186_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
+ [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+ [MUTEX_SOF_DSI0] = MT8186_MUTEX_SOF_DSI0 | MT8186_MUTEX_EOF_DSI0,
+ [MUTEX_SOF_DPI0] = MT8186_MUTEX_SOF_DPI0 | MT8186_MUTEX_EOF_DPI0,
+};
+
static const struct mtk_mutex_data mt2701_mutex_driver_data = {
.mutex_mod = mt2701_mutex_mod,
.mutex_sof = mt2712_mutex_sof,
@@ -301,6 +337,13 @@ static const struct mtk_mutex_data mt8183_mutex_driver_data = {
.no_clk = true,
};
+static const struct mtk_mutex_data mt8186_mutex_driver_data = {
+ .mutex_mod = mt8186_mutex_mod,
+ .mutex_sof = mt8186_mutex_sof,
+ .mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_sof_reg = MT8183_MUTEX0_SOF0,
+};
+
static const struct mtk_mutex_data mt8192_mutex_driver_data = {
.mutex_mod = mt8192_mutex_mod,
.mutex_sof = mt8183_mutex_sof,
@@ -540,6 +583,8 @@ static const struct of_device_id mutex_driver_dt_match[] = {
.data = &mt8173_mutex_driver_data},
{ .compatible = "mediatek,mt8183-disp-mutex",
.data = &mt8183_mutex_driver_data},
+ { .compatible = "mediatek,mt8186-disp-mutex",
+ .data = &mt8186_mutex_driver_data},
{ .compatible = "mediatek,mt8192-disp-mutex",
.data = &mt8192_mutex_driver_data},
{},
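A short worked example of the MT8186 SOF/EOF packing used in the mutex table above: the end-of-frame source is the same code as the start-of-frame source shifted up by 6 bits (the << 6 in the MT8186_MUTEX_EOF_* macros), so each table entry combines both selections in one register value. For DSI0:

/* From the macros above: SOF code 1, EOF code 1 shifted to bit 6. */
#define MT8186_MUTEX_SOF_DSI0	1
#define MT8186_MUTEX_EOF_DSI0	(MT8186_MUTEX_SOF_DSI0 << 6)

/* mt8186_mutex_sof[MUTEX_SOF_DSI0]
 *	= MT8186_MUTEX_SOF_DSI0 | MT8186_MUTEX_EOF_DSI0
 *	= 0x01 | 0x40 = 0x41
 */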
diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
index b762bc40f56b..5ced254b082b 100644
--- a/drivers/soc/mediatek/mtk-pm-domains.c
+++ b/drivers/soc/mediatek/mtk-pm-domains.c
@@ -19,7 +19,9 @@
#include "mt8167-pm-domains.h"
#include "mt8173-pm-domains.h"
#include "mt8183-pm-domains.h"
+#include "mt8186-pm-domains.h"
#include "mt8192-pm-domains.h"
+#include "mt8195-pm-domains.h"
#define MTK_POLL_DELAY_US 10
#define MTK_POLL_TIMEOUT USEC_PER_SEC
@@ -60,10 +62,10 @@ static bool scpsys_domain_is_on(struct scpsys_domain *pd)
struct scpsys *scpsys = pd->scpsys;
u32 status, status2;
- regmap_read(scpsys->base, scpsys->soc_data->pwr_sta_offs, &status);
+ regmap_read(scpsys->base, pd->data->pwr_sta_offs, &status);
status &= pd->data->sta_mask;
- regmap_read(scpsys->base, scpsys->soc_data->pwr_sta2nd_offs, &status2);
+ regmap_read(scpsys->base, pd->data->pwr_sta2nd_offs, &status2);
status2 &= pd->data->sta_mask;
/* A domain is on when both status bits are set. */
@@ -443,6 +445,9 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
pd->genpd.power_off = scpsys_power_off;
pd->genpd.power_on = scpsys_power_on;
+ if (MTK_SCPD_CAPS(pd, MTK_SCPD_ACTIVE_WAKEUP))
+ pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
+
if (MTK_SCPD_CAPS(pd, MTK_SCPD_KEEP_DEFAULT_OFF))
pm_genpd_init(&pd->genpd, NULL, true);
else
@@ -563,9 +568,17 @@ static const struct of_device_id scpsys_of_match[] = {
.data = &mt8183_scpsys_data,
},
{
+ .compatible = "mediatek,mt8186-power-controller",
+ .data = &mt8186_scpsys_data,
+ },
+ {
.compatible = "mediatek,mt8192-power-controller",
.data = &mt8192_scpsys_data,
},
+ {
+ .compatible = "mediatek,mt8195-power-controller",
+ .data = &mt8195_scpsys_data,
+ },
{ }
};
diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h
index c5ac649ae51b..daa24e890dd4 100644
--- a/drivers/soc/mediatek/mtk-pm-domains.h
+++ b/drivers/soc/mediatek/mtk-pm-domains.h
@@ -37,7 +37,7 @@
#define PWR_STATUS_AUDIO BIT(24)
#define PWR_STATUS_USB BIT(25)
-#define SPM_MAX_BUS_PROT_DATA 5
+#define SPM_MAX_BUS_PROT_DATA 6
#define _BUS_PROT(_mask, _set, _clr, _sta, _update, _ignore) { \
.bus_prot_mask = (_mask), \
@@ -72,8 +72,6 @@ struct scpsys_bus_prot_data {
bool ignore_clr_ack;
};
-#define MAX_SUBSYS_CLKS 10
-
/**
* struct scpsys_domain_data - scp domain data for power on/off flow
* @name: The name of the power domain.
@@ -94,13 +92,13 @@ struct scpsys_domain_data {
u8 caps;
const struct scpsys_bus_prot_data bp_infracfg[SPM_MAX_BUS_PROT_DATA];
const struct scpsys_bus_prot_data bp_smi[SPM_MAX_BUS_PROT_DATA];
+ int pwr_sta_offs;
+ int pwr_sta2nd_offs;
};
struct scpsys_soc_data {
const struct scpsys_domain_data *domains_data;
int num_domains;
- int pwr_sta_offs;
- int pwr_sta2nd_offs;
};
#endif /* __SOC_MEDIATEK_MTK_PM_DOMAINS_H */
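For reference while reading the new domain tables: BUS_PROT_WR() and BUS_PROT_WR_IGN() both expand to the _BUS_PROT() initializer shown in this hunk and differ only in the trailing ignore_clr_ack flag — the _IGN variant skips polling for an acknowledgment after the bus-protection bits are cleared. Paraphrased from the definitions in mtk-pm-domains.h (verify the exact forms there):

#define BUS_PROT_WR(_mask, _set, _clr, _sta)		\
	_BUS_PROT(_mask, _set, _clr, _sta, false, false)
#define BUS_PROT_WR_IGN(_mask, _set, _clr, _sta)	\
	_BUS_PROT(_mask, _set, _clr, _sta, false, true)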
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index 952bc554f443..bf39a64f3ecc 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -30,6 +30,7 @@
#define PWRAP_GET_WACS_REQ(x) (((x) >> 19) & 0x00000001)
#define PWRAP_STATE_SYNC_IDLE0 BIT(20)
#define PWRAP_STATE_INIT_DONE0 BIT(21)
+#define PWRAP_STATE_INIT_DONE0_MT8186 BIT(22)
#define PWRAP_STATE_INIT_DONE1 BIT(15)
/* macro for WACS FSM */
@@ -77,6 +78,7 @@
#define PWRAP_CAP_INT1_EN BIT(3)
#define PWRAP_CAP_WDT_SRC1 BIT(4)
#define PWRAP_CAP_ARB BIT(5)
+#define PWRAP_CAP_ARB_MT8186 BIT(8)
/* defines for slave device wrapper registers */
enum dew_regs {
@@ -1063,6 +1065,55 @@ static int mt8516_regs[] = {
[PWRAP_MSB_FIRST] = 0x170,
};
+static int mt8186_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_RDDMY] = 0x20,
+ [PWRAP_CSHEXT_WRITE] = 0x24,
+ [PWRAP_CSHEXT_READ] = 0x28,
+ [PWRAP_CSLEXT_WRITE] = 0x2C,
+ [PWRAP_CSLEXT_READ] = 0x30,
+ [PWRAP_EXT_CK_WRITE] = 0x34,
+ [PWRAP_STAUPD_CTRL] = 0x3C,
+ [PWRAP_STAUPD_GRPEN] = 0x40,
+ [PWRAP_EINT_STA0_ADR] = 0x44,
+ [PWRAP_EINT_STA1_ADR] = 0x48,
+ [PWRAP_INT_CLR] = 0xC8,
+ [PWRAP_INT_FLG] = 0xC4,
+ [PWRAP_MAN_EN] = 0x7C,
+ [PWRAP_MAN_CMD] = 0x80,
+ [PWRAP_WACS0_EN] = 0x8C,
+ [PWRAP_WACS1_EN] = 0x94,
+ [PWRAP_WACS2_EN] = 0x9C,
+ [PWRAP_INIT_DONE0] = 0x90,
+ [PWRAP_INIT_DONE1] = 0x98,
+ [PWRAP_INIT_DONE2] = 0xA0,
+ [PWRAP_INT_EN] = 0xBC,
+ [PWRAP_INT1_EN] = 0xCC,
+ [PWRAP_INT1_FLG] = 0xD4,
+ [PWRAP_INT1_CLR] = 0xD8,
+ [PWRAP_TIMER_EN] = 0xF0,
+ [PWRAP_WDT_UNIT] = 0xF8,
+ [PWRAP_WDT_SRC_EN] = 0xFC,
+ [PWRAP_WDT_SRC_EN_1] = 0x100,
+ [PWRAP_WDT_FLG] = 0x104,
+ [PWRAP_SPMINF_STA] = 0x1B4,
+ [PWRAP_DCM_EN] = 0x1EC,
+ [PWRAP_DCM_DBC_PRD] = 0x1F0,
+ [PWRAP_GPSINF_0_STA] = 0x204,
+ [PWRAP_GPSINF_1_STA] = 0x208,
+ [PWRAP_WACS0_CMD] = 0xC00,
+ [PWRAP_WACS0_RDATA] = 0xC04,
+ [PWRAP_WACS0_VLDCLR] = 0xC08,
+ [PWRAP_WACS1_CMD] = 0xC10,
+ [PWRAP_WACS1_RDATA] = 0xC14,
+ [PWRAP_WACS1_VLDCLR] = 0xC18,
+ [PWRAP_WACS2_CMD] = 0xC20,
+ [PWRAP_WACS2_RDATA] = 0xC24,
+ [PWRAP_WACS2_VLDCLR] = 0xC28,
+};
+
enum pmic_type {
PMIC_MT6323,
PMIC_MT6351,
@@ -1083,6 +1134,7 @@ enum pwrap_type {
PWRAP_MT8135,
PWRAP_MT8173,
PWRAP_MT8183,
+ PWRAP_MT8186,
PWRAP_MT8195,
PWRAP_MT8516,
};
@@ -1535,6 +1587,7 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
case PWRAP_MT6779:
case PWRAP_MT6797:
case PWRAP_MT8173:
+ case PWRAP_MT8186:
case PWRAP_MT8516:
pwrap_writel(wrp, 1, PWRAP_CIPHER_EN);
break;
@@ -2069,6 +2122,19 @@ static struct pmic_wrapper_type pwrap_mt8516 = {
.init_soc_specific = NULL,
};
+static struct pmic_wrapper_type pwrap_mt8186 = {
+ .regs = mt8186_regs,
+ .type = PWRAP_MT8186,
+ .arb_en_all = 0xfb27f,
+ .int_en_all = 0xfffffffe, /* disable the watchdog-timeout interrupt (bit 1) */
+ .int1_en_all = 0x000017ff, /* disable the matching interrupt (bit 13) */
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .caps = PWRAP_CAP_INT1_EN | PWRAP_CAP_ARB_MT8186,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = NULL,
+};
+
static const struct of_device_id of_pwrap_match_tbl[] = {
{
.compatible = "mediatek,mt2701-pwrap",
@@ -2098,6 +2164,9 @@ static const struct of_device_id of_pwrap_match_tbl[] = {
.compatible = "mediatek,mt8183-pwrap",
.data = &pwrap_mt8183,
}, {
+ .compatible = "mediatek,mt8186-pwrap",
+ .data = &pwrap_mt8186,
+ }, {
.compatible = "mediatek,mt8195-pwrap",
.data = &pwrap_mt8195,
}, {
@@ -2209,6 +2278,8 @@ static int pwrap_probe(struct platform_device *pdev)
if (HAS_CAP(wrp->master->caps, PWRAP_CAP_ARB))
mask_done = PWRAP_STATE_INIT_DONE1;
+ else if (HAS_CAP(wrp->master->caps, PWRAP_CAP_ARB_MT8186))
+ mask_done = PWRAP_STATE_INIT_DONE0_MT8186;
else
mask_done = PWRAP_STATE_INIT_DONE0;
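The cap test driving this selection is an all-bits-set check, so a caps word can carry several capabilities and each is matched independently. Paraphrased from the HAS_CAP() definition in mtk-pmic-wrap.c, with the MT8186 values worked through:

#define HAS_CAP(_c, _x)	(((_c) & (_x)) == (_x))

/* pwrap_mt8186: caps = PWRAP_CAP_INT1_EN | PWRAP_CAP_ARB_MT8186
 *                    = BIT(3) | BIT(8)
 * HAS_CAP(caps, PWRAP_CAP_ARB)        -> false (BIT(5) not set)
 * HAS_CAP(caps, PWRAP_CAP_ARB_MT8186) -> true, so mask_done becomes
 *                                        PWRAP_STATE_INIT_DONE0_MT8186 (BIT(22))
 */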
diff --git a/drivers/soc/microchip/Kconfig b/drivers/soc/microchip/Kconfig
new file mode 100644
index 000000000000..eb656b33156b
--- /dev/null
+++ b/drivers/soc/microchip/Kconfig
@@ -0,0 +1,10 @@
+config POLARFIRE_SOC_SYS_CTRL
+ tristate "POLARFIRE_SOC_SYS_CTRL"
+ depends on POLARFIRE_SOC_MAILBOX
+ help
+ This driver adds support for the PolarFire SoC (MPFS) system controller.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mpfs-sys-controller.
+
+ If unsure, say N.
diff --git a/drivers/soc/microchip/Makefile b/drivers/soc/microchip/Makefile
new file mode 100644
index 000000000000..14489919fe4b
--- /dev/null
+++ b/drivers/soc/microchip/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_POLARFIRE_SOC_SYS_CTRL) += mpfs-sys-controller.o
diff --git a/drivers/soc/microchip/mpfs-sys-controller.c b/drivers/soc/microchip/mpfs-sys-controller.c
new file mode 100644
index 000000000000..6e20207b5756
--- /dev/null
+++ b/drivers/soc/microchip/mpfs-sys-controller.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip PolarFire SoC (MPFS) system controller driver
+ *
+ * Copyright (c) 2020-2021 Microchip Corporation. All rights reserved.
+ *
+ * Author: Conor Dooley <conor.dooley@microchip.com>
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/kref.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of_platform.h>
+#include <linux/mailbox_client.h>
+#include <linux/platform_device.h>
+#include <soc/microchip/mpfs.h>
+
+static DEFINE_MUTEX(transaction_lock);
+
+struct mpfs_sys_controller {
+ struct mbox_client client;
+ struct mbox_chan *chan;
+ struct completion c;
+ struct kref consumers;
+};
+
+int mpfs_blocking_transaction(struct mpfs_sys_controller *sys_controller, struct mpfs_mss_msg *msg)
+{
+ int ret, err;
+
+ err = mutex_lock_interruptible(&transaction_lock);
+ if (err)
+ return err;
+
+ reinit_completion(&sys_controller->c);
+
+ ret = mbox_send_message(sys_controller->chan, msg);
+ if (ret >= 0) {
+ if (wait_for_completion_timeout(&sys_controller->c, HZ)) {
+ ret = 0;
+ } else {
+ ret = -ETIMEDOUT;
+ dev_warn(sys_controller->client.dev,
+ "MPFS sys controller transaction timeout\n");
+ }
+ } else {
+ dev_err(sys_controller->client.dev,
+ "mpfs sys controller transaction returned %d\n", ret);
+ }
+
+ mutex_unlock(&transaction_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(mpfs_blocking_transaction);
+
+static void rx_callback(struct mbox_client *client, void *msg)
+{
+ struct mpfs_sys_controller *sys_controller =
+ container_of(client, struct mpfs_sys_controller, client);
+
+ complete(&sys_controller->c);
+}
+
+static void mpfs_sys_controller_delete(struct kref *kref)
+{
+ struct mpfs_sys_controller *sys_controller = container_of(kref, struct mpfs_sys_controller,
+ consumers);
+
+ mbox_free_channel(sys_controller->chan);
+ kfree(sys_controller);
+}
+
+static void mpfs_sys_controller_put(void *data)
+{
+ struct mpfs_sys_controller *sys_controller = data;
+
+ kref_put(&sys_controller->consumers, mpfs_sys_controller_delete);
+}
+
+static struct platform_device subdevs[] = {
+ {
+ .name = "mpfs-rng",
+ .id = -1,
+ },
+ {
+ .name = "mpfs-generic-service",
+ .id = -1,
+ }
+};
+
+static int mpfs_sys_controller_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mpfs_sys_controller *sys_controller;
+ int i, ret;
+
+ sys_controller = kzalloc(sizeof(*sys_controller), GFP_KERNEL);
+ if (!sys_controller)
+ return -ENOMEM;
+
+ sys_controller->client.dev = dev;
+ sys_controller->client.rx_callback = rx_callback;
+ sys_controller->client.tx_block = true;
+
+ sys_controller->chan = mbox_request_channel(&sys_controller->client, 0);
+ if (IS_ERR(sys_controller->chan)) {
+ ret = dev_err_probe(dev, PTR_ERR(sys_controller->chan),
+ "Failed to get mbox channel\n");
+ kfree(sys_controller);
+ return ret;
+ }
+
+ init_completion(&sys_controller->c);
+ kref_init(&sys_controller->consumers);
+
+ platform_set_drvdata(pdev, sys_controller);
+
+ dev_info(dev, "Registered MPFS system controller\n");
+
+ for (i = 0; i < ARRAY_SIZE(subdevs); i++) {
+ subdevs[i].dev.parent = dev;
+ if (platform_device_register(&subdevs[i]))
+ dev_warn(dev, "Error registering sub device %s\n", subdevs[i].name);
+ }
+
+ return 0;
+}
+
+static int mpfs_sys_controller_remove(struct platform_device *pdev)
+{
+ struct mpfs_sys_controller *sys_controller = platform_get_drvdata(pdev);
+
+ mpfs_sys_controller_put(sys_controller);
+
+ return 0;
+}
+
+static const struct of_device_id mpfs_sys_controller_of_match[] = {
+ {.compatible = "microchip,mpfs-sys-controller", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mpfs_sys_controller_of_match);
+
+struct mpfs_sys_controller *mpfs_sys_controller_get(struct device *dev)
+{
+ const struct of_device_id *match;
+ struct mpfs_sys_controller *sys_controller;
+ int ret;
+
+ if (!dev->parent)
+ goto err_no_device;
+
+ match = of_match_node(mpfs_sys_controller_of_match, dev->parent->of_node);
+ of_node_put(dev->parent->of_node);
+ if (!match)
+ goto err_no_device;
+
+ sys_controller = dev_get_drvdata(dev->parent);
+ if (!sys_controller)
+ goto err_bad_device;
+
+ if (!kref_get_unless_zero(&sys_controller->consumers))
+ goto err_bad_device;
+
+ ret = devm_add_action_or_reset(dev, mpfs_sys_controller_put, sys_controller);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return sys_controller;
+
+err_no_device:
+ dev_dbg(dev, "Parent device was not an MPFS system controller\n");
+ return ERR_PTR(-ENODEV);
+
+err_bad_device:
+ dev_dbg(dev, "MPFS system controller found but could not register as a sub device\n");
+ return ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL(mpfs_sys_controller_get);
+
+static struct platform_driver mpfs_sys_controller_driver = {
+ .driver = {
+ .name = "mpfs-sys-controller",
+ .of_match_table = mpfs_sys_controller_of_match,
+ },
+ .probe = mpfs_sys_controller_probe,
+ .remove = mpfs_sys_controller_remove,
+};
+module_platform_driver(mpfs_sys_controller_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_DESCRIPTION("MPFS system controller driver");
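[Editor's note] As context for the two exported symbols, a consumer (such as the mpfs-rng sub-device registered above) would pair them roughly as follows. This is a hypothetical sketch; the contents of struct mpfs_mss_msg come from soc/microchip/mpfs.h, not this file:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <soc/microchip/mpfs.h>

/* Hypothetical sub-device probe: take a reference on the parent
 * controller, then issue one blocking service request. */
static int mpfs_consumer_probe(struct platform_device *pdev)
{
	struct mpfs_sys_controller *sysctl;
	struct mpfs_mss_msg msg = {};	/* fields depend on the service */

	sysctl = mpfs_sys_controller_get(&pdev->dev);
	if (IS_ERR(sysctl))
		return PTR_ERR(sysctl);	/* may be -EPROBE_DEFER */

	/* Serialised by transaction_lock; 0, -ETIMEDOUT or a mailbox error. */
	return mpfs_blocking_transaction(sysctl, &msg);
}

The devm action registered by mpfs_sys_controller_get() drops the kref automatically when the consumer unbinds, so no explicit put is needed in this sketch.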
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index 82ca12c9328a..3caabd873322 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -653,7 +653,6 @@ static void apr_remove(struct rpmsg_device *rpdev)
pdr_handle_release(apr->pdr);
device_for_each_child(&rpdev->dev, NULL, apr_remove_device);
- flush_workqueue(apr->rxwq);
destroy_workqueue(apr->rxwq);
}
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index ec52f29c8867..eecafeded56f 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -29,17 +29,13 @@
#define ATTR1_FIXED_SIZE_SHIFT 0x03
#define ATTR1_PRIORITY_SHIFT 0x04
#define ATTR1_MAX_CAP_SHIFT 0x10
-#define ATTR0_RES_WAYS_MASK GENMASK(11, 0)
-#define ATTR0_BONUS_WAYS_MASK GENMASK(27, 16)
+#define ATTR0_RES_WAYS_MASK GENMASK(15, 0)
+#define ATTR0_BONUS_WAYS_MASK GENMASK(31, 16)
#define ATTR0_BONUS_WAYS_SHIFT 0x10
#define LLCC_STATUS_READ_DELAY 100
#define CACHE_LINE_SIZE_SHIFT 6
-#define LLCC_COMMON_HW_INFO 0x00030000
-#define LLCC_MAJOR_VERSION_MASK GENMASK(31, 24)
-
-#define LLCC_COMMON_STATUS0 0x0003000c
#define LLCC_LB_CNT_MASK GENMASK(31, 28)
#define LLCC_LB_CNT_SHIFT 28
@@ -52,9 +48,13 @@
#define LLCC_TRP_SCID_DIS_CAP_ALLOC 0x21f00
#define LLCC_TRP_PCB_ACT 0x21f04
#define LLCC_TRP_WRSC_EN 0x21f20
+#define LLCC_TRP_WRSC_CACHEABLE_EN 0x21f2c
#define BANK_OFFSET_STRIDE 0x80000
+#define LLCC_VERSION_2_0_0_0 0x02000000
+#define LLCC_VERSION_2_1_0_0 0x02010000
+
/**
* struct llcc_slice_config - Data associated with the llcc slice
* @usecase_id: Unique id for the client's use case
@@ -79,6 +79,8 @@
* collapse.
* @activate_on_init: Activate the slice immediately after it is programmed
* @write_scid_en: Bit enables write cache support for a given scid.
+ * @write_scid_cacheable_en: Enables write cache cacheable support for a
+ * given scid (not supported on v2.0 or older hardware).
*/
struct llcc_slice_config {
u32 usecase_id;
@@ -94,12 +96,19 @@ struct llcc_slice_config {
bool retain_on_pc;
bool activate_on_init;
bool write_scid_en;
+ bool write_scid_cacheable_en;
};
struct qcom_llcc_config {
const struct llcc_slice_config *sct_data;
int size;
bool need_llcc_cfg;
+ const u32 *reg_offset;
+};
+
+enum llcc_reg_offset {
+ LLCC_COMMON_HW_INFO,
+ LLCC_COMMON_STATUS0,
};
static const struct llcc_slice_config sc7180_data[] = {
@@ -217,42 +226,96 @@ static const struct llcc_slice_config sm8350_data[] = {
{ LLCC_CPUHWT, 5, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
};
+static const struct llcc_slice_config sm8450_data[] = {
+ {LLCC_CPUSS, 1, 3072, 1, 0, 0xFFFF, 0x0, 0, 0, 0, 1, 1, 0, 0 },
+ {LLCC_VIDSC0, 2, 512, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_AUDIO, 6, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 },
+ {LLCC_MDMHPGRW, 7, 1024, 3, 0, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_MODHW, 9, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_CMPT, 10, 4096, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_GPUHTW, 11, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_GPU, 12, 2048, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 1, 0 },
+ {LLCC_MMUHWT, 13, 768, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0 },
+ {LLCC_DISP, 16, 4096, 2, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_MDMPNG, 21, 1024, 1, 1, 0xF000, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_AUDHW, 22, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 },
+ {LLCC_CVP, 28, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_MODPE, 29, 64, 1, 1, 0xF000, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0xF0, 1, 0, 0, 1, 0, 0, 0 },
+ {LLCC_WRCACHE, 31, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0 },
+ {LLCC_CVPFW, 17, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_CPUSS1, 3, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_CAMEXP0, 4, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_CPUMTE, 23, 256, 1, 1, 0x0FFF, 0x0, 0, 0, 0, 0, 1, 0, 0 },
+ {LLCC_CPUHWT, 5, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 1, 0, 0 },
+ {LLCC_CAMEXP1, 27, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_AENPU, 8, 2048, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 },
+};
+
+static const u32 llcc_v1_2_reg_offset[] = {
+ [LLCC_COMMON_HW_INFO] = 0x00030000,
+ [LLCC_COMMON_STATUS0] = 0x0003000c,
+};
+
+static const u32 llcc_v21_reg_offset[] = {
+ [LLCC_COMMON_HW_INFO] = 0x00034000,
+ [LLCC_COMMON_STATUS0] = 0x0003400c,
+};
+
static const struct qcom_llcc_config sc7180_cfg = {
.sct_data = sc7180_data,
.size = ARRAY_SIZE(sc7180_data),
.need_llcc_cfg = true,
+ .reg_offset = llcc_v1_2_reg_offset,
};
static const struct qcom_llcc_config sc7280_cfg = {
.sct_data = sc7280_data,
.size = ARRAY_SIZE(sc7280_data),
.need_llcc_cfg = true,
+ .reg_offset = llcc_v1_2_reg_offset,
};
static const struct qcom_llcc_config sdm845_cfg = {
.sct_data = sdm845_data,
.size = ARRAY_SIZE(sdm845_data),
.need_llcc_cfg = false,
+ .reg_offset = llcc_v1_2_reg_offset,
};
static const struct qcom_llcc_config sm6350_cfg = {
.sct_data = sm6350_data,
.size = ARRAY_SIZE(sm6350_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_2_reg_offset,
};
static const struct qcom_llcc_config sm8150_cfg = {
.sct_data = sm8150_data,
.size = ARRAY_SIZE(sm8150_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_2_reg_offset,
};
static const struct qcom_llcc_config sm8250_cfg = {
.sct_data = sm8250_data,
.size = ARRAY_SIZE(sm8250_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_2_reg_offset,
};
static const struct qcom_llcc_config sm8350_cfg = {
.sct_data = sm8350_data,
.size = ARRAY_SIZE(sm8350_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_2_reg_offset,
+};
+
+static const struct qcom_llcc_config sm8450_cfg = {
+ .sct_data = sm8450_data,
+ .size = ARRAY_SIZE(sm8450_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v21_reg_offset,
};
static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
@@ -504,7 +567,7 @@ static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config,
return ret;
}
- if (drv_data->major_version == 2) {
+ if (drv_data->version >= LLCC_VERSION_2_0_0_0) {
u32 wren;
wren = config->write_scid_en << config->slice_id;
@@ -514,6 +577,16 @@ static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config,
return ret;
}
+ if (drv_data->version >= LLCC_VERSION_2_1_0_0) {
+ u32 wr_cache_en;
+
+ wr_cache_en = config->write_scid_cacheable_en << config->slice_id;
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_WRSC_CACHEABLE_EN,
+ BIT(config->slice_id), wr_cache_en);
+ if (ret)
+ return ret;
+ }
+
if (config->activate_on_init) {
desc.slice_id = config->slice_id;
ret = llcc_slice_activate(&desc);
@@ -598,15 +671,18 @@ static int qcom_llcc_probe(struct platform_device *pdev)
goto err;
}
- /* Extract major version of the IP */
- ret = regmap_read(drv_data->bcast_regmap, LLCC_COMMON_HW_INFO, &version);
+ cfg = of_device_get_match_data(&pdev->dev);
+
+ /* Extract version of the IP */
+ ret = regmap_read(drv_data->bcast_regmap, cfg->reg_offset[LLCC_COMMON_HW_INFO],
+ &version);
if (ret)
goto err;
- drv_data->major_version = FIELD_GET(LLCC_MAJOR_VERSION_MASK, version);
+ drv_data->version = version;
- ret = regmap_read(drv_data->regmap, LLCC_COMMON_STATUS0,
- &num_banks);
+ ret = regmap_read(drv_data->regmap, cfg->reg_offset[LLCC_COMMON_STATUS0],
+ &num_banks);
if (ret)
goto err;
@@ -614,7 +690,6 @@ static int qcom_llcc_probe(struct platform_device *pdev)
num_banks >>= LLCC_LB_CNT_SHIFT;
drv_data->num_banks = num_banks;
- cfg = of_device_get_match_data(&pdev->dev);
llcc_cfg = cfg->sct_data;
sz = cfg->size;
@@ -632,9 +707,8 @@ static int qcom_llcc_probe(struct platform_device *pdev)
for (i = 0; i < num_banks; i++)
drv_data->offsets[i] = i * BANK_OFFSET_STRIDE;
- drv_data->bitmap = devm_kcalloc(dev,
- BITS_TO_LONGS(drv_data->max_slices), sizeof(unsigned long),
- GFP_KERNEL);
+ drv_data->bitmap = devm_bitmap_zalloc(dev, drv_data->max_slices,
+ GFP_KERNEL);
if (!drv_data->bitmap) {
ret = -ENOMEM;
goto err;
@@ -672,6 +746,7 @@ static const struct of_device_id qcom_llcc_of_match[] = {
{ .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfg },
{ .compatible = "qcom,sm8250-llcc", .data = &sm8250_cfg },
{ .compatible = "qcom,sm8350-llcc", .data = &sm8350_cfg },
+ { .compatible = "qcom,sm8450-llcc", .data = &sm8450_cfg },
{ }
};
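[Editor's note] Replacing the lone major-version field with the full packed version is what makes the >= comparisons above work across minor revisions. A hedged illustration of the byte packing implied by the LLCC_VERSION_* constants:

/* Illustration only: one byte per field, major version in the top byte. */
#define LLCC_VERSION(maj, min, branch, patch) \
	(((maj) << 24) | ((min) << 16) | ((branch) << 8) | (patch))

/* LLCC_VERSION(2, 1, 0, 0) == 0x02010000 == LLCC_VERSION_2_1_0_0, so
 * "drv_data->version >= LLCC_VERSION_2_1_0_0" matches v2.1 and anything
 * newer — exactly the gate used for LLCC_TRP_WRSC_CACHEABLE_EN. */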
diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
index 72fc2b539213..366db493579b 100644
--- a/drivers/soc/qcom/mdt_loader.c
+++ b/drivers/soc/qcom/mdt_loader.c
@@ -31,6 +31,44 @@ static bool mdt_phdr_valid(const struct elf32_phdr *phdr)
return true;
}
+static ssize_t mdt_load_split_segment(void *ptr, const struct elf32_phdr *phdrs,
+ unsigned int segment, const char *fw_name,
+ struct device *dev)
+{
+ const struct elf32_phdr *phdr = &phdrs[segment];
+ const struct firmware *seg_fw;
+ char *seg_name;
+ ssize_t ret;
+
+ if (strlen(fw_name) < 4)
+ return -EINVAL;
+
+ seg_name = kstrdup(fw_name, GFP_KERNEL);
+ if (!seg_name)
+ return -ENOMEM;
+
+ sprintf(seg_name + strlen(fw_name) - 3, "b%02d", segment);
+ ret = request_firmware_into_buf(&seg_fw, seg_name, dev,
+ ptr, phdr->p_filesz);
+ if (ret) {
+ dev_err(dev, "error %zd loading %s\n", ret, seg_name);
+ kfree(seg_name);
+ return ret;
+ }
+
+ if (seg_fw->size != phdr->p_filesz) {
+ dev_err(dev,
+ "failed to load segment %d from truncated file %s\n",
+ segment, seg_name);
+ ret = -EINVAL;
+ }
+
+ release_firmware(seg_fw);
+ kfree(seg_name);
+
+ return ret;
+}
+
/**
* qcom_mdt_get_size() - acquire size of the memory region needed to load mdt
* @fw: firmware object for the mdt file
@@ -83,13 +121,17 @@ EXPORT_SYMBOL_GPL(qcom_mdt_get_size);
*
* Return: pointer to data, or ERR_PTR()
*/
-void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len)
+void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len,
+ const char *fw_name, struct device *dev)
{
const struct elf32_phdr *phdrs;
const struct elf32_hdr *ehdr;
+ unsigned int hash_segment = 0;
size_t hash_offset;
size_t hash_size;
size_t ehdr_size;
+ unsigned int i;
+ ssize_t ret;
void *data;
ehdr = (struct elf32_hdr *)fw->data;
@@ -101,24 +143,44 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len)
if (phdrs[0].p_type == PT_LOAD)
return ERR_PTR(-EINVAL);
- if ((phdrs[1].p_flags & QCOM_MDT_TYPE_MASK) != QCOM_MDT_TYPE_HASH)
+ for (i = 1; i < ehdr->e_phnum; i++) {
+ if ((phdrs[i].p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) {
+ hash_segment = i;
+ break;
+ }
+ }
+
+ if (!hash_segment) {
+ dev_err(dev, "no hash segment found in %s\n", fw_name);
return ERR_PTR(-EINVAL);
+ }
ehdr_size = phdrs[0].p_filesz;
- hash_size = phdrs[1].p_filesz;
+ hash_size = phdrs[hash_segment].p_filesz;
data = kmalloc(ehdr_size + hash_size, GFP_KERNEL);
if (!data)
return ERR_PTR(-ENOMEM);
- /* Is the header and hash already packed */
- if (ehdr_size + hash_size == fw->size)
- hash_offset = phdrs[0].p_filesz;
- else
- hash_offset = phdrs[1].p_offset;
-
+ /* Copy ELF header */
memcpy(data, fw->data, ehdr_size);
- memcpy(data + ehdr_size, fw->data + hash_offset, hash_size);
+
+ if (ehdr_size + hash_size == fw->size) {
+ /* Firmware is split and hash is packed following the ELF header */
+ hash_offset = phdrs[0].p_filesz;
+ memcpy(data + ehdr_size, fw->data + hash_offset, hash_size);
+ } else if (phdrs[hash_segment].p_offset + hash_size <= fw->size) {
+ /* Hash is in its own segment, but within the loaded file */
+ hash_offset = phdrs[hash_segment].p_offset;
+ memcpy(data + ehdr_size, fw->data + hash_offset, hash_size);
+ } else {
+ /* Hash is in its own segment, beyond the loaded file */
+ ret = mdt_load_split_segment(data + ehdr_size, phdrs, hash_segment, fw_name, dev);
+ if (ret) {
+ kfree(data);
+ return ERR_PTR(ret);
+ }
+ }
*data_len = ehdr_size + hash_size;
@@ -126,23 +188,85 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len)
}
EXPORT_SYMBOL_GPL(qcom_mdt_read_metadata);
+/**
+ * qcom_mdt_pas_init() - initialize PAS region for firmware loading
+ * @dev: device handle to associate resources with
+ * @fw: firmware object for the mdt file
+ * @fw_name: name of the firmware, for construction of segment file names
+ * @pas_id: PAS identifier
+ * @mem_phys: physical address of allocated memory region
+ * @ctx: PAS metadata context, to be released by caller
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
+ const char *fw_name, int pas_id, phys_addr_t mem_phys,
+ struct qcom_scm_pas_metadata *ctx)
+{
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ const struct elf32_hdr *ehdr;
+ phys_addr_t min_addr = PHYS_ADDR_MAX;
+ phys_addr_t max_addr = 0;
+ size_t metadata_len;
+ void *metadata;
+ int ret;
+ int i;
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (!mdt_phdr_valid(phdr))
+ continue;
+
+ if (phdr->p_paddr < min_addr)
+ min_addr = phdr->p_paddr;
+
+ if (phdr->p_paddr + phdr->p_memsz > max_addr)
+ max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
+ }
+
+ metadata = qcom_mdt_read_metadata(fw, &metadata_len, fw_name, dev);
+ if (IS_ERR(metadata)) {
+ ret = PTR_ERR(metadata);
+ dev_err(dev, "error %d reading firmware %s metadata\n", ret, fw_name);
+ goto out;
+ }
+
+ ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_len, ctx);
+ kfree(metadata);
+ if (ret) {
+ /* Invalid firmware metadata */
+ dev_err(dev, "error %d initializing firmware %s\n", ret, fw_name);
+ goto out;
+ }
+
+ ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, max_addr - min_addr);
+ if (ret) {
+ /* Unable to set up relocation */
+ dev_err(dev, "error %d setting up firmware %s\n", ret, fw_name);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_pas_init);
+
static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
- const char *firmware, int pas_id, void *mem_region,
+ const char *fw_name, int pas_id, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base, bool pas_init)
{
const struct elf32_phdr *phdrs;
const struct elf32_phdr *phdr;
const struct elf32_hdr *ehdr;
- const struct firmware *seg_fw;
phys_addr_t mem_reloc;
phys_addr_t min_addr = PHYS_ADDR_MAX;
- phys_addr_t max_addr = 0;
- size_t metadata_len;
- size_t fw_name_len;
ssize_t offset;
- void *metadata;
- char *fw_name;
bool relocate = false;
void *ptr;
int ret = 0;
@@ -154,34 +278,6 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
ehdr = (struct elf32_hdr *)fw->data;
phdrs = (struct elf32_phdr *)(ehdr + 1);
- fw_name_len = strlen(firmware);
- if (fw_name_len <= 4)
- return -EINVAL;
-
- fw_name = kstrdup(firmware, GFP_KERNEL);
- if (!fw_name)
- return -ENOMEM;
-
- if (pas_init) {
- metadata = qcom_mdt_read_metadata(fw, &metadata_len);
- if (IS_ERR(metadata)) {
- ret = PTR_ERR(metadata);
- dev_err(dev, "error %d reading firmware %s metadata\n",
- ret, fw_name);
- goto out;
- }
-
- ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_len);
-
- kfree(metadata);
- if (ret) {
- /* Invalid firmware metadata */
- dev_err(dev, "error %d initializing firmware %s\n",
- ret, fw_name);
- goto out;
- }
- }
-
for (i = 0; i < ehdr->e_phnum; i++) {
phdr = &phdrs[i];
@@ -193,23 +289,9 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
if (phdr->p_paddr < min_addr)
min_addr = phdr->p_paddr;
-
- if (phdr->p_paddr + phdr->p_memsz > max_addr)
- max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
}
if (relocate) {
- if (pas_init) {
- ret = qcom_scm_pas_mem_setup(pas_id, mem_phys,
- max_addr - min_addr);
- if (ret) {
- /* Unable to set up relocation */
- dev_err(dev, "error %d setting up firmware %s\n",
- ret, fw_name);
- goto out;
- }
- }
-
/*
* The image is relocatable, so offset each segment based on
* the lowest segment address.
@@ -246,7 +328,8 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
ptr = mem_region + offset;
- if (phdr->p_filesz && phdr->p_offset < fw->size) {
+ if (phdr->p_filesz && phdr->p_offset < fw->size &&
+ phdr->p_offset + phdr->p_filesz <= fw->size) {
/* Firmware is large enough to be non-split */
if (phdr->p_offset + phdr->p_filesz > fw->size) {
dev_err(dev, "file %s segment %d would be truncated\n",
@@ -258,25 +341,9 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
} else if (phdr->p_filesz) {
/* Firmware not large enough, load split-out segments */
- sprintf(fw_name + fw_name_len - 3, "b%02d", i);
- ret = request_firmware_into_buf(&seg_fw, fw_name, dev,
- ptr, phdr->p_filesz);
- if (ret) {
- dev_err(dev, "error %d loading %s\n",
- ret, fw_name);
- break;
- }
-
- if (seg_fw->size != phdr->p_filesz) {
- dev_err(dev,
- "failed to load segment %d from truncated file %s\n",
- i, fw_name);
- release_firmware(seg_fw);
- ret = -EINVAL;
+ ret = mdt_load_split_segment(ptr, phdrs, i, fw_name, dev);
+ if (ret)
break;
- }
-
- release_firmware(seg_fw);
}
if (phdr->p_memsz > phdr->p_filesz)
@@ -286,9 +353,6 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
if (reloc_base)
*reloc_base = mem_reloc;
-out:
- kfree(fw_name);
-
return ret;
}
@@ -310,6 +374,12 @@ int qcom_mdt_load(struct device *dev, const struct firmware *fw,
phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base)
{
+ int ret;
+
+ ret = qcom_mdt_pas_init(dev, fw, firmware, pas_id, mem_phys, NULL);
+ if (ret)
+ return ret;
+
return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys,
mem_size, reloc_base, true);
}
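[Editor's note] The naming convention used by mdt_load_split_segment() overwrites the last three characters of the .mdt name with bNN, which is why names shorter than four characters are rejected. The name derivation in isolation, as a sketch:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Sketch: "modem.mdt" + segment 2 -> "modem.b02". */
static char *mdt_segment_name(const char *fw_name, int segment)
{
	char *name;

	if (strlen(fw_name) < 4)
		return NULL;

	name = kstrdup(fw_name, GFP_KERNEL);
	if (name)
		sprintf(name + strlen(name) - 3, "b%02d", segment);

	return name;
}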
diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c
index d2dacbbaafbd..97fd24c178f8 100644
--- a/drivers/soc/qcom/ocmem.c
+++ b/drivers/soc/qcom/ocmem.c
@@ -206,6 +206,7 @@ struct ocmem *of_get_ocmem(struct device *dev)
ocmem = platform_get_drvdata(pdev);
if (!ocmem) {
dev_err(dev, "Cannot get ocmem\n");
+ put_device(&pdev->dev);
return ERR_PTR(-ENODEV);
}
return ocmem;
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index cbe5e39fdaeb..a59bb34e5eba 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -451,7 +451,11 @@ struct qmp *qmp_get(struct device *dev)
qmp = platform_get_drvdata(pdev);
- return qmp ? qmp : ERR_PTR(-EPROBE_DEFER);
+ if (!qmp) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+ return qmp;
}
EXPORT_SYMBOL(qmp_get);
@@ -497,7 +501,7 @@ static int qmp_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- ret = devm_request_irq(&pdev->dev, irq, qmp_intr, IRQF_ONESHOT,
+ ret = devm_request_irq(&pdev->dev, irq, qmp_intr, 0,
"aoss-qmp", qmp);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request interrupt\n");
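[Editor's note] The qmp_get() hunk and the ocmem one above fix the same reference leak: the platform-device lookup takes a reference that must be dropped when the provider has not finished probing. The shared shape, sketched against a hypothetical provider:

#include <linux/err.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

/* Sketch: look up a provider's private data from its DT node. */
static void *provider_priv_get(struct device_node *np)
{
	struct platform_device *pdev = of_find_device_by_node(np);
	void *priv;

	if (!pdev)
		return ERR_PTR(-ENODEV);

	priv = platform_get_drvdata(pdev);
	if (!priv) {
		put_device(&pdev->dev);	/* drop the lookup's reference */
		return ERR_PTR(-EPROBE_DEFER);
	}

	return priv;
}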
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index 0a8d8d24bfb7..3b5b91621532 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -138,6 +138,22 @@ static const struct rpmpd_desc mdm9607_desc = {
.max_state = RPM_SMD_LEVEL_TURBO,
};
+/* msm8226 RPM Power Domains */
+DEFINE_RPMPD_PAIR(msm8226, vddcx, vddcx_ao, SMPA, CORNER, 1);
+DEFINE_RPMPD_VFC(msm8226, vddcx_vfc, SMPA, 1);
+
+static struct rpmpd *msm8226_rpmpds[] = {
+ [MSM8226_VDDCX] = &msm8226_vddcx,
+ [MSM8226_VDDCX_AO] = &msm8226_vddcx_ao,
+ [MSM8226_VDDCX_VFC] = &msm8226_vddcx_vfc,
+};
+
+static const struct rpmpd_desc msm8226_desc = {
+ .rpmpds = msm8226_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8226_rpmpds),
+ .max_state = MAX_CORNER_RPMPD_STATE,
+};
+
/* msm8939 RPM Power Domains */
DEFINE_RPMPD_PAIR(msm8939, vddmd, vddmd_ao, SMPA, CORNER, 1);
DEFINE_RPMPD_VFC(msm8939, vddmd_vfc, SMPA, 1);
@@ -436,6 +452,7 @@ static const struct rpmpd_desc qcm2290_desc = {
static const struct of_device_id rpmpd_match_table[] = {
{ .compatible = "qcom,mdm9607-rpmpd", .data = &mdm9607_desc },
+ { .compatible = "qcom,msm8226-rpmpd", .data = &msm8226_desc },
{ .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc },
{ .compatible = "qcom,msm8939-rpmpd", .data = &msm8939_desc },
{ .compatible = "qcom,msm8953-rpmpd", .data = &msm8953_desc },
@@ -610,6 +627,9 @@ static int rpmpd_probe(struct platform_device *pdev)
data->domains = devm_kcalloc(&pdev->dev, num, sizeof(*data->domains),
GFP_KERNEL);
+ if (!data->domains)
+ return -ENOMEM;
+
data->num_domains = num;
for (i = 0; i < num; i++) {
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 6dc0f39c0ec3..8b38d134720a 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -104,6 +104,14 @@ static const char *const pmic_models[] = {
[36] = "PM8009",
[38] = "PM8150C",
[41] = "SMB2351",
+ [47] = "PMK8350",
+ [48] = "PM8350",
+ [49] = "PM8350C",
+ [50] = "PM8350B",
+ [51] = "PMR735A",
+ [52] = "PMR735B",
+ [58] = "PM8450",
+ [65] = "PM8010",
};
#endif /* CONFIG_DEBUG_FS */
@@ -314,10 +322,14 @@ static const struct soc_id soc_id[] = {
{ 422, "IPQ6010" },
{ 425, "SC7180" },
{ 434, "SM6350" },
+ { 439, "SM8350" },
+ { 449, "SC8280XP" },
{ 453, "IPQ6005" },
{ 455, "QRB5165" },
{ 457, "SM8450" },
{ 459, "SM7225" },
+ { 460, "SA8540P" },
+ { 480, "SM8450" },
};
static const char *socinfo_machine(struct device *dev, unsigned int id)
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 2cbd03db2cc7..fdc99a05a7e0 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -40,6 +40,11 @@ config ARCH_RMOBILE
select SYS_SUPPORTS_SH_TMU
select SYSC_RMOBILE
+config ARCH_RZG2L
+ bool
+ select PM
+ select PM_GENERIC_DOMAINS
+
config ARCH_RZN1
bool
select ARM_AMBA
@@ -293,9 +298,16 @@ config ARCH_R8A774B1
config ARCH_R9A07G044
bool "ARM64 Platform support for RZ/G2L"
+ select ARCH_RZG2L
help
This enables support for the Renesas RZ/G2L SoC variants.
+config ARCH_R9A07G054
+ bool "ARM64 Platform support for RZ/V2L"
+ select ARCH_RZG2L
+ help
+ This enables support for the Renesas RZ/V2L SoC variants.
+
endif # ARM64
config RST_RCAR
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 62540ffc581a..92c7b42250ee 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -64,6 +64,10 @@ static const struct renesas_family fam_rzg2l __initconst __maybe_unused = {
.name = "RZ/G2L",
};
+static const struct renesas_family fam_rzv2l __initconst __maybe_unused = {
+ .name = "RZ/V2L",
+};
+
static const struct renesas_family fam_shmobile __initconst __maybe_unused = {
.name = "SH-Mobile",
.reg = 0xe600101c, /* CCCR (Common Chip Code Register) */
@@ -144,6 +148,11 @@ static const struct renesas_soc soc_rz_g2l __initconst __maybe_unused = {
.id = 0x841c447,
};
+static const struct renesas_soc soc_rz_v2l __initconst __maybe_unused = {
+ .family = &fam_rzv2l,
+ .id = 0x8447447,
+};
+
static const struct renesas_soc soc_rcar_m1a __initconst __maybe_unused = {
.family = &fam_rcar_gen1,
};
@@ -334,6 +343,9 @@ static const struct of_device_id renesas_socs[] __initconst = {
#if defined(CONFIG_ARCH_R9A07G044)
{ .compatible = "renesas,r9a07g044", .data = &soc_rz_g2l },
#endif
+#if defined(CONFIG_ARCH_R9A07G054)
+ { .compatible = "renesas,r9a07g054", .data = &soc_rz_v2l },
+#endif
#ifdef CONFIG_ARCH_SH73A0
{ .compatible = "renesas,sh73a0", .data = &soc_shmobile_ag5 },
#endif
@@ -367,6 +379,7 @@ static const struct renesas_id id_prr __initconst = {
static const struct of_device_id renesas_ids[] __initconst = {
{ .compatible = "renesas,bsid", .data = &id_bsid },
{ .compatible = "renesas,r9a07g044-sysc", .data = &id_rzg2l },
+ { .compatible = "renesas,r9a07g054-sysc", .data = &id_rzg2l },
{ .compatible = "renesas,prr", .data = &id_prr },
{ /* sentinel */ }
};
@@ -380,9 +393,11 @@ static int __init renesas_soc_init(void)
const struct renesas_soc *soc;
const struct renesas_id *id;
void __iomem *chipid = NULL;
+ const char *rev_prefix = "";
struct soc_device *soc_dev;
struct device_node *np;
const char *soc_id;
+ int ret;
match = of_match_node(renesas_socs, of_root);
if (!match)
@@ -403,6 +418,17 @@ static int __init renesas_soc_init(void)
chipid = ioremap(family->reg, 4);
}
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ np = of_find_node_by_path("/");
+ of_property_read_string(np, "model", &soc_dev_attr->machine);
+ of_node_put(np);
+
+ soc_dev_attr->family = kstrdup_const(family->name, GFP_KERNEL);
+ soc_dev_attr->soc_id = kstrdup_const(soc_id, GFP_KERNEL);
+
if (chipid) {
product = readl(chipid + id->offset);
iounmap(chipid);
@@ -417,41 +443,39 @@ static int __init renesas_soc_init(void)
eshi = ((product >> 4) & 0x0f) + 1;
eslo = product & 0xf;
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "ES%u.%u",
+ eshi, eslo);
+ } else if (id == &id_rzg2l) {
+ eshi = ((product >> 28) & 0x0f);
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%u",
+ eshi);
+ rev_prefix = "Rev ";
}
if (soc->id &&
((product & id->mask) >> __ffs(id->mask)) != soc->id) {
pr_warn("SoC mismatch (product = 0x%x)\n", product);
- return -ENODEV;
+ ret = -ENODEV;
+ goto free_soc_dev_attr;
}
}
- soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
- if (!soc_dev_attr)
- return -ENOMEM;
-
- np = of_find_node_by_path("/");
- of_property_read_string(np, "model", &soc_dev_attr->machine);
- of_node_put(np);
-
- soc_dev_attr->family = kstrdup_const(family->name, GFP_KERNEL);
- soc_dev_attr->soc_id = kstrdup_const(soc_id, GFP_KERNEL);
- if (eshi)
- soc_dev_attr->revision = kasprintf(GFP_KERNEL, "ES%u.%u", eshi,
- eslo);
-
- pr_info("Detected Renesas %s %s %s\n", soc_dev_attr->family,
- soc_dev_attr->soc_id, soc_dev_attr->revision ?: "");
+ pr_info("Detected Renesas %s %s %s%s\n", soc_dev_attr->family,
+ soc_dev_attr->soc_id, rev_prefix, soc_dev_attr->revision ?: "");
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
- kfree(soc_dev_attr->revision);
- kfree_const(soc_dev_attr->soc_id);
- kfree_const(soc_dev_attr->family);
- kfree(soc_dev_attr);
- return PTR_ERR(soc_dev);
+ ret = PTR_ERR(soc_dev);
+ goto free_soc_dev_attr;
}
return 0;
+
+free_soc_dev_attr:
+ kfree(soc_dev_attr->revision);
+ kfree_const(soc_dev_attr->soc_id);
+ kfree_const(soc_dev_attr->family);
+ kfree(soc_dev_attr);
+ return ret;
}
early_initcall(renesas_soc_init);
diff --git a/drivers/soc/rockchip/Kconfig b/drivers/soc/rockchip/Kconfig
index 25eb2c1e31bb..156ac0e0c8fe 100644
--- a/drivers/soc/rockchip/Kconfig
+++ b/drivers/soc/rockchip/Kconfig
@@ -34,4 +34,12 @@ config ROCKCHIP_PM_DOMAINS
If unsure, say N.
+config ROCKCHIP_DTPM
+ tristate "Rockchip DTPM hierarchy"
+ depends on DTPM && m
+ help
+ Describe the hierarchy for the Dynamic Thermal Power
+ Management tree on this platform. This will create all the
+ power-capping capable devices.
+
endif
diff --git a/drivers/soc/rockchip/Makefile b/drivers/soc/rockchip/Makefile
index 875032f7344e..05f31a4e743c 100644
--- a/drivers/soc/rockchip/Makefile
+++ b/drivers/soc/rockchip/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_ROCKCHIP_GRF) += grf.o
obj-$(CONFIG_ROCKCHIP_IODOMAIN) += io-domain.o
obj-$(CONFIG_ROCKCHIP_PM_DOMAINS) += pm_domains.o
+obj-$(CONFIG_ROCKCHIP_DTPM) += dtpm.o
diff --git a/drivers/soc/rockchip/dtpm.c b/drivers/soc/rockchip/dtpm.c
new file mode 100644
index 000000000000..5a23784b5221
--- /dev/null
+++ b/drivers/soc/rockchip/dtpm.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021 Linaro Limited
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ * DTPM hierarchy description
+ */
+#include <linux/dtpm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+static struct dtpm_node __initdata rk3399_hierarchy[] = {
+ [0]{ .name = "rk3399",
+ .type = DTPM_NODE_VIRTUAL },
+ [1]{ .name = "package",
+ .type = DTPM_NODE_VIRTUAL,
+ .parent = &rk3399_hierarchy[0] },
+ [2]{ .name = "/cpus/cpu@0",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [3]{ .name = "/cpus/cpu@1",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [4]{ .name = "/cpus/cpu@2",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [5]{ .name = "/cpus/cpu@3",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [6]{ .name = "/cpus/cpu@100",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [7]{ .name = "/cpus/cpu@101",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [8]{ .name = "/gpu@ff9a0000",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [9]{ /* sentinel */ }
+};
+
+static struct of_device_id __initdata rockchip_dtpm_match_table[] = {
+ { .compatible = "rockchip,rk3399", .data = rk3399_hierarchy },
+ {},
+};
+
+static int __init rockchip_dtpm_init(void)
+{
+ return dtpm_create_hierarchy(rockchip_dtpm_match_table);
+}
+module_init(rockchip_dtpm_init);
+
+static void __exit rockchip_dtpm_exit(void)
+{
+ return dtpm_destroy_hierarchy();
+}
+module_exit(rockchip_dtpm_exit);
+
+MODULE_SOFTDEP("pre: panfrost cpufreq-dt");
+MODULE_DESCRIPTION("Rockchip DTPM driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:dtpm");
+MODULE_AUTHOR("Daniel Lezcano <daniel.lezcano@kernel.org");
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
index 2746d05936d3..0fb3631e7346 100644
--- a/drivers/soc/samsung/exynos-chipid.c
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -204,7 +204,7 @@ module_platform_driver(exynos_chipid_driver);
MODULE_DESCRIPTION("Samsung Exynos ChipID controller and ASV driver");
MODULE_AUTHOR("Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>");
-MODULE_AUTHOR("Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
MODULE_AUTHOR("Pankaj Dubey <pankaj.dubey@samsung.com>");
MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index 913103ee5432..aa94fda282f4 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2013-2021, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/clk.h>
@@ -162,6 +162,12 @@ static const struct nvmem_cell_info tegra_fuse_cells[] = {
.bit_offset = 0,
.nbits = 32,
}, {
+ .name = "gcplex-config-fuse",
+ .offset = 0x1c8,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
.name = "tsensor-realignment",
.offset = 0x1fc,
.bytes = 4,
@@ -179,13 +185,25 @@ static const struct nvmem_cell_info tegra_fuse_cells[] = {
.bytes = 4,
.bit_offset = 0,
.nbits = 32,
+ }, {
+ .name = "pdi0",
+ .offset = 0x300,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "pdi1",
+ .offset = 0x304,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
},
};
static void tegra_fuse_restore(void *base)
{
+ fuse->base = (void __iomem *)base;
fuse->clk = NULL;
- fuse->base = base;
}
static int tegra_fuse_probe(struct platform_device *pdev)
@@ -195,7 +213,7 @@ static int tegra_fuse_probe(struct platform_device *pdev)
struct resource *res;
int err;
- err = devm_add_action(&pdev->dev, tegra_fuse_restore, base);
+ err = devm_add_action(&pdev->dev, tegra_fuse_restore, (void __force *)base);
if (err)
return err;
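[Editor's note] The tegra_fuse_restore() change is a sparse-annotation fix: devm actions only carry a plain void *, so the __iomem qualifier is stripped with __force at registration and reapplied inside the callback. A minimal sketch of that round-trip:

#include <linux/device.h>

static void restore_cb(void *data)
{
	void __iomem *base = (void __iomem *)data;	/* re-annotate */

	(void)base;	/* real code stores it back into driver state */
}

static int register_restore(struct device *dev, void __iomem *base)
{
	/* __force tells sparse the annotation drop is deliberate */
	return devm_add_action(dev, restore_cb, (void __force *)base);
}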
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 5aceacbd8ce0..fdf508e03400 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -3,7 +3,7 @@
* drivers/soc/tegra/pmc.c
*
* Copyright (c) 2010 Google, Inc
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* Author:
* Colin Cross <ccross@google.com>
@@ -54,6 +54,7 @@
#include <dt-bindings/pinctrl/pinctrl-tegra-io-pad.h>
#include <dt-bindings/gpio/tegra186-gpio.h>
#include <dt-bindings/gpio/tegra194-gpio.h>
+#include <dt-bindings/gpio/tegra234-gpio.h>
#include <dt-bindings/soc/tegra-pmc.h>
#define PMC_CNTRL 0x0
@@ -3066,7 +3067,7 @@ static void tegra20_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
}
static const struct tegra_pmc_soc tegra20_pmc_soc = {
- .supports_core_domain = false,
+ .supports_core_domain = true,
.num_powergates = ARRAY_SIZE(tegra20_powergates),
.powergates = tegra20_powergates,
.num_cpu_powergates = 0,
@@ -3127,7 +3128,7 @@ static const char * const tegra30_reset_sources[] = {
};
static const struct tegra_pmc_soc tegra30_pmc_soc = {
- .supports_core_domain = false,
+ .supports_core_domain = true,
.num_powergates = ARRAY_SIZE(tegra30_powergates),
.powergates = tegra30_powergates,
.num_cpu_powergates = ARRAY_SIZE(tegra30_cpu_powergates),
@@ -3788,6 +3789,11 @@ static const char * const tegra234_reset_sources[] = {
"FUSECRC",
};
+static const struct tegra_wake_event tegra234_wake_events[] = {
+ TEGRA_WAKE_GPIO("power", 29, 1, TEGRA234_AON_GPIO(EE, 4)),
+ TEGRA_WAKE_IRQ("rtc", 73, 10),
+};
+
static const struct tegra_pmc_soc tegra234_pmc_soc = {
.supports_core_domain = false,
.num_powergates = 0,
@@ -3812,8 +3818,8 @@ static const struct tegra_pmc_soc tegra234_pmc_soc = {
.num_reset_sources = ARRAY_SIZE(tegra234_reset_sources),
.reset_levels = tegra186_reset_levels,
.num_reset_levels = ARRAY_SIZE(tegra186_reset_levels),
- .num_wake_events = 0,
- .wake_events = NULL,
+ .num_wake_events = ARRAY_SIZE(tegra234_wake_events),
+ .wake_events = tegra234_wake_events,
.pmc_clks_data = NULL,
.num_pmc_clks = 0,
.has_blink_output = false,
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
index 31ab6c657fec..f7bf18b8229a 100644
--- a/drivers/soc/ti/k3-ringacc.c
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -1402,12 +1402,10 @@ static int k3_ringacc_init(struct platform_device *pdev,
sizeof(*ringacc->rings) *
ringacc->num_rings,
GFP_KERNEL);
- ringacc->rings_inuse = devm_kcalloc(dev,
- BITS_TO_LONGS(ringacc->num_rings),
- sizeof(unsigned long), GFP_KERNEL);
- ringacc->proxy_inuse = devm_kcalloc(dev,
- BITS_TO_LONGS(ringacc->num_proxies),
- sizeof(unsigned long), GFP_KERNEL);
+ ringacc->rings_inuse = devm_bitmap_zalloc(dev, ringacc->num_rings,
+ GFP_KERNEL);
+ ringacc->proxy_inuse = devm_bitmap_zalloc(dev, ringacc->num_proxies,
+ GFP_KERNEL);
if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse)
return -ENOMEM;
@@ -1483,9 +1481,8 @@ struct k3_ringacc *k3_ringacc_dmarings_init(struct platform_device *pdev,
sizeof(*ringacc->rings) *
ringacc->num_rings * 2,
GFP_KERNEL);
- ringacc->rings_inuse = devm_kcalloc(dev,
- BITS_TO_LONGS(ringacc->num_rings),
- sizeof(unsigned long), GFP_KERNEL);
+ ringacc->rings_inuse = devm_bitmap_zalloc(dev, ringacc->num_rings,
+ GFP_KERNEL);
if (!ringacc->rings || !ringacc->rings_inuse)
return ERR_PTR(-ENOMEM);
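[Editor's note] devm_bitmap_zalloc() is a one-for-one replacement for the open-coded allocation removed here; it returns a zeroed unsigned long array sized for the requested number of bits. Sketched:

#include <linux/bitmap.h>
#include <linux/device.h>

static unsigned long *alloc_inuse_bitmap(struct device *dev,
					 unsigned int nbits)
{
	/* equivalent to devm_kcalloc(dev, BITS_TO_LONGS(nbits),
	 * sizeof(unsigned long), GFP_KERNEL) */
	return devm_bitmap_zalloc(dev, nbits, GFP_KERNEL);
}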
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
index b6b2150aca4e..91f441ee6175 100644
--- a/drivers/soc/ti/k3-socinfo.c
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -42,6 +42,7 @@ static const struct k3_soc_id {
{ 0xBB6D, "J7200" },
{ 0xBB38, "AM64X" },
{ 0xBB75, "J721S2"},
+ { 0xBB7E, "AM62X" },
};
static int
diff --git a/drivers/soc/ti/smartreflex.c b/drivers/soc/ti/smartreflex.c
index b5b2fa538d5c..ad2bb72e640c 100644
--- a/drivers/soc/ti/smartreflex.c
+++ b/drivers/soc/ti/smartreflex.c
@@ -819,7 +819,7 @@ static int omap_sr_probe(struct platform_device *pdev)
{
struct omap_sr *sr_info;
struct omap_sr_data *pdata = pdev->dev.platform_data;
- struct resource *mem, *irq;
+ struct resource *mem;
struct dentry *nvalue_dir;
int i, ret = 0;
@@ -844,7 +844,11 @@ static int omap_sr_probe(struct platform_device *pdev)
if (IS_ERR(sr_info->base))
return PTR_ERR(sr_info->base);
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ ret = platform_get_irq_optional(pdev, 0);
+ if (ret < 0 && ret != -ENXIO)
+ return dev_err_probe(&pdev->dev, ret, "failed to get IRQ resource\n");
+ if (ret > 0)
+ sr_info->irq = ret;
sr_info->fck = devm_clk_get(pdev->dev.parent, "fck");
if (IS_ERR(sr_info->fck))
@@ -870,9 +874,6 @@ static int omap_sr_probe(struct platform_device *pdev)
sr_info->autocomp_active = false;
sr_info->ip_type = pdata->ip_type;
- if (irq)
- sr_info->irq = irq->start;
-
sr_set_clk_length(sr_info);
list_add(&sr_info->node, &sr_list);
@@ -926,7 +927,7 @@ static int omap_sr_probe(struct platform_device *pdev)
}
- return ret;
+ return 0;
err_debugfs:
debugfs_remove_recursive(sr_info->dbg_dir);
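[Editor's note] platform_get_irq_optional() returns the IRQ number on success and a negative errno otherwise, with -ENXIO meaning the IRQ is simply absent — the one error this driver now tolerates. The decision tree, sketched:

#include <linux/platform_device.h>

static int get_optional_irq(struct platform_device *pdev,
			    unsigned int *out_irq)
{
	int ret = platform_get_irq_optional(pdev, 0);

	if (ret < 0 && ret != -ENXIO)
		return ret;	/* real failure, e.g. -EPROBE_DEFER */
	if (ret > 0)
		*out_irq = ret;	/* IRQ present and resolved */

	return 0;		/* -ENXIO: no IRQ wired up, carry on */
}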
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index 72386bd393fe..2f03ced0f411 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -450,9 +450,9 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
return PTR_ERR(m3_ipc->ipc_mem_base);
irq = platform_get_irq(pdev, 0);
- if (!irq) {
+ if (irq < 0) {
dev_err(&pdev->dev, "no irq resource\n");
- return -ENXIO;
+ return irq;
}
ret = devm_request_irq(dev, irq, wkup_m3_txev_handler,
diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
index 0ca2a3e3a02e..747983743a14 100644
--- a/drivers/soundwire/dmi-quirks.c
+++ b/drivers/soundwire/dmi-quirks.c
@@ -59,7 +59,7 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x360 Convertible"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x360 Conv"),
},
.driver_data = (void *)intel_tgl_bios,
},
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index b2a8821971e1..31a2cef3790c 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -427,6 +427,45 @@ config SPI_INGENIC
To compile this driver as a module, choose M here: the module
will be called spi-ingenic.
+config SPI_INTEL
+ tristate
+
+config SPI_INTEL_PCI
+ tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)"
+ depends on PCI
+ depends on X86 || COMPILE_TEST
+ depends on SPI_MEM
+ select SPI_INTEL
+ help
+ This enables PCI support for the Intel PCH/PCU SPI controller in
+ master mode. This controller is present in modern Intel hardware
+ and is used to hold BIOS and other persistent settings. Using
+ this driver it is possible to upgrade BIOS directly from Linux.
+
+ Say N here unless you know what you are doing. Overwriting the
+ SPI flash may render the system unbootable.
+
+ To compile this driver as a module, choose M here: the module
+ will be called spi-intel-pci.
+
+config SPI_INTEL_PLATFORM
+ tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)"
+ depends on X86 || COMPILE_TEST
+ depends on SPI_MEM
+ select SPI_INTEL
+ help
+ This enables platform support for the Intel PCH/PCU SPI
+ controller in master mode. This controller is present in modern
+ Intel hardware and is used to hold BIOS and other persistent
+ settings. Using this driver it is possible to upgrade BIOS
+ directly from Linux.
+
+ Say N here unless you know what you are doing. Overwriting the
+ SPI flash may render the system unbootable.
+
+ To compile this driver as a module, choose M here: the module
+ will be called spi-intel-platform.
+
config SPI_JCORE
tristate "J-Core SPI Master"
depends on OF && (SUPERH || COMPILE_TEST)
@@ -866,6 +905,17 @@ config SPI_SUN6I
help
This enables using the SPI controller on the Allwinner A31 SoCs.
+config SPI_SUNPLUS_SP7021
+ tristate "Sunplus SP7021 SPI controller"
+ depends on SOC_SP7021 || COMPILE_TEST
+ help
+ This enables the Sunplus SP7021 SPI controller driver on SP7021 SoCs.
+ This driver can also be built as a module. If so, the module will be
+ called spi-sunplus-sp7021.
+
+ If you have a Sunplus SP7021 platform say Y here.
+ If unsure, say N.
+
config SPI_SYNQUACER
tristate "Socionext's SynQuacer HighSpeed SPI controller"
depends on ARCH_SYNQUACER || COMPILE_TEST
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index dd7393a6046f..3aa28ed3f761 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -61,6 +61,9 @@ obj-$(CONFIG_SPI_HISI_SFC_V3XX) += spi-hisi-sfc-v3xx.o
obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
obj-$(CONFIG_SPI_IMX) += spi-imx.o
obj-$(CONFIG_SPI_INGENIC) += spi-ingenic.o
+obj-$(CONFIG_SPI_INTEL) += spi-intel.o
+obj-$(CONFIG_SPI_INTEL_PCI) += spi-intel-pci.o
+obj-$(CONFIG_SPI_INTEL_PLATFORM) += spi-intel-platform.o
obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o
obj-$(CONFIG_SPI_JCORE) += spi-jcore.o
obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
@@ -119,6 +122,7 @@ obj-$(CONFIG_SPI_STM32_QSPI) += spi-stm32-qspi.o
obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o
+obj-$(CONFIG_SPI_SUNPLUS_SP7021) += spi-sunplus-sp7021.o
obj-$(CONFIG_SPI_SYNQUACER) += spi-synquacer.o
obj-$(CONFIG_SPI_TEGRA210_QUAD) += spi-tegra210-quad.o
obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
index 4b3ac7aceaf6..cba6a4486c24 100644
--- a/drivers/spi/spi-amd.c
+++ b/drivers/spi/spi-amd.c
@@ -12,12 +12,17 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
+#include <linux/iopoll.h>
#define AMD_SPI_CTRL0_REG 0x00
#define AMD_SPI_EXEC_CMD BIT(16)
#define AMD_SPI_FIFO_CLEAR BIT(20)
#define AMD_SPI_BUSY BIT(31)
+#define AMD_SPI_OPCODE_REG 0x45
+#define AMD_SPI_CMD_TRIGGER_REG 0x47
+#define AMD_SPI_TRIGGER_CMD BIT(7)
+
#define AMD_SPI_OPCODE_MASK 0xFF
#define AMD_SPI_ALT_CS_REG 0x1D
@@ -34,10 +39,15 @@
#define AMD_SPI_XFER_TX 1
#define AMD_SPI_XFER_RX 2
+enum amd_spi_versions {
+ AMD_SPI_V1 = 1, /* AMDI0061 */
+ AMD_SPI_V2, /* AMDI0062 */
+};
+
struct amd_spi {
void __iomem *io_remap_addr;
unsigned long io_base_addr;
- u32 rom_addr;
+ enum amd_spi_versions version;
};
static inline u8 amd_spi_readreg8(struct amd_spi *amd_spi, int idx)
@@ -81,14 +91,29 @@ static void amd_spi_select_chip(struct amd_spi *amd_spi, u8 cs)
amd_spi_setclear_reg8(amd_spi, AMD_SPI_ALT_CS_REG, cs, AMD_SPI_ALT_CS_MASK);
}
+static inline void amd_spi_clear_chip(struct amd_spi *amd_spi, u8 chip_select)
+{
+ amd_spi_writereg8(amd_spi, AMD_SPI_ALT_CS_REG, chip_select & ~AMD_SPI_ALT_CS_MASK);
+}
+
static void amd_spi_clear_fifo_ptr(struct amd_spi *amd_spi)
{
amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, AMD_SPI_FIFO_CLEAR, AMD_SPI_FIFO_CLEAR);
}
-static void amd_spi_set_opcode(struct amd_spi *amd_spi, u8 cmd_opcode)
+static int amd_spi_set_opcode(struct amd_spi *amd_spi, u8 cmd_opcode)
{
- amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, cmd_opcode, AMD_SPI_OPCODE_MASK);
+ switch (amd_spi->version) {
+ case AMD_SPI_V1:
+ amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, cmd_opcode,
+ AMD_SPI_OPCODE_MASK);
+ return 0;
+ case AMD_SPI_V2:
+ amd_spi_writereg8(amd_spi, AMD_SPI_OPCODE_REG, cmd_opcode);
+ return 0;
+ default:
+ return -ENODEV;
+ }
}
static inline void amd_spi_set_rx_count(struct amd_spi *amd_spi, u8 rx_count)
@@ -103,16 +128,22 @@ static inline void amd_spi_set_tx_count(struct amd_spi *amd_spi, u8 tx_count)
static int amd_spi_busy_wait(struct amd_spi *amd_spi)
{
- int timeout = 100000;
-
- /* poll for SPI bus to become idle */
- while (amd_spi_readreg32(amd_spi, AMD_SPI_CTRL0_REG) & AMD_SPI_BUSY) {
- usleep_range(10, 20);
- if (timeout-- < 0)
- return -ETIMEDOUT;
+ u32 val;
+ int reg;
+
+ switch (amd_spi->version) {
+ case AMD_SPI_V1:
+ reg = AMD_SPI_CTRL0_REG;
+ break;
+ case AMD_SPI_V2:
+ reg = AMD_SPI_STATUS_REG;
+ break;
+ default:
+ return -ENODEV;
}
- return 0;
+ return readl_poll_timeout(amd_spi->io_remap_addr + reg, val,
+ !(val & AMD_SPI_BUSY), 20, 2000000);
}
static int amd_spi_execute_opcode(struct amd_spi *amd_spi)
@@ -123,10 +154,20 @@ static int amd_spi_execute_opcode(struct amd_spi *amd_spi)
if (ret)
return ret;
- /* Set ExecuteOpCode bit in the CTRL0 register */
- amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, AMD_SPI_EXEC_CMD, AMD_SPI_EXEC_CMD);
-
- return 0;
+ switch (amd_spi->version) {
+ case AMD_SPI_V1:
+ /* Set ExecuteOpCode bit in the CTRL0 register */
+ amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, AMD_SPI_EXEC_CMD,
+ AMD_SPI_EXEC_CMD);
+ return 0;
+ case AMD_SPI_V2:
+ /* Trigger the command execution */
+ amd_spi_setclear_reg8(amd_spi, AMD_SPI_CMD_TRIGGER_REG,
+ AMD_SPI_TRIGGER_CMD, AMD_SPI_TRIGGER_CMD);
+ return 0;
+ default:
+ return -ENODEV;
+ }
}
static int amd_spi_master_setup(struct spi_device *spi)
@@ -196,6 +237,17 @@ static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi,
message->actual_length = tx_len + rx_len + 1;
/* complete the transaction */
message->status = 0;
+
+ switch (amd_spi->version) {
+ case AMD_SPI_V1:
+ break;
+ case AMD_SPI_V2:
+ amd_spi_clear_chip(amd_spi, message->spi->chip_select);
+ break;
+ default:
+ return -ENODEV;
+ }
+
spi_finalize_current_message(master);
return 0;
@@ -241,6 +293,8 @@ static int amd_spi_probe(struct platform_device *pdev)
}
dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr);
+ amd_spi->version = (enum amd_spi_versions) device_get_match_data(dev);
+
/* Initialize the spi_master fields */
master->bus_num = 0;
master->num_chipselect = 4;
@@ -266,7 +320,8 @@ err_free_master:
#ifdef CONFIG_ACPI
static const struct acpi_device_id spi_acpi_match[] = {
- { "AMDI0061", 0 },
+ { "AMDI0061", AMD_SPI_V1 },
+ { "AMDI0062", AMD_SPI_V2 },
{},
};
MODULE_DEVICE_TABLE(acpi, spi_acpi_match);
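[Editor's note] readl_poll_timeout() from <linux/iopoll.h> subsumes the old hand-rolled loop: it re-reads the register, evaluates the condition, sleeps between reads, and returns 0 or -ETIMEDOUT. The call above polls every ~20 us for up to 2 s; isolated for clarity:

#include <linux/iopoll.h>

/* Sketch: wait for AMD_SPI_BUSY to clear in the selected register. */
static int amd_spi_wait_idle(void __iomem *reg)
{
	u32 val;

	return readl_poll_timeout(reg, val, !(val & AMD_SPI_BUSY),
				  20, 2000000);
}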
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index d1e287d2d9cd..607e7a49fb89 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/bitops.h>
#include <linux/clk.h>
@@ -133,6 +134,38 @@ static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned int nsecs,
return ath79_spi_rr(sp, AR71XX_SPI_REG_RDS);
}
+static int ath79_exec_mem_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct ath79_spi *sp = ath79_spidev_to_sp(mem->spi);
+
+ /* Ensure the read is performed on the device connected to hardware CS0 */
+ if (mem->spi->chip_select || mem->spi->cs_gpiod)
+ return -ENOTSUPP;
+
+ /* Only use for fast-read op. */
+ if (op->cmd.opcode != 0x0b || op->data.dir != SPI_MEM_DATA_IN ||
+ op->addr.nbytes != 3 || op->dummy.nbytes != 1)
+ return -ENOTSUPP;
+
+ /* disable GPIO mode */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
+
+ memcpy_fromio(op->data.buf.in, sp->base + op->addr.val, op->data.nbytes);
+
+ /* enable GPIO mode */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO);
+
+ /* restore IOC register */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops ath79_mem_ops = {
+ .exec_op = ath79_exec_mem_op,
+};
+
static int ath79_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
@@ -154,6 +187,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
master->flags = SPI_MASTER_GPIO_SS;
master->num_chipselect = 3;
+ master->mem_ops = &ath79_mem_ops;
sp->bitbang.master = master;
sp->bitbang.chipselect = ath79_spi_chipselect;
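[Editor's note] ath79_exec_mem_op() only accepts the plain 1-1-1 fast-read shape: opcode 0x0b, three address bytes, one dummy byte, data in. For reference, a hedged sketch of a matching operation built with the standard spi-mem macros (buf, len and addr are caller-supplied):

#include <linux/spi/spi-mem.h>

/* Sketch: the only op shape the hook accepts. */
static struct spi_mem_op make_fast_read_op(void *buf, unsigned int len,
					   u64 addr)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1),
			   SPI_MEM_OP_ADDR(3, addr, 1),
			   SPI_MEM_OP_DUMMY(1, 1),
			   SPI_MEM_OP_DATA_IN(len, buf, 1));

	return op;	/* would be passed to spi_mem_exec_op() */
}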
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 7d709a8c833b..e28521922330 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -22,7 +22,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
@@ -445,25 +444,12 @@ static void bcm2835aux_spi_handle_err(struct spi_master *master,
static int bcm2835aux_spi_setup(struct spi_device *spi)
{
- int ret;
-
/* sanity check for native cs */
if (spi->mode & SPI_NO_CS)
return 0;
- if (gpio_is_valid(spi->cs_gpio)) {
- /* with gpio-cs set the GPIO to the correct level
- * and as output (in case the dt has the gpio not configured
- * as output but native cs)
- */
- ret = gpio_direction_output(spi->cs_gpio,
- (spi->mode & SPI_CS_HIGH) ? 0 : 1);
- if (ret)
- dev_err(&spi->dev,
- "could not set gpio %i as output: %i\n",
- spi->cs_gpio, ret);
-
- return ret;
- }
+
+ if (spi->cs_gpiod)
+ return 0;
/* for dt-backwards compatibility: only support native on CS0
* known things not supported with broken native CS:
@@ -519,6 +505,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
master->prepare_message = bcm2835aux_spi_prepare_message;
master->unprepare_message = bcm2835aux_spi_unprepare_message;
master->dev.of_node = pdev->dev.of_node;
+ master->use_gpio_descriptors = true;
bs = spi_master_get_devdata(master);
diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h
index ae61d72c7d28..267342dfa738 100644
--- a/drivers/spi/spi-bitbang-txrx.h
+++ b/drivers/spi/spi-bitbang-txrx.h
@@ -41,6 +41,8 @@
* chips need ... there may be several reasons you'd need to tweak timings
* in these routines, not just to make it faster or slower to match a
* particular CPU clock rate.
+ *
+ * ToDo: Maybe the bitrev macros can be used to improve the code?
*/
static inline u32
@@ -106,3 +108,67 @@ bitbang_txrx_be_cpha1(struct spi_device *spi,
}
return word;
}
+
+static inline u32
+bitbang_txrx_le_cpha0(struct spi_device *spi,
+ unsigned int nsecs, unsigned int cpol, unsigned int flags,
+ u32 word, u8 bits)
+{
+ /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
+
+ u32 oldbit = !(word & 1);
+ /* clock starts at inactive polarity */
+ for (; likely(bits); bits--) {
+
+ /* setup LSB (to slave) on trailing edge */
+ if ((flags & SPI_MASTER_NO_TX) == 0) {
+ if ((word & 1) != oldbit) {
+ setmosi(spi, word & 1);
+ oldbit = word & 1;
+ }
+ }
+ spidelay(nsecs); /* T(setup) */
+
+ setsck(spi, !cpol);
+ spidelay(nsecs);
+
+ /* sample LSB (from slave) on leading edge */
+ word >>= 1;
+ if ((flags & SPI_MASTER_NO_RX) == 0)
+ word |= getmiso(spi) << (bits - 1);
+ setsck(spi, cpol);
+ }
+ return word;
+}
+
+static inline u32
+bitbang_txrx_le_cpha1(struct spi_device *spi,
+ unsigned int nsecs, unsigned int cpol, unsigned int flags,
+ u32 word, u8 bits)
+{
+ /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */
+
+ u32 oldbit = !(word & 1);
+ /* clock starts at inactive polarity */
+ for (; likely(bits); bits--) {
+
+ /* setup LSB (to slave) on leading edge */
+ setsck(spi, !cpol);
+ if ((flags & SPI_MASTER_NO_TX) == 0) {
+ if ((word & 1) != oldbit) {
+ setmosi(spi, word & 1);
+ oldbit = word & 1;
+ }
+ }
+ spidelay(nsecs); /* T(setup) */
+
+ setsck(spi, cpol);
+ spidelay(nsecs);
+
+ /* sample LSB (from slave) on trailing edge */
+ word >>= 1;
+ if ((flags & SPI_MASTER_NO_RX) == 0)
+ word |= getmiso(spi) << (bits - 1);
+ }
+ return word;
+}
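[Editor's note] The four little-endian helpers mirror the existing big-endian ones, so a controller can dispatch on SPI_LSB_FIRST together with the clock mode, as spi-gpio does in the next file. A hedged dispatch sketch:

#include <linux/spi/spi.h>

/* Sketch: pick a word helper from the device's mode bits.
 * Modes 0/2 are CPHA=0, modes 1/3 are CPHA=1; CPOL sets the
 * idle clock level. */
static u32 txrx_word(struct spi_device *spi, unsigned int nsecs,
		     u32 word, u8 bits, unsigned int flags)
{
	unsigned int cpol = (spi->mode & SPI_CPOL) ? 1 : 0;
	bool lsb = spi->mode & SPI_LSB_FIRST;

	if (spi->mode & SPI_CPHA)
		return lsb ? bitbang_txrx_le_cpha1(spi, nsecs, cpol, flags, word, bits)
			   : bitbang_txrx_be_cpha1(spi, nsecs, cpol, flags, word, bits);

	return lsb ? bitbang_txrx_le_cpha0(spi, nsecs, cpol, flags, word, bits)
		   : bitbang_txrx_be_cpha0(spi, nsecs, cpol, flags, word, bits);
}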
diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
index 4bc1b93fc276..3ab19be83095 100644
--- a/drivers/spi/spi-cadence-xspi.c
+++ b/drivers/spi/spi-cadence-xspi.c
@@ -578,10 +578,8 @@ static int cdns_xspi_probe(struct platform_device *pdev)
}
cdns_xspi->irq = platform_get_irq(pdev, 0);
- if (cdns_xspi->irq < 0) {
- dev_err(dev, "Failed to get IRQ\n");
+ if (cdns_xspi->irq < 0)
return -ENXIO;
- }
ret = devm_request_irq(dev, cdns_xspi->irq, cdns_xspi_irq_handler,
IRQF_SHARED, pdev->name, cdns_xspi);
diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
index b6c7467f0b59..d403a7a3021d 100644
--- a/drivers/spi/spi-fsi.c
+++ b/drivers/spi/spi-fsi.c
@@ -25,6 +25,7 @@
#define SPI_FSI_BASE 0x70000
#define SPI_FSI_INIT_TIMEOUT_MS 1000
+#define SPI_FSI_STATUS_TIMEOUT_MS 100
#define SPI_FSI_MAX_RX_SIZE 8
#define SPI_FSI_MAX_TX_SIZE 40
@@ -299,6 +300,7 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
struct spi_transfer *transfer)
{
int rc = 0;
+ unsigned long end;
u64 status = 0ULL;
if (transfer->tx_buf) {
@@ -315,10 +317,14 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
if (rc)
return rc;
+ end = jiffies + msecs_to_jiffies(SPI_FSI_STATUS_TIMEOUT_MS);
do {
rc = fsi_spi_status(ctx, &status, "TX");
if (rc)
return rc;
+
+ if (time_after(jiffies, end))
+ return -ETIMEDOUT;
} while (status & SPI_FSI_STATUS_TDR_FULL);
sent += nb;
@@ -329,10 +335,14 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
u8 *rx = transfer->rx_buf;
while (transfer->len > recv) {
+ end = jiffies + msecs_to_jiffies(SPI_FSI_STATUS_TIMEOUT_MS);
do {
rc = fsi_spi_status(ctx, &status, "RX");
if (rc)
return rc;
+
+ if (time_after(jiffies, end))
+ return -ETIMEDOUT;
} while (!(status & SPI_FSI_STATUS_RDR_FULL));
rc = fsi_spi_read_reg(ctx, SPI_FSI_DATA_RX, &in);
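Both polling loops now follow the standard jiffies-deadline idiom: take a
deadline once, poll, and give up with -ETIMEDOUT when jiffies passes it. A
self-contained sketch, where hw_ready() is a hypothetical stand-in for
fsi_spi_status():

	#include <linux/jiffies.h>
	#include <linux/errno.h>

	static int example_wait_ready(struct fsi_spi *ctx)
	{
		unsigned long end = jiffies + msecs_to_jiffies(100);

		while (!hw_ready(ctx)) {	/* hypothetical check */
			if (time_after(jiffies, end))
				return -ETIMEDOUT;
			cpu_relax();
		}
		return 0;
	}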
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index f7d905d2a90f..4e83cc5b445d 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -898,11 +898,8 @@ static int spi_geni_probe(struct platform_device *pdev)
return irq;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (ret) {
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
- if (ret)
- return dev_err_probe(dev, ret, "could not set DMA mask\n");
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "could not set DMA mask\n");
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
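Dropping the 32-bit fallback leans on the fact that a 64-bit streaming mask
does not fail on platforms where a 32-bit one would succeed, so the retry was
dead code. The dev_err_probe() helper used here logs the failure once (staying
silent for -EPROBE_DEFER, whose reason it records instead) and returns the
errno; a sketch of the general pattern with a hypothetical clock resource:

	#include <linux/clk.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *clk = devm_clk_get(&pdev->dev, NULL);

		if (IS_ERR(clk))
			return dev_err_probe(&pdev->dev, PTR_ERR(clk),
					     "failed to get clock\n");
		return 0;
	}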
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 0584f4d2fde2..4b12c4964a66 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -135,25 +135,37 @@ static inline int getmiso(const struct spi_device *spi)
static u32 spi_gpio_txrx_word_mode0(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha0(spi, nsecs, 0, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
}
static u32 spi_gpio_txrx_word_mode1(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha1(spi, nsecs, 0, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
}
static u32 spi_gpio_txrx_word_mode2(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha0(spi, nsecs, 1, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
}
static u32 spi_gpio_txrx_word_mode3(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha1(spi, nsecs, 1, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
}
/*
@@ -170,28 +182,40 @@ static u32 spi_gpio_spec_txrx_word_mode0(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
flags = spi->master->flags;
- return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha0(spi, nsecs, 0, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
}
static u32 spi_gpio_spec_txrx_word_mode1(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
flags = spi->master->flags;
- return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha1(spi, nsecs, 0, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
}
static u32 spi_gpio_spec_txrx_word_mode2(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
flags = spi->master->flags;
- return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha0(spi, nsecs, 1, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
}
static u32 spi_gpio_spec_txrx_word_mode3(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
flags = spi->master->flags;
- return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha1(spi, nsecs, 1, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
}
/*----------------------------------------------------------------------*/
@@ -378,7 +402,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL |
- SPI_CS_HIGH;
+ SPI_CS_HIGH | SPI_LSB_FIRST;
if (!spi_gpio->mosi) {
/* HW configuration without MOSI pin
*
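With SPI_LSB_FIRST in mode_bits, the wire order becomes a per-device choice
and the txrx helpers above pick the le variants at transfer time. A sketch of
a client opting in (example_use_lsb() is hypothetical):

	#include <linux/spi/spi.h>

	static int example_use_lsb(struct spi_device *spi)
	{
		/* spi_setup() rejects modes the controller did not declare */
		spi->mode |= SPI_LSB_FIRST;
		return spi_setup(spi);
	}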
diff --git a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c b/drivers/spi/spi-intel-pci.c
index 1bc53b8bb88a..a5ef7a526a7f 100644
--- a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
+++ b/drivers/spi/spi-intel-pci.c
@@ -2,34 +2,48 @@
/*
* Intel PCH/PCU SPI flash PCI driver.
*
- * Copyright (C) 2016, Intel Corporation
+ * Copyright (C) 2016 - 2022, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
-#include <linux/ioport.h>
-#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include "intel-spi.h"
+#include "spi-intel.h"
#define BCR 0xdc
#define BCR_WPD BIT(0)
+static bool intel_spi_pci_set_writeable(void __iomem *base, void *data)
+{
+ struct pci_dev *pdev = data;
+ u32 bcr;
+
+ /* Try to make the chip read/write */
+ pci_read_config_dword(pdev, BCR, &bcr);
+ if (!(bcr & BCR_WPD)) {
+ bcr |= BCR_WPD;
+ pci_write_config_dword(pdev, BCR, bcr);
+ pci_read_config_dword(pdev, BCR, &bcr);
+ }
+
+ return bcr & BCR_WPD;
+}
+
static const struct intel_spi_boardinfo bxt_info = {
.type = INTEL_SPI_BXT,
+ .set_writeable = intel_spi_pci_set_writeable,
};
static const struct intel_spi_boardinfo cnl_info = {
.type = INTEL_SPI_CNL,
+ .set_writeable = intel_spi_pci_set_writeable,
};
static int intel_spi_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct intel_spi_boardinfo *info;
- struct intel_spi *ispi;
- u32 bcr;
int ret;
ret = pcim_enable_device(pdev);
@@ -41,26 +55,8 @@ static int intel_spi_pci_probe(struct pci_dev *pdev,
if (!info)
return -ENOMEM;
- /* Try to make the chip read/write */
- pci_read_config_dword(pdev, BCR, &bcr);
- if (!(bcr & BCR_WPD)) {
- bcr |= BCR_WPD;
- pci_write_config_dword(pdev, BCR, bcr);
- pci_read_config_dword(pdev, BCR, &bcr);
- }
- info->writeable = !!(bcr & BCR_WPD);
-
- ispi = intel_spi_probe(&pdev->dev, &pdev->resource[0], info);
- if (IS_ERR(ispi))
- return PTR_ERR(ispi);
-
- pci_set_drvdata(pdev, ispi);
- return 0;
-}
-
-static void intel_spi_pci_remove(struct pci_dev *pdev)
-{
- intel_spi_remove(pci_get_drvdata(pdev));
+ info->data = pdev;
+ return intel_spi_probe(&pdev->dev, &pdev->resource[0], info);
}
static const struct pci_device_id intel_spi_pci_ids[] = {
@@ -70,6 +66,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x1bca), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x38a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x43a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
@@ -89,7 +86,6 @@ static struct pci_driver intel_spi_pci_driver = {
.name = "intel-spi",
.id_table = intel_spi_pci_ids,
.probe = intel_spi_pci_probe,
- .remove = intel_spi_pci_remove,
};
module_pci_driver(intel_spi_pci_driver);
diff --git a/drivers/mtd/spi-nor/controllers/intel-spi-platform.c b/drivers/spi/spi-intel-platform.c
index f80f1086f928..2ef09fa35661 100644
--- a/drivers/mtd/spi-nor/controllers/intel-spi-platform.c
+++ b/drivers/spi/spi-intel-platform.c
@@ -2,20 +2,18 @@
/*
* Intel PCH/PCU SPI flash platform driver.
*
- * Copyright (C) 2016, Intel Corporation
+ * Copyright (C) 2016 - 2022, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
-#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include "intel-spi.h"
+#include "spi-intel.h"
static int intel_spi_platform_probe(struct platform_device *pdev)
{
struct intel_spi_boardinfo *info;
- struct intel_spi *ispi;
struct resource *mem;
info = dev_get_platdata(&pdev->dev);
@@ -23,24 +21,11 @@ static int intel_spi_platform_probe(struct platform_device *pdev)
return -EINVAL;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ispi = intel_spi_probe(&pdev->dev, mem, info);
- if (IS_ERR(ispi))
- return PTR_ERR(ispi);
-
- platform_set_drvdata(pdev, ispi);
- return 0;
-}
-
-static int intel_spi_platform_remove(struct platform_device *pdev)
-{
- struct intel_spi *ispi = platform_get_drvdata(pdev);
-
- return intel_spi_remove(ispi);
+ return intel_spi_probe(&pdev->dev, mem, info);
}
static struct platform_driver intel_spi_platform_driver = {
.probe = intel_spi_platform_probe,
- .remove = intel_spi_platform_remove,
.driver = {
.name = "intel-spi",
},
diff --git a/drivers/mtd/spi-nor/controllers/intel-spi.c b/drivers/spi/spi-intel.c
index a413892ff449..e937cfe85559 100644
--- a/drivers/mtd/spi-nor/controllers/intel-spi.c
+++ b/drivers/spi/spi-intel.c
@@ -2,21 +2,21 @@
/*
* Intel PCH/PCU SPI flash driver.
*
- * Copyright (C) 2016, Intel Corporation
+ * Copyright (C) 2016 - 2022, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
-#include <linux/err.h>
-#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/sizes.h>
-#include <linux/mtd/mtd.h>
+
#include <linux/mtd/partitions.h>
#include <linux/mtd/spi-nor.h>
-#include "intel-spi.h"
+#include <linux/spi/flash.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+#include "spi-intel.h"
/* Offsets are from @ispi->base */
#define BFPREG 0x00
@@ -92,8 +92,6 @@
/* CPU specifics */
#define BYT_PR 0x74
#define BYT_SSFSTS_CTL 0x90
-#define BYT_BCR 0xfc
-#define BYT_BCR_WPD BIT(0)
#define BYT_FREG_NUM 5
#define BYT_PR_NUM 5
@@ -125,37 +123,43 @@
* struct intel_spi - Driver private data
* @dev: Device pointer
* @info: Pointer to board specific info
- * @nor: SPI NOR layer structure
* @base: Beginning of MMIO space
* @pregs: Start of protection registers
* @sregs: Start of software sequencer registers
+ * @master: Pointer to the SPI controller structure
* @nregions: Maximum number of regions
* @pr_num: Maximum number of protected range registers
- * @writeable: Is the chip writeable
* @locked: Is SPI setting locked
* @swseq_reg: Use SW sequencer in register reads/writes
* @swseq_erase: Use SW sequencer in erase operation
- * @erase_64k: 64k erase supported
* @atomic_preopcode: Holds preopcode when atomic sequence is requested
* @opcodes: Opcodes which are supported. This are programmed by BIOS
* before it locks down the controller.
+ * @mem_ops: Pointer to SPI MEM ops supported by the controller
*/
struct intel_spi {
struct device *dev;
const struct intel_spi_boardinfo *info;
- struct spi_nor nor;
void __iomem *base;
void __iomem *pregs;
void __iomem *sregs;
+ struct spi_controller *master;
size_t nregions;
size_t pr_num;
- bool writeable;
bool locked;
bool swseq_reg;
bool swseq_erase;
- bool erase_64k;
u8 atomic_preopcode;
u8 opcodes[8];
+ const struct intel_spi_mem_op *mem_ops;
+};
+
+struct intel_spi_mem_op {
+ struct spi_mem_op mem_op;
+ u32 replacement_op;
+ int (*exec_op)(struct intel_spi *ispi,
+ const struct intel_spi_mem_op *iop,
+ const struct spi_mem_op *op);
};
static bool writeable;
@@ -201,9 +205,6 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
readl(ispi->sregs + OPMENU1));
}
- if (ispi->info->type == INTEL_SPI_BYT)
- dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));
-
dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC));
dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC));
@@ -219,9 +220,8 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
base = value & PR_BASE_MASK;
dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n",
- i, base << 12, (limit << 12) | 0xfff,
- value & PR_WPE ? 'W' : '.',
- value & PR_RPE ? 'R' : '.');
+ i, base << 12, (limit << 12) | 0xfff,
+ value & PR_WPE ? 'W' : '.', value & PR_RPE ? 'R' : '.');
}
dev_dbg(ispi->dev, "Flash regions:\n");
@@ -236,7 +236,7 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
dev_dbg(ispi->dev, " %02d disabled\n", i);
else
dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n",
- i, base << 12, (limit << 12) | 0xfff);
+ i, base << 12, (limit << 12) | 0xfff);
}
dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
@@ -304,124 +304,12 @@ static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
INTEL_SPI_TIMEOUT * 1000);
}
-static int intel_spi_init(struct intel_spi *ispi)
+static bool intel_spi_set_writeable(struct intel_spi *ispi)
{
- u32 opmenu0, opmenu1, lvscc, uvscc, val;
- int i;
-
- switch (ispi->info->type) {
- case INTEL_SPI_BYT:
- ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
- ispi->pregs = ispi->base + BYT_PR;
- ispi->nregions = BYT_FREG_NUM;
- ispi->pr_num = BYT_PR_NUM;
- ispi->swseq_reg = true;
-
- if (writeable) {
- /* Disable write protection */
- val = readl(ispi->base + BYT_BCR);
- if (!(val & BYT_BCR_WPD)) {
- val |= BYT_BCR_WPD;
- writel(val, ispi->base + BYT_BCR);
- val = readl(ispi->base + BYT_BCR);
- }
-
- ispi->writeable = !!(val & BYT_BCR_WPD);
- }
-
- break;
-
- case INTEL_SPI_LPT:
- ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
- ispi->pregs = ispi->base + LPT_PR;
- ispi->nregions = LPT_FREG_NUM;
- ispi->pr_num = LPT_PR_NUM;
- ispi->swseq_reg = true;
- break;
-
- case INTEL_SPI_BXT:
- ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
- ispi->pregs = ispi->base + BXT_PR;
- ispi->nregions = BXT_FREG_NUM;
- ispi->pr_num = BXT_PR_NUM;
- ispi->erase_64k = true;
- break;
-
- case INTEL_SPI_CNL:
- ispi->sregs = NULL;
- ispi->pregs = ispi->base + CNL_PR;
- ispi->nregions = CNL_FREG_NUM;
- ispi->pr_num = CNL_PR_NUM;
- break;
+ if (!ispi->info->set_writeable)
+ return false;
- default:
- return -EINVAL;
- }
-
- /* Disable #SMI generation from HW sequencer */
- val = readl(ispi->base + HSFSTS_CTL);
- val &= ~HSFSTS_CTL_FSMIE;
- writel(val, ispi->base + HSFSTS_CTL);
-
- /*
- * Determine whether erase operation should use HW or SW sequencer.
- *
- * The HW sequencer has a predefined list of opcodes, with only the
- * erase opcode being programmable in LVSCC and UVSCC registers.
- * If these registers don't contain a valid erase opcode, erase
- * cannot be done using HW sequencer.
- */
- lvscc = readl(ispi->base + LVSCC);
- uvscc = readl(ispi->base + UVSCC);
- if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
- ispi->swseq_erase = true;
- /* SPI controller on Intel BXT supports 64K erase opcode */
- if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
- if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
- !(uvscc & ERASE_64K_OPCODE_MASK))
- ispi->erase_64k = false;
-
- if (ispi->sregs == NULL && (ispi->swseq_reg || ispi->swseq_erase)) {
- dev_err(ispi->dev, "software sequencer not supported, but required\n");
- return -EINVAL;
- }
-
- /*
- * Some controllers can only do basic operations using hardware
- * sequencer. All other operations are supposed to be carried out
- * using software sequencer.
- */
- if (ispi->swseq_reg) {
- /* Disable #SMI generation from SW sequencer */
- val = readl(ispi->sregs + SSFSTS_CTL);
- val &= ~SSFSTS_CTL_FSMIE;
- writel(val, ispi->sregs + SSFSTS_CTL);
- }
-
- /* Check controller's lock status */
- val = readl(ispi->base + HSFSTS_CTL);
- ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);
-
- if (ispi->locked && ispi->sregs) {
- /*
- * BIOS programs allowed opcodes and then locks down the
- * register. So read back what opcodes it decided to support.
- * That's the set we are going to support as well.
- */
- opmenu0 = readl(ispi->sregs + OPMENU0);
- opmenu1 = readl(ispi->sregs + OPMENU1);
-
- if (opmenu0 && opmenu1) {
- for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
- ispi->opcodes[i] = opmenu0 >> i * 8;
- ispi->opcodes[i + 4] = opmenu1 >> i * 8;
- }
- }
- }
-
- intel_spi_dump_regs(ispi);
-
- return 0;
+ return ispi->info->set_writeable(ispi->base, ispi->info->data);
}
static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
@@ -537,7 +425,6 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len,
default:
return -EINVAL;
}
-
}
writel(val, ispi->sregs + SSFSTS_CTL);
@@ -554,31 +441,35 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len,
return 0;
}
-static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
- size_t len)
+static int intel_spi_read_reg(struct intel_spi *ispi,
+ const struct intel_spi_mem_op *iop,
+ const struct spi_mem_op *op)
{
- struct intel_spi *ispi = nor->priv;
+ size_t nbytes = op->data.nbytes;
+ u8 opcode = op->cmd.opcode;
int ret;
/* Address of the first chip */
writel(0, ispi->base + FADDR);
if (ispi->swseq_reg)
- ret = intel_spi_sw_cycle(ispi, opcode, len,
+ ret = intel_spi_sw_cycle(ispi, opcode, nbytes,
OPTYPE_READ_NO_ADDR);
else
- ret = intel_spi_hw_cycle(ispi, opcode, len);
+ ret = intel_spi_hw_cycle(ispi, opcode, nbytes);
if (ret)
return ret;
- return intel_spi_read_block(ispi, buf, len);
+ return intel_spi_read_block(ispi, op->data.buf.in, nbytes);
}
-static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
- size_t len)
+static int intel_spi_write_reg(struct intel_spi *ispi,
+ const struct intel_spi_mem_op *iop,
+ const struct spi_mem_op *op)
{
- struct intel_spi *ispi = nor->priv;
+ size_t nbytes = op->data.nbytes;
+ u8 opcode = op->cmd.opcode;
int ret;
/*
@@ -623,23 +514,25 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
writel(0, ispi->base + FADDR);
/* Write the value beforehand */
- ret = intel_spi_write_block(ispi, buf, len);
+ ret = intel_spi_write_block(ispi, op->data.buf.out, nbytes);
if (ret)
return ret;
if (ispi->swseq_reg)
- return intel_spi_sw_cycle(ispi, opcode, len,
+ return intel_spi_sw_cycle(ispi, opcode, nbytes,
OPTYPE_WRITE_NO_ADDR);
- return intel_spi_hw_cycle(ispi, opcode, len);
+ return intel_spi_hw_cycle(ispi, opcode, nbytes);
}
-static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
- u_char *read_buf)
+static int intel_spi_read(struct intel_spi *ispi,
+ const struct intel_spi_mem_op *iop,
+ const struct spi_mem_op *op)
{
- struct intel_spi *ispi = nor->priv;
- size_t block_size, retlen = 0;
+ void *read_buf = op->data.buf.in;
+ size_t block_size, nbytes = op->data.nbytes;
+ u32 addr = op->addr.val;
u32 val, status;
- ssize_t ret;
+ int ret;
/*
* Atomic sequence is not expected with HW sequencer reads. Make
@@ -648,24 +541,14 @@ static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
if (WARN_ON_ONCE(ispi->atomic_preopcode))
ispi->atomic_preopcode = 0;
- switch (nor->read_opcode) {
- case SPINOR_OP_READ:
- case SPINOR_OP_READ_FAST:
- case SPINOR_OP_READ_4B:
- case SPINOR_OP_READ_FAST_4B:
- break;
- default:
- return -EINVAL;
- }
-
- while (len > 0) {
- block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
+ while (nbytes > 0) {
+ block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ);
/* Read cannot cross 4K boundary */
- block_size = min_t(loff_t, from + block_size,
- round_up(from + 1, SZ_4K)) - from;
+ block_size = min_t(loff_t, addr + block_size,
+ round_up(addr + 1, SZ_4K)) - addr;
- writel(from, ispi->base + FADDR);
+ writel(addr, ispi->base + FADDR);
val = readl(ispi->base + HSFSTS_CTL);
val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
@@ -686,8 +569,7 @@ static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
ret = -EACCES;
if (ret < 0) {
- dev_err(ispi->dev, "read error: %llx: %#x\n", from,
- status);
+ dev_err(ispi->dev, "read error: %x: %#x\n", addr, status);
return ret;
}
@@ -695,34 +577,35 @@ static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
if (ret)
return ret;
- len -= block_size;
- from += block_size;
- retlen += block_size;
+ nbytes -= block_size;
+ addr += block_size;
read_buf += block_size;
}
- return retlen;
+ return 0;
}
-static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
- const u_char *write_buf)
+static int intel_spi_write(struct intel_spi *ispi,
+ const struct intel_spi_mem_op *iop,
+ const struct spi_mem_op *op)
{
- struct intel_spi *ispi = nor->priv;
- size_t block_size, retlen = 0;
+ size_t block_size, nbytes = op->data.nbytes;
+ const void *write_buf = op->data.buf.out;
+ u32 addr = op->addr.val;
u32 val, status;
- ssize_t ret;
+ int ret;
/* Not needed with HW sequencer write, make sure it is cleared */
ispi->atomic_preopcode = 0;
- while (len > 0) {
- block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
+ while (nbytes > 0) {
+ block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ);
/* Write cannot cross 4K boundary */
- block_size = min_t(loff_t, to + block_size,
- round_up(to + 1, SZ_4K)) - to;
+ block_size = min_t(loff_t, addr + block_size,
+ round_up(addr + 1, SZ_4K)) - addr;
- writel(to, ispi->base + FADDR);
+ writel(addr, ispi->base + FADDR);
val = readl(ispi->base + HSFSTS_CTL);
val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
@@ -753,79 +636,476 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
ret = -EACCES;
if (ret < 0) {
- dev_err(ispi->dev, "write error: %llx: %#x\n", to,
- status);
+ dev_err(ispi->dev, "write error: %x: %#x\n", addr, status);
return ret;
}
- len -= block_size;
- to += block_size;
- retlen += block_size;
+ nbytes -= block_size;
+ addr += block_size;
write_buf += block_size;
}
- return retlen;
+ return 0;
}
-static int intel_spi_erase(struct spi_nor *nor, loff_t offs)
+static int intel_spi_erase(struct intel_spi *ispi,
+ const struct intel_spi_mem_op *iop,
+ const struct spi_mem_op *op)
{
- size_t erase_size, len = nor->mtd.erasesize;
- struct intel_spi *ispi = nor->priv;
- u32 val, status, cmd;
+ u8 opcode = op->cmd.opcode;
+ u32 addr = op->addr.val;
+ u32 val, status;
int ret;
- /* If the hardware can do 64k erase use that when possible */
- if (len >= SZ_64K && ispi->erase_64k) {
- cmd = HSFSTS_CTL_FCYCLE_ERASE_64K;
- erase_size = SZ_64K;
- } else {
- cmd = HSFSTS_CTL_FCYCLE_ERASE;
- erase_size = SZ_4K;
+ writel(addr, ispi->base + FADDR);
+
+ if (ispi->swseq_erase)
+ return intel_spi_sw_cycle(ispi, opcode, 0,
+ OPTYPE_WRITE_WITH_ADDR);
+
+ /* Not needed with HW sequencer erase, make sure it is cleared */
+ ispi->atomic_preopcode = 0;
+
+ val = readl(ispi->base + HSFSTS_CTL);
+ val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
+ val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
+ val |= HSFSTS_CTL_FGO;
+ val |= iop->replacement_op;
+ writel(val, ispi->base + HSFSTS_CTL);
+
+ ret = intel_spi_wait_hw_busy(ispi);
+ if (ret)
+ return ret;
+
+ status = readl(ispi->base + HSFSTS_CTL);
+ if (status & HSFSTS_CTL_FCERR)
+ return -EIO;
+ if (status & HSFSTS_CTL_AEL)
+ return -EACCES;
+
+ return 0;
+}
+
+static bool intel_spi_cmp_mem_op(const struct intel_spi_mem_op *iop,
+ const struct spi_mem_op *op)
+{
+ if (iop->mem_op.cmd.nbytes != op->cmd.nbytes ||
+ iop->mem_op.cmd.buswidth != op->cmd.buswidth ||
+ iop->mem_op.cmd.dtr != op->cmd.dtr ||
+ iop->mem_op.cmd.opcode != op->cmd.opcode)
+ return false;
+
+ if (iop->mem_op.addr.nbytes != op->addr.nbytes ||
+ iop->mem_op.addr.dtr != op->addr.dtr)
+ return false;
+
+ if (iop->mem_op.data.dir != op->data.dir ||
+ iop->mem_op.data.dtr != op->data.dtr)
+ return false;
+
+ if (iop->mem_op.data.dir != SPI_MEM_NO_DATA) {
+ if (iop->mem_op.data.buswidth != op->data.buswidth)
+ return false;
+ }
+
+ return true;
+}
+
+static const struct intel_spi_mem_op *
+intel_spi_match_mem_op(struct intel_spi *ispi, const struct spi_mem_op *op)
+{
+ const struct intel_spi_mem_op *iop;
+
+ for (iop = ispi->mem_ops; iop->mem_op.cmd.opcode; iop++) {
+ if (intel_spi_cmp_mem_op(iop, op))
+ break;
}
- if (ispi->swseq_erase) {
- while (len > 0) {
- writel(offs, ispi->base + FADDR);
+ return iop->mem_op.cmd.opcode ? iop : NULL;
+}
+
+static bool intel_spi_supports_mem_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master);
+ const struct intel_spi_mem_op *iop;
+
+ iop = intel_spi_match_mem_op(ispi, op);
+ if (!iop) {
+ dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode);
+ return false;
+ }
- ret = intel_spi_sw_cycle(ispi, nor->erase_opcode,
- 0, OPTYPE_WRITE_WITH_ADDR);
- if (ret)
- return ret;
+ /*
+ * For software sequencer check that the opcode is actually
+ * present in the opmenu if it is locked.
+ */
+ if (ispi->swseq_reg && ispi->locked) {
+ int i;
- offs += erase_size;
- len -= erase_size;
+ /* Check if it is in the locked opcodes list */
+ for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) {
+ if (ispi->opcodes[i] == op->cmd.opcode)
+ return true;
}
- return 0;
+ dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode);
+ return false;
}
- /* Not needed with HW sequencer erase, make sure it is cleared */
- ispi->atomic_preopcode = 0;
+ return true;
+}
- while (len > 0) {
- writel(offs, ispi->base + FADDR);
+static int intel_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master);
+ const struct intel_spi_mem_op *iop;
- val = readl(ispi->base + HSFSTS_CTL);
- val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
- val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
- val |= cmd;
- val |= HSFSTS_CTL_FGO;
- writel(val, ispi->base + HSFSTS_CTL);
+ iop = intel_spi_match_mem_op(ispi, op);
+ if (!iop)
+ return -EOPNOTSUPP;
- ret = intel_spi_wait_hw_busy(ispi);
- if (ret)
- return ret;
+ return iop->exec_op(ispi, iop, op);
+}
- status = readl(ispi->base + HSFSTS_CTL);
- if (status & HSFSTS_CTL_FCERR)
- return -EIO;
- else if (status & HSFSTS_CTL_AEL)
- return -EACCES;
+static const char *intel_spi_get_name(struct spi_mem *mem)
+{
+ const struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master);
+
+ /*
+ * Return name of the flash controller device to be compatible
+ * with the MTD version.
+ */
+ return dev_name(ispi->dev);
+}
+
+static const struct spi_controller_mem_ops intel_spi_mem_ops = {
+ .supports_op = intel_spi_supports_mem_op,
+ .exec_op = intel_spi_exec_mem_op,
+ .get_name = intel_spi_get_name,
+};
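Matching works by comparing an incoming spi_mem_op field by field against the
template table that follows. For orientation, a JEDEC ID read issued by the
spi-nor core arrives roughly as the descriptor below, built with the standard
spi-mem helpers (id is a hypothetical 3-byte buffer):

	#include <linux/spi/spi-mem.h>

	u8 id[3];
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
					  SPI_MEM_OP_NO_ADDR,
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_IN(sizeof(id), id, 1));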
+
+#define INTEL_SPI_OP_ADDR(__nbytes) \
+ { \
+ .nbytes = __nbytes, \
+ }
+
+#define INTEL_SPI_OP_NO_DATA \
+ { \
+ .dir = SPI_MEM_NO_DATA, \
+ }
+
+#define INTEL_SPI_OP_DATA_IN(__buswidth) \
+ { \
+ .dir = SPI_MEM_DATA_IN, \
+ .buswidth = __buswidth, \
+ }
+
+#define INTEL_SPI_OP_DATA_OUT(__buswidth) \
+ { \
+ .dir = SPI_MEM_DATA_OUT, \
+ .buswidth = __buswidth, \
+ }
+
+#define INTEL_SPI_MEM_OP(__cmd, __addr, __data, __exec_op) \
+ { \
+ .mem_op = { \
+ .cmd = __cmd, \
+ .addr = __addr, \
+ .data = __data, \
+ }, \
+ .exec_op = __exec_op, \
+ }
+
+#define INTEL_SPI_MEM_OP_REPL(__cmd, __addr, __data, __exec_op, __repl) \
+ { \
+ .mem_op = { \
+ .cmd = __cmd, \
+ .addr = __addr, \
+ .data = __data, \
+ }, \
+ .exec_op = __exec_op, \
+ .replacement_op = __repl, \
+ }
+
+/*
+ * The controller handles pretty much everything internally based on the
+ * SFDP data but we want to make sure we only support the operations
+ * actually possible. Only check buswidth and transfer direction, the
+ * core validates data.
+ */
+#define INTEL_SPI_GENERIC_OPS \
+ /* Status register operations */ \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read_reg), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read_reg), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ INTEL_SPI_OP_DATA_OUT(1), \
+ intel_spi_write_reg), \
+ /* Normal read */ \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_IN(2), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_IN(4), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(2), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(4), \
+ intel_spi_read), \
+ /* Fast read */ \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_IN(2), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_IN(4), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(2), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(4), \
+ intel_spi_read), \
+ /* Read with 4-byte address opcode */ \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(2), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(4), \
+ intel_spi_read), \
+ /* Fast read with 4-byte address opcode */ \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(2), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(4), \
+ intel_spi_read), \
+ /* Write operations */ \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_OUT(1), \
+ intel_spi_write), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_OUT(1), \
+ intel_spi_write), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_OUT(1), \
+ intel_spi_write), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DATA, \
+ intel_spi_write_reg), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DATA, \
+ intel_spi_write_reg), \
+ /* Erase operations */ \
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ SPI_MEM_OP_NO_DATA, \
+ intel_spi_erase, \
+ HSFSTS_CTL_FCYCLE_ERASE), \
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ SPI_MEM_OP_NO_DATA, \
+ intel_spi_erase, \
+ HSFSTS_CTL_FCYCLE_ERASE), \
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ SPI_MEM_OP_NO_DATA, \
+ intel_spi_erase, \
+ HSFSTS_CTL_FCYCLE_ERASE) \
+
+static const struct intel_spi_mem_op generic_mem_ops[] = {
+ INTEL_SPI_GENERIC_OPS,
+ { },
+};
+
+static const struct intel_spi_mem_op erase_64k_mem_ops[] = {
+ INTEL_SPI_GENERIC_OPS,
+ /* 64k sector erase operations */
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1),
+ INTEL_SPI_OP_ADDR(3),
+ SPI_MEM_OP_NO_DATA,
+ intel_spi_erase,
+ HSFSTS_CTL_FCYCLE_ERASE_64K),
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1),
+ INTEL_SPI_OP_ADDR(4),
+ SPI_MEM_OP_NO_DATA,
+ intel_spi_erase,
+ HSFSTS_CTL_FCYCLE_ERASE_64K),
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE_4B, 1),
+ INTEL_SPI_OP_ADDR(4),
+ SPI_MEM_OP_NO_DATA,
+ intel_spi_erase,
+ HSFSTS_CTL_FCYCLE_ERASE_64K),
+ { },
+};
+
+static int intel_spi_init(struct intel_spi *ispi)
+{
+ u32 opmenu0, opmenu1, lvscc, uvscc, val;
+ bool erase_64k = false;
+ int i;
+
+ switch (ispi->info->type) {
+ case INTEL_SPI_BYT:
+ ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
+ ispi->pregs = ispi->base + BYT_PR;
+ ispi->nregions = BYT_FREG_NUM;
+ ispi->pr_num = BYT_PR_NUM;
+ ispi->swseq_reg = true;
+ break;
+
+ case INTEL_SPI_LPT:
+ ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
+ ispi->pregs = ispi->base + LPT_PR;
+ ispi->nregions = LPT_FREG_NUM;
+ ispi->pr_num = LPT_PR_NUM;
+ ispi->swseq_reg = true;
+ break;
+
+ case INTEL_SPI_BXT:
+ ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
+ ispi->pregs = ispi->base + BXT_PR;
+ ispi->nregions = BXT_FREG_NUM;
+ ispi->pr_num = BXT_PR_NUM;
+ erase_64k = true;
+ break;
+
+ case INTEL_SPI_CNL:
+ ispi->sregs = NULL;
+ ispi->pregs = ispi->base + CNL_PR;
+ ispi->nregions = CNL_FREG_NUM;
+ ispi->pr_num = CNL_PR_NUM;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Try to disable write protection if user asked to do so */
+ if (writeable && !intel_spi_set_writeable(ispi)) {
+ dev_warn(ispi->dev, "can't disable chip write protection\n");
+ writeable = false;
+ }
+
+ /* Disable #SMI generation from HW sequencer */
+ val = readl(ispi->base + HSFSTS_CTL);
+ val &= ~HSFSTS_CTL_FSMIE;
+ writel(val, ispi->base + HSFSTS_CTL);
- offs += erase_size;
- len -= erase_size;
+ /*
+ * Determine whether erase operation should use HW or SW sequencer.
+ *
+ * The HW sequencer has a predefined list of opcodes, with only the
+ * erase opcode being programmable in LVSCC and UVSCC registers.
+ * If these registers don't contain a valid erase opcode, erase
+ * cannot be done using HW sequencer.
+ */
+ lvscc = readl(ispi->base + LVSCC);
+ uvscc = readl(ispi->base + UVSCC);
+ if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
+ ispi->swseq_erase = true;
+ /* SPI controller on Intel BXT supports 64K erase opcode */
+ if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
+ if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
+ !(uvscc & ERASE_64K_OPCODE_MASK))
+ erase_64k = false;
+
+ if (!ispi->sregs && (ispi->swseq_reg || ispi->swseq_erase)) {
+ dev_err(ispi->dev, "software sequencer not supported, but required\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Some controllers can only do basic operations using hardware
+ * sequencer. All other operations are supposed to be carried out
+ * using software sequencer.
+ */
+ if (ispi->swseq_reg) {
+ /* Disable #SMI generation from SW sequencer */
+ val = readl(ispi->sregs + SSFSTS_CTL);
+ val &= ~SSFSTS_CTL_FSMIE;
+ writel(val, ispi->sregs + SSFSTS_CTL);
}
+ /* Check controller's lock status */
+ val = readl(ispi->base + HSFSTS_CTL);
+ ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);
+
+ if (ispi->locked && ispi->sregs) {
+ /*
+ * BIOS programs allowed opcodes and then locks down the
+ * register. So read back what opcodes it decided to support.
+ * That's the set we are going to support as well.
+ */
+ opmenu0 = readl(ispi->sregs + OPMENU0);
+ opmenu1 = readl(ispi->sregs + OPMENU1);
+
+ if (opmenu0 && opmenu1) {
+ for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
+ ispi->opcodes[i] = opmenu0 >> i * 8;
+ ispi->opcodes[i + 4] = opmenu1 >> i * 8;
+ }
+ }
+ }
+
+ if (erase_64k) {
+		dev_dbg(ispi->dev, "Using erase_64k memory operations\n");
+ ispi->mem_ops = erase_64k_mem_ops;
+ } else {
+		dev_dbg(ispi->dev, "Using generic memory operations\n");
+ ispi->mem_ops = generic_mem_ops;
+ }
+
+ intel_spi_dump_regs(ispi);
return 0;
}
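OPMENU0/OPMENU1 pack the eight BIOS-allowed opcodes as consecutive bytes of
two 32-bit registers, least significant byte first, and the loop above merely
slices them out. Worked example with made-up register values: OPMENU0 =
0x05200302 and OPMENU1 = 0xc7d89f01 yield opcodes[] = { 0x02, 0x03, 0x20,
0x05, 0x01, 0x9f, 0xd8, 0xc7 }.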
@@ -884,9 +1164,12 @@ static void intel_spi_fill_partition(struct intel_spi *ispi,
/*
* If any of the regions have protection bits set, make the
* whole partition read-only to be on the safe side.
+ *
+	 * Also, if the user did not ask for the chip to be writeable,
+	 * mask the flag too.
*/
- if (intel_spi_is_protected(ispi, base, limit))
- ispi->writeable = false;
+ if (!writeable || intel_spi_is_protected(ispi, base, limit))
+ part->mask_flags |= MTD_WRITEABLE;
end = (limit << 12) + 4096;
if (end > part->size)
@@ -894,75 +1177,74 @@ static void intel_spi_fill_partition(struct intel_spi *ispi,
}
}
-static const struct spi_nor_controller_ops intel_spi_controller_ops = {
- .read_reg = intel_spi_read_reg,
- .write_reg = intel_spi_write_reg,
- .read = intel_spi_read,
- .write = intel_spi_write,
- .erase = intel_spi_erase,
-};
+static int intel_spi_populate_chip(struct intel_spi *ispi)
+{
+ struct flash_platform_data *pdata;
+ struct spi_board_info chip;
-struct intel_spi *intel_spi_probe(struct device *dev,
- struct resource *mem, const struct intel_spi_boardinfo *info)
+ pdata = devm_kzalloc(ispi->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->nr_parts = 1;
+ pdata->parts = devm_kcalloc(ispi->dev, sizeof(*pdata->parts),
+ pdata->nr_parts, GFP_KERNEL);
+ if (!pdata->parts)
+ return -ENOMEM;
+
+ intel_spi_fill_partition(ispi, pdata->parts);
+
+ memset(&chip, 0, sizeof(chip));
+ snprintf(chip.modalias, 8, "spi-nor");
+ chip.platform_data = pdata;
+
+ return spi_new_device(ispi->master, &chip) ? 0 : -ENODEV;
+}
+
+/**
+ * intel_spi_probe() - Probe the Intel SPI flash controller
+ * @dev: Pointer to the parent device
+ * @mem: MMIO resource
+ * @info: Platform specific information
+ *
+ * Probes Intel SPI flash controller and creates the flash chip device.
+ * Returns %0 on success and negative errno in case of failure.
+ */
+int intel_spi_probe(struct device *dev, struct resource *mem,
+ const struct intel_spi_boardinfo *info)
{
- const struct spi_nor_hwcaps hwcaps = {
- .mask = SNOR_HWCAPS_READ |
- SNOR_HWCAPS_READ_FAST |
- SNOR_HWCAPS_PP,
- };
- struct mtd_partition part;
+ struct spi_controller *master;
struct intel_spi *ispi;
int ret;
- if (!info || !mem)
- return ERR_PTR(-EINVAL);
+ master = devm_spi_alloc_master(dev, sizeof(*ispi));
+ if (!master)
+ return -ENOMEM;
+
+ master->mem_ops = &intel_spi_mem_ops;
- ispi = devm_kzalloc(dev, sizeof(*ispi), GFP_KERNEL);
- if (!ispi)
- return ERR_PTR(-ENOMEM);
+ ispi = spi_master_get_devdata(master);
ispi->base = devm_ioremap_resource(dev, mem);
if (IS_ERR(ispi->base))
- return ERR_CAST(ispi->base);
+ return PTR_ERR(ispi->base);
ispi->dev = dev;
+ ispi->master = master;
ispi->info = info;
- ispi->writeable = info->writeable;
ret = intel_spi_init(ispi);
if (ret)
- return ERR_PTR(ret);
-
- ispi->nor.dev = ispi->dev;
- ispi->nor.priv = ispi;
- ispi->nor.controller_ops = &intel_spi_controller_ops;
-
- ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps);
- if (ret) {
- dev_info(dev, "failed to locate the chip\n");
- return ERR_PTR(ret);
- }
-
- intel_spi_fill_partition(ispi, &part);
-
- /* Prevent writes if not explicitly enabled */
- if (!ispi->writeable || !writeable)
- ispi->nor.mtd.flags &= ~MTD_WRITEABLE;
+ return ret;
- ret = mtd_device_register(&ispi->nor.mtd, &part, 1);
+ ret = devm_spi_register_master(dev, master);
if (ret)
- return ERR_PTR(ret);
+ return ret;
- return ispi;
+ return intel_spi_populate_chip(ispi);
}
EXPORT_SYMBOL_GPL(intel_spi_probe);
-int intel_spi_remove(struct intel_spi *ispi)
-{
- return mtd_device_unregister(&ispi->nor.mtd);
-}
-EXPORT_SYMBOL_GPL(intel_spi_remove);
-
MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-intel.h b/drivers/spi/spi-intel.h
new file mode 100644
index 000000000000..a4f0327a46ff
--- /dev/null
+++ b/drivers/spi/spi-intel.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel PCH/PCU SPI flash driver.
+ *
+ * Copyright (C) 2016 - 2022, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#ifndef SPI_INTEL_H
+#define SPI_INTEL_H
+
+#include <linux/platform_data/x86/spi-intel.h>
+
+struct resource;
+
+int intel_spi_probe(struct device *dev, struct resource *mem,
+ const struct intel_spi_boardinfo *info);
+
+#endif /* SPI_INTEL_H */
diff --git a/drivers/spi/spi-lantiq-ssc.c b/drivers/spi/spi-lantiq-ssc.c
index bcb52601804a..aae26f62ea87 100644
--- a/drivers/spi/spi-lantiq-ssc.c
+++ b/drivers/spi/spi-lantiq-ssc.c
@@ -906,17 +906,11 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
struct spi_master *master;
struct lantiq_ssc_spi *spi;
const struct lantiq_ssc_hwcfg *hwcfg;
- const struct of_device_id *match;
u32 id, supports_dma, revision;
unsigned int num_cs;
int err;
- match = of_match_device(lantiq_ssc_match, dev);
- if (!match) {
- dev_err(dev, "no device match\n");
- return -EINVAL;
- }
- hwcfg = match->data;
+ hwcfg = of_device_get_match_data(dev);
master = spi_alloc_master(dev, sizeof(struct lantiq_ssc_spi));
if (!master)
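of_device_get_match_data() collapses the old match-then-extract sequence into
one call: it returns the .data of the matching of_device_id, or NULL when the
device was not probed through the table. A sketch of the pattern, with every
example_* name hypothetical:

	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	struct example_hwcfg { unsigned int fifo_size; };

	static const struct example_hwcfg example_hwcfg_v1 = { .fifo_size = 32 };

	static const struct of_device_id example_match[] = {
		{ .compatible = "vendor,example-ssc", .data = &example_hwcfg_v1 },
		{ /* sentinel */ }
	};

	static int example_probe(struct platform_device *pdev)
	{
		const struct example_hwcfg *cfg;

		cfg = of_device_get_match_data(&pdev->dev);
		if (!cfg)	/* not reachable when probed via the table */
			return -EINVAL;
		return 0;
	}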
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 37f4443ce9a0..e9d83d65873b 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -854,15 +854,13 @@ static int spi_mem_probe(struct spi_device *spi)
return memdrv->probe(mem);
}
-static int spi_mem_remove(struct spi_device *spi)
+static void spi_mem_remove(struct spi_device *spi)
{
struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
struct spi_mem *mem = spi_get_drvdata(spi);
if (memdrv->remove)
- return memdrv->remove(mem);
-
- return 0;
+ memdrv->remove(mem);
}
static void spi_mem_shutdown(struct spi_device *spi)
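This follows the tree-wide conversion of the spi_driver remove() callback from
int to void: the core always ignored the returned value, so drivers now just
clean up. What a client looks like after the change, with hypothetical
example_* names:

	#include <linux/spi/spi.h>

	static int example_probe(struct spi_device *spi)
	{
		return 0;	/* claim the device */
	}

	static void example_remove(struct spi_device *spi)
	{
		/* tear down only; no status can be reported any more */
	}

	static struct spi_driver example_driver = {
		.driver = { .name = "example" },
		.probe	= example_probe,
		.remove	= example_remove,
	};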
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 78a9bca8cc68..03630359ce70 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -23,7 +23,6 @@
#include <linux/clk.h>
#include <linux/spi/spi.h>
#include <linux/fsl_devices.h>
-#include <linux/gpio.h>
#include <asm/mpc52xx_psc.h>
enum {
@@ -128,17 +127,28 @@ static void mpc512x_psc_spi_activate_cs(struct spi_device *spi)
out_be32(psc_addr(mps, ccr), ccr);
mps->bits_per_word = cs->bits_per_word;
- if (mps->cs_control && gpio_is_valid(spi->cs_gpio))
- mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0);
+ if (spi->cs_gpiod) {
+ if (mps->cs_control)
+ /* boardfile override */
+ mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0);
+ else
+ /* gpiolib will deal with the inversion */
+ gpiod_set_value(spi->cs_gpiod, 1);
+ }
}
static void mpc512x_psc_spi_deactivate_cs(struct spi_device *spi)
{
struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
- if (mps->cs_control && gpio_is_valid(spi->cs_gpio))
- mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
-
+ if (spi->cs_gpiod) {
+ if (mps->cs_control)
+ /* boardfile override */
+ mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
+ else
+ /* gpiolib will deal with the inversion */
+ gpiod_set_value(spi->cs_gpiod, 0);
+ }
}
/* extract and scale size field in txsz or rxsz */
@@ -363,7 +373,6 @@ static int mpc512x_psc_spi_unprep_xfer_hw(struct spi_master *master)
static int mpc512x_psc_spi_setup(struct spi_device *spi)
{
struct mpc512x_psc_spi_cs *cs = spi->controller_state;
- int ret;
if (spi->bits_per_word % 8)
return -EINVAL;
@@ -373,18 +382,6 @@ static int mpc512x_psc_spi_setup(struct spi_device *spi)
if (!cs)
return -ENOMEM;
- if (gpio_is_valid(spi->cs_gpio)) {
- ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
- if (ret) {
- dev_err(&spi->dev, "can't get CS gpio: %d\n",
- ret);
- kfree(cs);
- return ret;
- }
- gpio_direction_output(spi->cs_gpio,
- spi->mode & SPI_CS_HIGH ? 0 : 1);
- }
-
spi->controller_state = cs;
}
@@ -396,8 +393,6 @@ static int mpc512x_psc_spi_setup(struct spi_device *spi)
static void mpc512x_psc_spi_cleanup(struct spi_device *spi)
{
- if (gpio_is_valid(spi->cs_gpio))
- gpio_free(spi->cs_gpio);
kfree(spi->controller_state);
}
@@ -476,11 +471,6 @@ static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id)
return IRQ_NONE;
}
-static void mpc512x_spi_cs_control(struct spi_device *spi, bool onoff)
-{
- gpio_set_value(spi->cs_gpio, onoff);
-}
-
static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
u32 size, unsigned int irq)
{
@@ -500,9 +490,7 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
mps->type = (int)of_device_get_match_data(dev);
mps->irq = irq;
- if (pdata == NULL) {
- mps->cs_control = mpc512x_spi_cs_control;
- } else {
+ if (pdata) {
mps->cs_control = pdata->cs_control;
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->max_chipselect;
@@ -513,6 +501,7 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
master->prepare_transfer_hardware = mpc512x_psc_spi_prep_xfer_hw;
master->transfer_one_message = mpc512x_psc_spi_msg_xfer;
master->unprepare_transfer_hardware = mpc512x_psc_spi_unprep_xfer_hw;
+ master->use_gpio_descriptors = true;
master->cleanup = mpc512x_psc_spi_cleanup;
master->dev.of_node = dev->of_node;
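Note the level convention above: gpiod_set_value() takes the logical state,
and gpiolib folds in the ACTIVE_LOW flag from the device tree, so the driver
no longer inspects SPI_CS_HIGH itself. In short:

	/* sketch: logical chip-select levels with GPIO descriptors */
	gpiod_set_value(spi->cs_gpiod, 1);	/* assert CS */
	gpiod_set_value(spi->cs_gpiod, 0);	/* release CS */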
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 753bd313e6fd..1a0b3208dfca 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -12,7 +12,7 @@
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
@@ -31,6 +31,7 @@
#define SPI_CFG2_REG 0x0028
#define SPI_TX_SRC_REG_64 0x002c
#define SPI_RX_DST_REG_64 0x0030
+#define SPI_CFG3_IPM_REG 0x0040
#define SPI_CFG0_SCK_HIGH_OFFSET 0
#define SPI_CFG0_SCK_LOW_OFFSET 8
@@ -43,11 +44,15 @@
#define SPI_CFG1_PACKET_LOOP_OFFSET 8
#define SPI_CFG1_PACKET_LENGTH_OFFSET 16
#define SPI_CFG1_GET_TICK_DLY_OFFSET 29
+#define SPI_CFG1_GET_TICK_DLY_OFFSET_V1 30
#define SPI_CFG1_GET_TICK_DLY_MASK 0xe0000000
+#define SPI_CFG1_GET_TICK_DLY_MASK_V1 0xc0000000
+
#define SPI_CFG1_CS_IDLE_MASK 0xff
#define SPI_CFG1_PACKET_LOOP_MASK 0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
+#define SPI_CFG1_IPM_PACKET_LENGTH_MASK GENMASK(31, 16)
#define SPI_CFG2_SCK_HIGH_OFFSET 0
#define SPI_CFG2_SCK_LOW_OFFSET 16
@@ -68,7 +73,13 @@
#define SPI_CMD_TX_ENDIAN BIT(15)
#define SPI_CMD_FINISH_IE BIT(16)
#define SPI_CMD_PAUSE_IE BIT(17)
+#define SPI_CMD_IPM_NONIDLE_MODE BIT(19)
+#define SPI_CMD_IPM_SPIM_LOOP BIT(21)
+#define SPI_CMD_IPM_GET_TICKDLY_OFFSET 22
+#define SPI_CMD_IPM_GET_TICKDLY_MASK GENMASK(24, 22)
+#define SPI_CFG3_IPM_HALF_DUPLEX_DIR BIT(2)
+#define SPI_CFG3_IPM_HALF_DUPLEX_EN BIT(3)
#define MT8173_SPI_MAX_PAD_SEL 3
#define MTK_SPI_PAUSE_INT_STATUS 0x2
@@ -78,6 +89,7 @@
#define MTK_SPI_MAX_FIFO_SIZE 32U
#define MTK_SPI_PACKET_SIZE 1024
+#define MTK_SPI_IPM_PACKET_SIZE SZ_64K
#define MTK_SPI_32BITS_MASK (0xffffffff)
#define DMA_ADDR_EXT_BITS (36)
@@ -93,6 +105,9 @@ struct mtk_spi_compatible {
bool dma_ext;
/* some IC no need unprepare SPI clk */
bool no_need_unprepare;
+	/* the IPM design adjusts and extends registers to support more features */
+	bool ipm_design;
};
struct mtk_spi {
@@ -116,6 +131,12 @@ static const struct mtk_spi_compatible mt2712_compat = {
.must_tx = true,
};
+static const struct mtk_spi_compatible mtk_ipm_compat = {
+ .enhance_timing = true,
+ .dma_ext = true,
+ .ipm_design = true,
+};
+
static const struct mtk_spi_compatible mt6765_compat = {
.need_pad_sel = true,
.must_tx = true,
@@ -157,6 +178,9 @@ static const struct mtk_chip_config mtk_default_chip_info = {
};
static const struct of_device_id mtk_spi_of_match[] = {
+ { .compatible = "mediatek,spi-ipm",
+ .data = (void *)&mtk_ipm_compat,
+ },
{ .compatible = "mediatek,mt2701-spi",
.data = (void *)&mtk_common_compat,
},
@@ -275,12 +299,11 @@ static int mtk_spi_set_hw_cs_timing(struct spi_device *spi)
return 0;
}
-static int mtk_spi_prepare_message(struct spi_master *master,
- struct spi_message *msg)
+static int mtk_spi_hw_init(struct spi_master *master,
+ struct spi_device *spi)
{
u16 cpha, cpol;
u32 reg_val;
- struct spi_device *spi = msg->spi;
struct mtk_chip_config *chip_config = spi->controller_data;
struct mtk_spi *mdata = spi_master_get_devdata(master);
@@ -288,6 +311,15 @@ static int mtk_spi_prepare_message(struct spi_master *master,
cpol = spi->mode & SPI_CPOL ? 1 : 0;
reg_val = readl(mdata->base + SPI_CMD_REG);
+ if (mdata->dev_comp->ipm_design) {
+ /* SPI transfer without idle time until packet length done */
+ reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
+ if (spi->mode & SPI_LOOP)
+ reg_val |= SPI_CMD_IPM_SPIM_LOOP;
+ else
+ reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
+ }
+
if (cpha)
reg_val |= SPI_CMD_CPHA;
else
@@ -345,17 +377,39 @@ static int mtk_spi_prepare_message(struct spi_master *master,
mdata->base + SPI_PAD_SEL_REG);
/* tick delay */
- reg_val = readl(mdata->base + SPI_CFG1_REG);
- reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
- reg_val |= ((chip_config->tick_delay & 0x7)
- << SPI_CFG1_GET_TICK_DLY_OFFSET);
- writel(reg_val, mdata->base + SPI_CFG1_REG);
+ if (mdata->dev_comp->enhance_timing) {
+ if (mdata->dev_comp->ipm_design) {
+ reg_val = readl(mdata->base + SPI_CMD_REG);
+ reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
+ reg_val |= ((chip_config->tick_delay & 0x7)
+ << SPI_CMD_IPM_GET_TICKDLY_OFFSET);
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+ } else {
+ reg_val = readl(mdata->base + SPI_CFG1_REG);
+ reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
+ reg_val |= ((chip_config->tick_delay & 0x7)
+ << SPI_CFG1_GET_TICK_DLY_OFFSET);
+ writel(reg_val, mdata->base + SPI_CFG1_REG);
+ }
+ } else {
+ reg_val = readl(mdata->base + SPI_CFG1_REG);
+ reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK_V1;
+ reg_val |= ((chip_config->tick_delay & 0x3)
+ << SPI_CFG1_GET_TICK_DLY_OFFSET_V1);
+ writel(reg_val, mdata->base + SPI_CFG1_REG);
+ }
/* set hw cs timing */
mtk_spi_set_hw_cs_timing(spi);
return 0;
}
+static int mtk_spi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ return mtk_spi_hw_init(master, msg->spi);
+}
+
static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
u32 reg_val;
@@ -377,13 +431,13 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
}
static void mtk_spi_prepare_transfer(struct spi_master *master,
- struct spi_transfer *xfer)
+ u32 speed_hz)
{
u32 div, sck_time, reg_val;
struct mtk_spi *mdata = spi_master_get_devdata(master);
- if (xfer->speed_hz < mdata->spi_clk_hz / 2)
- div = DIV_ROUND_UP(mdata->spi_clk_hz, xfer->speed_hz);
+ if (speed_hz < mdata->spi_clk_hz / 2)
+ div = DIV_ROUND_UP(mdata->spi_clk_hz, speed_hz);
else
div = 1;
@@ -414,12 +468,24 @@ static void mtk_spi_setup_packet(struct spi_master *master)
u32 packet_size, packet_loop, reg_val;
struct mtk_spi *mdata = spi_master_get_devdata(master);
- packet_size = min_t(u32, mdata->xfer_len, MTK_SPI_PACKET_SIZE);
+ if (mdata->dev_comp->ipm_design)
+ packet_size = min_t(u32,
+ mdata->xfer_len,
+ MTK_SPI_IPM_PACKET_SIZE);
+ else
+ packet_size = min_t(u32,
+ mdata->xfer_len,
+ MTK_SPI_PACKET_SIZE);
+
packet_loop = mdata->xfer_len / packet_size;
reg_val = readl(mdata->base + SPI_CFG1_REG);
- reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK);
+ if (mdata->dev_comp->ipm_design)
+ reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
+ else
+ reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
+ reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
writel(reg_val, mdata->base + SPI_CFG1_REG);
}
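Both fields are programmed minus one. Worked example for a 4096-byte transfer:
with the legacy 1024-byte limit, packet_size = 1024 and packet_loop = 4, so
the register takes (1024 - 1) << 16 | (4 - 1) << 8; with the 64K IPM limit the
same transfer fits a single packet, (4096 - 1) << 16 | (1 - 1) << 8.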
@@ -514,7 +580,7 @@ static int mtk_spi_fifo_transfer(struct spi_master *master,
mdata->cur_transfer = xfer;
mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
mdata->num_xfered = 0;
- mtk_spi_prepare_transfer(master, xfer);
+ mtk_spi_prepare_transfer(master, xfer->speed_hz);
mtk_spi_setup_packet(master);
if (xfer->tx_buf) {
@@ -547,7 +613,7 @@ static int mtk_spi_dma_transfer(struct spi_master *master,
mdata->cur_transfer = xfer;
mdata->num_xfered = 0;
- mtk_spi_prepare_transfer(master, xfer);
+ mtk_spi_prepare_transfer(master, xfer->speed_hz);
cmd = readl(mdata->base + SPI_CMD_REG);
if (xfer->tx_buf)
@@ -582,6 +648,19 @@ static int mtk_spi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
+ struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
+ u32 reg_val = 0;
+
+ /* prepare xfer direction and duplex mode */
+ if (mdata->dev_comp->ipm_design) {
+ if (!xfer->tx_buf || !xfer->rx_buf) {
+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
+ if (xfer->rx_buf)
+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
+ }
+ writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
+ }
+
if (master->can_dma(master, spi, xfer))
return mtk_spi_dma_transfer(master, spi, xfer);
else
@@ -605,8 +684,9 @@ static int mtk_spi_setup(struct spi_device *spi)
if (!spi->controller_data)
spi->controller_data = (void *)&mtk_default_chip_info;
- if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio))
- gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+ if (mdata->dev_comp->need_pad_sel && spi->cs_gpiod)
+ /* CS de-asserted, gpiolib will handle inversion */
+ gpiod_direction_output(spi->cs_gpiod, 0);
return 0;
}
@@ -730,6 +810,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
master->can_dma = mtk_spi_can_dma;
master->setup = mtk_spi_setup;
master->set_cs_timing = mtk_spi_set_hw_cs_timing;
+ master->use_gpio_descriptors = true;
of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
if (!of_id) {
@@ -746,6 +827,8 @@ static int mtk_spi_probe(struct platform_device *pdev)
if (mdata->dev_comp->must_tx)
master->flags = SPI_MASTER_MUST_TX;
+ if (mdata->dev_comp->ipm_design)
+ master->mode_bits |= SPI_LOOP;
if (mdata->dev_comp->need_pad_sel) {
mdata->pad_num = of_property_count_u32_elems(
@@ -853,25 +936,12 @@ static int mtk_spi_probe(struct platform_device *pdev)
goto err_disable_runtime_pm;
}
- if (!master->cs_gpios && master->num_chipselect > 1) {
+ if (!master->cs_gpiods && master->num_chipselect > 1) {
dev_err(&pdev->dev,
"cs_gpios not specified and num_chipselect > 1\n");
ret = -EINVAL;
goto err_disable_runtime_pm;
}
-
- if (master->cs_gpios) {
- for (i = 0; i < master->num_chipselect; i++) {
- ret = devm_gpio_request(&pdev->dev,
- master->cs_gpios[i],
- dev_name(&pdev->dev));
- if (ret) {
- dev_err(&pdev->dev,
- "can't get CS GPIO %i\n", i);
- goto err_disable_runtime_pm;
- }
- }
- }
}
if (mdata->dev_comp->dma_ext)
diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
index 5c93730615f8..94fb09696677 100644
--- a/drivers/spi/spi-mtk-nor.c
+++ b/drivers/spi/spi-mtk-nor.c
@@ -95,6 +95,17 @@
#define CLK_TO_US(sp, clkcnt) DIV_ROUND_UP(clkcnt, sp->spi_freq / 1000000)
+struct mtk_nor_caps {
+ u8 dma_bits;
+
+	/* extra_dummy_bit is added for the IP in newer SoCs.
+	 * Some new SoCs modify the timing of fetching register values and
+	 * NOR flash IDs, so they need an extra dummy bit, which adds
+	 * more clock cycles when fetching data.
+ */
+ u8 extra_dummy_bit;
+};
+
struct mtk_nor {
struct spi_controller *ctlr;
struct device *dev;
@@ -104,11 +115,13 @@ struct mtk_nor {
struct clk *spi_clk;
struct clk *ctlr_clk;
struct clk *axi_clk;
+ struct clk *axi_s_clk;
unsigned int spi_freq;
bool wbuf_en;
bool has_irq;
bool high_dma;
struct completion op_done;
+ const struct mtk_nor_caps *caps;
};
static inline void mtk_nor_rmw(struct mtk_nor *sp, u32 reg, u32 set, u32 clr)
@@ -554,7 +567,12 @@ static int mtk_nor_spi_mem_prg(struct mtk_nor *sp, const struct spi_mem_op *op)
}
// trigger op
- writel(prg_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);
+ if (rx_len)
+ writel(prg_len * BITS_PER_BYTE + sp->caps->extra_dummy_bit,
+ sp->base + MTK_NOR_REG_PRG_CNT);
+ else
+ writel(prg_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);
+
ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
prg_len * BITS_PER_BYTE);
if (ret)
@@ -674,6 +692,7 @@ static void mtk_nor_disable_clk(struct mtk_nor *sp)
clk_disable_unprepare(sp->spi_clk);
clk_disable_unprepare(sp->ctlr_clk);
clk_disable_unprepare(sp->axi_clk);
+ clk_disable_unprepare(sp->axi_s_clk);
}
static int mtk_nor_enable_clk(struct mtk_nor *sp)
@@ -697,6 +716,14 @@ static int mtk_nor_enable_clk(struct mtk_nor *sp)
return ret;
}
+ ret = clk_prepare_enable(sp->axi_s_clk);
+ if (ret) {
+ clk_disable_unprepare(sp->spi_clk);
+ clk_disable_unprepare(sp->ctlr_clk);
+ clk_disable_unprepare(sp->axi_clk);
+ return ret;
+ }
+
return 0;
}
@@ -743,9 +770,25 @@ static const struct spi_controller_mem_ops mtk_nor_mem_ops = {
.exec_op = mtk_nor_exec_op
};
+static const struct mtk_nor_caps mtk_nor_caps_mt8173 = {
+ .dma_bits = 32,
+ .extra_dummy_bit = 0,
+};
+
+static const struct mtk_nor_caps mtk_nor_caps_mt8186 = {
+ .dma_bits = 32,
+ .extra_dummy_bit = 1,
+};
+
+static const struct mtk_nor_caps mtk_nor_caps_mt8192 = {
+ .dma_bits = 36,
+ .extra_dummy_bit = 0,
+};
+
static const struct of_device_id mtk_nor_match[] = {
- { .compatible = "mediatek,mt8192-nor", .data = (void *)36 },
- { .compatible = "mediatek,mt8173-nor", .data = (void *)32 },
+ { .compatible = "mediatek,mt8173-nor", .data = &mtk_nor_caps_mt8173 },
+ { .compatible = "mediatek,mt8186-nor", .data = &mtk_nor_caps_mt8186 },
+ { .compatible = "mediatek,mt8192-nor", .data = &mtk_nor_caps_mt8192 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_nor_match);
@@ -754,10 +797,10 @@ static int mtk_nor_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
struct mtk_nor *sp;
+ struct mtk_nor_caps *caps;
void __iomem *base;
- struct clk *spi_clk, *ctlr_clk, *axi_clk;
+ struct clk *spi_clk, *ctlr_clk, *axi_clk, *axi_s_clk;
int ret, irq;
- unsigned long dma_bits;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -775,10 +818,16 @@ static int mtk_nor_probe(struct platform_device *pdev)
if (IS_ERR(axi_clk))
return PTR_ERR(axi_clk);
- dma_bits = (unsigned long)of_device_get_match_data(&pdev->dev);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_bits))) {
- dev_err(&pdev->dev, "failed to set dma mask(%lu)\n", dma_bits);
- return -EINVAL;
+ axi_s_clk = devm_clk_get_optional(&pdev->dev, "axi_s");
+ if (IS_ERR(axi_s_clk))
+ return PTR_ERR(axi_s_clk);
+
+ caps = (struct mtk_nor_caps *)of_device_get_match_data(&pdev->dev);
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(caps->dma_bits));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set dma mask(%u)\n", caps->dma_bits);
+ return ret;
}
ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*sp));
@@ -808,7 +857,9 @@ static int mtk_nor_probe(struct platform_device *pdev)
sp->spi_clk = spi_clk;
sp->ctlr_clk = ctlr_clk;
sp->axi_clk = axi_clk;
- sp->high_dma = (dma_bits > 32);
+ sp->axi_s_clk = axi_s_clk;
+ sp->caps = caps;
+ sp->high_dma = caps->dma_bits > 32;
sp->buffer = dmam_alloc_coherent(&pdev->dev,
MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
&sp->buffer_dma, GFP_KERNEL);
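
Annotation: the mtk-nor rework replaces a bare integer cast through the OF match data with a per-SoC capabilities struct, which scales cleanly as new quirks (here the extra dummy bit and the optional axi_s clock) accumulate; the npcm-fiu hunk below applies the same of_device_get_match_data() simplification. A minimal sketch of the pattern with hypothetical names (my_caps, my_probe):

    #include <linux/dma-mapping.h>
    #include <linux/mod_devicetable.h>
    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    struct my_caps {
            u8 dma_bits;
            u8 extra_dummy_bit;
    };

    static const struct my_caps caps_soc_a = { .dma_bits = 32 };
    static const struct my_caps caps_soc_b = { .dma_bits = 36, .extra_dummy_bit = 1 };

    static const struct of_device_id my_match[] = {
            { .compatible = "vendor,soc-a", .data = &caps_soc_a },
            { .compatible = "vendor,soc-b", .data = &caps_soc_b },
            { /* sentinel */ }
    };

    static int my_probe(struct platform_device *pdev)
    {
            const struct my_caps *caps = of_device_get_match_data(&pdev->dev);

            if (!caps)
                    return -ENODEV;
            return dma_set_mask_and_coherent(&pdev->dev,
                                             DMA_BIT_MASK(caps->dma_bits));
    }
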
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index b62471ab6d7f..ba67dbed9fb8 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -201,7 +201,7 @@ struct fiu_data {
int fiu_max;
};
-static const struct npcm_fiu_info npxm7xx_fiu_info[] = {
+static const struct npcm_fiu_info npcm7xx_fiu_info[] = {
{.name = "FIU0", .fiu_id = FIU0,
.max_map_size = MAP_SIZE_128MB, .max_cs = 2},
{.name = "FIU3", .fiu_id = FIU3,
@@ -209,8 +209,8 @@ static const struct npcm_fiu_info npxm7xx_fiu_info[] = {
{.name = "FIUX", .fiu_id = FIUX,
.max_map_size = MAP_SIZE_16MB, .max_cs = 2} };
-static const struct fiu_data npxm7xx_fiu_data = {
- .npcm_fiu_data_info = npxm7xx_fiu_info,
+static const struct fiu_data npcm7xx_fiu_data = {
+ .npcm_fiu_data_info = npcm7xx_fiu_info,
.fiu_max = 3,
};
@@ -664,14 +664,13 @@ static const struct spi_controller_mem_ops npcm_fiu_mem_ops = {
};
static const struct of_device_id npcm_fiu_dt_ids[] = {
- { .compatible = "nuvoton,npcm750-fiu", .data = &npxm7xx_fiu_data },
+ { .compatible = "nuvoton,npcm750-fiu", .data = &npcm7xx_fiu_data },
{ /* sentinel */ }
};
static int npcm_fiu_probe(struct platform_device *pdev)
{
const struct fiu_data *fiu_data_match;
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct spi_controller *ctrl;
struct npcm_fiu_spi *fiu;
@@ -685,13 +684,12 @@ static int npcm_fiu_probe(struct platform_device *pdev)
fiu = spi_controller_get_devdata(ctrl);
- match = of_match_device(npcm_fiu_dt_ids, dev);
- if (!match || !match->data) {
+ fiu_data_match = of_device_get_match_data(dev);
+ if (!fiu_data_match) {
dev_err(dev, "No compatible OF match\n");
return -ENODEV;
}
- fiu_data_match = match->data;
id = of_alias_get_id(dev->of_node, "fiu");
if (id < 0 || id >= fiu_data_match->fiu_max) {
dev_err(dev, "Invalid platform device id: %d\n", id);
diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
index f86433b29260..7e5c09a7d489 100644
--- a/drivers/spi/spi-pic32.c
+++ b/drivers/spi/spi-pic32.c
@@ -591,18 +591,16 @@ static int pic32_spi_setup(struct spi_device *spi)
* unreliable/erroneous SPI transactions.
* To avoid that we will always handle /CS by toggling GPIO.
*/
- if (!gpio_is_valid(spi->cs_gpio))
+ if (!spi->cs_gpiod)
return -EINVAL;
- gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
-
return 0;
}
static void pic32_spi_cleanup(struct spi_device *spi)
{
- /* de-activate cs-gpio */
- gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+ /* de-activate cs-gpio, gpiolib will handle inversion */
+ gpiod_direction_output(spi->cs_gpiod, 0);
}
static int pic32_spi_dma_prep(struct pic32_spi *pic32s, struct device *dev)
@@ -784,6 +782,7 @@ static int pic32_spi_probe(struct platform_device *pdev)
master->unprepare_message = pic32_spi_unprepare_message;
master->prepare_transfer_hardware = pic32_spi_prepare_hardware;
master->unprepare_transfer_hardware = pic32_spi_unprepare_hardware;
+ master->use_gpio_descriptors = true;
/* optional DMA support */
ret = pic32_spi_dma_prep(pic32s, &pdev->dev);
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 2e134eb4bd2c..861b21c63504 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -15,32 +15,20 @@
#include <linux/dmaengine.h>
#include <linux/platform_data/dma-dw.h>
-enum {
- PORT_QUARK_X1000,
- PORT_BYT,
- PORT_MRFLD,
- PORT_BSW0,
- PORT_BSW1,
- PORT_BSW2,
- PORT_CE4100,
- PORT_LPT0,
- PORT_LPT1,
-};
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000 0x0935
+#define PCI_DEVICE_ID_INTEL_BYT 0x0f0e
+#define PCI_DEVICE_ID_INTEL_MRFLD 0x1194
+#define PCI_DEVICE_ID_INTEL_BSW0 0x228e
+#define PCI_DEVICE_ID_INTEL_BSW1 0x2290
+#define PCI_DEVICE_ID_INTEL_BSW2 0x22ac
+#define PCI_DEVICE_ID_INTEL_CE4100 0x2e6a
+#define PCI_DEVICE_ID_INTEL_LPT0_0 0x9c65
+#define PCI_DEVICE_ID_INTEL_LPT0_1 0x9c66
+#define PCI_DEVICE_ID_INTEL_LPT1_0 0x9ce5
+#define PCI_DEVICE_ID_INTEL_LPT1_1 0x9ce6
struct pxa_spi_info {
- enum pxa_ssp_type type;
- int port_id;
- int num_chipselect;
- unsigned long max_clk_rate;
-
- /* DMA channel request parameters */
- bool (*dma_filter)(struct dma_chan *chan, void *param);
- void *tx_param;
- void *rx_param;
-
- int dma_burst_size;
-
- int (*setup)(struct pci_dev *pdev, struct pxa_spi_info *c);
+ int (*setup)(struct pci_dev *pdev, struct pxa2xx_spi_controller *c);
};
static struct dw_dma_slave byt_tx_param = { .dst_id = 0 };
@@ -65,6 +53,24 @@ static struct dw_dma_slave lpt1_rx_param = { .src_id = 1 };
static struct dw_dma_slave lpt0_tx_param = { .dst_id = 2 };
static struct dw_dma_slave lpt0_rx_param = { .src_id = 3 };
+static void pxa2xx_spi_pci_clk_unregister(void *clk)
+{
+ clk_unregister(clk);
+}
+
+static int pxa2xx_spi_pci_clk_register(struct pci_dev *dev, struct ssp_device *ssp,
+ unsigned long rate)
+{
+ char buf[40];
+
+ snprintf(buf, sizeof(buf), "pxa2xx-spi.%d", ssp->port_id);
+ ssp->clk = clk_register_fixed_rate(&dev->dev, buf, NULL, 0, rate);
+ if (IS_ERR(ssp->clk))
+ return PTR_ERR(ssp->clk);
+
+ return devm_add_action_or_reset(&dev->dev, pxa2xx_spi_pci_clk_unregister, ssp->clk);
+}
+
static bool lpss_dma_filter(struct dma_chan *chan, void *param)
{
struct dw_dma_slave *dws = param;
@@ -76,55 +82,131 @@ static bool lpss_dma_filter(struct dma_chan *chan, void *param)
return true;
}
-static int lpss_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
+static void lpss_dma_put_device(void *dma_dev)
{
+ pci_dev_put(dma_dev);
+}
+
+static int lpss_spi_setup(struct pci_dev *dev, struct pxa2xx_spi_controller *c)
+{
+ struct ssp_device *ssp = &c->ssp;
+ struct dw_dma_slave *tx, *rx;
struct pci_dev *dma_dev;
+ int ret;
- c->num_chipselect = 1;
- c->max_clk_rate = 50000000;
+ switch (dev->device) {
+ case PCI_DEVICE_ID_INTEL_BYT:
+ ssp->type = LPSS_BYT_SSP;
+ ssp->port_id = 0;
+ c->tx_param = &byt_tx_param;
+ c->rx_param = &byt_rx_param;
+ break;
+ case PCI_DEVICE_ID_INTEL_BSW0:
+ ssp->type = LPSS_BSW_SSP;
+ ssp->port_id = 0;
+ c->tx_param = &bsw0_tx_param;
+ c->rx_param = &bsw0_rx_param;
+ break;
+ case PCI_DEVICE_ID_INTEL_BSW1:
+ ssp->type = LPSS_BSW_SSP;
+ ssp->port_id = 1;
+ c->tx_param = &bsw1_tx_param;
+ c->rx_param = &bsw1_rx_param;
+ break;
+ case PCI_DEVICE_ID_INTEL_BSW2:
+ ssp->type = LPSS_BSW_SSP;
+ ssp->port_id = 2;
+ c->tx_param = &bsw2_tx_param;
+ c->rx_param = &bsw2_rx_param;
+ break;
+ case PCI_DEVICE_ID_INTEL_LPT0_0:
+ case PCI_DEVICE_ID_INTEL_LPT1_0:
+ ssp->type = LPSS_LPT_SSP;
+ ssp->port_id = 0;
+ c->tx_param = &lpt0_tx_param;
+ c->rx_param = &lpt0_rx_param;
+ break;
+ case PCI_DEVICE_ID_INTEL_LPT0_1:
+ case PCI_DEVICE_ID_INTEL_LPT1_1:
+ ssp->type = LPSS_LPT_SSP;
+ ssp->port_id = 1;
+ c->tx_param = &lpt1_tx_param;
+ c->rx_param = &lpt1_rx_param;
+ break;
+ default:
+ return -ENODEV;
+ }
- dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ c->num_chipselect = 1;
- if (c->tx_param) {
- struct dw_dma_slave *slave = c->tx_param;
+ ret = pxa2xx_spi_pci_clk_register(dev, ssp, 50000000);
+ if (ret)
+ return ret;
- slave->dma_dev = &dma_dev->dev;
- slave->m_master = 0;
- slave->p_master = 1;
- }
+ dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ ret = devm_add_action_or_reset(&dev->dev, lpss_dma_put_device, dma_dev);
+ if (ret)
+ return ret;
- if (c->rx_param) {
- struct dw_dma_slave *slave = c->rx_param;
+ tx = c->tx_param;
+ tx->dma_dev = &dma_dev->dev;
+ tx->m_master = 0;
+ tx->p_master = 1;
- slave->dma_dev = &dma_dev->dev;
- slave->m_master = 0;
- slave->p_master = 1;
- }
+ rx = c->rx_param;
+ rx->dma_dev = &dma_dev->dev;
+ rx->m_master = 0;
+ rx->p_master = 1;
c->dma_filter = lpss_dma_filter;
+ c->dma_burst_size = 1;
+ c->enable_dma = 1;
return 0;
}
-static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
+static const struct pxa_spi_info lpss_info_config = {
+ .setup = lpss_spi_setup,
+};
+
+static int ce4100_spi_setup(struct pci_dev *dev, struct pxa2xx_spi_controller *c)
{
- struct pci_dev *dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(21, 0));
+ struct ssp_device *ssp = &c->ssp;
+
+ ssp->type = PXA25x_SSP;
+ ssp->port_id = dev->devfn;
+ c->num_chipselect = dev->devfn;
+
+ return pxa2xx_spi_pci_clk_register(dev, ssp, 3686400);
+}
+
+static const struct pxa_spi_info ce4100_info_config = {
+ .setup = ce4100_spi_setup,
+};
+
+static int mrfld_spi_setup(struct pci_dev *dev, struct pxa2xx_spi_controller *c)
+{
+ struct ssp_device *ssp = &c->ssp;
struct dw_dma_slave *tx, *rx;
+ struct pci_dev *dma_dev;
+ int ret;
+
+ ssp->type = MRFLD_SSP;
switch (PCI_FUNC(dev->devfn)) {
case 0:
- c->port_id = 3;
+ ssp->port_id = 3;
c->num_chipselect = 1;
c->tx_param = &mrfld3_tx_param;
c->rx_param = &mrfld3_rx_param;
break;
case 1:
- c->port_id = 5;
+ ssp->port_id = 5;
c->num_chipselect = 4;
c->tx_param = &mrfld5_tx_param;
c->rx_param = &mrfld5_rx_param;
break;
case 2:
- c->port_id = 6;
+ ssp->port_id = 6;
c->num_chipselect = 1;
c->tx_param = &mrfld6_tx_param;
c->rx_param = &mrfld6_rx_param;
@@ -133,6 +215,15 @@ static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
return -ENODEV;
}
+ ret = pxa2xx_spi_pci_clk_register(dev, ssp, 25000000);
+ if (ret)
+ return ret;
+
+ dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(21, 0));
+ ret = devm_add_action_or_reset(&dev->dev, lpss_dma_put_device, dma_dev);
+ if (ret)
+ return ret;
+
tx = c->tx_param;
tx->dma_dev = &dma_dev->dev;
@@ -141,81 +232,38 @@ static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
c->dma_filter = lpss_dma_filter;
c->dma_burst_size = 8;
+ c->enable_dma = 1;
return 0;
}
-static struct pxa_spi_info spi_info_configs[] = {
- [PORT_CE4100] = {
- .type = PXA25x_SSP,
- .port_id = -1,
- .num_chipselect = -1,
- .max_clk_rate = 3686400,
- },
- [PORT_BYT] = {
- .type = LPSS_BYT_SSP,
- .port_id = 0,
- .setup = lpss_spi_setup,
- .tx_param = &byt_tx_param,
- .rx_param = &byt_rx_param,
- },
- [PORT_BSW0] = {
- .type = LPSS_BSW_SSP,
- .port_id = 0,
- .setup = lpss_spi_setup,
- .tx_param = &bsw0_tx_param,
- .rx_param = &bsw0_rx_param,
- },
- [PORT_BSW1] = {
- .type = LPSS_BSW_SSP,
- .port_id = 1,
- .setup = lpss_spi_setup,
- .tx_param = &bsw1_tx_param,
- .rx_param = &bsw1_rx_param,
- },
- [PORT_BSW2] = {
- .type = LPSS_BSW_SSP,
- .port_id = 2,
- .setup = lpss_spi_setup,
- .tx_param = &bsw2_tx_param,
- .rx_param = &bsw2_rx_param,
- },
- [PORT_MRFLD] = {
- .type = MRFLD_SSP,
- .max_clk_rate = 25000000,
- .setup = mrfld_spi_setup,
- },
- [PORT_QUARK_X1000] = {
- .type = QUARK_X1000_SSP,
- .port_id = -1,
- .num_chipselect = 1,
- .max_clk_rate = 50000000,
- },
- [PORT_LPT0] = {
- .type = LPSS_LPT_SSP,
- .port_id = 0,
- .setup = lpss_spi_setup,
- .tx_param = &lpt0_tx_param,
- .rx_param = &lpt0_rx_param,
- },
- [PORT_LPT1] = {
- .type = LPSS_LPT_SSP,
- .port_id = 1,
- .setup = lpss_spi_setup,
- .tx_param = &lpt1_tx_param,
- .rx_param = &lpt1_rx_param,
- },
+static const struct pxa_spi_info mrfld_info_config = {
+ .setup = mrfld_spi_setup,
+};
+
+static int qrk_spi_setup(struct pci_dev *dev, struct pxa2xx_spi_controller *c)
+{
+ struct ssp_device *ssp = &c->ssp;
+
+ ssp->type = QUARK_X1000_SSP;
+ ssp->port_id = dev->devfn;
+ c->num_chipselect = 1;
+
+ return pxa2xx_spi_pci_clk_register(dev, ssp, 50000000);
+}
+
+static const struct pxa_spi_info qrk_info_config = {
+ .setup = qrk_spi_setup,
};
static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
+ const struct pxa_spi_info *info;
struct platform_device_info pi;
int ret;
struct platform_device *pdev;
struct pxa2xx_spi_controller spi_pdata;
struct ssp_device *ssp;
- struct pxa_spi_info *c;
- char buf[40];
ret = pcim_enable_device(dev);
if (ret)
@@ -225,27 +273,17 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
if (ret)
return ret;
- c = &spi_info_configs[ent->driver_data];
- if (c->setup) {
- ret = c->setup(dev, c);
- if (ret)
- return ret;
- }
-
memset(&spi_pdata, 0, sizeof(spi_pdata));
- spi_pdata.num_chipselect = (c->num_chipselect > 0) ? c->num_chipselect : dev->devfn;
- spi_pdata.dma_filter = c->dma_filter;
- spi_pdata.tx_param = c->tx_param;
- spi_pdata.rx_param = c->rx_param;
- spi_pdata.enable_dma = c->rx_param && c->tx_param;
- spi_pdata.dma_burst_size = c->dma_burst_size ? c->dma_burst_size : 1;
ssp = &spi_pdata.ssp;
ssp->dev = &dev->dev;
ssp->phys_base = pci_resource_start(dev, 0);
ssp->mmio_base = pcim_iomap_table(dev)[0];
- ssp->port_id = (c->port_id >= 0) ? c->port_id : dev->devfn;
- ssp->type = c->type;
+
+ info = (struct pxa_spi_info *)ent->driver_data;
+ ret = info->setup(dev, &spi_pdata);
+ if (ret)
+ return ret;
pci_set_master(dev);
@@ -254,14 +292,8 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
return ret;
ssp->irq = pci_irq_vector(dev, 0);
- snprintf(buf, sizeof(buf), "pxa2xx-spi.%d", ssp->port_id);
- ssp->clk = clk_register_fixed_rate(&dev->dev, buf, NULL, 0,
- c->max_clk_rate);
- if (IS_ERR(ssp->clk))
- return PTR_ERR(ssp->clk);
-
memset(&pi, 0, sizeof(pi));
- pi.fwnode = dev->dev.fwnode;
+ pi.fwnode = dev_fwnode(&dev->dev);
pi.parent = &dev->dev;
pi.name = "pxa2xx-spi";
pi.id = ssp->port_id;
@@ -269,10 +301,8 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
pi.size_data = sizeof(spi_pdata);
pdev = platform_device_register_full(&pi);
- if (IS_ERR(pdev)) {
- clk_unregister(ssp->clk);
+ if (IS_ERR(pdev))
return PTR_ERR(pdev);
- }
pci_set_drvdata(dev, pdev);
@@ -282,26 +312,22 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
static void pxa2xx_spi_pci_remove(struct pci_dev *dev)
{
struct platform_device *pdev = pci_get_drvdata(dev);
- struct pxa2xx_spi_controller *spi_pdata;
-
- spi_pdata = dev_get_platdata(&pdev->dev);
platform_device_unregister(pdev);
- clk_unregister(spi_pdata->ssp.clk);
}
static const struct pci_device_id pxa2xx_spi_pci_devices[] = {
- { PCI_VDEVICE(INTEL, 0x0935), PORT_QUARK_X1000 },
- { PCI_VDEVICE(INTEL, 0x0f0e), PORT_BYT },
- { PCI_VDEVICE(INTEL, 0x1194), PORT_MRFLD },
- { PCI_VDEVICE(INTEL, 0x228e), PORT_BSW0 },
- { PCI_VDEVICE(INTEL, 0x2290), PORT_BSW1 },
- { PCI_VDEVICE(INTEL, 0x22ac), PORT_BSW2 },
- { PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 },
- { PCI_VDEVICE(INTEL, 0x9c65), PORT_LPT0 },
- { PCI_VDEVICE(INTEL, 0x9c66), PORT_LPT1 },
- { PCI_VDEVICE(INTEL, 0x9ce5), PORT_LPT0 },
- { PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT1 },
+ { PCI_DEVICE_DATA(INTEL, QUARK_X1000, &qrk_info_config) },
+ { PCI_DEVICE_DATA(INTEL, BYT, &lpss_info_config) },
+ { PCI_DEVICE_DATA(INTEL, MRFLD, &mrfld_info_config) },
+ { PCI_DEVICE_DATA(INTEL, BSW0, &lpss_info_config) },
+ { PCI_DEVICE_DATA(INTEL, BSW1, &lpss_info_config) },
+ { PCI_DEVICE_DATA(INTEL, BSW2, &lpss_info_config) },
+ { PCI_DEVICE_DATA(INTEL, CE4100, &ce4100_info_config) },
+ { PCI_DEVICE_DATA(INTEL, LPT0_0, &lpss_info_config) },
+ { PCI_DEVICE_DATA(INTEL, LPT0_1, &lpss_info_config) },
+ { PCI_DEVICE_DATA(INTEL, LPT1_0, &lpss_info_config) },
+ { PCI_DEVICE_DATA(INTEL, LPT1_1, &lpss_info_config) },
{ }
};
MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices);
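
Annotation: the pxa2xx-pci rework leans on devm_add_action_or_reset() so that resources with no devm_* variant (the fixed-rate clock from clk_register_fixed_rate() and the pci_dev reference taken by pci_get_slot()) are released automatically both on a later probe failure and on remove, which is what lets the error paths and pxa2xx_spi_pci_remove() shed their manual clk_unregister() calls. A minimal sketch of the idiom with hypothetical names:

    #include <linux/clk-provider.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static void my_clk_unregister(void *clk)
    {
            clk_unregister(clk);
    }

    static int my_register_clk(struct device *dev, unsigned long rate)
    {
            struct clk *clk;

            clk = clk_register_fixed_rate(dev, "my-fixed-clk", NULL, 0, rate);
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            /*
             * If adding the action fails, it runs (and unregisters the
             * clock) immediately; otherwise it runs when @dev unbinds.
             */
            return devm_add_action_or_reset(dev, my_clk_unregister, clk);
    }
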
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index e88f86274eeb..edb42d08857d 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -13,7 +13,6 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
-#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
@@ -1163,57 +1162,6 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller)
return 0;
}
-static void cleanup_cs(struct spi_device *spi)
-{
- if (!gpio_is_valid(spi->cs_gpio))
- return;
-
- gpio_free(spi->cs_gpio);
- spi->cs_gpio = -ENOENT;
-}
-
-static int setup_cs(struct spi_device *spi, struct chip_data *chip,
- struct pxa2xx_spi_chip *chip_info)
-{
- struct driver_data *drv_data = spi_controller_get_devdata(spi->controller);
-
- if (chip == NULL)
- return 0;
-
- if (chip_info == NULL)
- return 0;
-
- if (drv_data->ssp_type == CE4100_SSP)
- return 0;
-
- /*
- * NOTE: setup() can be called multiple times, possibly with
- * different chip_info, release previously requested GPIO.
- */
- cleanup_cs(spi);
-
- if (gpio_is_valid(chip_info->gpio_cs)) {
- int gpio = chip_info->gpio_cs;
- int err;
-
- err = gpio_request(gpio, "SPI_CS");
- if (err) {
- dev_err(&spi->dev, "failed to request chip select GPIO%d\n", gpio);
- return err;
- }
-
- err = gpio_direction_output(gpio, !(spi->mode & SPI_CS_HIGH));
- if (err) {
- gpio_free(gpio);
- return err;
- }
-
- spi->cs_gpio = gpio;
- }
-
- return 0;
-}
-
static int setup(struct spi_device *spi)
{
struct pxa2xx_spi_chip *chip_info;
@@ -1222,7 +1170,6 @@ static int setup(struct spi_device *spi)
struct driver_data *drv_data =
spi_controller_get_devdata(spi->controller);
uint tx_thres, tx_hi_thres, rx_thres;
- int err;
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
@@ -1365,21 +1312,13 @@ static int setup(struct spi_device *spi)
spi_set_ctldata(spi, chip);
- if (drv_data->ssp_type == CE4100_SSP)
- return 0;
-
- err = setup_cs(spi, chip, chip_info);
- if (err)
- kfree(chip);
-
- return err;
+ return 0;
}
static void cleanup(struct spi_device *spi)
{
struct chip_data *chip = spi_get_ctldata(spi);
- cleanup_cs(spi);
kfree(chip);
}
@@ -1455,6 +1394,11 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x5ac6), LPSS_BXT_SSP },
+ /* RPL-S */
+ { PCI_VDEVICE(INTEL, 0x7a2a), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x7a2b), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x7a79), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x7a7b), LPSS_CNL_SSP },
/* ADL-S */
{ PCI_VDEVICE(INTEL, 0x7aaa), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x7aab), LPSS_CNL_SSP },
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index d39dec6d1c91..00d6084306b4 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -593,7 +593,6 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
struct spi_qup *controller = dev_id;
u32 opflags, qup_err, spi_err;
- unsigned long flags;
int error = 0;
qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
@@ -625,10 +624,10 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
error = -EIO;
}
- spin_lock_irqsave(&controller->lock, flags);
+ spin_lock(&controller->lock);
if (!controller->error)
controller->error = error;
- spin_unlock_irqrestore(&controller->lock, flags);
+ spin_unlock(&controller->lock);
if (spi_qup_is_dma_xfer(controller->mode)) {
writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
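
Annotation: dropping the irqsave variant in spi_qup_qup_irq() is safe because the handler runs in hard-IRQ context with local interrupts already disabled, so saving and restoring flags there is pure overhead. The rule still holds that any process-context taker of the same lock must disable interrupts, or the IRQ could preempt it and deadlock on the held lock. A minimal sketch of the split, with a hypothetical struct my_ctrl:

    #include <linux/errno.h>
    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    struct my_ctrl {
            spinlock_t lock;
            int error;
    };

    static irqreturn_t my_irq(int irq, void *dev_id)
    {
            struct my_ctrl *c = dev_id;

            /* Hard-IRQ context: interrupts are off, plain spin_lock suffices. */
            spin_lock(&c->lock);
            if (!c->error)
                    c->error = -EIO;
            spin_unlock(&c->lock);
            return IRQ_HANDLED;
    }

    static void my_reset_error(struct my_ctrl *c)
    {
            unsigned long flags;

            /* Process context: disable IRQs so my_irq() cannot deadlock us. */
            spin_lock_irqsave(&c->lock, flags);
            c->error = 0;
            spin_unlock_irqrestore(&c->lock, flags);
    }
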
diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c
index a46b38544027..bd87d3c92dd3 100644
--- a/drivers/spi/spi-rockchip-sfc.c
+++ b/drivers/spi/spi-rockchip-sfc.c
@@ -624,10 +624,8 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
/* Find the irq */
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "Failed to get the irq\n");
+ if (ret < 0)
goto err_irq;
- }
ret = devm_request_irq(dev, ret, rockchip_sfc_irq_handler,
0, pdev->name, sfc);
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index c6a1bb09be05..cdc16eecaf6b 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -133,7 +133,8 @@
#define INT_TF_OVERFLOW (1 << 1)
#define INT_RF_UNDERFLOW (1 << 2)
#define INT_RF_OVERFLOW (1 << 3)
-#define INT_RF_FULL (1 << 4)
+#define INT_RF_FULL (1 << 4)
+#define INT_CS_INACTIVE (1 << 6)
/* Bit fields in ICR, 4bit */
#define ICR_MASK 0x0f
@@ -194,6 +195,8 @@ struct rockchip_spi {
bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
bool slave_abort;
+ bool cs_inactive; /* SPI slave transfer stops when CS goes inactive */
+ struct spi_transfer *xfer; /* Store xfer temporarily */
};
static inline void spi_enable_chip(struct rockchip_spi *rs, bool enable)
@@ -275,8 +278,9 @@ static void rockchip_spi_handle_err(struct spi_controller *ctlr,
*/
spi_enable_chip(rs, false);
- /* make sure all interrupts are masked */
+ /* make sure all interrupts are masked and status cleared */
writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
+ writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
if (atomic_read(&rs->state) & TXDMA)
dmaengine_terminate_async(ctlr->dma_tx);
@@ -343,6 +347,15 @@ static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
struct spi_controller *ctlr = dev_id;
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+ /* When the CS-inactive interrupt fires, abort the SPI slave transfer */
+ if (rs->cs_inactive && readl_relaxed(rs->regs + ROCKCHIP_SPI_IMR) & INT_CS_INACTIVE) {
+ ctlr->slave_abort(ctlr);
+ writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
+ writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
+
+ return IRQ_HANDLED;
+ }
+
if (rs->tx_left)
rockchip_spi_pio_writer(rs);
@@ -350,6 +363,7 @@ static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
if (!rs->rx_left) {
spi_enable_chip(rs, false);
writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
+ writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
spi_finalize_current_transfer(ctlr);
}
@@ -357,14 +371,18 @@ static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
}
static int rockchip_spi_prepare_irq(struct rockchip_spi *rs,
- struct spi_transfer *xfer)
+ struct spi_controller *ctlr,
+ struct spi_transfer *xfer)
{
rs->tx = xfer->tx_buf;
rs->rx = xfer->rx_buf;
rs->tx_left = rs->tx ? xfer->len / rs->n_bytes : 0;
rs->rx_left = xfer->len / rs->n_bytes;
- writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR);
+ if (rs->cs_inactive)
+ writel_relaxed(INT_RF_FULL | INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
+ else
+ writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR);
spi_enable_chip(rs, true);
if (rs->tx_left)
@@ -383,6 +401,9 @@ static void rockchip_spi_dma_rxcb(void *data)
if (state & TXDMA && !rs->slave_abort)
return;
+ if (rs->cs_inactive)
+ writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
+
spi_enable_chip(rs, false);
spi_finalize_current_transfer(ctlr);
}
@@ -423,14 +444,16 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
atomic_set(&rs->state, 0);
+ rs->tx = xfer->tx_buf;
+ rs->rx = xfer->rx_buf;
+
rxdesc = NULL;
if (xfer->rx_buf) {
struct dma_slave_config rxconf = {
.direction = DMA_DEV_TO_MEM,
.src_addr = rs->dma_addr_rx,
.src_addr_width = rs->n_bytes,
- .src_maxburst = rockchip_spi_calc_burst_size(xfer->len /
- rs->n_bytes),
+ .src_maxburst = rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes),
};
dmaengine_slave_config(ctlr->dma_rx, &rxconf);
@@ -474,10 +497,13 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
/* rx must be started before tx due to spi instinct */
if (rxdesc) {
atomic_or(RXDMA, &rs->state);
- dmaengine_submit(rxdesc);
+ ctlr->dma_rx->cookie = dmaengine_submit(rxdesc);
dma_async_issue_pending(ctlr->dma_rx);
}
+ if (rs->cs_inactive)
+ writel_relaxed(INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
+
spi_enable_chip(rs, true);
if (txdesc) {
@@ -584,7 +610,42 @@ static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
static int rockchip_spi_slave_abort(struct spi_controller *ctlr)
{
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+ u32 rx_fifo_left;
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ /* Get the current DMA RX position */
+ if (atomic_read(&rs->state) & RXDMA) {
+ dmaengine_pause(ctlr->dma_rx);
+ status = dmaengine_tx_status(ctlr->dma_rx, ctlr->dma_rx->cookie, &state);
+ if (status == DMA_ERROR) {
+ rs->rx = rs->xfer->rx_buf;
+ rs->xfer->len = 0;
+ rx_fifo_left = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
+ for (; rx_fifo_left; rx_fifo_left--)
+ readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
+ goto out;
+ } else {
+ rs->rx += rs->xfer->len - rs->n_bytes * state.residue;
+ }
+ }
+
+ /* Drain the valid data left in the RX FIFO and set rs->xfer->len to the real RX size */
+ if (rs->rx) {
+ rx_fifo_left = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
+ for (; rx_fifo_left; rx_fifo_left--) {
+ u32 rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
+
+ if (rs->n_bytes == 1)
+ *(u8 *)rs->rx = (u8)rxw;
+ else
+ *(u16 *)rs->rx = (u16)rxw;
+ rs->rx += rs->n_bytes;
+ }
+ rs->xfer->len = (unsigned int)(rs->rx - rs->xfer->rx_buf);
+ }
+out:
if (atomic_read(&rs->state) & RXDMA)
dmaengine_terminate_sync(ctlr->dma_rx);
if (atomic_read(&rs->state) & TXDMA)
@@ -626,7 +687,7 @@ static int rockchip_spi_transfer_one(
}
rs->n_bytes = xfer->bits_per_word <= 8 ? 1 : 2;
-
+ rs->xfer = xfer;
use_dma = ctlr->can_dma ? ctlr->can_dma(ctlr, spi, xfer) : false;
ret = rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
@@ -636,7 +697,7 @@ static int rockchip_spi_transfer_one(
if (use_dma)
return rockchip_spi_prepare_dma(rs, ctlr, xfer);
- return rockchip_spi_prepare_irq(rs, xfer);
+ return rockchip_spi_prepare_irq(rs, ctlr, xfer);
}
static bool rockchip_spi_can_dma(struct spi_controller *ctlr,
@@ -653,6 +714,29 @@ static bool rockchip_spi_can_dma(struct spi_controller *ctlr,
return xfer->len / bytes_per_word >= rs->fifo_len;
}
+static int rockchip_spi_setup(struct spi_device *spi)
+{
+ struct rockchip_spi *rs = spi_controller_get_devdata(spi->controller);
+ u32 cr0;
+
+ pm_runtime_get_sync(rs->dev);
+
+ cr0 = readl_relaxed(rs->regs + ROCKCHIP_SPI_CTRLR0);
+
+ cr0 &= ~(0x3 << CR0_SCPH_OFFSET);
+ cr0 |= ((spi->mode & 0x3) << CR0_SCPH_OFFSET);
+ if (spi->mode & SPI_CS_HIGH && spi->chip_select <= 1)
+ cr0 |= BIT(spi->chip_select) << CR0_SOI_OFFSET;
+ else if (spi->chip_select <= 1)
+ cr0 &= ~(BIT(spi->chip_select) << CR0_SOI_OFFSET);
+
+ writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
+
+ pm_runtime_put(rs->dev);
+
+ return 0;
+}
+
static int rockchip_spi_probe(struct platform_device *pdev)
{
int ret;
@@ -780,6 +864,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
ctlr->min_speed_hz = rs->freq / BAUDR_SCKDV_MAX;
ctlr->max_speed_hz = min(rs->freq / BAUDR_SCKDV_MIN, MAX_SCLK_OUT);
+ ctlr->setup = rockchip_spi_setup;
ctlr->set_cs = rockchip_spi_set_cs;
ctlr->transfer_one = rockchip_spi_transfer_one;
ctlr->max_transfer_size = rockchip_spi_max_transfer_size;
@@ -815,8 +900,13 @@ static int rockchip_spi_probe(struct platform_device *pdev)
switch (readl_relaxed(rs->regs + ROCKCHIP_SPI_VERSION)) {
case ROCKCHIP_SPI_VER2_TYPE2:
ctlr->mode_bits |= SPI_CS_HIGH;
+ if (ctlr->can_dma && slave_mode)
+ rs->cs_inactive = true;
+ else
+ rs->cs_inactive = false;
break;
default:
+ rs->cs_inactive = false;
break;
}
@@ -875,14 +965,14 @@ static int rockchip_spi_suspend(struct device *dev)
{
int ret;
struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
ret = spi_controller_suspend(ctlr);
if (ret < 0)
return ret;
- ret = pm_runtime_force_suspend(dev);
- if (ret < 0)
- return ret;
+ clk_disable_unprepare(rs->spiclk);
+ clk_disable_unprepare(rs->apb_pclk);
pinctrl_pm_select_sleep_state(dev);
@@ -897,10 +987,14 @@ static int rockchip_spi_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
- ret = pm_runtime_force_resume(dev);
+ ret = clk_prepare_enable(rs->apb_pclk);
if (ret < 0)
return ret;
+ ret = clk_prepare_enable(rs->spiclk);
+ if (ret < 0)
+ clk_disable_unprepare(rs->apb_pclk);
+
ret = spi_controller_resume(ctlr);
if (ret < 0) {
clk_disable_unprepare(rs->spiclk);
@@ -942,7 +1036,7 @@ static int rockchip_spi_runtime_resume(struct device *dev)
#endif /* CONFIG_PM */
static const struct dev_pm_ops rockchip_spi_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
SET_RUNTIME_PM_OPS(rockchip_spi_runtime_suspend,
rockchip_spi_runtime_resume, NULL)
};
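
Annotation: the rockchip slave-abort path pauses the RX DMA channel, queries the residue for the cookie stored at submit time, and then drains whatever is left in the RX FIFO so it can report the number of bytes that actually arrived before the remote master dropped chip select. A minimal sketch of the residue computation, assuming a hypothetical helper where @len is the requested length in bytes and the channel transfers one byte per word:

    #include <linux/dmaengine.h>
    #include <linux/errno.h>

    /* Bytes received before the abort, or a negative errno. */
    static int my_rx_bytes_done(struct dma_chan *chan, dma_cookie_t cookie,
                                unsigned int len)
    {
            struct dma_tx_state state;
            enum dma_status status;

            dmaengine_pause(chan);
            status = dmaengine_tx_status(chan, cookie, &state);
            if (status == DMA_ERROR)
                    return -EIO;

            /* residue counts what the hardware has NOT yet transferred. */
            return len - state.residue;
    }
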
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index d6f51695ca5b..660aa866af06 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -12,7 +12,6 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -62,9 +61,6 @@ struct s3c24xx_spi {
unsigned char fiq_inuse;
unsigned char fiq_claimed;
- void (*set_cs)(struct s3c2410_spi_info *spi,
- int cs, int pol);
-
/* data buffers */
const unsigned char *tx;
unsigned char *rx;
@@ -84,29 +80,21 @@ static inline struct s3c24xx_spi *to_hw(struct spi_device *sdev)
return spi_master_get_devdata(sdev->master);
}
-static void s3c24xx_spi_gpiocs(struct s3c2410_spi_info *spi, int cs, int pol)
-{
- gpio_set_value(spi->pin_cs, pol);
-}
-
static void s3c24xx_spi_chipsel(struct spi_device *spi, int value)
{
struct s3c24xx_spi_devstate *cs = spi->controller_state;
struct s3c24xx_spi *hw = to_hw(spi);
- unsigned int cspol = spi->mode & SPI_CS_HIGH ? 1 : 0;
/* change the chipselect state and the state of the spi engine clock */
switch (value) {
case BITBANG_CS_INACTIVE:
- hw->set_cs(hw->pdata, spi->chip_select, cspol^1);
writeb(cs->spcon, hw->regs + S3C2410_SPCON);
break;
case BITBANG_CS_ACTIVE:
writeb(cs->spcon | S3C2410_SPCON_ENSCK,
hw->regs + S3C2410_SPCON);
- hw->set_cs(hw->pdata, spi->chip_select, cspol);
break;
}
}
@@ -452,14 +440,6 @@ static void s3c24xx_spi_initialsetup(struct s3c24xx_spi *hw)
writeb(0xff, hw->regs + S3C2410_SPPRE);
writeb(SPPIN_DEFAULT, hw->regs + S3C2410_SPPIN);
writeb(SPCON_DEFAULT, hw->regs + S3C2410_SPCON);
-
- if (hw->pdata) {
- if (hw->set_cs == s3c24xx_spi_gpiocs)
- gpio_direction_output(hw->pdata->pin_cs, 1);
-
- if (hw->pdata->gpio_setup)
- hw->pdata->gpio_setup(hw->pdata, 1);
- }
}
static int s3c24xx_spi_probe(struct platform_device *pdev)
@@ -502,6 +482,9 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
master->num_chipselect = hw->pdata->num_cs;
master->bus_num = pdata->bus_num;
master->bits_per_word_mask = SPI_BPW_MASK(8);
+ /* we need to call the local chipselect callback */
+ master->flags = SPI_MASTER_GPIO_SS;
+ master->use_gpio_descriptors = true;
/* setup the state for the bitbang driver */
@@ -541,27 +524,6 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
goto err_no_pdata;
}
- /* setup any gpio we can */
-
- if (!pdata->set_cs) {
- if (pdata->pin_cs < 0) {
- dev_err(&pdev->dev, "No chipselect pin\n");
- err = -EINVAL;
- goto err_register;
- }
-
- err = devm_gpio_request(&pdev->dev, pdata->pin_cs,
- dev_name(&pdev->dev));
- if (err) {
- dev_err(&pdev->dev, "Failed to get gpio for cs\n");
- goto err_register;
- }
-
- hw->set_cs = s3c24xx_spi_gpiocs;
- gpio_direction_output(pdata->pin_cs, 1);
- } else
- hw->set_cs = pdata->set_cs;
-
s3c24xx_spi_initialsetup(hw);
/* register our spi controller */
@@ -604,9 +566,6 @@ static int s3c24xx_spi_suspend(struct device *dev)
if (ret)
return ret;
- if (hw->pdata && hw->pdata->gpio_setup)
- hw->pdata->gpio_setup(hw->pdata, 0);
-
clk_disable(hw->clk);
return 0;
}
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 8755cd85e83c..c26440e9058d 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -13,10 +13,8 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/platform_data/spi-s3c64xx.h>
@@ -656,7 +654,11 @@ static int s3c64xx_spi_prepare_message(struct spi_master *master,
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
/* Configure feedback delay */
- writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
+ if (!cs)
+ /* No delay if not defined */
+ writel(0, sdd->regs + S3C64XX_SPI_FB_CLK);
+ else
+ writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
return 0;
}
@@ -796,16 +798,14 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
return ERR_PTR(-EINVAL);
}
- data_np = of_get_child_by_name(slave_np, "controller-data");
- if (!data_np) {
- dev_err(&spi->dev, "child node 'controller-data' not found\n");
- return ERR_PTR(-EINVAL);
- }
-
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
- if (!cs) {
- of_node_put(data_np);
+ if (!cs)
return ERR_PTR(-ENOMEM);
+
+ data_np = of_get_child_by_name(slave_np, "controller-data");
+ if (!data_np) {
+ dev_info(&spi->dev, "feedback delay set to default (0)\n");
+ return cs;
}
of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
@@ -830,34 +830,16 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
if (spi->dev.of_node) {
cs = s3c64xx_get_slave_ctrldata(spi);
spi->controller_data = cs;
- } else if (cs) {
- /* On non-DT platforms the SPI core will set spi->cs_gpio
- * to -ENOENT. The GPIO pin used to drive the chip select
- * is defined by using platform data so spi->cs_gpio value
- * has to be override to have the proper GPIO pin number.
- */
- spi->cs_gpio = cs->line;
}
- if (IS_ERR_OR_NULL(cs)) {
+ /* NULL is fine, we just avoid using the FB delay (=0) */
+ if (IS_ERR(cs)) {
dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
return -ENODEV;
}
- if (!spi_get_ctldata(spi)) {
- if (gpio_is_valid(spi->cs_gpio)) {
- err = gpio_request_one(spi->cs_gpio, GPIOF_OUT_INIT_HIGH,
- dev_name(&spi->dev));
- if (err) {
- dev_err(&spi->dev,
- "Failed to get /CS gpio [%d]: %d\n",
- spi->cs_gpio, err);
- goto err_gpio_req;
- }
- }
-
+ if (!spi_get_ctldata(spi))
spi_set_ctldata(spi, cs);
- }
pm_runtime_get_sync(&sdd->pdev->dev);
@@ -909,11 +891,9 @@ setup_exit:
/* setup() returns with device de-selected */
s3c64xx_spi_set_cs(spi, false);
- if (gpio_is_valid(spi->cs_gpio))
- gpio_free(spi->cs_gpio);
spi_set_ctldata(spi, NULL);
-err_gpio_req:
+ /* This was dynamically allocated on the DT path */
if (spi->dev.of_node)
kfree(cs);
@@ -924,19 +904,9 @@ static void s3c64xx_spi_cleanup(struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
- if (gpio_is_valid(spi->cs_gpio)) {
- gpio_free(spi->cs_gpio);
- if (spi->dev.of_node)
- kfree(cs);
- else {
- /* On non-DT platforms, the SPI core sets
- * spi->cs_gpio to -ENOENT and .setup()
- * overrides it with the GPIO pin value
- * passed using platform data.
- */
- spi->cs_gpio = -ENOENT;
- }
- }
+ /* This was dynamically allocated on the DT path */
+ if (spi->dev.of_node)
+ kfree(cs);
spi_set_ctldata(spi, NULL);
}
@@ -1131,6 +1101,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
master->prepare_message = s3c64xx_spi_prepare_message;
master->transfer_one = s3c64xx_spi_transfer_one;
master->num_chipselect = sci->num_cs;
+ master->use_gpio_descriptors = true;
master->dma_alignment = 8;
master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(8);
@@ -1442,6 +1413,16 @@ static const struct s3c64xx_spi_port_config exynos5433_spi_port_config = {
.quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
};
+static struct s3c64xx_spi_port_config fsd_spi_port_config = {
+ .fifo_lvl_mask = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f},
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+ .high_speed = true,
+ .clk_from_cmu = true,
+ .clk_ioclk = false,
+ .quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
+};
+
static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
{
.name = "s3c2443-spi",
@@ -1472,6 +1453,9 @@ static const struct of_device_id s3c64xx_spi_dt_match[] = {
{ .compatible = "samsung,exynos5433-spi",
.data = (void *)&exynos5433_spi_port_config,
},
+ { .compatible = "tesla,fsd-spi",
+ .data = (void *)&fsd_spi_port_config,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
diff --git a/drivers/spi/spi-slave-system-control.c b/drivers/spi/spi-slave-system-control.c
index 169f3d595f60..d37cfe995a63 100644
--- a/drivers/spi/spi-slave-system-control.c
+++ b/drivers/spi/spi-slave-system-control.c
@@ -132,13 +132,12 @@ static int spi_slave_system_control_probe(struct spi_device *spi)
return 0;
}
-static int spi_slave_system_control_remove(struct spi_device *spi)
+static void spi_slave_system_control_remove(struct spi_device *spi)
{
struct spi_slave_system_control_priv *priv = spi_get_drvdata(spi);
spi_slave_abort(spi);
wait_for_completion(&priv->finished);
- return 0;
}
static struct spi_driver spi_slave_system_control_driver = {
diff --git a/drivers/spi/spi-slave-time.c b/drivers/spi/spi-slave-time.c
index f2e07a392d68..f56c1afb8534 100644
--- a/drivers/spi/spi-slave-time.c
+++ b/drivers/spi/spi-slave-time.c
@@ -106,13 +106,12 @@ static int spi_slave_time_probe(struct spi_device *spi)
return 0;
}
-static int spi_slave_time_remove(struct spi_device *spi)
+static void spi_slave_time_remove(struct spi_device *spi)
{
struct spi_slave_time_priv *priv = spi_get_drvdata(spi);
spi_slave_abort(spi);
wait_for_completion(&priv->finished);
- return 0;
}
static struct spi_driver spi_slave_time_driver = {
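
Annotation: both spi-slave helper conversions above are part of the tree-wide switch of struct spi_driver's remove callback from int to void; the SPI core ignored the return value anyway, so the unbind proceeded regardless. A minimal sketch of the new shape with hypothetical names:

    #include <linux/completion.h>
    #include <linux/spi/spi.h>

    struct my_priv {
            struct completion finished;
    };

    static void my_spi_remove(struct spi_device *spi)
    {
            struct my_priv *priv = spi_get_drvdata(spi);

            spi_slave_abort(spi);
            /* Nothing to return: failure could not stop the unbind anyway. */
            wait_for_completion(&priv->finished);
    }
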
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
index 6c44dda9ee8c..843be803696b 100644
--- a/drivers/spi/spi-st-ssc4.c
+++ b/drivers/spi/spi-st-ssc4.c
@@ -17,7 +17,6 @@
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
@@ -171,11 +170,6 @@ static int spi_st_transfer_one(struct spi_master *master,
return t->len;
}
-static void spi_st_cleanup(struct spi_device *spi)
-{
- gpio_free(spi->cs_gpio);
-}
-
/* the spi->mode bits understood by this driver: */
#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH)
static int spi_st_setup(struct spi_device *spi)
@@ -183,29 +177,17 @@ static int spi_st_setup(struct spi_device *spi)
struct spi_st *spi_st = spi_master_get_devdata(spi->master);
u32 spi_st_clk, sscbrg, var;
u32 hz = spi->max_speed_hz;
- int cs = spi->cs_gpio;
- int ret;
if (!hz) {
dev_err(&spi->dev, "max_speed_hz unspecified\n");
return -EINVAL;
}
- if (!gpio_is_valid(cs)) {
- dev_err(&spi->dev, "%d is not a valid gpio\n", cs);
+ if (!spi->cs_gpiod) {
+ dev_err(&spi->dev, "no valid gpio assigned\n");
return -EINVAL;
}
- ret = gpio_request(cs, dev_name(&spi->dev));
- if (ret) {
- dev_err(&spi->dev, "could not request gpio:%d\n", cs);
- return ret;
- }
-
- ret = gpio_direction_output(cs, spi->mode & SPI_CS_HIGH);
- if (ret)
- goto out_free_gpio;
-
spi_st_clk = clk_get_rate(spi_st->clk);
/* Set SSC_BRF */
@@ -213,8 +195,7 @@ static int spi_st_setup(struct spi_device *spi)
if (sscbrg < 0x07 || sscbrg > BIT(16)) {
dev_err(&spi->dev,
"baudrate %d outside valid range %d\n", sscbrg, hz);
- ret = -EINVAL;
- goto out_free_gpio;
+ return -EINVAL;
}
spi_st->baud = spi_st_clk / (2 * sscbrg);
@@ -263,10 +244,6 @@ static int spi_st_setup(struct spi_device *spi)
readl_relaxed(spi_st->base + SSC_RBUF);
return 0;
-
-out_free_gpio:
- gpio_free(cs);
- return ret;
}
/* Interrupt fired when TX shift register becomes empty */
@@ -309,11 +286,11 @@ static int spi_st_probe(struct platform_device *pdev)
master->dev.of_node = np;
master->mode_bits = MODEBITS;
master->setup = spi_st_setup;
- master->cleanup = spi_st_cleanup;
master->transfer_one = spi_st_transfer_one;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->auto_runtime_pm = true;
master->bus_num = pdev->id;
+ master->use_gpio_descriptors = true;
spi_st = spi_master_get_devdata(master);
spi_st->clk = devm_clk_get(&pdev->dev, "ssc");
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 7fc24505a72c..a6adc20f6862 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -763,7 +763,7 @@ static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id)
if (!spi->cur_usedma && (spi->cur_comm == SPI_SIMPLEX_TX ||
spi->cur_comm == SPI_3WIRE_TX)) {
/* OVR flag shouldn't be handled for TX only mode */
- sr &= ~STM32F4_SPI_SR_OVR | STM32F4_SPI_SR_RXNE;
+ sr &= ~(STM32F4_SPI_SR_OVR | STM32F4_SPI_SR_RXNE);
mask |= STM32F4_SPI_SR_TXE;
}
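
Annotation: the stm32 hunk fixes a classic precedence bug. Unary ~ binds tighter than |, so ~STM32F4_SPI_SR_OVR | STM32F4_SPI_SR_RXNE evaluates as (~OVR) | RXNE, which equals ~OVR because the RXNE bit is already set in ~OVR; the buggy mask therefore cleared only OVR and left RXNE set. Parenthesizing the union clears both flags as intended. A standalone illustration with made-up bit positions:

    #include <stdint.h>
    #include <stdio.h>

    #define OVR  (1u << 6)  /* made-up positions for illustration */
    #define RXNE (1u << 0)

    int main(void)
    {
            uint32_t sr = OVR | RXNE | (1u << 3);
            uint32_t buggy = sr & (~OVR | RXNE); /* == sr & ~OVR: RXNE survives */
            uint32_t fixed = sr & ~(OVR | RXNE); /* both flags cleared */

            printf("buggy=0x%x fixed=0x%x\n", buggy, fixed); /* 0x9 vs 0x8 */
            return 0;
    }
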
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index 1fdfc6e6691d..6000d0761206 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -280,7 +280,7 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
* SPI_CLK = MOD_CLK / (2 ^ (cdr + 1))
* Or we can use CDR2, which is calculated with the formula:
* SPI_CLK = MOD_CLK / (2 * (cdr + 1))
- * Wether we use the former or the latter is set through the
+ * Whether we use the former or the latter is set through the
* DRS bit.
*
* First try CDR2, and if we can't reach the expected
diff --git a/drivers/spi/spi-sunplus-sp7021.c b/drivers/spi/spi-sunplus-sp7021.c
new file mode 100644
index 000000000000..f989f7b99296
--- /dev/null
+++ b/drivers/spi/spi-sunplus-sp7021.c
@@ -0,0 +1,584 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2021 Sunplus Inc.
+// Author: Li-hao Kuo <lhjeff911@gmail.com>
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/spi/spi.h>
+
+#define SP7021_DATA_RDY_REG 0x0044
+#define SP7021_SLAVE_DMA_CTRL_REG 0x0048
+#define SP7021_SLAVE_DMA_LENGTH_REG 0x004c
+#define SP7021_SLAVE_DMA_ADDR_REG 0x004c
+
+#define SP7021_SLAVE_DATA_RDY BIT(0)
+#define SP7021_SLAVE_SW_RST BIT(1)
+#define SP7021_SLA_DMA_W_INT BIT(8)
+#define SP7021_SLAVE_CLR_INT BIT(8)
+#define SP7021_SLAVE_DMA_EN BIT(0)
+#define SP7021_SLAVE_DMA_RW BIT(6)
+#define SP7021_SLAVE_DMA_CMD GENMASK(3, 2)
+
+#define SP7021_FIFO_REG 0x0034
+#define SP7021_SPI_STATUS_REG 0x0038
+#define SP7021_SPI_CONFIG_REG 0x003c
+#define SP7021_INT_BUSY_REG 0x004c
+#define SP7021_DMA_CTRL_REG 0x0050
+
+#define SP7021_SPI_START_FD BIT(0)
+#define SP7021_FD_SW_RST BIT(1)
+#define SP7021_TX_EMP_FLAG BIT(2)
+#define SP7021_RX_EMP_FLAG BIT(4)
+#define SP7021_RX_FULL_FLAG BIT(5)
+#define SP7021_FINISH_FLAG BIT(6)
+
+#define SP7021_TX_CNT_MASK GENMASK(11, 8)
+#define SP7021_RX_CNT_MASK GENMASK(15, 12)
+#define SP7021_TX_LEN_MASK GENMASK(23, 16)
+#define SP7021_GET_LEN_MASK GENMASK(31, 24)
+#define SP7021_SET_TX_LEN GENMASK(23, 16)
+#define SP7021_SET_XFER_LEN GENMASK(31, 24)
+
+#define SP7021_CPOL_FD BIT(0)
+#define SP7021_CPHA_R BIT(1)
+#define SP7021_CPHA_W BIT(2)
+#define SP7021_LSB_SEL BIT(4)
+#define SP7021_CS_POR BIT(5)
+#define SP7021_FD_SEL BIT(6)
+
+#define SP7021_RX_UNIT GENMASK(8, 7)
+#define SP7021_TX_UNIT GENMASK(10, 9)
+#define SP7021_TX_EMP_FLAG_MASK BIT(11)
+#define SP7021_RX_FULL_FLAG_MASK BIT(14)
+#define SP7021_FINISH_FLAG_MASK BIT(15)
+#define SP7021_CLEAN_RW_BYTE GENMASK(10, 7)
+#define SP7021_CLEAN_FLUG_MASK GENMASK(15, 11)
+#define SP7021_CLK_MASK GENMASK(31, 16)
+
+#define SP7021_INT_BYPASS BIT(3)
+#define SP7021_CLR_MASTER_INT BIT(6)
+
+#define SP7021_SPI_DATA_SIZE (255)
+#define SP7021_FIFO_DATA_LEN (16)
+
+enum {
+ SP7021_MASTER_MODE = 0,
+ SP7021_SLAVE_MODE = 1,
+};
+
+struct sp7021_spi_ctlr {
+ struct device *dev;
+ struct spi_controller *ctlr;
+ void __iomem *m_base;
+ void __iomem *s_base;
+ u32 xfer_conf;
+ int mode;
+ int m_irq;
+ int s_irq;
+ struct clk *spi_clk;
+ struct reset_control *rstc;
+ // irq spin lock
+ spinlock_t lock;
+ // data xfer lock
+ struct mutex buf_lock;
+ struct completion isr_done;
+ struct completion slave_isr;
+ unsigned int rx_cur_len;
+ unsigned int tx_cur_len;
+ unsigned int data_unit;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+};
+
+static irqreturn_t sp7021_spi_slave_irq(int irq, void *dev)
+{
+ struct sp7021_spi_ctlr *pspim = dev;
+ unsigned int data_status;
+
+ data_status = readl(pspim->s_base + SP7021_DATA_RDY_REG);
+ data_status |= SP7021_SLAVE_CLR_INT;
+ writel(data_status, pspim->s_base + SP7021_DATA_RDY_REG);
+ complete(&pspim->slave_isr);
+ return IRQ_HANDLED;
+}
+
+static int sp7021_spi_slave_abort(struct spi_controller *ctlr)
+{
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+
+ complete(&pspim->slave_isr);
+ complete(&pspim->isr_done);
+ return 0;
+}
+
+static int sp7021_spi_slave_tx(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(spi->controller);
+ u32 value;
+
+ reinit_completion(&pspim->slave_isr);
+ value = SP7021_SLAVE_DMA_EN | SP7021_SLAVE_DMA_RW | FIELD_PREP(SP7021_SLAVE_DMA_CMD, 3);
+ writel(value, pspim->s_base + SP7021_SLAVE_DMA_CTRL_REG);
+ writel(xfer->len, pspim->s_base + SP7021_SLAVE_DMA_LENGTH_REG);
+ writel(xfer->tx_dma, pspim->s_base + SP7021_SLAVE_DMA_ADDR_REG);
+ value = readl(pspim->s_base + SP7021_DATA_RDY_REG);
+ value |= SP7021_SLAVE_DATA_RDY;
+ writel(value, pspim->s_base + SP7021_DATA_RDY_REG);
+ if (wait_for_completion_interruptible(&pspim->isr_done)) {
+ dev_err(&spi->dev, "%s() wait_for_completion err\n", __func__);
+ return -EINTR;
+ }
+ return 0;
+}
+
+static int sp7021_spi_slave_rx(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(spi->controller);
+ u32 value;
+
+ reinit_completion(&pspim->isr_done);
+ value = SP7021_SLAVE_DMA_EN | FIELD_PREP(SP7021_SLAVE_DMA_CMD, 3);
+ writel(value, pspim->s_base + SP7021_SLAVE_DMA_CTRL_REG);
+ writel(xfer->len, pspim->s_base + SP7021_SLAVE_DMA_LENGTH_REG);
+ writel(xfer->rx_dma, pspim->s_base + SP7021_SLAVE_DMA_ADDR_REG);
+ if (wait_for_completion_interruptible(&pspim->isr_done)) {
+ dev_err(&spi->dev, "%s() wait_for_completion err\n", __func__);
+ return -EINTR;
+ }
+ writel(SP7021_SLAVE_SW_RST, pspim->s_base + SP7021_SLAVE_DMA_CTRL_REG);
+ return 0;
+}
+
+static void sp7021_spi_master_rb(struct sp7021_spi_ctlr *pspim, unsigned int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ pspim->rx_buf[pspim->rx_cur_len] =
+ readl(pspim->m_base + SP7021_FIFO_REG);
+ pspim->rx_cur_len++;
+ }
+}
+
+static void sp7021_spi_master_wb(struct sp7021_spi_ctlr *pspim, unsigned int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ writel(pspim->tx_buf[pspim->tx_cur_len],
+ pspim->m_base + SP7021_FIFO_REG);
+ pspim->tx_cur_len++;
+ }
+}
+
+static irqreturn_t sp7021_spi_master_irq(int irq, void *dev)
+{
+ struct sp7021_spi_ctlr *pspim = dev;
+ unsigned int tx_cnt, total_len;
+ unsigned int tx_len, rx_cnt;
+ unsigned int fd_status;
+ bool isrdone = false;
+ u32 value;
+
+ fd_status = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
+ tx_cnt = FIELD_GET(SP7021_TX_CNT_MASK, fd_status);
+ tx_len = FIELD_GET(SP7021_TX_LEN_MASK, fd_status);
+ total_len = FIELD_GET(SP7021_GET_LEN_MASK, fd_status);
+
+ if ((fd_status & SP7021_TX_EMP_FLAG) && (fd_status & SP7021_RX_EMP_FLAG) && total_len == 0)
+ return IRQ_NONE;
+
+ if (tx_len == 0 && total_len == 0)
+ return IRQ_NONE;
+
+ spin_lock_irq(&pspim->lock);
+
+ rx_cnt = FIELD_GET(SP7021_RX_CNT_MASK, fd_status);
+ if (fd_status & SP7021_RX_FULL_FLAG)
+ rx_cnt = pspim->data_unit;
+
+ tx_cnt = min(tx_len - pspim->tx_cur_len, pspim->data_unit - tx_cnt);
+ dev_dbg(pspim->dev, "fd_st=0x%x rx_c:%d tx_c:%d tx_l:%d",
+ fd_status, rx_cnt, tx_cnt, tx_len);
+
+ if (rx_cnt > 0)
+ sp7021_spi_master_rb(pspim, rx_cnt);
+ if (tx_cnt > 0)
+ sp7021_spi_master_wb(pspim, tx_cnt);
+
+ fd_status = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
+ tx_len = FIELD_GET(SP7021_TX_LEN_MASK, fd_status);
+ total_len = FIELD_GET(SP7021_GET_LEN_MASK, fd_status);
+
+ if (fd_status & SP7021_FINISH_FLAG || tx_len == pspim->tx_cur_len) {
+ while (total_len != pspim->rx_cur_len) {
+ fd_status = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
+ total_len = FIELD_GET(SP7021_GET_LEN_MASK, fd_status);
+ if (fd_status & SP7021_RX_FULL_FLAG)
+ rx_cnt = pspim->data_unit;
+ else
+ rx_cnt = FIELD_GET(SP7021_RX_CNT_MASK, fd_status);
+
+ if (rx_cnt > 0)
+ sp7021_spi_master_rb(pspim, rx_cnt);
+ }
+ value = readl(pspim->m_base + SP7021_INT_BUSY_REG);
+ value |= SP7021_CLR_MASTER_INT;
+ writel(value, pspim->m_base + SP7021_INT_BUSY_REG);
+ writel(SP7021_FINISH_FLAG, pspim->m_base + SP7021_SPI_STATUS_REG);
+ isrdone = true;
+ }
+
+ if (isrdone)
+ complete(&pspim->isr_done);
+ spin_unlock_irq(&pspim->lock);
+ return IRQ_HANDLED;
+}
+
+static void sp7021_prep_transfer(struct spi_controller *ctlr, struct spi_device *spi)
+{
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+
+ pspim->tx_cur_len = 0;
+ pspim->rx_cur_len = 0;
+ pspim->data_unit = SP7021_FIFO_DATA_LEN;
+}
+
+// Preliminarily set CS, CPOL, CPHA and LSB
+static int sp7021_spi_controller_prepare_message(struct spi_controller *ctlr,
+ struct spi_message *msg)
+{
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct spi_device *s = msg->spi;
+ u32 valus, rs = 0;
+
+ valus = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
+ valus |= SP7021_FD_SW_RST;
+ writel(valus, pspim->m_base + SP7021_SPI_STATUS_REG);
+ rs |= SP7021_FD_SEL;
+ if (s->mode & SPI_CPOL)
+ rs |= SP7021_CPOL_FD;
+
+ if (s->mode & SPI_LSB_FIRST)
+ rs |= SP7021_LSB_SEL;
+
+ if (s->mode & SPI_CS_HIGH)
+ rs |= SP7021_CS_POR;
+
+ if (s->mode & SPI_CPHA)
+ rs |= SP7021_CPHA_R;
+ else
+ rs |= SP7021_CPHA_W;
+
+ rs |= FIELD_PREP(SP7021_TX_UNIT, 0) | FIELD_PREP(SP7021_RX_UNIT, 0);
+ pspim->xfer_conf = rs;
+ if (pspim->xfer_conf & SP7021_CPOL_FD)
+ writel(pspim->xfer_conf, pspim->m_base + SP7021_SPI_CONFIG_REG);
+
+ return 0;
+}
+
+static void sp7021_spi_setup_clk(struct spi_controller *ctlr, struct spi_transfer *xfer)
+{
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ u32 clk_rate, clk_sel, div;
+
+ clk_rate = clk_get_rate(pspim->spi_clk);
+ div = max(2U, clk_rate / xfer->speed_hz);
+
+ clk_sel = (div / 2) - 1;
+ pspim->xfer_conf &= ~SP7021_CLK_MASK;
+ pspim->xfer_conf |= FIELD_PREP(SP7021_CLK_MASK, clk_sel);
+ writel(pspim->xfer_conf, pspim->m_base + SP7021_SPI_CONFIG_REG);
+}
+
+static int sp7021_spi_master_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ unsigned long timeout = msecs_to_jiffies(1000);
+ unsigned int xfer_cnt, xfer_len, last_len;
+ unsigned int i, len_temp;
+ u32 reg_temp;
+
+ xfer_cnt = xfer->len / SP7021_SPI_DATA_SIZE;
+ last_len = xfer->len % SP7021_SPI_DATA_SIZE;
+
+ for (i = 0; i <= xfer_cnt; i++) {
+ mutex_lock(&pspim->buf_lock);
+ sp7021_prep_transfer(ctlr, spi);
+ sp7021_spi_setup_clk(ctlr, xfer);
+ reinit_completion(&pspim->isr_done);
+
+ if (i == xfer_cnt)
+ xfer_len = last_len;
+ else
+ xfer_len = SP7021_SPI_DATA_SIZE;
+
+ pspim->tx_buf = xfer->tx_buf + i * SP7021_SPI_DATA_SIZE;
+ pspim->rx_buf = xfer->rx_buf + i * SP7021_SPI_DATA_SIZE;
+
+ if (pspim->tx_cur_len < xfer_len) {
+ len_temp = min(pspim->data_unit, xfer_len);
+ sp7021_spi_master_wb(pspim, len_temp);
+ }
+ reg_temp = readl(pspim->m_base + SP7021_SPI_CONFIG_REG);
+ reg_temp &= ~SP7021_CLEAN_RW_BYTE;
+ reg_temp &= ~SP7021_CLEAN_FLUG_MASK;
+ reg_temp |= SP7021_FD_SEL | SP7021_FINISH_FLAG_MASK |
+ SP7021_TX_EMP_FLAG_MASK | SP7021_RX_FULL_FLAG_MASK |
+ FIELD_PREP(SP7021_TX_UNIT, 0) | FIELD_PREP(SP7021_RX_UNIT, 0);
+ writel(reg_temp, pspim->m_base + SP7021_SPI_CONFIG_REG);
+
+ reg_temp = FIELD_PREP(SP7021_SET_TX_LEN, xfer_len) |
+ FIELD_PREP(SP7021_SET_XFER_LEN, xfer_len) |
+ SP7021_SPI_START_FD;
+ writel(reg_temp, pspim->m_base + SP7021_SPI_STATUS_REG);
+
+ if (!wait_for_completion_interruptible_timeout(&pspim->isr_done, timeout)) {
+ dev_err(&spi->dev, "wait_for_completion err\n");
+ mutex_unlock(&pspim->buf_lock);
+ return -ETIMEDOUT;
+ }
+
+ reg_temp = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
+ if (reg_temp & SP7021_FINISH_FLAG) {
+ writel(SP7021_FINISH_FLAG, pspim->m_base + SP7021_SPI_STATUS_REG);
+ writel(readl(pspim->m_base + SP7021_SPI_CONFIG_REG) &
+ SP7021_CLEAN_FLUG_MASK, pspim->m_base + SP7021_SPI_CONFIG_REG);
+ }
+
+ if (pspim->xfer_conf & SP7021_CPOL_FD)
+ writel(pspim->xfer_conf, pspim->m_base + SP7021_SPI_CONFIG_REG);
+
+ mutex_unlock(&pspim->buf_lock);
+ }
+ return 0;
+}
+
+static int sp7021_spi_slave_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct device *dev = pspim->dev;
+ int ret;
+
+ if (xfer->tx_buf && !xfer->rx_buf) {
+ xfer->tx_dma = dma_map_single(dev, (void *)xfer->tx_buf,
+ xfer->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, xfer->tx_dma))
+ return -ENOMEM;
+ ret = sp7021_spi_slave_tx(spi, xfer);
+ dma_unmap_single(dev, xfer->tx_dma, xfer->len, DMA_TO_DEVICE);
+ } else if (xfer->rx_buf && !xfer->tx_buf) {
+ xfer->rx_dma = dma_map_single(dev, xfer->rx_buf, xfer->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, xfer->rx_dma))
+ return -ENOMEM;
+ ret = sp7021_spi_slave_rx(spi, xfer);
+ dma_unmap_single(dev, xfer->rx_dma, xfer->len, DMA_FROM_DEVICE);
+ } else {
+ dev_dbg(&ctlr->dev, "%s() wrong command\n", __func__);
+ return -EINVAL;
+ }
+
+ spi_finalize_current_transfer(ctlr);
+ return ret;
+}
+
+static void sp7021_spi_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static void sp7021_spi_reset_control_assert(void *data)
+{
+ reset_control_assert(data);
+}
+
+static int sp7021_spi_controller_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sp7021_spi_ctlr *pspim;
+ struct spi_controller *ctlr;
+ int mode, ret;
+
+ pdev->id = of_alias_get_id(pdev->dev.of_node, "sp_spi");
+
+ if (device_property_read_bool(dev, "spi-slave"))
+ mode = SP7021_SLAVE_MODE;
+ else
+ mode = SP7021_MASTER_MODE;
+
+ if (mode == SP7021_SLAVE_MODE)
+ ctlr = devm_spi_alloc_slave(dev, sizeof(*pspim));
+ else
+ ctlr = devm_spi_alloc_master(dev, sizeof(*pspim));
+ if (!ctlr)
+ return -ENOMEM;
+ device_set_node(&ctlr->dev, dev_fwnode(dev));
+ ctlr->bus_num = pdev->id;
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ ctlr->auto_runtime_pm = true;
+ ctlr->prepare_message = sp7021_spi_controller_prepare_message;
+ if (mode == SP7021_SLAVE_MODE) {
+ ctlr->transfer_one = sp7021_spi_slave_transfer_one;
+ ctlr->slave_abort = sp7021_spi_slave_abort;
+ ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ } else {
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->min_speed_hz = 40000;
+ ctlr->max_speed_hz = 25000000;
+ ctlr->use_gpio_descriptors = true;
+ ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+ ctlr->transfer_one = sp7021_spi_master_transfer_one;
+ }
+ platform_set_drvdata(pdev, ctlr);
+ pspim = spi_controller_get_devdata(ctlr);
+ pspim->mode = mode;
+ pspim->ctlr = ctlr;
+ pspim->dev = dev;
+ spin_lock_init(&pspim->lock);
+ mutex_init(&pspim->buf_lock);
+ init_completion(&pspim->isr_done);
+ init_completion(&pspim->slave_isr);
+
+ pspim->m_base = devm_platform_ioremap_resource_byname(pdev, "master");
+ if (IS_ERR(pspim->m_base))
+ return dev_err_probe(dev, PTR_ERR(pspim->m_base), "m_base get fail\n");
+
+ pspim->s_base = devm_platform_ioremap_resource_byname(pdev, "slave");
+ if (IS_ERR(pspim->s_base))
+ return dev_err_probe(dev, PTR_ERR(pspim->s_base), "s_base get fail\n");
+
+ pspim->m_irq = platform_get_irq_byname(pdev, "master_risc");
+ if (pspim->m_irq < 0)
+ return pspim->m_irq;
+
+ pspim->s_irq = platform_get_irq_byname(pdev, "slave_risc");
+ if (pspim->s_irq < 0)
+ return pspim->s_irq;
+
+ pspim->spi_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pspim->spi_clk))
+ return dev_err_probe(dev, PTR_ERR(pspim->spi_clk), "failed to get clock\n");
+
+ pspim->rstc = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(pspim->rstc))
+ return dev_err_probe(dev, PTR_ERR(pspim->rstc), "failed to get reset control\n");
+
+ ret = clk_prepare_enable(pspim->spi_clk);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable clk\n");
+
+ ret = devm_add_action_or_reset(dev, sp7021_spi_disable_unprepare, pspim->spi_clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(pspim->rstc);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to deassert reset\n");
+
+ ret = devm_add_action_or_reset(dev, sp7021_spi_reset_control_assert, pspim->rstc);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(dev, pspim->m_irq, sp7021_spi_master_irq,
+ IRQF_TRIGGER_RISING, pdev->name, pspim);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(dev, pspim->s_irq, sp7021_spi_slave_irq,
+ IRQF_TRIGGER_RISING, pdev->name, pspim);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(dev);
+ ret = spi_register_controller(ctlr);
+ if (ret) {
+ pm_runtime_disable(dev);
+ return dev_err_probe(dev, ret, "spi_register_controller fail\n");
+ }
+ return 0;
+}
+
+static int sp7021_spi_controller_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
+
+ spi_unregister_controller(ctlr);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ return 0;
+}
+
+static int __maybe_unused sp7021_spi_controller_suspend(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+
+ return reset_control_assert(pspim->rstc);
+}
+
+static int __maybe_unused sp7021_spi_controller_resume(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+
+ reset_control_deassert(pspim->rstc);
+ return clk_prepare_enable(pspim->spi_clk);
+}
+
+#ifdef CONFIG_PM
+static int sp7021_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+
+ return reset_control_assert(pspim->rstc);
+}
+
+static int sp7021_spi_runtime_resume(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+
+ return reset_control_deassert(pspim->rstc);
+}
+#endif
+
+static const struct dev_pm_ops sp7021_spi_pm_ops = {
+ SET_RUNTIME_PM_OPS(sp7021_spi_runtime_suspend,
+ sp7021_spi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(sp7021_spi_controller_suspend,
+ sp7021_spi_controller_resume)
+};
+
+static const struct of_device_id sp7021_spi_controller_ids[] = {
+ { .compatible = "sunplus,sp7021-spi" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sp7021_spi_controller_ids);
+
+static struct platform_driver sp7021_spi_controller_driver = {
+ .probe = sp7021_spi_controller_probe,
+ .remove = sp7021_spi_controller_remove,
+ .driver = {
+ .name = "sunplus,sp7021-spi-controller",
+ .of_match_table = sp7021_spi_controller_ids,
+ .pm = &sp7021_spi_pm_ops,
+ },
+};
+module_platform_driver(sp7021_spi_controller_driver);
+
+MODULE_AUTHOR("Li-hao Kuo <lhjeff911@gmail.com>");
+MODULE_DESCRIPTION("Sunplus SPI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index e9de1d958bbd..8f345247a8c3 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -1352,6 +1352,10 @@ static int tegra_spi_probe(struct platform_device *pdev)
tspi->phys = r->start;
spi_irq = platform_get_irq(pdev, 0);
+ if (spi_irq < 0) {
+ ret = spi_irq;
+ goto exit_free_master;
+ }
tspi->irq = spi_irq;
tspi->clk = devm_clk_get(&pdev->dev, "spi");
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 2a03739a0c60..80c3787deea9 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1006,14 +1006,8 @@ static int tegra_slink_probe(struct platform_device *pdev)
struct resource *r;
int ret, spi_irq;
const struct tegra_slink_chip_data *cdata = NULL;
- const struct of_device_id *match;
- match = of_match_device(tegra_slink_of_match, &pdev->dev);
- if (!match) {
- dev_err(&pdev->dev, "Error: No device match found\n");
- return -ENODEV;
- }
- cdata = match->data;
+ cdata = of_device_get_match_data(&pdev->dev);
master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
if (!master) {
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index ce1bdb4767ea..66f647f32876 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -21,6 +21,8 @@
#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
+#include <linux/acpi.h>
+#include <linux/property.h>
#define QSPI_COMMAND1 0x000
#define QSPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
@@ -119,11 +121,40 @@
#define QSPI_NUM_DUMMY_CYCLE(x) (((x) & 0xff) << 0)
#define QSPI_DUMMY_CYCLES_MAX 0xff
+#define QSPI_CMB_SEQ_CMD 0x19c
+#define QSPI_COMMAND_VALUE_SET(x) (((x) & 0xFF) << 0)
+
+#define QSPI_CMB_SEQ_CMD_CFG 0x1a0
+#define QSPI_COMMAND_X1_X2_X4(x) (((x) & 0x3) << 13)
+#define QSPI_COMMAND_X1_X2_X4_MASK (0x03 << 13)
+#define QSPI_COMMAND_SDR_DDR BIT(12)
+#define QSPI_COMMAND_SIZE_SET(x) (((x) & 0xFF) << 0)
+
+#define QSPI_GLOBAL_CONFIG 0x1a4
+#define QSPI_CMB_SEQ_EN BIT(0)
+
+#define QSPI_CMB_SEQ_ADDR 0x1a8
+#define QSPI_ADDRESS_VALUE_SET(x) (((x) & 0xFFFF) << 0)
+
+#define QSPI_CMB_SEQ_ADDR_CFG 0x1ac
+#define QSPI_ADDRESS_X1_X2_X4(x) (((x) & 0x3) << 13)
+#define QSPI_ADDRESS_X1_X2_X4_MASK (0x03 << 13)
+#define QSPI_ADDRESS_SDR_DDR BIT(12)
+#define QSPI_ADDRESS_SIZE_SET(x) (((x) & 0xFF) << 0)
+
#define DATA_DIR_TX BIT(0)
#define DATA_DIR_RX BIT(1)
#define QSPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
#define DEFAULT_QSPI_DMA_BUF_LEN (64 * 1024)
+#define CMD_TRANSFER 0
+#define ADDR_TRANSFER 1
+#define DATA_TRANSFER 2
+
+struct tegra_qspi_soc_data {
+ bool has_dma;
+ bool cmb_xfer_capable;
+};
struct tegra_qspi_client_data {
int tx_clk_tap_delay;
@@ -137,7 +168,6 @@ struct tegra_qspi {
spinlock_t lock;
struct clk *clk;
- struct reset_control *rst;
void __iomem *base;
phys_addr_t phys;
unsigned int irq;
@@ -185,6 +215,7 @@ struct tegra_qspi {
u32 *tx_dma_buf;
dma_addr_t tx_dma_phys;
struct dma_async_tx_descriptor *tx_dma_desc;
+ const struct tegra_qspi_soc_data *soc_data;
};
static inline u32 tegra_qspi_readl(struct tegra_qspi *tqspi, unsigned long offset)
@@ -767,7 +798,7 @@ static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_tran
u32 tx_tap = 0, rx_tap = 0;
int req_mode;
- if (speed != tqspi->cur_speed) {
+ if (!has_acpi_companion(tqspi->dev) && speed != tqspi->cur_speed) {
clk_set_rate(tqspi->clk, speed);
tqspi->cur_speed = speed;
}
@@ -875,16 +906,16 @@ static int tegra_qspi_start_transfer_one(struct spi_device *spi,
static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
{
struct tegra_qspi_client_data *cdata;
- struct device_node *slave_np = spi->dev.of_node;
cdata = devm_kzalloc(&spi->dev, sizeof(*cdata), GFP_KERNEL);
if (!cdata)
return NULL;
- of_property_read_u32(slave_np, "nvidia,tx-clk-tap-delay",
- &cdata->tx_clk_tap_delay);
- of_property_read_u32(slave_np, "nvidia,rx-clk-tap-delay",
- &cdata->rx_clk_tap_delay);
+ device_property_read_u32(&spi->dev, "nvidia,tx-clk-tap-delay",
+ &cdata->tx_clk_tap_delay);
+ device_property_read_u32(&spi->dev, "nvidia,rx-clk-tap-delay",
+ &cdata->rx_clk_tap_delay);
+
return cdata;
}
@@ -906,7 +937,6 @@ static int tegra_qspi_setup(struct spi_device *spi)
cdata = tegra_qspi_parse_cdata_dt(spi);
spi->controller_data = cdata;
}
-
spin_lock_irqsave(&tqspi->lock, flags);
/* keep default cs state to inactive */
@@ -948,9 +978,8 @@ static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg);
tegra_qspi_dump_regs(tqspi);
tegra_qspi_flush_fifos(tqspi, true);
- reset_control_assert(tqspi->rst);
- udelay(2);
- reset_control_deassert(tqspi->rst);
+ if (device_reset(tqspi->dev) < 0)
+ dev_warn_once(tqspi->dev, "device reset failed\n");
}
static void tegra_qspi_transfer_end(struct spi_device *spi)
@@ -966,19 +995,179 @@ static void tegra_qspi_transfer_end(struct spi_device *spi)
tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
}
-static int tegra_qspi_transfer_one_message(struct spi_master *master, struct spi_message *msg)
+static u32 tegra_qspi_cmd_config(bool is_ddr, u8 bus_width, u8 len)
+{
+ u32 cmd_config = 0;
+
+ /* Build the command configuration word */
+ if (is_ddr)
+ cmd_config |= QSPI_COMMAND_SDR_DDR;
+ else
+ cmd_config &= ~QSPI_COMMAND_SDR_DDR;
+
+ cmd_config |= QSPI_COMMAND_X1_X2_X4(bus_width);
+ cmd_config |= QSPI_COMMAND_SIZE_SET((len * 8) - 1);
+
+ return cmd_config;
+}
+
+static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len)
+{
+ u32 addr_config = 0;
+
+ /* Build the address configuration word */
+ is_ddr = 0; /* only SDR mode is supported */
+ bus_width = 0; /* X1 mode only */
+
+ if (is_ddr)
+ addr_config |= QSPI_ADDRESS_SDR_DDR;
+ else
+ addr_config &= ~QSPI_ADDRESS_SDR_DDR;
+
+ addr_config |= QSPI_ADDRESS_X1_X2_X4(bus_width);
+ addr_config |= QSPI_ADDRESS_SIZE_SET((len * 8) - 1);
+
+ return addr_config;
+}
+
+static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
+ struct spi_message *msg)
+{
+ bool is_first_msg = true;
+ struct spi_transfer *xfer;
+ struct spi_device *spi = msg->spi;
+ u8 transfer_phase = 0;
+ u32 cmd1 = 0, dma_ctl = 0;
+ int ret = 0;
+ u32 address_value = 0;
+ u32 cmd_config = 0, addr_config = 0;
+ u8 cmd_value = 0, val = 0;
+
+ /* Enable Combined sequence mode */
+ val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
+ val |= QSPI_CMB_SEQ_EN;
+ tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
+ /* Process individual transfer list */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ switch (transfer_phase) {
+ case CMD_TRANSFER:
+ /* X1 SDR mode */
+ cmd_config = tegra_qspi_cmd_config(false, 0,
+ xfer->len);
+ cmd_value = *((const u8 *)(xfer->tx_buf));
+ break;
+ case ADDR_TRANSFER:
+ /* X1 SDR mode */
+ addr_config = tegra_qspi_addr_config(false, 0,
+ xfer->len);
+ address_value = *((const u32 *)(xfer->tx_buf));
+ break;
+ case DATA_TRANSFER:
+ /* Program Command, Address value in register */
+ tegra_qspi_writel(tqspi, cmd_value, QSPI_CMB_SEQ_CMD);
+ tegra_qspi_writel(tqspi, address_value,
+ QSPI_CMB_SEQ_ADDR);
+ /* Program Command and Address config in register */
+ tegra_qspi_writel(tqspi, cmd_config,
+ QSPI_CMB_SEQ_CMD_CFG);
+ tegra_qspi_writel(tqspi, addr_config,
+ QSPI_CMB_SEQ_ADDR_CFG);
+
+ reinit_completion(&tqspi->xfer_completion);
+ cmd1 = tegra_qspi_setup_transfer_one(spi, xfer,
+ is_first_msg);
+ ret = tegra_qspi_start_transfer_one(spi, xfer,
+ cmd1);
+
+ if (ret < 0) {
+ dev_err(tqspi->dev, "Failed to start transfer-one: %d\n",
+ ret);
+ return ret;
+ }
+
+ is_first_msg = false;
+ ret = wait_for_completion_timeout(&tqspi->xfer_completion,
+ QSPI_DMA_TIMEOUT);
+
+ if (WARN_ON(ret == 0)) {
+ dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n",
+ ret);
+ if (tqspi->is_curr_dma_xfer &&
+ (tqspi->cur_direction & DATA_DIR_TX))
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+
+ if (tqspi->is_curr_dma_xfer &&
+ (tqspi->cur_direction & DATA_DIR_RX))
+ dmaengine_terminate_all(tqspi->rx_dma_chan);
+
+ /* Abort the transfer by clearing the PIO/DMA enable bit */
+ if (!tqspi->is_curr_dma_xfer) {
+ cmd1 = tegra_qspi_readl(tqspi, QSPI_COMMAND1);
+ cmd1 &= ~QSPI_PIO;
+ tegra_qspi_writel(tqspi, cmd1, QSPI_COMMAND1);
+ } else {
+ dma_ctl = tegra_qspi_readl(tqspi, QSPI_DMA_CTL);
+ dma_ctl &= ~QSPI_DMA_EN;
+ tegra_qspi_writel(tqspi, dma_ctl, QSPI_DMA_CTL);
+ }
+
+ /* Reset controller if timeout happens */
+ if (device_reset(tqspi->dev) < 0)
+ dev_warn_once(tqspi->dev,
+ "device reset failed\n");
+ ret = -EIO;
+ goto exit;
+ }
+
+ if (tqspi->tx_status || tqspi->rx_status) {
+ dev_err(tqspi->dev, "QSPI Transfer failed\n");
+ tqspi->tx_status = 0;
+ tqspi->rx_status = 0;
+ ret = -EIO;
+ goto exit;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto exit;
+ }
+ msg->actual_length += xfer->len;
+ transfer_phase++;
+ }
+
+exit:
+ msg->status = ret;
+
+ return ret;
+}
+
+static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
+ struct spi_message *msg)
{
- struct tegra_qspi *tqspi = spi_master_get_devdata(master);
struct spi_device *spi = msg->spi;
struct spi_transfer *transfer;
bool is_first_msg = true;
- int ret;
+ int ret = 0, val = 0;
msg->status = 0;
msg->actual_length = 0;
tqspi->tx_status = 0;
tqspi->rx_status = 0;
+ /* Disable Combined sequence mode */
+ val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
+ val &= ~QSPI_CMB_SEQ_EN;
+ tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
list_for_each_entry(transfer, &msg->transfers, transfer_list) {
struct spi_transfer *xfer = transfer;
u8 dummy_bytes = 0;
@@ -1016,7 +1205,6 @@ static int tegra_qspi_transfer_one_message(struct spi_master *master, struct spi
goto complete_xfer;
}
- is_first_msg = false;
ret = wait_for_completion_timeout(&tqspi->xfer_completion,
QSPI_DMA_TIMEOUT);
if (WARN_ON(ret == 0)) {
@@ -1061,7 +1249,48 @@ complete_xfer:
ret = 0;
exit:
msg->status = ret;
+
+ return ret;
+}
+
+static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
+ struct spi_message *msg)
+{
+ int transfer_count = 0;
+ struct spi_transfer *xfer;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ transfer_count++;
+ }
+ if (!tqspi->soc_data->cmb_xfer_capable || transfer_count != 3)
+ return false;
+ xfer = list_first_entry(&msg->transfers, typeof(*xfer),
+ transfer_list);
+ if (xfer->len > 2)
+ return false;
+ xfer = list_next_entry(xfer, transfer_list);
+ if (xfer->len > 4 || xfer->len < 3)
+ return false;
+ xfer = list_next_entry(xfer, transfer_list);
+ if (!tqspi->soc_data->has_dma || xfer->len > (QSPI_FIFO_DEPTH << 2))
+ return false;
+
+ return true;
+}
+
+static int tegra_qspi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+ int ret;
+
+ if (tegra_qspi_validate_cmb_seq(tqspi, msg))
+ ret = tegra_qspi_combined_seq_xfer(tqspi, msg);
+ else
+ ret = tegra_qspi_non_combined_seq_xfer(tqspi, msg);
+
spi_finalize_current_message(master);
+
return ret;
}
@@ -1193,15 +1422,58 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
return handle_dma_based_xfer(tqspi);
}
+static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
+ .has_dma = true,
+ .cmb_xfer_capable = false,
+};
+
+static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
+ .has_dma = true,
+ .cmb_xfer_capable = true,
+};
+
+static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
+ .has_dma = false,
+ .cmb_xfer_capable = true,
+};
+
static const struct of_device_id tegra_qspi_of_match[] = {
- { .compatible = "nvidia,tegra210-qspi", },
- { .compatible = "nvidia,tegra186-qspi", },
- { .compatible = "nvidia,tegra194-qspi", },
+ {
+ .compatible = "nvidia,tegra210-qspi",
+ .data = &tegra210_qspi_soc_data,
+ }, {
+ .compatible = "nvidia,tegra186-qspi",
+ .data = &tegra186_qspi_soc_data,
+ }, {
+ .compatible = "nvidia,tegra194-qspi",
+ .data = &tegra186_qspi_soc_data,
+ }, {
+ .compatible = "nvidia,tegra234-qspi",
+ .data = &tegra234_qspi_soc_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, tegra_qspi_of_match);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id tegra_qspi_acpi_match[] = {
+ {
+ .id = "NVDA1213",
+ .driver_data = (kernel_ulong_t)&tegra210_qspi_soc_data,
+ }, {
+ .id = "NVDA1313",
+ .driver_data = (kernel_ulong_t)&tegra186_qspi_soc_data,
+ }, {
+ .id = "NVDA1413",
+ .driver_data = (kernel_ulong_t)&tegra234_qspi_soc_data,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, tegra_qspi_acpi_match);
+#endif
+
static int tegra_qspi_probe(struct platform_device *pdev)
{
struct spi_master *master;
@@ -1233,6 +1505,7 @@ static int tegra_qspi_probe(struct platform_device *pdev)
tqspi->dev = &pdev->dev;
spin_lock_init(&tqspi->lock);
+ tqspi->soc_data = device_get_match_data(&pdev->dev);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
tqspi->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(tqspi->base))
@@ -1240,20 +1513,18 @@ static int tegra_qspi_probe(struct platform_device *pdev)
tqspi->phys = r->start;
qspi_irq = platform_get_irq(pdev, 0);
+ if (qspi_irq < 0)
+ return qspi_irq;
tqspi->irq = qspi_irq;
- tqspi->clk = devm_clk_get(&pdev->dev, "qspi");
- if (IS_ERR(tqspi->clk)) {
- ret = PTR_ERR(tqspi->clk);
- dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
- return ret;
- }
+ if (!has_acpi_companion(tqspi->dev)) {
+ tqspi->clk = devm_clk_get(&pdev->dev, "qspi");
+ if (IS_ERR(tqspi->clk)) {
+ ret = PTR_ERR(tqspi->clk);
+ dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
+ return ret;
+ }
- tqspi->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(tqspi->rst)) {
- ret = PTR_ERR(tqspi->rst);
- dev_err(&pdev->dev, "failed to get reset control: %d\n", ret);
- return ret;
}
tqspi->max_buf_size = QSPI_FIFO_DEPTH << 2;
@@ -1277,9 +1548,8 @@ static int tegra_qspi_probe(struct platform_device *pdev)
goto exit_pm_disable;
}
- reset_control_assert(tqspi->rst);
- udelay(2);
- reset_control_deassert(tqspi->rst);
+ if (device_reset(tqspi->dev) < 0)
+ dev_warn_once(tqspi->dev, "device reset failed\n");
tqspi->def_command1_reg = QSPI_M_S | QSPI_CS_SW_HW | QSPI_CS_SW_VAL;
tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
@@ -1358,6 +1628,9 @@ static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+ /* Runtime PM is disabled when booted via ACPI */
+ if (has_acpi_companion(tqspi->dev))
+ return 0;
/* flush all write which are in PPSB queue by reading back */
tegra_qspi_readl(tqspi, QSPI_COMMAND1);
@@ -1372,6 +1645,9 @@ static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev)
struct tegra_qspi *tqspi = spi_master_get_devdata(master);
int ret;
+ /* Runtime PM is disabled when booted via ACPI */
+ if (has_acpi_companion(tqspi->dev))
+ return 0;
ret = clk_prepare_enable(tqspi->clk);
if (ret < 0)
dev_err(tqspi->dev, "failed to enable clock: %d\n", ret);
@@ -1389,6 +1665,7 @@ static struct platform_driver tegra_qspi_driver = {
.name = "tegra-qspi",
.pm = &tegra_qspi_pm_ops,
.of_match_table = tegra_qspi_of_match,
+ .acpi_match_table = ACPI_PTR(tegra_qspi_acpi_match),
},
.probe = tegra_qspi_probe,
.remove = tegra_qspi_remove,
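tegra_qspi_validate_cmb_seq() only routes a message through the new combined-sequence engine when it contains exactly three transfers: an opcode of at most 2 bytes, an address of 3 or 4 bytes, and a data phase that fits the FIFO (DMA permitting). A hedged sketch of a message shape that would take that path, with hypothetical buffers; note the driver reads the address transfer as a raw u32, so this assumes the caller's byte order matches:

```c
#include <linux/spi/spi.h>

/* Hypothetical flash read built as a cmd/addr/data triple. */
static int example_cmb_read(struct spi_device *spi, u8 opcode, u32 addr,
			    void *buf, size_t len)
{
	struct spi_transfer xfers[3] = {
		{ .tx_buf = &opcode, .len = 1 },   /* CMD_TRANSFER  */
		{ .tx_buf = &addr,   .len = 3 },   /* ADDR_TRANSFER */
		{ .rx_buf = buf,     .len = len }, /* DATA_TRANSFER */
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
	return spi_sync(spi, &msg);
}
```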
diff --git a/drivers/spi/spi-tle62x0.c b/drivers/spi/spi-tle62x0.c
index f8ad0709d015..a565352f6381 100644
--- a/drivers/spi/spi-tle62x0.c
+++ b/drivers/spi/spi-tle62x0.c
@@ -288,7 +288,7 @@ static int tle62x0_probe(struct spi_device *spi)
return ret;
}
-static int tle62x0_remove(struct spi_device *spi)
+static void tle62x0_remove(struct spi_device *spi)
{
struct tle62x0_state *st = spi_get_drvdata(spi);
int ptr;
@@ -298,7 +298,6 @@ static int tle62x0_remove(struct spi_device *spi)
device_remove_file(&spi->dev, &dev_attr_status_show);
kfree(st);
- return 0;
}
static struct spi_driver tle62x0_driver = {
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 8c4615b76339..dfaa1d79a78b 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -103,6 +103,7 @@
static int use_dma = 1;
struct pch_spi_dma_ctrl {
+ struct pci_dev *dma_dev;
struct dma_async_tx_descriptor *desc_tx;
struct dma_async_tx_descriptor *desc_rx;
struct pch_dma_slave param_tx;
@@ -876,8 +877,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
if (!chan) {
dev_err(&data->master->dev,
"ERROR: dma_request_channel FAILS(Tx)\n");
- data->use_dma = 0;
- return;
+ goto out;
}
dma->chan_tx = chan;
@@ -893,10 +893,15 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
"ERROR: dma_request_channel FAILS(Rx)\n");
dma_release_channel(dma->chan_tx);
dma->chan_tx = NULL;
- data->use_dma = 0;
- return;
+ goto out;
}
dma->chan_rx = chan;
+
+ dma->dma_dev = dma_dev;
+ return;
+out:
+ pci_dev_put(dma_dev);
+ data->use_dma = 0;
}
static void pch_spi_release_dma(struct pch_spi_data *data)
@@ -912,6 +917,8 @@ static void pch_spi_release_dma(struct pch_spi_data *data)
dma_release_channel(dma->chan_rx);
dma->chan_rx = NULL;
}
+
+ pci_dev_put(dma->dma_dev);
}
static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
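The topcliff-pch change is a reference-count fix: the pci_dev obtained for the DMA engine carries a reference that must be dropped on every path, including the newly added error exits. A minimal sketch of the balanced get/put discipline; the vendor/device IDs here are placeholders:

```c
#include <linux/pci.h>

static int example_find_dma_companion(void)
{
	struct pci_dev *dma_dev;

	/* pci_get_device() returns the device with its refcount raised. */
	dma_dev = pci_get_device(0x8086, 0x8815, NULL); /* assumed IDs */
	if (!dma_dev)
		return -ENODEV;

	/* ... request channels, which may fail ... */

	pci_dev_put(dma_dev); /* exactly one put per successful get */
	return 0;
}
```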
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 328b6559bb19..2b5afae8ff7f 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -1172,7 +1172,10 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
goto clk_dis_all;
}
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ if (ret)
+ goto clk_dis_all;
+
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS;
ctlr->mem_ops = &zynqmp_qspi_mem_ops;
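The zynqmp fix encodes the rule that dma_set_mask() can fail and its return value must not be ignored, since later mappings would otherwise assume a mask the platform never accepted. The same checked pattern as a hypothetical probe fragment:

```c
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int example_dma_setup(struct platform_device *pdev)
{
	int ret;

	/* The GQSPI DMA engine can address 44 bits; bail out otherwise. */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "44-bit DMA mask not supported\n");
	return 0;
}
```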
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 4599b121d744..c4dd1200fe99 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -18,7 +18,6 @@
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
-#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
@@ -144,7 +143,7 @@ static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
unsigned long flags; \
ssize_t len; \
spin_lock_irqsave(&stat->lock, flags); \
- len = sprintf(buf, format_string, stat->field); \
+ len = sysfs_emit(buf, format_string "\n", stat->field); \
spin_unlock_irqrestore(&stat->lock, flags); \
return len; \
} \
@@ -404,15 +403,8 @@ static void spi_remove(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
- if (sdrv->remove) {
- int ret;
-
- ret = sdrv->remove(to_spi_device(dev));
- if (ret)
- dev_warn(dev,
- "Failed to unbind driver (%pe), ignoring\n",
- ERR_PTR(ret));
- }
+ if (sdrv->remove)
+ sdrv->remove(to_spi_device(dev));
dev_pm_domain_detach(dev, true);
}
@@ -532,7 +524,7 @@ static DEFINE_MUTEX(board_lock);
*
* Return: a pointer to the new device, or NULL.
*/
-static struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
+struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
struct spi_device *spi;
@@ -549,7 +541,6 @@ static struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
spi->dev.parent = &ctlr->dev;
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
- spi->cs_gpio = -ENOENT;
spi->mode = ctlr->buswidth_override_bits;
spin_lock_init(&spi->statistics.lock);
@@ -557,6 +548,7 @@ static struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
device_initialize(&spi->dev);
return spi;
}
+EXPORT_SYMBOL_GPL(spi_alloc_device);
static void spi_dev_set_name(struct spi_device *spi)
{
@@ -612,11 +604,8 @@ static int __spi_add_device(struct spi_device *spi)
return -ENODEV;
}
- /* Descriptors take precedence */
if (ctlr->cs_gpiods)
spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
- else if (ctlr->cs_gpios)
- spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
/*
* Drivers may modify this initial i/o setup, but will
@@ -652,7 +641,7 @@ static int __spi_add_device(struct spi_device *spi)
*
* Return: 0 on success; negative errno on failure
*/
-static int spi_add_device(struct spi_device *spi)
+int spi_add_device(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
struct device *dev = ctlr->dev.parent;
@@ -673,6 +662,7 @@ static int spi_add_device(struct spi_device *spi)
mutex_unlock(&ctlr->add_lock);
return status;
}
+EXPORT_SYMBOL_GPL(spi_add_device);
static int spi_add_device_locked(struct spi_device *spi)
{
@@ -936,48 +926,40 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
* Avoid calling into the driver (or doing delays) if the chip select
* isn't actually changing from the last time this was called.
*/
- if (!force && (spi->controller->last_cs_enable == enable) &&
+ if (!force && ((enable && spi->controller->last_cs == spi->chip_select) ||
+ (!enable && spi->controller->last_cs != spi->chip_select)) &&
(spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
return;
trace_spi_set_cs(spi, activate);
- spi->controller->last_cs_enable = enable;
+ spi->controller->last_cs = enable ? spi->chip_select : -1;
spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
- if ((spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
- !spi->controller->set_cs_timing) && !activate) {
+ if ((spi->cs_gpiod || !spi->controller->set_cs_timing) && !activate) {
spi_delay_exec(&spi->cs_hold, NULL);
}
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
- if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
+ if (spi->cs_gpiod) {
if (!(spi->mode & SPI_NO_CS)) {
- if (spi->cs_gpiod) {
- /*
- * Historically ACPI has no means of the GPIO polarity and
- * thus the SPISerialBus() resource defines it on the per-chip
- * basis. In order to avoid a chain of negations, the GPIO
- * polarity is considered being Active High. Even for the cases
- * when _DSD() is involved (in the updated versions of ACPI)
- * the GPIO CS polarity must be defined Active High to avoid
- * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
- * into account.
- */
- if (has_acpi_companion(&spi->dev))
- gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
- else
- /* Polarity handled by GPIO library */
- gpiod_set_value_cansleep(spi->cs_gpiod, activate);
- } else {
- /*
- * Invert the enable line, as active low is
- * default for SPI.
- */
- gpio_set_value_cansleep(spi->cs_gpio, !enable);
- }
+ /*
+ * Historically ACPI has no means of expressing the GPIO polarity,
+ * so the SPISerialBus() resource defines it on a per-chip basis.
+ * In order to avoid a chain of negations, the GPIO polarity is
+ * considered to be Active High. Even for the cases where _DSD()
+ * is involved (in the updated versions of ACPI) the GPIO CS
+ * polarity must be defined Active High to avoid ambiguity.
+ * That's why we use enable, which takes SPI_CS_HIGH into account.
+ */
+ if (has_acpi_companion(&spi->dev))
+ gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
+ else
+ /* Polarity handled by GPIO library */
+ gpiod_set_value_cansleep(spi->cs_gpiod, activate);
}
/* Some SPI masters need both GPIO CS & slave_select */
if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
@@ -987,8 +969,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
spi->controller->set_cs(spi, !enable);
}
- if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
- !spi->controller->set_cs_timing) {
+ if (spi->cs_gpiod || !spi->controller->set_cs_timing) {
if (activate)
spi_delay_exec(&spi->cs_setup, NULL);
else
@@ -1019,10 +1000,10 @@ int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
int i, ret;
if (vmalloced_buf || kmap_buf) {
- desc_len = min_t(int, max_seg_size, PAGE_SIZE);
+ desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
} else if (virt_addr_valid(buf)) {
- desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
+ desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
sgs = DIV_ROUND_UP(len, desc_len);
} else {
return -EINVAL;
@@ -2318,8 +2299,50 @@ struct acpi_spi_lookup {
int irq;
u8 bits_per_word;
u8 chip_select;
+ int n;
+ int index;
};
+static int acpi_spi_count(struct acpi_resource *ares, void *data)
+{
+ struct acpi_resource_spi_serialbus *sb;
+ int *count = data;
+
+ if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+ return 1;
+
+ sb = &ares->data.spi_serial_bus;
+ if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
+ return 1;
+
+ *count = *count + 1;
+
+ return 1;
+}
+
+/**
+ * acpi_spi_count_resources - Count the number of SpiSerialBus resources
+ * @adev: ACPI device
+ *
+ * Return: the number of SpiSerialBus resources in the ACPI device's
+ * resource list, or a negative error code.
+ */
+int acpi_spi_count_resources(struct acpi_device *adev)
+{
+ LIST_HEAD(r);
+ int count = 0;
+ int ret;
+
+ ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
+ if (ret < 0)
+ return ret;
+
+ acpi_dev_free_resource_list(&r);
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
+
static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
struct acpi_spi_lookup *lookup)
{
@@ -2349,6 +2372,8 @@ static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
lookup->mode |= SPI_CPHA;
}
+static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
+
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
struct acpi_spi_lookup *lookup = data;
@@ -2362,14 +2387,35 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
sb = &ares->data.spi_serial_bus;
if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
+ if (lookup->index != -1 && lookup->n++ != lookup->index)
+ return 1;
+
+ if (lookup->index == -1 && !ctlr)
+ return -ENODEV;
+
status = acpi_get_handle(NULL,
sb->resource_source.string_ptr,
&parent_handle);
- if (ACPI_FAILURE(status) ||
- ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
+ if (ACPI_FAILURE(status))
return -ENODEV;
+ if (ctlr) {
+ if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
+ return -ENODEV;
+ } else {
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(parent_handle, &adev))
+ return -ENODEV;
+
+ ctlr = acpi_spi_find_controller_by_adev(adev);
+ if (!ctlr)
+ return -ENODEV;
+
+ lookup->ctlr = ctlr;
+ }
+
/*
* ACPI DeviceSelection numbering is handled by the
* host controller driver in Windows and can vary
@@ -2408,8 +2454,25 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
return 1;
}
-static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
- struct acpi_device *adev)
+/**
+ * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
+ * @ctlr: controller to which the spi device belongs
+ * @adev: ACPI Device for the spi device
+ * @index: Index of the spi resource inside the ACPI Node
+ *
+ * This should be used to allocate a new spi device from an ACPI node.
+ * The caller is responsible for calling spi_add_device() to register the spi device.
+ *
+ * If ctlr is set to NULL, the controller for the spi device will be looked up
+ * using the resource.
+ * If index is set to -1, the index is not used.
+ * Note: if index is -1, ctlr must be set.
+ *
+ * Return: a pointer to the new device, or ERR_PTR on error.
+ */
+struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
+ struct acpi_device *adev,
+ int index)
{
acpi_handle parent_handle = NULL;
struct list_head resource_list;
@@ -2417,12 +2480,13 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
struct spi_device *spi;
int ret;
- if (acpi_bus_get_status(adev) || !adev->status.present ||
- acpi_device_enumerated(adev))
- return AE_OK;
+ if (!ctlr && index == -1)
+ return ERR_PTR(-EINVAL);
lookup.ctlr = ctlr;
lookup.irq = -1;
+ lookup.index = index;
+ lookup.n = 0;
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list,
@@ -2431,26 +2495,25 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
if (ret < 0)
/* found SPI in _CRS but it points to another controller */
- return AE_OK;
+ return ERR_PTR(-ENODEV);
if (!lookup.max_speed_hz &&
ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
- ACPI_HANDLE(ctlr->dev.parent) == parent_handle) {
+ ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
/* Apple does not use _CRS but nested devices for SPI slaves */
acpi_spi_parse_apple_properties(adev, &lookup);
}
if (!lookup.max_speed_hz)
- return AE_OK;
+ return ERR_PTR(-ENODEV);
- spi = spi_alloc_device(ctlr);
+ spi = spi_alloc_device(lookup.ctlr);
if (!spi) {
- dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
+ dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
dev_name(&adev->dev));
- return AE_NO_MEMORY;
+ return ERR_PTR(-ENOMEM);
}
-
ACPI_COMPANION_SET(&spi->dev, adev);
spi->max_speed_hz = lookup.max_speed_hz;
spi->mode |= lookup.mode;
@@ -2458,6 +2521,27 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
spi->bits_per_word = lookup.bits_per_word;
spi->chip_select = lookup.chip_select;
+ return spi;
+}
+EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
+
+static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
+ struct acpi_device *adev)
+{
+ struct spi_device *spi;
+
+ if (acpi_bus_get_status(adev) || !adev->status.present ||
+ acpi_device_enumerated(adev))
+ return AE_OK;
+
+ spi = acpi_spi_device_alloc(ctlr, adev, -1);
+ if (IS_ERR(spi)) {
+ if (PTR_ERR(spi) == -ENOMEM)
+ return AE_NO_MEMORY;
+ else
+ return AE_OK;
+ }
+
acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
sizeof(spi->modalias));
@@ -2480,10 +2564,10 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
void *data, void **return_value)
{
+ struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
struct spi_controller *ctlr = data;
- struct acpi_device *adev;
- if (acpi_bus_get_device(handle, &adev))
+ if (!adev)
return AE_OK;
return acpi_register_spi_device(ctlr, adev);
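Taken together, the exports added in this hunk (acpi_spi_count_resources(), acpi_spi_device_alloc(), spi_add_device()) let a driver enumerate and instantiate every SpiSerialBus entry of an ACPI node itself. A hedged consumer sketch, with error handling trimmed; the caller registers each device, as the kernel-doc above requires:

```c
#include <linux/acpi.h>
#include <linux/spi/spi.h>

static int example_enum_acpi_spi(struct acpi_device *adev)
{
	struct spi_device *spi;
	int i, count;

	count = acpi_spi_count_resources(adev);
	if (count <= 0)
		return count;

	for (i = 0; i < count; i++) {
		/* ctlr == NULL: the controller is looked up per resource. */
		spi = acpi_spi_device_alloc(NULL, adev, i);
		if (IS_ERR(spi))
			return PTR_ERR(spi);

		if (spi_add_device(spi)) {
			spi_dev_put(spi);
			return -ENODEV;
		}
	}
	return 0;
}
```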
@@ -2729,46 +2813,6 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
}
EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
-#ifdef CONFIG_OF
-static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
-{
- int nb, i, *cs;
- struct device_node *np = ctlr->dev.of_node;
-
- if (!np)
- return 0;
-
- nb = of_gpio_named_count(np, "cs-gpios");
- ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
-
- /* Return error only for an incorrectly formed cs-gpios property */
- if (nb == 0 || nb == -ENOENT)
- return 0;
- else if (nb < 0)
- return nb;
-
- cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
- GFP_KERNEL);
- ctlr->cs_gpios = cs;
-
- if (!ctlr->cs_gpios)
- return -ENOMEM;
-
- for (i = 0; i < ctlr->num_chipselect; i++)
- cs[i] = -ENOENT;
-
- for (i = 0; i < nb; i++)
- cs[i] = of_get_named_gpio(np, "cs-gpios", i);
-
- return 0;
-}
-#else
-static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
-{
- return 0;
-}
-#endif
-
/**
* spi_get_gpio_descs() - grab chip select GPIOs for the master
* @ctlr: The SPI master to grab GPIO descriptors for
@@ -2953,22 +2997,15 @@ int spi_register_controller(struct spi_controller *ctlr)
*/
dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
- if (!spi_controller_is_slave(ctlr)) {
- if (ctlr->use_gpio_descriptors) {
- status = spi_get_gpio_descs(ctlr);
- if (status)
- goto free_bus_id;
- /*
- * A controller using GPIO descriptors always
- * supports SPI_CS_HIGH if need be.
- */
- ctlr->mode_bits |= SPI_CS_HIGH;
- } else {
- /* Legacy code path for GPIOs from DT */
- status = of_spi_get_gpio_numbers(ctlr);
- if (status)
- goto free_bus_id;
- }
+ if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
+ status = spi_get_gpio_descs(ctlr);
+ if (status)
+ goto free_bus_id;
+ /*
+ * A controller using GPIO descriptors always
+ * supports SPI_CS_HIGH if need be.
+ */
+ ctlr->mode_bits |= SPI_CS_HIGH;
}
/*
@@ -2980,6 +3017,9 @@ int spi_register_controller(struct spi_controller *ctlr)
goto free_bus_id;
}
+ /* setting last_cs to -1 means no chip selected */
+ ctlr->last_cs = -1;
+
status = device_add(&ctlr->dev);
if (status < 0)
goto free_bus_id;
@@ -3457,12 +3497,6 @@ int spi_setup(struct spi_device *spi)
*/
bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
SPI_NO_TX | SPI_NO_RX);
- /*
- * Nothing prevents from working with active-high CS in case if it
- * is driven by GPIO.
- */
- if (gpio_is_valid(spi->cs_gpio))
- bad_bits &= ~SPI_CS_HIGH;
ugly_bits = bad_bits &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
@@ -3588,8 +3622,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
* cs_change is set for each transfer.
*/
if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
- spi->cs_gpiod ||
- gpio_is_valid(spi->cs_gpio))) {
+ spi->cs_gpiod)) {
size_t maxsize;
int ret;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index a5cceca8b82b..53a551714265 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -453,22 +453,29 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
dev_dbg(&spi->dev, "%d bits per word\n", tmp);
}
break;
- case SPI_IOC_WR_MAX_SPEED_HZ:
+ case SPI_IOC_WR_MAX_SPEED_HZ: {
+ u32 save;
+
retval = get_user(tmp, (__u32 __user *)arg);
- if (retval == 0) {
- u32 save = spi->max_speed_hz;
+ if (retval)
+ break;
+ if (tmp == 0) {
+ retval = -EINVAL;
+ break;
+ }
- spi->max_speed_hz = tmp;
- retval = spi_setup(spi);
- if (retval == 0) {
- spidev->speed_hz = tmp;
- dev_dbg(&spi->dev, "%d Hz (max)\n",
- spidev->speed_hz);
- }
- spi->max_speed_hz = save;
+ save = spi->max_speed_hz;
+
+ spi->max_speed_hz = tmp;
+ retval = spi_setup(spi);
+ if (retval == 0) {
+ spidev->speed_hz = tmp;
+ dev_dbg(&spi->dev, "%d Hz (max)\n", spidev->speed_hz);
}
- break;
+ spi->max_speed_hz = save;
+ break;
+ }
default:
/* segmented and/or full-duplex I/O request */
/* Check message and copy into scratch area */
@@ -803,7 +810,7 @@ static int spidev_probe(struct spi_device *spi)
return status;
}
-static int spidev_remove(struct spi_device *spi)
+static void spidev_remove(struct spi_device *spi)
{
struct spidev_data *spidev = spi_get_drvdata(spi);
@@ -820,8 +827,6 @@ static int spidev_remove(struct spi_device *spi)
if (spidev->users == 0)
kfree(spidev);
mutex_unlock(&device_list_lock);
-
- return 0;
}
static struct spi_driver spidev_spi_driver = {
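tle62x0 (and spidev below) follow the tree-wide conversion of struct spi_driver::remove to return void: the bus ignored the returned status anyway, so cleanup failures must be logged rather than propagated. A sketch of the new callback shape, with hypothetical driver data and helper:

```c
#include <linux/spi/spi.h>

static void example_remove(struct spi_device *spi)
{
	struct example_priv *priv = spi_get_drvdata(spi); /* hypothetical */

	/* Nothing to return: log problems, never abort the unbind. */
	if (example_shutdown_hw(priv)) /* hypothetical helper */
		dev_warn(&spi->dev, "hardware did not shut down cleanly\n");
}
```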
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 55677efc0138..b68f5f9b7c78 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -272,21 +272,39 @@ void fbtft_write_reg8_bus9(struct fbtft_par *par, int len, ...);
void fbtft_write_reg16_bus8(struct fbtft_par *par, int len, ...);
void fbtft_write_reg16_bus16(struct fbtft_par *par, int len, ...);
+#define FBTFT_DT_TABLE(_compatible) \
+static const struct of_device_id dt_ids[] = { \
+ { .compatible = _compatible }, \
+ {}, \
+}; \
+MODULE_DEVICE_TABLE(of, dt_ids);
+
+#define FBTFT_SPI_DRIVER(_name, _compatible, _display, _spi_ids) \
+ \
+static int fbtft_driver_probe_spi(struct spi_device *spi) \
+{ \
+ return fbtft_probe_common(_display, spi, NULL); \
+} \
+ \
+static void fbtft_driver_remove_spi(struct spi_device *spi) \
+{ \
+ struct fb_info *info = spi_get_drvdata(spi); \
+ \
+ fbtft_remove_common(&spi->dev, info); \
+} \
+ \
+static struct spi_driver fbtft_driver_spi_driver = { \
+ .driver = { \
+ .name = _name, \
+ .of_match_table = dt_ids, \
+ }, \
+ .id_table = _spi_ids, \
+ .probe = fbtft_driver_probe_spi, \
+ .remove = fbtft_driver_remove_spi, \
+};
+
#define FBTFT_REGISTER_DRIVER(_name, _compatible, _display) \
\
-static int fbtft_driver_probe_spi(struct spi_device *spi) \
-{ \
- return fbtft_probe_common(_display, spi, NULL); \
-} \
- \
-static int fbtft_driver_remove_spi(struct spi_device *spi) \
-{ \
- struct fb_info *info = spi_get_drvdata(spi); \
- \
- fbtft_remove_common(&spi->dev, info); \
- return 0; \
-} \
- \
static int fbtft_driver_probe_pdev(struct platform_device *pdev) \
{ \
return fbtft_probe_common(_display, NULL, pdev); \
@@ -300,22 +318,9 @@ static int fbtft_driver_remove_pdev(struct platform_device *pdev) \
return 0; \
} \
\
-static const struct of_device_id dt_ids[] = { \
- { .compatible = _compatible }, \
- {}, \
-}; \
- \
-MODULE_DEVICE_TABLE(of, dt_ids); \
+FBTFT_DT_TABLE(_compatible) \
\
- \
-static struct spi_driver fbtft_driver_spi_driver = { \
- .driver = { \
- .name = _name, \
- .of_match_table = dt_ids, \
- }, \
- .probe = fbtft_driver_probe_spi, \
- .remove = fbtft_driver_remove_spi, \
-}; \
+FBTFT_SPI_DRIVER(_name, _compatible, _display, NULL) \
\
static struct platform_driver fbtft_driver_platform_driver = { \
.driver = { \
@@ -351,42 +356,15 @@ module_exit(fbtft_driver_module_exit);
#define FBTFT_REGISTER_SPI_DRIVER(_name, _comp_vend, _comp_dev, _display) \
\
-static int fbtft_driver_probe_spi(struct spi_device *spi) \
-{ \
- return fbtft_probe_common(_display, spi, NULL); \
-} \
- \
-static int fbtft_driver_remove_spi(struct spi_device *spi) \
-{ \
- struct fb_info *info = spi_get_drvdata(spi); \
- \
- fbtft_remove_common(&spi->dev, info); \
- return 0; \
-} \
- \
-static const struct of_device_id dt_ids[] = { \
- { .compatible = _comp_vend "," _comp_dev }, \
- {}, \
-}; \
- \
-MODULE_DEVICE_TABLE(of, dt_ids); \
+FBTFT_DT_TABLE(_comp_vend "," _comp_dev) \
\
static const struct spi_device_id spi_ids[] = { \
{ .name = _comp_dev }, \
{}, \
}; \
- \
MODULE_DEVICE_TABLE(spi, spi_ids); \
\
-static struct spi_driver fbtft_driver_spi_driver = { \
- .driver = { \
- .name = _name, \
- .of_match_table = dt_ids, \
- }, \
- .id_table = spi_ids, \
- .probe = fbtft_driver_probe_spi, \
- .remove = fbtft_driver_remove_spi, \
-}; \
+FBTFT_SPI_DRIVER(_name, _comp_vend "," _comp_dev, _display, spi_ids) \
\
module_spi_driver(fbtft_driver_spi_driver);
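With the probe/remove bodies and the spi_driver definition factored into FBTFT_SPI_DRIVER(), a panel driver built on FBTFT_REGISTER_SPI_DRIVER() shrinks to little more than its display description. A hypothetical example; real drivers also fill in fbtftops and init sequences:

```c
#include "fbtft.h"

static struct fbtft_display display = {
	.regwidth = 8,
	.width = 240,
	.height = 320,
};

FBTFT_REGISTER_SPI_DRIVER("fb_ex9341", "example", "ex9341", &display);

MODULE_DESCRIPTION("FB driver for a hypothetical ex9341 panel");
MODULE_LICENSE("GPL");
```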
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index 493ed4821515..8ebb21d4b24b 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -76,14 +76,15 @@ static void tx_complete(void *arg)
static int gdm_lte_rx(struct sk_buff *skb, struct nic *nic, int nic_type)
{
- int ret;
+ int ret, len;
- ret = netif_rx_ni(skb);
+ len = skb->len + ETH_HLEN;
+ ret = netif_rx(skb);
if (ret == NET_RX_DROP) {
nic->stats.rx_dropped++;
} else {
nic->stats.rx_packets++;
- nic->stats.rx_bytes += skb->len + ETH_HLEN;
+ nic->stats.rx_bytes += len;
}
return 0;
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
index 7e6347fe93f9..8a7cf1d0e968 100644
--- a/drivers/staging/greybus/gpio.c
+++ b/drivers/staging/greybus/gpio.c
@@ -391,10 +391,7 @@ static int gb_gpio_request_handler(struct gb_operation *op)
return -EINVAL;
}
- local_irq_disable();
- ret = generic_handle_irq(irq);
- local_irq_enable();
-
+ ret = generic_handle_irq_safe(irq);
if (ret)
dev_err(dev, "failed to invoke irq handler\n");
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index b81cfa74edb7..1fd6a0c6e1d8 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -36,8 +36,6 @@ source "drivers/staging/media/rkvdec/Kconfig"
source "drivers/staging/media/sunxi/Kconfig"
-source "drivers/staging/media/tegra-vde/Kconfig"
-
source "drivers/staging/media/zoran/Kconfig"
source "drivers/staging/media/tegra-video/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 7e2c86e3695d..66d6f6d51c86 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -7,7 +7,6 @@ obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
obj-$(CONFIG_VIDEO_ROCKCHIP_VDEC) += rkvdec/
obj-$(CONFIG_VIDEO_SUNXI) += sunxi/
obj-$(CONFIG_VIDEO_TEGRA) += tegra-video/
-obj-$(CONFIG_TEGRA_VDE) += tegra-vde/
obj-$(CONFIG_VIDEO_HANTRO) += hantro/
obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3/
obj-$(CONFIG_VIDEO_ZORAN) += zoran/
diff --git a/drivers/staging/media/atomisp/Kconfig b/drivers/staging/media/atomisp/Kconfig
index aeed5803dfb1..2c8d7fdcc5f7 100644
--- a/drivers/staging/media/atomisp/Kconfig
+++ b/drivers/staging/media/atomisp/Kconfig
@@ -11,7 +11,7 @@ menuconfig INTEL_ATOMISP
config VIDEO_ATOMISP
tristate "Intel Atom Image Signal Processor Driver"
- depends on VIDEO_V4L2 && INTEL_ATOMISP
+ depends on VIDEO_DEV && INTEL_ATOMISP
depends on PMIC_OPREGION
select IOSF_MBI
select VIDEOBUF_VMALLOC
diff --git a/drivers/staging/media/atomisp/i2c/Kconfig b/drivers/staging/media/atomisp/i2c/Kconfig
index a772b833a85f..e726101b24e4 100644
--- a/drivers/staging/media/atomisp/i2c/Kconfig
+++ b/drivers/staging/media/atomisp/i2c/Kconfig
@@ -6,7 +6,7 @@
config VIDEO_ATOMISP_OV2722
tristate "OVT ov2722 sensor support"
depends on ACPI
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
help
This is a Video4Linux2 sensor-level driver for the OVT
OV2722 raw camera.
@@ -18,7 +18,7 @@ config VIDEO_ATOMISP_OV2722
config VIDEO_ATOMISP_GC2235
tristate "Galaxy gc2235 sensor support"
depends on ACPI
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
help
This is a Video4Linux2 sensor-level driver for the OVT
GC2235 raw camera.
@@ -40,7 +40,7 @@ config VIDEO_ATOMISP_MSRLIST_HELPER
config VIDEO_ATOMISP_MT9M114
tristate "Aptina mt9m114 sensor support"
depends on ACPI
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
help
This is a Video4Linux2 sensor-level driver for the Micron
mt9m114 1.3 Mpixel camera.
@@ -52,7 +52,7 @@ config VIDEO_ATOMISP_MT9M114
config VIDEO_ATOMISP_GC0310
tristate "GC0310 sensor support"
depends on ACPI
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
help
This is a Video4Linux2 sensor-level driver for the Galaxycore
GC0310 0.3MP sensor.
@@ -60,7 +60,7 @@ config VIDEO_ATOMISP_GC0310
config VIDEO_ATOMISP_OV2680
tristate "Omnivision OV2680 sensor support"
depends on ACPI
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
help
This is a Video4Linux2 sensor-level driver for the Omnivision
OV2680 raw camera.
@@ -72,7 +72,7 @@ config VIDEO_ATOMISP_OV2680
config VIDEO_ATOMISP_OV5693
tristate "Omnivision ov5693 sensor support"
depends on ACPI
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_DEV
help
This is a Video4Linux2 sensor-level driver for the Micron
ov5693 5 Mpixel camera.
@@ -88,7 +88,7 @@ config VIDEO_ATOMISP_OV5693
config VIDEO_ATOMISP_LM3554
tristate "LM3554 flash light driver"
depends on ACPI
- depends on VIDEO_V4L2 && I2C
+ depends on VIDEO_DEV && I2C
help
This is a Video4Linux2 sub-dev driver for the LM3554
flash light driver.
diff --git a/drivers/staging/media/atomisp/pci/atomisp_acc.c b/drivers/staging/media/atomisp/pci/atomisp_acc.c
index 9a1751895ab0..28cb271663c4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_acc.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_acc.c
@@ -439,6 +439,18 @@ int atomisp_acc_s_mapped_arg(struct atomisp_sub_device *asd,
return 0;
}
+static void atomisp_acc_unload_some_extensions(struct atomisp_sub_device *asd,
+ int i,
+ struct atomisp_acc_fw *acc_fw)
+{
+ while (--i >= 0) {
+ if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
+ atomisp_css_unload_acc_extension(asd, acc_fw->fw,
+ acc_flag_to_pipe[i].pipe_id);
+ }
+ }
+}
+
/*
* Appends the loaded acceleration binary extensions to the
* current ISP mode. Must be called just before sh_css_start().
@@ -479,16 +491,20 @@ int atomisp_acc_load_extensions(struct atomisp_sub_device *asd)
acc_fw->fw,
acc_flag_to_pipe[i].pipe_id,
acc_fw->type);
- if (ret)
+ if (ret) {
+ atomisp_acc_unload_some_extensions(asd, i, acc_fw);
goto error;
+ }
ext_loaded = true;
}
}
ret = atomisp_css_set_acc_parameters(acc_fw);
- if (ret < 0)
+ if (ret < 0) {
+ atomisp_acc_unload_some_extensions(asd, i, acc_fw);
goto error;
+ }
}
if (!ext_loaded)
@@ -497,6 +513,7 @@ int atomisp_acc_load_extensions(struct atomisp_sub_device *asd)
ret = atomisp_css_update_stream(asd);
if (ret) {
dev_err(isp->dev, "%s: update stream failed.\n", __func__);
+ atomisp_acc_unload_extensions(asd);
goto error;
}
@@ -504,13 +521,6 @@ int atomisp_acc_load_extensions(struct atomisp_sub_device *asd)
return 0;
error:
- while (--i >= 0) {
- if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
- atomisp_css_unload_acc_extension(asd, acc_fw->fw,
- acc_flag_to_pipe[i].pipe_id);
- }
- }
-
list_for_each_entry_continue_reverse(acc_fw, &asd->acc.fw, list) {
if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT &&
acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER)
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
index 1173be0e72b0..781a11cca599 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
@@ -963,7 +963,7 @@ int atomisp_css_irq_translate(struct atomisp_device *isp,
void atomisp_css_rx_get_irq_info(enum mipi_port_id port,
unsigned int *infos)
{
-#ifndef IS_ISP2401
+#ifndef ISP2401
ia_css_isys_rx_get_irq_info(port, infos);
#else
*infos = 0;
@@ -973,7 +973,7 @@ void atomisp_css_rx_get_irq_info(enum mipi_port_id port,
void atomisp_css_rx_clear_irq_info(enum mipi_port_id port,
unsigned int infos)
{
-#ifndef IS_ISP2401
+#ifndef ISP2401
ia_css_isys_rx_clear_irq_info(port, infos);
#endif
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
index 1cc581074ba7..7e47db82de07 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
@@ -38,7 +38,7 @@ enum clock_rate {
#define ELDO_CTRL_REG 0x12
#define ELDO1_SEL_REG 0x19
-#define ELDO1_1P8V 0x16
+#define ELDO1_1P6V 0x12
#define ELDO1_CTRL_SHIFT 0x00
#define ELDO2_SEL_REG 0x1a
@@ -89,7 +89,7 @@ struct gmin_subdev {
u8 pwm_i2c_addr;
/* For PMIC AXP */
- int eldo1_sel_reg, eldo1_1p8v, eldo1_ctrl_shift;
+ int eldo1_sel_reg, eldo1_1p6v, eldo1_ctrl_shift;
int eldo2_sel_reg, eldo2_1p8v, eldo2_ctrl_shift;
};
@@ -118,6 +118,10 @@ static const char *pmic_name[] = {
[PMIC_CRYSTALCOVE] = "Crystal Cove PMIC",
};
+static DEFINE_MUTEX(gmin_regulator_mutex);
+static int gmin_v1p8_enable_count;
+static int gmin_v2p8_enable_count;
+
/* The atomisp uses type==0 for the end-of-list marker, so leave space. */
static struct intel_v4l2_subdev_table pdata_subdevs[MAX_SUBDEVS + 1];
@@ -536,7 +540,7 @@ static int gmin_subdev_add(struct gmin_subdev *gs)
struct i2c_client *client = v4l2_get_subdevdata(gs->subdev);
struct device *dev = &client->dev;
struct acpi_device *adev = ACPI_COMPANION(dev);
- int ret, clock_num = -1;
+ int ret, default_val, clock_num = -1;
dev_info(dev, "%s: ACPI path is %pfw\n", __func__, dev_fwnode(dev));
@@ -544,7 +548,20 @@ static int gmin_subdev_add(struct gmin_subdev *gs)
gs->clock_src = gmin_get_var_int(dev, false, "ClkSrc",
VLV2_CLK_PLL_19P2MHZ);
- gs->csi_port = gmin_get_var_int(dev, false, "CsiPort", 0);
+ /*
+ * Get ACPI _PR0 derived clock here already because it is used
+ * to determine the csi_port default.
+ */
+ if (acpi_device_power_manageable(adev))
+ clock_num = atomisp_get_acpi_power(dev);
+
+ /* Compare clock to CsiPort 1 pmc-clock used in the CHT/BYT reference designs */
+ if (IS_ISP2401)
+ default_val = clock_num == 4 ? 1 : 0;
+ else
+ default_val = clock_num == 0 ? 1 : 0;
+
+ gs->csi_port = gmin_get_var_int(dev, false, "CsiPort", default_val);
gs->csi_lanes = gmin_get_var_int(dev, false, "CsiLanes", 1);
gs->gpio0 = gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW);
@@ -625,11 +642,7 @@ static int gmin_subdev_add(struct gmin_subdev *gs)
* otherwise.
*/
- /* Try first to use ACPI to get the clock resource */
- if (acpi_device_power_manageable(adev))
- clock_num = atomisp_get_acpi_power(dev);
-
- /* Fall-back use EFI and/or DMI match */
+ /* If getting the clock from _PR0 above failed, fall-back to EFI and/or DMI match */
if (clock_num < 0)
clock_num = gmin_get_var_int(dev, false, "CamClk", 0);
@@ -681,9 +694,9 @@ static int gmin_subdev_add(struct gmin_subdev *gs)
break;
case PMIC_AXP:
- gs->eldo1_1p8v = gmin_get_var_int(dev, false,
+ gs->eldo1_1p6v = gmin_get_var_int(dev, false,
"eldo1_1p8v",
- ELDO1_1P8V);
+ ELDO1_1P6V);
gs->eldo1_sel_reg = gmin_get_var_int(dev, false,
"eldo1_sel_reg",
ELDO1_SEL_REG);
@@ -741,13 +754,28 @@ static int axp_regulator_set(struct device *dev, struct gmin_subdev *gs,
val = on ? 1 << shift : 0;
- ret = gmin_i2c_write(dev, gs->pwm_i2c_addr, sel_reg, val, 1 << shift);
+ ret = gmin_i2c_write(dev, gs->pwm_i2c_addr, ctrl_reg, val, 1 << shift);
if (ret)
return ret;
return 0;
}
+/*
+ * Some boards contain a hw-bug where turning eldo2 back on after having turned
+ * it off causes the CPLM3218 ambient-light-sensor on the image-sensor's I2C bus
+ * to crash, hanging the bus. Do not turn eldo2 off on these systems.
+ */
+static const struct dmi_system_id axp_leave_eldo2_on_ids[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab duo W1 10.1 (VT4)"),
+ },
+ },
+ { }
+};
+
static int axp_v1p8_on(struct device *dev, struct gmin_subdev *gs)
{
int ret;
@@ -763,13 +791,8 @@ static int axp_v1p8_on(struct device *dev, struct gmin_subdev *gs)
*/
usleep_range(110, 150);
- ret = axp_regulator_set(dev, gs, gs->eldo1_sel_reg, gs->eldo1_1p8v,
- ELDO_CTRL_REG, gs->eldo1_ctrl_shift, true);
- if (ret)
- return ret;
-
- ret = axp_regulator_set(dev, gs, gs->eldo2_sel_reg, gs->eldo2_1p8v,
- ELDO_CTRL_REG, gs->eldo2_ctrl_shift, false);
+ ret = axp_regulator_set(dev, gs, gs->eldo1_sel_reg, gs->eldo1_1p6v,
+ ELDO_CTRL_REG, gs->eldo1_ctrl_shift, true);
return ret;
}
@@ -777,11 +800,14 @@ static int axp_v1p8_off(struct device *dev, struct gmin_subdev *gs)
{
int ret;
- ret = axp_regulator_set(dev, gs, gs->eldo1_sel_reg, gs->eldo1_1p8v,
+ ret = axp_regulator_set(dev, gs, gs->eldo1_sel_reg, gs->eldo1_1p6v,
ELDO_CTRL_REG, gs->eldo1_ctrl_shift, false);
if (ret)
return ret;
+ if (dmi_check_system(axp_leave_eldo2_on_ids))
+ return 0;
+
ret = axp_regulator_set(dev, gs, gs->eldo2_sel_reg, gs->eldo2_1p8v,
ELDO_CTRL_REG, gs->eldo2_ctrl_shift, false);
return ret;
@@ -851,38 +877,58 @@ static int gmin_v1p8_ctrl(struct v4l2_subdev *subdev, int on)
gs->v1p8_on = on;
+ ret = 0;
+ mutex_lock(&gmin_regulator_mutex);
+ if (on) {
+ gmin_v1p8_enable_count++;
+ if (gmin_v1p8_enable_count > 1)
+ goto out; /* Already on */
+ } else {
+ gmin_v1p8_enable_count--;
+ if (gmin_v1p8_enable_count > 0)
+ goto out; /* Still needed */
+ }
+
if (gs->v1p8_gpio >= 0)
gpio_set_value(gs->v1p8_gpio, on);
if (gs->v1p8_reg) {
regulator_set_voltage(gs->v1p8_reg, 1800000, 1800000);
if (on)
- return regulator_enable(gs->v1p8_reg);
+ ret = regulator_enable(gs->v1p8_reg);
else
- return regulator_disable(gs->v1p8_reg);
+ ret = regulator_disable(gs->v1p8_reg);
+
+ goto out;
}
switch (pmic_id) {
case PMIC_AXP:
if (on)
- return axp_v1p8_on(subdev->dev, gs);
+ ret = axp_v1p8_on(subdev->dev, gs);
else
- return axp_v1p8_off(subdev->dev, gs);
+ ret = axp_v1p8_off(subdev->dev, gs);
+ break;
case PMIC_TI:
value = on ? LDO_1P8V_ON : LDO_1P8V_OFF;
- return gmin_i2c_write(subdev->dev, gs->pwm_i2c_addr,
- LDO10_REG, value, 0xff);
+ ret = gmin_i2c_write(subdev->dev, gs->pwm_i2c_addr,
+ LDO10_REG, value, 0xff);
+ break;
case PMIC_CRYSTALCOVE:
value = on ? CRYSTAL_ON : CRYSTAL_OFF;
- return gmin_i2c_write(subdev->dev, gs->pwm_i2c_addr,
- CRYSTAL_1P8V_REG, value, 0xff);
+ ret = gmin_i2c_write(subdev->dev, gs->pwm_i2c_addr,
+ CRYSTAL_1P8V_REG, value, 0xff);
+ break;
default:
- dev_err(subdev->dev, "Couldn't set power mode for v1p2\n");
+ dev_err(subdev->dev, "Couldn't set power mode for v1p8\n");
+ ret = -EINVAL;
}
- return -EINVAL;
+out:
+ mutex_unlock(&gmin_regulator_mutex);
+ return ret;
}
static int gmin_v2p8_ctrl(struct v4l2_subdev *subdev, int on)
@@ -908,37 +954,57 @@ static int gmin_v2p8_ctrl(struct v4l2_subdev *subdev, int on)
return 0;
gs->v2p8_on = on;
+ ret = 0;
+ mutex_lock(&gmin_regulator_mutex);
+ if (on) {
+ gmin_v2p8_enable_count++;
+ if (gmin_v2p8_enable_count > 1)
+ goto out; /* Already on */
+ } else {
+ gmin_v2p8_enable_count--;
+ if (gmin_v2p8_enable_count > 0)
+ goto out; /* Still needed */
+ }
+
if (gs->v2p8_gpio >= 0)
gpio_set_value(gs->v2p8_gpio, on);
if (gs->v2p8_reg) {
regulator_set_voltage(gs->v2p8_reg, 2900000, 2900000);
if (on)
- return regulator_enable(gs->v2p8_reg);
+ ret = regulator_enable(gs->v2p8_reg);
else
- return regulator_disable(gs->v2p8_reg);
+ ret = regulator_disable(gs->v2p8_reg);
+
+ goto out;
}
switch (pmic_id) {
case PMIC_AXP:
- return axp_regulator_set(subdev->dev, gs, ALDO1_SEL_REG,
- ALDO1_2P8V, ALDO1_CTRL3_REG,
- ALDO1_CTRL3_SHIFT, on);
+ ret = axp_regulator_set(subdev->dev, gs, ALDO1_SEL_REG,
+ ALDO1_2P8V, ALDO1_CTRL3_REG,
+ ALDO1_CTRL3_SHIFT, on);
+ break;
case PMIC_TI:
value = on ? LDO_2P8V_ON : LDO_2P8V_OFF;
- return gmin_i2c_write(subdev->dev, gs->pwm_i2c_addr,
- LDO9_REG, value, 0xff);
+ ret = gmin_i2c_write(subdev->dev, gs->pwm_i2c_addr,
+ LDO9_REG, value, 0xff);
+ break;
case PMIC_CRYSTALCOVE:
value = on ? CRYSTAL_ON : CRYSTAL_OFF;
- return gmin_i2c_write(subdev->dev, gs->pwm_i2c_addr,
- CRYSTAL_2P8V_REG, value, 0xff);
+ ret = gmin_i2c_write(subdev->dev, gs->pwm_i2c_addr,
+ CRYSTAL_2P8V_REG, value, 0xff);
+ break;
default:
- dev_err(subdev->dev, "Couldn't set power mode for v1p2\n");
+ dev_err(subdev->dev, "Couldn't set power mode for v2p8\n");
+ ret = -EINVAL;
}
- return -EINVAL;
+out:
+ mutex_unlock(&gmin_regulator_mutex);
+ return ret;
}
static int gmin_acpi_pm_ctrl(struct v4l2_subdev *subdev, int on)
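The v1p8/v2p8 handlers above now share their rails between sensors through a global enable count under a mutex, so the rail is only actually switched on the 0-to-1 and 1-to-0 transitions. The idiom in isolation, as a minimal sketch with a hypothetical regulator-backed rail:

```c
#include <linux/mutex.h>
#include <linux/regulator/consumer.h>

static DEFINE_MUTEX(rail_mutex);
static int rail_enable_count;

static int rail_ctrl(struct regulator *reg, bool on)
{
	int ret = 0;

	mutex_lock(&rail_mutex);
	if (on) {
		if (++rail_enable_count > 1)
			goto out; /* already powered by another user */
		ret = regulator_enable(reg);
	} else {
		if (--rail_enable_count > 0)
			goto out; /* still needed by another user */
		ret = regulator_disable(reg);
	}
out:
	mutex_unlock(&rail_mutex);
	return ret;
}
```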
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
index 1b240891a6e2..49ccfb1646da 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
@@ -25,6 +25,7 @@
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/interrupt.h>
+#include <linux/bits.h>
#include <asm/iosf_mbi.h>
@@ -626,11 +627,11 @@ static int atomisp_mrfld_pre_power_down(struct atomisp_device *isp)
* IRQ; if so, wait for it to be served
*/
pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
- irq = irq & 1 << INTR_IIR;
+ irq &= BIT(INTR_IIR);
pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
- if (!(irq & (1 << INTR_IIR)))
+ if (!(irq & BIT(INTR_IIR)))
goto done;
atomisp_css2_hw_store_32(MRFLD_INTR_CLEAR_REG, 0xFFFFFFFF);
@@ -643,11 +644,11 @@ static int atomisp_mrfld_pre_power_down(struct atomisp_device *isp)
return -EAGAIN;
} else {
pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
- irq = irq & 1 << INTR_IIR;
+ irq &= BIT(INTR_IIR);
pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
- if (!(irq & (1 << INTR_IIR))) {
+ if (!(irq & BIT(INTR_IIR))) {
atomisp_css2_hw_store_32(MRFLD_INTR_ENABLE_REG, 0x0);
goto done;
}
@@ -666,7 +667,7 @@ done:
* HW sighting:4568410.
*/
pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
- irq &= ~(1 << INTR_IER);
+ irq &= ~BIT(INTR_IER);
pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
atomisp_msi_irq_uninit(isp);
@@ -1467,7 +1468,7 @@ static bool is_valid_device(struct pci_dev *pdev, const struct pci_device_id *id
* remove the if once the driver become generic
*/
-#if defined(ISP2400)
+#ifndef ISP2401
if (IS_ISP2401) {
dev_err(&pdev->dev, "Support for %s (ISP2401) was disabled at compile time\n",
name);
@@ -1549,7 +1550,7 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
start = pci_resource_start(pdev, ATOM_ISP_PCI_BAR);
dev_dbg(&pdev->dev, "start: 0x%x\n", start);
- err = pcim_iomap_regions(pdev, 1 << ATOM_ISP_PCI_BAR, pci_name(pdev));
+ err = pcim_iomap_regions(pdev, BIT(ATOM_ISP_PCI_BAR), pci_name(pdev));
if (err) {
dev_err(&pdev->dev, "Failed to I/O memory remapping (%d)\n", err);
goto ioremap_fail;
@@ -1838,11 +1839,11 @@ load_fw_fail:
*/
pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
- irq = irq & 1 << INTR_IIR;
+ irq &= BIT(INTR_IIR);
pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
- irq &= ~(1 << INTR_IER);
+ irq &= ~BIT(INTR_IER);
pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
atomisp_msi_irq_uninit(isp);
@@ -1854,7 +1855,7 @@ load_fw_fail:
dev_err(&pdev->dev, "Failed to switch off ISP\n");
atomisp_dev_alloc_fail:
- pcim_iounmap_regions(pdev, 1 << ATOM_ISP_PCI_BAR);
+ pcim_iounmap_regions(pdev, BIT(ATOM_ISP_PCI_BAR));
ioremap_fail:
return err;
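The atomisp_v4l2.c hunks above replace open-coded shifts with BIT() from
<linux/bits.h>. BIT(n) expands to (1UL << (n)), so the values are unchanged
for these positions; the macro form reads as a single bit by construction and,
being unsigned-long wide, sidesteps signed-shift pitfalls at high bit numbers.
A tiny illustrative helper, not part of the driver:

    #include <linux/bits.h>
    #include <linux/types.h>

    /* Keep only the interrupt-acknowledge bit of a control word. */
    static u32 keep_ack_bit(u32 ctrl, unsigned int bit)
    {
    	return ctrl & BIT(bit);	/* same value as ctrl & (1U << bit) here */
    }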
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c
index 6a5ee4607089..c1cda16f2dc0 100644
--- a/drivers/staging/media/atomisp/pci/hmm/hmm.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c
@@ -39,7 +39,7 @@
struct hmm_bo_device bo_device;
struct hmm_pool dynamic_pool;
struct hmm_pool reserved_pool;
-static ia_css_ptr dummy_ptr;
+static ia_css_ptr dummy_ptr = mmgr_EXCEPTION;
static bool hmm_initialized;
struct _hmm_mem_stat hmm_mem_stat;
@@ -209,7 +209,7 @@ int hmm_init(void)
void hmm_cleanup(void)
{
- if (!dummy_ptr)
+ if (dummy_ptr == mmgr_EXCEPTION)
return;
sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group);
@@ -288,7 +288,8 @@ void hmm_free(ia_css_ptr virt)
dev_dbg(atomisp_dev, "%s: free 0x%08x\n", __func__, virt);
- WARN_ON(!virt);
+ if (WARN_ON(virt == mmgr_EXCEPTION))
+ return;
bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt);
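The hmm.c change above stops treating address 0 as "no dummy buffer": once 0
can be a valid ia_css_ptr, an out-of-band sentinel (mmgr_EXCEPTION) is needed
to mean "unset". A minimal sketch of the idiom, with illustrative names:

    #include <linux/types.h>

    #define INVALID_HANDLE	((u32)-1)	/* stand-in for mmgr_EXCEPTION */

    static u32 handle = INVALID_HANDLE;	/* 0 may be a perfectly valid handle */

    static void release_handle(void)
    {
    	if (handle == INVALID_HANDLE)	/* "if (!handle)" would misfire on 0 */
    		return;
    	/* ...free the underlying resource here... */
    	handle = INVALID_HANDLE;
    }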
diff --git a/drivers/staging/media/atomisp/pci/ia_css_acc_types.h b/drivers/staging/media/atomisp/pci/ia_css_acc_types.h
index d0ce2f8ba653..a20879aedef6 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_acc_types.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_acc_types.h
@@ -24,6 +24,7 @@
#include <type_support.h>
#include <platform_support.h>
#include <debug_global.h>
+#include <linux/bits.h>
#include "ia_css_types.h"
#include "ia_css_frame_format.h"
@@ -466,7 +467,7 @@ struct ia_css_acc_fw {
enum ia_css_sp_sleep_mode {
SP_DISABLE_SLEEP_MODE = 0,
- SP_SLEEP_AFTER_FRAME = 1 << 0,
- SP_SLEEP_AFTER_IRQ = 1 << 1
+ SP_SLEEP_AFTER_FRAME = BIT(0),
+ SP_SLEEP_AFTER_IRQ = BIT(1),
};
#endif /* _IA_CSS_ACC_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_env.h b/drivers/staging/media/atomisp/pci/ia_css_env.h
index 3b89bbd837a0..42bb1ec1c22d 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_env.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_env.h
@@ -18,6 +18,7 @@
#include <type_support.h>
#include <linux/stdarg.h> /* va_list */
+#include <linux/bits.h>
#include "ia_css_types.h"
#include "ia_css_acc_types.h"
@@ -28,10 +29,10 @@
/* Memory allocation attributes, for use in ia_css_css_mem_env. */
enum ia_css_mem_attr {
- IA_CSS_MEM_ATTR_CACHED = 1 << 0,
- IA_CSS_MEM_ATTR_ZEROED = 1 << 1,
- IA_CSS_MEM_ATTR_PAGEALIGN = 1 << 2,
- IA_CSS_MEM_ATTR_CONTIGUOUS = 1 << 3,
+ IA_CSS_MEM_ATTR_CACHED = BIT(0),
+ IA_CSS_MEM_ATTR_ZEROED = BIT(1),
+ IA_CSS_MEM_ATTR_PAGEALIGN = BIT(2),
+ IA_CSS_MEM_ATTR_CONTIGUOUS = BIT(3),
};
/* Environment with function pointers for local IA memory allocation.
diff --git a/drivers/staging/media/atomisp/pci/ia_css_event_public.h b/drivers/staging/media/atomisp/pci/ia_css_event_public.h
index 76219d741d2e..b052648d4fc2 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_event_public.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_event_public.h
@@ -24,6 +24,7 @@
#include <ia_css_err.h> /* ia_css_err */
#include <ia_css_types.h> /* ia_css_pipe */
#include <ia_css_timer.h> /* ia_css_timer */
+#include <linux/bits.h>
/* The event type distinguishes the kind of events that
* can be generated by the CSS system.
@@ -35,38 +36,38 @@
* 4) "enum ia_css_event_type convert_event_sp_to_host_domain" (sh_css.c)
*/
enum ia_css_event_type {
- IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE = 1 << 0,
+ IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE = BIT(0),
/** Output frame ready. */
- IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE = 1 << 1,
+ IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE = BIT(1),
/** Second output frame ready. */
- IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE = 1 << 2,
+ IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE = BIT(2),
/** Viewfinder Output frame ready. */
- IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE = 1 << 3,
+ IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE = BIT(3),
/** Second viewfinder Output frame ready. */
- IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE = 1 << 4,
+ IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE = BIT(4),
/** Indication that 3A statistics are available. */
- IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE = 1 << 5,
+ IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE = BIT(5),
/** Indication that DIS statistics are available. */
- IA_CSS_EVENT_TYPE_PIPELINE_DONE = 1 << 6,
+ IA_CSS_EVENT_TYPE_PIPELINE_DONE = BIT(6),
/** Pipeline Done event, sent after last pipeline stage. */
- IA_CSS_EVENT_TYPE_FRAME_TAGGED = 1 << 7,
+ IA_CSS_EVENT_TYPE_FRAME_TAGGED = BIT(7),
/** Frame tagged. */
- IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE = 1 << 8,
+ IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE = BIT(8),
/** Input frame ready. */
- IA_CSS_EVENT_TYPE_METADATA_DONE = 1 << 9,
+ IA_CSS_EVENT_TYPE_METADATA_DONE = BIT(9),
/** Metadata ready. */
- IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE = 1 << 10,
+ IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE = BIT(10),
/** Indication that LACE statistics are available. */
- IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE = 1 << 11,
+ IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE = BIT(11),
/** Extension stage complete. */
- IA_CSS_EVENT_TYPE_TIMER = 1 << 12,
+ IA_CSS_EVENT_TYPE_TIMER = BIT(12),
/** Timer event for measuring the SP side latencies. It contains the
32-bit timer value from the SP */
- IA_CSS_EVENT_TYPE_PORT_EOF = 1 << 13,
+ IA_CSS_EVENT_TYPE_PORT_EOF = BIT(13),
/** End Of Frame event, sent when in buffered sensor mode. */
- IA_CSS_EVENT_TYPE_FW_WARNING = 1 << 14,
+ IA_CSS_EVENT_TYPE_FW_WARNING = BIT(14),
/** Performance warning encounter by FW */
- IA_CSS_EVENT_TYPE_FW_ASSERT = 1 << 15,
+ IA_CSS_EVENT_TYPE_FW_ASSERT = BIT(15),
/** Assertion hit by FW */
};
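With the event types above expressed as distinct BIT() values, a mask of
pending events can be combined and tested with plain bitwise operators; a
hedged sketch, assuming ia_css_event_public.h is in scope (the helper itself
is not driver code):

    #include <linux/printk.h>

    static void dump_events(unsigned int events)
    {
    	if (events & IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE)
    		pr_debug("output frame done\n");
    	if (events & IA_CSS_EVENT_TYPE_PIPELINE_DONE)
    		pr_debug("pipeline done\n");
    }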
diff --git a/drivers/staging/media/atomisp/pci/ia_css_irq.h b/drivers/staging/media/atomisp/pci/ia_css_irq.h
index 3b81a39cfe97..26b1b3c8ba62 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_irq.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_irq.h
@@ -23,6 +23,7 @@
#include "ia_css_err.h"
#include "ia_css_pipe_public.h"
#include "ia_css_input_port.h"
+#include <linux/bits.h>
/* Interrupt types. These enumerate all supported interrupt types.
*/
@@ -46,49 +47,49 @@ enum ia_css_irq_type {
* (SW) interrupts
*/
enum ia_css_irq_info {
- IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR = 1 << 0,
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR = BIT(0),
/** the css receiver has encountered an error */
- IA_CSS_IRQ_INFO_CSS_RECEIVER_FIFO_OVERFLOW = 1 << 1,
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_FIFO_OVERFLOW = BIT(1),
/** the FIFO in the csi receiver has overflown */
- IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF = 1 << 2,
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF = BIT(2),
/** the css receiver received the start of frame */
- IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF = 1 << 3,
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF = BIT(3),
/** the css receiver received the end of frame */
- IA_CSS_IRQ_INFO_CSS_RECEIVER_SOL = 1 << 4,
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_SOL = BIT(4),
/** the css receiver received the start of line */
- IA_CSS_IRQ_INFO_EVENTS_READY = 1 << 5,
+ IA_CSS_IRQ_INFO_EVENTS_READY = BIT(5),
/** One or more events are available in the PSYS event queue */
- IA_CSS_IRQ_INFO_CSS_RECEIVER_EOL = 1 << 6,
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_EOL = BIT(6),
/** the css receiver received the end of line */
- IA_CSS_IRQ_INFO_CSS_RECEIVER_SIDEBAND_CHANGED = 1 << 7,
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_SIDEBAND_CHANGED = BIT(7),
/** the css receiver received a change in side band signals */
- IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_0 = 1 << 8,
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_0 = BIT(8),
/** generic short packets (0) */
- IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_1 = 1 << 9,
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_1 = BIT(9),
/** generic short packets (1) */
- IA_CSS_IRQ_INFO_IF_PRIM_ERROR = 1 << 10,
+ IA_CSS_IRQ_INFO_IF_PRIM_ERROR = BIT(10),
/** the primary input formatter (A) has encountered an error */
- IA_CSS_IRQ_INFO_IF_PRIM_B_ERROR = 1 << 11,
+ IA_CSS_IRQ_INFO_IF_PRIM_B_ERROR = BIT(11),
/** the primary input formatter (B) has encountered an error */
- IA_CSS_IRQ_INFO_IF_SEC_ERROR = 1 << 12,
+ IA_CSS_IRQ_INFO_IF_SEC_ERROR = BIT(12),
/** the secondary input formatter has encountered an error */
- IA_CSS_IRQ_INFO_STREAM_TO_MEM_ERROR = 1 << 13,
+ IA_CSS_IRQ_INFO_STREAM_TO_MEM_ERROR = BIT(13),
/** the stream-to-memory device has encountered an error */
- IA_CSS_IRQ_INFO_SW_0 = 1 << 14,
+ IA_CSS_IRQ_INFO_SW_0 = BIT(14),
/** software interrupt 0 */
- IA_CSS_IRQ_INFO_SW_1 = 1 << 15,
+ IA_CSS_IRQ_INFO_SW_1 = BIT(15),
/** software interrupt 1 */
- IA_CSS_IRQ_INFO_SW_2 = 1 << 16,
+ IA_CSS_IRQ_INFO_SW_2 = BIT(16),
/** software interrupt 2 */
- IA_CSS_IRQ_INFO_ISP_BINARY_STATISTICS_READY = 1 << 17,
+ IA_CSS_IRQ_INFO_ISP_BINARY_STATISTICS_READY = BIT(17),
/** ISP binary statistics are ready */
- IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR = 1 << 18,
+ IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR = BIT(18),
/** the input system is in error */
- IA_CSS_IRQ_INFO_IF_ERROR = 1 << 19,
+ IA_CSS_IRQ_INFO_IF_ERROR = BIT(19),
/** the input formatter is in error */
- IA_CSS_IRQ_INFO_DMA_ERROR = 1 << 20,
+ IA_CSS_IRQ_INFO_DMA_ERROR = BIT(20),
/** the dma is in error */
- IA_CSS_IRQ_INFO_ISYS_EVENTS_READY = 1 << 21,
+ IA_CSS_IRQ_INFO_ISYS_EVENTS_READY = BIT(21),
/** end-of-frame events are ready in the isys_event queue */
};
@@ -103,23 +104,23 @@ enum ia_css_irq_info {
* different receiver types, or possibly none in the case of test systems.
*/
enum ia_css_rx_irq_info {
- IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN = 1U << 0, /** buffer overrun */
- IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE = 1U << 1, /** entering sleep mode */
- IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE = 1U << 2, /** exited sleep mode */
- IA_CSS_RX_IRQ_INFO_ECC_CORRECTED = 1U << 3, /** ECC corrected */
- IA_CSS_RX_IRQ_INFO_ERR_SOT = 1U << 4,
+ IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN = BIT(0), /** buffer overrun */
+ IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE = BIT(1), /** entering sleep mode */
+ IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE = BIT(2), /** exited sleep mode */
+ IA_CSS_RX_IRQ_INFO_ECC_CORRECTED = BIT(3), /** ECC corrected */
+ IA_CSS_RX_IRQ_INFO_ERR_SOT = BIT(4),
/** Start of transmission */
- IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC = 1U << 5, /** SOT sync (??) */
- IA_CSS_RX_IRQ_INFO_ERR_CONTROL = 1U << 6, /** Control (??) */
- IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE = 1U << 7, /** Double ECC */
- IA_CSS_RX_IRQ_INFO_ERR_CRC = 1U << 8, /** CRC error */
- IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID = 1U << 9, /** Unknown ID */
- IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC = 1U << 10,/** Frame sync error */
- IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA = 1U << 11,/** Frame data error */
- IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT = 1U << 12,/** Timeout occurred */
- IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC = 1U << 13,/** Unknown escape seq. */
- IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC = 1U << 14,/** Line Sync error */
- IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT = 1U << 15,
+ IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC = BIT(5), /** SOT sync (??) */
+ IA_CSS_RX_IRQ_INFO_ERR_CONTROL = BIT(6), /** Control (??) */
+ IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE = BIT(7), /** Double ECC */
+ IA_CSS_RX_IRQ_INFO_ERR_CRC = BIT(8), /** CRC error */
+ IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID = BIT(9), /** Unknown ID */
+ IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC = BIT(10), /** Frame sync error */
+ IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA = BIT(11), /** Frame data error */
+ IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT = BIT(12), /** Timeout occurred */
+ IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC = BIT(13), /** Unknown escape seq. */
+ IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC = BIT(14), /** Line Sync error */
+ IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT = BIT(15),
};
/* Interrupt info structure. This structure contains information about an
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c
index 562662ab8a44..a70bce1179da 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c
@@ -44,7 +44,7 @@ ia_css_macc1_5_vmem_encode(
unsigned int size)
{
unsigned int i, j, k, idx;
- unsigned int idx_map[] = {
+ static const unsigned int idx_map[] = {
0, 1, 3, 2, 6, 7, 5, 4, 12, 13, 15, 14, 10, 11, 9, 8
};
diff --git a/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h
index e37ef4232c55..fff89e9b4b01 100644
--- a/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h
+++ b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h
@@ -20,6 +20,7 @@
#include <type_support.h>
#include <linux/stdarg.h>
+#include <linux/bits.h>
#include "ia_css_types.h"
#include "ia_css_binary.h"
#include "ia_css_frame_public.h"
@@ -53,21 +54,21 @@ extern int dbg_level;
* Values can be combined to dump a combination of sets.
*/
enum ia_css_debug_enable_param_dump {
- IA_CSS_DEBUG_DUMP_FPN = 1 << 0, /** FPN table */
- IA_CSS_DEBUG_DUMP_OB = 1 << 1, /** OB table */
- IA_CSS_DEBUG_DUMP_SC = 1 << 2, /** Shading table */
- IA_CSS_DEBUG_DUMP_WB = 1 << 3, /** White balance */
- IA_CSS_DEBUG_DUMP_DP = 1 << 4, /** Defect Pixel */
- IA_CSS_DEBUG_DUMP_BNR = 1 << 5, /** Bayer Noise Reductions */
- IA_CSS_DEBUG_DUMP_S3A = 1 << 6, /** 3A Statistics */
- IA_CSS_DEBUG_DUMP_DE = 1 << 7, /** De Mosaicing */
- IA_CSS_DEBUG_DUMP_YNR = 1 << 8, /** Luma Noise Reduction */
- IA_CSS_DEBUG_DUMP_CSC = 1 << 9, /** Color Space Conversion */
- IA_CSS_DEBUG_DUMP_GC = 1 << 10, /** Gamma Correction */
- IA_CSS_DEBUG_DUMP_TNR = 1 << 11, /** Temporal Noise Reduction */
- IA_CSS_DEBUG_DUMP_ANR = 1 << 12, /** Advanced Noise Reduction */
- IA_CSS_DEBUG_DUMP_CE = 1 << 13, /** Chroma Enhancement */
- IA_CSS_DEBUG_DUMP_ALL = 1 << 14 /** Dump all device parameters */
+ IA_CSS_DEBUG_DUMP_FPN = BIT(0), /** FPN table */
+ IA_CSS_DEBUG_DUMP_OB = BIT(1), /** OB table */
+ IA_CSS_DEBUG_DUMP_SC = BIT(2), /** Shading table */
+ IA_CSS_DEBUG_DUMP_WB = BIT(3), /** White balance */
+ IA_CSS_DEBUG_DUMP_DP = BIT(4), /** Defect Pixel */
+ IA_CSS_DEBUG_DUMP_BNR = BIT(5), /** Bayer Noise Reductions */
+ IA_CSS_DEBUG_DUMP_S3A = BIT(6), /** 3A Statistics */
+ IA_CSS_DEBUG_DUMP_DE = BIT(7), /** De Mosaicing */
+ IA_CSS_DEBUG_DUMP_YNR = BIT(8), /** Luma Noise Reduction */
+ IA_CSS_DEBUG_DUMP_CSC = BIT(9), /** Color Space Conversion */
+ IA_CSS_DEBUG_DUMP_GC = BIT(10), /** Gamma Correction */
+ IA_CSS_DEBUG_DUMP_TNR = BIT(11), /** Temporal Noise Reduction */
+ IA_CSS_DEBUG_DUMP_ANR = BIT(12), /** Advanced Noise Reduction */
+ IA_CSS_DEBUG_DUMP_CE = BIT(13), /** Chroma Enhancement */
+ IA_CSS_DEBUG_DUMP_ALL = BIT(14), /** Dump all device parameters */
};
#define IA_CSS_ERROR(fmt, ...) \
diff --git a/drivers/staging/media/atomisp/pci/sh_css_firmware.c b/drivers/staging/media/atomisp/pci/sh_css_firmware.c
index 94149647b98b..dd688f8ab649 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_firmware.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_firmware.c
@@ -56,7 +56,11 @@ static struct firmware_header *firmware_header;
* which will be replaced with the actual RELEASE_VERSION
* during package generation. Please do not modify
*/
+#ifdef ISP2401
static const char *release_version = STR(irci_stable_candrpv_0415_20150521_0458);
+#else
+static const char *release_version = STR(irci_stable_candrpv_0415_20150423_1753);
+#endif
#define MAX_FW_REL_VER_NAME 300
static char FW_rel_ver_name[MAX_FW_REL_VER_NAME] = "---";
diff --git a/drivers/staging/media/hantro/Kconfig b/drivers/staging/media/hantro/Kconfig
index 3c5d833322c8..0172a6822ec2 100644
--- a/drivers/staging/media/hantro/Kconfig
+++ b/drivers/staging/media/hantro/Kconfig
@@ -2,7 +2,7 @@
config VIDEO_HANTRO
tristate "Hantro VPU driver"
depends on ARCH_MXC || ARCH_ROCKCHIP || ARCH_AT91 || ARCH_SUNXI || COMPILE_TEST
- depends on VIDEO_DEV && VIDEO_V4L2
+ depends on VIDEO_DEV
select MEDIA_CONTROLLER
select MEDIA_CONTROLLER_REQUEST_API
select VIDEOBUF2_DMA_CONTIG
diff --git a/drivers/staging/media/hantro/TODO b/drivers/staging/media/hantro/TODO
index fa0c94057007..1d7fed936019 100644
--- a/drivers/staging/media/hantro/TODO
+++ b/drivers/staging/media/hantro/TODO
@@ -4,10 +4,3 @@
the uABI, it will be required to have the driver in staging.
For this reason, we are keeping this driver in staging for now.
-
-* Add support for the S_SELECTION API.
- See the comment for VEPU_REG_ENC_OVER_FILL_STRM_OFFSET.
-
-* Instead of having a DMA bounce buffer, it could be possible to use a
- normal buffer and memmove() the payload to make space for the header.
- This might need to use extra JPEG markers for padding reasons.
diff --git a/drivers/staging/media/hantro/hantro.h b/drivers/staging/media/hantro/hantro.h
index 06d0f3597694..357f83b86809 100644
--- a/drivers/staging/media/hantro/hantro.h
+++ b/drivers/staging/media/hantro/hantro.h
@@ -259,7 +259,6 @@ struct hantro_ctx {
/* Specific for particular codec modes. */
union {
struct hantro_h264_dec_hw_ctx h264_dec;
- struct hantro_jpeg_enc_hw_ctx jpeg_enc;
struct hantro_mpeg2_dec_hw_ctx mpeg2_dec;
struct hantro_vp8_dec_hw_ctx vp8_dec;
struct hantro_hevc_dec_hw_ctx hevc_dec;
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
index 6a51f39dde56..dc768884cb79 100644
--- a/drivers/staging/media/hantro/hantro_drv.c
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -219,21 +219,15 @@ queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
if (ret)
return ret;
+ dst_vq->bidirectional = true;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
/*
- * When encoding, the CAPTURE queue doesn't need dma memory,
- * as the CPU needs to create the JPEG frames, from the
- * hardware-produced JPEG payload.
- *
- * For the DMA destination buffer, we use a bounce buffer.
+ * The kernel needs access to the JPEG destination buffer for the
+ * JPEG encoder to fill in the JPEG headers.
*/
- if (ctx->is_encoder) {
- dst_vq->mem_ops = &vb2_vmalloc_memops;
- } else {
- dst_vq->bidirectional = true;
- dst_vq->mem_ops = &vb2_dma_contig_memops;
- dst_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
- DMA_ATTR_NO_KERNEL_MAPPING;
- }
+ if (!ctx->is_encoder)
+ dst_vq->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
@@ -332,6 +326,11 @@ static const struct v4l2_ctrl_ops hantro_hevc_ctrl_ops = {
.s_ctrl = hantro_hevc_s_ctrl,
};
+#define HANTRO_JPEG_ACTIVE_MARKERS (V4L2_JPEG_ACTIVE_MARKER_APP0 | \
+ V4L2_JPEG_ACTIVE_MARKER_COM | \
+ V4L2_JPEG_ACTIVE_MARKER_DQT | \
+ V4L2_JPEG_ACTIVE_MARKER_DHT)
+
static const struct hantro_ctrl controls[] = {
{
.codec = HANTRO_JPEG_ENCODER,
@@ -344,6 +343,22 @@ static const struct hantro_ctrl controls[] = {
.ops = &hantro_jpeg_ctrl_ops,
},
}, {
+ .codec = HANTRO_JPEG_ENCODER,
+ .cfg = {
+ .id = V4L2_CID_JPEG_ACTIVE_MARKER,
+ .max = HANTRO_JPEG_ACTIVE_MARKERS,
+ .def = HANTRO_JPEG_ACTIVE_MARKERS,
+ /*
+ * Changing the set of active markers/segments also
+ * messes up the alignment of the JPEG header, which
+ * is needed to allow the hardware to write directly
+ * to the output buffer. Implementing this introduces
+ * a lot of complexity for little gain, as the enabled
+ * markers already form the minimum required set.
+ */
+ .flags = V4L2_CTRL_FLAG_READ_ONLY,
+ },
+ }, {
.codec = HANTRO_MPEG2_DECODER,
.cfg = {
.id = V4L2_CID_STATELESS_MPEG2_SEQUENCE,
@@ -615,7 +630,9 @@ static const struct of_device_id of_hantro_match[] = {
{ .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
#endif
#ifdef CONFIG_VIDEO_HANTRO_IMX8M
+ { .compatible = "nxp,imx8mm-vpu-g1", .data = &imx8mm_vpu_g1_variant, },
{ .compatible = "nxp,imx8mq-vpu", .data = &imx8mq_vpu_variant, },
+ { .compatible = "nxp,imx8mq-vpu-g1", .data = &imx8mq_vpu_g1_variant },
{ .compatible = "nxp,imx8mq-vpu-g2", .data = &imx8mq_vpu_g2_variant },
#endif
#ifdef CONFIG_VIDEO_HANTRO_SAMA5D4
@@ -890,6 +907,15 @@ static int hantro_probe(struct platform_device *pdev)
match = of_match_node(of_hantro_match, pdev->dev.of_node);
vpu->variant = match->data;
+ /*
+ * Support for nxp,imx8mq-vpu is kept for backwards compatibility
+ * but it's deprecated. Please update your DTS file to use
+ * nxp,imx8mq-vpu-g1 or nxp,imx8mq-vpu-g2 instead.
+ */
+ if (of_device_is_compatible(pdev->dev.of_node, "nxp,imx8mq-vpu"))
+ dev_warn(&pdev->dev, "%s compatible is deprecated\n",
+ match->compatible);
+
INIT_DELAYED_WORK(&vpu->watchdog_work, hantro_watchdog);
vpu->clocks = devm_kcalloc(&pdev->dev, vpu->variant->num_clocks,
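The V4L2_CID_JPEG_ACTIVE_MARKER control added above carries
V4L2_CTRL_FLAG_READ_ONLY, so userspace can query which markers the encoder
emits but cannot change the set. A hedged userspace sketch, with device setup
elided:

    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Reads back the fixed APP0|COM|DQT|DHT marker set on success. */
    static int get_active_markers(int fd, __s32 *markers)
    {
    	struct v4l2_control ctrl = { .id = V4L2_CID_JPEG_ACTIVE_MARKER };
    	int ret = ioctl(fd, VIDIOC_G_CTRL, &ctrl);

    	if (!ret)
    		*markers = ctrl.value;
    	return ret;
    }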
diff --git a/drivers/staging/media/hantro/hantro_g2_hevc_dec.c b/drivers/staging/media/hantro/hantro_g2_hevc_dec.c
index 99d8ea7543da..c524af41baf5 100644
--- a/drivers/staging/media/hantro/hantro_g2_hevc_dec.c
+++ b/drivers/staging/media/hantro/hantro_g2_hevc_dec.c
@@ -255,24 +255,11 @@ static void set_params(struct hantro_ctx *ctx)
hantro_reg_write(vpu, &g2_apf_threshold, 8);
}
-static int find_ref_pic_index(const struct v4l2_hevc_dpb_entry *dpb, int pic_order_cnt)
-{
- int i;
-
- for (i = 0; i < V4L2_HEVC_DPB_ENTRIES_NUM_MAX; i++) {
- if (dpb[i].pic_order_cnt[0] == pic_order_cnt)
- return i;
- }
-
- return 0x0;
-}
-
static void set_ref_pic_list(struct hantro_ctx *ctx)
{
const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
struct hantro_dev *vpu = ctx->dev;
const struct v4l2_ctrl_hevc_decode_params *decode_params = ctrls->decode_params;
- const struct v4l2_hevc_dpb_entry *dpb = decode_params->dpb;
u32 list0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX] = {};
u32 list1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX] = {};
static const struct hantro_reg ref_pic_regs0[] = {
@@ -316,11 +303,11 @@ static void set_ref_pic_list(struct hantro_ctx *ctx)
/* List 0 contains: short term before, short term after and long term */
j = 0;
for (i = 0; i < decode_params->num_poc_st_curr_before && j < ARRAY_SIZE(list0); i++)
- list0[j++] = find_ref_pic_index(dpb, decode_params->poc_st_curr_before[i]);
+ list0[j++] = decode_params->poc_st_curr_before[i];
for (i = 0; i < decode_params->num_poc_st_curr_after && j < ARRAY_SIZE(list0); i++)
- list0[j++] = find_ref_pic_index(dpb, decode_params->poc_st_curr_after[i]);
+ list0[j++] = decode_params->poc_st_curr_after[i];
for (i = 0; i < decode_params->num_poc_lt_curr && j < ARRAY_SIZE(list0); i++)
- list0[j++] = find_ref_pic_index(dpb, decode_params->poc_lt_curr[i]);
+ list0[j++] = decode_params->poc_lt_curr[i];
/* Fill the list, copying over and over */
i = 0;
@@ -329,11 +316,11 @@ static void set_ref_pic_list(struct hantro_ctx *ctx)
j = 0;
for (i = 0; i < decode_params->num_poc_st_curr_after && j < ARRAY_SIZE(list1); i++)
- list1[j++] = find_ref_pic_index(dpb, decode_params->poc_st_curr_after[i]);
+ list1[j++] = decode_params->poc_st_curr_after[i];
for (i = 0; i < decode_params->num_poc_st_curr_before && j < ARRAY_SIZE(list1); i++)
- list1[j++] = find_ref_pic_index(dpb, decode_params->poc_st_curr_before[i]);
+ list1[j++] = decode_params->poc_st_curr_before[i];
for (i = 0; i < decode_params->num_poc_lt_curr && j < ARRAY_SIZE(list1); i++)
- list1[j++] = find_ref_pic_index(dpb, decode_params->poc_lt_curr[i]);
+ list1[j++] = decode_params->poc_lt_curr[i];
i = 0;
while (j < ARRAY_SIZE(list1))
@@ -433,7 +420,7 @@ static int set_ref(struct hantro_ctx *ctx)
chroma_addr = luma_addr + cr_offset;
mv_addr = luma_addr + mv_offset;
- if (dpb[i].rps == V4L2_HEVC_DPB_ENTRY_RPS_LT_CURR)
+ if (dpb[i].flags & V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE)
dpb_longterm_e |= BIT(V4L2_HEVC_DPB_ENTRIES_NUM_MAX - 1 - i);
hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), luma_addr);
diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
index 1450013d3685..12d69503d6ba 100644
--- a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
+++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
@@ -18,29 +18,44 @@
static void hantro_h1_set_src_img_ctrl(struct hantro_dev *vpu,
struct hantro_ctx *ctx)
{
- struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
+ u32 overfill_r, overfill_b;
u32 reg;
- reg = H1_REG_IN_IMG_CTRL_ROW_LEN(pix_fmt->width)
- | H1_REG_IN_IMG_CTRL_OVRFLR_D4(0)
- | H1_REG_IN_IMG_CTRL_OVRFLB_D4(0)
+ /*
+ * The format width and height are already macroblock aligned
+ * by .vidioc_s_fmt_vid_cap_mplane() callback. Destination
+ * format width and height can be further modified by
+ * .vidioc_s_selection(), and the width is 4-aligned.
+ */
+ overfill_r = ctx->src_fmt.width - ctx->dst_fmt.width;
+ overfill_b = ctx->src_fmt.height - ctx->dst_fmt.height;
+
+ reg = H1_REG_IN_IMG_CTRL_ROW_LEN(ctx->src_fmt.width)
+ | H1_REG_IN_IMG_CTRL_OVRFLR_D4(overfill_r / 4)
+ | H1_REG_IN_IMG_CTRL_OVRFLB(overfill_b)
| H1_REG_IN_IMG_CTRL_FMT(ctx->vpu_src_fmt->enc_fmt);
vepu_write_relaxed(vpu, reg, H1_REG_IN_IMG_CTRL);
}
static void hantro_h1_jpeg_enc_set_buffers(struct hantro_dev *vpu,
struct hantro_ctx *ctx,
- struct vb2_buffer *src_buf)
+ struct vb2_buffer *src_buf,
+ struct vb2_buffer *dst_buf)
{
struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
dma_addr_t src[3];
+ u32 size_left;
+
+ size_left = vb2_plane_size(dst_buf, 0) - ctx->vpu_dst_fmt->header_size;
+ if (WARN_ON(vb2_plane_size(dst_buf, 0) < ctx->vpu_dst_fmt->header_size))
+ size_left = 0;
WARN_ON(pix_fmt->num_planes > 3);
- vepu_write_relaxed(vpu, ctx->jpeg_enc.bounce_buffer.dma,
+ vepu_write_relaxed(vpu, vb2_dma_contig_plane_dma_addr(dst_buf, 0) +
+ ctx->vpu_dst_fmt->header_size,
H1_REG_ADDR_OUTPUT_STREAM);
- vepu_write_relaxed(vpu, ctx->jpeg_enc.bounce_buffer.size,
- H1_REG_STR_BUF_LIMIT);
+ vepu_write_relaxed(vpu, size_left, H1_REG_STR_BUF_LIMIT);
if (pix_fmt->num_planes == 1) {
src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
@@ -112,7 +127,8 @@ int hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
H1_REG_ENC_CTRL);
hantro_h1_set_src_img_ctrl(vpu, ctx);
- hantro_h1_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
+ hantro_h1_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf,
+ &dst_buf->vb2_buf);
hantro_h1_jpeg_enc_set_qtable(vpu, jpeg_ctx.hw_luma_qtable,
jpeg_ctx.hw_chroma_qtable);
@@ -145,13 +161,6 @@ void hantro_h1_jpeg_enc_done(struct hantro_ctx *ctx)
u32 bytesused = vepu_read(vpu, H1_REG_STR_BUF_LIMIT) / 8;
struct vb2_v4l2_buffer *dst_buf = hantro_get_dst_buf(ctx);
- /*
- * TODO: Rework the JPEG encoder to eliminate the need
- * for a bounce buffer.
- */
- memcpy(vb2_plane_vaddr(&dst_buf->vb2_buf, 0) +
- ctx->vpu_dst_fmt->header_size,
- ctx->jpeg_enc.bounce_buffer.cpu, bytesused);
vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
ctx->vpu_dst_fmt->header_size + bytesused);
}
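To make the overfill arithmetic above concrete with hypothetical numbers:
.vidioc_s_fmt_vid_cap_mplane() aligns the source to macroblocks, and
.vidioc_s_selection() may then trim the destination, so a 1916x1080 crop
inside a 1920x1088 source gives:

    /* Hypothetical values, only to illustrate the register fields. */
    u32 src_w = 1920, src_h = 1088;	/* macroblock (16) aligned */
    u32 dst_w = 1916, dst_h = 1080;	/* 4-aligned width, any height */
    u32 overfill_r = src_w - dst_w;	/* 4 -> OVRFLR_D4 field = 4 / 4 = 1 */
    u32 overfill_b = src_h - dst_h;	/* 8 -> OVRFLB field = 8 */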
diff --git a/drivers/staging/media/hantro/hantro_h1_regs.h b/drivers/staging/media/hantro/hantro_h1_regs.h
index d6e9825bb5c7..30e7e7b920b5 100644
--- a/drivers/staging/media/hantro/hantro_h1_regs.h
+++ b/drivers/staging/media/hantro/hantro_h1_regs.h
@@ -47,7 +47,7 @@
#define H1_REG_IN_IMG_CTRL 0x03c
#define H1_REG_IN_IMG_CTRL_ROW_LEN(x) ((x) << 12)
#define H1_REG_IN_IMG_CTRL_OVRFLR_D4(x) ((x) << 10)
-#define H1_REG_IN_IMG_CTRL_OVRFLB_D4(x) ((x) << 6)
+#define H1_REG_IN_IMG_CTRL_OVRFLB(x) ((x) << 6)
#define H1_REG_IN_IMG_CTRL_FMT(x) ((x) << 2)
#define H1_REG_ENC_CTRL0 0x040
#define H1_REG_ENC_CTRL0_INIT_QP(x) ((x) << 26)
diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/staging/media/hantro/hantro_hw.h
index 4a19ae8940b9..ed018e293ba0 100644
--- a/drivers/staging/media/hantro/hantro_hw.h
+++ b/drivers/staging/media/hantro/hantro_hw.h
@@ -43,15 +43,6 @@ struct hantro_aux_buf {
unsigned long attrs;
};
-/**
- * struct hantro_jpeg_enc_hw_ctx
- *
- * @bounce_buffer: Bounce buffer
- */
-struct hantro_jpeg_enc_hw_ctx {
- struct hantro_aux_buf bounce_buffer;
-};
-
/* Max. number of entries in the DPB (HW limitation). */
#define HANTRO_H264_DPB_SIZE 16
@@ -299,6 +290,8 @@ enum hantro_enc_fmt {
ROCKCHIP_VPU_ENC_FMT_UYVY422 = 3,
};
+extern const struct hantro_variant imx8mm_vpu_g1_variant;
+extern const struct hantro_variant imx8mq_vpu_g1_variant;
extern const struct hantro_variant imx8mq_vpu_g2_variant;
extern const struct hantro_variant imx8mq_vpu_variant;
extern const struct hantro_variant px30_vpu_variant;
@@ -327,8 +320,6 @@ void hantro_g1_reset(struct hantro_ctx *ctx);
int hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx);
int rockchip_vpu2_jpeg_enc_run(struct hantro_ctx *ctx);
-int hantro_jpeg_enc_init(struct hantro_ctx *ctx);
-void hantro_jpeg_enc_exit(struct hantro_ctx *ctx);
void hantro_h1_jpeg_enc_done(struct hantro_ctx *ctx);
void rockchip_vpu2_jpeg_enc_done(struct hantro_ctx *ctx);
diff --git a/drivers/staging/media/hantro/hantro_jpeg.c b/drivers/staging/media/hantro/hantro_jpeg.c
index df62fbdff7c9..d07b1b449b61 100644
--- a/drivers/staging/media/hantro/hantro_jpeg.c
+++ b/drivers/staging/media/hantro/hantro_jpeg.c
@@ -6,21 +6,23 @@
* Copyright (C) Jean-Francois Moine (http://moinejf.free.fr)
* Copyright (C) 2014 Philipp Zabel, Pengutronix
*/
-#include <linux/dma-mapping.h>
+
+#include <linux/align.h>
+#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include "hantro_jpeg.h"
#include "hantro.h"
-#define LUMA_QUANT_OFF 7
-#define CHROMA_QUANT_OFF 72
-#define HEIGHT_OFF 141
-#define WIDTH_OFF 143
+#define LUMA_QUANT_OFF 25
+#define CHROMA_QUANT_OFF 90
+#define HEIGHT_OFF 159
+#define WIDTH_OFF 161
-#define HUFF_LUMA_DC_OFF 160
-#define HUFF_LUMA_AC_OFF 193
-#define HUFF_CHROMA_DC_OFF 376
-#define HUFF_CHROMA_AC_OFF 409
+#define HUFF_LUMA_DC_OFF 178
+#define HUFF_LUMA_AC_OFF 211
+#define HUFF_CHROMA_DC_OFF 394
+#define HUFF_CHROMA_AC_OFF 427
/* Default tables from JPEG ITU-T.81
* (ISO/IEC 10918-1) Annex K, tables K.1 and K.2
@@ -47,7 +49,7 @@ static const unsigned char chroma_q_table[] = {
0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63
};
-static const unsigned char zigzag[64] = {
+static const unsigned char zigzag[] = {
0, 1, 8, 16, 9, 2, 3, 10,
17, 24, 32, 25, 18, 11, 4, 5,
12, 19, 26, 33, 40, 48, 41, 34,
@@ -58,7 +60,7 @@ static const unsigned char zigzag[64] = {
53, 60, 61, 54, 47, 55, 62, 63
};
-static const u32 hw_reorder[64] = {
+static const u32 hw_reorder[] = {
0, 8, 16, 24, 1, 9, 17, 25,
32, 40, 48, 56, 33, 41, 49, 57,
2, 10, 18, 26, 3, 11, 19, 27,
@@ -140,10 +142,15 @@ static const unsigned char chroma_ac_table[] = {
* and we'll use fixed offsets to change the width, height
* quantization tables, etc.
*/
-static const unsigned char hantro_jpeg_header[JPEG_HEADER_SIZE] = {
+static const unsigned char hantro_jpeg_header[] = {
/* SOI */
0xff, 0xd8,
+ /* JFIF-APP0 */
+ 0xff, 0xe0, 0x00, 0x10, 0x4a, 0x46, 0x49, 0x46,
+ 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x00, 0x01,
+ 0x00, 0x00,
+
/* DQT */
0xff, 0xdb, 0x00, 0x84,
@@ -242,11 +249,29 @@ static const unsigned char hantro_jpeg_header[JPEG_HEADER_SIZE] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* COM */
+ 0xff, 0xfe, 0x00, 0x03, 0x00,
+
/* SOS */
0xff, 0xda, 0x00, 0x0c, 0x03, 0x01, 0x00, 0x02,
0x11, 0x03, 0x11, 0x00, 0x3f, 0x00,
};
+/*
+ * JPEG_HEADER_SIZE is used in other parts of the driver in lieu of
+ * "sizeof(hantro_jpeg_header)". The two must be equal.
+ */
+static_assert(sizeof(hantro_jpeg_header) == JPEG_HEADER_SIZE);
+
+/*
+ * hantro_jpeg_header is padded with a COM segment, so that the payload
+ * of the SOS segment (the entropy-encoded image scan), which should
+ * trail the whole header, is 8-byte aligned for the hardware to write
+ * to directly.
+ */
+static_assert(IS_ALIGNED(sizeof(hantro_jpeg_header), 8),
+ "Hantro JPEG header size needs to be 8-byte aligned.");
+
static unsigned char jpeg_scale_qp(const unsigned char qp, int scale)
{
unsigned int temp;
@@ -267,7 +292,10 @@ jpeg_scale_quant_table(unsigned char *file_q_tab,
{
int i;
- for (i = 0; i < 64; i++) {
+ BUILD_BUG_ON(ARRAY_SIZE(zigzag) != JPEG_QUANT_SIZE);
+ BUILD_BUG_ON(ARRAY_SIZE(hw_reorder) != JPEG_QUANT_SIZE);
+
+ for (i = 0; i < JPEG_QUANT_SIZE; i++) {
file_q_tab[i] = jpeg_scale_qp(tab[zigzag[i]], scale);
reordered_q_tab[i] = jpeg_scale_qp(tab[hw_reorder[i]], scale);
}
@@ -286,6 +314,11 @@ static void jpeg_set_quality(struct hantro_jpeg_ctx *ctx)
else
scale = 200 - 2 * ctx->quality;
+ BUILD_BUG_ON(ARRAY_SIZE(luma_q_table) != JPEG_QUANT_SIZE);
+ BUILD_BUG_ON(ARRAY_SIZE(chroma_q_table) != JPEG_QUANT_SIZE);
+ BUILD_BUG_ON(ARRAY_SIZE(ctx->hw_luma_qtable) != JPEG_QUANT_SIZE);
+ BUILD_BUG_ON(ARRAY_SIZE(ctx->hw_chroma_qtable) != JPEG_QUANT_SIZE);
+
jpeg_scale_quant_table(ctx->buffer + LUMA_QUANT_OFF,
ctx->hw_luma_qtable, luma_q_table, scale);
jpeg_scale_quant_table(ctx->buffer + CHROMA_QUANT_OFF,
@@ -313,30 +346,3 @@ void hantro_jpeg_header_assemble(struct hantro_jpeg_ctx *ctx)
jpeg_set_quality(ctx);
}
-
-int hantro_jpeg_enc_init(struct hantro_ctx *ctx)
-{
- ctx->jpeg_enc.bounce_buffer.size =
- ctx->dst_fmt.plane_fmt[0].sizeimage -
- ctx->vpu_dst_fmt->header_size;
-
- ctx->jpeg_enc.bounce_buffer.cpu =
- dma_alloc_attrs(ctx->dev->dev,
- ctx->jpeg_enc.bounce_buffer.size,
- &ctx->jpeg_enc.bounce_buffer.dma,
- GFP_KERNEL,
- DMA_ATTR_ALLOC_SINGLE_PAGES);
- if (!ctx->jpeg_enc.bounce_buffer.cpu)
- return -ENOMEM;
-
- return 0;
-}
-
-void hantro_jpeg_enc_exit(struct hantro_ctx *ctx)
-{
- dma_free_attrs(ctx->dev->dev,
- ctx->jpeg_enc.bounce_buffer.size,
- ctx->jpeg_enc.bounce_buffer.cpu,
- ctx->jpeg_enc.bounce_buffer.dma,
- DMA_ATTR_ALLOC_SINGLE_PAGES);
-}
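As a quick consistency check on the new layout: the previous 601-byte header
grows by the 18-byte JFIF-APP0 segment and the 5-byte COM padding segment
added above, 601 + 18 + 5 = 624 = 78 * 8, which matches the JPEG_HEADER_SIZE
bump in the next hunk and satisfies the 8-byte alignment the static_asserts
enforce.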
diff --git a/drivers/staging/media/hantro/hantro_jpeg.h b/drivers/staging/media/hantro/hantro_jpeg.h
index 035ab25b803f..0b49d0b82caa 100644
--- a/drivers/staging/media/hantro/hantro_jpeg.h
+++ b/drivers/staging/media/hantro/hantro_jpeg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ */
-#define JPEG_HEADER_SIZE 601
+#define JPEG_HEADER_SIZE 624
#define JPEG_QUANT_SIZE 64
struct hantro_jpeg_ctx {
diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
index e595905b3bd7..67148ba346f5 100644
--- a/drivers/staging/media/hantro/hantro_v4l2.c
+++ b/drivers/staging/media/hantro/hantro_v4l2.c
@@ -554,6 +554,80 @@ vidioc_s_fmt_cap_mplane(struct file *file, void *priv, struct v4l2_format *f)
return hantro_set_fmt_cap(fh_to_ctx(priv), &f->fmt.pix_mp);
}
+static int vidioc_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+
+ /* Crop only supported on source. */
+ if (!ctx->is_encoder ||
+ sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = ctx->src_fmt.width;
+ sel->r.height = ctx->src_fmt.height;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = ctx->dst_fmt.width;
+ sel->r.height = ctx->dst_fmt.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_rect *rect = &sel->r;
+ struct vb2_queue *vq;
+
+ /* Crop only supported on source. */
+ if (!ctx->is_encoder ||
+ sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ /* Change not allowed if the queue is streaming. */
+ vq = v4l2_m2m_get_src_vq(ctx->fh.m2m_ctx);
+ if (vb2_is_streaming(vq))
+ return -EBUSY;
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ /*
+ * We do not support offsets, and we can crop only inside
+ * right-most or bottom-most macroblocks.
+ */
+ if (rect->left != 0 || rect->top != 0 ||
+ round_up(rect->width, MB_DIM) != ctx->src_fmt.width ||
+ round_up(rect->height, MB_DIM) != ctx->src_fmt.height) {
+ /* Default to full frame for incorrect settings. */
+ rect->left = 0;
+ rect->top = 0;
+ rect->width = ctx->src_fmt.width;
+ rect->height = ctx->src_fmt.height;
+ } else {
+ /* We support widths aligned to 4 pixels and arbitrary heights. */
+ rect->width = round_up(rect->width, 4);
+ }
+
+ ctx->dst_fmt.width = rect->width;
+ ctx->dst_fmt.height = rect->height;
+
+ return 0;
+}
+
const struct v4l2_ioctl_ops hantro_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_framesizes = vidioc_enum_framesizes,
@@ -580,6 +654,9 @@ const struct v4l2_ioctl_ops hantro_ioctl_ops = {
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_g_selection = vidioc_g_selection,
+ .vidioc_s_selection = vidioc_s_selection,
};
static int
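A hedged sketch of how userspace would exercise the new crop ioctls; the
dimensions and file descriptor handling are illustrative:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Trim a macroblock-aligned 1920x1088 OUTPUT queue to 1916x1080. */
    static int set_encoder_crop(int fd)
    {
    	struct v4l2_selection sel;

    	memset(&sel, 0, sizeof(sel));
    	sel.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;	/* crop only on the source */
    	sel.target = V4L2_SEL_TGT_CROP;
    	sel.r.left = 0;		/* non-zero offsets fall back to full frame */
    	sel.r.top = 0;
    	sel.r.width = 1916;	/* widths are kept 4-aligned by the driver */
    	sel.r.height = 1080;

    	return ioctl(fd, VIDIOC_S_SELECTION, &sel);
    }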
diff --git a/drivers/staging/media/hantro/imx8m_vpu_hw.c b/drivers/staging/media/hantro/imx8m_vpu_hw.c
index f5991b8e553a..9802508bade2 100644
--- a/drivers/staging/media/hantro/imx8m_vpu_hw.c
+++ b/drivers/staging/media/hantro/imx8m_vpu_hw.c
@@ -205,13 +205,6 @@ static void imx8m_vpu_g1_reset(struct hantro_ctx *ctx)
imx8m_soft_reset(vpu, RESET_G1);
}
-static void imx8m_vpu_g2_reset(struct hantro_ctx *ctx)
-{
- struct hantro_dev *vpu = ctx->dev;
-
- imx8m_soft_reset(vpu, RESET_G2);
-}
-
/*
* Supported codec ops.
*/
@@ -237,17 +230,33 @@ static const struct hantro_codec_ops imx8mq_vpu_codec_ops[] = {
},
};
+static const struct hantro_codec_ops imx8mq_vpu_g1_codec_ops[] = {
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = hantro_g1_mpeg2_dec_run,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = hantro_g1_vp8_dec_run,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+ [HANTRO_MODE_H264_DEC] = {
+ .run = hantro_g1_h264_dec_run,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
+};
+
static const struct hantro_codec_ops imx8mq_vpu_g2_codec_ops[] = {
[HANTRO_MODE_HEVC_DEC] = {
.run = hantro_g2_hevc_dec_run,
- .reset = imx8m_vpu_g2_reset,
.init = hantro_hevc_dec_init,
.exit = hantro_hevc_dec_exit,
},
[HANTRO_MODE_VP9_DEC] = {
.run = hantro_g2_vp9_dec_run,
.done = hantro_g2_vp9_dec_done,
- .reset = imx8m_vpu_g2_reset,
.init = hantro_vp9_dec_init,
.exit = hantro_vp9_dec_exit,
},
@@ -267,6 +276,8 @@ static const struct hantro_irq imx8mq_g2_irqs[] = {
static const char * const imx8mq_clk_names[] = { "g1", "g2", "bus" };
static const char * const imx8mq_reg_names[] = { "g1", "g2", "ctrl" };
+static const char * const imx8mq_g1_clk_names[] = { "g1" };
+static const char * const imx8mq_g2_clk_names[] = { "g2" };
const struct hantro_variant imx8mq_vpu_variant = {
.dec_fmts = imx8m_vpu_dec_fmts,
@@ -287,6 +298,21 @@ const struct hantro_variant imx8mq_vpu_variant = {
.num_regs = ARRAY_SIZE(imx8mq_reg_names)
};
+const struct hantro_variant imx8mq_vpu_g1_variant = {
+ .dec_fmts = imx8m_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(imx8m_vpu_dec_fmts),
+ .postproc_fmts = imx8m_vpu_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(imx8m_vpu_postproc_fmts),
+ .postproc_ops = &hantro_g1_postproc_ops,
+ .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
+ HANTRO_H264_DECODER,
+ .codec_ops = imx8mq_vpu_g1_codec_ops,
+ .irqs = imx8mq_irqs,
+ .num_irqs = ARRAY_SIZE(imx8mq_irqs),
+ .clk_names = imx8mq_g1_clk_names,
+ .num_clocks = ARRAY_SIZE(imx8mq_g1_clk_names),
+};
+
const struct hantro_variant imx8mq_vpu_g2_variant = {
.dec_offset = 0x0,
.dec_fmts = imx8m_vpu_g2_dec_fmts,
@@ -296,10 +322,20 @@ const struct hantro_variant imx8mq_vpu_g2_variant = {
.postproc_ops = &hantro_g2_postproc_ops,
.codec = HANTRO_HEVC_DECODER | HANTRO_VP9_DECODER,
.codec_ops = imx8mq_vpu_g2_codec_ops,
- .init = imx8mq_vpu_hw_init,
- .runtime_resume = imx8mq_runtime_resume,
.irqs = imx8mq_g2_irqs,
.num_irqs = ARRAY_SIZE(imx8mq_g2_irqs),
- .clk_names = imx8mq_clk_names,
- .num_clocks = ARRAY_SIZE(imx8mq_clk_names),
+ .clk_names = imx8mq_g2_clk_names,
+ .num_clocks = ARRAY_SIZE(imx8mq_g2_clk_names),
+};
+
+const struct hantro_variant imx8mm_vpu_g1_variant = {
+ .dec_fmts = imx8m_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(imx8m_vpu_dec_fmts),
+ .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
+ HANTRO_H264_DECODER,
+ .codec_ops = imx8mq_vpu_g1_codec_ops,
+ .irqs = imx8mq_irqs,
+ .num_irqs = ARRAY_SIZE(imx8mq_irqs),
+ .clk_names = imx8mq_g1_clk_names,
+ .num_clocks = ARRAY_SIZE(imx8mq_g1_clk_names),
};
diff --git a/drivers/staging/media/hantro/rockchip_vpu2_hw_jpeg_enc.c b/drivers/staging/media/hantro/rockchip_vpu2_hw_jpeg_enc.c
index 4df16f59fb97..8395c4d48dd0 100644
--- a/drivers/staging/media/hantro/rockchip_vpu2_hw_jpeg_enc.c
+++ b/drivers/staging/media/hantro/rockchip_vpu2_hw_jpeg_enc.c
@@ -35,18 +35,23 @@
static void rockchip_vpu2_set_src_img_ctrl(struct hantro_dev *vpu,
struct hantro_ctx *ctx)
{
- struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
+ u32 overfill_r, overfill_b;
u32 reg;
/*
- * The pix fmt width/height are already macroblock aligned
- * by .vidioc_s_fmt_vid_cap_mplane() callback
+ * The format width and height are already macroblock aligned
+ * by .vidioc_s_fmt_vid_cap_mplane() callback. Destination
+ * format width and height can be further modified by
+ * .vidioc_s_selection(), and the width is 4-aligned.
*/
- reg = VEPU_REG_IN_IMG_CTRL_ROW_LEN(pix_fmt->width);
+ overfill_r = ctx->src_fmt.width - ctx->dst_fmt.width;
+ overfill_b = ctx->src_fmt.height - ctx->dst_fmt.height;
+
+ reg = VEPU_REG_IN_IMG_CTRL_ROW_LEN(ctx->src_fmt.width);
vepu_write_relaxed(vpu, reg, VEPU_REG_INPUT_LUMA_INFO);
- reg = VEPU_REG_IN_IMG_CTRL_OVRFLR_D4(0) |
- VEPU_REG_IN_IMG_CTRL_OVRFLB(0);
+ reg = VEPU_REG_IN_IMG_CTRL_OVRFLR_D4(overfill_r / 4) |
+ VEPU_REG_IN_IMG_CTRL_OVRFLB(overfill_b);
/*
* This register controls the input crop, as the offset
* from the right/bottom within the last macroblock. The offset from the
@@ -61,17 +66,23 @@ static void rockchip_vpu2_set_src_img_ctrl(struct hantro_dev *vpu,
static void rockchip_vpu2_jpeg_enc_set_buffers(struct hantro_dev *vpu,
struct hantro_ctx *ctx,
- struct vb2_buffer *src_buf)
+ struct vb2_buffer *src_buf,
+ struct vb2_buffer *dst_buf)
{
struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
dma_addr_t src[3];
+ u32 size_left;
+
+ size_left = vb2_plane_size(dst_buf, 0) - ctx->vpu_dst_fmt->header_size;
+ if (WARN_ON(vb2_plane_size(dst_buf, 0) < ctx->vpu_dst_fmt->header_size))
+ size_left = 0;
WARN_ON(pix_fmt->num_planes > 3);
- vepu_write_relaxed(vpu, ctx->jpeg_enc.bounce_buffer.dma,
+ vepu_write_relaxed(vpu, vb2_dma_contig_plane_dma_addr(dst_buf, 0) +
+ ctx->vpu_dst_fmt->header_size,
VEPU_REG_ADDR_OUTPUT_STREAM);
- vepu_write_relaxed(vpu, ctx->jpeg_enc.bounce_buffer.size,
- VEPU_REG_STR_BUF_LIMIT);
+ vepu_write_relaxed(vpu, size_left, VEPU_REG_STR_BUF_LIMIT);
if (pix_fmt->num_planes == 1) {
src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
@@ -132,6 +143,9 @@ int rockchip_vpu2_jpeg_enc_run(struct hantro_ctx *ctx)
memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
+ if (!jpeg_ctx.buffer)
+ return -ENOMEM;
+
jpeg_ctx.width = ctx->dst_fmt.width;
jpeg_ctx.height = ctx->dst_fmt.height;
jpeg_ctx.quality = ctx->jpeg_quality;
@@ -142,7 +156,8 @@ int rockchip_vpu2_jpeg_enc_run(struct hantro_ctx *ctx)
VEPU_REG_ENCODE_START);
rockchip_vpu2_set_src_img_ctrl(vpu, ctx);
- rockchip_vpu2_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
+ rockchip_vpu2_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf,
+ &dst_buf->vb2_buf);
rockchip_vpu2_jpeg_enc_set_qtable(vpu, jpeg_ctx.hw_luma_qtable,
jpeg_ctx.hw_chroma_qtable);
@@ -177,13 +192,6 @@ void rockchip_vpu2_jpeg_enc_done(struct hantro_ctx *ctx)
u32 bytesused = vepu_read(vpu, VEPU_REG_STR_BUF_LIMIT) / 8;
struct vb2_v4l2_buffer *dst_buf = hantro_get_dst_buf(ctx);
- /*
- * TODO: Rework the JPEG encoder to eliminate the need
- * for a bounce buffer.
- */
- memcpy(vb2_plane_vaddr(&dst_buf->vb2_buf, 0) +
- ctx->vpu_dst_fmt->header_size,
- ctx->jpeg_enc.bounce_buffer.cpu, bytesused);
vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
ctx->vpu_dst_fmt->header_size + bytesused);
}
diff --git a/drivers/staging/media/hantro/rockchip_vpu_hw.c b/drivers/staging/media/hantro/rockchip_vpu_hw.c
index c203b606e6e7..163cf92eafca 100644
--- a/drivers/staging/media/hantro/rockchip_vpu_hw.c
+++ b/drivers/staging/media/hantro/rockchip_vpu_hw.c
@@ -343,9 +343,7 @@ static const struct hantro_codec_ops rk3066_vpu_codec_ops[] = {
[HANTRO_MODE_JPEG_ENC] = {
.run = hantro_h1_jpeg_enc_run,
.reset = rockchip_vpu1_enc_reset,
- .init = hantro_jpeg_enc_init,
.done = hantro_h1_jpeg_enc_done,
- .exit = hantro_jpeg_enc_exit,
},
[HANTRO_MODE_H264_DEC] = {
.run = hantro_g1_h264_dec_run,
@@ -371,9 +369,7 @@ static const struct hantro_codec_ops rk3288_vpu_codec_ops[] = {
[HANTRO_MODE_JPEG_ENC] = {
.run = hantro_h1_jpeg_enc_run,
.reset = rockchip_vpu1_enc_reset,
- .init = hantro_jpeg_enc_init,
.done = hantro_h1_jpeg_enc_done,
- .exit = hantro_jpeg_enc_exit,
},
[HANTRO_MODE_H264_DEC] = {
.run = hantro_g1_h264_dec_run,
@@ -399,9 +395,7 @@ static const struct hantro_codec_ops rk3399_vpu_codec_ops[] = {
[HANTRO_MODE_JPEG_ENC] = {
.run = rockchip_vpu2_jpeg_enc_run,
.reset = rockchip_vpu2_enc_reset,
- .init = hantro_jpeg_enc_init,
.done = rockchip_vpu2_jpeg_enc_done,
- .exit = hantro_jpeg_enc_exit,
},
[HANTRO_MODE_H264_DEC] = {
.run = rockchip_vpu2_h264_dec_run,
diff --git a/drivers/staging/media/hantro/sunxi_vpu_hw.c b/drivers/staging/media/hantro/sunxi_vpu_hw.c
index 90633406c4eb..c0edd5856a0c 100644
--- a/drivers/staging/media/hantro/sunxi_vpu_hw.c
+++ b/drivers/staging/media/hantro/sunxi_vpu_hw.c
@@ -29,10 +29,10 @@ static const struct hantro_fmt sunxi_vpu_dec_fmts[] = {
.frmsize = {
.min_width = 48,
.max_width = 3840,
- .step_width = MB_DIM,
+ .step_width = 32,
.min_height = 48,
.max_height = 2160,
- .step_height = MB_DIM,
+ .step_height = 32,
},
},
};
diff --git a/drivers/staging/media/imx/Kconfig b/drivers/staging/media/imx/Kconfig
index c3bf433ba3e3..0bacac302d7e 100644
--- a/drivers/staging/media/imx/Kconfig
+++ b/drivers/staging/media/imx/Kconfig
@@ -4,7 +4,7 @@ config VIDEO_IMX_MEDIA
depends on ARCH_MXC || COMPILE_TEST
depends on HAS_DMA
depends on VIDEO_DEV
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
select MEDIA_CONTROLLER
select V4L2_FWNODE
select V4L2_MEM2MEM_DEV
diff --git a/drivers/staging/media/imx/Makefile b/drivers/staging/media/imx/Makefile
index 19c2fc54d424..d82be898145b 100644
--- a/drivers/staging/media/imx/Makefile
+++ b/drivers/staging/media/imx/Makefile
@@ -15,5 +15,4 @@ obj-$(CONFIG_VIDEO_IMX_CSI) += imx6-media-csi.o
obj-$(CONFIG_VIDEO_IMX_CSI) += imx6-mipi-csi2.o
obj-$(CONFIG_VIDEO_IMX7_CSI) += imx7-media-csi.o
-obj-$(CONFIG_VIDEO_IMX7_CSI) += imx7-mipi-csis.o
obj-$(CONFIG_VIDEO_IMX7_CSI) += imx8mq-mipi-csi2.o
diff --git a/drivers/staging/media/imx/TODO b/drivers/staging/media/imx/TODO
index 06c94f20ecf8..5d3a337c8702 100644
--- a/drivers/staging/media/imx/TODO
+++ b/drivers/staging/media/imx/TODO
@@ -27,3 +27,28 @@
- i.MX7: all of the above, since it uses the imx media core
- i.MX7: use Frame Interval Monitor
+
+- imx7-media-csi: Restrict the supported formats list to the SoC version.
+
+ The imx7 CSI bridge can be configured to sample pixel components from the Rx
+ queue in single (8bpp) or double (16bpp) component modes. Image format
+ variants with different sample sizes (i.e. YUYV_2X8 vs YUYV_1X16) determine the
+ pixel component sampling size per clock cycle and the packing mode
+ (see imx7_csi_configure() for details).
+
+ As the imx7 CSI bridge can be interfaced with different IP blocks depending on
+ the SoC model it is integrated on, the Rx queue sampling size should match
+ the size of the samples transferred by the transmitting IP block.
+
+ To avoid mis-configurations of the capture pipeline, the enumeration of the
+ supported formats should be restricted to match the pixel source transmitting
+ mode.
+
+ Example: i.MX8MM SoC integrates the CSI bridge with the Samsung CSIS CSI-2
+ receiver which operates in dual pixel sampling mode. The CSI bridge should
+ only expose the 1X16 formats variant which instructs it to operate in dual
+ pixel sampling mode. When the CSI bridge is instead integrated on an i.MX7,
+ which supports both serial and parallel input, it should expose both variants.
+
+ This currently only applies to YUYV formats, but other formats might need
+ to be handled in the same way.
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index bd7f156f2d52..b2b1f4dd41d7 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -718,9 +718,10 @@ static int csi_setup(struct csi_priv *priv)
/* compose mbus_config from the upstream endpoint */
mbus_cfg.type = priv->upstream_ep.bus_type;
- mbus_cfg.flags = is_parallel_bus(&priv->upstream_ep) ?
- priv->upstream_ep.bus.parallel.flags :
- priv->upstream_ep.bus.mipi_csi2.flags;
+ if (is_parallel_bus(&priv->upstream_ep))
+ mbus_cfg.bus.parallel = priv->upstream_ep.bus.parallel;
+ else
+ mbus_cfg.bus.mipi_csi2 = priv->upstream_ep.bus.mipi_csi2;
if_fmt = *infmt;
crop = priv->crop;
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
index 558b256ac935..c4cb558a85c6 100644
--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -303,7 +303,6 @@ static void csi2ipu_gasket_init(struct csi2_dev *csi2)
static int csi2_get_active_lanes(struct csi2_dev *csi2, unsigned int *lanes)
{
struct v4l2_mbus_config mbus_config = { 0 };
- unsigned int num_lanes = UINT_MAX;
int ret;
*lanes = csi2->data_lanes;
@@ -326,32 +325,14 @@ static int csi2_get_active_lanes(struct csi2_dev *csi2, unsigned int *lanes)
return -EINVAL;
}
- switch (mbus_config.flags & V4L2_MBUS_CSI2_LANES) {
- case V4L2_MBUS_CSI2_1_LANE:
- num_lanes = 1;
- break;
- case V4L2_MBUS_CSI2_2_LANE:
- num_lanes = 2;
- break;
- case V4L2_MBUS_CSI2_3_LANE:
- num_lanes = 3;
- break;
- case V4L2_MBUS_CSI2_4_LANE:
- num_lanes = 4;
- break;
- default:
- num_lanes = csi2->data_lanes;
- break;
- }
-
- if (num_lanes > csi2->data_lanes) {
+ if (mbus_config.bus.mipi_csi2.num_data_lanes > csi2->data_lanes) {
dev_err(csi2->dev,
"Unsupported mbus config: too many data lanes %u\n",
- num_lanes);
+ mbus_config.bus.mipi_csi2.num_data_lanes);
return -EINVAL;
}
- *lanes = num_lanes;
+ *lanes = mbus_config.bus.mipi_csi2.num_data_lanes;
return 0;
}
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
index 2288dadb2683..8467a1491048 100644
--- a/drivers/staging/media/imx/imx7-media-csi.c
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -122,6 +123,10 @@
#define BIT_DATA_FROM_MIPI BIT(22)
#define BIT_MIPI_YU_SWAP BIT(21)
#define BIT_MIPI_DOUBLE_CMPNT BIT(20)
+#define BIT_MASK_OPTION_FIRST_FRAME (0 << 18)
+#define BIT_MASK_OPTION_CSI_EN (1 << 18)
+#define BIT_MASK_OPTION_SECOND_FRAME (2 << 18)
+#define BIT_MASK_OPTION_ON_DATA (3 << 18)
#define BIT_BASEADDR_CHG_ERR_EN BIT(9)
#define BIT_BASEADDR_SWITCH_SEL BIT(5)
#define BIT_BASEADDR_SWITCH_EN BIT(4)
@@ -154,6 +159,11 @@
#define CSI_CSICR18 0x48
#define CSI_CSICR19 0x4c
+enum imx_csi_model {
+ IMX7_CSI_IMX7 = 0,
+ IMX7_CSI_IMX8MQ,
+};
+
struct imx7_csi {
struct device *dev;
struct v4l2_subdev sd;
@@ -189,6 +199,8 @@ struct imx7_csi {
bool is_csi2;
struct completion last_eof_completion;
+
+ enum imx_csi_model model;
};
static struct imx7_csi *
@@ -486,16 +498,40 @@ static void imx7_csi_configure(struct imx7_csi *csi)
cr3 |= BIT_TWO_8BIT_SENSOR;
cr18 |= BIT_MIPI_DATA_FORMAT_RAW14;
break;
+
/*
- * CSI-2 sources are supposed to use the 1X16 formats, but not
- * all of them comply. Support both variants.
+ * The CSI bridge has a 16-bit input bus. Depending on the
+ * connected source, data may be transmitted with 8 or 10 bits
+ * per clock sample (in bits [9:2] or [9:0] respectively) or
+ * with 16 bits per clock sample (in bits [15:0]). The data is
+ * then packed into a 32-bit FIFO (as shown in figure 13-11 of
+ * the i.MX8MM reference manual rev. 3).
+ *
+ * The data packing in a 32-bit FIFO input word is controlled by
+ * the CR3 TWO_8BIT_SENSOR field (also known as SENSOR_16BITS in
+ * the i.MX8MM reference manual). When set to 0, data packing
+ * groups four 8-bit input samples (bits [9:2]). When set to 1,
+ * data packing groups two 16-bit input samples (bits [15:0]).
+ *
+ * The register field CR18 MIPI_DOUBLE_CMPNT also needs to be
+ * configured according to the input format for YUV 4:2:2 data.
+ * The field controls the gasket between the CSI-2 receiver and
+ * the CSI bridge. On i.MX7 and i.MX8MM, the field must be set
+ * to 1 when the CSIS outputs 16-bit samples. On i.MX8MQ, the
+ * gasket ignores the MIPI_DOUBLE_CMPNT bit and YUV 4:2:2 always
+ * uses 16-bit samples. Setting MIPI_DOUBLE_CMPNT in that case
+ * has no effect, but doesn't cause any issue.
*/
case MEDIA_BUS_FMT_UYVY8_2X8:
- case MEDIA_BUS_FMT_UYVY8_1X16:
case MEDIA_BUS_FMT_YUYV8_2X8:
- case MEDIA_BUS_FMT_YUYV8_1X16:
cr18 |= BIT_MIPI_DATA_FORMAT_YUV422_8B;
break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ cr3 |= BIT_TWO_8BIT_SENSOR;
+ cr18 |= BIT_MIPI_DATA_FORMAT_YUV422_8B |
+ BIT_MIPI_DOUBLE_CMPNT;
+ break;
}
}
@@ -537,6 +573,16 @@ static void imx7_csi_deinit(struct imx7_csi *csi,
clk_disable_unprepare(csi->mclk);
}
+static void imx7_csi_baseaddr_switch_on_second_frame(struct imx7_csi *csi)
+{
+ u32 cr18 = imx7_csi_reg_read(csi, CSI_CSICR18);
+
+ cr18 |= BIT_BASEADDR_SWITCH_EN | BIT_BASEADDR_SWITCH_SEL |
+ BIT_BASEADDR_CHG_ERR_EN;
+ cr18 |= BIT_MASK_OPTION_SECOND_FRAME;
+ imx7_csi_reg_write(csi, cr18, CSI_CSICR18);
+}
+
static void imx7_csi_enable(struct imx7_csi *csi)
{
/* Clear the Rx FIFO and reflash the DMA controller. */
@@ -552,6 +598,9 @@ static void imx7_csi_enable(struct imx7_csi *csi)
/* Enable the RxFIFO DMA and the CSI. */
imx7_csi_dmareq_rff_enable(csi);
imx7_csi_hw_enable(csi);
+
+ if (csi->model == IMX7_CSI_IMX8MQ)
+ imx7_csi_baseaddr_switch_on_second_frame(csi);
}
static void imx7_csi_disable(struct imx7_csi *csi)
@@ -1155,6 +1204,8 @@ static int imx7_csi_probe(struct platform_device *pdev)
if (IS_ERR(csi->regbase))
return PTR_ERR(csi->regbase);
+ csi->model = (enum imx_csi_model)(uintptr_t)of_device_get_match_data(&pdev->dev);
+
spin_lock_init(&csi->irqlock);
mutex_init(&csi->lock);
@@ -1249,8 +1300,9 @@ static int imx7_csi_remove(struct platform_device *pdev)
}
static const struct of_device_id imx7_csi_of_match[] = {
- { .compatible = "fsl,imx7-csi" },
- { .compatible = "fsl,imx6ul-csi" },
+ { .compatible = "fsl,imx8mq-csi", .data = (void *)IMX7_CSI_IMX8MQ },
+ { .compatible = "fsl,imx7-csi", .data = (void *)IMX7_CSI_IMX7 },
+ { .compatible = "fsl,imx6ul-csi", .data = (void *)IMX7_CSI_IMX7 },
{ },
};
MODULE_DEVICE_TABLE(of, imx7_csi_of_match);
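
The per-variant behaviour above hinges on the OF match table carrying the model enum in its .data pointer, which probe recovers through of_device_get_match_data(). A reduced sketch of the same pattern, with hypothetical names (this is not the imx7-csi code):

#include <linux/of_device.h>
#include <linux/platform_device.h>

enum foo_model { FOO_V1 = 0, FOO_V2 };

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-v2", .data = (void *)FOO_V2 },
	{ .compatible = "vendor,foo-v1", .data = (void *)FOO_V1 },
	{ },
};

static int foo_probe(struct platform_device *pdev)
{
	/* The enum travels as the pointer value itself, so the double
	 * cast through uintptr_t avoids any per-variant allocation. */
	enum foo_model model =
		(enum foo_model)(uintptr_t)of_device_get_match_data(&pdev->dev);

	dev_info(&pdev->dev, "probed model %d\n", model);
	return 0;
}
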
diff --git a/drivers/staging/media/imx/imx8mq-mipi-csi2.c b/drivers/staging/media/imx/imx8mq-mipi-csi2.c
index 7adbdd14daa9..83194328d010 100644
--- a/drivers/staging/media/imx/imx8mq-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx8mq-mipi-csi2.c
@@ -117,7 +117,7 @@ struct csi_state {
struct v4l2_async_notifier notifier;
struct v4l2_subdev *src_sd;
- struct v4l2_fwnode_bus_mipi_csi2 bus;
+ struct v4l2_mbus_config_mipi_csi2 bus;
struct mutex lock; /* Protect csi2_fmt, format_mbus, state, hs_settle */
const struct csi2_pix_format *csi2_fmt;
@@ -200,12 +200,13 @@ static const struct csi2_pix_format imx8mq_mipi_csi_formats[] = {
}, {
.code = MEDIA_BUS_FMT_SRGGB14_1X14,
.width = 14,
- }, {
+ },
/* YUV formats */
- .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ {
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
.width = 16,
}, {
- .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
.width = 16,
}
};
@@ -398,9 +399,6 @@ static int imx8mq_mipi_csi_s_stream(struct v4l2_subdev *sd, int enable)
struct csi_state *state = mipi_sd_to_csi2_state(sd);
int ret = 0;
- imx8mq_mipi_csi_write(state, CSI2RX_IRQ_MASK,
- CSI2RX_IRQ_MASK_ULPS_STATUS_CHANGE);
-
if (enable) {
ret = pm_runtime_resume_and_get(state->dev);
if (ret < 0)
@@ -696,11 +694,10 @@ err_parse:
* Suspend/resume
*/
-static int imx8mq_mipi_csi_pm_suspend(struct device *dev, bool runtime)
+static void imx8mq_mipi_csi_pm_suspend(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct csi_state *state = mipi_sd_to_csi2_state(sd);
- int ret = 0;
mutex_lock(&state->lock);
@@ -708,36 +705,19 @@ static int imx8mq_mipi_csi_pm_suspend(struct device *dev, bool runtime)
imx8mq_mipi_csi_stop_stream(state);
imx8mq_mipi_csi_clk_disable(state);
state->state &= ~ST_POWERED;
- if (!runtime)
- state->state |= ST_SUSPENDED;
}
mutex_unlock(&state->lock);
-
- ret = icc_set_bw(state->icc_path, 0, 0);
- if (ret)
- dev_err(dev, "icc_set_bw failed with %d\n", ret);
-
- return ret ? -EAGAIN : 0;
}
-static int imx8mq_mipi_csi_pm_resume(struct device *dev, bool runtime)
+static int imx8mq_mipi_csi_pm_resume(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct csi_state *state = mipi_sd_to_csi2_state(sd);
int ret = 0;
- ret = icc_set_bw(state->icc_path, 0, state->icc_path_bw);
- if (ret) {
- dev_err(dev, "icc_set_bw failed with %d\n", ret);
- return ret;
- }
-
mutex_lock(&state->lock);
- if (!runtime && !(state->state & ST_SUSPENDED))
- goto unlock;
-
if (!(state->state & ST_POWERED)) {
state->state |= ST_POWERED;
ret = imx8mq_mipi_csi_clk_enable(state);
@@ -758,22 +738,55 @@ unlock:
static int __maybe_unused imx8mq_mipi_csi_suspend(struct device *dev)
{
- return imx8mq_mipi_csi_pm_suspend(dev, false);
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct csi_state *state = mipi_sd_to_csi2_state(sd);
+
+ imx8mq_mipi_csi_pm_suspend(dev);
+
+ state->state |= ST_SUSPENDED;
+
+ return 0;
}
static int __maybe_unused imx8mq_mipi_csi_resume(struct device *dev)
{
- return imx8mq_mipi_csi_pm_resume(dev, false);
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct csi_state *state = mipi_sd_to_csi2_state(sd);
+
+ if (!(state->state & ST_SUSPENDED))
+ return 0;
+
+ return imx8mq_mipi_csi_pm_resume(dev);
}
static int __maybe_unused imx8mq_mipi_csi_runtime_suspend(struct device *dev)
{
- return imx8mq_mipi_csi_pm_suspend(dev, true);
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct csi_state *state = mipi_sd_to_csi2_state(sd);
+ int ret;
+
+ imx8mq_mipi_csi_pm_suspend(dev);
+
+ ret = icc_set_bw(state->icc_path, 0, 0);
+ if (ret)
+ dev_err(dev, "icc_set_bw failed with %d\n", ret);
+
+ return ret;
}
static int __maybe_unused imx8mq_mipi_csi_runtime_resume(struct device *dev)
{
- return imx8mq_mipi_csi_pm_resume(dev, true);
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct csi_state *state = mipi_sd_to_csi2_state(sd);
+ int ret;
+
+ ret = icc_set_bw(state->icc_path, 0, state->icc_path_bw);
+ if (ret) {
+ dev_err(dev, "icc_set_bw failed with %d\n", ret);
+ return ret;
+ }
+
+ return imx8mq_mipi_csi_pm_resume(dev);
}
static const struct dev_pm_ops imx8mq_mipi_csi_pm_ops = {
@@ -921,7 +934,7 @@ static int imx8mq_mipi_csi_probe(struct platform_device *pdev)
/* Enable runtime PM. */
pm_runtime_enable(dev);
if (!pm_runtime_enabled(dev)) {
- ret = imx8mq_mipi_csi_pm_resume(dev, true);
+ ret = imx8mq_mipi_csi_runtime_resume(dev);
if (ret < 0)
goto icc;
}
@@ -934,7 +947,7 @@ static int imx8mq_mipi_csi_probe(struct platform_device *pdev)
cleanup:
pm_runtime_disable(&pdev->dev);
- imx8mq_mipi_csi_pm_suspend(&pdev->dev, true);
+ imx8mq_mipi_csi_runtime_suspend(&pdev->dev);
media_entity_cleanup(&state->sd.entity);
v4l2_async_nf_unregister(&state->notifier);
@@ -958,7 +971,7 @@ static int imx8mq_mipi_csi_remove(struct platform_device *pdev)
v4l2_async_unregister_subdev(&state->sd);
pm_runtime_disable(&pdev->dev);
- imx8mq_mipi_csi_pm_suspend(&pdev->dev, true);
+ imx8mq_mipi_csi_runtime_suspend(&pdev->dev);
media_entity_cleanup(&state->sd.entity);
mutex_destroy(&state->lock);
pm_runtime_set_suspended(&pdev->dev);
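
The net effect of the rework above is a clean split of responsibilities: the runtime PM hooks own the interconnect bandwidth request, the system sleep hooks only track ST_SUSPENDED, and both delegate the actual hardware work to the shared pm_suspend/pm_resume helpers. A skeletal sketch of that wiring, with hypothetical names rather than the real driver symbols:

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* System sleep: stop the hardware, then record that the device
	 * is suspended so resume knows it has work to do. */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* Resume only if the system suspend path actually ran. */
	return 0;
}

static int __maybe_unused foo_runtime_suspend(struct device *dev)
{
	/* Runtime PM additionally drops the interconnect bandwidth
	 * (icc_set_bw() with zero values) after stopping the hardware. */
	return 0;
}

static int __maybe_unused foo_runtime_resume(struct device *dev)
{
	/* ...and restores the bandwidth before powering back up. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
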
diff --git a/drivers/staging/media/ipu3/Kconfig b/drivers/staging/media/ipu3/Kconfig
index 3e9640523e50..114a1d8e7cc8 100644
--- a/drivers/staging/media/ipu3/Kconfig
+++ b/drivers/staging/media/ipu3/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config VIDEO_IPU3_IMGU
tristate "Intel ipu3-imgu driver"
- depends on PCI && VIDEO_V4L2
+ depends on PCI && VIDEO_DEV
depends on X86
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
diff --git a/drivers/staging/media/max96712/Kconfig b/drivers/staging/media/max96712/Kconfig
index acde14fd5c4d..117fadf81bd0 100644
--- a/drivers/staging/media/max96712/Kconfig
+++ b/drivers/staging/media/max96712/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_MAX96712
tristate "Maxim MAX96712 Quad GMSL2 Deserializer support"
depends on I2C
depends on OF_GPIO
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
select V4L2_FWNODE
select VIDEO_V4L2_SUBDEV_API
select MEDIA_CONTROLLER
diff --git a/drivers/staging/media/max96712/max96712.c b/drivers/staging/media/max96712/max96712.c
index 9bc72d9a858b..6b5abd958bff 100644
--- a/drivers/staging/media/max96712/max96712.c
+++ b/drivers/staging/media/max96712/max96712.c
@@ -30,7 +30,7 @@ struct max96712_priv {
struct regmap *regmap;
struct gpio_desc *gpiod_pwdn;
- struct v4l2_fwnode_bus_mipi_csi2 mipi;
+ struct v4l2_mbus_config_mipi_csi2 mipi;
struct v4l2_subdev sd;
struct v4l2_ctrl_handler ctrl_handler;
diff --git a/drivers/staging/media/meson/vdec/Kconfig b/drivers/staging/media/meson/vdec/Kconfig
index 9e1450193392..19ffea987b89 100644
--- a/drivers/staging/media/meson/vdec/Kconfig
+++ b/drivers/staging/media/meson/vdec/Kconfig
@@ -2,7 +2,7 @@
config VIDEO_MESON_VDEC
tristate "Amlogic video decoder driver"
- depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
+ depends on VIDEO_DEV && HAS_DMA
depends on ARCH_MESON || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
diff --git a/drivers/staging/media/meson/vdec/esparser.c b/drivers/staging/media/meson/vdec/esparser.c
index db7022707ff8..86ccc8937afc 100644
--- a/drivers/staging/media/meson/vdec/esparser.c
+++ b/drivers/staging/media/meson/vdec/esparser.c
@@ -328,7 +328,12 @@ esparser_queue(struct amvdec_session *sess, struct vb2_v4l2_buffer *vbuf)
offset = esparser_get_offset(sess);
- amvdec_add_ts(sess, vb->timestamp, vbuf->timecode, offset, vbuf->flags);
+ ret = amvdec_add_ts(sess, vb->timestamp, vbuf->timecode, offset, vbuf->flags);
+ if (ret) {
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ return ret;
+ }
+
dev_dbg(core->dev, "esparser: ts = %llu pld_size = %u offset = %08X flags = %08X\n",
vb->timestamp, payload_size, offset, vbuf->flags);
diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.c b/drivers/staging/media/meson/vdec/vdec_helpers.c
index 203d7afa085d..7d2a75653250 100644
--- a/drivers/staging/media/meson/vdec/vdec_helpers.c
+++ b/drivers/staging/media/meson/vdec/vdec_helpers.c
@@ -227,13 +227,16 @@ int amvdec_set_canvases(struct amvdec_session *sess,
}
EXPORT_SYMBOL_GPL(amvdec_set_canvases);
-void amvdec_add_ts(struct amvdec_session *sess, u64 ts,
- struct v4l2_timecode tc, u32 offset, u32 vbuf_flags)
+int amvdec_add_ts(struct amvdec_session *sess, u64 ts,
+ struct v4l2_timecode tc, u32 offset, u32 vbuf_flags)
{
struct amvdec_timestamp *new_ts;
unsigned long flags;
new_ts = kzalloc(sizeof(*new_ts), GFP_KERNEL);
+ if (!new_ts)
+ return -ENOMEM;
+
new_ts->ts = ts;
new_ts->tc = tc;
new_ts->offset = offset;
@@ -242,6 +245,7 @@ void amvdec_add_ts(struct amvdec_session *sess, u64 ts,
spin_lock_irqsave(&sess->ts_spinlock, flags);
list_add_tail(&new_ts->list, &sess->timestamps);
spin_unlock_irqrestore(&sess->ts_spinlock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(amvdec_add_ts);
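
amvdec_add_ts() now follows the usual kernel shape for a list append that may fail: allocate with GFP_KERNEL outside the lock, return -ENOMEM early, and hold the spinlock only for the insertion. A generic, self-contained sketch of that pattern (hypothetical names, not the vdec code):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct ts_entry {
	struct list_head list;
	u64 ts;
};

static int ts_queue_add(struct list_head *head, spinlock_t *lock, u64 ts)
{
	struct ts_entry *e;
	unsigned long flags;

	/* May sleep, so the allocation happens before taking the lock. */
	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->ts = ts;

	spin_lock_irqsave(lock, flags);
	list_add_tail(&e->list, head);
	spin_unlock_irqrestore(lock, flags);

	return 0;
}

Callers then have to propagate the error, which is exactly what the esparser hunk earlier does by completing the buffer in VB2_BUF_STATE_ERROR.
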
diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.h b/drivers/staging/media/meson/vdec/vdec_helpers.h
index 88137d15aa3a..4bf3e61d081b 100644
--- a/drivers/staging/media/meson/vdec/vdec_helpers.h
+++ b/drivers/staging/media/meson/vdec/vdec_helpers.h
@@ -56,8 +56,8 @@ void amvdec_dst_buf_done_offset(struct amvdec_session *sess,
* @offset: offset in the VIFIFO where the associated packet was written
* @flags: the vb2_v4l2_buffer flags
*/
-void amvdec_add_ts(struct amvdec_session *sess, u64 ts,
- struct v4l2_timecode tc, u32 offset, u32 flags);
+int amvdec_add_ts(struct amvdec_session *sess, u64 ts,
+ struct v4l2_timecode tc, u32 offset, u32 flags);
void amvdec_remove_ts(struct amvdec_session *sess, u64 ts);
/**
diff --git a/drivers/staging/media/meson/vdec/vdec_platform.c b/drivers/staging/media/meson/vdec/vdec_platform.c
index eabbebab2da2..88c9d72e1c83 100644
--- a/drivers/staging/media/meson/vdec/vdec_platform.c
+++ b/drivers/staging/media/meson/vdec/vdec_platform.c
@@ -103,6 +103,18 @@ static const struct amvdec_format vdec_formats_gxl[] = {
static const struct amvdec_format vdec_formats_gxm[] = {
{
+ .pixfmt = V4L2_PIX_FMT_VP9,
+ .min_buffers = 16,
+ .max_buffers = 24,
+ .max_width = 3840,
+ .max_height = 2160,
+ .vdec_ops = &vdec_hevc_ops,
+ .codec_ops = &codec_vp9_ops,
+ .firmware_path = "meson/vdec/gxl_vp9.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED |
+ V4L2_FMT_FLAG_DYN_RESOLUTION,
+ }, {
.pixfmt = V4L2_PIX_FMT_H264,
.min_buffers = 2,
.max_buffers = 24,
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
index 6c254907a27b..6d1f55b09132 100644
--- a/drivers/staging/media/omap4iss/Kconfig
+++ b/drivers/staging/media/omap4iss/Kconfig
@@ -2,7 +2,7 @@
config VIDEO_OMAP4
tristate "OMAP 4 Camera support"
- depends on VIDEO_V4L2 && I2C
+ depends on VIDEO_DEV && I2C
depends on ARCH_OMAP4 || COMPILE_TEST
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
diff --git a/drivers/staging/media/rkvdec/Kconfig b/drivers/staging/media/rkvdec/Kconfig
index dc7292f346fa..e963d60cc6ad 100644
--- a/drivers/staging/media/rkvdec/Kconfig
+++ b/drivers/staging/media/rkvdec/Kconfig
@@ -2,7 +2,7 @@
config VIDEO_ROCKCHIP_VDEC
tristate "Rockchip Video Decoder driver"
depends on ARCH_ROCKCHIP || COMPILE_TEST
- depends on VIDEO_DEV && VIDEO_V4L2
+ depends on VIDEO_DEV
select MEDIA_CONTROLLER
select MEDIA_CONTROLLER_REQUEST_API
select VIDEOBUF2_DMA_CONTIG
diff --git a/drivers/staging/media/sunxi/cedrus/Kconfig b/drivers/staging/media/sunxi/cedrus/Kconfig
index da369950bbf2..21c13f9b6e33 100644
--- a/drivers/staging/media/sunxi/cedrus/Kconfig
+++ b/drivers/staging/media/sunxi/cedrus/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config VIDEO_SUNXI_CEDRUS
tristate "Allwinner Cedrus VPU driver"
- depends on VIDEO_DEV && VIDEO_V4L2
+ depends on VIDEO_DEV
depends on HAS_DMA
depends on OF
select MEDIA_CONTROLLER
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index 4a4b714b0f26..68b3dcdb5df3 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -439,6 +439,8 @@ static int cedrus_probe(struct platform_device *pdev)
mutex_init(&dev->dev_mutex);
+ INIT_DELAYED_WORK(&dev->watchdog_work, cedrus_watchdog);
+
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret) {
dev_err(&pdev->dev, "Failed to register V4L2 device\n");
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index c345f2984041..3bc094eb497f 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -24,6 +24,7 @@
#include <linux/iopoll.h>
#include <linux/platform_device.h>
+#include <linux/workqueue.h>
#define CEDRUS_NAME "cedrus"
@@ -194,6 +195,8 @@ struct cedrus_dev {
struct reset_control *rstc;
unsigned int capabilities;
+
+ struct delayed_work watchdog_work;
};
extern struct cedrus_dec_ops cedrus_dec_ops_mpeg2;
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
index a16c1422558f..9c7200299465 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
@@ -97,4 +97,8 @@ void cedrus_device_run(void *priv)
v4l2_ctrl_request_complete(src_req, &ctx->hdl);
dev->dec_ops[ctx->current_codec]->trigger(ctx);
+
+ /* Start the watchdog timer. */
+ schedule_delayed_work(&dev->watchdog_work,
+ msecs_to_jiffies(2000));
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
index b4173a8926d6..d8fb93035470 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
@@ -38,7 +38,7 @@ struct cedrus_h264_sram_ref_pic {
#define CEDRUS_H264_FRAME_NUM 18
-#define CEDRUS_NEIGHBOR_INFO_BUF_SIZE (16 * SZ_1K)
+#define CEDRUS_NEIGHBOR_INFO_BUF_SIZE (32 * SZ_1K)
#define CEDRUS_MIN_PIC_INFO_BUF_SIZE (130 * SZ_1K)
static void cedrus_h264_write_sram(struct cedrus_dev *dev,
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
index 8829a7bab07e..44f385be9f6c 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
@@ -23,7 +23,7 @@
* Subsequent BSP implementations seem to double the neighbor info buffer size
* for the H6 SoC, which may be related to 10 bit H265 support.
*/
-#define CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE (397 * SZ_1K)
+#define CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE (794 * SZ_1K)
#define CEDRUS_H265_ENTRY_POINTS_BUF_SIZE (4 * SZ_1K)
#define CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE 160
@@ -169,7 +169,7 @@ static void cedrus_h265_ref_pic_list_write(struct cedrus_dev *dev,
unsigned int index = list[i];
u8 value = list[i];
- if (dpb[index].rps == V4L2_HEVC_DPB_ENTRY_RPS_LT_CURR)
+ if (dpb[index].flags & V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE)
value |= VE_DEC_H265_SRAM_REF_PIC_LIST_LT_REF;
/* Each SRAM word gathers up to 4 references. */
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
index 2d7663726467..a6470a89851e 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -118,6 +118,13 @@ static irqreturn_t cedrus_irq(int irq, void *data)
enum vb2_buffer_state state;
enum cedrus_irq_status status;
+ /*
+ * If cancel_delayed_work() returns false, the watchdog has already
+ * executed and finished the job.
+ */
+ if (!cancel_delayed_work(&dev->watchdog_work))
+ return IRQ_HANDLED;
+
ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
if (!ctx) {
v4l2_err(&dev->v4l2_dev,
@@ -143,6 +150,24 @@ static irqreturn_t cedrus_irq(int irq, void *data)
return IRQ_HANDLED;
}
+void cedrus_watchdog(struct work_struct *work)
+{
+ struct cedrus_dev *dev;
+ struct cedrus_ctx *ctx;
+
+ dev = container_of(to_delayed_work(work),
+ struct cedrus_dev, watchdog_work);
+
+ ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ if (!ctx)
+ return;
+
+ v4l2_err(&dev->v4l2_dev, "frame processing timed out!\n");
+ reset_control_reset(dev->rstc);
+ v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
+ VB2_BUF_STATE_ERROR);
+}
+
int cedrus_hw_suspend(struct device *device)
{
struct cedrus_dev *dev = dev_get_drvdata(device);
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.h b/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
index 45f641f0bfa2..7c92f00e36da 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
@@ -28,4 +28,6 @@ int cedrus_hw_resume(struct device *device);
int cedrus_hw_probe(struct cedrus_dev *dev);
void cedrus_hw_remove(struct cedrus_dev *dev);
+void cedrus_watchdog(struct work_struct *work);
+
#endif
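
The cedrus changes above implement a classic delayed-work watchdog: arm it when a job is handed to the hardware, disarm it from the completion interrupt, and treat a failed cancel as "the watchdog already fired and cleaned up". A minimal sketch of the pattern, with hypothetical names:

#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

/* Called when a decode job is triggered: arm a 2 s timeout. */
static void foo_job_start(struct delayed_work *wdog)
{
	schedule_delayed_work(wdog, msecs_to_jiffies(2000));
}

/*
 * Called from the completion IRQ. cancel_delayed_work() returning
 * false means the watchdog already expired, reset the hardware and
 * finished the job with an error, so this handler must not complete
 * the same buffers a second time.
 */
static irqreturn_t foo_job_irq(struct delayed_work *wdog)
{
	if (!cancel_delayed_work(wdog))
		return IRQ_HANDLED;

	/* Normal completion path goes here. */
	return IRQ_HANDLED;
}
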
diff --git a/drivers/staging/media/tegra-vde/Kconfig b/drivers/staging/media/tegra-vde/Kconfig
deleted file mode 100644
index 0dc78afd09e0..000000000000
--- a/drivers/staging/media/tegra-vde/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config TEGRA_VDE
- tristate "NVIDIA Tegra Video Decoder Engine driver"
- depends on ARCH_TEGRA || COMPILE_TEST
- select DMA_SHARED_BUFFER
- select IOMMU_IOVA
- select SRAM
- help
- Say Y here to enable support for the NVIDIA Tegra video decoder
- driver.
diff --git a/drivers/staging/media/tegra-vde/Makefile b/drivers/staging/media/tegra-vde/Makefile
deleted file mode 100644
index 2827f7601de8..000000000000
--- a/drivers/staging/media/tegra-vde/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-tegra-vde-y := vde.o iommu.o dmabuf-cache.o
-obj-$(CONFIG_TEGRA_VDE) += tegra-vde.o
diff --git a/drivers/staging/media/tegra-vde/TODO b/drivers/staging/media/tegra-vde/TODO
deleted file mode 100644
index 31aaa3e66d80..000000000000
--- a/drivers/staging/media/tegra-vde/TODO
+++ /dev/null
@@ -1,4 +0,0 @@
-TODO:
- - Implement V4L2 API once it gains support for stateless decoders.
-
-Contact: Dmitry Osipenko <digetx@gmail.com>
diff --git a/drivers/staging/media/tegra-vde/uapi.h b/drivers/staging/media/tegra-vde/uapi.h
deleted file mode 100644
index ffb4983e5bb6..000000000000
--- a/drivers/staging/media/tegra-vde/uapi.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/* Copyright (C) 2016-2017 Dmitry Osipenko <digetx@gmail.com> */
-#ifndef _UAPI_TEGRA_VDE_H_
-#define _UAPI_TEGRA_VDE_H_
-
-#include <linux/types.h>
-#include <asm/ioctl.h>
-
-#define FLAG_B_FRAME 0x1
-#define FLAG_REFERENCE 0x2
-
-struct tegra_vde_h264_frame {
- __s32 y_fd;
- __s32 cb_fd;
- __s32 cr_fd;
- __s32 aux_fd;
- __u32 y_offset;
- __u32 cb_offset;
- __u32 cr_offset;
- __u32 aux_offset;
- __u32 frame_num;
- __u32 flags;
-
- // Must be zero'ed
- __u32 reserved[6];
-};
-
-struct tegra_vde_h264_decoder_ctx {
- __s32 bitstream_data_fd;
- __u32 bitstream_data_offset;
-
- __u64 dpb_frames_ptr;
- __u32 dpb_frames_nb;
- __u32 dpb_ref_frames_with_earlier_poc_nb;
-
- // SPS
- __u32 baseline_profile;
- __u32 level_idc;
- __u32 log2_max_pic_order_cnt_lsb;
- __u32 log2_max_frame_num;
- __u32 pic_order_cnt_type;
- __u32 direct_8x8_inference_flag;
- __u32 pic_width_in_mbs;
- __u32 pic_height_in_mbs;
-
- // PPS
- __u32 pic_init_qp;
- __u32 deblocking_filter_control_present_flag;
- __u32 constrained_intra_pred_flag;
- __u32 chroma_qp_index_offset;
- __u32 pic_order_present_flag;
-
- // Slice header
- __u32 num_ref_idx_l0_active_minus1;
- __u32 num_ref_idx_l1_active_minus1;
-
- // Must be zero'ed
- __u32 reserved[11];
-};
-
-#define VDE_IOCTL_BASE ('v' + 0x20)
-
-#define VDE_IO(nr) _IO(VDE_IOCTL_BASE, nr)
-#define VDE_IOR(nr, type) _IOR(VDE_IOCTL_BASE, nr, type)
-#define VDE_IOW(nr, type) _IOW(VDE_IOCTL_BASE, nr, type)
-#define VDE_IOWR(nr, type) _IOWR(VDE_IOCTL_BASE, nr, type)
-
-#define TEGRA_VDE_DECODE_H264 0x00
-
-#define TEGRA_VDE_IOCTL_DECODE_H264 \
- VDE_IOW(TEGRA_VDE_DECODE_H264, struct tegra_vde_h264_decoder_ctx)
-
-#endif // _UAPI_TEGRA_VDE_H_
diff --git a/drivers/staging/media/tegra-vde/vde.c b/drivers/staging/media/tegra-vde/vde.c
deleted file mode 100644
index a8f1a024c343..000000000000
--- a/drivers/staging/media/tegra-vde/vde.c
+++ /dev/null
@@ -1,1358 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * NVIDIA Tegra Video decoder driver
- *
- * Copyright (C) 2016-2017 Dmitry Osipenko <digetx@gmail.com>
- *
- */
-
-#include <linux/clk.h>
-#include <linux/dma-buf.h>
-#include <linux/genalloc.h>
-#include <linux/interrupt.h>
-#include <linux/iopoll.h>
-#include <linux/list.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/reset.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-
-#include <soc/tegra/common.h>
-#include <soc/tegra/pmc.h>
-
-#include "uapi.h"
-#include "vde.h"
-
-#define CREATE_TRACE_POINTS
-#include "trace.h"
-
-#define ICMDQUE_WR 0x00
-#define CMDQUE_CONTROL 0x08
-#define INTR_STATUS 0x18
-#define BSE_INT_ENB 0x40
-#define BSE_CONFIG 0x44
-
-#define BSE_ICMDQUE_EMPTY BIT(3)
-#define BSE_DMA_BUSY BIT(23)
-
-struct video_frame {
- struct dma_buf_attachment *y_dmabuf_attachment;
- struct dma_buf_attachment *cb_dmabuf_attachment;
- struct dma_buf_attachment *cr_dmabuf_attachment;
- struct dma_buf_attachment *aux_dmabuf_attachment;
- dma_addr_t y_addr;
- dma_addr_t cb_addr;
- dma_addr_t cr_addr;
- dma_addr_t aux_addr;
- u32 frame_num;
- u32 flags;
-};
-
-static void tegra_vde_writel(struct tegra_vde *vde,
- u32 value, void __iomem *base, u32 offset)
-{
- trace_vde_writel(vde, base, offset, value);
-
- writel_relaxed(value, base + offset);
-}
-
-static u32 tegra_vde_readl(struct tegra_vde *vde,
- void __iomem *base, u32 offset)
-{
- u32 value = readl_relaxed(base + offset);
-
- trace_vde_readl(vde, base, offset, value);
-
- return value;
-}
-
-static void tegra_vde_set_bits(struct tegra_vde *vde,
- u32 mask, void __iomem *base, u32 offset)
-{
- u32 value = tegra_vde_readl(vde, base, offset);
-
- tegra_vde_writel(vde, value | mask, base, offset);
-}
-
-static int tegra_vde_wait_mbe(struct tegra_vde *vde)
-{
- u32 tmp;
-
- return readl_relaxed_poll_timeout(vde->mbe + 0x8C, tmp,
- (tmp >= 0x10), 1, 100);
-}
-
-static int tegra_vde_alloc_bo(struct tegra_vde *vde,
- struct tegra_vde_bo **ret_bo,
- enum dma_data_direction dma_dir,
- size_t size)
-{
- struct device *dev = vde->miscdev.parent;
- struct tegra_vde_bo *bo;
- int err;
-
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (!bo)
- return -ENOMEM;
-
- bo->vde = vde;
- bo->size = size;
- bo->dma_dir = dma_dir;
- bo->dma_attrs = DMA_ATTR_WRITE_COMBINE |
- DMA_ATTR_NO_KERNEL_MAPPING;
-
- if (!vde->domain)
- bo->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
-
- bo->dma_cookie = dma_alloc_attrs(dev, bo->size, &bo->dma_handle,
- GFP_KERNEL, bo->dma_attrs);
- if (!bo->dma_cookie) {
- dev_err(dev, "Failed to allocate DMA buffer of size: %zu\n",
- bo->size);
- err = -ENOMEM;
- goto free_bo;
- }
-
- err = dma_get_sgtable_attrs(dev, &bo->sgt, bo->dma_cookie,
- bo->dma_handle, bo->size, bo->dma_attrs);
- if (err) {
- dev_err(dev, "Failed to get DMA buffer SG table: %d\n", err);
- goto free_attrs;
- }
-
- err = dma_map_sgtable(dev, &bo->sgt, bo->dma_dir, bo->dma_attrs);
- if (err) {
- dev_err(dev, "Failed to map DMA buffer SG table: %d\n", err);
- goto free_table;
- }
-
- if (vde->domain) {
- err = tegra_vde_iommu_map(vde, &bo->sgt, &bo->iova, bo->size);
- if (err) {
- dev_err(dev, "Failed to map DMA buffer IOVA: %d\n", err);
- goto unmap_sgtable;
- }
-
- bo->dma_addr = iova_dma_addr(&vde->iova, bo->iova);
- } else {
- bo->dma_addr = sg_dma_address(bo->sgt.sgl);
- }
-
- *ret_bo = bo;
-
- return 0;
-
-unmap_sgtable:
- dma_unmap_sgtable(dev, &bo->sgt, bo->dma_dir, bo->dma_attrs);
-free_table:
- sg_free_table(&bo->sgt);
-free_attrs:
- dma_free_attrs(dev, bo->size, bo->dma_cookie, bo->dma_handle,
- bo->dma_attrs);
-free_bo:
- kfree(bo);
-
- return err;
-}
-
-static void tegra_vde_free_bo(struct tegra_vde_bo *bo)
-{
- struct tegra_vde *vde = bo->vde;
- struct device *dev = vde->miscdev.parent;
-
- if (vde->domain)
- tegra_vde_iommu_unmap(vde, bo->iova);
-
- dma_unmap_sgtable(dev, &bo->sgt, bo->dma_dir, bo->dma_attrs);
-
- sg_free_table(&bo->sgt);
-
- dma_free_attrs(dev, bo->size, bo->dma_cookie, bo->dma_handle,
- bo->dma_attrs);
- kfree(bo);
-}
-
-static int tegra_vde_setup_mbe_frame_idx(struct tegra_vde *vde,
- unsigned int refs_nb,
- bool setup_refs)
-{
- u32 frame_idx_enb_mask = 0;
- u32 value;
- unsigned int frame_idx;
- unsigned int idx;
- int err;
-
- tegra_vde_writel(vde, 0xD0000000 | (0 << 23), vde->mbe, 0x80);
- tegra_vde_writel(vde, 0xD0200000 | (0 << 23), vde->mbe, 0x80);
-
- err = tegra_vde_wait_mbe(vde);
- if (err)
- return err;
-
- if (!setup_refs)
- return 0;
-
- for (idx = 0, frame_idx = 1; idx < refs_nb; idx++, frame_idx++) {
- tegra_vde_writel(vde, 0xD0000000 | (frame_idx << 23),
- vde->mbe, 0x80);
- tegra_vde_writel(vde, 0xD0200000 | (frame_idx << 23),
- vde->mbe, 0x80);
-
- frame_idx_enb_mask |= frame_idx << (6 * (idx % 4));
-
- if (idx % 4 == 3 || idx == refs_nb - 1) {
- value = 0xC0000000;
- value |= (idx >> 2) << 24;
- value |= frame_idx_enb_mask;
-
- tegra_vde_writel(vde, value, vde->mbe, 0x80);
-
- err = tegra_vde_wait_mbe(vde);
- if (err)
- return err;
-
- frame_idx_enb_mask = 0;
- }
- }
-
- return 0;
-}
-
-static void tegra_vde_mbe_set_0xa_reg(struct tegra_vde *vde, int reg, u32 val)
-{
- tegra_vde_writel(vde, 0xA0000000 | (reg << 24) | (val & 0xFFFF),
- vde->mbe, 0x80);
- tegra_vde_writel(vde, 0xA0000000 | ((reg + 1) << 24) | (val >> 16),
- vde->mbe, 0x80);
-}
-
-static int tegra_vde_wait_bsev(struct tegra_vde *vde, bool wait_dma)
-{
- struct device *dev = vde->miscdev.parent;
- u32 value;
- int err;
-
- err = readl_relaxed_poll_timeout(vde->bsev + INTR_STATUS, value,
- !(value & BIT(2)), 1, 100);
- if (err) {
- dev_err(dev, "BSEV unknown bit timeout\n");
- return err;
- }
-
- err = readl_relaxed_poll_timeout(vde->bsev + INTR_STATUS, value,
- (value & BSE_ICMDQUE_EMPTY), 1, 100);
- if (err) {
- dev_err(dev, "BSEV ICMDQUE flush timeout\n");
- return err;
- }
-
- if (!wait_dma)
- return 0;
-
- err = readl_relaxed_poll_timeout(vde->bsev + INTR_STATUS, value,
- !(value & BSE_DMA_BUSY), 1, 100);
- if (err) {
- dev_err(dev, "BSEV DMA timeout\n");
- return err;
- }
-
- return 0;
-}
-
-static int tegra_vde_push_to_bsev_icmdqueue(struct tegra_vde *vde,
- u32 value, bool wait_dma)
-{
- tegra_vde_writel(vde, value, vde->bsev, ICMDQUE_WR);
-
- return tegra_vde_wait_bsev(vde, wait_dma);
-}
-
-static void tegra_vde_setup_frameid(struct tegra_vde *vde,
- struct video_frame *frame,
- unsigned int frameid,
- u32 mbs_width, u32 mbs_height)
-{
- u32 y_addr = frame ? frame->y_addr : 0x6CDEAD00;
- u32 cb_addr = frame ? frame->cb_addr : 0x6CDEAD00;
- u32 cr_addr = frame ? frame->cr_addr : 0x6CDEAD00;
- u32 value1 = frame ? ((mbs_width << 16) | mbs_height) : 0;
- u32 value2 = frame ? ((((mbs_width + 1) >> 1) << 6) | 1) : 0;
-
- tegra_vde_writel(vde, y_addr >> 8, vde->frameid, 0x000 + frameid * 4);
- tegra_vde_writel(vde, cb_addr >> 8, vde->frameid, 0x100 + frameid * 4);
- tegra_vde_writel(vde, cr_addr >> 8, vde->frameid, 0x180 + frameid * 4);
- tegra_vde_writel(vde, value1, vde->frameid, 0x080 + frameid * 4);
- tegra_vde_writel(vde, value2, vde->frameid, 0x280 + frameid * 4);
-}
-
-static void tegra_setup_frameidx(struct tegra_vde *vde,
- struct video_frame *frames,
- unsigned int frames_nb,
- u32 mbs_width, u32 mbs_height)
-{
- unsigned int idx;
-
- for (idx = 0; idx < frames_nb; idx++)
- tegra_vde_setup_frameid(vde, &frames[idx], idx,
- mbs_width, mbs_height);
-
- for (; idx < 17; idx++)
- tegra_vde_setup_frameid(vde, NULL, idx, 0, 0);
-}
-
-static void tegra_vde_setup_iram_entry(struct tegra_vde *vde,
- unsigned int table,
- unsigned int row,
- u32 value1, u32 value2)
-{
- u32 *iram_tables = vde->iram;
-
- trace_vde_setup_iram_entry(table, row, value1, value2);
-
- iram_tables[0x20 * table + row * 2] = value1;
- iram_tables[0x20 * table + row * 2 + 1] = value2;
-}
-
-static void tegra_vde_setup_iram_tables(struct tegra_vde *vde,
- struct video_frame *dpb_frames,
- unsigned int ref_frames_nb,
- unsigned int with_earlier_poc_nb)
-{
- struct video_frame *frame;
- u32 value, aux_addr;
- int with_later_poc_nb;
- unsigned int i, k;
-
- trace_vde_ref_l0(dpb_frames[0].frame_num);
-
- for (i = 0; i < 16; i++) {
- if (i < ref_frames_nb) {
- frame = &dpb_frames[i + 1];
-
- aux_addr = frame->aux_addr;
-
- value = (i + 1) << 26;
- value |= !(frame->flags & FLAG_B_FRAME) << 25;
- value |= 1 << 24;
- value |= frame->frame_num;
- } else {
- aux_addr = 0x6ADEAD00;
- value = 0x3f;
- }
-
- tegra_vde_setup_iram_entry(vde, 0, i, value, aux_addr);
- tegra_vde_setup_iram_entry(vde, 1, i, value, aux_addr);
- tegra_vde_setup_iram_entry(vde, 2, i, value, aux_addr);
- tegra_vde_setup_iram_entry(vde, 3, i, value, aux_addr);
- }
-
- if (!(dpb_frames[0].flags & FLAG_B_FRAME))
- return;
-
- if (with_earlier_poc_nb >= ref_frames_nb)
- return;
-
- with_later_poc_nb = ref_frames_nb - with_earlier_poc_nb;
-
- trace_vde_ref_l1(with_later_poc_nb, with_earlier_poc_nb);
-
- for (i = 0, k = with_earlier_poc_nb; i < with_later_poc_nb; i++, k++) {
- frame = &dpb_frames[k + 1];
-
- aux_addr = frame->aux_addr;
-
- value = (k + 1) << 26;
- value |= !(frame->flags & FLAG_B_FRAME) << 25;
- value |= 1 << 24;
- value |= frame->frame_num;
-
- tegra_vde_setup_iram_entry(vde, 2, i, value, aux_addr);
- }
-
- for (k = 0; i < ref_frames_nb; i++, k++) {
- frame = &dpb_frames[k + 1];
-
- aux_addr = frame->aux_addr;
-
- value = (k + 1) << 26;
- value |= !(frame->flags & FLAG_B_FRAME) << 25;
- value |= 1 << 24;
- value |= frame->frame_num;
-
- tegra_vde_setup_iram_entry(vde, 2, i, value, aux_addr);
- }
-}
-
-static int tegra_vde_setup_hw_context(struct tegra_vde *vde,
- struct tegra_vde_h264_decoder_ctx *ctx,
- struct video_frame *dpb_frames,
- dma_addr_t bitstream_data_addr,
- size_t bitstream_data_size,
- unsigned int macroblocks_nb)
-{
- struct device *dev = vde->miscdev.parent;
- u32 value;
- int err;
-
- tegra_vde_set_bits(vde, 0x000A, vde->sxe, 0xF0);
- tegra_vde_set_bits(vde, 0x000B, vde->bsev, CMDQUE_CONTROL);
- tegra_vde_set_bits(vde, 0x8002, vde->mbe, 0x50);
- tegra_vde_set_bits(vde, 0x000A, vde->mbe, 0xA0);
- tegra_vde_set_bits(vde, 0x000A, vde->ppe, 0x14);
- tegra_vde_set_bits(vde, 0x000A, vde->ppe, 0x28);
- tegra_vde_set_bits(vde, 0x0A00, vde->mce, 0x08);
- tegra_vde_set_bits(vde, 0x000A, vde->tfe, 0x00);
- tegra_vde_set_bits(vde, 0x0005, vde->vdma, 0x04);
-
- tegra_vde_writel(vde, 0x00000000, vde->vdma, 0x1C);
- tegra_vde_writel(vde, 0x00000000, vde->vdma, 0x00);
- tegra_vde_writel(vde, 0x00000007, vde->vdma, 0x04);
- tegra_vde_writel(vde, 0x00000007, vde->frameid, 0x200);
- tegra_vde_writel(vde, 0x00000005, vde->tfe, 0x04);
- tegra_vde_writel(vde, 0x00000000, vde->mbe, 0x84);
- tegra_vde_writel(vde, 0x00000010, vde->sxe, 0x08);
- tegra_vde_writel(vde, 0x00000150, vde->sxe, 0x54);
- tegra_vde_writel(vde, 0x0000054C, vde->sxe, 0x58);
- tegra_vde_writel(vde, 0x00000E34, vde->sxe, 0x5C);
- tegra_vde_writel(vde, 0x063C063C, vde->mce, 0x10);
- tegra_vde_writel(vde, 0x0003FC00, vde->bsev, INTR_STATUS);
- tegra_vde_writel(vde, 0x0000150D, vde->bsev, BSE_CONFIG);
- tegra_vde_writel(vde, 0x00000100, vde->bsev, BSE_INT_ENB);
- tegra_vde_writel(vde, 0x00000000, vde->bsev, 0x98);
- tegra_vde_writel(vde, 0x00000060, vde->bsev, 0x9C);
-
- memset(vde->iram + 128, 0, macroblocks_nb / 2);
-
- tegra_setup_frameidx(vde, dpb_frames, ctx->dpb_frames_nb,
- ctx->pic_width_in_mbs, ctx->pic_height_in_mbs);
-
- tegra_vde_setup_iram_tables(vde, dpb_frames,
- ctx->dpb_frames_nb - 1,
- ctx->dpb_ref_frames_with_earlier_poc_nb);
-
- /*
- * The IRAM mapping is write-combine, ensure that CPU buffers have
- * been flushed at this point.
- */
- wmb();
-
- tegra_vde_writel(vde, 0x00000000, vde->bsev, 0x8C);
- tegra_vde_writel(vde, bitstream_data_addr + bitstream_data_size,
- vde->bsev, 0x54);
-
- value = ctx->pic_width_in_mbs << 11 | ctx->pic_height_in_mbs << 3;
-
- tegra_vde_writel(vde, value, vde->bsev, 0x88);
-
- err = tegra_vde_wait_bsev(vde, false);
- if (err)
- return err;
-
- err = tegra_vde_push_to_bsev_icmdqueue(vde, 0x800003FC, false);
- if (err)
- return err;
-
- value = 0x01500000;
- value |= ((vde->iram_lists_addr + 512) >> 2) & 0xFFFF;
-
- err = tegra_vde_push_to_bsev_icmdqueue(vde, value, true);
- if (err)
- return err;
-
- err = tegra_vde_push_to_bsev_icmdqueue(vde, 0x840F054C, false);
- if (err)
- return err;
-
- err = tegra_vde_push_to_bsev_icmdqueue(vde, 0x80000080, false);
- if (err)
- return err;
-
- value = 0x0E340000 | ((vde->iram_lists_addr >> 2) & 0xFFFF);
-
- err = tegra_vde_push_to_bsev_icmdqueue(vde, value, true);
- if (err)
- return err;
-
- value = 0x00800005;
- value |= ctx->pic_width_in_mbs << 11;
- value |= ctx->pic_height_in_mbs << 3;
-
- tegra_vde_writel(vde, value, vde->sxe, 0x10);
-
- value = !ctx->baseline_profile << 17;
- value |= ctx->level_idc << 13;
- value |= ctx->log2_max_pic_order_cnt_lsb << 7;
- value |= ctx->pic_order_cnt_type << 5;
- value |= ctx->log2_max_frame_num;
-
- tegra_vde_writel(vde, value, vde->sxe, 0x40);
-
- value = ctx->pic_init_qp << 25;
- value |= !!(ctx->deblocking_filter_control_present_flag) << 2;
- value |= !!ctx->pic_order_present_flag;
-
- tegra_vde_writel(vde, value, vde->sxe, 0x44);
-
- value = ctx->chroma_qp_index_offset;
- value |= ctx->num_ref_idx_l0_active_minus1 << 5;
- value |= ctx->num_ref_idx_l1_active_minus1 << 10;
- value |= !!ctx->constrained_intra_pred_flag << 15;
-
- tegra_vde_writel(vde, value, vde->sxe, 0x48);
-
- value = 0x0C000000;
- value |= !!(dpb_frames[0].flags & FLAG_B_FRAME) << 24;
-
- tegra_vde_writel(vde, value, vde->sxe, 0x4C);
-
- value = 0x03800000;
- value |= bitstream_data_size & GENMASK(19, 15);
-
- tegra_vde_writel(vde, value, vde->sxe, 0x68);
-
- tegra_vde_writel(vde, bitstream_data_addr, vde->sxe, 0x6C);
-
- if (vde->soc->supports_ref_pic_marking)
- tegra_vde_writel(vde, vde->secure_bo->dma_addr, vde->sxe, 0x7c);
-
- value = 0x10000005;
- value |= ctx->pic_width_in_mbs << 11;
- value |= ctx->pic_height_in_mbs << 3;
-
- tegra_vde_writel(vde, value, vde->mbe, 0x80);
-
- value = 0x26800000;
- value |= ctx->level_idc << 4;
- value |= !ctx->baseline_profile << 1;
- value |= !!ctx->direct_8x8_inference_flag;
-
- tegra_vde_writel(vde, value, vde->mbe, 0x80);
-
- tegra_vde_writel(vde, 0xF4000001, vde->mbe, 0x80);
- tegra_vde_writel(vde, 0x20000000, vde->mbe, 0x80);
- tegra_vde_writel(vde, 0xF4000101, vde->mbe, 0x80);
-
- value = 0x20000000;
- value |= ctx->chroma_qp_index_offset << 8;
-
- tegra_vde_writel(vde, value, vde->mbe, 0x80);
-
- err = tegra_vde_setup_mbe_frame_idx(vde,
- ctx->dpb_frames_nb - 1,
- ctx->pic_order_cnt_type == 0);
- if (err) {
- dev_err(dev, "MBE frames setup failed %d\n", err);
- return err;
- }
-
- tegra_vde_mbe_set_0xa_reg(vde, 0, 0x000009FC);
- tegra_vde_mbe_set_0xa_reg(vde, 2, 0x61DEAD00);
- tegra_vde_mbe_set_0xa_reg(vde, 4, 0x62DEAD00);
- tegra_vde_mbe_set_0xa_reg(vde, 6, 0x63DEAD00);
- tegra_vde_mbe_set_0xa_reg(vde, 8, dpb_frames[0].aux_addr);
-
- value = 0xFC000000;
- value |= !!(dpb_frames[0].flags & FLAG_B_FRAME) << 2;
-
- if (!ctx->baseline_profile)
- value |= !!(dpb_frames[0].flags & FLAG_REFERENCE) << 1;
-
- tegra_vde_writel(vde, value, vde->mbe, 0x80);
-
- err = tegra_vde_wait_mbe(vde);
- if (err) {
- dev_err(dev, "MBE programming failed %d\n", err);
- return err;
- }
-
- return 0;
-}
-
-static void tegra_vde_decode_frame(struct tegra_vde *vde,
- unsigned int macroblocks_nb)
-{
- reinit_completion(&vde->decode_completion);
-
- tegra_vde_writel(vde, 0x00000001, vde->bsev, 0x8C);
- tegra_vde_writel(vde, 0x20000000 | (macroblocks_nb - 1),
- vde->sxe, 0x00);
-}
-
-static int tegra_vde_attach_dmabuf(struct tegra_vde *vde,
- int fd,
- unsigned long offset,
- size_t min_size,
- size_t align_size,
- struct dma_buf_attachment **a,
- dma_addr_t *addrp,
- size_t *size,
- enum dma_data_direction dma_dir)
-{
- struct device *dev = vde->miscdev.parent;
- struct dma_buf *dmabuf;
- int err;
-
- dmabuf = dma_buf_get(fd);
- if (IS_ERR(dmabuf)) {
- dev_err(dev, "Invalid dmabuf FD\n");
- return PTR_ERR(dmabuf);
- }
-
- if (dmabuf->size & (align_size - 1)) {
- dev_err(dev, "Unaligned dmabuf 0x%zX, should be aligned to 0x%zX\n",
- dmabuf->size, align_size);
- return -EINVAL;
- }
-
- if ((u64)offset + min_size > dmabuf->size) {
- dev_err(dev, "Too small dmabuf size %zu @0x%lX, should be at least %zu\n",
- dmabuf->size, offset, min_size);
- return -EINVAL;
- }
-
- err = tegra_vde_dmabuf_cache_map(vde, dmabuf, dma_dir, a, addrp);
- if (err)
- goto err_put;
-
- *addrp = *addrp + offset;
-
- if (size)
- *size = dmabuf->size - offset;
-
- return 0;
-
-err_put:
- dma_buf_put(dmabuf);
-
- return err;
-}
-
-static int tegra_vde_attach_dmabufs_to_frame(struct tegra_vde *vde,
- struct video_frame *frame,
- struct tegra_vde_h264_frame *src,
- enum dma_data_direction dma_dir,
- bool baseline_profile,
- size_t lsize, size_t csize)
-{
- int err;
-
- err = tegra_vde_attach_dmabuf(vde, src->y_fd,
- src->y_offset, lsize, SZ_256,
- &frame->y_dmabuf_attachment,
- &frame->y_addr,
- NULL, dma_dir);
- if (err)
- return err;
-
- err = tegra_vde_attach_dmabuf(vde, src->cb_fd,
- src->cb_offset, csize, SZ_256,
- &frame->cb_dmabuf_attachment,
- &frame->cb_addr,
- NULL, dma_dir);
- if (err)
- goto err_release_y;
-
- err = tegra_vde_attach_dmabuf(vde, src->cr_fd,
- src->cr_offset, csize, SZ_256,
- &frame->cr_dmabuf_attachment,
- &frame->cr_addr,
- NULL, dma_dir);
- if (err)
- goto err_release_cb;
-
- if (baseline_profile) {
- frame->aux_addr = 0x64DEAD00;
- return 0;
- }
-
- err = tegra_vde_attach_dmabuf(vde, src->aux_fd,
- src->aux_offset, csize, SZ_256,
- &frame->aux_dmabuf_attachment,
- &frame->aux_addr,
- NULL, dma_dir);
- if (err)
- goto err_release_cr;
-
- return 0;
-
-err_release_cr:
- tegra_vde_dmabuf_cache_unmap(vde, frame->cr_dmabuf_attachment, true);
-err_release_cb:
- tegra_vde_dmabuf_cache_unmap(vde, frame->cb_dmabuf_attachment, true);
-err_release_y:
- tegra_vde_dmabuf_cache_unmap(vde, frame->y_dmabuf_attachment, true);
-
- return err;
-}
-
-static void tegra_vde_release_frame_dmabufs(struct tegra_vde *vde,
- struct video_frame *frame,
- enum dma_data_direction dma_dir,
- bool baseline_profile,
- bool release)
-{
- if (!baseline_profile)
- tegra_vde_dmabuf_cache_unmap(vde, frame->aux_dmabuf_attachment,
- release);
-
- tegra_vde_dmabuf_cache_unmap(vde, frame->cr_dmabuf_attachment, release);
- tegra_vde_dmabuf_cache_unmap(vde, frame->cb_dmabuf_attachment, release);
- tegra_vde_dmabuf_cache_unmap(vde, frame->y_dmabuf_attachment, release);
-}
-
-static int tegra_vde_validate_frame(struct device *dev,
- struct tegra_vde_h264_frame *frame)
-{
- if (frame->frame_num > 0x7FFFFF) {
- dev_err(dev, "Bad frame_num %u\n", frame->frame_num);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int tegra_vde_validate_h264_ctx(struct device *dev,
- struct tegra_vde_h264_decoder_ctx *ctx)
-{
- if (ctx->dpb_frames_nb == 0 || ctx->dpb_frames_nb > 17) {
- dev_err(dev, "Bad DPB size %u\n", ctx->dpb_frames_nb);
- return -EINVAL;
- }
-
- if (ctx->level_idc > 15) {
- dev_err(dev, "Bad level value %u\n", ctx->level_idc);
- return -EINVAL;
- }
-
- if (ctx->pic_init_qp > 52) {
- dev_err(dev, "Bad pic_init_qp value %u\n", ctx->pic_init_qp);
- return -EINVAL;
- }
-
- if (ctx->log2_max_pic_order_cnt_lsb > 16) {
- dev_err(dev, "Bad log2_max_pic_order_cnt_lsb value %u\n",
- ctx->log2_max_pic_order_cnt_lsb);
- return -EINVAL;
- }
-
- if (ctx->log2_max_frame_num > 16) {
- dev_err(dev, "Bad log2_max_frame_num value %u\n",
- ctx->log2_max_frame_num);
- return -EINVAL;
- }
-
- if (ctx->chroma_qp_index_offset > 31) {
- dev_err(dev, "Bad chroma_qp_index_offset value %u\n",
- ctx->chroma_qp_index_offset);
- return -EINVAL;
- }
-
- if (ctx->pic_order_cnt_type > 2) {
- dev_err(dev, "Bad pic_order_cnt_type value %u\n",
- ctx->pic_order_cnt_type);
- return -EINVAL;
- }
-
- if (ctx->num_ref_idx_l0_active_minus1 > 15) {
- dev_err(dev, "Bad num_ref_idx_l0_active_minus1 value %u\n",
- ctx->num_ref_idx_l0_active_minus1);
- return -EINVAL;
- }
-
- if (ctx->num_ref_idx_l1_active_minus1 > 15) {
- dev_err(dev, "Bad num_ref_idx_l1_active_minus1 value %u\n",
- ctx->num_ref_idx_l1_active_minus1);
- return -EINVAL;
- }
-
- if (!ctx->pic_width_in_mbs || ctx->pic_width_in_mbs > 127) {
- dev_err(dev, "Bad pic_width_in_mbs value %u\n",
- ctx->pic_width_in_mbs);
- return -EINVAL;
- }
-
- if (!ctx->pic_height_in_mbs || ctx->pic_height_in_mbs > 127) {
- dev_err(dev, "Bad pic_height_in_mbs value %u\n",
- ctx->pic_height_in_mbs);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int tegra_vde_ioctl_decode_h264(struct tegra_vde *vde,
- unsigned long vaddr)
-{
- struct device *dev = vde->miscdev.parent;
- struct tegra_vde_h264_decoder_ctx ctx;
- struct tegra_vde_h264_frame *frames;
- struct tegra_vde_h264_frame __user *frames_user;
- struct video_frame *dpb_frames;
- struct dma_buf_attachment *bitstream_data_dmabuf_attachment;
- enum dma_data_direction dma_dir;
- dma_addr_t bitstream_data_addr;
- dma_addr_t bsev_ptr;
- size_t lsize, csize;
- size_t bitstream_data_size;
- unsigned int macroblocks_nb;
- unsigned int read_bytes;
- unsigned int cstride;
- unsigned int i;
- long timeout;
- int ret, err;
-
- if (copy_from_user(&ctx, (void __user *)vaddr, sizeof(ctx)))
- return -EFAULT;
-
- ret = tegra_vde_validate_h264_ctx(dev, &ctx);
- if (ret)
- return ret;
-
- ret = tegra_vde_attach_dmabuf(vde, ctx.bitstream_data_fd,
- ctx.bitstream_data_offset,
- SZ_16K, SZ_16K,
- &bitstream_data_dmabuf_attachment,
- &bitstream_data_addr,
- &bitstream_data_size,
- DMA_TO_DEVICE);
- if (ret)
- return ret;
-
- frames = kmalloc_array(ctx.dpb_frames_nb, sizeof(*frames), GFP_KERNEL);
- if (!frames) {
- ret = -ENOMEM;
- goto release_bitstream_dmabuf;
- }
-
- dpb_frames = kcalloc(ctx.dpb_frames_nb, sizeof(*dpb_frames),
- GFP_KERNEL);
- if (!dpb_frames) {
- ret = -ENOMEM;
- goto free_frames;
- }
-
- macroblocks_nb = ctx.pic_width_in_mbs * ctx.pic_height_in_mbs;
- frames_user = u64_to_user_ptr(ctx.dpb_frames_ptr);
-
- if (copy_from_user(frames, frames_user,
- ctx.dpb_frames_nb * sizeof(*frames))) {
- ret = -EFAULT;
- goto free_dpb_frames;
- }
-
- cstride = ALIGN(ctx.pic_width_in_mbs * 8, 16);
- csize = cstride * ctx.pic_height_in_mbs * 8;
- lsize = macroblocks_nb * 256;
-
- for (i = 0; i < ctx.dpb_frames_nb; i++) {
- ret = tegra_vde_validate_frame(dev, &frames[i]);
- if (ret)
- goto release_dpb_frames;
-
- dpb_frames[i].flags = frames[i].flags;
- dpb_frames[i].frame_num = frames[i].frame_num;
-
- dma_dir = (i == 0) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
- ret = tegra_vde_attach_dmabufs_to_frame(vde, &dpb_frames[i],
- &frames[i], dma_dir,
- ctx.baseline_profile,
- lsize, csize);
- if (ret)
- goto release_dpb_frames;
- }
-
- ret = mutex_lock_interruptible(&vde->lock);
- if (ret)
- goto release_dpb_frames;
-
- ret = pm_runtime_resume_and_get(dev);
- if (ret < 0)
- goto unlock;
-
- /*
- * We rely on the VDE registers reset value, otherwise VDE
- * causes bus lockup.
- */
- ret = reset_control_assert(vde->rst_mc);
- if (ret) {
- dev_err(dev, "DEC start: Failed to assert MC reset: %d\n",
- ret);
- goto put_runtime_pm;
- }
-
- ret = reset_control_reset(vde->rst);
- if (ret) {
- dev_err(dev, "DEC start: Failed to reset HW: %d\n", ret);
- goto put_runtime_pm;
- }
-
- ret = reset_control_deassert(vde->rst_mc);
- if (ret) {
- dev_err(dev, "DEC start: Failed to deassert MC reset: %d\n",
- ret);
- goto put_runtime_pm;
- }
-
- ret = tegra_vde_setup_hw_context(vde, &ctx, dpb_frames,
- bitstream_data_addr,
- bitstream_data_size,
- macroblocks_nb);
- if (ret)
- goto put_runtime_pm;
-
- tegra_vde_decode_frame(vde, macroblocks_nb);
-
- timeout = wait_for_completion_interruptible_timeout(
- &vde->decode_completion, msecs_to_jiffies(1000));
- if (timeout == 0) {
- bsev_ptr = tegra_vde_readl(vde, vde->bsev, 0x10);
- macroblocks_nb = tegra_vde_readl(vde, vde->sxe, 0xC8) & 0x1FFF;
- read_bytes = bsev_ptr ? bsev_ptr - bitstream_data_addr : 0;
-
- dev_err(dev, "Decoding failed: read 0x%X bytes, %u macroblocks parsed\n",
- read_bytes, macroblocks_nb);
-
- ret = -EIO;
- } else if (timeout < 0) {
- ret = timeout;
- }
-
- /*
- * At first reset memory client to avoid resetting VDE HW in the
- * middle of DMA which could result into memory corruption or hang
- * the whole system.
- */
- err = reset_control_assert(vde->rst_mc);
- if (err)
- dev_err(dev, "DEC end: Failed to assert MC reset: %d\n", err);
-
- err = reset_control_assert(vde->rst);
- if (err)
- dev_err(dev, "DEC end: Failed to assert HW reset: %d\n", err);
-
-put_runtime_pm:
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
-
-unlock:
- mutex_unlock(&vde->lock);
-
-release_dpb_frames:
- while (i--) {
- dma_dir = (i == 0) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
- tegra_vde_release_frame_dmabufs(vde, &dpb_frames[i], dma_dir,
- ctx.baseline_profile, ret != 0);
- }
-
-free_dpb_frames:
- kfree(dpb_frames);
-
-free_frames:
- kfree(frames);
-
-release_bitstream_dmabuf:
- tegra_vde_dmabuf_cache_unmap(vde, bitstream_data_dmabuf_attachment,
- ret != 0);
-
- return ret;
-}
-
-static long tegra_vde_unlocked_ioctl(struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- struct miscdevice *miscdev = filp->private_data;
- struct tegra_vde *vde = container_of(miscdev, struct tegra_vde,
- miscdev);
-
- switch (cmd) {
- case TEGRA_VDE_IOCTL_DECODE_H264:
- return tegra_vde_ioctl_decode_h264(vde, arg);
- }
-
- dev_err(miscdev->parent, "Invalid IOCTL command %u\n", cmd);
-
- return -ENOTTY;
-}
-
-static int tegra_vde_release_file(struct inode *inode, struct file *filp)
-{
- struct miscdevice *miscdev = filp->private_data;
- struct tegra_vde *vde = container_of(miscdev, struct tegra_vde,
- miscdev);
-
- tegra_vde_dmabuf_cache_unmap_sync(vde);
-
- return 0;
-}
-
-static const struct file_operations tegra_vde_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = tegra_vde_unlocked_ioctl,
- .release = tegra_vde_release_file,
-};
-
-static irqreturn_t tegra_vde_isr(int irq, void *data)
-{
- struct tegra_vde *vde = data;
-
- if (completion_done(&vde->decode_completion))
- return IRQ_NONE;
-
- tegra_vde_set_bits(vde, 0, vde->frameid, 0x208);
- complete(&vde->decode_completion);
-
- return IRQ_HANDLED;
-}
-
-static __maybe_unused int tegra_vde_runtime_suspend(struct device *dev)
-{
- struct tegra_vde *vde = dev_get_drvdata(dev);
- int err;
-
- if (!dev->pm_domain) {
- err = tegra_powergate_power_off(TEGRA_POWERGATE_VDEC);
- if (err) {
- dev_err(dev, "Failed to power down HW: %d\n", err);
- return err;
- }
- }
-
- clk_disable_unprepare(vde->clk);
- reset_control_release(vde->rst);
- reset_control_release(vde->rst_mc);
-
- return 0;
-}
-
-static __maybe_unused int tegra_vde_runtime_resume(struct device *dev)
-{
- struct tegra_vde *vde = dev_get_drvdata(dev);
- int err;
-
- err = reset_control_acquire(vde->rst_mc);
- if (err) {
- dev_err(dev, "Failed to acquire mc reset: %d\n", err);
- return err;
- }
-
- err = reset_control_acquire(vde->rst);
- if (err) {
- dev_err(dev, "Failed to acquire reset: %d\n", err);
- goto release_mc_reset;
- }
-
- if (!dev->pm_domain) {
- err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_VDEC,
- vde->clk, vde->rst);
- if (err) {
- dev_err(dev, "Failed to power up HW : %d\n", err);
- goto release_reset;
- }
- } else {
- /*
- * tegra_powergate_sequence_power_up() leaves clocks enabled,
- * while GENPD not.
- */
- err = clk_prepare_enable(vde->clk);
- if (err) {
- dev_err(dev, "Failed to enable clock: %d\n", err);
- goto release_reset;
- }
- }
-
- return 0;
-
-release_reset:
- reset_control_release(vde->rst);
-release_mc_reset:
- reset_control_release(vde->rst_mc);
-
- return err;
-}
-
-static int tegra_vde_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct tegra_vde *vde;
- int irq, err;
-
- vde = devm_kzalloc(dev, sizeof(*vde), GFP_KERNEL);
- if (!vde)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, vde);
-
- vde->soc = of_device_get_match_data(&pdev->dev);
-
- vde->sxe = devm_platform_ioremap_resource_byname(pdev, "sxe");
- if (IS_ERR(vde->sxe))
- return PTR_ERR(vde->sxe);
-
- vde->bsev = devm_platform_ioremap_resource_byname(pdev, "bsev");
- if (IS_ERR(vde->bsev))
- return PTR_ERR(vde->bsev);
-
- vde->mbe = devm_platform_ioremap_resource_byname(pdev, "mbe");
- if (IS_ERR(vde->mbe))
- return PTR_ERR(vde->mbe);
-
- vde->ppe = devm_platform_ioremap_resource_byname(pdev, "ppe");
- if (IS_ERR(vde->ppe))
- return PTR_ERR(vde->ppe);
-
- vde->mce = devm_platform_ioremap_resource_byname(pdev, "mce");
- if (IS_ERR(vde->mce))
- return PTR_ERR(vde->mce);
-
- vde->tfe = devm_platform_ioremap_resource_byname(pdev, "tfe");
- if (IS_ERR(vde->tfe))
- return PTR_ERR(vde->tfe);
-
- vde->ppb = devm_platform_ioremap_resource_byname(pdev, "ppb");
- if (IS_ERR(vde->ppb))
- return PTR_ERR(vde->ppb);
-
- vde->vdma = devm_platform_ioremap_resource_byname(pdev, "vdma");
- if (IS_ERR(vde->vdma))
- return PTR_ERR(vde->vdma);
-
- vde->frameid = devm_platform_ioremap_resource_byname(pdev, "frameid");
- if (IS_ERR(vde->frameid))
- return PTR_ERR(vde->frameid);
-
- vde->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(vde->clk)) {
- err = PTR_ERR(vde->clk);
- dev_err(dev, "Could not get VDE clk %d\n", err);
- return err;
- }
-
- vde->rst = devm_reset_control_get_exclusive_released(dev, NULL);
- if (IS_ERR(vde->rst)) {
- err = PTR_ERR(vde->rst);
- dev_err(dev, "Could not get VDE reset %d\n", err);
- return err;
- }
-
- vde->rst_mc = devm_reset_control_get_optional_exclusive_released(dev, "mc");
- if (IS_ERR(vde->rst_mc)) {
- err = PTR_ERR(vde->rst_mc);
- dev_err(dev, "Could not get MC reset %d\n", err);
- return err;
- }
-
- irq = platform_get_irq_byname(pdev, "sync-token");
- if (irq < 0)
- return irq;
-
- err = devm_request_irq(dev, irq, tegra_vde_isr, 0,
- dev_name(dev), vde);
- if (err) {
- dev_err(dev, "Could not request IRQ %d\n", err);
- return err;
- }
-
- err = devm_tegra_core_dev_init_opp_table_common(dev);
- if (err) {
- dev_err(dev, "Could initialize OPP table %d\n", err);
- return err;
- }
-
- vde->iram_pool = of_gen_pool_get(dev->of_node, "iram", 0);
- if (!vde->iram_pool) {
- dev_err(dev, "Could not get IRAM pool\n");
- return -EPROBE_DEFER;
- }
-
- vde->iram = gen_pool_dma_alloc(vde->iram_pool,
- gen_pool_size(vde->iram_pool),
- &vde->iram_lists_addr);
- if (!vde->iram) {
- dev_err(dev, "Could not reserve IRAM\n");
- return -ENOMEM;
- }
-
- INIT_LIST_HEAD(&vde->map_list);
- mutex_init(&vde->map_lock);
- mutex_init(&vde->lock);
- init_completion(&vde->decode_completion);
-
- vde->miscdev.minor = MISC_DYNAMIC_MINOR;
- vde->miscdev.name = "tegra_vde";
- vde->miscdev.fops = &tegra_vde_fops;
- vde->miscdev.parent = dev;
-
- err = tegra_vde_iommu_init(vde);
- if (err) {
- dev_err(dev, "Failed to initialize IOMMU: %d\n", err);
- goto err_gen_free;
- }
-
- pm_runtime_enable(dev);
- pm_runtime_use_autosuspend(dev);
- pm_runtime_set_autosuspend_delay(dev, 300);
-
- /*
- * VDE partition may be left ON after bootloader, hence let's
- * power-cycle it in order to put hardware into a predictable lower
- * power state.
- */
- err = pm_runtime_resume_and_get(dev);
- if (err)
- goto err_pm_runtime;
-
- pm_runtime_put(dev);
-
- err = tegra_vde_alloc_bo(vde, &vde->secure_bo, DMA_FROM_DEVICE, 4096);
- if (err) {
- dev_err(dev, "Failed to allocate secure BO: %d\n", err);
- goto err_pm_runtime;
- }
-
- err = misc_register(&vde->miscdev);
- if (err) {
- dev_err(dev, "Failed to register misc device: %d\n", err);
- goto err_free_secure_bo;
- }
-
- return 0;
-
-err_free_secure_bo:
- tegra_vde_free_bo(vde->secure_bo);
-err_pm_runtime:
- pm_runtime_dont_use_autosuspend(dev);
- pm_runtime_disable(dev);
-
- tegra_vde_iommu_deinit(vde);
-
-err_gen_free:
- gen_pool_free(vde->iram_pool, (unsigned long)vde->iram,
- gen_pool_size(vde->iram_pool));
-
- return err;
-}
-
-static int tegra_vde_remove(struct platform_device *pdev)
-{
- struct tegra_vde *vde = platform_get_drvdata(pdev);
- struct device *dev = &pdev->dev;
-
- misc_deregister(&vde->miscdev);
-
- tegra_vde_free_bo(vde->secure_bo);
-
- /*
- * As it increments RPM usage_count even on errors, we don't need to
- * check the returned code here.
- */
- pm_runtime_get_sync(dev);
-
- pm_runtime_dont_use_autosuspend(dev);
- pm_runtime_disable(dev);
-
- /*
- * Balance RPM state, the VDE power domain is left ON and hardware
- * is clock-gated. It's safe to reboot machine now.
- */
- pm_runtime_put_noidle(dev);
- clk_disable_unprepare(vde->clk);
-
- tegra_vde_dmabuf_cache_unmap_all(vde);
- tegra_vde_iommu_deinit(vde);
-
- gen_pool_free(vde->iram_pool, (unsigned long)vde->iram,
- gen_pool_size(vde->iram_pool));
-
- return 0;
-}
-
-static void tegra_vde_shutdown(struct platform_device *pdev)
-{
- /*
- * On some devices bootloader isn't ready to a power-gated VDE on
- * a warm-reboot, machine will hang in that case.
- */
- pm_runtime_get_sync(&pdev->dev);
-}
-
-static __maybe_unused int tegra_vde_pm_suspend(struct device *dev)
-{
- struct tegra_vde *vde = dev_get_drvdata(dev);
- int err;
-
- mutex_lock(&vde->lock);
-
- err = pm_runtime_force_suspend(dev);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-static __maybe_unused int tegra_vde_pm_resume(struct device *dev)
-{
- struct tegra_vde *vde = dev_get_drvdata(dev);
- int err;
-
- err = pm_runtime_force_resume(dev);
- if (err < 0)
- return err;
-
- mutex_unlock(&vde->lock);
-
- return 0;
-}
-
-static const struct dev_pm_ops tegra_vde_pm_ops = {
- SET_RUNTIME_PM_OPS(tegra_vde_runtime_suspend,
- tegra_vde_runtime_resume,
- NULL)
- SET_SYSTEM_SLEEP_PM_OPS(tegra_vde_pm_suspend,
- tegra_vde_pm_resume)
-};
-
-static const struct tegra_vde_soc tegra124_vde_soc = {
- .supports_ref_pic_marking = true,
-};
-
-static const struct tegra_vde_soc tegra114_vde_soc = {
- .supports_ref_pic_marking = true,
-};
-
-static const struct tegra_vde_soc tegra30_vde_soc = {
- .supports_ref_pic_marking = false,
-};
-
-static const struct tegra_vde_soc tegra20_vde_soc = {
- .supports_ref_pic_marking = false,
-};
-
-static const struct of_device_id tegra_vde_of_match[] = {
- { .compatible = "nvidia,tegra124-vde", .data = &tegra124_vde_soc },
- { .compatible = "nvidia,tegra114-vde", .data = &tegra114_vde_soc },
- { .compatible = "nvidia,tegra30-vde", .data = &tegra30_vde_soc },
- { .compatible = "nvidia,tegra20-vde", .data = &tegra20_vde_soc },
- { },
-};
-MODULE_DEVICE_TABLE(of, tegra_vde_of_match);
-
-static struct platform_driver tegra_vde_driver = {
- .probe = tegra_vde_probe,
- .remove = tegra_vde_remove,
- .shutdown = tegra_vde_shutdown,
- .driver = {
- .name = "tegra-vde",
- .of_match_table = tegra_vde_of_match,
- .pm = &tegra_vde_pm_ops,
- },
-};
-module_platform_driver(tegra_vde_driver);
-
-MODULE_DESCRIPTION("NVIDIA Tegra Video Decoder driver");
-MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/tegra-vde/vde.h b/drivers/staging/media/tegra-vde/vde.h
deleted file mode 100644
index bbd42b8d9991..000000000000
--- a/drivers/staging/media/tegra-vde/vde.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * NVIDIA Tegra Video decoder driver
- *
- * Copyright (C) 2016-2019 GRATE-DRIVER project
- */
-
-#ifndef TEGRA_VDE_H
-#define TEGRA_VDE_H
-
-#include <linux/completion.h>
-#include <linux/dma-direction.h>
-#include <linux/iova.h>
-#include <linux/list.h>
-#include <linux/miscdevice.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-
-struct clk;
-struct dma_buf;
-struct gen_pool;
-struct iommu_group;
-struct iommu_domain;
-struct reset_control;
-struct dma_buf_attachment;
-
-struct tegra_vde_soc {
- bool supports_ref_pic_marking;
-};
-
-struct tegra_vde_bo {
- struct iova *iova;
- struct sg_table sgt;
- struct tegra_vde *vde;
- enum dma_data_direction dma_dir;
- unsigned long dma_attrs;
- dma_addr_t dma_handle;
- dma_addr_t dma_addr;
- void *dma_cookie;
- size_t size;
-};
-
-struct tegra_vde {
- void __iomem *sxe;
- void __iomem *bsev;
- void __iomem *mbe;
- void __iomem *ppe;
- void __iomem *mce;
- void __iomem *tfe;
- void __iomem *ppb;
- void __iomem *vdma;
- void __iomem *frameid;
- struct mutex lock;
- struct mutex map_lock;
- struct list_head map_list;
- struct miscdevice miscdev;
- struct reset_control *rst;
- struct reset_control *rst_mc;
- struct gen_pool *iram_pool;
- struct completion decode_completion;
- struct clk *clk;
- struct iommu_domain *domain;
- struct iommu_group *group;
- struct iova_domain iova;
- struct iova *iova_resv_static_addresses;
- struct iova *iova_resv_last_page;
- const struct tegra_vde_soc *soc;
- struct tegra_vde_bo *secure_bo;
- dma_addr_t iram_lists_addr;
- u32 *iram;
-};
-
-int tegra_vde_iommu_init(struct tegra_vde *vde);
-void tegra_vde_iommu_deinit(struct tegra_vde *vde);
-int tegra_vde_iommu_map(struct tegra_vde *vde,
- struct sg_table *sgt,
- struct iova **iovap,
- size_t size);
-void tegra_vde_iommu_unmap(struct tegra_vde *vde, struct iova *iova);
-
-int tegra_vde_dmabuf_cache_map(struct tegra_vde *vde,
- struct dma_buf *dmabuf,
- enum dma_data_direction dma_dir,
- struct dma_buf_attachment **ap,
- dma_addr_t *addrp);
-void tegra_vde_dmabuf_cache_unmap(struct tegra_vde *vde,
- struct dma_buf_attachment *a,
- bool release);
-void tegra_vde_dmabuf_cache_unmap_sync(struct tegra_vde *vde);
-void tegra_vde_dmabuf_cache_unmap_all(struct tegra_vde *vde);
-
-static __maybe_unused char const *
-tegra_vde_reg_base_name(struct tegra_vde *vde, void __iomem *base)
-{
- if (vde->sxe == base)
- return "SXE";
-
- if (vde->bsev == base)
- return "BSEV";
-
- if (vde->mbe == base)
- return "MBE";
-
- if (vde->ppe == base)
- return "PPE";
-
- if (vde->mce == base)
- return "MCE";
-
- if (vde->tfe == base)
- return "TFE";
-
- if (vde->ppb == base)
- return "PPB";
-
- if (vde->vdma == base)
- return "VDMA";
-
- if (vde->frameid == base)
- return "FRAMEID";
-
- return "???";
-}
-
-#endif /* TEGRA_VDE_H */
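
The deleted header shows the per-SoC matching pattern the driver above relies on: each of_device_id entry's .data points at a tegra_vde_soc capability descriptor. A minimal sketch of how a probe routine typically fetches that descriptor — hypothetical function name, with of_device_get_match_data() as the usual accessor:

#include <linux/mod_devicetable.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static int example_vde_probe(struct platform_device *pdev)
{
    /* .data of the matched of_device_id entry from the table above */
    const struct tegra_vde_soc *soc = of_device_get_match_data(&pdev->dev);

    if (!soc)
        return -ENODEV;

    if (soc->supports_ref_pic_marking) {
        /* enable H.264 reference picture marking (Tegra114/124) */
    }

    return 0;
}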
diff --git a/drivers/staging/media/tegra-video/Kconfig b/drivers/staging/media/tegra-video/Kconfig
index 1f35da4b134e..df1b2cff2417 100644
--- a/drivers/staging/media/tegra-video/Kconfig
+++ b/drivers/staging/media/tegra-video/Kconfig
@@ -2,7 +2,7 @@
config VIDEO_TEGRA
tristate "NVIDIA Tegra VI driver"
depends on TEGRA_HOST1X
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
diff --git a/drivers/staging/media/zoran/Kconfig b/drivers/staging/media/zoran/Kconfig
index 7874842033ca..3fb3e27e04a8 100644
--- a/drivers/staging/media/zoran/Kconfig
+++ b/drivers/staging/media/zoran/Kconfig
@@ -1,8 +1,19 @@
config VIDEO_ZORAN
tristate "Zoran ZR36057/36067 Video For Linux (Deprecated)"
- depends on PCI && I2C_ALGOBIT && VIDEO_V4L2
+ depends on PCI && I2C_ALGOBIT && VIDEO_DEV
depends on !ALPHA
+ depends on DEBUG_FS
select VIDEOBUF2_DMA_CONTIG
+ select VIDEO_ADV7170 if VIDEO_ZORAN_LML33R10
+ select VIDEO_ADV7175 if VIDEO_ZORAN_DC10 || VIDEO_ZORAN_DC30
+ select VIDEO_BT819 if VIDEO_ZORAN_LML33
+ select VIDEO_BT856 if VIDEO_ZORAN_LML33 || VIDEO_ZORAN_AVS6EYES
+ select VIDEO_BT866 if VIDEO_ZORAN_AVS6EYES
+ select VIDEO_KS0127 if VIDEO_ZORAN_AVS6EYES
+ select VIDEO_SAA711X if VIDEO_ZORAN_BUZ || VIDEO_ZORAN_LML33R10
+ select VIDEO_SAA7110 if VIDEO_ZORAN_DC10
+ select VIDEO_SAA7185 if VIDEO_ZORAN_BUZ
+ select VIDEO_VPX3220 if VIDEO_ZORAN_DC30
help
Say Y for support for MJPEG capture cards based on the Zoran
36057/36067 PCI controller chipset. This includes the Iomega
@@ -14,17 +25,15 @@ config VIDEO_ZORAN
module will be called zr36067.
config VIDEO_ZORAN_DC30
- tristate "Pinnacle/Miro DC30(+) support"
+ bool "Pinnacle/Miro DC30(+) support"
depends on VIDEO_ZORAN
- select VIDEO_ADV7175 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_VPX3220 if MEDIA_SUBDRV_AUTOSELECT
help
Support for the Pinnacle/Miro DC30(+) MJPEG capture/playback
card. This also supports really old DC10 cards based on the
zr36050 MJPEG codec and zr36016 VFE.
config VIDEO_ZORAN_ZR36060
- tristate "Zoran ZR36060"
+ bool "Zoran ZR36060"
depends on VIDEO_ZORAN
help
Say Y to support Zoran boards based on 36060 chips.
@@ -32,45 +41,34 @@ config VIDEO_ZORAN_ZR36060
and 33 R10 and AverMedia 6 boards.
config VIDEO_ZORAN_BUZ
- tristate "Iomega Buz support"
+ bool "Iomega Buz support"
depends on VIDEO_ZORAN_ZR36060
- select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_SAA7185 if MEDIA_SUBDRV_AUTOSELECT
help
Support for the Iomega Buz MJPEG capture/playback card.
config VIDEO_ZORAN_DC10
- tristate "Pinnacle/Miro DC10(+) support"
+ bool "Pinnacle/Miro DC10(+) support"
depends on VIDEO_ZORAN_ZR36060
- select VIDEO_SAA7110 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_ADV7175 if MEDIA_SUBDRV_AUTOSELECT
help
Support for the Pinnacle/Miro DC10(+) MJPEG capture/playback
card.
config VIDEO_ZORAN_LML33
- tristate "Linux Media Labs LML33 support"
+ bool "Linux Media Labs LML33 support"
depends on VIDEO_ZORAN_ZR36060
- select VIDEO_BT819 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_BT856 if MEDIA_SUBDRV_AUTOSELECT
help
Support for the Linux Media Labs LML33 MJPEG capture/playback
card.
config VIDEO_ZORAN_LML33R10
- tristate "Linux Media Labs LML33R10 support"
+ bool "Linux Media Labs LML33R10 support"
depends on VIDEO_ZORAN_ZR36060
- select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_ADV7170 if MEDIA_SUBDRV_AUTOSELECT
help
Support for the Linux Media Labs LML33R10 MJPEG capture/playback
card.
config VIDEO_ZORAN_AVS6EYES
- tristate "AverMedia 6 Eyes support"
+ bool "AverMedia 6 Eyes support"
depends on VIDEO_ZORAN_ZR36060
- select VIDEO_BT856 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_BT866 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_KS0127 if MEDIA_SUBDRV_AUTOSELECT
help
Support for the AverMedia 6 Eyes video surveillance card.
diff --git a/drivers/staging/media/zoran/Makefile b/drivers/staging/media/zoran/Makefile
index 7023158e3892..9603bac0195c 100644
--- a/drivers/staging/media/zoran/Makefile
+++ b/drivers/staging/media/zoran/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
zr36067-objs := zoran_device.o \
- zoran_driver.o zoran_card.o
+ zoran_driver.o zoran_card.o videocodec.o
-obj-$(CONFIG_VIDEO_ZORAN) += zr36067.o videocodec.o
-obj-$(CONFIG_VIDEO_ZORAN_DC30) += zr36050.o zr36016.o
-obj-$(CONFIG_VIDEO_ZORAN_ZR36060) += zr36060.o
+obj-$(CONFIG_VIDEO_ZORAN) += zr36067.o
+zr36067-$(CONFIG_VIDEO_ZORAN_DC30) += zr36050.o zr36016.o
+zr36067-$(CONFIG_VIDEO_ZORAN_ZR36060) += zr36060.o
diff --git a/drivers/staging/media/zoran/videocodec.c b/drivers/staging/media/zoran/videocodec.c
index 28031d3fd757..3af7d02bd910 100644
--- a/drivers/staging/media/zoran/videocodec.c
+++ b/drivers/staging/media/zoran/videocodec.c
@@ -8,31 +8,21 @@
* (c) 2002 Wolfgang Scherr <scherr@net4you.at>
*/
-#define VIDEOCODEC_VERSION "v0.2"
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
-// kernel config is here (procfs flag)
-
-#ifdef CONFIG_PROC_FS
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-#endif
-
#include "videocodec.h"
-static int debug;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Debug level (0-4)");
+static int videocodec_debug;
+module_param(videocodec_debug, int, 0);
+MODULE_PARM_DESC(videocodec_debug, "Debug level (0-4)");
#define dprintk(num, format, args...) \
do { \
- if (debug >= num) \
+ if (videocodec_debug >= num) \
printk(format, ##args); \
} while (0)
@@ -80,12 +70,9 @@ struct videocodec *videocodec_attach(struct videocodec_master *master)
if ((master->flags & h->codec->flags) == master->flags) {
dprintk(4, "%s: try '%s'\n", __func__, h->codec->name);
- if (!try_module_get(h->codec->owner))
- return NULL;
-
codec = kmemdup(h->codec, sizeof(struct videocodec), GFP_KERNEL);
if (!codec)
- goto out_module_put;
+ goto out_kfree;
res = strlen(codec->name);
snprintf(codec->name + res, sizeof(codec->name) - res, "[%d]", h->attached);
@@ -121,13 +108,10 @@ struct videocodec *videocodec_attach(struct videocodec_master *master)
pr_err("%s: no codec found!\n", __func__);
return NULL;
- out_module_put:
- module_put(h->codec->owner);
out_kfree:
kfree(codec);
return NULL;
}
-EXPORT_SYMBOL(videocodec_attach);
int videocodec_detach(struct videocodec *codec)
{
@@ -168,7 +152,6 @@ int videocodec_detach(struct videocodec *codec)
prev->next = a->next;
dprintk(4, "videocodec: delete middle\n");
}
- module_put(a->codec->owner);
kfree(a->codec);
kfree(a);
h->attached -= 1;
@@ -183,7 +166,6 @@ int videocodec_detach(struct videocodec *codec)
pr_err("%s: given codec not found!\n", __func__);
return -EINVAL;
}
-EXPORT_SYMBOL(videocodec_detach);
int videocodec_register(const struct videocodec *codec)
{
@@ -216,7 +198,6 @@ int videocodec_register(const struct videocodec *codec)
return 0;
}
-EXPORT_SYMBOL(videocodec_register);
int videocodec_unregister(const struct videocodec *codec)
{
@@ -263,10 +244,8 @@ int videocodec_unregister(const struct videocodec *codec)
pr_err("%s: given codec not found!\n", __func__);
return -EINVAL;
}
-EXPORT_SYMBOL(videocodec_unregister);
-#ifdef CONFIG_PROC_FS
-static int proc_videocodecs_show(struct seq_file *m, void *v)
+int videocodec_debugfs_show(struct seq_file *m)
{
struct codec_list *h = codeclist_top;
struct attached_list *a;
@@ -293,38 +272,3 @@ static int proc_videocodecs_show(struct seq_file *m, void *v)
return 0;
}
-#endif
-
-/* ===================== */
-/* hook in driver module */
-/* ===================== */
-static int __init videocodec_init(void)
-{
-#ifdef CONFIG_PROC_FS
- static struct proc_dir_entry *videocodec_proc_entry;
-#endif
-
- pr_info("Linux video codec intermediate layer: %s\n", VIDEOCODEC_VERSION);
-
-#ifdef CONFIG_PROC_FS
- videocodec_proc_entry = proc_create_single("videocodecs", 0, NULL, proc_videocodecs_show);
- if (!videocodec_proc_entry)
- pr_err("videocodec: can't init procfs.\n");
-#endif
- return 0;
-}
-
-static void __exit videocodec_exit(void)
-{
-#ifdef CONFIG_PROC_FS
- remove_proc_entry("videocodecs", NULL);
-#endif
-}
-
-module_init(videocodec_init);
-module_exit(videocodec_exit);
-
-MODULE_AUTHOR("Wolfgang Scherr <scherr@net4you.at>");
-MODULE_DESCRIPTION("Intermediate API module for video codecs "
- VIDEOCODEC_VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/zoran/videocodec.h b/drivers/staging/media/zoran/videocodec.h
index 8a5003dda9f4..9dea348fee40 100644
--- a/drivers/staging/media/zoran/videocodec.h
+++ b/drivers/staging/media/zoran/videocodec.h
@@ -123,6 +123,7 @@ M zr36055[1] 0001 0000c001 00000000 (zr36050[1])
#ifndef __LINUX_VIDEOCODEC_H
#define __LINUX_VIDEOCODEC_H
+#include <linux/debugfs.h>
#include <linux/videodev2.h>
#define CODEC_DO_COMPRESSION 0
@@ -233,7 +234,6 @@ struct jpeg_app_marker {
};
struct videocodec {
- struct module *owner;
/* -- filled in by slave device during register -- */
char name[32];
unsigned long magic; /* may be used for client<->master attaching */
@@ -305,4 +305,6 @@ extern int videocodec_unregister(const struct videocodec *);
/* the other calls are directly done via the videocodec structure! */
+int videocodec_debugfs_show(struct seq_file *m);
+
#endif /*ifndef __LINUX_VIDEOCODEC_H */
diff --git a/drivers/staging/media/zoran/zoran.h b/drivers/staging/media/zoran/zoran.h
index b1ad2a2b914c..654c95fa5aba 100644
--- a/drivers/staging/media/zoran/zoran.h
+++ b/drivers/staging/media/zoran/zoran.h
@@ -18,6 +18,7 @@
#ifndef _BUZ_H_
#define _BUZ_H_
+#include <linux/debugfs.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <media/videobuf2-core.h>
@@ -53,22 +54,8 @@ static inline struct zr_buffer *vb2_to_zr_buffer(struct vb2_buffer *vb)
#define BUZ_NUM_STAT_COM 4
#define BUZ_MASK_STAT_COM 3
-#define BUZ_MAX_FRAME 256 /* Must be a power of 2 */
-#define BUZ_MASK_FRAME 255 /* Must be BUZ_MAX_FRAME-1 */
-
#define BUZ_MAX_INPUT 16
-#if VIDEO_MAX_FRAME <= 32
-# define V4L_MAX_FRAME 32
-#elif VIDEO_MAX_FRAME <= 64
-# define V4L_MAX_FRAME 64
-#else
-# error "Too many video frame buffers to handle"
-#endif
-#define V4L_MASK_FRAME (V4L_MAX_FRAME - 1)
-
-#define MAX_FRAME (BUZ_MAX_FRAME > VIDEO_MAX_FRAME ? BUZ_MAX_FRAME : VIDEO_MAX_FRAME)
-
#include "zr36057.h"
enum card_type {
@@ -295,6 +282,7 @@ struct zoran {
struct list_head queued_bufs;
spinlock_t queued_bufs_lock; /* Protects queued_bufs */
struct zr_buffer *inuse[BUZ_NUM_STAT_COM * 2];
+ struct dentry *dbgfs_dir;
};
static inline struct zoran *to_zoran(struct v4l2_device *v4l2_dev)
@@ -313,6 +301,6 @@ static inline struct zoran *to_zoran(struct v4l2_device *v4l2_dev)
#endif
-int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq);
+int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq, int dir);
void zoran_queue_exit(struct zoran *zr);
int zr_set_buf(struct zoran *zr);
diff --git a/drivers/staging/media/zoran/zoran_card.c b/drivers/staging/media/zoran/zoran_card.c
index f259585b0689..26f978a1cc72 100644
--- a/drivers/staging/media/zoran/zoran_card.c
+++ b/drivers/staging/media/zoran/zoran_card.c
@@ -29,6 +29,9 @@
#include "zoran.h"
#include "zoran_card.h"
#include "zoran_device.h"
+#include "zr36016.h"
+#include "zr36050.h"
+#include "zr36060.h"
extern const struct zoran_format zoran_formats[];
@@ -36,17 +39,6 @@ static int card[BUZ_MAX] = { [0 ... (BUZ_MAX - 1)] = -1 };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "Card type");
-/*
- * The video mem address of the video card. The driver has a little database
- * for some videocards to determine it from there. If your video card is not
- * in there you have either to give it to the driver as a parameter or set
- * in a VIDIOCSFBUF ioctl
- */
-
-static unsigned long vidmem; /* default = 0 - Video memory base address */
-module_param_hw(vidmem, ulong, iomem, 0444);
-MODULE_PARM_DESC(vidmem, "Default video memory base address");
-
/* Default input and video norm at startup of the driver. */
static unsigned int default_input; /* default 0 = Composite, 1 = S-Video */
@@ -68,20 +60,6 @@ static int video_nr[BUZ_MAX] = { [0 ... (BUZ_MAX - 1)] = -1 };
module_param_array(video_nr, int, NULL, 0444);
MODULE_PARM_DESC(video_nr, "Video device number (-1=Auto)");
-int v4l_nbufs = 4;
-int v4l_bufsize = 864; /* Everybody should be able to work with this setting */
-module_param(v4l_nbufs, int, 0644);
-MODULE_PARM_DESC(v4l_nbufs, "Maximum number of V4L buffers to use");
-module_param(v4l_bufsize, int, 0644);
-MODULE_PARM_DESC(v4l_bufsize, "Maximum size per V4L buffer (in kB)");
-
-int jpg_nbufs = 32;
-int jpg_bufsize = 512; /* max size for 100% quality full-PAL frame */
-module_param(jpg_nbufs, int, 0644);
-MODULE_PARM_DESC(jpg_nbufs, "Maximum number of JPG buffers to use");
-module_param(jpg_bufsize, int, 0644);
-MODULE_PARM_DESC(jpg_bufsize, "Maximum size per JPG buffer (in kB)");
-
/* 1=Pass through TV signal when device is not used */
/* 0=Show color bar when device is not used (LML33: only if lml33dpath=1) */
int pass_through;
@@ -266,6 +244,96 @@ static const char *codecid_to_modulename(u16 codecid)
return name;
}
+static int codec_init(struct zoran *zr, u16 codecid)
+{
+ switch (codecid) {
+ case CODEC_TYPE_ZR36060:
+#ifdef CONFIG_VIDEO_ZORAN_ZR36060
+ return zr36060_init_module();
+#else
+ pci_err(zr->pci_dev, "ZR36060 support is not enabled\n");
+ return -EINVAL;
+#endif
+ break;
+ case CODEC_TYPE_ZR36050:
+#ifdef CONFIG_VIDEO_ZORAN_DC30
+ return zr36050_init_module();
+#else
+ pci_err(zr->pci_dev, "ZR36050 support is not enabled\n");
+ return -EINVAL;
+#endif
+ break;
+ case CODEC_TYPE_ZR36016:
+#ifdef CONFIG_VIDEO_ZORAN_DC30
+ return zr36016_init_module();
+#else
+ pci_err(zr->pci_dev, "ZR36016 support is not enabled\n");
+ return -EINVAL;
+#endif
+ break;
+ }
+
+ pci_err(zr->pci_dev, "unknown codec id %x\n", codecid);
+ return -EINVAL;
+}
+
+static void codec_exit(struct zoran *zr, u16 codecid)
+{
+ switch (codecid) {
+ case CODEC_TYPE_ZR36060:
+#ifdef CONFIG_VIDEO_ZORAN_ZR36060
+ zr36060_cleanup_module();
+#endif
+ break;
+ case CODEC_TYPE_ZR36050:
+#ifdef CONFIG_VIDEO_ZORAN_DC30
+ zr36050_cleanup_module();
+#endif
+ break;
+ case CODEC_TYPE_ZR36016:
+#ifdef CONFIG_VIDEO_ZORAN_DC30
+ zr36016_cleanup_module();
+#endif
+ break;
+ }
+}
+
+static int videocodec_init(struct zoran *zr)
+{
+ const char *codec_name, *vfe_name;
+ int result;
+
+ codec_name = codecid_to_modulename(zr->card.video_codec);
+ if (codec_name) {
+ result = codec_init(zr, zr->card.video_codec);
+ if (result < 0) {
+ pci_err(zr->pci_dev, "failed to load video codec %s: %d\n",
+ codec_name, result);
+ return result;
+ }
+ }
+ vfe_name = codecid_to_modulename(zr->card.video_vfe);
+ if (vfe_name) {
+ result = codec_init(zr, zr->card.video_vfe);
+ if (result < 0) {
+ pci_err(zr->pci_dev, "failed to load video vfe %s: %d\n",
+ vfe_name, result);
+ if (codec_name)
+ codec_exit(zr, zr->card.video_codec);
+ return result;
+ }
+ }
+ return 0;
+}
+
+static void videocodec_exit(struct zoran *zr)
+{
+ if (zr->card.video_codec != CODEC_TYPE_NONE)
+ codec_exit(zr, zr->card.video_codec);
+ if (zr->card.video_vfe != CODEC_TYPE_NONE)
+ codec_exit(zr, zr->card.video_vfe);
+}
+
// struct tvnorm {
// u16 wt, wa, h_start, h_sync_start, ht, ha, v_start;
// };
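
Because the codec helpers are now linked into zr36067 itself (see the Makefile hunk above), request_module() gives way to direct init calls guarded by #ifdef. An equivalent formulation — an editorial sketch, not what this patch does — would use IS_ENABLED() and rely on the compiler discarding calls in dead branches (the zr360xx prototypes are declared unconditionally in their headers):

static int codec_init_alt(struct zoran *zr, u16 codecid)
{
    switch (codecid) {
    case CODEC_TYPE_ZR36060:
        if (IS_ENABLED(CONFIG_VIDEO_ZORAN_ZR36060))
            return zr36060_init_module();
        break;
    case CODEC_TYPE_ZR36050:
        if (IS_ENABLED(CONFIG_VIDEO_ZORAN_DC30))
            return zr36050_init_module();
        break;
    case CODEC_TYPE_ZR36016:
        if (IS_ENABLED(CONFIG_VIDEO_ZORAN_DC30))
            return zr36016_init_module();
        break;
    }

    pci_err(zr->pci_dev, "codec id %x unknown or not built in\n", codecid);
    return -EINVAL;
}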
@@ -803,6 +871,99 @@ int zoran_check_jpg_settings(struct zoran *zr,
return 0;
}
+static int zoran_init_video_device(struct zoran *zr, struct video_device *video_dev, int dir)
+{
+ int err;
+
+ /* Now add the template and register the device unit. */
+ *video_dev = zoran_template;
+ video_dev->v4l2_dev = &zr->v4l2_dev;
+ video_dev->lock = &zr->lock;
+ video_dev->device_caps = V4L2_CAP_STREAMING | dir;
+
+ strscpy(video_dev->name, ZR_DEVNAME(zr), sizeof(video_dev->name));
+ /*
+ * It's not a mem2mem device, but you can both capture and output from one and the same
+ * device. This should really be split up into two device nodes, but that's a job for
+ * another day.
+ */
+ video_dev->vfl_dir = VFL_DIR_M2M;
+ zoran_queue_init(zr, &zr->vq, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ err = video_register_device(video_dev, VFL_TYPE_VIDEO, video_nr[zr->id]);
+ if (err < 0)
+ return err;
+ video_set_drvdata(video_dev, zr);
+ return 0;
+}
+
+static void zoran_exit_video_devices(struct zoran *zr)
+{
+ video_unregister_device(zr->video_dev);
+ kfree(zr->video_dev);
+}
+
+static int zoran_init_video_devices(struct zoran *zr)
+{
+ int err;
+
+ zr->video_dev = video_device_alloc();
+ if (!zr->video_dev)
+ return -ENOMEM;
+
+ err = zoran_init_video_device(zr, zr->video_dev, V4L2_CAP_VIDEO_CAPTURE);
+ if (err)
+ kfree(zr->video_dev);
+ return err;
+}
+
+/*
+ * v4l2_device_unregister() will take care of removing zr->encoder/zr->decoder
+ * via v4l2_i2c_subdev_unregister()
+ */
+static int zoran_i2c_init(struct zoran *zr)
+{
+ int err;
+
+ pci_info(zr->pci_dev, "Initializing i2c bus...\n");
+
+ err = zoran_register_i2c(zr);
+ if (err) {
+ pci_err(zr->pci_dev, "%s - cannot initialize i2c bus\n", __func__);
+ return err;
+ }
+
+ zr->decoder = v4l2_i2c_new_subdev(&zr->v4l2_dev, &zr->i2c_adapter,
+ zr->card.i2c_decoder, 0,
+ zr->card.addrs_decoder);
+ if (!zr->decoder) {
+ pci_err(zr->pci_dev, "Fail to get decoder %s\n", zr->card.i2c_decoder);
+ err = -EINVAL;
+ goto error_decoder;
+ }
+
+ if (zr->card.i2c_encoder) {
+ zr->encoder = v4l2_i2c_new_subdev(&zr->v4l2_dev, &zr->i2c_adapter,
+ zr->card.i2c_encoder, 0,
+ zr->card.addrs_encoder);
+ if (!zr->encoder) {
+ pci_err(zr->pci_dev, "Fail to get encoder %s\n", zr->card.i2c_encoder);
+ err = -EINVAL;
+ goto error_decoder;
+ }
+ }
+ return 0;
+
+error_decoder:
+ zoran_unregister_i2c(zr);
+ return err;
+}
+
+static void zoran_i2c_exit(struct zoran *zr)
+{
+ zoran_unregister_i2c(zr);
+}
+
void zoran_open_init_params(struct zoran *zr)
{
int i;
@@ -874,17 +1035,11 @@ static int zr36057_init(struct zoran *zr)
zoran_open_init_params(zr);
/* allocate memory *before* doing anything to the hardware in case allocation fails */
- zr->video_dev = video_device_alloc();
- if (!zr->video_dev) {
- err = -ENOMEM;
- goto exit;
- }
zr->stat_com = dma_alloc_coherent(&zr->pci_dev->dev,
BUZ_NUM_STAT_COM * sizeof(u32),
&zr->p_sc, GFP_KERNEL);
if (!zr->stat_com) {
- err = -ENOMEM;
- goto exit_video;
+ return -ENOMEM;
}
for (j = 0; j < BUZ_NUM_STAT_COM; j++)
zr->stat_com[j] = cpu_to_le32(1); /* mark as unavailable to zr36057 */
@@ -897,26 +1052,9 @@ static int zr36057_init(struct zoran *zr)
goto exit_statcom;
}
- /* Now add the template and register the device unit. */
- *zr->video_dev = zoran_template;
- zr->video_dev->v4l2_dev = &zr->v4l2_dev;
- zr->video_dev->lock = &zr->lock;
- zr->video_dev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
-
- strscpy(zr->video_dev->name, ZR_DEVNAME(zr), sizeof(zr->video_dev->name));
- /*
- * It's not a mem2mem device, but you can both capture and output from one and the same
- * device. This should really be split up into two device nodes, but that's a job for
- * another day.
- */
- zr->video_dev->vfl_dir = VFL_DIR_M2M;
-
- zoran_queue_init(zr, &zr->vq);
-
- err = video_register_device(zr->video_dev, VFL_TYPE_VIDEO, video_nr[zr->id]);
- if (err < 0)
+ err = zoran_init_video_devices(zr);
+ if (err)
goto exit_statcomb;
- video_set_drvdata(zr->video_dev, zr);
zoran_init_hardware(zr);
if (!pass_through) {
@@ -931,9 +1069,6 @@ exit_statcomb:
dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32) * 2, zr->stat_comb, zr->p_scb);
exit_statcom:
dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32), zr->stat_com, zr->p_sc);
-exit_video:
- kfree(zr->video_dev);
-exit:
return err;
}
@@ -945,6 +1080,8 @@ static void zoran_remove(struct pci_dev *pdev)
if (!zr->initialized)
goto exit_free;
+ debugfs_remove_recursive(zr->dbgfs_dir);
+
zoran_queue_exit(zr);
/* unregister videocodec bus */
@@ -952,9 +1089,10 @@ static void zoran_remove(struct pci_dev *pdev)
videocodec_detach(zr->codec);
if (zr->vfe)
videocodec_detach(zr->vfe);
+ videocodec_exit(zr);
/* unregister i2c bus */
- zoran_unregister_i2c(zr);
+ zoran_i2c_exit(zr);
/* disable PCI bus-mastering */
zoran_set_pci_master(zr, 0);
/* put chip into reset */
@@ -965,7 +1103,7 @@ static void zoran_remove(struct pci_dev *pdev)
dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32) * 2, zr->stat_comb, zr->p_scb);
pci_release_regions(pdev);
pci_disable_device(zr->pci_dev);
- video_unregister_device(zr->video_dev);
+ zoran_exit_video_devices(zr);
exit_free:
v4l2_ctrl_handler_free(&zr->hdl);
v4l2_device_unregister(&zr->v4l2_dev);
@@ -1051,6 +1189,39 @@ static const struct v4l2_ctrl_ops zoran_video_ctrl_ops = {
.s_ctrl = zoran_video_set_ctrl,
};
+static int zoran_debugfs_show(struct seq_file *seq, void *v)
+{
+ struct zoran *zr = seq->private;
+
+ seq_printf(seq, "Running mode %x\n", zr->running);
+ seq_printf(seq, "Codec mode %x\n", zr->codec_mode);
+ seq_printf(seq, "Norm %llx\n", zr->norm);
+ seq_printf(seq, "Input %d\n", zr->input);
+ seq_printf(seq, "Buffersize %d\n", zr->buffer_size);
+
+ seq_printf(seq, "V4L width %dx%d\n", zr->v4l_settings.width, zr->v4l_settings.height);
+ seq_printf(seq, "V4L bytesperline %d\n", zr->v4l_settings.bytesperline);
+
+ seq_printf(seq, "JPG decimation %u\n", zr->jpg_settings.decimation);
+ seq_printf(seq, "JPG hor_dcm %u\n", zr->jpg_settings.hor_dcm);
+ seq_printf(seq, "JPG ver_dcm %u\n", zr->jpg_settings.ver_dcm);
+ seq_printf(seq, "JPG tmp_dcm %u\n", zr->jpg_settings.tmp_dcm);
+ seq_printf(seq, "JPG odd_even %u\n", zr->jpg_settings.odd_even);
+ seq_printf(seq, "JPG crop %dx%d %d %d\n",
+ zr->jpg_settings.img_x,
+ zr->jpg_settings.img_y,
+ zr->jpg_settings.img_width,
+ zr->jpg_settings.img_height);
+
+ seq_printf(seq, "Prepared %u\n", zr->prepared);
+ seq_printf(seq, "Queued %u\n", zr->queued);
+
+ videocodec_debugfs_show(seq);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(zoran_debugfs);
+
/*
* Scan for a Buz card (actually for the PCI controller ZR36057),
* request the irq and map the io memory
@@ -1063,14 +1234,22 @@ static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct videocodec_master *master_vfe = NULL;
struct videocodec_master *master_codec = NULL;
int card_num;
- const char *codec_name, *vfe_name;
unsigned int nr;
int err;
+ pci_info(pdev, "Zoran MJPEG board driver version %s\n", ZORAN_VERSION);
+
+ /* some mainboards might not do PCI-PCI data transfer well */
+ if (pci_pci_problems & (PCIPCI_FAIL | PCIAGP_FAIL | PCIPCI_ALIMAGIK))
+ pci_warn(pdev, "%s: chipset does not support reliable PCI-PCI DMA\n",
+ ZORAN_NAME);
+
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err)
- return -ENODEV;
- vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
+ return err;
+ err = vb2_dma_contig_set_max_seg_size(&pdev->dev, U32_MAX);
+ if (err)
+ return err;
nr = zoran_num++;
if (nr >= BUZ_MAX) {
@@ -1174,41 +1353,15 @@ static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
zr36057_restart(zr);
- /* i2c */
- pci_info(zr->pci_dev, "Initializing i2c bus...\n");
- if (zoran_register_i2c(zr) < 0) {
- pci_err(pdev, "%s - can't initialize i2c bus\n", __func__);
+ err = zoran_i2c_init(zr);
+ if (err)
goto zr_free_irq;
- }
-
- zr->decoder = v4l2_i2c_new_subdev(&zr->v4l2_dev, &zr->i2c_adapter,
- zr->card.i2c_decoder, 0,
- zr->card.addrs_decoder);
-
- if (zr->card.i2c_encoder)
- zr->encoder = v4l2_i2c_new_subdev(&zr->v4l2_dev, &zr->i2c_adapter,
- zr->card.i2c_encoder, 0,
- zr->card.addrs_encoder);
pci_info(zr->pci_dev, "Initializing videocodec bus...\n");
-
- if (zr->card.video_codec) {
- codec_name = codecid_to_modulename(zr->card.video_codec);
- if (codec_name) {
- result = request_module(codec_name);
- if (result)
- pci_err(pdev, "failed to load modules %s: %d\n", codec_name, result);
- }
- }
- if (zr->card.video_vfe) {
- vfe_name = codecid_to_modulename(zr->card.video_vfe);
- if (vfe_name) {
- result = request_module(vfe_name);
- if (result < 0)
- pci_err(pdev, "failed to load modules %s: %d\n", vfe_name, result);
- }
- }
+ err = videocodec_init(zr);
+ if (err)
+ goto zr_unreg_i2c;
/* reset JPEG codec */
jpeg_codec_sleep(zr, 1);
@@ -1218,15 +1371,15 @@ static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (zr->card.video_codec != 0) {
master_codec = zoran_setup_videocodec(zr, zr->card.video_codec);
if (!master_codec)
- goto zr_unreg_i2c;
+ goto zr_unreg_videocodec;
zr->codec = videocodec_attach(master_codec);
if (!zr->codec) {
pci_err(pdev, "%s - no codec found\n", __func__);
- goto zr_unreg_i2c;
+ goto zr_unreg_videocodec;
}
if (zr->codec->type != zr->card.video_codec) {
pci_err(pdev, "%s - wrong codec\n", __func__);
- goto zr_detach_codec;
+ goto zr_unreg_videocodec;
}
}
if (zr->card.video_vfe != 0) {
@@ -1253,14 +1406,19 @@ static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
zr->map_mode = ZORAN_MAP_MODE_RAW;
+ zr->dbgfs_dir = debugfs_create_dir(ZR_DEVNAME(zr), NULL);
+ debugfs_create_file("debug", 0444, zr->dbgfs_dir, zr,
+ &zoran_debugfs_fops);
return 0;
zr_detach_vfe:
videocodec_detach(zr->vfe);
zr_detach_codec:
videocodec_detach(zr->codec);
+zr_unreg_videocodec:
+ videocodec_exit(zr);
zr_unreg_i2c:
- zoran_unregister_i2c(zr);
+ zoran_i2c_exit(zr);
zr_free_irq:
btwrite(0, ZR36057_SPGPPCR);
pci_free_irq(zr->pci_dev, 0, zr);
@@ -1281,54 +1439,4 @@ static struct pci_driver zoran_driver = {
.remove = zoran_remove,
};
-static int __init zoran_init(void)
-{
- int res;
-
- pr_info("Zoran MJPEG board driver version %s\n", ZORAN_VERSION);
-
- /* check the parameters we have been given, adjust if necessary */
- if (v4l_nbufs < 2)
- v4l_nbufs = 2;
- if (v4l_nbufs > VIDEO_MAX_FRAME)
- v4l_nbufs = VIDEO_MAX_FRAME;
- /* The user specifies the in KB, we want them in byte (and page aligned) */
- v4l_bufsize = PAGE_ALIGN(v4l_bufsize * 1024);
- if (v4l_bufsize < 32768)
- v4l_bufsize = 32768;
- /* 2 MB is arbitrary but sufficient for the maximum possible images */
- if (v4l_bufsize > 2048 * 1024)
- v4l_bufsize = 2048 * 1024;
- if (jpg_nbufs < 4)
- jpg_nbufs = 4;
- if (jpg_nbufs > BUZ_MAX_FRAME)
- jpg_nbufs = BUZ_MAX_FRAME;
- jpg_bufsize = PAGE_ALIGN(jpg_bufsize * 1024);
- if (jpg_bufsize < 8192)
- jpg_bufsize = 8192;
- if (jpg_bufsize > (512 * 1024))
- jpg_bufsize = 512 * 1024;
- /* Use parameter for vidmem or try to find a video card */
- if (vidmem)
- pr_info("%s: Using supplied video memory base address @ 0x%lx\n", ZORAN_NAME, vidmem);
-
- /* some mainboards might not do PCI-PCI data transfer well */
- if (pci_pci_problems & (PCIPCI_FAIL | PCIAGP_FAIL | PCIPCI_ALIMAGIK))
- pr_warn("%s: chipset does not support reliable PCI-PCI DMA\n", ZORAN_NAME);
-
- res = pci_register_driver(&zoran_driver);
- if (res) {
- pr_err("Unable to register ZR36057 driver\n");
- return res;
- }
-
- return 0;
-}
-
-static void __exit zoran_exit(void)
-{
- pci_unregister_driver(&zoran_driver);
-}
-
-module_init(zoran_init);
-module_exit(zoran_exit);
+module_pci_driver(zoran_driver);
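
With the parameter clamping removed alongside its module parameters, and the version banner and PCI quirk warning moved into zoran_probe() above, the hand-rolled init/exit pair reduces to module_pci_driver(). That macro expands to roughly the following (simplified sketch of the generic module_driver() expansion):

static int __init zoran_driver_init(void)
{
    return pci_register_driver(&zoran_driver);
}
module_init(zoran_driver_init);

static void __exit zoran_driver_exit(void)
{
    pci_unregister_driver(&zoran_driver);
}
module_exit(zoran_driver_exit);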
diff --git a/drivers/staging/media/zoran/zoran_device.c b/drivers/staging/media/zoran/zoran_device.c
index 5b12a730a229..2470889a58fa 100644
--- a/drivers/staging/media/zoran/zoran_device.c
+++ b/drivers/staging/media/zoran/zoran_device.c
@@ -239,7 +239,7 @@ static void zr36057_set_vfe(struct zoran *zr, int video_width, int video_height,
wa = tvn->wa;
ha = tvn->ha;
- pci_info(zr->pci_dev, "set_vfe() - width = %d, height = %d\n", video_width, video_height);
+ pci_dbg(zr->pci_dev, "set_vfe() - width = %d, height = %d\n", video_width, video_height);
if (video_width < BUZ_MIN_WIDTH ||
video_height < BUZ_MIN_HEIGHT ||
@@ -664,7 +664,7 @@ void zr36057_enable_jpg(struct zoran *zr, enum zoran_codec_mode mode)
zr36057_set_jpg(zr, mode); // \P_Reset, ... Video param, FIFO
clear_interrupt_counters(zr);
- pci_info(zr->pci_dev, "enable_jpg(MOTION_COMPRESS)\n");
+ pci_dbg(zr->pci_dev, "enable_jpg(MOTION_COMPRESS)\n");
break;
}
@@ -693,7 +693,7 @@ void zr36057_enable_jpg(struct zoran *zr, enum zoran_codec_mode mode)
zr36057_set_jpg(zr, mode); // \P_Reset, ... Video param, FIFO
clear_interrupt_counters(zr);
- pci_info(zr->pci_dev, "enable_jpg(MOTION_DECOMPRESS)\n");
+ pci_dbg(zr->pci_dev, "enable_jpg(MOTION_DECOMPRESS)\n");
break;
case BUZ_MODE_IDLE:
@@ -720,7 +720,7 @@ void zr36057_enable_jpg(struct zoran *zr, enum zoran_codec_mode mode)
decoder_call(zr, video, s_stream, 1);
encoder_call(zr, video, s_routing, 0, 0, 0);
- pci_info(zr->pci_dev, "enable_jpg(IDLE)\n");
+ pci_dbg(zr->pci_dev, "enable_jpg(IDLE)\n");
break;
}
}
@@ -814,7 +814,7 @@ static void zoran_reap_stat_com(struct zoran *zr)
if (zr->jpg_settings.tmp_dcm == 1)
i = (zr->jpg_dma_tail - zr->jpg_err_shift) & BUZ_MASK_STAT_COM;
else
- i = ((zr->jpg_dma_tail - zr->jpg_err_shift) & 1) * 2 + 1;
+ i = ((zr->jpg_dma_tail - zr->jpg_err_shift) & 1) * 2;
stat_com = le32_to_cpu(zr->stat_com[i]);
if ((stat_com & 1) == 0) {
@@ -826,6 +826,11 @@ static void zoran_reap_stat_com(struct zoran *zr)
size = (stat_com & GENMASK(22, 1)) >> 1;
buf = zr->inuse[i];
+ if (!buf) {
+ spin_unlock_irqrestore(&zr->queued_bufs_lock, flags);
+ pci_err(zr->pci_dev, "No buffer at slot %d\n", i);
+ return;
+ }
buf->vbuf.vb2_buf.timestamp = ktime_get_ns();
if (zr->codec_mode == BUZ_MODE_MOTION_COMPRESS) {
diff --git a/drivers/staging/media/zoran/zoran_device.h b/drivers/staging/media/zoran/zoran_device.h
index 6c5d70238228..322b04c55d41 100644
--- a/drivers/staging/media/zoran/zoran_device.h
+++ b/drivers/staging/media/zoran/zoran_device.h
@@ -47,9 +47,7 @@ extern void zr36057_restart(struct zoran *zr);
extern const struct zoran_format zoran_formats[];
-extern int v4l_nbufs;
extern int v4l_bufsize;
-extern int jpg_nbufs;
extern int jpg_bufsize;
extern int pass_through;
diff --git a/drivers/staging/media/zoran/zoran_driver.c b/drivers/staging/media/zoran/zoran_driver.c
index 46382e43f1bf..4304b7e21709 100644
--- a/drivers/staging/media/zoran/zoran_driver.c
+++ b/drivers/staging/media/zoran/zoran_driver.c
@@ -153,8 +153,6 @@ static __u32 zoran_v4l2_calc_bufsize(struct zoran_jpg_settings *settings)
result <<= 1;
}
- if (result > jpg_bufsize)
- return jpg_bufsize;
if (result < 8192)
return 8192;
@@ -173,7 +171,7 @@ static int zoran_v4l_set_format(struct zoran *zr, int width, int height,
if (height < BUZ_MIN_HEIGHT || width < BUZ_MIN_WIDTH ||
height > BUZ_MAX_HEIGHT || width > BUZ_MAX_WIDTH) {
- pci_err(zr->pci_dev, "%s - wrong frame size (%dx%d)\n", __func__, width, height);
+ pci_dbg(zr->pci_dev, "%s - wrong frame size (%dx%d)\n", __func__, width, height);
return -EINVAL;
}
@@ -183,7 +181,7 @@ static int zoran_v4l_set_format(struct zoran *zr, int width, int height,
/* Check against available buffer size */
if (height * width * bpp > zr->buffer_size) {
- pci_err(zr->pci_dev, "%s - video buffer size (%d kB) is too small\n",
+ pci_dbg(zr->pci_dev, "%s - video buffer size (%d kB) is too small\n",
__func__, zr->buffer_size >> 10);
return -EINVAL;
}
@@ -191,7 +189,7 @@ static int zoran_v4l_set_format(struct zoran *zr, int width, int height,
/* The video front end needs 4-byte aligned line sizes */
if ((bpp == 2 && (width & 1)) || (bpp == 3 && (width & 3))) {
- pci_err(zr->pci_dev, "%s - wrong frame alignment\n", __func__);
+ pci_dbg(zr->pci_dev, "%s - wrong frame alignment\n", __func__);
return -EINVAL;
}
@@ -207,7 +205,7 @@ static int zoran_set_norm(struct zoran *zr, v4l2_std_id norm)
{
if (!(norm & zr->card.norms)) {
- pci_err(zr->pci_dev, "%s - unsupported norm %llx\n", __func__, norm);
+ pci_dbg(zr->pci_dev, "%s - unsupported norm %llx\n", __func__, norm);
return -EINVAL;
}
@@ -233,7 +231,7 @@ static int zoran_set_input(struct zoran *zr, int input)
return 0;
if (input < 0 || input >= zr->card.inputs) {
- pci_err(zr->pci_dev, "%s - unsupported input %d\n", __func__, input);
+ pci_dbg(zr->pci_dev, "%s - unsupported input %d\n", __func__, input);
return -EINVAL;
}
@@ -255,8 +253,6 @@ static int zoran_querycap(struct file *file, void *__fh, struct v4l2_capability
strscpy(cap->card, ZR_DEVNAME(zr), sizeof(cap->card));
strscpy(cap->driver, "zoran", sizeof(cap->driver));
snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", pci_name(zr->pci_dev));
- cap->device_caps = zr->video_dev->device_caps;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -402,7 +398,6 @@ static int zoran_try_fmt_vid_out(struct file *file, void *__fh,
V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM);
fmt->fmt.pix.sizeimage = zoran_v4l2_calc_bufsize(&settings);
- zr->buffer_size = fmt->fmt.pix.sizeimage;
fmt->fmt.pix.bytesperline = 0;
fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
return res;
@@ -437,6 +432,8 @@ static int zoran_try_fmt_vid_cap(struct file *file, void *__fh,
bpp = DIV_ROUND_UP(zoran_formats[i].depth, 8);
v4l_bound_align_image(&fmt->fmt.pix.width, BUZ_MIN_WIDTH, BUZ_MAX_WIDTH, bpp == 2 ? 1 : 2,
&fmt->fmt.pix.height, BUZ_MIN_HEIGHT, BUZ_MAX_HEIGHT, 0, 0);
+ fmt->fmt.pix.bytesperline = fmt->fmt.pix.width * bpp;
+ fmt->fmt.pix.sizeimage = fmt->fmt.pix.bytesperline * fmt->fmt.pix.height;
return 0;
}
@@ -535,7 +532,7 @@ static int zoran_s_fmt_vid_cap(struct file *file, void *__fh,
if (fmt->fmt.pix.pixelformat == zoran_formats[i].fourcc)
break;
if (i == NUM_FORMATS) {
- pci_err(zr->pci_dev, "VIDIOC_S_FMT - unknown/unsupported format 0x%x\n",
+ pci_dbg(zr->pci_dev, "VIDIOC_S_FMT - unknown/unsupported format 0x%x\n",
fmt->fmt.pix.pixelformat);
/* TODO: do not return here, so that TRY_FMT can handle an invalid pixelformat */
return -EINVAL;
@@ -582,6 +579,9 @@ static int zoran_s_std(struct file *file, void *__fh, v4l2_std_id std)
struct zoran *zr = video_drvdata(file);
int res = 0;
+ if (zr->norm == std)
+ return 0;
+
if (zr->running != ZORAN_MAP_MODE_NONE)
return -EBUSY;
@@ -666,7 +666,7 @@ static int zoran_g_selection(struct file *file, void *__fh, struct v4l2_selectio
if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- pci_err(zr->pci_dev, "%s invalid selection type combination\n", __func__);
+ pci_dbg(zr->pci_dev, "%s invalid selection type combination\n", __func__);
return -EINVAL;
}
@@ -712,7 +712,7 @@ static int zoran_s_selection(struct file *file, void *__fh, struct v4l2_selectio
return -EINVAL;
if (zr->map_mode == ZORAN_MAP_MODE_RAW) {
- pci_err(zr->pci_dev, "VIDIOC_S_SELECTION - subcapture only supported for compressed capture\n");
+ pci_dbg(zr->pci_dev, "VIDIOC_S_SELECTION - subcapture only supported for compressed capture\n");
return -EINVAL;
}
@@ -734,14 +734,6 @@ static int zoran_s_selection(struct file *file, void *__fh, struct v4l2_selectio
return res;
}
-static int zoran_g_parm(struct file *file, void *priv, struct v4l2_streamparm *parm)
-{
- if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- return 0;
-}
-
/*
* Output is disabled temporarily
* Zoran is picky about the jpeg data it accepts. At least it seems not to support COM and APPn.
@@ -749,7 +741,6 @@ static int zoran_g_parm(struct file *file, void *priv, struct v4l2_streamparm *p
*/
static const struct v4l2_ioctl_ops zoran_ioctl_ops = {
.vidioc_querycap = zoran_querycap,
- .vidioc_g_parm = zoran_g_parm,
.vidioc_s_selection = zoran_s_selection,
.vidioc_g_selection = zoran_g_selection,
.vidioc_enum_input = zoran_enum_input,
@@ -785,8 +776,6 @@ static const struct v4l2_file_operations zoran_fops = {
.unlocked_ioctl = video_ioctl2,
.open = v4l2_fh_open,
.release = vb2_fop_release,
- .read = vb2_fop_read,
- .write = vb2_fop_write,
.mmap = vb2_fop_mmap,
.poll = vb2_fop_poll,
};
@@ -869,6 +858,10 @@ int zr_set_buf(struct zoran *zr)
vbuf = &buf->vbuf;
buf->vbuf.field = V4L2_FIELD_INTERLACED;
+ if (BUZ_MAX_HEIGHT < (zr->v4l_settings.height * 2))
+ buf->vbuf.field = V4L2_FIELD_INTERLACED;
+ else
+ buf->vbuf.field = V4L2_FIELD_TOP;
vb2_set_plane_payload(&buf->vbuf.vb2_buf, 0, zr->buffer_size);
vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_DONE);
zr->inuse[0] = NULL;
@@ -889,6 +882,7 @@ int zr_set_buf(struct zoran *zr)
return -EINVAL;
}
list_del(&buf->queue);
+ zr->buf_in_reserve--;
spin_unlock_irqrestore(&zr->queued_bufs_lock, flags);
vbuf = &buf->vbuf;
@@ -928,9 +922,10 @@ static int zr_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
zr->stat_com[j] = cpu_to_le32(1);
zr->inuse[j] = NULL;
}
+ zr->vbseq = 0;
if (zr->map_mode != ZORAN_MAP_MODE_RAW) {
- pci_info(zr->pci_dev, "START JPG\n");
+ pci_dbg(zr->pci_dev, "START JPG\n");
zr36057_restart(zr);
zoran_init_hardware(zr);
if (zr->map_mode == ZORAN_MAP_MODE_JPG_REC)
@@ -944,7 +939,7 @@ static int zr_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
return 0;
}
- pci_info(zr->pci_dev, "START RAW\n");
+ pci_dbg(zr->pci_dev, "START RAW\n");
zr36057_restart(zr);
zoran_init_hardware(zr);
@@ -994,7 +989,7 @@ static void zr_vb2_stop_streaming(struct vb2_queue *vq)
}
spin_unlock_irqrestore(&zr->queued_bufs_lock, flags);
if (zr->buf_in_reserve)
- pci_err(zr->pci_dev, "Buffer remaining %d\n", zr->buf_in_reserve);
+ pci_dbg(zr->pci_dev, "Buffer remaining %d\n", zr->buf_in_reserve);
zr->map_mode = ZORAN_MAP_MODE_RAW;
}
@@ -1008,7 +1003,7 @@ static const struct vb2_ops zr_video_qops = {
.wait_finish = vb2_ops_wait_finish,
};
-int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq)
+int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq, int dir)
{
int err;
@@ -1016,8 +1011,9 @@ int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq)
INIT_LIST_HEAD(&zr->queued_bufs);
vq->dev = &zr->pci_dev->dev;
- vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- vq->io_modes = VB2_USERPTR | VB2_DMABUF | VB2_MMAP | VB2_READ | VB2_WRITE;
+ vq->type = dir;
+
+ vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_READ | VB2_WRITE;
vq->drv_priv = zr;
vq->buf_struct_size = sizeof(struct zr_buffer);
vq->ops = &zr_video_qops;
diff --git a/drivers/staging/media/zoran/zr36016.c b/drivers/staging/media/zoran/zr36016.c
index 9b350a885879..26c7c32b6bc0 100644
--- a/drivers/staging/media/zoran/zr36016.c
+++ b/drivers/staging/media/zoran/zr36016.c
@@ -22,14 +22,14 @@
/* amount of chips attached via this driver */
static int zr36016_codecs;
-/* debugging is available via module parameter */
-static int debug;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Debug level (0-4)");
+static int zr36016_debug;
+module_param(zr36016_debug, int, 0);
+MODULE_PARM_DESC(zr36016_debug, "Debug level (0-4)");
+
#define dprintk(num, format, args...) \
do { \
- if (debug >= num) \
+ if (zr36016_debug >= num) \
printk(format, ##args); \
} while (0)
@@ -120,7 +120,7 @@ static u8 zr36016_read_version(struct zr36016 *ptr)
static int zr36016_basic_test(struct zr36016 *ptr)
{
- if (debug) {
+ if (zr36016_debug) {
int i;
zr36016_writei(ptr, ZR016I_PAX_LO, 0x55);
@@ -390,7 +390,6 @@ static int zr36016_setup(struct videocodec *codec)
}
static const struct videocodec zr36016_codec = {
- .owner = THIS_MODULE,
.name = "zr36016",
.magic = 0L, /* magic not used */
.flags =
@@ -409,14 +408,13 @@ static const struct videocodec zr36016_codec = {
HOOK IN DRIVER AS KERNEL MODULE
========================================================================= */
-static int __init zr36016_init_module(void)
+int zr36016_init_module(void)
{
- //dprintk(1, "ZR36016 driver %s\n",ZR016_VERSION);
zr36016_codecs = 0;
return videocodec_register(&zr36016_codec);
}
-static void __exit zr36016_cleanup_module(void)
+void zr36016_cleanup_module(void)
{
if (zr36016_codecs) {
dprintk(1,
@@ -425,10 +423,3 @@ static void __exit zr36016_cleanup_module(void)
}
videocodec_unregister(&zr36016_codec);
}
-
-module_init(zr36016_init_module);
-module_exit(zr36016_cleanup_module);
-
-MODULE_AUTHOR("Wolfgang Scherr <scherr@net4you.at>");
-MODULE_DESCRIPTION("Driver module for ZR36016 video frontends");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/zoran/zr36016.h b/drivers/staging/media/zoran/zr36016.h
index 1475f971cc24..04afba35669d 100644
--- a/drivers/staging/media/zoran/zr36016.h
+++ b/drivers/staging/media/zoran/zr36016.h
@@ -89,4 +89,6 @@ struct zr36016 {
#define ZR016_SIGN 0x02
#define ZR016_YMCS 0x01
+int zr36016_init_module(void);
+void zr36016_cleanup_module(void);
#endif /* ifndef ZR36016_H */
diff --git a/drivers/staging/media/zoran/zr36050.c b/drivers/staging/media/zoran/zr36050.c
index c62af27f2683..38f7021e7b06 100644
--- a/drivers/staging/media/zoran/zr36050.c
+++ b/drivers/staging/media/zoran/zr36050.c
@@ -5,8 +5,6 @@
* Copyright (C) 2001 Wolfgang Scherr <scherr@net4you.at>
*/
-#define ZR050_VERSION "v0.7.1"
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -32,13 +30,13 @@
static int zr36050_codecs;
/* debugging is available via module parameter */
-static int debug;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Debug level (0-4)");
+static int zr36050_debug;
+module_param(zr36050_debug, int, 0);
+MODULE_PARM_DESC(zr36050_debug, "Debug level (0-4)");
#define dprintk(num, format, args...) \
do { \
- if (debug >= num) \
+ if (zr36050_debug >= num) \
printk(format, ##args); \
} while (0)
@@ -798,7 +796,6 @@ static int zr36050_setup(struct videocodec *codec)
}
static const struct videocodec zr36050_codec = {
- .owner = THIS_MODULE,
.name = "zr36050",
.magic = 0L, // magic not used
.flags =
@@ -817,14 +814,13 @@ static const struct videocodec zr36050_codec = {
HOOK IN DRIVER AS KERNEL MODULE
========================================================================= */
-static int __init zr36050_init_module(void)
+int zr36050_init_module(void)
{
- //dprintk(1, "ZR36050 driver %s\n",ZR050_VERSION);
zr36050_codecs = 0;
return videocodec_register(&zr36050_codec);
}
-static void __exit zr36050_cleanup_module(void)
+void zr36050_cleanup_module(void)
{
if (zr36050_codecs) {
dprintk(1,
@@ -833,11 +829,3 @@ static void __exit zr36050_cleanup_module(void)
}
videocodec_unregister(&zr36050_codec);
}
-
-module_init(zr36050_init_module);
-module_exit(zr36050_cleanup_module);
-
-MODULE_AUTHOR("Wolfgang Scherr <scherr@net4you.at>");
-MODULE_DESCRIPTION("Driver module for ZR36050 jpeg processors "
- ZR050_VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/zoran/zr36050.h b/drivers/staging/media/zoran/zr36050.h
index 8f972d045b58..f9b58f4c77b9 100644
--- a/drivers/staging/media/zoran/zr36050.h
+++ b/drivers/staging/media/zoran/zr36050.h
@@ -160,4 +160,6 @@ struct zr36050 {
#define ZR050_U_COMPONENT 1
#define ZR050_V_COMPONENT 2
+int zr36050_init_module(void);
+void zr36050_cleanup_module(void);
#endif /* ifndef ZR36050_H */
diff --git a/drivers/staging/media/zoran/zr36060.c b/drivers/staging/media/zoran/zr36060.c
index 1c3af11b5f24..d0c369e31c81 100644
--- a/drivers/staging/media/zoran/zr36060.c
+++ b/drivers/staging/media/zoran/zr36060.c
@@ -5,8 +5,6 @@
* Copyright (C) 2002 Laurent Pinchart <laurent.pinchart@skynet.be>
*/
-#define ZR060_VERSION "v0.7"
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -34,14 +32,13 @@ static bool low_bitrate;
module_param(low_bitrate, bool, 0);
MODULE_PARM_DESC(low_bitrate, "Buz compatibility option, halves bitrate");
-/* debugging is available via module parameter */
-static int debug;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Debug level (0-4)");
+static int zr36060_debug;
+module_param(zr36060_debug, int, 0);
+MODULE_PARM_DESC(zr36060_debug, "Debug level (0-4)");
#define dprintk(num, format, args...) \
do { \
- if (debug >= num) \
+ if (zr36060_debug >= num) \
printk(format, ##args); \
} while (0)
@@ -832,7 +829,6 @@ static int zr36060_setup(struct videocodec *codec)
}
static const struct videocodec zr36060_codec = {
- .owner = THIS_MODULE,
.name = "zr36060",
.magic = 0L, // magic not used
.flags =
@@ -847,13 +843,13 @@ static const struct videocodec zr36060_codec = {
// others are not used
};
-static int __init zr36060_init_module(void)
+int zr36060_init_module(void)
{
zr36060_codecs = 0;
return videocodec_register(&zr36060_codec);
}
-static void __exit zr36060_cleanup_module(void)
+void zr36060_cleanup_module(void)
{
if (zr36060_codecs) {
dprintk(1,
@@ -864,10 +860,3 @@ static void __exit zr36060_cleanup_module(void)
/* however, we can't just stay alive */
videocodec_unregister(&zr36060_codec);
}
-
-module_init(zr36060_init_module);
-module_exit(zr36060_cleanup_module);
-
-MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@skynet.be>");
-MODULE_DESCRIPTION("Driver module for ZR36060 jpeg processors " ZR060_VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/zoran/zr36060.h b/drivers/staging/media/zoran/zr36060.h
index d2cdc26bf625..fbf5429534ac 100644
--- a/drivers/staging/media/zoran/zr36060.h
+++ b/drivers/staging/media/zoran/zr36060.h
@@ -198,4 +198,6 @@ struct zr36060 {
#define ZR060_SR_H_SCALE2 BIT(0)
#define ZR060_SR_H_SCALE4 (2 << 0)
+int zr36060_init_module(void);
+void zr36060_cleanup_module(void);
#endif /* ifndef ZR36060_H */
diff --git a/drivers/staging/most/video/Kconfig b/drivers/staging/most/video/Kconfig
index e0964ca5e7b3..e16cc5e104b7 100644
--- a/drivers/staging/most/video/Kconfig
+++ b/drivers/staging/most/video/Kconfig
@@ -5,7 +5,7 @@
config MOST_VIDEO
tristate "Video"
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
help
Say Y here if you want to communicate via Video 4 Linux.
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index 68c09fa016ed..1d31c35875e3 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -1264,7 +1264,7 @@ RX_failed:
return retval;
}
-static int pi433_remove(struct spi_device *spi)
+static void pi433_remove(struct spi_device *spi)
{
struct pi433_device *device = spi_get_drvdata(spi);
@@ -1284,8 +1284,6 @@ static int pi433_remove(struct spi_device *spi)
kfree(device->rx_buffer);
kfree(device);
-
- return 0;
}
static const struct of_device_id pi433_dt_ids[] = {
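
pi433 (like wfx below) is part of the treewide conversion of the SPI remove() callback from int to void: the core did little more than warn on a non-zero return, so drivers now simply clean up. The resulting shape, sketched with hypothetical names and devm-managed state:

#include <linux/spi/spi.h>

struct example_priv {
    int placeholder;
};

static int example_spi_probe(struct spi_device *spi)
{
    struct example_priv *priv;

    priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
    if (!priv)
        return -ENOMEM;
    spi_set_drvdata(spi, priv);
    return 0;
}

static void example_spi_remove(struct spi_device *spi)
{
    /* devm-managed data is released by the core; nothing to report */
}

static struct spi_driver example_spi_driver = {
    .driver = { .name = "example" },
    .probe  = example_spi_probe,
    .remove = example_spi_remove,
};
module_spi_driver(example_spi_driver);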
diff --git a/drivers/staging/r8188eu/include/rtw_cmd.h b/drivers/staging/r8188eu/include/rtw_cmd.h
index cf0945ae11c1..f8991a0493d0 100644
--- a/drivers/staging/r8188eu/include/rtw_cmd.h
+++ b/drivers/staging/r8188eu/include/rtw_cmd.h
@@ -73,7 +73,7 @@ struct c2h_evt_hdr {
u8 id:4;
u8 plen:4;
u8 seq;
- u8 payload[0];
+ u8 payload[];
};
#define c2h_evt_exist(c2h_evt) ((c2h_evt)->id || (c2h_evt)->plen)
@@ -662,25 +662,25 @@ struct getcurtxpwrlevel_rspi {
struct setprobereqextraie_parm {
unsigned char e_id;
unsigned char ie_len;
- unsigned char ie[0];
+ unsigned char ie[];
};
struct setassocreqextraie_parm {
unsigned char e_id;
unsigned char ie_len;
- unsigned char ie[0];
+ unsigned char ie[];
};
struct setproberspextraie_parm {
unsigned char e_id;
unsigned char ie_len;
- unsigned char ie[0];
+ unsigned char ie[];
};
struct setassocrspextraie_parm {
unsigned char e_id;
unsigned char ie_len;
- unsigned char ie[0];
+ unsigned char ie[];
};
struct addBaReq_parm {
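
The zero-length trailing arrays become C99 flexible array members, the form the kernel's array-bounds checking and the struct_size() helper expect. A minimal allocation sketch (hypothetical struct and helper):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_extraie_parm {
    unsigned char e_id;
    unsigned char ie_len;
    unsigned char ie[];     /* flexible array member; must be last */
};

static struct example_extraie_parm *example_alloc_ie(const u8 *ie, u8 ie_len)
{
    /* struct_size() computes sizeof(*p) + ie_len with overflow checking */
    struct example_extraie_parm *p = kzalloc(struct_size(p, ie, ie_len),
                                             GFP_KERNEL);

    if (!p)
        return NULL;
    p->ie_len = ie_len;
    memcpy(p->ie, ie, ie_len);
    return p;
}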
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.h b/drivers/staging/rtl8712/rtl871x_cmd.h
index ddd69c4ae208..95e9ea5b2d98 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.h
+++ b/drivers/staging/rtl8712/rtl871x_cmd.h
@@ -657,25 +657,25 @@ struct setra_parm {
struct setprobereqextraie_parm {
unsigned char e_id;
unsigned char ie_len;
- unsigned char ie[0];
+ unsigned char ie[];
};
struct setassocreqextraie_parm {
unsigned char e_id;
unsigned char ie_len;
- unsigned char ie[0];
+ unsigned char ie[];
};
struct setproberspextraie_parm {
unsigned char e_id;
unsigned char ie_len;
- unsigned char ie[0];
+ unsigned char ie[];
};
struct setassocrspextraie_parm {
unsigned char e_id;
unsigned char ie_len;
- unsigned char ie[0];
+ unsigned char ie[];
};
struct addBaReq_parm {
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 0f82f5031c43..49a3f45cb771 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -5907,6 +5907,7 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
struct sta_info *psta_bmc;
struct list_head *xmitframe_plist, *xmitframe_phead, *tmp;
struct xmit_frame *pxmitframe = NULL;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct sta_priv *pstapriv = &padapter->stapriv;
/* for BC/MC Frames */
@@ -5917,7 +5918,8 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
if ((pstapriv->tim_bitmap&BIT(0)) && (psta_bmc->sleepq_len > 0)) {
msleep(10);/* 10ms, ATIM(HIQ) Windows */
- spin_lock_bh(&psta_bmc->sleep_q.lock);
+ /* spin_lock_bh(&psta_bmc->sleep_q.lock); */
+ spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) {
@@ -5940,7 +5942,8 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
}
- spin_unlock_bh(&psta_bmc->sleep_q.lock);
+ /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */
+ spin_unlock_bh(&pxmitpriv->lock);
/* check hi queue and bmc_sleepq */
rtw_chk_hi_queue_cmd(padapter);
diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
index 41bfca549c64..105fe0e3482a 100644
--- a/drivers/staging/rtl8723bs/core/rtw_recv.c
+++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
@@ -957,8 +957,10 @@ static signed int validate_recv_ctrl_frame(struct adapter *padapter, union recv_
if ((psta->state&WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap&BIT(psta->aid))) {
struct list_head *xmitframe_plist, *xmitframe_phead;
struct xmit_frame *pxmitframe = NULL;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- spin_lock_bh(&psta->sleep_q.lock);
+ /* spin_lock_bh(&psta->sleep_q.lock); */
+ spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
xmitframe_plist = get_next(xmitframe_phead);
@@ -989,10 +991,12 @@ static signed int validate_recv_ctrl_frame(struct adapter *padapter, union recv_
update_beacon(padapter, WLAN_EID_TIM, NULL, true);
}
- spin_unlock_bh(&psta->sleep_q.lock);
+ /* spin_unlock_bh(&psta->sleep_q.lock); */
+ spin_unlock_bh(&pxmitpriv->lock);
} else {
- spin_unlock_bh(&psta->sleep_q.lock);
+ /* spin_unlock_bh(&psta->sleep_q.lock); */
+ spin_unlock_bh(&pxmitpriv->lock);
if (pstapriv->tim_bitmap&BIT(psta->aid)) {
if (psta->sleepq_len == 0) {
diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
index 0c9ea1520fd0..beb11d89db18 100644
--- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
@@ -293,48 +293,46 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
/* list_del_init(&psta->wakeup_list); */
- spin_lock_bh(&psta->sleep_q.lock);
+ spin_lock_bh(&pxmitpriv->lock);
+
rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q);
psta->sleepq_len = 0;
- spin_unlock_bh(&psta->sleep_q.lock);
-
- spin_lock_bh(&pxmitpriv->lock);
/* vo */
- spin_lock_bh(&pstaxmitpriv->vo_q.sta_pending.lock);
+ /* spin_lock_bh(&(pxmitpriv->vo_pending.lock)); */
rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending);
list_del_init(&(pstaxmitpriv->vo_q.tx_pending));
phwxmit = pxmitpriv->hwxmits;
phwxmit->accnt -= pstaxmitpriv->vo_q.qcnt;
pstaxmitpriv->vo_q.qcnt = 0;
- spin_unlock_bh(&pstaxmitpriv->vo_q.sta_pending.lock);
+ /* spin_unlock_bh(&(pxmitpriv->vo_pending.lock)); */
/* vi */
- spin_lock_bh(&pstaxmitpriv->vi_q.sta_pending.lock);
+ /* spin_lock_bh(&(pxmitpriv->vi_pending.lock)); */
rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending);
list_del_init(&(pstaxmitpriv->vi_q.tx_pending));
phwxmit = pxmitpriv->hwxmits+1;
phwxmit->accnt -= pstaxmitpriv->vi_q.qcnt;
pstaxmitpriv->vi_q.qcnt = 0;
- spin_unlock_bh(&pstaxmitpriv->vi_q.sta_pending.lock);
+ /* spin_unlock_bh(&(pxmitpriv->vi_pending.lock)); */
/* be */
- spin_lock_bh(&pstaxmitpriv->be_q.sta_pending.lock);
+ /* spin_lock_bh(&(pxmitpriv->be_pending.lock)); */
rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->be_q.sta_pending);
list_del_init(&(pstaxmitpriv->be_q.tx_pending));
phwxmit = pxmitpriv->hwxmits+2;
phwxmit->accnt -= pstaxmitpriv->be_q.qcnt;
pstaxmitpriv->be_q.qcnt = 0;
- spin_unlock_bh(&pstaxmitpriv->be_q.sta_pending.lock);
+ /* spin_unlock_bh(&(pxmitpriv->be_pending.lock)); */
/* bk */
- spin_lock_bh(&pstaxmitpriv->bk_q.sta_pending.lock);
+ /* spin_lock_bh(&(pxmitpriv->bk_pending.lock)); */
rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending);
list_del_init(&(pstaxmitpriv->bk_q.tx_pending));
phwxmit = pxmitpriv->hwxmits+3;
phwxmit->accnt -= pstaxmitpriv->bk_q.qcnt;
pstaxmitpriv->bk_q.qcnt = 0;
- spin_unlock_bh(&pstaxmitpriv->bk_q.sta_pending.lock);
+ /* spin_unlock_bh(&(pxmitpriv->bk_pending.lock)); */
spin_unlock_bh(&pxmitpriv->lock);
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index 13b8bd5ffabc..f466bfd248fb 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -1734,12 +1734,15 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram
struct list_head *plist, *phead, *tmp;
struct xmit_frame *pxmitframe;
+ spin_lock_bh(&pframequeue->lock);
+
phead = get_list_head(pframequeue);
list_for_each_safe(plist, tmp, phead) {
pxmitframe = list_entry(plist, struct xmit_frame, list);
rtw_free_xmitframe(pxmitpriv, pxmitframe);
}
+ spin_unlock_bh(&pframequeue->lock);
}
s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe)
@@ -1794,7 +1797,6 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
struct sta_info *psta;
struct tx_servq *ptxservq;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct xmit_priv *xmit_priv = &padapter->xmitpriv;
struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
signed int res = _SUCCESS;
@@ -1812,14 +1814,12 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
- spin_lock_bh(&xmit_priv->lock);
if (list_empty(&ptxservq->tx_pending))
list_add_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue));
list_add_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending));
ptxservq->qcnt++;
phwxmits[ac_index].accnt++;
- spin_unlock_bh(&xmit_priv->lock);
exit:
@@ -2202,10 +2202,11 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
struct list_head *xmitframe_plist, *xmitframe_phead, *tmp;
struct xmit_frame *pxmitframe = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
psta_bmc = rtw_get_bcmc_stainfo(padapter);
- spin_lock_bh(&psta->sleep_q.lock);
+ spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) {
@@ -2306,7 +2307,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
_exit:
- spin_unlock_bh(&psta->sleep_q.lock);
+ spin_unlock_bh(&pxmitpriv->lock);
if (update_mask)
update_beacon(padapter, WLAN_EID_TIM, NULL, true);
@@ -2318,8 +2319,9 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
struct list_head *xmitframe_plist, *xmitframe_phead, *tmp;
struct xmit_frame *pxmitframe = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- spin_lock_bh(&psta->sleep_q.lock);
+ spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) {
@@ -2372,7 +2374,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
}
}
- spin_unlock_bh(&psta->sleep_q.lock);
+ spin_unlock_bh(&pxmitpriv->lock);
}
void enqueue_pending_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
index b5d5e922231c..15810438a472 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
@@ -502,7 +502,9 @@ s32 rtl8723bs_hal_xmit(
rtw_issue_addbareq_cmd(padapter, pxmitframe);
}
+ spin_lock_bh(&pxmitpriv->lock);
err = rtw_xmitframe_enqueue(padapter, pxmitframe);
+ spin_unlock_bh(&pxmitpriv->lock);
if (err != _SUCCESS) {
rtw_free_xmitframe(pxmitpriv, pxmitframe);
diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h
index c11d7e2d2347..1e627dc0044d 100644
--- a/drivers/staging/rtl8723bs/include/ieee80211.h
+++ b/drivers/staging/rtl8723bs/include/ieee80211.h
@@ -204,7 +204,7 @@ struct ieee_param {
struct ieee_param_ex {
u32 cmd;
u8 sta_addr[ETH_ALEN];
- u8 data[0];
+ u8 data[];
};
struct sta_data {
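
The [0] -> [] conversions in this and the following rtw_cmd.h and
vchiq.h hunks replace the old GNU zero-length-array idiom with a C99
flexible array member. A minimal sketch (hypothetical helper, not part
of the patch) of how such a struct is typically allocated:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	static struct ieee_param_ex *param_ex_alloc(size_t data_len)
	{
		struct ieee_param_ex *p;

		/* struct_size() computes sizeof(*p) + data_len with
		 * overflow checking; the data[] member itself
		 * contributes nothing to sizeof(*p). */
		p = kzalloc(struct_size(p, data, data_len), GFP_KERNEL);
		return p;
	}

The flexible form also lets the compiler and fortify checks reason
about the array's real bounds instead of treating it as zero-sized.
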
diff --git a/drivers/staging/rtl8723bs/include/rtw_cmd.h b/drivers/staging/rtl8723bs/include/rtw_cmd.h
index 28d2d2732374..1bf030cbbbbe 100644
--- a/drivers/staging/rtl8723bs/include/rtw_cmd.h
+++ b/drivers/staging/rtl8723bs/include/rtw_cmd.h
@@ -94,7 +94,7 @@ struct c2h_evt_hdr {
u8 id:4;
u8 plen:4;
u8 seq;
- u8 payload[0];
+ u8 payload[];
};
struct c2h_evt_hdr_88xx {
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme.h b/drivers/staging/rtl8723bs/include/rtw_mlme.h
index c94fa7d8d5a9..1b343b434f4d 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme.h
@@ -102,13 +102,17 @@ there are several "locks" in mlme_priv,
since mlme_priv is a shared resource between many threads,
like ISR/Call-Back functions, the OID handlers, and even timer functions.
-
Each struct __queue has its own locks, already.
-Other items are protected by mlme_priv.lock.
+Other items in mlme_priv are protected by mlme_priv.lock, while items in
+xmit_priv are protected by xmit_priv.lock.
To avoid possible deadlock, any thread trying to modify mlme_priv
SHALL not hold more than one lock at a time!
+The only exception is that queue functions which take the __queue.lock
+may be called with the xmit_priv.lock held. In that case the order
+MUST always be: first take xmit_priv.lock, then call any queue function
+which takes __queue.lock.
*/
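
A minimal sketch (hypothetical call site, not from this patch) of the
locking rule documented above: the caller takes xmit_priv.lock first,
and the queue helper then takes __queue.lock internally, never the
other way around.

	spin_lock_bh(&pxmitpriv->lock);
	/* rtw_free_xmitframe_queue() takes pframequeue->lock itself */
	rtw_free_xmitframe_queue(pxmitpriv, &ptxservq->sta_pending);
	spin_unlock_bh(&pxmitpriv->lock);

Reversing the order on another code path would permit an AB-BA
deadlock, which is why the comment makes the order mandatory.
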
diff --git a/drivers/staging/vc04_services/bcm2835-camera/Kconfig b/drivers/staging/vc04_services/bcm2835-camera/Kconfig
index d0653d1ed3c7..dcda565f9b38 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/Kconfig
+++ b/drivers/staging/vc04_services/bcm2835-camera/Kconfig
@@ -2,7 +2,7 @@
config VIDEO_BCM2835
tristate "BCM2835 Camera"
depends on MEDIA_SUPPORT
- depends on VIDEO_V4L2 && (ARCH_BCM2835 || COMPILE_TEST)
+ depends on VIDEO_DEV && (ARCH_BCM2835 || COMPILE_TEST)
select BCM2835_VCHIQ
select BCM2835_VCHIQ_MMAL
select VIDEOBUF2_VMALLOC
diff --git a/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h b/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
index 81db7fb76d6d..c93f2f3e87bb 100644
--- a/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
+++ b/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
@@ -45,7 +45,7 @@ struct vchiq_header {
/* Size of message data. */
unsigned int size;
- char data[0]; /* message */
+ char data[]; /* message */
};
struct vchiq_element {
diff --git a/drivers/staging/wfx/bus_spi.c b/drivers/staging/wfx/bus_spi.c
index 55ffcd7c42e2..fa0ff66a457d 100644
--- a/drivers/staging/wfx/bus_spi.c
+++ b/drivers/staging/wfx/bus_spi.c
@@ -232,12 +232,11 @@ static int wfx_spi_probe(struct spi_device *func)
return wfx_probe(bus->core);
}
-static int wfx_spi_remove(struct spi_device *func)
+static void wfx_spi_remove(struct spi_device *func)
{
struct wfx_spi_priv *bus = spi_get_drvdata(func);
wfx_release(bus->core);
- return 0;
}
/* For dynamic driver binding, kernel does not use OF to match driver. It only
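
A minimal sketch (hypothetical driver names) of the convention this
hunk adopts: the SPI core ignores the remove callback's return value,
so the callback is declared void and simply releases its resources.

	static void example_spi_remove(struct spi_device *spi)
	{
		struct example_priv *priv = spi_get_drvdata(spi);

		example_release(priv);	/* nothing useful to return */
	}
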
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 255500448ad3..e04fc666d218 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -255,7 +255,7 @@ static int p80211_convert_to_ether(struct wlandevice *wlandev,
if (skb_p80211_to_ether(wlandev, wlandev->ethconv, skb) == 0) {
wlandev->netdev->stats.rx_packets++;
wlandev->netdev->stats.rx_bytes += skb->len;
- netif_rx_ni(skb);
+ netif_rx(skb);
return 0;
}
@@ -290,7 +290,7 @@ static void p80211netdev_rx_bh(struct tasklet_struct *t)
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
- netif_rx_ni(skb);
+ netif_rx(skb);
continue;
} else {
if (!p80211_convert_to_ether(wlandev, skb))
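
For context, a minimal sketch (hypothetical receive path) of what these
hunks rely on: netif_rx() now disables bottom halves itself when
needed, so it is safe to call from process context and the _ni()
variant is redundant.

	/* hand a received frame to the network stack, any context */
	skb->protocol = eth_type_trans(skb, netdev);
	netif_rx(skb);
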
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index bf8ae4825a06..87ede165ddba 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -20,7 +20,6 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
-#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
@@ -353,18 +352,16 @@ static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
* Only allocate as many vector entries as the bio code allows us to,
* we'll loop later on until we have handled the whole request.
*/
- bio = bio_alloc_bioset(GFP_NOIO, bio_max_segs(sg_num),
- &ib_dev->ibd_bio_set);
+ bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf,
+ GFP_NOIO, &ib_dev->ibd_bio_set);
if (!bio) {
pr_err("Unable to allocate memory for bio\n");
return NULL;
}
- bio_set_dev(bio, ib_dev->ibd_bd);
bio->bi_private = cmd;
bio->bi_end_io = &iblock_bio_done;
bio->bi_iter.bi_sector = lba;
- bio->bi_opf = opf;
return bio;
}
@@ -418,10 +415,9 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
if (immed)
target_complete_cmd(cmd, SAM_STAT_GOOD);
- bio = bio_alloc(GFP_KERNEL, 0);
+ bio = bio_alloc(ib_dev->ibd_bd, 0, REQ_OP_WRITE | REQ_PREFLUSH,
+ GFP_KERNEL);
bio->bi_end_io = iblock_end_io_flush;
- bio_set_dev(bio, ib_dev->ibd_bd);
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
if (!immed)
bio->bi_private = cmd;
submit_bio(bio);
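
A minimal sketch (hypothetical variables) of the reworked block API the
hunks above assume: the target block device and the operation flags
move into the allocation call, replacing the separate bio_set_dev() and
bi_opf assignments.

	struct bio *bio;

	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = lba;	/* position is still set by hand */
	submit_bio(bio);

With __GFP_DIRECT_RECLAIM in the gfp mask (as in GFP_KERNEL),
bio_alloc() is documented not to fail, so no NULL check is needed here.
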
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 807d06ecadee..0fae71ac5cc8 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -17,7 +17,6 @@
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/genhd.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/module.h>
diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c
index 07f36ac834c8..cec6e70f0ac9 100644
--- a/drivers/tee/amdtee/call.c
+++ b/drivers/tee/amdtee/call.c
@@ -122,7 +122,7 @@ static int amd_params_to_tee_params(struct tee_param *tee, u32 count,
}
static DEFINE_MUTEX(ta_refcount_mutex);
-static struct list_head ta_list = LIST_HEAD_INIT(ta_list);
+static LIST_HEAD(ta_list);
static u32 get_ta_refcount(u32 ta_handle)
{
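
For reference, LIST_HEAD(name) defines and initializes the list in one
step, so the two forms in this hunk are equivalent:

	static LIST_HEAD(ta_list);
	/* expands to:
	 * static struct list_head ta_list = LIST_HEAD_INIT(ta_list);
	 */
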
diff --git a/drivers/tee/amdtee/shm_pool.c b/drivers/tee/amdtee/shm_pool.c
index 065854e2db18..f87f96a291c9 100644
--- a/drivers/tee/amdtee/shm_pool.c
+++ b/drivers/tee/amdtee/shm_pool.c
@@ -8,13 +8,17 @@
#include <linux/psp-sev.h>
#include "amdtee_private.h"
-static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm,
- size_t size)
+static int pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
+ size_t size, size_t align)
{
unsigned int order = get_order(size);
unsigned long va;
int rc;
+ /*
+ * Ignore alignment since this is already going to be page aligned
+ * and there's no need for any larger alignment.
+ */
va = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!va)
return -ENOMEM;
@@ -34,7 +38,7 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm,
return 0;
}
-static void pool_op_free(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm)
+static void pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
{
/* Unmap the shared memory from TEE */
amdtee_unmap_shmem(shm);
@@ -42,52 +46,25 @@ static void pool_op_free(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm)
shm->kaddr = NULL;
}
-static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+static void pool_op_destroy_pool(struct tee_shm_pool *pool)
{
- kfree(poolm);
+ kfree(pool);
}
-static const struct tee_shm_pool_mgr_ops pool_ops = {
+static const struct tee_shm_pool_ops pool_ops = {
.alloc = pool_op_alloc,
.free = pool_op_free,
- .destroy_poolmgr = pool_op_destroy_poolmgr,
+ .destroy_pool = pool_op_destroy_pool,
};
-static struct tee_shm_pool_mgr *pool_mem_mgr_alloc(void)
-{
- struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
-
- if (!mgr)
- return ERR_PTR(-ENOMEM);
-
- mgr->ops = &pool_ops;
-
- return mgr;
-}
-
struct tee_shm_pool *amdtee_config_shm(void)
{
- struct tee_shm_pool_mgr *priv_mgr;
- struct tee_shm_pool_mgr *dmabuf_mgr;
- void *rc;
+ struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);
- rc = pool_mem_mgr_alloc();
- if (IS_ERR(rc))
- return rc;
- priv_mgr = rc;
-
- rc = pool_mem_mgr_alloc();
- if (IS_ERR(rc)) {
- tee_shm_pool_mgr_destroy(priv_mgr);
- return rc;
- }
- dmabuf_mgr = rc;
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
- rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
- if (IS_ERR(rc)) {
- tee_shm_pool_mgr_destroy(priv_mgr);
- tee_shm_pool_mgr_destroy(dmabuf_mgr);
- }
+ pool->ops = &pool_ops;
- return rc;
+ return pool;
}
diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig
index 3ca71e3812ed..f121c224e682 100644
--- a/drivers/tee/optee/Kconfig
+++ b/drivers/tee/optee/Kconfig
@@ -7,11 +7,3 @@ config OPTEE
help
This implements the OP-TEE Trusted Execution Environment (TEE)
driver.
-
-config OPTEE_SHM_NUM_PRIV_PAGES
- int "Private Shared Memory Pages"
- default 1
- depends on OPTEE
- help
- This sets the number of private shared memory pages to be
- used by OP-TEE TEE driver.
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index b25cc1fac945..bd49ec934060 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -120,7 +120,7 @@ struct tee_shm *optee_get_msg_arg(struct tee_context *ctx, size_t num_params,
if (optee->rpc_arg_count)
sz += OPTEE_MSG_GET_ARG_SIZE(optee->rpc_arg_count);
- shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
+ shm = tee_shm_alloc_priv_buf(ctx, sz);
if (IS_ERR(shm))
return shm;
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 17a6f51d3089..daf947e98d14 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -18,8 +18,8 @@
#include <linux/workqueue.h>
#include "optee_private.h"
-int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm,
- struct tee_shm *shm, size_t size,
+int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
+ size_t size, size_t align,
int (*shm_register)(struct tee_context *ctx,
struct tee_shm *shm,
struct page **pages,
@@ -30,6 +30,10 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm,
struct page *page;
int rc = 0;
+ /*
+ * Ignore alignment since this is already going to be page aligned
+ * and there's no need for any larger alignment.
+ */
page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!page)
return -ENOMEM;
@@ -51,7 +55,6 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm,
for (i = 0; i < nr_pages; i++)
pages[i] = page + i;
- shm->flags |= TEE_SHM_REGISTER;
rc = shm_register(shm->ctx, shm, pages, nr_pages,
(unsigned long)shm->kaddr);
kfree(pages);
@@ -62,10 +65,20 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm,
return 0;
err:
- __free_pages(page, order);
+ free_pages((unsigned long)shm->kaddr, order);
return rc;
}
+void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
+ int (*shm_unregister)(struct tee_context *ctx,
+ struct tee_shm *shm))
+{
+ if (shm_unregister)
+ shm_unregister(shm->ctx, shm);
+ free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+ shm->kaddr = NULL;
+}
+
static void optee_bus_scan(struct work_struct *work)
{
WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
index 128a2d2a50a1..f3947be13e2e 100644
--- a/drivers/tee/optee/device.c
+++ b/drivers/tee/optee/device.c
@@ -121,10 +121,9 @@ static int __optee_enumerate_devices(u32 func)
if (rc < 0 || !shm_size)
goto out_sess;
- device_shm = tee_shm_alloc(ctx, shm_size,
- TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+ device_shm = tee_shm_alloc_kernel_buf(ctx, shm_size);
if (IS_ERR(device_shm)) {
- pr_err("tee_shm_alloc failed\n");
+ pr_err("tee_shm_alloc_kernel_buf failed\n");
rc = PTR_ERR(device_shm);
goto out_sess;
}
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
index f2bf6c61197f..a5eb4ef46971 100644
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -369,30 +369,28 @@ static int optee_ffa_shm_unregister_supp(struct tee_context *ctx,
* The main function is optee_ffa_shm_pool_alloc_pages().
*/
-static int pool_ffa_op_alloc(struct tee_shm_pool_mgr *poolm,
- struct tee_shm *shm, size_t size)
+static int pool_ffa_op_alloc(struct tee_shm_pool *pool,
+ struct tee_shm *shm, size_t size, size_t align)
{
- return optee_pool_op_alloc_helper(poolm, shm, size,
+ return optee_pool_op_alloc_helper(pool, shm, size, align,
optee_ffa_shm_register);
}
-static void pool_ffa_op_free(struct tee_shm_pool_mgr *poolm,
+static void pool_ffa_op_free(struct tee_shm_pool *pool,
struct tee_shm *shm)
{
- optee_ffa_shm_unregister(shm->ctx, shm);
- free_pages((unsigned long)shm->kaddr, get_order(shm->size));
- shm->kaddr = NULL;
+ optee_pool_op_free_helper(pool, shm, optee_ffa_shm_unregister);
}
-static void pool_ffa_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+static void pool_ffa_op_destroy_pool(struct tee_shm_pool *pool)
{
- kfree(poolm);
+ kfree(pool);
}
-static const struct tee_shm_pool_mgr_ops pool_ffa_ops = {
+static const struct tee_shm_pool_ops pool_ffa_ops = {
.alloc = pool_ffa_op_alloc,
.free = pool_ffa_op_free,
- .destroy_poolmgr = pool_ffa_op_destroy_poolmgr,
+ .destroy_pool = pool_ffa_op_destroy_pool,
};
/**
@@ -401,16 +399,16 @@ static const struct tee_shm_pool_mgr_ops pool_ffa_ops = {
* This pool is used with OP-TEE over FF-A. In this case command buffers
* and such are allocated from kernel's own memory.
*/
-static struct tee_shm_pool_mgr *optee_ffa_shm_pool_alloc_pages(void)
+static struct tee_shm_pool *optee_ffa_shm_pool_alloc_pages(void)
{
- struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);
- if (!mgr)
+ if (!pool)
return ERR_PTR(-ENOMEM);
- mgr->ops = &pool_ffa_ops;
+ pool->ops = &pool_ffa_ops;
- return mgr;
+ return pool;
}
/*
@@ -440,8 +438,8 @@ static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
shm = optee_rpc_cmd_alloc_suppl(ctx, arg->params[0].u.value.b);
break;
case OPTEE_RPC_SHM_TYPE_KERNEL:
- shm = tee_shm_alloc(optee->ctx, arg->params[0].u.value.b,
- TEE_SHM_MAPPED | TEE_SHM_PRIV);
+ shm = tee_shm_alloc_priv_buf(optee->ctx,
+ arg->params[0].u.value.b);
break;
default:
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
@@ -700,33 +698,6 @@ static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
return true;
}
-static struct tee_shm_pool *optee_ffa_config_dyn_shm(void)
-{
- struct tee_shm_pool_mgr *priv_mgr;
- struct tee_shm_pool_mgr *dmabuf_mgr;
- void *rc;
-
- rc = optee_ffa_shm_pool_alloc_pages();
- if (IS_ERR(rc))
- return rc;
- priv_mgr = rc;
-
- rc = optee_ffa_shm_pool_alloc_pages();
- if (IS_ERR(rc)) {
- tee_shm_pool_mgr_destroy(priv_mgr);
- return rc;
- }
- dmabuf_mgr = rc;
-
- rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
- if (IS_ERR(rc)) {
- tee_shm_pool_mgr_destroy(priv_mgr);
- tee_shm_pool_mgr_destroy(dmabuf_mgr);
- }
-
- return rc;
-}
-
static void optee_ffa_get_version(struct tee_device *teedev,
struct tee_ioctl_version_data *vers)
{
@@ -824,7 +795,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
if (!optee)
return -ENOMEM;
- pool = optee_ffa_config_dyn_shm();
+ pool = optee_ffa_shm_pool_alloc_pages();
if (IS_ERR(pool)) {
rc = PTR_ERR(pool);
goto err_free_optee;
@@ -869,8 +840,10 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
optee_supp_init(&optee->supp);
ffa_dev_set_drvdata(ffa_dev, optee);
ctx = teedev_open(optee->teedev);
- if (IS_ERR(ctx))
+ if (IS_ERR(ctx)) {
+ rc = PTR_ERR(ctx);
goto err_rhashtable_free;
+ }
optee->ctx = ctx;
rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE);
if (rc)
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 92bc47bef95f..e77765c78878 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -229,13 +229,16 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
int optee_enumerate_devices(u32 func);
void optee_unregister_devices(void);
-int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm,
- struct tee_shm *shm, size_t size,
+int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
+ size_t size, size_t align,
int (*shm_register)(struct tee_context *ctx,
struct tee_shm *shm,
struct page **pages,
size_t num_pages,
unsigned long start));
+void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
+ int (*shm_unregister)(struct tee_context *ctx,
+ struct tee_shm *shm));
void optee_remove_common(struct optee *optee);
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index 1a55339c7072..67b7f7d2ff27 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -42,7 +42,15 @@
* 6. Driver initialization.
*/
-#define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
+/*
+ * A typical OP-TEE private shm allocation is 224 bytes (argument struct
+ * with 6 parameters, needed for open session). So with an alignment of 512
+ * we'll waste a bit more than 50% (224 rounds up to 512, wasting 288 of
+ * every 512 bytes). However, it's only expected that we'll have a handful
+ * of these structs allocated at a time. Most memory will be allocated
+ * aligned to the page size, so all in all this should scale
+ * up and down quite well.
+ */
+#define OPTEE_MIN_STATIC_POOL_ALIGN 9 /* 512 bytes aligned */
/*
* 1. Convert between struct tee_param and struct optee_msg_param
@@ -220,7 +228,7 @@ static int optee_to_msg_param(struct optee *optee,
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
- if (tee_shm_is_registered(p->u.memref.shm))
+ if (tee_shm_is_dynamic(p->u.memref.shm))
rc = to_msg_param_reg_mem(mp, p);
else
rc = to_msg_param_tmp_mem(mp, p);
@@ -522,38 +530,38 @@ static int optee_shm_unregister_supp(struct tee_context *ctx,
* The main function is optee_shm_pool_alloc_pages().
*/
-static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
- struct tee_shm *shm, size_t size)
+static int pool_op_alloc(struct tee_shm_pool *pool,
+ struct tee_shm *shm, size_t size, size_t align)
{
/*
* Shared memory private to the OP-TEE driver doesn't need
* to be registered with OP-TEE.
*/
if (shm->flags & TEE_SHM_PRIV)
- return optee_pool_op_alloc_helper(poolm, shm, size, NULL);
+ return optee_pool_op_alloc_helper(pool, shm, size, align, NULL);
- return optee_pool_op_alloc_helper(poolm, shm, size, optee_shm_register);
+ return optee_pool_op_alloc_helper(pool, shm, size, align,
+ optee_shm_register);
}
-static void pool_op_free(struct tee_shm_pool_mgr *poolm,
+static void pool_op_free(struct tee_shm_pool *pool,
struct tee_shm *shm)
{
if (!(shm->flags & TEE_SHM_PRIV))
- optee_shm_unregister(shm->ctx, shm);
-
- free_pages((unsigned long)shm->kaddr, get_order(shm->size));
- shm->kaddr = NULL;
+ optee_pool_op_free_helper(pool, shm, optee_shm_unregister);
+ else
+ optee_pool_op_free_helper(pool, shm, NULL);
}
-static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+static void pool_op_destroy_pool(struct tee_shm_pool *pool)
{
- kfree(poolm);
+ kfree(pool);
}
-static const struct tee_shm_pool_mgr_ops pool_ops = {
+static const struct tee_shm_pool_ops pool_ops = {
.alloc = pool_op_alloc,
.free = pool_op_free,
- .destroy_poolmgr = pool_op_destroy_poolmgr,
+ .destroy_pool = pool_op_destroy_pool,
};
/**
@@ -562,16 +570,16 @@ static const struct tee_shm_pool_mgr_ops pool_ops = {
 * This pool is used when OP-TEE supports dynamic SHM. In this case
* command buffers and such are allocated from kernel's own memory.
*/
-static struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
+static struct tee_shm_pool *optee_shm_pool_alloc_pages(void)
{
- struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);
- if (!mgr)
+ if (!pool)
return ERR_PTR(-ENOMEM);
- mgr->ops = &pool_ops;
+ pool->ops = &pool_ops;
- return mgr;
+ return pool;
}
/*
@@ -642,8 +650,7 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
break;
case OPTEE_RPC_SHM_TYPE_KERNEL:
- shm = tee_shm_alloc(optee->ctx, sz,
- TEE_SHM_MAPPED | TEE_SHM_PRIV);
+ shm = tee_shm_alloc_priv_buf(optee->ctx, sz);
break;
default:
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
@@ -662,7 +669,7 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
sz = tee_shm_get_size(shm);
- if (tee_shm_is_registered(shm)) {
+ if (tee_shm_is_dynamic(shm)) {
struct page **pages;
u64 *pages_list;
size_t page_num;
@@ -768,8 +775,7 @@ static void optee_handle_rpc(struct tee_context *ctx,
switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
case OPTEE_SMC_RPC_FUNC_ALLOC:
- shm = tee_shm_alloc(optee->ctx, param->a1,
- TEE_SHM_MAPPED | TEE_SHM_PRIV);
+ shm = tee_shm_alloc_priv_buf(optee->ctx, param->a1);
if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
reg_pair_from_64(&param->a1, &param->a2, pa);
reg_pair_from_64(&param->a4, &param->a5,
@@ -1143,33 +1149,6 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
return true;
}
-static struct tee_shm_pool *optee_config_dyn_shm(void)
-{
- struct tee_shm_pool_mgr *priv_mgr;
- struct tee_shm_pool_mgr *dmabuf_mgr;
- void *rc;
-
- rc = optee_shm_pool_alloc_pages();
- if (IS_ERR(rc))
- return rc;
- priv_mgr = rc;
-
- rc = optee_shm_pool_alloc_pages();
- if (IS_ERR(rc)) {
- tee_shm_pool_mgr_destroy(priv_mgr);
- return rc;
- }
- dmabuf_mgr = rc;
-
- rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
- if (IS_ERR(rc)) {
- tee_shm_pool_mgr_destroy(priv_mgr);
- tee_shm_pool_mgr_destroy(dmabuf_mgr);
- }
-
- return rc;
-}
-
static struct tee_shm_pool *
optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
{
@@ -1183,10 +1162,7 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
phys_addr_t begin;
phys_addr_t end;
void *va;
- struct tee_shm_pool_mgr *priv_mgr;
- struct tee_shm_pool_mgr *dmabuf_mgr;
void *rc;
- const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
if (res.result.status != OPTEE_SMC_RETURN_OK) {
@@ -1204,11 +1180,6 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
paddr = begin;
size = end - begin;
- if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
- pr_err("too small shared memory area\n");
- return ERR_PTR(-EINVAL);
- }
-
va = memremap(paddr, size, MEMREMAP_WB);
if (!va) {
pr_err("shared memory ioremap failed\n");
@@ -1216,35 +1187,13 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
}
vaddr = (unsigned long)va;
- rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
- 3 /* 8 bytes aligned */);
- if (IS_ERR(rc))
- goto err_memunmap;
- priv_mgr = rc;
-
- vaddr += sz;
- paddr += sz;
- size -= sz;
-
- rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
+ rc = tee_shm_pool_alloc_res_mem(vaddr, paddr, size,
+ OPTEE_MIN_STATIC_POOL_ALIGN);
if (IS_ERR(rc))
- goto err_free_priv_mgr;
- dmabuf_mgr = rc;
-
- rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
- if (IS_ERR(rc))
- goto err_free_dmabuf_mgr;
-
- *memremaped_shm = va;
-
- return rc;
+ memunmap(va);
+ else
+ *memremaped_shm = va;
-err_free_dmabuf_mgr:
- tee_shm_pool_mgr_destroy(dmabuf_mgr);
-err_free_priv_mgr:
- tee_shm_pool_mgr_destroy(priv_mgr);
-err_memunmap:
- memunmap(va);
return rc;
}
@@ -1366,7 +1315,7 @@ static int optee_probe(struct platform_device *pdev)
* Try to use dynamic shared memory if possible
*/
if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
- pool = optee_config_dyn_shm();
+ pool = optee_shm_pool_alloc_pages();
/*
* If dynamic shared memory is not available or failed - try static one
@@ -1417,8 +1366,10 @@ static int optee_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, optee);
ctx = teedev_open(optee->teedev);
- if (IS_ERR(ctx))
+ if (IS_ERR(ctx)) {
+ rc = PTR_ERR(ctx);
goto err_supp_uninit;
+ }
optee->ctx = ctx;
rc = optee_notif_init(optee, max_notif_value);
if (rc)
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 3fc426dad2df..8aa1a4836b92 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -297,7 +297,7 @@ static int tee_ioctl_shm_alloc(struct tee_context *ctx,
if (data.flags)
return -EINVAL;
- shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+ shm = tee_shm_alloc_user_buf(ctx, data.size);
if (IS_ERR(shm))
return PTR_ERR(shm);
@@ -334,8 +334,7 @@ tee_ioctl_shm_register(struct tee_context *ctx,
if (data.flags)
return -EINVAL;
- shm = tee_shm_register(ctx, data.addr, data.length,
- TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
+ shm = tee_shm_register_user_buf(ctx, data.addr, data.length);
if (IS_ERR(shm))
return PTR_ERR(shm);
diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h
index e55204df31ce..409cadcc1cff 100644
--- a/drivers/tee/tee_private.h
+++ b/drivers/tee/tee_private.h
@@ -12,17 +12,6 @@
#include <linux/mutex.h>
#include <linux/types.h>
-/**
- * struct tee_shm_pool - shared memory pool
- * @private_mgr: pool manager for shared memory only between kernel
- * and secure world
- * @dma_buf_mgr: pool manager for shared memory exported to user space
- */
-struct tee_shm_pool {
- struct tee_shm_pool_mgr *private_mgr;
- struct tee_shm_pool_mgr *dma_buf_mgr;
-};
-
#define TEE_DEVICE_FLAG_REGISTERED 0x1
#define TEE_MAX_DEV_NAME_LEN 32
@@ -68,4 +57,8 @@ void tee_device_put(struct tee_device *teedev);
void teedev_ctx_get(struct tee_context *ctx);
void teedev_ctx_put(struct tee_context *ctx);
+struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size);
+struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
+ unsigned long addr, size_t length);
+
#endif /*TEE_PRIVATE_H*/
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 499fccba3d74..f31e29e8f1ca 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -12,17 +12,43 @@
#include <linux/uio.h>
#include "tee_private.h"
+static void shm_put_kernel_pages(struct page **pages, size_t page_count)
+{
+ size_t n;
+
+ for (n = 0; n < page_count; n++)
+ put_page(pages[n]);
+}
+
+static int shm_get_kernel_pages(unsigned long start, size_t page_count,
+ struct page **pages)
+{
+ struct kvec *kiov;
+ size_t n;
+ int rc;
+
+ kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL);
+ if (!kiov)
+ return -ENOMEM;
+
+ for (n = 0; n < page_count; n++) {
+ kiov[n].iov_base = (void *)(start + n * PAGE_SIZE);
+ kiov[n].iov_len = PAGE_SIZE;
+ }
+
+ rc = get_kernel_pages(kiov, page_count, 0, pages);
+ kfree(kiov);
+
+ return rc;
+}
+
static void release_registered_pages(struct tee_shm *shm)
{
if (shm->pages) {
- if (shm->flags & TEE_SHM_USER_MAPPED) {
+ if (shm->flags & TEE_SHM_USER_MAPPED)
unpin_user_pages(shm->pages, shm->num_pages);
- } else {
- size_t n;
-
- for (n = 0; n < shm->num_pages; n++)
- put_page(shm->pages[n]);
- }
+ else
+ shm_put_kernel_pages(shm->pages, shm->num_pages);
kfree(shm->pages);
}
@@ -31,15 +57,8 @@ static void release_registered_pages(struct tee_shm *shm)
static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
if (shm->flags & TEE_SHM_POOL) {
- struct tee_shm_pool_mgr *poolm;
-
- if (shm->flags & TEE_SHM_DMA_BUF)
- poolm = teedev->pool->dma_buf_mgr;
- else
- poolm = teedev->pool->private_mgr;
-
- poolm->ops->free(poolm, shm);
- } else if (shm->flags & TEE_SHM_REGISTER) {
+ teedev->pool->ops->free(teedev->pool, shm);
+ } else if (shm->flags & TEE_SHM_DYNAMIC) {
int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);
if (rc)
@@ -56,25 +75,14 @@ static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
tee_device_put(teedev);
}
-struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
+static struct tee_shm *shm_alloc_helper(struct tee_context *ctx, size_t size,
+ size_t align, u32 flags, int id)
{
struct tee_device *teedev = ctx->teedev;
- struct tee_shm_pool_mgr *poolm = NULL;
struct tee_shm *shm;
void *ret;
int rc;
- if (!(flags & TEE_SHM_MAPPED)) {
- dev_err(teedev->dev.parent,
- "only mapped allocations supported\n");
- return ERR_PTR(-EINVAL);
- }
-
- if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF | TEE_SHM_PRIV))) {
- dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
- return ERR_PTR(-EINVAL);
- }
-
if (!tee_device_get(teedev))
return ERR_PTR(-EINVAL);
@@ -91,41 +99,76 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
}
refcount_set(&shm->refcount, 1);
- shm->flags = flags | TEE_SHM_POOL;
+ shm->flags = flags;
+ shm->id = id;
+
+ /*
+ * We're assigning this as it is needed if the shm is to be
+ * registered. If this function returns OK then the caller is expected
+ * to call teedev_ctx_get() or clear shm->ctx in case it's not
+ * needed any longer.
+ */
shm->ctx = ctx;
- if (flags & TEE_SHM_DMA_BUF)
- poolm = teedev->pool->dma_buf_mgr;
- else
- poolm = teedev->pool->private_mgr;
- rc = poolm->ops->alloc(poolm, shm, size);
+ rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align);
if (rc) {
ret = ERR_PTR(rc);
goto err_kfree;
}
- if (flags & TEE_SHM_DMA_BUF) {
- mutex_lock(&teedev->mutex);
- shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
- mutex_unlock(&teedev->mutex);
- if (shm->id < 0) {
- ret = ERR_PTR(shm->id);
- goto err_pool_free;
- }
- }
-
teedev_ctx_get(ctx);
-
return shm;
-err_pool_free:
- poolm->ops->free(poolm, shm);
err_kfree:
kfree(shm);
err_dev_put:
tee_device_put(teedev);
return ret;
}
-EXPORT_SYMBOL_GPL(tee_shm_alloc);
+
+/**
+ * tee_shm_alloc_user_buf() - Allocate shared memory for user space
+ * @ctx: Context that allocates the shared memory
+ * @size: Requested size of shared memory
+ *
+ * Memory allocated as user space shared memory is automatically freed when
+ * the TEE file pointer is closed. The primary usage of this function is
+ * when the TEE driver doesn't support registering ordinary user space
+ * memory.
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size)
+{
+ u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;
+ struct tee_device *teedev = ctx->teedev;
+ struct tee_shm *shm;
+ void *ret;
+ int id;
+
+ mutex_lock(&teedev->mutex);
+ id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
+ mutex_unlock(&teedev->mutex);
+ if (id < 0)
+ return ERR_PTR(id);
+
+ shm = shm_alloc_helper(ctx, size, PAGE_SIZE, flags, id);
+ if (IS_ERR(shm)) {
+ mutex_lock(&teedev->mutex);
+ idr_remove(&teedev->idr, id);
+ mutex_unlock(&teedev->mutex);
+ return shm;
+ }
+
+ mutex_lock(&teedev->mutex);
+ ret = idr_replace(&teedev->idr, shm, id);
+ mutex_unlock(&teedev->mutex);
+ if (IS_ERR(ret)) {
+ tee_shm_free(shm);
+ return ret;
+ }
+
+ return shm;
+}
/**
* tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
@@ -141,32 +184,54 @@ EXPORT_SYMBOL_GPL(tee_shm_alloc);
*/
struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
- return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED);
+ u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;
+
+ return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
-struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
- size_t length, u32 flags)
+/**
+ * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared
+ * kernel buffer
+ * @ctx: Context that allocates the shared memory
+ * @size: Requested size of shared memory
+ *
+ * This function returns similar shared memory as
+ * tee_shm_alloc_kernel_buf(), but with the difference that the memory
+ * might not be registered in secure world in case the driver supports
+ * passing memory not registered in advance.
+ *
+ * This function should normally only be used internally in the TEE
+ * drivers.
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
+{
+ u32 flags = TEE_SHM_PRIV | TEE_SHM_POOL;
+
+ return shm_alloc_helper(ctx, size, sizeof(long) * 2, flags, -1);
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);
+
+static struct tee_shm *
+register_shm_helper(struct tee_context *ctx, unsigned long addr,
+ size_t length, u32 flags, int id)
{
struct tee_device *teedev = ctx->teedev;
- const u32 req_user_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
- const u32 req_kernel_flags = TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED;
struct tee_shm *shm;
+ unsigned long start;
+ size_t num_pages;
void *ret;
int rc;
- int num_pages;
- unsigned long start;
-
- if (flags != req_user_flags && flags != req_kernel_flags)
- return ERR_PTR(-ENOTSUPP);
if (!tee_device_get(teedev))
return ERR_PTR(-EINVAL);
if (!teedev->desc->ops->shm_register ||
!teedev->desc->ops->shm_unregister) {
- tee_device_put(teedev);
- return ERR_PTR(-ENOTSUPP);
+ ret = ERR_PTR(-ENOTSUPP);
+ goto err_dev_put;
}
teedev_ctx_get(ctx);
@@ -174,13 +239,13 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
shm = kzalloc(sizeof(*shm), GFP_KERNEL);
if (!shm) {
ret = ERR_PTR(-ENOMEM);
- goto err;
+ goto err_ctx_put;
}
refcount_set(&shm->refcount, 1);
- shm->flags = flags | TEE_SHM_REGISTER;
+ shm->flags = flags;
shm->ctx = ctx;
- shm->id = -1;
+ shm->id = id;
addr = untagged_addr(addr);
start = rounddown(addr, PAGE_SIZE);
shm->offset = addr - start;
@@ -189,71 +254,106 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
if (!shm->pages) {
ret = ERR_PTR(-ENOMEM);
- goto err;
+ goto err_free_shm;
}
- if (flags & TEE_SHM_USER_MAPPED) {
+ if (flags & TEE_SHM_USER_MAPPED)
rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
shm->pages);
- } else {
- struct kvec *kiov;
- int i;
-
- kiov = kcalloc(num_pages, sizeof(*kiov), GFP_KERNEL);
- if (!kiov) {
- ret = ERR_PTR(-ENOMEM);
- goto err;
- }
-
- for (i = 0; i < num_pages; i++) {
- kiov[i].iov_base = (void *)(start + i * PAGE_SIZE);
- kiov[i].iov_len = PAGE_SIZE;
- }
-
- rc = get_kernel_pages(kiov, num_pages, 0, shm->pages);
- kfree(kiov);
- }
+ else
+ rc = shm_get_kernel_pages(start, num_pages, shm->pages);
if (rc > 0)
shm->num_pages = rc;
if (rc != num_pages) {
if (rc >= 0)
rc = -ENOMEM;
ret = ERR_PTR(rc);
- goto err;
- }
-
- mutex_lock(&teedev->mutex);
- shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
- mutex_unlock(&teedev->mutex);
-
- if (shm->id < 0) {
- ret = ERR_PTR(shm->id);
- goto err;
+ goto err_put_shm_pages;
}
rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
shm->num_pages, start);
if (rc) {
ret = ERR_PTR(rc);
- goto err;
+ goto err_put_shm_pages;
}
return shm;
-err:
- if (shm) {
- if (shm->id >= 0) {
- mutex_lock(&teedev->mutex);
- idr_remove(&teedev->idr, shm->id);
- mutex_unlock(&teedev->mutex);
- }
- release_registered_pages(shm);
- }
+err_put_shm_pages:
+ if (flags & TEE_SHM_USER_MAPPED)
+ unpin_user_pages(shm->pages, shm->num_pages);
+ else
+ shm_put_kernel_pages(shm->pages, shm->num_pages);
+ kfree(shm->pages);
+err_free_shm:
kfree(shm);
+err_ctx_put:
teedev_ctx_put(ctx);
+err_dev_put:
tee_device_put(teedev);
return ret;
}
-EXPORT_SYMBOL_GPL(tee_shm_register);
+
+/**
+ * tee_shm_register_user_buf() - Register a userspace shared memory buffer
+ * @ctx: Context that registers the shared memory
+ * @addr: The userspace address of the shared buffer
+ * @length: Length of the shared buffer
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
+ unsigned long addr, size_t length)
+{
+ u32 flags = TEE_SHM_USER_MAPPED | TEE_SHM_DYNAMIC;
+ struct tee_device *teedev = ctx->teedev;
+ struct tee_shm *shm;
+ void *ret;
+ int id;
+
+ mutex_lock(&teedev->mutex);
+ id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
+ mutex_unlock(&teedev->mutex);
+ if (id < 0)
+ return ERR_PTR(id);
+
+ shm = register_shm_helper(ctx, addr, length, flags, id);
+ if (IS_ERR(shm)) {
+ mutex_lock(&teedev->mutex);
+ idr_remove(&teedev->idr, id);
+ mutex_unlock(&teedev->mutex);
+ return shm;
+ }
+
+ mutex_lock(&teedev->mutex);
+ ret = idr_replace(&teedev->idr, shm, id);
+ mutex_unlock(&teedev->mutex);
+ if (IS_ERR(ret)) {
+ tee_shm_free(shm);
+ return ret;
+ }
+
+ return shm;
+}
+
+/**
+ * tee_shm_register_kernel_buf() - Register kernel memory to be shared with
+ * secure world
+ * @ctx: Context that registers the shared memory
+ * @addr: The buffer
+ * @length: Length of the buffer
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+
+struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
+ void *addr, size_t length)
+{
+ u32 flags = TEE_SHM_DYNAMIC;
+
+ return register_shm_helper(ctx, (unsigned long)addr, length, flags, -1);
+}
+EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf);
static int tee_shm_fop_release(struct inode *inode, struct file *filp)
{
@@ -293,7 +393,7 @@ int tee_shm_get_fd(struct tee_shm *shm)
{
int fd;
- if (!(shm->flags & TEE_SHM_DMA_BUF))
+ if (shm->id < 0)
return -EINVAL;
/* matched by tee_shm_put() in tee_shm_op_release() */
@@ -323,7 +423,7 @@ EXPORT_SYMBOL_GPL(tee_shm_free);
*/
int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
{
- if (!(shm->flags & TEE_SHM_MAPPED))
+ if (!shm->kaddr)
return -EINVAL;
/* Check that we're in the range of the shm */
if ((char *)va < (char *)shm->kaddr)
@@ -345,7 +445,7 @@ EXPORT_SYMBOL_GPL(tee_shm_va2pa);
*/
int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
{
- if (!(shm->flags & TEE_SHM_MAPPED))
+ if (!shm->kaddr)
return -EINVAL;
/* Check that we're in the range of the shm */
if (pa < shm->paddr)
@@ -373,7 +473,7 @@ EXPORT_SYMBOL_GPL(tee_shm_pa2va);
*/
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
- if (!(shm->flags & TEE_SHM_MAPPED))
+ if (!shm->kaddr)
return ERR_PTR(-EINVAL);
if (offs >= shm->size)
return ERR_PTR(-EINVAL);
@@ -448,7 +548,7 @@ void tee_shm_put(struct tee_shm *shm)
* the refcount_inc() in tee_shm_get_from_id() never starts
* from 0.
*/
- if (shm->flags & TEE_SHM_DMA_BUF)
+ if (shm->id >= 0)
idr_remove(&teedev->idr, shm->id);
do_release = true;
}
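
The two user-buffer paths above share a pattern worth noting: an ID is
reserved in the IDR with a NULL pointer before the object exists and is
only published with idr_replace() once setup succeeds. A minimal sketch
(hypothetical names) of that reserve-then-publish pattern:

	int id;
	void *old;

	mutex_lock(&dev->mutex);
	id = idr_alloc(&dev->idr, NULL, 1, 0, GFP_KERNEL);	/* reserve */
	mutex_unlock(&dev->mutex);
	if (id < 0)
		return id;

	obj = object_create(ctx, id);	/* may sleep, may fail */
	if (IS_ERR(obj)) {
		mutex_lock(&dev->mutex);
		idr_remove(&dev->idr, id);	/* return the slot */
		mutex_unlock(&dev->mutex);
		return PTR_ERR(obj);
	}

	mutex_lock(&dev->mutex);
	old = idr_replace(&dev->idr, obj, id);	/* publish */
	mutex_unlock(&dev->mutex);

Lookups can never observe a half-initialized object, yet the final ID
is known early enough to be stored in the object itself.
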
diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c
index fcbb461fc59c..058bfbac657a 100644
--- a/drivers/tee/tee_shm_pool.c
+++ b/drivers/tee/tee_shm_pool.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2015, 2017, 2022 Linaro Limited
*/
#include <linux/device.h>
#include <linux/dma-buf.h>
@@ -9,14 +9,16 @@
#include <linux/tee_drv.h>
#include "tee_private.h"
-static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm,
- struct tee_shm *shm, size_t size)
+static int pool_op_gen_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
+ size_t size, size_t align)
{
unsigned long va;
- struct gen_pool *genpool = poolm->private_data;
- size_t s = roundup(size, 1 << genpool->min_alloc_order);
+ struct gen_pool *genpool = pool->private_data;
+ size_t a = max_t(size_t, align, BIT(genpool->min_alloc_order));
+ struct genpool_data_align data = { .align = a };
+ size_t s = roundup(size, a);
- va = gen_pool_alloc(genpool, s);
+ va = gen_pool_alloc_algo(genpool, s, gen_pool_first_fit_align, &data);
if (!va)
return -ENOMEM;
@@ -24,163 +26,67 @@ static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm,
shm->kaddr = (void *)va;
shm->paddr = gen_pool_virt_to_phys(genpool, va);
shm->size = s;
+ /*
+ * This is from a static shared memory pool so no need to register
+ * each chunk, and no need to unregister later either.
+ */
+ shm->flags &= ~TEE_SHM_DYNAMIC;
return 0;
}
-static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm,
- struct tee_shm *shm)
+static void pool_op_gen_free(struct tee_shm_pool *pool, struct tee_shm *shm)
{
- gen_pool_free(poolm->private_data, (unsigned long)shm->kaddr,
+ gen_pool_free(pool->private_data, (unsigned long)shm->kaddr,
shm->size);
shm->kaddr = NULL;
}
-static void pool_op_gen_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+static void pool_op_gen_destroy_pool(struct tee_shm_pool *pool)
{
- gen_pool_destroy(poolm->private_data);
- kfree(poolm);
+ gen_pool_destroy(pool->private_data);
+ kfree(pool);
}
-static const struct tee_shm_pool_mgr_ops pool_ops_generic = {
+static const struct tee_shm_pool_ops pool_ops_generic = {
.alloc = pool_op_gen_alloc,
.free = pool_op_gen_free,
- .destroy_poolmgr = pool_op_gen_destroy_poolmgr,
+ .destroy_pool = pool_op_gen_destroy_pool,
};
-/**
- * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
- * memory range
- * @priv_info: Information for driver private shared memory pool
- * @dmabuf_info: Information for dma-buf shared memory pool
- *
- * Start and end of pools will must be page aligned.
- *
- * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
- * in @dmabuf, others will use the range provided by @priv.
- *
- * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
- */
-struct tee_shm_pool *
-tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
- struct tee_shm_pool_mem_info *dmabuf_info)
-{
- struct tee_shm_pool_mgr *priv_mgr;
- struct tee_shm_pool_mgr *dmabuf_mgr;
- void *rc;
-
- /*
- * Create the pool for driver private shared memory
- */
- rc = tee_shm_pool_mgr_alloc_res_mem(priv_info->vaddr, priv_info->paddr,
- priv_info->size,
- 3 /* 8 byte aligned */);
- if (IS_ERR(rc))
- return rc;
- priv_mgr = rc;
-
- /*
- * Create the pool for dma_buf shared memory
- */
- rc = tee_shm_pool_mgr_alloc_res_mem(dmabuf_info->vaddr,
- dmabuf_info->paddr,
- dmabuf_info->size, PAGE_SHIFT);
- if (IS_ERR(rc))
- goto err_free_priv_mgr;
- dmabuf_mgr = rc;
-
- rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
- if (IS_ERR(rc))
- goto err_free_dmabuf_mgr;
-
- return rc;
-
-err_free_dmabuf_mgr:
- tee_shm_pool_mgr_destroy(dmabuf_mgr);
-err_free_priv_mgr:
- tee_shm_pool_mgr_destroy(priv_mgr);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
-
-struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
- phys_addr_t paddr,
- size_t size,
- int min_alloc_order)
+struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr,
+ phys_addr_t paddr, size_t size,
+ int min_alloc_order)
{
const size_t page_mask = PAGE_SIZE - 1;
- struct tee_shm_pool_mgr *mgr;
+ struct tee_shm_pool *pool;
int rc;
/* Start and end must be page aligned */
if (vaddr & page_mask || paddr & page_mask || size & page_mask)
return ERR_PTR(-EINVAL);
- mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
- if (!mgr)
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
return ERR_PTR(-ENOMEM);
- mgr->private_data = gen_pool_create(min_alloc_order, -1);
- if (!mgr->private_data) {
+ pool->private_data = gen_pool_create(min_alloc_order, -1);
+ if (!pool->private_data) {
rc = -ENOMEM;
goto err;
}
- gen_pool_set_algo(mgr->private_data, gen_pool_best_fit, NULL);
- rc = gen_pool_add_virt(mgr->private_data, vaddr, paddr, size, -1);
+ rc = gen_pool_add_virt(pool->private_data, vaddr, paddr, size, -1);
if (rc) {
- gen_pool_destroy(mgr->private_data);
+ gen_pool_destroy(pool->private_data);
goto err;
}
- mgr->ops = &pool_ops_generic;
+ pool->ops = &pool_ops_generic;
- return mgr;
+ return pool;
err:
- kfree(mgr);
+ kfree(pool);
return ERR_PTR(rc);
}
-EXPORT_SYMBOL_GPL(tee_shm_pool_mgr_alloc_res_mem);
-
-static bool check_mgr_ops(struct tee_shm_pool_mgr *mgr)
-{
- return mgr && mgr->ops && mgr->ops->alloc && mgr->ops->free &&
- mgr->ops->destroy_poolmgr;
-}
-
-struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
- struct tee_shm_pool_mgr *dmabuf_mgr)
-{
- struct tee_shm_pool *pool;
-
- if (!check_mgr_ops(priv_mgr) || !check_mgr_ops(dmabuf_mgr))
- return ERR_PTR(-EINVAL);
-
- pool = kzalloc(sizeof(*pool), GFP_KERNEL);
- if (!pool)
- return ERR_PTR(-ENOMEM);
-
- pool->private_mgr = priv_mgr;
- pool->dma_buf_mgr = dmabuf_mgr;
-
- return pool;
-}
-EXPORT_SYMBOL_GPL(tee_shm_pool_alloc);
-
-/**
- * tee_shm_pool_free() - Free a shared memory pool
- * @pool: The shared memory pool to free
- *
- * There must be no remaining shared memory allocated from this pool when
- * this function is called.
- */
-void tee_shm_pool_free(struct tee_shm_pool *pool)
-{
- if (pool->private_mgr)
- tee_shm_pool_mgr_destroy(pool->private_mgr);
- if (pool->dma_buf_mgr)
- tee_shm_pool_mgr_destroy(pool->dma_buf_mgr);
- kfree(pool);
-}
-EXPORT_SYMBOL_GPL(tee_shm_pool_free);
+EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
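
A minimal sketch (assuming an already-populated struct gen_pool *gp) of
the aligned-allocation idiom the new pool_op_gen_alloc() uses:

	struct genpool_data_align data = { .align = 512 };
	unsigned long va;

	/* first-fit search restricted to 512-byte-aligned chunks */
	va = gen_pool_alloc_algo(gp, roundup(size, 512),
				 gen_pool_first_fit_align, &data);
	if (!va)
		return -ENOMEM;

Unlike plain gen_pool_alloc(), this honours a per-call alignment rather
than only the pool-wide min_alloc_order.
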
diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c
index 8df5edef1ded..0cedb8b4f00a 100644
--- a/drivers/thermal/broadcom/brcmstb_thermal.c
+++ b/drivers/thermal/broadcom/brcmstb_thermal.c
@@ -351,7 +351,7 @@ static int brcmstb_thermal_probe(struct platform_device *pdev)
priv->thermal = thermal;
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq >= 0) {
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
brcmstb_tmon_irq_thread,
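
A minimal sketch (hypothetical handler and fallback) of why the
_optional variant fits here: it returns a negative errno without
logging an error when no interrupt is described, letting the driver
degrade gracefully.

	irq = platform_get_irq_optional(pdev, 0);
	if (irq >= 0) {
		ret = devm_request_irq(&pdev->dev, irq, example_isr, 0,
				       "example", priv);
		if (ret)
			return ret;
	}
	/* otherwise run without an interrupt, e.g. by polling */
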
diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig
index c83ea5d04a1d..f0c845679250 100644
--- a/drivers/thermal/intel/Kconfig
+++ b/drivers/thermal/intel/Kconfig
@@ -99,3 +99,17 @@ config INTEL_MENLOW
Intel Menlow platform.
If unsure, say N.
+
+config INTEL_HFI_THERMAL
+ bool "Intel Hardware Feedback Interface"
+ depends on NET
+ depends on CPU_SUP_INTEL
+ depends on X86_THERMAL_VECTOR
+ select THERMAL_NETLINK
+ help
+ Select this option to enable the Hardware Feedback Interface. If
+ selected, hardware provides guidance to the operating system on
+ the performance and energy efficiency capabilities of each CPU.
+ These capabilities may change as a result of changes in the operating
+ conditions of the system such as power and thermal limits. If selected,
+ the kernel relays updates in CPUs' capabilities to userspace.
diff --git a/drivers/thermal/intel/Makefile b/drivers/thermal/intel/Makefile
index 960b56268b4a..9a8d8054f316 100644
--- a/drivers/thermal/intel/Makefile
+++ b/drivers/thermal/intel/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
obj-$(CONFIG_INTEL_TCC_COOLING) += intel_tcc_cooling.o
obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o
obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
+obj-$(CONFIG_INTEL_HFI_THERMAL) += intel_hfi.o
diff --git a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
index e90690a234c4..01b80331eab6 100644
--- a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
@@ -72,7 +72,6 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
int i;
int nr_bad_entries = 0;
struct trt *trts;
- struct acpi_device *adev;
union acpi_object *p;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer element = { 0, NULL };
@@ -112,12 +111,10 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
if (!create_dev)
continue;
- result = acpi_bus_get_device(trt->source, &adev);
- if (result)
+ if (!acpi_fetch_acpi_dev(trt->source))
pr_warn("Failed to get source ACPI device\n");
- result = acpi_bus_get_device(trt->target, &adev);
- if (result)
+ if (!acpi_fetch_acpi_dev(trt->target))
pr_warn("Failed to get target ACPI device\n");
}
@@ -149,7 +146,6 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
int i;
int nr_bad_entries = 0;
struct art *arts;
- struct acpi_device *adev;
union acpi_object *p;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer element = { 0, NULL };
@@ -191,16 +187,11 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
if (!create_dev)
continue;
- if (art->source) {
- result = acpi_bus_get_device(art->source, &adev);
- if (result)
- pr_warn("Failed to get source ACPI device\n");
- }
- if (art->target) {
- result = acpi_bus_get_device(art->target, &adev);
- if (result)
- pr_warn("Failed to get target ACPI device\n");
- }
+ if (!acpi_fetch_acpi_dev(art->source))
+ pr_warn("Failed to get source ACPI device\n");
+
+ if (!acpi_fetch_acpi_dev(art->target))
+ pr_warn("Failed to get target ACPI device\n");
}
*artp = arts;
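
For context, the helper these hunks switch to maps an ACPI handle
directly to its struct acpi_device (or NULL), replacing the
status-plus-output-parameter style of acpi_bus_get_device():

	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);

	if (!adev)
		pr_warn("no ACPI device for this handle\n");
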
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 4f478812cb51..4954800b9850 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -17,8 +17,8 @@
#define INT3400_KEEP_ALIVE 0xA0
enum int3400_thermal_uuid {
+ INT3400_THERMAL_ACTIVE = 0,
INT3400_THERMAL_PASSIVE_1,
- INT3400_THERMAL_ACTIVE,
INT3400_THERMAL_CRITICAL,
INT3400_THERMAL_ADAPTIVE_PERFORMANCE,
INT3400_THERMAL_EMERGENCY_CALL_MODE,
@@ -31,8 +31,8 @@ enum int3400_thermal_uuid {
};
static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
- "42A441D6-AE6A-462b-A84B-4A8CE79027D3",
"3A95C389-E4B8-4629-A526-C52C88626BAE",
+ "42A441D6-AE6A-462b-A84B-4A8CE79027D3",
"97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
"63BE270F-1C11-48FD-A6F7-3AF253FF3E2D",
"5349962F-71E6-431D-9AE8-0A635B710AEE",
@@ -53,12 +53,13 @@ struct int3400_thermal_priv {
struct art *arts;
int trt_count;
struct trt *trts;
- u8 uuid_bitmap;
+ u32 uuid_bitmap;
int rel_misc_dev_res;
int current_uuid_index;
char *data_vault;
int odvp_count;
int *odvp;
+ u32 os_uuid_mask;
struct odvp_attr *odvp_attrs;
};
@@ -142,12 +143,55 @@ static ssize_t current_uuid_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct int3400_thermal_priv *priv = dev_get_drvdata(dev);
+ int i, length = 0;
- if (priv->current_uuid_index == -1)
- return sprintf(buf, "INVALID\n");
+ if (priv->current_uuid_index > 0)
+ return sprintf(buf, "%s\n",
+ int3400_thermal_uuids[priv->current_uuid_index]);
- return sprintf(buf, "%s\n",
- int3400_thermal_uuids[priv->current_uuid_index]);
+ for (i = 0; i <= INT3400_THERMAL_CRITICAL; i++) {
+ if (priv->os_uuid_mask & BIT(i))
+ length += scnprintf(&buf[length],
+ PAGE_SIZE - length,
+ "%s\n",
+ int3400_thermal_uuids[i]);
+ }
+
+ if (length)
+ return length;
+
+ return sprintf(buf, "INVALID\n");
+}
+
+static int int3400_thermal_run_osc(acpi_handle handle, char *uuid_str, int *enable)
+{
+ u32 ret, buf[2];
+ acpi_status status;
+ int result = 0;
+ struct acpi_osc_context context = {
+ .uuid_str = NULL,
+ .rev = 1,
+ .cap.length = 8,
+ };
+
+ context.uuid_str = uuid_str;
+
+ buf[OSC_QUERY_DWORD] = 0;
+ buf[OSC_SUPPORT_DWORD] = *enable;
+
+ context.cap.pointer = buf;
+
+ status = acpi_run_osc(handle, &context);
+ if (ACPI_SUCCESS(status)) {
+ ret = *((u32 *)(context.ret.pointer + 4));
+ if (ret != *enable)
+ result = -EPERM;
+ } else
+ result = -EPERM;
+
+ kfree(context.ret.pointer);
+
+ return result;
}
static ssize_t current_uuid_store(struct device *dev,
@@ -164,16 +208,47 @@ static ssize_t current_uuid_store(struct device *dev,
* If we have a list of supported UUIDs, make sure
* this one is supported.
*/
- if (priv->uuid_bitmap &&
- !(priv->uuid_bitmap & (1 << i)))
+ if (priv->uuid_bitmap & BIT(i)) {
+ priv->current_uuid_index = i;
+ return count;
+ }
+
+ /*
+ * There is support of only 3 policies via the new
+ * _OSC to inform OS capability:
+ * INT3400_THERMAL_ACTIVE
+ * INT3400_THERMAL_PASSIVE_1
+ * INT3400_THERMAL_CRITICAL
+ */
+
+ if (i > INT3400_THERMAL_CRITICAL)
return -EINVAL;
- priv->current_uuid_index = i;
- return count;
+ priv->os_uuid_mask |= BIT(i);
+
+ break;
}
}
- return -EINVAL;
+ if (priv->os_uuid_mask) {
+ int cap, ret;
+
+ /*
+ * Capability bits:
+ * Bit 0: set to 1 to indicate DPTF is active
+	 * Bit 1: set to 1 to indicate active cooling is supported by user space daemon
+	 * Bit 2: set to 1 to indicate passive cooling is supported by user space daemon
+	 * Bit 3: set to 1 to indicate critical trip is handled by user space daemon
+ */
+ cap = ((priv->os_uuid_mask << 1) | 0x01);
+ ret = int3400_thermal_run_osc(priv->adev->handle,
+ "b23ba85d-c8b7-3542-88de-8de2ffcfd698",
+ &cap);
+ if (ret)
+ return ret;
+ }
+
+ return count;
}
static DEVICE_ATTR_RW(current_uuid);
@@ -236,41 +311,6 @@ end:
return result;
}
-static int int3400_thermal_run_osc(acpi_handle handle,
- enum int3400_thermal_uuid uuid, bool enable)
-{
- u32 ret, buf[2];
- acpi_status status;
- int result = 0;
- struct acpi_osc_context context = {
- .uuid_str = NULL,
- .rev = 1,
- .cap.length = 8,
- };
-
- if (uuid < 0 || uuid >= INT3400_THERMAL_MAXIMUM_UUID)
- return -EINVAL;
-
- context.uuid_str = int3400_thermal_uuids[uuid];
-
- buf[OSC_QUERY_DWORD] = 0;
- buf[OSC_SUPPORT_DWORD] = enable;
-
- context.cap.pointer = buf;
-
- status = acpi_run_osc(handle, &context);
- if (ACPI_SUCCESS(status)) {
- ret = *((u32 *)(context.ret.pointer + 4));
- if (ret != enable)
- result = -EPERM;
- } else
- result = -EPERM;
-
- kfree(context.ret.pointer);
-
- return result;
-}
-
static ssize_t odvp_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
@@ -426,10 +466,18 @@ static int int3400_thermal_change_mode(struct thermal_zone_device *thermal,
if (!priv)
return -EINVAL;
- if (mode != thermal->mode)
+ if (mode != thermal->mode) {
+ int enabled;
+
+ if (priv->current_uuid_index < 0 ||
+ priv->current_uuid_index >= INT3400_THERMAL_MAXIMUM_UUID)
+ return -EINVAL;
+
+ enabled = (mode == THERMAL_DEVICE_ENABLED);
result = int3400_thermal_run_osc(priv->adev->handle,
- priv->current_uuid_index,
- mode == THERMAL_DEVICE_ENABLED);
+ int3400_thermal_uuids[priv->current_uuid_index],
+ &enabled);
+ }
evaluate_odvp(priv);
@@ -468,6 +516,11 @@ static void int3400_setup_gddv(struct int3400_thermal_priv *priv)
priv->data_vault = kmemdup(obj->package.elements[0].buffer.pointer,
obj->package.elements[0].buffer.length,
GFP_KERNEL);
+ if (!priv->data_vault) {
+ kfree(buffer.pointer);
+ return;
+ }
+
bin_attr_data_vault.private = priv->data_vault;
bin_attr_data_vault.size = obj->package.elements[0].buffer.length;
kfree(buffer.pointer);
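
A worked example of the capability word built in current_uuid_store():
if user space selects the active (mask bit 0) and passive (mask bit 1)
policies, os_uuid_mask is 0x3, so cap = (0x3 << 1) | 0x01 = 0x07: DPTF
active, with active and passive cooling handled by the user space
daemon.
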
diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
new file mode 100644
index 000000000000..730fd121df6e
--- /dev/null
+++ b/drivers/thermal/intel/intel_hfi.c
@@ -0,0 +1,569 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Hardware Feedback Interface Driver
+ *
+ * Copyright (c) 2021, Intel Corporation.
+ *
+ * Authors: Aubrey Li <aubrey.li@linux.intel.com>
+ * Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+ *
+ *
+ * The Hardware Feedback Interface provides performance and energy efficiency
+ * capability information for each CPU in the system. Depending on the processor
+ * model, hardware may periodically update these capabilities as a result of
+ * changes in the operating conditions (e.g., power limits or thermal
+ * constraints). On other processor models, there is a single HFI update
+ * at boot.
+ *
+ * This file provides functionality to process HFI updates and relay these
+ * updates to userspace.
+ */
+
+#define pr_fmt(fmt) "intel-hfi: " fmt
+
+#include <linux/bitops.h>
+#include <linux/cpufeature.h>
+#include <linux/cpumask.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/math.h>
+#include <linux/mutex.h>
+#include <linux/percpu-defs.h>
+#include <linux/printk.h>
+#include <linux/processor.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/topology.h>
+#include <linux/workqueue.h>
+
+#include <asm/msr.h>
+
+#include "../thermal_core.h"
+#include "intel_hfi.h"
+
+#define THERM_STATUS_CLEAR_PKG_MASK (BIT(1) | BIT(3) | BIT(5) | BIT(7) | \
+ BIT(9) | BIT(11) | BIT(26))
+
+/* Hardware Feedback Interface MSR configuration bits */
+#define HW_FEEDBACK_PTR_VALID_BIT BIT(0)
+#define HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT BIT(0)
+
+/* CPUID detection and enumeration definitions for HFI */
+
+#define CPUID_HFI_LEAF 6
+
+union hfi_capabilities {
+ struct {
+ u8 performance:1;
+ u8 energy_efficiency:1;
+ u8 __reserved:6;
+ } split;
+ u8 bits;
+};
+
+union cpuid6_edx {
+ struct {
+ union hfi_capabilities capabilities;
+ u32 table_pages:4;
+ u32 __reserved:4;
+ s32 index:16;
+ } split;
+ u32 full;
+};
+
+/**
+ * struct hfi_cpu_data - HFI capabilities per CPU
+ * @perf_cap: Performance capability
+ * @ee_cap: Energy efficiency capability
+ *
+ * Capabilities of a logical processor in the HFI table. These capabilities are
+ * unitless.
+ */
+struct hfi_cpu_data {
+ u8 perf_cap;
+ u8 ee_cap;
+} __packed;
+
+/**
+ * struct hfi_hdr - Header of the HFI table
+ * @perf_updated: Hardware updated performance capabilities
+ * @ee_updated: Hardware updated energy efficiency capabilities
+ *
+ * Properties of the data in an HFI table.
+ */
+struct hfi_hdr {
+ u8 perf_updated;
+ u8 ee_updated;
+} __packed;
+
+/**
+ * struct hfi_instance - Representation of an HFI instance (i.e., a table)
+ * @local_table: Base of the local copy of the HFI table
+ * @timestamp: Timestamp of the last update of the local table.
+ * Located at the base of the local table.
+ * @hdr: Base address of the header of the local table
+ * @data: Base address of the data of the local table
+ * @cpus: CPUs represented in this HFI table instance
+ * @hw_table: Pointer to the HFI table of this instance
+ * @update_work: Delayed work to process HFI updates
+ * @table_lock:	Lock to protect accesses to the table of this instance
+ * @event_lock: Lock to process HFI interrupts
+ *
+ * A set of parameters to parse and navigate a specific HFI table.
+ */
+struct hfi_instance {
+ union {
+ void *local_table;
+ u64 *timestamp;
+ };
+ void *hdr;
+ void *data;
+ cpumask_var_t cpus;
+ void *hw_table;
+ struct delayed_work update_work;
+ raw_spinlock_t table_lock;
+ raw_spinlock_t event_lock;
+};
+
+/**
+ * struct hfi_features - Supported HFI features
+ * @nr_table_pages: Size of the HFI table in 4KB pages
+ * @cpu_stride: Stride size to locate the capability data of a logical
+ * processor within the table (i.e., row stride)
+ * @hdr_size: Size of the table header
+ *
+ * Parameters and supported features that are common to all HFI instances
+ */
+struct hfi_features {
+ unsigned int nr_table_pages;
+ unsigned int cpu_stride;
+ unsigned int hdr_size;
+};
+
+/**
+ * struct hfi_cpu_info - Per-CPU attributes to consume HFI data
+ * @index: Row of this CPU in its HFI table
+ * @hfi_instance: Attributes of the HFI table to which this CPU belongs
+ *
+ * Parameters to link a logical processor to an HFI table and a row within it.
+ */
+struct hfi_cpu_info {
+ s16 index;
+ struct hfi_instance *hfi_instance;
+};
+
+static DEFINE_PER_CPU(struct hfi_cpu_info, hfi_cpu_info) = { .index = -1 };
+
+static int max_hfi_instances;
+static struct hfi_instance *hfi_instances;
+
+static struct hfi_features hfi_features;
+static DEFINE_MUTEX(hfi_instance_lock);
+
+static struct workqueue_struct *hfi_updates_wq;
+#define HFI_UPDATE_INTERVAL HZ
+#define HFI_MAX_THERM_NOTIFY_COUNT 16
+
+static void get_hfi_caps(struct hfi_instance *hfi_instance,
+ struct thermal_genl_cpu_caps *cpu_caps)
+{
+ int cpu, i = 0;
+
+ raw_spin_lock_irq(&hfi_instance->table_lock);
+ for_each_cpu(cpu, hfi_instance->cpus) {
+ struct hfi_cpu_data *caps;
+ s16 index;
+
+ index = per_cpu(hfi_cpu_info, cpu).index;
+ caps = hfi_instance->data + index * hfi_features.cpu_stride;
+ cpu_caps[i].cpu = cpu;
+
+ /*
+ * Scale performance and energy efficiency to
+ * the [0, 1023] interval that thermal netlink uses.
+ */
+ cpu_caps[i].performance = caps->perf_cap << 2;
+ cpu_caps[i].efficiency = caps->ee_cap << 2;
+
+ ++i;
+ }
+ raw_spin_unlock_irq(&hfi_instance->table_lock);
+}
+
+/*
+ * Call update_capabilities() when there are changes in the HFI table.
+ */
+static void update_capabilities(struct hfi_instance *hfi_instance)
+{
+ struct thermal_genl_cpu_caps *cpu_caps;
+ int i = 0, cpu_count;
+
+ /* CPUs may come online/offline while processing an HFI update. */
+ mutex_lock(&hfi_instance_lock);
+
+ cpu_count = cpumask_weight(hfi_instance->cpus);
+
+ /* No CPUs to report in this hfi_instance. */
+ if (!cpu_count)
+ goto out;
+
+ cpu_caps = kcalloc(cpu_count, sizeof(*cpu_caps), GFP_KERNEL);
+ if (!cpu_caps)
+ goto out;
+
+ get_hfi_caps(hfi_instance, cpu_caps);
+
+ if (cpu_count < HFI_MAX_THERM_NOTIFY_COUNT)
+ goto last_cmd;
+
+ /* Process complete chunks of HFI_MAX_THERM_NOTIFY_COUNT capabilities. */
+ for (i = 0;
+ (i + HFI_MAX_THERM_NOTIFY_COUNT) <= cpu_count;
+ i += HFI_MAX_THERM_NOTIFY_COUNT)
+ thermal_genl_cpu_capability_event(HFI_MAX_THERM_NOTIFY_COUNT,
+ &cpu_caps[i]);
+
+ cpu_count = cpu_count - i;
+
+last_cmd:
+ /* Process the remaining capabilities if any. */
+ if (cpu_count)
+ thermal_genl_cpu_capability_event(cpu_count, &cpu_caps[i]);
+
+ kfree(cpu_caps);
+out:
+ mutex_unlock(&hfi_instance_lock);
+}
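update_capabilities() batches its netlink notifications: whole chunks of HFI_MAX_THERM_NOTIFY_COUNT entries first, then the tail. The index arithmetic can be sanity-checked in isolation (37 CPUs is an arbitrary example):

	#include <stdio.h>

	#define CHUNK 16	/* mirrors HFI_MAX_THERM_NOTIFY_COUNT */

	int main(void)
	{
		int cpu_count = 37, i;

		for (i = 0; i + CHUNK <= cpu_count; i += CHUNK)
			printf("full chunk at offset %d\n", i);	/* offsets 0 and 16 */
		if (cpu_count - i)
			printf("tail of %d at offset %d\n",
			       cpu_count - i, i);		/* tail of 5 at 32 */
		return 0;
	}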
+
+static void hfi_update_work_fn(struct work_struct *work)
+{
+ struct hfi_instance *hfi_instance;
+
+ hfi_instance = container_of(to_delayed_work(work), struct hfi_instance,
+ update_work);
+ if (!hfi_instance)
+ return;
+
+ update_capabilities(hfi_instance);
+}
+
+void intel_hfi_process_event(__u64 pkg_therm_status_msr_val)
+{
+ struct hfi_instance *hfi_instance;
+ int cpu = smp_processor_id();
+ struct hfi_cpu_info *info;
+ u64 new_timestamp;
+
+ if (!pkg_therm_status_msr_val)
+ return;
+
+ info = &per_cpu(hfi_cpu_info, cpu);
+ if (!info)
+ return;
+
+ /*
+ * A CPU is linked to its HFI instance before the thermal vector in the
+ * local APIC is unmasked. Hence, info->hfi_instance cannot be NULL
+ * when receiving an HFI event.
+ */
+ hfi_instance = info->hfi_instance;
+ if (unlikely(!hfi_instance)) {
+ pr_debug("Received event on CPU %d but instance was null", cpu);
+ return;
+ }
+
+ /*
+ * On most systems, all CPUs in the package receive a package-level
+ * thermal interrupt when there is an HFI update. It is sufficient to
+ * let a single CPU acknowledge the update and queue work to
+ * process it. The remaining CPUs can resume their work.
+ */
+ if (!raw_spin_trylock(&hfi_instance->event_lock))
+ return;
+
+ /* Skip duplicated updates. */
+ new_timestamp = *(u64 *)hfi_instance->hw_table;
+ if (*hfi_instance->timestamp == new_timestamp) {
+ raw_spin_unlock(&hfi_instance->event_lock);
+ return;
+ }
+
+ raw_spin_lock(&hfi_instance->table_lock);
+
+ /*
+ * Copy the updated table into our local copy. This includes the new
+ * timestamp.
+ */
+ memcpy(hfi_instance->local_table, hfi_instance->hw_table,
+ hfi_features.nr_table_pages << PAGE_SHIFT);
+
+ raw_spin_unlock(&hfi_instance->table_lock);
+ raw_spin_unlock(&hfi_instance->event_lock);
+
+ /*
+ * Let hardware know that we are done reading the HFI table and it is
+ * free to update it again.
+ */
+ pkg_therm_status_msr_val &= THERM_STATUS_CLEAR_PKG_MASK &
+ ~PACKAGE_THERM_STATUS_HFI_UPDATED;
+ wrmsrl(MSR_IA32_PACKAGE_THERM_STATUS, pkg_therm_status_msr_val);
+
+ queue_delayed_work(hfi_updates_wq, &hfi_instance->update_work,
+ HFI_UPDATE_INTERVAL);
+}
+
+static void init_hfi_cpu_index(struct hfi_cpu_info *info)
+{
+ union cpuid6_edx edx;
+
+ /* Do not re-read @cpu's index if it has already been initialized. */
+ if (info->index > -1)
+ return;
+
+ edx.full = cpuid_edx(CPUID_HFI_LEAF);
+ info->index = edx.split.index;
+}
+
+/*
+ * The format of the HFI table depends on the number of capabilities that the
+ * hardware supports. Keep a data structure to navigate the table.
+ */
+static void init_hfi_instance(struct hfi_instance *hfi_instance)
+{
+ /* The HFI header is below the time-stamp. */
+ hfi_instance->hdr = hfi_instance->local_table +
+ sizeof(*hfi_instance->timestamp);
+
+ /* The HFI data starts below the header. */
+ hfi_instance->data = hfi_instance->hdr + hfi_features.hdr_size;
+}
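Together with hfi_parse_features() below, this fixes the table geometry: an 8-byte timestamp, a header rounded up to a multiple of 8 bytes, then one row per logical CPU at cpu_stride intervals. A standalone sketch of the offset math for the two capabilities current hardware reports (the row index is an arbitrary example):

	#include <stdint.h>
	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		/* Two capabilities: performance and energy efficiency. */
		unsigned int nr_caps = 2;
		unsigned int hdr_size = DIV_ROUND_UP(nr_caps, 8) * 8;	/* 8 bytes */
		unsigned int stride = DIV_ROUND_UP(nr_caps, 8) * 8;	/* 8 bytes */
		int16_t index = 3;					/* example row */

		/* timestamp (8 bytes) + header + row offset within the data area */
		size_t off = sizeof(uint64_t) + hdr_size + (size_t)index * stride;

		printf("row %d starts at byte %zu\n", index, off);	/* byte 40 */
		return 0;
	}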
+
+/**
+ * intel_hfi_online() - Enable HFI on @cpu
+ * @cpu: CPU in which the HFI will be enabled
+ *
+ * Enable the HFI to be used in @cpu. The HFI is enabled at the die/package
+ * level. The first CPU in the die/package to come online does the full HFI
+ * initialization. Subsequent CPUs will just link themselves to the HFI
+ * instance of their die/package.
+ *
+ * This function is called before enabling the thermal vector in the local APIC
+ * in order to ensure that @cpu has an associated HFI instance when it receives
+ * an HFI event.
+ */
+void intel_hfi_online(unsigned int cpu)
+{
+ struct hfi_instance *hfi_instance;
+ struct hfi_cpu_info *info;
+ phys_addr_t hw_table_pa;
+ u64 msr_val;
+ u16 die_id;
+
+ /* Nothing to do if hfi_instances are missing. */
+ if (!hfi_instances)
+ return;
+
+ /*
+ * Link @cpu to the HFI instance of its package/die. It does not
+ * matter whether the instance has been initialized.
+ */
+ info = &per_cpu(hfi_cpu_info, cpu);
+ die_id = topology_logical_die_id(cpu);
+ hfi_instance = info->hfi_instance;
+ if (!hfi_instance) {
+ if (die_id < 0 || die_id >= max_hfi_instances)
+ return;
+
+ hfi_instance = &hfi_instances[die_id];
+ info->hfi_instance = hfi_instance;
+ }
+
+ init_hfi_cpu_index(info);
+
+ /*
+ * Now check if the HFI instance of the package/die of @cpu has been
+ * initialized (by checking its header). In such case, all we have to
+ * do is to add @cpu to this instance's cpumask.
+ */
+ mutex_lock(&hfi_instance_lock);
+ if (hfi_instance->hdr) {
+ cpumask_set_cpu(cpu, hfi_instance->cpus);
+ goto unlock;
+ }
+
+ /*
+ * Hardware is programmed with the physical address of the first page
+ * frame of the table. Hence, the allocated memory must be page-aligned.
+ */
+ hfi_instance->hw_table = alloc_pages_exact(hfi_features.nr_table_pages,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!hfi_instance->hw_table)
+ goto unlock;
+
+ hw_table_pa = virt_to_phys(hfi_instance->hw_table);
+
+ /*
+ * Allocate memory to keep a local copy of the table that
+ * hardware generates.
+ */
+ hfi_instance->local_table = kzalloc(hfi_features.nr_table_pages << PAGE_SHIFT,
+ GFP_KERNEL);
+ if (!hfi_instance->local_table)
+ goto free_hw_table;
+
+ /*
+ * Program the address of the feedback table of this die/package. On
+ * some processors, hardware remembers the old address of the HFI table
+ * even after having been reprogrammed and re-enabled. Thus, do not free
+ * the pages allocated for the table or reprogram the hardware with a
+ * new base address. Namely, program the hardware only once.
+ */
+ msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT;
+ wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val);
+
+ init_hfi_instance(hfi_instance);
+
+ INIT_DELAYED_WORK(&hfi_instance->update_work, hfi_update_work_fn);
+ raw_spin_lock_init(&hfi_instance->table_lock);
+ raw_spin_lock_init(&hfi_instance->event_lock);
+
+ cpumask_set_cpu(cpu, hfi_instance->cpus);
+
+ /*
+ * Enable the hardware feedback interface and never disable it. See
+ * comment on programming the address of the table.
+ */
+ rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+ msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
+ wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+
+unlock:
+ mutex_unlock(&hfi_instance_lock);
+ return;
+
+free_hw_table:
+ free_pages_exact(hfi_instance->hw_table, hfi_features.nr_table_pages);
+ goto unlock;
+}
+
+/**
+ * intel_hfi_offline() - Disable HFI on @cpu
+ * @cpu: CPU in which the HFI will be disabled
+ *
+ * Remove @cpu from those covered by its HFI instance.
+ *
+ * On some processors, hardware remembers previous programming settings even
+ * after being reprogrammed. Thus, keep HFI enabled even if all CPUs in the
+ * die/package of @cpu are offline. See note in intel_hfi_online().
+ */
+void intel_hfi_offline(unsigned int cpu)
+{
+ struct hfi_cpu_info *info = &per_cpu(hfi_cpu_info, cpu);
+ struct hfi_instance *hfi_instance;
+
+ /*
+ * Check if @cpu has an associated, initialized HFI instance (i.e., one
+ * with a non-NULL header). Also, HFI instances are only initialized if
+ * X86_FEATURE_HFI is present.
+ */
+ hfi_instance = info->hfi_instance;
+ if (!hfi_instance)
+ return;
+
+ if (!hfi_instance->hdr)
+ return;
+
+ mutex_lock(&hfi_instance_lock);
+ cpumask_clear_cpu(cpu, hfi_instance->cpus);
+ mutex_unlock(&hfi_instance_lock);
+}
+
+static __init int hfi_parse_features(void)
+{
+ unsigned int nr_capabilities;
+ union cpuid6_edx edx;
+
+ if (!boot_cpu_has(X86_FEATURE_HFI))
+ return -ENODEV;
+
+ /*
+ * If we are here we know that CPUID_HFI_LEAF exists. Parse the
+ * supported capabilities and the size of the HFI table.
+ */
+ edx.full = cpuid_edx(CPUID_HFI_LEAF);
+
+ if (!edx.split.capabilities.split.performance) {
+ pr_debug("Performance reporting not supported! Not using HFI\n");
+ return -ENODEV;
+ }
+
+ /*
+ * The number of supported capabilities determines the number of
+ * columns in the HFI table. Exclude the reserved bits.
+ */
+ edx.split.capabilities.split.__reserved = 0;
+ nr_capabilities = hweight8(edx.split.capabilities.bits);
+
+ /* The number of 4KB pages required by the table */
+ hfi_features.nr_table_pages = edx.split.table_pages + 1;
+
+ /*
+ * The header contains change indications for each supported feature.
+ * The size of the table header is rounded up to be a multiple of 8
+ * bytes.
+ */
+ hfi_features.hdr_size = DIV_ROUND_UP(nr_capabilities, 8) * 8;
+
+ /*
+ * Data of each logical processor is also rounded up to be a multiple
+ * of 8 bytes.
+ */
+ hfi_features.cpu_stride = DIV_ROUND_UP(nr_capabilities, 8) * 8;
+
+ return 0;
+}
+
+void __init intel_hfi_init(void)
+{
+ struct hfi_instance *hfi_instance;
+ int i, j;
+
+ if (hfi_parse_features())
+ return;
+
+ /* There is one HFI instance per die/package. */
+ max_hfi_instances = topology_max_packages() *
+ topology_max_die_per_package();
+
+ /*
+ * This allocation may fail. CPU hotplug callbacks must check
+ * for a null pointer.
+ */
+ hfi_instances = kcalloc(max_hfi_instances, sizeof(*hfi_instances),
+ GFP_KERNEL);
+ if (!hfi_instances)
+ return;
+
+ for (i = 0; i < max_hfi_instances; i++) {
+ hfi_instance = &hfi_instances[i];
+ if (!zalloc_cpumask_var(&hfi_instance->cpus, GFP_KERNEL))
+ goto err_nomem;
+ }
+
+ hfi_updates_wq = create_singlethread_workqueue("hfi-updates");
+ if (!hfi_updates_wq)
+ goto err_nomem;
+
+ return;
+
+err_nomem:
+ for (j = 0; j < i; ++j) {
+ hfi_instance = &hfi_instances[j];
+ free_cpumask_var(hfi_instance->cpus);
+ }
+
+ kfree(hfi_instances);
+ hfi_instances = NULL;
+}
diff --git a/drivers/thermal/intel/intel_hfi.h b/drivers/thermal/intel/intel_hfi.h
new file mode 100644
index 000000000000..325aa78b745c
--- /dev/null
+++ b/drivers/thermal/intel/intel_hfi.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _INTEL_HFI_H
+#define _INTEL_HFI_H
+
+#if defined(CONFIG_INTEL_HFI_THERMAL)
+void __init intel_hfi_init(void);
+void intel_hfi_online(unsigned int cpu);
+void intel_hfi_offline(unsigned int cpu);
+void intel_hfi_process_event(__u64 pkg_therm_status_msr_val);
+#else
+static inline void intel_hfi_init(void) { }
+static inline void intel_hfi_online(unsigned int cpu) { }
+static inline void intel_hfi_offline(unsigned int cpu) { }
+static inline void intel_hfi_process_event(__u64 pkg_therm_status_msr_val) { }
+#endif /* CONFIG_INTEL_HFI_THERMAL */
+
+#endif /* _INTEL_HFI_H */
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index 14256421d98c..c841ab37e7c6 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -556,12 +556,9 @@ static void end_power_clamp(void)
* stop faster.
*/
clamping = false;
- if (bitmap_weight(cpu_clamping_mask, num_possible_cpus())) {
- for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) {
- pr_debug("clamping worker for cpu %d alive, destroy\n",
- i);
- stop_power_clamp_worker(i);
- }
+ for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) {
+ pr_debug("clamping worker for cpu %d alive, destroy\n", i);
+ stop_power_clamp_worker(i);
}
}
diff --git a/drivers/thermal/intel/therm_throt.c b/drivers/thermal/intel/therm_throt.c
index dab7e8fb1059..8352083b87c7 100644
--- a/drivers/thermal/intel/therm_throt.c
+++ b/drivers/thermal/intel/therm_throt.c
@@ -32,6 +32,7 @@
#include <asm/irq.h>
#include <asm/msr.h>
+#include "intel_hfi.h"
#include "thermal_interrupt.h"
/* How long to wait between reporting thermal events */
@@ -475,6 +476,13 @@ static int thermal_throttle_online(unsigned int cpu)
INIT_DELAYED_WORK(&state->package_throttle.therm_work, throttle_active_work);
INIT_DELAYED_WORK(&state->core_throttle.therm_work, throttle_active_work);
+ /*
+ * The first CPU coming online will enable the HFI. Usually this causes
+ * hardware to issue an HFI thermal interrupt. Such interrupt will reach
+ * the CPU once we enable the thermal vector in the local APIC.
+ */
+ intel_hfi_online(cpu);
+
/* Unmask the thermal vector after the above workqueues are initialized. */
l = apic_read(APIC_LVTTHMR);
apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
@@ -492,6 +500,8 @@ static int thermal_throttle_offline(unsigned int cpu)
l = apic_read(APIC_LVTTHMR);
apic_write(APIC_LVTTHMR, l | APIC_LVT_MASKED);
+ intel_hfi_offline(cpu);
+
cancel_delayed_work_sync(&state->package_throttle.therm_work);
cancel_delayed_work_sync(&state->core_throttle.therm_work);
@@ -509,6 +519,8 @@ static __init int thermal_throttle_init_device(void)
if (!atomic_read(&therm_throt_en))
return 0;
+ intel_hfi_init();
+
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/therm:online",
thermal_throttle_online,
thermal_throttle_offline);
@@ -608,6 +620,10 @@ void intel_thermal_interrupt(void)
PACKAGE_THERM_STATUS_POWER_LIMIT,
POWER_LIMIT_EVENT,
PACKAGE_LEVEL);
+
+ if (this_cpu_has(X86_FEATURE_HFI))
+ intel_hfi_process_event(msr_val &
+ PACKAGE_THERM_STATUS_HFI_UPDATED);
}
}
@@ -717,6 +733,12 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
l | (PACKAGE_THERM_INT_LOW_ENABLE
| PACKAGE_THERM_INT_HIGH_ENABLE), h);
+
+ if (cpu_has(c, X86_FEATURE_HFI)) {
+ rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+ wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ l | PACKAGE_THERM_INT_HFI_ENABLE, h);
+ }
}
rdmsr(MSR_IA32_MISC_ENABLE, l, h);
diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c
index eafa7526eb8b..c7f91cbdccc7 100644
--- a/drivers/thermal/qcom/lmh.c
+++ b/drivers/thermal/qcom/lmh.c
@@ -28,6 +28,8 @@
#define LMH_REG_DCVS_INTR_CLR 0x8
+#define LMH_ENABLE_ALGOS 1
+
struct lmh_hw_data {
void __iomem *base;
struct irq_domain *domain;
@@ -90,6 +92,7 @@ static int lmh_probe(struct platform_device *pdev)
struct device_node *cpu_node;
struct lmh_hw_data *lmh_data;
int temp_low, temp_high, temp_arm, cpu_id, ret;
+ unsigned int enable_alg;
u32 node_id;
lmh_data = devm_kzalloc(dev, sizeof(*lmh_data), GFP_KERNEL);
@@ -141,32 +144,36 @@ static int lmh_probe(struct platform_device *pdev)
if (!qcom_scm_lmh_dcvsh_available())
return -EINVAL;
- ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_CRNT, LMH_ALGO_MODE_ENABLE, 1,
- LMH_NODE_DCVS, node_id, 0);
- if (ret)
- dev_err(dev, "Error %d enabling current subfunction\n", ret);
-
- ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_REL, LMH_ALGO_MODE_ENABLE, 1,
- LMH_NODE_DCVS, node_id, 0);
- if (ret)
- dev_err(dev, "Error %d enabling reliability subfunction\n", ret);
-
- ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_BCL, LMH_ALGO_MODE_ENABLE, 1,
- LMH_NODE_DCVS, node_id, 0);
- if (ret)
- dev_err(dev, "Error %d enabling BCL subfunction\n", ret);
-
- ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_ALGO_MODE_ENABLE, 1,
- LMH_NODE_DCVS, node_id, 0);
- if (ret) {
- dev_err(dev, "Error %d enabling thermal subfunction\n", ret);
- return ret;
- }
-
- ret = qcom_scm_lmh_profile_change(0x1);
- if (ret) {
- dev_err(dev, "Error %d changing profile\n", ret);
- return ret;
+ enable_alg = (uintptr_t)of_device_get_match_data(dev);
+
+ if (enable_alg) {
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_CRNT, LMH_ALGO_MODE_ENABLE, 1,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret)
+ dev_err(dev, "Error %d enabling current subfunction\n", ret);
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_REL, LMH_ALGO_MODE_ENABLE, 1,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret)
+ dev_err(dev, "Error %d enabling reliability subfunction\n", ret);
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_BCL, LMH_ALGO_MODE_ENABLE, 1,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret)
+ dev_err(dev, "Error %d enabling BCL subfunction\n", ret);
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_ALGO_MODE_ENABLE, 1,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret) {
+ dev_err(dev, "Error %d enabling thermal subfunction\n", ret);
+ return ret;
+ }
+
+ ret = qcom_scm_lmh_profile_change(0x1);
+ if (ret) {
+ dev_err(dev, "Error %d changing profile\n", ret);
+ return ret;
+ }
}
/* Set default thermal trips */
@@ -213,7 +220,8 @@ static int lmh_probe(struct platform_device *pdev)
}
static const struct of_device_id lmh_table[] = {
- { .compatible = "qcom,sdm845-lmh", },
+ { .compatible = "qcom,sdm845-lmh", .data = (void *)LMH_ENABLE_ALGOS},
+ { .compatible = "qcom,sm8150-lmh", },
{}
};
MODULE_DEVICE_TABLE(of, lmh_table);
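The sdm845 entry now carries LMH_ENABLE_ALGOS in .data, so only that SoC runs the qcom_scm_lmh_dcvsh() enable sequence, while sm8150, where firmware pre-enables the algorithms, skips it. The general idiom, as a hedged sketch with illustrative names:

	#include <linux/bits.h>
	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	#define FOO_NEEDS_ENABLE	BIT(0)	/* illustrative per-SoC flag */

	static const struct of_device_id foo_of_match[] = {
		{ .compatible = "vendor,foo-v1", .data = (void *)FOO_NEEDS_ENABLE },
		{ .compatible = "vendor,foo-v2" },	/* .data is NULL, flags read as 0 */
		{ }
	};

	static int foo_probe(struct platform_device *pdev)
	{
		unsigned long flags = (uintptr_t)of_device_get_match_data(&pdev->dev);

		if (flags & FOO_NEEDS_ENABLE) {
			/* only the older SoC needs the firmware enable sequence */
		}
		return 0;
	}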
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 99a8d9f3e03c..154d3cb19c88 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -18,6 +18,7 @@
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/thermal.h>
+#include "../thermal_hwmon.h"
#include "tsens.h"
/**
@@ -1060,6 +1061,10 @@ static int tsens_register(struct tsens_priv *priv)
priv->sensor[i].tzd = tzd;
if (priv->ops->enable)
priv->ops->enable(priv, i);
+
+ if (devm_thermal_add_hwmon_sysfs(tzd))
+ dev_warn(priv->dev,
+ "Failed to add hwmon sysfs attributes\n");
}
/* VER_0 require to set MIN and MAX THRESH
diff --git a/drivers/thermal/tegra/tegra-bpmp-thermal.c b/drivers/thermal/tegra/tegra-bpmp-thermal.c
index 94f1da1dcd69..5affc3d196be 100644
--- a/drivers/thermal/tegra/tegra-bpmp-thermal.c
+++ b/drivers/thermal/tegra/tegra-bpmp-thermal.c
@@ -52,6 +52,8 @@ static int tegra_bpmp_thermal_get_temp(void *data, int *out_temp)
err = tegra_bpmp_transfer(zone->tegra->bpmp, &msg);
if (err)
return err;
+ if (msg.rx.ret)
+ return -EINVAL;
*out_temp = reply.get_temp.temp;
@@ -63,6 +65,7 @@ static int tegra_bpmp_thermal_set_trips(void *data, int low, int high)
struct tegra_bpmp_thermal_zone *zone = data;
struct mrq_thermal_host_to_bpmp_request req;
struct tegra_bpmp_message msg;
+ int err;
memset(&req, 0, sizeof(req));
req.type = CMD_THERMAL_SET_TRIP;
@@ -76,7 +79,13 @@ static int tegra_bpmp_thermal_set_trips(void *data, int low, int high)
msg.tx.data = &req;
msg.tx.size = sizeof(req);
- return tegra_bpmp_transfer(zone->tegra->bpmp, &msg);
+ err = tegra_bpmp_transfer(zone->tegra->bpmp, &msg);
+ if (err)
+ return err;
+ if (msg.rx.ret)
+ return -EINVAL;
+
+ return 0;
}
static void tz_device_update_work_fn(struct work_struct *work)
@@ -140,6 +149,8 @@ static int tegra_bpmp_thermal_get_num_zones(struct tegra_bpmp *bpmp,
err = tegra_bpmp_transfer(bpmp, &msg);
if (err)
return err;
+ if (msg.rx.ret)
+ return -EINVAL;
*num_zones = reply.get_num_zones.num;
diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c
index a16dd4d5d710..32fea5174cc0 100644
--- a/drivers/thermal/thermal_netlink.c
+++ b/drivers/thermal/thermal_netlink.c
@@ -43,6 +43,11 @@ static const struct nla_policy thermal_genl_policy[THERMAL_GENL_ATTR_MAX + 1] =
[THERMAL_GENL_ATTR_CDEV_MAX_STATE] = { .type = NLA_U32 },
[THERMAL_GENL_ATTR_CDEV_NAME] = { .type = NLA_STRING,
.len = THERMAL_NAME_LENGTH },
+ /* CPU capabilities */
+ [THERMAL_GENL_ATTR_CPU_CAPABILITY] = { .type = NLA_NESTED },
+ [THERMAL_GENL_ATTR_CPU_CAPABILITY_ID] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY] = { .type = NLA_U32 },
};
struct param {
@@ -58,6 +63,8 @@ struct param {
int temp;
int cdev_state;
int cdev_max_state;
+ struct thermal_genl_cpu_caps *cpu_capabilities;
+ int cpu_capabilities_count;
};
typedef int (*cb_t)(struct param *);
@@ -190,6 +197,42 @@ static int thermal_genl_event_gov_change(struct param *p)
return 0;
}
+static int thermal_genl_event_cpu_capability_change(struct param *p)
+{
+ struct thermal_genl_cpu_caps *cpu_cap = p->cpu_capabilities;
+ struct sk_buff *msg = p->msg;
+ struct nlattr *start_cap;
+ int i;
+
+ start_cap = nla_nest_start(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY);
+ if (!start_cap)
+ return -EMSGSIZE;
+
+ for (i = 0; i < p->cpu_capabilities_count; ++i) {
+ if (nla_put_u32(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY_ID,
+ cpu_cap->cpu))
+ goto out_cancel_nest;
+
+ if (nla_put_u32(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE,
+ cpu_cap->performance))
+ goto out_cancel_nest;
+
+ if (nla_put_u32(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY,
+ cpu_cap->efficiency))
+ goto out_cancel_nest;
+
+ ++cpu_cap;
+ }
+
+ nla_nest_end(msg, start_cap);
+
+ return 0;
+out_cancel_nest:
+ nla_nest_cancel(msg, start_cap);
+
+ return -EMSGSIZE;
+}
+
int thermal_genl_event_tz_delete(struct param *p)
__attribute__((alias("thermal_genl_event_tz")));
@@ -219,6 +262,7 @@ static cb_t event_cb[] = {
[THERMAL_GENL_EVENT_CDEV_DELETE] = thermal_genl_event_cdev_delete,
[THERMAL_GENL_EVENT_CDEV_STATE_UPDATE] = thermal_genl_event_cdev_state_update,
[THERMAL_GENL_EVENT_TZ_GOV_CHANGE] = thermal_genl_event_gov_change,
+ [THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE] = thermal_genl_event_cpu_capability_change,
};
/*
@@ -356,6 +400,15 @@ int thermal_notify_tz_gov_change(int tz_id, const char *name)
return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_GOV_CHANGE, &p);
}
+int thermal_genl_cpu_capability_event(int count,
+ struct thermal_genl_cpu_caps *caps)
+{
+ struct param p = { .cpu_capabilities_count = count, .cpu_capabilities = caps };
+
+ return thermal_genl_send_event(THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE, &p);
+}
+EXPORT_SYMBOL_GPL(thermal_genl_cpu_capability_event);
+
/*************************** Command encoding ********************************/
static int __thermal_genl_cmd_tz_get_id(struct thermal_zone_device *tz,
@@ -419,11 +472,12 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p)
for (i = 0; i < tz->trips; i++) {
enum thermal_trip_type type;
- int temp, hyst;
+ int temp, hyst = 0;
tz->ops->get_trip_type(tz, i, &type);
tz->ops->get_trip_temp(tz, i, &temp);
- tz->ops->get_trip_hyst(tz, i, &hyst);
+ if (tz->ops->get_trip_hyst)
+ tz->ops->get_trip_hyst(tz, i, &hyst);
if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, i) ||
nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, type) ||
diff --git a/drivers/thermal/thermal_netlink.h b/drivers/thermal/thermal_netlink.h
index e554f76291f4..1052f523188d 100644
--- a/drivers/thermal/thermal_netlink.h
+++ b/drivers/thermal/thermal_netlink.h
@@ -4,6 +4,12 @@
* Author: Daniel Lezcano <daniel.lezcano@linaro.org>
*/
+struct thermal_genl_cpu_caps {
+ int cpu;
+ int performance;
+ int efficiency;
+};
+
/* Netlink notification function */
#ifdef CONFIG_THERMAL_NETLINK
int __init thermal_netlink_init(void);
@@ -23,6 +29,8 @@ int thermal_notify_cdev_add(int cdev_id, const char *name, int max_state);
int thermal_notify_cdev_delete(int cdev_id);
int thermal_notify_tz_gov_change(int tz_id, const char *name);
int thermal_genl_sampling_temp(int id, int temp);
+int thermal_genl_cpu_capability_event(int count,
+ struct thermal_genl_cpu_caps *caps);
#else
static inline int thermal_netlink_init(void)
{
@@ -101,4 +109,10 @@ static inline int thermal_genl_sampling_temp(int id, int temp)
{
return 0;
}
+
+static inline int thermal_genl_cpu_capability_event(int count, struct thermal_genl_cpu_caps *caps)
+{
+ return 0;
+}
+
#endif /* CONFIG_THERMAL_NETLINK */
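The header keeps callers such as the HFI driver buildable when CONFIG_THERMAL_NETLINK is disabled by pairing each real function with a static inline no-op. The idiom in its generic form (CONFIG_FOO and foo_notify() are illustrative, not from this header):

	#ifdef CONFIG_FOO
	int foo_notify(int id);		/* real implementation lives in foo.c */
	#else
	static inline int foo_notify(int id)
	{
		return 0;		/* compiles away entirely when CONFIG_FOO=n */
	}
	#endif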
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index f84375865c97..703039d8b937 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -21,6 +21,7 @@
#include "ti-thermal.h"
#include "ti-bandgap.h"
+#include "../thermal_hwmon.h"
/* common data structures */
struct ti_thermal_data {
@@ -106,14 +107,6 @@ static inline int __ti_thermal_get_temp(void *devdata, int *temp)
return ret;
}
-static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal,
- int *temp)
-{
- struct ti_thermal_data *data = thermal->devdata;
-
- return __ti_thermal_get_temp(data, temp);
-}
-
static int __ti_thermal_get_trend(void *p, int trip, enum thermal_trend *trend)
{
struct ti_thermal_data *data = p;
@@ -189,6 +182,9 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id,
ti_bandgap_set_sensor_data(bgp, id, data);
ti_bandgap_write_update_interval(bgp, data->sensor_id, interval);
+ if (devm_thermal_add_hwmon_sysfs(data->ti_thermal))
+ dev_warn(bgp->dev, "failed to add hwmon sysfs attributes\n");
+
return 0;
}
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index 3c92d4e01488..516cff362434 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -805,7 +805,7 @@ static int max3100_probe(struct spi_device *spi)
return 0;
}
-static int max3100_remove(struct spi_device *spi)
+static void max3100_remove(struct spi_device *spi)
{
struct max3100_port *s = spi_get_drvdata(spi);
int i;
@@ -828,13 +828,12 @@ static int max3100_remove(struct spi_device *spi)
for (i = 0; i < MAX_MAX3100; i++)
if (max3100s[i]) {
mutex_unlock(&max3100s_lock);
- return 0;
+ return;
}
pr_debug("removing max3100 driver\n");
uart_unregister_driver(&max3100_uart_driver);
mutex_unlock(&max3100s_lock);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
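This hunk, like the max310x, sc16is7xx, max3420 and max3421 hunks below, is part of the tree-wide conversion of spi_driver::remove to return void; the SPI core ignored the return value anyway, so remove callbacks now clean up unconditionally. A minimal sketch of the converted shape (driver names are illustrative):

	#include <linux/module.h>
	#include <linux/spi/spi.h>

	static int mydrv_probe(struct spi_device *spi)
	{
		return 0;
	}

	static void mydrv_remove(struct spi_device *spi)	/* previously returned int */
	{
		/* Tear down unconditionally; there is no error to report anymore. */
	}

	static struct spi_driver mydrv_driver = {
		.driver = { .name = "mydrv" },
		.probe = mydrv_probe,
		.remove = mydrv_remove,
	};
	module_spi_driver(mydrv_driver);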
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index dde0824b2fa5..3112b4a05448 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1487,10 +1487,9 @@ static int max310x_spi_probe(struct spi_device *spi)
return max310x_probe(&spi->dev, devtype, regmap, spi->irq);
}
-static int max310x_spi_remove(struct spi_device *spi)
+static void max310x_spi_remove(struct spi_device *spi)
{
max310x_remove(&spi->dev);
- return 0;
}
static const struct spi_device_id max310x_id_table[] = {
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 38d1c0748533..3a6c68e19c80 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1443,11 +1443,9 @@ static int sc16is7xx_spi_probe(struct spi_device *spi)
return sc16is7xx_probe(&spi->dev, devtype, regmap, spi->irq);
}
-static int sc16is7xx_spi_remove(struct spi_device *spi)
+static void sc16is7xx_spi_remove(struct spi_device *spi)
{
sc16is7xx_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id sc16is7xx_spi_id_table[] = {
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 7e8b3bd59c7b..8fec1d8648f5 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -3088,7 +3088,7 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
{
struct tty_struct *tty;
- tty = kzalloc(sizeof(*tty), GFP_KERNEL);
+ tty = kzalloc(sizeof(*tty), GFP_KERNEL_ACCOUNT);
if (!tty)
return NULL;
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 73f419adce61..4bb6d304eb4b 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1919,6 +1919,7 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data,
struct usbtmc_ctrlrequest request;
u8 *buffer = NULL;
int rv;
+ unsigned int is_in, pipe;
unsigned long res;
res = copy_from_user(&request, arg, sizeof(struct usbtmc_ctrlrequest));
@@ -1928,12 +1929,14 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data,
if (request.req.wLength > USBTMC_BUFSIZE)
return -EMSGSIZE;
+ is_in = request.req.bRequestType & USB_DIR_IN;
+
if (request.req.wLength) {
buffer = kmalloc(request.req.wLength, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
- if ((request.req.bRequestType & USB_DIR_IN) == 0) {
+ if (!is_in) {
/* Send control data to device */
res = copy_from_user(buffer, request.data,
request.req.wLength);
@@ -1944,8 +1947,12 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data,
}
}
+ if (is_in)
+ pipe = usb_rcvctrlpipe(data->usb_dev, 0);
+ else
+ pipe = usb_sndctrlpipe(data->usb_dev, 0);
rv = usb_control_msg(data->usb_dev,
- usb_rcvctrlpipe(data->usb_dev, 0),
+ pipe,
request.req.bRequest,
request.req.bRequestType,
request.req.wValue,
@@ -1957,7 +1964,7 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data,
goto exit;
}
- if (rv && (request.req.bRequestType & USB_DIR_IN)) {
+ if (rv && is_in) {
/* Read control data from device */
res = copy_to_user(request.data, buffer, rv);
if (res)
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index d630cccd2e6e..dd44e37a454a 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -446,7 +446,7 @@ static int suspend_common(struct device *dev, bool do_wakeup)
HCD_WAKEUP_PENDING(hcd->shared_hcd))
return -EBUSY;
retval = hcd->driver->pci_suspend(hcd, do_wakeup);
- suspend_report_result(hcd->driver->pci_suspend, retval);
+ suspend_report_result(dev, hcd->driver->pci_suspend, retval);
/* Check again in case wakeup raced with pci_suspend */
if ((retval == 0 && do_wakeup && HCD_WAKEUP_PENDING(hcd)) ||
@@ -556,7 +556,7 @@ static int hcd_pci_suspend_noirq(struct device *dev)
dev_dbg(dev, "--> PCI %s\n",
pci_power_name(pci_dev->current_state));
} else {
- suspend_report_result(pci_prepare_to_sleep, retval);
+ suspend_report_result(dev, pci_prepare_to_sleep, retval);
return retval;
}
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index dd58094f0b85..4fa2ddf322b4 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -448,7 +448,7 @@ config USB_CONFIGFS_F_HID
config USB_CONFIGFS_F_UVC
bool "USB Webcam function"
depends on USB_CONFIGFS
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
depends on VIDEO_DEV
select VIDEOBUF2_DMA_SG
select VIDEOBUF2_VMALLOC
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 46dd11dcb3a8..7371c6e65b10 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -179,6 +179,7 @@
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/limits.h>
+#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index 00b3f6b3bb31..713efd9aefde 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -640,6 +640,7 @@ static int rndis_set_response(struct rndis_params *params,
BufLength = le32_to_cpu(buf->InformationBufferLength);
BufOffset = le32_to_cpu(buf->InformationBufferOffset);
if ((BufLength > RNDIS_MAX_TOTAL_SIZE) ||
+ (BufOffset > RNDIS_MAX_TOTAL_SIZE) ||
(BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE))
return -EINVAL;
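The new BufOffset bound matters because the pre-existing `BufOffset + 8` comparison is 32-bit arithmetic and can wrap for hostile offsets. A standalone demonstration (the bound is a stand-in, not the real RNDIS_MAX_TOTAL_SIZE):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t max = 0x10000;		/* stand-in bound */
		uint32_t off = 0xfffffffc;	/* hostile offset */

		/* Old check alone: off + 8 wraps to 4, "4 >= max" is false -> accepted. */
		printf("wrapped sum: %u\n", off + 8u);

		/* The added check rejects it before the addition can wrap. */
		if (off > max)
			printf("rejected by the new bounds check\n");
		return 0;
	}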
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index de6668e58481..0a7b382fbe27 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -500,7 +500,7 @@ endif
# or video class gadget drivers), or specific hardware, here.
config USB_G_WEBCAM
tristate "USB Webcam Gadget"
- depends on VIDEO_V4L2
+ depends on VIDEO_DEV
select USB_LIBCOMPOSITE
select VIDEOBUF2_DMA_SG
select VIDEOBUF2_VMALLOC
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 568534a0d17c..c109b069f511 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1436,7 +1436,6 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
usb_gadget_udc_stop(udc);
udc->driver = NULL;
- udc->dev.driver = NULL;
udc->gadget->dev.driver = NULL;
}
@@ -1498,7 +1497,6 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri
driver->function);
udc->driver = driver;
- udc->dev.driver = &driver->driver;
udc->gadget->dev.driver = &driver->driver;
usb_gadget_udc_set_speed(udc, driver->max_speed);
@@ -1521,7 +1519,6 @@ err1:
dev_err(&udc->dev, "failed to start %s: %d\n",
udc->driver->function, ret);
udc->driver = NULL;
- udc->dev.driver = NULL;
udc->gadget->dev.driver = NULL;
return ret;
}
diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c
index d2a2b20cc1ad..7d9bd16190c0 100644
--- a/drivers/usb/gadget/udc/max3420_udc.c
+++ b/drivers/usb/gadget/udc/max3420_udc.c
@@ -1292,7 +1292,7 @@ del_gadget:
return err;
}
-static int max3420_remove(struct spi_device *spi)
+static void max3420_remove(struct spi_device *spi)
{
struct max3420_udc *udc = spi_get_drvdata(spi);
unsigned long flags;
@@ -1304,8 +1304,6 @@ static int max3420_remove(struct spi_device *spi)
kthread_stop(udc->thread_task);
spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
}
static const struct of_device_id max3420_udc_of_match[] = {
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index 30de85a707fe..99a5523a79fb 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -1926,7 +1926,7 @@ error:
return retval;
}
-static int
+static void
max3421_remove(struct spi_device *spi)
{
struct max3421_hcd *max3421_hcd;
@@ -1947,7 +1947,6 @@ max3421_remove(struct spi_device *spi)
free_irq(spi->irq, hcd);
usb_put_hcd(hcd);
- return 0;
}
static const struct of_device_id max3421_of_match_table[] = {
diff --git a/drivers/usb/host/xen-hcd.c b/drivers/usb/host/xen-hcd.c
index be09fd9bac58..19b8c7ed74cb 100644
--- a/drivers/usb/host/xen-hcd.c
+++ b/drivers/usb/host/xen-hcd.c
@@ -716,8 +716,9 @@ static int xenhcd_map_urb_for_request(struct xenhcd_info *info, struct urb *urb,
return 0;
}
-static void xenhcd_gnttab_done(struct usb_shadow *shadow)
+static void xenhcd_gnttab_done(struct xenhcd_info *info, unsigned int id)
{
+ struct usb_shadow *shadow = info->shadow + id;
int nr_segs = 0;
int i;
@@ -726,8 +727,10 @@ static void xenhcd_gnttab_done(struct usb_shadow *shadow)
if (xenusb_pipeisoc(shadow->req.pipe))
nr_segs += shadow->req.u.isoc.nr_frame_desc_segs;
- for (i = 0; i < nr_segs; i++)
- gnttab_end_foreign_access(shadow->req.seg[i].gref, 0, 0UL);
+ for (i = 0; i < nr_segs; i++) {
+ if (!gnttab_try_end_foreign_access(shadow->req.seg[i].gref))
+ xenhcd_set_error(info, "backend didn't release grant");
+ }
shadow->req.nr_buffer_segs = 0;
shadow->req.u.isoc.nr_frame_desc_segs = 0;
@@ -841,7 +844,9 @@ static void xenhcd_cancel_all_enqueued_urbs(struct xenhcd_info *info)
list_for_each_entry_safe(urbp, tmp, &info->in_progress_list, list) {
req_id = urbp->req_id;
if (!urbp->unlinked) {
- xenhcd_gnttab_done(&info->shadow[req_id]);
+ xenhcd_gnttab_done(info, req_id);
+ if (info->error)
+ return;
if (urbp->urb->status == -EINPROGRESS)
/* not dequeued */
xenhcd_giveback_urb(info, urbp->urb,
@@ -942,8 +947,7 @@ static int xenhcd_urb_request_done(struct xenhcd_info *info)
rp = info->urb_ring.sring->rsp_prod;
if (RING_RESPONSE_PROD_OVERFLOW(&info->urb_ring, rp)) {
xenhcd_set_error(info, "Illegal index on urb-ring");
- spin_unlock_irqrestore(&info->lock, flags);
- return 0;
+ goto err;
}
rmb(); /* ensure we see queued responses up to "rp" */
@@ -952,11 +956,13 @@ static int xenhcd_urb_request_done(struct xenhcd_info *info)
id = res.id;
if (id >= XENUSB_URB_RING_SIZE) {
xenhcd_set_error(info, "Illegal data on urb-ring");
- continue;
+ goto err;
}
if (likely(xenusb_pipesubmit(info->shadow[id].req.pipe))) {
- xenhcd_gnttab_done(&info->shadow[id]);
+ xenhcd_gnttab_done(info, id);
+ if (info->error)
+ goto err;
urb = info->shadow[id].urb;
if (likely(urb)) {
urb->actual_length = res.actual_length;
@@ -978,6 +984,10 @@ static int xenhcd_urb_request_done(struct xenhcd_info *info)
spin_unlock_irqrestore(&info->lock, flags);
return more_to_do;
+
+ err:
+ spin_unlock_irqrestore(&info->lock, flags);
+ return 0;
}
static int xenhcd_conn_notify(struct xenhcd_info *info)
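The rework funnels every failure path in xenhcd_urb_request_done() through a single unlock-and-return site. The same goto shape, reduced to a runnable toy with a pthread mutex standing in for the spinlock:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* Every failure path bails through one unlock site instead of
	 * duplicating the unlock before each early return.
	 */
	static int process(int bad_index)
	{
		pthread_mutex_lock(&lock);

		if (bad_index)
			goto err;	/* illegal data: unlock once, below */

		pthread_mutex_unlock(&lock);
		return 1;		/* more to do */

	err:
		pthread_mutex_unlock(&lock);
		return 0;
	}

	int main(void)
	{
		printf("%d %d\n", process(0), process(1));	/* prints: 1 0 */
		return 0;
	}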
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 7d4d0713f4f0..d2b7e613eb34 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -327,7 +327,6 @@ static int omap2430_probe(struct platform_device *pdev)
musb->dev.parent = &pdev->dev;
musb->dev.dma_mask = &omap2430_dmamask;
musb->dev.coherent_dma_mask = omap2430_dmamask;
- device_set_of_node_from_dev(&musb->dev, &pdev->dev);
glue->dev = &pdev->dev;
glue->musb = musb;
diff --git a/drivers/usb/typec/port-mapper.c b/drivers/usb/typec/port-mapper.c
index a7d507802509..a929e000d0e2 100644
--- a/drivers/usb/typec/port-mapper.c
+++ b/drivers/usb/typec/port-mapper.c
@@ -59,7 +59,7 @@ int typec_link_ports(struct typec_port *con)
if (!has_acpi_companion(&con->dev))
return 0;
- bus_for_each_dev(&acpi_bus_type, NULL, &arg, typec_port_match);
+ acpi_bus_for_each_dev(typec_port_match, &arg);
if (!arg.match)
return 0;
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index f648f1c54a0f..d0f91078600e 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1563,11 +1563,27 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
switch (cmd) {
case VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET:
+ /* This mq feature check aligns with pre-existing userspace
+ * implementation.
+ *
+ * Without it, an untrusted driver could fake a multiqueue config
+ * request down to a non-mq device, which may cause the kernel to
+ * panic due to uninitialized resources for extra vqs. Even with
+ * a well behaving guest driver, it is not expected to allow
+ * changing the number of vqs on a non-mq device.
+ */
+ if (!MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ))
+ break;
+
read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)&mq, sizeof(mq));
if (read != sizeof(mq))
break;
newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs);
+ if (newqps < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
+ newqps > mlx5_vdpa_max_qps(mvdev->max_vqs))
+ break;
+
if (ndev->cur_num_vqs == 2 * newqps) {
status = VIRTIO_NET_OK;
break;
@@ -1897,11 +1913,25 @@ static u64 mlx5_vdpa_get_device_features(struct vdpa_device *vdev)
return ndev->mvdev.mlx_features;
}
-static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features)
+static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features)
{
+ /* Minimum features to expect */
if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)))
return -EOPNOTSUPP;
+ /* Double check features combination sent down by the driver.
+ * Fail invalid feature combinations when a required dependency is absent.
+ *
+ * Per VIRTIO v1.1 specification, section 5.1.3.1 Feature bit
+ * requirements: "VIRTIO_NET_F_MQ Requires VIRTIO_NET_F_CTRL_VQ".
+ * By failing the invalid features sent down by untrusted drivers,
+ * we ensure that the assumptions made by is_index_valid() and
+ * is_ctrl_vq_idx() are not compromised.
+ */
+ if ((features & (BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) ==
+ BIT_ULL(VIRTIO_NET_F_MQ))
+ return -EINVAL;
+
return 0;
}
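The mask trick reads: of the two bits, the only rejected combination is MQ set with CTRL_VQ clear. A standalone check with stand-in bit positions (the real values come from the VIRTIO headers):

	#include <stdint.h>
	#include <stdio.h>

	#define F_CTRL_VQ	(1ULL << 17)	/* stand-ins for the feature bits */
	#define F_MQ		(1ULL << 22)

	/* MQ requires CTRL_VQ: same shape as verify_driver_features() above. */
	static int valid(uint64_t features)
	{
		return (features & (F_MQ | F_CTRL_VQ)) != F_MQ;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       valid(F_MQ | F_CTRL_VQ),	/* 1: both set */
		       valid(F_CTRL_VQ),	/* 1: MQ absent */
		       valid(F_MQ));		/* 0: dependency violated */
		return 0;
	}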
@@ -1977,7 +2007,7 @@ static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features)
print_features(mvdev, features, true);
- err = verify_min_features(mvdev, features);
+ err = verify_driver_features(mvdev, features);
if (err)
return err;
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 9846c9de4bfa..1ea525433a5c 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -393,7 +393,7 @@ static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
* If it does happen we assume a legacy guest.
*/
if (!vdev->features_valid)
- vdpa_set_features(vdev, 0, true);
+ vdpa_set_features_unlocked(vdev, 0);
ops->get_config(vdev, offset, buf, len);
}
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 2b1143f11d8f..0a4d93edc4c0 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -294,7 +294,7 @@ vduse_domain_alloc_iova(struct iova_domain *iovad,
iova_pfn = alloc_iova_fast(iovad, iova_len, limit >> shift, true);
- return iova_pfn << shift;
+ return (dma_addr_t)iova_pfn << shift;
}
static void vduse_domain_free_iova(struct iova_domain *iovad,
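Without the cast, `iova_pfn << shift` is evaluated in unsigned long, which is 32 bits on 32-bit builds, silently dropping the high bits of the returned DMA address. A standalone reproduction using explicit 32-bit arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t iova_pfn = 0x00200000;	/* PFN that lands above 4GB once shifted */
		unsigned int shift = 12;	/* 4KB iova granule */

		uint32_t truncated = iova_pfn << shift;		/* high bits lost */
		uint64_t correct = (uint64_t)iova_pfn << shift;	/* widen first, as the fix does */

		printf("truncated=0x%x correct=0x%llx\n",
		       truncated, (unsigned long long)correct);	/* 0x0 vs 0x200000000 */
		return 0;
	}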
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index a57e381e830b..cce101e6a940 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -533,8 +533,8 @@ static void vp_vdpa_remove(struct pci_dev *pdev)
{
struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev);
- vdpa_unregister_device(&vp_vdpa->vdpa);
vp_modern_remove(&vp_vdpa->mdev);
+ vdpa_unregister_device(&vp_vdpa->vdpa);
}
static struct pci_driver vp_vdpa_driver = {
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 860424ccda1b..4da1914425e1 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -43,4 +43,9 @@ config VFIO_PCI_IGD
To enable Intel IGD assignment through vfio-pci, say Y.
endif
+
+source "drivers/vfio/pci/mlx5/Kconfig"
+
+source "drivers/vfio/pci/hisilicon/Kconfig"
+
endif
diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile
index 349d68d242b4..7052ebd893e0 100644
--- a/drivers/vfio/pci/Makefile
+++ b/drivers/vfio/pci/Makefile
@@ -7,3 +7,7 @@ obj-$(CONFIG_VFIO_PCI_CORE) += vfio-pci-core.o
vfio-pci-y := vfio_pci.o
vfio-pci-$(CONFIG_VFIO_PCI_IGD) += vfio_pci_igd.o
obj-$(CONFIG_VFIO_PCI) += vfio-pci.o
+
+obj-$(CONFIG_MLX5_VFIO_PCI) += mlx5/
+
+obj-$(CONFIG_HISI_ACC_VFIO_PCI) += hisilicon/
diff --git a/drivers/vfio/pci/hisilicon/Kconfig b/drivers/vfio/pci/hisilicon/Kconfig
new file mode 100644
index 000000000000..5daa0f45d2f9
--- /dev/null
+++ b/drivers/vfio/pci/hisilicon/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config HISI_ACC_VFIO_PCI
+ tristate "VFIO PCI support for HiSilicon ACC devices"
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on VFIO_PCI_CORE
+ depends on PCI_MSI
+ depends on CRYPTO_DEV_HISI_QM
+ depends on CRYPTO_DEV_HISI_HPRE
+ depends on CRYPTO_DEV_HISI_SEC2
+ depends on CRYPTO_DEV_HISI_ZIP
+ help
+ This provides generic PCI support for HiSilicon ACC devices
+ using the VFIO framework.
+
+ If you don't know what to do here, say N.
diff --git a/drivers/vfio/pci/hisilicon/Makefile b/drivers/vfio/pci/hisilicon/Makefile
new file mode 100644
index 000000000000..c66b3783f2f9
--- /dev/null
+++ b/drivers/vfio/pci/hisilicon/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_HISI_ACC_VFIO_PCI) += hisi-acc-vfio-pci.o
+hisi-acc-vfio-pci-y := hisi_acc_vfio_pci.o
+
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
new file mode 100644
index 000000000000..767b5d47631a
--- /dev/null
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -0,0 +1,1326 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, HiSilicon Ltd.
+ */
+
+#include <linux/device.h>
+#include <linux/eventfd.h>
+#include <linux/file.h>
+#include <linux/hisi_acc_qm.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/vfio.h>
+#include <linux/vfio_pci_core.h>
+#include <linux/anon_inodes.h>
+
+#include "hisi_acc_vfio_pci.h"
+
+/* Return 0 when the VM acc device is ready, -ETIMEDOUT on hardware timeout */
+static int qm_wait_dev_not_ready(struct hisi_qm *qm)
+{
+ u32 val;
+
+ return readl_relaxed_poll_timeout(qm->io_base + QM_VF_STATE,
+ val, !(val & 0x1), MB_POLL_PERIOD_US,
+ MB_POLL_TIMEOUT_US);
+}
+
+/*
+ * Each state register is checked 100 times,
+ * with a delay of 100 microseconds after each check.
+ */
+static u32 qm_check_reg_state(struct hisi_qm *qm, u32 regs)
+{
+ int check_times = 0;
+ u32 state;
+
+ state = readl(qm->io_base + regs);
+ while (state && check_times < ERROR_CHECK_TIMEOUT) {
+ udelay(CHECK_DELAY_TIME);
+ state = readl(qm->io_base + regs);
+ check_times++;
+ }
+
+ return state;
+}
+
+static int qm_read_regs(struct hisi_qm *qm, u32 reg_addr,
+ u32 *data, u8 nums)
+{
+ int i;
+
+ if (nums < 1 || nums > QM_REGS_MAX_LEN)
+ return -EINVAL;
+
+ for (i = 0; i < nums; i++) {
+ data[i] = readl(qm->io_base + reg_addr);
+ reg_addr += QM_REG_ADDR_OFFSET;
+ }
+
+ return 0;
+}
+
+static int qm_write_regs(struct hisi_qm *qm, u32 reg,
+ u32 *data, u8 nums)
+{
+ int i;
+
+ if (nums < 1 || nums > QM_REGS_MAX_LEN)
+ return -EINVAL;
+
+ for (i = 0; i < nums; i++)
+ writel(data[i], qm->io_base + reg + i * QM_REG_ADDR_OFFSET);
+
+ return 0;
+}
+
+static int qm_get_vft(struct hisi_qm *qm, u32 *base)
+{
+ u64 sqc_vft;
+ u32 qp_num;
+ int ret;
+
+ ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
+ if (ret)
+ return ret;
+
+ sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
+ ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
+ QM_XQC_ADDR_OFFSET);
+ *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
+ qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
+ (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
+
+ return qp_num;
+}
+
+static int qm_get_sqc(struct hisi_qm *qm, u64 *addr)
+{
+ int ret;
+
+ ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, 0, 0, 1);
+ if (ret)
+ return ret;
+
+ *addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
+ ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
+ QM_XQC_ADDR_OFFSET);
+
+ return 0;
+}
+
+static int qm_get_cqc(struct hisi_qm *qm, u64 *addr)
+{
+ int ret;
+
+ ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, 0, 0, 1);
+ if (ret)
+ return ret;
+
+ *addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
+ ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
+ QM_XQC_ADDR_OFFSET);
+
+ return 0;
+}
+
+static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
+{
+ struct device *dev = &qm->pdev->dev;
+ int ret;
+
+ ret = qm_read_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
+ if (ret) {
+ dev_err(dev, "failed to read QM_VF_AEQ_INT_MASK\n");
+ return ret;
+ }
+
+ ret = qm_read_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
+ if (ret) {
+ dev_err(dev, "failed to read QM_VF_EQ_INT_MASK\n");
+ return ret;
+ }
+
+ ret = qm_read_regs(qm, QM_IFC_INT_SOURCE_V,
+ &vf_data->ifc_int_source, 1);
+ if (ret) {
+ dev_err(dev, "failed to read QM_IFC_INT_SOURCE_V\n");
+ return ret;
+ }
+
+ ret = qm_read_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
+ if (ret) {
+ dev_err(dev, "failed to read QM_IFC_INT_MASK\n");
+ return ret;
+ }
+
+ ret = qm_read_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
+ if (ret) {
+ dev_err(dev, "failed to read QM_IFC_INT_SET_V\n");
+ return ret;
+ }
+
+ ret = qm_read_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
+ if (ret) {
+ dev_err(dev, "failed to read QM_PAGE_SIZE\n");
+ return ret;
+ }
+
+ /* QM_EQC_DW has 7 regs */
+ ret = qm_read_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
+ if (ret) {
+ dev_err(dev, "failed to read QM_EQC_DW\n");
+ return ret;
+ }
+
+ /* QM_AEQC_DW has 7 regs */
+ ret = qm_read_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
+ if (ret) {
+ dev_err(dev, "failed to read QM_AEQC_DW\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
+{
+ struct device *dev = &qm->pdev->dev;
+ int ret;
+
+ /* check VF state */
+ if (unlikely(hisi_qm_wait_mb_ready(qm))) {
+ dev_err(&qm->pdev->dev, "QM device is not ready to write\n");
+ return -EBUSY;
+ }
+
+ ret = qm_write_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
+ if (ret) {
+ dev_err(dev, "failed to write QM_VF_AEQ_INT_MASK\n");
+ return ret;
+ }
+
+ ret = qm_write_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
+ if (ret) {
+ dev_err(dev, "failed to write QM_VF_EQ_INT_MASK\n");
+ return ret;
+ }
+
+ ret = qm_write_regs(qm, QM_IFC_INT_SOURCE_V,
+ &vf_data->ifc_int_source, 1);
+ if (ret) {
+ dev_err(dev, "failed to write QM_IFC_INT_SOURCE_V\n");
+ return ret;
+ }
+
+ ret = qm_write_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
+ if (ret) {
+ dev_err(dev, "failed to write QM_IFC_INT_MASK\n");
+ return ret;
+ }
+
+ ret = qm_write_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
+ if (ret) {
+ dev_err(dev, "failed to write QM_IFC_INT_SET_V\n");
+ return ret;
+ }
+
+ ret = qm_write_regs(qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
+ if (ret) {
+ dev_err(dev, "failed to write QM_QUE_ISO_CFG_V\n");
+ return ret;
+ }
+
+ ret = qm_write_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
+ if (ret) {
+ dev_err(dev, "failed to write QM_PAGE_SIZE\n");
+ return ret;
+ }
+
+ /* QM_EQC_DW has 7 regs */
+ ret = qm_write_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
+ if (ret) {
+ dev_err(dev, "failed to write QM_EQC_DW\n");
+ return ret;
+ }
+
+ /* QM_AEQC_DW has 7 regs */
+ ret = qm_write_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
+ if (ret) {
+ dev_err(dev, "failed to write QM_AEQC_DW\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd,
+ u16 index, u8 priority)
+{
+ u64 doorbell;
+ u64 dbase;
+ u16 randata = 0;
+
+ if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
+ dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
+ else
+ dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;
+
+ doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
+ ((u64)randata << QM_DB_RAND_SHIFT_V2) |
+ ((u64)index << QM_DB_INDEX_SHIFT_V2) |
+ ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
+
+ writeq(doorbell, qm->io_base + dbase);
+}
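qm_db() packs the queue number, command, random data, index and priority into one 64-bit doorbell write; each operand is widened to u64 before shifting so the high fields are not truncated. The packing, reduced to a standalone sketch with stand-in shift values (the real QM_DB_*_SHIFT_V2 constants come from the hisi_acc headers):

	#include <stdint.h>
	#include <stdio.h>

	#define CMD_SHIFT	12	/* stand-in shift positions */
	#define INDEX_SHIFT	32
	#define PRIO_SHIFT	48

	int main(void)
	{
		uint16_t qn = 3, index = 7;
		uint8_t cmd = 1, priority = 1;

		/* Widen before shifting, as qm_db() does with its (u64) casts. */
		uint64_t doorbell = qn |
				    ((uint64_t)cmd << CMD_SHIFT) |
				    ((uint64_t)index << INDEX_SHIFT) |
				    ((uint64_t)priority << PRIO_SHIFT);

		printf("doorbell=0x%016llx\n", (unsigned long long)doorbell);
		return 0;
	}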
+
+static int pf_qm_get_qp_num(struct hisi_qm *qm, int vf_id, u32 *rbase)
+{
+ unsigned int val;
+ u64 sqc_vft;
+ u32 qp_num;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
+ val & BIT(0), MB_POLL_PERIOD_US,
+ MB_POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
+ /* 0 means SQC VFT */
+ writel(0x0, qm->io_base + QM_VFT_CFG_TYPE);
+ writel(vf_id, qm->io_base + QM_VFT_CFG);
+
+ writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
+ writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
+ val & BIT(0), MB_POLL_PERIOD_US,
+ MB_POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ sqc_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
+ ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) <<
+ QM_XQC_ADDR_OFFSET);
+ *rbase = QM_SQC_VFT_BASE_MASK_V2 &
+ (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
+ qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
+ (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
+
+ return qp_num;
+}
+
+static void qm_dev_cmd_init(struct hisi_qm *qm)
+{
+ /* Clear VF communication status registers. */
+ writel(0x1, qm->io_base + QM_IFC_INT_SOURCE_V);
+
+ /* Enable PF and VF communication. */
+ writel(0x0, qm->io_base + QM_IFC_INT_MASK);
+}
+
+static int vf_qm_cache_wb(struct hisi_qm *qm)
+{
+ unsigned int val;
+
+ writel(0x1, qm->io_base + QM_CACHE_WB_START);
+ if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
+ val, val & BIT(0), MB_POLL_PERIOD_US,
+ MB_POLL_TIMEOUT_US)) {
+ dev_err(&qm->pdev->dev, "vf QM writeback sqc cache fail\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void vf_qm_fun_reset(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ struct hisi_qm *qm)
+{
+ int i;
+
+ for (i = 0; i < qm->qp_num; i++)
+ qm_db(qm, i, QM_DOORBELL_CMD_SQ, 0, 1);
+}
+
+static int vf_qm_func_stop(struct hisi_qm *qm)
+{
+ return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
+}
+
+static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ struct hisi_acc_vf_migration_file *migf)
+{
+ struct acc_vf_data *vf_data = &migf->vf_data;
+ struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
+ struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
+ struct device *dev = &vf_qm->pdev->dev;
+ u32 que_iso_state;
+ int ret;
+
+ if (migf->total_length < QM_MATCH_SIZE)
+ return -EINVAL;
+
+ if (vf_data->acc_magic != ACC_DEV_MAGIC) {
+ dev_err(dev, "failed to match ACC_DEV_MAGIC\n");
+ return -EINVAL;
+ }
+
+ if (vf_data->dev_id != hisi_acc_vdev->vf_dev->device) {
+ dev_err(dev, "failed to match VF devices\n");
+ return -EINVAL;
+ }
+
+ /* vf qp num check */
+ ret = qm_get_vft(vf_qm, &vf_qm->qp_base);
+ if (ret <= 0) {
+ dev_err(dev, "failed to get vft qp nums\n");
+ return -EINVAL;
+ }
+
+ if (ret != vf_data->qp_num) {
+ dev_err(dev, "failed to match VF qp num\n");
+ return -EINVAL;
+ }
+
+ vf_qm->qp_num = ret;
+
+ /* vf isolation state check */
+ ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &que_iso_state, 1);
+ if (ret) {
+ dev_err(dev, "failed to read QM_QUE_ISO_CFG_V\n");
+ return ret;
+ }
+
+ if (vf_data->que_iso_cfg != que_iso_state) {
+ dev_err(dev, "failed to match isolation state\n");
+ return ret;
+ }
+
+ ret = qm_write_regs(vf_qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
+ if (ret) {
+ dev_err(dev, "failed to write QM_VF_STATE\n");
+ return ret;
+ }
+
+ hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
+ return 0;
+}
+
+static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ struct acc_vf_data *vf_data)
+{
+ struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
+ struct device *dev = &pf_qm->pdev->dev;
+ int vf_id = hisi_acc_vdev->vf_id;
+ int ret;
+
+ vf_data->acc_magic = ACC_DEV_MAGIC;
+ /* save device id */
+ vf_data->dev_id = hisi_acc_vdev->vf_dev->device;
+
+ /* Save the VF qp num obtained from the PF */
+ ret = pf_qm_get_qp_num(pf_qm, vf_id, &vf_data->qp_base);
+ if (ret <= 0) {
+ dev_err(dev, "failed to get vft qp nums!\n");
+ return -EINVAL;
+ }
+
+ vf_data->qp_num = ret;
+
+ /* Save the VF isolation state obtained from the PF */
+ ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
+ if (ret) {
+ dev_err(dev, "failed to read QM_QUE_ISO_CFG_V!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ struct hisi_acc_vf_migration_file *migf)
+{
+ struct hisi_qm *qm = &hisi_acc_vdev->vf_qm;
+ struct device *dev = &qm->pdev->dev;
+ struct acc_vf_data *vf_data = &migf->vf_data;
+ int ret;
+
+ /* Return if only match data was transferred */
+ if (migf->total_length == QM_MATCH_SIZE)
+ return 0;
+
+ if (migf->total_length < sizeof(struct acc_vf_data))
+ return -EINVAL;
+
+ qm->eqe_dma = vf_data->eqe_dma;
+ qm->aeqe_dma = vf_data->aeqe_dma;
+ qm->sqc_dma = vf_data->sqc_dma;
+ qm->cqc_dma = vf_data->cqc_dma;
+
+ qm->qp_base = vf_data->qp_base;
+ qm->qp_num = vf_data->qp_num;
+
+ ret = qm_set_regs(qm, vf_data);
+ if (ret) {
+ dev_err(dev, "Set VF regs failed\n");
+ return ret;
+ }
+
+ ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
+ if (ret) {
+ dev_err(dev, "Set sqc failed\n");
+ return ret;
+ }
+
+ ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
+ if (ret) {
+ dev_err(dev, "Set cqc failed\n");
+ return ret;
+ }
+
+ qm_dev_cmd_init(qm);
+ return 0;
+}
+
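+/*
+ * Save the VF QM state into the migration file: the match data first, then
+ * the register set and queue context addresses. If the device is not ready,
+ * only the match data is saved.
+ */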
+static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ struct hisi_acc_vf_migration_file *migf)
+{
+ struct acc_vf_data *vf_data = &migf->vf_data;
+ struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
+ struct device *dev = &vf_qm->pdev->dev;
+ int ret;
+
+ ret = vf_qm_get_match_data(hisi_acc_vdev, vf_data);
+ if (ret)
+ return ret;
+
+ if (unlikely(qm_wait_dev_not_ready(vf_qm))) {
+ /* Update state and return with match data */
+ vf_data->vf_qm_state = QM_NOT_READY;
+ hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
+ migf->total_length = QM_MATCH_SIZE;
+ return 0;
+ }
+
+ vf_data->vf_qm_state = QM_READY;
+ hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
+
+ ret = vf_qm_cache_wb(vf_qm);
+ if (ret) {
+ dev_err(dev, "failed to writeback QM Cache!\n");
+ return ret;
+ }
+
+ ret = qm_get_regs(vf_qm, vf_data);
+ if (ret)
+ return -EINVAL;
+
+	/* Each reg is 32 bits; combine two to rebuild the 64 bit DMA address. */
+ vf_data->eqe_dma = vf_data->qm_eqc_dw[2];
+ vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
+ vf_data->eqe_dma |= vf_data->qm_eqc_dw[1];
+ vf_data->aeqe_dma = vf_data->qm_aeqc_dw[2];
+ vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
+ vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[1];
+
+	/* Get the sqc and cqc base addresses through SQC_BT/CQC_BT */
+ ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma);
+ if (ret) {
+ dev_err(dev, "failed to read SQC addr!\n");
+ return -EINVAL;
+ }
+
+ ret = qm_get_cqc(vf_qm, &vf_data->cqc_dma);
+ if (ret) {
+ dev_err(dev, "failed to read CQC addr!\n");
+ return -EINVAL;
+ }
+
+ migf->total_length = sizeof(struct acc_vf_data);
+ return 0;
+}
+
+/* Check the PF's RAS state and Function INT state */
+static int
+hisi_acc_check_int_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+ struct hisi_qm *vfqm = &hisi_acc_vdev->vf_qm;
+ struct hisi_qm *qm = hisi_acc_vdev->pf_qm;
+ struct pci_dev *vf_pdev = hisi_acc_vdev->vf_dev;
+ struct device *dev = &qm->pdev->dev;
+ u32 state;
+
+ /* Check RAS state */
+ state = qm_check_reg_state(qm, QM_ABNORMAL_INT_STATUS);
+ if (state) {
+ dev_err(dev, "failed to check QM RAS state!\n");
+ return -EBUSY;
+ }
+
+ /* Check Function Communication state between PF and VF */
+ state = qm_check_reg_state(vfqm, QM_IFC_INT_STATUS);
+ if (state) {
+ dev_err(dev, "failed to check QM IFC INT state!\n");
+ return -EBUSY;
+ }
+ state = qm_check_reg_state(vfqm, QM_IFC_INT_SET_V);
+ if (state) {
+ dev_err(dev, "failed to check QM IFC INT SET state!\n");
+ return -EBUSY;
+ }
+
+ /* Check submodule task state */
+ switch (vf_pdev->device) {
+ case PCI_DEVICE_ID_HUAWEI_SEC_VF:
+ state = qm_check_reg_state(qm, SEC_CORE_INT_STATUS);
+ if (state) {
+ dev_err(dev, "failed to check QM SEC Core INT state!\n");
+ return -EBUSY;
+ }
+ return 0;
+ case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
+ state = qm_check_reg_state(qm, HPRE_HAC_INT_STATUS);
+ if (state) {
+ dev_err(dev, "failed to check QM HPRE HAC INT state!\n");
+ return -EBUSY;
+ }
+ return 0;
+ case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
+ state = qm_check_reg_state(qm, HZIP_CORE_INT_STATUS);
+ if (state) {
+ dev_err(dev, "failed to check QM ZIP Core INT state!\n");
+ return -EBUSY;
+ }
+ return 0;
+ default:
+ dev_err(dev, "failed to detect acc module type!\n");
+ return -EINVAL;
+ }
+}
+
+static void hisi_acc_vf_disable_fd(struct hisi_acc_vf_migration_file *migf)
+{
+ mutex_lock(&migf->lock);
+ migf->disabled = true;
+ migf->total_length = 0;
+ migf->filp->f_pos = 0;
+ mutex_unlock(&migf->lock);
+}
+
+static void hisi_acc_vf_disable_fds(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+ if (hisi_acc_vdev->resuming_migf) {
+ hisi_acc_vf_disable_fd(hisi_acc_vdev->resuming_migf);
+ fput(hisi_acc_vdev->resuming_migf->filp);
+ hisi_acc_vdev->resuming_migf = NULL;
+ }
+
+ if (hisi_acc_vdev->saving_migf) {
+ hisi_acc_vf_disable_fd(hisi_acc_vdev->saving_migf);
+ fput(hisi_acc_vdev->saving_migf->filp);
+ hisi_acc_vdev->saving_migf = NULL;
+ }
+}
+
+/*
+ * This function is called in all state_mutex unlock cases to
+ * handle a 'deferred_reset', if one exists.
+ */
+static void
+hisi_acc_vf_state_mutex_unlock(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+again:
+ spin_lock(&hisi_acc_vdev->reset_lock);
+ if (hisi_acc_vdev->deferred_reset) {
+ hisi_acc_vdev->deferred_reset = false;
+ spin_unlock(&hisi_acc_vdev->reset_lock);
+ hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
+ hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
+ hisi_acc_vf_disable_fds(hisi_acc_vdev);
+ goto again;
+ }
+ mutex_unlock(&hisi_acc_vdev->state_mutex);
+ spin_unlock(&hisi_acc_vdev->reset_lock);
+}
+
+static void hisi_acc_vf_start_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+ struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
+
+ if (hisi_acc_vdev->vf_qm_state != QM_READY)
+ return;
+
+	vf_qm_fun_reset(vf_qm);
+}
+
+static int hisi_acc_vf_load_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+ struct device *dev = &hisi_acc_vdev->vf_dev->dev;
+ struct hisi_acc_vf_migration_file *migf = hisi_acc_vdev->resuming_migf;
+ int ret;
+
+ /* Check dev compatibility */
+ ret = vf_qm_check_match(hisi_acc_vdev, migf);
+ if (ret) {
+ dev_err(dev, "failed to match the VF!\n");
+ return ret;
+ }
+ /* Recover data to VF */
+ ret = vf_qm_load_data(hisi_acc_vdev, migf);
+ if (ret) {
+ dev_err(dev, "failed to recover the VF!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hisi_acc_vf_release_file(struct inode *inode, struct file *filp)
+{
+ struct hisi_acc_vf_migration_file *migf = filp->private_data;
+
+ hisi_acc_vf_disable_fd(migf);
+ mutex_destroy(&migf->lock);
+ kfree(migf);
+ return 0;
+}
+
+static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct hisi_acc_vf_migration_file *migf = filp->private_data;
+ loff_t requested_length;
+ ssize_t done = 0;
+ int ret;
+
+ if (pos)
+ return -ESPIPE;
+ pos = &filp->f_pos;
+
+ if (*pos < 0 ||
+ check_add_overflow((loff_t)len, *pos, &requested_length))
+ return -EINVAL;
+
+ if (requested_length > sizeof(struct acc_vf_data))
+ return -ENOMEM;
+
+ mutex_lock(&migf->lock);
+ if (migf->disabled) {
+ done = -ENODEV;
+ goto out_unlock;
+ }
+
+	ret = copy_from_user((void *)&migf->vf_data + *pos, buf, len);
+ if (ret) {
+ done = -EFAULT;
+ goto out_unlock;
+ }
+ *pos += len;
+ done = len;
+ migf->total_length += len;
+out_unlock:
+ mutex_unlock(&migf->lock);
+ return done;
+}
+
+static const struct file_operations hisi_acc_vf_resume_fops = {
+ .owner = THIS_MODULE,
+ .write = hisi_acc_vf_resume_write,
+ .release = hisi_acc_vf_release_file,
+ .llseek = no_llseek,
+};
+
+static struct hisi_acc_vf_migration_file *
+hisi_acc_vf_pci_resume(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+ struct hisi_acc_vf_migration_file *migf;
+
+ migf = kzalloc(sizeof(*migf), GFP_KERNEL);
+ if (!migf)
+ return ERR_PTR(-ENOMEM);
+
+ migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_resume_fops, migf,
+ O_WRONLY);
+ if (IS_ERR(migf->filp)) {
+ int err = PTR_ERR(migf->filp);
+
+ kfree(migf);
+ return ERR_PTR(err);
+ }
+
+ stream_open(migf->filp->f_inode, migf->filp);
+ mutex_init(&migf->lock);
+ return migf;
+}
+
+static ssize_t hisi_acc_vf_save_read(struct file *filp, char __user *buf, size_t len,
+ loff_t *pos)
+{
+ struct hisi_acc_vf_migration_file *migf = filp->private_data;
+ ssize_t done = 0;
+ int ret;
+
+ if (pos)
+ return -ESPIPE;
+ pos = &filp->f_pos;
+
+ mutex_lock(&migf->lock);
+ if (*pos > migf->total_length) {
+ done = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (migf->disabled) {
+ done = -ENODEV;
+ goto out_unlock;
+ }
+
+ len = min_t(size_t, migf->total_length - *pos, len);
+ if (len) {
+		ret = copy_to_user(buf, (void *)&migf->vf_data + *pos, len);
+ if (ret) {
+ done = -EFAULT;
+ goto out_unlock;
+ }
+ *pos += len;
+ done = len;
+ }
+out_unlock:
+ mutex_unlock(&migf->lock);
+ return done;
+}
+
+static const struct file_operations hisi_acc_vf_save_fops = {
+ .owner = THIS_MODULE,
+ .read = hisi_acc_vf_save_read,
+ .release = hisi_acc_vf_release_file,
+ .llseek = no_llseek,
+};
+
+static struct hisi_acc_vf_migration_file *
+hisi_acc_vf_stop_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+ struct hisi_acc_vf_migration_file *migf;
+ int ret;
+
+ migf = kzalloc(sizeof(*migf), GFP_KERNEL);
+ if (!migf)
+ return ERR_PTR(-ENOMEM);
+
+ migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_save_fops, migf,
+ O_RDONLY);
+ if (IS_ERR(migf->filp)) {
+ int err = PTR_ERR(migf->filp);
+
+ kfree(migf);
+ return ERR_PTR(err);
+ }
+
+ stream_open(migf->filp->f_inode, migf->filp);
+ mutex_init(&migf->lock);
+
+ ret = vf_qm_state_save(hisi_acc_vdev, migf);
+ if (ret) {
+ fput(migf->filp);
+ return ERR_PTR(ret);
+ }
+
+ return migf;
+}
+
+static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+ struct device *dev = &hisi_acc_vdev->vf_dev->dev;
+ struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
+ int ret;
+
+ ret = vf_qm_func_stop(vf_qm);
+ if (ret) {
+ dev_err(dev, "failed to stop QM VF function!\n");
+ return ret;
+ }
+
+ ret = hisi_acc_check_int_state(hisi_acc_vdev);
+ if (ret) {
+ dev_err(dev, "failed to check QM INT state!\n");
+ return ret;
+ }
+ return 0;
+}
+
+static struct file *
+hisi_acc_vf_set_device_state(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ u32 new)
+{
+ u32 cur = hisi_acc_vdev->mig_state;
+ int ret;
+
+ if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_STOP) {
+ ret = hisi_acc_vf_stop_device(hisi_acc_vdev);
+ if (ret)
+ return ERR_PTR(ret);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
+ struct hisi_acc_vf_migration_file *migf;
+
+ migf = hisi_acc_vf_stop_copy(hisi_acc_vdev);
+ if (IS_ERR(migf))
+ return ERR_CAST(migf);
+ get_file(migf->filp);
+ hisi_acc_vdev->saving_migf = migf;
+ return migf->filp;
+ }
+
+ if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP)) {
+ hisi_acc_vf_disable_fds(hisi_acc_vdev);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
+ struct hisi_acc_vf_migration_file *migf;
+
+ migf = hisi_acc_vf_pci_resume(hisi_acc_vdev);
+ if (IS_ERR(migf))
+ return ERR_CAST(migf);
+ get_file(migf->filp);
+ hisi_acc_vdev->resuming_migf = migf;
+ return migf->filp;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
+ ret = hisi_acc_vf_load_state(hisi_acc_vdev);
+ if (ret)
+ return ERR_PTR(ret);
+ hisi_acc_vf_disable_fds(hisi_acc_vdev);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING) {
+ hisi_acc_vf_start_device(hisi_acc_vdev);
+ return NULL;
+ }
+
+ /*
+ * vfio_mig_get_next_state() does not use arcs other than the above
+ */
+ WARN_ON(true);
+ return ERR_PTR(-EINVAL);
+}
+
+static struct file *
+hisi_acc_vfio_pci_set_device_state(struct vfio_device *vdev,
+ enum vfio_device_mig_state new_state)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev,
+ struct hisi_acc_vf_core_device, core_device.vdev);
+ enum vfio_device_mig_state next_state;
+ struct file *res = NULL;
+ int ret;
+
+ mutex_lock(&hisi_acc_vdev->state_mutex);
+ while (new_state != hisi_acc_vdev->mig_state) {
+ ret = vfio_mig_get_next_state(vdev,
+ hisi_acc_vdev->mig_state,
+ new_state, &next_state);
+ if (ret) {
+ res = ERR_PTR(-EINVAL);
+ break;
+ }
+
+ res = hisi_acc_vf_set_device_state(hisi_acc_vdev, next_state);
+ if (IS_ERR(res))
+ break;
+ hisi_acc_vdev->mig_state = next_state;
+ if (WARN_ON(res && new_state != hisi_acc_vdev->mig_state)) {
+ fput(res);
+ res = ERR_PTR(-EINVAL);
+ break;
+ }
+ }
+ hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
+ return res;
+}
+
+static int
+hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
+ enum vfio_device_mig_state *curr_state)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev,
+ struct hisi_acc_vf_core_device, core_device.vdev);
+
+ mutex_lock(&hisi_acc_vdev->state_mutex);
+ *curr_state = hisi_acc_vdev->mig_state;
+ hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
+ return 0;
+}
+
+static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = dev_get_drvdata(&pdev->dev);
+
+ if (hisi_acc_vdev->core_device.vdev.migration_flags !=
+ VFIO_MIGRATION_STOP_COPY)
+ return;
+
+ /*
+	 * As the higher VFIO layers are holding locks across reset and using
+	 * those same locks with the mm_lock, we need to prevent an ABBA
+	 * deadlock between the state_mutex and the mm_lock.
+	 * If the state_mutex is already taken, we defer the cleanup work to
+	 * the unlock flow of the other running context.
+ */
+ spin_lock(&hisi_acc_vdev->reset_lock);
+ hisi_acc_vdev->deferred_reset = true;
+ if (!mutex_trylock(&hisi_acc_vdev->state_mutex)) {
+ spin_unlock(&hisi_acc_vdev->reset_lock);
+ return;
+ }
+ spin_unlock(&hisi_acc_vdev->reset_lock);
+ hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
+}
+
+static int hisi_acc_vf_qm_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+ struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
+ struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
+ struct pci_dev *vf_dev = vdev->pdev;
+
+ /*
+	 * The ACC VF dev BAR2 region consists of both functional register
+	 * space and migration control register space. For migration to work,
+	 * we need access to both, so the entire BAR2 region is mapped here.
+	 * However, unnecessarily exposing the migration BAR region to the
+	 * guest has the potential to prevent/corrupt the guest migration.
+	 * Hence, we restrict access to the migration control space from the
+	 * guest (please see the mmap/ioctl/read/write override functions).
+	 *
+	 * Note that it is OK to expose the entire VF BAR if migration is not
+	 * supported or required, as this cannot affect the ACC PF
+	 * configurations.
+	 *
+	 * Also, the HiSilicon ACC VF devices supported by this driver on
+	 * HiSilicon hardware platforms are integrated endpoint devices and
+	 * the platform lacks the capability to perform any PCIe P2P between
+	 * these devices.
+ */
+
+ vf_qm->io_base =
+ ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX),
+ pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX));
+ if (!vf_qm->io_base)
+ return -EIO;
+
+ vf_qm->fun_type = QM_HW_VF;
+ vf_qm->pdev = vf_dev;
+ mutex_init(&vf_qm->mailbox_lock);
+
+ return 0;
+}
+
+static struct hisi_qm *hisi_acc_get_pf_qm(struct pci_dev *pdev)
+{
+ struct hisi_qm *pf_qm;
+ struct pci_driver *pf_driver;
+
+ if (!pdev->is_virtfn)
+ return NULL;
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_HUAWEI_SEC_VF:
+ pf_driver = hisi_sec_get_pf_driver();
+ break;
+ case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
+ pf_driver = hisi_hpre_get_pf_driver();
+ break;
+ case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
+ pf_driver = hisi_zip_get_pf_driver();
+ break;
+ default:
+ return NULL;
+ }
+
+ if (!pf_driver)
+ return NULL;
+
+ pf_qm = pci_iov_get_pf_drvdata(pdev, pf_driver);
+
+ return !IS_ERR(pf_qm) ? pf_qm : NULL;
+}
+
+static int hisi_acc_pci_rw_access_check(struct vfio_device *core_vdev,
+ size_t count, loff_t *ppos,
+ size_t *new_count)
+{
+ unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+
+ if (index == VFIO_PCI_BAR2_REGION_INDEX) {
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;
+
+		/* Deny access to the migration control region (upper half of BAR2) */
+ if (pos >= end)
+ return -EINVAL;
+
+ *new_count = min(count, (size_t)(end - pos));
+ }
+
+ return 0;
+}
+
+static int hisi_acc_vfio_pci_mmap(struct vfio_device *core_vdev,
+ struct vm_area_struct *vma)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+ unsigned int index;
+
+ index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+ if (index == VFIO_PCI_BAR2_REGION_INDEX) {
+ u64 req_len, pgoff, req_start;
+ resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;
+
+ req_len = vma->vm_end - vma->vm_start;
+ pgoff = vma->vm_pgoff &
+ ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+ req_start = pgoff << PAGE_SHIFT;
+
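+		/* Reject mappings that extend into the migration control half of BAR2 */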
+ if (req_start + req_len > end)
+ return -EINVAL;
+ }
+
+ return vfio_pci_core_mmap(core_vdev, vma);
+}
+
+static ssize_t hisi_acc_vfio_pci_write(struct vfio_device *core_vdev,
+ const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ size_t new_count = count;
+ int ret;
+
+ ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
+ if (ret)
+ return ret;
+
+ return vfio_pci_core_write(core_vdev, buf, new_count, ppos);
+}
+
+static ssize_t hisi_acc_vfio_pci_read(struct vfio_device *core_vdev,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ size_t new_count = count;
+ int ret;
+
+ ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
+ if (ret)
+ return ret;
+
+ return vfio_pci_core_read(core_vdev, buf, new_count, ppos);
+}
+
+static long hisi_acc_vfio_pci_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
+ unsigned long arg)
+{
+ if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+ struct pci_dev *pdev = vdev->pdev;
+ struct vfio_region_info info;
+ unsigned long minsz;
+
+ minsz = offsetofend(struct vfio_region_info, offset);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ if (info.index == VFIO_PCI_BAR2_REGION_INDEX) {
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+
+ /*
+ * ACC VF dev BAR2 region consists of both functional
+ * register space and migration control register space.
+			 * Report only the functional region to the guest.
+ */
+ info.size = pci_resource_len(pdev, info.index) / 2;
+
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE |
+ VFIO_REGION_INFO_FLAG_MMAP;
+
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
+ }
+ }
+ return vfio_pci_core_ioctl(core_vdev, cmd, arg);
+}
+
+static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
+ struct hisi_acc_vf_core_device, core_device.vdev);
+ struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
+ int ret;
+
+ ret = vfio_pci_core_enable(vdev);
+ if (ret)
+ return ret;
+
+ if (core_vdev->ops->migration_set_state) {
+ ret = hisi_acc_vf_qm_init(hisi_acc_vdev);
+ if (ret) {
+ vfio_pci_core_disable(vdev);
+ return ret;
+ }
+ hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
+ }
+
+ vfio_pci_core_finish_enable(vdev);
+ return 0;
+}
+
+static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
+ struct hisi_acc_vf_core_device, core_device.vdev);
+ struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
+
+ iounmap(vf_qm->io_base);
+ vfio_pci_core_close_device(core_vdev);
+}
+
+static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
+ .name = "hisi-acc-vfio-pci-migration",
+ .open_device = hisi_acc_vfio_pci_open_device,
+ .close_device = hisi_acc_vfio_pci_close_device,
+ .ioctl = hisi_acc_vfio_pci_ioctl,
+ .device_feature = vfio_pci_core_ioctl_feature,
+ .read = hisi_acc_vfio_pci_read,
+ .write = hisi_acc_vfio_pci_write,
+ .mmap = hisi_acc_vfio_pci_mmap,
+ .request = vfio_pci_core_request,
+ .match = vfio_pci_core_match,
+ .migration_set_state = hisi_acc_vfio_pci_set_device_state,
+ .migration_get_state = hisi_acc_vfio_pci_get_device_state,
+};
+
+static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
+ .name = "hisi-acc-vfio-pci",
+ .open_device = hisi_acc_vfio_pci_open_device,
+ .close_device = vfio_pci_core_close_device,
+ .ioctl = vfio_pci_core_ioctl,
+ .device_feature = vfio_pci_core_ioctl_feature,
+ .read = vfio_pci_core_read,
+ .write = vfio_pci_core_write,
+ .mmap = vfio_pci_core_mmap,
+ .request = vfio_pci_core_request,
+ .match = vfio_pci_core_match,
+};
+
+static int
+hisi_acc_vfio_pci_migrn_init(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ struct pci_dev *pdev, struct hisi_qm *pf_qm)
+{
+ int vf_id;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ hisi_acc_vdev->vf_id = vf_id + 1;
+ hisi_acc_vdev->core_device.vdev.migration_flags =
+ VFIO_MIGRATION_STOP_COPY;
+ hisi_acc_vdev->pf_qm = pf_qm;
+ hisi_acc_vdev->vf_dev = pdev;
+ mutex_init(&hisi_acc_vdev->state_mutex);
+
+ return 0;
+}
+
+static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev;
+ struct hisi_qm *pf_qm;
+ int ret;
+
+ hisi_acc_vdev = kzalloc(sizeof(*hisi_acc_vdev), GFP_KERNEL);
+ if (!hisi_acc_vdev)
+ return -ENOMEM;
+
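+	/* Migration needs a bound PF driver and QM hardware v3 or later */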
+ pf_qm = hisi_acc_get_pf_qm(pdev);
+ if (pf_qm && pf_qm->ver >= QM_HW_V3) {
+ ret = hisi_acc_vfio_pci_migrn_init(hisi_acc_vdev, pdev, pf_qm);
+ if (!ret) {
+ vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
+ &hisi_acc_vfio_pci_migrn_ops);
+ } else {
+ pci_warn(pdev, "migration support failed, continue with generic interface\n");
+ vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
+ &hisi_acc_vfio_pci_ops);
+ }
+ } else {
+ vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
+ &hisi_acc_vfio_pci_ops);
+ }
+
+ ret = vfio_pci_core_register_device(&hisi_acc_vdev->core_device);
+ if (ret)
+ goto out_free;
+
+ dev_set_drvdata(&pdev->dev, hisi_acc_vdev);
+ return 0;
+
+out_free:
+ vfio_pci_core_uninit_device(&hisi_acc_vdev->core_device);
+ kfree(hisi_acc_vdev);
+ return ret;
+}
+
+static void hisi_acc_vfio_pci_remove(struct pci_dev *pdev)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = dev_get_drvdata(&pdev->dev);
+
+ vfio_pci_core_unregister_device(&hisi_acc_vdev->core_device);
+ vfio_pci_core_uninit_device(&hisi_acc_vdev->core_device);
+ kfree(hisi_acc_vdev);
+}
+
+static const struct pci_device_id hisi_acc_vfio_pci_table[] = {
+ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) },
+ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },
+ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, hisi_acc_vfio_pci_table);
+
+static const struct pci_error_handlers hisi_acc_vf_err_handlers = {
+ .reset_done = hisi_acc_vf_pci_aer_reset_done,
+ .error_detected = vfio_pci_core_aer_err_detected,
+};
+
+static struct pci_driver hisi_acc_vfio_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = hisi_acc_vfio_pci_table,
+ .probe = hisi_acc_vfio_pci_probe,
+ .remove = hisi_acc_vfio_pci_remove,
+ .err_handler = &hisi_acc_vf_err_handlers,
+};
+
+module_pci_driver(hisi_acc_vfio_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Liu Longfang <liulongfang@huawei.com>");
+MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
+MODULE_DESCRIPTION("HiSilicon VFIO PCI - VFIO PCI driver with live migration support for HiSilicon ACC device family");
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
new file mode 100644
index 000000000000..5494f4983bbe
--- /dev/null
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 HiSilicon Ltd. */
+
+#ifndef HISI_ACC_VFIO_PCI_H
+#define HISI_ACC_VFIO_PCI_H
+
+#include <linux/hisi_acc_qm.h>
+
+#define MB_POLL_PERIOD_US 10
+#define MB_POLL_TIMEOUT_US 1000
+#define QM_CACHE_WB_START 0x204
+#define QM_CACHE_WB_DONE 0x208
+#define QM_MB_CMD_PAUSE_QM 0xe
+#define QM_ABNORMAL_INT_STATUS 0x100008
+#define QM_IFC_INT_STATUS 0x0028
+#define SEC_CORE_INT_STATUS 0x301008
+#define HPRE_HAC_INT_STATUS 0x301800
+#define HZIP_CORE_INT_STATUS 0x3010AC
+#define QM_QUE_ISO_CFG 0x301154
+
+#define QM_VFT_CFG_RDY 0x10006c
+#define QM_VFT_CFG_OP_WR 0x100058
+#define QM_VFT_CFG_TYPE 0x10005c
+#define QM_VFT_CFG 0x100060
+#define QM_VFT_CFG_OP_ENABLE 0x100054
+#define QM_VFT_CFG_DATA_L 0x100064
+#define QM_VFT_CFG_DATA_H 0x100068
+
+#define ERROR_CHECK_TIMEOUT 100
+#define CHECK_DELAY_TIME 100
+
+#define QM_SQC_VFT_BASE_SHIFT_V2 28
+#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
+#define QM_SQC_VFT_NUM_SHIFT_V2 45
+#define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0)
+
+/* RW regs */
+#define QM_REGS_MAX_LEN 7
+#define QM_REG_ADDR_OFFSET 0x0004
+
+#define QM_XQC_ADDR_OFFSET 32U
+#define QM_VF_AEQ_INT_MASK 0x0004
+#define QM_VF_EQ_INT_MASK 0x000c
+#define QM_IFC_INT_SOURCE_V 0x0020
+#define QM_IFC_INT_MASK 0x0024
+#define QM_IFC_INT_SET_V 0x002c
+#define QM_QUE_ISO_CFG_V 0x0030
+#define QM_PAGE_SIZE 0x0034
+
+#define QM_EQC_DW0		0x8000
+#define QM_AEQC_DW0		0x8020
+
+struct acc_vf_data {
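+/* The match data is the leading part of acc_vf_data, up to and including qm_rsv_state */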
+#define QM_MATCH_SIZE offsetofend(struct acc_vf_data, qm_rsv_state)
+ /* QM match information */
+#define ACC_DEV_MAGIC	0xCDCDCDCDFEEDAACC
+ u64 acc_magic;
+ u32 qp_num;
+ u32 dev_id;
+ u32 que_iso_cfg;
+ u32 qp_base;
+ u32 vf_qm_state;
+ /* QM reserved match information */
+ u32 qm_rsv_state[3];
+
+ /* QM RW regs */
+ u32 aeq_int_mask;
+ u32 eq_int_mask;
+ u32 ifc_int_source;
+ u32 ifc_int_mask;
+ u32 ifc_int_set;
+ u32 page_size;
+
+ /* QM_EQC_DW has 7 regs */
+ u32 qm_eqc_dw[7];
+
+ /* QM_AEQC_DW has 7 regs */
+ u32 qm_aeqc_dw[7];
+
+ /* QM reserved 5 regs */
+ u32 qm_rsv_regs[5];
+ u32 padding;
+ /* qm memory init information */
+ u64 eqe_dma;
+ u64 aeqe_dma;
+ u64 sqc_dma;
+ u64 cqc_dma;
+};
+
+struct hisi_acc_vf_migration_file {
+ struct file *filp;
+ struct mutex lock;
+ bool disabled;
+
+ struct acc_vf_data vf_data;
+ size_t total_length;
+};
+
+struct hisi_acc_vf_core_device {
+ struct vfio_pci_core_device core_device;
+ u8 deferred_reset:1;
+ /* for migration state */
+ struct mutex state_mutex;
+ enum vfio_device_mig_state mig_state;
+ struct pci_dev *pf_dev;
+ struct pci_dev *vf_dev;
+ struct hisi_qm *pf_qm;
+ struct hisi_qm vf_qm;
+ u32 vf_qm_state;
+ int vf_id;
+ /* for reset handler */
+ spinlock_t reset_lock;
+ struct hisi_acc_vf_migration_file *resuming_migf;
+ struct hisi_acc_vf_migration_file *saving_migf;
+};
+#endif /* HISI_ACC_VFIO_PCI_H */
diff --git a/drivers/vfio/pci/mlx5/Kconfig b/drivers/vfio/pci/mlx5/Kconfig
new file mode 100644
index 000000000000..29ba9c504a75
--- /dev/null
+++ b/drivers/vfio/pci/mlx5/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MLX5_VFIO_PCI
+ tristate "VFIO support for MLX5 PCI devices"
+ depends on MLX5_CORE
+ depends on VFIO_PCI_CORE
+ help
+ This provides migration support for MLX5 devices using the VFIO
+ framework.
+
+ If you don't know what to do here, say N.
diff --git a/drivers/vfio/pci/mlx5/Makefile b/drivers/vfio/pci/mlx5/Makefile
new file mode 100644
index 000000000000..689627da7ff5
--- /dev/null
+++ b/drivers/vfio/pci/mlx5/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MLX5_VFIO_PCI) += mlx5-vfio-pci.o
+mlx5-vfio-pci-y := main.o cmd.o
+
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
new file mode 100644
index 000000000000..5c9f9218cc1d
--- /dev/null
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include "cmd.h"
+
+int mlx5vf_cmd_suspend_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod)
+{
+ struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
+ u32 out[MLX5_ST_SZ_DW(suspend_vhca_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(suspend_vhca_in)] = {};
+ int ret;
+
+ if (!mdev)
+ return -ENOTCONN;
+
+ MLX5_SET(suspend_vhca_in, in, opcode, MLX5_CMD_OP_SUSPEND_VHCA);
+ MLX5_SET(suspend_vhca_in, in, vhca_id, vhca_id);
+ MLX5_SET(suspend_vhca_in, in, op_mod, op_mod);
+
+ ret = mlx5_cmd_exec_inout(mdev, suspend_vhca, in, out);
+ mlx5_vf_put_core_dev(mdev);
+ return ret;
+}
+
+int mlx5vf_cmd_resume_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod)
+{
+ struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
+ u32 out[MLX5_ST_SZ_DW(resume_vhca_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(resume_vhca_in)] = {};
+ int ret;
+
+ if (!mdev)
+ return -ENOTCONN;
+
+ MLX5_SET(resume_vhca_in, in, opcode, MLX5_CMD_OP_RESUME_VHCA);
+ MLX5_SET(resume_vhca_in, in, vhca_id, vhca_id);
+ MLX5_SET(resume_vhca_in, in, op_mod, op_mod);
+
+ ret = mlx5_cmd_exec_inout(mdev, resume_vhca, in, out);
+ mlx5_vf_put_core_dev(mdev);
+ return ret;
+}
+
+int mlx5vf_cmd_query_vhca_migration_state(struct pci_dev *pdev, u16 vhca_id,
+ size_t *state_size)
+{
+ struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
+ u32 out[MLX5_ST_SZ_DW(query_vhca_migration_state_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_vhca_migration_state_in)] = {};
+ int ret;
+
+ if (!mdev)
+ return -ENOTCONN;
+
+ MLX5_SET(query_vhca_migration_state_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE);
+ MLX5_SET(query_vhca_migration_state_in, in, vhca_id, vhca_id);
+ MLX5_SET(query_vhca_migration_state_in, in, op_mod, 0);
+
+ ret = mlx5_cmd_exec_inout(mdev, query_vhca_migration_state, in, out);
+ if (ret)
+ goto end;
+
+ *state_size = MLX5_GET(query_vhca_migration_state_out, out,
+ required_umem_size);
+
+end:
+ mlx5_vf_put_core_dev(mdev);
+ return ret;
+}
+
+int mlx5vf_cmd_get_vhca_id(struct pci_dev *pdev, u16 function_id, u16 *vhca_id)
+{
+ struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
+ u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
+ int out_size;
+ void *out;
+ int ret;
+
+ if (!mdev)
+ return -ENOTCONN;
+
+ out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ out = kzalloc(out_size, GFP_KERNEL);
+ if (!out) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, other_function, 1);
+ MLX5_SET(query_hca_cap_in, in, function_id, function_id);
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
+ HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
+ if (ret)
+ goto err_exec;
+
+ *vhca_id = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.vhca_id);
+
+err_exec:
+ kfree(out);
+end:
+ mlx5_vf_put_core_dev(mdev);
+ return ret;
+}
+
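+/*
+ * Create an MTT-based mkey over the migration buffer so the device can DMA
+ * the VHCA state image directly to/from the host pages.
+ */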
+static int _create_state_mkey(struct mlx5_core_dev *mdev, u32 pdn,
+ struct mlx5_vf_migration_file *migf, u32 *mkey)
+{
+ size_t npages = DIV_ROUND_UP(migf->total_length, PAGE_SIZE);
+ struct sg_dma_page_iter dma_iter;
+ int err = 0, inlen;
+ __be64 *mtt;
+ void *mkc;
+ u32 *in;
+
+ inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
+ sizeof(*mtt) * round_up(npages, 2);
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+ DIV_ROUND_UP(npages, 2));
+ mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+
+ for_each_sgtable_dma_page(&migf->table.sgt, &dma_iter, 0)
+ *mtt++ = cpu_to_be64(sg_page_iter_dma_address(&dma_iter));
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, rr, 1);
+ MLX5_SET(mkc, mkc, rw, 1);
+ MLX5_SET(mkc, mkc, pd, pdn);
+ MLX5_SET(mkc, mkc, bsf_octword_size, 0);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
+ MLX5_SET(mkc, mkc, translations_octword_size, DIV_ROUND_UP(npages, 2));
+ MLX5_SET64(mkc, mkc, len, migf->total_length);
+ err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
+ kvfree(in);
+ return err;
+}
+
+int mlx5vf_cmd_save_vhca_state(struct pci_dev *pdev, u16 vhca_id,
+ struct mlx5_vf_migration_file *migf)
+{
+ struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
+ u32 out[MLX5_ST_SZ_DW(save_vhca_state_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(save_vhca_state_in)] = {};
+ u32 pdn, mkey;
+ int err;
+
+ if (!mdev)
+ return -ENOTCONN;
+
+ err = mlx5_core_alloc_pd(mdev, &pdn);
+ if (err)
+ goto end;
+
+ err = dma_map_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE,
+ 0);
+ if (err)
+ goto err_dma_map;
+
+ err = _create_state_mkey(mdev, pdn, migf, &mkey);
+ if (err)
+ goto err_create_mkey;
+
+ MLX5_SET(save_vhca_state_in, in, opcode,
+ MLX5_CMD_OP_SAVE_VHCA_STATE);
+ MLX5_SET(save_vhca_state_in, in, op_mod, 0);
+ MLX5_SET(save_vhca_state_in, in, vhca_id, vhca_id);
+ MLX5_SET(save_vhca_state_in, in, mkey, mkey);
+ MLX5_SET(save_vhca_state_in, in, size, migf->total_length);
+
+ err = mlx5_cmd_exec_inout(mdev, save_vhca_state, in, out);
+ if (err)
+ goto err_exec;
+
+ migf->total_length =
+ MLX5_GET(save_vhca_state_out, out, actual_image_size);
+
+ mlx5_core_destroy_mkey(mdev, mkey);
+ mlx5_core_dealloc_pd(mdev, pdn);
+ dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0);
+ mlx5_vf_put_core_dev(mdev);
+
+ return 0;
+
+err_exec:
+ mlx5_core_destroy_mkey(mdev, mkey);
+err_create_mkey:
+ dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0);
+err_dma_map:
+ mlx5_core_dealloc_pd(mdev, pdn);
+end:
+ mlx5_vf_put_core_dev(mdev);
+ return err;
+}
+
+int mlx5vf_cmd_load_vhca_state(struct pci_dev *pdev, u16 vhca_id,
+ struct mlx5_vf_migration_file *migf)
+{
+ struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
+	u32 out[MLX5_ST_SZ_DW(load_vhca_state_out)] = {};
+	u32 in[MLX5_ST_SZ_DW(load_vhca_state_in)] = {};
+ u32 pdn, mkey;
+ int err;
+
+ if (!mdev)
+ return -ENOTCONN;
+
+ mutex_lock(&migf->lock);
+ if (!migf->total_length) {
+ err = -EINVAL;
+ goto end;
+ }
+
+ err = mlx5_core_alloc_pd(mdev, &pdn);
+ if (err)
+ goto end;
+
+ err = dma_map_sgtable(mdev->device, &migf->table.sgt, DMA_TO_DEVICE, 0);
+ if (err)
+ goto err_reg;
+
+ err = _create_state_mkey(mdev, pdn, migf, &mkey);
+ if (err)
+ goto err_mkey;
+
+ MLX5_SET(load_vhca_state_in, in, opcode,
+ MLX5_CMD_OP_LOAD_VHCA_STATE);
+ MLX5_SET(load_vhca_state_in, in, op_mod, 0);
+ MLX5_SET(load_vhca_state_in, in, vhca_id, vhca_id);
+ MLX5_SET(load_vhca_state_in, in, mkey, mkey);
+ MLX5_SET(load_vhca_state_in, in, size, migf->total_length);
+
+ err = mlx5_cmd_exec_inout(mdev, load_vhca_state, in, out);
+
+ mlx5_core_destroy_mkey(mdev, mkey);
+err_mkey:
+ dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_TO_DEVICE, 0);
+err_reg:
+ mlx5_core_dealloc_pd(mdev, pdn);
+end:
+ mlx5_vf_put_core_dev(mdev);
+ mutex_unlock(&migf->lock);
+ return err;
+}
diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
new file mode 100644
index 000000000000..1392a11a9cc0
--- /dev/null
+++ b/drivers/vfio/pci/mlx5/cmd.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ */
+
+#ifndef MLX5_VFIO_CMD_H
+#define MLX5_VFIO_CMD_H
+
+#include <linux/kernel.h>
+#include <linux/mlx5/driver.h>
+
+struct mlx5_vf_migration_file {
+ struct file *filp;
+ struct mutex lock;
+ bool disabled;
+
+ struct sg_append_table table;
+ size_t total_length;
+ size_t allocated_length;
+
+ /* Optimize mlx5vf_get_migration_page() for sequential access */
+ struct scatterlist *last_offset_sg;
+ unsigned int sg_last_entry;
+ unsigned long last_offset;
+};
+
+int mlx5vf_cmd_suspend_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod);
+int mlx5vf_cmd_resume_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod);
+int mlx5vf_cmd_query_vhca_migration_state(struct pci_dev *pdev, u16 vhca_id,
+ size_t *state_size);
+int mlx5vf_cmd_get_vhca_id(struct pci_dev *pdev, u16 function_id, u16 *vhca_id);
+int mlx5vf_cmd_save_vhca_state(struct pci_dev *pdev, u16 vhca_id,
+ struct mlx5_vf_migration_file *migf);
+int mlx5vf_cmd_load_vhca_state(struct pci_dev *pdev, u16 vhca_id,
+ struct mlx5_vf_migration_file *migf);
+#endif /* MLX5_VFIO_CMD_H */
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
new file mode 100644
index 000000000000..bbec5d288fee
--- /dev/null
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -0,0 +1,676 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include <linux/device.h>
+#include <linux/eventfd.h>
+#include <linux/file.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+#include <linux/sched/mm.h>
+#include <linux/vfio_pci_core.h>
+#include <linux/anon_inodes.h>
+
+#include "cmd.h"
+
+/* Arbitrary cap to keep userspace from consuming endless memory */
+#define MAX_MIGRATION_SIZE (512*1024*1024)
+
+struct mlx5vf_pci_core_device {
+ struct vfio_pci_core_device core_device;
+ u16 vhca_id;
+ u8 migrate_cap:1;
+ u8 deferred_reset:1;
+ /* protect migration state */
+ struct mutex state_mutex;
+ enum vfio_device_mig_state mig_state;
+ /* protect the reset_done flow */
+ spinlock_t reset_lock;
+ struct mlx5_vf_migration_file *resuming_migf;
+ struct mlx5_vf_migration_file *saving_migf;
+};
+
+static struct page *
+mlx5vf_get_migration_page(struct mlx5_vf_migration_file *migf,
+ unsigned long offset)
+{
+ unsigned long cur_offset = 0;
+ struct scatterlist *sg;
+ unsigned int i;
+
+	/* Accesses are expected to be sequential; reset the cache on a backwards seek */
+ if (offset < migf->last_offset || !migf->last_offset_sg) {
+ migf->last_offset = 0;
+ migf->last_offset_sg = migf->table.sgt.sgl;
+ migf->sg_last_entry = 0;
+ }
+
+ cur_offset = migf->last_offset;
+
+ for_each_sg(migf->last_offset_sg, sg,
+ migf->table.sgt.orig_nents - migf->sg_last_entry, i) {
+ if (offset < sg->length + cur_offset) {
+ migf->last_offset_sg = sg;
+ migf->sg_last_entry += i;
+ migf->last_offset = cur_offset;
+ return nth_page(sg_page(sg),
+ (offset - cur_offset) / PAGE_SIZE);
+ }
+ cur_offset += sg->length;
+ }
+ return NULL;
+}
+
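+/*
+ * Grow the migration buffer by npages, allocating pages in bulk batches that
+ * fit one page worth of pointers and appending them to the sg table.
+ */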
+static int mlx5vf_add_migration_pages(struct mlx5_vf_migration_file *migf,
+ unsigned int npages)
+{
+ unsigned int to_alloc = npages;
+ struct page **page_list;
+ unsigned long filled;
+ unsigned int to_fill;
+ int ret;
+
+ to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*page_list));
+ page_list = kvzalloc(to_fill * sizeof(*page_list), GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ do {
+ filled = alloc_pages_bulk_array(GFP_KERNEL, to_fill, page_list);
+ if (!filled) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ to_alloc -= filled;
+ ret = sg_alloc_append_table_from_pages(
+ &migf->table, page_list, filled, 0,
+ filled << PAGE_SHIFT, UINT_MAX, SG_MAX_SINGLE_ALLOC,
+ GFP_KERNEL);
+
+ if (ret)
+ goto err;
+ migf->allocated_length += filled * PAGE_SIZE;
+		/* Clear the input array before the next bulk allocation */
+ memset(page_list, 0, filled * sizeof(*page_list));
+ to_fill = min_t(unsigned int, to_alloc,
+ PAGE_SIZE / sizeof(*page_list));
+ } while (to_alloc > 0);
+
+ kvfree(page_list);
+ return 0;
+
+err:
+ kvfree(page_list);
+ return ret;
+}
+
+static void mlx5vf_disable_fd(struct mlx5_vf_migration_file *migf)
+{
+ struct sg_page_iter sg_iter;
+
+ mutex_lock(&migf->lock);
+ /* Undo alloc_pages_bulk_array() */
+ for_each_sgtable_page(&migf->table.sgt, &sg_iter, 0)
+ __free_page(sg_page_iter_page(&sg_iter));
+ sg_free_append_table(&migf->table);
+ migf->disabled = true;
+ migf->total_length = 0;
+ migf->allocated_length = 0;
+ migf->filp->f_pos = 0;
+ mutex_unlock(&migf->lock);
+}
+
+static int mlx5vf_release_file(struct inode *inode, struct file *filp)
+{
+ struct mlx5_vf_migration_file *migf = filp->private_data;
+
+ mlx5vf_disable_fd(migf);
+ mutex_destroy(&migf->lock);
+ kfree(migf);
+ return 0;
+}
+
+static ssize_t mlx5vf_save_read(struct file *filp, char __user *buf, size_t len,
+ loff_t *pos)
+{
+ struct mlx5_vf_migration_file *migf = filp->private_data;
+ ssize_t done = 0;
+
+ if (pos)
+ return -ESPIPE;
+ pos = &filp->f_pos;
+
+ mutex_lock(&migf->lock);
+ if (*pos > migf->total_length) {
+ done = -EINVAL;
+ goto out_unlock;
+ }
+ if (migf->disabled) {
+ done = -ENODEV;
+ goto out_unlock;
+ }
+
+ len = min_t(size_t, migf->total_length - *pos, len);
+ while (len) {
+ size_t page_offset;
+ struct page *page;
+ size_t page_len;
+ u8 *from_buff;
+ int ret;
+
+ page_offset = (*pos) % PAGE_SIZE;
+ page = mlx5vf_get_migration_page(migf, *pos - page_offset);
+ if (!page) {
+ if (done == 0)
+ done = -EINVAL;
+ goto out_unlock;
+ }
+
+ page_len = min_t(size_t, len, PAGE_SIZE - page_offset);
+ from_buff = kmap_local_page(page);
+ ret = copy_to_user(buf, from_buff + page_offset, page_len);
+ kunmap_local(from_buff);
+ if (ret) {
+ done = -EFAULT;
+ goto out_unlock;
+ }
+ *pos += page_len;
+ len -= page_len;
+ done += page_len;
+ buf += page_len;
+ }
+
+out_unlock:
+ mutex_unlock(&migf->lock);
+ return done;
+}
+
+static const struct file_operations mlx5vf_save_fops = {
+ .owner = THIS_MODULE,
+ .read = mlx5vf_save_read,
+ .release = mlx5vf_release_file,
+ .llseek = no_llseek,
+};
+
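+/*
+ * Allocate the saving migration file, size its buffer from the device
+ * reported required state size, and trigger SAVE_VHCA_STATE into it.
+ */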
+static struct mlx5_vf_migration_file *
+mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev)
+{
+ struct mlx5_vf_migration_file *migf;
+ int ret;
+
+ migf = kzalloc(sizeof(*migf), GFP_KERNEL);
+ if (!migf)
+ return ERR_PTR(-ENOMEM);
+
+ migf->filp = anon_inode_getfile("mlx5vf_mig", &mlx5vf_save_fops, migf,
+ O_RDONLY);
+ if (IS_ERR(migf->filp)) {
+ int err = PTR_ERR(migf->filp);
+
+ kfree(migf);
+ return ERR_PTR(err);
+ }
+
+ stream_open(migf->filp->f_inode, migf->filp);
+ mutex_init(&migf->lock);
+
+ ret = mlx5vf_cmd_query_vhca_migration_state(
+ mvdev->core_device.pdev, mvdev->vhca_id, &migf->total_length);
+ if (ret)
+ goto out_free;
+
+ ret = mlx5vf_add_migration_pages(
+ migf, DIV_ROUND_UP_ULL(migf->total_length, PAGE_SIZE));
+ if (ret)
+ goto out_free;
+
+ ret = mlx5vf_cmd_save_vhca_state(mvdev->core_device.pdev,
+ mvdev->vhca_id, migf);
+ if (ret)
+ goto out_free;
+ return migf;
+out_free:
+ fput(migf->filp);
+ return ERR_PTR(ret);
+}
+
+static ssize_t mlx5vf_resume_write(struct file *filp, const char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct mlx5_vf_migration_file *migf = filp->private_data;
+ loff_t requested_length;
+ ssize_t done = 0;
+
+ if (pos)
+ return -ESPIPE;
+ pos = &filp->f_pos;
+
+ if (*pos < 0 ||
+ check_add_overflow((loff_t)len, *pos, &requested_length))
+ return -EINVAL;
+
+ if (requested_length > MAX_MIGRATION_SIZE)
+ return -ENOMEM;
+
+ mutex_lock(&migf->lock);
+ if (migf->disabled) {
+ done = -ENODEV;
+ goto out_unlock;
+ }
+
+ if (migf->allocated_length < requested_length) {
+ done = mlx5vf_add_migration_pages(
+ migf,
+ DIV_ROUND_UP(requested_length - migf->allocated_length,
+ PAGE_SIZE));
+ if (done)
+ goto out_unlock;
+ }
+
+ while (len) {
+ size_t page_offset;
+ struct page *page;
+ size_t page_len;
+ u8 *to_buff;
+ int ret;
+
+ page_offset = (*pos) % PAGE_SIZE;
+ page = mlx5vf_get_migration_page(migf, *pos - page_offset);
+ if (!page) {
+ if (done == 0)
+ done = -EINVAL;
+ goto out_unlock;
+ }
+
+ page_len = min_t(size_t, len, PAGE_SIZE - page_offset);
+ to_buff = kmap_local_page(page);
+ ret = copy_from_user(to_buff + page_offset, buf, page_len);
+ kunmap_local(to_buff);
+ if (ret) {
+ done = -EFAULT;
+ goto out_unlock;
+ }
+ *pos += page_len;
+ len -= page_len;
+ done += page_len;
+ buf += page_len;
+ migf->total_length += page_len;
+ }
+out_unlock:
+ mutex_unlock(&migf->lock);
+ return done;
+}
+
+static const struct file_operations mlx5vf_resume_fops = {
+ .owner = THIS_MODULE,
+ .write = mlx5vf_resume_write,
+ .release = mlx5vf_release_file,
+ .llseek = no_llseek,
+};
+
+static struct mlx5_vf_migration_file *
+mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev)
+{
+ struct mlx5_vf_migration_file *migf;
+
+ migf = kzalloc(sizeof(*migf), GFP_KERNEL);
+ if (!migf)
+ return ERR_PTR(-ENOMEM);
+
+ migf->filp = anon_inode_getfile("mlx5vf_mig", &mlx5vf_resume_fops, migf,
+ O_WRONLY);
+ if (IS_ERR(migf->filp)) {
+ int err = PTR_ERR(migf->filp);
+
+ kfree(migf);
+ return ERR_PTR(err);
+ }
+ stream_open(migf->filp->f_inode, migf->filp);
+ mutex_init(&migf->lock);
+ return migf;
+}
+
+static void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev)
+{
+ if (mvdev->resuming_migf) {
+ mlx5vf_disable_fd(mvdev->resuming_migf);
+ fput(mvdev->resuming_migf->filp);
+ mvdev->resuming_migf = NULL;
+ }
+ if (mvdev->saving_migf) {
+ mlx5vf_disable_fd(mvdev->saving_migf);
+ fput(mvdev->saving_migf->filp);
+ mvdev->saving_migf = NULL;
+ }
+}
+
+static struct file *
+mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
+ u32 new)
+{
+ u32 cur = mvdev->mig_state;
+ int ret;
+
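+	/*
+	 * The RUNNING_P2P <-> STOP arcs suspend/resume the responder side,
+	 * while the RUNNING <-> RUNNING_P2P arcs suspend/resume the
+	 * initiator side.
+	 */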
+ if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) {
+ ret = mlx5vf_cmd_suspend_vhca(
+ mvdev->core_device.pdev, mvdev->vhca_id,
+ MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_RESPONDER);
+ if (ret)
+ return ERR_PTR(ret);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P) {
+ ret = mlx5vf_cmd_resume_vhca(
+ mvdev->core_device.pdev, mvdev->vhca_id,
+ MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_RESPONDER);
+ if (ret)
+ return ERR_PTR(ret);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) {
+ ret = mlx5vf_cmd_suspend_vhca(
+ mvdev->core_device.pdev, mvdev->vhca_id,
+ MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_INITIATOR);
+ if (ret)
+ return ERR_PTR(ret);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) {
+ ret = mlx5vf_cmd_resume_vhca(
+ mvdev->core_device.pdev, mvdev->vhca_id,
+ MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_INITIATOR);
+ if (ret)
+ return ERR_PTR(ret);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
+ struct mlx5_vf_migration_file *migf;
+
+ migf = mlx5vf_pci_save_device_data(mvdev);
+ if (IS_ERR(migf))
+ return ERR_CAST(migf);
+ get_file(migf->filp);
+ mvdev->saving_migf = migf;
+ return migf->filp;
+ }
+
+ if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP)) {
+ mlx5vf_disable_fds(mvdev);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
+ struct mlx5_vf_migration_file *migf;
+
+ migf = mlx5vf_pci_resume_device_data(mvdev);
+ if (IS_ERR(migf))
+ return ERR_CAST(migf);
+ get_file(migf->filp);
+ mvdev->resuming_migf = migf;
+ return migf->filp;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
+ ret = mlx5vf_cmd_load_vhca_state(mvdev->core_device.pdev,
+ mvdev->vhca_id,
+ mvdev->resuming_migf);
+ if (ret)
+ return ERR_PTR(ret);
+ mlx5vf_disable_fds(mvdev);
+ return NULL;
+ }
+
+ /*
+ * vfio_mig_get_next_state() does not use arcs other than the above
+ */
+ WARN_ON(true);
+ return ERR_PTR(-EINVAL);
+}
+
+/*
+ * This function is called in all state_mutex unlock cases to
+ * handle a 'deferred_reset', if one exists.
+ */
+static void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev)
+{
+again:
+ spin_lock(&mvdev->reset_lock);
+ if (mvdev->deferred_reset) {
+ mvdev->deferred_reset = false;
+ spin_unlock(&mvdev->reset_lock);
+ mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
+ mlx5vf_disable_fds(mvdev);
+ goto again;
+ }
+ mutex_unlock(&mvdev->state_mutex);
+ spin_unlock(&mvdev->reset_lock);
+}
+
+static struct file *
+mlx5vf_pci_set_device_state(struct vfio_device *vdev,
+ enum vfio_device_mig_state new_state)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(
+ vdev, struct mlx5vf_pci_core_device, core_device.vdev);
+ enum vfio_device_mig_state next_state;
+ struct file *res = NULL;
+ int ret;
+
+ mutex_lock(&mvdev->state_mutex);
+ while (new_state != mvdev->mig_state) {
+ ret = vfio_mig_get_next_state(vdev, mvdev->mig_state,
+ new_state, &next_state);
+ if (ret) {
+ res = ERR_PTR(ret);
+ break;
+ }
+ res = mlx5vf_pci_step_device_state_locked(mvdev, next_state);
+ if (IS_ERR(res))
+ break;
+ mvdev->mig_state = next_state;
+ if (WARN_ON(res && new_state != mvdev->mig_state)) {
+ fput(res);
+ res = ERR_PTR(-EINVAL);
+ break;
+ }
+ }
+ mlx5vf_state_mutex_unlock(mvdev);
+ return res;
+}
+
+static int mlx5vf_pci_get_device_state(struct vfio_device *vdev,
+ enum vfio_device_mig_state *curr_state)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(
+ vdev, struct mlx5vf_pci_core_device, core_device.vdev);
+
+ mutex_lock(&mvdev->state_mutex);
+ *curr_state = mvdev->mig_state;
+ mlx5vf_state_mutex_unlock(mvdev);
+ return 0;
+}
+
+static void mlx5vf_pci_aer_reset_done(struct pci_dev *pdev)
+{
+ struct mlx5vf_pci_core_device *mvdev = dev_get_drvdata(&pdev->dev);
+
+ if (!mvdev->migrate_cap)
+ return;
+
+ /*
+	 * As the higher VFIO layers are holding locks across reset and using
+	 * those same locks with the mm_lock, we need to prevent an ABBA
+	 * deadlock between the state_mutex and the mm_lock.
+	 * If the state_mutex is already taken, we defer the cleanup work to
+	 * the unlock flow of the other running context.
+ */
+ spin_lock(&mvdev->reset_lock);
+ mvdev->deferred_reset = true;
+ if (!mutex_trylock(&mvdev->state_mutex)) {
+ spin_unlock(&mvdev->reset_lock);
+ return;
+ }
+ spin_unlock(&mvdev->reset_lock);
+ mlx5vf_state_mutex_unlock(mvdev);
+}
+
+static int mlx5vf_pci_open_device(struct vfio_device *core_vdev)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(
+ core_vdev, struct mlx5vf_pci_core_device, core_device.vdev);
+ struct vfio_pci_core_device *vdev = &mvdev->core_device;
+ int vf_id;
+ int ret;
+
+ ret = vfio_pci_core_enable(vdev);
+ if (ret)
+ return ret;
+
+ if (!mvdev->migrate_cap) {
+ vfio_pci_core_finish_enable(vdev);
+ return 0;
+ }
+
+ vf_id = pci_iov_vf_id(vdev->pdev);
+ if (vf_id < 0) {
+ ret = vf_id;
+ goto out_disable;
+ }
+
+ ret = mlx5vf_cmd_get_vhca_id(vdev->pdev, vf_id + 1, &mvdev->vhca_id);
+ if (ret)
+ goto out_disable;
+
+ mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
+ vfio_pci_core_finish_enable(vdev);
+ return 0;
+out_disable:
+ vfio_pci_core_disable(vdev);
+ return ret;
+}
+
+static void mlx5vf_pci_close_device(struct vfio_device *core_vdev)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(
+ core_vdev, struct mlx5vf_pci_core_device, core_device.vdev);
+
+ mlx5vf_disable_fds(mvdev);
+ vfio_pci_core_close_device(core_vdev);
+}
+
+static const struct vfio_device_ops mlx5vf_pci_ops = {
+ .name = "mlx5-vfio-pci",
+ .open_device = mlx5vf_pci_open_device,
+ .close_device = mlx5vf_pci_close_device,
+ .ioctl = vfio_pci_core_ioctl,
+ .device_feature = vfio_pci_core_ioctl_feature,
+ .read = vfio_pci_core_read,
+ .write = vfio_pci_core_write,
+ .mmap = vfio_pci_core_mmap,
+ .request = vfio_pci_core_request,
+ .match = vfio_pci_core_match,
+ .migration_set_state = mlx5vf_pci_set_device_state,
+ .migration_get_state = mlx5vf_pci_get_device_state,
+};
+
+static int mlx5vf_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct mlx5vf_pci_core_device *mvdev;
+ int ret;
+
+ mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL);
+ if (!mvdev)
+ return -ENOMEM;
+ vfio_pci_core_init_device(&mvdev->core_device, pdev, &mlx5vf_pci_ops);
+
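+	/* Migration is enabled only for VFs whose PF reports the migration capability */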
+ if (pdev->is_virtfn) {
+ struct mlx5_core_dev *mdev =
+ mlx5_vf_get_core_dev(pdev);
+
+ if (mdev) {
+ if (MLX5_CAP_GEN(mdev, migration)) {
+ mvdev->migrate_cap = 1;
+ mvdev->core_device.vdev.migration_flags =
+ VFIO_MIGRATION_STOP_COPY |
+ VFIO_MIGRATION_P2P;
+ mutex_init(&mvdev->state_mutex);
+ spin_lock_init(&mvdev->reset_lock);
+ }
+ mlx5_vf_put_core_dev(mdev);
+ }
+ }
+
+ ret = vfio_pci_core_register_device(&mvdev->core_device);
+ if (ret)
+ goto out_free;
+
+ dev_set_drvdata(&pdev->dev, mvdev);
+ return 0;
+
+out_free:
+ vfio_pci_core_uninit_device(&mvdev->core_device);
+ kfree(mvdev);
+ return ret;
+}
+
+static void mlx5vf_pci_remove(struct pci_dev *pdev)
+{
+ struct mlx5vf_pci_core_device *mvdev = dev_get_drvdata(&pdev->dev);
+
+ vfio_pci_core_unregister_device(&mvdev->core_device);
+ vfio_pci_core_uninit_device(&mvdev->core_device);
+ kfree(mvdev);
+}
+
+static const struct pci_device_id mlx5vf_pci_table[] = {
+ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_MELLANOX, 0x101e) }, /* ConnectX Family mlx5Gen Virtual Function */
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, mlx5vf_pci_table);
+
+static const struct pci_error_handlers mlx5vf_err_handlers = {
+ .reset_done = mlx5vf_pci_aer_reset_done,
+ .error_detected = vfio_pci_core_aer_err_detected,
+};
+
+static struct pci_driver mlx5vf_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mlx5vf_pci_table,
+ .probe = mlx5vf_pci_probe,
+ .remove = mlx5vf_pci_remove,
+ .err_handler = &mlx5vf_err_handlers,
+};
+
+static void __exit mlx5vf_pci_cleanup(void)
+{
+ pci_unregister_driver(&mlx5vf_pci_driver);
+}
+
+static int __init mlx5vf_pci_init(void)
+{
+ return pci_register_driver(&mlx5vf_pci_driver);
+}
+
+module_init(mlx5vf_pci_init);
+module_exit(mlx5vf_pci_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Max Gurtovoy <mgurtovoy@nvidia.com>");
+MODULE_AUTHOR("Yishai Hadas <yishaih@nvidia.com>");
+MODULE_DESCRIPTION(
+ "MLX5 VFIO PCI - User Level meta-driver for MLX5 device family");
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index a5ce92beb655..2b047469e02f 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -130,6 +130,7 @@ static const struct vfio_device_ops vfio_pci_ops = {
.open_device = vfio_pci_open_device,
.close_device = vfio_pci_core_close_device,
.ioctl = vfio_pci_core_ioctl,
+ .device_feature = vfio_pci_core_ioctl_feature,
.read = vfio_pci_core_read,
.write = vfio_pci_core_write,
.mmap = vfio_pci_core_mmap,
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index f948e6cd2993..b7bb16f92ac6 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -228,6 +228,19 @@ int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, pci_power_t stat
if (!ret) {
/* D3 might be unsupported via quirk, skip unless in D3 */
if (needs_save && pdev->current_state >= PCI_D3hot) {
+ /*
+ * The current PCI state will be saved locally in
+ * 'pm_save' during the D3hot transition. When the
+ * device state is changed to D0 again with the current
+ * function, then pci_store_saved_state() will restore
+ * the state and will free the memory pointed by
+ * 'pm_save'. There are a few cases where the PCI power
+ * state can be changed to D0 without the involvement
+ * of the driver. For these cases, free the earlier
+ * allocated memory first before overwriting 'pm_save'
+ * to prevent the memory leak.
+ */
+ kfree(vdev->pm_save);
vdev->pm_save = pci_store_saved_state(pdev);
} else if (needs_restore) {
pci_load_and_free_saved_state(pdev, &vdev->pm_save);
@@ -322,6 +335,17 @@ void vfio_pci_core_disable(struct vfio_pci_core_device *vdev)
/* For needs_reset */
lockdep_assert_held(&vdev->vdev.dev_set->lock);
+ /*
+ * This function can be invoked while the power state is non-D0.
+ * This function calls __pci_reset_function_locked() which internally
+ * can use pci_pm_reset() for the function reset. pci_pm_reset() will
+ * fail if the power state is non-D0. Also, for the devices which
+ * have NoSoftRst-, the reset function can cause the PCI config space
+ * reset without restoring the original state (saved locally in
+ * 'vdev->pm_save').
+ */
+ vfio_pci_set_power_state(vdev, PCI_D0);
+
/* Stop the device from further DMA */
pci_clear_master(pdev);
@@ -921,6 +945,19 @@ long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
return -EINVAL;
vfio_pci_zap_and_down_write_memory_lock(vdev);
+
+ /*
+ * This function can be invoked while the power state is non-D0.
+ * If pci_try_reset_function() has been called while the power
+ * state is non-D0, then pci_try_reset_function() will
+ * internally set the power state to D0 without vfio driver
+ * involvement. For the devices which have NoSoftRst-, the
+ * reset function can cause the PCI config space reset without
+ * restoring the original state (saved locally in
+ * 'vdev->pm_save').
+ */
+ vfio_pci_set_power_state(vdev, PCI_D0);
+
ret = pci_try_reset_function(vdev->pdev);
up_write(&vdev->memory_lock);
@@ -1114,70 +1151,50 @@ hot_reset_release:
return vfio_pci_ioeventfd(vdev, ioeventfd.offset,
ioeventfd.data, count, ioeventfd.fd);
- } else if (cmd == VFIO_DEVICE_FEATURE) {
- struct vfio_device_feature feature;
- uuid_t uuid;
-
- minsz = offsetofend(struct vfio_device_feature, flags);
-
- if (copy_from_user(&feature, (void __user *)arg, minsz))
- return -EFAULT;
-
- if (feature.argsz < minsz)
- return -EINVAL;
-
- /* Check unknown flags */
- if (feature.flags & ~(VFIO_DEVICE_FEATURE_MASK |
- VFIO_DEVICE_FEATURE_SET |
- VFIO_DEVICE_FEATURE_GET |
- VFIO_DEVICE_FEATURE_PROBE))
- return -EINVAL;
-
- /* GET & SET are mutually exclusive except with PROBE */
- if (!(feature.flags & VFIO_DEVICE_FEATURE_PROBE) &&
- (feature.flags & VFIO_DEVICE_FEATURE_SET) &&
- (feature.flags & VFIO_DEVICE_FEATURE_GET))
- return -EINVAL;
-
- switch (feature.flags & VFIO_DEVICE_FEATURE_MASK) {
- case VFIO_DEVICE_FEATURE_PCI_VF_TOKEN:
- if (!vdev->vf_token)
- return -ENOTTY;
-
- /*
- * We do not support GET of the VF Token UUID as this
- * could expose the token of the previous device user.
- */
- if (feature.flags & VFIO_DEVICE_FEATURE_GET)
- return -EINVAL;
-
- if (feature.flags & VFIO_DEVICE_FEATURE_PROBE)
- return 0;
+ }
+ return -ENOTTY;
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl);
- /* Don't SET unless told to do so */
- if (!(feature.flags & VFIO_DEVICE_FEATURE_SET))
- return -EINVAL;
+static int vfio_pci_core_feature_token(struct vfio_device *device, u32 flags,
+ void __user *arg, size_t argsz)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(device, struct vfio_pci_core_device, vdev);
+ uuid_t uuid;
+ int ret;
- if (feature.argsz < minsz + sizeof(uuid))
- return -EINVAL;
+ if (!vdev->vf_token)
+ return -ENOTTY;
+ /*
+ * We do not support GET of the VF Token UUID as this could
+ * expose the token of the previous device user.
+ */
+ ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET,
+ sizeof(uuid));
+ if (ret != 1)
+ return ret;
- if (copy_from_user(&uuid, (void __user *)(arg + minsz),
- sizeof(uuid)))
- return -EFAULT;
+ if (copy_from_user(&uuid, arg, sizeof(uuid)))
+ return -EFAULT;
- mutex_lock(&vdev->vf_token->lock);
- uuid_copy(&vdev->vf_token->uuid, &uuid);
- mutex_unlock(&vdev->vf_token->lock);
+ mutex_lock(&vdev->vf_token->lock);
+ uuid_copy(&vdev->vf_token->uuid, &uuid);
+ mutex_unlock(&vdev->vf_token->lock);
+ return 0;
+}
- return 0;
- default:
- return -ENOTTY;
- }
+int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags,
+ void __user *arg, size_t argsz)
+{
+ switch (flags & VFIO_DEVICE_FEATURE_MASK) {
+ case VFIO_DEVICE_FEATURE_PCI_VF_TOKEN:
+ return vfio_pci_core_feature_token(device, flags, arg, argsz);
+ default:
+ return -ENOTTY;
}
-
- return -ENOTTY;
}
-EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl);
+EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl_feature);
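
For context, the new vfio_pci_core_feature_token() expects the uuid payload to follow the feature header directly. A hedged userspace sketch of the corresponding SET call (device_fd and the uuid bytes are placeholders, not part of this patch):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int set_vf_token(int device_fd, const unsigned char uuid[16])
{
        char buf[sizeof(struct vfio_device_feature) + 16];
        struct vfio_device_feature *hdr = (void *)buf;

        memset(buf, 0, sizeof(buf));
        hdr->argsz = sizeof(buf);
        hdr->flags = VFIO_DEVICE_FEATURE_SET |
                     VFIO_DEVICE_FEATURE_PCI_VF_TOKEN;
        memcpy(hdr->data, uuid, 16);            /* raw uuid bytes */

        return ioctl(device_fd, VFIO_DEVICE_FEATURE, hdr);
}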
static ssize_t vfio_pci_rw(struct vfio_pci_core_device *vdev, char __user *buf,
size_t count, loff_t *ppos, bool iswrite)
@@ -1891,8 +1908,8 @@ void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
}
EXPORT_SYMBOL_GPL(vfio_pci_core_unregister_device);
-static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
+pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
{
struct vfio_pci_core_device *vdev;
struct vfio_device *device;
@@ -1914,6 +1931,7 @@ static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_CAN_RECOVER;
}
+EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected);
int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
{
@@ -1936,7 +1954,7 @@ int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure);
const struct pci_error_handlers vfio_pci_core_err_handlers = {
- .error_detected = vfio_pci_aer_err_detected,
+ .error_detected = vfio_pci_core_aer_err_detected,
};
EXPORT_SYMBOL_GPL(vfio_pci_core_err_handlers);
@@ -2055,6 +2073,18 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
}
cur_mem = NULL;
+ /*
+ * pci_reset_bus() resets all the devices on the bus, some of which
+ * may be in a non-D0 power state. For those devices, pci_reset_bus()
+ * will internally set the power state to D0 without vfio driver
+ * involvement. For devices that have NoSoftRst-, the reset may
+ * clobber the PCI config space without restoring the original
+ * state (saved locally in 'vdev->pm_save').
+ */
+ list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
+ vfio_pci_set_power_state(cur, PCI_D0);
+
ret = pci_reset_bus(pdev);
err_undo:
@@ -2108,6 +2138,18 @@ static bool vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set)
if (!pdev)
return false;
+ /*
+ * pci_reset_bus() resets all the devices on the bus, some of which
+ * may be in a non-D0 power state. For those devices, pci_reset_bus()
+ * will internally set the power state to D0 without vfio driver
+ * involvement. For devices that have NoSoftRst-, the reset may
+ * clobber the PCI config space without restoring the original
+ * state (saved locally in 'vdev->pm_save').
+ */
+ list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
+ vfio_pci_set_power_state(cur, PCI_D0);
+
ret = pci_reset_bus(pdev);
if (ret)
return false;
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 57d3b2cbbd8e..82ac1569deb0 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -288,6 +288,7 @@ out:
return done;
}
+#ifdef CONFIG_VFIO_PCI_VGA
ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf,
size_t count, loff_t *ppos, bool iswrite)
{
@@ -355,6 +356,7 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf,
return done;
}
+#endif
static void vfio_pci_ioeventfd_do_write(struct vfio_pci_ioeventfd *ioeventfd,
bool test_mem)
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 735d1d344af9..a4555014bd1e 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -1557,15 +1557,303 @@ static int vfio_device_fops_release(struct inode *inode, struct file *filep)
return 0;
}
+/*
+ * vfio_mig_get_next_state - Compute the next step in the FSM
+ * @cur_fsm - The current state the device is in
+ * @new_fsm - The target state to reach
+ * @next_fsm - Pointer to the next step to get to new_fsm
+ *
+ * Return 0 upon success, otherwise -errno
+ * Upon success the next step in the state progression between cur_fsm and
+ * new_fsm will be set in next_fsm.
+ *
+ * This breaks down requests for combination transitions into smaller steps and
+ * returns the next step to get to new_fsm. The function may need to be called
+ * multiple times before reaching new_fsm.
+ */
+int vfio_mig_get_next_state(struct vfio_device *device,
+ enum vfio_device_mig_state cur_fsm,
+ enum vfio_device_mig_state new_fsm,
+ enum vfio_device_mig_state *next_fsm)
+{
+ enum { VFIO_DEVICE_NUM_STATES = VFIO_DEVICE_STATE_RUNNING_P2P + 1 };
+ /*
+ * The coding in this table requires the driver to implement the
+ * following FSM arcs:
+ * RESUMING -> STOP
+ * STOP -> RESUMING
+ * STOP -> STOP_COPY
+ * STOP_COPY -> STOP
+ *
+ * If P2P is supported then the driver must also implement these FSM
+ * arcs:
+ * RUNNING -> RUNNING_P2P
+ * RUNNING_P2P -> RUNNING
+ * RUNNING_P2P -> STOP
+ * STOP -> RUNNING_P2P
+ * Without P2P the driver must implement:
+ * RUNNING -> STOP
+ * STOP -> RUNNING
+ *
+ * The coding will step through multiple states for some combination
+ * transitions; if all optional features are supported, this means the
+ * following ones:
+ * RESUMING -> STOP -> RUNNING_P2P
+ * RESUMING -> STOP -> RUNNING_P2P -> RUNNING
+ * RESUMING -> STOP -> STOP_COPY
+ * RUNNING -> RUNNING_P2P -> STOP
+ * RUNNING -> RUNNING_P2P -> STOP -> RESUMING
+ * RUNNING -> RUNNING_P2P -> STOP -> STOP_COPY
+ * RUNNING_P2P -> STOP -> RESUMING
+ * RUNNING_P2P -> STOP -> STOP_COPY
+ * STOP -> RUNNING_P2P -> RUNNING
+ * STOP_COPY -> STOP -> RESUMING
+ * STOP_COPY -> STOP -> RUNNING_P2P
+ * STOP_COPY -> STOP -> RUNNING_P2P -> RUNNING
+ */
+ static const u8 vfio_from_fsm_table[VFIO_DEVICE_NUM_STATES][VFIO_DEVICE_NUM_STATES] = {
+ [VFIO_DEVICE_STATE_STOP] = {
+ [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
+ [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING_P2P,
+ [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY,
+ [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING,
+ [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
+ [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
+ },
+ [VFIO_DEVICE_STATE_RUNNING] = {
+ [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING_P2P,
+ [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING,
+ [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_RUNNING_P2P,
+ [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING_P2P,
+ [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
+ [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
+ },
+ [VFIO_DEVICE_STATE_STOP_COPY] = {
+ [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
+ [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP,
+ [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY,
+ [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP,
+ [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP,
+ [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
+ },
+ [VFIO_DEVICE_STATE_RESUMING] = {
+ [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
+ [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP,
+ [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP,
+ [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING,
+ [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP,
+ [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
+ },
+ [VFIO_DEVICE_STATE_RUNNING_P2P] = {
+ [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
+ [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING,
+ [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP,
+ [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP,
+ [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
+ [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
+ },
+ [VFIO_DEVICE_STATE_ERROR] = {
+ [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_ERROR,
+ [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_ERROR,
+ [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_ERROR,
+ [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_ERROR,
+ [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_ERROR,
+ [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
+ },
+ };
+
+ static const unsigned int state_flags_table[VFIO_DEVICE_NUM_STATES] = {
+ [VFIO_DEVICE_STATE_STOP] = VFIO_MIGRATION_STOP_COPY,
+ [VFIO_DEVICE_STATE_RUNNING] = VFIO_MIGRATION_STOP_COPY,
+ [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_MIGRATION_STOP_COPY,
+ [VFIO_DEVICE_STATE_RESUMING] = VFIO_MIGRATION_STOP_COPY,
+ [VFIO_DEVICE_STATE_RUNNING_P2P] =
+ VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P,
+ [VFIO_DEVICE_STATE_ERROR] = ~0U,
+ };
+
+ if (WARN_ON(cur_fsm >= ARRAY_SIZE(vfio_from_fsm_table) ||
+ (state_flags_table[cur_fsm] & device->migration_flags) !=
+ state_flags_table[cur_fsm]))
+ return -EINVAL;
+
+ if (new_fsm >= ARRAY_SIZE(vfio_from_fsm_table) ||
+ (state_flags_table[new_fsm] & device->migration_flags) !=
+ state_flags_table[new_fsm])
+ return -EINVAL;
+
+ /*
+ * Arcs touching optional and unsupported states are skipped over. The
+ * driver will instead see an arc from the original state to the next
+ * logical state, as per the above comment.
+ */
+ *next_fsm = vfio_from_fsm_table[cur_fsm][new_fsm];
+ while ((state_flags_table[*next_fsm] & device->migration_flags) !=
+ state_flags_table[*next_fsm])
+ *next_fsm = vfio_from_fsm_table[*next_fsm][new_fsm];
+
+ return (*next_fsm != VFIO_DEVICE_STATE_ERROR) ? 0 : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(vfio_mig_get_next_state);
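
Because vfio_mig_get_next_state() returns only one hop per call, a variant driver walks it in a loop until the requested state is reached, executing one of the mandatory arcs listed above at each step. A sketch of that loop, where my_dev_step() is a hypothetical per-driver hook:

static int my_set_state(struct vfio_device *device,
                        enum vfio_device_mig_state cur,
                        enum vfio_device_mig_state new)
{
        enum vfio_device_mig_state next;
        int ret;

        while (cur != new) {
                ret = vfio_mig_get_next_state(device, cur, new, &next);
                if (ret)
                        return ret;
                ret = my_dev_step(device, next);        /* hypothetical */
                if (ret)
                        return ret;
                cur = next;
        }
        return 0;
}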
+
+/*
+ * Convert the driver's struct file into an FD number and return it to userspace
+ */
+static int vfio_ioct_mig_return_fd(struct file *filp, void __user *arg,
+ struct vfio_device_feature_mig_state *mig)
+{
+ int ret;
+ int fd;
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0) {
+ ret = fd;
+ goto out_fput;
+ }
+
+ mig->data_fd = fd;
+ if (copy_to_user(arg, mig, sizeof(*mig))) {
+ ret = -EFAULT;
+ goto out_put_unused;
+ }
+ fd_install(fd, filp);
+ return 0;
+
+out_put_unused:
+ put_unused_fd(fd);
+out_fput:
+ fput(filp);
+ return ret;
+}
+
+static int
+vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device,
+ u32 flags, void __user *arg,
+ size_t argsz)
+{
+ size_t minsz =
+ offsetofend(struct vfio_device_feature_mig_state, data_fd);
+ struct vfio_device_feature_mig_state mig;
+ struct file *filp = NULL;
+ int ret;
+
+ if (!device->ops->migration_set_state ||
+ !device->ops->migration_get_state)
+ return -ENOTTY;
+
+ ret = vfio_check_feature(flags, argsz,
+ VFIO_DEVICE_FEATURE_SET |
+ VFIO_DEVICE_FEATURE_GET,
+ sizeof(mig));
+ if (ret != 1)
+ return ret;
+
+ if (copy_from_user(&mig, arg, minsz))
+ return -EFAULT;
+
+ if (flags & VFIO_DEVICE_FEATURE_GET) {
+ enum vfio_device_mig_state curr_state;
+
+ ret = device->ops->migration_get_state(device, &curr_state);
+ if (ret)
+ return ret;
+ mig.device_state = curr_state;
+ goto out_copy;
+ }
+
+ /* Handle the VFIO_DEVICE_FEATURE_SET */
+ filp = device->ops->migration_set_state(device, mig.device_state);
+ if (IS_ERR(filp) || !filp)
+ goto out_copy;
+
+ return vfio_ioct_mig_return_fd(filp, arg, &mig);
+out_copy:
+ mig.data_fd = -1;
+ if (copy_to_user(arg, &mig, sizeof(mig)))
+ return -EFAULT;
+ if (IS_ERR(filp))
+ return PTR_ERR(filp);
+ return 0;
+}
+
+static int vfio_ioctl_device_feature_migration(struct vfio_device *device,
+ u32 flags, void __user *arg,
+ size_t argsz)
+{
+ struct vfio_device_feature_migration mig = {
+ .flags = device->migration_flags,
+ };
+ int ret;
+
+ if (!device->ops->migration_set_state ||
+ !device->ops->migration_get_state)
+ return -ENOTTY;
+
+ ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET,
+ sizeof(mig));
+ if (ret != 1)
+ return ret;
+ if (copy_to_user(arg, &mig, sizeof(mig)))
+ return -EFAULT;
+ return 0;
+}
+
+static int vfio_ioctl_device_feature(struct vfio_device *device,
+ struct vfio_device_feature __user *arg)
+{
+ size_t minsz = offsetofend(struct vfio_device_feature, flags);
+ struct vfio_device_feature feature;
+
+ if (copy_from_user(&feature, arg, minsz))
+ return -EFAULT;
+
+ if (feature.argsz < minsz)
+ return -EINVAL;
+
+ /* Check unknown flags */
+ if (feature.flags &
+ ~(VFIO_DEVICE_FEATURE_MASK | VFIO_DEVICE_FEATURE_SET |
+ VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_PROBE))
+ return -EINVAL;
+
+ /* GET & SET are mutually exclusive except with PROBE */
+ if (!(feature.flags & VFIO_DEVICE_FEATURE_PROBE) &&
+ (feature.flags & VFIO_DEVICE_FEATURE_SET) &&
+ (feature.flags & VFIO_DEVICE_FEATURE_GET))
+ return -EINVAL;
+
+ switch (feature.flags & VFIO_DEVICE_FEATURE_MASK) {
+ case VFIO_DEVICE_FEATURE_MIGRATION:
+ return vfio_ioctl_device_feature_migration(
+ device, feature.flags, arg->data,
+ feature.argsz - minsz);
+ case VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE:
+ return vfio_ioctl_device_feature_mig_device_state(
+ device, feature.flags, arg->data,
+ feature.argsz - minsz);
+ default:
+ if (unlikely(!device->ops->device_feature))
+ return -EINVAL;
+ return device->ops->device_feature(device, feature.flags,
+ arg->data,
+ feature.argsz - minsz);
+ }
+}
+
static long vfio_device_fops_unl_ioctl(struct file *filep,
unsigned int cmd, unsigned long arg)
{
struct vfio_device *device = filep->private_data;
- if (unlikely(!device->ops->ioctl))
- return -EINVAL;
-
- return device->ops->ioctl(device, cmd, arg);
+ switch (cmd) {
+ case VFIO_DEVICE_FEATURE:
+ return vfio_ioctl_device_feature(device, (void __user *)arg);
+ default:
+ if (unlikely(!device->ops->ioctl))
+ return -EINVAL;
+ return device->ops->ioctl(device, cmd, arg);
+ }
}
static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
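
With VFIO_DEVICE_FEATURE now dispatched in the core, userspace can probe migration support without driver-specific ioctls. A hedged sketch of the GET side (device_fd is a placeholder):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int query_migration_flags(int device_fd, __u64 *flags)
{
        char buf[sizeof(struct vfio_device_feature) +
                 sizeof(struct vfio_device_feature_migration)];
        struct vfio_device_feature *hdr = (void *)buf;
        struct vfio_device_feature_migration *mig = (void *)hdr->data;

        memset(buf, 0, sizeof(buf));
        hdr->argsz = sizeof(buf);
        hdr->flags = VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_MIGRATION;

        if (ioctl(device_fd, VFIO_DEVICE_FEATURE, hdr))
                return -1;

        *flags = mig->flags;    /* VFIO_MIGRATION_STOP_COPY, _P2P, ... */
        return 0;
}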
diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c
index 670d56c879e5..40b098320b2a 100644
--- a/drivers/vhost/iotlb.c
+++ b/drivers/vhost/iotlb.c
@@ -57,6 +57,17 @@ int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb,
if (last < start)
return -EFAULT;
+ /* If the range being mapped is [0, ULONG_MAX], split it into two
+ * entries; otherwise its size would overflow u64.
+ */
+ if (start == 0 && last == ULONG_MAX) {
+ u64 mid = last / 2;
+
+ vhost_iotlb_add_range_ctx(iotlb, start, mid, addr, perm, opaque);
+ addr += mid + 1;
+ start = mid + 1;
+ }
+
if (iotlb->limit &&
iotlb->nmaps == iotlb->limit &&
iotlb->flags & VHOST_IOTLB_FLAG_RETIRE) {
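
The split above works around unsigned wrap-around: the size of [0, ULONG_MAX] is ULONG_MAX - 0 + 1, which is 0 in 64-bit arithmetic. A tiny standalone demonstration of the wrap:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t start = 0, last = UINT64_MAX;
        uint64_t size = last - start + 1;       /* wraps to 0 */

        printf("size = %llu\n", (unsigned long long)size);      /* prints 0 */
        return 0;
}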
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 28ef323882fb..792ab5f23647 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -473,6 +473,7 @@ static void vhost_tx_batch(struct vhost_net *net,
goto signal_used;
msghdr->msg_control = &ctl;
+ msghdr->msg_controllen = sizeof(ctl);
err = sock->ops->sendmsg(sock, msghdr, 0);
if (unlikely(err < 0)) {
vq_err(&nvq->vq, "Fail to batch sending packets\n");
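
The added msg_controllen assignment matters because sendmsg() implementations only honor control data that lies within msg_controllen bytes; setting msg_control without the matching length leaves the control data invisible or exposes a stale length. The general pairing, shown in a standard userspace SCM_RIGHTS sketch (unrelated to vhost itself):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Send one byte plus a file descriptor; note that msg_control and
 * msg_controllen are always set as a pair. */
static ssize_t send_fd(int sock, int fd)
{
        char data = 'x';
        struct iovec iov = { .iov_base = &data, .iov_len = 1 };
        union {
                char buf[CMSG_SPACE(sizeof(int))];
                struct cmsghdr align;
        } u;
        struct msghdr msg = {
                .msg_iov = &iov,
                .msg_iovlen = 1,
                .msg_control = u.buf,
                .msg_controllen = sizeof(u.buf),
        };
        struct cmsghdr *cmsg;

        memset(u.buf, 0, sizeof(u.buf));
        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

        return sendmsg(sock, &msg, 0);
}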
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 851539807bc9..ec5249e8c32d 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -286,7 +286,7 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
if (copy_from_user(&features, featurep, sizeof(features)))
return -EFAULT;
- if (vdpa_set_features(vdpa, features, false))
+ if (vdpa_set_features(vdpa, features))
return -EINVAL;
return 0;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 59edb5a1ffe2..1768362115c6 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1170,6 +1170,13 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
goto done;
}
+ if ((msg.type == VHOST_IOTLB_UPDATE ||
+ msg.type == VHOST_IOTLB_INVALIDATE) &&
+ msg.size == 0) {
+ ret = -EINVAL;
+ goto done;
+ }
+
if (dev->msg_handler)
ret = dev->msg_handler(dev, &msg);
else
@@ -1981,7 +1988,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
return 0;
}
-static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
+static int vhost_update_avail_event(struct vhost_virtqueue *vq)
{
if (vhost_put_avail_event(vq))
return -EFAULT;
@@ -2527,7 +2534,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
return false;
}
} else {
- r = vhost_update_avail_event(vq, vq->avail_idx);
+ r = vhost_update_avail_event(vq);
if (r) {
vq_err(vq, "Failed to update avail event index at %p: %d\n",
vhost_avail_event(vq), r);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 37f0b4274113..e6c9d41db1de 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -753,7 +753,8 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
/* Iterating over all connections for all CIDs to find orphans is
* inefficient. Room for improvement here. */
- vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
+ vsock_for_each_connected_socket(&vhost_transport.transport,
+ vhost_vsock_reset_orphans);
/* Don't check the owner, because we are in the release path, so we
* need to stop the vsock device in any case.
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
index 8a4361e95a11..522dd81110b8 100644
--- a/drivers/video/backlight/ams369fg06.c
+++ b/drivers/video/backlight/ams369fg06.c
@@ -506,12 +506,11 @@ static int ams369fg06_probe(struct spi_device *spi)
return 0;
}
-static int ams369fg06_remove(struct spi_device *spi)
+static void ams369fg06_remove(struct spi_device *spi)
{
struct ams369fg06 *lcd = spi_get_drvdata(spi);
ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 33f5d80495e6..0a57033ae31d 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -542,7 +542,7 @@ static int corgi_lcd_probe(struct spi_device *spi)
return 0;
}
-static int corgi_lcd_remove(struct spi_device *spi)
+static void corgi_lcd_remove(struct spi_device *spi)
{
struct corgi_lcd *lcd = spi_get_drvdata(spi);
@@ -550,7 +550,6 @@ static int corgi_lcd_remove(struct spi_device *spi)
lcd->bl_dev->props.brightness = 0;
backlight_update_status(lcd->bl_dev);
corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN);
- return 0;
}
static struct spi_driver corgi_lcd_driver = {
diff --git a/drivers/video/backlight/ili922x.c b/drivers/video/backlight/ili922x.c
index 328aba9cddad..e7b6bd827986 100644
--- a/drivers/video/backlight/ili922x.c
+++ b/drivers/video/backlight/ili922x.c
@@ -526,10 +526,9 @@ static int ili922x_probe(struct spi_device *spi)
return 0;
}
-static int ili922x_remove(struct spi_device *spi)
+static void ili922x_remove(struct spi_device *spi)
{
ili922x_poweroff(spi);
- return 0;
}
static struct spi_driver ili922x_driver = {
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 46f97d1c3d21..cc763cf15f53 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -223,12 +223,11 @@ static int l4f00242t03_probe(struct spi_device *spi)
return 0;
}
-static int l4f00242t03_remove(struct spi_device *spi)
+static void l4f00242t03_remove(struct spi_device *spi)
{
struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
- return 0;
}
static void l4f00242t03_shutdown(struct spi_device *spi)
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index e8b185bb6f5e..1d17c439430e 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -594,7 +594,6 @@ static int lm3630a_remove(struct i2c_client *client)
if (pchip->irq) {
free_irq(pchip->irq, pchip);
- flush_workqueue(pchip->irqthread);
destroy_workqueue(pchip->irqthread);
}
return 0;
diff --git a/drivers/video/backlight/lms501kf03.c b/drivers/video/backlight/lms501kf03.c
index f949b66dce1b..5c46df8022bf 100644
--- a/drivers/video/backlight/lms501kf03.c
+++ b/drivers/video/backlight/lms501kf03.c
@@ -364,12 +364,11 @@ static int lms501kf03_probe(struct spi_device *spi)
return 0;
}
-static int lms501kf03_remove(struct spi_device *spi)
+static void lms501kf03_remove(struct spi_device *spi)
{
struct lms501kf03 *lcd = spi_get_drvdata(spi);
lms501kf03_power(lcd, FB_BLANK_POWERDOWN);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c
index 5cbf621e48bd..b6d373af6e3f 100644
--- a/drivers/video/backlight/ltv350qv.c
+++ b/drivers/video/backlight/ltv350qv.c
@@ -255,12 +255,11 @@ static int ltv350qv_probe(struct spi_device *spi)
return 0;
}
-static int ltv350qv_remove(struct spi_device *spi)
+static void ltv350qv_remove(struct spi_device *spi)
{
struct ltv350qv *lcd = spi_get_drvdata(spi);
ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
index 306bcc6ccb92..527210e85795 100644
--- a/drivers/video/backlight/qcom-wled.c
+++ b/drivers/video/backlight/qcom-wled.c
@@ -1734,6 +1734,7 @@ static const struct of_device_id wled_match_table[] = {
{ .compatible = "qcom,pmi8994-wled", .data = (void *)4 },
{ .compatible = "qcom,pmi8998-wled", .data = (void *)4 },
{ .compatible = "qcom,pm660l-wled", .data = (void *)4 },
+ { .compatible = "qcom,pm6150l-wled", .data = (void *)5 },
{ .compatible = "qcom,pm8150l-wled", .data = (void *)5 },
{}
};
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 0de044dcafd5..fc6fbaf85594 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -397,12 +397,11 @@ static int tdo24m_probe(struct spi_device *spi)
return 0;
}
-static int tdo24m_remove(struct spi_device *spi)
+static void tdo24m_remove(struct spi_device *spi)
{
struct tdo24m *lcd = spi_get_drvdata(spi);
tdo24m_power(lcd, FB_BLANK_POWERDOWN);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 38765544345b..23d6c6bf0f54 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -232,15 +232,13 @@ err_register:
return ret;
}
-static int tosa_lcd_remove(struct spi_device *spi)
+static void tosa_lcd_remove(struct spi_device *spi)
{
struct tosa_lcd_data *data = spi_get_drvdata(spi);
i2c_unregister_device(data->i2c);
tosa_lcd_tg_off(data);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/video/backlight/vgg2432a4.c b/drivers/video/backlight/vgg2432a4.c
index 3567b45f9ba9..bfc1913e8b55 100644
--- a/drivers/video/backlight/vgg2432a4.c
+++ b/drivers/video/backlight/vgg2432a4.c
@@ -233,11 +233,9 @@ static int vgg2432a4_probe(struct spi_device *spi)
return 0;
}
-static int vgg2432a4_remove(struct spi_device *spi)
+static void vgg2432a4_remove(struct spi_device *spi)
{
ili9320_remove(spi_get_drvdata(spi));
-
- return 0;
}
static void vgg2432a4_shutdown(struct spi_device *spi)
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index fcc46380e7c9..40c50fa2dd70 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -9,7 +9,7 @@ config VGA_CONSOLE
bool "VGA text console" if EXPERT || !X86
depends on !4xx && !PPC_8xx && !SPARC && !M68K && !PARISC && !SUPERH && \
(!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) && \
- !ARM64 && !ARC && !MICROBLAZE && !OPENRISC && !NDS32 && !S390 && !UML
+ !ARM64 && !ARC && !MICROBLAZE && !OPENRISC && !S390 && !UML
default y
help
Saying Y here will allow you to use Linux in text mode through a
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 6ed5e608dd04..93b8d84c34cf 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -829,7 +829,7 @@ config FB_PVR2
You can pass several parameters to the driver at boot time or at
module load time. The parameters look like "video=pvr2:XXX", where
the meaning of XXX can be found at the end of the main source file
- (<file:drivers/video/pvr2fb.c>). Please see the file
+ (<file:drivers/video/fbdev/pvr2fb.c>). Please see the file
<file:Documentation/fb/pvr2fb.rst>.
config FB_OPENCORES
diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c
index e3812a8ff55a..52a35b661643 100644
--- a/drivers/video/fbdev/atafb.c
+++ b/drivers/video/fbdev/atafb.c
@@ -76,8 +76,6 @@
#define SWITCH_SND7 0x80
#define SWITCH_NONE 0x00
-#define up(x, r) (((x) + (r) - 1) & ~((r)-1))
-
static int default_par; /* default resolution (0=none) */
@@ -487,8 +485,8 @@ static struct fb_videomode atafb_modedb[] __initdata = {
"tt-mid", 60, 640, 480, 31041, 120, 100, 8, 16, 140, 30,
0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
}, {
- /* 1280x960, 29 kHz, 60 Hz (TT high) */
- "tt-high", 57, 640, 960, 31041, 120, 100, 8, 16, 140, 30,
+ /* 1280x960, 72 kHz, 72 Hz (TT high) */
+ "tt-high", 57, 1280, 960, 7760, 260, 60, 36, 4, 192, 4,
0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
},
@@ -1649,12 +1647,12 @@ static int falcon_pan_display(struct fb_var_screeninfo *var,
int bpp = info->var.bits_per_pixel;
if (bpp == 1)
- var->xoffset = up(var->xoffset, 32);
+ var->xoffset = round_up(var->xoffset, 32);
if (bpp != 16)
par->hw.falcon.xoffset = var->xoffset & 15;
else {
par->hw.falcon.xoffset = 0;
- var->xoffset = up(var->xoffset, 2);
+ var->xoffset = round_up(var->xoffset, 2);
}
par->hw.falcon.line_offset = bpp *
(info->var.xres_virtual - info->var.xres) / 16;
@@ -1683,9 +1681,9 @@ static int falcon_setcolreg(unsigned int regno, unsigned int red,
((blue & 0xfc00) >> 8));
if (regno < 16) {
shifter_tt.color_reg[regno] =
- (((red & 0xe000) >> 13) | ((red & 0x1000) >> 12) << 8) |
- (((green & 0xe000) >> 13) | ((green & 0x1000) >> 12) << 4) |
- ((blue & 0xe000) >> 13) | ((blue & 0x1000) >> 12);
+ ((((red & 0xe000) >> 13) | ((red & 0x1000) >> 12)) << 8) |
+ ((((green & 0xe000) >> 13) | ((green & 0x1000) >> 12)) << 4) |
+ ((blue & 0xe000) >> 13) | ((blue & 0x1000) >> 12);
((u32 *)info->pseudo_palette)[regno] = ((red & 0xf800) |
((green & 0xfc00) >> 5) |
((blue & 0xf800) >> 11));
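
This hunk (and the stste_setcolreg one just below) is a pure operator-precedence fix: '<<' binds tighter than '|', so the old expressions shifted only the single low-bit term instead of the whole reassembled nibble. A small standalone check of the two groupings:

#include <assert.h>

int main(void)
{
        unsigned int a = 0x7, b = 0x1;

        assert((a | b << 8) == 0x107);          /* old: a | (b << 8) */
        assert(((a | b) << 8) == 0x700);        /* intended grouping */
        return 0;
}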
@@ -1971,9 +1969,9 @@ static int stste_setcolreg(unsigned int regno, unsigned int red,
green >>= 12;
if (ATARIHW_PRESENT(EXTD_SHIFTER))
shifter_tt.color_reg[regno] =
- (((red & 0xe) >> 1) | ((red & 1) << 3) << 8) |
- (((green & 0xe) >> 1) | ((green & 1) << 3) << 4) |
- ((blue & 0xe) >> 1) | ((blue & 1) << 3);
+ ((((red & 0xe) >> 1) | ((red & 1) << 3)) << 8) |
+ ((((green & 0xe) >> 1) | ((green & 1) << 3)) << 4) |
+ ((blue & 0xe) >> 1) | ((blue & 1) << 3);
else
shifter_tt.color_reg[regno] =
((red & 0xe) << 7) |
@@ -2268,7 +2266,7 @@ static int pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
if (!fbhw->set_screen_base ||
(!ATARIHW_PRESENT(EXTD_SHIFTER) && var->xoffset))
return -EINVAL;
- var->xoffset = up(var->xoffset, 16);
+ var->xoffset = round_up(var->xoffset, 16);
par->screen_base = screen_base +
(var->yoffset * info->var.xres_virtual + var->xoffset)
* info->var.bits_per_pixel / 8;
@@ -2406,16 +2404,6 @@ static void atafb_set_disp(struct fb_info *info)
atari_stram_to_virt(info->fix.smem_start));
}
-static int atafb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
- u_int transp, struct fb_info *info)
-{
- red >>= 8;
- green >>= 8;
- blue >>= 8;
-
- return info->fbops->fb_setcolreg(regno, red, green, blue, transp, info);
-}
-
static int
atafb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
@@ -2726,7 +2714,6 @@ static struct fb_ops atafb_ops = {
.owner = THIS_MODULE,
.fb_check_var = atafb_check_var,
.fb_set_par = atafb_set_par,
- .fb_setcolreg = atafb_setcolreg,
.fb_blank = atafb_blank,
.fb_pan_display = atafb_pan_display,
.fb_fillrect = atafb_fillrect,
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index 355b6120dc4f..1fc8de4ecbeb 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -1062,15 +1062,16 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&info->modelist);
- if (pdev->dev.of_node) {
- ret = atmel_lcdfb_of_init(sinfo);
- if (ret)
- goto free_info;
- } else {
+ if (!pdev->dev.of_node) {
dev_err(dev, "cannot get default configuration\n");
goto free_info;
}
+ ret = atmel_lcdfb_of_init(sinfo);
+ if (ret)
+ goto free_info;
+
+ ret = -ENODEV;
if (!sinfo->config)
goto free_info;
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index e6a48689c294..6ff16d3132e5 100644
--- a/drivers/video/fbdev/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
@@ -952,7 +952,7 @@ static void aty128_timings(struct aty128fb_par *par)
u32 x_mpll_ref_fb_div;
u32 xclk_cntl;
u32 Nx, M;
- unsigned PostDivSet[] = { 0, 1, 2, 4, 8, 3, 6, 12 };
+ static const unsigned int PostDivSet[] = { 0, 1, 2, 4, 8, 3, 6, 12 };
#endif
if (!par->constants.ref_clk)
@@ -1321,8 +1321,10 @@ static void aty128_set_pll(struct aty128_pll *pll,
{
u32 div3;
- unsigned char post_conv[] = /* register values for post dividers */
- { 2, 0, 1, 4, 2, 2, 6, 2, 3, 2, 2, 2, 7 };
+ /* register values for post dividers */
+ static const unsigned char post_conv[] = {
+ 2, 0, 1, 4, 2, 2, 6, 2, 3, 2, 2, 2, 7
+ };
/* select PPLL_DIV_3 */
aty_st_le32(CLOCK_CNTL_INDEX, aty_ld_le32(CLOCK_CNTL_INDEX) | (3 << 8));
@@ -1360,7 +1362,7 @@ static int aty128_var_to_pll(u32 period_in_ps, struct aty128_pll *pll,
const struct aty128fb_par *par)
{
const struct aty128_constants c = par->constants;
- unsigned char post_dividers[] = {1,2,4,8,3,6,12};
+ static const unsigned char post_dividers[] = { 1, 2, 4, 8, 3, 6, 12 };
u32 output_freq;
u32 vclk; /* in .01 MHz */
int i = 0;
diff --git a/drivers/video/fbdev/aty/mach64_ct.c b/drivers/video/fbdev/aty/mach64_ct.c
index 011b07e44e0d..e967536af166 100644
--- a/drivers/video/fbdev/aty/mach64_ct.c
+++ b/drivers/video/fbdev/aty/mach64_ct.c
@@ -22,13 +22,11 @@ static u32 aty_pll_to_var_ct(const struct fb_info *info, const union aty_pll *pl
u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par)
{
- u8 res;
/* write addr byte */
aty_st_8(CLOCK_CNTL_ADDR, (offset << 2) & PLL_ADDR, par);
/* read the register value */
- res = aty_ld_8(CLOCK_CNTL_DATA, par);
- return res;
+ return aty_ld_8(CLOCK_CNTL_DATA, par);
}
static void aty_st_pll_ct(int offset, u8 val, const struct atyfb_par *par)
diff --git a/drivers/video/fbdev/aty/mach64_gx.c b/drivers/video/fbdev/aty/mach64_gx.c
index 9c37e28fb78b..d06d24830080 100644
--- a/drivers/video/fbdev/aty/mach64_gx.c
+++ b/drivers/video/fbdev/aty/mach64_gx.c
@@ -352,10 +352,8 @@ static int aty_var_to_pll_18818(const struct fb_info *info, u32 vclk_per,
post_divider = 1;
if (MHz100 > MAX_FREQ_2595) {
- MHz100 = MAX_FREQ_2595;
return -EINVAL;
} else if (MHz100 < ABS_MIN_FREQ_2595) {
- program_bits = 0; /* MHz100 = 257 */
return -EINVAL;
} else {
while (MHz100 < MIN_FREQ_2595) {
diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c
index 37a6512feda0..52f731a61482 100644
--- a/drivers/video/fbdev/au1100fb.c
+++ b/drivers/video/fbdev/au1100fb.c
@@ -239,7 +239,7 @@ int au1100fb_fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned
u32 value;
fbdev = to_au1100fb_device(fbi);
- palette = fbdev->regs->lcd_pallettebase;
+ palette = fbdev->regs->lcd_palettebase;
if (regno > (AU1100_LCD_NBR_PALETTE_ENTRIES - 1))
return -EINVAL;
diff --git a/drivers/video/fbdev/au1100fb.h b/drivers/video/fbdev/au1100fb.h
index e7239bceefd3..79f4048726f1 100644
--- a/drivers/video/fbdev/au1100fb.h
+++ b/drivers/video/fbdev/au1100fb.h
@@ -92,7 +92,7 @@ struct au1100fb_regs
u32 lcd_pwmdiv;
u32 lcd_pwmhi;
u32 reserved[(0x0400-0x002C)/4];
- u32 lcd_pallettebase[256];
+ u32 lcd_palettebase[256];
};
struct au1100fb_device {
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index c00e01a17368..81c315454428 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1233,8 +1233,8 @@ static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct au1200fb_device *fbdev = info->par;
- return dma_mmap_attrs(fbdev->dev, vma, fbdev->fb_mem, fbdev->fb_phys,
- fbdev->fb_len, 0);
+ return dma_mmap_coherent(fbdev->dev, vma,
+ fbdev->fb_mem, fbdev->fb_phys, fbdev->fb_len);
}
static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index 93802abbbc72..3d47c347b897 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -469,7 +469,7 @@ static int cirrusfb_check_mclk(struct fb_info *info, long freq)
return 0;
}
-static int cirrusfb_check_pixclock(const struct fb_var_screeninfo *var,
+static int cirrusfb_check_pixclock(struct fb_var_screeninfo *var,
struct fb_info *info)
{
long freq;
@@ -478,9 +478,7 @@ static int cirrusfb_check_pixclock(const struct fb_var_screeninfo *var,
unsigned maxclockidx = var->bits_per_pixel >> 3;
/* convert from ps to kHz */
- freq = PICOS2KHZ(var->pixclock);
-
- dev_dbg(info->device, "desired pixclock: %ld kHz\n", freq);
+ freq = PICOS2KHZ(var->pixclock ? : 1);
maxclock = cirrusfb_board_info[cinfo->btype].maxclock[maxclockidx];
cinfo->multiplexing = 0;
@@ -488,11 +486,13 @@ static int cirrusfb_check_pixclock(const struct fb_var_screeninfo *var,
/* If the frequency is greater than we can support, we might be able
* to use multiplexing for the video mode */
if (freq > maxclock) {
- dev_err(info->device,
- "Frequency greater than maxclock (%ld kHz)\n",
- maxclock);
- return -EINVAL;
+ var->pixclock = KHZ2PICOS(maxclock);
+
+ while ((freq = PICOS2KHZ(var->pixclock)) > maxclock)
+ var->pixclock++;
}
+ dev_dbg(info->device, "desired pixclock: %ld kHz\n", freq);
+
/*
* Additional constraint: 8bpp uses DAC clock doubling to allow maximum
* pixel clock
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index 509311471d51..bd59e7b11ed5 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -67,7 +67,9 @@
#define out_8(addr, val) (void)(val)
#define in_le32(addr) 0
#define out_le32(addr, val) (void)(val)
+#ifndef pgprot_cached_wthru
#define pgprot_cached_wthru(prot) (prot)
+#endif
#else
static void invalid_vram_cache(void __force *addr)
{
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 98b0f23bf5e2..842c66b3e33d 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -169,15 +169,8 @@ static const struct vm_operations_struct fb_deferred_io_vm_ops = {
.page_mkwrite = fb_deferred_io_mkwrite,
};
-static int fb_deferred_io_set_page_dirty(struct page *page)
-{
- if (!PageDirty(page))
- SetPageDirty(page);
- return 0;
-}
-
static const struct address_space_operations fb_deferred_io_aops = {
- .set_page_dirty = fb_deferred_io_set_page_dirty,
+ .dirty_folio = noop_dirty_folio,
};
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
diff --git a/drivers/video/fbdev/core/fbcvt.c b/drivers/video/fbdev/core/fbcvt.c
index 55d2bd0ce5c0..64843464c661 100644
--- a/drivers/video/fbdev/core/fbcvt.c
+++ b/drivers/video/fbdev/core/fbcvt.c
@@ -214,9 +214,11 @@ static u32 fb_cvt_aspect_ratio(struct fb_cvt_data *cvt)
static void fb_cvt_print_name(struct fb_cvt_data *cvt)
{
u32 pixcount, pixcount_mod;
- int cnt = 255, offset = 0, read = 0;
- u8 *buf = kzalloc(256, GFP_KERNEL);
+ int size = 256;
+ int off = 0;
+ u8 *buf;
+ buf = kzalloc(size, GFP_KERNEL);
if (!buf)
return;
@@ -224,43 +226,30 @@ static void fb_cvt_print_name(struct fb_cvt_data *cvt)
pixcount_mod = (cvt->xres * (cvt->yres/cvt->interlace)) % 1000000;
pixcount_mod /= 1000;
- read = snprintf(buf+offset, cnt, "fbcvt: %dx%d@%d: CVT Name - ",
- cvt->xres, cvt->yres, cvt->refresh);
- offset += read;
- cnt -= read;
+ off += scnprintf(buf + off, size - off, "fbcvt: %dx%d@%d: CVT Name - ",
+ cvt->xres, cvt->yres, cvt->refresh);
- if (cvt->status)
- snprintf(buf+offset, cnt, "Not a CVT standard - %d.%03d Mega "
- "Pixel Image\n", pixcount, pixcount_mod);
- else {
- if (pixcount) {
- read = snprintf(buf+offset, cnt, "%d", pixcount);
- cnt -= read;
- offset += read;
- }
+ if (cvt->status) {
+ off += scnprintf(buf + off, size - off,
+ "Not a CVT standard - %d.%03d Mega Pixel Image\n",
+ pixcount, pixcount_mod);
+ } else {
+ if (pixcount)
+ off += scnprintf(buf + off, size - off, "%d", pixcount);
- read = snprintf(buf+offset, cnt, ".%03dM", pixcount_mod);
- cnt -= read;
- offset += read;
+ off += scnprintf(buf + off, size - off, ".%03dM", pixcount_mod);
if (cvt->aspect_ratio == 0)
- read = snprintf(buf+offset, cnt, "3");
+ off += scnprintf(buf + off, size - off, "3");
else if (cvt->aspect_ratio == 3)
- read = snprintf(buf+offset, cnt, "4");
+ off += scnprintf(buf + off, size - off, "4");
else if (cvt->aspect_ratio == 1 || cvt->aspect_ratio == 4)
- read = snprintf(buf+offset, cnt, "9");
+ off += scnprintf(buf + off, size - off, "9");
else if (cvt->aspect_ratio == 2)
- read = snprintf(buf+offset, cnt, "A");
- else
- read = 0;
- cnt -= read;
- offset += read;
-
- if (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK) {
- read = snprintf(buf+offset, cnt, "-R");
- cnt -= read;
- offset += read;
- }
+ off += scnprintf(buf + off, size - off, "A");
+
+ if (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK)
+ off += scnprintf(buf + off, size - off, "-R");
}
printk(KERN_INFO "%s\n", buf);
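
The snprintf()-to-scnprintf() conversion is the key part of this hunk: snprintf() returns the length that would have been written had the buffer been large enough, so accumulating its return value can push 'offset' past the buffer, while scnprintf() returns the bytes actually stored. A userspace illustration of the would-have-written semantics (scnprintf() itself is kernel-only):

#include <stdio.h>

int main(void)
{
        char buf[8];
        int r = snprintf(buf, sizeof(buf), "0123456789");

        /* prints "returned 10, stored 7" - the return value exceeds
         * the buffer, which is exactly what broke the old bookkeeping */
        printf("returned %d, stored %d\n", r, (int)sizeof(buf) - 1);
        return 0;
}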
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index ad9aac06427a..34d6bb1bf82e 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -397,18 +397,14 @@ static void fb_rotate_logo(struct fb_info *info, u8 *dst,
} else if (rotate == FB_ROTATE_CW) {
fb_rotate_logo_cw(image->data, dst, image->width,
image->height);
- tmp = image->width;
- image->width = image->height;
- image->height = tmp;
+ swap(image->width, image->height);
tmp = image->dy;
image->dy = image->dx;
image->dx = info->var.xres - image->width - tmp;
} else if (rotate == FB_ROTATE_CCW) {
fb_rotate_logo_ccw(image->data, dst, image->width,
image->height);
- tmp = image->width;
- image->width = image->height;
- image->height = tmp;
+ swap(image->width, image->height);
tmp = image->dx;
image->dx = image->dy;
image->dy = info->var.yres - image->height - tmp;
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index 005ac3c17aa1..ae76a2111c77 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -1354,10 +1354,9 @@ static int fb_probe(struct platform_device *device)
return PTR_ERR(da8xx_fb_reg_base);
tmp_lcdc_clk = devm_clk_get(&device->dev, "fck");
- if (IS_ERR(tmp_lcdc_clk)) {
- dev_err(&device->dev, "Can not get device clock\n");
- return PTR_ERR(tmp_lcdc_clk);
- }
+ if (IS_ERR(tmp_lcdc_clk))
+ return dev_err_probe(&device->dev, PTR_ERR(tmp_lcdc_clk),
+ "Can not get device clock\n");
pm_runtime_enable(&device->dev);
pm_runtime_get_sync(&device->dev);
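
Several hunks in this region convert dev_err()-plus-return to dev_err_probe(), which logs and returns the error in one step and stays quiet for -EPROBE_DEFER (the deferral reason is recorded for debugfs instead). The resulting idiom, sketched for a hypothetical probe:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)       /* hypothetical */
{
        struct clk *clk;

        clk = devm_clk_get(&pdev->dev, "fck");
        if (IS_ERR(clk))
                /* logs (unless deferring) and returns the error code */
                return dev_err_probe(&pdev->dev, PTR_ERR(clk),
                                     "Can not get device clock\n");

        return 0;
}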
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index ad598257ab38..68288756ffff 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -1083,6 +1083,8 @@ static int imxfb_remove(struct platform_device *pdev)
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
imxfb_disable_controller(fbi);
diff --git a/drivers/video/fbdev/kyro/STG4000InitDevice.c b/drivers/video/fbdev/kyro/STG4000InitDevice.c
index 21875d3c2dc2..edfa0a04854d 100644
--- a/drivers/video/fbdev/kyro/STG4000InitDevice.c
+++ b/drivers/video/fbdev/kyro/STG4000InitDevice.c
@@ -124,7 +124,7 @@ u32 ProgramClock(u32 refClock,
u32 ulScore, ulPhaseScore, ulVcoScore;
u32 ulTmp = 0, ulVCO;
u32 ulScaleClockReq, ulMinClock, ulMaxClock;
- u32 ODValues[] = { 1, 2, 0 };
+ static const unsigned char ODValues[] = { 1, 2, 0 };
/* Translate clock in Hz */
coreClock *= 100; /* in Hz */
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 5c82611e93d9..236521b19daf 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -1377,7 +1377,7 @@ static struct video_board vbG200 = {
.lowlevel = &matrox_G100
};
static struct video_board vbG200eW = {
- .maxvram = 0x800000,
+ .maxvram = 0x100000,
.maxdisplayable = 0x800000,
.accelID = FB_ACCEL_MATROX_MGAG200,
.lowlevel = &matrox_G100
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
index d40b806461ca..61aed7fc0b8d 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
@@ -132,7 +132,7 @@ static void mb86290fb_imageblit8(u32 *cmd, u16 step, u16 dx, u16 dy,
cmd[2] = (height << 16) | width;
i = 0;
- line = ptr = image->data;
+ line = image->data;
bytes = image->width;
while (i < height) {
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
index 061a105afb86..a9df8ee79810 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
@@ -514,7 +514,8 @@ static int mmphw_probe(struct platform_device *pdev)
/* get clock */
ctrl->clk = devm_clk_get(ctrl->dev, mi->clk_name);
if (IS_ERR(ctrl->clk)) {
- dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name);
+ dev_err_probe(ctrl->dev, PTR_ERR(ctrl->clk),
+ "unable to get clk %s\n", mi->clk_name);
ret = -ENOENT;
goto failed;
}
diff --git a/drivers/video/fbdev/nvidia/nv_i2c.c b/drivers/video/fbdev/nvidia/nv_i2c.c
index d7994a173245..0b48965a6420 100644
--- a/drivers/video/fbdev/nvidia/nv_i2c.c
+++ b/drivers/video/fbdev/nvidia/nv_i2c.c
@@ -86,7 +86,7 @@ static int nvidia_setup_i2c_bus(struct nvidia_i2c_chan *chan, const char *name,
{
int rc;
- strcpy(chan->adapter.name, name);
+ strscpy(chan->adapter.name, name, sizeof(chan->adapter.name));
chan->adapter.owner = THIS_MODULE;
chan->adapter.class = i2c_class;
chan->adapter.algo_data = &chan->algo;
diff --git a/drivers/video/fbdev/ocfb.c b/drivers/video/fbdev/ocfb.c
index bfa4ed421148..da7e1457e58f 100644
--- a/drivers/video/fbdev/ocfb.c
+++ b/drivers/video/fbdev/ocfb.c
@@ -387,7 +387,7 @@ static int ocfb_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id ocfb_match[] = {
+static const struct of_device_id ocfb_match[] = {
{ .compatible = "opencores,ocfb", },
{},
};
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index 4501e848a36f..afdb6aa48add 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -412,7 +412,7 @@ static void __init offb_init_fb(const char *name,
info = framebuffer_alloc(sizeof(u32) * 16, NULL);
- if (info == 0) {
+ if (!info) {
release_mem_region(res_start, res_size);
return;
}
diff --git a/drivers/video/fbdev/omap/lcd_ams_delta.c b/drivers/video/fbdev/omap/lcd_ams_delta.c
index 8e54aae544a0..bbf871f9d862 100644
--- a/drivers/video/fbdev/omap/lcd_ams_delta.c
+++ b/drivers/video/fbdev/omap/lcd_ams_delta.c
@@ -131,18 +131,14 @@ static int ams_delta_panel_probe(struct platform_device *pdev)
int ret;
gpiod_vblen = devm_gpiod_get(&pdev->dev, "vblen", GPIOD_OUT_LOW);
- if (IS_ERR(gpiod_vblen)) {
- ret = PTR_ERR(gpiod_vblen);
- dev_err(&pdev->dev, "VBLEN GPIO request failed (%d)\n", ret);
- return ret;
- }
+ if (IS_ERR(gpiod_vblen))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpiod_vblen),
+ "VBLEN GPIO request failed\n");
gpiod_ndisp = devm_gpiod_get(&pdev->dev, "ndisp", GPIOD_OUT_LOW);
- if (IS_ERR(gpiod_ndisp)) {
- ret = PTR_ERR(gpiod_ndisp);
- dev_err(&pdev->dev, "NDISP GPIO request failed (%d)\n", ret);
- return ret;
- }
+ if (IS_ERR(gpiod_ndisp))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpiod_ndisp),
+ "NDISP GPIO request failed\n");
#ifdef CONFIG_LCD_CLASS_DEVICE
lcd_device = lcd_device_register("omapfb", &pdev->dev, NULL,
diff --git a/drivers/video/fbdev/omap/lcd_mipid.c b/drivers/video/fbdev/omap/lcd_mipid.c
index a75ae0c9b14c..03cff39d392d 100644
--- a/drivers/video/fbdev/omap/lcd_mipid.c
+++ b/drivers/video/fbdev/omap/lcd_mipid.c
@@ -570,14 +570,12 @@ static int mipid_spi_probe(struct spi_device *spi)
return 0;
}
-static int mipid_spi_remove(struct spi_device *spi)
+static void mipid_spi_remove(struct spi_device *spi)
{
struct mipid_device *md = dev_get_drvdata(&spi->dev);
mipid_disable(&md->panel);
kfree(md);
-
- return 0;
}
static struct spi_driver mipid_spi_driver = {
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index b495c09e6102..083388a4ceeb 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/module.h>
+#include <linux/sysfs.h>
#include <linux/omap-dma.h>
@@ -1303,7 +1304,7 @@ static ssize_t omapfb_show_panel_name(struct device *dev,
{
struct omapfb_device *fbdev = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->panel->name);
+ return sysfs_emit(buf, "%s\n", fbdev->panel->name);
}
static ssize_t omapfb_show_bklight_level(struct device *dev,
@@ -1314,8 +1315,8 @@ static ssize_t omapfb_show_bklight_level(struct device *dev,
int r;
if (fbdev->panel->get_bklight_level) {
- r = snprintf(buf, PAGE_SIZE, "%d\n",
- fbdev->panel->get_bklight_level(fbdev->panel));
+ r = sysfs_emit(buf, "%d\n",
+ fbdev->panel->get_bklight_level(fbdev->panel));
} else
r = -ENODEV;
return r;
@@ -1348,8 +1349,8 @@ static ssize_t omapfb_show_bklight_max(struct device *dev,
int r;
if (fbdev->panel->get_bklight_level) {
- r = snprintf(buf, PAGE_SIZE, "%d\n",
- fbdev->panel->get_bklight_max(fbdev->panel));
+ r = sysfs_emit(buf, "%d\n",
+ fbdev->panel->get_bklight_max(fbdev->panel));
} else
r = -ENODEV;
return r;
@@ -1379,7 +1380,7 @@ static ssize_t omapfb_show_ctrl_name(struct device *dev,
{
struct omapfb_device *fbdev = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->ctrl->name);
+ return sysfs_emit(buf, "%s\n", fbdev->ctrl->name);
}
static struct device_attribute dev_attr_ctrl_name =
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
index 2fa436475b40..c8ad3ef42bd3 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
@@ -246,6 +246,7 @@ static int dvic_probe_of(struct platform_device *pdev)
adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
if (adapter_node) {
adapter = of_get_i2c_adapter_by_node(adapter_node);
+ of_node_put(adapter_node);
if (adapter == NULL) {
dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
omap_dss_put_device(ddata->in);
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
index 4b0793abdd84..a2c7c5cb1523 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
@@ -409,7 +409,7 @@ static ssize_t dsicm_num_errors_show(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%d\n", errors);
+ return sysfs_emit(buf, "%d\n", errors);
}
static ssize_t dsicm_hw_revision_show(struct device *dev,
@@ -439,7 +439,7 @@ static ssize_t dsicm_hw_revision_show(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3);
+ return sysfs_emit(buf, "%02x.%02x.%02x\n", id1, id2, id3);
}
static ssize_t dsicm_store_ulps(struct device *dev,
@@ -487,7 +487,7 @@ static ssize_t dsicm_show_ulps(struct device *dev,
t = ddata->ulps_enabled;
mutex_unlock(&ddata->lock);
- return snprintf(buf, PAGE_SIZE, "%u\n", t);
+ return sysfs_emit(buf, "%u\n", t);
}
static ssize_t dsicm_store_ulps_timeout(struct device *dev,
@@ -532,7 +532,7 @@ static ssize_t dsicm_show_ulps_timeout(struct device *dev,
t = ddata->ulps_timeout;
mutex_unlock(&ddata->lock);
- return snprintf(buf, PAGE_SIZE, "%u\n", t);
+ return sysfs_emit(buf, "%u\n", t);
}
static DEVICE_ATTR(num_dsi_errors, S_IRUGO, dsicm_num_errors_show, NULL);
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
index 1bec7a4422e8..3ce1f9d2e7c4 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
@@ -244,10 +244,9 @@ static int lb035q02_probe_of(struct spi_device *spi)
struct gpio_desc *gpio;
gpio = devm_gpiod_get(&spi->dev, "enable", GPIOD_OUT_LOW);
- if (IS_ERR(gpio)) {
- dev_err(&spi->dev, "failed to parse enable gpio\n");
- return PTR_ERR(gpio);
- }
+ if (IS_ERR(gpio))
+ return dev_err_probe(&spi->dev, PTR_ERR(gpio),
+ "failed to parse enable gpio\n");
ddata->enable_gpio = gpio;
@@ -316,7 +315,7 @@ err_gpio:
return r;
}
-static int lb035q02_panel_spi_remove(struct spi_device *spi)
+static void lb035q02_panel_spi_remove(struct spi_device *spi)
{
struct panel_drv_data *ddata = spi_get_drvdata(spi);
struct omap_dss_device *dssdev = &ddata->dssdev;
@@ -328,8 +327,6 @@ static int lb035q02_panel_spi_remove(struct spi_device *spi)
lb035q02_disconnect(dssdev);
omap_dss_put_device(in);
-
- return 0;
}
static const struct of_device_id lb035q02_of_match[] = {
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
index dff9ebbadfc0..be9910ff6e62 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
@@ -327,7 +327,7 @@ err_gpio:
return r;
}
-static int nec_8048_remove(struct spi_device *spi)
+static void nec_8048_remove(struct spi_device *spi)
{
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
struct omap_dss_device *dssdev = &ddata->dssdev;
@@ -341,8 +341,6 @@ static int nec_8048_remove(struct spi_device *spi)
nec_8048_disconnect(dssdev);
omap_dss_put_device(in);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
index 602324c5c9f9..f1072c319de8 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
@@ -211,10 +211,9 @@ static int sharp_ls_probe_of(struct platform_device *pdev)
int r;
ddata->vcc = devm_regulator_get(&pdev->dev, "envdd");
- if (IS_ERR(ddata->vcc)) {
- dev_err(&pdev->dev, "failed to get regulator\n");
- return PTR_ERR(ddata->vcc);
- }
+ if (IS_ERR(ddata->vcc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ddata->vcc),
+ "failed to get regulator\n");
/* lcd INI */
r = sharp_ls_get_gpio_of(&pdev->dev, 0, 0, "enable", &ddata->ini_gpio);
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
index 8d8b5ff7d43c..c0965bee12c5 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
@@ -476,7 +476,7 @@ static ssize_t show_cabc_available_modes(struct device *dev,
int i;
if (!ddata->has_cabc)
- return snprintf(buf, PAGE_SIZE, "%s\n", cabc_modes[0]);
+ return sysfs_emit(buf, "%s\n", cabc_modes[0]);
for (i = 0, len = 0;
len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++)
@@ -857,7 +857,7 @@ err_gpio:
return r;
}
-static int acx565akm_remove(struct spi_device *spi)
+static void acx565akm_remove(struct spi_device *spi)
{
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
struct omap_dss_device *dssdev = &ddata->dssdev;
@@ -874,8 +874,6 @@ static int acx565akm_remove(struct spi_device *spi)
acx565akm_disconnect(dssdev);
omap_dss_put_device(in);
-
- return 0;
}
static const struct of_device_id acx565akm_of_match[] = {
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
index 595ebd8bd5dc..3c0f887d3092 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
@@ -425,7 +425,7 @@ err_reg:
return r;
}
-static int td028ttec1_panel_remove(struct spi_device *spi)
+static void td028ttec1_panel_remove(struct spi_device *spi)
{
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
struct omap_dss_device *dssdev = &ddata->dssdev;
@@ -439,8 +439,6 @@ static int td028ttec1_panel_remove(struct spi_device *spi)
td028ttec1_panel_disconnect(dssdev);
omap_dss_put_device(in);
-
- return 0;
}
static const struct of_device_id td028ttec1_of_match[] = {
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
index afac1d9445aa..c0e4e0315b6b 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
@@ -169,7 +169,7 @@ static ssize_t tpo_td043_vmirror_show(struct device *dev,
{
struct panel_drv_data *ddata = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", ddata->vmirror);
+ return sysfs_emit(buf, "%d\n", ddata->vmirror);
}
static ssize_t tpo_td043_vmirror_store(struct device *dev,
@@ -199,7 +199,7 @@ static ssize_t tpo_td043_mode_show(struct device *dev,
{
struct panel_drv_data *ddata = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", ddata->mode);
+ return sysfs_emit(buf, "%d\n", ddata->mode);
}
static ssize_t tpo_td043_mode_store(struct device *dev,
@@ -517,8 +517,7 @@ static int tpo_td043_probe(struct spi_device *spi)
ddata->vcc_reg = devm_regulator_get(&spi->dev, "vcc");
if (IS_ERR(ddata->vcc_reg)) {
- dev_err(&spi->dev, "failed to get LCD VCC regulator\n");
- r = PTR_ERR(ddata->vcc_reg);
+ r = dev_err_probe(&spi->dev, PTR_ERR(ddata->vcc_reg),
+ "failed to get LCD VCC regulator\n");
goto err_regulator;
}
@@ -564,7 +563,7 @@ err_regulator:
return r;
}
-static int tpo_td043_remove(struct spi_device *spi)
+static void tpo_td043_remove(struct spi_device *spi)
{
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
struct omap_dss_device *dssdev = &ddata->dssdev;
@@ -580,8 +579,6 @@ static int tpo_td043_remove(struct spi_device *spi)
omap_dss_put_device(in);
sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
index 8f355d1caf86..bc5a44c2a144 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
@@ -265,6 +265,7 @@ static struct attribute *display_sysfs_attrs[] = {
&display_attr_wss.attr,
NULL
};
+ATTRIBUTE_GROUPS(display_sysfs);
static ssize_t display_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
@@ -303,7 +304,7 @@ static const struct sysfs_ops display_sysfs_ops = {
static struct kobj_type display_ktype = {
.sysfs_ops = &display_sysfs_ops,
- .default_attrs = display_sysfs_attrs,
+ .default_groups = display_sysfs_groups,
};
int display_init_sysfs(struct platform_device *pdev)
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
index 3ffb1fe4a38a..ba21c4a2633d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
@@ -457,6 +457,7 @@ static struct attribute *manager_sysfs_attrs[] = {
&manager_attr_cpr_coef.attr,
NULL
};
+ATTRIBUTE_GROUPS(manager_sysfs);
static ssize_t manager_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
@@ -495,7 +496,7 @@ static const struct sysfs_ops manager_sysfs_ops = {
static struct kobj_type manager_ktype = {
.sysfs_ops = &manager_sysfs_ops,
- .default_attrs = manager_sysfs_attrs,
+ .default_groups = manager_sysfs_groups,
};
int dss_manager_kobj_init(struct omap_overlay_manager *mgr,
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
index 421dcb7564ad..601c0beb6de9 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
@@ -390,6 +390,7 @@ static struct attribute *overlay_sysfs_attrs[] = {
&overlay_attr_zorder.attr,
NULL
};
+ATTRIBUTE_GROUPS(overlay_sysfs);
static ssize_t overlay_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
@@ -428,7 +429,7 @@ static const struct sysfs_ops overlay_sysfs_ops = {
static struct kobj_type overlay_ktype = {
.sysfs_ops = &overlay_sysfs_ops,
- .default_attrs = overlay_sysfs_attrs,
+ .default_groups = overlay_sysfs_groups,
};
int dss_overlay_kobj_init(struct omap_overlay *ovl,
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index a3decc7fadde..afa688e754b9 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1854,7 +1854,6 @@ static void omapfb_free_resources(struct omapfb2_device *fbdev)
}
if (fbdev->auto_update_wq != NULL) {
- flush_workqueue(fbdev->auto_update_wq);
destroy_workqueue(fbdev->auto_update_wq);
fbdev->auto_update_wq = NULL;
}
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index 47e6a1d0d229..e943300d23e8 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -593,8 +593,8 @@ static void pxa168fb_init_mode(struct fb_info *info,
static int pxa168fb_probe(struct platform_device *pdev)
{
struct pxa168fb_mach_info *mi;
- struct fb_info *info = 0;
- struct pxa168fb_info *fbi = 0;
+ struct fb_info *info = NULL;
+ struct pxa168fb_info *fbi = NULL;
struct resource *res;
struct clk *clk;
int irq, ret;
@@ -606,10 +606,9 @@ static int pxa168fb_probe(struct platform_device *pdev)
}
clk = devm_clk_get(&pdev->dev, "LCDCLK");
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get LCDCLK");
- return PTR_ERR(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "unable to get LCDCLK");
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
@@ -618,10 +617,8 @@ static int pxa168fb_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no IRQ defined\n");
+ if (irq < 0)
return -ENOENT;
- }
info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev);
if (info == NULL) {
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 4279e13a3b58..350b3139c863 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -606,17 +606,13 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
/* enable the clock */
priv->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "failed to get clock\n");
- return PTR_ERR(priv->clk);
- }
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk), "failed to get clock\n");
/* request the IRQ */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "no IRQ defined: %d\n", irq);
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(dev, irq, pxa3xx_gcu_handle_irq,
0, DRV_NAME, priv);
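The probe-path conversions above rely on the dev_err_probe() helper: it logs the message at error level, except for -EPROBE_DEFER, where it records the string as the deferral reason instead of spamming the log, and then returns the error code, so the dev_err()/return pair collapses into one statement. The general shape for a clock consumer in probe():

clk = devm_clk_get(&pdev->dev, "LCDCLK");
if (IS_ERR(clk))
	return dev_err_probe(&pdev->dev, PTR_ERR(clk),
			     "unable to get LCDCLK\n");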
diff --git a/drivers/video/fbdev/s3c-fb.c b/drivers/video/fbdev/s3c-fb.c
index 68408c499627..3abbc5737c3b 100644
--- a/drivers/video/fbdev/s3c-fb.c
+++ b/drivers/video/fbdev/s3c-fb.c
@@ -1360,7 +1360,6 @@ static int s3c_fb_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct s3c_fb_platdata *pd;
struct s3c_fb *sfb;
- struct resource *res;
int win;
int ret = 0;
u32 reg;
@@ -1392,18 +1391,17 @@ static int s3c_fb_probe(struct platform_device *pdev)
spin_lock_init(&sfb->slock);
sfb->bus_clk = devm_clk_get(dev, "lcd");
- if (IS_ERR(sfb->bus_clk)) {
- dev_err(dev, "failed to get bus clock\n");
- return PTR_ERR(sfb->bus_clk);
- }
+ if (IS_ERR(sfb->bus_clk))
+ return dev_err_probe(dev, PTR_ERR(sfb->bus_clk),
+ "failed to get bus clock\n");
clk_prepare_enable(sfb->bus_clk);
if (!sfb->variant.has_clksel) {
sfb->lcd_clk = devm_clk_get(dev, "sclk_fimd");
if (IS_ERR(sfb->lcd_clk)) {
- dev_err(dev, "failed to get lcd clock\n");
- ret = PTR_ERR(sfb->lcd_clk);
+ ret = dev_err_probe(dev, PTR_ERR(sfb->lcd_clk),
+ "failed to get lcd clock\n");
goto err_bus_clk;
}
@@ -1418,13 +1416,12 @@ static int s3c_fb_probe(struct platform_device *pdev)
goto err_lcd_clk;
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "failed to acquire irq resource\n");
+ sfb->irq_no = platform_get_irq(pdev, 0);
+ if (sfb->irq_no < 0) {
ret = -ENOENT;
goto err_lcd_clk;
}
- sfb->irq_no = res->start;
+
ret = devm_request_irq(dev, sfb->irq_no, s3c_fb_irq,
0, "s3c_fb", sfb);
if (ret) {
@@ -1810,4 +1807,3 @@ module_platform_driver(s3c_fb_driver);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("Samsung S3C SoC Framebuffer driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c-fb");
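The IRQ lookups above switch to platform_get_irq(), which already logs an error on failure and performs the interrupt mapping itself, so neither the IORESOURCE_IRQ resource dance nor a driver-local dev_err() is needed. A minimal sketch, with a hypothetical handler and cookie:

irq = platform_get_irq(pdev, 0);
if (irq < 0)
	return irq;	/* platform_get_irq() already printed why */

ret = devm_request_irq(dev, irq, my_fb_irq_handler, 0, "my-fb", priv);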
diff --git a/drivers/video/fbdev/savage/savagefb.h b/drivers/video/fbdev/savage/savagefb.h
index 3314d5b6b43b..b6b8cc208293 100644
--- a/drivers/video/fbdev/savage/savagefb.h
+++ b/drivers/video/fbdev/savage/savagefb.h
@@ -195,7 +195,6 @@ struct savagefb_par {
struct savage_reg initial;
struct vgastate vgastate;
struct mutex open_lock;
- unsigned char *edid;
u32 pseudo_palette[16];
u32 open_count;
int paletteEnabled;
diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
index 0ac750cc5ea1..8114c921ceb8 100644
--- a/drivers/video/fbdev/savage/savagefb_driver.c
+++ b/drivers/video/fbdev/savage/savagefb_driver.c
@@ -2170,6 +2170,7 @@ static int savagefb_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct fb_info *info;
struct savagefb_par *par;
u_int h_sync, v_sync;
+ unsigned char __maybe_unused *edid;
int err, lpitch;
int video_len;
@@ -2212,9 +2213,9 @@ static int savagefb_probe(struct pci_dev *dev, const struct pci_device_id *id)
INIT_LIST_HEAD(&info->modelist);
#if defined(CONFIG_FB_SAVAGE_I2C)
savagefb_create_i2c_busses(info);
- savagefb_probe_i2c_connector(info, &par->edid);
- fb_edid_to_monspecs(par->edid, &info->monspecs);
- kfree(par->edid);
+ savagefb_probe_i2c_connector(info, &edid);
+ fb_edid_to_monspecs(edid, &info->monspecs);
+ kfree(edid);
fb_videomode_to_modelist(info->monspecs.modedb,
info->monspecs.modedb_len,
&info->modelist);
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index 266a5582f94d..742f62986b80 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -213,7 +213,7 @@ static void sisfb_search_mode(char *name, bool quiet)
/* This does some fuzzy mode naming detection */
if(sscanf(strbuf1, "%u %u %u %u", &xres, &yres, &depth, &rate) == 4) {
if((rate <= 32) || (depth > 32)) {
- j = rate; rate = depth; depth = j;
+ swap(rate, depth);
}
sprintf(strbuf, "%ux%ux%u", xres, yres, depth);
nameptr = strbuf;
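The kernel's swap() macro replaces the open-coded three-assignment exchange and removes the need for the temporary:

swap(rate, depth);	/* was: j = rate; rate = depth; depth = j; */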
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index 0dbc6bf8268a..092a1caa1208 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -1047,7 +1047,7 @@ static ssize_t smtcfb_read(struct fb_info *info, char __user *buf,
if (count + p > total_size)
count = total_size - p;
- buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
+ buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -1059,25 +1059,14 @@ static ssize_t smtcfb_read(struct fb_info *info, char __user *buf,
while (count) {
c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
dst = buffer;
- for (i = c >> 2; i--;) {
- *dst = fb_readl(src++);
- *dst = big_swap(*dst);
+ for (i = (c + 3) >> 2; i--;) {
+ u32 val;
+
+ val = fb_readl(src);
+ *dst = big_swap(val);
+ src++;
dst++;
}
- if (c & 3) {
- u8 *dst8 = (u8 *)dst;
- u8 __iomem *src8 = (u8 __iomem *)src;
-
- for (i = c & 3; i--;) {
- if (i & 1) {
- *dst8++ = fb_readb(++src8);
- } else {
- *dst8++ = fb_readb(--src8);
- src8 += 2;
- }
- }
- src = (u32 __iomem *)src8;
- }
if (copy_to_user(buf, buffer, c)) {
err = -EFAULT;
@@ -1130,7 +1119,7 @@ static ssize_t smtcfb_write(struct fb_info *info, const char __user *buf,
count = total_size - p;
}
- buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
+ buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -1148,24 +1137,11 @@ static ssize_t smtcfb_write(struct fb_info *info, const char __user *buf,
break;
}
- for (i = c >> 2; i--;) {
- fb_writel(big_swap(*src), dst++);
+ for (i = (c + 3) >> 2; i--;) {
+ fb_writel(big_swap(*src), dst);
+ dst++;
src++;
}
- if (c & 3) {
- u8 *src8 = (u8 *)src;
- u8 __iomem *dst8 = (u8 __iomem *)dst;
-
- for (i = c & 3; i--;) {
- if (i & 1) {
- fb_writeb(*src8++, ++dst8);
- } else {
- fb_writeb(*src8++, --dst8);
- dst8 += 2;
- }
- }
- dst = (u32 __iomem *)dst8;
- }
*ppos += c;
buf += c;
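The rewritten copy loops above drop the byte-wise tail handling by rounding the transfer up to whole 32-bit words: (c + 3) >> 2 is DIV_ROUND_UP(c, 4), so for a 6-byte request the second word access covers the trailing two bytes. A sketch of the read direction, assuming the bounce buffer holds a full page as allocated above:

for (i = (c + 3) >> 2; i--;) {
	u32 val = fb_readl(src++);	/* 32-bit MMIO read from the framebuffer */

	*dst++ = big_swap(val);		/* fix endianness before copy_to_user() */
}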
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index bfac3ee4a642..28768c272b73 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -1656,6 +1656,7 @@ static int ufx_usb_probe(struct usb_interface *interface,
info->par = dev;
info->pseudo_palette = dev->pseudo_palette;
info->fbops = &ufx_ops;
+ INIT_LIST_HEAD(&info->modelist);
retval = fb_alloc_cmap(&info->cmap, 256, 0);
if (retval < 0) {
@@ -1666,8 +1667,6 @@ static int ufx_usb_probe(struct usb_interface *interface,
INIT_DELAYED_WORK(&dev->free_framebuffer_work,
ufx_free_framebuffer_work);
- INIT_LIST_HEAD(&info->modelist);
-
retval = ufx_reg_read(dev, 0x3000, &id_rev);
check_warn_goto_error(retval, "error %d reading 0x3000 register from device", retval);
dev_dbg(dev->gdev, "ID_REV register value 0x%08x", id_rev);
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 1e2f71c2f8a8..c6d5df31111d 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -658,9 +658,8 @@ static int ssd1307fb_probe(struct i2c_client *client)
par->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(par->reset)) {
- dev_err(dev, "failed to get reset gpio: %ld\n",
- PTR_ERR(par->reset));
- ret = PTR_ERR(par->reset);
+ ret = dev_err_probe(dev, PTR_ERR(par->reset),
+ "failed to get reset gpio\n");
goto fb_alloc_error;
}
@@ -670,7 +669,7 @@ static int ssd1307fb_probe(struct i2c_client *client)
if (ret == -ENODEV) {
par->vbat_reg = NULL;
} else {
- dev_err(dev, "failed to get VBAT regulator: %d\n", ret);
+ dev_err_probe(dev, ret, "failed to get VBAT regulator\n");
goto fb_alloc_error;
}
}
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 265865610edc..bebb2eea6448 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -1041,6 +1041,47 @@ stifb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
SETUP_FB(fb);
}
+#define ARTIST_VRAM_SIZE 0x000804
+#define ARTIST_VRAM_SRC 0x000808
+#define ARTIST_VRAM_SIZE_TRIGGER_WINFILL 0x000a04
+#define ARTIST_VRAM_DEST_TRIGGER_BLOCKMOVE 0x000b00
+#define ARTIST_SRC_BM_ACCESS 0x018008
+#define ARTIST_FGCOLOR 0x018010
+#define ARTIST_BGCOLOR 0x018014
+#define ARTIST_BITMAP_OP 0x01801c
+
+static void
+stifb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct stifb_info *fb = container_of(info, struct stifb_info, info);
+
+ if (rect->rop != ROP_COPY)
+ return cfb_fillrect(info, rect);
+
+ SETUP_HW(fb);
+
+ if (fb->info.var.bits_per_pixel == 32) {
+ WRITE_WORD(0xBBA0A000, fb, REG_10);
+
+ NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xffffffff);
+ } else {
+ WRITE_WORD(fb->id == S9000_ID_HCRX ? 0x13a02000 : 0x13a01000, fb, REG_10);
+
+ NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xff);
+ }
+
+ WRITE_WORD(0x03000300, fb, ARTIST_BITMAP_OP);
+ WRITE_WORD(0x2ea01000, fb, ARTIST_SRC_BM_ACCESS);
+ NGLE_QUICK_SET_DST_BM_ACCESS(fb, 0x2ea01000);
+ NGLE_REALLY_SET_IMAGE_FG_COLOR(fb, rect->color);
+ WRITE_WORD(0, fb, ARTIST_BGCOLOR);
+
+ NGLE_SET_DSTXY(fb, (rect->dx << 16) | (rect->dy));
+ SET_LENXY_START_RECFILL(fb, (rect->width << 16) | (rect->height));
+
+ SETUP_FB(fb);
+}
+
static void __init
stifb_init_display(struct stifb_info *fb)
{
@@ -1105,7 +1146,7 @@ static const struct fb_ops stifb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = stifb_setcolreg,
.fb_blank = stifb_blank,
- .fb_fillrect = cfb_fillrect,
+ .fb_fillrect = stifb_fillrect,
.fb_copyarea = stifb_copyarea,
.fb_imageblit = cfb_imageblit,
};
@@ -1297,7 +1338,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
goto out_err0;
}
info->screen_size = fix->smem_len;
- info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA;
+ info->flags = FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
info->pseudo_palette = &fb->pseudo_palette;
/* This has to be done !!! */
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 184bb8433b78..b6ec0b8e2b72 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1427,7 +1427,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
struct device_attribute *a, char *buf) {
struct fb_info *fb_info = dev_get_drvdata(fbdev);
struct dlfb_data *dlfb = fb_info->par;
- return snprintf(buf, PAGE_SIZE, "%u\n",
+ return sysfs_emit(buf, "%u\n",
atomic_read(&dlfb->bytes_rendered));
}
@@ -1435,7 +1435,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
struct device_attribute *a, char *buf) {
struct fb_info *fb_info = dev_get_drvdata(fbdev);
struct dlfb_data *dlfb = fb_info->par;
- return snprintf(buf, PAGE_SIZE, "%u\n",
+ return sysfs_emit(buf, "%u\n",
atomic_read(&dlfb->bytes_identical));
}
@@ -1443,7 +1443,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
struct device_attribute *a, char *buf) {
struct fb_info *fb_info = dev_get_drvdata(fbdev);
struct dlfb_data *dlfb = fb_info->par;
- return snprintf(buf, PAGE_SIZE, "%u\n",
+ return sysfs_emit(buf, "%u\n",
atomic_read(&dlfb->bytes_sent));
}
@@ -1451,7 +1451,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
struct device_attribute *a, char *buf) {
struct fb_info *fb_info = dev_get_drvdata(fbdev);
struct dlfb_data *dlfb = fb_info->par;
- return snprintf(buf, PAGE_SIZE, "%u\n",
+ return sysfs_emit(buf, "%u\n",
atomic_read(&dlfb->cpu_kcycles_used));
}
diff --git a/drivers/video/fbdev/via/lcd.c b/drivers/video/fbdev/via/lcd.c
index 088b962076b5..beec5c8d4d08 100644
--- a/drivers/video/fbdev/via/lcd.c
+++ b/drivers/video/fbdev/via/lcd.c
@@ -543,7 +543,7 @@ void viafb_lcd_set_mode(const struct fb_var_screeninfo *var, u16 cxres,
/* Get panel table Pointer */
panel_crt_table = viafb_get_best_mode(panel_hres, panel_vres, 60);
viafb_fill_var_timing_info(&panel_var, panel_crt_table);
- DEBUG_MSG(KERN_INFO "bellow viafb_lcd_set_mode!!\n");
+ DEBUG_MSG(KERN_INFO "below viafb_lcd_set_mode!!\n");
if (VT1636_LVDS == plvds_chip_info->lvds_chip_name)
viafb_init_lvds_vt1636(plvds_setting_info, plvds_chip_info);
clock = PICOS2KHZ(panel_crt_table->pixclock) * 1000;
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
index 22deb340a048..2d67c92c5774 100644
--- a/drivers/video/fbdev/via/viafbdev.c
+++ b/drivers/video/fbdev/via/viafbdev.c
@@ -1939,8 +1939,12 @@ static int __init viafb_setup(void)
if (!strncmp(this_opt, "viafb_mode1=", 12)) {
viafb_mode1 = kstrdup(this_opt + 12, GFP_KERNEL);
+ if (!viafb_mode1)
+ return -ENOMEM;
} else if (!strncmp(this_opt, "viafb_mode=", 11)) {
viafb_mode = kstrdup(this_opt + 11, GFP_KERNEL);
+ if (!viafb_mode)
+ return -ENOMEM;
} else if (!strncmp(this_opt, "viafb_bpp1=", 11)) {
if (kstrtouint(this_opt + 11, 0, &viafb_bpp1) < 0)
return -EINVAL;
@@ -1969,6 +1973,8 @@ static int __init viafb_setup(void)
return -EINVAL;
} else if (!strncmp(this_opt, "viafb_active_dev=", 17)) {
viafb_active_dev = kstrdup(this_opt + 17, GFP_KERNEL);
+ if (!viafb_active_dev)
+ return -ENOMEM;
} else if (!strncmp(this_opt,
"viafb_display_hardware_layout=", 30)) {
if (kstrtoint(this_opt + 30, 0,
@@ -1995,8 +2001,12 @@ static int __init viafb_setup(void)
return -EINVAL;
} else if (!strncmp(this_opt, "viafb_lcd_port=", 15)) {
viafb_lcd_port = kstrdup(this_opt + 15, GFP_KERNEL);
+ if (!viafb_lcd_port)
+ return -ENOMEM;
} else if (!strncmp(this_opt, "viafb_dvi_port=", 15)) {
viafb_dvi_port = kstrdup(this_opt + 15, GFP_KERNEL);
+ if (!viafb_dvi_port)
+ return -ENOMEM;
}
}
return 0;
diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c
index d96ab28f8ce4..4e641a780726 100644
--- a/drivers/video/fbdev/w100fb.c
+++ b/drivers/video/fbdev/w100fb.c
@@ -770,12 +770,18 @@ out:
fb_dealloc_cmap(&info->cmap);
kfree(info->pseudo_palette);
}
- if (remapped_fbuf != NULL)
+ if (remapped_fbuf != NULL) {
iounmap(remapped_fbuf);
- if (remapped_regs != NULL)
+ remapped_fbuf = NULL;
+ }
+ if (remapped_regs != NULL) {
iounmap(remapped_regs);
- if (remapped_base != NULL)
+ remapped_regs = NULL;
+ }
+ if (remapped_base != NULL) {
iounmap(remapped_base);
+ remapped_base = NULL;
+ }
if (info)
framebuffer_release(info);
return err;
@@ -795,8 +801,11 @@ static int w100fb_remove(struct platform_device *pdev)
fb_dealloc_cmap(&info->cmap);
iounmap(remapped_base);
+ remapped_base = NULL;
iounmap(remapped_regs);
+ remapped_regs = NULL;
iounmap(remapped_fbuf);
+ remapped_fbuf = NULL;
framebuffer_release(info);
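Both the probe error path and w100fb_remove() above now reset the driver's static mapping pointers after iounmap(), so a later failed probe or re-probe cannot unmap a stale cookie a second time. The idiom in isolation:

if (remapped_regs) {
	iounmap(remapped_regs);
	remapped_regs = NULL;	/* guard against double iounmap() */
}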
diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig
index 8061e8ef449f..121b9293c737 100644
--- a/drivers/virt/Kconfig
+++ b/drivers/virt/Kconfig
@@ -13,6 +13,17 @@ menuconfig VIRT_DRIVERS
if VIRT_DRIVERS
+config VMGENID
+ tristate "Virtual Machine Generation ID driver"
+ default y
+ depends on ACPI
+ help
+ Say Y here to use the hypervisor-provided Virtual Machine Generation ID
+ to reseed the RNG when the VM is cloned. This is highly recommended if
+ you intend to do any rollback / cloning / snapshotting of VMs.
+
+ Prefer Y to M so that this protection is activated very early.
+
config FSL_HV_MANAGER
tristate "Freescale hypervisor management driver"
depends on FSL_SOC
diff --git a/drivers/virt/Makefile b/drivers/virt/Makefile
index 3e272ea60cd9..108d0ffcc9aa 100644
--- a/drivers/virt/Makefile
+++ b/drivers/virt/Makefile
@@ -4,6 +4,7 @@
#
obj-$(CONFIG_FSL_HV_MANAGER) += fsl_hypervisor.o
+obj-$(CONFIG_VMGENID) += vmgenid.o
obj-y += vboxguest/
obj-$(CONFIG_NITRO_ENCLAVES) += nitro_enclaves/
diff --git a/drivers/virt/vmgenid.c b/drivers/virt/vmgenid.c
new file mode 100644
index 000000000000..0ae1a39f2e28
--- /dev/null
+++ b/drivers/virt/vmgenid.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * The "Virtual Machine Generation ID" is exposed via ACPI and changes when a
+ * virtual machine forks or is cloned. This driver exists for shepherding that
+ * information to random.c.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/random.h>
+
+ACPI_MODULE_NAME("vmgenid");
+
+enum { VMGENID_SIZE = 16 };
+
+struct vmgenid_state {
+ u8 *next_id;
+ u8 this_id[VMGENID_SIZE];
+};
+
+static int vmgenid_add(struct acpi_device *device)
+{
+ struct acpi_buffer parsed = { ACPI_ALLOCATE_BUFFER };
+ struct vmgenid_state *state;
+ union acpi_object *obj;
+ phys_addr_t phys_addr;
+ acpi_status status;
+ int ret = 0;
+
+ state = devm_kmalloc(&device->dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ status = acpi_evaluate_object(device->handle, "ADDR", NULL, &parsed);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating ADDR"));
+ return -ENODEV;
+ }
+ obj = parsed.pointer;
+ if (!obj || obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 2 ||
+ obj->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ obj->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ phys_addr = (obj->package.elements[0].integer.value << 0) |
+ (obj->package.elements[1].integer.value << 32);
+ state->next_id = devm_memremap(&device->dev, phys_addr, VMGENID_SIZE, MEMREMAP_WB);
+ if (IS_ERR(state->next_id)) {
+ ret = PTR_ERR(state->next_id);
+ goto out;
+ }
+
+ memcpy(state->this_id, state->next_id, sizeof(state->this_id));
+ add_device_randomness(state->this_id, sizeof(state->this_id));
+
+ device->driver_data = state;
+
+out:
+ ACPI_FREE(parsed.pointer);
+ return ret;
+}
+
+static void vmgenid_notify(struct acpi_device *device, u32 event)
+{
+ struct vmgenid_state *state = acpi_driver_data(device);
+ u8 old_id[VMGENID_SIZE];
+
+ memcpy(old_id, state->this_id, sizeof(old_id));
+ memcpy(state->this_id, state->next_id, sizeof(state->this_id));
+ if (!memcmp(old_id, state->this_id, sizeof(old_id)))
+ return;
+ add_vmfork_randomness(state->this_id, sizeof(state->this_id));
+}
+
+static const struct acpi_device_id vmgenid_ids[] = {
+ { "VM_GEN_COUNTER", 0 },
+ { }
+};
+
+static struct acpi_driver vmgenid_driver = {
+ .name = "vmgenid",
+ .ids = vmgenid_ids,
+ .owner = THIS_MODULE,
+ .ops = {
+ .add = vmgenid_add,
+ .notify = vmgenid_notify
+ }
+};
+
+module_acpi_driver(vmgenid_driver);
+
+MODULE_DEVICE_TABLE(acpi, vmgenid_ids);
+MODULE_DESCRIPTION("Virtual Machine Generation ID");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
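The ADDR object evaluated by vmgenid_add() yields a package of two 32-bit integers carrying the low and high halves of the generation-ID buffer's physical address, which the driver reassembles before mapping. The decode step in isolation:

u64 lo = obj->package.elements[0].integer.value;
u64 hi = obj->package.elements[1].integer.value;
phys_addr_t phys_addr = lo | (hi << 32);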
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 34f80b7a8a64..492fc26f0b65 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -105,7 +105,6 @@ config VIRTIO_BALLOON
config VIRTIO_MEM
tristate "Virtio mem driver"
- default m
depends on X86_64
depends on VIRTIO
depends on MEMORY_HOTPLUG
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 00ac9db792a4..22f15f444f75 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -166,14 +166,13 @@ void virtio_add_status(struct virtio_device *dev, unsigned int status)
}
EXPORT_SYMBOL_GPL(virtio_add_status);
-int virtio_finalize_features(struct virtio_device *dev)
+/* Do some validation, then set FEATURES_OK */
+static int virtio_features_ok(struct virtio_device *dev)
{
- int ret = dev->config->finalize_features(dev);
unsigned status;
+ int ret;
might_sleep();
- if (ret)
- return ret;
ret = arch_has_restricted_virtio_memory_access();
if (ret) {
@@ -202,8 +201,23 @@ int virtio_finalize_features(struct virtio_device *dev)
}
return 0;
}
-EXPORT_SYMBOL_GPL(virtio_finalize_features);
+/**
+ * virtio_reset_device - quiesce device for removal
+ * @dev: the device to reset
+ *
+ * Prevents device from sending interrupts and accessing memory.
+ *
+ * Generally used for cleanup during driver / device removal.
+ *
+ * Once this has been invoked, caller must ensure that
+ * virtqueue_notify / virtqueue_kick are not in progress.
+ *
+ * Note: this guarantees that vq callbacks are not in progress, however caller
+ * is responsible for preventing access from other contexts, such as a system
+ * call/workqueue/bh. Invoking virtio_break_device then flushing any such
+ * contexts is one way to handle that.
+ */
void virtio_reset_device(struct virtio_device *dev)
{
dev->config->reset(dev);
@@ -245,17 +259,6 @@ static int virtio_dev_probe(struct device *_d)
driver_features_legacy = driver_features;
}
- /*
- * Some devices detect legacy solely via F_VERSION_1. Write
- * F_VERSION_1 to force LE config space accesses before FEATURES_OK for
- * these when needed.
- */
- if (drv->validate && !virtio_legacy_is_little_endian()
- && device_features & BIT_ULL(VIRTIO_F_VERSION_1)) {
- dev->features = BIT_ULL(VIRTIO_F_VERSION_1);
- dev->config->finalize_features(dev);
- }
-
if (device_features & (1ULL << VIRTIO_F_VERSION_1))
dev->features = driver_features & device_features;
else
@@ -266,13 +269,26 @@ static int virtio_dev_probe(struct device *_d)
if (device_features & (1ULL << i))
__virtio_set_bit(dev, i);
+ err = dev->config->finalize_features(dev);
+ if (err)
+ goto err;
+
if (drv->validate) {
+ u64 features = dev->features;
+
err = drv->validate(dev);
if (err)
goto err;
+
+ /* Did validation change any features? Then write them again. */
+ if (features != dev->features) {
+ err = dev->config->finalize_features(dev);
+ if (err)
+ goto err;
+ }
}
- err = virtio_finalize_features(dev);
+ err = virtio_features_ok(dev);
if (err)
goto err;
@@ -496,7 +512,11 @@ int virtio_device_restore(struct virtio_device *dev)
/* We have a driver! */
virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);
- ret = virtio_finalize_features(dev);
+ ret = dev->config->finalize_features(dev);
+ if (ret)
+ goto err;
+
+ ret = virtio_features_ok(dev);
if (ret)
goto err;
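After this rework, probe-time feature negotiation runs in a fixed order; in outline (helpers as in the patch above):

/*
 * 1. dev->config->finalize_features(dev)  - write the feature set once
 * 2. drv->validate(dev)                   - driver may clear bits it
 *                                           cannot support
 * 3. finalize_features() again            - only if validate() changed
 *                                           dev->features
 * 4. virtio_features_ok(dev)              - sanity-check, then set the
 *                                           FEATURES_OK status bit
 */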
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index 38becd8d578c..e7d6b679596d 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -2476,13 +2476,10 @@ static int virtio_mem_init_hotplug(struct virtio_mem *vm)
VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD);
/*
- * We want subblocks to span at least MAX_ORDER_NR_PAGES and
- * pageblock_nr_pages pages. This:
- * - Is required for now for alloc_contig_range() to work reliably -
- * it doesn't properly handle smaller granularity on ZONE_NORMAL.
+ * TODO: once alloc_contig_range() works reliably with pageblock
+ * granularity on ZONE_NORMAL, use pageblock_nr_pages instead.
*/
- sb_size = max_t(uint64_t, MAX_ORDER_NR_PAGES,
- pageblock_nr_pages) * PAGE_SIZE;
+ sb_size = PAGE_SIZE * MAX_ORDER_NR_PAGES;
sb_size = max_t(uint64_t, vm->device_block_size, sb_size);
if (sb_size < memory_block_size_bytes() && !force_bbm) {
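A worked example of the simplified sizing, assuming typical x86-64 values (4 KiB pages, MAX_ORDER_NR_PAGES = 1024):

sb_size = PAGE_SIZE * MAX_ORDER_NR_PAGES;	/* 4096 * 1024 = 4 MiB */
sb_size = max_t(uint64_t, vm->device_block_size, sb_size);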
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index 7767a7f0119b..76504559bc25 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -317,7 +317,7 @@ static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
/* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev);
- return vdpa_set_features(vdpa, vdev->features, false);
+ return vdpa_set_features(vdpa, vdev->features);
}
static const char *virtio_vdpa_bus_name(struct virtio_device *vdev)
diff --git a/drivers/visorbus/vbuschannel.h b/drivers/visorbus/vbuschannel.h
index 4aaf6564eb9f..98711fb6d66e 100644
--- a/drivers/visorbus/vbuschannel.h
+++ b/drivers/visorbus/vbuschannel.h
@@ -89,7 +89,7 @@ struct visor_vbus_channel {
struct visor_vbus_headerinfo hdr_info;
struct visor_vbus_deviceinfo chp_info;
struct visor_vbus_deviceinfo bus_info;
- struct visor_vbus_deviceinfo dev_info[0];
+ struct visor_vbus_deviceinfo dev_info[];
} __packed;
#endif
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index 3fa40c723e8e..edb0acd0b832 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -169,20 +169,14 @@ undo:
__del_gref(gref);
}
- /* It's possible for the target domain to map the just-allocated grant
- * references by blindly guessing their IDs; if this is done, then
- * __del_gref will leave them in the queue_gref list. They need to be
- * added to the global list so that we can free them when they are no
- * longer referenced.
- */
- if (unlikely(!list_empty(&queue_gref)))
- list_splice_tail(&queue_gref, &gref_list);
mutex_unlock(&gref_mutex);
return rc;
}
static void __del_gref(struct gntalloc_gref *gref)
{
+ unsigned long addr;
+
if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
uint8_t *tmp = kmap(gref->page);
tmp[gref->notify.pgoff] = 0;
@@ -196,21 +190,16 @@ static void __del_gref(struct gntalloc_gref *gref)
gref->notify.flags = 0;
if (gref->gref_id) {
- if (gnttab_query_foreign_access(gref->gref_id))
- return;
-
- if (!gnttab_end_foreign_access_ref(gref->gref_id, 0))
- return;
-
- gnttab_free_grant_reference(gref->gref_id);
+ if (gref->page) {
+ addr = (unsigned long)page_to_virt(gref->page);
+ gnttab_end_foreign_access(gref->gref_id, 0, addr);
+ } else
+ gnttab_free_grant_reference(gref->gref_id);
}
gref_size--;
list_del(&gref->next_gref);
- if (gref->page)
- __free_page(gref->page);
-
kfree(gref);
}
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 3729bea0c989..5c83d41766c8 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -134,12 +134,9 @@ struct gnttab_ops {
*/
unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
/*
- * Query the status of a grant entry. Ref parameter is reference of
- * queried grant entry, return value is the status of queried entry.
- * Detailed status(writing/reading) can be gotten from the return value
- * by bit operations.
+ * Read the frame number related to a given grant reference.
*/
- int (*query_foreign_access)(grant_ref_t ref);
+ unsigned long (*read_frame)(grant_ref_t ref);
};
struct unmap_refs_callback_data {
@@ -284,22 +281,6 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
-static int gnttab_query_foreign_access_v1(grant_ref_t ref)
-{
- return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
-}
-
-static int gnttab_query_foreign_access_v2(grant_ref_t ref)
-{
- return grstatus[ref] & (GTF_reading|GTF_writing);
-}
-
-int gnttab_query_foreign_access(grant_ref_t ref)
-{
- return gnttab_interface->query_foreign_access(ref);
-}
-EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
-
static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
u16 flags, nflags;
@@ -353,6 +334,16 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
+static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
+{
+ return gnttab_shared.v1[ref].frame;
+}
+
+static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
+{
+ return gnttab_shared.v2[ref].full_page.frame;
+}
+
struct deferred_entry {
struct list_head list;
grant_ref_t ref;
@@ -382,12 +373,9 @@ static void gnttab_handle_deferred(struct timer_list *unused)
spin_unlock_irqrestore(&gnttab_list_lock, flags);
if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
put_free_entry(entry->ref);
- if (entry->page) {
- pr_debug("freeing g.e. %#x (pfn %#lx)\n",
- entry->ref, page_to_pfn(entry->page));
- put_page(entry->page);
- } else
- pr_info("freeing g.e. %#x\n", entry->ref);
+ pr_debug("freeing g.e. %#x (pfn %#lx)\n",
+ entry->ref, page_to_pfn(entry->page));
+ put_page(entry->page);
kfree(entry);
entry = NULL;
} else {
@@ -412,9 +400,18 @@ static void gnttab_handle_deferred(struct timer_list *unused)
static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
struct page *page)
{
- struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ struct deferred_entry *entry;
+ gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
const char *what = KERN_WARNING "leaking";
+ entry = kmalloc(sizeof(*entry), gfp);
+ if (!page) {
+ unsigned long gfn = gnttab_interface->read_frame(ref);
+
+ page = pfn_to_page(gfn_to_pfn(gfn));
+ get_page(page);
+ }
+
if (entry) {
unsigned long flags;
@@ -435,11 +432,21 @@ static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
what, ref, page ? page_to_pfn(page) : -1);
}
+int gnttab_try_end_foreign_access(grant_ref_t ref)
+{
+ int ret = _gnttab_end_foreign_access_ref(ref, 0);
+
+ if (ret)
+ put_free_entry(ref);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);
+
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
unsigned long page)
{
- if (gnttab_end_foreign_access_ref(ref, readonly)) {
- put_free_entry(ref);
+ if (gnttab_try_end_foreign_access(ref)) {
if (page != 0)
put_page(virt_to_page(page));
} else
@@ -1417,7 +1424,7 @@ static const struct gnttab_ops gnttab_v1_ops = {
.update_entry = gnttab_update_entry_v1,
.end_foreign_access_ref = gnttab_end_foreign_access_ref_v1,
.end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v1,
- .query_foreign_access = gnttab_query_foreign_access_v1,
+ .read_frame = gnttab_read_frame_v1,
};
static const struct gnttab_ops gnttab_v2_ops = {
@@ -1429,7 +1436,7 @@ static const struct gnttab_ops gnttab_v2_ops = {
.update_entry = gnttab_update_entry_v2,
.end_foreign_access_ref = gnttab_end_foreign_access_ref_v2,
.end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2,
- .query_foreign_access = gnttab_query_foreign_access_v2,
+ .read_frame = gnttab_read_frame_v2,
};
static bool gnttab_need_v2(void)
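Callers that used the removed query-then-end sequence can switch to the new gnttab_try_end_foreign_access() helper, which revokes the grant and frees the reference only when the remote domain no longer maps it, closing the race between the query and the end. A usage sketch with a hypothetical page-backed ring:

if (gnttab_try_end_foreign_access(ref))
	free_page(ring_addr);	/* revoked: the page is ours again */
else
	pr_warn("grant %u still in use by the backend\n", ref);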
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 3c9ae156b597..0ca351f30a6d 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -337,8 +337,8 @@ static void free_active_ring(struct sock_mapping *map)
if (!map->active.ring)
return;
- free_pages((unsigned long)map->active.data.in,
- map->active.ring->ring_order);
+ free_pages_exact(map->active.data.in,
+ PAGE_SIZE << map->active.ring->ring_order);
free_page((unsigned long)map->active.ring);
}
@@ -352,8 +352,8 @@ static int alloc_active_ring(struct sock_mapping *map)
goto out;
map->active.ring->ring_order = PVCALLS_RING_ORDER;
- bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- PVCALLS_RING_ORDER);
+ bytes = alloc_pages_exact(PAGE_SIZE << PVCALLS_RING_ORDER,
+ GFP_KERNEL | __GFP_ZERO);
if (!bytes)
goto out;
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index e8bed1cb76ba..df6890681231 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -379,7 +379,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
unsigned int nr_pages, grant_ref_t *grefs)
{
int err;
- int i, j;
+ unsigned int i;
+ grant_ref_t gref_head;
+
+ err = gnttab_alloc_grant_references(nr_pages, &gref_head);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "granting access to ring page");
+ return err;
+ }
for (i = 0; i < nr_pages; i++) {
unsigned long gfn;
@@ -389,23 +396,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
else
gfn = virt_to_gfn(vaddr);
- err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0);
- if (err < 0) {
- xenbus_dev_fatal(dev, err,
- "granting access to ring page");
- goto fail;
- }
- grefs[i] = err;
+ grefs[i] = gnttab_claim_grant_reference(&gref_head);
+ gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
+ gfn, 0);
vaddr = vaddr + XEN_PAGE_SIZE;
}
return 0;
-
-fail:
- for (j = 0; j < i; j++)
- gnttab_end_foreign_access_ref(grefs[j], 0);
- return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
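With the references pre-allocated via gnttab_alloc_grant_references(), the per-page grant in the loop above can no longer fail, so the partial-unwind path disappears while the API stays the same. Caller-side shape, assuming a ring of nr_pages at vaddr:

err = xenbus_grant_ring(dev, vaddr, nr_pages, grefs);
if (err)
	return err;	/* no grants were handed out on failure */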